
In-kernel qcow2 (Kernel part), OpenSLX, branch 18.x-centos
Diffstat
-rw-r--r--CREDITS12
-rw-r--r--Documentation/00-INDEX204
-rw-r--r--Documentation/ABI/README2
-rw-r--r--Documentation/ABI/stable/sysfs-devices14
-rw-r--r--Documentation/ABI/testing/sysfs-block42
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fsl-mc21
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector36
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8125
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-cros-ec18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-isl2901819
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 (renamed from drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583)14
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp45318
-rw-r--r--Documentation/ABI/testing/sysfs-bus-vfio-mdev111
-rw-r--r--Documentation/ABI/testing/sysfs-class-fpga-bridge11
-rw-r--r--Documentation/ABI/testing/sysfs-class-led14
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei16
-rw-r--r--Documentation/ABI/testing/sysfs-class-remoteproc50
-rw-r--r--Documentation/ABI/testing/sysfs-devices-deferred_probe12
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-ibm-rtl4
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-slab2
-rw-r--r--Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb215
-rw-r--r--Documentation/ABI/testing/sysfs-platform-sst-atom17
-rw-r--r--Documentation/ABI/testing/sysfs-power45
-rw-r--r--Documentation/BUG-HUNTING246
l---------[-rw-r--r--]Documentation/Changes486
-rw-r--r--Documentation/CodingStyle1063
-rw-r--r--Documentation/DocBook/Makefile7
-rw-r--r--Documentation/DocBook/alsa-driver-api.tmpl142
-rw-r--r--Documentation/DocBook/debugobjects.tmpl443
-rw-r--r--Documentation/DocBook/kernel-hacking.tmpl4
-rw-r--r--Documentation/DocBook/tracepoint.tmpl112
-rw-r--r--Documentation/DocBook/uio-howto.tmpl62
-rw-r--r--Documentation/DocBook/usb.tmpl992
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl6206
-rw-r--r--Documentation/IPMI.txt57
-rw-r--r--Documentation/Makefile.sphinx22
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.html25
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/SubmittingPatches842
-rw-r--r--Documentation/VGA-softcursor.txt39
-rw-r--r--Documentation/acpi/DSD-properties-rules.txt97
-rw-r--r--Documentation/acpi/enumeration.txt9
-rw-r--r--Documentation/acpi/gpio-properties.txt62
-rw-r--r--Documentation/acpi/osi.txt187
-rw-r--r--Documentation/acpi/video_extension.txt2
-rw-r--r--Documentation/admin-guide/README.rst411
-rw-r--r--Documentation/admin-guide/binfmt-misc.rst151
-rw-r--r--Documentation/admin-guide/braille-console.rst38
-rw-r--r--Documentation/admin-guide/bug-bisect.rst76
-rw-r--r--Documentation/admin-guide/bug-hunting.rst369
-rw-r--r--Documentation/admin-guide/conf.py10
-rw-r--r--Documentation/admin-guide/devices.rst268
-rw-r--r--Documentation/admin-guide/devices.txt (renamed from Documentation/devices.txt)1176
-rw-r--r--Documentation/admin-guide/dynamic-debug-howto.rst353
-rw-r--r--Documentation/admin-guide/index.rst68
-rw-r--r--Documentation/admin-guide/init.rst (renamed from Documentation/init.txt)29
-rw-r--r--Documentation/admin-guide/initrd.rst (renamed from Documentation/initrd.txt)199
-rw-r--r--Documentation/admin-guide/java.rst (renamed from Documentation/java.txt)297
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst209
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt (renamed from Documentation/kernel-parameters.txt)269
-rw-r--r--Documentation/admin-guide/md.rst (renamed from Documentation/md.txt)528
-rw-r--r--Documentation/admin-guide/module-signing.rst (renamed from Documentation/module-signing.txt)128
-rw-r--r--Documentation/admin-guide/mono.rst (renamed from Documentation/mono.txt)44
-rw-r--r--Documentation/admin-guide/parport.rst286
-rw-r--r--Documentation/admin-guide/ramoops.rst (renamed from Documentation/ramoops.txt)88
-rw-r--r--Documentation/admin-guide/reporting-bugs.rst (renamed from REPORTING-BUGS)70
-rw-r--r--Documentation/admin-guide/security-bugs.rst (renamed from Documentation/SecurityBugs)14
-rw-r--r--Documentation/admin-guide/serial-console.rst (renamed from Documentation/serial-console.txt)68
-rw-r--r--Documentation/admin-guide/sysfs-rules.rst192
-rw-r--r--Documentation/admin-guide/sysrq.rst289
-rw-r--r--Documentation/admin-guide/tainted-kernels.rst59
-rw-r--r--Documentation/admin-guide/unicode.rst (renamed from Documentation/unicode.txt)24
-rw-r--r--Documentation/admin-guide/vga-softcursor.rst62
-rw-r--r--Documentation/arm/Booting2
-rw-r--r--Documentation/assoc_array.txt574
-rw-r--r--Documentation/bad_memory.txt45
-rw-r--r--Documentation/basic_profiling.txt56
-rw-r--r--Documentation/binfmt_misc.txt131
-rw-r--r--Documentation/block/biodoc.txt6
-rw-r--r--Documentation/block/cfq-iosched.txt32
-rw-r--r--Documentation/block/null_blk.txt2
-rw-r--r--Documentation/block/queue-sysfs.txt23
-rw-r--r--Documentation/blockdev/cciss.txt2
-rw-r--r--Documentation/blockdev/ramdisk.txt2
-rw-r--r--Documentation/braille-console.txt34
-rw-r--r--Documentation/cgroup-v1/00-INDEX2
-rw-r--r--Documentation/circular-buffers.txt4
-rw-r--r--Documentation/conf.py22
-rw-r--r--Documentation/core-api/assoc_array.rst551
-rw-r--r--Documentation/core-api/atomic_ops.rst (renamed from Documentation/atomic_ops.txt)340
-rw-r--r--Documentation/core-api/conf.py10
-rw-r--r--Documentation/core-api/debug-objects.rst310
-rw-r--r--Documentation/core-api/index.rst33
-rw-r--r--Documentation/core-api/local_ops.rst206
-rw-r--r--Documentation/core-api/tracepoint.rst55
-rw-r--r--Documentation/core-api/workqueue.rst (renamed from Documentation/workqueue.txt)260
-rw-r--r--Documentation/cpu-freq/cpufreq-stats.txt6
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt54
-rw-r--r--Documentation/cpu-hotplug.txt32
-rw-r--r--Documentation/crypto/api-intro.txt5
-rw-r--r--Documentation/dev-tools/conf.py10
-rw-r--r--Documentation/dev-tools/gcov.rst8
-rw-r--r--Documentation/dev-tools/index.rst (renamed from Documentation/dev-tools/tools.rst)8
-rw-r--r--Documentation/dev-tools/kcov.rst4
-rw-r--r--Documentation/development-process/index.rst9
-rw-r--r--Documentation/device-mapper/delay.txt4
-rw-r--r--Documentation/device-mapper/dm-crypt.txt27
-rw-r--r--Documentation/device-mapper/dm-raid.txt12
-rw-r--r--Documentation/device-mapper/linear.txt8
-rw-r--r--Documentation/device-mapper/striped.txt4
-rw-r--r--Documentation/device-mapper/switch.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/arch_timer.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/arm-boards3
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt236
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5433-clock.txt13
-rw-r--r--Documentation/devicetree/bindings/clock/hisi-crg.txt (renamed from Documentation/devicetree/bindings/clock/hi3519-crg.txt)12
-rw-r--r--Documentation/devicetree/bindings/clock/oxnas,stdclk.txt19
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt37
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk1108-cru.txt59
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32-rcc.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt1
-rw-r--r--Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt78
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec4.txt20
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt112
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt14
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt (renamed from Documentation/devicetree/bindings/display/ti/ti,tfp410.txt)9
-rw-r--r--Documentation/devicetree/bindings/display/ht16k33.txt42
-rw-r--r--Documentation/devicetree/bindings/display/mxsfb.txt53
-rw-r--r--Documentation/devicetree/bindings/display/panel/auo,g133han01.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/auo,g185han01.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/display-timing.txt8
-rw-r--r--Documentation/devicetree/bindings/display/panel/nvd,9128.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt36
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt12
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt10
-rw-r--r--Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt6
-rw-r--r--Documentation/devicetree/bindings/display/zte,vou.txt84
-rw-r--r--Documentation/devicetree/bindings/dma/nbpfaxi.txt8
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt12
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/st_fdma.txt87
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt3
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt16
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt23
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt39
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt19
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt494
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-sx150x.txt41
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_oxnas.txt2
-rw-r--r--Documentation/devicetree/bindings/hwmon/mcp3021.txt21
-rw-r--r--Documentation/devicetree/bindings/hwmon/tmp108.txt14
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt9
-rw-r--r--Documentation/devicetree/bindings/iio/adc/envelope-detector.txt54
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt83
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/dac/dpot-dac.txt41
-rw-r--r--Documentation/devicetree/bindings/iio/dac/mcp4725.txt35
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt46
-rw-r--r--Documentation/devicetree/bindings/iio/humidity/hts221.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/light/isl29018.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2583.txt26
-rw-r--r--Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt30
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt (renamed from Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt)4
-rw-r--r--Documentation/devicetree/bindings/leds/pca963x.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/lp873x.txt8
-rw-r--r--Documentation/devicetree/bindings/mfd/max77620.txt12
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65086.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt32
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt9
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,mmcif.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-cadence.txt30
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci.txt13
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt13
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt2
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.txt16
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_can.txt12
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_canfd.txt14
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt8
-rw-r--r--Documentation/devicetree/bindings/net/dsa/marvell.txt24
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt24
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt27
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt8
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt7
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt1
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt4
-rw-r--r--Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt51
-rw-r--r--Documentation/devicetree/bindings/net/oxnas-dwmac.txt39
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt6
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt2
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt14
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83867.txt12
-rw-r--r--Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt (renamed from Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt)8
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt48
-rw-r--r--Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt17
-rw-r--r--Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt20
-rw-r--r--Documentation/devicetree/bindings/opp/opp.txt27
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie.txt11
-rw-r--r--Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt (renamed from Documentation/devicetree/bindings/phy/meson-usb2-phy.txt)6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt30
-rw-r--r--Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt44
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt74
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt177
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt19
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt10
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ti,da850-pupd.txt55
-rw-r--r--Documentation/devicetree/bindings/power/domain-idle-state.txt33
-rw-r--r--Documentation/devicetree/bindings/power/power_domain.txt43
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-hibvt.txt21
-rw-r--r--Documentation/devicetree/bindings/regulator/pwm-regulator.txt4
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt98
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt30
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/ramoops.txt3
-rw-r--r--Documentation/devicetree/bindings/reset/renesas,rst.txt37
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt14
-rw-r--r--Documentation/devicetree/bindings/rtc/maxim,ds3231.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/pcf8563.txt2
-rw-r--r--Documentation/devicetree/bindings/scsi/hisilicon-sas.txt1
-rw-r--r--Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt41
-rw-r--r--Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt21
-rw-r--r--Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt25
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/guts.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/guts.txt)3
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt88
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l34.txt64
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l42.txt110
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcbsp.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt85
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/rt5514.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/rt5663.txt6
-rwxr-xr-xDocumentation/devicetree/bindings/sound/rt5665.txt68
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt38
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-codec.txt65
-rw-r--r--Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic31xx.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/wm8580.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-armada-3700.txt25
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt19
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sun6i.txt25
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt2
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt3
-rw-r--r--Documentation/devicetree/bindings/ufs/ufs-qcom.txt7
-rw-r--r--Documentation/devicetree/bindings/usb/da8xx-usb.txt43
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/mt8173-mtu3.txt87
-rw-r--r--Documentation/devicetree/bindings/usb/mt8173-xhci.txt54
-rw-r--r--Documentation/devicetree/bindings/usb/ohci-da8xx.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/s3c2410-usb.txt22
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt16
-rw-r--r--Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt33
-rw-r--r--Documentation/devicetree/booting-without-of.txt7
-rw-r--r--Documentation/dmaengine/client.txt16
-rw-r--r--Documentation/dmaengine/dmatest.txt10
-rw-r--r--Documentation/dmaengine/provider.txt2
-rw-r--r--Documentation/dmaengine/pxa_dma.txt2
-rw-r--r--Documentation/doc-guide/conf.py10
-rw-r--r--Documentation/doc-guide/docbook.rst90
-rw-r--r--Documentation/doc-guide/index.rst20
-rw-r--r--Documentation/doc-guide/kernel-doc.rst (renamed from Documentation/kernel-documentation.rst)323
-rw-r--r--Documentation/doc-guide/parse-headers.rst192
-rw-r--r--Documentation/doc-guide/sphinx.rst219
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/driver-api/80211/cfg80211.rst (renamed from Documentation/80211/cfg80211.rst)0
-rw-r--r--Documentation/driver-api/80211/conf.py (renamed from Documentation/80211/conf.py)5
-rw-r--r--Documentation/driver-api/80211/index.rst (renamed from Documentation/80211/index.rst)2
-rw-r--r--Documentation/driver-api/80211/introduction.rst (renamed from Documentation/80211/introduction.rst)0
-rw-r--r--Documentation/driver-api/80211/mac80211-advanced.rst (renamed from Documentation/80211/mac80211-advanced.rst)0
-rw-r--r--Documentation/driver-api/80211/mac80211.rst (renamed from Documentation/80211/mac80211.rst)0
-rw-r--r--Documentation/driver-api/conf.py10
-rw-r--r--Documentation/driver-api/device_link.rst279
-rw-r--r--Documentation/driver-api/dma-buf.rst73
-rw-r--r--Documentation/driver-api/index.rst12
-rw-r--r--Documentation/driver-api/infrastructure.rst70
-rw-r--r--Documentation/driver-api/usb.rst748
-rw-r--r--Documentation/driver-api/vme.rst (renamed from Documentation/vme_api.txt)115
-rw-r--r--Documentation/driver-model/devres.txt4
-rw-r--r--Documentation/dynamic-debug-howto.txt340
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/configfs/configfs.txt2
-rw-r--r--Documentation/filesystems/dax.txt22
-rw-r--r--Documentation/filesystems/ext4.txt13
-rw-r--r--Documentation/filesystems/locks.txt2
-rw-r--r--Documentation/filesystems/nfs/nfsroot.txt4
-rw-r--r--Documentation/filesystems/proc.txt13
-rw-r--r--Documentation/filesystems/vfs.txt1
-rw-r--r--Documentation/filesystems/xfs.txt12
-rw-r--r--Documentation/fpga/fpga-mgr.txt43
-rw-r--r--Documentation/frv/booting.txt2
-rw-r--r--Documentation/gpio/driver.txt62
-rw-r--r--Documentation/gpu/conf.py5
-rw-r--r--Documentation/gpu/drm-internals.rst20
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst11
-rw-r--r--Documentation/gpu/drm-kms.rst97
-rw-r--r--Documentation/gpu/drm-mm.rst2
-rw-r--r--Documentation/gpu/drm-uapi.rst6
-rw-r--r--Documentation/gpu/i915.rst17
-rw-r--r--Documentation/gpu/index.rst2
-rw-r--r--Documentation/hwmon/hwmon-kernel-api.txt58
-rw-r--r--Documentation/hwmon/submitting-patches8
-rw-r--r--Documentation/hwmon/tc65431
-rw-r--r--Documentation/hwmon/tmp10836
-rw-r--r--Documentation/i2c/i2c-topology4
-rw-r--r--Documentation/index.rst62
-rw-r--r--Documentation/isdn/README2
-rw-r--r--Documentation/kbuild/kconfig-language.txt29
-rw-r--r--Documentation/kernel-doc-nano-HOWTO.txt2
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt2
-rw-r--r--Documentation/kselftest.txt11
-rw-r--r--Documentation/leds/leds-lp5523.txt4
-rw-r--r--Documentation/leds/uleds.txt36
-rw-r--r--Documentation/livepatch/livepatch.txt2
-rw-r--r--Documentation/local_ops.txt191
-rw-r--r--Documentation/lockup-watchdogs.txt4
-rw-r--r--Documentation/m68k/kernel-options.txt2
-rw-r--r--Documentation/magic-number.txt158
-rw-r--r--Documentation/media/.gitignore3
-rw-r--r--Documentation/media/Makefile74
-rw-r--r--Documentation/media/dvb-drivers/intro.rst4
-rw-r--r--Documentation/media/index.rst5
-rw-r--r--Documentation/media/intro.rst4
-rw-r--r--Documentation/media/media_api_files/typical_media_device.pdfbin52895 -> 0 bytes
-rw-r--r--Documentation/media/typical_media_device.svg (renamed from Documentation/media/media_api_files/typical_media_device.svg)0
-rw-r--r--Documentation/media/uapi/dvb/dvbstb.svg651
-rw-r--r--Documentation/media/uapi/dvb/intro.rst4
-rw-r--r--Documentation/media/uapi/dvb/intro_files/dvbstb.pdfbin1881 -> 0 bytes
-rw-r--r--Documentation/media/uapi/dvb/intro_files/dvbstb.pngbin22655 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/bayer.svg984
-rw-r--r--Documentation/media/uapi/v4l/constraints.svg346
-rw-r--r--Documentation/media/uapi/v4l/crop.rst4
-rw-r--r--Documentation/media/uapi/v4l/crop.svg281
-rw-r--r--Documentation/media/uapi/v4l/crop_files/crop.gifbin5967 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/crop_files/crop.pdfbin5846 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi.rst12
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.gifbin4741 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdfbin3706 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.gifbin5095 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdfbin3996 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.gifbin2400 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.pdfbin7405 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev.rst16
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/pipeline.pdfbin20276 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/pipeline.pngbin12130 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.pdfbin20729 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.svg63
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.pdfbin46311 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.svg163
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.pdfbin36714 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.svg116
-rw-r--r--Documentation/media/uapi/v4l/diff-v4l.rst4
-rw-r--r--Documentation/media/uapi/v4l/field-order.rst8
-rw-r--r--Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.gifbin25430 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.pdfbin9185 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.gifbin25323 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.pdfbin9173 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/fieldseq_bt.svg2613
-rw-r--r--Documentation/media/uapi/v4l/fieldseq_tb.svg2607
-rw-r--r--Documentation/media/uapi/v4l/nv12mt.svg450
-rw-r--r--Documentation/media/uapi/v4l/nv12mt_example.svg1589
-rw-r--r--Documentation/media/uapi/v4l/pipeline.dot12
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-nv12mt.rst8
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.pngbin1920 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.pngbin5261 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/selection-api-003.rst4
-rw-r--r--Documentation/media/uapi/v4l/selection-api-003_files/selection.pngbin11716 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/selection.svg5812
-rw-r--r--Documentation/media/uapi/v4l/subdev-formats.rst19
-rw-r--r--Documentation/media/uapi/v4l/subdev-formats_files/bayer.pngbin9725 -> 0 bytes
-rw-r--r--Documentation/media/uapi/v4l/subdev-image-processing-crop.svg313
-rw-r--r--Documentation/media/uapi/v4l/subdev-image-processing-full.svg769
-rw-r--r--Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg560
-rw-r--r--Documentation/media/uapi/v4l/vbi_525.svg811
-rw-r--r--Documentation/media/uapi/v4l/vbi_625.svg858
-rw-r--r--Documentation/media/uapi/v4l/vbi_hsync.svg313
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-selection.rst4
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-selection_files/constraints.pngbin3313 -> 0 bytes
-rw-r--r--Documentation/media/v4l-drivers/bttv.rst4
-rw-r--r--Documentation/media/v4l-drivers/cafe_ccic.rst4
-rw-r--r--Documentation/memory-hotplug.txt2
-rw-r--r--Documentation/networking/batman-adv.txt35
-rw-r--r--Documentation/networking/dsa/dsa.txt3
-rw-r--r--Documentation/networking/ieee802154.txt26
-rw-r--r--Documentation/networking/ip-sysctl.txt33
-rw-r--r--Documentation/networking/l2tp.txt8
-rw-r--r--Documentation/networking/mac80211_hwsim/README2
-rw-r--r--Documentation/networking/netconsole.txt2
-rw-r--r--Documentation/networking/netdev-FAQ.txt16
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt36
-rw-r--r--Documentation/networking/phy.txt149
-rw-r--r--Documentation/networking/seg6-sysctl.txt18
-rw-r--r--Documentation/networking/stmmac.txt71
-rw-r--r--Documentation/networking/timestamping.txt10
-rw-r--r--Documentation/networking/vortex.txt2
-rw-r--r--Documentation/oops-tracing.txt279
-rw-r--r--Documentation/parport.txt267
-rw-r--r--Documentation/power/00-INDEX2
-rw-r--r--Documentation/power/devices.txt14
-rw-r--r--Documentation/power/pci.txt10
-rw-r--r--Documentation/power/runtime_pm.txt2
-rw-r--r--Documentation/power/states.txt62
-rw-r--r--Documentation/power/swsusp-dmcrypt.txt2
-rw-r--r--Documentation/process/1.Intro.rst (renamed from Documentation/development-process/1.Intro.rst)4
-rw-r--r--Documentation/process/2.Process.rst (renamed from Documentation/development-process/2.Process.rst)0
-rw-r--r--Documentation/process/3.Early-stage.rst (renamed from Documentation/development-process/3.Early-stage.rst)0
-rw-r--r--Documentation/process/4.Coding.rst (renamed from Documentation/development-process/4.Coding.rst)8
-rw-r--r--Documentation/process/5.Posting.rst (renamed from Documentation/development-process/5.Posting.rst)12
-rw-r--r--Documentation/process/6.Followthrough.rst (renamed from Documentation/development-process/6.Followthrough.rst)0
-rw-r--r--Documentation/process/7.AdvancedTopics.rst (renamed from Documentation/development-process/7.AdvancedTopics.rst)2
-rw-r--r--Documentation/process/8.Conclusion.rst (renamed from Documentation/development-process/8.Conclusion.rst)6
-rw-r--r--Documentation/process/adding-syscalls.rst (renamed from Documentation/adding-syscalls.txt)269
-rw-r--r--Documentation/process/applying-patches.rst (renamed from Documentation/applying-patches.txt)7
-rw-r--r--Documentation/process/changes.rst485
-rw-r--r--Documentation/process/code-of-conflict.rst (renamed from Documentation/CodeOfConflict)3
-rw-r--r--Documentation/process/coding-style.rst1062
-rw-r--r--Documentation/process/conf.py (renamed from Documentation/development-process/conf.py)2
-rw-r--r--Documentation/process/development-process.rst (renamed from Documentation/development-process/development-process.rst)1
-rw-r--r--Documentation/process/email-clients.rst (renamed from Documentation/email-clients.txt)0
-rw-r--r--Documentation/process/howto.rst (renamed from Documentation/HOWTO)58
-rw-r--r--Documentation/process/index.rst57
-rw-r--r--Documentation/process/kernel-docs.rst (renamed from Documentation/kernel-docs.txt)0
-rw-r--r--Documentation/process/magic-number.rst164
-rw-r--r--Documentation/process/management-style.rst (renamed from Documentation/ManagementStyle)2
-rw-r--r--Documentation/process/stable-api-nonsense.rst (renamed from Documentation/stable_api_nonsense.txt)0
-rw-r--r--Documentation/process/stable-kernel-rules.rst (renamed from Documentation/stable_kernel_rules.txt)4
-rw-r--r--Documentation/process/submit-checklist.rst (renamed from Documentation/SubmitChecklist)6
-rw-r--r--Documentation/process/submitting-drivers.rst (renamed from Documentation/SubmittingDrivers)16
-rw-r--r--Documentation/process/submitting-patches.rst836
-rw-r--r--Documentation/process/volatile-considered-harmful.rst (renamed from Documentation/volatile-considered-harmful.txt)22
-rw-r--r--Documentation/rfkill.txt2
-rw-r--r--Documentation/scheduler/completion.txt3
-rw-r--r--Documentation/scsi/scsi-parameters.txt2
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt2
-rw-r--r--Documentation/scsi/sym53c8xx_2.txt2
-rw-r--r--Documentation/security/conf.py8
-rw-r--r--Documentation/security/index.rst7
-rw-r--r--Documentation/security/keys-trusted-encrypted.txt2
-rw-r--r--Documentation/security/tpm/index.rst7
-rw-r--r--Documentation/security/tpm/tpm_vtpm_proxy.rst (renamed from Documentation/tpm/tpm_vtpm_proxy.txt)55
-rw-r--r--Documentation/security/tpm/xen-tpmfront.txt (renamed from Documentation/tpm/xen-tpmfront.txt)0
-rw-r--r--Documentation/sound/alsa-configuration.rst2683
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt2330
-rw-r--r--Documentation/sound/alsa/ControlNames.txt107
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt324
-rw-r--r--Documentation/sound/alsa/VIA82xx-mixer.txt8
-rw-r--r--Documentation/sound/alsa/alsa-parameters.txt135
-rw-r--r--Documentation/sound/alsa/seq_oss.html409
-rw-r--r--Documentation/sound/cards/audigy-mixer.rst (renamed from Documentation/sound/alsa/Audigy-mixer.txt)297
-rw-r--r--Documentation/sound/cards/audiophile-usb.rst (renamed from Documentation/sound/alsa/Audiophile-Usb.txt)258
-rw-r--r--Documentation/sound/cards/bt87x.rst (renamed from Documentation/sound/alsa/Bt87x.txt)23
-rw-r--r--Documentation/sound/cards/cmipci.rst (renamed from Documentation/sound/alsa/CMIPCI.txt)62
-rw-r--r--Documentation/sound/cards/emu10k1-jack.rst (renamed from Documentation/sound/alsa/emu10k1-jack.txt)20
-rw-r--r--Documentation/sound/cards/hdspm.rst (renamed from Documentation/sound/alsa/hdspm.txt)253
-rw-r--r--Documentation/sound/cards/img-spdif-in.rst (renamed from Documentation/sound/alsa/img,spdif-in.txt)24
-rw-r--r--Documentation/sound/cards/index.rst19
-rw-r--r--Documentation/sound/cards/joystick.rst (renamed from Documentation/sound/alsa/Joystick.txt)71
-rw-r--r--Documentation/sound/cards/maya44.rst (renamed from Documentation/sound/alsa/README.maya44)137
-rw-r--r--Documentation/sound/cards/mixart.rst (renamed from Documentation/sound/alsa/MIXART.txt)26
-rw-r--r--Documentation/sound/cards/sb-live-mixer.rst (renamed from Documentation/sound/alsa/SB-Live-mixer.txt)337
-rw-r--r--Documentation/sound/cards/serial-u16550.rst (renamed from Documentation/sound/alsa/serial-u16550.txt)21
-rw-r--r--Documentation/sound/cards/via82xx-mixer.rst8
-rw-r--r--Documentation/sound/designs/channel-mapping-api.rst (renamed from Documentation/sound/alsa/Channel-Mapping-API.txt)77
-rw-r--r--Documentation/sound/designs/compress-offload.rst (renamed from Documentation/sound/alsa/compress_offload.txt)127
-rw-r--r--Documentation/sound/designs/control-names.rst142
-rw-r--r--Documentation/sound/designs/index.rst15
-rw-r--r--Documentation/sound/designs/jack-controls.rst (renamed from Documentation/sound/alsa/Jack-Controls.txt)13
-rw-r--r--Documentation/sound/designs/oss-emulation.rst (renamed from Documentation/sound/alsa/OSS-Emulation.txt)169
-rw-r--r--Documentation/sound/designs/powersave.rst (renamed from Documentation/sound/alsa/powersave.txt)16
-rw-r--r--Documentation/sound/designs/procfile.rst (renamed from Documentation/sound/alsa/Procfile.txt)106
-rw-r--r--Documentation/sound/designs/seq-oss.rst371
-rw-r--r--Documentation/sound/designs/timestamping.rst (renamed from Documentation/sound/alsa/timestamping.txt)143
-rw-r--r--Documentation/sound/hd-audio/controls.rst (renamed from Documentation/sound/alsa/HD-Audio-Controls.txt)33
-rw-r--r--Documentation/sound/hd-audio/dp-mst.rst (renamed from Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt)30
-rw-r--r--Documentation/sound/hd-audio/index.rst10
-rw-r--r--Documentation/sound/hd-audio/models.rst518
-rw-r--r--Documentation/sound/hd-audio/notes.rst (renamed from Documentation/sound/alsa/HD-Audio.txt)635
-rw-r--r--Documentation/sound/index.rst20
-rw-r--r--Documentation/sound/kernel-api/alsa-driver-api.rst134
-rw-r--r--Documentation/sound/kernel-api/index.rst8
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst4219
-rw-r--r--Documentation/sound/oss/oss-parameters.txt2
-rw-r--r--Documentation/sound/soc/clocking.rst (renamed from Documentation/sound/alsa/soc/clocking.txt)13
-rw-r--r--Documentation/sound/soc/codec-to-codec.rst108
-rw-r--r--Documentation/sound/soc/codec.rst (renamed from Documentation/sound/alsa/soc/codec.txt)75
-rw-r--r--Documentation/sound/soc/dai.rst (renamed from Documentation/sound/alsa/soc/DAI.txt)28
-rw-r--r--Documentation/sound/soc/dapm.rst (renamed from Documentation/sound/alsa/soc/dapm.txt)249
-rw-r--r--Documentation/sound/soc/dpcm.rst (renamed from Documentation/sound/alsa/soc/DPCM.txt)280
-rw-r--r--Documentation/sound/soc/index.rst20
-rw-r--r--Documentation/sound/soc/jack.rst (renamed from Documentation/sound/alsa/soc/jack.txt)1
-rw-r--r--Documentation/sound/soc/machine.rst (renamed from Documentation/sound/alsa/soc/machine.txt)22
-rw-r--r--Documentation/sound/soc/overview.rst (renamed from Documentation/sound/alsa/soc/overview.txt)32
-rw-r--r--Documentation/sound/soc/platform.rst (renamed from Documentation/sound/alsa/soc/platform.txt)31
-rw-r--r--Documentation/sound/soc/pops-clicks.rst (renamed from Documentation/sound/alsa/soc/pops_clicks.txt)15
-rw-r--r--Documentation/sphinx/kerneldoc.py (renamed from Documentation/sphinx/kernel-doc.py)0
-rwxr-xr-xDocumentation/sphinx/parse-headers.pl95
-rw-r--r--Documentation/sync_file.txt14
-rw-r--r--Documentation/sysctl/kernel.txt12
-rw-r--r--Documentation/sysfs-rules.txt184
-rw-r--r--Documentation/sysrq.txt257
-rw-r--r--Documentation/trace/ftrace.txt20
-rw-r--r--Documentation/trace/intel_th.txt22
-rw-r--r--Documentation/trace/stm.txt37
-rw-r--r--Documentation/trace/uprobetracer.txt6
-rw-r--r--Documentation/translations/ja_JP/HOWTO (renamed from Documentation/ja_JP/HOWTO)24
-rw-r--r--Documentation/translations/ja_JP/SubmitChecklist (renamed from Documentation/ja_JP/SubmitChecklist)8
-rw-r--r--Documentation/translations/ja_JP/SubmittingPatches (renamed from Documentation/ja_JP/SubmittingPatches)18
-rw-r--r--Documentation/translations/ja_JP/stable_api_nonsense.txt (renamed from Documentation/ja_JP/stable_api_nonsense.txt)4
-rw-r--r--Documentation/translations/ja_JP/stable_kernel_rules.txt (renamed from Documentation/ja_JP/stable_kernel_rules.txt)6
-rw-r--r--Documentation/translations/ko_KR/howto.rst (renamed from Documentation/ko_KR/HOWTO)172
-rw-r--r--Documentation/translations/ko_KR/index.rst12
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt (renamed from Documentation/ko_KR/memory-barriers.txt)36
-rw-r--r--Documentation/translations/ko_KR/stable_api_nonsense.txt (renamed from Documentation/ko_KR/stable_api_nonsense.txt)4
-rw-r--r--Documentation/translations/zh_CN/CodingStyle (renamed from Documentation/zh_CN/CodingStyle)8
-rw-r--r--Documentation/translations/zh_CN/HOWTO (renamed from Documentation/zh_CN/HOWTO)30
-rw-r--r--Documentation/translations/zh_CN/IRQ.txt (renamed from Documentation/zh_CN/IRQ.txt)0
-rw-r--r--Documentation/translations/zh_CN/SecurityBugs (renamed from Documentation/zh_CN/SecurityBugs)6
-rw-r--r--Documentation/translations/zh_CN/SubmittingDrivers (renamed from Documentation/zh_CN/SubmittingDrivers)12
-rw-r--r--Documentation/translations/zh_CN/SubmittingPatches (renamed from Documentation/zh_CN/SubmittingPatches)14
-rw-r--r--Documentation/translations/zh_CN/arm/Booting (renamed from Documentation/zh_CN/arm/Booting)2
-rw-r--r--Documentation/translations/zh_CN/arm/kernel_user_helpers.txt (renamed from Documentation/zh_CN/arm/kernel_user_helpers.txt)0
-rw-r--r--Documentation/translations/zh_CN/arm64/booting.txt (renamed from Documentation/zh_CN/arm64/booting.txt)0
-rw-r--r--Documentation/translations/zh_CN/arm64/legacy_instructions.txt (renamed from Documentation/zh_CN/arm64/legacy_instructions.txt)0
-rw-r--r--Documentation/translations/zh_CN/arm64/memory.txt (renamed from Documentation/zh_CN/arm64/memory.txt)0
-rw-r--r--Documentation/translations/zh_CN/arm64/silicon-errata.txt (renamed from Documentation/zh_CN/arm64/silicon-errata.txt)0
-rw-r--r--Documentation/translations/zh_CN/arm64/tagged-pointers.txt (renamed from Documentation/zh_CN/arm64/tagged-pointers.txt)0
-rw-r--r--Documentation/translations/zh_CN/basic_profiling.txt (renamed from Documentation/zh_CN/basic_profiling.txt)0
-rw-r--r--Documentation/translations/zh_CN/email-clients.txt (renamed from Documentation/zh_CN/email-clients.txt)4
-rw-r--r--Documentation/translations/zh_CN/filesystems/sysfs.txt (renamed from Documentation/zh_CN/filesystems/sysfs.txt)0
-rw-r--r--Documentation/translations/zh_CN/gpio.txt (renamed from Documentation/zh_CN/gpio.txt)0
-rw-r--r--Documentation/translations/zh_CN/io_ordering.txt (renamed from Documentation/zh_CN/io_ordering.txt)0
-rw-r--r--Documentation/translations/zh_CN/magic-number.txt (renamed from Documentation/zh_CN/magic-number.txt)0
-rw-r--r--Documentation/translations/zh_CN/oops-tracing.txt (renamed from Documentation/zh_CN/oops-tracing.txt)6
-rw-r--r--Documentation/translations/zh_CN/sparse.txt (renamed from Documentation/zh_CN/sparse.txt)0
-rw-r--r--Documentation/translations/zh_CN/stable_api_nonsense.txt (renamed from Documentation/zh_CN/stable_api_nonsense.txt)4
-rw-r--r--Documentation/translations/zh_CN/stable_kernel_rules.txt (renamed from Documentation/zh_CN/stable_kernel_rules.txt)6
-rw-r--r--Documentation/translations/zh_CN/video4linux/omap3isp.txt (renamed from Documentation/zh_CN/video4linux/omap3isp.txt)0
-rw-r--r--Documentation/translations/zh_CN/video4linux/v4l2-framework.txt (renamed from Documentation/zh_CN/video4linux/v4l2-framework.txt)0
-rw-r--r--Documentation/translations/zh_CN/volatile-considered-harmful.txt (renamed from Documentation/zh_CN/volatile-considered-harmful.txt)4
-rw-r--r--Documentation/vfio-mediated-device.txt398
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/api.txt16
-rw-r--r--Documentation/virtual/kvm/halt-polling.txt127
-rw-r--r--Documentation/virtual/kvm/locking.txt12
-rw-r--r--Documentation/virtual/kvm/msr.txt9
-rw-r--r--Documentation/virtual/kvm/review-checklist.txt4
-rw-r--r--Documentation/vm/numa2
-rw-r--r--Documentation/vm/transhuge.txt5
-rw-r--r--Documentation/watchdog/convert_drivers_to_kernel_api.txt2
-rw-r--r--Documentation/watchdog/watchdog-parameters.txt2
-rw-r--r--Documentation/x86/boot.txt2
-rw-r--r--Documentation/x86/x86_64/boot-options.txt4
-rw-r--r--MAINTAINERS390
-rw-r--r--Makefile30
-rw-r--r--README408
-rw-r--r--arch/Kconfig11
-rw-r--r--arch/alpha/include/asm/mutex.h9
-rw-r--r--arch/alpha/include/asm/processor.h1
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/alpha/kernel/osf_sys.c8
-rw-r--r--arch/alpha/kernel/ptrace.c2
-rw-r--r--arch/arc/Makefile7
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi1
-rw-r--r--arch/arc/boot/dts/axc001.dtsi2
-rw-r--r--arch/arc/boot/dts/nsim_700.dts2
-rw-r--r--arch/arc/boot/dts/nsimosci.dts4
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig3
-rw-r--r--arch/arc/include/asm/arcregs.h2
-rw-r--r--arch/arc/include/asm/delay.h9
-rw-r--r--arch/arc/include/asm/mutex.h18
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/include/asm/processor.h3
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/mcip.c32
-rw-r--r--arch/arc/kernel/process.c20
-rw-r--r--arch/arc/kernel/smp.c23
-rw-r--r--arch/arc/kernel/time.c19
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arc/mm/dma.c31
-rw-r--r--arch/arc/plat-eznps/smp.c6
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/dra72-evm-revc.dts10
-rw-r--r--arch/arm/boot/dts/hisi-x5hd2.dtsi6
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts14
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi5
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi5
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi7
-rw-r--r--arch/arm/boot/dts/orion5x-linkstation-lsgl.dts4
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi5
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi5
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi1
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi2
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi16
-rw-r--r--arch/arm/boot/dts/stih410-b2260.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts (renamed from arch/arm/boot/dts/ntc-gr8-evb.dts)2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8.dtsi (renamed from arch/arm/boot/dts/ntc-gr8.dtsi)0
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi2
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts51
-rw-r--r--arch/arm/common/bL_switcher.c34
-rw-r--r--arch/arm/common/dmabounce.c16
-rw-r--r--arch/arm/configs/multi_v7_defconfig8
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/qcom_defconfig1
-rw-r--r--arch/arm/crypto/Kconfig18
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/aes-ce-glue.c395
-rw-r--r--arch/arm/crypto/aesbs-glue.c380
-rw-r--r--arch/arm/crypto/crc32-ce-core.S306
-rw-r--r--arch/arm/crypto/crc32-ce-glue.c242
-rw-r--r--arch/arm/crypto/crct10dif-ce-core.S427
-rw-r--r--arch/arm/crypto/crct10dif-ce-glue.c101
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/arch_gicv3.h54
-rw-r--r--arch/arm/include/asm/efi.h3
-rw-r--r--arch/arm/include/asm/io.h1
-rw-r--r--arch/arm/include/asm/kvm_asm.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm/include/asm/mutex.h21
-rw-r--r--arch/arm/include/asm/processor.h2
-rw-r--r--arch/arm/include/asm/tlb.h21
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/asm/xen/hypercall.h88
-rw-r--r--arch/arm/include/asm/xen/hypervisor.h40
-rw-r--r--arch/arm/include/asm/xen/interface.h86
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h99
-rw-r--r--arch/arm/include/asm/xen/page.h123
-rw-r--r--arch/arm/include/uapi/asm/kvm.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h3
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/armksyms.c183
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/kernel/entry-ftrace.S3
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/hw_breakpoint.c47
-rw-r--r--arch/arm/kernel/smccc-call.S3
-rw-r--r--arch/arm/kernel/traps.c20
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S5
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/kvm/Makefile1
-rw-r--r--arch/arm/kvm/arm.c33
-rw-r--r--arch/arm/kvm/hyp/tlb.c15
-rw-r--r--arch/arm/lib/ashldi3.S3
-rw-r--r--arch/arm/lib/ashrdi3.S3
-rw-r--r--arch/arm/lib/backtrace.S37
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/bswapsdi2.S3
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/lib/copy_from_user.S2
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/copy_to_user.S4
-rw-r--r--arch/arm/lib/csumipv6.S3
-rw-r--r--arch/arm/lib/csumpartial.S2
-rw-r--r--arch/arm/lib/csumpartialcopy.S1
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S2
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S1
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/lib/div64.S2
-rw-r--r--arch/arm/lib/findbit.S9
-rw-r--r--arch/arm/lib/getuser.S9
-rw-r--r--arch/arm/lib/io-readsb.S2
-rw-r--r--arch/arm/lib/io-readsl.S2
-rw-r--r--arch/arm/lib/io-readsw-armv3.S3
-rw-r--r--arch/arm/lib/io-readsw-armv4.S2
-rw-r--r--arch/arm/lib/io-writesb.S2
-rw-r--r--arch/arm/lib/io-writesl.S2
-rw-r--r--arch/arm/lib/io-writesw-armv3.S2
-rw-r--r--arch/arm/lib/io-writesw-armv4.S2
-rw-r--r--arch/arm/lib/lib1funcs.S9
-rw-r--r--arch/arm/lib/lshrdi3.S3
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memcpy.S3
-rw-r--r--arch/arm/lib/memmove.S2
-rw-r--r--arch/arm/lib/memset.S3
-rw-r--r--arch/arm/lib/memzero.S2
-rw-r--r--arch/arm/lib/muldi3.S3
-rw-r--r--arch/arm/lib/putuser.S5
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c3
-rw-r--r--arch/arm/lib/ucmpdi2.S3
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/gpc.c17
-rw-r--r--arch/arm/mach-imx/ssi-fiq-ksym.c20
-rw-r--r--arch/arm/mach-imx/ssi-fiq.S7
-rw-r--r--arch/arm/mach-integrator/impd1.c1
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/id.c16
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c3
-rw-r--r--arch/arm/mach-omap2/voltage.c6
-rw-r--r--arch/arm/mach-pxa/idp.c1
-rw-r--r--arch/arm/mach-pxa/mainstone.c1
-rw-r--r--arch/arm/mach-pxa/stargate2.c1
-rw-r--r--arch/arm/mach-s3c64xx/pl080.c32
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c15
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c27
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c5
-rw-r--r--arch/arm/mm/abort-lv4t.S34
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/proc-v7m.S2
-rw-r--r--arch/arm/plat-samsung/devs.c24
-rw-r--r--arch/arm/xen/enlighten.c3
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm64/Kconfig13
-rw-r--r--arch/arm64/Kconfig.debug35
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts5
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi6
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-db.dts23
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi27
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi14
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-evb.dts63
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi29
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi5
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi8
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/crypto/.gitignore2
-rw-r--r--arch/arm64/crypto/Kconfig23
-rw-r--r--arch/arm64/crypto/Makefile23
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S53
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c50
-rw-r--r--arch/arm64/crypto/aes-ce-cipher.c25
-rw-r--r--arch/arm64/crypto/aes-ce.S1
-rw-r--r--arch/arm64/crypto/aes-glue.c381
-rw-r--r--arch/arm64/crypto/aes-modes.S3
-rw-r--r--arch/arm64/crypto/aes-neon.S25
-rw-r--r--arch/arm64/crypto/crc32-ce-core.S266
-rw-r--r--arch/arm64/crypto/crc32-ce-glue.c212
-rw-r--r--arch/arm64/crypto/crct10dif-ce-core.S392
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c95
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S6
-rw-r--r--arch/arm64/crypto/sha1-ce-core.S4
-rw-r--r--arch/arm64/crypto/sha2-ce-core.S4
-rw-r--r--arch/arm64/crypto/sha256-core.S_shipped2061
-rw-r--r--arch/arm64/crypto/sha256-glue.c185
-rw-r--r--arch/arm64/crypto/sha512-armv8.pl778
-rw-r--r--arch/arm64/crypto/sha512-core.S_shipped1085
-rw-r--r--arch/arm64/crypto/sha512-glue.c94
-rw-r--r--arch/arm64/include/asm/Kbuild2
-rw-r--r--arch/arm64/include/asm/acpi.h23
-rw-r--r--arch/arm64/include/asm/alternative.h2
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h61
-rw-r--r--arch/arm64/include/asm/assembler.h48
-rw-r--r--arch/arm64/include/asm/cacheflush.h7
-rw-r--r--arch/arm64/include/asm/cpucaps.h41
-rw-r--r--arch/arm64/include/asm/cpufeature.h50
-rw-r--r--arch/arm64/include/asm/current.h22
-rw-r--r--arch/arm64/include/asm/debug-monitors.h3
-rw-r--r--arch/arm64/include/asm/efi.h29
-rw-r--r--arch/arm64/include/asm/elf.h12
-rw-r--r--arch/arm64/include/asm/futex.h17
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h6
-rw-r--r--arch/arm64/include/asm/io.h1
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h7
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm64/include/asm/lse.h1
-rw-r--r--arch/arm64/include/asm/mmu.h3
-rw-r--r--arch/arm64/include/asm/mmu_context.h53
-rw-r--r--arch/arm64/include/asm/neon.h3
-rw-r--r--arch/arm64/include/asm/opcodes.h5
-rw-r--r--arch/arm64/include/asm/percpu.h18
-rw-r--r--arch/arm64/include/asm/perf_event.h12
-rw-r--r--arch/arm64/include/asm/probes.h21
-rw-r--r--arch/arm64/include/asm/processor.h2
-rw-r--r--arch/arm64/include/asm/ptdump.h22
-rw-r--r--arch/arm64/include/asm/ptrace.h8
-rw-r--r--arch/arm64/include/asm/smp.h14
-rw-r--r--arch/arm64/include/asm/stack_pointer.h9
-rw-r--r--arch/arm64/include/asm/suspend.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h45
-rw-r--r--arch/arm64/include/asm/thread_info.h40
-rw-r--r--arch/arm64/include/asm/uaccess.h175
-rw-r--r--arch/arm64/include/asm/uprobes.h36
-rw-r--r--arch/arm64/include/asm/xen/hypercall.h2
-rw-r--r--arch/arm64/include/asm/xen/hypervisor.h2
-rw-r--r--arch/arm64/include/asm/xen/interface.h2
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h2
-rw-r--r--arch/arm64/include/asm/xen/page.h2
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c16
-rw-r--r--arch/arm64/kernel/asm-offsets.c13
-rw-r--r--arch/arm64/kernel/cpufeature.c18
-rw-r--r--arch/arm64/kernel/cpuinfo.c36
-rw-r--r--arch/arm64/kernel/debug-monitors.c40
-rw-r--r--arch/arm64/kernel/efi.c8
-rw-r--r--arch/arm64/kernel/entry.S110
-rw-r--r--arch/arm64/kernel/fpsimd.c14
-rw-r--r--arch/arm64/kernel/head.S30
-rw-r--r--arch/arm64/kernel/hibernate.c4
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c153
-rw-r--r--arch/arm64/kernel/insn.c1
-rw-r--r--arch/arm64/kernel/kgdb.c3
-rw-r--r--arch/arm64/kernel/perf_event.c10
-rw-r--r--arch/arm64/kernel/probes/Makefile2
-rw-r--r--arch/arm64/kernel/probes/decode-insn.c33
-rw-r--r--arch/arm64/kernel/probes/decode-insn.h8
-rw-r--r--arch/arm64/kernel/probes/kprobes.c36
-rw-r--r--arch/arm64/kernel/probes/simulate-insn.c16
-rw-r--r--arch/arm64/kernel/probes/uprobes.c216
-rw-r--r--arch/arm64/kernel/process.c38
-rw-r--r--arch/arm64/kernel/ptrace.c7
-rw-r--r--arch/arm64/kernel/return_address.c1
-rw-r--r--arch/arm64/kernel/setup.c9
-rw-r--r--arch/arm64/kernel/signal.c3
-rw-r--r--arch/arm64/kernel/sleep.S3
-rw-r--r--arch/arm64/kernel/smp.c14
-rw-r--r--arch/arm64/kernel/stacktrace.c7
-rw-r--r--arch/arm64/kernel/suspend.c6
-rw-r--r--arch/arm64/kernel/topology.c223
-rw-r--r--arch/arm64/kernel/traps.c28
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S5
-rw-r--r--arch/arm64/kvm/Kconfig4
-rw-r--r--arch/arm64/kvm/handle_exit.c11
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S9
-rw-r--r--arch/arm64/kvm/hyp/switch.c13
-rw-r--r--arch/arm64/kvm/hyp/tlb.c15
-rw-r--r--arch/arm64/kvm/reset.c6
-rw-r--r--arch/arm64/kvm/sys_regs.c10
-rw-r--r--arch/arm64/lib/clear_user.S11
-rw-r--r--arch/arm64/lib/copy_from_user.S11
-rw-r--r--arch/arm64/lib/copy_in_user.S11
-rw-r--r--arch/arm64/lib/copy_to_user.S11
-rw-r--r--arch/arm64/mm/Makefile3
-rw-r--r--arch/arm64/mm/cache.S6
-rw-r--r--arch/arm64/mm/context.c7
-rw-r--r--arch/arm64/mm/dma-mapping.c5
-rw-r--r--arch/arm64/mm/dump.c106
-rw-r--r--arch/arm64/mm/fault.c22
-rw-r--r--arch/arm64/mm/flush.c9
-rw-r--r--arch/arm64/mm/hugetlbpage.c22
-rw-r--r--arch/arm64/mm/mmu.c137
-rw-r--r--arch/arm64/mm/proc.S12
-rw-r--r--arch/arm64/mm/ptdump_debugfs.c31
-rw-r--r--arch/arm64/xen/hypercall.S15
-rw-r--r--arch/avr32/include/asm/mutex.h9
-rw-r--r--arch/avr32/include/asm/processor.h1
-rw-r--r--arch/avr32/include/uapi/asm/socket.h2
-rw-r--r--arch/avr32/include/uapi/asm/unistd.h3
-rw-r--r--arch/avr32/kernel/syscall_table.S3
-rw-r--r--arch/avr32/mach-at32ap/clock.c33
-rw-r--r--arch/avr32/mach-at32ap/pio.c6
-rw-r--r--arch/avr32/mm/dma-coherent.c7
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/processor.h1
-rw-r--r--arch/blackfin/kernel/dma-mapping.c8
-rw-r--r--arch/blackfin/kernel/ptrace.c4
-rw-r--r--arch/blackfin/mach-bf561/coreb.c10
-rw-r--r--arch/c6x/include/asm/mutex.h6
-rw-r--r--arch/c6x/include/asm/processor.h1
-rw-r--r--arch/c6x/kernel/dma.c14
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c2
-rw-r--r--arch/cris/boot/compressed/Makefile3
-rw-r--r--arch/cris/boot/rescue/Makefile9
-rw-r--r--arch/cris/include/asm/mutex.h9
-rw-r--r--arch/cris/include/asm/processor.h1
-rw-r--r--arch/frv/include/asm/mutex.h9
-rw-r--r--arch/frv/include/asm/processor.h1
-rw-r--r--arch/frv/include/uapi/asm/socket.h2
-rw-r--r--arch/frv/mb93090-mb00/pci-dma-nommu.c14
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c9
-rw-r--r--arch/h8300/include/asm/mutex.h9
-rw-r--r--arch/h8300/include/asm/processor.h1
-rw-r--r--arch/hexagon/include/asm/mutex.h8
-rw-r--r--arch/hexagon/include/asm/processor.h1
-rw-r--r--arch/hexagon/kernel/dma.c6
-rw-r--r--arch/ia64/include/asm/mutex.h90
-rw-r--r--arch/ia64/include/asm/processor.h1
-rw-r--r--arch/ia64/include/asm/tlb.h25
-rw-r--r--arch/ia64/include/uapi/asm/socket.h2
-rw-r--r--arch/ia64/kernel/err_inject.c74
-rw-r--r--arch/ia64/kernel/palinfo.c60
-rw-r--r--arch/ia64/kernel/ptrace.c2
-rw-r--r--arch/ia64/kernel/salinfo.c83
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/ia64/kernel/topology.c54
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m32r/include/asm/device.h6
-rw-r--r--arch/m32r/include/asm/dma-mapping.h32
-rw-r--r--arch/m32r/include/asm/mutex.h9
-rw-r--r--arch/m32r/include/asm/processor.h1
-rw-r--r--arch/m32r/include/uapi/asm/socket.h2
-rw-r--r--arch/m32r/platforms/m32700ut/setup.c2
-rw-r--r--arch/m68k/Kconfig.machine6
-rw-r--r--arch/m68k/amiga/config.c3
-rw-r--r--arch/m68k/atari/config.c2
-rw-r--r--arch/m68k/coldfire/Makefile1
-rw-r--r--arch/m68k/coldfire/amcore.c156
-rw-r--r--arch/m68k/coldfire/device.c159
-rw-r--r--arch/m68k/coldfire/m5206.c12
-rw-r--r--arch/m68k/coldfire/m520x.c24
-rw-r--r--arch/m68k/coldfire/m523x.c18
-rw-r--r--arch/m68k/coldfire/m5249.c25
-rw-r--r--arch/m68k/coldfire/m525x.c10
-rw-r--r--arch/m68k/coldfire/m527x.c28
-rw-r--r--arch/m68k/coldfire/m528x.c18
-rw-r--r--arch/m68k/coldfire/m5307.c14
-rw-r--r--arch/m68k/coldfire/m53xx.c20
-rw-r--r--arch/m68k/coldfire/m5407.c14
-rw-r--r--arch/m68k/coldfire/m5441x.c12
-rw-r--r--arch/m68k/coldfire/m54xx.c17
-rw-r--r--arch/m68k/configs/amcore_defconfig118
-rw-r--r--arch/m68k/configs/amiga_defconfig8
-rw-r--r--arch/m68k/configs/apollo_defconfig8
-rw-r--r--arch/m68k/configs/atari_defconfig8
-rw-r--r--arch/m68k/configs/bvme6000_defconfig8
-rw-r--r--arch/m68k/configs/hp300_defconfig8
-rw-r--r--arch/m68k/configs/mac_defconfig8
-rw-r--r--arch/m68k/configs/multi_defconfig8
-rw-r--r--arch/m68k/configs/mvme147_defconfig8
-rw-r--r--arch/m68k/configs/mvme16x_defconfig8
-rw-r--r--arch/m68k/configs/q40_defconfig8
-rw-r--r--arch/m68k/configs/sun3_defconfig8
-rw-r--r--arch/m68k/configs/sun3x_defconfig8
-rw-r--r--arch/m68k/emu/nfeth.c1
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/delay.h2
-rw-r--r--arch/m68k/include/asm/m5206sim.h8
-rw-r--r--arch/m68k/include/asm/m520xsim.h8
-rw-r--r--arch/m68k/include/asm/m523xsim.h10
-rw-r--r--arch/m68k/include/asm/m527xsim.h8
-rw-r--r--arch/m68k/include/asm/m528xsim.h9
-rw-r--r--arch/m68k/include/asm/m5307sim.h9
-rw-r--r--arch/m68k/include/asm/m53xxsim.h8
-rw-r--r--arch/m68k/include/asm/m5407sim.h8
-rw-r--r--arch/m68k/include/asm/m54xxsim.h11
-rw-r--r--arch/m68k/include/asm/processor.h1
-rw-r--r--arch/m68k/kernel/dma.c8
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/processor.h1
-rw-r--r--arch/metag/kernel/dma.c16
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/irq.h2
-rw-r--r--arch/microblaze/include/asm/mutex.h1
-rw-r--r--arch/microblaze/include/asm/processor.h1
-rw-r--r--arch/microblaze/kernel/Makefile2
-rw-r--r--arch/microblaze/kernel/dma.c10
-rw-r--r--arch/microblaze/kernel/intc.c196
-rw-r--r--arch/microblaze/kernel/irq.c4
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/boot/dts/mti/malta.dts3
-rw-r--r--arch/mips/generic/init.c16
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/fpu_emulator.h13
-rw-r--r--arch/mips/include/asm/kvm_host.h7
-rw-r--r--arch/mips/include/asm/mipsregs.h6
-rw-r--r--arch/mips/include/asm/processor.h1
-rw-r--r--arch/mips/include/asm/switch_to.h18
-rw-r--r--arch/mips/include/asm/tlb.h13
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/mips-cpc.c11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c10
-rw-r--r--arch/mips/kernel/ptrace.c8
-rw-r--r--arch/mips/kernel/ptrace32.c4
-rw-r--r--arch/mips/kernel/r2300_fpu.S138
-rw-r--r--arch/mips/kernel/r6000_fpu.S89
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/time.c2
-rw-r--r--arch/mips/kernel/traps.c137
-rw-r--r--arch/mips/kvm/emulate.c32
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/kvm/mmu.c4
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c2
-rw-r--r--arch/mips/lib/dump_tlb.c44
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c18
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c2
-rw-r--r--arch/mips/mm/dma-default.c8
-rw-r--r--arch/mips/mm/fault.c9
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c6
-rw-r--r--arch/mn10300/include/asm/mutex.h16
-rw-r--r--arch/mn10300/include/asm/processor.h1
-rw-r--r--arch/mn10300/include/uapi/asm/socket.h2
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/smc91111.h2
-rw-r--r--arch/nios2/include/asm/mutex.h1
-rw-r--r--arch/nios2/include/asm/processor.h1
-rw-r--r--arch/nios2/kernel/time.c1
-rw-r--r--arch/nios2/mm/dma-mapping.c26
-rw-r--r--arch/openrisc/Kconfig4
-rw-r--r--arch/openrisc/README.openrisc8
-rw-r--r--arch/openrisc/TODO.openrisc3
-rw-r--r--arch/openrisc/include/asm/cache.h2
-rw-r--r--arch/openrisc/include/asm/mutex.h27
-rw-r--r--arch/openrisc/include/asm/pgalloc.h1
-rw-r--r--arch/openrisc/include/asm/pgtable.h2
-rw-r--r--arch/openrisc/include/asm/processor.h1
-rw-r--r--arch/openrisc/kernel/dma.c3
-rw-r--r--arch/openrisc/kernel/entry.S12
-rw-r--r--arch/openrisc/kernel/process.c13
-rw-r--r--arch/openrisc/kernel/setup.c48
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S8
-rw-r--r--arch/openrisc/mm/init.c4
-rw-r--r--arch/openrisc/mm/ioremap.c4
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/pgtable.h8
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/cache.c40
-rw-r--r--arch/parisc/kernel/drivers.c6
-rw-r--r--arch/parisc/kernel/inventory.c8
-rw-r--r--arch/parisc/kernel/pacache.S49
-rw-r--r--arch/parisc/kernel/pci-dma.c22
-rw-r--r--arch/parisc/kernel/setup.c4
-rw-r--r--arch/parisc/kernel/syscall.S66
-rw-r--r--arch/parisc/kernel/time.c57
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/boot/main.c8
-rw-r--r--arch/powerpc/boot/opal-calls.S13
-rw-r--r--arch/powerpc/boot/opal.c13
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/configs/dpaa.config3
-rw-r--r--arch/powerpc/crypto/Makefile2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h12
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h47
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h13
-rw-r--r--arch/powerpc/include/asm/checksum.h12
-rw-r--r--arch/powerpc/include/asm/cputime.h14
-rw-r--r--arch/powerpc/include/asm/exception-64s.h15
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_host.h27
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h49
-rw-r--r--arch/powerpc/include/asm/mmu.h19
-rw-r--r--arch/powerpc/include/asm/mutex.h132
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/reg.h15
-rw-r--r--arch/powerpc/include/asm/spinlock.h8
-rw-r--r--arch/powerpc/include/asm/tlb.h16
-rw-r--r--arch/powerpc/include/asm/xilinx_intc.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h5
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S10
-rw-r--r--arch/powerpc/kernel/dma.c9
-rw-r--r--arch/powerpc/kernel/eeh_driver.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S11
-rw-r--r--arch/powerpc/kernel/process.c42
-rw-r--r--arch/powerpc/kernel/ptrace32.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c20
-rw-r--r--arch/powerpc/kernel/sysfs.c50
-rw-r--r--arch/powerpc/kernel/time.c8
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c63
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv.c302
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c72
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c223
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c23
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S140
-rw-r--r--arch/powerpc/kvm/powerpc.c16
-rw-r--r--arch/powerpc/kvm/trace_hv.h2
-rw-r--r--arch/powerpc/mm/hash64_4k.c2
-rw-r--r--arch/powerpc/mm/hash64_64k.c4
-rw-r--r--arch/powerpc/mm/hash_native_64.c30
-rw-r--r--arch/powerpc/mm/hash_utils_64.c40
-rw-r--r--arch/powerpc/mm/numa.c13
-rw-r--r--arch/powerpc/mm/pgtable-radix.c22
-rw-r--r--arch/powerpc/mm/pgtable_64.c34
-rw-r--r--arch/powerpc/mm/tlb-radix.c4
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c4
-rw-r--r--arch/powerpc/platforms/40x/Kconfig1
-rw-r--r--arch/powerpc/platforms/40x/virtex.c2
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/virtex.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/ps3/htab.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c211
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/boot/compressed/head.S2
-rw-r--r--arch/s390/configs/default_defconfig6
-rw-r--r--arch/s390/configs/gcov_defconfig4
-rw-r--r--arch/s390/configs/performance_defconfig4
-rw-r--r--arch/s390/crypto/prng.c6
-rw-r--r--arch/s390/hypfs/hypfs_diag.c6
-rw-r--r--arch/s390/hypfs/inode.c24
-rw-r--r--arch/s390/include/asm/Kbuild4
-rw-r--r--arch/s390/include/asm/asm-offsets.h1
-rw-r--r--arch/s390/include/asm/atomic.h207
-rw-r--r--arch/s390/include/asm/atomic_ops.h130
-rw-r--r--arch/s390/include/asm/bitops.h62
-rw-r--r--arch/s390/include/asm/cpu_mf.h3
-rw-r--r--arch/s390/include/asm/elf.h6
-rw-r--r--arch/s390/include/asm/facilities_src.h82
-rw-r--r--arch/s390/include/asm/ipl.h2
-rw-r--r--arch/s390/include/asm/lowcore.h5
-rw-r--r--arch/s390/include/asm/mutex.h9
-rw-r--r--arch/s390/include/asm/pci_clp.h5
-rw-r--r--arch/s390/include/asm/pgalloc.h22
-rw-r--r--arch/s390/include/asm/preempt.h137
-rw-r--r--arch/s390/include/asm/processor.h11
-rw-r--r--arch/s390/include/asm/sclp.h10
-rw-r--r--arch/s390/include/asm/scsw.h6
-rw-r--r--arch/s390/include/asm/smp.h8
-rw-r--r--arch/s390/include/asm/spinlock.h8
-rw-r--r--arch/s390/include/asm/string.h3
-rw-r--r--arch/s390/include/asm/sysinfo.h7
-rw-r--r--arch/s390/include/asm/thread_info.h24
-rw-r--r--arch/s390/include/asm/timex.h41
-rw-r--r--arch/s390/include/asm/tlb.h14
-rw-r--r--arch/s390/include/asm/topology.h28
-rw-r--r--arch/s390/include/asm/uaccess.h6
-rw-r--r--arch/s390/include/asm/vdso.h2
-rw-r--r--arch/s390/include/uapi/asm/Kbuild5
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/s390/kernel/Makefile65
-rw-r--r--arch/s390/kernel/asm-offsets.c17
-rw-r--r--arch/s390/kernel/compat_signal.c4
-rw-r--r--arch/s390/kernel/early.c50
-rw-r--r--arch/s390/kernel/entry.S51
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/lgr.c5
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c53
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/ptrace.c14
-rw-r--r--arch/s390/kernel/setup.c22
-rw-r--r--arch/s390/kernel/signal.c14
-rw-r--r--arch/s390/kernel/smp.c77
-rw-r--r--arch/s390/kernel/swsusp.S2
-rw-r--r--arch/s390/kernel/sysinfo.c33
-rw-r--r--arch/s390/kernel/time.c191
-rw-r--r--arch/s390/kernel/topology.c53
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S23
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S23
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S11
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S11
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kernel/vtime.c35
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c57
-rw-r--r--arch/s390/kvm/sthyi.c4
-rw-r--r--arch/s390/lib/mem.S39
-rw-r--r--arch/s390/lib/spinlock.c25
-rw-r--r--arch/s390/mm/fault.c1
-rw-r--r--arch/s390/mm/gmap.c2
-rw-r--r--arch/s390/mm/vmem.c9
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/s390/numa/mode_emu.c38
-rw-r--r--arch/s390/numa/toptree.c16
-rw-r--r--arch/s390/pci/pci.c8
-rw-r--r--arch/s390/pci/pci_clp.c3
-rw-r--r--arch/s390/pci/pci_debug.c2
-rw-r--r--arch/s390/pci/pci_dma.c38
-rw-r--r--arch/s390/tools/Makefile2
-rw-r--r--arch/s390/tools/gen_facilities.c76
-rw-r--r--arch/score/include/asm/mutex.h6
-rw-r--r--arch/score/include/asm/processor.h1
-rw-r--r--arch/sh/include/asm/mutex-llsc.h109
-rw-r--r--arch/sh/include/asm/mutex.h12
-rw-r--r--arch/sh/include/asm/processor.h1
-rw-r--r--arch/sh/include/asm/tlb.h15
-rw-r--r--arch/sh/kernel/cpu/Makefile2
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile2
-rw-r--r--arch/sh/kernel/dma-nommu.c7
-rw-r--r--arch/sparc/Kconfig26
-rw-r--r--arch/sparc/configs/sparc64_defconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/cpudata_64.h5
-rw-r--r--arch/sparc/include/asm/hypervisor.h343
-rw-r--r--arch/sparc/include/asm/iommu_64.h28
-rw-r--r--arch/sparc/include/asm/kdebug_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/include/asm/processor_32.h1
-rw-r--r--arch/sparc/include/asm/processor_64.h1
-rw-r--r--arch/sparc/include/asm/ptrace.h35
-rw-r--r--arch/sparc/include/asm/spinlock_32.h2
-rw-r--r--arch/sparc/include/asm/spinlock_64.h12
-rw-r--r--arch/sparc/include/asm/thread_info_64.h6
-rw-r--r--arch/sparc/include/asm/topology_64.h9
-rw-r--r--arch/sparc/include/asm/ttable.h6
-rw-r--r--arch/sparc/include/asm/uaccess_64.h28
-rw-r--r--arch/sparc/include/asm/uprobes.h59
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/Makefile1
-rw-r--r--arch/sparc/kernel/head_64.S37
-rw-r--r--arch/sparc/kernel/hvapi.c1
-rw-r--r--arch/sparc/kernel/iommu.c12
-rw-r--r--arch/sparc/kernel/iommu_common.h1
-rw-r--r--arch/sparc/kernel/ioport.c4
-rw-r--r--arch/sparc/kernel/jump_label.c23
-rw-r--r--arch/sparc/kernel/leon_kernel.c56
-rw-r--r--arch/sparc/kernel/mdesc.c46
-rw-r--r--arch/sparc/kernel/nmi.c44
-rw-r--r--arch/sparc/kernel/pci_sun4v.c421
-rw-r--r--arch/sparc/kernel/pci_sun4v.h21
-rw-r--r--arch/sparc/kernel/pci_sun4v_asm.S68
-rw-r--r--arch/sparc/kernel/power.c7
-rw-r--r--arch/sparc/kernel/ptrace_64.c54
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/kernel/signal_64.c2
-rw-r--r--arch/sparc/kernel/smp_64.c8
-rw-r--r--arch/sparc/kernel/sysfs.c45
-rw-r--r--arch/sparc/kernel/traps_64.c4
-rw-r--r--arch/sparc/kernel/ttable_64.S2
-rw-r--r--arch/sparc/kernel/uprobes.c331
-rw-r--r--arch/sparc/lib/GENcopy_from_user.S4
-rw-r--r--arch/sparc/lib/GENcopy_to_user.S4
-rw-r--r--arch/sparc/lib/GENmemcpy.S48
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/NG2copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG2copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG2memcpy.S228
-rw-r--r--arch/sparc/lib/NG4copy_from_user.S8
-rw-r--r--arch/sparc/lib/NG4copy_to_user.S8
-rw-r--r--arch/sparc/lib/NG4memcpy.S294
-rw-r--r--arch/sparc/lib/NGcopy_from_user.S4
-rw-r--r--arch/sparc/lib/NGcopy_to_user.S4
-rw-r--r--arch/sparc/lib/NGmemcpy.S233
-rw-r--r--arch/sparc/lib/U1copy_from_user.S8
-rw-r--r--arch/sparc/lib/U1copy_to_user.S8
-rw-r--r--arch/sparc/lib/U1memcpy.S345
-rw-r--r--arch/sparc/lib/U3copy_from_user.S8
-rw-r--r--arch/sparc/lib/U3copy_to_user.S8
-rw-r--r--arch/sparc/lib/U3memcpy.S227
-rw-r--r--arch/sparc/lib/copy_in_user.S35
-rw-r--r--arch/sparc/lib/user_fixup.c71
-rw-r--r--arch/sparc/mm/init_64.c71
-rw-r--r--arch/sparc/mm/tsb.c17
-rw-r--r--arch/sparc/mm/ultra.S374
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/cache.h3
-rw-r--r--arch/tile/include/asm/processor.h2
-rw-r--r--arch/tile/kernel/pci-dma.c12
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/um/drivers/net_kern.c8
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/tlb.h15
-rw-r--r--arch/unicore32/include/asm/mutex.h20
-rw-r--r--arch/unicore32/include/asm/processor.h1
-rw-r--r--arch/x86/Kconfig93
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile7
-rw-r--r--arch/x86/boot/compressed/eboot.c65
-rw-r--r--arch/x86/boot/compressed/head_64.S3
-rw-r--r--arch/x86/boot/cpu.c6
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c709
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c22
-rw-r--r--arch/x86/crypto/fpu.c207
-rw-r--r--arch/x86/crypto/glue_helper.c74
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c2
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h2
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c2
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h2
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c2
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h2
-rw-r--r--arch/x86/entry/calling.h33
-rw-r--r--arch/x86/entry/entry_32.S141
-rw-r--r--arch/x86/entry/entry_64.S16
-rw-r--r--arch/x86/entry/vdso/vma.c14
-rw-r--r--arch/x86/events/amd/core.c8
-rw-r--r--arch/x86/events/core.c22
-rw-r--r--arch/x86/events/intel/core.c2
-rw-r--r--arch/x86/events/intel/cstate.c1
-rw-r--r--arch/x86/events/intel/ds.c35
-rw-r--r--arch/x86/events/intel/pt.c45
-rw-r--r--arch/x86/events/intel/uncore.c8
-rw-r--r--arch/x86/events/intel/uncore_snb.c44
-rw-r--r--arch/x86/events/perf_event.h4
-rw-r--r--arch/x86/include/asm/Kbuild4
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/amd_nb.h23
-rw-r--r--arch/x86/include/asm/apic.h5
-rw-r--r--arch/x86/include/asm/compat.h4
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/cpufeatures.h7
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h39
-rw-r--r--arch/x86/include/asm/e820.h12
-rw-r--r--arch/x86/include/asm/efi.h16
-rw-r--r--arch/x86/include/asm/fpu/api.h10
-rw-r--r--arch/x86/include/asm/fpu/internal.h139
-rw-r--r--arch/x86/include/asm/fpu/types.h34
-rw-r--r--arch/x86/include/asm/fpu/xstate.h17
-rw-r--r--arch/x86/include/asm/idle.h22
-rw-r--r--arch/x86/include/asm/intel-mid.h1
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h21
-rw-r--r--arch/x86/include/asm/kvm_page_track.h14
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/include/asm/mce.h34
-rw-r--r--arch/x86/include/asm/microcode.h18
-rw-r--r--arch/x86/include/asm/microcode_amd.h30
-rw-r--r--arch/x86/include/asm/microcode_intel.h4
-rw-r--r--arch/x86/include/asm/mmu_context.h2
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/msr.h46
-rw-r--r--arch/x86/include/asm/mutex.h5
-rw-r--r--arch/x86/include/asm/mutex_32.h110
-rw-r--r--arch/x86/include/asm/mutex_64.h127
-rw-r--r--arch/x86/include/asm/paravirt.h10
-rw-r--r--arch/x86/include/asm/paravirt_types.h18
-rw-r--r--arch/x86/include/asm/percpu.h11
-rw-r--r--arch/x86/include/asm/preempt.h8
-rw-r--r--arch/x86/include/asm/processor.h19
-rw-r--r--arch/x86/include/asm/qspinlock.h6
-rw-r--r--arch/x86/include/asm/special_insns.h13
-rw-r--r--arch/x86/include/asm/stacktrace.h8
-rw-r--r--arch/x86/include/asm/topology.h32
-rw-r--r--arch/x86/include/asm/trace/fpu.h5
-rw-r--r--arch/x86/include/asm/uaccess.h13
-rw-r--r--arch/x86/include/asm/unwind.h16
-rw-r--r--arch/x86/include/asm/vgtod.h7
-rw-r--r--arch/x86/include/asm/vmx.h37
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h4
-rw-r--r--arch/x86/include/uapi/asm/mce.h1
-rw-r--r--arch/x86/include/uapi/asm/prctl.h8
-rw-r--r--arch/x86/include/uapi/asm/vmx.h5
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/apei.c3
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S9
-rw-r--r--arch/x86/kernel/amd_nb.c164
-rw-r--r--arch/x86/kernel/apic/apic.c4
-rw-r--r--arch/x86/kernel/apic/io_apic.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/apm_32.c9
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/amd.c55
-rw-r--r--arch/x86/kernel/cpu/bugs.c26
-rw-r--r--arch/x86/kernel/cpu/bugs_64.c33
-rw-r--r--arch/x86/kernel/cpu/common.c93
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c202
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c357
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c41
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c57
-rw-r--r--arch/x86/kernel/cpu/mcheck/threshold.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/Makefile2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c421
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c83
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c834
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_lib.c184
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c1
-rw-r--r--arch/x86/kernel/cpu/scattered.c57
-rw-r--r--arch/x86/kernel/cpu/vmware.c86
-rw-r--r--arch/x86/kernel/cpuid.c73
-rw-r--r--arch/x86/kernel/dumpstack.c70
-rw-r--r--arch/x86/kernel/dumpstack_32.c56
-rw-r--r--arch/x86/kernel/dumpstack_64.c79
-rw-r--r--arch/x86/kernel/fpu/bugs.c7
-rw-r--r--arch/x86/kernel/fpu/core.c90
-rw-r--r--arch/x86/kernel/fpu/init.c107
-rw-r--r--arch/x86/kernel/fpu/signal.c8
-rw-r--r--arch/x86/kernel/fpu/xstate.c11
-rw-r--r--arch/x86/kernel/head_32.S58
-rw-r--r--arch/x86/kernel/head_64.S60
-rw-r--r--arch/x86/kernel/irq.c1
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/itmt.c215
-rw-r--r--arch/x86/kernel/kvm.c20
-rw-r--r--arch/x86/kernel/ldt.c14
-rw-r--r--arch/x86/kernel/machine_kexec_64.c6
-rw-r--r--arch/x86/kernel/msr.c69
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c14
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/paravirt_patch_32.c14
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c14
-rw-r--r--arch/x86/kernel/process.c149
-rw-r--r--arch/x86/kernel/process_32.c13
-rw-r--r--arch/x86/kernel/process_64.c19
-rw-r--r--arch/x86/kernel/rtc.c9
-rw-r--r--arch/x86/kernel/setup.c24
-rw-r--r--arch/x86/kernel/setup_percpu.c3
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/smpboot.c57
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c39
-rw-r--r--arch/x86/kernel/traps.c20
-rw-r--r--arch/x86/kernel/unwind_frame.c161
-rw-r--r--arch/x86/kernel/unwind_guess.c13
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/kvm/cpuid.c30
-rw-r--r--arch/x86/kvm/emulate.c238
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/i8254.c15
-rw-r--r--arch/x86/kvm/i8254.h3
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/ioapic.h4
-rw-r--r--arch/x86/kvm/irq_comm.c71
-rw-r--r--arch/x86/kvm/lapic.c214
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c43
-rw-r--r--arch/x86/kvm/page_track.c31
-rw-r--r--arch/x86/kvm/svm.c44
-rw-r--r--arch/x86/kvm/vmx.c1169
-rw-r--r--arch/x86/kvm/x86.c190
-rw-r--r--arch/x86/lguest/boot.c29
-rw-r--r--arch/x86/lib/copy_user_64.S47
-rw-r--r--arch/x86/lib/msr.c4
-rw-r--r--arch/x86/lib/usercopy.c49
-rw-r--r--arch/x86/lib/usercopy_32.c49
-rw-r--r--arch/x86/mm/extable.c7
-rw-r--r--arch/x86/mm/fault.c3
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/mm/pkeys.c3
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/oprofile/nmi_int.c71
-rw-r--r--arch/x86/pci/amd_bus.c34
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c80
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_wdt.c)34
-rw-r--r--arch/x86/platform/intel-mid/pwr.c20
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c2
-rw-r--r--arch/x86/platform/uv/tlb_uv.c1
-rw-r--r--arch/x86/platform/uv/uv_nmi.c4
-rw-r--r--arch/x86/power/hibernate_64.c94
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/x86/ras/mce_amd_inj.c2
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/tools/insn_sanity.c3
-rw-r--r--arch/x86/tools/relocs.h2
-rw-r--r--arch/x86/tools/test_get_len.c2
-rw-r--r--arch/x86/um/asm/processor.h1
-rw-r--r--arch/x86/xen/enlighten.c13
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c1
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/spinlock.c2
-rw-r--r--arch/xtensa/include/asm/mutex.h9
-rw-r--r--arch/xtensa/include/asm/processor.h1
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h9
-rw-r--r--arch/xtensa/kernel/pci-dma.c7
-rw-r--r--arch/xtensa/kernel/time.c14
-rw-r--r--arch/xtensa/kernel/traps.c74
-rw-r--r--block/Kconfig35
-rw-r--r--block/Makefile4
-rw-r--r--block/bio-integrity.c2
-rw-r--r--block/bio.c68
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c265
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c27
-rw-r--r--block/blk-lib.c177
-rw-r--r--block/blk-map.c10
-rw-r--r--block/blk-merge.c89
-rw-r--r--block/blk-mq-cpumap.c1
-rw-r--r--block/blk-mq-sysfs.c47
-rw-r--r--block/blk-mq.c575
-rw-r--r--block/blk-mq.h10
-rw-r--r--block/blk-settings.c39
-rw-r--r--block/blk-stat.c256
-rw-r--r--block/blk-stat.h42
-rw-r--r--block/blk-sysfs.c191
-rw-r--r--block/blk-tag.c6
-rw-r--r--block/blk-throttle.c10
-rw-r--r--block/blk-wbt.c750
-rw-r--r--block/blk-wbt.h171
-rw-r--r--block/blk-zoned.c348
-rw-r--r--block/blk.h5
-rw-r--r--block/bsg-lib.c25
-rw-r--r--block/bsg.c4
-rw-r--r--block/cfq-iosched.c109
-rw-r--r--block/elevator.c44
-rw-r--r--block/ioctl.c4
-rw-r--r--block/partition-generic.c65
-rw-r--r--crypto/842.c81
-rw-r--r--crypto/Kconfig32
-rw-r--r--crypto/Makefile7
-rw-r--r--crypto/acompress.c169
-rw-r--r--crypto/algboss.c8
-rw-r--r--crypto/algif_aead.c73
-rw-r--r--crypto/algif_hash.c17
-rw-r--r--crypto/algif_skcipher.c22
-rw-r--r--crypto/api.c22
-rw-r--r--crypto/asymmetric_keys/public_key.c1
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c1
-rw-r--r--crypto/authenc.c8
-rw-r--r--crypto/authencesn.c8
-rw-r--r--crypto/cbc.c269
-rw-r--r--crypto/ccm.c8
-rw-r--r--crypto/chacha20poly1305.c8
-rw-r--r--crypto/cipher.c4
-rw-r--r--crypto/cmac.c14
-rw-r--r--crypto/compress.c4
-rw-r--r--crypto/cryptd.c286
-rw-r--r--crypto/crypto_engine.c26
-rw-r--r--crypto/crypto_user.c19
-rw-r--r--crypto/ctr.c8
-rw-r--r--crypto/cts.c8
-rw-r--r--crypto/deflate.c111
-rw-r--r--crypto/dh.c2
-rw-r--r--crypto/drbg.c30
-rw-r--r--crypto/gcm.c10
-rw-r--r--crypto/gf128mul.c59
-rw-r--r--crypto/internal.h3
-rw-r--r--crypto/jitterentropy-kcapi.c1
-rw-r--r--crypto/lrw.c507
-rw-r--r--crypto/lz4.c91
-rw-r--r--crypto/lz4hc.c92
-rw-r--r--crypto/lzo.c97
-rw-r--r--crypto/mcryptd.c19
-rw-r--r--crypto/pcbc.c201
-rw-r--r--crypto/poly1305_generic.c34
-rw-r--r--crypto/scatterwalk.c4
-rw-r--r--crypto/scompress.c356
-rw-r--r--crypto/simd.c226
-rw-r--r--crypto/skcipher.c542
-rw-r--r--crypto/testmgr.c318
-rw-r--r--crypto/testmgr.h70
-rw-r--r--crypto/xts.c547
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig4
-rw-r--r--drivers/acpi/acpi_apd.c17
-rw-r--r--drivers/acpi/acpi_lpss.c18
-rw-r--r--drivers/acpi/acpi_platform.c5
-rw-r--r--drivers/acpi/acpi_video.c11
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h5
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/acnamesp.h3
-rw-r--r--drivers/acpi/acpica/actables.h5
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h1
-rw-r--r--drivers/acpi/acpica/dsinit.c4
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c13
-rw-r--r--drivers/acpi/acpica/evrgnini.c59
-rw-r--r--drivers/acpi/acpica/exconfig.c42
-rw-r--r--drivers/acpi/acpica/nsnames.c45
-rw-r--r--drivers/acpi/acpica/nsxfname.c43
-rw-r--r--drivers/acpi/acpica/tbdata.c81
-rw-r--r--drivers/acpi/acpica/tbfadt.c10
-rw-r--r--drivers/acpi/acpica/tbxface.c16
-rw-r--r--drivers/acpi/acpica/tbxfload.c44
-rw-r--r--drivers/acpi/acpica/utdecode.c49
-rw-r--r--drivers/acpi/apei/ghes.c7
-rw-r--r--drivers/acpi/apei/hest.c13
-rw-r--r--drivers/acpi/battery.c72
-rw-r--r--drivers/acpi/blacklist.c28
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/acpi/cppc_acpi.c16
-rw-r--r--drivers/acpi/device_sysfs.c8
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c4
-rw-r--r--drivers/acpi/event.c6
-rw-r--r--drivers/acpi/nfit/core.c55
-rw-r--r--drivers/acpi/nfit/nfit.h2
-rw-r--r--drivers/acpi/osl.c19
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/processor_perflib.c55
-rw-r--r--drivers/acpi/property.c125
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c37
-rw-r--r--drivers/acpi/video_detect.c20
-rw-r--r--drivers/ata/ahci.c46
-rw-r--r--drivers/ata/ahci_qoriq.c16
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c44
-rw-r--r--drivers/ata/libata-scsi.c87
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_imx.c82
-rw-r--r--drivers/ata/sata_mv.c15
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/lanai.c1
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/auxdisplay/Kconfig13
-rw-r--r--drivers/auxdisplay/Makefile1
-rw-r--r--drivers/auxdisplay/ht16k33.c563
-rw-r--r--drivers/base/Kconfig9
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/base.h15
-rw-r--r--drivers/base/cacheinfo.c195
-rw-r--r--drivers/base/class.c15
-rw-r--r--drivers/base/core.c578
-rw-r--r--drivers/base/dd.c84
-rw-r--r--drivers/base/devcoredump.c10
-rw-r--r--drivers/base/devres.c66
-rw-r--r--drivers/base/dma-mapping.c4
-rw-r--r--drivers/base/firmware_class.c178
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/domain.c363
-rw-r--r--drivers/base/power/main.c93
-rw-r--r--drivers/base/power/opp/core.c521
-rw-r--r--drivers/base/power/opp/debugfs.c52
-rw-r--r--drivers/base/power/opp/of.c111
-rw-r--r--drivers/base/power/opp/opp.h23
-rw-r--r--drivers/base/power/power.h29
-rw-r--r--drivers/base/power/qos.c6
-rw-r--r--drivers/base/power/runtime.c236
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/trace.c27
-rw-r--r--drivers/base/power/wakeirq.c76
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/base/regmap/regcache-lzo.c8
-rw-r--r--drivers/base/soc.c79
-rw-r--r--drivers/base/test/Kconfig9
-rw-r--r--drivers/base/test/Makefile1
-rw-r--r--drivers/base/test/test_async_driver_probe.c169
-rw-r--r--drivers/base/topology.c42
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/block/Kconfig5
-rw-r--r--drivers/block/aoe/aoecmd.c41
-rw-r--r--drivers/block/brd.c39
-rw-r--r--drivers/block/cciss_scsi.c72
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c16
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c18
-rw-r--r--drivers/block/nbd.c443
-rw-r--r--drivers/block/null_blk.c1
-rw-r--r--drivers/block/pktcdvd.c49
-rw-r--r--drivers/block/skd_main.c238
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/block/xen-blkback/blkback.c10
-rw-r--r--drivers/block/xen-blkback/xenbus.c36
-rw-r--r--drivers/block/xen-blkfront.c84
-rw-r--r--drivers/block/zram/zcomp.c76
-rw-r--r--drivers/block/zram/zcomp.h5
-rw-r--r--drivers/block/zram/zram_drv.c20
-rw-r--r--drivers/bluetooth/btmrvl_drv.h1
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/bluetooth/hci_bcsp.c4
-rw-r--r--drivers/bluetooth/hci_h5.c4
-rw-r--r--drivers/bluetooth/hci_qca.c9
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/agp/alpha-agp.c3
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c26
-rw-r--r--drivers/char/hw_random/core.c3
-rw-r--r--drivers/char/hw_random/meson-rng.c2
-rw-r--r--drivers/char/hw_random/msm-rng.c4
-rw-r--r--drivers/char/hw_random/omap-rng.c162
-rw-r--r--drivers/char/hw_random/pic32-rng.c3
-rw-r--r--drivers/char/hw_random/pseries-rng.c5
-rw-r--r--drivers/char/hw_random/via-rng.c8
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c10
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c190
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c37
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/pcmcia/Kconfig11
-rw-r--r--drivers/char/pcmcia/Makefile1
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c4
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c373
-rw-r--r--drivers/char/pcmcia/synclink_cs.c1
-rw-r--r--drivers/char/ppdev.c39
-rw-r--r--drivers/char/snsc.c2
-rw-r--r--drivers/char/tile-srom.c3
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/Makefile14
-rw-r--r--drivers/char/tpm/tpm-chip.c42
-rw-r--r--drivers/char/tpm/tpm-interface.c113
-rw-r--r--drivers/char/tpm/tpm-sysfs.c7
-rw-r--r--drivers/char/tpm/tpm.h41
-rw-r--r--drivers/char/tpm/tpm2-cmd.c2
-rw-r--r--drivers/char/tpm/tpm_acpi.c46
-rw-r--r--drivers/char/tpm/tpm_crb.c173
-rw-r--r--drivers/char/tpm/tpm_eventlog.c230
-rw-r--r--drivers/char/tpm/tpm_eventlog.h22
-rw-r--r--drivers/char/tpm/tpm_of.c48
-rw-r--r--drivers/char/tpm/tpm_tis.c11
-rw-r--r--drivers/char/tpm/tpm_tis_core.c64
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c85
-rw-r--r--drivers/char/tpm/xen-tpmfront.c9
-rw-r--r--drivers/char/virtio_console.c22
-rw-r--r--drivers/clk/Kconfig4
-rw-r--r--drivers/clk/bcm/Kconfig16
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c80
-rw-r--r--drivers/clk/berlin/bg2.c2
-rw-r--r--drivers/clk/berlin/bg2q.c2
-rw-r--r--drivers/clk/clk-cdce925.c2
-rw-r--r--drivers/clk/clk-devres.c21
-rw-r--r--drivers/clk/clk-efm32gg.c2
-rw-r--r--drivers/clk/clk-gate.c4
-rw-r--r--drivers/clk/clk-oxnas.c232
-rw-r--r--drivers/clk/clk-qoriq.c73
-rw-r--r--drivers/clk/clk-stm32f4.c435
-rw-r--r--drivers/clk/clk-wm831x.c2
-rw-r--r--drivers/clk/clk-xgene.c10
-rw-r--r--drivers/clk/hisilicon/Kconfig17
-rw-r--r--drivers/clk/hisilicon/Makefile2
-rw-r--r--drivers/clk/hisilicon/crg-hi3516cv300.c330
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c337
-rw-r--r--drivers/clk/hisilicon/crg.h34
-rw-r--r--drivers/clk/imx/clk-imx6q.c283
-rw-r--r--drivers/clk/imx/clk-imx6ul.c72
-rw-r--r--drivers/clk/imx/clk-pllv3.c16
-rw-r--r--drivers/clk/imx/clk.h8
-rw-r--r--drivers/clk/keystone/pll.c13
-rw-r--r--drivers/clk/mediatek/Kconfig43
-rw-r--r--drivers/clk/mediatek/Makefile7
-rw-r--r--drivers/clk/mediatek/clk-gate.c52
-rw-r--r--drivers/clk/mediatek/clk-gate.h2
-rw-r--r--drivers/clk/mediatek/clk-mt2701-bdp.c138
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c80
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c81
-rw-r--r--drivers/clk/mediatek/clk-mt2701-img.c80
-rw-r--r--drivers/clk/mediatek/clk-mt2701-mm.c123
-rw-r--r--drivers/clk/mediatek/clk-mt2701-vdec.c91
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c1035
-rw-r--r--drivers/clk/mediatek/clk-mtk.c40
-rw-r--r--drivers/clk/mediatek/clk-mtk.h41
-rw-r--r--drivers/clk/mediatek/clk-pll.c1
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c17
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c2
-rw-r--r--drivers/clk/mmp/clk-of-pxa1928.c3
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c23
-rw-r--r--drivers/clk/mvebu/ap806-system-controller.c23
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c167
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c5
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c32
-rw-r--r--drivers/clk/pxa/clk-pxa.c145
-rw-r--r--drivers/clk/pxa/clk-pxa.h59
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c114
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c168
-rw-r--r--drivers/clk/qcom/Kconfig37
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c187
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h25
-rw-r--r--drivers/clk/qcom/clk-pll.c31
-rw-r--r--drivers/clk/qcom/clk-rcg.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c76
-rw-r--r--drivers/clk/qcom/clk-rpm.c497
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c578
-rw-r--r--drivers/clk/qcom/common.c52
-rw-r--r--drivers/clk/qcom/common.h11
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c8
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c3
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c2300
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c33
-rw-r--r--drivers/clk/qcom/gdsc.c44
-rw-r--r--drivers/clk/qcom/gdsc.h3
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c26
-rw-r--r--drivers/clk/renesas/Kconfig2
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c26
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c18
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c32
-rw-r--r--drivers/clk/renesas/r8a7743-cpg-mssr.c270
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c259
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c65
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c371
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h43
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c31
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h1
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c29
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h2
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-cpu.c9
-rw-r--r--drivers/clk/rockchip/clk-ddr.c5
-rw-r--r--drivers/clk/rockchip/clk-pll.c6
-rw-r--r--drivers/clk/rockchip/clk-rk1108.c531
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c13
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c31
-rw-r--r--drivers/clk/rockchip/clk.h15
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c22
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c30
-rw-r--r--drivers/clk/st/clk-flexgen.c5
-rw-r--r--drivers/clk/sunxi-ng/Kconfig14
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c915
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h72
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h6
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c12
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c33
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.h17
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c43
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c45
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.h6
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c55
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.h8
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h6
-rw-r--r--drivers/clk/sunxi/clk-mod0.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c21
-rw-r--r--drivers/clk/tegra/cvb.c10
-rw-r--r--drivers/clk/ti/clk-3xxx.c20
-rw-r--r--drivers/clk/ti/clk-7xx.c1
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c20
-rw-r--r--drivers/clk/ti/clock.h9
-rw-r--r--drivers/clk/ti/dpll.c19
-rw-r--r--drivers/clk/ti/dpll3xxx.c67
-rw-r--r--drivers/clk/uniphier/Makefile3
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c3
-rw-r--r--drivers/clk/uniphier/clk-uniphier-cpugear.c115
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c32
-rw-r--r--drivers/clk/uniphier/clk-uniphier.h45
-rw-r--r--drivers/clocksource/arm_arch_timer.c14
-rw-r--r--drivers/clocksource/bcm2835_timer.c14
-rw-r--r--drivers/cpufreq/Kconfig.arm29
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c117
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c1057
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c7
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c15
-rw-r--r--drivers/cpufreq/cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/cpufreq.c25
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c46
-rw-r--r--drivers/cpufreq/cpufreq_governor.c30
-rw-r--r--drivers/cpufreq/cpufreq_governor.h5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c17
-rw-r--r--drivers/cpufreq/cpufreq_stats.c22
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c239
-rw-r--r--drivers/cpufreq/intel_pstate.c882
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c65
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c2
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/cpuidle/dt_idle_states.c6
-rw-r--r--drivers/cpuidle/governor.c4
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/cpuidle/sysfs.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c3
-rw-r--r--drivers/crypto/atmel-aes-regs.h4
-rw-r--r--drivers/crypto/atmel-aes.c189
-rw-r--r--drivers/crypto/caam/Kconfig11
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c1516
-rw-r--r--drivers/crypto/caam/caamalg_desc.c1306
-rw-r--r--drivers/crypto/caam/caamalg_desc.h97
-rw-r--r--drivers/crypto/caam/caamhash.c227
-rw-r--r--drivers/crypto/caam/caampkc.c4
-rw-r--r--drivers/crypto/caam/caamrng.c10
-rw-r--r--drivers/crypto/caam/ctrl.c80
-rw-r--r--drivers/crypto/caam/desc.h22
-rw-r--r--drivers/crypto/caam/desc_constr.h133
-rw-r--r--drivers/crypto/caam/error.c5
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c27
-rw-r--r--drivers/crypto/caam/key_gen.c62
-rw-r--r--drivers/crypto/caam/key_gen.h6
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h6
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c4
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c30
-rw-r--r--drivers/crypto/ccp/ccp-dev.c6
-rw-r--r--drivers/crypto/ccp/ccp-dev.h45
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c2017
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h102
-rw-r--r--drivers/crypto/chelsio/chcr_core.c11
-rw-r--r--drivers/crypto/chelsio/chcr_core.h18
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h115
-rw-r--r--drivers/crypto/marvell/cesa.c4
-rw-r--r--drivers/crypto/marvell/cesa.h5
-rw-r--r--drivers/crypto/marvell/cipher.c8
-rw-r--r--drivers/crypto/marvell/hash.c76
-rw-r--r--drivers/crypto/marvell/tdma.c33
-rw-r--r--drivers/crypto/mv_cesa.c4
-rw-r--r--drivers/crypto/nx/nx.c1
-rw-r--r--drivers/crypto/padlock-aes.c23
-rw-r--r--drivers/crypto/padlock-sha.c18
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/vmx/Makefile12
-rw-r--r--drivers/dax/dax.c7
-rw-r--r--drivers/dax/pmem.c4
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/devfreq/event/exynos-nocp.c1
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c6
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c1
-rw-r--r--drivers/devfreq/exynos-bus.c29
-rw-r--r--drivers/devfreq/rk3399_dmc.c15
-rw-r--r--drivers/dma-buf/Kconfig2
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c28
-rw-r--r--drivers/dma-buf/dma-fence-array.c (renamed from drivers/dma-buf/fence-array.c)91
-rw-r--r--drivers/dma-buf/dma-fence.c (renamed from drivers/dma-buf/fence.c)221
-rw-r--r--drivers/dma-buf/reservation.c197
-rw-r--r--drivers/dma-buf/seqno-fence.c18
-rw-r--r--drivers/dma-buf/sw_sync.c50
-rw-r--r--drivers/dma-buf/sync_debug.c13
-rw-r--r--drivers/dma-buf/sync_debug.h9
-rw-r--r--drivers/dma-buf/sync_file.c66
-rw-r--r--drivers/dma/Kconfig17
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/at_hdmac.c3
-rw-r--r--drivers/dma/at_xdmac.c5
-rw-r--r--drivers/dma/cppi41.c31
-rw-r--r--drivers/dma/dmatest.c74
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c18
-rw-r--r--drivers/dma/dw/regs.h3
-rw-r--r--drivers/dma/edma.c4
-rw-r--r--drivers/dma/fsl_raid.c1
-rw-r--r--drivers/dma/hsu/pci.c8
-rw-r--r--drivers/dma/img-mdc-dma.c9
-rw-r--r--drivers/dma/imx-sdma.c13
-rw-r--r--drivers/dma/ioat/dma.c17
-rw-r--r--drivers/dma/ioat/init.c21
-rw-r--r--drivers/dma/ioat/registers.h2
-rw-r--r--drivers/dma/k3dma.c3
-rw-r--r--drivers/dma/mic_x100_dma.c2
-rw-r--r--drivers/dma/mv_xor.c190
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/nbpfaxi.c38
-rw-r--r--drivers/dma/omap-dma.c187
-rw-r--r--drivers/dma/pch_dma.c5
-rw-r--r--drivers/dma/pl330.c23
-rw-r--r--drivers/dma/pxa_dma.c28
-rw-r--r--drivers/dma/qcom/hidma.c173
-rw-r--r--drivers/dma/qcom/hidma.h9
-rw-r--r--drivers/dma/qcom/hidma_dbg.c4
-rw-r--r--drivers/dma/qcom/hidma_ll.c176
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c11
-rw-r--r--drivers/dma/s3c24xx-dma.c5
-rw-r--r--drivers/dma/sh/usb-dmac.c3
-rw-r--r--drivers/dma/sirf-dma.c4
-rw-r--r--drivers/dma/st_fdma.c889
-rw-r--r--drivers/dma/st_fdma.h249
-rw-r--r--drivers/dma/stm32-dma.c6
-rw-r--r--drivers/dma/sun6i-dma.c2
-rw-r--r--drivers/dma/zx296702_dma.c3
-rw-r--r--drivers/edac/altera_edac.c4
-rw-r--r--drivers/edac/amd64_edac.c692
-rw-r--r--drivers/edac/amd64_edac.h56
-rw-r--r--drivers/edac/edac_mc.c49
-rw-r--r--drivers/edac/mce_amd.c44
-rw-r--r--drivers/edac/mpc85xx_edac.c17
-rw-r--r--drivers/edac/sb_edac.c14
-rw-r--r--drivers/edac/skx_edac.c7
-rw-r--r--drivers/edac/xgene_edac.c6
-rw-r--r--drivers/extcon/extcon-arizona.c8
-rw-r--r--drivers/extcon/extcon-usb-gpio.c169
-rw-r--r--drivers/firewire/net.c80
-rw-r--r--drivers/firmware/efi/Kconfig18
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/apple-properties.c248
-rw-r--r--drivers/firmware/efi/arm-init.c4
-rw-r--r--drivers/firmware/efi/arm-runtime.c4
-rw-r--r--drivers/firmware/efi/dev-path-parser.c203
-rw-r--r--drivers/firmware/efi/efi.c76
-rw-r--r--drivers/firmware/efi/libstub/Makefile6
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c33
-rw-r--r--drivers/firmware/efi/libstub/efistub.h11
-rw-r--r--drivers/firmware/efi/libstub/random.c67
-rw-r--r--drivers/firmware/efi/test/efi_test.c15
-rw-r--r--drivers/fpga/Kconfig39
-rw-r--r--drivers/fpga/Makefile9
-rw-r--r--drivers/fpga/altera-fpga2sdram.c180
-rw-r--r--drivers/fpga/altera-freeze-bridge.c273
-rw-r--r--drivers/fpga/altera-hps2fpga.c222
-rw-r--r--drivers/fpga/fpga-bridge.c395
-rw-r--r--drivers/fpga/fpga-mgr.c97
-rw-r--r--drivers/fpga/fpga-region.c603
-rw-r--r--drivers/fpga/socfpga-a10.c557
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/zynq-fpga.c56
-rw-r--r--drivers/gpio/Kconfig42
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-adnp.c12
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c130
-rw-r--r--drivers/gpio/gpio-arizona.c9
-rw-r--r--drivers/gpio/gpio-axp209.c8
-rw-r--r--drivers/gpio/gpio-crystalcove.c6
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-dln2.c1
-rw-r--r--drivers/gpio/gpio-etraxfs.c7
-rw-r--r--drivers/gpio/gpio-htc-egpio.c54
-rw-r--r--drivers/gpio/gpio-intel-mid.c7
-rw-r--r--drivers/gpio/gpio-max732x.c17
-rw-r--r--drivers/gpio/gpio-max77620.c11
-rw-r--r--drivers/gpio/gpio-mb86s7x.c6
-rw-r--r--drivers/gpio/gpio-mcp23s08.c17
-rw-r--r--drivers/gpio/gpio-merrifield.c33
-rw-r--r--drivers/gpio/gpio-mvebu.c92
-rw-r--r--drivers/gpio/gpio-mxs.c45
-rw-r--r--drivers/gpio/gpio-pca953x.c22
-rw-r--r--drivers/gpio/gpio-pcf857x.c11
-rw-r--r--drivers/gpio/gpio-pl061.c208
-rw-r--r--drivers/gpio/gpio-stmpe.c19
-rw-r--r--drivers/gpio/gpio-sx150x.c792
-rw-r--r--drivers/gpio/gpio-tc3589x.c19
-rw-r--r--drivers/gpio/gpio-vf610.c6
-rw-r--r--drivers/gpio/gpio-wcove.c6
-rw-r--r--drivers/gpio/gpiolib-acpi.c107
-rw-r--r--drivers/gpio/gpiolib-devprop.c67
-rw-r--r--drivers/gpio/gpiolib-of.c65
-rw-r--r--drivers/gpio/gpiolib.c171
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/drm/Kconfig21
-rw-r--r--drivers/gpu/drm/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h898
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c310
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c593
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h450
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c276
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c449
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c222
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c834
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c810
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c334
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c435
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c858
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c604
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c356
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h272
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c1001
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h661
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h8127
-rw-r--r--[-rwxr-xr-x]drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h0
-rw-r--r--[-rwxr-xr-x]drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h4457
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h9836
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h1784
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h12821
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h1274
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h11895
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h275
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h1079
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h148
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h715
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h96
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h795
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h64
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h99
-rw-r--r--[-rwxr-xr-x]drivers/gpu/drm/amd/include/cgs_common.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c93
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c170
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h22
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c12
-rw-r--r--[-rwxr-xr-x]drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c28
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c79
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h32
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c71
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c159
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c7
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c10
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c26
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c43
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c12
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h9
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c99
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c121
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h10
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c2
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c238
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c15
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c65
-rw-r--r--drivers/gpu/drm/armada/armada_trace.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h66
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c7
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c5
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c41
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c7
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig23
-rw-r--r--drivers/gpu/drm/bridge/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h16
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c213
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c2
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c33
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-audio.h7
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c141
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c301
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.h39
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c7
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c7
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c1564
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.h1517
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c9
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c317
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c7
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c592
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c209
-rw-r--r--drivers/gpu/drm/drm_blend.c39
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c148
-rw-r--r--drivers/gpu/drm/drm_crtc.c776
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c52
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c352
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c121
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c130
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c128
-rw-r--r--drivers/gpu/drm/drm_edid.c207
-rw-r--r--drivers/gpu/drm/drm_encoder.c9
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c67
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c223
-rw-r--r--drivers/gpu/drm/drm_fops.c21
-rw-r--r--drivers/gpu/drm/drm_fourcc.c293
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c123
-rw-r--r--drivers/gpu/drm/drm_internal.h28
-rw-r--r--drivers/gpu/drm/drm_ioctl.c23
-rw-r--r--drivers/gpu/drm/drm_irq.c164
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_mm.c99
-rw-r--r--drivers/gpu/drm/drm_mode_config.c494
-rw-r--r--drivers/gpu/drm/drm_modes.c16
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c25
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c23
-rw-r--r--drivers/gpu/drm/drm_of.c28
-rw-r--r--drivers/gpu/drm/drm_plane.c16
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c11
-rw-r--r--drivers/gpu/drm/drm_prime.c12
-rw-r--r--drivers/gpu/drm/drm_print.c59
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c20
-rw-r--r--drivers/gpu/drm/drm_property.c54
-rw-r--r--drivers/gpu/drm/drm_rect.c11
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/etnaviv/cmdstream.xml.h60
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c37
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c19
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c50
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h4
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c146
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/Makefile1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c13
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c32
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c23
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c14
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c37
-rw-r--r--drivers/gpu/drm/gma500/gem.c5
-rw-r--r--drivers/gpu/drm/gma500/gtt.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c14
-rw-r--r--drivers/gpu/drm/hisilicon/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/Makefile1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c477
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c456
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h114
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c267
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h196
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c140
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c558
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c9
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c863
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig64
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/Makefile11
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile11
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c352
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c284
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2831
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h49
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h29
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c330
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h163
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c531
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h150
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c858
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h188
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c312
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2244
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h306
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c205
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h382
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2848
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h26
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c741
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h233
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c597
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c304
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h106
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h259
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c320
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h80
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c310
-rw-r--r--drivers/gpu/drm/i915/gvt/render.h43
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c292
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h58
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c583
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h139
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h286
-rw-r--r--drivers/gpu/drm/i915/gvt/trace_points.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.h)37
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c409
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c676
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c252
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1152
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3050
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c109
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c150
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c176
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c (renamed from drivers/gpu/drm/i915/i915_gem_fence.c)61
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1089
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h250
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c170
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h338
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c186
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c766
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h212
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c111
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c107
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c90
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h73
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c126
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c727
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c675
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c838
-rw-r--r--drivers/gpu/drm/i915/i915_params.c18
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c20
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h285
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c32
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c88
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h38
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c25
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c638
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h341
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c26
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c412
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c205
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c76
-rw-r--r--drivers/gpu/drm/i915/intel_color.c52
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c141
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c11
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c534
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c23
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2061
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c611
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c559
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c99
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h199
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c45
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c56
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c26
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c22
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c203
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c152
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c17
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c33
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h5
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h32
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h82
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c56
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c450
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c195
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c420
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c185
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c45
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c139
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c10
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c12
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1428
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c28
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c371
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h147
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c173
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c45
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c209
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c63
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c695
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c36
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c19
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c7
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c9
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c187
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c71
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c17
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c42
-rw-r--r--drivers/gpu/drm/meson/Kconfig9
-rw-r--r--drivers/gpu/drm/meson/Makefile4
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c68
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h42
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c208
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.h32
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c343
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h59
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c230
-rw-r--r--drivers/gpu/drm/meson/meson_plane.h30
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h1395
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c167
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h34
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c254
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h72
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c293
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.h41
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c331
-rw-r--r--drivers/gpu/drm/meson/meson_viu.h64
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c162
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.h35
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c1
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h27
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c112
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h111
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c119
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h3757
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c888
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h60
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c344
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c32
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c39
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h162
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h300
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h2
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c38
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c84
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c267
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c133
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c300
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c306
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h70
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c37
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c17
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c49
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h42
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c9
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c68
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h25
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c34
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c90
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c68
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h45
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h19
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig19
-rw-r--r--drivers/gpu/drm/mxsfb/Makefile2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c241
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c444
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h54
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c131
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_regs.h114
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c81
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c650
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c349
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c97
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c82
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c139
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.h57
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4190
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c)22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c227
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c141
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c47
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c50
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c20
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c31
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c33
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c30
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c25
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c59
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c52
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c53
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c57
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c228
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c40
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c160
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c31
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c85
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h98
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c97
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c87
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c70
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c56
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c207
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c69
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h12
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c37
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c54
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c85
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c22
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c32
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c32
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c4
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.c5
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tegra/gem.c45
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c598
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c214
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h11
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c260
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.h5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c7
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c68
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c5
-rw-r--r--drivers/gpu/drm/udl/udl_main.c16
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c46
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c82
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c657
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c58
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c107
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/zte/Kconfig8
-rw-r--r--drivers/gpu/drm/zte/Makefile7
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c267
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h36
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c624
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h56
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c299
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h91
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c661
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h46
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h157
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/job.c9
-rw-r--r--drivers/gpu/host1x/syncpt.c23
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c43
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c16
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c80
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-asus.c299
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-cp2112.c318
-rw-r--r--drivers/hid/hid-ids.h20
-rw-r--r--drivers/hid/hid-input.c96
-rw-r--r--drivers/hid/hid-lg.c14
-rw-r--r--drivers/hid/hid-magicmouse.c12
-rw-r--r--drivers/hid/hid-mf.c166
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c88
-rw-r--r--drivers/hid/hid-rmi.c10
-rw-r--r--drivers/hid/hid-sensor-custom.c6
-rw-r--r--drivers/hid/hid-sensor-hub.c22
-rw-r--r--drivers/hid/hid-sony.c445
-rw-r--r--drivers/hid/hid-udraw-ps3.c474
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c146
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c169
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c6
-rw-r--r--drivers/hid/intel-ish-hid/ipc/utils.h64
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c6
-rw-r--r--drivers/hid/wacom.h2
-rw-r--r--drivers/hid/wacom_sys.c92
-rw-r--r--drivers/hid/wacom_wac.c601
-rw-r--r--drivers/hid/wacom_wac.h76
-rw-r--r--drivers/hsi/clients/ssi_protocol.c14
-rw-r--r--drivers/hv/channel.c93
-rw-r--r--drivers/hv/channel_mgmt.c10
-rw-r--r--drivers/hv/connection.c1
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_balloon.c44
-rw-r--r--drivers/hv/hv_snapshot.c33
-rw-r--r--drivers/hv/hv_util.c9
-rw-r--r--drivers/hv/hyperv_vmbus.h12
-rw-r--r--drivers/hv/ring_buffer.c44
-rw-r--r--drivers/hv/vmbus_drv.c176
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c26
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/adt7411.c301
-rw-r--r--drivers/hwmon/adt7462.c12
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/amc6821.c4
-rw-r--r--drivers/hwmon/coretemp.c321
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc6w201.c8
-rw-r--r--drivers/hwmon/g762.c11
-rw-r--r--drivers/hwmon/hwmon.c85
-rw-r--r--drivers/hwmon/lm85.c3
-rw-r--r--drivers/hwmon/lm87.c136
-rw-r--r--drivers/hwmon/mcp3021.c50
-rw-r--r--drivers/hwmon/nct7802.c8
-rw-r--r--drivers/hwmon/pmbus/adm1275.c20
-rw-r--r--drivers/hwmon/scpi-hwmon.c1
-rw-r--r--drivers/hwmon/smsc47m192.c5
-rw-r--r--drivers/hwmon/tc654.c514
-rw-r--r--drivers/hwmon/tmp108.c469
-rw-r--r--drivers/hwmon/via-cputemp.c77
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c48
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight.c74
-rw-r--r--drivers/hwtracing/intel_th/core.c28
-rw-r--r--drivers/hwtracing/intel_th/gth.c26
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h4
-rw-r--r--drivers/hwtracing/stm/core.c8
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c64
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h27
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c22
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c4
-rw-r--r--drivers/ide/ide-atapi.c6
-rw-r--r--drivers/ide/ide-cd.c46
-rw-r--r--drivers/ide/ide-cd.h2
-rw-r--r--drivers/ide/ide-cd_ioctl.c6
-rw-r--r--drivers/ide/ide-io.c6
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/idle/Kconfig17
-rw-r--r--drivers/idle/Makefile1
-rw-r--r--drivers/idle/i7300_idle.c612
-rw-r--r--drivers/idle/intel_idle.c154
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/Kconfig45
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/da280.c183
-rw-r--r--drivers/iio/accel/da311.c305
-rw-r--r--drivers/iio/accel/dmard10.c266
-rw-r--r--drivers/iio/accel/mma7660.c2
-rw-r--r--drivers/iio/accel/mma8452.c79
-rw-r--r--drivers/iio/accel/sca3000.c1576
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c617
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/Kconfig46
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7766.c330
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/envelope-detector.c422
-rw-r--r--drivers/iio/adc/max1027.c17
-rw-r--r--drivers/iio/adc/stm32-adc-core.c303
-rw-r--r--drivers/iio/adc/stm32-adc-core.h52
-rw-r--r--drivers/iio/adc/stm32-adc.c518
-rw-r--r--drivers/iio/adc/ti-adc0832.c106
-rw-r--r--drivers/iio/adc/ti-adc161s626.c55
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c148
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig22
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c322
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c450
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h175
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c61
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c8
-rw-r--r--drivers/iio/counter/104-quad-8.c593
-rw-r--r--drivers/iio/counter/Kconfig24
-rw-r--r--drivers/iio/counter/Makefile7
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5592r.c2
-rw-r--r--drivers/iio/dac/dpot-dac.c266
-rw-r--r--drivers/iio/dac/mcp4725.c176
-rw-r--r--drivers/iio/gyro/Kconfig18
-rw-r--r--drivers/iio/gyro/Makefile5
-rw-r--r--drivers/iio/gyro/mpu3050-core.c1306
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c124
-rw-r--r--drivers/iio/gyro/mpu3050.h96
-rw-r--r--drivers/iio/gyro/st_gyro_core.c205
-rw-r--r--drivers/iio/humidity/Kconfig24
-rw-r--r--drivers/iio/humidity/Makefile7
-rw-r--r--drivers/iio/humidity/hdc100x.c130
-rw-r--r--drivers/iio/humidity/hts221.h73
-rw-r--r--drivers/iio/humidity/hts221_buffer.c168
-rw-r--r--drivers/iio/humidity/hts221_core.c687
-rw-r--r--drivers/iio/humidity/hts221_i2c.c110
-rw-r--r--drivers/iio/humidity/hts221_spi.c125
-rw-r--r--drivers/iio/humidity/si7020.c11
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/industrialio-buffer.c7
-rw-r--r--drivers/iio/industrialio-core.c261
-rw-r--r--drivers/iio/industrialio-trigger.c21
-rw-r--r--drivers/iio/inkern.c123
-rw-r--r--drivers/iio/light/Kconfig19
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/isl29018.c (renamed from drivers/staging/iio/light/isl29018.c)159
-rw-r--r--drivers/iio/light/ltr501.c111
-rw-r--r--drivers/iio/light/max44000.c5
-rw-r--r--drivers/iio/light/tsl2583.c913
-rw-r--r--drivers/iio/magnetometer/ak8974.c8
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c147
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c376
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c1
-rw-r--r--drivers/iio/potentiometer/mcp4531.c104
-rw-r--r--drivers/iio/potentiostat/Kconfig22
-rw-r--r--drivers/iio/potentiostat/Makefile6
-rw-r--r--drivers/iio/potentiostat/lmp91000.c446
-rw-r--r--drivers/iio/pressure/Kconfig10
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c276
-rw-r--r--drivers/iio/pressure/mpl3115.c26
-rw-r--r--drivers/iio/pressure/ms5611_core.c19
-rw-r--r--drivers/iio/pressure/st_pressure_core.c257
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/core/addr.c11
-rw-r--r--drivers/infiniband/core/cm.c126
-rw-r--r--drivers/infiniband/core/cma.c77
-rw-r--r--drivers/infiniband/core/core_priv.h9
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c38
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c72
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c27
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c37
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c19
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h89
-rw-r--r--drivers/infiniband/hw/hfi1/init.c104
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c13
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c19
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c25
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h60
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c2
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c25
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c13
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c10
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig1
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c22
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c92
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/input/misc/arizona-haptics.c13
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/input/mouse/focaltech.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c25
-rw-r--r--drivers/iommu/arm-smmu.c16
-rw-r--r--drivers/iommu/dmar.c4
-rw-r--r--drivers/iommu/intel-iommu.c51
-rw-r--r--drivers/iommu/intel-svm.c28
-rw-r--r--drivers/irqchip/Kconfig4
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c26
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c75
-rw-r--r--drivers/irqchip/irq-gic-v3.c13
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c241
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c4
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c1
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c1
-rw-r--r--drivers/leds/Kconfig21
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c4
-rw-r--r--drivers/leds/led-core.c62
-rw-r--r--drivers/leds/leds-cobalt-raq.c6
-rw-r--r--drivers/leds/leds-lp3952.c1
-rw-r--r--drivers/leds/leds-mc13783.c5
-rw-r--r--drivers/leds/leds-mlxcpld.c5
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-nic78bx.c209
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c24
-rw-r--r--drivers/leds/leds-pca963x.c80
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c2
-rw-r--r--drivers/leds/uleds.c235
-rw-r--r--drivers/lguest/hypercalls.c4
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c19
-rw-r--r--drivers/lightnvm/Makefile2
-rw-r--r--drivers/lightnvm/core.c387
-rw-r--r--drivers/lightnvm/gennvm.c645
-rw-r--r--drivers/lightnvm/gennvm.h34
-rw-r--r--drivers/lightnvm/lightnvm.h35
-rw-r--r--drivers/lightnvm/rrpc.c514
-rw-r--r--drivers/lightnvm/rrpc.h65
-rw-r--r--drivers/lightnvm/sysblk.c98
-rw-r--r--drivers/lightnvm/sysfs.c198
-rw-r--r--drivers/mailbox/pcc.c13
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/debug.c15
-rw-r--r--drivers/md/bcache/io.c4
-rw-r--r--drivers/md/bcache/journal.c4
-rw-r--r--drivers/md/bcache/movinggc.c6
-rw-r--r--drivers/md/bcache/request.c8
-rw-r--r--drivers/md/bcache/super.c16
-rw-r--r--drivers/md/bcache/writeback.c5
-rw-r--r--drivers/md/bcache/writeback.h3
-rw-r--r--drivers/md/bitmap.c166
-rw-r--r--drivers/md/dm-bufio.c34
-rw-r--r--drivers/md/dm-cache-metadata.c3
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c216
-rw-r--r--drivers/md/dm-flakey.c53
-rw-r--r--drivers/md/dm-io.c34
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c42
-rw-r--r--drivers/md/dm-raid.c86
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-rq.c70
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-table.c43
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/md/linear.c31
-rw-r--r--drivers/md/md.c711
-rw-r--r--drivers/md/md.h108
-rw-r--r--drivers/md/multipath.c94
-rw-r--r--drivers/md/persistent-data/dm-array.c2
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c4
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c14
-rw-r--r--drivers/md/raid0.c107
-rw-r--r--drivers/md/raid1.c273
-rw-r--r--drivers/md/raid1.h19
-rw-r--r--drivers/md/raid10.c302
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c1837
-rw-r--r--drivers/md/raid5.c634
-rw-r--r--drivers/md/raid5.h172
-rw-r--r--drivers/media/dvb-core/dvb_net.c2
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c (renamed from drivers/media/usb/dvb-usb/gp8psk-fe.c)156
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.h82
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c37
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c105
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c34
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb/Makefile2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c304
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c77
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c100
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c27
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c36
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c25
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c113
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h3
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c26
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h5
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c128
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c104
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h9
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c132
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h63
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c25
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c136
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c16
-rw-r--r--drivers/media/usb/s2255/s2255drv.c15
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c16
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c7
-rw-r--r--drivers/media/usb/uvc/uvc_video.c6
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c2
-rw-r--r--drivers/message/fusion/mptbase.c28
-rw-r--r--drivers/message/fusion/mptlan.c15
-rw-r--r--drivers/message/fusion/mptscsih.c11
-rw-r--r--drivers/mfd/Kconfig14
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c6
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/qcom-pm8xxx.c (renamed from drivers/mfd/pm8921-core.c)42
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c1
-rw-r--r--drivers/mfd/wm8994-core.c16
-rw-r--r--drivers/misc/cxl/context.c5
-rw-r--r--drivers/misc/genwqe/card_base.h1
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm_bugs.c71
-rw-r--r--drivers/misc/lkdtm_core.c2
-rw-r--r--drivers/misc/lkdtm_perms.c7
-rw-r--r--drivers/misc/mei/amthif.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c102
-rw-r--r--drivers/misc/mei/bus.c217
-rw-r--r--drivers/misc/mei/client.c23
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-me.c90
-rw-r--r--drivers/misc/mei/hw-me.h2
-rw-r--r--drivers/misc/mei/hw-txe.c18
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/interrupt.c7
-rw-r--r--drivers/misc/mei/main.c45
-rw-r--r--drivers/misc/mei/mei_dev.h38
-rw-r--r--drivers/misc/mei/pci-me.c1
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c21
-rw-r--r--drivers/mmc/Kconfig2
-rw-r--r--drivers/mmc/Makefile1
-rw-r--r--drivers/mmc/card/Kconfig70
-rw-r--r--drivers/mmc/card/Makefile10
-rw-r--r--drivers/mmc/core/Kconfig66
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/block.c (renamed from drivers/mmc/card/block.c)501
-rw-r--r--drivers/mmc/core/block.h (renamed from drivers/mmc/card/block.h)0
-rw-r--r--drivers/mmc/core/core.c97
-rw-r--r--drivers/mmc/core/debugfs.c6
-rw-r--r--drivers/mmc/core/mmc.c116
-rw-r--r--drivers/mmc/core/mmc_ops.c220
-rw-r--r--drivers/mmc/core/mmc_ops.h7
-rw-r--r--drivers/mmc/core/mmc_test.c (renamed from drivers/mmc/card/mmc_test.c)66
-rw-r--r--drivers/mmc/core/queue.c (renamed from drivers/mmc/card/queue.c)318
-rw-r--r--drivers/mmc/core/queue.h (renamed from drivers/mmc/card/queue.h)29
-rw-r--r--drivers/mmc/core/sd.c14
-rw-r--r--drivers/mmc/core/sd_ops.c27
-rw-r--r--drivers/mmc/core/sdio.c17
-rw-r--r--drivers/mmc/core/sdio_cis.c3
-rw-r--r--drivers/mmc/core/sdio_irq.c12
-rw-r--r--drivers/mmc/core/sdio_uart.c (renamed from drivers/mmc/card/sdio_uart.c)4
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig23
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/davinci_mmc.c130
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c52
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c39
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c29
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c33
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c42
-rw-r--r--drivers/mmc/host/dw_mmc.c184
-rw-r--r--drivers/mmc/host/dw_mmc.h7
-rw-r--r--drivers/mmc/host/jz4740_mmc.c3
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c851
-rw-r--r--drivers/mmc/host/mmci.c128
-rw-r--r--drivers/mmc/host/mmci.h71
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/mxs-mmc.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c3
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c15
-rw-r--r--drivers/mmc/host/sdhci-acpi.c1
-rw-r--r--drivers/mmc/host/sdhci-cadence.c283
-rw-r--r--drivers/mmc/host/sdhci-iproc.c35
-rw-r--r--drivers/mmc/host/sdhci-msm.c695
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c34
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c98
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci.c390
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c274
-rw-r--r--drivers/mmc/host/sunxi-mmc.c15
-rw-r--r--drivers/mmc/host/tmio_mmc.h32
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c119
-rw-r--r--drivers/mmc/host/wbsd.c11
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/mtk_ecc.c19
-rw-r--r--drivers/mtd/nand/nand_base.c60
-rw-r--r--drivers/net/appletalk/ipddp.c1
-rw-r--r--drivers/net/bonding/bond_alb.c82
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/can/grcan.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c18
-rw-r--r--drivers/net/can/usb/kvaser_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h37
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c104
-rw-r--r--drivers/net/cris/eth_v10.c12
-rw-r--r--drivers/net/dsa/b53/b53_common.c16
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c20
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2017
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c367
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c186
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h17
-rw-r--r--drivers/net/dsa/mv88e6xxx/mv88e6xxx.h244
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c729
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h71
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/3com/3c509.c56
-rw-r--r--drivers/net/ethernet/3com/3c515.c1
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c16
-rw-r--r--drivers/net/ethernet/3com/typhoon.c66
-rw-r--r--drivers/net/ethernet/8390/8390.c1
-rw-r--r--drivers/net/ethernet/8390/8390p.c1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c1
-rw-r--r--drivers/net/ethernet/8390/hydra.c1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c1
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c1
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c1
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c1
-rw-r--r--drivers/net/ethernet/8390/wd.c1
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c1
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c15
-rw-r--r--drivers/net/ethernet/adi/Kconfig2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c1
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c9
-rw-r--r--drivers/net/ethernet/agere/et131x.c7
-rw-r--r--drivers/net/ethernet/alacritech/Kconfig28
-rw-r--r--drivers/net/ethernet/alacritech/Makefile4
-rw-r--r--drivers/net/ethernet/alacritech/slic.h575
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c1870
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c26
-rw-r--r--drivers/net/ethernet/alteon/acenic.c70
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h12
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c128
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c18
-rw-r--r--drivers/net/ethernet/amd/Kconfig12
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c19
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c9
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c1
-rw-r--r--drivers/net/ethernet/amd/hplance.c1
-rw-r--r--drivers/net/ethernet/amd/lance.c1
-rw-r--r--drivers/net/ethernet/amd/mvme147.c1
-rw-r--r--drivers/net/ethernet/amd/ni65.c1
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c1
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h385
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c154
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c1054
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c349
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c87
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c492
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c705
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c1130
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c529
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c845
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c3084
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c642
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h386
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c704
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c70
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c152
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h29
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c353
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h31
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c13
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c146
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c140
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h9
-rw-r--r--drivers/net/ethernet/apple/bmac.c1
-rw-r--r--drivers/net/ethernet/apple/mace.c1
-rw-r--r--drivers/net/ethernet/apple/macmace.c1
-rw-r--r--drivers/net/ethernet/arc/Kconfig5
-rw-r--r--drivers/net/ethernet/arc/emac_main.c9
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h36
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c59
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c564
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c54
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c41
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c62
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c81
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c82
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h3
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c27
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig14
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c45
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c74
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c41
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c172
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c506
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c502
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c190
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1622
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c346
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h93
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c17
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c54
-rw-r--r--drivers/net/ethernet/cadence/macb.c340
-rw-r--r--drivers/net/ethernet/cadence/macb.h17
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c37
-rw-r--r--drivers/net/ethernet/cavium/Kconfig14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c322
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h44
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c722
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h50
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h274
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c49
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h41
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h49
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c38
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.h37
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h37
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c74
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c577
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c450
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3251
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h101
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_image.h36
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h46
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c156
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c150
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h147
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c101
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h25
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c318
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h115
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h47
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h6
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c13
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h83
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c74
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c222
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c221
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c151
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h51
-rw-r--r--drivers/net/ethernet/cavium/thunder/q_struct.h8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c78
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c154
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c122
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c16
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c1
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c5
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c22
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/ec_bhf.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c23
-rw-r--r--drivers/net/ethernet/ethoc.c45
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/fealnx.c1
-rw-r--r--drivers/net/ethernet/freescale/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Makefile12
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2753
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h185
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c165
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h141
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c417
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c77
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c58
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c25
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c24
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c9
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c352
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c412
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c117
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c1
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c16
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c71
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c72
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h86
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c85
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c316
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c168
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1518
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c158
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c219
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h106
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c141
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h51
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c156
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h46
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h87
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c260
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c100
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c16
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h3
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c199
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c240
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c751
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c50
-rw-r--r--drivers/net/ethernet/jme.c12
-rw-r--r--drivers/net/ethernet/korina.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c27
-rw-r--r--drivers/net/ethernet/marvell/Kconfig20
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c198
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c395
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c39
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.h4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c31
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c465
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h71
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c366
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c352
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c569
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c336
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c442
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h208
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h229
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h888
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h127
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c605
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c651
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c33
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c22
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c7
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c13
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h73
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c1255
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c50
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c94
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_offload.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c45
-rw-r--r--drivers/net/ethernet/netx-eth.c1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c13
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c1
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c12
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c456
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h124
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c1277
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c611
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h133
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c582
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c135
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c441
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h158
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c501
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.h176
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c218
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.c101
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c125
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c398
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c101
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h54
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h173
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c362
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1514
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/Makefile4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c23
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c245
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c217
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c210
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c569
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c18
-rw-r--r--drivers/net/ethernet/qualcomm/qca_framing.h6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/rdc/r6040.c1
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c8
-rw-r--r--drivers/net/ethernet/realtek/8139too.c13
-rw-r--r--drivers/net/ethernet/realtek/atp.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c19
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c20
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.h1
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c132
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c5
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c44
-rw-r--r--drivers/net/ethernet/seeq/ether3.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig9
-rw-r--r--drivers/net/ethernet/sfc/Makefile7
-rw-r--r--drivers/net/ethernet/sfc/ef10.c242
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h103
-rw-r--r--drivers/net/ethernet/sfc/efx.c92
-rw-r--r--drivers/net/ethernet/sfc/enum.h5
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c33
-rw-r--r--drivers/net/ethernet/sfc/falcon/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/falcon/Makefile6
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h542
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c3350
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h277
-rw-r--r--drivers/net/ethernet/sfc/falcon/enum.h171
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c1343
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c (renamed from drivers/net/ethernet/sfc/falcon.c)1040
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon_boards.c (renamed from drivers/net/ethernet/sfc/falcon_boards.c)94
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c2892
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch_regs.h2932
-rw-r--r--drivers/net/ethernet/sfc/falcon/filter.h272
-rw-r--r--drivers/net/ethernet/sfc/falcon/io.h290
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.c (renamed from drivers/net/ethernet/sfc/mdio_10g.c)86
-rw-r--r--drivers/net/ethernet/sfc/falcon/mdio_10g.h (renamed from drivers/net/ethernet/sfc/mdio_10g.h)54
-rw-r--r--drivers/net/ethernet/sfc/falcon/mtd.c133
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h1464
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c527
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h513
-rw-r--r--drivers/net/ethernet/sfc/falcon/phy.h (renamed from drivers/net/ethernet/sfc/phy.h)18
-rw-r--r--drivers/net/ethernet/sfc/falcon/qt202x_phy.c (renamed from drivers/net/ethernet/sfc/qt202x_phy.c)140
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c974
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.c808
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.h55
-rw-r--r--drivers/net/ethernet/sfc/falcon/tenxpress.c (renamed from drivers/net/ethernet/sfc/tenxpress.c)108
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c649
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h27
-rw-r--r--drivers/net/ethernet/sfc/falcon/txc43128_phy.c (renamed from drivers/net/ethernet/sfc/txc43128_phy.c)134
-rw-r--r--drivers/net/ethernet/sfc/falcon/workarounds.h44
-rw-r--r--drivers/net/ethernet/sfc/farch.c182
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h483
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h78
-rw-r--r--drivers/net/ethernet/sfc/nic.h174
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/rx.c27
-rw-r--r--drivers/net/ethernet/sfc/siena.c4
-rw-r--r--drivers/net/ethernet/sfc/tx.c1055
-rw-r--r--drivers/net/ethernet/sfc/tx.h27
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c451
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h21
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/silan/sc92031.c1
-rw-r--r--drivers/net/ethernet/sis/sis190.c1
-rw-r--r--drivers/net/ethernet/sis/sis900.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c1
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h80
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c23
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c217
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c110
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c83
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c141
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c489
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h72
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c7
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h2
-rw-r--r--drivers/net/ethernet/sun/sungem.c11
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunqe.c12
-rw-r--r--drivers/net/ethernet/sun/sunqe.h4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c5
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c26
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h3
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c28
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c14
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h3
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c528
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c233
-rw-r--r--drivers/net/ethernet/ti/cpts.h80
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c561
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c20
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c52
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c479
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c21
-rw-r--r--drivers/net/ethernet/tile/tilepro.c27
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c23
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c3
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c24
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c18
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c20
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c11
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fjes/Makefile2
-rw-r--r--drivers/net/fjes/fjes.h16
-rw-r--r--drivers/net/fjes/fjes_debugfs.c117
-rw-r--r--drivers/net/fjes/fjes_ethtool.c181
-rw-r--r--drivers/net/fjes/fjes_hw.c171
-rw-r--r--drivers/net/fjes/fjes_hw.h34
-rw-r--r--drivers/net/fjes/fjes_main.c65
-rw-r--r--drivers/net/fjes/fjes_trace.c30
-rw-r--r--drivers/net/fjes/fjes_trace.h380
-rw-r--r--drivers/net/geneve.c754
-rw-r--r--drivers/net/gtp.c24
-rw-r--r--drivers/net/hippi/rrunner.c1
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/hyperv/netvsc_drv.c44
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c16
-rw-r--r--drivers/net/ieee802154/atusb.c81
-rw-r--r--drivers/net/ieee802154/atusb.h11
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c27
-rw-r--r--drivers/net/irda/irda-usb.c1
-rw-r--r--drivers/net/irda/w83977af_ir.c673
-rw-r--r--drivers/net/macsec.c75
-rw-r--r--drivers/net/macvlan.c58
-rw-r--r--drivers/net/macvtap.c26
-rw-r--r--drivers/net/mii.c197
-rw-r--r--drivers/net/nlmon.c20
-rw-r--r--drivers/net/ntb_netdev.c3
-rw-r--r--drivers/net/phy/Kconfig22
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/aquantia.c28
-rw-r--r--drivers/net/phy/at803x.c78
-rw-r--r--drivers/net/phy/bcm-cygnus.c5
-rw-r--r--drivers/net/phy/bcm-phy-lib.c187
-rw-r--r--drivers/net/phy/bcm-phy-lib.h15
-rw-r--r--drivers/net/phy/bcm7xxx.c92
-rw-r--r--drivers/net/phy/broadcom.c139
-rw-r--r--drivers/net/phy/dp83640.c14
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/dp83867.c28
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/intel-xway.c24
-rw-r--r--drivers/net/phy/marvell.c10
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c60
-rw-r--r--drivers/net/phy/mdio_bus.c11
-rw-r--r--drivers/net/phy/mdio_device.c2
-rw-r--r--drivers/net/phy/meson-gxl.c81
-rw-r--r--drivers/net/phy/micrel.c38
-rw-r--r--drivers/net/phy/microchip.c39
-rw-r--r--drivers/net/phy/mscc.c358
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy.c136
-rw-r--r--drivers/net/phy/phy_device.c177
-rw-r--r--drivers/net/phy/phy_led_triggers.c134
-rw-r--r--drivers/net/phy/realtek.c20
-rw-r--r--drivers/net/phy/smsc.c18
-rw-r--r--drivers/net/phy/vitesse.c48
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/rionet.c15
-rw-r--r--drivers/net/sb1000.c1
-rw-r--r--drivers/net/slip/slip.c11
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/tun.c62
-rw-r--r--drivers/net/usb/Kconfig5
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/asix_devices.c10
-rw-r--r--drivers/net/usb/ax88172a.c10
-rw-r--r--drivers/net/usb/ax88179_178a.c21
-rw-r--r--drivers/net/usb/catc.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c12
-rw-r--r--drivers/net/usb/cdc_ether.c66
-rw-r--r--drivers/net/usb/cdc_mbim.c21
-rw-r--r--drivers/net/usb/cdc_ncm.c19
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/lan78xx.c463
-rw-r--r--drivers/net/usb/lan78xx.h14
-rw-r--r--drivers/net/usb/pegasus.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c45
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/sierra_net.c13
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/veth.c17
-rw-r--r--drivers/net/virtio_net.c88
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c24
-rw-r--r--drivers/net/vrf.c25
-rw-r--r--drivers/net/vxlan.c430
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c1
-rw-r--r--drivers/net/wan/hdlc.c11
-rw-r--r--drivers/net/wan/hdlc_fr.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wan/n2.c1
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/sealevel.c1
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c5
-rw-r--r--drivers/net/wimax/i2400m/netdev.c22
-rw-r--r--drivers/net/wireless/Kconfig13
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h45
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h31
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c137
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c129
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c192
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c338
-rw-r--r--drivers/net/wireless/ath/main.c7
-rw-r--r--drivers/net/wireless/ath/regd.c3
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c129
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c100
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c55
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c112
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h25
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h586
-rw-r--r--drivers/net/wireless/atmel/atmel.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c413
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c171
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c242
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/cisco/airo.c14
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c8
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c9
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c10
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_rx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c15
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c6
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c113
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/README23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c37
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c116
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c183
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c157
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c87
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c63
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c31
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/regs.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c67
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/ray_cs.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h31
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c10
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c122
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c175
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c134
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h4
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h32
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c24
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c28
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/wl3501_cs.c1
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1
-rw-r--r--drivers/net/xen-netback/interface.c5
-rw-r--r--drivers/net/xen-netback/xenbus.c64
-rw-r--r--drivers/net/xen-netfront.c73
-rw-r--r--drivers/nfc/mei_phy.c44
-rw-r--r--drivers/nfc/microread/mei.c23
-rw-r--r--drivers/nfc/pn544/mei.c23
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c15
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c2
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/bus.c25
-rw-r--r--drivers/nvme/host/Kconfig17
-rw-r--r--drivers/nvme/host/Makefile3
-rw-r--r--drivers/nvme/host/core.c131
-rw-r--r--drivers/nvme/host/fabrics.c33
-rw-r--r--drivers/nvme/host/fc.c2586
-rw-r--r--drivers/nvme/host/lightnvm.c223
-rw-r--r--drivers/nvme/host/nvme.h51
-rw-r--r--drivers/nvme/host/pci.c90
-rw-r--r--drivers/nvme/host/rdma.c85
-rw-r--r--drivers/nvme/host/scsi.c11
-rw-r--r--drivers/nvme/target/Kconfig24
-rw-r--r--drivers/nvme/target/Makefile4
-rw-r--r--drivers/nvme/target/admin-cmd.c3
-rw-r--r--drivers/nvme/target/configfs.c30
-rw-r--r--drivers/nvme/target/core.c32
-rw-r--r--drivers/nvme/target/fabrics-cmd.c14
-rw-r--r--drivers/nvme/target/fc.c2288
-rw-r--r--drivers/nvme/target/fcloop.c1148
-rw-r--r--drivers/nvme/target/io-cmd.c39
-rw-r--r--drivers/nvme/target/loop.c26
-rw-r--r--drivers/nvme/target/nvmet.h8
-rw-r--r--drivers/nvme/target/rdma.c26
-rw-r--r--drivers/nvmem/Kconfig22
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/bcm-ocotp.c335
-rw-r--r--drivers/nvmem/lpc18xx_otp.c124
-rw-r--r--drivers/of/base.c11
-rw-r--r--drivers/of/fdt.c19
-rw-r--r--drivers/of/irq.c1
-rw-r--r--drivers/of/of_mdio.c21
-rw-r--r--drivers/of/of_numa.c7
-rw-r--r--drivers/of/overlay.c47
-rw-r--r--drivers/of/platform.c6
-rw-r--r--drivers/of/resolver.c358
-rw-r--r--drivers/oprofile/nmi_timer_int.c58
-rw-r--r--drivers/pci/host/pci-xgene-msi.c69
-rw-r--r--drivers/pci/host/pcie-designware-plat.c2
-rw-r--r--drivers/pci/host/pcie-designware.c7
-rw-r--r--drivers/pci/host/pcie-qcom.c2
-rw-r--r--drivers/pci/host/pcie-rockchip.c62
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c7
-rw-r--r--drivers/pci/msi.c78
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c14
-rw-r--r--drivers/pci/probe.c28
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/pcmcia/m32r_pcc.c41
-rw-r--r--drivers/pcmcia/soc_common.c2
-rw-r--r--drivers/phy/Kconfig33
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/phy-berlin-sata.c3
-rw-r--r--drivers/phy/phy-brcm-sata.c6
-rw-r--r--drivers/phy/phy-da8xx-usb.c10
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c15
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c4
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c4
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c2
-rw-r--r--drivers/phy/phy-meson8b-usb2.c286
-rw-r--r--drivers/phy/phy-miphy365x.c625
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h7
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c72
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c65
-rw-r--r--drivers/phy/phy-qcom-ufs.c273
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c118
-rw-r--r--drivers/phy/phy-rockchip-emmc.c2
-rw-r--r--drivers/phy/phy-rockchip-inno-usb2.c607
-rw-r--r--drivers/phy/phy-rockchip-pcie.c13
-rw-r--r--drivers/phy/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/phy-stih41x-usb.c188
-rw-r--r--drivers/phy/phy-sun4i-usb.c16
-rw-r--r--drivers/phy/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/phy-twl4030-usb.c7
-rw-r--r--drivers/phy/tegra/xusb-tegra124.c3
-rw-r--r--drivers/phy/tegra/xusb.c10
-rw-r--r--drivers/pinctrl/Kconfig26
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c165
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/devicetree.c144
-rw-r--r--drivers/pinctrl/devicetree.h23
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c9
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c19
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c41
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6397.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8173.h2
-rw-r--r--drivers/pinctrl/meson/Makefile3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c589
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c23
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91.c21
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c210
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c605
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c86
-rw-r--r--drivers/pinctrl/pinctrl-single.c217
-rw-r--r--drivers/pinctrl/pinctrl-st.c6
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c1275
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c6
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c1379
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c45
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c37
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h10
-rw-r--r--drivers/pinctrl/sh-pfc/core.c15
-rw-r--r--drivers/pinctrl/sh-pfc/core.h4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c342
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c616
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c576
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c3
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h14
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32f429.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-gr8.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31s.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c11
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c506
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-vt8500.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8505.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8650.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8850.c17
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c10
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.h1
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c159
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c6
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/toshiba-wmi.c26
-rw-r--r--drivers/power/avs/rockchip-io-domain.c2
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-poweroff.c1
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/piix4-poweroff.c113
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c1
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/ab8500_fg.c8
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c44
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c3
-rw-r--r--drivers/power/supply/max17040_battery.c52
-rw-r--r--drivers/power/supply/max8997_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c25
-rw-r--r--drivers/powercap/intel_rapl.c389
-rw-r--r--drivers/ptp/Kconfig10
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-hibvt.c271
-rw-r--r--drivers/pwm/pwm-meson.c2
-rw-r--r--drivers/pwm/sysfs.c2
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/axp20x-regulator.c12
-rw-r--r--drivers/regulator/core.c40
-rw-r--r--drivers/regulator/fixed.c46
-rw-r--r--drivers/regulator/gpio-regulator.c9
-rw-r--r--drivers/regulator/helpers.c6
-rw-r--r--drivers/regulator/lp873x-regulator.c1
-rw-r--r--drivers/regulator/max77620-regulator.c47
-rw-r--r--drivers/regulator/rk808-regulator.c9
-rw-r--r--drivers/regulator/stw481x-vmmc.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps65086-regulator.c51
-rw-r--r--drivers/regulator/twl-regulator.c673
-rw-r--r--drivers/regulator/twl6030-regulator.c793
-rw-r--r--drivers/remoteproc/Kconfig52
-rw-r--r--drivers/remoteproc/Makefile9
-rw-r--r--drivers/remoteproc/qcom_adsp_pil.c428
-rw-r--r--drivers/remoteproc/qcom_mdt_loader.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c1
-rw-r--r--drivers/remoteproc/qcom_wcnss.c53
-rw-r--r--drivers/remoteproc/qcom_wcnss.h2
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c9
-rw-r--r--drivers/remoteproc/remoteproc_core.c247
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c71
-rw-r--r--drivers/remoteproc/remoteproc_internal.h6
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c151
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c17
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c364
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c342
-rw-r--r--drivers/rpmsg/qcom_smd.c18
-rw-r--r--drivers/rpmsg/rpmsg_core.c74
-rw-r--r--drivers/rtc/rtc-asm9260.c1
-rw-r--r--drivers/rtc/rtc-cmos.c22
-rw-r--r--drivers/rtc/rtc-omap.c38
-rw-r--r--drivers/s390/block/dasd.c301
-rw-r--r--drivers/s390/block/dasd_3990_erp.c52
-rw-r--r--drivers/s390/block/dasd_devmap.c326
-rw-r--r--drivers/s390/block/dasd_eckd.c323
-rw-r--r--drivers/s390/block/dasd_eckd.h5
-rw-r--r--drivers/s390/block/dasd_eer.c29
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_int.h449
-rw-r--r--drivers/s390/char/con3215.c12
-rw-r--r--drivers/s390/char/sclp.h23
-rw-r--r--drivers/s390/char/sclp_cmd.c25
-rw-r--r--drivers/s390/char/sclp_ctl.c4
-rw-r--r--drivers/s390/char/sclp_early.c31
-rw-r--r--drivers/s390/char/sclp_quiesce.c4
-rw-r--r--drivers/s390/char/sclp_tty.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/zcore.c22
-rw-r--r--drivers/s390/cio/cmf.c10
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/device.c6
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c6
-rw-r--r--drivers/s390/cio/device_ops.c5
-rw-r--r--drivers/s390/crypto/ap_bus.c12
-rw-r--r--drivers/s390/net/ctcm_main.c5
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/netiucv.c41
-rw-r--r--drivers/s390/net/qeth_core_main.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c36
-rw-r--r--drivers/s390/virtio/virtio_ccw.c25
-rw-r--r--drivers/scsi/Kconfig35
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c137
-rw-r--r--drivers/scsi/NCR5380.h87
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/comminit.c10
-rw-r--r--drivers/scsi/aacraid/commsup.c25
-rw-r--r--drivers/scsi/aacraid/linit.c20
-rw-r--r--drivers/scsi/advansys.c3
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c5
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h5
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c91
-rw-r--r--drivers/scsi/arm/cumana_1.c98
-rw-r--r--drivers/scsi/arm/oak.c34
-rw-r--r--drivers/scsi/atari_scsi.c77
-rw-r--r--drivers/scsi/be2iscsi/be_main.c8
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h30
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_im.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/cxlflash/common.h39
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c6
-rw-r--r--drivers/scsi/cxlflash/main.c410
-rw-r--r--drivers/scsi/cxlflash/sislite.h2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c29
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/dmx3191d.c33
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/fcoe/fcoe.c25
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c157
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c83
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c30
-rw-r--r--drivers/scsi/fnic/fnic_trace.c4
-rw-r--r--drivers/scsi/fnic/fnic_trace.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/g_NCR5380.c296
-rw-r--r--drivers/scsi/g_NCR5380.h32
-rw-r--r--drivers/scsi/g_NCR5380_mmio.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c79
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c556
-rw-r--r--drivers/scsi/hpsa.c268
-rw-r--r--drivers/scsi/hpsa.h8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c40
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c900
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h5
-rw-r--r--drivers/scsi/ipr.c174
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/ips.c13
-rw-r--r--drivers/scsi/isci/host.h1
-rw-r--r--drivers/scsi/isci/init.c23
-rw-r--r--drivers/scsi/isci/probe_roms.c1
-rw-r--r--drivers/scsi/isci/remote_node_context.c7
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c61
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c256
-rw-r--r--drivers/scsi/libfc/fc_fcp.c235
-rw-r--r--drivers/scsi/libfc/fc_libfc.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c128
-rw-r--r--drivers/scsi/libfc/fc_rport.c561
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c422
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c83
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c139
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c23
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c69
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c153
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pmcraid.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h18
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c27
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c97
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/qlogicpti.h4
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c143
-rw-r--r--drivers/scsi/scsi_transport_fc.c455
-rw-r--r--drivers/scsi/scsi_transport_srp.c52
-rw-r--r--drivers/scsi/sd.c185
-rw-r--r--drivers/scsi/sd.h70
-rw-r--r--drivers/scsi/sd_zbc.c648
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c102
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3_scsi.c80
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c39
-rw-r--r--drivers/scsi/ufs/ufs.h5
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c488
-rw-r--r--drivers/scsi/ufs/ufshcd.h46
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/unipro.h4
-rw-r--r--drivers/scsi/vmw_pvscsi.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.h2
-rw-r--r--drivers/scsi/xen-scsifront.c193
-rw-r--r--drivers/sh/intc/virq.c2
-rw-r--r--drivers/soc/Kconfig3
-rw-r--r--drivers/soc/fsl/Kconfig18
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/guts.c239
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c46
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c39
-rw-r--r--drivers/soc/renesas/Makefile5
-rw-r--r--drivers/soc/renesas/rcar-rst.c92
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-armada-3700.c923
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c324
-rw-r--r--drivers/spi/spi-axi-spi-engine.c1
-rw-r--r--drivers/spi/spi-dw.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c313
-rw-r--r--drivers/spi/spi-fsl-espi.c728
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c525
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-jcore.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c11
-rw-r--r--drivers/spi/spi-orion.c83
-rw-r--r--drivers/spi/spi-pxa2xx.h1
-rw-r--r--drivers/spi/spi-rspi.c52
-rw-r--r--drivers/spi/spi-s3c64xx.c21
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-sun4i.c75
-rw-r--r--drivers/spi/spi-sun6i.c18
-rw-r--r--drivers/spi/spi-ti-qspi.c1
-rw-r--r--drivers/spi/spi-topcliff-pch.c13
-rw-r--r--drivers/spi/spi-xlp.c1
-rw-r--r--drivers/spi/spi.c31
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/TODO8
-rw-r--r--drivers/staging/android/ashmem.c40
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/uapi/ion_test.h1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/staging/comedi/comedi.h55
-rw-r--r--drivers/staging/comedi/comedidev.h12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c4
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c172
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h14
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c19
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c6
-rw-r--r--drivers/staging/comedi/drivers/s626.c182
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c12
-rw-r--r--drivers/staging/dgnc/Makefile3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c44
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c558
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h189
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c6
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c111
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h58
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c703
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h40
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c362
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/digi.h107
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c69
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c68
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c19
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c2
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c33
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fbtft/fbtft_device.c12
-rw-r--r--drivers/staging/fbtft/flexfb.c373
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig24
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp-cmd.h (renamed from drivers/staging/fsl-mc/include/dpbp-cmd.h)61
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c74
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h (renamed from drivers/staging/fsl-mc/include/dpcon-cmd.h)4
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h49
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c70
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h141
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h14
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c37
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h90
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c23
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c69
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c78
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c66
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h3
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c4
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c12
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h169
-rw-r--r--drivers/staging/fsl-mc/include/dpmng.h18
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h402
-rw-r--r--drivers/staging/fsl-mc/include/mc-bus.h6
-rw-r--r--drivers/staging/fsl-mc/include/mc-cmd.h44
-rw-r--r--drivers/staging/fsl-mc/include/mc-sys.h3
-rw-r--r--drivers/staging/fsl-mc/include/mc.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c6
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h14
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h1
-rw-r--r--drivers/staging/gdm724x/netlink_k.h3
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c8
-rw-r--r--drivers/staging/greybus/arche-platform.c4
-rw-r--r--drivers/staging/greybus/audio_codec.c5
-rw-r--r--drivers/staging/greybus/audio_codec.h1
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c35
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c16
-rw-r--r--drivers/staging/greybus/audio_module.c7
-rw-r--r--drivers/staging/greybus/audio_topology.c8
-rw-r--r--drivers/staging/greybus/camera.c7
-rw-r--r--drivers/staging/greybus/es2.c5
-rw-r--r--drivers/staging/greybus/log.c6
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/timesync.c6
-rw-r--r--drivers/staging/greybus/uart.c36
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c1
-rw-r--r--drivers/staging/i4l/act2000/capi.c7
-rw-r--r--drivers/staging/i4l/act2000/module.c24
-rw-r--r--drivers/staging/i4l/icn/icn.c3
-rw-r--r--drivers/staging/i4l/icn/icn.h5
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c2
-rw-r--r--drivers/staging/i4l/pcbit/capi.c5
-rw-r--r--drivers/staging/i4l/pcbit/drv.c5
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c2
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 6
-rw-r--r--drivers/staging/iio/TODO70
-rw-r--r--drivers/staging/iio/accel/Kconfig10
-rw-r--r--drivers/staging/iio/accel/Makefile3
-rw-r--r--drivers/staging/iio/accel/sca3000.h279
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c1210
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c350
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c127
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606.c (renamed from drivers/staging/iio/adc/ad7606_core.c)436
-rw-r--r--drivers/staging/iio/adc/ad7606.h58
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c23
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c102
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c19
-rw-r--r--drivers/staging/iio/adc/ad7780.c22
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c140
-rw-r--r--drivers/staging/iio/cdc/ad7746.c151
-rw-r--r--drivers/staging/iio/frequency/ad9832.c66
-rw-r--r--drivers/staging/iio/frequency/ad9832.h6
-rw-r--r--drivers/staging/iio/frequency/ad9834.c19
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c38
-rw-r--r--drivers/staging/iio/light/Kconfig19
-rw-r--r--drivers/staging/iio/light/Makefile2
-rw-r--r--drivers/staging/iio/light/tsl2583.c963
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c86
-rw-r--r--drivers/staging/iio/ring_hw.h27
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c37
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h22
-rw-r--r--drivers/staging/ks7010/ks_hostif.c124
-rw-r--r--drivers/staging/ks7010/ks_wlan.h143
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c436
-rw-r--r--drivers/staging/ks7010/michael_mic.c29
-rw-r--r--drivers/staging/ks7010/michael_mic.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h20
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h53
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h185
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h24
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h8
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c26
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c44
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c36
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c5
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c143
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c22
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c58
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c187
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c32
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c62
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c10
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c26
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c108
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c50
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c17
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c73
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c36
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h23
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c29
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h25
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c22
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h8
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h30
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c7
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c8
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h379
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h65
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h438
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h44
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h898
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h717
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h70
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h102
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h291
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h264
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h15
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h199
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c22
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h88
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c45
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c316
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c26
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c132
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c195
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c968
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c139
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c47
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c395
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h121
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c342
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c69
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c68
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c303
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c65
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c191
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c122
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c360
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c88
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c11
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c407
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c52
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c208
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h100
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c116
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c50
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c720
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c698
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c293
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c292
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c61
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c22
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c98
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c80
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c60
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c107
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c317
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c167
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h23
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c290
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c452
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c68
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c228
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c139
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c27
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c84
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c70
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c7
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c65
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c65
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h50
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h43
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c330
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c143
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c171
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c186
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c52
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c669
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c338
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c95
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c66
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c98
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c15
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c106
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c410
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre 2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c9
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c1
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c4
-rw-r--r--drivers/staging/most/aim-network/networking.c53
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c5
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c230
-rw-r--r--drivers/staging/most/mostcore/core.c55
-rw-r--r--drivers/staging/netlogic/xlr_net.c20
-rw-r--r--drivers/staging/nvec/nvec_ps2.c8
-rw-r--r--drivers/staging/octeon/ethernet.c22
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c32
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c46
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c69
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c70
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c51
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c20
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c25
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c21
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h11
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h6
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h20
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h13
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h2
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c58
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c8
-rw-r--r--drivers/staging/rtl8192e/dot11d.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c1
-rw-r--r--drivers/staging/rtl8712/osdep_service.h9
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h12
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h18
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c13
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c50
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h6
-rw-r--r--drivers/staging/rts5208/ms.c393
-rw-r--r--drivers/staging/rts5208/ms.h4
-rw-r--r--drivers/staging/rts5208/rtsx.c55
-rw-r--r--drivers/staging/rts5208/rtsx.h2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c94
-rw-r--r--drivers/staging/rts5208/rtsx_card.h16
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c17
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h137
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c319
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.h4
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h4
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h30
-rw-r--r--drivers/staging/rts5208/sd.c813
-rw-r--r--drivers/staging/rts5208/sd.h5
-rw-r--r--drivers/staging/rts5208/spi.c144
-rw-r--r--drivers/staging/rts5208/xd.c461
-rw-r--r--drivers/staging/rts5208/xd.h2
-rw-r--r--drivers/staging/skein/skein_api.c26
-rw-r--r--drivers/staging/skein/threefish_block.c16
-rw-r--r--drivers/staging/slicoss/Kconfig14
-rw-r--r--drivers/staging/slicoss/Makefile1
-rw-r--r--drivers/staging/slicoss/README7
-rw-r--r--drivers/staging/slicoss/TODO36
-rw-r--r--drivers/staging/slicoss/slic.h573
-rw-r--r--drivers/staging/slicoss/slichw.h652
-rw-r--r--drivers/staging/slicoss/slicoss.c3132
-rw-r--r--drivers/staging/sm750fb/Makefile2
-rw-r--r--drivers/staging/sm750fb/ddk750.h23
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c100
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h89
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c75
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h30
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_help.c17
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h21
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c37
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c74
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h22
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h8
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c31
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h24
-rw-r--r--drivers/staging/sm750fb/sm750.c52
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c52
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h10
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h14
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c40
-rw-r--r--drivers/staging/speakup/TODO2
-rw-r--r--drivers/staging/speakup/main.c42
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup_soft.c46
-rw-r--r--drivers/staging/speakup/speakup_spkout.c31
-rw-r--r--drivers/staging/speakup/speakup_txprt.c29
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h148
-rw-r--r--drivers/staging/speakup/spk_types.h16
-rw-r--r--drivers/staging/speakup/synth.c22
-rw-r--r--drivers/staging/speakup/thread.c5
-rw-r--r--drivers/staging/speakup/varhandlers.c6
-rw-r--r--drivers/staging/unisys/include/iochannel.h341
-rw-r--r--drivers/staging/unisys/include/visorbus.h2
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h225
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c231
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h4
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c44
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c653
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h185
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c6
-rw-r--r--drivers/staging/vc04_services/Kconfig7
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO50
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h25
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c324
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c202
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c659
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h9
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c17
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c138
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h13
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/staging/vt6655/baseband.c58
-rw-r--r--drivers/staging/vt6655/baseband.h11
-rw-r--r--drivers/staging/vt6655/card.c45
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/channel.h4
-rw-r--r--drivers/staging/vt6655/desc.h4
-rw-r--r--drivers/staging/vt6655/device.h16
-rw-r--r--drivers/staging/vt6655/device_cfg.h4
-rw-r--r--drivers/staging/vt6655/device_main.c9
-rw-r--r--drivers/staging/vt6655/dpc.c4
-rw-r--r--drivers/staging/vt6655/dpc.h4
-rw-r--r--drivers/staging/vt6655/key.c5
-rw-r--r--drivers/staging/vt6655/key.h5
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h327
-rw-r--r--drivers/staging/vt6655/power.c6
-rw-r--r--drivers/staging/vt6655/power.h5
-rw-r--r--drivers/staging/vt6655/rf.c718
-rw-r--r--drivers/staging/vt6655/rf.h5
-rw-r--r--drivers/staging/vt6655/rxtx.c8
-rw-r--r--drivers/staging/vt6655/rxtx.h4
-rw-r--r--drivers/staging/vt6655/srom.c36
-rw-r--r--drivers/staging/vt6655/srom.h11
-rw-r--r--drivers/staging/vt6655/tmacro.h4
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/baseband.h20
-rw-r--r--drivers/staging/vt6656/card.c15
-rw-r--r--drivers/staging/vt6656/mac.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/vt6656/rf.c10
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c12
-rw-r--r--drivers/staging/wilc1000/host_interface.h1
-rw-r--r--drivers/staging/wilc1000/linux_mon.c4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c51
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c6
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c3
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c24
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h128
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2479
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c322
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h100
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h118
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h120
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h88
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h194
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h90
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c655
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h102
-rw-r--r--drivers/staging/wlan-ng/p80211req.c189
-rw-r--r--drivers/staging/wlan-ng/p80211req.h90
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c100
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c559
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c544
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h125
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c102
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c210
-rw-r--r--drivers/staging/xgifb/XGI_main.h54
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c196
-rw-r--r--drivers/staging/xgifb/vb_init.c56
-rw-r--r--drivers/staging/xgifb/vb_setmode.c667
-rw-r--r--drivers/staging/xgifb/vb_table.h9
-rw-r--r--drivers/staging/xgifb/vb_util.h4
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/target_core_fabric_configfs.c7
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_user.c4
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_io.c4
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/Makefile3
-rw-r--r--drivers/thermal/db8500_thermal.c1
-rw-r--r--drivers/thermal/devfreq_cooling.c5
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel_pch_thermal.c64
-rw-r--r--drivers/thermal/intel_powerclamp.c369
-rw-r--r--drivers/thermal/max77620_thermal.c1
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c6
-rw-r--r--drivers/thermal/rockchip_thermal.c7
-rw-r--r--drivers/thermal/tango_thermal.c1
-rw-r--r--drivers/thermal/thermal_core.c1450
-rw-r--r--drivers/thermal/thermal_core.h26
-rw-r--r--drivers/thermal/thermal_helpers.c226
-rw-r--r--drivers/thermal/thermal_hwmon.c4
-rw-r--r--drivers/thermal/thermal_sysfs.c771
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c5
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c587
-rw-r--r--drivers/thunderbolt/Kconfig2
-rw-r--r--drivers/thunderbolt/eeprom.c43
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/switch.c2
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/nozomi.c51
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_core.c7
-rw-r--r--drivers/tty/serial/8250/8250_dma.c9
-rw-r--r--drivers/tty/serial/8250/8250_dw.c22
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c231
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c9
-rw-r--r--drivers/tty/serial/8250/8250_mid.c4
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c57
-rw-r--r--drivers/tty/serial/8250/8250_port.c18
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c190
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c46
-rw-r--r--drivers/tty/serial/8250/Kconfig10
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile7
-rw-r--r--drivers/tty/serial/amba-pl011.c56
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/fsl_lpuart.c64
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/ioc4_serial.c7
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_core.c17
-rw-r--r--drivers/tty/serial/sh-sci.c20
-rw-r--r--drivers/tty/serial/sunhv.c3
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/tty/synclink.c1
-rw-r--r--drivers/tty/synclink_gt.c1
-rw-r--r--drivers/tty/synclinkmp.c1
-rw-r--r--drivers/tty/vt/consolemap.c115
-rw-r--r--drivers/tty/vt/keyboard.c4
-rw-r--r--drivers/tty/vt/vt.c89
-rw-r--r--drivers/uio/Kconfig9
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_hv_generic.c218
-rw-r--r--drivers/uio/uio_pruss.c10
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c14
-rw-r--r--drivers/usb/chipidea/udc.h12
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c86
-rw-r--r--drivers/usb/class/cdc-acm.c220
-rw-r--r--drivers/usb/class/cdc-acm.h5
-rw-r--r--drivers/usb/class/usbtmc.c5
-rw-r--r--drivers/usb/core/buffer.c3
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/core/devices.c12
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/endpoint.c7
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hub.c114
-rw-r--r--drivers/usb/core/ledtrig-usbport.c7
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/core/notify.c2
-rw-r--r--drivers/usb/core/sysfs.c17
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/dwc2/Makefile1
-rw-r--r--drivers/usb/dwc2/core.c930
-rw-r--r--drivers/usb/dwc2/core.h324
-rw-r--r--drivers/usb/dwc2/core_intr.c6
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc2/gadget.c1126
-rw-r--r--drivers/usb/dwc2/hcd.c264
-rw-r--r--drivers/usb/dwc2/hcd.h7
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c56
-rw-r--r--drivers/usb/dwc2/hcd_intr.c55
-rw-r--r--drivers/usb/dwc2/hcd_queue.c87
-rw-r--r--drivers/usb/dwc2/hw.h48
-rw-r--r--drivers/usb/dwc2/params.c1435
-rw-r--r--drivers/usb/dwc2/pci.c18
-rw-r--r--drivers/usb/dwc2/platform.c207
-rw-r--r--drivers/usb/dwc3/Kconfig6
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c363
-rw-r--r--drivers/usb/dwc3/core.h53
-rw-r--r--drivers/usb/dwc3/debug.c32
-rw-r--r--drivers/usb/dwc3/debug.h41
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c134
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/ep0.c347
-rw-r--r--drivers/usb/dwc3/gadget.c578
-rw-r--r--drivers/usb/dwc3/gadget.h5
-rw-r--r--drivers/usb/dwc3/host.c88
-rw-r--r--drivers/usb/dwc3/io.h6
-rw-r--r--drivers/usb/dwc3/trace.h123
-rw-r--r--drivers/usb/gadget/composite.c23
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/f_hid.c73
-rw-r--r--drivers/usb/gadget/function/f_ncm.c11
-rw-r--r--drivers/usb/gadget/function/f_phonet.c11
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_uac2.c14
-rw-r--r--drivers/usb/gadget/function/rndis.c12
-rw-r--r--drivers/usb/gadget/function/rndis.h51
-rw-r--r--drivers/usb/gadget/function/u_ether.c27
-rw-r--r--drivers/usb/gadget/function/u_serial.c7
-rw-r--r--drivers/usb/gadget/function/uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c25
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h2
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c34
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.h2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/host/Kconfig5
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.c3
-rw-r--r--drivers/usb/host/ehci-hub.c14
-rw-r--r--drivers/usb/host/ehci-pci.c3
-rw-r--r--drivers/usb/host/ehci-q.c30
-rw-r--r--drivers/usb/host/ehci-sched.c3
-rw-r--r--drivers/usb/host/ehci-w90x900.c30
-rw-r--r--drivers/usb/host/ehci.h8
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c27
-rw-r--r--drivers/usb/host/ohci-at91.c123
-rw-r--r--drivers/usb/host/ohci-da8xx.c522
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-mem.c6
-rw-r--r--drivers/usb/host/ohci-nxp.c7
-rw-r--r--drivers/usb/host/ohci-omap.c39
-rw-r--r--drivers/usb/host/ohci-pxa27x.c36
-rw-r--r--drivers/usb/host/ohci-s3c2410.c47
-rw-r--r--drivers/usb/host/pci-quirks.c8
-rw-r--r--drivers/usb/host/xhci-mem.c16
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c4
-rw-r--r--drivers/usb/host/xhci-mtk.c38
-rw-r--r--drivers/usb/host/xhci-mtk.h1
-rw-r--r--drivers/usb/host/xhci-plat.c9
-rw-r--r--drivers/usb/host/xhci-rcar.c4
-rw-r--r--drivers/usb/host/xhci-rcar.h1
-rw-r--r--drivers/usb/host/xhci-ring.c686
-rw-r--r--drivers/usb/host/xhci.c44
-rw-r--r--drivers/usb/host/xhci.h13
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/misc/chaoskey.c14
-rw-r--r--drivers/usb/misc/rio500.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c55
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/mtu3/Kconfig54
-rw-r--r--drivers/usb/mtu3/Makefile18
-rw-r--r--drivers/usb/mtu3/mtu3.h417
-rw-r--r--drivers/usb/mtu3/mtu3_core.c863
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c379
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h108
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c730
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c881
-rw-r--r--drivers/usb/mtu3/mtu3_host.c294
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h473
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c484
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c573
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h43
-rw-r--r--drivers/usb/musb/da8xx.c73
-rw-r--r--drivers/usb/musb/musb_core.c181
-rw-r--r--drivers/usb/musb/musb_core.h19
-rw-r--r--drivers/usb/musb/musb_dsps.c58
-rw-r--r--drivers/usb/musb/musb_gadget.c47
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_virthub.c1
-rw-r--r--drivers/usb/musb/omap2430.c16
-rw-r--r--drivers/usb/musb/sunxi.c25
-rw-r--r--drivers/usb/musb/tusb6010.c6
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-am335x-control.c2
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c23
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ch341.c113
-rw-r--r--drivers/usb/serial/cp210x.c412
-rw-r--r--drivers/usb/serial/f81534.c1409
-rw-r--r--drivers/usb/serial/ftdi_sio.c7
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_ti.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c35
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/quatech2.c3
-rw-r--r--drivers/usb/serial/ssu100.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c3
-rw-r--r--drivers/usb/storage/transport.c7
-rw-r--r--drivers/usb/storage/usb.c1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c3
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c1
-rw-r--r--drivers/usb/usbip/vudc_dev.c45
-rw-r--r--drivers/usb/usbip/vudc_transfer.c8
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c6
-rw-r--r--drivers/usb/wusbcore/security.c1
-rw-r--r--drivers/usb/wusbcore/wa-nep.c1
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1
-rw-r--r--drivers/usb/wusbcore/wusbhc.c13
-rw-r--r--drivers/uwb/lc-rc.c16
-rw-r--r--drivers/uwb/pal.c2
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/mdev/Kconfig17
-rw-r--r--drivers/vfio/mdev/Makefile5
-rw-r--r--drivers/vfio/mdev/mdev_core.c385
-rw-r--r--drivers/vfio/mdev/mdev_driver.c119
-rw-r--r--drivers/vfio/mdev/mdev_private.h41
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c286
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c148
-rw-r--r--drivers/vfio/pci/vfio_pci.c78
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c31
-rw-r--r--drivers/vfio/vfio.c461
-rw-r--r--drivers/vfio/vfio_iommu_type1.c885
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/vhost/vsock.c5
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/mdacon.c7
-rw-r--r--drivers/video/console/newport_con.c8
-rw-r--r--drivers/video/console/sticon.c7
-rw-r--r--drivers/video/console/vgacon.c133
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c4
-rw-r--r--drivers/video/fbdev/efifb.c59
-rw-r--r--drivers/video/fbdev/skeletonfb.c8
-rw-r--r--drivers/video/fbdev/xen-fbfront.c13
-rw-r--r--drivers/video/hdmi.c4
-rw-r--r--drivers/video/of_display_timing.c15
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/config.c12
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c16
-rw-r--r--drivers/virtio/virtio_ring.c16
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/mei_wdt.c58
-rw-r--r--drivers/watchdog/octeon-wdt-main.c62
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--drivers/xen/events/events_base.c4
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/swiotlb-xen.c27
-rw-r--r--drivers/xen/xen-pciback/xenbus.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c22
-rw-r--r--fs/9p/vfs_addr.c1
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Kconfig.binfmt4
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/rxrpc.c3
-rw-r--r--fs/aio.c207
-rw-r--r--fs/binfmt_elf.c6
-rw-r--r--fs/block_dev.c271
-rw-r--r--fs/btrfs/disk-io.c8
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/extent_io.c24
-rw-r--r--fs/btrfs/inode.c24
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/btrfs/super.c24
-rw-r--r--fs/btrfs/tests/btrfs-tests.c1
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/buffer.c130
-rw-r--r--fs/ceph/dir.c24
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/cifs/cifsencrypt.c11
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c26
-rw-r--r--fs/cifs/transport.c1
-rw-r--r--fs/coredump.c3
-rw-r--r--fs/crypto/Kconfig2
-rw-r--r--fs/crypto/crypto.c123
-rw-r--r--fs/crypto/fname.c61
-rw-r--r--fs/crypto/fscrypt_private.h93
-rw-r--r--fs/crypto/keyinfo.c24
-rw-r--r--fs/crypto/policy.c36
-rw-r--r--fs/dax.c1320
-rw-r--r--fs/direct-io.c34
-rw-r--r--fs/dlm/ast.c2
-rw-r--r--fs/dlm/config.c2
-rw-r--r--fs/dlm/debug_fs.c2
-rw-r--r--fs/dlm/dlm_internal.h1
-rw-r--r--fs/dlm/lockspace.c2
-rw-r--r--fs/dlm/lowcomms.c28
-rw-r--r--fs/dlm/main.c2
-rw-r--r--fs/dlm/netlink.c18
-rw-r--r--fs/dlm/user.c1
-rw-r--r--fs/exec.c25
-rw-r--r--fs/ext2/file.c35
-rw-r--r--fs/ext2/inode.c20
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/acl.c2
-rw-r--r--fs/ext4/ext4.h32
-rw-r--r--fs/ext4/ext4_jbd2.h14
-rw-r--r--fs/ext4/extents.c40
-rw-r--r--fs/ext4/file.c184
-rw-r--r--fs/ext4/ialloc.c5
-rw-r--r--fs/ext4/inline.c18
-rw-r--r--fs/ext4/inode.c367
-rw-r--r--fs/ext4/ioctl.c82
-rw-r--r--fs/ext4/mballoc.c4
-rw-r--r--fs/ext4/mmp.c6
-rw-r--r--fs/ext4/namei.c24
-rw-r--r--fs/ext4/page-io.c7
-rw-r--r--fs/ext4/super.c171
-rw-r--r--fs/ext4/xattr.c45
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/f2fs/checkpoint.c38
-rw-r--r--fs/f2fs/data.c212
-rw-r--r--fs/f2fs/debug.c29
-rw-r--r--fs/f2fs/dir.c30
-rw-r--r--fs/f2fs/extent_cache.c2
-rw-r--r--fs/f2fs/f2fs.h203
-rw-r--r--fs/f2fs/file.c86
-rw-r--r--fs/f2fs/gc.c35
-rw-r--r--fs/f2fs/inline.c16
-rw-r--r--fs/f2fs/inode.c47
-rw-r--r--fs/f2fs/namei.c6
-rw-r--r--fs/f2fs/node.c230
-rw-r--r--fs/f2fs/node.h13
-rw-r--r--fs/f2fs/recovery.c46
-rw-r--r--fs/f2fs/segment.c240
-rw-r--r--fs/f2fs/segment.h28
-rw-r--r--fs/f2fs/shrinker.c10
-rw-r--r--fs/f2fs/super.c283
-rw-r--r--fs/f2fs/xattr.c4
-rw-r--r--fs/fs-writeback.c16
-rw-r--r--fs/fuse/dir.c12
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c3
-rw-r--r--fs/gfs2/dir.c1
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/meta_io.c7
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/hfsplus/super.c4
-rw-r--r--fs/internal.h3
-rw-r--r--fs/iomap.c378
-rw-r--r--fs/isofs/compress.c1
-rw-r--r--fs/isofs/rock.c4
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c9
-rw-r--r--fs/jbd2/journal.c15
-rw-r--r--fs/jbd2/revoke.c2
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_logmgr.c4
-rw-r--r--fs/kernfs/inode.c4
-rw-r--r--fs/lockd/netns.h2
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/logfs/dev_bdev.c106
-rw-r--r--fs/mbcache.c41
-rw-r--r--fs/mpage.c9
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/netns.h2
-rw-r--r--fs/nfs/nfs4_fs.h7
-rw-r--r--fs/nfs/nfs4proc.c38
-rw-r--r--fs/nfs/nfs4session.c12
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfs_common/grace.c2
-rw-r--r--fs/nfsd/netns.h7
-rw-r--r--fs/nfsd/nfs4state.c38
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nilfs2/super.c2
-rw-r--r--fs/nsfs.c2
-rw-r--r--fs/ntfs/aops.c3
-rw-r--r--fs/ntfs/dir.c2
-rw-r--r--fs/ntfs/file.c5
-rw-r--r--fs/ntfs/logfile.c1
-rw-r--r--fs/ntfs/mft.c1
-rw-r--r--fs/ocfs2/aops.c9
-rw-r--r--fs/ocfs2/aops.h3
-rw-r--r--fs/ocfs2/buffer_head_io.c1
-rw-r--r--fs/ocfs2/cluster/heartbeat.c4
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c11
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c2
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/journal.c4
-rw-r--r--fs/ocfs2/mmap.c3
-rw-r--r--fs/ocfs2/namei.c6
-rw-r--r--fs/ocfs2/ocfs2.h2
-rw-r--r--fs/ocfs2/refcounttree.c1
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/orangefs/inode.c1
-rw-r--r--fs/orangefs/orangefs-debugfs.c149
-rw-r--r--fs/orangefs/orangefs-mod.c6
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c21
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/proc/base.c56
-rw-r--r--fs/proc/fd.c6
-rw-r--r--fs/proc/generic.c1
-rw-r--r--fs/proc/inode.c37
-rw-r--r--fs/proc/internal.h6
-rw-r--r--fs/proc/namespaces.c3
-rw-r--r--fs/proc/root.c1
-rw-r--r--fs/proc/task_mmu.c1
-rw-r--r--fs/pstore/Kconfig2
-rw-r--r--fs/pstore/ftrace.c11
-rw-r--r--fs/pstore/inode.c15
-rw-r--r--fs/pstore/internal.h34
-rw-r--r--fs/pstore/platform.c5
-rw-r--r--fs/pstore/ram.c327
-rw-r--r--fs/pstore/ram_core.c27
-rw-r--r--fs/quota/netlink.c10
-rw-r--r--fs/reiserfs/inode.c1
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/stree.c1
-rw-r--r--fs/splice.c9
-rw-r--r--fs/squashfs/block.c1
-rw-r--r--fs/udf/dir.c1
-rw-r--r--fs/udf/directory.c1
-rw-r--r--fs/udf/inode.c1
-rw-r--r--fs/ufs/balloc.c4
-rw-r--r--fs/ufs/inode.c3
-rw-r--r--fs/userfaultfd.c22
-rw-r--r--fs/xattr.c22
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c10
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c8
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h4
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c336
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h9
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_btree.c20
-rw-r--r--fs/xfs/libxfs/xfs_btree.h43
-rw-r--r--fs/xfs/libxfs/xfs_cksum.h26
-rw-r--r--fs/xfs/libxfs/xfs_defer.c17
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h5
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c26
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h1
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c18
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c16
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h4
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c77
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h7
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h4
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h2
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c1
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c1
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c1
-rw-r--r--fs/xfs/libxfs/xfs_sb.c13
-rw-r--r--fs/xfs/libxfs/xfs_types.h4
-rw-r--r--fs/xfs/xfs_aops.c309
-rw-r--r--fs/xfs/xfs_aops.h9
-rw-r--r--fs/xfs/xfs_attr.h4
-rw-r--r--fs/xfs/xfs_attr_list.c59
-rw-r--r--fs/xfs/xfs_bmap_util.c45
-rw-r--r--fs/xfs/xfs_buf.c125
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_dir2_readdir.c2
-rw-r--r--fs/xfs/xfs_file.c274
-rw-r--r--fs/xfs/xfs_icache.c40
-rw-r--r--fs/xfs/xfs_icreate_item.c2
-rw-r--r--fs/xfs/xfs_inode.c84
-rw-r--r--fs/xfs/xfs_inode.h18
-rw-r--r--fs/xfs/xfs_inode_item.c4
-rw-r--r--fs/xfs/xfs_ioctl.c8
-rw-r--r--fs/xfs/xfs_iomap.c104
-rw-r--r--fs/xfs/xfs_iops.c14
-rw-r--r--fs/xfs/xfs_linux.h1
-rw-r--r--fs/xfs/xfs_log.c41
-rw-r--r--fs/xfs/xfs_log_recover.c16
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_mount.h7
-rw-r--r--fs/xfs/xfs_pnfs.c7
-rw-r--r--fs/xfs/xfs_pnfs.h4
-rw-r--r--fs/xfs/xfs_qm.c2
-rw-r--r--fs/xfs/xfs_reflink.c191
-rw-r--r--fs/xfs/xfs_reflink.h6
-rw-r--r--fs/xfs/xfs_stats.c10
-rw-r--r--fs/xfs/xfs_stats.h200
-rw-r--r--fs/xfs/xfs_super.c27
-rw-r--r--fs/xfs/xfs_symlink.c7
-rw-r--r--fs/xfs/xfs_trace.h109
-rw-r--r--fs/xfs/xfs_xattr.c23
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acpixf.h9
-rw-r--r--include/acpi/actbl.h164
-rw-r--r--include/acpi/platform/aclinux.h3
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/acpi/video.h11
-rw-r--r--include/asm-generic/cputime_jiffies.h1
-rw-r--r--include/asm-generic/cputime_nsecs.h1
-rw-r--r--include/asm-generic/mutex-dec.h88
-rw-r--r--include/asm-generic/mutex-null.h19
-rw-r--r--include/asm-generic/mutex-xchg.h120
-rw-r--r--include/asm-generic/mutex.h9
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/pgtable.h22
-rw-r--r--include/asm-generic/sections.h3
-rw-r--r--include/asm-generic/tlb.h83
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/crypto/acompress.h269
-rw-r--r--include/crypto/cbc.h146
-rw-r--r--include/crypto/cryptd.h13
-rw-r--r--include/crypto/drbg.h2
-rw-r--r--include/crypto/engine.h6
-rw-r--r--include/crypto/gf128mul.h15
-rw-r--r--include/crypto/internal/acompress.h81
-rw-r--r--include/crypto/internal/scompress.h136
-rw-r--r--include/crypto/internal/simd.h17
-rw-r--r--include/crypto/internal/skcipher.h65
-rw-r--r--include/crypto/xts.h26
-rw-r--r--include/drm/bridge/mhl.h291
-rw-r--r--include/drm/drmP.h335
-rw-r--r--include/drm/drm_atomic.h55
-rw-r--r--include/drm/drm_blend.h10
-rw-r--r--include/drm/drm_connector.h72
-rw-r--r--include/drm/drm_crtc.h734
-rw-r--r--include/drm/drm_debugfs_crc.h73
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h27
-rw-r--r--include/drm/drm_dp_helper.h6
-rw-r--r--include/drm/drm_drv.h435
-rw-r--r--include/drm/drm_edid.h1
-rw-r--r--include/drm/drm_encoder.h2
-rw-r--r--include/drm/drm_fb_cma_helper.h5
-rw-r--r--include/drm/drm_fb_helper.h4
-rw-r--r--include/drm/drm_fourcc.h33
-rw-r--r--include/drm/drm_framebuffer.h22
-rw-r--r--include/drm/drm_irq.h63
-rw-r--r--include/drm/drm_mm.h28
-rw-r--r--include/drm/drm_mode_config.h663
-rw-r--r--include/drm/drm_modeset_helper_vtables.h28
-rw-r--r--include/drm/drm_modeset_lock.h12
-rw-r--r--include/drm/drm_of.h13
-rw-r--r--include/drm/drm_plane.h108
-rw-r--r--include/drm/drm_print.h112
-rw-r--r--include/drm/i915_component.h6
-rw-r--r--include/drm/ttm/ttm_bo_api.h15
-rw-r--r--include/drm/ttm/ttm_bo_driver.h48
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h2
-rw-r--r--include/dt-bindings/clock/hi3516cv300-clock.h48
-rw-r--r--include/dt-bindings/clock/histb-clock.h66
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h15
-rw-r--r--include/dt-bindings/clock/oxsemi,ox810se.h30
-rw-r--r--include/dt-bindings/clock/oxsemi,ox820.h40
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8994.h137
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h69
-rw-r--r--include/dt-bindings/clock/r8a7743-cpg-mssr.h43
-rw-r--r--include/dt-bindings/clock/r8a7745-cpg-mssr.h44
-rw-r--r--include/dt-bindings/clock/rk1108-cru.h269
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h8
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h134
-rw-r--r--include/dt-bindings/gpio/meson-gxl-gpio.h131
-rw-r--r--include/dt-bindings/net/mdio.h19
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h21
-rw-r--r--include/dt-bindings/pinctrl/at91.h2
-rw-r--r--include/dt-bindings/reset/sun50i-a64-ccu.h98
-rw-r--r--include/dt-bindings/sound/cs42l42.h73
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/ahci-remap.h28
-rw-r--r--include/linux/alarmtimer.h5
-rw-r--r--include/linux/amba/pl061.h16
-rw-r--r--include/linux/amba/pl08x.h4
-rw-r--r--include/linux/ata.h6
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/bio.h49
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/blk-mq.h12
-rw-r--r--include/linux/blk_types.h188
-rw-r--r--include/linux/blkdev.h245
-rw-r--r--include/linux/blktrace_api.h2
-rw-r--r--include/linux/bpf-cgroup.h92
-rw-r--r--include/linux/bpf.h19
-rw-r--r--include/linux/bpf_verifier.h15
-rw-r--r--include/linux/brcmphy.h27
-rw-r--r--include/linux/bsg-lib.h4
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/bug.h17
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/ccp.h6
-rw-r--r--include/linux/ceph/messenger.h2
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup-defs.h4
-rw-r--r--include/linux/clk.h29
-rw-r--r--include/linux/clk/renesas.h4
-rw-r--r--include/linux/clocksource.h5
-rw-r--r--include/linux/cma.h3
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/configfs.h2
-rw-r--r--include/linux/console.h22
-rw-r--r--include/linux/cpu.h17
-rw-r--r--include/linux/cpufreq.h6
-rw-r--r--include/linux/cpuhotplug.h21
-rw-r--r--include/linux/cpuidle.h9
-rw-r--r--include/linux/crypto.h5
-rw-r--r--include/linux/dax.h67
-rw-r--r--include/linux/debugfs.h47
-rw-r--r--include/linux/devfreq_cooling.h9
-rw-r--r--include/linux/device.h112
-rw-r--r--include/linux/dm-io.h2
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-fence-array.h86
-rw-r--r--include/linux/dma-fence.h438
-rw-r--r--include/linux/dma-mapping.h20
-rw-r--r--include/linux/dmaengine.h8
-rw-r--r--include/linux/drbd_genl.h2
-rw-r--r--include/linux/edac.h8
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/elevator.h9
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/fddidevice.h1
-rw-r--r--include/linux/fence-array.h83
-rw-r--r--include/linux/fence.h378
-rw-r--r--include/linux/filter.h37
-rw-r--r--include/linux/fpga/fpga-bridge.h60
-rw-r--r--include/linux/fpga/fpga-mgr.h29
-rw-r--r--include/linux/frontswap.h5
-rw-r--r--include/linux/fs.h70
-rw-r--r--include/linux/fscrypto.h134
-rw-r--r--include/linux/fsl/guts.h125
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/genl_magic_func.h29
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio/driver.h34
-rw-r--r--include/linux/hdlc.h2
-rw-r--r--include/linux/hdmi.h2
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hippidevice.h1
-rw-r--r--include/linux/huge_mm.h14
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/hwmon.h31
-rw-r--r--include/linux/hyperv.h60
-rw-r--r--include/linux/idr.h40
-rw-r--r--include/linux/ieee80211.h26
-rw-r--r--include/linux/if_arp.h16
-rw-r--r--include/linux/if_vlan.h16
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/consumer.h41
-rw-r--r--include/linux/iio/dac/mcp4725.h12
-rw-r--r--include/linux/iio/iio.h48
-rw-r--r--include/linux/iio/sysfs.h24
-rw-r--r--include/linux/iio/trigger.h2
-rw-r--r--include/linux/iio/types.h5
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/interrupt.h20
-rw-r--r--include/linux/iomap.h12
-rw-r--r--include/linux/ipv6.h27
-rw-r--r--include/linux/irqchip/arm-gic-v3.h12
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kernel_stat.h4
-rw-r--r--include/linux/kexec.h6
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/kvm_host.h13
-rw-r--r--include/linux/leds.h25
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/libnvdimm.h2
-rw-r--r--include/linux/lightnvm.h244
-rw-r--r--include/linux/list.h37
-rw-r--r--include/linux/lockdep.h25
-rw-r--r--include/linux/mbus.h8
-rw-r--r--include/linux/mc146818rtc.h1
-rw-r--r--include/linux/mdev.h168
-rw-r--r--include/linux/mei_cl_bus.h51
-rw-r--r--include/linux/mempolicy.h8
-rw-r--r--include/linux/mfd/cros_ec.h10
-rw-r--r--include/linux/mfd/cros_ec_commands.h183
-rw-r--r--include/linux/mfd/max77620.h2
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h8
-rw-r--r--include/linux/mfd/tmio.h5
-rw-r--r--include/linux/mii.h4
-rw-r--r--include/linux/miscdevice.h7
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/device.h16
-rw-r--r--include/linux/mlx5/driver.h76
-rw-r--r--include/linux/mlx5/fs.h19
-rw-r--r--include/linux/mlx5/mlx5_ifc.h118
-rw-r--r--include/linux/mlx5/port.h9
-rw-r--r--include/linux/mlx5/vport.h10
-rw-r--r--include/linux/mm.h48
-rw-r--r--include/linux/mm_types.h1
-rw-r--r--include/linux/mmc/card.h14
-rw-r--r--include/linux/mmc/core.h16
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h17
-rw-r--r--include/linux/mmc/mmc.h17
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/mutex-debug.h24
-rw-r--r--include/linux/mutex.h77
-rw-r--r--include/linux/netdevice.h266
-rw-r--r--include/linux/netfilter.h89
-rw-r--r--include/linux/netfilter/ipset/ip_set.h136
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h11
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h75
-rw-r--r--include/linux/netfilter/ipset/ip_set_skbinfo.h46
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h2
-rw-r--r--include/linux/netfilter/x_tables.h86
-rw-r--r--include/linux/netfilter_ingress.h9
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nmi.h24
-rw-r--r--include/linux/ntb.h2
-rw-r--r--include/linux/nvme-fc-driver.h851
-rw-r--r--include/linux/nvme-fc.h268
-rw-r--r--include/linux/nvme.h43
-rw-r--r--include/linux/of.h25
-rw-r--r--include/linux/of_fdt.h1
-rw-r--r--include/linux/of_mdio.h4
-rw-r--r--include/linux/pagemap.h21
-rw-r--r--include/linux/parser.h1
-rw-r--r--include/linux/pci.h39
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/phy.h44
-rw-r--r--include/linux/phy/phy-qcom-ufs.h18
-rw-r--r--include/linux/phy/phy.h7
-rw-r--r--include/linux/phy_led_triggers.h51
-rw-r--r--include/linux/pim.h81
-rw-r--r--include/linux/platform_data/dma-dw.h5
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h3
-rw-r--r--include/linux/pm-trace.h9
-rw-r--r--include/linux/pm.h4
-rw-r--r--include/linux/pm_domain.h28
-rw-r--r--include/linux/pm_opp.h72
-rw-r--r--include/linux/pm_runtime.h21
-rw-r--r--include/linux/power/bq27xxx_battery.h3
-rw-r--r--include/linux/preempt.h21
-rw-r--r--include/linux/printk.h17
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/pstore.h76
-rw-r--r--include/linux/pstore_ram.h14
-rw-r--r--include/linux/ptp_clock_kernel.h73
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/qed/qed_chain.h144
-rw-r--r--include/linux/qed/qed_eth_if.h61
-rw-r--r--include/linux/qed/qed_if.h50
-rw-r--r--include/linux/qed/qed_iscsi_if.h229
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/radix-tree.h208
-rw-r--r--include/linux/rculist.h8
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/linux/regulator/consumer.h27
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/remoteproc.h36
-rw-r--r--include/linux/remoteproc/st_slim_rproc.h58
-rw-r--r--include/linux/reservation.h41
-rw-r--r--include/linux/restart_block.h51
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/rmap.h10
-rw-r--r--include/linux/rpmsg.h125
-rw-r--r--include/linux/rpmsg/qcom_smd.h33
-rw-r--r--include/linux/sched.h121
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/seg6.h6
-rw-r--r--include/linux/seg6_genl.h6
-rw-r--r--include/linux/seg6_hmac.h6
-rw-r--r--include/linux/seg6_iptunnel.h6
-rw-r--r--include/linux/seqno-fence.h20
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/signal.h17
-rw-r--r--include/linux/skbuff.h24
-rw-r--r--include/linux/smc91x.h1
-rw-r--r--include/linux/soc/renesas/rcar-rst.h6
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/stmmac.h5
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h37
-rw-r--r--include/linux/swiotlb.h14
-rw-r--r--include/linux/sync_file.h14
-rw-r--r--include/linux/sys_soc.h9
-rw-r--r--include/linux/tcp.h24
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/thread_info.h45
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/linux/udp.h3
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/uprobes.h1
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/cdc_ncm.h3
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h19
-rw-r--r--include/linux/userfaultfd_k.h4
-rw-r--r--include/linux/vfio.h48
-rw-r--r--include/linux/virtio_net.h4
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/linux/workqueue.h46
-rw-r--r--include/linux/writeback.h13
-rw-r--r--include/linux/ww_mutex.h2
-rw-r--r--include/net/act_api.h5
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/bluetooth/bluetooth.h25
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bonding.h2
-rw-r--r--include/net/busy_poll.h33
-rw-r--r--include/net/cfg80211.h193
-rw-r--r--include/net/devlink.h2
-rw-r--r--include/net/dst_metadata.h10
-rw-r--r--include/net/fib_rules.h9
-rw-r--r--include/net/flow.h10
-rw-r--r--include/net/flow_dissector.h22
-rw-r--r--include/net/flowcache.h2
-rw-r--r--include/net/gen_stats.h17
-rw-r--r--include/net/genetlink.h107
-rw-r--r--include/net/gro_cells.h3
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/if_inet6.h3
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ip.h15
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h9
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/lwtunnel.h13
-rw-r--r--include/net/mac80211.h40
-rw-r--r--include/net/ndisc.h5
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h9
-rw-r--r--include/net/netfilter/ipv4/nf_defrag_ipv4.h3
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h9
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h10
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h20
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h3
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/netfilter/nf_dup_netdev.h1
-rw-r--r--include/net/netfilter/nf_log.h7
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h9
-rw-r--r--include/net/netfilter/nf_queue.h1
-rw-r--r--include/net/netfilter/nf_socket.h27
-rw-r--r--include/net/netfilter/nf_tables.h147
-rw-r--r--include/net/netfilter/nf_tables_core.h34
-rw-r--r--include/net/netfilter/nft_fib.h31
-rw-r--r--include/net/netfilter/xt_rateest.h10
-rw-r--r--include/net/netlink.h15
-rw-r--r--include/net/netns/conntrack.h44
-rw-r--r--include/net/netns/generic.h16
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h6
-rw-r--r--include/net/pkt_cls.h21
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/raw.h6
-rw-r--r--include/net/rawv6.h7
-rw-r--r--include/net/route.h5
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/secure_seq.h8
-rw-r--r--include/net/seg6.h62
-rw-r--r--include/net/seg6_hmac.h62
-rw-r--r--include/net/sock.h134
-rw-r--r--include/net/tc_act/tc_mirred.h6
-rw-r--r--include/net/tc_act/tc_skbedit.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h37
-rw-r--r--include/net/tcp.h37
-rw-r--r--include/net/udp.h21
-rw-r--r--include/net/udplite.h1
-rw-r--r--include/net/vxlan.h14
-rw-r--r--include/scsi/libfc.h206
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_host.h8
-rw-r--r--include/scsi/scsi_proto.h17
-rw-r--r--include/scsi/scsi_transport_fc.h62
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h21
-rw-r--r--include/sound/compress_driver.h1
-rw-r--r--include/sound/core.h20
-rw-r--r--include/sound/cs35l34.h35
-rw-r--r--include/sound/dmaengine_pcm.h6
-rw-r--r--include/sound/emu10k1.h3
-rw-r--r--include/sound/hda_i915.h11
-rw-r--r--include/sound/rt5514.h20
-rwxr-xr-xinclude/sound/rt5665.h47
-rw-r--r--include/sound/simple_card_utils.h8
-rw-r--r--include/sound/soc-dai.h43
-rw-r--r--include/sound/soc-dapm.h14
-rw-r--r--include/sound/soc-topology.h2
-rw-r--r--include/sound/soc.h87
-rw-r--r--include/trace/events/alarmtimer.h96
-rw-r--r--include/trace/events/bcache.h12
-rw-r--r--include/trace/events/block.h31
-rw-r--r--include/trace/events/dma_fence.h (renamed from include/trace/events/fence.h)44
-rw-r--r--include/trace/events/f2fs.h33
-rw-r--r--include/trace/events/mdio.h42
-rw-r--r--include/trace/events/rcu.h5
-rw-r--r--include/trace/events/wbt.h153
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h92
-rw-r--r--include/uapi/drm/drm_mode.h59
-rw-r--r--include/uapi/drm/i915_drm.h5
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/vc4_drm.h2
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/atm_zatm.h1
-rw-r--r--include/uapi/linux/audit.h5
-rw-r--r--include/uapi/linux/blkzoned.h143
-rw-r--r--include/uapi/linux/bpf.h642
-rw-r--r--include/uapi/linux/bpqether.h2
-rw-r--r--include/uapi/linux/can.h1
-rw-r--r--include/uapi/linux/cryptouser.h5
-rw-r--r--include/uapi/linux/devlink.h8
-rw-r--r--include/uapi/linux/dm-log-userspace.h53
-rw-r--r--include/uapi/linux/ethtool.h21
-rw-r--r--include/uapi/linux/fib_rules.h6
-rw-r--r--include/uapi/linux/fs.h18
-rw-r--r--include/uapi/linux/genetlink.h3
-rw-r--r--include/uapi/linux/hw_breakpoint.h4
-rw-r--r--include/uapi/linux/if.h4
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/if_link.h6
-rw-r--r--include/uapi/linux/if_pppol2tp.h13
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/inet_diag.h17
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/ipv6.h4
-rw-r--r--include/uapi/linux/kvm.h12
-rw-r--r--include/uapi/linux/l2tp.h21
-rw-r--r--include/uapi/linux/lwtunnel.h24
-rw-r--r--include/uapi/linux/major.h2
-rw-r--r--include/uapi/linux/mmc/ioctl.h2
-rw-r--r--include/uapi/linux/nbd.h9
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netfilter.h2
-rw-r--r--include/uapi/linux/netfilter/Kbuild1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tuple_common.h3
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h133
-rw-r--r--include/uapi/linux/netfilter/xt_bpf.h21
-rw-r--r--include/uapi/linux/nl80211.h76
-rw-r--r--include/uapi/linux/openvswitch.h15
-rw-r--r--include/uapi/linux/pkt_cls.h28
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/rtnetlink.h3
-rw-r--r--include/uapi/linux/seg6.h54
-rw-r--r--include/uapi/linux/seg6_genl.h32
-rw-r--r--include/uapi/linux/seg6_hmac.h21
-rw-r--r--include/uapi/linux/seg6_iptunnel.h44
-rw-r--r--include/uapi/linux/sockios.h3
-rw-r--r--include/uapi/linux/tc_act/Kbuild2
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h1
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h1
-rw-r--r--include/uapi/linux/tcp.h12
-rw-r--r--include/uapi/linux/uleds.h24
-rw-r--r--include/uapi/linux/usb/ch9.h24
-rw-r--r--include/uapi/linux/vfio.h10
-rw-r--r--include/uapi/linux/vtpm_proxy.h23
-rw-r--r--include/uapi/scsi/fc/fc_fs.h2
-rw-r--r--include/uapi/sound/asoc.h96
-rw-r--r--include/uapi/sound/snd_sst_tokens.h8
-rw-r--r--include/video/display_timing.h4
-rw-r--r--include/video/imx-ipu-v3.h3
-rw-r--r--include/video/of_display_timing.h15
-rw-r--r--include/xen/arm/hypercall.h87
-rw-r--r--include/xen/arm/hypervisor.h39
-rw-r--r--include/xen/arm/interface.h85
-rw-r--r--include/xen/arm/page-coherent.h98
-rw-r--r--include/xen/arm/page.h122
-rw-r--r--include/xen/swiotlb-xen.h3
-rw-r--r--include/xen/xenbus.h4
-rw-r--r--init/Kconfig31
-rw-r--r--init/do_mounts.c2
-rw-r--r--init/do_mounts_rd.c2
-rw-r--r--init/main.c22
-rw-r--r--ipc/mqueue.c4
-rw-r--r--ipc/msg.c13
-rw-r--r--ipc/sem.c512
-rw-r--r--ipc/shm.c13
-rw-r--r--kernel/Kconfig.locks2
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/audit.c537
-rw-r--r--kernel/audit_fsnotify.c5
-rw-r--r--kernel/audit_tree.c3
-rw-r--r--kernel/audit_watch.c5
-rw-r--r--kernel/auditfilter.c5
-rw-r--r--kernel/auditsc.c12
-rw-r--r--kernel/bpf/Makefile3
-rw-r--r--kernel/bpf/bpf_lru_list.c695
-rw-r--r--kernel/bpf/bpf_lru_list.h84
-rw-r--r--kernel/bpf/cgroup.c200
-rw-r--r--kernel/bpf/core.c68
-rw-r--r--kernel/bpf/hashtab.c441
-rw-r--r--kernel/bpf/helpers.c12
-rw-r--r--kernel/bpf/inode.c99
-rw-r--r--kernel/bpf/syscall.c186
-rw-r--r--kernel/bpf/verifier.c210
-rw-r--r--kernel/capability.c36
-rw-r--r--kernel/cgroup.c18
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/debug/debug_core.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c39
-rw-r--r--kernel/debug/kdb/kdb_main.c1
-rw-r--r--kernel/debug/kdb/kdb_private.h1
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/events/uprobes.c4
-rw-r--r--kernel/exit.c16
-rw-r--r--kernel/fork.c23
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/hung_task.c3
-rw-r--r--kernel/irq/affinity.c72
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/irq/msi.c4
-rw-r--r--kernel/kcov.c6
-rw-r--r--kernel/kexec_core.c5
-rw-r--r--kernel/kthread.c149
-rw-r--r--kernel/locking/lockdep.c141
-rw-r--r--kernel/locking/lockdep_internals.h20
-rw-r--r--kernel/locking/mcs_spinlock.h4
-rw-r--r--kernel/locking/mutex-debug.c13
-rw-r--r--kernel/locking/mutex-debug.h10
-rw-r--r--kernel/locking/mutex.c588
-rw-r--r--kernel/locking/mutex.h26
-rw-r--r--kernel/locking/osq_lock.c15
-rw-r--r--kernel/locking/qrwlock.c6
-rw-r--r--kernel/locking/rtmutex.c86
-rw-r--r--kernel/locking/rtmutex_common.h6
-rw-r--r--kernel/locking/rwsem-xadd.c28
-rw-r--r--kernel/module.c74
-rw-r--r--kernel/padata.c4
-rw-r--r--kernel/panic.c53
-rw-r--r--kernel/power/main.c88
-rw-r--r--kernel/power/power.h6
-rw-r--r--kernel/power/qos.c11
-rw-r--r--kernel/power/suspend.c69
-rw-r--r--kernel/power/suspend_test.c4
-rw-r--r--kernel/power/swap.c19
-rw-r--r--kernel/printk/nmi.c83
-rw-r--r--kernel/printk/printk.c273
-rw-r--r--kernel/ptrace.c70
-rw-r--r--kernel/rcu/rcutorture.c11
-rw-r--r--kernel/rcu/tree.c17
-rw-r--r--kernel/rcu/tree.h1
-rw-r--r--kernel/rcu/tree_exp.h12
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/auto_group.c40
-rw-r--r--kernel/sched/core.c49
-rw-r--r--kernel/sched/cpuacct.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c119
-rw-r--r--kernel/sched/cputime.c124
-rw-r--r--kernel/sched/deadline.c4
-rw-r--r--kernel/sched/fair.c665
-rw-r--r--kernel/sched/idle.c175
-rw-r--r--kernel/sched/sched.h11
-rw-r--r--kernel/seccomp.c9
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/smp.c18
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/sys.c13
-rw-r--r--kernel/sysctl.c22
-rw-r--r--kernel/sysctl_binary.c4
-rw-r--r--kernel/taskstats.c24
-rw-r--r--kernel/time/Makefile10
-rw-r--r--kernel/time/alarmtimer.c62
-rw-r--r--kernel/time/clocksource.c1
-rw-r--r--kernel/time/hrtimer.c20
-rw-r--r--kernel/time/itimer.c15
-rw-r--r--kernel/time/posix-cpu-timers.c8
-rw-r--r--kernel/time/posix-stubs.c123
-rw-r--r--kernel/time/tick-sched.c33
-rw-r--r--kernel/time/timekeeping.c90
-rw-r--r--kernel/time/timer.c48
-rw-r--r--kernel/trace/blktrace.c14
-rw-r--r--kernel/trace/bpf_trace.c2
-rw-r--r--kernel/trace/ftrace.c41
-rw-r--r--kernel/trace/ring_buffer.c137
-rw-r--r--kernel/trace/trace.c16
-rw-r--r--kernel/watchdog.c270
-rw-r--r--kernel/watchdog_hld.c227
-rw-r--r--kernel/workqueue.c103
-rw-r--r--lib/Kconfig.debug49
-rw-r--r--lib/Kconfig.ubsan3
-rw-r--r--lib/debugobjects.c10
-rw-r--r--lib/idr.c11
-rw-r--r--lib/iov_iter.c5
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/list_debug.c99
-rw-r--r--lib/locking-selftest.c66
-rw-r--r--lib/lockref.c2
-rw-r--r--lib/mpi/mpi-pow.c7
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/parser.c47
-rw-r--r--lib/percpu_counter.c25
-rw-r--r--lib/radix-tree.c1195
-rw-r--r--lib/raid6/avx2.c232
-rw-r--r--lib/rbtree.c23
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/swiotlb.c81
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_kasan.c29
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/backing-dev.c1
-rw-r--r--mm/cma.c3
-rw-r--r--mm/compaction.c73
-rw-r--r--mm/debug.c4
-rw-r--r--mm/filemap.c92
-rw-r--r--mm/gup.c39
-rw-r--r--mm/huge_memory.c240
-rw-r--r--mm/hugetlb.c91
-rw-r--r--mm/init-mm.c2
-rw-r--r--mm/internal.h2
-rw-r--r--mm/kasan/kasan.c28
-rw-r--r--mm/kasan/kasan.h4
-rw-r--r--mm/kasan/quarantine.c94
-rw-r--r--mm/kasan/report.c5
-rw-r--r--mm/khugepaged.c64
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/madvise.c1
-rw-r--r--mm/memcontrol.c39
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/memory.c932
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/mempolicy.c30
-rw-r--r--mm/migrate.c19
-rw-r--r--mm/mlock.c7
-rw-r--r--mm/mprotect.c19
-rw-r--r--mm/mremap.c34
-rw-r--r--mm/nommu.c13
-rw-r--r--mm/page-writeback.c29
-rw-r--r--mm/page_alloc.c146
-rw-r--r--mm/page_io.c5
-rw-r--r--mm/percpu.c19
-rw-r--r--mm/process_vm_access.c12
-rw-r--r--mm/readahead.c39
-rw-r--r--mm/rmap.c69
-rw-r--r--mm/shmem.c64
-rw-r--r--mm/slab.c136
-rw-r--r--mm/slab.h20
-rw-r--r--mm/slab_common.c37
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c21
-rw-r--r--mm/swapfile.c15
-rw-r--r--mm/truncate.c29
-rw-r--r--mm/vmalloc.c196
-rw-r--r--mm/vmscan.c44
-rw-r--r--mm/vmstat.c95
-rw-r--r--mm/workingset.c114
-rw-r--r--mm/zsmalloc.c67
-rw-r--r--mm/zswap.c172
-rw-r--r--net/802/fddi.c11
-rw-r--r--net/802/hippi.c14
-rw-r--r--net/8021q/vlan.c11
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/Kconfig8
-rw-r--r--net/atm/br2684.c6
-rw-r--r--net/atm/lec.c11
-rw-r--r--net/batman-adv/Kconfig2
-rw-r--r--net/batman-adv/bat_iv_ogm.c58
-rw-r--r--net/batman-adv/bat_v.c8
-rw-r--r--net/batman-adv/bat_v_elp.c71
-rw-r--r--net/batman-adv/bat_v_ogm.c75
-rw-r--r--net/batman-adv/debugfs.c26
-rw-r--r--net/batman-adv/distributed-arp-table.c84
-rw-r--r--net/batman-adv/fragmentation.c82
-rw-r--r--net/batman-adv/fragmentation.h2
-rw-r--r--net/batman-adv/gateway_client.c9
-rw-r--r--net/batman-adv/hard-interface.c225
-rw-r--r--net/batman-adv/hard-interface.h21
-rw-r--r--net/batman-adv/hash.h30
-rw-r--r--net/batman-adv/icmp_socket.c5
-rw-r--r--net/batman-adv/log.c4
-rw-r--r--net/batman-adv/log.h14
-rw-r--r--net/batman-adv/main.c16
-rw-r--r--net/batman-adv/main.h28
-rw-r--r--net/batman-adv/multicast.c70
-rw-r--r--net/batman-adv/multicast.h6
-rw-r--r--net/batman-adv/netlink.c31
-rw-r--r--net/batman-adv/network-coding.c43
-rw-r--r--net/batman-adv/originator.c25
-rw-r--r--net/batman-adv/packet.h12
-rw-r--r--net/batman-adv/routing.c180
-rw-r--r--net/batman-adv/send.c419
-rw-r--r--net/batman-adv/send.h11
-rw-r--r--net/batman-adv/soft-interface.c27
-rw-r--r--net/batman-adv/sysfs.c53
-rw-r--r--net/batman-adv/tp_meter.c7
-rw-r--r--net/batman-adv/translation-table.c43
-rw-r--r--net/batman-adv/tvlv.c5
-rw-r--r--net/batman-adv/types.h45
-rw-r--r--net/bluetooth/6lowpan.c4
-rw-r--r--net/bluetooth/bnep/netdev.c3
-rw-r--r--net/bluetooth/hci_conn.c26
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/mgmt.c26
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bluetooth/smp.c85
-rw-r--r--net/bluetooth/smp.h1
-rw-r--r--net/bridge/br_device.c5
-rw-r--r--net/bridge/br_fdb.c10
-rw-r--r--net/bridge/br_multicast.c211
-rw-r--r--net/bridge/br_netfilter_hooks.c26
-rw-r--r--net/bridge/br_netlink.c34
-rw-r--r--net/bridge/br_private.h11
-rw-r--r--net/bridge/br_private_stp.h1
-rw-r--r--net/bridge/br_stp.c65
-rw-r--r--net/bridge/br_stp_if.c14
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/bridge/br_sysfs_br.c41
-rw-r--r--net/bridge/netfilter/Kconfig1
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c3
-rw-r--r--net/bridge/netfilter/ebt_log.c11
-rw-r--r--net/bridge/netfilter/ebt_nflog.c6
-rw-r--r--net/bridge/netfilter/ebt_redirect.c6
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/bridge/netfilter/nf_log_bridge.c17
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c2
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c30
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_socket.c5
-rw-r--r--net/caif/cfcnfg.c9
-rw-r--r--net/can/bcm.c50
-rw-r--r--net/can/raw.c3
-rw-r--r--net/ceph/ceph_fs.c3
-rw-r--r--net/ceph/osd_client.c1
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/datagram.c72
-rw-r--r--net/core/dev.c913
-rw-r--r--net/core/devlink.c100
-rw-r--r--net/core/drop_monitor.c21
-rw-r--r--net/core/ethtool.c98
-rw-r--r--net/core/fib_rules.c78
-rw-r--r--net/core/filter.c384
-rw-r--r--net/core/flow.c66
-rw-r--r--net/core/flow_dissector.c58
-rw-r--r--net/core/gen_estimator.c294
-rw-r--r--net/core/gen_stats.c20
-rw-r--r--net/core/lwt_bpf.c396
-rw-r--r--net/core/lwtunnel.c17
-rw-r--r--net/core/neighbour.c15
-rw-r--r--net/core/net-sysfs.c65
-rw-r--r--net/core/net_namespace.c44
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/pktgen.c19
-rw-r--r--net/core/rtnetlink.c49
-rw-r--r--net/core/secure_seq.c11
-rw-r--r--net/core/skbuff.c70
-rw-r--r--net/core/sock.c94
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/core/stream.c28
-rw-r--r--net/core/sysctl_net_core.c5
-rw-r--r--net/dcb/dcbnl.c1
-rw-r--r--net/dccp/ipv4.c36
-rw-r--r--net/dccp/ipv6.c21
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/decnet/af_decnet.c16
-rw-r--r--net/dsa/dsa.c13
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/slave.c35
-rw-r--r--net/ethernet/eth.c10
-rw-r--r--net/hsr/hsr_device.c1
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/hsr/hsr_netlink.c23
-rw-r--r--net/ieee802154/netlink.c24
-rw-r--r--net/ieee802154/nl-phy.c6
-rw-r--r--net/ieee802154/nl802154.c44
-rw-r--r--net/ipv4/Kconfig9
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c25
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c23
-rw-r--r--net/ipv4/fib_semantics.c1
-rw-r--r--net/ipv4/fib_trie.c313
-rw-r--r--net/ipv4/fou.c27
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/icmp.c10
-rw-r--r--net/ipv4/igmp.c50
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/inet_diag.c73
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_output.c61
-rw-r--r--net/ipv4/ip_sockglue.c41
-rw-r--r--net/ipv4/ip_tunnel.c10
-rw-r--r--net/ipv4/ip_tunnel_core.c11
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c17
-rw-r--r--net/ipv4/netfilter.c5
-rw-r--r--net/ipv4/netfilter/Kconfig14
-rw-r--r--net/ipv4/netfilter/Makefile3
-rw-r--r--net/ipv4/netfilter/arp_tables.c46
-rw-r--r--net/ipv4/netfilter/ip_tables.c42
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c6
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c11
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c4
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c8
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c10
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c145
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c41
-rw-r--r--net/ipv4/netfilter/nf_socket_ipv4.c163
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c241
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c15
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c14
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c4
-rw-r--r--net/ipv4/ping.c9
-rw-r--r--net/ipv4/raw.c35
-rw-r--r--net/ipv4/raw_diag.c266
-rw-r--r--net/ipv4/route.c120
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp.c120
-rw-r--r--net/ipv4/tcp_bbr.c32
-rw-r--r--net/ipv4/tcp_cong.c18
-rw-r--r--net/ipv4/tcp_dctcp.c14
-rw-r--r--net/ipv4/tcp_highspeed.c11
-rw-r--r--net/ipv4/tcp_hybla.c1
-rw-r--r--net/ipv4/tcp_illinois.c10
-rw-r--r--net/ipv4/tcp_input.c55
-rw-r--r--net/ipv4/tcp_ipv4.c48
-rw-r--r--net/ipv4/tcp_lp.c1
-rw-r--r--net/ipv4/tcp_metrics.c23
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/tcp_output.c185
-rw-r--r--net/ipv4/tcp_scalable.c15
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/tcp_vegas.c1
-rw-r--r--net/ipv4/tcp_veno.c10
-rw-r--r--net/ipv4/tcp_westwood.c1
-rw-r--r--net/ipv4/tcp_yeah.c10
-rw-r--r--net/ipv4/udp.c275
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv4/udplite.c3
-rw-r--r--net/ipv6/Kconfig35
-rw-r--r--net/ipv6/Makefile4
-rw-r--r--net/ipv6/addrconf.c169
-rw-r--r--net/ipv6/af_inet6.c18
-rw-r--r--net/ipv6/ah6.c5
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/esp6.c7
-rw-r--r--net/ipv6/exthdrs.c268
-rw-r--r--net/ipv6/icmp.c13
-rw-r--r--net/ipv6/ila/ila_lwt.c92
-rw-r--r--net/ipv6/ila/ila_xlat.c43
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/inet6_hashtables.c13
-rw-r--r--net/ipv6/ip6_gre.c6
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c18
-rw-r--r--net/ipv6/ip6_tunnel.c27
-rw-r--r--net/ipv6/ip6_udp_tunnel.c3
-rw-r--r--net/ipv6/ip6_vti.c59
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipcomp6.c5
-rw-r--r--net/ipv6/ipv6_sockglue.c21
-rw-r--r--net/ipv6/mcast.c17
-rw-r--r--net/ipv6/ndisc.c29
-rw-r--r--net/ipv6/netfilter.c1
-rw-r--r--net/ipv6/netfilter/Kconfig14
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6_tables.c43
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c2
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c23
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c8
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c3
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c146
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c44
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/netfilter/nf_socket_ipv6.c151
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c275
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c6
-rw-r--r--net/ipv6/output_core.c2
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/raw.c10
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c105
-rw-r--r--net/ipv6/seg6.c495
-rw-r--r--net/ipv6/seg6_hmac.c484
-rw-r--r--net/ipv6/seg6_iptunnel.c431
-rw-r--r--net/ipv6/sit.c16
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c28
-rw-r--r--net/ipv6/udp.c57
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c3
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/irda/irlan/irlan_eth.c4
-rw-r--r--net/irda/irnetlink.c22
-rw-r--r--net/iucv/af_iucv.c34
-rw-r--r--net/iucv/iucv.c124
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/l2tp/l2tp_core.h10
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ip.c68
-rw-r--r--net/l2tp/l2tp_ip6.c87
-rw-r--r--net/l2tp/l2tp_netlink.c59
-rw-r--r--net/l2tp/l2tp_ppp.c60
-rw-r--r--net/llc/af_llc.c24
-rw-r--r--net/mac80211/Makefile1
-rw-r--r--net/mac80211/aes_ccm.c46
-rw-r--r--net/mac80211/aes_ccm.h8
-rw-r--r--net/mac80211/aes_cmac.c8
-rw-r--r--net/mac80211/aes_cmac.h4
-rw-r--r--net/mac80211/aes_gcm.c43
-rw-r--r--net/mac80211/aes_gcm.h6
-rw-r--r--net/mac80211/aes_gmac.c26
-rw-r--r--net/mac80211/aes_gmac.h4
-rw-r--r--net/mac80211/agg-rx.c8
-rw-r--r--net/mac80211/cfg.c35
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/debugfs_netdev.c11
-rw-r--r--net/mac80211/debugfs_sta.c9
-rw-r--r--net/mac80211/fils_aead.c342
-rw-r--r--net/mac80211/fils_aead.h19
-rw-r--r--net/mac80211/ieee80211_i.h26
-rw-r--r--net/mac80211/iface.c31
-rw-r--r--net/mac80211/main.c5
-rw-r--r--net/mac80211/mlme.c81
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c62
-rw-r--r--net/mac80211/sta_info.c25
-rw-r--r--net/mac80211/sta_info.h4
-rw-r--r--net/mac80211/tx.c69
-rw-r--r--net/mac80211/util.c61
-rw-r--r--net/mac80211/vht.c16
-rw-r--r--net/mac80211/wme.c23
-rw-r--r--net/mac80211/wpa.c24
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/mpls/mpls_iptunnel.c5
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-aen.c18
-rw-r--r--net/ncsi/ncsi-manage.c126
-rw-r--r--net/netfilter/Kconfig56
-rw-r--r--net/netfilter/Makefile24
-rw-r--r--net/netfilter/core.c107
-rw-r--r--net/netfilter/ipset/Kconfig9
-rw-r--r--net/netfilter/ipset/Makefile1
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h31
-rw-r--r--net/netfilter/ipset/ip_set_core.c22
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h254
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c315
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c10
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c37
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c25
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c54
-rw-r--r--net/netfilter/nf_conntrack_core.c68
-rw-r--r--net/netfilter/nf_conntrack_helper.c11
-rw-r--r--net/netfilter/nf_conntrack_proto.c158
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c101
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c100
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c103
-rw-r--r--net/netfilter/nf_conntrack_sip.c5
-rw-r--r--net/netfilter/nf_conntrack_standalone.c10
-rw-r--r--net/netfilter/nf_dup_netdev.c35
-rw-r--r--net/netfilter/nf_internals.h7
-rw-r--r--net/netfilter/nf_log_common.c28
-rw-r--r--net/netfilter/nf_log_netdev.c81
-rw-r--r--net/netfilter/nf_nat_core.c61
-rw-r--r--net/netfilter/nf_nat_proto_dccp.c36
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c40
-rw-r--r--net/netfilter/nf_nat_proto_udplite.c35
-rw-r--r--net/netfilter/nf_queue.c78
-rw-r--r--net/netfilter/nf_synproxy_core.c2
-rw-r--r--net/netfilter/nf_tables_api.c752
-rw-r--r--net/netfilter/nf_tables_core.c91
-rw-r--r--net/netfilter/nf_tables_trace.c8
-rw-r--r--net/netfilter/nfnetlink_log.c5
-rw-r--r--net/netfilter/nfnetlink_queue.c6
-rw-r--r--net/netfilter/nft_bitwise.c13
-rw-r--r--net/netfilter/nft_byteorder.c13
-rw-r--r--net/netfilter/nft_cmp.c16
-rw-r--r--net/netfilter/nft_counter.c223
-rw-r--r--net/netfilter/nft_ct.c76
-rw-r--r--net/netfilter/nft_dynset.c38
-rw-r--r--net/netfilter/nft_exthdr.c3
-rw-r--r--net/netfilter/nft_fib.c159
-rw-r--r--net/netfilter/nft_fib_inet.c82
-rw-r--r--net/netfilter/nft_fwd_netdev.c4
-rw-r--r--net/netfilter/nft_hash.c14
-rw-r--r--net/netfilter/nft_immediate.c16
-rw-r--r--net/netfilter/nft_log.c5
-rw-r--r--net/netfilter/nft_lookup.c18
-rw-r--r--net/netfilter/nft_masq.c6
-rw-r--r--net/netfilter/nft_meta.c11
-rw-r--r--net/netfilter/nft_nat.c11
-rw-r--r--net/netfilter/nft_numgen.c2
-rw-r--r--net/netfilter/nft_objref.c226
-rw-r--r--net/netfilter/nft_payload.c120
-rw-r--r--net/netfilter/nft_queue.c2
-rw-r--r--net/netfilter/nft_quota.c158
-rw-r--r--net/netfilter/nft_range.c45
-rw-r--r--net/netfilter/nft_redir.c6
-rw-r--r--net/netfilter/nft_reject_inet.c18
-rw-r--r--net/netfilter/nft_rt.c153
-rw-r--r--net/netfilter/nft_set_hash.c44
-rw-r--r--net/netfilter/nft_set_rbtree.c14
-rw-r--r--net/netfilter/x_tables.c62
-rw-r--r--net/netfilter/xt_AUDIT.c10
-rw-r--r--net/netfilter/xt_CONNSECMARK.c4
-rw-r--r--net/netfilter/xt_CT.c6
-rw-r--r--net/netfilter/xt_LOG.c6
-rw-r--r--net/netfilter/xt_NETMAP.c31
-rw-r--r--net/netfilter/xt_NFLOG.c7
-rw-r--r--net/netfilter/xt_NFQUEUE.c4
-rw-r--r--net/netfilter/xt_RATEEST.c4
-rw-r--r--net/netfilter/xt_REDIRECT.c16
-rw-r--r--net/netfilter/xt_TCPMSS.c4
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/netfilter/xt_TPROXY.c31
-rw-r--r--net/netfilter/xt_addrtype.c10
-rw-r--r--net/netfilter/xt_bpf.c96
-rw-r--r--net/netfilter/xt_cluster.c2
-rw-r--r--net/netfilter/xt_connbytes.c4
-rw-r--r--net/netfilter/xt_connlabel.c6
-rw-r--r--net/netfilter/xt_connlimit.c14
-rw-r--r--net/netfilter/xt_connmark.c12
-rw-r--r--net/netfilter/xt_conntrack.c12
-rw-r--r--net/netfilter/xt_devgroup.c4
-rw-r--r--net/netfilter/xt_dscp.c2
-rw-r--r--net/netfilter/xt_hashlimit.c6
-rw-r--r--net/netfilter/xt_helper.c4
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/netfilter/xt_ipvs.c4
-rw-r--r--net/netfilter/xt_multiport.c52
-rw-r--r--net/netfilter/xt_nat.c18
-rw-r--r--net/netfilter/xt_nfacct.c2
-rw-r--r--net/netfilter/xt_osf.c10
-rw-r--r--net/netfilter/xt_owner.c2
-rw-r--r--net/netfilter/xt_pkttype.c4
-rw-r--r--net/netfilter/xt_policy.c4
-rw-r--r--net/netfilter/xt_rateest.c28
-rw-r--r--net/netfilter/xt_recent.c12
-rw-r--r--net/netfilter/xt_set.c38
-rw-r--r--net/netfilter/xt_socket.c336
-rw-r--r--net/netfilter/xt_state.c4
-rw-r--r--net/netlabel/netlabel_calipso.c21
-rw-r--r--net/netlabel/netlabel_cipso_v4.c22
-rw-r--r--net/netlabel/netlabel_mgmt.c21
-rw-r--r--net/netlabel/netlabel_unlabeled.c21
-rw-r--r--net/netlink/af_netlink.c29
-rw-r--r--net/netlink/af_netlink.h2
-rw-r--r--net/netlink/diag.c5
-rw-r--r--net/netlink/genetlink.c323
-rw-r--r--net/nfc/netlink.c34
-rw-r--r--net/openvswitch/actions.c129
-rw-r--r--net/openvswitch/conntrack.c13
-rw-r--r--net/openvswitch/datapath.c35
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/flow.c105
-rw-r--r--net/openvswitch/flow.h22
-rw-r--r--net/openvswitch/flow_netlink.c179
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/openvswitch/vport-netdev.c10
-rw-r--r--net/openvswitch/vport.c48
-rw-r--r--net/openvswitch/vport.h3
-rw-r--r--net/packet/af_packet.c96
-rw-r--r--net/phonet/pep-gprs.c12
-rw-r--r--net/phonet/pep.c9
-rw-r--r--net/phonet/pn_dev.c2
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/af_rds.c4
-rw-r--r--net/rds/connection.c18
-rw-r--r--net/rds/message.c1
-rw-r--r--net/rds/rds.h14
-rw-r--r--net/rds/recv.c36
-rw-r--r--net/rds/send.c9
-rw-r--r--net/rds/tcp.c24
-rw-r--r--net/rds/tcp_connect.c14
-rw-r--r--net/rds/tcp_listen.c31
-rw-r--r--net/rds/tcp_send.c3
-rw-r--r--net/rds/threads.c3
-rw-r--r--net/rxrpc/af_rxrpc.c11
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_client.c4
-rw-r--r--net/rxrpc/input.c7
-rw-r--r--net/rxrpc/peer_object.c4
-rw-r--r--net/sched/act_api.c12
-rw-r--r--net/sched/act_bpf.c20
-rw-r--r--net/sched/act_connmark.c2
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_ife.c2
-rw-r--r--net/sched/act_ipt.c16
-rw-r--r--net/sched/act_mirred.c90
-rw-r--r--net/sched/act_nat.c2
-rw-r--r--net/sched/act_pedit.c26
-rw-r--r--net/sched/act_police.c23
-rw-r--r--net/sched/act_simple.c2
-rw-r--r--net/sched/act_skbedit.c23
-rw-r--r--net/sched/act_skbmod.c2
-rw-r--r--net/sched/act_tunnel_key.c17
-rw-r--r--net/sched/act_vlan.c2
-rw-r--r--net/sched/cls_api.c30
-rw-r--r--net/sched/cls_basic.c4
-rw-r--r--net/sched/cls_bpf.c53
-rw-r--r--net/sched/cls_cgroup.c7
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_flower.c337
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_rsvp.h3
-rw-r--r--net/sched/cls_tcindex.c1
-rw-r--r--net/sched/em_ipset.c17
-rw-r--r--net/sched/em_meta.c9
-rw-r--r--net/sched/sch_api.c13
-rw-r--r--net/sched/sch_cbq.c6
-rw-r--r--net/sched/sch_drr.c6
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c6
-rw-r--r--net/sched/sch_qfq.c8
-rw-r--r--net/sched/sch_teql.c5
-rw-r--r--net/sctp/associola.c8
-rw-r--r--net/sctp/chunk.c32
-rw-r--r--net/sctp/input.c128
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/output.c437
-rw-r--r--net/sctp/outqueue.c4
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c39
-rw-r--r--net/socket.c53
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c13
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c82
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c21
-rw-r--r--net/sunrpc/clnt.c7
-rw-r--r--net/sunrpc/netns.h2
-rw-r--r--net/sunrpc/sunrpc_syms.c2
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c43
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c37
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c6
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h3
-rw-r--r--net/sunrpc/xprtsock.c5
-rw-r--r--net/switchdev/switchdev.c14
-rw-r--r--net/tipc/bcast.c14
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/bearer.c11
-rw-r--r--net/tipc/bearer.h13
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/core.h2
-rw-r--r--net/tipc/link.c42
-rw-r--r--net/tipc/monitor.c10
-rw-r--r--net/tipc/msg.h19
-rw-r--r--net/tipc/name_distr.c1
-rw-r--r--net/tipc/netlink.c27
-rw-r--r--net/tipc/netlink_compat.c25
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c548
-rw-r--r--net/tipc/udp_media.c5
-rw-r--r--net/unix/af_unix.c24
-rw-r--r--net/vmw_vsock/virtio_transport.c56
-rw-r--r--net/vmw_vsock/virtio_transport_common.c10
-rw-r--r--net/wimax/stack.c22
-rw-r--r--net/wireless/core.c33
-rw-r--r--net/wireless/core.h6
-rw-r--r--net/wireless/lib80211_crypt_tkip.c2
-rw-r--r--net/wireless/mesh.c2
-rw-r--r--net/wireless/mlme.c18
-rw-r--r--net/wireless/nl80211.c551
-rw-r--r--net/wireless/rdev-ops.h24
-rw-r--r--net/wireless/scan.c69
-rw-r--r--net/wireless/sme.c16
-rw-r--r--net/wireless/sysfs.c5
-rw-r--r--net/wireless/trace.h37
-rw-r--r--net/wireless/util.c162
-rw-r--r--net/xfrm/xfrm_policy.c11
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--samples/bpf/Makefile32
-rw-r--r--samples/bpf/bpf_helpers.h8
-rw-r--r--samples/bpf/bpf_load.c112
-rw-r--r--samples/bpf/bpf_load.h2
-rw-r--r--samples/bpf/cgroup_helpers.c177
-rw-r--r--samples/bpf/cgroup_helpers.h16
-rw-r--r--samples/bpf/libbpf.c21
-rw-r--r--samples/bpf/libbpf.h5
-rw-r--r--samples/bpf/lwt_len_hist.sh37
-rw-r--r--samples/bpf/lwt_len_hist_kern.c82
-rw-r--r--samples/bpf/lwt_len_hist_user.c76
-rw-r--r--samples/bpf/map_perf_test_kern.c39
-rw-r--r--samples/bpf/map_perf_test_user.c32
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/parse_simple.c1
-rw-r--r--samples/bpf/parse_varlen.c1
-rw-r--r--samples/bpf/sampleip_kern.c2
-rw-r--r--samples/bpf/sock_flags_kern.c44
-rw-r--r--samples/bpf/sockex2_kern.c2
-rwxr-xr-xsamples/bpf/tc_l2_redirect.sh173
-rw-r--r--samples/bpf/tc_l2_redirect_kern.c236
-rw-r--r--samples/bpf/tc_l2_redirect_user.c73
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/tcbpf2_kern.c1
-rw-r--r--samples/bpf/test_cgrp2_attach.c167
-rw-r--r--samples/bpf/test_cgrp2_attach2.c132
-rw-r--r--samples/bpf/test_cgrp2_sock.c83
-rwxr-xr-xsamples/bpf/test_cgrp2_sock.sh47
-rw-r--r--samples/bpf/test_cgrp2_sock2.c66
-rwxr-xr-xsamples/bpf/test_cgrp2_sock2.sh81
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c1
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c108
-rw-r--r--samples/bpf/test_lru_dist.c541
-rw-r--r--samples/bpf/test_lwt_bpf.c253
-rw-r--r--samples/bpf/test_lwt_bpf.sh399
-rw-r--r--samples/bpf/test_maps.c503
-rw-r--r--samples/bpf/trace_event_kern.c2
-rw-r--r--samples/bpf/tracex2_user.c4
-rw-r--r--samples/bpf/tracex3_user.c6
-rw-r--r--samples/bpf/xdp1_user.c97
-rw-r--r--samples/bpf/xdp_tx_iptunnel_common.h37
-rw-r--r--samples/bpf/xdp_tx_iptunnel_kern.c236
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c256
-rw-r--r--samples/seccomp/Makefile4
-rw-r--r--samples/seccomp/bpf-helper.c38
-rw-r--r--samples/seccomp/dropper.c7
-rw-r--r--samples/vfio-mdev/Makefile13
-rw-r--r--samples/vfio-mdev/mtty.c1503
-rw-r--r--scripts/Makefile.build81
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.kasan2
-rw-r--r--scripts/Makefile.ubsan4
-rwxr-xr-xscripts/bloat-o-meter28
-rwxr-xr-xscripts/check_00index.sh66
-rwxr-xr-xscripts/checkkconfigsymbols.py2
-rwxr-xr-xscripts/checkpatch.pl62
-rwxr-xr-xscripts/decode_stacktrace.sh3
-rwxr-xr-xscripts/faddr2line33
-rw-r--r--scripts/gcc-plugins/cyc_complexity_plugin.c4
-rw-r--r--scripts/gcc-plugins/gcc-common.h1
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c27
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c6
-rwxr-xr-xscripts/gcc-x86_64-has-stack-protector.sh2
-rwxr-xr-xscripts/get_maintainer.pl12
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/kconfig/expr.h2
-rw-r--r--scripts/kconfig/menu.c55
-rw-r--r--scripts/kconfig/symbol.c24
-rw-r--r--scripts/kconfig/zconf.gperf1
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped30
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped1581
-rw-r--r--scripts/kconfig/zconf.y16
-rwxr-xr-xscripts/kernel-doc20
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--[-rwxr-xr-x]scripts/sign-file.c2
-rwxr-xr-xscripts/tags.sh19
-rw-r--r--security/apparmor/domain.c6
-rw-r--r--security/integrity/digsig.c2
-rw-r--r--security/integrity/evm/evm_crypto.c12
-rw-r--r--security/integrity/evm/evm_main.c4
-rw-r--r--security/integrity/ima/ima_appraise.c13
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_init.c3
-rw-r--r--security/selinux/hooks.c131
-rw-r--r--security/selinux/include/classmap.h4
-rw-r--r--security/selinux/include/objsec.h5
-rw-r--r--security/selinux/selinuxfs.c6
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_access.c7
-rw-r--r--security/smack/smack_lsm.c117
-rw-r--r--security/smack/smackfs.c3
-rw-r--r--security/tomoyo/domain.c2
-rw-r--r--security/yama/yama_lsm.c16
-rw-r--r--sound/core/info.c9
-rw-r--r--sound/core/misc.c20
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/drivers/opl3/opl3_lib.c2
-rw-r--r--sound/firewire/bebob/bebob.c4
-rw-r--r--sound/hda/hdac_i915.c18
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/als4000.c2
-rw-r--r--sound/pci/au88x0/au88x0_game.c2
-rw-r--r--sound/pci/azt3328.c2
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/cs4281.c2
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/echoaudio/layla24_dsp.c2
-rw-r--r--sound/pci/emu10k1/emu10k1.c11
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c228
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/es1938.c2
-rw-r--r--sound/pci/es1968.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c1
-rw-r--r--sound/pci/hda/patch_conexant.c17
-rw-r--r--sound/pci/hda/patch_hdmi.c7
-rw-r--r--sound/pci/hda/patch_realtek.c48
-rw-r--r--sound/pci/hda/thinkpad_helper.c3
-rw-r--r--sound/pci/riptide/riptide.c2
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/trident/trident_main.c2
-rw-r--r--sound/pci/via82xx.c2
-rw-r--r--sound/pci/ymfpci/ymfpci.h2
-rw-r--r--sound/soc/atmel/Kconfig10
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c83
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h1
-rw-r--r--sound/soc/atmel/atmel_wm8904.c2
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c472
-rw-r--r--sound/soc/bcm/Kconfig1
-rw-r--r--sound/soc/codecs/Kconfig31
-rw-r--r--sound/soc/codecs/Makefile11
-rw-r--r--sound/soc/codecs/ab8500-codec.c2
-rw-r--r--sound/soc/codecs/adau17x1.c2
-rw-r--r--sound/soc/codecs/ak4641.c22
-rw-r--r--sound/soc/codecs/ak4641.h47
-rw-r--r--sound/soc/codecs/arizona.c153
-rw-r--r--sound/soc/codecs/arizona.h109
-rw-r--r--sound/soc/codecs/cs35l34.c1251
-rw-r--r--sound/soc/codecs/cs35l34.h269
-rw-r--r--sound/soc/codecs/cs4270.c8
-rw-r--r--sound/soc/codecs/cs42l42.c1986
-rw-r--r--sound/soc/codecs/cs42l42.h776
-rw-r--r--sound/soc/codecs/cs42l56.c18
-rw-r--r--sound/soc/codecs/cs42l73.c4
-rw-r--r--sound/soc/codecs/cs42xx8.c10
-rw-r--r--sound/soc/codecs/cs47l24.c55
-rw-r--r--sound/soc/codecs/da7219-aad.c18
-rw-r--r--sound/soc/codecs/da7219.c142
-rw-r--r--sound/soc/codecs/da7219.h5
-rw-r--r--sound/soc/codecs/es8328.h37
-rw-r--r--sound/soc/codecs/hdac_hdmi.c2
-rw-r--r--sound/soc/codecs/hdmi-codec.c7
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c890
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c923
-rw-r--r--sound/soc/codecs/nau8825.c142
-rw-r--r--sound/soc/codecs/nau8825.h16
-rw-r--r--sound/soc/codecs/rl6231.c1
-rw-r--r--sound/soc/codecs/rl6347a.c2
-rw-r--r--sound/soc/codecs/rt298.c29
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c17
-rw-r--r--sound/soc/codecs/rt5514.h2
-rw-r--r--sound/soc/codecs/rt5616.c3
-rw-r--r--sound/soc/codecs/rt5640.c5
-rw-r--r--sound/soc/codecs/rt5640.h6
-rw-r--r--sound/soc/codecs/rt5660.c4
-rw-r--r--sound/soc/codecs/rt5660.h3
-rw-r--r--sound/soc/codecs/rt5663.c1141
-rw-r--r--sound/soc/codecs/rt5663.h1162
-rw-r--r--sound/soc/codecs/rt5665.c4874
-rw-r--r--sound/soc/codecs/rt5665.h1990
-rw-r--r--sound/soc/codecs/rt5670.c16
-rw-r--r--sound/soc/codecs/rt5670.h1
-rw-r--r--sound/soc/codecs/rt5677-spi.c1
-rw-r--r--sound/soc/codecs/stac9766.c162
-rw-r--r--sound/soc/codecs/stac9766.h17
-rw-r--r--sound/soc/codecs/sti-sas.c181
-rw-r--r--sound/soc/codecs/tas571x.c37
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c3
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h1
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/uda1380.c77
-rw-r--r--sound/soc/codecs/uda1380.h4
-rw-r--r--sound/soc/codecs/wm2200.c4
-rw-r--r--sound/soc/codecs/wm5102.c59
-rw-r--r--sound/soc/codecs/wm5110.c61
-rw-r--r--sound/soc/codecs/wm8523.c24
-rw-r--r--sound/soc/codecs/wm8580.c123
-rw-r--r--sound/soc/codecs/wm8753.h3
-rw-r--r--sound/soc/codecs/wm8978.h2
-rw-r--r--sound/soc/codecs/wm8997.c39
-rw-r--r--sound/soc/codecs/wm8998.c38
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9705.c138
-rw-r--r--sound/soc/codecs/wm9705.h11
-rw-r--r--sound/soc/codecs/wm9712.c177
-rw-r--r--sound/soc/codecs/wm9712.h11
-rw-r--r--sound/soc/codecs/wm9713.c2
-rw-r--r--sound/soc/codecs/wm9713.h4
-rw-r--r--sound/soc/codecs/wm_adsp.c354
-rw-r--r--sound/soc/codecs/wm_adsp.h27
-rw-r--r--sound/soc/codecs/wmfw.h4
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/efika-audio-fabric.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c2
-rw-r--r--sound/soc/fsl/imx-wm8962.c2
-rw-r--r--sound/soc/generic/simple-card-utils.c5
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/generic/simple-scu-card.c115
-rw-r--r--sound/soc/intel/Kconfig3
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c6
-rw-r--r--sound/soc/intel/atom/sst/sst.c39
-rw-r--r--sound/soc/intel/atom/sst/sst.h1
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c3
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c11
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c4
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c3
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c2
-rw-r--r--sound/soc/intel/boards/broadwell.c18
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c30
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c68
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c10
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c46
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/boards/mfld_machine.c4
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c6
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c6
-rw-r--r--sound/soc/intel/boards/skl_rt286.c4
-rw-r--r--sound/soc/intel/common/sst-acpi.h17
-rw-r--r--sound/soc/intel/common/sst-ipc.c85
-rw-r--r--sound/soc/intel/common/sst-ipc.h8
-rw-r--r--sound/soc/intel/common/sst-match-acpi.c57
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c3
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c145
-rw-r--r--sound/soc/intel/skylake/skl-messages.c39
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c28
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c1
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h12
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c71
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h37
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c47
-rw-r--r--sound/soc/intel/skylake/skl-topology.h28
-rw-r--r--sound/soc/intel/skylake/skl.c67
-rw-r--r--sound/soc/intel/skylake/skl.h6
-rw-r--r--sound/soc/kirkwood/armada-370-db.c2
-rw-r--r--sound/soc/mxs/mxs-saif.c13
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c2
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/corgi.c6
-rw-r--r--sound/soc/pxa/e740_wm9705.c1
-rw-r--r--sound/soc/pxa/e750_wm9705.c1
-rw-r--r--sound/soc/pxa/e800_wm9712.c1
-rw-r--r--sound/soc/pxa/em-x270.c1
-rw-r--r--sound/soc/pxa/hx4700.c2
-rw-r--r--sound/soc/pxa/magician.c2
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c2
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/poodle.c4
-rw-r--r--sound/soc/pxa/pxa-ssp.h6
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.h3
-rw-r--r--sound/soc/pxa/spitz.c6
-rw-r--r--sound/soc/pxa/tosa.c7
-rw-r--r--sound/soc/qcom/apq8016_sbc.c11
-rw-r--r--sound/soc/qcom/lpass-cpu.c3
-rw-r--r--sound/soc/qcom/lpass-platform.c187
-rw-r--r--sound/soc/qcom/lpass.h1
-rw-r--r--sound/soc/qcom/storm.c2
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c8
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c2
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c2
-rw-r--r--sound/soc/samsung/Kconfig58
-rw-r--r--sound/soc/samsung/Makefile9
-rw-r--r--sound/soc/samsung/ac97.c437
-rw-r--r--sound/soc/samsung/dmaengine.c8
-rw-r--r--sound/soc/samsung/i2s.c27
-rw-r--r--sound/soc/samsung/ln2440sbc_alc650.c72
-rw-r--r--sound/soc/samsung/pcm.c71
-rw-r--r--sound/soc/samsung/regs-ac97.h66
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c18
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c57
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c79
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c68
-rw-r--r--sound/soc/samsung/smdk_wm8580.c30
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c175
-rw-r--r--sound/soc/samsung/smdk_wm9713.c108
-rw-r--r--sound/soc/samsung/spdif.c19
-rw-r--r--sound/soc/samsung/tm2_wm5110.c552
-rw-r--r--sound/soc/sh/Kconfig3
-rw-r--r--sound/soc/sh/rcar/adg.c61
-rw-r--r--sound/soc/sh/rcar/core.c175
-rw-r--r--sound/soc/sh/rcar/dma.c295
-rw-r--r--sound/soc/sh/rcar/dvc.c2
-rw-r--r--sound/soc/sh/rcar/gen.c12
-rw-r--r--sound/soc/sh/rcar/rsnd.h156
-rw-r--r--sound/soc/sh/rcar/src.c13
-rw-r--r--sound/soc/sh/rcar/ssi.c28
-rw-r--r--sound/soc/sh/rcar/ssiu.c20
-rw-r--r--sound/soc/soc-compress.c98
-rw-r--r--sound/soc/soc-core.c181
-rw-r--r--sound/soc/soc-dapm.c154
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c13
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/soc-topology.c751
-rw-r--r--sound/soc/soc-utils.c199
-rw-r--r--sound/soc/sti/sti_uniperif.c43
-rw-r--r--sound/soc/sti/uniperif.h2
-rw-r--r--sound/soc/sti/uniperif_player.c97
-rw-r--r--sound/soc/sti/uniperif_reader.c41
-rw-r--r--sound/soc/sunxi/Kconfig8
-rw-r--r--sound/soc/sunxi/Makefile1
-rw-r--r--sound/soc/sunxi/sun4i-codec.c882
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c105
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c665
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_max98090.c2
-rw-r--r--sound/soc/tegra/tegra_rt5640.c2
-rw-r--r--sound/soc/tegra/tegra_rt5677.c2
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c2
-rw-r--r--sound/soc/tegra/tegra_wm8753.c2
-rw-r--r--sound/soc/tegra/tegra_wm8903.c2
-rw-r--r--sound/soc/tegra/trimslice.c2
-rw-r--r--sound/soc/zte/Kconfig16
-rw-r--r--sound/soc/zte/Makefile4
-rw-r--r--sound/soc/zte/zx-i2s.c (renamed from sound/soc/zte/zx296702-i2s.c)0
-rw-r--r--sound/soc/zte/zx-spdif.c (renamed from sound/soc/zte/zx296702-spdif.c)2
-rw-r--r--sound/sparc/dbri.c27
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/endpoint.c32
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/line6/driver.h9
-rw-r--r--sound/usb/line6/podhd.c26
-rw-r--r--sound/usb/mixer.c3
-rw-r--r--sound/usb/pcm.c31
-rw-r--r--sound/usb/quirks.c38
-rw-r--r--tools/Makefile7
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/build/Build.include22
-rw-r--r--tools/build/Documentation/Build.txt6
-rw-r--r--tools/build/Makefile.feature138
-rw-r--r--tools/build/feature/Makefile126
-rw-r--r--tools/build/feature/test-clang.cpp21
-rw-r--r--tools/build/feature/test-jvmti.c13
-rw-r--r--tools/build/feature/test-llvm-version.cpp11
-rw-r--r--tools/build/feature/test-llvm.cpp13
-rw-r--r--tools/build/fixdep.c5
-rw-r--r--tools/gpio/gpio-hammer.c67
-rw-r--r--tools/gpio/gpio-utils.c256
-rw-r--r--tools/gpio/gpio-utils.h16
-rw-r--r--tools/hv/Makefile3
-rwxr-xr-xtools/hv/bondvf.sh4
-rw-r--r--tools/hv/hv_fcopy_daemon.c7
-rw-r--r--tools/hv/hv_kvp_daemon.c20
-rw-r--r--tools/iio/iio_generic_buffer.c18
-rw-r--r--tools/include/asm-generic/bitops.h1
-rw-r--r--tools/include/asm-generic/bitops/__ffz.h12
-rw-r--r--tools/include/asm-generic/bitops/find.h28
-rw-r--r--tools/include/asm/bug.h11
-rw-r--r--tools/include/linux/bitmap.h26
-rw-r--r--tools/include/linux/bitops.h5
-rw-r--r--tools/include/linux/filter.h24
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h5
-rw-r--r--tools/include/uapi/linux/hw_breakpoint.h4
-rw-r--r--tools/leds/.gitignore1
-rw-r--r--tools/leds/Makefile13
-rw-r--r--tools/leds/uledmon.c63
-rw-r--r--tools/lib/bpf/bpf.c56
-rw-r--r--tools/lib/bpf/bpf.h7
-rw-r--r--tools/lib/bpf/libbpf.c177
-rw-r--r--tools/lib/bpf/libbpf.h13
-rw-r--r--tools/lib/find_bit.c25
-rw-r--r--tools/lib/subcmd/parse-options.c14
-rw-r--r--tools/lib/subcmd/parse-options.h2
-rw-r--r--tools/lib/traceevent/Makefile40
-rw-r--r--tools/lib/traceevent/event-parse.c41
-rw-r--r--tools/lib/traceevent/event-parse.h5
-rw-r--r--tools/objtool/arch/x86/decode.c2
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/intel-pt.txt19
-rw-r--r--tools/perf/Documentation/jitdump-specification.txt170
-rw-r--r--tools/perf/Documentation/perf-c2c.txt290
-rw-r--r--tools/perf/Documentation/perf-config.txt35
-rw-r--r--tools/perf/Documentation/perf-kmem.txt7
-rw-r--r--tools/perf/Documentation/perf-record.txt9
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-sched.txt78
-rw-r--r--tools/perf/Documentation/perf-script.txt16
-rw-r--r--tools/perf/Documentation/perf-top.txt1
-rw-r--r--tools/perf/Documentation/perf-trace.txt5
-rw-r--r--tools/perf/MANIFEST1
-rw-r--r--tools/perf/Makefile.config94
-rw-r--r--tools/perf/Makefile.perf150
-rw-r--r--tools/perf/arch/arm/annotate/instructions.c59
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c2
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c62
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c58
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c78
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl7
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c10
-rw-r--r--tools/perf/bench/futex-hash.c10
-rw-r--r--tools/perf/bench/futex-lock-pi.c7
-rw-r--r--tools/perf/bench/futex-requeue.c2
-rw-r--r--tools/perf/bench/futex-wake-parallel.c4
-rw-r--r--tools/perf/bench/futex-wake.c3
-rw-r--r--tools/perf/bench/futex.h4
-rw-r--r--tools/perf/bench/mem-functions.c77
-rw-r--r--tools/perf/builtin-c2c.c2780
-rw-r--r--tools/perf/builtin-config.c137
-rw-r--r--tools/perf/builtin-kmem.c36
-rw-r--r--tools/perf/builtin-record.c11
-rw-r--r--tools/perf/builtin-report.c29
-rw-r--r--tools/perf/builtin-sched.c1118
-rw-r--r--tools/perf/builtin-script.c51
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c68
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/jvmti/Build8
-rw-r--r--tools/perf/jvmti/Makefile89
-rw-r--r--tools/perf/jvmti/jvmti_agent.c38
-rw-r--r--tools/perf/jvmti/libjvmti.c39
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv21
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/cache.json176
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/floating-point.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/frontend.json470
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/marked.json794
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/memory.json212
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json4064
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pipeline.json350
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pmc.json140
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/translation.json176
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/cache.json746
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/floating-point.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/memory.json154
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json364
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json124
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json3198
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json2845
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json774
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json433
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json942
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json649
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json1127
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/frontend.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json433
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json1041
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json294
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json655
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json1329
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json484
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json1077
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json294
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json1329
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json484
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json1123
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json1307
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json180
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json1272
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/floating-point.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json503
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json1307
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json1290
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/floating-point.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/memory.json422
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json1220
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json2305
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/frontend.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json1110
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json435
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv35
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json3229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json881
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json3184
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json881
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json1879
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json445
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1220
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json811
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/frontend.json47
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/memory.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json359
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json4299
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json472
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json2309
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json939
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json272
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json2817
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json758
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json899
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json3233
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json899
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json3225
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/memory.json747
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/pipeline.json905
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json173
-rw-r--r--tools/perf/tests/Build2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/bpf.c8
-rw-r--r--tools/perf/tests/builtin-test.c105
-rw-r--r--tools/perf/tests/clang.c46
-rw-r--r--tools/perf/tests/llvm.c8
-rw-r--r--tools/perf/tests/llvm.h7
-rw-r--r--tools/perf/tests/make6
-rw-r--r--tools/perf/tests/perf-hooks.c48
-rw-r--r--tools/perf/tests/tests.h4
-rw-r--r--tools/perf/ui/browsers/annotate.c26
-rw-r--r--tools/perf/ui/browsers/hists.c75
-rw-r--r--tools/perf/ui/browsers/hists.h1
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/ui/helpline.c10
-rw-r--r--tools/perf/ui/helpline.h1
-rw-r--r--tools/perf/ui/stdio/hist.c35
-rw-r--r--tools/perf/util/Build7
-rw-r--r--tools/perf/util/annotate.c392
-rw-r--r--tools/perf/util/annotate.h23
-rw-r--r--tools/perf/util/bpf-loader.c33
-rw-r--r--tools/perf/util/c++/Build2
-rw-r--r--tools/perf/util/c++/clang-c.h43
-rw-r--r--tools/perf/util/c++/clang-test.cpp62
-rw-r--r--tools/perf/util/c++/clang.cpp195
-rw-r--r--tools/perf/util/c++/clang.h26
-rw-r--r--tools/perf/util/callchain.c232
-rw-r--r--tools/perf/util/callchain.h29
-rw-r--r--tools/perf/util/config.c20
-rw-r--r--tools/perf/util/config.h4
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evsel.c15
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/evsel_fprintf.c25
-rw-r--r--tools/perf/util/genelf.c113
-rw-r--r--tools/perf/util/genelf.h5
-rw-r--r--tools/perf/util/header.c19
-rw-r--r--tools/perf/util/hist.c13
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/intel-bts.c9
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c13
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h6
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c4
-rw-r--r--tools/perf/util/intel-pt.c19
-rw-r--r--tools/perf/util/jitdump.c82
-rw-r--r--tools/perf/util/jitdump.h12
-rw-r--r--tools/perf/util/llvm-utils.c78
-rw-r--r--tools/perf/util/llvm-utils.h6
-rw-r--r--tools/perf/util/machine.c82
-rw-r--r--tools/perf/util/map.c17
-rw-r--r--tools/perf/util/mem-events.c136
-rw-r--r--tools/perf/util/mem-events.h38
-rw-r--r--tools/perf/util/parse-branch-options.c85
-rw-r--r--tools/perf/util/parse-branch-options.h3
-rw-r--r--tools/perf/util/parse-events.c15
-rw-r--r--tools/perf/util/perf-hooks-list.h3
-rw-r--r--tools/perf/util/perf-hooks.c88
-rw-r--r--tools/perf/util/perf-hooks.h39
-rw-r--r--tools/perf/util/pmu.c14
-rw-r--r--tools/perf/util/probe-event.h2
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/quote.c2
-rw-r--r--tools/perf/util/session.c10
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/string.c21
-rw-r--r--tools/perf/util/symbol.c10
-rw-r--r--tools/perf/util/symbol.h11
-rw-r--r--tools/perf/util/symbol_fprintf.c11
-rw-r--r--tools/perf/util/time-utils.c119
-rw-r--r--tools/perf/util/time-utils.h14
-rw-r--r--tools/perf/util/trace-event-scripting.c39
-rw-r--r--tools/perf/util/unwind-libunwind-local.c4
-rw-r--r--tools/perf/util/util-cxx.h26
-rw-r--r--tools/perf/util/util.c88
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/perf/util/values.c81
-rw-r--r--tools/perf/util/values.h4
-rw-r--r--tools/power/acpi/Makefile.config23
-rw-r--r--tools/power/acpi/Makefile.rules40
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c13
-rw-r--r--tools/power/acpi/tools/acpidbg/Makefile4
-rw-r--r--tools/power/acpi/tools/acpidbg/acpidbg.c8
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile12
-rw-r--r--tools/power/acpi/tools/ec/ec_access.c2
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c7
-rw-r--r--tools/spi/spidev_test.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl8
-rw-r--r--tools/testing/nvdimm/Kbuild1
-rw-r--r--tools/testing/nvdimm/test/iomap.c23
-rw-r--r--tools/testing/nvdimm/test/nfit.c236
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h8
-rw-r--r--tools/testing/radix-tree/Makefile15
-rw-r--r--tools/testing/radix-tree/benchmark.c98
-rw-r--r--tools/testing/radix-tree/find_next_bit.c57
-rw-r--r--tools/testing/radix-tree/iteration_check.c123
-rw-r--r--tools/testing/radix-tree/linux.c67
-rw-r--r--tools/testing/radix-tree/linux/bitops.h40
-rw-r--r--tools/testing/radix-tree/linux/bitops/non-atomic.h13
-rw-r--r--tools/testing/radix-tree/linux/bug.h2
-rw-r--r--tools/testing/radix-tree/linux/cpu.h22
-rw-r--r--tools/testing/radix-tree/linux/gfp.h22
-rw-r--r--tools/testing/radix-tree/linux/kernel.h18
-rw-r--r--tools/testing/radix-tree/linux/notifier.h8
-rw-r--r--tools/testing/radix-tree/linux/preempt.h6
-rw-r--r--tools/testing/radix-tree/linux/slab.h11
-rw-r--r--tools/testing/radix-tree/linux/types.h2
-rw-r--r--tools/testing/radix-tree/main.c77
-rw-r--r--tools/testing/radix-tree/multiorder.c328
-rw-r--r--tools/testing/radix-tree/rcupdate.c86
-rw-r--r--tools/testing/radix-tree/regression2.c3
-rw-r--r--tools/testing/radix-tree/regression3.c8
-rw-r--r--tools/testing/radix-tree/tag_check.c12
-rw-r--r--tools/testing/radix-tree/test.c92
-rw-r--r--tools/testing/radix-tree/test.h21
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/bpf/.gitignore3
-rw-r--r--tools/testing/selftests/bpf/Makefile13
-rw-r--r--tools/testing/selftests/bpf/bpf_sys.h108
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h38
-rw-r--r--tools/testing/selftests/bpf/config5
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh39
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c587
-rw-r--r--tools/testing/selftests/bpf/test_maps.c526
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c (renamed from samples/bpf/test_verifier.c)1030
-rw-r--r--tools/testing/selftests/breakpoints/Makefile5
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c236
-rw-r--r--tools/testing/selftests/futex/README2
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile11
-rw-r--r--tools/testing/selftests/net/reuseport_bpf_numa.c255
-rw-r--r--tools/testing/selftests/rcutorture/.gitignore2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh5
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_vdso.c123
-rw-r--r--tools/usb/usbip/.gitignore4
-rw-r--r--tools/usb/usbip/src/usbipd.c7
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/main.c20
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/virtio/ringtest/noring.c6
-rw-r--r--tools/virtio/ringtest/ptr_ring.c22
-rw-r--r--tools/virtio/ringtest/ring.c18
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c64
-rw-r--r--usr/Kconfig127
-rw-r--r--usr/Makefile20
-rw-r--r--virt/kvm/arm/arch_timer.c17
-rw-r--r--virt/kvm/arm/pmu.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c11
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c41
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h14
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c6
-rw-r--r--virt/kvm/arm/vgic/vgic.c12
-rw-r--r--virt/kvm/arm/vgic/vgic.h26
-rw-r--r--virt/kvm/async_pf.c23
-rw-r--r--virt/kvm/eventfd.c22
-rw-r--r--virt/kvm/kvm_main.c120
-rw-r--r--virt/kvm/vfio.c18
8051 files changed, 586685 insertions, 175087 deletions
diff --git a/CREDITS b/CREDITS
index 837367624e45..10a9eee807b6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,7 +9,7 @@
Linus
----------
-M: Matt Mackal
+N: Matt Mackal
E: mpm@selenic.com
D: SLOB slab allocator
@@ -1910,7 +1910,7 @@ S: Ra'annana, Israel
N: Andi Kleen
E: andi@firstfloor.org
-U: http://www.halobates.de
+W: http://www.halobates.de
D: network, x86, NUMA, various hacks
S: Schwalbenstr. 96
S: 85551 Ottobrunn
@@ -2089,8 +2089,8 @@ D: ST Microelectronics SPEAr13xx PCI host bridge driver
D: Synopsys Designware PCI host bridge driver
N: Gabor Kuti
-M: seasons@falcon.sch.bme.hu
-M: seasons@makosteszta.sote.hu
+E: seasons@falcon.sch.bme.hu
+E: seasons@makosteszta.sote.hu
D: Original author of software suspend
N: Jaroslav Kysela
@@ -2775,6 +2775,10 @@ S: C/ Mieses 20, 9-B
S: Valladolid 47009
S: Spain
+N: Peter Oruba
+D: AMD Microcode loader driver
+S: Germany
+
N: Jens Osterkamp
E: jens@de.ibm.com
D: Maintainer of Spidernet network driver for Cell
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 3acc4f1a6f84..5bd4b07c2f90 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -14,13 +14,8 @@ Following translations are available on the WWW:
- this file.
ABI/
- info on kernel <-> userspace ABI and relative interface stability.
-
-BUG-HUNTING
- - brute force method of doing binary search of patches to find bug.
-Changes
- - list of changes that break older software packages.
CodingStyle
- - how the maintainers expect the C code in the kernel to look.
+ - nothing here, just a pointer to process/coding-style.rst.
DMA-API.txt
- DMA API, pci_ API & extensions for non-consistent memory machines.
DMA-API-HOWTO.txt
@@ -33,8 +28,6 @@ DocBook/
- directory with DocBook templates etc. for kernel documentation.
EDID/
- directory with info on customizing EDID for broken gfx/displays.
-HOWTO
- - the process and procedures of how to do Linux kernel development.
IPMI.txt
- info on Linux Intelligent Platform Management Interface (IPMI) Driver.
IRQ-affinity.txt
@@ -46,62 +39,43 @@ IRQ.txt
Intel-IOMMU.txt
- basic info on the Intel IOMMU virtualization support.
Makefile
- - This file does nothing. Removing it breaks make htmldocs and
- make distclean.
-ManagementStyle
- - how to (attempt to) manage kernel hackers.
+ - It's not of interest for those who aren't touching the build system.
+Makefile.sphinx
+ - It's not of interest for those who aren't touching the build system.
+PCI/
+ - info related to PCI drivers.
RCU/
- directory with info on RCU (read-copy update).
SAK.txt
- info on Secure Attention Keys.
SM501.txt
- Silicon Motion SM501 multimedia companion chip
-SecurityBugs
- - procedure for reporting security bugs found in the kernel.
-SubmitChecklist
- - Linux kernel patch submission checklist.
-SubmittingDrivers
- - procedure to get a new driver source included into the kernel tree.
SubmittingPatches
- - procedure to get a source patch included into the kernel tree.
-VGA-softcursor.txt
- - how to change your VGA cursor from a blinking underscore.
+	- nothing here, just a pointer to process/submitting-patches.rst.
accounting/
- documentation on accounting and taskstats.
acpi/
- info on ACPI-specific hooks in the kernel.
+admin-guide/
+ - info related to Linux users and system admins.
aoe/
- description of AoE (ATA over Ethernet) along with config examples.
-applying-patches.txt
- - description of various trees and how to apply their patches.
arm/
- directory with info about Linux on the ARM architecture.
arm64/
- directory with info about Linux on the 64 bit ARM architecture.
-assoc_array.txt
- - generic associative array intro.
-atomic_ops.txt
- - semantics and behavior of atomic and bitmask operations.
auxdisplay/
- misc. LCD driver documentation (cfag12864b, ks0108).
backlight/
- directory with info on controlling backlights in flat panel displays
-bad_memory.txt
- - how to use kernel parameters to exclude bad RAM regions.
-basic_profiling.txt
- - basic instructions for those who wants to profile Linux kernel.
bcache.txt
- Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
-binfmt_misc.txt
- - info on the kernel support for extra binary formats.
blackfin/
- directory with documentation for the Blackfin arch.
block/
- info on the Block I/O (BIO) layer.
blockdev/
- info on block devices & drivers
-braille-console.txt
- - info on how to use serial devices for Braille support.
bt8xxgpio.txt
- info on how to modify a bt8xx video card for GPIO usage.
btmrvl.txt
@@ -114,18 +88,24 @@ cachetlb.txt
- describes the cache/TLB flushing interfaces Linux uses.
cdrom/
- directory with information on the CD-ROM drivers that Linux has.
-cgroups/
- - cgroups features, including cpusets and memory controller.
+cgroup-v1/
+ - cgroups v1 features, including cpusets and memory controller.
+cgroup-v2.txt
+ - cgroups v2 features, including cpusets and memory controller.
circular-buffers.txt
- how to make use of the existing circular buffer infrastructure
clk.txt
- info on the common clock framework
-coccinelle.txt
- - info on how to get and use the Coccinelle code checking tool.
+cma/
+	- Contiguous Memory Allocator (CMA) debugfs interface.
+conf.py
+ - It's not of interest for those who aren't touching the build system.
connector/
- docs on the netlink based userspace<->kernel space communication mod.
console/
- documentation on Linux console drivers.
+core-api/
+ - documentation on kernel core components.
cpu-freq/
- info on CPU frequency and voltage scaling.
cpu-hotplug.txt
@@ -150,26 +130,26 @@ debugging-via-ohci1394.txt
- how to use firewire like a hardware debugger memory reader.
dell_rbu.txt
- document demonstrating the use of the Dell Remote BIOS Update driver.
-development-process/
- - how to work with the mainline kernel development process.
+dev-tools/
+ - directory with info on development tools for the kernel.
device-mapper/
- directory with info on Device Mapper.
-devices.txt
- - plain ASCII listing of all the nodes in /dev/ with major minor #'s.
+dmaengine/
+ - the DMA engine and controller API guides.
devicetree/
- directory with info on device tree files used by OF/PowerPC/ARM
digsig.txt
-info on the Digital Signature Verification API
dma-buf-sharing.txt
- the DMA Buffer Sharing API Guide
+docutils.conf
+ - nothing here. Just a configuration file for docutils.
dontdiff
- file containing a list of files that should never be diff'ed.
+driver-api/
+ - the Linux driver implementer's API guide.
driver-model/
- directory with info about Linux driver model.
-dvb/
- - info on Linux Digital Video Broadcast (DVB) subsystem.
-dynamic-debug-howto.txt
- - how to use the dynamic debug (dyndbg) feature.
early-userspace/
- info about initramfs, klibc, and userspace early during boot.
edac.txt
@@ -178,14 +158,16 @@ efi-stub.txt
- How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
eisa.txt
- info on EISA bus support.
-email-clients.txt
- - info on how to use e-mail to send un-mangled (git) patches.
extcon/
- directory with porting guide for Android kernel switch driver.
+isa.txt
+	- info on ISA bus support.
fault-injection/
- dir with docs about the fault injection capabilities infrastructure.
fb/
- directory with info on the frame buffer graphics abstraction layer.
+features/
+ - status of feature implementation on different architectures.
filesystems/
- info on the vfs and the various filesystems that Linux supports.
firmware_class/
@@ -194,20 +176,22 @@ flexible-arrays.txt
- how to make use of flexible sized arrays in linux
fmc/
- information about the FMC bus abstraction
+fpga/
+ - FPGA Manager Core.
frv/
- Fujitsu FR-V Linux documentation.
futex-requeue-pi.txt
- info on requeueing of tasks from a non-PI futex to a PI futex
-gcov.txt
- - use of GCC's coverage testing tool "gcov" with the Linux kernel
+gcc-plugins.txt
+ - GCC plugin infrastructure.
gpio/
- gpio related documentation
+gpu/
+ - directory with information on GPU driver developer's guide.
hid/
- directory with information on human interface devices
highuid.txt
- notes on the change from 16 bit to 32 bit user/group IDs.
-hsi.txt
- - HSI subsystem overview.
hwspinlock.txt
- hardware spinlock provides hardware assistance for synchronization
timers/
@@ -218,18 +202,18 @@ hwmon/
- directory with docs on various hardware monitoring drivers.
i2c/
- directory with info about the I2C bus/protocol (2 wire, kHz speed).
-i2o/
- - directory with info about the Linux I2O subsystem.
x86/i386/
- directory with info about Linux on Intel 32 bit architecture.
ia64/
- directory with info about Linux on Intel 64 bit architecture.
+ide/
+ - Information regarding the Enhanced IDE drive.
+iio/
+ - info on industrial IIO configfs support.
+index.rst
+ - main index for the documentation at ReST format.
infiniband/
- directory with documents concerning Linux InfiniBand support.
-init.txt
- - what to do when the kernel can't find the 1st process to run.
-initrd.txt
- - how to use the RAM disk as an initial/temporary root filesystem.
input/
- info on Linux input device support.
intel_txt.txt
@@ -248,28 +232,16 @@ isapnp.txt
- info on Linux ISA Plug & Play support.
isdn/
- directory with info on the Linux ISDN support, and supported cards.
-java.txt
- - info on the in-kernel binary support for Java(tm).
-ja_JP/
- - directory with Japanese translations of various documents
kbuild/
- directory with info about the kernel build process.
+kernel-doc-nano-HOWTO.txt
+ - outdated info about kernel-doc documentation.
kdump/
- directory with mini HowTo on getting the crash dump code to work.
-kernel-docs.txt
- - listing of various WWW + books that document kernel internals.
-kernel-documentation.rst
+doc-guide/
- how to write and format reStructuredText kernel documentation
-kernel-parameters.txt
- - summary listing of command line / boot prompt args for the kernel.
kernel-per-CPU-kthreads.txt
- List of all per-CPU kthreads and how they introduce jitter.
-kmemcheck.txt
- - info on dynamic checker that detects uses of uninitialized memory.
-kmemleak.txt
- - info on how to make use of the kernel memory leak detection system
-ko_KR/
- - directory with Korean translations of various documents
kobject.txt
- info of the kobject infrastructure of the Linux kernel.
kprobes.txt
@@ -284,8 +256,8 @@ ldm.txt
- a brief description of LDM (Windows Dynamic Disks).
leds/
- directory with info about LED handling under Linux.
-local_ops.txt
- - semantics and behavior of local atomic operations.
+livepatch/
+ - info on kernel live patching.
locking/
- directory with info about kernel locking primitives
lockup-watchdogs.txt
@@ -298,22 +270,24 @@ lzo.txt
- kernel LZO decompressor input formats
m68k/
- directory with info about Linux on Motorola 68k architecture.
-magic-number.txt
- - list of magic numbers used to mark/protect kernel data structures.
mailbox.txt
- How to write drivers for the common mailbox framework (IPC).
-md.txt
- - info on boot arguments for the multiple devices driver.
-media-framework.txt
- - info on media framework, its data structures, functions and usage.
+md-cluster.txt
+ - info on shared-device RAID MD cluster.
+media/
+ - info on media drivers: uAPI, kAPI and driver documentation.
memory-barriers.txt
- info on Linux kernel memory barriers.
memory-devices/
- directory with info on parts like the Texas Instruments EMIF driver
memory-hotplug.txt
- Hotpluggable memory support, how to use and current status.
+men-chameleon-bus.txt
+ - info on MEN chameleon bus.
metag/
- directory with info about Linux on Meta architecture.
+mic/
+ - Intel Many Integrated Core (MIC) architecture device driver.
mips/
- directory with info about Linux on MIPS architecture.
misc-devices/
@@ -322,12 +296,8 @@ mmc/
- directory with info about the MMC subsystem
mn10300/
- directory with info about the mn10300 architecture port
-module-signing.txt
- - Kernel module signing for increased security when loading modules.
mtd/
- directory with info about memory technology devices (flash)
-mono.txt
- - how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
namespaces/
- directory with various information about namespaces
netlabel/
@@ -336,30 +306,42 @@ networking/
- directory with info on various aspects of networking with Linux.
nfc/
- directory relating info about Near Field Communications support.
+nios2/
+ - Linux on the Nios II architecture.
nommu-mmap.txt
- documentation about no-mmu memory mapping support.
numastat.txt
- info on how to read Numa policy hit/miss statistics in sysfs.
-oops-tracing.txt
- - how to decode those nasty internal kernel error dump messages.
+ntb.txt
+ - info on Non-Transparent Bridge (NTB) drivers.
+nvdimm/
+ - info on non-volatile devices.
+nvmem/
+ - info on non volatile memory framework.
+output/
+ - default directory where html/LaTeX/pdf files will be written.
padata.txt
- An introduction to the "padata" parallel execution API
parisc/
- directory with info on using Linux on PA-RISC architecture.
-parport.txt
- - how to use the parallel-port driver.
parport-lowlevel.txt
- description and usage of the low level parallel port functions.
pcmcia/
- info on the Linux PCMCIA driver.
percpu-rw-semaphore.txt
- RCU based read-write semaphore optimized for locking for reading
+perf/
+ - info about the APM X-Gene SoC Performance Monitoring Unit (PMU).
+phy/
+	- info on Samsung USB 2.0 PHY adaptation layer.
phy.txt
- Description of the generic PHY framework.
pi-futex.txt
- documentation on lightweight priority inheritance futexes.
pinctrl.txt
- info on pinctrl subsystem and the PINMUX/PINCONF and drivers
+platform/
+	- list of hardware supported by the Compal and Dell laptop drivers.
pnp.txt
- Linux Plug and Play documentation.
power/
@@ -372,14 +354,16 @@ preempt-locking.txt
- info on locking under a preemptive kernel.
printk-formats.txt
- how to get printk format specifiers right
+process/
+ - how to work with the mainline kernel development process.
pps/
- directory with information on the pulse-per-second support
+pti/
+ - directory with info on Intel MID PTI.
ptp/
- directory with info on support for IEEE 1588 PTP clocks in Linux.
pwm.txt
- info on the pulse width modulation driver subsystem
-ramoops.txt
- - documentation of the ramoops oops/panic logging module.
rapidio/
- directory with info on RapidIO packet-based fabric interconnect
rbtree.txt
@@ -406,8 +390,6 @@ security/
- directory that contains security-related info
serial/
- directory with info on the low level serial API.
-serial-console.txt
- - how to set up Linux with a serial line console as the default.
sgi-ioc4.txt
- description of the SGI IOC4 PCI (multi function) device.
sh/
@@ -416,24 +398,20 @@ smsc_ece1099.txt
-info on the smsc Keyboard Scan Expansion/GPIO Expansion device.
sound/
- directory with info on sound card support.
-sparse.txt
- - info on how to obtain and use the sparse tool for typechecking.
spi/
- overview of Linux kernel Serial Peripheral Interface (SPI) support.
-stable_api_nonsense.txt
- - info on why the kernel does not have a stable in-kernel api or abi.
-stable_kernel_rules.txt
- - rules and procedures for the -stable kernel releases.
+sphinx/
+ - no documentation here, just files required by Sphinx toolchain.
+sphinx-static/
+ - no documentation here, just files required by Sphinx toolchain.
static-keys.txt
- info on how static keys allow debug code in hotpaths via patching
svga.txt
- short guide on selecting video modes at boot via VGA BIOS.
-sysfs-rules.txt
- - How not to use sysfs.
+sync_file.txt
+ - Sync file API guide.
sysctl/
- directory with info on the /proc/sys/* files.
-sysrq.txt
- - info on the magic SysRq key.
target/
- directory with info on generating TCM v4 fabric .ko modules
this_cpu_ops.txt
@@ -442,39 +420,29 @@ thermal/
- directory with information on managing thermal issues (CPU/temp)
trace/
- directory with info on tracing technologies within linux
+translations/
+ - translations of this document from English to another language
unaligned-memory-access.txt
- info on how to avoid arch breaking unaligned memory access in code.
-unicode.txt
- - info on the Unicode character/font mapping used in Linux.
unshare.txt
- description of the Linux unshare system call.
usb/
- directory with info regarding the Universal Serial Bus.
-vDSO/
- - directory with info regarding virtual dynamic shared objects
vfio.txt
- info on Virtual Function I/O used in guest/hypervisor instances.
-vgaarbiter.txt
- - info on enable/disable the legacy decoding on different VGA devices
video-output.txt
- sysfs class driver interface to enable/disable a video output device.
-video4linux/
- - directory with info regarding video/TV/radio cards and linux.
virtual/
- directory with information on the various linux virtualizations.
vm/
- directory with info on the Linux vm code.
-vme_api.txt
- - file relating info on the VME bus API in linux
-volatile-considered-harmful.txt
- - Why the "volatile" type class should not be used
w1/
- directory with documents regarding the 1-wire (w1) subsystem.
watchdog/
- how to auto-reboot Linux if it has "fallen and can't get up". ;-)
wimax/
- directory with info about Intel Wireless Wimax Connections
-workqueue.txt
+core-api/workqueue.rst
- information on the Concurrency Managed Workqueue implementation
x86/x86_64/
- directory with info on Linux support for AMD x86-64 (Hammer) machines.
@@ -484,7 +452,5 @@ xtensa/
- directory with documents relating to arch/xtensa port/implementation
xz.txt
- how to make use of the XZ data compression within linux kernel
-zh_CN/
- - directory with Chinese translations of various documents
zorro.txt
- info on writing drivers for Zorro bus devices found on Amigas.
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index 1fafc4b0753b..3121029dce21 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -84,4 +84,4 @@ stable:
- Kernel-internal symbols. Do not rely on the presence, absence, location, or
type of any kernel symbol, either in System.map files or the kernel binary
- itself. See Documentation/stable_api_nonsense.txt.
+ itself. See Documentation/process/stable-api-nonsense.rst.
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index df449d79b563..35c457f8ce73 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -8,3 +8,17 @@ Description:
Any device associated with a device-tree node will have
an of_path symlink pointing to the corresponding device
node in /sys/firmware/devicetree/
+
+What: /sys/devices/*/devspec
+Date: October 2016
+Contact: Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+ If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
+
+What: /sys/devices/*/obppath
+Date: October 2016
+Contact: Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+ If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
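Both new attributes are plain read-only sysfs files, so nothing beyond an ordinary file read is needed to consume them. A minimal Python sketch of probing them follows; the device path used here is only an illustration, and on kernels built without CONFIG_OF neither file exists:

    import os

    # Hypothetical device directory; any node under /sys/devices that is
    # backed by a device-tree node exposes the same two attributes.
    dev = "/sys/devices/platform/soc"

    for attr in ("devspec", "obppath"):
        path = os.path.join(dev, attr)
        if not os.path.exists(path):        # absent unless CONFIG_OF is enabled
            print(attr, "not provided")
            continue
        with open(path) as f:               # file content is the device node name
            print(attr, "=", f.read().strip())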
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 71d184dbb70d..2da04ce6aeef 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -235,3 +235,45 @@ Description:
write_same_max_bytes is 0, write same is not supported
by the device.
+What: /sys/block/<disk>/queue/write_zeroes_max_bytes
+Date: November 2016
+Contact: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Description:
+ Devices that support write zeroes operation in which a
+ single request can be issued to zero out the range of
+ contiguous blocks on storage without having any payload
+ in the request. This can be used to optimize writing zeroes
+ to the devices. write_zeroes_max_bytes indicates how many
+ bytes can be written in a single write zeroes command. If
+ write_zeroes_max_bytes is 0, write zeroes is not supported
+ by the device.
+
+What: /sys/block/<disk>/queue/zoned
+Date: September 2016
+Contact: Damien Le Moal <damien.lemoal@hgst.com>
+Description:
+ zoned indicates if the device is a zoned block device
+ and the zone model of the device if it is indeed zoned.
+ The possible values indicated by zoned are "none" for
+ regular block devices and "host-aware" or "host-managed"
+ for zoned block devices. The characteristics of
+ host-aware and host-managed zoned block devices are
+ described in the ZBC (Zoned Block Commands) and ZAC
+ (Zoned Device ATA Command Set) standards. These standards
+ also define the "drive-managed" zone model. However,
+ since drive-managed zoned block devices do not support
+ zone commands, they will be treated as regular block
+ devices and zoned will report "none".
+
+What: /sys/block/<disk>/queue/chunk_sectors
+Date: September 2016
+Contact: Hannes Reinecke <hare@suse.com>
+Description:
+ chunk_sectors has different meaning depending on the type
+ of the disk. For a RAID device (dm-raid), chunk_sectors
+ indicates the size in 512B sectors of the RAID volume
+ stripe segment. For a zoned block device, either
+		host-aware or host-managed, chunk_sectors indicates the
+		size in 512B sectors of the zones of the device, with
+		the possible exception of the last zone of the device,
+		which may be smaller.
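
As a quick illustration of the queue attributes added above (a hedged sketch; the disk name sda is only an example and the reported values are device-dependent):

    $ cat /sys/block/sda/queue/write_zeroes_max_bytes
    $ cat /sys/block/sda/queue/zoned
    $ cat /sys/block/sda/queue/chunk_sectors
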
diff --git a/Documentation/ABI/testing/sysfs-bus-fsl-mc b/Documentation/ABI/testing/sysfs-bus-fsl-mc
new file mode 100644
index 000000000000..80256b8b4f26
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsl-mc
@@ -0,0 +1,21 @@
+What: /sys/bus/fsl-mc/drivers/.../bind
+Date: December 2016
+Contact: stuart.yoder@nxp.com
+Description:
+ Writing a device location to this file will cause
+ the driver to attempt to bind to the device found at
+ this location. The format for the location is Object.Id
+ and is the same as found in /sys/bus/fsl-mc/devices/.
+ For example:
+ # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind
+
+What: /sys/bus/fsl-mc/drivers/.../unbind
+Date: December 2016
+Contact: stuart.yoder@nxp.com
+Description:
+ Writing a device location to this file will cause the
+ driver to attempt to unbind from the device found at
+ this location. The format for the location is Object.Id
+ and is the same as found in /sys/bus/fsl-mc/devices/.
+ For example:
+ # echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index fee35c00cc4e..b8f220f978dd 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -329,6 +329,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
What: /sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -1579,3 +1580,20 @@ Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no offset etc.) electric conductivity reading that
can be processed to siemens per meter.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_raw
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw counter device counts from channel Y. For quadrature
+ counters, multiplication by an available [Y]_scale results in
+ the counts of a single quadrature signal phase from channel Y.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_raw
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw counter device index value from channel Y. This attribute
+ provides an absolute positional reference (e.g. a pulse once per
+ revolution) which may be used to home positional systems as
+ required.
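
A hedged usage sketch for the new count/index channels; the device number 0 and channel number 0 are placeholders, and a scaled count is obtained by multiplying the raw value by the matching _scale attribute:

    $ cat /sys/bus/iio/devices/iio:device0/in_count0_raw
    $ cat /sys/bus/iio/devices/iio:device0/in_count0_scale
    $ cat /sys/bus/iio/devices/iio:device0/in_index0_raw
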
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
new file mode 100644
index 000000000000..2071f9bcfaa5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
@@ -0,0 +1,36 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_invert
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The DAC is used to find the peak level of an alternating
+ voltage input signal by a binary search using the output
+ of a comparator wired to an interrupt pin. Like so:
+ _
+ | \
+ input +------>-------|+ \
+ | \
+ .-------. | }---.
+ | | | / |
+ | dac|-->--|- / |
+ | | |_/ |
+ | | |
+ | | |
+ | irq|------<-------'
+ | |
+ '-------'
+ The boolean invert attribute (0/1) should be set when the
+ input signal is centered around the maximum value of the
+ dac instead of zero. The envelope detector will search
+ from below in this case and will also invert the result.
+ The edge/level of the interrupt is also switched to its
+ opposite value.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_compare_interval
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ Number of milliseconds to wait for the comparator in each
+ step of the binary search for the input peak level. Needs
+ to relate to the frequency of the input signal.
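
A minimal sketch of driving the two attributes described above, assuming the envelope detector is iio:device0 and that a 20 ms comparator wait suits the input signal frequency:

    # echo 1 > /sys/bus/iio/devices/iio:device0/in_altvoltage0_invert
    # echo 20 > /sys/bus/iio/devices/iio:device0/in_altvoltage0_compare_interval
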
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
new file mode 100644
index 000000000000..ba676520b953
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -0,0 +1,125 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available
+What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+What: /sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available
+What: /sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+		The discrete set of available values for the respective
+		counter configuration is listed in this file.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether the counter for
+ channel Y is counting up or down.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count mode for channel Y. Four count modes are available:
+ normal, range limit, non-recycle, and modulo-n. The preset value
+ for channel Y is used by the count mode where required.
+
+ Normal:
+ Counting is continuous in either direction.
+
+ Range Limit:
+ An upper or lower limit is set, mimicking limit switches
+ in the mechanical counterpart. The upper limit is set to
+ the preset value, while the lower limit is set to 0. The
+ counter freezes at count = preset when counting up, and
+ at count = 0 when counting down. At either of these
+ limits, the counting is resumed only when the count
+ direction is reversed.
+
+ Non-recycle:
+ Counter is disabled whenever a 24-bit count overflow or
+ underflow takes place. The counter is re-enabled when a
+ new count value is loaded to the counter via a preset
+ operation or write to raw.
+
+ Modulo-N:
+ A count boundary is set between 0 and the preset value.
+ The counter is reset to 0 at count = preset when
+ counting up, while the counter is set to the preset
+ value at count = 0 when counting down; the counter does
+		not freeze at the boundary points, but counts
+ continuously throughout.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether excessive noise is
+ present at the channel Y count inputs in quadrature clock mode;
+ irrelevant in non-quadrature clock mode.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_preset
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the counter device supports preset registers, the preset
+ count for channel Y is provided by this attribute.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure channel Y counter for non-quadrature or quadrature
+ clock mode. Selecting non-quadrature clock mode will disable
+ synchronous load mode. In quadrature clock mode, the channel Y
+ scale attribute selects the encoder phase division (scale of 1
+ selects full-cycle, scale of 0.5 selects half-cycle, scale of
+ 0.25 selects quarter-cycle) processed by the channel Y counter.
+
+ Non-quadrature:
+ The filter and decoder circuit are bypassed. Encoder A
+ input serves as the count input and B as the UP/DOWN
+ direction control input, with B = 1 selecting UP Count
+ mode and B = 0 selecting Down Count mode.
+
+ Quadrature:
+ Encoder A and B inputs are digitally filtered and
+ decoded for UP/DN clock.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Whether to set channel Y counter with channel Y preset value
+ when channel Y index input is active, or continuously count.
+ Valid attribute values are boolean.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Active level of channel Y index input; irrelevant in
+ non-synchronous load mode.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
+KernelVersion: 4.9
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure channel Y counter for non-synchronous or synchronous
+ load mode. Synchronous load mode cannot be selected in
+ non-quadrature clock mode.
+
+ Non-synchronous:
+ A logic low level is the active level at this index
+ input. The index function (as enabled via
+ set_to_preset_on_index) is performed directly on the
+ active level of the index input.
+
+ Synchronous:
+ Intended for interfacing with encoder Index output in
+ quadrature clock mode. The active level is configured
+ via index_polarity. The index function (as enabled via
+ set_to_preset_on_index) is performed synchronously with
+ the quadrature clock on the active level of the index
+ input.
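
A hedged configuration sketch for the counter attributes documented above; the device and channel numbers and the chosen values are examples only, and the accepted strings should be taken from the corresponding *_available files:

    $ cat /sys/bus/iio/devices/iio:device0/in_count_quadrature_mode_available
    # echo quadrature > /sys/bus/iio/devices/iio:device0/in_count0_quadrature_mode
    # echo 0.25 > /sys/bus/iio/devices/iio:device0/in_count0_scale
    # echo 500 > /sys/bus/iio/devices/iio:device0/in_count0_preset
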
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
new file mode 100644
index 000000000000..297b9720f024
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
@@ -0,0 +1,18 @@
+What: /sys/bus/iio/devices/iio:deviceX/calibrate
+Date: July 2015
+KernelVersion: 4.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Writing '1' will perform a FOC (Fast Online Calibration). The
+ corresponding calibration offsets can be read from *_calibbias
+ entries.
+
+What: /sys/bus/iio/devices/iio:deviceX/location
+Date: July 2015
+KernelVersion: 4.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute returns a string with the physical location where
+ the motion sensor is placed. For example, in a laptop a motion
+ sensor can be located on the base or on the lid. Current valid
+ values are 'base' and 'lid'.
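
A short usage sketch for the cros-ec sensor attributes above (iio:device0 and the accelerometer channel name are placeholders):

    # echo 1 > /sys/bus/iio/devices/iio:device0/calibrate
    $ cat /sys/bus/iio/devices/iio:device0/in_accel_x_calibbias
    $ cat /sys/bus/iio/devices/iio:device0/location
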
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
new file mode 100644
index 000000000000..580e93f373f6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
@@ -0,0 +1,8 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw_available
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The range of available values represented as the minimum value,
+ the step and the maximum value, all enclosed in square brackets.
+ Example: [0 1 256]
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
new file mode 100644
index 000000000000..f0ce0a0476ea
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
@@ -0,0 +1,19 @@
+What: /sys/bus/iio/devices/iio:deviceX/proximity_on_chip_ambient_infrared_suppression
+Date: January 2011
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ infrared suppression:
+
+ Scheme 0, makes full n (4, 8, 12, 16) bits (unsigned) proximity
+ detection. The range of Scheme 0 proximity count is from 0 to
+ 2^n. Logic 1 of this bit, Scheme 1, makes n-1 (3, 7, 11, 15)
+ bits (2's complementary) proximity_less_ambient detection. The
+ range of Scheme 1 proximity count is from -2^(n-1) to 2^(n-1).
+ The sign bit is extended for resolutions less than 16. While
+ Scheme 0 has wider dynamic range, Scheme 1 proximity detection
+ is less affected by the ambient IR noise variation.
+
+ 0 Sensing IR from LED and ambient
+ 1 Sensing IR from LED with ambient IR rejection
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
index 660781df409f..a2e19964e87e 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
@@ -1,18 +1,18 @@
-What: /sys/bus/iio/devices/device[n]/lux_table
+What: /sys/bus/iio/devices/device[n]/in_illuminance_calibrate
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
+ This property causes an internal calibration of the als gain trim
+ value which is later used in calculating illuminance in lux.
-What: /sys/bus/iio/devices/device[n]/illuminance0_calibrate
+What: /sys/bus/iio/devices/device[n]/in_illuminance_lux_table
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- This property causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
+ This property gets/sets the table of coefficients
+ used in calculating illuminance in lux.
-What: /sys/bus/iio/devices/device[n]/illuminance0_input_target
+What: /sys/bus/iio/devices/device[n]/in_illuminance_input_target
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
new file mode 100644
index 000000000000..2a91fbe394fc
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
@@ -0,0 +1,8 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_resistance_raw_available
+Date: October 2016
+KernelVersion: 4.9
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The range of available values represented as the minimum value,
+ the step and the maximum value, all enclosed in square brackets.
+ Example: [0 1 256]
diff --git a/Documentation/ABI/testing/sysfs-bus-vfio-mdev b/Documentation/ABI/testing/sysfs-bus-vfio-mdev
new file mode 100644
index 000000000000..452dbe39270e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-vfio-mdev
@@ -0,0 +1,111 @@
+What: /sys/.../<device>/mdev_supported_types/
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		This directory contains a list of directories of currently
+		supported mediated device types and their details for
+		<device>. Supported type attributes are defined by the
+		vendor driver that registers with the mediated device
+		framework. Each supported type is a directory whose name
+		is created by adding the device driver string as a prefix
+		to the string provided by the vendor driver.
+
+What: /sys/.../<device>/mdev_supported_types/<type-id>/
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		This directory gives details of the supported type, such as
+		name, description, available_instances, device_api, etc.
+		'device_api' and 'available_instances' are mandatory
+		attributes to be provided by the vendor driver. 'name',
+		'description' and other vendor driver specific attributes
+		are optional.
+
+What: /sys/.../mdev_supported_types/<type-id>/create
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		Writing a UUID to this file will create a mediated device
+		of type <type-id> for the parent device <device>. This is
+		a write-only file.
+ For example:
+ # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
+ /sys/devices/foo/mdev_supported_types/foo-1/create
+
+What: /sys/.../mdev_supported_types/<type-id>/devices/
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		This directory contains symbolic links pointing to the
+		sysfs entries of the mdev devices created from this
+		<type-id>.
+
+What: /sys/.../mdev_supported_types/<type-id>/available_instances
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		Reading this attribute will show the number of mediated
+		devices of type <type-id> that can be created. This is a
+		read-only file.
+Users:
+		Userspace applications interested in creating mediated
+		devices of that type. A userspace application should check
+		the number of available instances before creating a
+		mediated device of this type.
+
+What: /sys/.../mdev_supported_types/<type-id>/device_api
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		Reading this attribute will show the VFIO device API
+		supported by this type. For example, "vfio-pci" for a PCI
+		device, or "vfio-platform" for a platform device.
+
+What: /sys/.../mdev_supported_types/<type-id>/name
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		Reading this attribute will show the human-readable name of
+		the mediated device that will be created for type <type-id>.
+		This is an optional attribute. For example: "Grid M60-0Q"
+Users:
+		Userspace applications interested in knowing the name of
+		a particular <type-id>, which can help in understanding the
+		type of mediated device.
+
+What: /sys/.../mdev_supported_types/<type-id>/description
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		Reading this attribute will show a description of the type
+		of mediated device that will be created for type <type-id>.
+		This is an optional attribute. For example:
+		"2 heads, 512M FB, 2560x1600 maximum resolution"
+Users:
+		Userspace applications interested in knowing the details of
+		a particular <type-id>, which can help in understanding the
+		features provided by that type of mediated device.
+
+What: /sys/.../<device>/<UUID>/
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		This directory represents the device directory of the
+		mediated device. It contains all the attributes related to
+		the mediated device.
+
+What: /sys/.../<device>/<UUID>/mdev_type
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+		This is a symbolic link pointing to the <type-id> directory
+		of the supported type from which this mediated device was
+		created.
+
+What: /sys/.../<device>/<UUID>/remove
+Date: October 2016
+Contact: Kirti Wankhede <kwankhede@nvidia.com>
+Description:
+ Writing '1' to this file destroys the mediated device. The
+ vendor driver can fail the remove() callback if that device
+ is active and the vendor driver doesn't support hot unplug.
+ Example:
+ # echo 1 > /sys/bus/mdev/devices/<UUID>/remove
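
Putting the mdev attributes above together, a hedged end-to-end sketch that reuses the example parent device "foo", type "foo-1" and UUID from the descriptions:

    $ ls /sys/devices/foo/mdev_supported_types/
    $ cat /sys/devices/foo/mdev_supported_types/foo-1/available_instances
    # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
          /sys/devices/foo/mdev_supported_types/foo-1/create
    $ readlink /sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001/mdev_type
    # echo 1 > /sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001/remove
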
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
new file mode 100644
index 000000000000..312ae2c579d8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -0,0 +1,11 @@
+What: /sys/class/fpga_bridge/<bridge>/name
+Date: January 2016
+KernelVersion: 4.5
+Contact: Alan Tull <atull@opensource.altera.com>
+Description:	Name of the low-level FPGA bridge driver.
+
+What: /sys/class/fpga_bridge/<bridge>/state
+Date: January 2016
+KernelVersion: 4.5
+Contact: Alan Tull <atull@opensource.altera.com>
+Description:	Shows the bridge state as "enabled" or "disabled".
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 86ace287d48b..491cdeedc195 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -4,16 +4,24 @@ KernelVersion: 2.6.17
Contact: Richard Purdie <rpurdie@rpsys.net>
Description:
Set the brightness of the LED. Most LEDs don't
- have hardware brightness support so will just be turned on for
+ have hardware brightness support, so will just be turned on for
non-zero brightness settings. The value is between 0 and
/sys/class/leds/<led>/max_brightness.
+		Writing 0 to this file clears the active trigger.
+
+		Writing a non-zero value to this file while a trigger is
+		active changes the maximum brightness the trigger is going
+		to use.
+
What: /sys/class/leds/<led>/max_brightness
Date: March 2006
KernelVersion: 2.6.17
Contact: Richard Purdie <rpurdie@rpsys.net>
Description:
- Maximum brightness level for this led, default is 255 (LED_FULL).
+ Maximum brightness level for this LED, default is 255 (LED_FULL).
+
+ If the LED does not support different brightness levels, this
+ should be 1.
What: /sys/class/leds/<led>/trigger
Date: March 2006
@@ -21,7 +29,7 @@ KernelVersion: 2.6.17
Contact: Richard Purdie <rpurdie@rpsys.net>
Description:
Set the trigger for this LED. A trigger is a kernel based source
- of led events.
+ of LED events.
You can change triggers in a similar manner to the way an IO
scheduler is chosen. Trigger specific parameters can appear in
/sys/class/leds/<led> once a given trigger is selected. For
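
As a hedged illustration of the LED class interface touched in this hunk (the LED name is a placeholder and "none" is just one commonly available trigger; valid triggers are listed by reading the trigger file):

    $ cat /sys/class/leds/<led>/max_brightness
    # echo none > /sys/class/leds/<led>/trigger
    # echo 128 > /sys/class/leds/<led>/brightness
    # echo 0 > /sys/class/leds/<led>/brightness
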
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 80d9888a8ece..5096a82f4cde 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -29,3 +29,19 @@ Description: Display fw status registers content
Also number of registers varies between 1 and 6
depending on generation.
+What: /sys/class/mei/meiN/hbm_ver
+Date: Aug 2016
+KernelVersion: 4.9
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the negotiated HBM protocol version.
+
+ The HBM protocol version negotiated
+ between the driver and the device.
+
+What: /sys/class/mei/meiN/hbm_ver_drv
+Date: Aug 2016
+KernelVersion: 4.9
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the driver HBM protocol version.
+
+ The HBM protocol version supported by the driver.
diff --git a/Documentation/ABI/testing/sysfs-class-remoteproc b/Documentation/ABI/testing/sysfs-class-remoteproc
new file mode 100644
index 000000000000..d188afebc8ba
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-remoteproc
@@ -0,0 +1,50 @@
+What: /sys/class/remoteproc/.../firmware
+Date: October 2016
+Contact: Matt Redfearn <matt.redfearn@imgtec.com>
+Description: Remote processor firmware
+
+ Reports the name of the firmware currently loaded to the
+ remote processor.
+
+ To change the running firmware, ensure the remote processor is
+ stopped (using /sys/class/remoteproc/.../state) and write a new filename.
+
+What: /sys/class/remoteproc/.../state
+Date: October 2016
+Contact: Matt Redfearn <matt.redfearn@imgtec.com>
+Description: Remote processor state
+
+ Reports the state of the remote processor, which will be one of:
+
+ "offline"
+ "suspended"
+ "running"
+ "crashed"
+ "invalid"
+
+ "offline" means the remote processor is powered off.
+
+ "suspended" means that the remote processor is suspended and
+ must be woken to receive messages.
+
+ "running" is the normal state of an available remote processor
+
+ "crashed" indicates that a problem/crash has been detected on
+ the remote processor.
+
+ "invalid" is returned if the remote processor is in an
+ unknown state.
+
+ Writing this file controls the state of the remote processor.
+ The following states can be written:
+
+ "start"
+ "stop"
+
+ Writing "start" will attempt to start the processor running the
+ firmware indicated by, or written to,
+ /sys/class/remoteproc/.../firmware. The remote processor should
+ transition to "running" state.
+
+ Writing "stop" will attempt to halt the remote processor and
+ return it to the "offline" state.
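
A minimal sketch of swapping firmware using the two files above; remoteproc0 and the firmware file name are placeholders, and the firmware image must live in the usual firmware search path:

    $ cat /sys/class/remoteproc/remoteproc0/state
    # echo stop > /sys/class/remoteproc/remoteproc0/state
    # echo my-new-firmware.elf > /sys/class/remoteproc/remoteproc0/firmware
    # echo start > /sys/class/remoteproc/remoteproc0/state
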
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
new file mode 100644
index 000000000000..58553d7a321f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-deferred_probe
@@ -0,0 +1,12 @@
+What: /sys/devices/.../deferred_probe
+Date: August 2016
+Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Description:
+ The /sys/devices/.../deferred_probe attribute is
+ present for all devices. If a driver detects during
+ probing a device that a related device is not yet
+ ready, it may defer probing of the first device. The
+ kernel will retry probing the first device after any
+ other device is successfully probed. This attribute
+ reads as 1 if probing of this device is currently
+ deferred, or 0 otherwise.
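
For example (the platform device name foo.0 is a placeholder), checking whether a device is currently waiting on a dependency:

    $ cat /sys/devices/platform/foo.0/deferred_probe

A result of 1 means probing of that device is currently deferred.
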
diff --git a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
index b82deeaec314..470def06ab0a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
+++ b/Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
@@ -1,4 +1,4 @@
-What: state
+What: /sys/devices/system/ibm_rtl/state
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description: The state file allows a means by which to change in and
Users: The ibm-prtm userspace daemon uses this interface.
-What: version
+What: /sys/devices/system/ibm_rtl/version
Date: Sep 2010
KernelVersion: 2.6.37
Contact: Vernon Mauery <vernux@us.ibm.com>
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 91bd6ca5440f..2cc0a72b64be 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -347,7 +347,7 @@ Description:
because of fragmentation, SLUB will retry with the minimum order
possible depending on its characteristics.
When debug_guardpage_minorder=N (N > 0) parameter is specified
- (see Documentation/kernel-parameters.txt), the minimum possible
+ (see Documentation/admin-guide/kernel-parameters.rst), the minimum possible
order is used and this sysfs entry can not be used to change
the order at run time.
diff --git a/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
new file mode 100644
index 000000000000..6212697bbf6f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
@@ -0,0 +1,15 @@
+What: /sys/devices/platform/<phy-name>/role
+Date: October 2016
+KernelVersion: 4.10
+Contact: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Description:
+		This file can be read and written.
+		It shows or changes the PHY mode used for USB role swapping.
+
+		Write one of the following strings to change the mode:
+		 "host" - switch the mode from peripheral to host.
+		 "peripheral" - switch the mode from host to peripheral.
+
+		Reading the file returns one of the following strings:
+ "host" - The mode is host now.
+ "peripheral" - The mode is peripheral now.
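
A brief usage sketch for the role file above (the PHY platform device name is a placeholder):

    $ cat /sys/devices/platform/<phy-name>/role
    # echo peripheral > /sys/devices/platform/<phy-name>/role
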
diff --git a/Documentation/ABI/testing/sysfs-platform-sst-atom b/Documentation/ABI/testing/sysfs-platform-sst-atom
new file mode 100644
index 000000000000..0d07c0395660
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-sst-atom
@@ -0,0 +1,17 @@
+What: /sys/devices/platform/8086%x:00/firmware_version
+Date: November 2016
+KernelVersion: 4.10
+Contact: "Sebastien Guiriec" <sebastien.guiriec@intel.com>
+Description:
+		LPE firmware version for the SST driver on all Atom
+		platforms (BYT/CHT/Merrifield/BSW).
+ If the FW has never been loaded it will display:
+ "FW not yet loaded"
+ If FW has been loaded it will display:
+ "v01.aa.bb.cc"
+ aa: Major version is reflecting SoC version:
+ 0d: BYT FW
+ 0b: BSW FW
+ 07: Merrifield FW
+ bb: Minor version
+ cc: Build version
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 50b368d490b5..f523e5a3ac33 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -7,30 +7,35 @@ Description:
subsystem.
What: /sys/power/state
-Date: May 2014
+Date: November 2016
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/state file controls system sleep states.
Reading from this file returns the available sleep state
- labels, which may be "mem", "standby", "freeze" and "disk"
- (hibernation). The meanings of the first three labels depend on
- the relative_sleep_states command line argument as follows:
- 1) relative_sleep_states = 1
- "mem", "standby", "freeze" represent non-hibernation sleep
- states from the deepest ("mem", always present) to the
- shallowest ("freeze"). "standby" and "freeze" may or may
- not be present depending on the capabilities of the
- platform. "freeze" can only be present if "standby" is
- present.
- 2) relative_sleep_states = 0 (default)
- "mem" - "suspend-to-RAM", present if supported.
- "standby" - "power-on suspend", present if supported.
- "freeze" - "suspend-to-idle", always present.
-
- Writing to this file one of these strings causes the system to
- transition into the corresponding state, if available. See
- Documentation/power/states.txt for a description of what
- "suspend-to-RAM", "power-on suspend" and "suspend-to-idle" mean.
+ labels, which may be "mem" (suspend), "standby" (power-on
+ suspend), "freeze" (suspend-to-idle) and "disk" (hibernation).
+
+ Writing one of the above strings to this file causes the system
+ to transition into the corresponding state, if available.
+
+ See Documentation/power/states.txt for more information.
+
+What: /sys/power/mem_sleep
+Date: November 2016
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
+Description:
+ The /sys/power/mem_sleep file controls the operating mode of
+ system suspend. Reading from it returns the available modes
+ as "s2idle" (always present), "shallow" and "deep" (present if
+ supported). The mode that will be used on subsequent attempts
+ to suspend the system (by writing "mem" to the /sys/power/state
+ file described above) is enclosed in square brackets.
+
+ Writing one of the above strings to this file causes the mode
+ represented by it to be used on subsequent attempts to suspend
+ the system.
+
+ See Documentation/power/states.txt for more information.
What: /sys/power/disk
Date: September 2006
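
A hedged sketch of how the two files above interact; the bracketed value in the sample output is illustrative and depends on platform support:

    $ cat /sys/power/mem_sleep
    s2idle [deep]
    # echo s2idle > /sys/power/mem_sleep
    # echo mem > /sys/power/state
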
diff --git a/Documentation/BUG-HUNTING b/Documentation/BUG-HUNTING
deleted file mode 100644
index 65022a87bf17..000000000000
--- a/Documentation/BUG-HUNTING
+++ /dev/null
@@ -1,246 +0,0 @@
-Table of contents
-=================
-
-Last updated: 20 December 2005
-
-Contents
-========
-
-- Introduction
-- Devices not appearing
-- Finding patch that caused a bug
--- Finding using git-bisect
--- Finding it the old way
-- Fixing the bug
-
-Introduction
-============
-
-Always try the latest kernel from kernel.org and build from source. If you are
-not confident in doing that please report the bug to your distribution vendor
-instead of to a kernel developer.
-
-Finding bugs is not always easy. Have a go though. If you can't find it don't
-give up. Report as much as you have found to the relevant maintainer. See
-MAINTAINERS for who that is for the subsystem you have worked on.
-
-Before you submit a bug report read REPORTING-BUGS.
-
-Devices not appearing
-=====================
-
-Often this is caused by udev. Check that first before blaming it on the
-kernel.
-
-Finding patch that caused a bug
-===============================
-
-
-
-Finding using git-bisect
-------------------------
-
-Using the provided tools with git makes finding bugs easy provided the bug is
-reproducible.
-
-Steps to do it:
-- start using git for the kernel source
-- read the man page for git-bisect
-- have fun
-
-Finding it the old way
-----------------------
-
-[Sat Mar 2 10:32:33 PST 1996 KERNEL_BUG-HOWTO lm@sgi.com (Larry McVoy)]
-
-This is how to track down a bug if you know nothing about kernel hacking.
-It's a brute force approach but it works pretty well.
-
-You need:
-
- . A reproducible bug - it has to happen predictably (sorry)
- . All the kernel tar files from a revision that worked to the
- revision that doesn't
-
-You will then do:
-
- . Rebuild a revision that you believe works, install, and verify that.
- . Do a binary search over the kernels to figure out which one
- introduced the bug. I.e., suppose 1.3.28 didn't have the bug, but
- you know that 1.3.69 does. Pick a kernel in the middle and build
- that, like 1.3.50. Build & test; if it works, pick the mid point
- between .50 and .69, else the mid point between .28 and .50.
- . You'll narrow it down to the kernel that introduced the bug. You
- can probably do better than this but it gets tricky.
-
- . Narrow it down to a subdirectory
-
- - Copy kernel that works into "test". Let's say that 3.62 works,
- but 3.63 doesn't. So you diff -r those two kernels and come
- up with a list of directories that changed. For each of those
- directories:
-
- Copy the non-working directory next to the working directory
- as "dir.63".
-	  One directory at a time, move the working directory aside to
-	  "dir.62" and put the non-working "dir.63" in its place:
-
- mv dir dir.62
- mv dir.63 dir
- find dir -name '*.[oa]' -print | xargs rm -f
-
- And then rebuild and retest. Assuming that all related
- changes were contained in the sub directory, this should
- isolate the change to a directory.
-
- Problems: changes in header files may have occurred; I've
- found in my case that they were self explanatory - you may
- or may not want to give up when that happens.
-
- . Narrow it down to a file
-
- - You can apply the same technique to each file in the directory,
- hoping that the changes in that file are self contained.
-
- . Narrow it down to a routine
-
- - You can take the old file and the new file and manually create
- a merged file that has
-
- #ifdef VER62
- routine()
- {
- ...
- }
- #else
- routine()
- {
- ...
- }
- #endif
-
- And then walk through that file, one routine at a time and
- prefix it with
-
- #define VER62
- /* both routines here */
- #undef VER62
-
- Then recompile, retest, move the ifdefs until you find the one
- that makes the difference.
-
-Finally, you take all the info that you have, kernel revisions, bug
-description, the extent to which you have narrowed it down, and pass
-that off to whomever you believe is the maintainer of that section.
-A post to linux.dev.kernel isn't such a bad idea if you've done some
-work to narrow it down.
-
-If you get it down to a routine, you'll probably get a fix in 24 hours.
-
-My apologies to Linus and the other kernel hackers for describing this
-brute force approach, it's hardly what a kernel hacker would do. However,
-it does work and it lets non-hackers help fix bugs. And it is cool
-because Linux snapshots will let you do this - something that you can't
-do with vendor supplied releases.
-
-Fixing the bug
-==============
-
-Nobody is going to tell you how to fix bugs. Seriously. You need to work it
-out. But below are some hints on how to use the tools.
-
-To debug a kernel, use objdump and look for the hex offset from the crash
-output to find the valid line of code/assembler. Without debug symbols, you
-will see the assembler code for the routine shown, but if your kernel has
-debug symbols the C code will also be available. (Debug symbols can be enabled
-in the kernel hacking menu of the menu configuration.) For example:
-
- objdump -r -S -l --disassemble net/dccp/ipv4.o
-
-NB.: you need to be at the top level of the kernel tree for this to pick up
-your C files.
-
-If you don't have access to the code you can also debug on some crash dumps
-e.g. crash dump output as shown by Dave Miller.
-
-> EIP is at ip_queue_xmit+0x14/0x4c0
-> ...
-> Code: 44 24 04 e8 6f 05 00 00 e9 e8 fe ff ff 8d 76 00 8d bc 27 00 00
-> 00 00 55 57 56 53 81 ec bc 00 00 00 8b ac 24 d0 00 00 00 8b 5d 08
-> <8b> 83 3c 01 00 00 89 44 24 14 8b 45 28 85 c0 89 44 24 18 0f 85
->
-> Put the bytes into a "foo.s" file like this:
->
-> .text
-> .globl foo
-> foo:
-> .byte .... /* bytes from Code: part of OOPS dump */
->
-> Compile it with "gcc -c -o foo.o foo.s" then look at the output of
-> "objdump --disassemble foo.o".
->
-> Output:
->
-> ip_queue_xmit:
-> push %ebp
-> push %edi
-> push %esi
-> push %ebx
-> sub $0xbc, %esp
-> mov 0xd0(%esp), %ebp ! %ebp = arg0 (skb)
-> mov 0x8(%ebp), %ebx ! %ebx = skb->sk
-> mov 0x13c(%ebx), %eax ! %eax = inet_sk(sk)->opt
-
-In addition, you can use GDB to figure out the exact file and line
-number of the OOPS from the vmlinux file. If you have
-CONFIG_DEBUG_INFO enabled, you can simply copy the EIP value from the
-OOPS:
-
- EIP: 0060:[<c021e50e>] Not tainted VLI
-
-And use GDB to translate that to human-readable form:
-
- gdb vmlinux
- (gdb) l *0xc021e50e
-
-If you don't have CONFIG_DEBUG_INFO enabled, you use the function
-offset from the OOPS:
-
- EIP is at vt_ioctl+0xda8/0x1482
-
-And recompile the kernel with CONFIG_DEBUG_INFO enabled:
-
- make vmlinux
- gdb vmlinux
- (gdb) p vt_ioctl
- (gdb) l *(0x<address of vt_ioctl> + 0xda8)
-or, as one command
- (gdb) l *(vt_ioctl + 0xda8)
-
-If you have a call trace, such as :-
->Call Trace:
-> [<ffffffff8802c8e9>] :jbd:log_wait_commit+0xa3/0xf5
-> [<ffffffff810482d9>] autoremove_wake_function+0x0/0x2e
-> [<ffffffff8802770b>] :jbd:journal_stop+0x1be/0x1ee
-> ...
-this shows the problem in the :jbd: module. You can load that module in gdb
-and list the relevant code.
- gdb fs/jbd/jbd.ko
- (gdb) p log_wait_commit
- (gdb) l *(0x<address> + 0xa3)
-or
- (gdb) l *(log_wait_commit + 0xa3)
-
-
-Another very useful option of the Kernel Hacking section in menuconfig is
-Debug memory allocations. This will help you see whether data has been
-initialised and not set before use etc. To see the values that get assigned
-with this look at mm/slab.c and search for POISON_INUSE. When using this an
-Oops will often show the poisoned data instead of zero which is the default.
-
-Once you have worked out a fix please submit it upstream. After all open
-source is about sharing what you do and don't you want to be recognised for
-your genius?
-
-Please do read Documentation/SubmittingPatches though to help your code get
-accepted.
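
As a hedged sketch of the git-bisect approach the removed text alludes to (the known-good tag v4.8 is only an example):

    $ git bisect start
    $ git bisect bad          # the kernel you are running shows the bug
    $ git bisect good v4.8    # last version known to work
      (build, boot and test the kernel git checks out, then mark it with
       "git bisect good" or "git bisect bad" until the culprit is found)
    $ git bisect reset
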
diff --git a/Documentation/Changes b/Documentation/Changes
index 22797a15dc24..7564ae1682ba 100644..120000
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -1,485 +1 @@
-.. _changes:
-
-Minimal requirements to compile the Kernel
-++++++++++++++++++++++++++++++++++++++++++
-
-Intro
-=====
-
-This document is designed to provide a list of the minimum levels of
-software necessary to run the 4.x kernels.
-
-This document is originally based on my "Changes" file for 2.0.x kernels
-and therefore owes credit to the same people as that file (Jared Mauch,
-Axel Boldt, Alessandro Sigala, and countless other users all over the
-'net).
-
-Current Minimal Requirements
-****************************
-
-Upgrade to at **least** these software revisions before thinking you've
-encountered a bug! If you're unsure what version you're currently
-running, the suggested command should tell you.
-
-Again, keep in mind that this list assumes you are already functionally
-running a Linux kernel. Also, not all tools are necessary on all
-systems; obviously, if you don't have any ISDN hardware, for example,
-you probably needn't concern yourself with isdn4k-utils.
-
-====================== =============== ========================================
- Program Minimal version Command to check the version
-====================== =============== ========================================
-GNU C 3.2 gcc --version
-GNU make 3.80 make --version
-binutils 2.12 ld -v
-util-linux 2.10o fdformat --version
-module-init-tools 0.9.10 depmod -V
-e2fsprogs 1.41.4 e2fsck -V
-jfsutils 1.1.3 fsck.jfs -V
-reiserfsprogs 3.6.3 reiserfsck -V
-xfsprogs 2.6.0 xfs_db -V
-squashfs-tools 4.0 mksquashfs -version
-btrfs-progs 0.18 btrfsck
-pcmciautils 004 pccardctl -V
-quota-tools 3.09 quota -V
-PPP 2.4.0 pppd --version
-isdn4k-utils 3.1pre1 isdnctrl 2>&1|grep version
-nfs-utils 1.0.5 showmount --version
-procps 3.2.0 ps --version
-oprofile 0.9 oprofiled --version
-udev 081 udevd --version
-grub 0.93 grub --version || grub-install --version
-mcelog 0.6 mcelog --version
-iptables 1.4.2 iptables -V
-openssl & libcrypto 1.0.0 openssl version
-bc 1.06.95 bc --version
-Sphinx\ [#f1]_ 1.2 sphinx-build --version
-====================== =============== ========================================
-
-.. [#f1] Sphinx is needed only to build the Kernel documentation
-
-Kernel compilation
-******************
-
-GCC
----
-
-The gcc version requirements may vary depending on the type of CPU in your
-computer.
-
-Make
-----
-
-You will need GNU make 3.80 or later to build the kernel.
-
-Binutils
---------
-
-Linux on IA-32 has recently switched from using ``as86`` to using ``gas`` for
-assembling the 16-bit boot code, removing the need for ``as86`` to compile
-your kernel. This change does, however, mean that you need a recent
-release of binutils.
-
-Perl
-----
-
-You will need perl 5 and the following modules: ``Getopt::Long``,
-``Getopt::Std``, ``File::Basename``, and ``File::Find`` to build the kernel.
-
-BC
---
-
-You will need bc to build kernels 3.10 and higher
-
-
-OpenSSL
--------
-
-Module signing and external certificate handling use the OpenSSL program and
-crypto library to do key creation and signature generation.
-
-You will need openssl to build kernels 3.7 and higher if module signing is
-enabled. You will also need openssl development packages to build kernels 4.3
-and higher.
-
-
-System utilities
-****************
-
-Architectural changes
----------------------
-
-DevFS has been obsoleted in favour of udev
-(http://www.kernel.org/pub/linux/utils/kernel/hotplug/)
-
-32-bit UID support is now in place. Have fun!
-
-Linux documentation for functions is transitioning to inline
-documentation via specially-formatted comments near their
-definitions in the source. These comments can be combined with the
-SGML templates in the Documentation/DocBook directory to make DocBook
-files, which can then be converted by DocBook stylesheets to PostScript,
-HTML, PDF files, and several other formats. In order to convert from
-DocBook format to a format of your choice, you'll need to install Jade as
-well as the desired DocBook stylesheets.
-
-Util-linux
-----------
-
-New versions of util-linux provide ``fdisk`` support for larger disks,
-support new options to mount, recognize more supported partition
-types, have a fdformat which works with 2.4 kernels, and similar goodies.
-You'll probably want to upgrade.
-
-Ksymoops
---------
-
-If the unthinkable happens and your kernel oopses, you may need the
-ksymoops tool to decode it, but in most cases you don't.
-It is generally preferred to build the kernel with ``CONFIG_KALLSYMS`` so
-that it produces readable dumps that can be used as-is (this also
-produces better output than ksymoops). If for some reason your kernel
-is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
-reproduce the Oops with that option, then you can still decode that Oops
-with ksymoops.
-
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires ``module-init-tools``
-to use. It is backward compatible with the 2.4.x series kernels.
-
-Mkinitrd
---------
-
-These changes to the ``/lib/modules`` file tree layout also require that
-mkinitrd be upgraded.
-
-E2fsprogs
----------
-
-The latest version of ``e2fsprogs`` fixes several bugs in fsck and
-debugfs. Obviously, it's a good idea to upgrade.
-
-JFSutils
---------
-
-The ``jfsutils`` package contains the utilities for the file system.
-The following utilities are available:
-
-- ``fsck.jfs`` - initiate replay of the transaction log, and check
- and repair a JFS formatted partition.
-
-- ``mkfs.jfs`` - create a JFS formatted partition.
-
-- other file system utilities are also available in this package.
-
-Reiserfsprogs
--------------
-
-The reiserfsprogs package should be used for reiserfs-3.6.x
-(Linux kernels 2.4.x). It is a combined package and contains working
-versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and
-``reiserfsck``. These utils work on both i386 and alpha platforms.
-
-Xfsprogs
---------
-
-The latest version of ``xfsprogs`` contains ``mkfs.xfs``, ``xfs_db``, and the
-``xfs_repair`` utilities, among others, for the XFS filesystem. It is
-architecture independent and any version from 2.0.0 onward should
-work correctly with this version of the XFS kernel code (2.6.0 or
-later is recommended, due to some significant improvements).
-
-PCMCIAutils
------------
-
-PCMCIAutils replaces ``pcmcia-cs``. It properly sets up
-PCMCIA sockets at system startup and loads the appropriate modules
-for 16-bit PCMCIA devices if the kernel is modularized and the hotplug
-subsystem is used.
-
-Quota-tools
------------
-
-Support for 32 bit uid's and gid's is required if you want to use
-the newer version 2 quota format. Quota-tools version 3.07 and
-newer has this support. Use the recommended version or newer
-from the table above.
-
-Intel IA32 microcode
---------------------
-
-A driver has been added to allow updating of Intel IA32 microcode,
-accessible as a normal (misc) character device. If you are not using
-udev you may need to::
-
- mkdir /dev/cpu
- mknod /dev/cpu/microcode c 10 184
- chmod 0644 /dev/cpu/microcode
-
-as root before you can use this. You'll probably also want to
-get the user-space microcode_ctl utility to use with this.
-
-udev
-----
-
-``udev`` is a userspace application for populating ``/dev`` dynamically with
-only entries for devices actually present. ``udev`` replaces the basic
-functionality of devfs, while allowing persistent device naming for
-devices.
-
-FUSE
-----
-
-Needs libfuse 2.4.0 or later. Absolute minimum is 2.3.0 but mount
-options ``direct_io`` and ``kernel_cache`` won't work.
-
-Networking
-**********
-
-General changes
----------------
-
-If you have advanced network configuration needs, you should probably
-consider using the network tools from ip-route2.
-
-Packet Filter / NAT
--------------------
-The packet filtering and NAT code uses the same tools like the previous 2.4.x
-kernel series (iptables). It still includes backwards-compatibility modules
-for 2.2.x-style ipchains and 2.0.x-style ipfwadm.
-
-PPP
----
-
-The PPP driver has been restructured to support multilink and to
-enable it to operate over diverse media layers. If you use PPP,
-upgrade pppd to at least 2.4.0.
-
-If you are not using udev, you must have the device file /dev/ppp
-which can be made by::
-
- mknod /dev/ppp c 108 0
-
-as root.
-
-Isdn4k-utils
-------------
-
-Due to changes in the length of the phone number field, isdn4k-utils
-needs to be recompiled or (preferably) upgraded.
-
-NFS-utils
----------
-
-In ancient (2.4 and earlier) kernels, the nfs server needed to know
-about any client that expected to be able to access files via NFS. This
-information would be given to the kernel by ``mountd`` when the client
-mounted the filesystem, or by ``exportfs`` at system startup. exportfs
-would take information about active clients from ``/var/lib/nfs/rmtab``.
-
-This approach is quite fragile as it depends on rmtab being correct
-which is not always easy, particularly when trying to implement
-fail-over. Even when the system is working well, ``rmtab`` suffers from
-getting lots of old entries that never get removed.
-
-With modern kernels we have the option of having the kernel tell mountd
-when it gets a request from an unknown host, and mountd can give
-appropriate export information to the kernel. This removes the
-dependency on ``rmtab`` and means that the kernel only needs to know about
-currently active clients.
-
-To enable this new functionality, you need to::
-
- mount -t nfsd nfsd /proc/fs/nfsd
-
-before running exportfs or mountd. It is recommended that all NFS
-services be protected from the internet-at-large by a firewall where
-that is possible.
-
-mcelog
-------
-
-On x86 kernels the mcelog utility is needed to process and log machine check
-events when ``CONFIG_X86_MCE`` is enabled. Machine check events are errors
-reported by the CPU. Processing them is strongly encouraged.
-
-Kernel documentation
-********************
-
-Sphinx
-------
-
-The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 1.2 or upper. If you're desiring to build
-PDF outputs, it is recommended to use version 1.4.6.
-
-.. note::
-
- Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX``
- version 3.14159265. Depending on the distribution, you may also need
- to install a series of ``texlive`` packages that provide the minimal
- set of functionalities required for ``XeLaTex`` to work.
-
-Other tools
------------
-
-In order to produce documentation from DocBook, you'll also need ``xmlto``.
-Please notice, however, that we're currently migrating all documents to use
-``Sphinx``.
-
-Getting updated software
-========================
-
-Kernel compilation
-******************
-
-gcc
----
-
-- <ftp://ftp.gnu.org/gnu/gcc/>
-
-Make
-----
-
-- <ftp://ftp.gnu.org/gnu/make/>
-
-Binutils
---------
-
-- <ftp://ftp.kernel.org/pub/linux/devel/binutils/>
-
-OpenSSL
--------
-
-- <https://www.openssl.org/>
-
-System utilities
-****************
-
-Util-linux
-----------
-
-- <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
-
-Ksymoops
---------
-
-- <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
-
-Module-Init-Tools
------------------
-
-- <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
-
-Mkinitrd
---------
-
-- <https://code.launchpad.net/initrd-tools/main>
-
-E2fsprogs
----------
-
-- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
-
-JFSutils
---------
-
-- <http://jfs.sourceforge.net/>
-
-Reiserfsprogs
--------------
-
-- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
-
-Xfsprogs
---------
-
-- <ftp://oss.sgi.com/projects/xfs/>
-
-Pcmciautils
------------
-
-- <ftp://ftp.kernel.org/pub/linux/utils/kernel/pcmcia/>
-
-Quota-tools
------------
-
-- <http://sourceforge.net/projects/linuxquota/>
-
-DocBook Stylesheets
--------------------
-
-- <http://sourceforge.net/projects/docbook/files/docbook-dsssl/>
-
-XMLTO XSLT Frontend
--------------------
-
-- <http://cyberelk.net/tim/xmlto/>
-
-Intel P6 microcode
-------------------
-
-- <https://downloadcenter.intel.com/>
-
-udev
-----
-
-- <http://www.freedesktop.org/software/systemd/man/udev.html>
-
-FUSE
-----
-
-- <http://sourceforge.net/projects/fuse>
-
-mcelog
-------
-
-- <http://www.mcelog.org/>
-
-Networking
-**********
-
-PPP
----
-
-- <ftp://ftp.samba.org/pub/ppp/>
-
-Isdn4k-utils
-------------
-
-- <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/>
-
-NFS-utils
----------
-
-- <http://sourceforge.net/project/showfiles.php?group_id=14>
-
-Iptables
---------
-
-- <http://www.iptables.org/downloads.html>
-
-Ip-route2
----------
-
-- <https://www.kernel.org/pub/linux/utils/net/iproute2/>
-
-OProfile
---------
-
-- <http://oprofile.sf.net/download/>
-
-NFS-Utils
----------
-
-- <http://nfs.sourceforge.net/>
-
-Kernel documentation
-********************
-
-Sphinx
-------
-
-- <http://www.sphinx-doc.org/>
+process/changes.rst
\ No newline at end of file
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 9c61c039ccd9..320983ca114e 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -1,1062 +1 @@
-.. _codingstyle:
-
-Linux kernel coding style
-=========================
-
-This is a short document describing the preferred coding style for the
-linux kernel. Coding style is very personal, and I won't **force** my
-views on anybody, but this is what goes for anything that I have to be
-able to maintain, and I'd prefer it for most other things too. Please
-at least consider the points made here.
-
-First off, I'd suggest printing out a copy of the GNU coding standards,
-and NOT read it. Burn them, it's a great symbolic gesture.
-
-Anyway, here goes:
-
-
-1) Indentation
---------------
-
-Tabs are 8 characters, and thus indentations are also 8 characters.
-There are heretic movements that try to make indentations 4 (or even 2!)
-characters deep, and that is akin to trying to define the value of PI to
-be 3.
-
-Rationale: The whole idea behind indentation is to clearly define where
-a block of control starts and ends. Especially when you've been looking
-at your screen for 20 straight hours, you'll find it a lot easier to see
-how the indentation works if you have large indentations.
-
-Now, some people will claim that having 8-character indentations makes
-the code move too far to the right, and makes it hard to read on a
-80-character terminal screen. The answer to that is that if you need
-more than 3 levels of indentation, you're screwed anyway, and should fix
-your program.
-
-In short, 8-char indents make things easier to read, and have the added
-benefit of warning you when you're nesting your functions too deep.
-Heed that warning.
-
-The preferred way to ease multiple indentation levels in a switch statement is
-to align the ``switch`` and its subordinate ``case`` labels in the same column
-instead of ``double-indenting`` the ``case`` labels. E.g.:
-
-.. code-block:: c
-
- switch (suffix) {
- case 'G':
- case 'g':
- mem <<= 30;
- break;
- case 'M':
- case 'm':
- mem <<= 20;
- break;
- case 'K':
- case 'k':
- mem <<= 10;
- /* fall through */
- default:
- break;
- }
-
-Don't put multiple statements on a single line unless you have
-something to hide:
-
-.. code-block:: c
-
- if (condition) do_this;
- do_something_everytime;
-
-Don't put multiple assignments on a single line either. Kernel coding style
-is super simple. Avoid tricky expressions.
-
-Outside of comments, documentation and except in Kconfig, spaces are never
-used for indentation, and the above example is deliberately broken.
-
-Get a decent editor and don't leave whitespace at the end of lines.
-
-
-2) Breaking long lines and strings
-----------------------------------
-
-Coding style is all about readability and maintainability using commonly
-available tools.
-
-The limit on the length of lines is 80 columns and this is a strongly
-preferred limit.
-
-Statements longer than 80 columns will be broken into sensible chunks, unless
-exceeding 80 columns significantly increases readability and does not hide
-information. Descendants are always substantially shorter than the parent and
-are placed substantially to the right. The same applies to function headers
-with a long argument list. However, never break user-visible strings such as
-printk messages, because that breaks the ability to grep for them.
-
-
-3) Placing Braces and Spaces
-----------------------------
-
-The other issue that always comes up in C styling is the placement of
-braces. Unlike the indent size, there are few technical reasons to
-choose one placement strategy over the other, but the preferred way, as
-shown to us by the prophets Kernighan and Ritchie, is to put the opening
-brace last on the line, and put the closing brace first, thusly:
-
-.. code-block:: c
-
- if (x is true) {
- we do y
- }
-
-This applies to all non-function statement blocks (if, switch, for,
-while, do). E.g.:
-
-.. code-block:: c
-
- switch (action) {
- case KOBJ_ADD:
- return "add";
- case KOBJ_REMOVE:
- return "remove";
- case KOBJ_CHANGE:
- return "change";
- default:
- return NULL;
- }
-
-However, there is one special case, namely functions: they have the
-opening brace at the beginning of the next line, thus:
-
-.. code-block:: c
-
- int function(int x)
- {
- body of function
- }
-
-Heretic people all over the world have claimed that this inconsistency
-is ... well ... inconsistent, but all right-thinking people know that
-(a) K&R are **right** and (b) K&R are right. Besides, functions are
-special anyway (you can't nest them in C).
-
-Note that the closing brace goes on a line of its own, **except** in the
-cases where it is followed by a continuation of the same statement, i.e. a
-``while`` in a do-statement or an ``else`` in an if-statement, like this:
-
-.. code-block:: c
-
- do {
-         body of do-loop
- } while (condition);
-
-and
-
-.. code-block:: c
-
- if (x == y) {
-         ..
- } else if (x > y) {
-         ...
- } else {
-         ....
- }
-
-Rationale: K&R.
-
-Also, note that this brace-placement also minimizes the number of empty
-(or almost empty) lines, without any loss of readability. Thus, as the
-supply of new-lines on your screen is not a renewable resource (think
-25-line terminal screens here), you have more empty lines to put
-comments on.
-
-Do not unnecessarily use braces where a single statement will do.
-
-.. code-block:: c
-
- if (condition)
-         action();
-
-and
-
-.. code-block:: c
-
- if (condition)
-         do_this();
- else
-         do_that();
-
-This does not apply if only one branch of a conditional statement is a single
-statement; in the latter case use braces in both branches:
-
-.. code-block:: c
-
- if (condition) {
-         do_this();
-         do_that();
- } else {
-         otherwise();
- }
-
-3.1) Spaces
-***********
-
-Linux kernel style for use of spaces depends (mostly) on
-function-versus-keyword usage. Use a space after (most) keywords. The
-notable exceptions are sizeof, typeof, alignof, and __attribute__, which look
-somewhat like functions (and are usually used with parentheses in Linux,
-although they are not required in the language, as in: ``sizeof info`` after
-``struct fileinfo info;`` is declared).
-
-So use a space after these keywords::
-
- if, switch, case, for, do, while
-
-but not with sizeof, typeof, alignof, or __attribute__. E.g.,
-
-.. code-block:: c
-
-
- s = sizeof(struct file);
-
-Do not add spaces around (inside) parenthesized expressions. This example is
-**bad**:
-
-.. code-block:: c
-
-
- s = sizeof( struct file );
-
-When declaring pointer data or a function that returns a pointer type, the
-preferred use of ``*`` is adjacent to the data name or function name and not
-adjacent to the type name. Examples:
-
-.. code-block:: c
-
-
- char *linux_banner;
- unsigned long long memparse(char *ptr, char **retptr);
- char *match_strdup(substring_t *s);
-
-Use one space around (on each side of) most binary and ternary operators,
-such as any of these::
-
- = + - < > * / % | & ^ <= >= == != ? :
-
-but no space after unary operators::
-
- & * + - ~ ! sizeof typeof alignof __attribute__ defined
-
-no space before the postfix increment & decrement unary operators::
-
- ++ --
-
-no space after the prefix increment & decrement unary operators::
-
- ++ --
-
-and no space around the ``.`` and ``->`` structure member operators.
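-
-Putting several of these spacing rules together (a made-up snippet added
-purely for illustration):
-
-.. code-block:: c
-
- len = (count + 7) / 8;
- if (!buf || len >= limit)
-         return -EINVAL;
- total = s->bytes * 2;
- s->count++;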
-
-Do not leave trailing whitespace at the ends of lines. Some editors with
-``smart`` indentation will insert whitespace at the beginning of new lines as
-appropriate, so you can start typing the next line of code right away.
-However, some such editors do not remove the whitespace if you end up not
-putting a line of code there, such as if you leave a blank line. As a result,
-you end up with lines containing trailing whitespace.
-
-Git will warn you about patches that introduce trailing whitespace, and can
-optionally strip the trailing whitespace for you; however, if applying a series
-of patches, this may make later patches in the series fail by changing their
-context lines.
-
-
-4) Naming
----------
-
-C is a Spartan language, and so should your naming be. Unlike Modula-2
-and Pascal programmers, C programmers do not use cute names like
-ThisVariableIsATemporaryCounter. A C programmer would call that
-variable ``tmp``, which is much easier to write, and not the least more
-difficult to understand.
-
-HOWEVER, while mixed-case names are frowned upon, descriptive names for
-global variables are a must. To call a global function ``foo`` is a
-shooting offense.
-
-GLOBAL variables (to be used only if you **really** need them) need to
-have descriptive names, as do global functions. If you have a function
-that counts the number of active users, you should call that
-``count_active_users()`` or similar, you should **not** call it ``cntusr()``.
-
-Encoding the type of a function into the name (so-called Hungarian
-notation) is brain damaged - the compiler knows the types anyway and can
-check those, and it only confuses the programmer. No wonder MicroSoft
-makes buggy programs.
-
-LOCAL variable names should be short, and to the point. If you have
-some random integer loop counter, it should probably be called ``i``.
-Calling it ``loop_counter`` is non-productive if there is no chance of it
-being misunderstood.  Similarly, ``tmp`` can be just about any type of
-variable that is used to hold a temporary value.
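-
-A small made-up example of these naming conventions (``nr_users`` and
-``user_is_active()`` are hypothetical helpers, not real kernel symbols):
-
-.. code-block:: c
-
- int count_active_users(void)
- {
-         int i, active = 0;
-
-         for (i = 0; i < nr_users; i++)
-                 if (user_is_active(i))
-                         active++;
-
-         return active;
- }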
-
-If you are afraid to mix up your local variable names, you have another
-problem, which is called the function-growth-hormone-imbalance syndrome.
-See chapter 6 (Functions).
-
-
-5) Typedefs
------------
-
-Please don't use things like ``vps_t``.
-It's a **mistake** to use typedef for structures and pointers. When you see a
-
-.. code-block:: c
-
-
- vps_t a;
-
-in the source, what does it mean?
-In contrast, if it says
-
-.. code-block:: c
-
- struct virtual_container *a;
-
-you can actually tell what ``a`` is.
-
-Lots of people think that typedefs ``help readability``. Not so. They are
-useful only for:
-
- (a) totally opaque objects (where the typedef is actively used to **hide**
- what the object is).
-
- Example: ``pte_t`` etc. opaque objects that you can only access using
- the proper accessor functions.
-
- .. note::
-
- Opaqueness and ``accessor functions`` are not good in themselves.
- The reason we have them for things like pte_t etc. is that there
- really is absolutely **zero** portably accessible information there.
-
- (b) Clear integer types, where the abstraction **helps** avoid confusion
- whether it is ``int`` or ``long``.
-
- u8/u16/u32 are perfectly fine typedefs, although they fit into
- category (d) better than here.
-
- .. note::
-
- Again - there needs to be a **reason** for this. If something is
- ``unsigned long``, then there's no reason to do
-
- typedef unsigned long myflags_t;
-
- but if there is a clear reason why it might be an ``unsigned int`` under
- certain circumstances and an ``unsigned long`` under other configurations,
- then by all means go ahead and use a typedef.
-
- (c) when you use sparse to literally create a **new** type for
- type-checking.
-
- (d) New types which are identical to standard C99 types, in certain
- exceptional circumstances.
-
- Although it would only take a short amount of time for the eyes and
- brain to become accustomed to the standard types like ``uint32_t``,
- some people object to their use anyway.
-
- Therefore, the Linux-specific ``u8/u16/u32/u64`` types and their
- signed equivalents which are identical to standard types are
- permitted -- although they are not mandatory in new code of your
- own.
-
- When editing existing code which already uses one or the other set
- of types, you should conform to the existing choices in that code.
-
- (e) Types safe for use in userspace.
-
- In certain structures which are visible to userspace, we cannot
- require C99 types and cannot use the ``u32`` form above. Thus, we
- use __u32 and similar types in all structures which are shared
- with userspace.
-
-Maybe there are other cases too, but the rule should basically be to NEVER
-EVER use a typedef unless you can clearly match one of those rules.
-
-In general, a pointer, or a struct that has elements that can reasonably
-be directly accessed should **never** be a typedef.
-
-
-6) Functions
-------------
-
-Functions should be short and sweet, and do just one thing. They should
-fit on one or two screenfuls of text (the ISO/ANSI screen size is 80x24,
-as we all know), and do one thing and do that well.
-
-The maximum length of a function is inversely proportional to the
-complexity and indentation level of that function. So, if you have a
-conceptually simple function that is just one long (but simple)
-case-statement, where you have to do lots of small things for a lot of
-different cases, it's OK to have a longer function.
-
-However, if you have a complex function, and you suspect that a
-less-than-gifted first-year high-school student might not even
-understand what the function is all about, you should adhere to the
-maximum limits all the more closely. Use helper functions with
-descriptive names (you can ask the compiler to in-line them if you think
-it's performance-critical, and it will probably do a better job of it
-than you would have done).
-
-Another measure of the function is the number of local variables. They
-shouldn't exceed 5-10, or you're doing something wrong. Re-think the
-function, and split it into smaller pieces. A human brain can
-generally easily keep track of about 7 different things, anything more
-and it gets confused. You know you're brilliant, but maybe you'd like
-to understand what you did 2 weeks from now.
-
-In source files, separate functions with one blank line. If the function is
-exported, the **EXPORT** macro for it should follow immediately after the
-closing function brace line. E.g.:
-
-.. code-block:: c
-
- int system_is_up(void)
- {
-         return system_state == SYSTEM_RUNNING;
- }
- EXPORT_SYMBOL(system_is_up);
-
-In function prototypes, include parameter names with their data types.
-Although this is not required by the C language, it is preferred in Linux
-because it is a simple way to add valuable information for the reader.
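-
-For instance, prefer the first of these two (hypothetical) prototypes:
-
-.. code-block:: c
-
- /* preferred: parameter names carry information for the reader */
- int register_widget(struct widget *w, unsigned int flags);
-
- /* legal C, but less helpful */
- int register_widget(struct widget *, unsigned int);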
-
-
-7) Centralized exiting of functions
------------------------------------
-
-Albeit deprecated by some people, the equivalent of the goto statement is
-used frequently by compilers in the form of the unconditional jump instruction.
-
-The goto statement comes in handy when a function exits from multiple
-locations and some common work such as cleanup has to be done. If there is no
-cleanup needed then just return directly.
-
-Choose label names which say what the goto does or why the goto exists. An
-example of a good name could be ``out_free_buffer:`` if the goto frees ``buffer``.
-Avoid using GW-BASIC names like ``err1:`` and ``err2:``, as you would have to
-renumber them if you ever add or remove exit paths, and they make correctness
-difficult to verify anyway.
-
-The rationale for using gotos is:
-
-- unconditional statements are easier to understand and follow
-- nesting is reduced
-- errors by not updating individual exit points when making
- modifications are prevented
-- saves the compiler work to optimize redundant code away ;)
-
-.. code-block:: c
-
- int fun(int a)
- {
-         int result = 0;
-         char *buffer;
-
-         buffer = kmalloc(SIZE, GFP_KERNEL);
-         if (!buffer)
-                 return -ENOMEM;
-
-         if (condition1) {
-                 while (loop1) {
-                         ...
-                 }
-                 result = 1;
-                 goto out_free_buffer;
-         }
-         ...
- out_free_buffer:
-         kfree(buffer);
-         return result;
- }
-
-A common type of bug to be aware of is ``one err bugs`` which look like this:
-
-.. code-block:: c
-
- err:
-         kfree(foo->bar);
-         kfree(foo);
-         return ret;
-
-The bug in this code is that on some exit paths ``foo`` is NULL. Normally the
-fix for this is to split it up into two error labels ``err_free_bar:`` and
-``err_free_foo:``:
-
-.. code-block:: c
-
- err_free_bar:
-         kfree(foo->bar);
- err_free_foo:
-         kfree(foo);
-         return ret;
-
-Ideally you should simulate errors to test all exit paths.
-
-
-8) Commenting
--------------
-
-Comments are good, but there is also a danger of over-commenting. NEVER
-try to explain HOW your code works in a comment: it's much better to
-write the code so that the **working** is obvious, and it's a waste of
-time to explain badly written code.
-
-Generally, you want your comments to tell WHAT your code does, not HOW.
-Also, try to avoid putting comments inside a function body: if the
-function is so complex that you need to separately comment parts of it,
-you should probably go back to chapter 6 for a while. You can make
-small comments to note or warn about something particularly clever (or
-ugly), but try to avoid excess. Instead, put the comments at the head
-of the function, telling people what it does, and possibly WHY it does
-it.
-
-When commenting the kernel API functions, please use the kernel-doc format.
-See the files Documentation/kernel-documentation.rst and scripts/kernel-doc
-for details.
-
-The preferred style for long (multi-line) comments is:
-
-.. code-block:: c
-
- /*
- * This is the preferred style for multi-line
- * comments in the Linux kernel source code.
- * Please use it consistently.
- *
- * Description: A column of asterisks on the left side,
- * with beginning and ending almost-blank lines.
- */
-
-For files in net/ and drivers/net/ the preferred style for long (multi-line)
-comments is a little different.
-
-.. code-block:: c
-
- /* The preferred comment style for files in net/ and drivers/net
- * looks like this.
- *
- * It is nearly the same as the generally preferred comment style,
- * but there is no initial almost-blank line.
- */
-
-It's also important to comment data, whether they are basic types or derived
-types. To this end, use just one data declaration per line (no commas for
-multiple data declarations). This leaves you room for a small comment on each
-item, explaining its use.
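-
-For example (a made-up structure, shown only to illustrate the layout):
-
-.. code-block:: c
-
- struct frob_stats {
-         unsigned long rx_packets;       /* packets received since probe */
-         unsigned long rx_dropped;       /* dropped for lack of buffers */
-         unsigned long tx_errors;        /* transmit failures reported by hw */
- };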
-
-
-9) You've made a mess of it
----------------------------
-
-That's OK, we all do. You've probably been told by your long-time Unix
-user helper that ``GNU emacs`` automatically formats the C sources for
-you, and you've noticed that yes, it does do that, but the defaults it
-uses are less than desirable (in fact, they are worse than random
-typing - an infinite number of monkeys typing into GNU emacs would never
-make a good program).
-
-So, you can either get rid of GNU emacs, or change it to use saner
-values. To do the latter, you can stick the following in your .emacs file:
-
-.. code-block:: none
-
- (defun c-lineup-arglist-tabs-only (ignored)
-   "Line up argument lists by tabs, not spaces"
-   (let* ((anchor (c-langelem-pos c-syntactic-element))
-          (column (c-langelem-2nd-pos c-syntactic-element))
-          (offset (- (1+ column) anchor))
-          (steps (floor offset c-basic-offset)))
-     (* (max steps 1)
-        c-basic-offset)))
-
- (add-hook 'c-mode-common-hook
-           (lambda ()
-             ;; Add kernel style
-             (c-add-style
-              "linux-tabs-only"
-              '("linux" (c-offsets-alist
-                         (arglist-cont-nonempty
-                          c-lineup-gcc-asm-reg
-                          c-lineup-arglist-tabs-only))))))
-
- (add-hook 'c-mode-hook
-           (lambda ()
-             (let ((filename (buffer-file-name)))
-               ;; Enable kernel mode for the appropriate files
-               (when (and filename
-                          (string-match (expand-file-name "~/src/linux-trees")
-                                        filename))
-                 (setq indent-tabs-mode t)
-                 (setq show-trailing-whitespace t)
-                 (c-set-style "linux-tabs-only")))))
-
-This will make emacs go better with the kernel coding style for C
-files below ``~/src/linux-trees``.
-
-But even if you fail in getting emacs to do sane formatting, not
-everything is lost: use ``indent``.
-
-Now, again, GNU indent has the same brain-dead settings that GNU emacs
-has, which is why you need to give it a few command line options.
-However, that's not too bad, because even the makers of GNU indent
-recognize the authority of K&R (the GNU people aren't evil, they are
-just severely misguided in this matter), so you just give indent the
-options ``-kr -i8`` (stands for ``K&R, 8 character indents``), or use
-``scripts/Lindent``, which indents in the latest style.
-
-``indent`` has a lot of options, and especially when it comes to comment
-re-formatting you may want to take a look at the man page. But
-remember: ``indent`` is not a fix for bad programming.
-
-
-10) Kconfig configuration files
--------------------------------
-
-For all of the Kconfig* configuration files throughout the source tree,
-the indentation is somewhat different. Lines under a ``config`` definition
-are indented with one tab, while help text is indented an additional two
-spaces. Example::
-
- config AUDIT
-         bool "Auditing support"
-         depends on NET
-         help
-           Enable auditing infrastructure that can be used with another
-           kernel subsystem, such as SELinux (which requires this for
-           logging of avc messages output). Does not do system-call
-           auditing without CONFIG_AUDITSYSCALL.
-
-Seriously dangerous features (such as write support for certain
-filesystems) should advertise this prominently in their prompt string::
-
- config ADFS_FS_RW
-         bool "ADFS write support (DANGEROUS)"
-         depends on ADFS_FS
-         ...
-
-For full documentation on the configuration files, see the file
-Documentation/kbuild/kconfig-language.txt.
-
-
-11) Data structures
--------------------
-
-Data structures that have visibility outside the single-threaded
-environment they are created and destroyed in should always have
-reference counts. In the kernel, garbage collection doesn't exist (and
-outside the kernel garbage collection is slow and inefficient), which
-means that you absolutely **have** to reference count all your uses.
-
-Reference counting means that you can avoid locking, and allows multiple
-users to have access to the data structure in parallel - and not having
-to worry about the structure suddenly going away from under them just
-because they slept or did something else for a while.
-
-Note that locking is **not** a replacement for reference counting.
-Locking is used to keep data structures coherent, while reference
-counting is a memory management technique. Usually both are needed, and
-they are not to be confused with each other.
-
-Many data structures can indeed have two levels of reference counting,
-when there are users of different ``classes``. The subclass count counts
-the number of subclass users, and decrements the global count just once
-when the subclass count goes to zero.
-
-Examples of this kind of ``multi-level-reference-counting`` can be found in
-memory management (``struct mm_struct``: mm_users and mm_count), and in
-filesystem code (``struct super_block``: s_count and s_active).
-
-Remember: if another thread can find your data structure, and you don't
-have a reference count on it, you almost certainly have a bug.
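-
-One common way to implement this is the kernel's ``struct kref`` helper (see
-include/linux/kref.h); the object type below is invented for illustration:
-
-.. code-block:: c
-
- struct frob {
-         struct kref refcount;
-         ...
- };
-
- static void frob_release(struct kref *kref)
- {
-         struct frob *f = container_of(kref, struct frob, refcount);
-
-         kfree(f);
- }
-
- /* each user that can still reach the object holds a reference */
- kref_get(&f->refcount);
- ...
- kref_put(&f->refcount, frob_release);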
-
-
-12) Macros, Enums and RTL
--------------------------
-
-Names of macros defining constants and labels in enums are capitalized.
-
-.. code-block:: c
-
- #define CONSTANT 0x12345
-
-Enums are preferred when defining several related constants.
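-
-For instance (an invented example):
-
-.. code-block:: c
-
- enum frob_state {
-         FROB_IDLE,
-         FROB_RUNNING,
-         FROB_FAILED,
- };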
-
-CAPITALIZED macro names are appreciated but macros resembling functions
-may be named in lower case.
-
-Generally, inline functions are preferable to macros resembling functions.
-
-Macros with multiple statements should be enclosed in a do - while block:
-
-.. code-block:: c
-
- #define macrofun(a, b, c)                       \
-         do {                                    \
-                 if (a == 5)                     \
-                         do_this(b, c);          \
-         } while (0)
-
-Things to avoid when using macros:
-
-1) macros that affect control flow:
-
-.. code-block:: c
-
- #define FOO(x)                                  \
-         do {                                    \
-                 if (blah(x) < 0)                \
-                         return -EBUGGERED;      \
-         } while (0)
-
-is a **very** bad idea. It looks like a function call but exits the ``calling``
-function; don't break the internal parsers of those who will read the code.
-
-2) macros that depend on having a local variable with a magic name:
-
-.. code-block:: c
-
- #define FOO(val) bar(index, val)
-
-might look like a good thing, but it's confusing as hell when one reads the
-code and it's prone to breakage from seemingly innocent changes.
-
-3) macros with arguments that are used as l-values: FOO(x) = y; will
-bite you if somebody e.g. turns FOO into an inline function.
-
-4) forgetting about precedence: macros defining constants using expressions
-must enclose the expression in parentheses. Beware of similar issues with
-macros using parameters.
-
-.. code-block:: c
-
- #define CONSTANT 0x4000
- #define CONSTEXP (CONSTANT | 3)
-
-5) namespace collisions when defining local variables in macros resembling
-functions:
-
-.. code-block:: c
-
- #define FOO(x)                  \
- ({                              \
-         typeof(x) ret;          \
-         ret = calc_ret(x);      \
-         (ret);                  \
- })
-
-ret is a common name for a local variable - __foo_ret is less likely
-to collide with an existing variable.
-
-The cpp manual deals with macros exhaustively. The gcc internals manual also
-covers RTL which is used frequently with assembly language in the kernel.
-
-
-13) Printing kernel messages
-----------------------------
-
-Kernel developers like to be seen as literate. Do mind the spelling
-of kernel messages to make a good impression. Do not use crippled
-words like ``dont``; use ``do not`` or ``don't`` instead. Make the messages
-concise, clear, and unambiguous.
-
-Kernel messages do not have to be terminated with a period.
-
-Printing numbers in parentheses (%d) adds no value and should be avoided.
-
-There are a number of driver model diagnostic macros in <linux/device.h>
-which you should use to make sure messages are matched to the right device
-and driver, and are tagged with the right level: dev_err(), dev_warn(),
-dev_info(), and so forth. For messages that aren't associated with a
-particular device, <linux/printk.h> defines pr_notice(), pr_info(),
-pr_warn(), pr_err(), etc.
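-
-As a brief sketch (the device pointer, message text and values are invented),
-the device-aware and plain forms look like this:
-
-.. code-block:: c
-
- dev_err(&pdev->dev, "failed to map registers: %d\n", ret);
- pr_info("frob: loaded, using %u buffers\n", nbufs);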
-
-Coming up with good debugging messages can be quite a challenge; and once
-you have them, they can be a huge help for remote troubleshooting. However
-debug message printing is handled differently than printing other non-debug
-messages. While the other pr_XXX() functions print unconditionally,
-pr_debug() does not; it is compiled out by default, unless either DEBUG is
-defined or CONFIG_DYNAMIC_DEBUG is set. That is true for dev_dbg() also,
-and a related convention uses VERBOSE_DEBUG to add dev_vdbg() messages to
-the ones already enabled by DEBUG.
-
-Many subsystems have Kconfig debug options to turn on -DDEBUG in the
-corresponding Makefile; in other cases specific files #define DEBUG. And
-when a debug message should be unconditionally printed, such as if it is
-already inside a debug-related #ifdef section, printk(KERN_DEBUG ...) can be
-used.
-
-
-14) Allocating memory
----------------------
-
-The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
-vzalloc(). Please refer to the API documentation for further information
-about them.
-
-The preferred form for passing a size of a struct is the following:
-
-.. code-block:: c
-
- p = kmalloc(sizeof(*p), ...);
-
-The alternative form where struct name is spelled out hurts readability and
-introduces an opportunity for a bug when the pointer variable type is changed
-but the corresponding sizeof that is passed to a memory allocator is not.
-
-Casting the return value which is a void pointer is redundant. The conversion
-from void pointer to any other pointer type is guaranteed by the C programming
-language.
-
-The preferred form for allocating an array is the following:
-
-.. code-block:: c
-
- p = kmalloc_array(n, sizeof(...), ...);
-
-The preferred form for allocating a zeroed array is the following:
-
-.. code-block:: c
-
- p = kcalloc(n, sizeof(...), ...);
-
-Both forms check for overflow on the allocation size n * sizeof(...),
-and return NULL if that occurred.
-
-
-15) The inline disease
-----------------------
-
-There appears to be a common misperception that gcc has a magic "make me
-faster" speedup option called ``inline``. While the use of inlines can be
-appropriate (for example as a means of replacing macros, see Chapter 12), it
-very often is not. Abundant use of the inline keyword leads to a much bigger
-kernel, which in turn slows the system as a whole down, due to a bigger
-icache footprint for the CPU and simply because there is less memory
-available for the pagecache. Just think about it; a pagecache miss causes a
-disk seek, which easily takes 5 milliseconds. There are a LOT of cpu cycles
-that can go into these 5 milliseconds.
-
-A reasonable rule of thumb is not to use inline for functions that have more
-than 3 lines of code in them. An exception to this rule is the case where
-a parameter is known to be a compile-time constant, and as a result of this
-constantness you *know* the compiler will be able to optimize most of your
-function away at compile time. For a good example of this latter case, see
-the kmalloc() inline function.
-
-Often people argue that adding inline to functions that are static and used
-only once is always a win since there is no space tradeoff. While this is
-technically correct, gcc is capable of inlining these automatically without
-help, and the maintenance issue of removing the inline when a second user
-appears outweighs the potential value of the hint that tells gcc to do
-something it would have done anyway.
-
-
-16) Function return values and names
-------------------------------------
-
-Functions can return values of many different kinds, and one of the
-most common is a value indicating whether the function succeeded or
-failed. Such a value can be represented as an error-code integer
-(-Exxx = failure, 0 = success) or a ``succeeded`` boolean (0 = failure,
-non-zero = success).
-
-Mixing up these two sorts of representations is a fertile source of
-difficult-to-find bugs. If the C language included a strong distinction
-between integers and booleans then the compiler would find these mistakes
-for us... but it doesn't. To help prevent such bugs, always follow this
-convention::
-
- If the name of a function is an action or an imperative command,
- the function should return an error-code integer. If the name
- is a predicate, the function should return a "succeeded" boolean.
-
-For example, ``add work`` is a command, and the add_work() function returns 0
-for success or -EBUSY for failure. In the same way, ``PCI device present`` is
-a predicate, and the pci_dev_present() function returns 1 if it succeeds in
-finding a matching device or 0 if it doesn't.
-
-All EXPORTed functions must respect this convention, and so should all
-public functions. Private (static) functions need not, but it is
-recommended that they do.
-
-Functions whose return value is the actual result of a computation, rather
-than an indication of whether the computation succeeded, are not subject to
-this rule. Generally they indicate failure by returning some out-of-range
-result. Typical examples would be functions that return pointers; they use
-NULL or the ERR_PTR mechanism to report failure.
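-
-A small sketch of the ERR_PTR convention (``frob_lookup()`` and its helper
-are invented; ERR_PTR(), IS_ERR() and PTR_ERR() come from <linux/err.h>):
-
-.. code-block:: c
-
- struct frob *frob_lookup(int id)
- {
-         struct frob *f = find_frob(id);         /* hypothetical helper */
-
-         if (!f)
-                 return ERR_PTR(-ENOENT);
-         return f;
- }
-
- /* at the call site */
- f = frob_lookup(id);
- if (IS_ERR(f))
-         return PTR_ERR(f);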
-
-
-17) Don't re-invent the kernel macros
--------------------------------------
-
-The header file include/linux/kernel.h contains a number of macros that
-you should use, rather than explicitly coding some variant of them yourself.
-For example, if you need to calculate the length of an array, take advantage
-of the macro
-
-.. code-block:: c
-
- #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-Similarly, if you need to calculate the size of some structure member, use
-
-.. code-block:: c
-
- #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-
-There are also min() and max() macros that do strict type checking if you
-need them. Feel free to peruse that header file to see what else is already
-defined that you shouldn't reproduce in your code.
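-
-For example (the buffer, length and table are invented; min() requires both
-operands to have the same type, while min_t() casts both to the named type):
-
-.. code-block:: c
-
- n = min_t(size_t, len, sizeof(buf));
- for (i = 0; i < ARRAY_SIZE(table); i++)
-         setup_entry(&table[i]);                 /* hypothetical helper */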
-
-
-18) Editor modelines and other cruft
-------------------------------------
-
-Some editors can interpret configuration information embedded in source files,
-indicated with special markers. For example, emacs interprets lines marked
-like this:
-
-.. code-block:: c
-
- -*- mode: c -*-
-
-Or like this:
-
-.. code-block:: c
-
- /*
- Local Variables:
- compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
- End:
- */
-
-Vim interprets markers that look like this:
-
-.. code-block:: c
-
- /* vim:set sw=8 noet */
-
-Do not include any of these in source files. People have their own personal
-editor configurations, and your source files should not override them. This
-includes markers for indentation and mode configuration. People may use their
-own custom mode, or may have some other magic method for making indentation
-work correctly.
-
-
-19) Inline assembly
--------------------
-
-In architecture-specific code, you may need to use inline assembly to interface
-with CPU or platform functionality. Don't hesitate to do so when necessary.
-However, don't use inline assembly gratuitously when C can do the job. You can
-and should poke hardware from C when possible.
-
-Consider writing simple helper functions that wrap common bits of inline
-assembly, rather than repeatedly writing them with slight variations. Remember
-that inline assembly can use C parameters.
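-
-A sketch of such a helper (the instruction and register are invented; only
-the wrapper pattern itself is the point):
-
-.. code-block:: c
-
- static inline u32 read_magic_reg(void)
- {
-         u32 val;
-
-         asm volatile("read_magic %0" : "=r" (val));
-         return val;
- }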
-
-Large, non-trivial assembly functions should go in .S files, with corresponding
-C prototypes defined in C header files. The C prototypes for assembly
-functions should use ``asmlinkage``.
-
-You may need to mark your asm statement as volatile, to prevent GCC from
-removing it if GCC doesn't notice any side effects. You don't always need to
-do so, though, and doing so unnecessarily can limit optimization.
-
-When writing a single inline assembly statement containing multiple
-instructions, put each instruction on a separate line in a separate quoted
-string, and end each string except the last with \n\t to properly indent the
-next instruction in the assembly output:
-
-.. code-block:: c
-
- asm ("magic %reg1, #42\n\t"
- "more_magic %reg2, %reg3"
- : /* outputs */ : /* inputs */ : /* clobbers */);
-
-
-20) Conditional Compilation
----------------------------
-
-Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
-files; doing so makes code harder to read and logic harder to follow. Instead,
-use such conditionals in a header file defining functions for use in those .c
-files, providing no-op stub versions in the #else case, and then call those
-functions unconditionally from .c files. The compiler will avoid generating
-any code for the stub calls, producing identical results, but the logic will
-remain easy to follow.
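-
-A minimal sketch of that pattern, with invented names:
-
-.. code-block:: c
-
- /* in a header file */
- #ifdef CONFIG_FROB
- void frob_init(struct frob_device *fd);
- #else
- static inline void frob_init(struct frob_device *fd) { }
- #endif /* CONFIG_FROB */
-
- /* .c files then call frob_init() unconditionally */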
-
-Prefer to compile out entire functions, rather than portions of functions or
-portions of expressions. Rather than putting an ifdef in an expression, factor
-out part or all of the expression into a separate helper function and apply the
-conditional to that function.
-
-If you have a function or variable which may potentially go unused in a
-particular configuration, and the compiler would warn about its definition
-going unused, mark the definition as __maybe_unused rather than wrapping it in
-a preprocessor conditional. (However, if a function or variable *always* goes
-unused, delete it.)
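-
-For instance (an invented handler that may go unreferenced in some
-configurations; ``frob_power_down()`` is hypothetical):
-
-.. code-block:: c
-
- static int __maybe_unused frob_suspend(struct device *dev)
- {
-         return frob_power_down(dev_get_drvdata(dev));
- }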
-
-Within code, where possible, use the IS_ENABLED macro to convert a Kconfig
-symbol into a C boolean expression, and use it in a normal C conditional:
-
-.. code-block:: c
-
- if (IS_ENABLED(CONFIG_SOMETHING)) {
-         ...
- }
-
-The compiler will constant-fold the conditional away, and include or exclude
-the block of code just as with an #ifdef, so this will not add any runtime
-overhead. However, this approach still allows the C compiler to see the code
-inside the block, and check it for correctness (syntax, types, symbol
-references, etc). Thus, you still have to use an #ifdef if the code inside the
-block references symbols that will not exist if the condition is not met.
-
-At the end of any non-trivial #if or #ifdef block (more than a few lines),
-place a comment after the #endif on the same line, noting the conditional
-expression used. For instance:
-
-.. code-block:: c
-
- #ifdef CONFIG_SOMETHING
- ...
- #endif /* CONFIG_SOMETHING */
-
-
-Appendix I) References
-----------------------
-
-The C Programming Language, Second Edition
-by Brian W. Kernighan and Dennis M. Ritchie.
-Prentice Hall, Inc., 1988.
-ISBN 0-13-110362-8 (paperback), 0-13-110370-9 (hardback).
-
-The Practice of Programming
-by Brian W. Kernighan and Rob Pike.
-Addison-Wesley, Inc., 1999.
-ISBN 0-201-61586-X.
-
-GNU manuals - where in compliance with K&R and this text - for cpp, gcc,
-gcc internals and indent, all available from http://www.gnu.org/manual/
-
-WG14 is the international standardization working group for the programming
-language C, URL: http://www.open-std.org/JTC1/SC22/WG14/
-
-Kernel CodingStyle, by greg@kroah.com at OLS 2002:
-http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
+This file has moved to process/coding-style.rst
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index fdf8232d0eeb..caab9039362f 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -9,12 +9,10 @@
DOCBOOKS := z8530book.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
writing_usb_driver.xml networking.xml \
- kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
+ kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- debugobjects.xml sh.xml regulator.xml \
- alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml w1.xml \
+ 80211.xml sh.xml regulator.xml w1.xml \
writing_musb_glue_layer.xml crypto-API.xml iio.xml
ifeq ($(DOCBOOKS),)
@@ -264,6 +262,7 @@ clean-files := $(DOCBOOKS) \
$(patsubst %.xml, %.aux.xml, $(DOCBOOKS)) \
$(patsubst %.xml, %.xml.db, $(DOCBOOKS)) \
$(patsubst %.xml, %.xml, $(DOCBOOKS)) \
+ $(patsubst %.xml, .%.xml.cmd, $(DOCBOOKS)) \
$(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
deleted file mode 100644
index 53f439dcc94b..000000000000
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ /dev/null
@@ -1,142 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<!-- ****************************************************** -->
-<!-- Header -->
-<!-- ****************************************************** -->
-<book id="ALSA-Driver-API">
- <bookinfo>
- <title>The ALSA Driver API</title>
-
- <legalnotice>
- <para>
- This document is free; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
- </para>
-
- <para>
- This document is distributed in the hope that it will be useful,
- but <emphasis>WITHOUT ANY WARRANTY</emphasis>; without even the
- implied warranty of <emphasis>MERCHANTABILITY or FITNESS FOR A
- PARTICULAR PURPOSE</emphasis>. See the GNU General Public License
- for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
- </legalnotice>
-
- </bookinfo>
-
-<toc></toc>
-
- <chapter><title>Management of Cards and Devices</title>
- <sect1><title>Card Management</title>
-!Esound/core/init.c
- </sect1>
- <sect1><title>Device Components</title>
-!Esound/core/device.c
- </sect1>
- <sect1><title>Module requests and Device File Entries</title>
-!Esound/core/sound.c
- </sect1>
- <sect1><title>Memory Management Helpers</title>
-!Esound/core/memory.c
-!Esound/core/memalloc.c
- </sect1>
- </chapter>
- <chapter><title>PCM API</title>
- <sect1><title>PCM Core</title>
-!Esound/core/pcm.c
-!Esound/core/pcm_lib.c
-!Esound/core/pcm_native.c
-!Iinclude/sound/pcm.h
- </sect1>
- <sect1><title>PCM Format Helpers</title>
-!Esound/core/pcm_misc.c
- </sect1>
- <sect1><title>PCM Memory Management</title>
-!Esound/core/pcm_memory.c
- </sect1>
- <sect1><title>PCM DMA Engine API</title>
-!Esound/core/pcm_dmaengine.c
-!Iinclude/sound/dmaengine_pcm.h
- </sect1>
- </chapter>
- <chapter><title>Control/Mixer API</title>
- <sect1><title>General Control Interface</title>
-!Esound/core/control.c
- </sect1>
- <sect1><title>AC97 Codec API</title>
-!Esound/pci/ac97/ac97_codec.c
-!Esound/pci/ac97/ac97_pcm.c
- </sect1>
- <sect1><title>Virtual Master Control API</title>
-!Esound/core/vmaster.c
-!Iinclude/sound/control.h
- </sect1>
- </chapter>
- <chapter><title>MIDI API</title>
- <sect1><title>Raw MIDI API</title>
-!Esound/core/rawmidi.c
- </sect1>
- <sect1><title>MPU401-UART API</title>
-!Esound/drivers/mpu401/mpu401_uart.c
- </sect1>
- </chapter>
- <chapter><title>Proc Info API</title>
- <sect1><title>Proc Info Interface</title>
-!Esound/core/info.c
- </sect1>
- </chapter>
- <chapter><title>Compress Offload</title>
- <sect1><title>Compress Offload API</title>
-!Esound/core/compress_offload.c
-!Iinclude/uapi/sound/compress_offload.h
-!Iinclude/uapi/sound/compress_params.h
-!Iinclude/sound/compress_driver.h
- </sect1>
- </chapter>
- <chapter><title>ASoC</title>
- <sect1><title>ASoC Core API</title>
-!Iinclude/sound/soc.h
-!Esound/soc/soc-core.c
-<!-- !Esound/soc/soc-cache.c no docbook comments here -->
-!Esound/soc/soc-devres.c
-!Esound/soc/soc-io.c
-!Esound/soc/soc-pcm.c
-!Esound/soc/soc-ops.c
-!Esound/soc/soc-compress.c
- </sect1>
- <sect1><title>ASoC DAPM API</title>
-!Esound/soc/soc-dapm.c
- </sect1>
- <sect1><title>ASoC DMA Engine API</title>
-!Esound/soc/soc-generic-dmaengine-pcm.c
- </sect1>
- </chapter>
- <chapter><title>Miscellaneous Functions</title>
- <sect1><title>Hardware-Dependent Devices API</title>
-!Esound/core/hwdep.c
- </sect1>
- <sect1><title>Jack Abstraction Layer API</title>
-!Iinclude/sound/jack.h
-!Esound/core/jack.c
-!Esound/soc/soc-jack.c
- </sect1>
- <sect1><title>ISA DMA Helpers</title>
-!Esound/core/isadma.c
- </sect1>
- <sect1><title>Other Helper Macros</title>
-!Iinclude/sound/core.h
- </sect1>
- </chapter>
-
-</book>
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
deleted file mode 100644
index 7e4f34fde697..000000000000
--- a/Documentation/DocBook/debugobjects.tmpl
+++ /dev/null
@@ -1,443 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="debug-objects-guide">
- <bookinfo>
- <title>Debug objects life time</title>
-
- <authorgroup>
- <author>
- <firstname>Thomas</firstname>
- <surname>Gleixner</surname>
- <affiliation>
- <address>
- <email>tglx@linutronix.de</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>2008</year>
- <holder>Thomas Gleixner</holder>
- </copyright>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License version 2 as published by the Free Software Foundation.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
- <chapter id="intro">
- <title>Introduction</title>
- <para>
- debugobjects is a generic infrastructure to track the life time
- of kernel objects and validate the operations on those.
- </para>
- <para>
- debugobjects is useful to check for the following error patterns:
- <itemizedlist>
- <listitem><para>Activation of uninitialized objects</para></listitem>
- <listitem><para>Initialization of active objects</para></listitem>
- <listitem><para>Usage of freed/destroyed objects</para></listitem>
- </itemizedlist>
- </para>
- <para>
- debugobjects is not changing the data structure of the real
- object so it can be compiled in with a minimal runtime impact
- and enabled on demand with a kernel command line option.
- </para>
- </chapter>
-
- <chapter id="howto">
- <title>Howto use debugobjects</title>
- <para>
- A kernel subsystem needs to provide a data structure which
- describes the object type and add calls into the debug code at
- appropriate places. The data structure to describe the object
- type needs at minimum the name of the object type. Optional
- functions can and should be provided to fixup detected problems
- so the kernel can continue to work and the debug information can
- be retrieved from a live system instead of hard core debugging
- with serial consoles and stack trace transcripts from the
- monitor.
- </para>
- <para>
- The debug calls provided by debugobjects are:
- <itemizedlist>
- <listitem><para>debug_object_init</para></listitem>
- <listitem><para>debug_object_init_on_stack</para></listitem>
- <listitem><para>debug_object_activate</para></listitem>
- <listitem><para>debug_object_deactivate</para></listitem>
- <listitem><para>debug_object_destroy</para></listitem>
- <listitem><para>debug_object_free</para></listitem>
- <listitem><para>debug_object_assert_init</para></listitem>
- </itemizedlist>
- Each of these functions takes the address of the real object and
- a pointer to the object type specific debug description
- structure.
- </para>
- <para>
- Each detected error is reported in the statistics and a limited
- number of errors are printk'ed including a full stack trace.
- </para>
- <para>
- The statistics are available via /sys/kernel/debug/debug_objects/stats.
- They provide information about the number of warnings and the
- number of successful fixups along with information about the
- usage of the internal tracking objects and the state of the
- internal tracking objects pool.
- </para>
- </chapter>
- <chapter id="debugfunctions">
- <title>Debug functions</title>
- <sect1 id="prototypes">
- <title>Debug object function reference</title>
-!Elib/debugobjects.c
- </sect1>
- <sect1 id="debug_object_init">
- <title>debug_object_init</title>
- <para>
- This function is called whenever the initialization function
- of a real object is called.
- </para>
- <para>
- When the real object is already tracked by debugobjects it is
- checked, whether the object can be initialized. Initializing
- is not allowed for active and destroyed objects. When
- debugobjects detects an error, then it calls the fixup_init
- function of the object type description structure if provided
- by the caller. The fixup function can correct the problem
- before the real initialization of the object happens. E.g. it
- can deactivate an active object in order to prevent damage to
- the subsystem.
- </para>
- <para>
- When the real object is not yet tracked by debugobjects,
- debugobjects allocates a tracker object for the real object
- and sets the tracker object state to ODEBUG_STATE_INIT. It
- verifies that the object is not on the callers stack. If it is
- on the callers stack then a limited number of warnings
- including a full stack trace is printk'ed. The calling code
- must use debug_object_init_on_stack() and remove the object
- before leaving the function which allocated it. See next
- section.
- </para>
- </sect1>
-
- <sect1 id="debug_object_init_on_stack">
- <title>debug_object_init_on_stack</title>
- <para>
- This function is called whenever the initialization function
- of a real object which resides on the stack is called.
- </para>
- <para>
- When the real object is already tracked by debugobjects it is
- checked, whether the object can be initialized. Initializing
- is not allowed for active and destroyed objects. When
- debugobjects detects an error, then it calls the fixup_init
- function of the object type description structure if provided
- by the caller. The fixup function can correct the problem
- before the real initialization of the object happens. E.g. it
- can deactivate an active object in order to prevent damage to
- the subsystem.
- </para>
- <para>
- When the real object is not yet tracked by debugobjects
- debugobjects allocates a tracker object for the real object
- and sets the tracker object state to ODEBUG_STATE_INIT. It
- verifies that the object is on the callers stack.
- </para>
- <para>
- An object which is on the stack must be removed from the
- tracker by calling debug_object_free() before the function
- which allocates the object returns. Otherwise we keep track of
- stale objects.
- </para>
- </sect1>
-
- <sect1 id="debug_object_activate">
- <title>debug_object_activate</title>
- <para>
- This function is called whenever the activation function of a
- real object is called.
- </para>
- <para>
- When the real object is already tracked by debugobjects it is
- checked, whether the object can be activated. Activating is
- not allowed for active and destroyed objects. When
- debugobjects detects an error, then it calls the
- fixup_activate function of the object type description
- structure if provided by the caller. The fixup function can
- correct the problem before the real activation of the object
- happens. E.g. it can deactivate an active object in order to
- prevent damage to the subsystem.
- </para>
- <para>
- When the real object is not yet tracked by debugobjects then
- the fixup_activate function is called if available. This is
- necessary to allow the legitimate activation of statically
- allocated and initialized objects. The fixup function checks
- whether the object is valid and calls the debug_objects_init()
- function to initialize the tracking of this object.
- </para>
- <para>
- When the activation is legitimate, then the state of the
- associated tracker object is set to ODEBUG_STATE_ACTIVE.
- </para>
- </sect1>
-
- <sect1 id="debug_object_deactivate">
- <title>debug_object_deactivate</title>
- <para>
- This function is called whenever the deactivation function of
- a real object is called.
- </para>
- <para>
- When the real object is tracked by debugobjects it is checked,
- whether the object can be deactivated. Deactivating is not
- allowed for untracked or destroyed objects.
- </para>
- <para>
- When the deactivation is legitimate, then the state of the
- associated tracker object is set to ODEBUG_STATE_INACTIVE.
- </para>
- </sect1>
-
- <sect1 id="debug_object_destroy">
- <title>debug_object_destroy</title>
- <para>
- This function is called to mark an object destroyed. This is
- useful to prevent the usage of invalid objects, which are
- still available in memory: either statically allocated objects
- or objects which are freed later.
- </para>
- <para>
- When the real object is tracked by debugobjects it is checked,
- whether the object can be destroyed. Destruction is not
- allowed for active and destroyed objects. When debugobjects
- detects an error, then it calls the fixup_destroy function of
- the object type description structure if provided by the
- caller. The fixup function can correct the problem before the
- real destruction of the object happens. E.g. it can deactivate
- an active object in order to prevent damage to the subsystem.
- </para>
- <para>
- When the destruction is legitimate, then the state of the
- associated tracker object is set to ODEBUG_STATE_DESTROYED.
- </para>
- </sect1>
-
- <sect1 id="debug_object_free">
- <title>debug_object_free</title>
- <para>
- This function is called before an object is freed.
- </para>
- <para>
- When the real object is tracked by debugobjects it is checked,
- whether the object can be freed. Free is not allowed for
- active objects. When debugobjects detects an error, then it
- calls the fixup_free function of the object type description
- structure if provided by the caller. The fixup function can
- correct the problem before the real free of the object
- happens. E.g. it can deactivate an active object in order to
- prevent damage to the subsystem.
- </para>
- <para>
- Note that debug_object_free removes the object from the
- tracker. Later usage of the object is detected by the other
- debug checks.
- </para>
- </sect1>
-
- <sect1 id="debug_object_assert_init">
- <title>debug_object_assert_init</title>
- <para>
- This function is called to assert that an object has been
- initialized.
- </para>
- <para>
- When the real object is not tracked by debugobjects, it calls
- fixup_assert_init of the object type description structure
- provided by the caller, with the hardcoded object state
- ODEBUG_NOT_AVAILABLE. The fixup function can correct the problem
- by calling debug_object_init and other specific initializing
- functions.
- </para>
- <para>
- When the real object is already tracked by debugobjects it is
- ignored.
- </para>
- </sect1>
- </chapter>
- <chapter id="fixupfunctions">
- <title>Fixup functions</title>
- <sect1 id="debug_obj_descr">
- <title>Debug object type description structure</title>
-!Iinclude/linux/debugobjects.h
- </sect1>
- <sect1 id="fixup_init">
- <title>fixup_init</title>
- <para>
- This function is called from the debug code whenever a problem
- in debug_object_init is detected. The function takes the
- address of the object and the state which is currently
- recorded in the tracker.
- </para>
- <para>
- Called from debug_object_init when the object state is:
- <itemizedlist>
- <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
- </itemizedlist>
- </para>
- <para>
- The function returns true when the fixup was successful,
- otherwise false. The return value is used to update the
- statistics.
- </para>
- <para>
- Note, that the function needs to call the debug_object_init()
- function again, after the damage has been repaired in order to
- keep the state consistent.
- </para>
- </sect1>
-
- <sect1 id="fixup_activate">
- <title>fixup_activate</title>
- <para>
- This function is called from the debug code whenever a problem
- in debug_object_activate is detected.
- </para>
- <para>
- Called from debug_object_activate when the object state is:
- <itemizedlist>
- <listitem><para>ODEBUG_STATE_NOTAVAILABLE</para></listitem>
- <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
- </itemizedlist>
- </para>
- <para>
- The function returns true when the fixup was successful,
- otherwise false. The return value is used to update the
- statistics.
- </para>
- <para>
- Note that the function needs to call the debug_object_activate()
- function again after the damage has been repaired in order to
- keep the state consistent.
- </para>
- <para>
- The activation of statically initialized objects is a special
- case. When debug_object_activate() has no tracked object for
- this object address then fixup_activate() is called with
- object state ODEBUG_STATE_NOTAVAILABLE. The fixup function
- needs to check whether this is a legitimate case of a
- statically initialized object or not. In case it is it calls
- debug_object_init() and debug_object_activate() to make the
- object known to the tracker and marked active. In this case
- the function should return false because this is not a real
- fixup.
- </para>
- </sect1>
-
- <sect1 id="fixup_destroy">
- <title>fixup_destroy</title>
- <para>
- This function is called from the debug code whenever a problem
- in debug_object_destroy is detected.
- </para>
- <para>
- Called from debug_object_destroy when the object state is:
- <itemizedlist>
- <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
- </itemizedlist>
- </para>
- <para>
- The function returns true when the fixup was successful,
- otherwise false. The return value is used to update the
- statistics.
- </para>
- </sect1>
- <sect1 id="fixup_free">
- <title>fixup_free</title>
- <para>
- This function is called from the debug code whenever a problem
- in debug_object_free is detected. Further it can be called
- from the debug checks in kfree/vfree, when an active object is
- detected from the debug_check_no_obj_freed() sanity checks.
- </para>
- <para>
- Called from debug_object_free() or debug_check_no_obj_freed()
- when the object state is:
- <itemizedlist>
- <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
- </itemizedlist>
- </para>
- <para>
- The function returns true when the fixup was successful,
- otherwise false. The return value is used to update the
- statistics.
- </para>
- </sect1>
- <sect1 id="fixup_assert_init">
- <title>fixup_assert_init</title>
- <para>
- This function is called from the debug code whenever a problem
- in debug_object_assert_init is detected.
- </para>
- <para>
- Called from debug_object_assert_init() with a hardcoded state
- ODEBUG_STATE_NOTAVAILABLE when the object is not found in the
- debug bucket.
- </para>
- <para>
- The function returns true when the fixup was successful,
- otherwise false. The return value is used to update the
- statistics.
- </para>
- <para>
- Note, this function should make sure debug_object_init() is
- called before returning.
- </para>
- <para>
- The handling of statically initialized objects is a special
- case. The fixup function should check if this is a legitimate
- case of a statically initialized object or not. In this case only
- debug_object_init() should be called to make the object known to
- the tracker. Then the function should return false because this
- is not
- a real fixup.
- </para>
- </sect1>
- </chapter>
- <chapter id="bugs">
- <title>Known Bugs And Assumptions</title>
- <para>
- None (knock on wood).
- </para>
- </chapter>
-</book>
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index 2a272275c81b..da5c087462b1 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -1208,8 +1208,8 @@ static struct block_device_operations opt_fops = {
<listitem>
<para>
- Finally, don't forget to read <filename>Documentation/SubmittingPatches</filename>
- and possibly <filename>Documentation/SubmittingDrivers</filename>.
+ Finally, don't forget to read <filename>Documentation/process/submitting-patches.rst</filename>
+ and possibly <filename>Documentation/process/submitting-drivers.rst</filename>.
</para>
</listitem>
</itemizedlist>
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
deleted file mode 100644
index b57a9ede3224..000000000000
--- a/Documentation/DocBook/tracepoint.tmpl
+++ /dev/null
@@ -1,112 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="Tracepoints">
- <bookinfo>
- <title>The Linux Kernel Tracepoint API</title>
-
- <authorgroup>
- <author>
- <firstname>Jason</firstname>
- <surname>Baron</surname>
- <affiliation>
- <address>
- <email>jbaron@redhat.com</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>William</firstname>
- <surname>Cohen</surname>
- <affiliation>
- <address>
- <email>wcohen@redhat.com</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
- <toc></toc>
- <chapter id="intro">
- <title>Introduction</title>
- <para>
- Tracepoints are static probe points that are located in strategic points
- throughout the kernel. 'Probes' register/unregister with tracepoints
- via a callback mechanism. The 'probes' are strictly typed functions that
- are passed a unique set of parameters defined by each tracepoint.
- </para>
-
- <para>
- From this simple callback mechanism, 'probes' can be used to profile, debug,
- and understand kernel behavior. There are a number of tools that provide a
- framework for using 'probes'. These tools include Systemtap, ftrace, and
- LTTng.
- </para>
-
- <para>
- Tracepoints are defined in a number of header files via various macros. Thus,
- the purpose of this document is to provide a clear accounting of the available
- tracepoints. The intention is to understand not only what tracepoints are
- available but also to understand where future tracepoints might be added.
- </para>
-
- <para>
- The API presented has functions of the form:
- <function>trace_tracepointname(function parameters)</function>. These are the
- tracepoints callbacks that are found throughout the code. Registering and
- unregistering probes with these callback sites is covered in the
- <filename>Documentation/trace/*</filename> directory.
- </para>
- </chapter>
-
- <chapter id="irq">
- <title>IRQ</title>
-!Iinclude/trace/events/irq.h
- </chapter>
-
- <chapter id="signal">
- <title>SIGNAL</title>
-!Iinclude/trace/events/signal.h
- </chapter>
-
- <chapter id="block">
- <title>Block IO</title>
-!Iinclude/trace/events/block.h
- </chapter>
-
- <chapter id="workqueue">
- <title>Workqueue</title>
-!Iinclude/trace/events/workqueue.h
- </chapter>
-</book>
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index cd0e452dfed5..5210f8a577c6 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -46,6 +46,13 @@ GPL version 2.
<revhistory>
<revision>
+ <revnumber>0.10</revnumber>
+ <date>2016-10-17</date>
+ <authorinitials>sch</authorinitials>
+ <revremark>Added generic hyperv driver
+ </revremark>
+ </revision>
+ <revision>
<revnumber>0.9</revnumber>
<date>2009-07-16</date>
<authorinitials>mst</authorinitials>
@@ -1033,6 +1040,61 @@ int main()
</chapter>
+<chapter id="uio_hv_generic" xreflabel="Using Generic driver for Hyper-V VMBUS">
+<?dbhtml filename="uio_hv_generic.html"?>
+<title>Generic Hyper-V UIO driver</title>
+ <para>
+ The generic driver is a kernel module named uio_hv_generic.
+ It supports devices on the Hyper-V VMBus, similarly to what uio_pci_generic
+ does on the PCI bus.
+ </para>
+
+<sect1 id="uio_hv_generic_binding">
+<title>Making the driver recognize the device</title>
+ <para>
+Since the driver does not declare any device GUIDs, it will not be loaded
+automatically and will not automatically bind to any devices. You must load it
+and assign the GUID to the driver yourself. For example, to use the network
+device GUID:
+ <programlisting>
+ modprobe uio_hv_generic
+ echo &quot;f8615163-df3e-46c5-913f-f2d2f965ed0e&quot; &gt; /sys/bus/vmbus/drivers/uio_hv_generic/new_id
+ </programlisting>
+ </para>
+ <para>
+If there already is a hardware-specific kernel driver for the device, the
+generic driver still won't bind to it. In this case, if you want to use the
+generic driver (why would you?), you'll have to manually unbind the
+hardware-specific driver and bind the generic driver, like this:
+ <programlisting>
+ echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/hv_netvsc/unbind
+ echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/uio_hv_generic/bind
+ </programlisting>
+ </para>
+ <para>
+You can verify that the device has been bound to the driver
+by looking for it in sysfs, for example like the following:
+ <programlisting>
+ ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver
+ </programlisting>
+If successful, this should print:
+ <programlisting>
+ .../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -&gt; ../../../bus/vmbus/drivers/uio_hv_generic
+ </programlisting>
+ </para>
+</sect1>
+
+<sect1 id="uio_hv_generic_internals">
+<title>Things to know about uio_hv_generic</title>
+ <para>
+On each interrupt, uio_hv_generic sets the Interrupt Disable bit.
+This prevents the device from generating further interrupts
+until the bit is cleared. The userspace driver should clear this
+bit before blocking and waiting for more interrupts.
+ </para>
+</sect1>
+</chapter>
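
A minimal userspace wait loop for such a UIO device could look like the sketch
below. The /dev/uio0 node name is an assumption, and the write that clears the
Interrupt Disable bit is device-specific, so it is only marked with a comment.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/uio0", O_RDWR);     /* device node is an assumption */
        uint32_t count;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        for (;;) {
                /* A read() on a UIO device blocks until the next interrupt
                 * and returns the total interrupt count seen so far. */
                if (read(fd, &count, sizeof(count)) != sizeof(count))
                        break;
                printf("interrupt #%u\n", count);
                /* Device-specific: clear the Interrupt Disable bit here
                 * (e.g. through the mapped registers) before blocking again. */
        }
        close(fd);
        return 0;
}
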
+
<appendix id="app1">
<title>Further information</title>
<itemizedlist>
diff --git a/Documentation/DocBook/usb.tmpl b/Documentation/DocBook/usb.tmpl
deleted file mode 100644
index bc776be0f19c..000000000000
--- a/Documentation/DocBook/usb.tmpl
+++ /dev/null
@@ -1,992 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="Linux-USB-API">
- <bookinfo>
- <title>The Linux-USB Host Side API</title>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-<chapter id="intro">
- <title>Introduction to USB on Linux</title>
-
- <para>A Universal Serial Bus (USB) is used to connect a host,
- such as a PC or workstation, to a number of peripheral
- devices. USB uses a tree structure, with the host as the
- root (the system's master), hubs as interior nodes, and
- peripherals as leaves (and slaves).
- Modern PCs support several such trees of USB devices, usually
- one USB 2.0 tree (480 Mbit/sec each) with
- a few USB 1.1 trees (12 Mbit/sec each) that are used when you
- connect a USB 1.1 device directly to the machine's "root hub".
- </para>
-
- <para>That master/slave asymmetry was designed-in for a number of
- reasons, one being ease of use. It is not physically possible to
- assemble (legal) USB cables incorrectly: all upstream "to the host"
- connectors are the rectangular type (matching the sockets on
- root hubs), and all downstream connectors are the squarish type
- (or they are built into the peripheral).
- Also, the host software doesn't need to deal with distributed
- auto-configuration since the pre-designated master node manages all that.
- And finally, at the electrical level, bus protocol overhead is reduced by
- eliminating arbitration and moving scheduling into the host software.
- </para>
-
- <para>USB 1.0 was announced in January 1996 and was revised
- as USB 1.1 (with improvements in hub specification and
- support for interrupt-out transfers) in September 1998.
- USB 2.0 was released in April 2000, adding high-speed
- transfers and transaction-translating hubs (used for USB 1.1
- and 1.0 backward compatibility).
- </para>
-
- <para>Kernel developers added USB support to Linux early in the 2.2 kernel
- series, shortly before 2.3 development forked. Updates from 2.3 were
- regularly folded back into 2.2 releases, which improved reliability and
- brought <filename>/sbin/hotplug</filename> support as well as more drivers.
- Such improvements were continued in the 2.5 kernel series, where they added
- USB 2.0 support, improved performance, and made the host controller drivers
- (HCDs) more consistent. They also simplified the API (to make bugs less
- likely) and added internal "kerneldoc" documentation.
- </para>
-
- <para>Linux can run inside USB devices as well as on
- the hosts that control the devices.
- But USB device drivers running inside those peripherals
- don't do the same things as the ones running inside hosts,
- so they've been given a different name:
- <emphasis>gadget drivers</emphasis>.
- This document does not cover gadget drivers.
- </para>
-
- </chapter>
-
-<chapter id="host">
- <title>USB Host-Side API Model</title>
-
- <para>Host-side drivers for USB devices talk to the "usbcore" APIs.
- There are two. One is intended for
- <emphasis>general-purpose</emphasis> drivers (exposed through
- driver frameworks), and the other is for drivers that are
- <emphasis>part of the core</emphasis>.
- Such core drivers include the <emphasis>hub</emphasis> driver
- (which manages trees of USB devices) and several different kinds
- of <emphasis>host controller drivers</emphasis>,
- which control individual busses.
- </para>
-
- <para>The device model seen by USB drivers is relatively complex.
- </para>
-
- <itemizedlist>
-
- <listitem><para>USB supports four kinds of data transfers
- (control, bulk, interrupt, and isochronous). Two of them (control
- and bulk) use bandwidth as it's available,
- while the other two (interrupt and isochronous)
- are scheduled to provide guaranteed bandwidth.
- </para></listitem>
-
- <listitem><para>The device description model includes one or more
- "configurations" per device, only one of which is active at a time.
- Devices that are capable of high-speed operation must also support
- full-speed configurations, along with a way to ask about the
- "other speed" configurations which might be used.
- </para></listitem>
-
- <listitem><para>Configurations have one or more "interfaces", each
- of which may have "alternate settings". Interfaces may be
- standardized by USB "Class" specifications, or may be specific to
- a vendor or device.</para>
-
- <para>USB device drivers actually bind to interfaces, not devices.
- Think of them as "interface drivers", though you
- may not see many devices where the distinction is important.
- <emphasis>Most USB devices are simple, with only one configuration,
- one interface, and one alternate setting.</emphasis>
- </para></listitem>
-
- <listitem><para>Interfaces have one or more "endpoints", each of
- which supports one type and direction of data transfer such as
- "bulk out" or "interrupt in". The entire configuration may have
- up to sixteen endpoints in each direction, allocated as needed
- among all the interfaces.
- </para></listitem>
-
- <listitem><para>Data transfer on USB is packetized; each endpoint
- has a maximum packet size.
- Drivers must often be aware of conventions such as flagging the end
- of bulk transfers using "short" (including zero length) packets.
- </para></listitem>
-
- <listitem><para>The Linux USB API supports synchronous calls for
- control and bulk messages.
- It also supports asynchronous calls for all kinds of data transfer,
- using request structures called "URBs" (USB Request Blocks).
- </para></listitem>
-
- </itemizedlist>
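
To make the interface-centric model above concrete, a minimal host-side driver
skeleton might look as follows; the vendor/product IDs and names are
placeholders rather than values taken from this document.

#include <linux/module.h>
#include <linux/usb.h>

/* Placeholder IDs -- a real driver matches its own hardware. */
static const struct usb_device_id skel_id_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },
        { }                                     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_id_table);

/* Drivers bind to interfaces, not to whole devices. */
static int skel_probe(struct usb_interface *intf,
                      const struct usb_device_id *id)
{
        dev_info(&intf->dev, "interface %d bound\n",
                 intf->cur_altsetting->desc.bInterfaceNumber);
        return 0;
}

static void skel_disconnect(struct usb_interface *intf)
{
        dev_info(&intf->dev, "interface unbound\n");
}

static struct usb_driver skel_driver = {
        .name       = "usb-skel",
        .probe      = skel_probe,
        .disconnect = skel_disconnect,
        .id_table   = skel_id_table,
};
module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");
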
-
- <para>Accordingly, the USB Core API exposed to device drivers
- covers quite a lot of territory. You'll probably need to consult
- the USB 2.0 specification, available online from www.usb.org at
- no cost, as well as class or device specifications.
- </para>
-
- <para>The only host-side drivers that actually touch hardware
- (reading/writing registers, handling IRQs, and so on) are the HCDs.
- In theory, all HCDs provide the same functionality through the same
- API. In practice, that's becoming more true on the 2.5 kernels,
- but there are still differences that crop up especially with
- fault handling. Different controllers don't necessarily report
- the same aspects of failures, and recovery from faults (including
- software-induced ones like unlinking an URB) isn't yet fully
- consistent.
- Device driver authors should make a point of doing disconnect
- testing (while the device is active) with each different host
- controller driver, to make sure drivers don't have bugs of
- their own as well as to make sure they aren't relying on some
- HCD-specific behavior.
- (You will need external USB 1.1 and/or
- USB 2.0 hubs to perform all those tests.)
- </para>
-
- </chapter>
-
-<chapter id="types"><title>USB-Standard Types</title>
-
- <para>In <filename>&lt;linux/usb/ch9.h&gt;</filename> you will find
- the USB data types defined in chapter 9 of the USB specification.
- These data types are used throughout USB, and in APIs including
- this host side API, gadget APIs, and usbfs.
- </para>
-
-!Iinclude/linux/usb/ch9.h
-
- </chapter>
-
-<chapter id="hostside"><title>Host-Side Data Types and Macros</title>
-
- <para>The host side API exposes several layers to drivers, some of
- which are more necessary than others.
- These support lifecycle models for host side drivers
- and devices, and support passing buffers through usbcore to
- some HCD that performs the I/O for the device driver.
- </para>
-
-
-!Iinclude/linux/usb.h
-
- </chapter>
-
- <chapter id="usbcore"><title>USB Core APIs</title>
-
- <para>There are two basic I/O models in the USB API.
- The most elemental one is asynchronous: drivers submit requests
- in the form of an URB, and the URB's completion callback
- handles the next step.
- All USB transfer types support that model, although there
- are special cases for control URBs (which always have setup
- and status stages, but may not have a data stage) and
- isochronous URBs (which allow large packets and include
- per-packet fault reports).
- Built on top of that is synchronous API support, where a
- driver calls a routine that allocates one or more URBs,
- submits them, and waits until they complete.
- There are synchronous wrappers for single-buffer control
- and bulk transfers (which are awkward to use in some
- driver disconnect scenarios), and for scatterlist based
- streaming i/o (bulk or interrupt).
- </para>
-
- <para>USB drivers need to provide buffers that can be
- used for DMA, although they don't necessarily need to
- provide the DMA mapping themselves.
- There are APIs to use when allocating DMA buffers,
- which can prevent use of bounce buffers on some systems.
- In some cases, drivers may be able to rely on 64bit DMA
- to eliminate another kind of bounce buffer.
- </para>
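
One such API is usb_alloc_coherent(); a sketch of attaching a pre-mapped
buffer to an already-allocated URB (the 512-byte size is arbitrary) could look
like this:

#include <linux/usb.h>

/* 'udev' and 'urb' come from the driver's probe path. The buffer is freed
 * later with usb_free_coherent(udev, 512, urb->transfer_buffer,
 * urb->transfer_dma). */
static int give_urb_a_dma_buffer(struct usb_device *udev, struct urb *urb)
{
        urb->transfer_buffer = usb_alloc_coherent(udev, 512, GFP_KERNEL,
                                                  &urb->transfer_dma);
        if (!urb->transfer_buffer)
                return -ENOMEM;
        urb->transfer_buffer_length = 512;
        /* Tell usbcore the DMA mapping has already been done. */
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        return 0;
}
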
-
-!Edrivers/usb/core/urb.c
-!Edrivers/usb/core/message.c
-!Edrivers/usb/core/file.c
-!Edrivers/usb/core/driver.c
-!Edrivers/usb/core/usb.c
-!Edrivers/usb/core/hub.c
- </chapter>
-
- <chapter id="hcd"><title>Host Controller APIs</title>
-
- <para>These APIs are only for use by host controller drivers,
- most of which implement standard register interfaces such as
- EHCI, OHCI, or UHCI.
- UHCI was one of the first interfaces, designed by Intel and
- also used by VIA; it doesn't do much in hardware.
- OHCI was designed later, to have the hardware do more work
- (bigger transfers, tracking protocol state, and so on).
- EHCI was designed with USB 2.0; its design has features that
- resemble OHCI (hardware does much more work) as well as
- UHCI (some parts of ISO support, TD list processing).
- </para>
-
- <para>There are host controllers other than the "big three",
- although most PCI based controllers (and a few non-PCI based
- ones) use one of those interfaces.
- Not all host controllers use DMA; some use PIO, and there
- is also a simulator.
- </para>
-
- <para>The same basic APIs are available to drivers for all
- those controllers.
- For historical reasons they are in two layers:
- <structname>struct usb_bus</structname> is a rather thin
- layer that became available in the 2.2 kernels, while
- <structname>struct usb_hcd</structname> is a more featureful
- layer (available in later 2.4 kernels and in 2.5) that
- lets HCDs share common code, to shrink driver size
- and significantly reduce hcd-specific behaviors.
- </para>
-
-!Edrivers/usb/core/hcd.c
-!Edrivers/usb/core/hcd-pci.c
-!Idrivers/usb/core/buffer.c
- </chapter>
-
- <chapter id="usbfs">
- <title>The USB Filesystem (usbfs)</title>
-
- <para>This chapter presents the Linux <emphasis>usbfs</emphasis>.
- You may prefer to avoid writing new kernel code for your
- USB driver; that's the problem that usbfs set out to solve.
- User mode device drivers are usually packaged as applications
- or libraries, and may use usbfs through some programming library
- that wraps it. Such libraries include
- <ulink url="http://libusb.sourceforge.net">libusb</ulink>
- for C/C++, and
- <ulink url="http://jUSB.sourceforge.net">jUSB</ulink> for Java.
- </para>
-
- <note><title>Unfinished</title>
- <para>This particular documentation is incomplete,
- especially with respect to the asynchronous mode.
- As of kernel 2.5.66 the code and this (new) documentation
- need to be cross-reviewed.
- </para>
- </note>
-
- <para>Configure usbfs into Linux kernels by enabling the
- <emphasis>USB filesystem</emphasis> option (CONFIG_USB_DEVICEFS),
- and you get basic support for user mode USB device drivers.
- Until relatively recently it was often (confusingly) called
- <emphasis>usbdevfs</emphasis> although it wasn't solving what
- <emphasis>devfs</emphasis> was.
- Every USB device will appear in usbfs, regardless of whether or
- not it has a kernel driver.
- </para>
-
- <sect1 id="usbfs-files">
- <title>What files are in "usbfs"?</title>
-
- <para>Conventionally mounted at
- <filename>/proc/bus/usb</filename>, usbfs
- features include:
- <itemizedlist>
- <listitem><para><filename>/proc/bus/usb/devices</filename>
- ... a text file
- showing each of the USB devices known to the kernel,
- and their configuration descriptors.
- You can also poll() this to learn about new devices.
- </para></listitem>
- <listitem><para><filename>/proc/bus/usb/BBB/DDD</filename>
- ... magic files
- exposing each device's configuration descriptors, and
- supporting a series of ioctls for making device requests,
- including I/O to devices. (Purely for access by programs.)
- </para></listitem>
- </itemizedlist>
- </para>
-
- <para> Each bus is given a number (BBB) based on when it was
- enumerated; within each bus, each device is given a similar
- number (DDD).
- Those BBB/DDD paths are not "stable" identifiers;
- expect them to change even if you always leave the devices
- plugged in to the same hub port.
- <emphasis>Don't even think of saving these in application
- configuration files.</emphasis>
- Stable identifiers are available, for user mode applications
- that want to use them. HID and networking devices expose
- these stable IDs, so that for example you can be sure that
- you told the right UPS to power down its second server.
- "usbfs" doesn't (yet) expose those IDs.
- </para>
-
- </sect1>
-
- <sect1 id="usbfs-fstab">
- <title>Mounting and Access Control</title>
-
- <para>There are a number of mount options for usbfs, which will
- be of most interest to you if you need to override the default
- access control policy.
- That policy is that only root may read or write device files
- (<filename>/proc/bus/BBB/DDD</filename>) although anyone may read
- the <filename>devices</filename>
- or <filename>drivers</filename> files.
- I/O requests to the device also need the CAP_SYS_RAWIO capability.
- </para>
-
- <para>The significance of that is that by default, all user mode
- device drivers need super-user privileges.
- You can change modes or ownership in a driver setup
- when the device hotplugs, or maybe just start the
- driver right then, as a privileged server (or some activity
- within one).
- That's the most secure approach for multi-user systems,
- but for single user systems ("trusted" by that user)
- it's more convenient just to grant everyone all access
- (using the <emphasis>devmode=0666</emphasis> option)
- so the driver can start whenever it's needed.
- </para>
-
- <para>The mount options for usbfs, usable in /etc/fstab or
- in command line invocations of <emphasis>mount</emphasis>, are:
-
- <variablelist>
- <varlistentry>
- <term><emphasis>busgid</emphasis>=NNNNN</term>
- <listitem><para>Controls the GID used for the
- /proc/bus/usb/BBB
- directories. (Default: 0)</para></listitem></varlistentry>
- <varlistentry><term><emphasis>busmode</emphasis>=MMM</term>
- <listitem><para>Controls the file mode used for the
- /proc/bus/usb/BBB
- directories. (Default: 0555)
- </para></listitem></varlistentry>
- <varlistentry><term><emphasis>busuid</emphasis>=NNNNN</term>
- <listitem><para>Controls the UID used for the
- /proc/bus/usb/BBB
- directories. (Default: 0)</para></listitem></varlistentry>
-
- <varlistentry><term><emphasis>devgid</emphasis>=NNNNN</term>
- <listitem><para>Controls the GID used for the
- /proc/bus/usb/BBB/DDD
- files. (Default: 0)</para></listitem></varlistentry>
- <varlistentry><term><emphasis>devmode</emphasis>=MMM</term>
- <listitem><para>Controls the file mode used for the
- /proc/bus/usb/BBB/DDD
- files. (Default: 0644)</para></listitem></varlistentry>
- <varlistentry><term><emphasis>devuid</emphasis>=NNNNN</term>
- <listitem><para>Controls the UID used for the
- /proc/bus/usb/BBB/DDD
- files. (Default: 0)</para></listitem></varlistentry>
-
- <varlistentry><term><emphasis>listgid</emphasis>=NNNNN</term>
- <listitem><para>Controls the GID used for the
- /proc/bus/usb/devices and drivers files.
- (Default: 0)</para></listitem></varlistentry>
- <varlistentry><term><emphasis>listmode</emphasis>=MMM</term>
- <listitem><para>Controls the file mode used for the
- /proc/bus/usb/devices and drivers files.
- (Default: 0444)</para></listitem></varlistentry>
- <varlistentry><term><emphasis>listuid</emphasis>=NNNNN</term>
- <listitem><para>Controls the UID used for the
- /proc/bus/usb/devices and drivers files.
- (Default: 0)</para></listitem></varlistentry>
- </variablelist>
-
- </para>
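
For instance, a single /etc/fstab entry combining a few of these options might
read as follows; the group ID 85 is only an illustrative value:

    none  /proc/bus/usb  usbfs  devgid=85,devmode=0660,listmode=0444  0  0
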
-
- <para>Note that many Linux distributions hard-wire the mount options
- for usbfs in their init scripts, such as
- <filename>/etc/rc.d/rc.sysinit</filename>,
- rather than making it easy to set this per-system
- policy in <filename>/etc/fstab</filename>.
- </para>
-
- </sect1>
-
- <sect1 id="usbfs-devices">
- <title>/proc/bus/usb/devices</title>
-
- <para>This file is handy for status viewing tools in user
- mode, which can scan the text format and ignore most of it.
- More detailed device status (including class and vendor
- status) is available from device-specific files.
- For information about the current format of this file,
- see the
- <filename>Documentation/usb/proc_usb_info.txt</filename>
- file in your Linux kernel sources.
- </para>
-
- <para>This file, in combination with the poll() system call, can
- also be used to detect when devices are added or removed:
-<programlisting>int fd;
-struct pollfd pfd;
-
-fd = open("/proc/bus/usb/devices", O_RDONLY);
-pfd = (struct pollfd) { fd, POLLIN, 0 };
-for (;;) {
- /* The first time through, this call will return immediately. */
- poll(&amp;pfd, 1, -1);
-
- /* To see what's changed, compare the file's previous and current
- contents or scan the filesystem. (Scanning is more precise.) */
-}</programlisting>
- Note that this behavior is intended to be used for informational
- and debug purposes. It would be more appropriate to use programs
- such as udev or HAL to initialize a device or start a user-mode
- helper program, for instance.
- </para>
- </sect1>
-
- <sect1 id="usbfs-bbbddd">
- <title>/proc/bus/usb/BBB/DDD</title>
-
- <para>Use these files in one of these basic ways:
- </para>
-
- <para><emphasis>They can be read,</emphasis>
- producing first the device descriptor
- (18 bytes) and then the descriptors for the current configuration.
- See the USB 2.0 spec for details about those binary data formats.
- You'll need to convert most multibyte values from little endian
- format to your native host byte order, although a few of the
- fields in the device descriptor (both of the BCD-encoded fields,
- and the vendor and product IDs) will be byteswapped for you.
- Note that configuration descriptors include descriptors for
- interfaces, altsettings, endpoints, and maybe additional
- class descriptors.
- </para>
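
As an example of the read-only usage, the sketch below pulls just the 18-byte
device descriptor out of one of these files; the 001/002 path is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/usb/ch9.h>              /* struct usb_device_descriptor */

int main(void)
{
        struct usb_device_descriptor desc;
        /* BBB/DDD are assigned at enumeration time; this path is an example. */
        int fd = open("/proc/bus/usb/001/002", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (read(fd, &desc, sizeof(desc)) != sizeof(desc)) {
                perror("read");
                return 1;
        }
        /* usbfs byteswaps idVendor/idProduct and the BCD fields to host
         * order; other multibyte fields stay little endian. */
        printf("vendor %04x product %04x\n", desc.idVendor, desc.idProduct);
        close(fd);
        return 0;
}
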
-
- <para><emphasis>Perform USB operations</emphasis> using
- <emphasis>ioctl()</emphasis> requests to make endpoint I/O
- requests (synchronously or asynchronously) or manage
- the device.
- These requests need the CAP_SYS_RAWIO capability,
- as well as filesystem access permissions.
- Only one ioctl request can be made on one of these
- device files at a time.
- This means that if you are synchronously reading an endpoint
- from one thread, you won't be able to write to a different
- endpoint from another thread until the read completes.
- This works for <emphasis>half duplex</emphasis> protocols,
- but otherwise you'd use asynchronous i/o requests.
- </para>
-
- </sect1>
-
-
- <sect1 id="usbfs-lifecycle">
- <title>Life Cycle of User Mode Drivers</title>
-
- <para>Such a driver first needs to find a device file
- for a device it knows how to handle.
- Maybe it was told about it because a
- <filename>/sbin/hotplug</filename> event handling agent
- chose that driver to handle the new device.
- Or maybe it's an application that scans all the
- /proc/bus/usb device files, and ignores most devices.
- In either case, it should <function>read()</function> all
- the descriptors from the device file,
- and check them against what it knows how to handle.
- It might just reject everything except a particular
- vendor and product ID, or need a more complex policy.
- </para>
-
- <para>Never assume there will only be one such device
- on the system at a time!
- If your code can't handle more than one device at
- a time, at least detect when there's more than one, and
- have your users choose which device to use.
- </para>
-
- <para>Once your user mode driver knows what device to use,
- it interacts with it in either of two styles.
- The simple style is to make only control requests; some
- devices don't need more complex interactions than those.
- (An example might be software using vendor-specific control
- requests for some initialization or configuration tasks,
- with a kernel driver for the rest.)
- </para>
-
- <para>More likely, you need a more complex style driver:
- one using non-control endpoints, reading or writing data
- and claiming exclusive use of an interface.
- <emphasis>Bulk</emphasis> transfers are easiest to use,
- but only their sibling <emphasis>interrupt</emphasis> transfers
- work with low speed devices.
- Both interrupt and <emphasis>isochronous</emphasis> transfers
- offer service guarantees because their bandwidth is reserved.
- Such "periodic" transfers are awkward to use through usbfs,
- unless you're using the asynchronous calls. However, interrupt
- transfers can also be used in a synchronous "one shot" style.
- </para>
-
- <para>Your user-mode driver should never need to worry
- about cleaning up request state when the device is
- disconnected, although it should close its open file
- descriptors as soon as it starts seeing the ENODEV
- errors.
- </para>
-
- </sect1>
-
- <sect1 id="usbfs-ioctl"><title>The ioctl() Requests</title>
-
- <para>To use these ioctls, you need to include the following
- headers in your userspace program:
-<programlisting>#include &lt;linux/usb.h&gt;
-#include &lt;linux/usbdevice_fs.h&gt;
-#include &lt;asm/byteorder.h&gt;</programlisting>
- The standard USB device model requests, from "Chapter 9" of
- the USB 2.0 specification, are automatically included from
- the <filename>&lt;linux/usb/ch9.h&gt;</filename> header.
- </para>
-
- <para>Unless noted otherwise, the ioctl requests
- described here will
- update the modification time on the usbfs file to which
- they are applied (unless they fail).
- A return of zero indicates success; otherwise, a
- standard USB error code is returned. (These are
- documented in
- <filename>Documentation/usb/error-codes.txt</filename>
- in your kernel sources.)
- </para>
-
- <para>Each of these files multiplexes access to several
- I/O streams, one per endpoint.
- Each device has one control endpoint (endpoint zero)
- which supports a limited RPC-style access.
- Devices are configured
- by hub_wq (in the kernel) setting a device-wide
- <emphasis>configuration</emphasis> that affects things
- like power consumption and basic functionality.
- The endpoints are part of USB <emphasis>interfaces</emphasis>,
- which may have <emphasis>altsettings</emphasis>
- affecting things like which endpoints are available.
- Many devices only have a single configuration and interface,
- so drivers for them will ignore configurations and altsettings.
- </para>
-
-
- <sect2 id="usbfs-mgmt">
- <title>Management/Status Requests</title>
-
- <para>A number of usbfs requests don't deal very directly
- with device I/O.
- They mostly relate to device management and status.
- These are all synchronous requests.
- </para>
-
- <variablelist>
-
- <varlistentry><term>USBDEVFS_CLAIMINTERFACE</term>
- <listitem><para>This is used to force usbfs to
- claim a specific interface,
- which has not previously been claimed by usbfs or any other
- kernel driver.
- The ioctl parameter is an integer holding the number of
- the interface (bInterfaceNumber from descriptor).
- </para><para>
- Note that if your driver doesn't claim an interface
- before trying to use one of its endpoints, and no
- other driver has bound to it, then the interface is
- automatically claimed by usbfs.
- </para><para>
- This claim will be released by a RELEASEINTERFACE ioctl,
- or by closing the file descriptor.
- File modification time is not updated by this request.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_CONNECTINFO</term>
- <listitem><para>Says whether the device is lowspeed.
- The ioctl parameter points to a structure like this:
-<programlisting>struct usbdevfs_connectinfo {
- unsigned int devnum;
- unsigned char slow;
-}; </programlisting>
- File modification time is not updated by this request.
- </para><para>
- <emphasis>You can't tell whether a "not slow"
- device is connected at high speed (480 MBit/sec)
- or just full speed (12 MBit/sec).</emphasis>
- You should know the devnum value already,
- it's the DDD value of the device file name.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_GETDRIVER</term>
- <listitem><para>Returns the name of the kernel driver
- bound to a given interface (a string). Parameter
- is a pointer to this structure, which is modified:
-<programlisting>struct usbdevfs_getdriver {
- unsigned int interface;
- char driver[USBDEVFS_MAXDRIVERNAME + 1];
-};</programlisting>
- File modification time is not updated by this request.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_IOCTL</term>
- <listitem><para>Passes a request from userspace through
- to a kernel driver that has an ioctl entry in the
- <emphasis>struct usb_driver</emphasis> it registered.
-<programlisting>struct usbdevfs_ioctl {
- int ifno;
- int ioctl_code;
- void *data;
-};
-
-/* user mode call looks like this.
- * 'request' becomes the driver->ioctl() 'code' parameter.
- * the size of 'param' is encoded in 'request', and that data
- * is copied to or from the driver->ioctl() 'buf' parameter.
- */
-static int
-usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
-{
- struct usbdevfs_ioctl wrapper;
-
- wrapper.ifno = ifno;
- wrapper.ioctl_code = request;
- wrapper.data = param;
-
- return ioctl (fd, USBDEVFS_IOCTL, &amp;wrapper);
-} </programlisting>
- File modification time is not updated by this request.
- </para><para>
- This request lets kernel drivers talk to user mode code
- through filesystem operations even when they don't create
- a character or block special device.
- It's also been used to do things like ask devices what
- device special file should be used.
- Two pre-defined ioctls are used
- to disconnect and reconnect kernel drivers, so
- that user mode code can completely manage binding
- and configuration of devices.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_RELEASEINTERFACE</term>
- <listitem><para>This is used to release the claim usbfs
- made on interface, either implicitly or because of a
- USBDEVFS_CLAIMINTERFACE call, before the file
- descriptor is closed.
- The ioctl parameter is an integer holding the number of
- the interface (bInterfaceNumber from descriptor);
- File modification time is not updated by this request.
- </para><warning><para>
- <emphasis>No security check is made to ensure
- that the task which made the claim is the one
- which is releasing it.
- This means that a user mode driver may interfere
- with other ones. </emphasis>
- </para></warning></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_RESETEP</term>
- <listitem><para>Resets the data toggle value for an endpoint
- (bulk or interrupt) to DATA0.
- The ioctl parameter is an integer endpoint number
- (1 to 15, as identified in the endpoint descriptor),
- with USB_DIR_IN added if the device's endpoint sends
- data to the host.
- </para><warning><para>
- <emphasis>Avoid using this request.
- It should probably be removed.</emphasis>
- Using it typically means the device and driver will lose
- toggle synchronization. If you really lost synchronization,
- you likely need to completely handshake with the device,
- using a request like CLEAR_HALT
- or SET_INTERFACE.
- </para></warning></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_DROP_PRIVILEGES</term>
- <listitem><para>This is used to relinquish the ability
- to do certain operations which are considered to be
- privileged on a usbfs file descriptor.
- This includes claiming arbitrary interfaces, resetting
- a device on which there are currently claimed interfaces
- from other users, and issuing USBDEVFS_IOCTL calls.
- The ioctl parameter is a 32 bit mask of interfaces
- the user is allowed to claim on this file descriptor.
- You may issue this ioctl more than one time to narrow
- said mask.
- </para></listitem></varlistentry>
- </variablelist>
-
- </sect2>
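
A short sketch of these management requests, assuming fd is an already-open
/proc/bus/usb/BBB/DDD file descriptor and interface 0 is the one of interest:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int claim_interface_zero(int fd)
{
        unsigned int ifno = 0;                  /* bInterfaceNumber */
        struct usbdevfs_getdriver gd;

        memset(&gd, 0, sizeof(gd));
        gd.interface = ifno;
        if (ioctl(fd, USBDEVFS_GETDRIVER, &gd) == 0)
                printf("interface %u currently bound to %s\n", ifno, gd.driver);

        /* Fails (typically EBUSY) while a kernel driver holds the interface. */
        if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifno) < 0) {
                perror("USBDEVFS_CLAIMINTERFACE");
                return -1;
        }
        /* ... endpoint I/O would go here ... */
        return ioctl(fd, USBDEVFS_RELEASEINTERFACE, &ifno);
}
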
-
- <sect2 id="usbfs-sync">
- <title>Synchronous I/O Support</title>
-
- <para>Synchronous requests involve the kernel blocking
- until the user mode request completes, either by
- finishing successfully or by reporting an error.
- In most cases this is the simplest way to use usbfs,
- although as noted above it does prevent performing I/O
- to more than one endpoint at a time.
- </para>
-
- <variablelist>
-
- <varlistentry><term>USBDEVFS_BULK</term>
- <listitem><para>Issues a bulk read or write request to the
- device.
- The ioctl parameter is a pointer to this structure:
-<programlisting>struct usbdevfs_bulktransfer {
- unsigned int ep;
- unsigned int len;
- unsigned int timeout; /* in milliseconds */
- void *data;
-};</programlisting>
- </para><para>The "ep" value identifies a
- bulk endpoint number (1 to 15, as identified in an endpoint
- descriptor),
- masked with USB_DIR_IN when referring to an endpoint which
- sends data to the host from the device.
- The length of the data buffer is identified by "len";
- recent kernels support requests up to about 128KBytes.
- <emphasis>FIXME say how read length is returned,
- and how short reads are handled.</emphasis>
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_CLEAR_HALT</term>
- <listitem><para>Clears endpoint halt (stall) and
- resets the endpoint toggle. This is only
- meaningful for bulk or interrupt endpoints.
- The ioctl parameter is an integer endpoint number
- (1 to 15, as identified in an endpoint descriptor),
- masked with USB_DIR_IN when referring to an endpoint which
- sends data to the host from the device.
- </para><para>
- Use this on bulk or interrupt endpoints which have
- stalled, returning <emphasis>-EPIPE</emphasis> status
- to a data transfer request.
- Do not issue the control request directly, since
- that could invalidate the host's record of the
- data toggle.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_CONTROL</term>
- <listitem><para>Issues a control request to the device.
- The ioctl parameter points to a structure like this:
-<programlisting>struct usbdevfs_ctrltransfer {
- __u8 bRequestType;
- __u8 bRequest;
- __u16 wValue;
- __u16 wIndex;
- __u16 wLength;
- __u32 timeout; /* in milliseconds */
- void *data;
-};</programlisting>
- </para><para>
- The first eight bytes of this structure are the contents
- of the SETUP packet to be sent to the device; see the
- USB 2.0 specification for details.
- The bRequestType value is composed by combining a
- USB_TYPE_* value, a USB_DIR_* value, and a
- USB_RECIP_* value (from
- <emphasis>&lt;linux/usb.h&gt;</emphasis>).
- If wLength is nonzero, it describes the length of the data
- buffer, which is either written to the device
- (USB_DIR_OUT) or read from the device (USB_DIR_IN).
- </para><para>
- At this writing, you can't transfer more than 4 KBytes
- of data to or from a device; usbfs has a limit, and
- some host controller drivers have a limit.
- (That's not usually a problem.)
- <emphasis>Also</emphasis> there's no way to say it's
- not OK to get a short read back from the device.
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_RESET</term>
- <listitem><para>Does a USB level device reset.
- The ioctl parameter is ignored.
- After the reset, this rebinds all device interfaces.
- File modification time is not updated by this request.
- </para><warning><para>
- <emphasis>Avoid using this call</emphasis>
- until some usbcore bugs get fixed,
- since it does not fully synchronize device, interface,
- and driver (not just usbfs) state.
- </para></warning></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_SETINTERFACE</term>
- <listitem><para>Sets the alternate setting for an
- interface. The ioctl parameter is a pointer to a
- structure like this:
-<programlisting>struct usbdevfs_setinterface {
- unsigned int interface;
- unsigned int altsetting;
-}; </programlisting>
- File modification time is not updated by this request.
- </para><para>
- Those struct members are from some interface descriptor
- applying to the current configuration.
- The interface number is the bInterfaceNumber value, and
- the altsetting number is the bAlternateSetting value.
- (This resets each endpoint in the interface.)
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_SETCONFIGURATION</term>
- <listitem><para>Issues the
- <function>usb_set_configuration</function> call
- for the device.
- The parameter is an integer holding the number of
- a configuration (bConfigurationValue from descriptor).
- File modification time is not updated by this request.
- </para><warning><para>
- <emphasis>Avoid using this call</emphasis>
- until some usbcore bugs get fixed,
- since it does not fully synchronize device, interface,
- and driver (not just usbfs) state.
- </para></warning></listitem></varlistentry>
-
- </variablelist>
- </sect2>
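
Putting the synchronous requests above together, one bulk IN transfer could be
issued roughly as in the sketch below; the endpoint number, the timeout and
the assumption that the interface is already claimed are all illustrative.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/ch9.h>              /* USB_DIR_IN */
#include <linux/usbdevice_fs.h>

static int bulk_in_read(int fd, void *buf, unsigned int len)
{
        struct usbdevfs_bulktransfer bulk = {
                .ep      = 1 | USB_DIR_IN,      /* bulk IN endpoint 1 */
                .len     = len,
                .timeout = 1000,                /* milliseconds */
                .data    = buf,
        };
        /* On success the ioctl returns the number of bytes transferred. */
        int n = ioctl(fd, USBDEVFS_BULK, &bulk);

        if (n < 0)
                perror("USBDEVFS_BULK");
        return n;
}
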
-
- <sect2 id="usbfs-async">
- <title>Asynchronous I/O Support</title>
-
- <para>As mentioned above, there are situations where it may be
- important to initiate concurrent operations from user mode code.
- This is particularly important for periodic transfers
- (interrupt and isochronous), but it can be used for other
- kinds of USB requests too.
- In such cases, the asynchronous requests described here
- are essential. Rather than submitting one request and having
- the kernel block until it completes, the blocking is separate.
- </para>
-
- <para>These requests are packaged into a structure that
- resembles the URB used by kernel device drivers.
- (No POSIX Async I/O support here, sorry.)
- It identifies the endpoint type (USBDEVFS_URB_TYPE_*),
- endpoint (number, masked with USB_DIR_IN as appropriate),
- buffer and length, and a user "context" value serving to
- uniquely identify each request.
- (It's usually a pointer to per-request data.)
- Flags can modify requests (not as many as supported for
- kernel drivers).
- </para>
-
- <para>Each request can specify a realtime signal number
- (between SIGRTMIN and SIGRTMAX, inclusive) to request a
- signal be sent when the request completes.
- </para>
-
- <para>When usbfs returns these urbs, the status value
- is updated, and the buffer may have been modified.
- Except for isochronous transfers, the actual_length is
- updated to say how many bytes were transferred; if the
- USBDEVFS_URB_DISABLE_SPD flag is set
- ("short packets are not OK"), if fewer bytes were read
- than were requested then you get an error report.
- </para>
-
-<programlisting>struct usbdevfs_iso_packet_desc {
- unsigned int length;
- unsigned int actual_length;
- unsigned int status;
-};
-
-struct usbdevfs_urb {
- unsigned char type;
- unsigned char endpoint;
- int status;
- unsigned int flags;
- void *buffer;
- int buffer_length;
- int actual_length;
- int start_frame;
- int number_of_packets;
- int error_count;
- unsigned int signr;
- void *usercontext;
- struct usbdevfs_iso_packet_desc iso_frame_desc[];
-};</programlisting>
-
- <para> For these asynchronous requests, the file modification
- time reflects when the request was initiated.
- This contrasts with their use with the synchronous requests,
- where it reflects when requests complete.
- </para>
-
- <variablelist>
-
- <varlistentry><term>USBDEVFS_DISCARDURB</term>
- <listitem><para>
- <emphasis>TBS</emphasis>
- File modification time is not updated by this request.
- </para><para>
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_DISCSIGNAL</term>
- <listitem><para>
- <emphasis>TBS</emphasis>
- File modification time is not updated by this request.
- </para><para>
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_REAPURB</term>
- <listitem><para>
- <emphasis>TBS</emphasis>
- File modification time is not updated by this request.
- </para><para>
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_REAPURBNDELAY</term>
- <listitem><para>
- <emphasis>TBS</emphasis>
- File modification time is not updated by this request.
- </para><para>
- </para></listitem></varlistentry>
-
- <varlistentry><term>USBDEVFS_SUBMITURB</term>
- <listitem><para>
- <emphasis>TBS</emphasis>
- </para><para>
- </para></listitem></varlistentry>
-
- </variablelist>
- </sect2>
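
These asynchronous requests are normally used in submit/reap pairs. A rough
sketch, with buffer lifetime and error handling simplified and an arbitrary
endpoint number:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/ch9.h>              /* USB_DIR_IN */
#include <linux/usbdevice_fs.h>

/* Submit one asynchronous bulk IN request and wait for it to complete. */
static int one_async_bulk(int fd, void *buf, int len)
{
        struct usbdevfs_urb urb, *done;

        memset(&urb, 0, sizeof(urb));
        urb.type          = USBDEVFS_URB_TYPE_BULK;
        urb.endpoint      = 1 | USB_DIR_IN;
        urb.buffer        = buf;
        urb.buffer_length = len;
        urb.usercontext   = &urb;               /* identifies the request */

        if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0)
                return -1;
        /* Blocks until some URB completes; REAPURBNDELAY is the
         * non-blocking variant. */
        if (ioctl(fd, USBDEVFS_REAPURB, &done) < 0)
                return -1;
        return done->actual_length;
}
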
-
- </sect1>
-
- </chapter>
-
-</book>
-<!-- vim:syntax=sgml:sw=4
--->
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
deleted file mode 100644
index a27ab9f53fb6..000000000000
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ /dev/null
@@ -1,6206 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<!-- ****************************************************** -->
-<!-- Header -->
-<!-- ****************************************************** -->
-<book id="Writing-an-ALSA-Driver">
- <bookinfo>
- <title>Writing an ALSA Driver</title>
- <author>
- <firstname>Takashi</firstname>
- <surname>Iwai</surname>
- <affiliation>
- <address>
- <email>tiwai@suse.de</email>
- </address>
- </affiliation>
- </author>
-
- <date>Oct 15, 2007</date>
- <edition>0.3.7</edition>
-
- <abstract>
- <para>
- This document describes how to write an ALSA (Advanced Linux
- Sound Architecture) driver.
- </para>
- </abstract>
-
- <legalnotice>
- <para>
- Copyright (c) 2002-2005 Takashi Iwai <email>tiwai@suse.de</email>
- </para>
-
- <para>
- This document is free; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
- </para>
-
- <para>
- This document is distributed in the hope that it will be useful,
- but <emphasis>WITHOUT ANY WARRANTY</emphasis>; without even the
- implied warranty of <emphasis>MERCHANTABILITY or FITNESS FOR A
- PARTICULAR PURPOSE</emphasis>. See the GNU General Public License
- for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
- </legalnotice>
-
- </bookinfo>
-
-<!-- ****************************************************** -->
-<!-- Preface -->
-<!-- ****************************************************** -->
- <preface id="preface">
- <title>Preface</title>
- <para>
- This document describes how to write an
- <ulink url="http://www.alsa-project.org/"><citetitle>
- ALSA (Advanced Linux Sound Architecture)</citetitle></ulink>
- driver. The document focuses mainly on PCI soundcards.
- In the case of other device types, the API might
- be different, too. However, at least the ALSA kernel API is
- consistent, and therefore it would still be of some help when
- writing them.
- </para>
-
- <para>
- This document targets people who already have enough
- C language skills and have basic linux kernel programming
- knowledge. This document doesn't explain the general
- topic of linux kernel coding and doesn't cover low-level
- driver implementation details. It only describes
- the standard way to write a PCI sound driver on ALSA.
- </para>
-
- <para>
- If you are already familiar with the older ALSA ver.0.5.x API, you
- can check the drivers such as <filename>sound/pci/es1938.c</filename> or
- <filename>sound/pci/maestro3.c</filename> which have also almost the same
- code-base in the ALSA 0.5.x tree, so you can compare the differences.
- </para>
-
- <para>
- This document is still a draft version. Any feedback and
- corrections, please!!
- </para>
- </preface>
-
-
-<!-- ****************************************************** -->
-<!-- File Tree Structure -->
-<!-- ****************************************************** -->
- <chapter id="file-tree">
- <title>File Tree Structure</title>
-
- <section id="file-tree-general">
- <title>General</title>
- <para>
- The ALSA drivers are provided in two ways.
- </para>
-
- <para>
- One is the trees provided as a tarball or via cvs from the
- ALSA's ftp site, and another is the 2.6 (or later) Linux kernel
- tree. To synchronize both, the ALSA driver tree is split into
- two different trees: alsa-kernel and alsa-driver. The former
- contains purely the source code for the Linux 2.6 (or later)
- tree. This tree is designed only for compilation on 2.6 or
- later environment. The latter, alsa-driver, contains many subtle
- files for compiling ALSA drivers outside of the Linux kernel tree,
- wrapper functions for older 2.2 and 2.4 kernels, to adapt the latest kernel API,
- and additional drivers which are still in development or in
- tests. The drivers in alsa-driver tree will be moved to
- alsa-kernel (and eventually to the 2.6 kernel tree) when they are
- finished and confirmed to work fine.
- </para>
-
- <para>
- The file tree structure of ALSA driver is depicted below. Both
- alsa-kernel and alsa-driver have almost the same file
- structure, except for <quote>core</quote> directory. It's
- named as <quote>acore</quote> in alsa-driver tree.
-
- <example>
- <title>ALSA File Tree Structure</title>
- <literallayout>
- sound
- /core
- /oss
- /seq
- /oss
- /instr
- /ioctl32
- /include
- /drivers
- /mpu401
- /opl3
- /i2c
- /l3
- /synth
- /emux
- /pci
- /(cards)
- /isa
- /(cards)
- /arm
- /ppc
- /sparc
- /usb
- /pcmcia /(cards)
- /oss
- </literallayout>
- </example>
- </para>
- </section>
-
- <section id="file-tree-core-directory">
- <title>core directory</title>
- <para>
- This directory contains the middle layer which is the heart
- of ALSA drivers. In this directory, the native ALSA modules are
- stored. The sub-directories contain different modules and are
- dependent upon the kernel config.
- </para>
-
- <section id="file-tree-core-directory-oss">
- <title>core/oss</title>
-
- <para>
- The codes for PCM and mixer OSS emulation modules are stored
- in this directory. The rawmidi OSS emulation is included in
- the ALSA rawmidi code since it's quite small. The sequencer
- code is stored in <filename>core/seq/oss</filename> directory (see
- <link linkend="file-tree-core-directory-seq-oss"><citetitle>
- below</citetitle></link>).
- </para>
- </section>
-
- <section id="file-tree-core-directory-ioctl32">
- <title>core/ioctl32</title>
-
- <para>
- This directory contains the 32bit-ioctl wrappers for 64bit
- architectures such as x86-64, ppc64 and sparc64. For 32bit
- and alpha architectures, these are not compiled.
- </para>
- </section>
-
- <section id="file-tree-core-directory-seq">
- <title>core/seq</title>
- <para>
- This directory and its sub-directories are for the ALSA
- sequencer. This directory contains the sequencer core and
- primary sequencer modules such as snd-seq-midi,
- snd-seq-virmidi, etc. They are compiled only when
- <constant>CONFIG_SND_SEQUENCER</constant> is set in the kernel
- config.
- </para>
- </section>
-
- <section id="file-tree-core-directory-seq-oss">
- <title>core/seq/oss</title>
- <para>
- This contains the OSS sequencer emulation codes.
- </para>
- </section>
-
- <section id="file-tree-core-directory-deq-instr">
- <title>core/seq/instr</title>
- <para>
- This directory contains the modules for the sequencer
- instrument layer.
- </para>
- </section>
- </section>
-
- <section id="file-tree-include-directory">
- <title>include directory</title>
- <para>
- This is the place for the public header files of ALSA drivers,
- which are to be exported to user-space, or included by
- several files at different directories. Basically, the private
- header files should not be placed in this directory, but you may
- still find files there, due to historical reasons :)
- </para>
- </section>
-
- <section id="file-tree-drivers-directory">
- <title>drivers directory</title>
- <para>
- This directory contains code shared among different drivers
- on different architectures. They are hence supposed not to be
- architecture-specific.
- For example, the dummy pcm driver and the serial MIDI
- driver are found in this directory. In the sub-directories,
- there is code for components which are independent from
- bus and cpu architectures.
- </para>
-
- <section id="file-tree-drivers-directory-mpu401">
- <title>drivers/mpu401</title>
- <para>
- The MPU401 and MPU401-UART modules are stored here.
- </para>
- </section>
-
- <section id="file-tree-drivers-directory-opl3">
- <title>drivers/opl3 and opl4</title>
- <para>
- The OPL3 and OPL4 FM-synth stuff is found here.
- </para>
- </section>
- </section>
-
- <section id="file-tree-i2c-directory">
- <title>i2c directory</title>
- <para>
- This contains the ALSA i2c components.
- </para>
-
- <para>
- Although there is a standard i2c layer on Linux, ALSA has its
- own i2c code for some cards, because the soundcard needs only a
- simple operation and the standard i2c API is too complicated for
- such a purpose.
- </para>
-
- <section id="file-tree-i2c-directory-l3">
- <title>i2c/l3</title>
- <para>
- This is a sub-directory for ARM L3 i2c.
- </para>
- </section>
- </section>
-
- <section id="file-tree-synth-directory">
- <title>synth directory</title>
- <para>
- This contains the synth middle-level modules.
- </para>
-
- <para>
- So far, there is only Emu8000/Emu10k1 synth driver under
- the <filename>synth/emux</filename> sub-directory.
- </para>
- </section>
-
- <section id="file-tree-pci-directory">
- <title>pci directory</title>
- <para>
- This directory and its sub-directories hold the top-level card modules
- for PCI soundcards and the code specific to the PCI BUS.
- </para>
-
- <para>
- The drivers compiled from a single file are stored directly
- in the pci directory, while the drivers with several source files are
- stored on their own sub-directory (e.g. emu10k1, ice1712).
- </para>
- </section>
-
- <section id="file-tree-isa-directory">
- <title>isa directory</title>
- <para>
- This directory and its sub-directories hold the top-level card modules
- for ISA soundcards.
- </para>
- </section>
-
- <section id="file-tree-arm-ppc-sparc-directories">
- <title>arm, ppc, and sparc directories</title>
- <para>
- They are used for top-level card modules which are
- specific to one of these architectures.
- </para>
- </section>
-
- <section id="file-tree-usb-directory">
- <title>usb directory</title>
- <para>
- This directory contains the USB-audio driver. In the latest version, the
- USB MIDI driver is integrated in the usb-audio driver.
- </para>
- </section>
-
- <section id="file-tree-pcmcia-directory">
- <title>pcmcia directory</title>
- <para>
- The PCMCIA, especially PCCard drivers will go here. CardBus
- drivers will be in the pci directory, because their API is identical
- to that of standard PCI cards.
- </para>
- </section>
-
- <section id="file-tree-oss-directory">
- <title>oss directory</title>
- <para>
- The OSS/Lite source files are stored here in Linux 2.6 (or
- later) tree. In the ALSA driver tarball, this directory is empty,
- of course :)
- </para>
- </section>
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Basic Flow for PCI Drivers -->
-<!-- ****************************************************** -->
- <chapter id="basic-flow">
- <title>Basic Flow for PCI Drivers</title>
-
- <section id="basic-flow-outline">
- <title>Outline</title>
- <para>
- The minimum flow for PCI soundcards is as follows:
-
- <itemizedlist>
- <listitem><para>define the PCI ID table (see the section
- <link linkend="pci-resource-entries"><citetitle>PCI Entries
- </citetitle></link>).</para></listitem>
- <listitem><para>create <function>probe()</function> callback.</para></listitem>
- <listitem><para>create <function>remove()</function> callback.</para></listitem>
- <listitem><para>create a <structname>pci_driver</structname> structure
- containing the three pointers above.</para></listitem>
- <listitem><para>create an <function>init()</function> function just calling
- the <function>pci_register_driver()</function> to register the pci_driver table
- defined above.</para></listitem>
- <listitem><para>create an <function>exit()</function> function to call
- the <function>pci_unregister_driver()</function> function.</para></listitem>
- </itemizedlist>
- </para>
- </section>
-
- <section id="basic-flow-example">
- <title>Full Code Example</title>
- <para>
- The code example is shown below. Some parts are kept
- unimplemented at this moment but will be filled in the
- next sections. The numbers in the comment lines of the
- <function>snd_mychip_probe()</function> function
- refer to details explained in the following section.
-
- <example>
- <title>Basic Flow for PCI Drivers - Example</title>
- <programlisting>
-<![CDATA[
- #include <linux/init.h>
- #include <linux/pci.h>
- #include <linux/slab.h>
- #include <sound/core.h>
- #include <sound/initval.h>
-
- /* module parameters (see "Module Parameters") */
- /* SNDRV_CARDS: maximum number of cards supported by this module */
- static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
- static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
- static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
-
- /* definition of the chip-specific record */
- struct mychip {
- struct snd_card *card;
- /* the rest of the implementation will be in section
- * "PCI Resource Management"
- */
- };
-
- /* chip-specific destructor
- * (see "PCI Resource Management")
- */
- static int snd_mychip_free(struct mychip *chip)
- {
- .... /* will be implemented later... */
- }
-
- /* component-destructor
- * (see "Management of Cards and Components")
- */
- static int snd_mychip_dev_free(struct snd_device *device)
- {
- return snd_mychip_free(device->device_data);
- }
-
- /* chip-specific constructor
- * (see "Management of Cards and Components")
- */
- static int snd_mychip_create(struct snd_card *card,
- struct pci_dev *pci,
- struct mychip **rchip)
- {
- struct mychip *chip;
- int err;
- static struct snd_device_ops ops = {
- .dev_free = snd_mychip_dev_free,
- };
-
- *rchip = NULL;
-
- /* check PCI availability here
- * (see "PCI Resource Management")
- */
- ....
-
- /* allocate a chip-specific data with zero filled */
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL)
- return -ENOMEM;
-
- chip->card = card;
-
- /* rest of initialization here; will be implemented
- * later, see "PCI Resource Management"
- */
- ....
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
- if (err < 0) {
- snd_mychip_free(chip);
- return err;
- }
-
- *rchip = chip;
- return 0;
- }
-
- /* constructor -- see "Constructor" sub-section */
- static int snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
- {
- static int dev;
- struct snd_card *card;
- struct mychip *chip;
- int err;
-
- /* (1) */
- if (dev >= SNDRV_CARDS)
- return -ENODEV;
- if (!enable[dev]) {
- dev++;
- return -ENOENT;
- }
-
- /* (2) */
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- 0, &card);
- if (err < 0)
- return err;
-
- /* (3) */
- err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
-
- /* (4) */
- strcpy(card->driver, "My Chip");
- strcpy(card->shortname, "My Own Chip 123");
- sprintf(card->longname, "%s at 0x%lx irq %i",
- card->shortname, chip->ioport, chip->irq);
-
- /* (5) */
- .... /* implemented later */
-
- /* (6) */
- err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
-
- /* (7) */
- pci_set_drvdata(pci, card);
- dev++;
- return 0;
- }
-
- /* destructor -- see the "Destructor" sub-section */
- static void snd_mychip_remove(struct pci_dev *pci)
- {
- snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
- }
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="basic-flow-constructor">
- <title>Constructor</title>
- <para>
- The real constructor of PCI drivers is the <function>probe</function> callback.
- The <function>probe</function> callback and other component-constructors which are called
- from the <function>probe</function> callback cannot be used with
- the <parameter>__init</parameter> prefix
- because any PCI device could be a hotplug device.
- </para>
-
- <para>
- In the <function>probe</function> callback, the following scheme is often used.
- </para>
-
- <section id="basic-flow-constructor-device-index">
- <title>1) Check and increment the device index.</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int dev;
- ....
- if (dev >= SNDRV_CARDS)
- return -ENODEV;
- if (!enable[dev]) {
- dev++;
- return -ENOENT;
- }
-]]>
- </programlisting>
- </informalexample>
-
- where enable[dev] is the module option.
- </para>
-
- <para>
- Each time the <function>probe</function> callback is called, check the
-        availability of the device. If it is not available, simply increment
-        the device index and return. dev will also be incremented
- later (<link
- linkend="basic-flow-constructor-set-pci"><citetitle>step
- 7</citetitle></link>).
- </para>
- </section>
-
- <section id="basic-flow-constructor-create-card">
- <title>2) Create a card instance</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_card *card;
- int err;
- ....
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- 0, &card);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The details will be explained in the section
- <link linkend="card-management-card-instance"><citetitle>
- Management of Cards and Components</citetitle></link>.
- </para>
- </section>
-
- <section id="basic-flow-constructor-create-main">
- <title>3) Create a main component</title>
- <para>
- In this part, the PCI resources are allocated.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip *chip;
- ....
- err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
-]]>
- </programlisting>
- </informalexample>
-
- The details will be explained in the section <link
- linkend="pci-resource"><citetitle>PCI Resource
- Management</citetitle></link>.
- </para>
- </section>
-
- <section id="basic-flow-constructor-main-component">
- <title>4) Set the driver ID and name strings.</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- strcpy(card->driver, "My Chip");
- strcpy(card->shortname, "My Own Chip 123");
- sprintf(card->longname, "%s at 0x%lx irq %i",
- card->shortname, chip->ioport, chip->irq);
-]]>
- </programlisting>
- </informalexample>
-
- The driver field holds the minimal ID string of the
- chip. This is used by alsa-lib's configurator, so keep it
- simple but unique.
- Even the same driver can have different driver IDs to
- distinguish the functionality of each chip type.
- </para>
-
- <para>
-        The shortname field is a string shown as a more verbose
-        name. The longname field contains the information
- shown in <filename>/proc/asound/cards</filename>.
- </para>
- </section>
-
- <section id="basic-flow-constructor-create-other">
- <title>5) Create other components, such as mixer, MIDI, etc.</title>
- <para>
- Here you define the basic components such as
- <link linkend="pcm-interface"><citetitle>PCM</citetitle></link>,
- mixer (e.g. <link linkend="api-ac97"><citetitle>AC97</citetitle></link>),
- MIDI (e.g. <link linkend="midi-interface"><citetitle>MPU-401</citetitle></link>),
- and other interfaces.
- Also, if you want a <link linkend="proc-interface"><citetitle>proc
- file</citetitle></link>, define it here, too.
- </para>
- </section>
-
- <section id="basic-flow-constructor-register-card">
- <title>6) Register the card instance.</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Will be explained in the section <link
- linkend="card-management-registration"><citetitle>Management
- of Cards and Components</citetitle></link>, too.
- </para>
- </section>
-
- <section id="basic-flow-constructor-set-pci">
- <title>7) Set the PCI driver data and return zero.</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- pci_set_drvdata(pci, card);
- dev++;
- return 0;
-]]>
- </programlisting>
- </informalexample>
-
- In the above, the card record is stored. This pointer is
- used in the remove callback and power-management
- callbacks, too.
- </para>
- </section>
- </section>
-
- <section id="basic-flow-destructor">
- <title>Destructor</title>
- <para>
- The destructor, remove callback, simply releases the card
- instance. Then the ALSA middle layer will release all the
- attached components automatically.
- </para>
-
- <para>
-      It typically looks like the following:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void snd_mychip_remove(struct pci_dev *pci)
- {
- snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
- }
-]]>
- </programlisting>
- </informalexample>
-
- The above code assumes that the card pointer is set to the PCI
- driver data.
- </para>
- </section>
-
- <section id="basic-flow-header-files">
- <title>Header Files</title>
- <para>
- For the above example, at least the following include files
- are necessary.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- #include <linux/init.h>
- #include <linux/pci.h>
- #include <linux/slab.h>
- #include <sound/core.h>
- #include <sound/initval.h>
-]]>
- </programlisting>
- </informalexample>
-
- where the last one is necessary only when module options are
- defined in the source file. If the code is split into several
-      files, the files without module options don't need it.
- </para>
-
- <para>
- In addition to these headers, you'll need
- <filename>&lt;linux/interrupt.h&gt;</filename> for interrupt
- handling, and <filename>&lt;asm/io.h&gt;</filename> for I/O
- access. If you use the <function>mdelay()</function> or
- <function>udelay()</function> functions, you'll need to include
- <filename>&lt;linux/delay.h&gt;</filename> too.
- </para>
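-
-      <para>
-        For instance, a driver file that implements an interrupt handler,
-        does port I/O and calls <function>udelay()</function> could start
-        with an include list like the one below (a minimal sketch; trim it
-        to what your code actually uses):
-
-          <informalexample>
-            <programlisting>
-<![CDATA[
-  #include <linux/init.h>
-  #include <linux/pci.h>
-  #include <linux/slab.h>
-  #include <linux/interrupt.h>
-  #include <linux/delay.h>
-  #include <asm/io.h>
-  #include <sound/core.h>
-  #include <sound/initval.h>
-]]>
-            </programlisting>
-          </informalexample>
-      </para>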
-
- <para>
- The ALSA interfaces like the PCM and control APIs are defined in other
- <filename>&lt;sound/xxx.h&gt;</filename> header files.
- They have to be included after
- <filename>&lt;sound/core.h&gt;</filename>.
- </para>
-
- </section>
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Management of Cards and Components -->
-<!-- ****************************************************** -->
- <chapter id="card-management">
- <title>Management of Cards and Components</title>
-
- <section id="card-management-card-instance">
- <title>Card Instance</title>
- <para>
- For each soundcard, a <quote>card</quote> record must be allocated.
- </para>
-
- <para>
- A card record is the headquarters of the soundcard. It manages
- the whole list of devices (components) on the soundcard, such as
- PCM, mixers, MIDI, synthesizer, and so on. Also, the card
- record holds the ID and the name strings of the card, manages
- the root of proc files, and controls the power-management states
- and hotplug disconnections. The component list on the card
- record is used to manage the correct release of resources at
- destruction.
- </para>
-
- <para>
- As mentioned above, to create a card instance, call
- <function>snd_card_new()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_card *card;
- int err;
- err = snd_card_new(&pci->dev, index, id, module, extra_size, &card);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The function takes six arguments: the parent device pointer,
- the card-index number, the id string, the module pointer (usually
- <constant>THIS_MODULE</constant>),
- the size of extra-data space, and the pointer to return the
- card instance. The extra_size argument is used to
- allocate card-&gt;private_data for the
- chip-specific data. Note that these data
- are allocated by <function>snd_card_new()</function>.
- </para>
-
- <para>
-      The first argument, the pointer to struct
-      <structname>device</structname>, specifies the parent device.
-      For PCI devices, typically &amp;pci-&gt;dev is passed there.
- </para>
- </section>
-
- <section id="card-management-component">
- <title>Components</title>
- <para>
- After the card is created, you can attach the components
- (devices) to the card instance. In an ALSA driver, a component is
- represented as a struct <structname>snd_device</structname> object.
- A component can be a PCM instance, a control interface, a raw
- MIDI interface, etc. Each such instance has one component
- entry.
- </para>
-
- <para>
-	A component can be created with the
-	<function>snd_device_new()</function> function.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_device_new(card, SNDRV_DEV_XXX, chip, &ops);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- This takes the card pointer, the device-level
- (<constant>SNDRV_DEV_XXX</constant>), the data pointer, and the
- callback pointers (<parameter>&amp;ops</parameter>). The
- device-level defines the type of components and the order of
- registration and de-registration. For most components, the
- device-level is already defined. For a user-defined component,
- you can use <constant>SNDRV_DEV_LOWLEVEL</constant>.
- </para>
-
- <para>
- This function itself doesn't allocate the data space. The data
- must be allocated manually beforehand, and its pointer is passed
- as the argument. This pointer (<parameter>chip</parameter> in the
- above example) is used as the identifier for the instance.
- </para>
-
- <para>
- Each pre-defined ALSA component such as ac97 and pcm calls
- <function>snd_device_new()</function> inside its
- constructor. The destructor for each component is defined in the
- callback pointers. Hence, you don't need to take care of
- calling a destructor for such a component.
- </para>
-
- <para>
- If you wish to create your own component, you need to
- set the destructor function to the dev_free callback in
- the <parameter>ops</parameter>, so that it can be released
- automatically via <function>snd_card_free()</function>.
- The next example will show an implementation of chip-specific
- data.
- </para>
- </section>
-
- <section id="card-management-chip-specific">
- <title>Chip-Specific Data</title>
- <para>
- Chip-specific information, e.g. the I/O port address, its
- resource pointer, or the irq number, is stored in the
- chip-specific record.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip {
- ....
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- In general, there are two ways of allocating the chip record.
- </para>
-
- <section id="card-management-chip-specific-snd-card-new">
- <title>1. Allocating via <function>snd_card_new()</function>.</title>
- <para>
- As mentioned above, you can pass the extra-data-length
- to the 5th argument of <function>snd_card_new()</function>, i.e.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- sizeof(struct mychip), &card);
-]]>
- </programlisting>
- </informalexample>
-
- struct <structname>mychip</structname> is the type of the chip record.
- </para>
-
- <para>
- In return, the allocated record can be accessed as
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip *chip = card->private_data;
-]]>
- </programlisting>
- </informalexample>
-
- With this method, you don't have to allocate twice.
- The record is released together with the card instance.
- </para>
- </section>
-
- <section id="card-management-chip-specific-allocate-extra">
- <title>2. Allocating an extra device.</title>
-
- <para>
- After allocating a card instance via
- <function>snd_card_new()</function> (with
-	<constant>0</constant> on the 5th arg), call
- <function>kzalloc()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_card *card;
- struct mychip *chip;
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- 0, &card);
- .....
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
-	The chip record should at least have a field to hold the card
-	pointer,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip {
- struct snd_card *card;
- ....
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Then, set the card pointer in the returned chip instance.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- chip->card = card;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Next, initialize the fields, and register this chip
- record as a low-level device with a specified
- <parameter>ops</parameter>,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_device_ops ops = {
- .dev_free = snd_mychip_dev_free,
- };
- ....
- snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
-]]>
- </programlisting>
- </informalexample>
-
- <function>snd_mychip_dev_free()</function> is the
- device-destructor function, which will call the real
- destructor.
- </para>
-
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_dev_free(struct snd_device *device)
- {
- return snd_mychip_free(device->device_data);
- }
-]]>
- </programlisting>
- </informalexample>
-
- where <function>snd_mychip_free()</function> is the real destructor.
- </para>
- </section>
- </section>
-
- <section id="card-management-registration">
- <title>Registration and Release</title>
- <para>
- After all components are assigned, register the card instance
- by calling <function>snd_card_register()</function>. Access
- to the device files is enabled at this point. That is, before
- <function>snd_card_register()</function> is called, the
-      components are safely inaccessible from the outside. If this
- call fails, exit the probe function after releasing the card via
- <function>snd_card_free()</function>.
- </para>
-
- <para>
-      For releasing the card instance, you can simply call
- <function>snd_card_free()</function>. As mentioned earlier, all
- components are released automatically by this call.
- </para>
-
- <para>
- For a device which allows hotplugging, you can use
-      <function>snd_card_free_when_closed()</function>.  This one will
-      postpone the destruction until all device files are closed.
- </para>
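-
-      <para>
-        As a rough sketch (assuming the card pointer is stored as the PCI
-        driver data, as in the earlier examples), the remove callback of a
-        hotplug-capable driver could defer the release like this:
-
-          <informalexample>
-            <programlisting>
-<![CDATA[
-  static void snd_mychip_remove(struct pci_dev *pci)
-  {
-          /* the card object goes away only after the last file is closed */
-          snd_card_free_when_closed(pci_get_drvdata(pci));
-  }
-]]>
-            </programlisting>
-          </informalexample>
-      </para>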
-
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- PCI Resource Management -->
-<!-- ****************************************************** -->
- <chapter id="pci-resource">
- <title>PCI Resource Management</title>
-
- <section id="pci-resource-example">
- <title>Full Code Example</title>
- <para>
- In this section, we'll complete the chip-specific constructor,
- destructor and PCI entries. Example code is shown first,
- below.
-
- <example>
- <title>PCI Resource Management Example</title>
- <programlisting>
-<![CDATA[
- struct mychip {
- struct snd_card *card;
- struct pci_dev *pci;
-
- unsigned long port;
- int irq;
- };
-
- static int snd_mychip_free(struct mychip *chip)
- {
- /* disable hardware here if any */
- .... /* (not implemented in this document) */
-
- /* release the irq */
- if (chip->irq >= 0)
- free_irq(chip->irq, chip);
- /* release the I/O ports & memory */
- pci_release_regions(chip->pci);
- /* disable the PCI entry */
- pci_disable_device(chip->pci);
- /* release the data */
- kfree(chip);
- return 0;
- }
-
- /* chip-specific constructor */
- static int snd_mychip_create(struct snd_card *card,
- struct pci_dev *pci,
- struct mychip **rchip)
- {
- struct mychip *chip;
- int err;
- static struct snd_device_ops ops = {
- .dev_free = snd_mychip_dev_free,
- };
-
- *rchip = NULL;
-
- /* initialize the PCI entry */
- err = pci_enable_device(pci);
- if (err < 0)
- return err;
- /* check PCI availability (28bit DMA) */
- if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 ||
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) {
- printk(KERN_ERR "error to set 28bit mask DMA\n");
- pci_disable_device(pci);
- return -ENXIO;
- }
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
- pci_disable_device(pci);
- return -ENOMEM;
- }
-
- /* initialize the stuff */
- chip->card = card;
- chip->pci = pci;
- chip->irq = -1;
-
- /* (1) PCI resource allocation */
- err = pci_request_regions(pci, "My Chip");
- if (err < 0) {
- kfree(chip);
- pci_disable_device(pci);
- return err;
- }
- chip->port = pci_resource_start(pci, 0);
- if (request_irq(pci->irq, snd_mychip_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, chip)) {
- printk(KERN_ERR "cannot grab irq %d\n", pci->irq);
- snd_mychip_free(chip);
- return -EBUSY;
- }
- chip->irq = pci->irq;
-
- /* (2) initialization of the chip hardware */
- .... /* (not implemented in this document) */
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
- if (err < 0) {
- snd_mychip_free(chip);
- return err;
- }
-
- *rchip = chip;
- return 0;
- }
-
- /* PCI IDs */
- static struct pci_device_id snd_mychip_ids[] = {
- { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- ....
- { 0, }
- };
- MODULE_DEVICE_TABLE(pci, snd_mychip_ids);
-
- /* pci_driver definition */
- static struct pci_driver driver = {
- .name = KBUILD_MODNAME,
- .id_table = snd_mychip_ids,
- .probe = snd_mychip_probe,
- .remove = snd_mychip_remove,
- };
-
- /* module initialization */
- static int __init alsa_card_mychip_init(void)
- {
- return pci_register_driver(&driver);
- }
-
- /* module clean up */
- static void __exit alsa_card_mychip_exit(void)
- {
- pci_unregister_driver(&driver);
- }
-
- module_init(alsa_card_mychip_init)
- module_exit(alsa_card_mychip_exit)
-
- EXPORT_NO_SYMBOLS; /* for old kernels only */
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="pci-resource-some-haftas">
- <title>Some Hafta's</title>
- <para>
- The allocation of PCI resources is done in the
- <function>probe()</function> function, and usually an extra
- <function>xxx_create()</function> function is written for this
- purpose.
- </para>
-
- <para>
- In the case of PCI devices, you first have to call
- the <function>pci_enable_device()</function> function before
- allocating resources. Also, you need to set the proper PCI DMA
- mask to limit the accessed I/O range. In some cases, you might
- need to call <function>pci_set_master()</function> function,
- too.
- </para>
-
- <para>
-      Supposing a 28bit DMA mask, the code to be added would look like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- err = pci_enable_device(pci);
- if (err < 0)
- return err;
- if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 ||
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) {
- printk(KERN_ERR "error to set 28bit mask DMA\n");
- pci_disable_device(pci);
- return -ENXIO;
- }
-
-]]>
- </programlisting>
- </informalexample>
- </para>
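-
-      <para>
-        If your chip acts as a PCI bus master for its DMA transfers, you
-        would also enable bus mastering at this point; a minimal sketch
-        (only add it when the hardware actually needs it):
-
-          <informalexample>
-            <programlisting>
-<![CDATA[
-  pci_set_master(pci);
-]]>
-            </programlisting>
-          </informalexample>
-      </para>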
- </section>
-
- <section id="pci-resource-resource-allocation">
- <title>Resource Allocation</title>
- <para>
- The allocation of I/O ports and irqs is done via standard kernel
-      functions. Unlike ALSA ver.0.5.x, there are no helpers for
-      that, and these resources must be released in the destructor
-      function (see below). Also, on ALSA 0.9.x, you don't need to
-      allocate (pseudo-)DMA for PCI like in ALSA 0.5.x.
- </para>
-
- <para>
- Now assume that the PCI device has an I/O port with 8 bytes
- and an interrupt. Then struct <structname>mychip</structname> will have the
- following fields:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip {
- struct snd_card *card;
-
- unsigned long port;
- int irq;
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- For an I/O port (and also a memory region), you need to have
- the resource pointer for the standard resource management. For
- an irq, you have to keep only the irq number (integer). But you
-      need to initialize this number to -1 before the actual allocation,
- since irq 0 is valid. The port address and its resource pointer
- can be initialized as null by
- <function>kzalloc()</function> automatically, so you
- don't have to take care of resetting them.
- </para>
-
- <para>
- The allocation of an I/O port is done like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- err = pci_request_regions(pci, "My Chip");
- if (err < 0) {
- kfree(chip);
- pci_disable_device(pci);
- return err;
- }
- chip->port = pci_resource_start(pci, 0);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
-      This reserves the I/O port region (8 bytes in this example) of the
-      given PCI device. Unlike the old manual
-      <function>request_region()</function> approach, no resource pointer
-      has to be kept around here; the reservation is undone later in the
-      destructor with <function>pci_release_regions()</function>.
- </para>
-
- <para>
- The allocation of an interrupt source is done like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- if (request_irq(pci->irq, snd_mychip_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, chip)) {
- printk(KERN_ERR "cannot grab irq %d\n", pci->irq);
- snd_mychip_free(chip);
- return -EBUSY;
- }
- chip->irq = pci->irq;
-]]>
- </programlisting>
- </informalexample>
-
- where <function>snd_mychip_interrupt()</function> is the
- interrupt handler defined <link
- linkend="pcm-interface-interrupt-handler"><citetitle>later</citetitle></link>.
-      Note that chip-&gt;irq should be set
-      only after <function>request_irq()</function> has succeeded.
- </para>
-
- <para>
- On the PCI bus, interrupts can be shared. Thus,
- <constant>IRQF_SHARED</constant> is used as the interrupt flag of
- <function>request_irq()</function>.
- </para>
-
- <para>
- The last argument of <function>request_irq()</function> is the
- data pointer passed to the interrupt handler. Usually, the
- chip-specific record is used for that, but you can use what you
- like, too.
- </para>
-
- <para>
- I won't give details about the interrupt handler at this
- point, but at least its appearance can be explained now. The
-      interrupt handler usually looks like the following:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
- {
- struct mychip *chip = dev_id;
- ....
- return IRQ_HANDLED;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Now let's write the corresponding destructor for the resources
-      above. The role of the destructor is simple: disable the hardware
- (if already activated) and release the resources. So far, we
- have no hardware part, so the disabling code is not written here.
- </para>
-
- <para>
- To release the resources, the <quote>check-and-release</quote>
-      method is a safer way. For the interrupt, do it like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- if (chip->irq >= 0)
- free_irq(chip->irq, chip);
-]]>
- </programlisting>
- </informalexample>
-
- Since the irq number can start from 0, you should initialize
- chip-&gt;irq with a negative value (e.g. -1), so that you can
- check the validity of the irq number as above.
- </para>
-
- <para>
- When you requested I/O ports or memory regions via
- <function>pci_request_region()</function> or
- <function>pci_request_regions()</function> like in this example,
- release the resource(s) using the corresponding function,
- <function>pci_release_region()</function> or
- <function>pci_release_regions()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- pci_release_regions(chip->pci);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
-      When you requested the region manually via <function>request_region()</function>
-      or <function>request_mem_region()</function>, you can release it via
-      <function>release_and_free_resource()</function>. Supposing that you keep
-      the resource pointer returned from <function>request_region()</function>
-      in chip-&gt;res_port, the release procedure looks like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- release_and_free_resource(chip->res_port);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Don't forget to call <function>pci_disable_device()</function>
- before the end.
- </para>
-
- <para>
- And finally, release the chip-specific record.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- kfree(chip);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- We didn't implement the hardware disabling part in the above.
- If you need to do this, please note that the destructor may be
- called even before the initialization of the chip is completed.
- It would be better to have a flag to skip hardware disabling
- if the hardware was not initialized yet.
- </para>
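-
-    <para>
-      A minimal sketch of such a guard (the
-      <structfield>initialized</structfield> flag is a hypothetical field
-      you would add to struct <structname>mychip</structname> and set once
-      the hardware setup has finished):
-
-        <informalexample>
-          <programlisting>
-<![CDATA[
-  static int snd_mychip_free(struct mychip *chip)
-  {
-          /* touch the hardware only when it was actually brought up */
-          if (chip->initialized) {
-                  /* disable the hardware here */
-                  ....
-          }
-          /* release irq, I/O regions etc. as shown above */
-          ....
-          kfree(chip);
-          return 0;
-  }
-]]>
-          </programlisting>
-        </informalexample>
-    </para>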
-
- <para>
-      When the chip-data is assigned to the card using
-      <function>snd_device_new()</function> with
-      <constant>SNDRV_DEV_LOWLEVEL</constant>, its destructor is
-      called last. That is, it is assured that all other
-      components like PCMs and controls have already been released.
-      You don't have to stop PCMs, etc. explicitly; just stop the
-      hardware at the low level.
- </para>
-
- <para>
-      The management of a memory-mapped region is almost the same as
-      that of an I/O port. You'll need fields like
-      the following:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mychip {
- ....
- unsigned long iobase_phys;
- void __iomem *iobase_virt;
- };
-]]>
- </programlisting>
- </informalexample>
-
- and the allocation would be like below:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- if ((err = pci_request_regions(pci, "My Chip")) < 0) {
- kfree(chip);
- return err;
- }
- chip->iobase_phys = pci_resource_start(pci, 0);
- chip->iobase_virt = ioremap_nocache(chip->iobase_phys,
- pci_resource_len(pci, 0));
-]]>
- </programlisting>
- </informalexample>
-
- and the corresponding destructor would be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_free(struct mychip *chip)
- {
- ....
- if (chip->iobase_virt)
- iounmap(chip->iobase_virt);
- ....
- pci_release_regions(chip->pci);
- ....
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- </section>
-
- <section id="pci-resource-entries">
- <title>PCI Entries</title>
- <para>
- So far, so good. Let's finish the missing PCI
-      stuff. First, we need a
-      <structname>pci_device_id</structname> table for this
-      chipset. It's a table of PCI vendor/device ID numbers and some
- masks.
- </para>
-
- <para>
- For example,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct pci_device_id snd_mychip_ids[] = {
- { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- ....
- { 0, }
- };
- MODULE_DEVICE_TABLE(pci, snd_mychip_ids);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The first and second fields of
- the <structname>pci_device_id</structname> structure are the vendor and
- device IDs. If you have no reason to filter the matching
- devices, you can leave the remaining fields as above. The last
- field of the <structname>pci_device_id</structname> struct contains
- private data for this entry. You can specify any value here, for
- example, to define specific operations for supported device IDs.
- Such an example is found in the intel8x0 driver.
- </para>
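-
-    <para>
-      As an illustrative sketch (the second device ID and the enum values
-      below are hypothetical, not taken from a real driver), the private
-      data field can carry a chip-variant tag that the probe callback reads
-      back from pci_id-&gt;driver_data:
-
-        <informalexample>
-          <programlisting>
-<![CDATA[
-  enum { MYCHIP_REV_A, MYCHIP_REV_B };
-
-  static struct pci_device_id snd_mychip_ids[] = {
-          { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
-            PCI_ANY_ID, PCI_ANY_ID, 0, 0, MYCHIP_REV_A, },
-          { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR2,
-            PCI_ANY_ID, PCI_ANY_ID, 0, 0, MYCHIP_REV_B, },
-          { 0, }
-  };
-
-  static int snd_mychip_probe(struct pci_dev *pci,
-                              const struct pci_device_id *pci_id)
-  {
-          unsigned long variant = pci_id->driver_data;
-          /* branch on 'variant' to handle chip differences */
-          ....
-  }
-]]>
-          </programlisting>
-        </informalexample>
-    </para>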
-
- <para>
- The last entry of this list is the terminator. You must
- specify this all-zero entry.
- </para>
-
- <para>
- Then, prepare the <structname>pci_driver</structname> record:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct pci_driver driver = {
- .name = KBUILD_MODNAME,
- .id_table = snd_mychip_ids,
- .probe = snd_mychip_probe,
- .remove = snd_mychip_remove,
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The <structfield>probe</structfield> and
- <structfield>remove</structfield> functions have already
- been defined in the previous sections.
- The <structfield>name</structfield>
- field is the name string of this device. Note that you must not
- use a slash <quote>/</quote> in this string.
- </para>
-
- <para>
- And at last, the module entries:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int __init alsa_card_mychip_init(void)
- {
- return pci_register_driver(&driver);
- }
-
- static void __exit alsa_card_mychip_exit(void)
- {
- pci_unregister_driver(&driver);
- }
-
- module_init(alsa_card_mychip_init)
- module_exit(alsa_card_mychip_exit)
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Note that these module entries are tagged with
- <parameter>__init</parameter> and
- <parameter>__exit</parameter> prefixes.
- </para>
-
- <para>
- Oh, one thing was forgotten. If you have no exported symbols,
- you need to declare it in 2.2 or 2.4 kernels (it's not necessary in 2.6 kernels).
-
- <informalexample>
- <programlisting>
-<![CDATA[
- EXPORT_NO_SYMBOLS;
-]]>
- </programlisting>
- </informalexample>
-
- That's all!
- </para>
- </section>
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- PCM Interface -->
-<!-- ****************************************************** -->
- <chapter id="pcm-interface">
- <title>PCM Interface</title>
-
- <section id="pcm-interface-general">
- <title>General</title>
- <para>
- The PCM middle layer of ALSA is quite powerful and it is only
- necessary for each driver to implement the low-level functions
- to access its hardware.
- </para>
-
- <para>
-      To access the PCM layer, you need to include
-      <filename>&lt;sound/pcm.h&gt;</filename> first.  In addition,
-      <filename>&lt;sound/pcm_params.h&gt;</filename> might be needed
-      if you access some functions related to hw_params.
- </para>
-
- <para>
- Each card device can have up to four pcm instances. A pcm
-      instance corresponds to a pcm device file. The limitation on the
-      number of instances comes only from the available bit size of
-      Linux device numbers. Once 64bit device numbers are
-      used, we'll have more pcm instances available.
- </para>
-
- <para>
- A pcm instance consists of pcm playback and capture streams,
- and each pcm stream consists of one or more pcm substreams. Some
- soundcards support multiple playback functions. For example,
- emu10k1 has a PCM playback of 32 stereo substreams. In this case, at
- each open, a free substream is (usually) automatically chosen
- and opened. Meanwhile, when only one substream exists and it was
-      already opened, the subsequent open will either block
-      or fail with <constant>EAGAIN</constant> according to the
- file open mode. But you don't have to care about such details in your
- driver. The PCM middle layer will take care of such work.
- </para>
- </section>
-
- <section id="pcm-interface-example">
- <title>Full Code Example</title>
- <para>
- The example code below does not include any hardware access
- routines but shows only the skeleton, how to build up the PCM
- interfaces.
-
- <example>
- <title>PCM Example Code</title>
- <programlisting>
-<![CDATA[
- #include <sound/pcm.h>
- ....
-
- /* hardware definition */
- static struct snd_pcm_hardware snd_mychip_playback_hw = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .rate_min = 8000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 32768,
- .period_bytes_min = 4096,
- .period_bytes_max = 32768,
- .periods_min = 1,
- .periods_max = 1024,
- };
-
- /* hardware definition */
- static struct snd_pcm_hardware snd_mychip_capture_hw = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .rate_min = 8000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 32768,
- .period_bytes_min = 4096,
- .period_bytes_max = 32768,
- .periods_min = 1,
- .periods_max = 1024,
- };
-
- /* open callback */
- static int snd_mychip_playback_open(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- runtime->hw = snd_mychip_playback_hw;
- /* more hardware-initialization will be done here */
- ....
- return 0;
- }
-
- /* close callback */
- static int snd_mychip_playback_close(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- /* the hardware-specific codes will be here */
- ....
- return 0;
-
- }
-
- /* open callback */
- static int snd_mychip_capture_open(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- runtime->hw = snd_mychip_capture_hw;
- /* more hardware-initialization will be done here */
- ....
- return 0;
- }
-
- /* close callback */
- static int snd_mychip_capture_close(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- /* the hardware-specific codes will be here */
- ....
- return 0;
-
- }
-
- /* hw_params callback */
- static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
- {
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- }
-
- /* hw_free callback */
- static int snd_mychip_pcm_hw_free(struct snd_pcm_substream *substream)
- {
- return snd_pcm_lib_free_pages(substream);
- }
-
- /* prepare callback */
- static int snd_mychip_pcm_prepare(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- /* set up the hardware with the current configuration
- * for example...
- */
- mychip_set_sample_format(chip, runtime->format);
- mychip_set_sample_rate(chip, runtime->rate);
- mychip_set_channels(chip, runtime->channels);
- mychip_set_dma_setup(chip, runtime->dma_addr,
- chip->buffer_size,
- chip->period_size);
- return 0;
- }
-
- /* trigger callback */
- static int snd_mychip_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
- {
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- /* do something to start the PCM engine */
- ....
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- /* do something to stop the PCM engine */
- ....
- break;
- default:
- return -EINVAL;
-      }
-      return 0;
-  }
-
- /* pointer callback */
- static snd_pcm_uframes_t
- snd_mychip_pcm_pointer(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- unsigned int current_ptr;
-
- /* get the current hardware pointer */
- current_ptr = mychip_get_hw_pointer(chip);
- return current_ptr;
- }
-
- /* operators */
- static struct snd_pcm_ops snd_mychip_playback_ops = {
- .open = snd_mychip_playback_open,
- .close = snd_mychip_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_mychip_pcm_hw_params,
- .hw_free = snd_mychip_pcm_hw_free,
- .prepare = snd_mychip_pcm_prepare,
- .trigger = snd_mychip_pcm_trigger,
- .pointer = snd_mychip_pcm_pointer,
- };
-
- /* operators */
- static struct snd_pcm_ops snd_mychip_capture_ops = {
- .open = snd_mychip_capture_open,
- .close = snd_mychip_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_mychip_pcm_hw_params,
- .hw_free = snd_mychip_pcm_hw_free,
- .prepare = snd_mychip_pcm_prepare,
- .trigger = snd_mychip_pcm_trigger,
- .pointer = snd_mychip_pcm_pointer,
- };
-
- /*
- * definitions of capture are omitted here...
- */
-
- /* create a pcm device */
- static int snd_mychip_new_pcm(struct mychip *chip)
- {
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
- if (err < 0)
- return err;
- pcm->private_data = chip;
- strcpy(pcm->name, "My Chip");
- chip->pcm = pcm;
- /* set operators */
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_mychip_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &snd_mychip_capture_ops);
- /* pre-allocation of buffers */
- /* NOTE: this may fail */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
- return 0;
- }
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="pcm-interface-constructor">
- <title>Constructor</title>
- <para>
- A pcm instance is allocated by the <function>snd_pcm_new()</function>
- function. It would be better to create a constructor for pcm,
- namely,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_new_pcm(struct mychip *chip)
- {
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
- if (err < 0)
- return err;
- pcm->private_data = chip;
- strcpy(pcm->name, "My Chip");
- chip->pcm = pcm;
- ....
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
-      The <function>snd_pcm_new()</function> function takes six
-      arguments. The first argument is the card pointer to which this
- pcm is assigned, and the second is the ID string.
- </para>
-
- <para>
- The third argument (<parameter>index</parameter>, 0 in the
- above) is the index of this new pcm. It begins from zero. If
-      you create more than one pcm instance, specify
-      different numbers in this argument. For example,
- <parameter>index</parameter> = 1 for the second PCM device.
- </para>
-
- <para>
- The fourth and fifth arguments are the number of substreams
- for playback and capture, respectively. Here 1 is used for
- both arguments. When no playback or capture substreams are available,
- pass 0 to the corresponding argument.
- </para>
-
- <para>
- If a chip supports multiple playbacks or captures, you can
- specify more numbers, but they must be handled properly in
-      the open/close, etc. callbacks.  When you need to know which
-      substream you are referring to, it can be obtained from the
- struct <structname>snd_pcm_substream</structname> data passed to each callback
- as follows:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_pcm_substream *substream;
- int index = substream->number;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- After the pcm is created, you need to set operators for each
- pcm stream.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_mychip_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &snd_mychip_capture_ops);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
-      The operators are typically defined like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_pcm_ops snd_mychip_playback_ops = {
- .open = snd_mychip_pcm_open,
- .close = snd_mychip_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_mychip_pcm_hw_params,
- .hw_free = snd_mychip_pcm_hw_free,
- .prepare = snd_mychip_pcm_prepare,
- .trigger = snd_mychip_pcm_trigger,
- .pointer = snd_mychip_pcm_pointer,
- };
-]]>
- </programlisting>
- </informalexample>
-
- All the callbacks are described in the
- <link linkend="pcm-interface-operators"><citetitle>
- Operators</citetitle></link> subsection.
- </para>
-
- <para>
- After setting the operators, you probably will want to
- pre-allocate the buffer. For the pre-allocation, simply call
- the following:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
-]]>
- </programlisting>
- </informalexample>
-
-      It will allocate a buffer of up to 64kB by default.
- Buffer management details will be described in the later section <link
- linkend="buffer-and-memory"><citetitle>Buffer and Memory
- Management</citetitle></link>.
- </para>
-
- <para>
- Additionally, you can set some extra information for this pcm
- in pcm-&gt;info_flags.
- The available values are defined as
- <constant>SNDRV_PCM_INFO_XXX</constant> in
- <filename>&lt;sound/asound.h&gt;</filename>, which is used for
- the hardware definition (described later). When your soundchip
-      supports only half-duplex, specify it like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- <section id="pcm-interface-destructor">
- <title>... And the Destructor?</title>
- <para>
- The destructor for a pcm instance is not always
- necessary. Since the pcm device will be released by the middle
- layer code automatically, you don't have to call the destructor
- explicitly.
- </para>
-
- <para>
- The destructor would be necessary if you created
- special records internally and needed to release them. In such a
- case, set the destructor function to
- pcm-&gt;private_free:
-
- <example>
- <title>PCM Instance with a Destructor</title>
- <programlisting>
-<![CDATA[
- static void mychip_pcm_free(struct snd_pcm *pcm)
- {
- struct mychip *chip = snd_pcm_chip(pcm);
- /* free your own data */
- kfree(chip->my_private_pcm_data);
-          /* do whatever else you like */
- ....
- }
-
- static int snd_mychip_new_pcm(struct mychip *chip)
- {
- struct snd_pcm *pcm;
- ....
- /* allocate your own data */
- chip->my_private_pcm_data = kmalloc(...);
- /* set the destructor */
- pcm->private_data = chip;
- pcm->private_free = mychip_pcm_free;
- ....
- }
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="pcm-interface-runtime">
- <title>Runtime Pointer - The Chest of PCM Information</title>
- <para>
- When the PCM substream is opened, a PCM runtime instance is
- allocated and assigned to the substream. This pointer is
- accessible via <constant>substream-&gt;runtime</constant>.
- This runtime pointer holds most information you need
- to control the PCM: the copy of hw_params and sw_params configurations, the buffer
- pointers, mmap records, spinlocks, etc.
- </para>
-
- <para>
-      The definition of the runtime instance is found in
-      <filename>&lt;sound/pcm.h&gt;</filename>. Here is the
-      relevant part of this file:
- <informalexample>
- <programlisting>
-<![CDATA[
-struct _snd_pcm_runtime {
- /* -- Status -- */
- struct snd_pcm_substream *trigger_master;
- snd_timestamp_t trigger_tstamp; /* trigger timestamp */
- int overrange;
- snd_pcm_uframes_t avail_max;
- snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
- snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time*/
-
- /* -- HW params -- */
- snd_pcm_access_t access; /* access mode */
- snd_pcm_format_t format; /* SNDRV_PCM_FORMAT_* */
- snd_pcm_subformat_t subformat; /* subformat */
- unsigned int rate; /* rate in Hz */
- unsigned int channels; /* channels */
- snd_pcm_uframes_t period_size; /* period size */
- unsigned int periods; /* periods */
- snd_pcm_uframes_t buffer_size; /* buffer size */
- unsigned int tick_time; /* tick time */
- snd_pcm_uframes_t min_align; /* Min alignment for the format */
- size_t byte_align;
- unsigned int frame_bits;
- unsigned int sample_bits;
- unsigned int info;
- unsigned int rate_num;
- unsigned int rate_den;
-
- /* -- SW params -- */
- struct timespec tstamp_mode; /* mmap timestamp is updated */
- unsigned int period_step;
- unsigned int sleep_min; /* min ticks to sleep */
- snd_pcm_uframes_t start_threshold;
- snd_pcm_uframes_t stop_threshold;
- snd_pcm_uframes_t silence_threshold; /* Silence filling happens when
- noise is nearest than this */
- snd_pcm_uframes_t silence_size; /* Silence filling size */
- snd_pcm_uframes_t boundary; /* pointers wrap point */
-
- snd_pcm_uframes_t silenced_start;
- snd_pcm_uframes_t silenced_size;
-
- snd_pcm_sync_id_t sync; /* hardware synchronization ID */
-
- /* -- mmap -- */
- volatile struct snd_pcm_mmap_status *status;
- volatile struct snd_pcm_mmap_control *control;
- atomic_t mmap_count;
-
- /* -- locking / scheduling -- */
- spinlock_t lock;
- wait_queue_head_t sleep;
- struct timer_list tick_timer;
- struct fasync_struct *fasync;
-
- /* -- private section -- */
- void *private_data;
- void (*private_free)(struct snd_pcm_runtime *runtime);
-
- /* -- hardware description -- */
- struct snd_pcm_hardware hw;
- struct snd_pcm_hw_constraints hw_constraints;
-
- /* -- timer -- */
- unsigned int timer_resolution; /* timer resolution */
-
- /* -- DMA -- */
- unsigned char *dma_area; /* DMA area */
- dma_addr_t dma_addr; /* physical bus address (not accessible from main CPU) */
- size_t dma_bytes; /* size of DMA area */
-
- struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
-
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
- /* -- OSS things -- */
- struct snd_pcm_oss_runtime oss;
-#endif
-};
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- For the operators (callbacks) of each sound driver, most of
- these records are supposed to be read-only. Only the PCM
- middle-layer changes / updates them. The exceptions are
-      the hardware description (hw), the DMA buffer information, and the
- private data. Besides, if you use the standard buffer allocation
- method via <function>snd_pcm_lib_malloc_pages()</function>,
- you don't need to set the DMA buffer information by yourself.
- </para>
-
- <para>
- In the sections below, important records are explained.
- </para>
-
- <section id="pcm-interface-runtime-hw">
- <title>Hardware Description</title>
- <para>
- The hardware descriptor (struct <structname>snd_pcm_hardware</structname>)
- contains the definitions of the fundamental hardware
- configuration. Above all, you'll need to define this in
- <link linkend="pcm-interface-operators-open-callback"><citetitle>
- the open callback</citetitle></link>.
- Note that the runtime instance holds the copy of the
- descriptor, not the pointer to the existing descriptor. That
- is, in the open callback, you can modify the copied descriptor
- (<constant>runtime-&gt;hw</constant>) as you need. For example, if the maximum
- number of channels is 1 only on some chip models, you can
- still use the same hardware descriptor and change the
- channels_max later:
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_pcm_runtime *runtime = substream->runtime;
- ...
- runtime->hw = snd_mychip_playback_hw; /* common definition */
- if (chip->model == VERY_OLD_ONE)
- runtime->hw.channels_max = 1;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Typically, you'll have a hardware descriptor as below:
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_pcm_hardware snd_mychip_playback_hw = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_8000_48000,
- .rate_min = 8000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 32768,
- .period_bytes_min = 4096,
- .period_bytes_max = 32768,
- .periods_min = 1,
- .periods_max = 1024,
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- <itemizedlist>
- <listitem><para>
- The <structfield>info</structfield> field contains the type and
- capabilities of this pcm. The bit flags are defined in
- <filename>&lt;sound/asound.h&gt;</filename> as
- <constant>SNDRV_PCM_INFO_XXX</constant>. Here, at least, you
- have to specify whether the mmap is supported and which
- interleaved format is supported.
- When the hardware supports mmap, add the
- <constant>SNDRV_PCM_INFO_MMAP</constant> flag here. When the
- hardware supports the interleaved or the non-interleaved
- formats, <constant>SNDRV_PCM_INFO_INTERLEAVED</constant> or
- <constant>SNDRV_PCM_INFO_NONINTERLEAVED</constant> flag must
- be set, respectively. If both are supported, you can set both,
- too.
- </para>
-
- <para>
- In the above example, <constant>MMAP_VALID</constant> and
- <constant>BLOCK_TRANSFER</constant> are specified for the OSS mmap
- mode. Usually both are set. Of course,
- <constant>MMAP_VALID</constant> is set only if the mmap is
- really supported.
- </para>
-
- <para>
- The other possible flags are
- <constant>SNDRV_PCM_INFO_PAUSE</constant> and
- <constant>SNDRV_PCM_INFO_RESUME</constant>. The
- <constant>PAUSE</constant> bit means that the pcm supports the
- <quote>pause</quote> operation, while the
- <constant>RESUME</constant> bit means that the pcm supports
- the full <quote>suspend/resume</quote> operation.
- If the <constant>PAUSE</constant> flag is set,
- the <structfield>trigger</structfield> callback below
- must handle the corresponding (pause push/release) commands.
- The suspend/resume trigger commands can be defined even without
- the <constant>RESUME</constant> flag. See <link
- linkend="power-management"><citetitle>
- Power Management</citetitle></link> section for details.
- </para>
-
- <para>
- When the PCM substreams can be synchronized (typically,
- synchronized start/stop of a playback and a capture streams),
- you can give <constant>SNDRV_PCM_INFO_SYNC_START</constant>,
- too. In this case, you'll need to check the linked-list of
- PCM substreams in the trigger callback. This will be
- described in the later section.
- </para>
- </listitem>
-
- <listitem>
- <para>
- <structfield>formats</structfield> field contains the bit-flags
- of supported formats (<constant>SNDRV_PCM_FMTBIT_XXX</constant>).
- If the hardware supports more than one format, give all or'ed
- bits. In the example above, the signed 16bit little-endian
- format is specified.
- </para>
- </listitem>
-
- <listitem>
- <para>
- <structfield>rates</structfield> field contains the bit-flags of
- supported rates (<constant>SNDRV_PCM_RATE_XXX</constant>).
- When the chip supports continuous rates, pass
-            the <constant>CONTINUOUS</constant> bit additionally.
- The pre-defined rate bits are provided only for typical
- rates. If your chip supports unconventional rates, you need to add
- the <constant>KNOT</constant> bit and set up the hardware
- constraint manually (explained later).
- </para>
- </listitem>
-
- <listitem>
- <para>
- <structfield>rate_min</structfield> and
- <structfield>rate_max</structfield> define the minimum and
- maximum sample rate. This should correspond somehow to
- <structfield>rates</structfield> bits.
- </para>
- </listitem>
-
- <listitem>
- <para>
-            <structfield>channels_min</structfield> and
-            <structfield>channels_max</structfield>
-            define, as you might have already expected, the minimum and maximum
- number of channels.
- </para>
- </listitem>
-
- <listitem>
- <para>
- <structfield>buffer_bytes_max</structfield> defines the
- maximum buffer size in bytes. There is no
- <structfield>buffer_bytes_min</structfield> field, since
- it can be calculated from the minimum period size and the
- minimum number of periods.
-            Meanwhile, <structfield>period_bytes_min</structfield> and
-            <structfield>period_bytes_max</structfield>
-            define the minimum and maximum size of the period in bytes.
- <structfield>periods_max</structfield> and
- <structfield>periods_min</structfield> define the maximum and
- minimum number of periods in the buffer.
- </para>
-
- <para>
- The <quote>period</quote> is a term that corresponds to
- a fragment in the OSS world. The period defines the size at
- which a PCM interrupt is generated. This size strongly
- depends on the hardware.
-            Generally, the smaller the period size, the more
-            interrupts you'll get, that is, the finer the control.
- In the case of capture, this size defines the input latency.
- On the other hand, the whole buffer size defines the
- output latency for the playback direction.
- </para>
- </listitem>
-
- <listitem>
- <para>
- There is also a field <structfield>fifo_size</structfield>.
- This specifies the size of the hardware FIFO, but currently it
- is neither used in the driver nor in the alsa-lib. So, you
- can ignore this field.
- </para>
- </listitem>
- </itemizedlist>
- </para>
- </section>
-
- <section id="pcm-interface-runtime-config">
- <title>PCM Configurations</title>
- <para>
- Ok, let's go back again to the PCM runtime records.
- The most frequently referred records in the runtime instance are
- the PCM configurations.
- The PCM configurations are stored in the runtime instance
- after the application sends <type>hw_params</type> data via
- alsa-lib. There are many fields copied from hw_params and
- sw_params structs. For example,
- <structfield>format</structfield> holds the format type
- chosen by the application. This field contains the enum value
- <constant>SNDRV_PCM_FORMAT_XXX</constant>.
- </para>
-
- <para>
- One thing to be noted is that the configured buffer and period
- sizes are stored in <quote>frames</quote> in the runtime.
-        In the ALSA world, 1 frame = channels * sample size.
- For conversion between frames and bytes, you can use the
- <function>frames_to_bytes()</function> and
- <function>bytes_to_frames()</function> helper functions.
- <informalexample>
- <programlisting>
-<![CDATA[
- period_bytes = frames_to_bytes(runtime, runtime->period_size);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Also, many software parameters (sw_params) are
- stored in frames, too. Please check the type of the field.
- <type>snd_pcm_uframes_t</type> is for the frames as unsigned
- integer while <type>snd_pcm_sframes_t</type> is for the frames
- as signed integer.
- </para>
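-
-      <para>
-        For instance, the opposite conversion uses the complementary
-        helper:
-
-        <informalexample>
-          <programlisting>
-<![CDATA[
-  buffer_frames = bytes_to_frames(runtime, buffer_bytes);
-]]>
-          </programlisting>
-        </informalexample>
-      </para>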
- </section>
-
- <section id="pcm-interface-runtime-dma">
- <title>DMA Buffer Information</title>
- <para>
- The DMA buffer is defined by the following four fields,
- <structfield>dma_area</structfield>,
- <structfield>dma_addr</structfield>,
- <structfield>dma_bytes</structfield> and
- <structfield>dma_private</structfield>.
- The <structfield>dma_area</structfield> holds the buffer
- pointer (the logical address). You can call
- <function>memcpy</function> from/to
- this pointer. Meanwhile, <structfield>dma_addr</structfield>
- holds the physical address of the buffer. This field is
- specified only when the buffer is a linear buffer.
- <structfield>dma_bytes</structfield> holds the size of buffer
- in bytes. <structfield>dma_private</structfield> is used for
- the ALSA DMA allocator.
- </para>
-
- <para>
- If you use a standard ALSA function,
- <function>snd_pcm_lib_malloc_pages()</function>, for
- allocating the buffer, these fields are set by the ALSA middle
- layer, and you should <emphasis>not</emphasis> change them by
- yourself. You can read them but not write them.
- On the other hand, if you want to allocate the buffer by
-        yourself, you'll need to manage it in the hw_params callback.
- At least, <structfield>dma_bytes</structfield> is mandatory.
- <structfield>dma_area</structfield> is necessary when the
- buffer is mmapped. If your driver doesn't support mmap, this
- field is not necessary. <structfield>dma_addr</structfield>
- is also optional. You can use
- <structfield>dma_private</structfield> as you like, too.
- </para>
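-
-      <para>
-        A rough sketch of a driver-managed buffer set up in the hw_params
-        callback (assuming the driver allocated a DMA buffer at probe time,
-        e.g. with <function>dma_alloc_coherent()</function>, and stored it
-        in the hypothetical fields chip-&gt;buffer_area and
-        chip-&gt;buffer_addr):
-
-        <informalexample>
-          <programlisting>
-<![CDATA[
-  static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
-                                      struct snd_pcm_hw_params *hw_params)
-  {
-          struct mychip *chip = snd_pcm_substream_chip(substream);
-          struct snd_pcm_runtime *runtime = substream->runtime;
-
-          /* point the runtime at the driver-managed buffer */
-          runtime->dma_area = chip->buffer_area;   /* CPU (virtual) address */
-          runtime->dma_addr = chip->buffer_addr;   /* bus/DMA address */
-          runtime->dma_bytes = params_buffer_bytes(hw_params);
-          return 0;
-  }
-]]>
-          </programlisting>
-        </informalexample>
-      </para>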
- </section>
-
- <section id="pcm-interface-runtime-status">
- <title>Running Status</title>
- <para>
-        The running status can be referred to via <constant>runtime-&gt;status</constant>.
- This is the pointer to the struct <structname>snd_pcm_mmap_status</structname>
- record. For example, you can get the current DMA hardware
- pointer via <constant>runtime-&gt;status-&gt;hw_ptr</constant>.
- </para>
-
- <para>
-        The DMA application pointer can be referred to via
- <constant>runtime-&gt;control</constant>, which points to the
- struct <structname>snd_pcm_mmap_control</structname> record.
-        However, accessing this value directly is not recommended.
- </para>
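-
-      <para>
-        A tiny, purely illustrative sketch of reading the current hardware
-        position from a driver callback:
-
-        <informalexample>
-          <programlisting>
-<![CDATA[
-  struct snd_pcm_runtime *runtime = substream->runtime;
-  snd_pcm_uframes_t pos = runtime->status->hw_ptr; /* last reported DMA position */
-]]>
-          </programlisting>
-        </informalexample>
-      </para>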
- </section>
-
- <section id="pcm-interface-runtime-private">
- <title>Private Data</title>
- <para>
- You can allocate a record for the substream and store it in
- <constant>runtime-&gt;private_data</constant>. Usually, this
- is done in
- <link linkend="pcm-interface-operators-open-callback"><citetitle>
- the open callback</citetitle></link>.
- Don't mix this with <constant>pcm-&gt;private_data</constant>.
- The <constant>pcm-&gt;private_data</constant> usually points to the
- chip instance assigned statically at the creation of PCM, while the
- <constant>runtime-&gt;private_data</constant> points to a dynamic
- data structure created at the PCM open callback.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_open(struct snd_pcm_substream *substream)
- {
- struct my_pcm_data *data;
- ....
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- substream->runtime->private_data = data;
- ....
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The allocated object must be released in
-        <link linkend="pcm-interface-operators-close-callback"><citetitle>
- the close callback</citetitle></link>.
- </para>
- </section>
-
- </section>
-
- <section id="pcm-interface-operators">
- <title>Operators</title>
- <para>
- OK, now let me give details about each pcm callback
- (<parameter>ops</parameter>). In general, every callback must
- return 0 if successful, or a negative error number
- such as <constant>-EINVAL</constant>. To choose an appropriate
- error number, it is advised to check what value other parts of
- the kernel return when the same kind of request fails.
- </para>
-
- <para>
- Each callback function takes at least one argument, the
- <structname>snd_pcm_substream</structname> pointer. To retrieve
- the chip record from the given substream instance, you can use the
- following macro.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- int xxx() {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- ....
- }
-]]>
- </programlisting>
- </informalexample>
-
- The macro reads <constant>substream-&gt;private_data</constant>,
- which is a copy of <constant>pcm-&gt;private_data</constant>.
- You can override the former if you need to assign different data
- records per PCM substream. For example, the cmi8330 driver assigns
- different private_data for playback and capture directions,
- because it uses two different codecs (SB- and AD-compatible) for
- different directions.
- </para>
-
- <section id="pcm-interface-operators-open-callback">
- <title>open callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_open(struct snd_pcm_substream *substream);
-]]>
- </programlisting>
- </informalexample>
-
- This is called when a pcm substream is opened.
- </para>
-
- <para>
- At least, here you have to initialize the runtime-&gt;hw
- record. Typically, this is done like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_open(struct snd_pcm_substream *substream)
- {
- struct mychip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- runtime->hw = snd_mychip_playback_hw;
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
-
- where <parameter>snd_mychip_playback_hw</parameter> is the
- pre-defined hardware description.
- </para>
-
- <para>
- You can allocate private data in this callback, as described
- in the <link linkend="pcm-interface-runtime-private"><citetitle>
- Private Data</citetitle></link> section.
- </para>
-
- <para>
- If the hardware configuration needs more constraints, set the
- hardware constraints here, too.
- See <link linkend="pcm-interface-constraints"><citetitle>
- Constraints</citetitle></link> for more details.
- </para>
- </section>
-
- <section id="pcm-interface-operators-close-callback">
- <title>close callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_close(struct snd_pcm_substream *substream);
-]]>
- </programlisting>
- </informalexample>
-
- Obviously, this is called when a pcm substream is closed.
- </para>
-
- <para>
- Any private instance for a pcm substream allocated in the
- open callback will be released here.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_close(struct snd_pcm_substream *substream)
- {
- ....
- kfree(substream->runtime->private_data);
- ....
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- <section id="pcm-interface-operators-ioctl-callback">
- <title>ioctl callback</title>
- <para>
- This is used for any special call to pcm ioctls. But
- usually you can pass a generic ioctl callback,
- <function>snd_pcm_lib_ioctl</function>.
- </para>
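-
- <para>
- For example, in the struct <structname>snd_pcm_ops</structname> table
- you can simply hook up the generic handler. This is only a sketch,
- assuming the other callbacks shown in this chapter are defined
- elsewhere in the driver:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static struct snd_pcm_ops snd_mychip_playback_ops = {
-          .open =      snd_xxx_open,
-          .close =     snd_xxx_close,
-          .ioctl =     snd_pcm_lib_ioctl,  /* generic ioctl handler */
-          .hw_params = snd_xxx_hw_params,
-          .hw_free =   snd_xxx_hw_free,
-          .prepare =   snd_xxx_prepare,
-          .trigger =   snd_xxx_trigger,
-          .pointer =   snd_xxx_pointer,
-  };
-]]>
- </programlisting>
- </informalexample>
- </para>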
- </section>
-
- <section id="pcm-interface-operators-hw-params-callback">
- <title>hw_params callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- This is called when the hardware parameter
- (<structfield>hw_params</structfield>) is set
- up by the application,
- that is, once when the buffer size, the period size, the
- format, etc. are defined for the pcm substream.
- </para>
-
- <para>
- Many hardware setups should be done in this callback,
- including the allocation of buffers.
- </para>
-
- <para>
- Parameters to be initialized are retrieved by
- the <function>params_xxx()</function> macros. To allocate
- the buffer, you can call a helper function,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
-]]>
- </programlisting>
- </informalexample>
-
- <function>snd_pcm_lib_malloc_pages()</function> is available
- only when the DMA buffers have been pre-allocated.
- See the section <link
- linkend="buffer-and-memory-buffer-types"><citetitle>
- Buffer Types</citetitle></link> for more details.
- </para>
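-
- <para>
- Putting it together, a minimal hw_params callback could look like
- the following sketch; the <structfield>rate</structfield> and
- <structfield>channels</structfield> fields of the chip record are
- hypothetical places to keep the parameters for later use:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static int snd_xxx_hw_params(struct snd_pcm_substream *substream,
-                               struct snd_pcm_hw_params *hw_params)
-  {
-          struct mychip *chip = snd_pcm_substream_chip(substream);
-
-          /* remember the requested parameters for the prepare callback */
-          chip->rate = params_rate(hw_params);
-          chip->channels = params_channels(hw_params);
-
-          /* allocate (or re-size) the pre-allocated DMA buffer */
-          return snd_pcm_lib_malloc_pages(substream,
-                                          params_buffer_bytes(hw_params));
-  }
-]]>
- </programlisting>
- </informalexample>
- </para>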
-
- <para>
- Note that this and <structfield>prepare</structfield> callbacks
- may be called multiple times per initialization.
- For example, the OSS emulation may
- call these callbacks at each change via its ioctl.
- </para>
-
- <para>
- Thus, you need to be careful not to allocate the same buffers
- many times, which will lead to memory leaks! Calling the
- helper function above many times is OK. It will release the
- previous buffer automatically if it was already allocated.
- </para>
-
- <para>
- Another note is that this callback is non-atomic
- (schedulable) by default, i.e. when no
- <structfield>nonatomic</structfield> flag is set.
- This is important, because the
- <structfield>trigger</structfield> callback
- is atomic (non-schedulable). That is, mutexes or any
- schedule-related functions are not available in
- <structfield>trigger</structfield> callback.
- Please see the subsection
- <link linkend="pcm-interface-atomicity"><citetitle>
- Atomicity</citetitle></link> for details.
- </para>
- </section>
-
- <section id="pcm-interface-operators-hw-free-callback">
- <title>hw_free callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_hw_free(struct snd_pcm_substream *substream);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- This is called to release the resources allocated via
- <structfield>hw_params</structfield>. For example, releasing the
- buffer via
- <function>snd_pcm_lib_malloc_pages()</function> is done by
- calling the following:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_free_pages(substream);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- This function is always called before the close callback is called.
- Also, the callback may be called multiple times.
- Keep track of whether the resource was already released.
- </para>
- </section>
-
- <section id="pcm-interface-operators-prepare-callback">
- <title>prepare callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_prepare(struct snd_pcm_substream *substream);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- This callback is called when the pcm is
- <quote>prepared</quote>. You can set the format type, sample
- rate, etc. here. The difference from
- <structfield>hw_params</structfield> is that the
- <structfield>prepare</structfield> callback will be called each
- time
- <function>snd_pcm_prepare()</function> is called, i.e. when
- recovering after underruns, etc.
- </para>
-
- <para>
- Note that this callback is now non-atomic.
- You can use schedule-related functions safely in this callback.
- </para>
-
- <para>
- In this and the following callbacks, you can refer to the
- values via the runtime record,
- substream-&gt;runtime.
- For example, to get the current
- rate, format or channels, access to
- runtime-&gt;rate,
- runtime-&gt;format or
- runtime-&gt;channels, respectively.
- The buffer pointer (the logical address) of the allocated
- buffer is set to runtime-&gt;dma_area. The buffer and period sizes are
- in runtime-&gt;buffer_size and runtime-&gt;period_size,
- respectively.
- </para>
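-
- <para>
- For instance, a prepare callback that programs the hardware from
- the runtime record could look like this sketch, where
- <function>mychip_set_sample_format()</function>,
- <function>mychip_set_rate()</function> and
- <function>mychip_set_buffer()</function> are hypothetical
- chip-specific helpers:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static int snd_xxx_prepare(struct snd_pcm_substream *substream)
-  {
-          struct mychip *chip = snd_pcm_substream_chip(substream);
-          struct snd_pcm_runtime *runtime = substream->runtime;
-
-          /* program the hardware from the current runtime configuration */
-          mychip_set_sample_format(chip, runtime->format);
-          mychip_set_rate(chip, runtime->rate);
-          mychip_set_buffer(chip, runtime->dma_addr,
-                            runtime->buffer_size, runtime->period_size);
-          return 0;
-  }
-]]>
- </programlisting>
- </informalexample>
- </para>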
-
- <para>
- Be careful that this callback will be called many times at
- each setup, too.
- </para>
- </section>
-
- <section id="pcm-interface-operators-trigger-callback">
- <title>trigger callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_trigger(struct snd_pcm_substream *substream, int cmd);
-]]>
- </programlisting>
- </informalexample>
-
- This is called when the pcm is started, stopped or paused.
- </para>
-
- <para>
- The requested action is specified in the second argument,
- <constant>SNDRV_PCM_TRIGGER_XXX</constant> in
- <filename>&lt;sound/pcm.h&gt;</filename>. At least,
- the <constant>START</constant> and <constant>STOP</constant>
- commands must be defined in this callback.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- /* do something to start the PCM engine */
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- /* do something to stop the PCM engine */
- break;
- default:
- return -EINVAL;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- When the pcm supports the pause operation (given in the info
- field of the hardware table), the <constant>PAUSE_PUSH</constant>
- and <constant>PAUSE_RELEASE</constant> commands must be
- handled here, too. The former is the command to pause the pcm,
- and the latter to restart the pcm again.
- </para>
-
- <para>
- When the pcm supports the suspend/resume operation,
- regardless of full or partial suspend/resume support,
- the <constant>SUSPEND</constant> and <constant>RESUME</constant>
- commands must be handled, too.
- These commands are issued when the power-management status is
- changed. Obviously, the <constant>SUSPEND</constant> and
- <constant>RESUME</constant> commands
- suspend and resume the pcm substream, and usually, they
- are identical to the <constant>STOP</constant> and
- <constant>START</constant> commands, respectively.
- See the <link linkend="power-management"><citetitle>
- Power Management</citetitle></link> section for details.
- </para>
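-
- <para>
- When pause and suspend/resume are supported, the switch statement
- typically grows like the following sketch, where
- <function>mychip_start()</function>, <function>mychip_stop()</function>
- and <function>mychip_pause()</function> are hypothetical helpers:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static int snd_xxx_trigger(struct snd_pcm_substream *substream, int cmd)
-  {
-          struct mychip *chip = snd_pcm_substream_chip(substream);
-
-          switch (cmd) {
-          case SNDRV_PCM_TRIGGER_START:
-          case SNDRV_PCM_TRIGGER_RESUME:
-          case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-                  mychip_start(chip);   /* (re)start the PCM engine */
-                  break;
-          case SNDRV_PCM_TRIGGER_STOP:
-          case SNDRV_PCM_TRIGGER_SUSPEND:
-                  mychip_stop(chip);    /* stop the PCM engine */
-                  break;
-          case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-                  mychip_pause(chip);   /* pause without resetting the position */
-                  break;
-          default:
-                  return -EINVAL;
-          }
-          return 0;
-  }
-]]>
- </programlisting>
- </informalexample>
- </para>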
-
- <para>
- As mentioned, this callback is atomic by default unless the
- <structfield>nonatomic</structfield> flag is set, so
- you cannot call functions which may sleep.
- The trigger callback should be as minimal as possible,
- just really triggering the DMA. The other stuff should be
- initialized properly in the hw_params and prepare callbacks
- beforehand.
- </para>
- </section>
-
- <section id="pcm-interface-operators-pointer-callback">
- <title>pointer callback</title>
- <para>
- <informalexample>
- <programlisting>
-<![CDATA[
- static snd_pcm_uframes_t snd_xxx_pointer(struct snd_pcm_substream *substream)
-]]>
- </programlisting>
- </informalexample>
-
- This callback is called when the PCM middle layer inquires
- the current hardware position on the buffer. The position must
- be returned in frames,
- ranging from 0 to buffer_size - 1.
- </para>
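-
- <para>
- A sketch of a pointer callback, assuming a hypothetical
- <function>mychip_get_hw_pointer()</function> helper that returns
- the current position in bytes:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static snd_pcm_uframes_t snd_xxx_pointer(struct snd_pcm_substream *substream)
-  {
-          struct mychip *chip = snd_pcm_substream_chip(substream);
-          unsigned int pos;
-
-          pos = mychip_get_hw_pointer(chip);  /* current position in bytes */
-          return bytes_to_frames(substream->runtime, pos);
-  }
-]]>
- </programlisting>
- </informalexample>
- </para>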
-
- <para>
- This is usually called from the buffer-update routine in the
- pcm middle layer, which is invoked when
- <function>snd_pcm_period_elapsed()</function> is called in the
- interrupt routine. Then the pcm middle layer updates the
- position and calculates the available space, and wakes up the
- sleeping poll threads, etc.
- </para>
-
- <para>
- This callback is also atomic by default.
- </para>
- </section>
-
- <section id="pcm-interface-operators-copy-silence">
- <title>copy and silence callbacks</title>
- <para>
- These callbacks are not mandatory, and can be omitted in
- most cases. These callbacks are used when the hardware buffer
- cannot be in the normal memory space. Some chips have their
- own buffer on the hardware which is not mappable. In such a
- case, you have to transfer the data manually from the memory
- buffer to the hardware buffer. Or, if the buffer is
- non-contiguous on both physical and virtual memory spaces,
- these callbacks must be defined, too.
- </para>
-
- <para>
- If these two callbacks are defined, copy and set-silence
- operations are done by them. The details will be described in
- the later section <link
- linkend="buffer-and-memory"><citetitle>Buffer and Memory
- Management</citetitle></link>.
- </para>
- </section>
-
- <section id="pcm-interface-operators-ack">
- <title>ack callback</title>
- <para>
- This callback is also not mandatory. This callback is called
- when the appl_ptr is updated in read or write operations.
- Some drivers like emu10k1-fx and cs46xx need to track the
- current appl_ptr for the internal buffer, and this callback
- is useful only for such a purpose.
- </para>
- <para>
- This callback is atomic by default.
- </para>
- </section>
-
- <section id="pcm-interface-operators-page-callback">
- <title>page callback</title>
-
- <para>
- This callback is optional too. This callback is used
- mainly for non-contiguous buffers. The mmap handler calls this
- callback to get the page address. Some examples will be
- explained in the later section <link
- linkend="buffer-and-memory"><citetitle>Buffer and Memory
- Management</citetitle></link>, too.
- </para>
- </section>
- </section>
-
- <section id="pcm-interface-interrupt-handler">
- <title>Interrupt Handler</title>
- <para>
- The rest of the pcm stuff is the PCM interrupt handler. The
- role of the PCM interrupt handler in the sound driver is to update
- the buffer position and to tell the PCM middle layer when the
- buffer position goes across the prescribed period size. To
- inform this, call the <function>snd_pcm_period_elapsed()</function>
- function.
- </para>
-
- <para>
- Sound chips generate interrupts in several different ways.
- </para>
-
- <section id="pcm-interface-interrupt-handler-boundary">
- <title>Interrupts at the period (fragment) boundary</title>
- <para>
- This is the most frequently found type: the hardware
- generates an interrupt at each period boundary.
- In this case, you can call
- <function>snd_pcm_period_elapsed()</function> at each
- interrupt.
- </para>
-
- <para>
- <function>snd_pcm_period_elapsed()</function> takes the
- substream pointer as its argument. Thus, you need to keep the
- substream pointer accessible from the chip instance. For
- example, define a substream field in the chip record to hold the
- currently running substream pointer, and set the pointer value
- in the open callback (and reset it in the close callback).
- </para>
-
- <para>
- If you acquire a spinlock in the interrupt handler, and the
- lock is used in other pcm callbacks, too, then you have to
- release the lock before calling
- <function>snd_pcm_period_elapsed()</function>, because
- <function>snd_pcm_period_elapsed()</function> calls other pcm
- callbacks inside.
- </para>
-
- <para>
- Typical code would be like:
-
- <example>
- <title>Interrupt Handler Case #1</title>
- <programlisting>
-<![CDATA[
- static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
- {
- struct mychip *chip = dev_id;
- spin_lock(&chip->lock);
- ....
- if (pcm_irq_invoked(chip)) {
- /* call updater, unlock before it */
- spin_unlock(&chip->lock);
- snd_pcm_period_elapsed(chip->substream);
- spin_lock(&chip->lock);
- /* acknowledge the interrupt if necessary */
- }
- ....
- spin_unlock(&chip->lock);
- return IRQ_HANDLED;
- }
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="pcm-interface-interrupt-handler-timer">
- <title>High frequency timer interrupts</title>
- <para>
- This happens when the hardware doesn't generate interrupts
- at the period boundary but issues timer interrupts at a fixed
- timer rate (e.g. es1968 or ymfpci drivers).
- In this case, you need to check the current hardware
- position and accumulate the processed sample length at each
- interrupt. When the accumulated size exceeds the period
- size, call
- <function>snd_pcm_period_elapsed()</function> and reset the
- accumulator.
- </para>
-
- <para>
- Typical code would be like the following.
-
- <example>
- <title>Interrupt Handler Case #2</title>
- <programlisting>
-<![CDATA[
- static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
- {
- struct mychip *chip = dev_id;
- spin_lock(&chip->lock);
- ....
- if (pcm_irq_invoked(chip)) {
- struct snd_pcm_substream *substream = chip->substream;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int last_ptr, size;
- /* get the current hardware pointer (in frames) */
- last_ptr = get_hw_ptr(chip);
- /* calculate the processed frames since the
- * last update
- */
- if (last_ptr < chip->last_ptr)
- size = runtime->buffer_size + last_ptr
- - chip->last_ptr;
- else
- size = last_ptr - chip->last_ptr;
- /* remember the last updated point */
- chip->last_ptr = last_ptr;
- /* accumulate the size */
- chip->size += size;
- /* over the period boundary? */
- if (chip->size >= runtime->period_size) {
- /* reset the accumulator */
- chip->size %= runtime->period_size;
- /* call updater */
- spin_unlock(&chip->lock);
- snd_pcm_period_elapsed(substream);
- spin_lock(&chip->lock);
- }
- /* acknowledge the interrupt if necessary */
- }
- ....
- spin_unlock(&chip->lock);
- return IRQ_HANDLED;
- }
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="pcm-interface-interrupt-handler-both">
- <title>On calling <function>snd_pcm_period_elapsed()</function></title>
- <para>
- In both cases, even if more than one period has elapsed, you
- don't have to call
- <function>snd_pcm_period_elapsed()</function> many times. Call
- it only once. The pcm layer will then check the current hardware
- pointer and update to the latest status.
- </para>
- </section>
- </section>
-
- <section id="pcm-interface-atomicity">
- <title>Atomicity</title>
- <para>
- One of the most important (and thus difficult to debug) problems
- in kernel programming is race conditions.
- In the Linux kernel, they are usually avoided via spin-locks, mutexes
- or semaphores. In general, if a race condition can happen
- in an interrupt handler, it has to be managed atomically, and you
- have to use a spinlock to protect the critical section. If the
- critical section is not in interrupt handler code and
- if taking a relatively long time to execute is acceptable, you
- should use mutexes or semaphores instead.
- </para>
-
- <para>
- As already seen, some pcm callbacks are atomic and some are
- not. For example, the <parameter>hw_params</parameter> callback is
- non-atomic, while <parameter>trigger</parameter> callback is
- atomic. This means that the latter is called with a spinlock already
- held by the PCM middle layer. Please take this atomicity into
- account when you choose a locking scheme in the callbacks.
- </para>
-
- <para>
- In the atomic callbacks, you cannot use functions which may call
- <function>schedule</function> or go to
- <function>sleep</function>. Semaphores and mutexes can sleep,
- and hence they cannot be used inside the atomic callbacks
- (e.g. <parameter>trigger</parameter> callback).
- To implement some delay in such a callback, please use
- <function>udelay()</function> or <function>mdelay()</function>.
- </para>
-
- <para>
- All three atomic callbacks (trigger, pointer, and ack) are
- called with local interrupts disabled.
- </para>
-
- <para>
- The recent changes in PCM core code, however, allow all PCM
- operations to be non-atomic. This assumes that all caller
- sides are in non-atomic contexts. For example, the function
- <function>snd_pcm_period_elapsed()</function> is called
- typically from the interrupt handler. But, if you set up the
- driver to use a threaded interrupt handler, this call can be in
- non-atomic context, too. In such a case, you can set
- <structfield>nonatomic</structfield> field of the
- <structname>snd_pcm</structname> object after creating it.
- When this flag is set, mutexes and rwsems are used internally in
- the PCM core instead of spinlocks and rwlocks, so that you can call
- all PCM functions safely in a non-atomic context.
- </para>
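-
- <para>
- A minimal sketch of setting the flag right after the PCM creation
- (assuming the rest of the driver, including the interrupt handling,
- really runs in non-atomic context):
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  struct snd_pcm *pcm;
-  int err;
-
-  err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
-  if (err < 0)
-          return err;
-  pcm->nonatomic = true;  /* all PCM callbacks may now sleep */
-]]>
- </programlisting>
- </informalexample>
- </para>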
-
- </section>
- <section id="pcm-interface-constraints">
- <title>Constraints</title>
- <para>
- If your chip supports unconventional sample rates, or only a
- limited set of sample rates, you need to set a constraint for the
- condition.
- </para>
-
- <para>
- For example, in order to restrict the sample rates to certain
- supported values, use
- <function>snd_pcm_hw_constraint_list()</function>.
- You need to call this function in the open callback.
-
- <example>
- <title>Example of Hardware Constraints</title>
- <programlisting>
-<![CDATA[
- static unsigned int rates[] =
- {4000, 10000, 22050, 44100};
- static struct snd_pcm_hw_constraint_list constraints_rates = {
- .count = ARRAY_SIZE(rates),
- .list = rates,
- .mask = 0,
- };
-
- static int snd_mychip_pcm_open(struct snd_pcm_substream *substream)
- {
- int err;
- ....
- err = snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE,
- &constraints_rates);
- if (err < 0)
- return err;
- ....
- }
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- There are many different constraints.
- Look at <filename>sound/pcm.h</filename> for a complete list.
- You can even define your own constraint rules.
- For example, let's suppose my_chip can manage a substream of 1 channel
- if and only if the format is S16_LE, otherwise it supports any format
- specified in the <structname>snd_pcm_hardware</structname> structure (or in any
- other constraint_list). You can build a rule like this:
-
- <example>
- <title>Example of Hardware Constraints for Channels</title>
- <programlisting>
-<![CDATA[
- static int hw_rule_channels_by_format(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
- {
- struct snd_interval *c = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- struct snd_interval ch;
-
- snd_interval_any(&ch);
- if (f->bits[0] == SNDRV_PCM_FMTBIT_S16_LE) {
- ch.min = ch.max = 1;
- ch.integer = 1;
- return snd_interval_refine(c, &ch);
- }
- return 0;
- }
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- Then you need to call this function to add your rule:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_channels_by_format, NULL,
- SNDRV_PCM_HW_PARAM_FORMAT, -1);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The rule function is called when an application sets the PCM
- format, and it refines the number of channels accordingly.
- But an application may set the number of channels before
- setting the format. Thus you also need to define the inverse rule:
-
- <example>
- <title>Example of Hardware Constraints for Formats</title>
- <programlisting>
-<![CDATA[
- static int hw_rule_format_by_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
- {
- struct snd_interval *c = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- struct snd_mask fmt;
-
- snd_mask_any(&fmt); /* Init the struct */
- if (c->min < 2) {
- fmt.bits[0] &= SNDRV_PCM_FMTBIT_S16_LE;
- return snd_mask_refine(f, &fmt);
- }
- return 0;
- }
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- ...and in the open callback:
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_format_by_channels, NULL,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- I won't give more details here, rather I
- would like to say, <quote>Luke, use the source.</quote>
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Control Interface -->
-<!-- ****************************************************** -->
- <chapter id="control-interface">
- <title>Control Interface</title>
-
- <section id="control-interface-general">
- <title>General</title>
- <para>
- The control interface is used widely for many switches,
- sliders, etc. which are accessed from user-space. Its most
- important use is the mixer interface. In other words, since ALSA
- 0.9.x, all the mixer stuff is implemented on the control kernel API.
- </para>
-
- <para>
- ALSA has a well-defined AC97 control module. If your chip
- supports only the AC97 and nothing else, you can skip this
- section.
- </para>
-
- <para>
- The control API is defined in
- <filename>&lt;sound/control.h&gt;</filename>.
- Include this file if you want to add your own controls.
- </para>
- </section>
-
- <section id="control-interface-definition">
- <title>Definition of Controls</title>
- <para>
- To create a new control, you need to define the
- following three
- callbacks: <structfield>info</structfield>,
- <structfield>get</structfield> and
- <structfield>put</structfield>. Then, define a
- struct <structname>snd_kcontrol_new</structname> record, such as:
-
- <example>
- <title>Definition of a Control</title>
- <programlisting>
-<![CDATA[
- static struct snd_kcontrol_new my_control = {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = 0xffff,
- .info = my_control_info,
- .get = my_control_get,
- .put = my_control_put
- };
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- The <structfield>iface</structfield> field specifies the control
- type, <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>, which
- is usually <constant>MIXER</constant>.
- Use <constant>CARD</constant> for global controls that are not
- logically part of the mixer.
- If the control is closely associated with some specific device on
- the sound card, use <constant>HWDEP</constant>,
- <constant>PCM</constant>, <constant>RAWMIDI</constant>,
- <constant>TIMER</constant>, or <constant>SEQUENCER</constant>, and
- specify the device number with the
- <structfield>device</structfield> and
- <structfield>subdevice</structfield> fields.
- </para>
-
- <para>
- The <structfield>name</structfield> is the name identifier
- string. Since ALSA 0.9.x, the control name is very important,
- because the control's role is determined from its name. There are
- pre-defined standard control names. The details are described in
- the <link linkend="control-interface-control-names"><citetitle>
- Control Names</citetitle></link> subsection.
- </para>
-
- <para>
- The <structfield>index</structfield> field holds the index number
- of this control. If there are several different controls with
- the same name, they can be distinguished by the index
- number. This is the case when
- several codecs exist on the card. If the index is zero, you can
- omit the definition above.
- </para>
-
- <para>
- The <structfield>access</structfield> field contains the access
- type of this control. Give the combination of bit masks,
- <constant>SNDRV_CTL_ELEM_ACCESS_XXX</constant>, there.
- The details will be explained in
- the <link linkend="control-interface-access-flags"><citetitle>
- Access Flags</citetitle></link> subsection.
- </para>
-
- <para>
- The <structfield>private_value</structfield> field contains
- an arbitrary long integer value for this record. When using
- the generic <structfield>info</structfield>,
- <structfield>get</structfield> and
- <structfield>put</structfield> callbacks, you can pass a value
- through this field. If several small numbers are necessary, you can
- combine them bitwise. Or, it's possible to store a pointer
- (cast to unsigned long) to some record in this field, too.
- </para>
-
- <para>
- The <structfield>tlv</structfield> field can be used to provide
- metadata about the control; see the
- <link linkend="control-interface-tlv">
- <citetitle>Metadata</citetitle></link> subsection.
- </para>
-
- <para>
- The other three are
- <link linkend="control-interface-callbacks"><citetitle>
- callback functions</citetitle></link>.
- </para>
- </section>
-
- <section id="control-interface-control-names">
- <title>Control Names</title>
- <para>
- There are some standards to define the control names. A
- control name is usually composed of three parts, as
- <quote>SOURCE DIRECTION FUNCTION</quote>.
- </para>
-
- <para>
- The first, <constant>SOURCE</constant>, specifies the source
- of the control, and is a string such as <quote>Master</quote>,
- <quote>PCM</quote>, <quote>CD</quote> and
- <quote>Line</quote>. There are many pre-defined sources.
- </para>
-
- <para>
- The second, <constant>DIRECTION</constant>, is one of the
- following strings according to the direction of the control:
- <quote>Playback</quote>, <quote>Capture</quote>, <quote>Bypass
- Playback</quote> and <quote>Bypass Capture</quote>. Or, it can
- be omitted, meaning both playback and capture directions.
- </para>
-
- <para>
- The third, <constant>FUNCTION</constant>, is one of the
- following strings according to the function of the control:
- <quote>Switch</quote>, <quote>Volume</quote> and
- <quote>Route</quote>.
- </para>
-
- <para>
- Examples of control names are, thus, <quote>Master Capture
- Switch</quote> or <quote>PCM Playback Volume</quote>.
- </para>
-
- <para>
- There are some exceptions:
- </para>
-
- <section id="control-interface-control-names-global">
- <title>Global capture and playback</title>
- <para>
- <quote>Capture Source</quote>, <quote>Capture Switch</quote>
- and <quote>Capture Volume</quote> are used for the global
- capture (input) source, switch and volume. Similarly,
- <quote>Playback Switch</quote> and <quote>Playback
- Volume</quote> are used for the global output gain switch and
- volume.
- </para>
- </section>
-
- <section id="control-interface-control-names-tone">
- <title>Tone-controls</title>
- <para>
- Tone-control switches and volumes are specified like
- <quote>Tone Control - XXX</quote>, e.g. <quote>Tone Control -
- Switch</quote>, <quote>Tone Control - Bass</quote>,
- <quote>Tone Control - Center</quote>.
- </para>
- </section>
-
- <section id="control-interface-control-names-3d">
- <title>3D controls</title>
- <para>
- 3D-control switches and volumes are specified like <quote>3D
- Control - XXX</quote>, e.g. <quote>3D Control -
- Switch</quote>, <quote>3D Control - Center</quote>, <quote>3D
- Control - Space</quote>.
- </para>
- </section>
-
- <section id="control-interface-control-names-mic">
- <title>Mic boost</title>
- <para>
- The mic-boost switch is set as <quote>Mic Boost</quote> or
- <quote>Mic Boost (6dB)</quote>.
- </para>
-
- <para>
- More precise information can be found in
- <filename>Documentation/sound/alsa/ControlNames.txt</filename>.
- </para>
- </section>
- </section>
-
- <section id="control-interface-access-flags">
- <title>Access Flags</title>
-
- <para>
- The access flag is the bitmask which specifies the access type
- of the given control. The default access type is
- <constant>SNDRV_CTL_ELEM_ACCESS_READWRITE</constant>,
- which means both read and write are allowed to this control.
- When the access flag is omitted (i.e. = 0), it is
- considered as <constant>READWRITE</constant> access by default.
- </para>
-
- <para>
- When the control is read-only, pass
- <constant>SNDRV_CTL_ELEM_ACCESS_READ</constant> instead.
- In this case, you don't have to define
- the <structfield>put</structfield> callback.
- Similarly, when the control is write-only (although it's a rare
- case), you can use the <constant>WRITE</constant> flag instead, and
- you don't need the <structfield>get</structfield> callback.
- </para>
-
- <para>
- If the control value changes frequently (e.g. the VU meter),
- the <constant>VOLATILE</constant> flag should be given. This means
- that the control may be changed without
- <link linkend="control-interface-change-notification"><citetitle>
- notification</citetitle></link>. Applications should poll such
- a control constantly.
- </para>
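-
- <para>
- For example, a read-only level-meter control might be declared like
- this sketch; the control name and the callbacks are hypothetical:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  static struct snd_kcontrol_new my_vu_meter = {
-          .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-          .name = "PCM Playback VU Meter",
-          .access = SNDRV_CTL_ELEM_ACCESS_READ |
-                    SNDRV_CTL_ELEM_ACCESS_VOLATILE,
-          .info = my_vu_info,
-          .get = my_vu_get,   /* no put callback for a read-only control */
-  };
-]]>
- </programlisting>
- </informalexample>
- </para>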
-
- <para>
- When the control is inactive, set
- the <constant>INACTIVE</constant> flag, too.
- There are <constant>LOCK</constant> and
- <constant>OWNER</constant> flags to change the write
- permissions.
- </para>
-
- </section>
-
- <section id="control-interface-callbacks">
- <title>Callbacks</title>
-
- <section id="control-interface-callbacks-info">
- <title>info callback</title>
- <para>
- The <structfield>info</structfield> callback is used to get
- detailed information on this control. It must fill in the
- fields of the given struct <structname>snd_ctl_elem_info</structname>
- object. For example, for a boolean control with a single
- element:
-
- <example>
- <title>Example of info callback</title>
- <programlisting>
-<![CDATA[
- static int snd_myctl_mono_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
- {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->count = 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 1;
- return 0;
- }
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- The <structfield>type</structfield> field specifies the type
- of the control. There are <constant>BOOLEAN</constant>,
- <constant>INTEGER</constant>, <constant>ENUMERATED</constant>,
- <constant>BYTES</constant>, <constant>IEC958</constant> and
- <constant>INTEGER64</constant>. The
- <structfield>count</structfield> field specifies the
- number of elements in this control. For example, a stereo
- volume would have count = 2. The
- <structfield>value</structfield> field is a union, and
- the values stored depend on the type. The boolean and
- integer types are identical.
- </para>
-
- <para>
- The enumerated type is a bit different from others. You'll
- need to set the string for the currently given item index.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_myctl_enum_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
- {
- static char *texts[4] = {
- "First", "Second", "Third", "Fourth"
- };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The above callback can be simplified with a helper function,
- <function>snd_ctl_enum_info</function>. The final code
- looks like below.
- (You can pass ARRAY_SIZE(texts) instead of 4 in the third
- argument; it's a matter of taste.)
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_myctl_enum_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
- {
- static char *texts[4] = {
- "First", "Second", "Third", "Fourth"
- };
- return snd_ctl_enum_info(uinfo, 1, 4, texts);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Some common info callbacks are available for your convenience:
- <function>snd_ctl_boolean_mono_info()</function> and
- <function>snd_ctl_boolean_stereo_info()</function>.
- Obviously, the former is an info callback for a mono channel
- boolean item, just like <function>snd_myctl_mono_info</function>
- above, and the latter is for a stereo channel boolean item.
- </para>
-
- </section>
-
- <section id="control-interface-callbacks-get">
- <title>get callback</title>
-
- <para>
- This callback is used to read the current value of the
- control and to return to user-space.
- </para>
-
- <para>
- For example,
-
- <example>
- <title>Example of get callback</title>
- <programlisting>
-<![CDATA[
- static int snd_myctl_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
- {
- struct mychip *chip = snd_kcontrol_chip(kcontrol);
- ucontrol->value.integer.value[0] = get_some_value(chip);
- return 0;
- }
-]]>
- </programlisting>
- </example>
- </para>
-
- <para>
- The <structfield>value</structfield> field depends on
- the type of control as well as on the info callback. For example,
- the sb driver uses its private_value field to store the register offset,
- the bit-shift and the bit-mask. The
- <structfield>private_value</structfield> field is set as follows:
- <informalexample>
- <programlisting>
-<![CDATA[
- .private_value = reg | (shift << 16) | (mask << 24)
-]]>
- </programlisting>
- </informalexample>
- and is retrieved in callbacks like
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_sbmixer_get_single(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
- {
- int reg = kcontrol->private_value & 0xff;
- int shift = (kcontrol->private_value >> 16) & 0xff;
- int mask = (kcontrol->private_value >> 24) & 0xff;
- ....
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- In the <structfield>get</structfield> callback,
- you have to fill all the elements if the
- control has more than one element,
- i.e. <structfield>count</structfield> &gt; 1.
- In the example above, we filled only one element
- (<structfield>value.integer.value[0]</structfield>) since it's
- assumed as <structfield>count</structfield> = 1.
- </para>
- </section>
-
- <section id="control-interface-callbacks-put">
- <title>put callback</title>
-
- <para>
- This callback is used to write a value from user-space.
- </para>
-
- <para>
- For example,
-
- <example>
- <title>Example of put callback</title>
- <programlisting>
-<![CDATA[
- static int snd_myctl_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
- {
- struct mychip *chip = snd_kcontrol_chip(kcontrol);
- int changed = 0;
- if (chip->current_value !=
- ucontrol->value.integer.value[0]) {
- change_current_value(chip,
- ucontrol->value.integer.value[0]);
- changed = 1;
- }
- return changed;
- }
-]]>
- </programlisting>
- </example>
-
- As seen above, you have to return 1 if the value is
- changed. If the value is not changed, return 0 instead.
- If any fatal error happens, return a negative error code as
- usual.
- </para>
-
- <para>
- As in the <structfield>get</structfield> callback,
- when the control has more than one element,
- all elements must be evaluated in this callback, too.
- </para>
- </section>
-
- <section id="control-interface-callbacks-all">
- <title>Callbacks are not atomic</title>
- <para>
- All these three callbacks are basically not atomic.
- </para>
- </section>
- </section>
-
- <section id="control-interface-constructor">
- <title>Constructor</title>
- <para>
- When everything is ready, finally we can create a new
- control. To create a control, there are two functions to be
- called, <function>snd_ctl_new1()</function> and
- <function>snd_ctl_add()</function>.
- </para>
-
- <para>
- In the simplest way, you can do it like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- err = snd_ctl_add(card, snd_ctl_new1(&my_control, chip));
- if (err < 0)
- return err;
-]]>
- </programlisting>
- </informalexample>
-
- where <parameter>my_control</parameter> is the
- struct <structname>snd_kcontrol_new</structname> object defined above, and chip
- is the object pointer to be passed to
- kcontrol-&gt;private_data
- which can be referred to in callbacks.
- </para>
-
- <para>
- <function>snd_ctl_new1()</function> allocates a new
- <structname>snd_kcontrol</structname> instance,
- and <function>snd_ctl_add</function> assigns the given
- control component to the card.
- </para>
- </section>
-
- <section id="control-interface-change-notification">
- <title>Change Notification</title>
- <para>
- If you need to change and update a control in the interrupt
- routine, you can call <function>snd_ctl_notify()</function>. For
- example,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, id_pointer);
-]]>
- </programlisting>
- </informalexample>
-
- This function takes the card pointer, the event-mask, and the
- control id pointer for the notification. The event-mask
- specifies the types of notification; in the above
- example, the change of control values is notified.
- The id pointer is the pointer of struct <structname>snd_ctl_elem_id</structname>
- to be notified.
- You can find some examples in <filename>es1938.c</filename> or
- <filename>es1968.c</filename> for hardware volume interrupts.
- </para>
- </section>
-
- <section id="control-interface-tlv">
- <title>Metadata</title>
- <para>
- To provide information about the dB values of a mixer control, use
- one of the <constant>DECLARE_TLV_xxx</constant> macros from
- <filename>&lt;sound/tlv.h&gt;</filename> to define a variable
- containing this information, set the <structfield>tlv.p
- </structfield> field to point to this variable, and include the
- <constant>SNDRV_CTL_ELEM_ACCESS_TLV_READ</constant> flag in the
- <structfield>access</structfield> field; like this:
- <informalexample>
- <programlisting>
-<![CDATA[
- static DECLARE_TLV_DB_SCALE(db_scale_my_control, -4050, 150, 0);
-
- static struct snd_kcontrol_new my_control = {
- ...
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- ...
- .tlv.p = db_scale_my_control,
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The <function>DECLARE_TLV_DB_SCALE</function> macro defines
- information about a mixer control where each step in the control's
- value changes the dB value by a constant dB amount.
- The first parameter is the name of the variable to be defined.
- The second parameter is the minimum value, in units of 0.01 dB.
- The third parameter is the step size, in units of 0.01 dB.
- Set the fourth parameter to 1 if the minimum value actually mutes
- the control.
- </para>
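-
- <para>
- For instance, with the declaration above (minimum -4050, step 150),
- a control whose value range is 0..30 maps to a dB range from
- -40.50 dB at value 0 up to -40.50 + 30 * 1.50 = +4.50 dB at value 30.
- </para>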
-
- <para>
- The <function>DECLARE_TLV_DB_LINEAR</function> macro defines
- information about a mixer control where the control's value affects
- the output linearly.
- The first parameter is the name of the variable to be defined.
- The second parameter is the minimum value, in units of 0.01 dB.
- The third parameter is the maximum value, in units of 0.01 dB.
- If the minimum value mutes the control, set the second parameter to
- <constant>TLV_DB_GAIN_MUTE</constant>.
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- API for AC97 Codec -->
-<!-- ****************************************************** -->
- <chapter id="api-ac97">
- <title>API for AC97 Codec</title>
-
- <section>
- <title>General</title>
- <para>
- The ALSA AC97 codec layer is a well-defined one, and you don't
- have to write much code to control it. Only low-level control
- routines are necessary. The AC97 codec API is defined in
- <filename>&lt;sound/ac97_codec.h&gt;</filename>.
- </para>
- </section>
-
- <section id="api-ac97-example">
- <title>Full Code Example</title>
- <para>
- <example>
- <title>Example of AC97 Interface</title>
- <programlisting>
-<![CDATA[
- struct mychip {
- ....
- struct snd_ac97 *ac97;
- ....
- };
-
- static unsigned short snd_mychip_ac97_read(struct snd_ac97 *ac97,
- unsigned short reg)
- {
- struct mychip *chip = ac97->private_data;
- ....
- /* read a register value here from the codec */
- return the_register_value;
- }
-
- static void snd_mychip_ac97_write(struct snd_ac97 *ac97,
- unsigned short reg, unsigned short val)
- {
- struct mychip *chip = ac97->private_data;
- ....
- /* write the given register value to the codec */
- }
-
- static int snd_mychip_ac97(struct mychip *chip)
- {
- struct snd_ac97_bus *bus;
- struct snd_ac97_template ac97;
- int err;
- static struct snd_ac97_bus_ops ops = {
- .write = snd_mychip_ac97_write,
- .read = snd_mychip_ac97_read,
- };
-
- err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
- if (err < 0)
- return err;
- memset(&ac97, 0, sizeof(ac97));
- ac97.private_data = chip;
- return snd_ac97_mixer(bus, &ac97, &chip->ac97);
- }
-
-]]>
- </programlisting>
- </example>
- </para>
- </section>
-
- <section id="api-ac97-constructor">
- <title>Constructor</title>
- <para>
- To create an ac97 instance, first call <function>snd_ac97_bus</function>
- with a struct <structname>snd_ac97_bus_ops</structname> record containing the callback functions.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_ac97_bus *bus;
- static struct snd_ac97_bus_ops ops = {
- .write = snd_mychip_ac97_write,
- .read = snd_mychip_ac97_read,
- };
-
- snd_ac97_bus(card, 0, &ops, NULL, &bus);
-]]>
- </programlisting>
- </informalexample>
-
- The bus record is shared among all the ac97 instances belonging to it.
- </para>
-
- <para>
- And then call <function>snd_ac97_mixer()</function> with a
- struct <structname>snd_ac97_template</structname>
- record together with the bus pointer created above.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_ac97_template ac97;
- int err;
-
- memset(&ac97, 0, sizeof(ac97));
- ac97.private_data = chip;
- snd_ac97_mixer(bus, &ac97, &chip->ac97);
-]]>
- </programlisting>
- </informalexample>
-
- where chip-&gt;ac97 is a pointer to a newly created
- struct <structname>snd_ac97</structname> instance.
- In this case, the chip pointer is set as the private data, so that
- the read/write callback functions can refer to this chip instance.
- This instance is not necessarily stored in the chip
- record. If you need to change the register values from the
- driver, or need the suspend/resume of ac97 codecs, keep this
- pointer to pass to the corresponding functions.
- </para>
- </section>
-
- <section id="api-ac97-callbacks">
- <title>Callbacks</title>
- <para>
- The standard callbacks are <structfield>read</structfield> and
- <structfield>write</structfield>. Obviously they
- correspond to the functions for read and write accesses to the
- hardware low-level code.
- </para>
-
- <para>
- The <structfield>read</structfield> callback returns the
- register value specified in the argument.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static unsigned short snd_mychip_ac97_read(struct snd_ac97 *ac97,
- unsigned short reg)
- {
- struct mychip *chip = ac97->private_data;
- ....
- return the_register_value;
- }
-]]>
- </programlisting>
- </informalexample>
-
- Here, the chip can be cast from ac97-&gt;private_data.
- </para>
-
- <para>
- Meanwhile, the <structfield>write</structfield> callback is
- used to set the register value.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void snd_mychip_ac97_write(struct snd_ac97 *ac97,
- unsigned short reg, unsigned short val)
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- These callbacks are non-atomic like the control API callbacks.
- </para>
-
- <para>
- There are also other callbacks:
- <structfield>reset</structfield>,
- <structfield>wait</structfield> and
- <structfield>init</structfield>.
- </para>
-
- <para>
- The <structfield>reset</structfield> callback is used to reset
- the codec. If the chip requires a special kind of reset, you can
- define this callback.
- </para>
-
- <para>
- The <structfield>wait</structfield> callback is used to
- add some waiting time in the standard initialization of the codec. If the
- chip requires extra waiting time, define this callback.
- </para>
-
- <para>
- The <structfield>init</structfield> callback is used for
- additional initialization of the codec.
- </para>
- </section>
-
- <section id="api-ac97-updating-registers">
- <title>Updating Registers in The Driver</title>
- <para>
- If you need to access the codec from the driver, you can
- call the following functions:
- <function>snd_ac97_write()</function>,
- <function>snd_ac97_read()</function>,
- <function>snd_ac97_update()</function> and
- <function>snd_ac97_update_bits()</function>.
- </para>
-
- <para>
- Both <function>snd_ac97_write()</function> and
- <function>snd_ac97_update()</function> functions are used to
- set a value to the given register
- (<constant>AC97_XXX</constant>). The difference between them is
- that <function>snd_ac97_update()</function> doesn't write a
- value if the given value has been already set, while
- <function>snd_ac97_write()</function> always rewrites the
- value.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_ac97_write(ac97, AC97_MASTER, 0x8080);
- snd_ac97_update(ac97, AC97_MASTER, 0x8080);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- <function>snd_ac97_read()</function> is used to read the value
- of the given register. For example,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- value = snd_ac97_read(ac97, AC97_MASTER);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- <function>snd_ac97_update_bits()</function> is used to update
- some bits in the given register.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_ac97_update_bits(ac97, reg, mask, value);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Also, there is a function to change the sample rate (of a
- given register such as
- <constant>AC97_PCM_FRONT_DAC_RATE</constant>) when VRA or
- DRA is supported by the codec:
- <function>snd_ac97_set_rate()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_ac97_set_rate(ac97, AC97_PCM_FRONT_DAC_RATE, 44100);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The following registers are available to set the rate:
- <constant>AC97_PCM_MIC_ADC_RATE</constant>,
- <constant>AC97_PCM_FRONT_DAC_RATE</constant>,
- <constant>AC97_PCM_LR_ADC_RATE</constant>,
- <constant>AC97_SPDIF</constant>. When
- <constant>AC97_SPDIF</constant> is specified, the register is
- not really changed but the corresponding IEC958 status bits will
- be updated.
- </para>
- </section>
-
- <section id="api-ac97-clock-adjustment">
- <title>Clock Adjustment</title>
- <para>
- In some chips, the clock of the codec isn't 48000 Hz but is derived
- from the PCI clock (to save a quartz!). In this case, change the field
- bus-&gt;clock to the corresponding
- value. For example, the intel8x0
- and es1968 drivers have their own functions to measure the clock.
- </para>
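-
- <para>
- A minimal sketch, assuming the driver has measured the actual codec
- clock into a hypothetical variable <parameter>measured_clock</parameter>:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
-  if (err < 0)
-          return err;
-  bus->clock = measured_clock;  /* e.g. something close to 48000 */
-]]>
- </programlisting>
- </informalexample>
- </para>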
- </section>
-
- <section id="api-ac97-proc-files">
- <title>Proc Files</title>
- <para>
- The ALSA AC97 interface will create a proc file such as
- <filename>/proc/asound/card0/codec97#0/ac97#0-0</filename> and
- <filename>ac97#0-0+regs</filename>. You can refer to these files to
- see the current status and registers of the codec.
- </para>
- </section>
-
- <section id="api-ac97-multiple-codecs">
- <title>Multiple Codecs</title>
- <para>
- When there are several codecs on the same card, you need to
- call <function>snd_ac97_mixer()</function> multiple times with
- ac97.num=1 or greater. The <structfield>num</structfield> field
- specifies the codec number.
- </para>
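-
- <para>
- A sketch for a card with two codecs on the same bus, continuing the
- example above; the <structfield>ac97_sec</structfield> field of the
- chip record is a hypothetical place to keep the secondary codec
- pointer:
-
- <informalexample>
- <programlisting>
-<![CDATA[
-  struct snd_ac97_template ac97;
-  int err;
-
-  /* primary codec */
-  memset(&ac97, 0, sizeof(ac97));
-  ac97.private_data = chip;
-  ac97.num = 0;
-  err = snd_ac97_mixer(bus, &ac97, &chip->ac97);
-  if (err < 0)
-          return err;
-
-  /* secondary codec */
-  memset(&ac97, 0, sizeof(ac97));
-  ac97.private_data = chip;
-  ac97.num = 1;
-  err = snd_ac97_mixer(bus, &ac97, &chip->ac97_sec);
-  if (err < 0)
-          return err;
-]]>
- </programlisting>
- </informalexample>
- </para>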
-
- <para>
- If you set up multiple codecs, you either need to write
- different callbacks for each codec or check
- ac97-&gt;num in the callback routines.
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- MIDI (MPU401-UART) Interface -->
-<!-- ****************************************************** -->
- <chapter id="midi-interface">
- <title>MIDI (MPU401-UART) Interface</title>
-
- <section id="midi-interface-general">
- <title>General</title>
- <para>
- Many soundcards have built-in MIDI (MPU401-UART)
- interfaces. When the soundcard supports the standard MPU401-UART
- interface, most likely you can use the ALSA MPU401-UART API. The
- MPU401-UART API is defined in
- <filename>&lt;sound/mpu401.h&gt;</filename>.
- </para>
-
- <para>
- Some soundchips have a similar but slightly different
- implementation of mpu401 stuff. For example, emu10k1 has its own
- mpu401 routines.
- </para>
- </section>
-
- <section id="midi-interface-constructor">
- <title>Constructor</title>
- <para>
- To create a rawmidi object, call
- <function>snd_mpu401_uart_new()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_rawmidi *rmidi;
- snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port, info_flags,
- irq, &rmidi);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The first argument is the card pointer, and the second is the
- index of this component. You can create up to 8 rawmidi
- devices.
- </para>
-
- <para>
- The third argument is the type of the hardware,
- <constant>MPU401_HW_XXX</constant>. If it's not a special one,
- you can use <constant>MPU401_HW_MPU401</constant>.
- </para>
-
- <para>
- The 4th argument is the I/O port address. Many
- backward-compatible MPU401 devices have an I/O port such as 0x330. Or, it
- might be a part of its own PCI I/O region. It depends on the
- chip design.
- </para>
-
- <para>
- The 5th argument is a bitflag for additional information.
- When the I/O port address above is part of the PCI I/O
- region, the MPU401 I/O port might have been already allocated
- (reserved) by the driver itself. In such a case, pass a bit flag
- <constant>MPU401_INFO_INTEGRATED</constant>,
- and the mpu401-uart layer will not try to reserve the I/O ports by itself.
- </para>
-
- <para>
- When the controller supports only the input or output MIDI stream,
- pass the <constant>MPU401_INFO_INPUT</constant> or
- <constant>MPU401_INFO_OUTPUT</constant> bitflag, respectively.
- Then the rawmidi instance is created as a single stream.
- </para>
-
- <para>
- The <constant>MPU401_INFO_MMIO</constant> bitflag is used to change
- the access method to MMIO (via readb and writeb) instead of
- inb and outb. In this case, you have to pass the iomapped address
- to <function>snd_mpu401_uart_new()</function>.
- </para>
-
- <para>
- When <constant>MPU401_INFO_TX_IRQ</constant> is set, the output
- stream isn't checked in the default interrupt handler. The driver
- needs to call <function>snd_mpu401_uart_interrupt_tx()</function>
- by itself to start processing the output stream in the irq handler.
- </para>
-
- <para>
- If the MPU-401 interface shares its interrupt with the other logical
- devices on the card, set <constant>MPU401_INFO_IRQ_HOOK</constant>
- (see <link linkend="midi-interface-interrupt-handler"><citetitle>
- below</citetitle></link>).
- </para>
-
- <para>
- Usually, the port address corresponds to the data port and
- port + 1 corresponds to the command port. If not, you may change
- the <structfield>cport</structfield> field of
- struct <structname>snd_mpu401</structname> manually
- afterward. However, <structname>snd_mpu401</structname> pointer is not
- returned explicitly by
- <function>snd_mpu401_uart_new()</function>. You need to cast
- rmidi-&gt;private_data to
- <structname>snd_mpu401</structname> explicitly,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_mpu401 *mpu;
- mpu = rmidi->private_data;
-]]>
- </programlisting>
- </informalexample>
-
- and reset the cport as you like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- mpu->cport = my_own_control_port;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The 6th argument specifies the ISA irq number that will be
- allocated. If no interrupt is to be allocated (because your
- code is already allocating a shared interrupt, or because the
- device does not use interrupts), pass -1 instead.
- For an MPU-401 device without an interrupt, a polling timer
- will be used instead.
- </para>
- </section>
-
- <section id="midi-interface-interrupt-handler">
- <title>Interrupt Handler</title>
- <para>
- When the interrupt is allocated in
- <function>snd_mpu401_uart_new()</function>, an exclusive ISA
- interrupt handler is automatically used, hence you don't have
- anything else to do other than creating the mpu401 stuff. Otherwise, you
- have to set <constant>MPU401_INFO_IRQ_HOOK</constant>, and call
- <function>snd_mpu401_uart_interrupt()</function> explicitly from your
- own interrupt handler when it has determined that a UART interrupt
- has occurred.
- </para>
-
- <para>
- In this case, you need to pass the private_data of the
- returned rawmidi object from
- <function>snd_mpu401_uart_new()</function> as the second
- argument of <function>snd_mpu401_uart_interrupt()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_mpu401_uart_interrupt(irq, rmidi->private_data, regs);
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- RawMIDI Interface -->
-<!-- ****************************************************** -->
- <chapter id="rawmidi-interface">
- <title>RawMIDI Interface</title>
-
- <section id="rawmidi-interface-overview">
- <title>Overview</title>
-
- <para>
- The raw MIDI interface is used for hardware MIDI ports that can
- be accessed as a byte stream. It is not used for synthesizer
- chips that do not directly understand MIDI.
- </para>
-
- <para>
- ALSA handles file and buffer management. All you have to do is
- to write some code to move data between the buffer and the
- hardware.
- </para>
-
- <para>
- The rawmidi API is defined in
- <filename>&lt;sound/rawmidi.h&gt;</filename>.
- </para>
- </section>
-
- <section id="rawmidi-interface-constructor">
- <title>Constructor</title>
-
- <para>
- To create a rawmidi device, call the
- <function>snd_rawmidi_new</function> function:
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_rawmidi *rmidi;
- err = snd_rawmidi_new(chip->card, "MyMIDI", 0, outs, ins, &rmidi);
- if (err < 0)
- return err;
- rmidi->private_data = chip;
- strcpy(rmidi->name, "My MIDI");
- rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
- SNDRV_RAWMIDI_INFO_INPUT |
- SNDRV_RAWMIDI_INFO_DUPLEX;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The first argument is the card pointer, the second argument is
- the ID string.
- </para>
-
- <para>
- The third argument is the index of this component. You can
- create up to 8 rawmidi devices.
- </para>
-
- <para>
- The fourth and fifth arguments are the number of output and
- input substreams, respectively, of this device (a substream is
- the equivalent of a MIDI port).
- </para>
-
- <para>
- Set the <structfield>info_flags</structfield> field to specify
- the capabilities of the device.
- Set <constant>SNDRV_RAWMIDI_INFO_OUTPUT</constant> if there is
- at least one output port,
- <constant>SNDRV_RAWMIDI_INFO_INPUT</constant> if there is at
- least one input port,
- and <constant>SNDRV_RAWMIDI_INFO_DUPLEX</constant> if the device
- can handle output and input at the same time.
- </para>
-
- <para>
- After the rawmidi device is created, you need to set the
- operators (callbacks) for each substream. There are helper
- functions to set the operators for all the substreams of a device:
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_mymidi_output_ops);
- snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_mymidi_input_ops);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The operators are usually defined like this:
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_rawmidi_ops snd_mymidi_output_ops = {
- .open = snd_mymidi_output_open,
- .close = snd_mymidi_output_close,
- .trigger = snd_mymidi_output_trigger,
- };
-]]>
- </programlisting>
- </informalexample>
- These callbacks are explained in the <link
- linkend="rawmidi-interface-callbacks"><citetitle>Callbacks</citetitle></link>
- section.
- </para>
-
- <para>
- If there is more than one substream, you should give a
- unique name to each of them:
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_rawmidi_substream *substream;
- list_for_each_entry(substream,
- &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams,
- list) {
- sprintf(substream->name, "My MIDI Port %d", substream->number + 1);
- }
- /* same for SNDRV_RAWMIDI_STREAM_INPUT */
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- <section id="rawmidi-interface-callbacks">
- <title>Callbacks</title>
-
- <para>
- In all the callbacks, the private data that you've set for the
- rawmidi device can be accessed as
- substream-&gt;rmidi-&gt;private_data.
- <!-- <code> isn't available before DocBook 4.3 -->
- </para>
-
- <para>
- If there is more than one port, your callbacks can determine the
- port index from the struct snd_rawmidi_substream data passed to each
- callback:
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_rawmidi_substream *substream;
- int index = substream->number;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <section id="rawmidi-interface-op-open">
- <title><function>open</function> callback</title>
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_open(struct snd_rawmidi_substream *substream);
-]]>
- </programlisting>
- </informalexample>
-
- <para>
- This is called when a substream is opened.
- You can initialize the hardware here, but you shouldn't
- start transmitting/receiving data yet.
- </para>
- </section>
-
- <section id="rawmidi-interface-op-close">
- <title><function>close</function> callback</title>
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_xxx_close(struct snd_rawmidi_substream *substream);
-]]>
- </programlisting>
- </informalexample>
-
- <para>
- Guess what: this is called when the substream is closed.
- </para>
-
- <para>
- The <function>open</function> and <function>close</function>
- callbacks of a rawmidi device are serialized with a mutex,
- and can sleep.
- </para>
- </section>
-
- <section id="rawmidi-interface-op-trigger-out">
- <title><function>trigger</function> callback for output
- substreams</title>
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void snd_xxx_output_trigger(struct snd_rawmidi_substream *substream, int up);
-]]>
- </programlisting>
- </informalexample>
-
- <para>
- This is called with a nonzero <parameter>up</parameter>
- parameter when there is some data in the substream buffer that
- must be transmitted.
- </para>
-
- <para>
- To read data from the buffer, call
- <function>snd_rawmidi_transmit_peek</function>. It will
- return the number of bytes that have been read; this will be
- less than the number of bytes requested when there are no more
- data in the buffer.
- After the data have been transmitted successfully, call
- <function>snd_rawmidi_transmit_ack</function> to remove the
- data from the substream buffer:
- <informalexample>
- <programlisting>
-<![CDATA[
- unsigned char data;
- while (snd_rawmidi_transmit_peek(substream, &data, 1) == 1) {
- if (snd_mychip_try_to_transmit(data))
- snd_rawmidi_transmit_ack(substream, 1);
- else
- break; /* hardware FIFO full */
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- If you know beforehand that the hardware will accept data, you
- can use the <function>snd_rawmidi_transmit</function> function
- which reads some data and removes them from the buffer at once:
- <informalexample>
- <programlisting>
-<![CDATA[
- while (snd_mychip_transmit_possible()) {
- unsigned char data;
- if (snd_rawmidi_transmit(substream, &data, 1) != 1)
- break; /* no more data */
- snd_mychip_transmit(data);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- If you know beforehand how many bytes you can accept, you can
- use a buffer size greater than one with the
- <function>snd_rawmidi_transmit*</function> functions.
- </para>
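-
- <para>
- As a rough sketch (assuming a hypothetical
- <function>snd_mychip_transmit()</function> that sends one byte and a
- hardware FIFO with at least 16 free bytes), this could look like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- unsigned char buf[16];
- int i, count;
-
- /* peek up to 16 bytes from the substream buffer */
- count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
- for (i = 0; i < count; i++)
- snd_mychip_transmit(buf[i]);
- /* remove the transmitted bytes from the substream buffer */
- if (count > 0)
- snd_rawmidi_transmit_ack(substream, count);
-]]>
- </programlisting>
- </informalexample>
- </para>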
-
- <para>
- The <function>trigger</function> callback must not sleep. If
- the hardware FIFO is full before the substream buffer has been
- emptied, you have to continue transmitting data later, either
- in an interrupt handler, or with a timer if the hardware
- doesn't have a MIDI transmit interrupt.
- </para>
-
- <para>
- The <function>trigger</function> callback is called with a
- zero <parameter>up</parameter> parameter when the transmission
- of data should be aborted.
- </para>
- </section>
-
- <section id="rawmidi-interface-op-trigger-in">
- <title><function>trigger</function> callback for input
- substreams</title>
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void snd_xxx_input_trigger(struct snd_rawmidi_substream *substream, int up);
-]]>
- </programlisting>
- </informalexample>
-
- <para>
- This is called with a nonzero <parameter>up</parameter>
- parameter to enable receiving data, or with a zero
- <parameter>up</parameter> parameter to disable receiving data.
- </para>
-
- <para>
- The <function>trigger</function> callback must not sleep; the
- actual reading of data from the device is usually done in an
- interrupt handler.
- </para>
-
- <para>
- When data reception is enabled, your interrupt handler should
- call <function>snd_rawmidi_receive</function> for all received
- data:
- <informalexample>
- <programlisting>
-<![CDATA[
- void snd_mychip_midi_interrupt(...)
- {
- while (mychip_midi_available()) {
- unsigned char data;
- data = mychip_midi_read();
- snd_rawmidi_receive(substream, &data, 1);
- }
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- <section id="rawmidi-interface-op-drain">
- <title><function>drain</function> callback</title>
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void snd_xxx_drain(struct snd_rawmidi_substream *substream);
-]]>
- </programlisting>
- </informalexample>
-
- <para>
- This is only used with output substreams. This function should wait
- until all data read from the substream buffer have been transmitted.
- This ensures that the device can be closed and the driver unloaded
- without losing data.
- </para>
-
- <para>
- This callback is optional. If you do not set
- <structfield>drain</structfield> in the struct snd_rawmidi_ops
- structure, ALSA will simply wait for 50&nbsp;milliseconds
- instead.
- </para>
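-
- <para>
- A minimal sketch of such a <structfield>drain</structfield> callback,
- assuming a hypothetical <function>snd_mychip_fifo_empty()</function>
- helper, could be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- #include <linux/delay.h>
-
- static void snd_mychip_output_drain(struct snd_rawmidi_substream *substream)
- {
- struct mychip *chip = substream->rmidi->private_data;
-
- /* sleeping is allowed here; wait until the hardware FIFO is empty */
- while (!snd_mychip_fifo_empty(chip))
- msleep(1);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>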
- </section>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Miscellaneous Devices -->
-<!-- ****************************************************** -->
- <chapter id="misc-devices">
- <title>Miscellaneous Devices</title>
-
- <section id="misc-devices-opl3">
- <title>FM OPL3</title>
- <para>
- The FM OPL3 is still used in many chips (mainly for backward
- compatibility). ALSA has a nice OPL3 FM control layer, too. The
- OPL3 API is defined in
- <filename>&lt;sound/opl3.h&gt;</filename>.
- </para>
-
- <para>
- FM registers can be directly accessed through the direct-FM API,
- defined in <filename>&lt;sound/asound_fm.h&gt;</filename>. In
- ALSA native mode, FM registers are accessed through
- the Hardware-Dependent Device direct-FM extension API, whereas in
- OSS compatible mode, FM registers can be accessed with the OSS
- direct-FM compatible API via the <filename>/dev/dmfmX</filename> device.
- </para>
-
- <para>
- To create the OPL3 component, you have two functions to
- call. The first one is a constructor for the <type>opl3_t</type>
- instance.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_opl3 *opl3;
- snd_opl3_create(card, lport, rport, OPL3_HW_OPL3_XXX,
- integrated, &opl3);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The first argument is the card pointer, the second one is the
- left port address, and the third is the right port address. In
- most cases, the right port is placed at the left port + 2.
- </para>
-
- <para>
- The fourth argument is the hardware type.
- </para>
-
- <para>
- When the left and right ports have already been allocated by
- the card driver, pass non-zero to the fifth argument
- (<parameter>integrated</parameter>). Otherwise, the opl3 module will
- allocate the specified ports by itself.
- </para>
-
- <para>
- When accessing the hardware requires a special method
- instead of the standard I/O access, you can create the opl3 instance
- separately with <function>snd_opl3_new()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_opl3 *opl3;
- snd_opl3_new(card, OPL3_HW_OPL3_XXX, &opl3);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Then set <structfield>command</structfield>,
- <structfield>private_data</structfield> and
- <structfield>private_free</structfield> for the private
- access function, the private data and the destructor.
- The l_port and r_port fields need not be set; only the
- command must be set properly. You can retrieve the data
- from the opl3-&gt;private_data field.
- </para>
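-
- <para>
- For illustration (with hypothetical
- <function>snd_mychip_opl3_command()</function> and
- <function>snd_mychip_opl3_free()</function> helpers), the setup
- could look like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- /* use our own register access routine instead of standard I/O */
- opl3->command = snd_mychip_opl3_command;
- opl3->private_data = chip;
- opl3->private_free = snd_mychip_opl3_free; /* optional destructor */
-]]>
- </programlisting>
- </informalexample>
- </para>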
-
- <para>
- After creating the opl3 instance via <function>snd_opl3_new()</function>,
- call <function>snd_opl3_init()</function> to initialize the chip to the
- proper state. Note that <function>snd_opl3_create()</function> always
- calls it internally.
- </para>
-
- <para>
- If the opl3 instance is created successfully, then create a
- hwdep device for this opl3.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_hwdep *opl3hwdep;
- snd_opl3_hwdep_new(opl3, 0, 1, &opl3hwdep);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The first argument is the <type>opl3_t</type> instance you
- created, and the second is the index number, usually 0.
- </para>
-
- <para>
- The third argument is the index-offset for the sequencer
- client assigned to the OPL3 port. When there is an MPU401-UART,
- give 1 here (the UART always takes 0).
- </para>
- </section>
-
- <section id="misc-devices-hardware-dependent">
- <title>Hardware-Dependent Devices</title>
- <para>
- Some chips need user-space access for special
- controls or for loading the microcode. In such a case, you can
- create a hwdep (hardware-dependent) device. The hwdep API is
- defined in <filename>&lt;sound/hwdep.h&gt;</filename>. You can
- find examples in the opl3 driver or
- <filename>isa/sb/sb16_csp.c</filename>.
- </para>
-
- <para>
- The creation of the <type>hwdep</type> instance is done via
- <function>snd_hwdep_new()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_hwdep *hw;
- snd_hwdep_new(card, "My HWDEP", 0, &hw);
-]]>
- </programlisting>
- </informalexample>
-
- where the third argument is the index number.
- </para>
-
- <para>
- You can then pass any pointer value to the
- <parameter>private_data</parameter>.
- If you assign private data, you should define the
- destructor, too. The destructor function is set in
- the <structfield>private_free</structfield> field.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct mydata *p = kmalloc(sizeof(*p), GFP_KERNEL);
- hw->private_data = p;
- hw->private_free = mydata_free;
-]]>
- </programlisting>
- </informalexample>
-
- and the implementation of the destructor would be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void mydata_free(struct snd_hwdep *hw)
- {
- struct mydata *p = hw->private_data;
- kfree(p);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Arbitrary file operations can be defined for this
- instance. The file operators are defined in
- the <parameter>ops</parameter> table. For example, assume that
- this chip needs an ioctl.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- hw->ops.open = mydata_open;
- hw->ops.ioctl = mydata_ioctl;
- hw->ops.release = mydata_release;
-]]>
- </programlisting>
- </informalexample>
-
- And implement the callback functions as you like.
- </para>
- </section>
-
- <section id="misc-devices-IEC958">
- <title>IEC958 (S/PDIF)</title>
- <para>
- Usually the controls for IEC958 devices are implemented via
- the control interface. There is a macro to compose a name string for
- IEC958 controls, <function>SNDRV_CTL_NAME_IEC958()</function>
- defined in <filename>&lt;include/asound.h&gt;</filename>.
- </para>
-
- <para>
- There are some standard controls for IEC958 status bits. These
- controls use the type <type>SNDRV_CTL_ELEM_TYPE_IEC958</type>,
- and the size of the element is fixed as a 4-byte array
- (value.iec958.status[x]). For the <structfield>info</structfield>
- callback, you don't specify
- the value field for this type (the count field must be set,
- though).
- </para>
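-
- <para>
- As a sketch, an <structfield>info</structfield> callback for such a
- control (the callback name is made up) would look like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_spdif_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
- {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
- uinfo->count = 1;
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>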
-
- <para>
- <quote>IEC958 Playback Con Mask</quote> is used to return the
- bit-mask for the IEC958 status bits of consumer mode. Similarly,
- <quote>IEC958 Playback Pro Mask</quote> returns the bitmask for
- professional mode. They are read-only controls, and are defined
- as MIXER controls (iface =
- <constant>SNDRV_CTL_ELEM_IFACE_MIXER</constant>).
- </para>
-
- <para>
- Meanwhile, <quote>IEC958 Playback Default</quote> control is
- defined for getting and setting the current default IEC958
- bits. Note that this one is usually defined as a PCM control
- (iface = <constant>SNDRV_CTL_ELEM_IFACE_PCM</constant>),
- although in some places it's defined as a MIXER control.
- </para>
-
- <para>
- In addition, you can define the control switches to
- enable/disable or to set the raw bit mode. The implementation
- will depend on the chip, but the control should be named as
- <quote>IEC958 xxx</quote>, preferably using
- the <function>SNDRV_CTL_NAME_IEC958()</function> macro.
- </para>
-
- <para>
- You can find several cases, for example,
- <filename>pci/emu10k1</filename>,
- <filename>pci/ice1712</filename>, or
- <filename>pci/cmipci.c</filename>.
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Buffer and Memory Management -->
-<!-- ****************************************************** -->
- <chapter id="buffer-and-memory">
- <title>Buffer and Memory Management</title>
-
- <section id="buffer-and-memory-buffer-types">
- <title>Buffer Types</title>
- <para>
- ALSA provides several different buffer allocation functions
- depending on the bus and the architecture. All these have a
- consistent API. The allocation of physically-contiguous pages is
- done via the
- <function>snd_malloc_xxx_pages()</function> function, where xxx
- is the bus type.
- </para>
-
- <para>
- The allocation of pages with fallback is
- <function>snd_malloc_xxx_pages_fallback()</function>. This
- function tries to allocate the specified pages but if the pages
- are not available, it tries to reduce the page sizes until
- enough space is found.
- </para>
-
- <para>
- To release the pages, call the
- <function>snd_free_xxx_pages()</function> function.
- </para>
-
- <para>
- Usually, ALSA drivers try to allocate and reserve
- a large contiguous physical space
- at the time the module is loaded for the later use.
- This is called <quote>pre-allocation</quote>.
- As already written, you can call the following function at
- pcm instance construction time (in the case of PCI bus).
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci), size, max);
-]]>
- </programlisting>
- </informalexample>
-
- where <parameter>size</parameter> is the byte size to be
- pre-allocated and the <parameter>max</parameter> is the maximum
- size to be changed via the <filename>prealloc</filename> proc file.
- The allocator will try to get an area as large as possible
- within the given size.
- </para>
-
- <para>
- The second argument (type) and the third argument (device pointer)
- are dependent on the bus.
- In the case of the ISA bus, pass <function>snd_dma_isa_data()</function>
- as the third argument with <constant>SNDRV_DMA_TYPE_DEV</constant> type.
- A continuous buffer unrelated to the bus can be pre-allocated
- with the <constant>SNDRV_DMA_TYPE_CONTINUOUS</constant> type and the
- <function>snd_dma_continuous_data(GFP_KERNEL)</function> device pointer,
- where <constant>GFP_KERNEL</constant> is the kernel allocation flag to
- use.
- For the PCI scatter-gather buffers, use
- <constant>SNDRV_DMA_TYPE_DEV_SG</constant> with
- <function>snd_dma_pci_data(pci)</function>
- (see the
- <link linkend="buffer-and-memory-non-contiguous"><citetitle>Non-Contiguous Buffers
- </citetitle></link> section).
- </para>
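-
- <para>
- For example, a bus-independent continuous-buffer pre-allocation
- (a sketch with the same <parameter>size</parameter> and
- <parameter>max</parameter> as above) would be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- size, max);
-]]>
- </programlisting>
- </informalexample>
- </para>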
-
- <para>
- Once the buffer is pre-allocated, you can use the
- allocator in the <structfield>hw_params</structfield> callback:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_pcm_lib_malloc_pages(substream, size);
-]]>
- </programlisting>
- </informalexample>
-
- Note that you have to pre-allocate to use this function.
- </para>
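-
- <para>
- A minimal <structfield>hw_params</structfield> callback using the
- pre-allocated buffer (a sketch) would be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
- {
- /* allocate the DMA buffer from the pre-allocated pool */
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- }
-]]>
- </programlisting>
- </informalexample>
- </para>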
- </section>
-
- <section id="buffer-and-memory-external-hardware">
- <title>External Hardware Buffers</title>
- <para>
- Some chips have their own hardware buffers and the DMA
- transfer from the host memory is not available. In such a case,
- you need to either 1) copy/set the audio data directly to the
- external hardware buffer, or 2) make an intermediate buffer and
- copy/set the data from it to the external hardware buffer in
- interrupts (or in tasklets, preferably).
- </para>
-
- <para>
- The first case works fine if the external hardware buffer is large
- enough. This method doesn't need any extra buffers and thus is
- more efficient. You need to define the
- <structfield>copy</structfield> and
- <structfield>silence</structfield> callbacks for
- the data transfer. However, there is a drawback: it cannot
- be mmapped. The examples are GUS's GF1 PCM or emu8000's
- wavetable PCM.
- </para>
-
- <para>
- The second case allows for mmap on the buffer, although you have
- to handle an interrupt or a tasklet to transfer the data
- from the intermediate buffer to the hardware buffer. You can find an
- example in the vxpocket driver.
- </para>
-
- <para>
- Another case is when the chip uses a PCI memory-map
- region for the buffer instead of the host memory. In this case,
- mmap is available only on certain architectures like the Intel one.
- In non-mmap mode, the data cannot be transferred in the normal
- way. Thus you need to define the <structfield>copy</structfield> and
- <structfield>silence</structfield> callbacks as well,
- as in the cases above. The examples are found in
- <filename>rme32.c</filename> and <filename>rme96.c</filename>.
- </para>
-
- <para>
- The implementation of the <structfield>copy</structfield> and
- <structfield>silence</structfield> callbacks depends upon
- whether the hardware supports interleaved or non-interleaved
- samples. The <structfield>copy</structfield> callback is
- defined like below, a bit
- differently depending on whether the direction is playback or
- capture:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int playback_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void *src, snd_pcm_uframes_t count);
- static int capture_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void *dst, snd_pcm_uframes_t count);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- In the case of interleaved samples, the second argument
- (<parameter>channel</parameter>) is not used. The third argument
- (<parameter>pos</parameter>) points to the
- current position offset in frames.
- </para>
-
- <para>
- The meaning of the fourth argument is different between
- playback and capture. For playback, it holds the source data
- pointer, and for capture, it's the destination data pointer.
- </para>
-
- <para>
- The last argument is the number of frames to be copied.
- </para>
-
- <para>
- What you have to do in this callback is again different
- between playback and capture directions. In the
- playback case, you copy the given amount of data
- (<parameter>count</parameter>) at the specified pointer
- (<parameter>src</parameter>) to the specified offset
- (<parameter>pos</parameter>) on the hardware buffer. When
- coded in a memcpy-like way, the copy would look like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- my_memcpy(my_buffer + frames_to_bytes(runtime, pos), src,
- frames_to_bytes(runtime, count));
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- For the capture direction, you copy the given amount of
- data (<parameter>count</parameter>) at the specified offset
- (<parameter>pos</parameter>) on the hardware buffer to the
- specified pointer (<parameter>dst</parameter>).
-
- <informalexample>
- <programlisting>
-<![CDATA[
- my_memcpy(dst, my_buffer + frames_to_bytes(runtime, pos),
- frames_to_bytes(runtime, count));
-]]>
- </programlisting>
- </informalexample>
-
- Note that both the position and the amount of data are given
- in frames.
- </para>
-
- <para>
- In the case of non-interleaved samples, the implementation
- will be a bit more complicated.
- </para>
-
- <para>
- You need to check the channel argument, and if it's -1, copy
- all channels. Otherwise, you have to copy only the
- specified channel. Please check
- <filename>isa/gus/gus_pcm.c</filename> as an example.
- </para>
-
- <para>
- The <structfield>silence</structfield> callback is also
- implemented in a similar way.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int silence(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, snd_pcm_uframes_t count);
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The meanings of arguments are the same as in the
- <structfield>copy</structfield>
- callback, although there is no <parameter>src/dst</parameter>
- argument. In the case of interleaved samples, the channel
- argument has no meaning, just as for the
- <structfield>copy</structfield> callback.
- </para>
-
- <para>
- The role of <structfield>silence</structfield> callback is to
- set the given amount
- (<parameter>count</parameter>) of silence data at the
- specified offset (<parameter>pos</parameter>) on the hardware
- buffer. Suppose that the data format is signed (that is, the
- silent data is 0), and the implementation using a memset-like
- function would look like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- my_memset(my_buffer + frames_to_bytes(runtime, pos), 0,
- frames_to_bytes(runtime, count));
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- In the case of non-interleaved samples, again, the
- implementation becomes a bit more complicated. See, for example,
- <filename>isa/gus/gus_pcm.c</filename>.
- </para>
- </section>
-
- <section id="buffer-and-memory-non-contiguous">
- <title>Non-Contiguous Buffers</title>
- <para>
- If your hardware supports the page table as in emu10k1 or the
- buffer descriptors as in via82xx, you can use the scatter-gather
- (SG) DMA. ALSA provides an interface for handling SG-buffers.
- The API is provided in <filename>&lt;sound/pcm.h&gt;</filename>.
- </para>
-
- <para>
- For creating the SG-buffer handler, call
- <function>snd_pcm_lib_preallocate_pages()</function> or
- <function>snd_pcm_lib_preallocate_pages_for_all()</function>
- with <constant>SNDRV_DMA_TYPE_DEV_SG</constant>
- in the PCM constructor, like the other PCI pre-allocations.
- You need to pass <function>snd_dma_pci_data(pci)</function>,
- where pci is the struct <structname>pci_dev</structname> pointer
- of the chip as well.
- The <type>struct snd_sg_buf</type> instance is created as
- substream-&gt;dma_private. You can cast
- the pointer like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Then call <function>snd_pcm_lib_malloc_pages()</function>
- in the <structfield>hw_params</structfield> callback
- just as in the case of a normal PCI buffer.
- The SG-buffer handler will allocate the non-contiguous kernel
- pages of the given size and map them onto the virtually contiguous
- memory. The virtual pointer is addressed in runtime-&gt;dma_area.
- The physical address (runtime-&gt;dma_addr) is set to zero,
- because the buffer is physically non-contiguous.
- The physical address table is set up in sgbuf-&gt;table.
- You can get the physical address at a certain offset via
- <function>snd_pcm_sgbuf_get_addr()</function>.
- </para>
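-
- <para>
- For instance, to look up the DMA address at a byte offset
- (<parameter>ofs</parameter> is assumed here to be the offset in bytes):
-
- <informalexample>
- <programlisting>
-<![CDATA[
- dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
-]]>
- </programlisting>
- </informalexample>
- </para>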
-
- <para>
- When a SG-handler is used, you need to set
- <function>snd_pcm_sgbuf_ops_page</function> as
- the <structfield>page</structfield> callback.
- (See <link linkend="pcm-interface-operators-page-callback">
- <citetitle>page callback section</citetitle></link>.)
- </para>
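-
- <para>
- For instance, in the PCM ops table (a fragment; the other callbacks
- are set up as usual):
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_pcm_ops snd_mychip_playback_ops = {
- /* ... open, close, hw_params, etc. as usual ... */
- .page = snd_pcm_sgbuf_ops_page,
- };
-]]>
- </programlisting>
- </informalexample>
- </para>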
-
- <para>
- To release the data, call
- <function>snd_pcm_lib_free_pages()</function> in the
- <structfield>hw_free</structfield> callback as usual.
- </para>
- </section>
-
- <section id="buffer-and-memory-vmalloced">
- <title>Vmalloc'ed Buffers</title>
- <para>
- It's possible to use a buffer allocated via
- <function>vmalloc</function>, for example, for an intermediate
- buffer. Since the allocated pages are not contiguous, you need
- to set the <structfield>page</structfield> callback to obtain
- the physical address at every offset.
- </para>
-
- <para>
- The implementation of <structfield>page</structfield> callback
- would be like this:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- #include <linux/vmalloc.h>
-
- /* get the physical page pointer on the given offset */
- static struct page *mychip_page(struct snd_pcm_substream *substream,
- unsigned long offset)
- {
- void *pageptr = substream->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Proc Interface -->
-<!-- ****************************************************** -->
- <chapter id="proc-interface">
- <title>Proc Interface</title>
- <para>
- ALSA provides an easy interface for procfs. The proc files are
- very useful for debugging. I recommend you set up proc files if
- you write a driver and want to get a running status or register
- dumps. The API is found in
- <filename>&lt;sound/info.h&gt;</filename>.
- </para>
-
- <para>
- To create a proc file, call
- <function>snd_card_proc_new()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- struct snd_info_entry *entry;
- int err = snd_card_proc_new(card, "my-file", &entry);
-]]>
- </programlisting>
- </informalexample>
-
- where the second argument specifies the name of the proc file to be
- created. The above example will create a file
- <filename>my-file</filename> under the card directory,
- e.g. <filename>/proc/asound/card0/my-file</filename>.
- </para>
-
- <para>
- Like other components, the proc entry created via
- <function>snd_card_proc_new()</function> will be registered and
- released automatically in the card registration and release
- functions.
- </para>
-
- <para>
- When the creation is successful, the function stores a new
- instance in the pointer given in the third argument.
- It is initialized as a text proc file for read only. To use
- this proc file as a read-only text file as it is, set the read
- callback with private data via
- <function>snd_info_set_text_ops()</function>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_info_set_text_ops(entry, chip, my_proc_read);
-]]>
- </programlisting>
- </informalexample>
-
- where the second argument (<parameter>chip</parameter>) is the
- private data to be used in the callback, and the third
- (<parameter>my_proc_read</parameter>) is the callback function, which
- is defined like
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void my_proc_read(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer);
-]]>
- </programlisting>
- </informalexample>
-
- </para>
-
- <para>
- In the read callback, use <function>snd_iprintf()</function> for
- output strings, which works just like normal
- <function>printf()</function>. For example,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void my_proc_read(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
- {
- struct my_chip *chip = entry->private_data;
-
- snd_iprintf(buffer, "This is my chip!\n");
- snd_iprintf(buffer, "Port = %ld\n", chip->port);
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The file permissions can be changed afterwards. By default, it's
- set as read-only for all users. If you want to add write
- permission for the owner (root by default), do as follows:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- entry->mode = S_IFREG | S_IRUGO | S_IWUSR;
-]]>
- </programlisting>
- </informalexample>
-
- and set the write callback:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- entry->c.text.write = my_proc_write;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- For the write callback, you can use
- <function>snd_info_get_line()</function> to get a text line, and
- <function>snd_info_get_str()</function> to retrieve a string from
- the line. Some examples are found in
- <filename>core/oss/mixer_oss.c</filename> and
- <filename>core/oss/pcm_oss.c</filename>.
- </para>
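-
- <para>
- A sketch of such a write callback (re-using the chip data from the
- read example above and parsing a made-up <quote>port</quote> keyword)
- could be:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static void my_proc_write(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
- {
- struct my_chip *chip = entry->private_data;
- char line[64], item[64];
- long val;
-
- while (!snd_info_get_line(buffer, line, sizeof(line))) {
- const char *ptr = snd_info_get_str(item, line, sizeof(item));
- if (!strcmp(item, "port")) {
- snd_info_get_str(item, ptr, sizeof(item));
- if (!kstrtol(item, 0, &val))
- chip->port = val;
- }
- }
- }
-]]>
- </programlisting>
- </informalexample>
- </para>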
-
- <para>
- For a raw-data proc-file, set the attributes as follows:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct snd_info_entry_ops my_file_io_ops = {
- .read = my_file_io_read,
- };
-
- entry->content = SNDRV_INFO_CONTENT_DATA;
- entry->private_data = chip;
- entry->c.ops = &my_file_io_ops;
- entry->size = 4096;
- entry->mode = S_IFREG | S_IRUGO;
-]]>
- </programlisting>
- </informalexample>
-
- For the raw data, <structfield>size</structfield> field must be
- set properly. This specifies the maximum size of the proc file access.
- </para>
-
- <para>
- The read/write callbacks of raw mode are more direct than the text mode.
- You need to use low-level I/O functions such as
- <function>copy_from/to_user()</function> to transfer the
- data.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static ssize_t my_file_io_read(struct snd_info_entry *entry,
- void *file_private_data,
- struct file *file,
- char *buf,
- size_t count,
- loff_t pos)
- {
- if (copy_to_user(buf, local_data + pos, count))
- return -EFAULT;
- return count;
- }
-]]>
- </programlisting>
- </informalexample>
-
- If the size of the info entry has been set up properly,
- <structfield>count</structfield> and <structfield>pos</structfield> are
- guaranteed to be within 0 and the given size, so you don't have to
- check the range in the callbacks unless some other condition
- requires it.
-
- </para>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Power Management -->
-<!-- ****************************************************** -->
- <chapter id="power-management">
- <title>Power Management</title>
- <para>
- If the chip is supposed to work with suspend/resume
- functions, you need to add power-management code to the
- driver. The additional code for power-management should be
- <function>ifdef</function>'ed with
- <constant>CONFIG_PM</constant>.
- </para>
-
- <para>
- If the driver <emphasis>fully</emphasis> supports suspend/resume,
- that is, the device can be
- properly resumed to the state it was in when suspend was called,
- you can set the <constant>SNDRV_PCM_INFO_RESUME</constant> flag
- in the pcm info field. Usually, this is possible when the
- registers of the chip can be safely saved and restored to
- RAM. If this is set, the trigger callback is called with
- <constant>SNDRV_PCM_TRIGGER_RESUME</constant> after the resume
- callback completes.
- </para>
-
- <para>
- Even if the driver doesn't support PM fully but
- partial suspend/resume is still possible, it's still worthwhile to
- implement suspend/resume callbacks. In such a case, applications
- would reset the status by calling
- <function>snd_pcm_prepare()</function> and restart the stream
- appropriately. Hence, you can define the suspend/resume callbacks as
- described below, but don't set the <constant>SNDRV_PCM_INFO_RESUME</constant>
- info flag on the PCM.
- </para>
-
- <para>
- Note that the trigger with SUSPEND can always be called when
- <function>snd_pcm_suspend_all</function> is called,
- regardless of the <constant>SNDRV_PCM_INFO_RESUME</constant> flag.
- The <constant>RESUME</constant> flag affects only the behavior
- of <function>snd_pcm_resume()</function>.
- (Thus, in theory,
- <constant>SNDRV_PCM_TRIGGER_RESUME</constant> isn't needed
- to be handled in the trigger callback when no
- <constant>SNDRV_PCM_INFO_RESUME</constant> flag is set. But,
- it's better to keep it for compatibility reasons.)
- </para>
- <para>
- In the earlier version of ALSA drivers, a common
- power-management layer was provided, but it has been removed.
- The driver needs to define the suspend/resume hooks according to
- the bus the device is connected to. In the case of PCI drivers, the
- callbacks look like below:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- #ifdef CONFIG_PM
- static int snd_my_suspend(struct pci_dev *pci, pm_message_t state)
- {
- .... /* do things for suspend */
- return 0;
- }
- static int snd_my_resume(struct pci_dev *pci)
- {
- .... /* do things for resume */
- return 0;
- }
- #endif
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The scheme of the real suspend job is as follows.
-
- <orderedlist>
- <listitem><para>Retrieve the card and the chip data.</para></listitem>
- <listitem><para>Call <function>snd_power_change_state()</function> with
- <constant>SNDRV_CTL_POWER_D3hot</constant> to change the
- power status.</para></listitem>
- <listitem><para>Call <function>snd_pcm_suspend_all()</function> to suspend the running PCM streams.</para></listitem>
- <listitem><para>If AC97 codecs are used, call
- <function>snd_ac97_suspend()</function> for each codec.</para></listitem>
- <listitem><para>Save the register values if necessary.</para></listitem>
- <listitem><para>Stop the hardware if necessary.</para></listitem>
- <listitem><para>Disable the PCI device by calling
- <function>pci_disable_device()</function>. Then, call
- <function>pci_save_state()</function> at last.</para></listitem>
- </orderedlist>
- </para>
-
- <para>
- A typical code would be like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int mychip_suspend(struct pci_dev *pci, pm_message_t state)
- {
- /* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
- struct mychip *chip = card->private_data;
- /* (2) */
- snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- /* (3) */
- snd_pcm_suspend_all(chip->pcm);
- /* (4) */
- snd_ac97_suspend(chip->ac97);
- /* (5) */
- snd_mychip_save_registers(chip);
- /* (6) */
- snd_mychip_stop_hardware(chip);
- /* (7) */
- pci_disable_device(pci);
- pci_save_state(pci);
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- The scheme of the real resume job is as follows.
-
- <orderedlist>
- <listitem><para>Retrieve the card and the chip data.</para></listitem>
- <listitem><para>Set up PCI. First, call <function>pci_restore_state()</function>.
- Then enable the pci device again by calling <function>pci_enable_device()</function>.
- Call <function>pci_set_master()</function> if necessary, too.</para></listitem>
- <listitem><para>Re-initialize the chip.</para></listitem>
- <listitem><para>Restore the saved registers if necessary.</para></listitem>
- <listitem><para>Resume the mixer, e.g. calling
- <function>snd_ac97_resume()</function>.</para></listitem>
- <listitem><para>Restart the hardware (if any).</para></listitem>
- <listitem><para>Call <function>snd_power_change_state()</function> with
- <constant>SNDRV_CTL_POWER_D0</constant> to notify the processes.</para></listitem>
- </orderedlist>
- </para>
-
- <para>
- A typical code would be like:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int mychip_resume(struct pci_dev *pci)
- {
- /* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
- struct mychip *chip = card->private_data;
- /* (2) */
- pci_restore_state(pci);
- pci_enable_device(pci);
- pci_set_master(pci);
- /* (3) */
- snd_mychip_reinit_chip(chip);
- /* (4) */
- snd_mychip_restore_registers(chip);
- /* (5) */
- snd_ac97_resume(chip->ac97);
- /* (6) */
- snd_mychip_restart_chip(chip);
- /* (7) */
- snd_power_change_state(card, SNDRV_CTL_POWER_D0);
- return 0;
- }
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- As shown above, it's better to save registers after
- suspending the PCM operations via
- <function>snd_pcm_suspend_all()</function> or
- <function>snd_pcm_suspend()</function>. It means that the PCM
- streams are already stopped when the register snapshot is
- taken. But, remember that you don't have to restart the PCM
- stream in the resume callback. It'll be restarted via
- trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
- when necessary.
- </para>
-
- <para>
- OK, we have all callbacks now. Let's set them up. In the
- initialization of the card, make sure that you can get the chip
- data from the card instance, typically via
- <structfield>private_data</structfield> field, in case you
- created the chip data individually.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
- {
- ....
- struct snd_card *card;
- struct mychip *chip;
- int err;
- ....
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- 0, &card);
- ....
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- ....
- card->private_data = chip;
- ....
- }
-]]>
- </programlisting>
- </informalexample>
-
- When you create the chip data together with
- <function>snd_card_new()</function>, it's accessible
- via the <structfield>private_data</structfield> field in any case.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
- {
- ....
- struct snd_card *card;
- struct mychip *chip;
- int err;
- ....
- err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- sizeof(struct mychip), &card);
- ....
- chip = card->private_data;
- ....
- }
-]]>
- </programlisting>
- </informalexample>
-
- </para>
-
- <para>
- If you need space to save the registers, allocate the
- buffer for it here, too, since it would be fatal
- if you cannot allocate memory in the suspend phase.
- The allocated buffer should be released in the corresponding
- destructor.
- </para>
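-
- <para>
- For example (with a made-up register count and a
- <structfield>saved_regs</structfield> field in the chip data):
-
- <informalexample>
- <programlisting>
-<![CDATA[
- chip->saved_regs = kmalloc_array(MYCHIP_NUM_REGS, sizeof(u32),
- GFP_KERNEL);
- if (!chip->saved_regs)
- return -ENOMEM;
-]]>
- </programlisting>
- </informalexample>
- </para>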
-
- <para>
- And next, set suspend/resume callbacks to the pci_driver.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static struct pci_driver driver = {
- .name = KBUILD_MODNAME,
- .id_table = snd_my_ids,
- .probe = snd_my_probe,
- .remove = snd_my_remove,
- #ifdef CONFIG_PM
- .suspend = snd_my_suspend,
- .resume = snd_my_resume,
- #endif
- };
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Module Parameters -->
-<!-- ****************************************************** -->
- <chapter id="module-parameters">
- <title>Module Parameters</title>
- <para>
- There are standard module options for ALSA. At least, each
- module should have the <parameter>index</parameter>,
- <parameter>id</parameter> and <parameter>enable</parameter>
- options.
- </para>
-
- <para>
- If the module supports multiple cards (usually up to
- 8 = <constant>SNDRV_CARDS</constant> cards), they should be
- arrays. The default initial values are defined already as
- constants for easier programming:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
- static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
- static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- If the module supports only a single card, they could be single
- variables, instead. The <parameter>enable</parameter> option is not
- always necessary in this case, but it would be better to have a
- dummy option for compatibility.
- </para>
-
- <para>
- The module parameters must be declared with the standard
- <function>module_param()</function>,
- <function>module_param_array()</function> and
- <function>MODULE_PARM_DESC()</function> macros.
- </para>
-
- <para>
- The typical coding would be like below:
-
- <informalexample>
- <programlisting>
-<![CDATA[
- #define CARD_NAME "My Chip"
-
- module_param_array(index, int, NULL, 0444);
- MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard.");
- module_param_array(id, charp, NULL, 0444);
- MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard.");
- module_param_array(enable, bool, NULL, 0444);
- MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- Also, don't forget to define the module description, classes,
- license and devices. In particular, recent modprobe requires the
- module license to be defined as GPL, etc.; otherwise the system is
- marked as <quote>tainted</quote>.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- MODULE_DESCRIPTION("My Chip");
- MODULE_LICENSE("GPL");
- MODULE_SUPPORTED_DEVICE("{{Vendor,My Chip Name}}");
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- How To Put Your Driver -->
-<!-- ****************************************************** -->
- <chapter id="how-to-put-your-driver">
- <title>How To Put Your Driver Into ALSA Tree</title>
- <section>
- <title>General</title>
- <para>
- So far, you've learned how to write the driver code.
- And you might have a question now: how do I put my own
- driver into the ALSA driver tree?
- Here (finally :) the standard procedure is described briefly.
- </para>
-
- <para>
- Suppose that you create a new PCI driver for the card
- <quote>xyz</quote>. The card module name would be
- snd-xyz. The new driver is usually put into the alsa-driver
- tree, <filename>alsa-driver/pci</filename> directory in
- the case of PCI cards.
- Then the driver is evaluated, audited and tested
- by developers and users. After a certain time, the driver
- will go to the alsa-kernel tree (to the corresponding directory,
- such as <filename>alsa-kernel/pci</filename>) and eventually
- will be integrated into the Linux 2.6 tree (the directory would be
- <filename>linux/sound/pci</filename>).
- </para>
-
- <para>
- In the following sections, the driver code is supposed
- to be put into alsa-driver tree. The two cases are covered:
- a driver consisting of a single source file and one consisting
- of several source files.
- </para>
- </section>
-
- <section>
- <title>Driver with A Single Source File</title>
- <para>
- <orderedlist>
- <listitem>
- <para>
- Modify alsa-driver/pci/Makefile
- </para>
-
- <para>
- Suppose you have a file xyz.c. Add the following
- two lines
- <informalexample>
- <programlisting>
-<![CDATA[
- snd-xyz-objs := xyz.o
- obj-$(CONFIG_SND_XYZ) += snd-xyz.o
-]]>
- </programlisting>
- </informalexample>
- </para>
- </listitem>
-
- <listitem>
- <para>
- Create the Kconfig entry
- </para>
-
- <para>
- Add the new entry of Kconfig for your xyz driver.
- <informalexample>
- <programlisting>
-<![CDATA[
- config SND_XYZ
- tristate "Foobar XYZ"
- depends on SND
- select SND_PCM
- help
- Say Y here to include support for Foobar XYZ soundcard.
-
- To compile this driver as a module, choose M here: the module
- will be called snd-xyz.
-]]>
- </programlisting>
- </informalexample>
-
- The line <quote>select SND_PCM</quote> specifies that the driver xyz supports
- PCM. In addition to SND_PCM, the following components are
- supported by the select command:
- SND_RAWMIDI, SND_TIMER, SND_HWDEP, SND_MPU401_UART,
- SND_OPL3_LIB, SND_OPL4_LIB, SND_VX_LIB, SND_AC97_CODEC.
- Add the select command for each supported component.
- </para>
-
- <para>
- Note that some selections imply the lowlevel selections.
- For example, PCM includes TIMER, MPU401_UART includes RAWMIDI,
- AC97_CODEC includes PCM, and OPL3_LIB includes HWDEP.
- You don't need to give the lowlevel selections again.
- </para>
-
- <para>
- For the details of Kconfig script, refer to the kbuild
- documentation.
- </para>
-
- </listitem>
-
- <listitem>
- <para>
- Run the cvscompile script to regenerate the configure script and
- rebuild everything.
- </para>
- </listitem>
- </orderedlist>
- </para>
- </section>
-
- <section>
- <title>Drivers with Several Source Files</title>
- <para>
- Suppose that the driver snd-xyz has several source files.
- They are located in the new subdirectory,
- pci/xyz.
-
- <orderedlist>
- <listitem>
- <para>
- Add a new directory (<filename>xyz</filename>) in
- <filename>alsa-driver/pci/Makefile</filename> as below
-
- <informalexample>
- <programlisting>
-<![CDATA[
- obj-$(CONFIG_SND) += xyz/
-]]>
- </programlisting>
- </informalexample>
- </para>
- </listitem>
-
- <listitem>
- <para>
- Under the directory <filename>xyz</filename>, create a Makefile
-
- <example>
- <title>Sample Makefile for a driver xyz</title>
- <programlisting>
-<![CDATA[
- ifndef SND_TOPDIR
- SND_TOPDIR=../..
- endif
-
- include $(SND_TOPDIR)/toplevel.config
- include $(SND_TOPDIR)/Makefile.conf
-
- snd-xyz-objs := xyz.o abc.o def.o
-
- obj-$(CONFIG_SND_XYZ) += snd-xyz.o
-
- include $(SND_TOPDIR)/Rules.make
-]]>
- </programlisting>
- </example>
- </para>
- </listitem>
-
- <listitem>
- <para>
- Create the Kconfig entry
- </para>
-
- <para>
- This procedure is the same as in the last section.
- </para>
- </listitem>
-
- <listitem>
- <para>
- Run the cvscompile script to regenerate the configure script and
- rebuild everything.
- </para>
- </listitem>
- </orderedlist>
- </para>
- </section>
-
- </chapter>
-
-<!-- ****************************************************** -->
-<!-- Useful Functions -->
-<!-- ****************************************************** -->
- <chapter id="useful-functions">
- <title>Useful Functions</title>
-
- <section id="useful-functions-snd-printk">
- <title><function>snd_printk()</function> and friends</title>
- <para>
- ALSA provides a verbose version of the
- <function>printk()</function> function. If a kernel config
- <constant>CONFIG_SND_VERBOSE_PRINTK</constant> is set, this
- function prints the given message together with the file name
- and the line of the caller. The <constant>KERN_XXX</constant>
- prefix is processed just as with
- the original <function>printk()</function>, so it's
- recommended to add this prefix, e.g.
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_printk(KERN_ERR "Oh my, sorry, it's extremely bad!\n");
-]]>
- </programlisting>
- </informalexample>
- </para>
-
- <para>
- There are also <function>printk()</function>'s for
- debugging. <function>snd_printd()</function> can be used for
- general debugging purposes. If
- <constant>CONFIG_SND_DEBUG</constant> is set, this function is
- compiled, and works just like
- <function>snd_printk()</function>. If ALSA is compiled
- without the debugging flag, it's ignored.
- </para>
-
- <para>
- <function>snd_printdd()</function> is compiled in only when
- <constant>CONFIG_SND_DEBUG_VERBOSE</constant> is set. Please note
- that <constant>CONFIG_SND_DEBUG_VERBOSE</constant> is not set by default
- even if you configure the alsa-driver with the
- <option>--with-debug=full</option> option. You need to give
- the <option>--with-debug=detect</option> option explicitly instead.
- </para>
- </section>
-
- <section id="useful-functions-snd-bug">
- <title><function>snd_BUG()</function></title>
- <para>
- It shows the <computeroutput>BUG?</computeroutput> message and
- a stack trace at the point where it is called, just like
- <function>snd_BUG_ON()</function>.
- It's useful for showing that a fatal error happened there.
- </para>
- <para>
- When no debug flag is set, this macro is ignored.
- </para>
- </section>
-
- <section id="useful-functions-snd-bug-on">
- <title><function>snd_BUG_ON()</function></title>
- <para>
- The <function>snd_BUG_ON()</function> macro is similar to the
- <function>WARN_ON()</function> macro. For example,
-
- <informalexample>
- <programlisting>
-<![CDATA[
- snd_BUG_ON(!pointer);
-]]>
- </programlisting>
- </informalexample>
-
- or it can be used as the condition,
- <informalexample>
- <programlisting>
-<![CDATA[
- if (snd_BUG_ON(non_zero_is_bug))
- return -EINVAL;
-]]>
- </programlisting>
- </informalexample>
-
- </para>
-
- <para>
- The macro takes a conditional expression to evaluate.
- When <constant>CONFIG_SND_DEBUG</constant> is set, if the
- expression is non-zero, it shows a warning message such as
- <computeroutput>BUG? (xxx)</computeroutput>,
- normally followed by a stack trace.
-
- In both cases it returns the evaluated value.
- </para>
-
- </section>
-
- </chapter>
-
-
-<!-- ****************************************************** -->
-<!-- Acknowledgments -->
-<!-- ****************************************************** -->
- <chapter id="acknowledgments">
- <title>Acknowledgments</title>
- <para>
- I would like to thank Phil Kerr for his help for improvement and
- corrections of this document.
- </para>
- <para>
- Kevin Conder reformatted the original plain-text to the
- DocBook format.
- </para>
- <para>
- Giuliano Pochini corrected typos and contributed the example codes
- in the hardware constraints section.
- </para>
- </chapter>
-</book>
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index c0d8788e75d3..72292308d0f5 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -111,6 +111,8 @@ ipmi_ssif - A driver for accessing BMCs on the SMBus. It uses the
I2C kernel driver's SMBus interfaces to send and receive IPMI messages
over the SMBus.
+ipmi_powernv - A driver for accessing BMCs on POWERNV systems.
+
ipmi_watchdog - IPMI requires systems to have a very capable watchdog
timer. This driver implements the standard Linux watchdog timer
interface on top of the IPMI message handler.
@@ -118,17 +120,15 @@ interface on top of the IPMI message handler.
ipmi_poweroff - Some systems support the ability to be turned off via
IPMI commands.
-These are all individually selectable via configuration options.
+bt-bmc - This is not part of the main driver, but instead a driver for
+accessing a BMC-side interface of a BT interface. It is used on BMCs
+running Linux to provide an interface to the host.
-Note that the KCS-only interface has been removed. The af_ipmi driver
-is no longer supported and has been removed because it was impossible
-to do 32 bit emulation on 64-bit kernels with it.
+These are all individually selectable via configuration options.
Much documentation for the interface is in the include files. The
IPMI include files are:
-net/af_ipmi.h - Contains the socket interface.
-
linux/ipmi.h - Contains the user interface and IOCTL interface for IPMI.
linux/ipmi_smi.h - Contains the interface for system management interfaces
@@ -245,6 +245,16 @@ addressed (because some boards actually have multiple BMCs on them)
and the user should not have to care what type of SMI is below them.
+Watching For Interfaces
+
+When your code comes up, the IPMI driver may or may not have detected
+if IPMI devices exist. So you might have to defer your setup until
+the device is detected, or you might be able to do it immediately.
+To handle this, and to allow for discovery, you register an SMI
+watcher with ipmi_smi_watcher_register() to iterate over interfaces
+and tell you when they come and go.
+
+
Creating the User
To use the message handler, you must first create a user using
@@ -263,7 +273,7 @@ closing the device automatically destroys the user.
Messaging
-To send a message from kernel-land, the ipmi_request() call does
+To send a message from kernel-land, the ipmi_request_settime() call does
pretty much all message handling. Most of the parameters are
self-explanatory. However, it takes a "msgid" parameter. This is NOT
the sequence number of messages. It is simply a long value that is
@@ -352,11 +362,12 @@ that for more details.
The SI Driver
-------------
-The SI driver allows up to 4 KCS or SMIC interfaces to be configured
-in the system. By default, scan the ACPI tables for interfaces, and
-if it doesn't find any the driver will attempt to register one KCS
-interface at the spec-specified I/O port 0xca2 without interrupts.
-You can change this at module load time (for a module) with:
+The SI driver allows KCS, BT, and SMIC interfaces to be configured
+in the system. It discovers interfaces through a host of different
+methods, depending on the system.
+
+You can specify up to four interfaces on the module load line and
+control some module parameters:
modprobe ipmi_si.o type=<type1>,<type2>....
ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
@@ -367,7 +378,7 @@ You can change this at module load time (for a module) with:
force_kipmid=<enable1>,<enable2>,...
kipmid_max_busy_us=<ustime1>,<ustime2>,...
unload_when_empty=[0|1]
- trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+ trydmi=[0|1] tryacpi=[0|1]
tryplatform=[0|1] trypci=[0|1]
Each of these except try... items is a list, the first item for the
@@ -386,10 +397,6 @@ use the I/O port given as the device address.
If you specify irqs as non-zero for an interface, the driver will
attempt to use the given interrupt for the device.
-trydefaults sets whether the standard IPMI interface at 0xca2 and
-any interfaces specified by ACPE are tried. By default, the driver
-tries it, set this value to zero to turn this off.
-
The other try... items disable discovery by their corresponding
names. These are all enabled by default, set them to zero to disable
them. The tryplatform disables openfirmware.
@@ -434,7 +441,7 @@ kernel command line as:
ipmi_si.type=<type1>,<type2>...
ipmi_si.ports=<port1>,<port2>... ipmi_si.addrs=<addr1>,<addr2>...
- ipmi_si.irqs=<irq1>,<irq2>... ipmi_si.trydefaults=[0|1]
+ ipmi_si.irqs=<irq1>,<irq2>...
ipmi_si.regspacings=<sp1>,<sp2>,...
ipmi_si.regsizes=<size1>,<size2>,...
ipmi_si.regshifts=<shift1>,<shift2>,...
@@ -444,11 +451,6 @@ kernel command line as:
It works the same as the module parameters of the same names.
-By default, the driver will attempt to detect any device specified by
-ACPI, and if none of those then a KCS device at the spec-specified
-0xca2. If you want to turn this off, set the "trydefaults" option to
-false.
-
If your IPMI interface does not support interrupts and is a KCS or
SMIC interface, the IPMI driver will start a kernel thread for the
interface to help speed things up. This is a low-priority kernel
@@ -500,7 +502,8 @@ at module load time (for a module) with:
addr=<i2caddr1>[,<i2caddr2>[,...]]
adapter=<adapter1>[,<adapter2>[...]]
dbg=<flags1>,<flags2>...
- slave_addrs=<addr1>,<addr2>,...
+ slave_addrs=<addr1>,<addr2>,...
+ tryacpi=[0|1] trydmi=[0|1]
[dbg_probe=1]
The addresses are normal I2C addresses. The adapter is the string
@@ -513,6 +516,9 @@ spaces in kernel parameters.
The debug flags are bit flags for each BMC found, they are:
IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
+The tryxxx parameters can be used to disable detecting interfaces
+from various sources.
+
Setting dbg_probe to 1 will enable debugging of the probing and
detection process for BMCs on the SMBusses.
@@ -535,7 +541,8 @@ kernel command line as:
ipmi_ssif.adapter=<adapter1>[,<adapter2>[...]]
ipmi_ssif.dbg=<flags1>[,<flags2>[...]]
ipmi_ssif.dbg_probe=1
- ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+ ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+ ipmi_ssif.tryacpi=[0|1] ipmi_ssif.trydmi=[0|1]
These are the same options as on the module command line.
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
index 92deea30b183..707c65337ebf 100644
--- a/Documentation/Makefile.sphinx
+++ b/Documentation/Makefile.sphinx
@@ -10,6 +10,8 @@ _SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(src
SPHINX_CONF = conf.py
PAPER =
BUILDDIR = $(obj)/output
+PDFLATEX = xelatex
+LATEXOPTS = -interaction=batchmode
# User-friendly check for sphinx-build
HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)
@@ -29,7 +31,7 @@ else ifneq ($(DOCBOOKS),)
else # HAVE_SPHINX
# User-friendly check for pdflatex
-HAVE_PDFLATEX := $(shell if which xelatex >/dev/null 2>&1; then echo 1; else echo 0; fi)
+HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
@@ -51,8 +53,8 @@ loop_cmd = $(echo-cmd) $(cmd_$(1))
# $5 reST source folder relative to $(srctree)/$(src),
# e.g. "media" for the linux-tv book-set at ./Documentation/media
-quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4);
- cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media all;\
+quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
+ cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media $2;\
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
$(SPHINXBUILD) \
-b $2 \
@@ -67,16 +69,19 @@ htmldocs:
@$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,html,$(var),,$(var)))
latexdocs:
+ @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,latex,$(var),latex,$(var)))
+
ifeq ($(HAVE_PDFLATEX),0)
- $(warning The 'xelatex' command was not found. Make sure you have it installed and in PATH to produce PDF output.)
+
+pdfdocs:
+ $(warning The '$(PDFLATEX)' command was not found. Make sure you have it installed and in PATH to produce PDF output.)
@echo " SKIP Sphinx $@ target."
+
else # HAVE_PDFLATEX
- @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,latex,$(var),latex,$(var)))
-endif # HAVE_PDFLATEX
pdfdocs: latexdocs
-ifneq ($(HAVE_PDFLATEX),0)
- $(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=xelatex LATEXOPTS="-interaction=nonstopmode" -C $(BUILDDIR)/$(var)/latex)
+ $(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=$(PDFLATEX) LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex;)
+
endif # HAVE_PDFLATEX
epubdocs:
@@ -93,6 +98,7 @@ installmandocs:
cleandocs:
$(Q)rm -rf $(BUILDDIR)
+ $(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) -C Documentation/media clean
endif # HAVE_SPHINX
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index a4d3838130e4..39bcb74ea733 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -547,7 +547,7 @@ The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
It could reuse a value formerly fetched from this same pointer.
It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
 manner, resulting in <i>load tearing</i>, in turn resulting in a bytewise
- mash-up of two distince pointer values.
+ mash-up of two distinct pointer values.
It might even use value-speculation optimizations, where it makes
a wrong guess, but by the time it gets around to checking the
value, an update has changed the pointer to match the wrong guess.
@@ -659,6 +659,29 @@ systems with more than one CPU:
In other words, a given instance of <tt>synchronize_rcu()</tt>
can avoid waiting on a given RCU read-side critical section only
if it can prove that <tt>synchronize_rcu()</tt> started first.
+
+ <p>
+ A related question is &ldquo;When <tt>rcu_read_lock()</tt>
+ doesn't generate any code, why does it matter how it relates
+ to a grace period?&rdquo;
+ The answer is that it is not the relationship of
+ <tt>rcu_read_lock()</tt> itself that is important, but rather
+ the relationship of the code within the enclosed RCU read-side
+ critical section to the code preceding and following the
+ grace period.
+ If we take this viewpoint, then a given RCU read-side critical
+ section begins before a given grace period when some access
+ preceding the grace period observes the effect of some access
+ within the critical section, in which case none of the accesses
+ within the critical section may observe the effects of any
+ access following the grace period.
+
+ <p>
+ As of late 2016, mathematical models of RCU take this
+ viewpoint, for example, see slides&nbsp;62 and&nbsp;63
+ of the
+ <a href="http://www2.rdrop.com/users/paulmck/scalability/paper/LinuxMM.2016.10.04c.LCE.pdf">2016 LinuxCon EU</a>
+ presentation.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
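
[Editorial sketch, not from the document: the update side that this guarantee constrains. A structure may be freed only after synchronize_rcu() has waited out every read-side critical section that could still observe it; gp, gp_lock and struct foo are hypothetical names.]

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo { int a; };
    static struct foo __rcu *gp;        /* readers fetch this with rcu_dereference() */
    static DEFINE_SPINLOCK(gp_lock);    /* serializes updaters */

    /* Unpublish the current structure, wait for pre-existing readers, free it. */
    static void retire_foo(void)
    {
            struct foo *old;

            spin_lock(&gp_lock);
            old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
            rcu_assign_pointer(gp, NULL);
            spin_unlock(&gp_lock);

            /* Any critical section that observed 'old' must end before this returns. */
            synchronize_rcu();
            kfree(old);
    }
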
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 204422719197..5cbd8b2395b8 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -237,7 +237,7 @@ rcu_dereference()
The reader uses rcu_dereference() to fetch an RCU-protected
pointer, which returns a value that may then be safely
- dereferenced. Note that rcu_deference() does not actually
+ dereferenced. Note that rcu_dereference() does not actually
dereference the pointer, instead, it protects the pointer for
later dereferencing. It also executes any needed memory-barrier
instructions for a given CPU architecture. Currently, only Alpha
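
[Editorial sketch of the point being corrected here: rcu_dereference() only fetches and annotates the pointer, and the caller performs the actual dereference inside the read-side critical section. struct item and 'head' are made-up names.]

    #include <linux/rcupdate.h>

    struct item { int data; };
    static struct item __rcu *head;     /* hypothetical RCU-protected pointer */

    static int read_item(void)
    {
            struct item *p;
            int val = -1;

            rcu_read_lock();
            p = rcu_dereference(head);  /* fetches the pointer, no dereference yet */
            if (p)
                    val = p->data;      /* the actual dereference happens here */
            rcu_read_unlock();
            return val;
    }
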
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 36f1dedc944c..81455705e4a6 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -1,841 +1 @@
-.. _submittingpatches:
-
-How to Get Your Change Into the Linux Kernel or Care And Operation Of Your Linus Torvalds
-=========================================================================================
-
-For a person or company who wishes to submit a change to the Linux
-kernel, the process can sometimes be daunting if you're not familiar
-with "the system." This text is a collection of suggestions which
-can greatly increase the chances of your change being accepted.
-
-This document contains a large number of suggestions in a relatively terse
-format. For detailed information on how the kernel development process
-works, see :ref:`Documentation/development-process <development_process_main>`.
-Also, read :ref:`Documentation/SubmitChecklist <submitchecklist>`
-for a list of items to check before
-submitting code. If you are submitting a driver, also read
-:ref:`Documentation/SubmittingDrivers <submittingdrivers>`;
-for device tree binding patches, read
-Documentation/devicetree/bindings/submitting-patches.txt.
-
-Many of these steps describe the default behavior of the ``git`` version
-control system; if you use ``git`` to prepare your patches, you'll find much
-of the mechanical work done for you, though you'll still need to prepare
-and document a sensible set of patches. In general, use of ``git`` will make
-your life as a kernel developer easier.
-
-Creating and Sending your Change
-********************************
-
-
-0) Obtain a current source tree
--------------------------------
-
-If you do not have a repository with the current kernel source handy, use
-``git`` to obtain one. You'll want to start with the mainline repository,
-which can be grabbed with::
-
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-Note, however, that you may not want to develop against the mainline tree
-directly. Most subsystem maintainers run their own trees and want to see
-patches prepared against those trees. See the **T:** entry for the subsystem
-in the MAINTAINERS file to find that tree, or simply ask the maintainer if
-the tree is not listed there.
-
-It is still possible to download kernel releases via tarballs (as described
-in the next section), but that is the hard way to do kernel development.
-
-1) ``diff -up``
----------------
-
-If you must generate your patches by hand, use ``diff -up`` or ``diff -uprN``
-to create patches. Git generates patches in this form by default; if
-you're using ``git``, you can skip this section entirely.
-
-All changes to the Linux kernel occur in the form of patches, as
-generated by :manpage:`diff(1)`. When creating your patch, make sure to
-create it in "unified diff" format, as supplied by the ``-u`` argument
-to :manpage:`diff(1)`.
-Also, please use the ``-p`` argument which shows which C function each
-change is in - that makes the resultant ``diff`` a lot easier to read.
-Patches should be based in the root kernel source directory,
-not in any lower subdirectory.
-
-To create a patch for a single file, it is often sufficient to do::
-
- SRCTREE= linux
- MYFILE= drivers/net/mydriver.c
-
- cd $SRCTREE
- cp $MYFILE $MYFILE.orig
- vi $MYFILE # make your change
- cd ..
- diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
-
-To create a patch for multiple files, you should unpack a "vanilla",
-or unmodified kernel source tree, and generate a ``diff`` against your
-own source tree. For example::
-
- MYSRC= /devel/linux
-
- tar xvfz linux-3.19.tar.gz
- mv linux-3.19 linux-3.19-vanilla
- diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
- linux-3.19-vanilla $MYSRC > /tmp/patch
-
-``dontdiff`` is a list of files which are generated by the kernel during
-the build process, and should be ignored in any :manpage:`diff(1)`-generated
-patch.
-
-Make sure your patch does not include any extra files which do not
-belong in a patch submission. Make sure to review your patch -after-
-generating it with :manpage:`diff(1)`, to ensure accuracy.
-
-If your changes produce a lot of deltas, you need to split them into
-individual patches which modify things in logical stages; see
-:ref:`split_changes`. This will facilitate review by other kernel developers,
-very important if you want your patch accepted.
-
-If you're using ``git``, ``git rebase -i`` can help you with this process. If
-you're not using ``git``, ``quilt`` <http://savannah.nongnu.org/projects/quilt>
-is another popular alternative.
-
-.. _describe_changes:
-
-2) Describe your changes
-------------------------
-
-Describe your problem. Whether your patch is a one-line bug fix or
-5000 lines of a new feature, there must be an underlying problem that
-motivated you to do this work. Convince the reviewer that there is a
-problem worth fixing and that it makes sense for them to read past the
-first paragraph.
-
-Describe user-visible impact. Straight up crashes and lockups are
-pretty convincing, but not all bugs are that blatant. Even if the
-problem was spotted during code review, describe the impact you think
-it can have on users. Keep in mind that the majority of Linux
-installations run kernels from secondary stable trees or
-vendor/product-specific trees that cherry-pick only specific patches
-from upstream, so include anything that could help route your change
-downstream: provoking circumstances, excerpts from dmesg, crash
-descriptions, performance regressions, latency spikes, lockups, etc.
-
-Quantify optimizations and trade-offs. If you claim improvements in
-performance, memory consumption, stack footprint, or binary size,
-include numbers that back them up. But also describe non-obvious
-costs. Optimizations usually aren't free but trade-offs between CPU,
-memory, and readability; or, when it comes to heuristics, between
-different workloads. Describe the expected downsides of your
-optimization so that the reviewer can weigh costs against benefits.
-
-Once the problem is established, describe what you are actually doing
-about it in technical detail. It's important to describe the change
-in plain English for the reviewer to verify that the code is behaving
-as you intend it to.
-
-The maintainer will thank you if you write your patch description in a
-form which can be easily pulled into Linux's source code management
-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
-
-Solve only one problem per patch. If your description starts to get
-long, that's a sign that you probably need to split up your patch.
-See :ref:`split_changes`.
-
-When you submit or resubmit a patch or patch series, include the
-complete patch description and justification for it. Don't just
-say that this is version N of the patch (series). Don't expect the
-subsystem maintainer to refer back to earlier patch versions or referenced
-URLs to find the patch description and put that into the patch.
-I.e., the patch (series) and its description should be self-contained.
-This benefits both the maintainers and reviewers. Some reviewers
-probably didn't even receive earlier versions of the patch.
-
-Describe your changes in imperative mood, e.g. "make xyzzy do frotz"
-instead of "[This patch] makes xyzzy do frotz" or "[I] changed xyzzy
-to do frotz", as if you are giving orders to the codebase to change
-its behaviour.
-
-If the patch fixes a logged bug entry, refer to that bug entry by
-number and URL. If the patch follows from a mailing list discussion,
-give a URL to the mailing list archive; use the https://lkml.kernel.org/
-redirector with a ``Message-Id``, to ensure that the links cannot become
-stale.
-
-However, try to make your explanation understandable without external
-resources. In addition to giving a URL to a mailing list archive or
-bug, summarize the relevant points of the discussion that led to the
-patch as submitted.
-
-If you want to refer to a specific commit, don't just refer to the
-SHA-1 ID of the commit. Please also include the oneline summary of
-the commit, to make it easier for reviewers to know what it is about.
-Example::
-
- Commit e21d2170f36602ae2708 ("video: remove unnecessary
- platform_set_drvdata()") removed the unnecessary
- platform_set_drvdata(), but left the variable "dev" unused,
- delete it.
-
-You should also be sure to use at least the first twelve characters of the
-SHA-1 ID. The kernel repository holds a *lot* of objects, making
-collisions with shorter IDs a real possibility. Bear in mind that, even if
-there is no collision with your six-character ID now, that condition may
-change five years from now.
-
-If your patch fixes a bug in a specific commit, e.g. you found an issue using
-``git bisect``, please use the 'Fixes:' tag with the first 12 characters of
-the SHA-1 ID, and the one line summary. For example::
-
- Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
-
-The following ``git config`` settings can be used to add a pretty format for
-outputting the above style in the ``git log`` or ``git show`` commands::
-
- [core]
- abbrev = 12
- [pretty]
- fixes = Fixes: %h (\"%s\")
-
-.. _split_changes:
-
-3) Separate your changes
-------------------------
-
-Separate each **logical change** into a separate patch.
-
-For example, if your changes include both bug fixes and performance
-enhancements for a single driver, separate those changes into two
-or more patches. If your changes include an API update, and a new
-driver which uses that new API, separate those into two patches.
-
-On the other hand, if you make a single change to numerous files,
-group those changes into a single patch. Thus a single logical change
-is contained within a single patch.
-
-The point to remember is that each patch should make an easily understood
-change that can be verified by reviewers. Each patch should be justifiable
-on its own merits.
-
-If one patch depends on another patch in order for a change to be
-complete, that is OK. Simply note **"this patch depends on patch X"**
-in your patch description.
-
-When dividing your change into a series of patches, take special care to
-ensure that the kernel builds and runs properly after each patch in the
-series. Developers using ``git bisect`` to track down a problem can end up
-splitting your patch series at any point; they will not thank you if you
-introduce bugs in the middle.
-
-If you cannot condense your patch set into a smaller set of patches,
-then only post say 15 or so at a time and wait for review and integration.
-
-
-
-4) Style-check your changes
----------------------------
-
-Check your patch for basic style violations, details of which can be
-found in
-:ref:`Documentation/CodingStyle <codingstyle>`.
-Failure to do so simply wastes
-the reviewers time and will get your patch rejected, probably
-without even being read.
-
-One significant exception is when moving code from one file to
-another -- in this case you should not modify the moved code at all in
-the same patch which moves it. This clearly delineates the act of
-moving the code and your changes. This greatly aids review of the
-actual differences and allows tools to better track the history of
-the code itself.
-
-Check your patches with the patch style checker prior to submission
-(scripts/checkpatch.pl). Note, though, that the style checker should be
-viewed as a guide, not as a replacement for human judgment. If your code
-looks better with a violation then its probably best left alone.
-
-The checker reports at three levels:
- - ERROR: things that are very likely to be wrong
- - WARNING: things requiring careful review
- - CHECK: things requiring thought
-
-You should be able to justify all violations that remain in your
-patch.
-
-
-5) Select the recipients for your patch
----------------------------------------
-
-You should always copy the appropriate subsystem maintainer(s) on any patch
-to code that they maintain; look through the MAINTAINERS file and the
-source code revision history to see who those maintainers are. The
-script scripts/get_maintainer.pl can be very useful at this step. If you
-cannot find a maintainer for the subsystem you are working on, Andrew
-Morton (akpm@linux-foundation.org) serves as a maintainer of last resort.
-
-You should also normally choose at least one mailing list to receive a copy
-of your patch set. linux-kernel@vger.kernel.org functions as a list of
-last resort, but the volume on that list has caused a number of developers
-to tune it out. Look in the MAINTAINERS file for a subsystem-specific
-list; your patch will probably get more attention there. Please do not
-spam unrelated lists, though.
-
-Many kernel-related lists are hosted on vger.kernel.org; you can find a
-list of them at http://vger.kernel.org/vger-lists.html. There are
-kernel-related lists hosted elsewhere as well, though.
-
-Do not send more than 15 patches at once to the vger mailing lists!!!
-
-Linus Torvalds is the final arbiter of all changes accepted into the
-Linux kernel. His e-mail address is <torvalds@linux-foundation.org>.
-He gets a lot of e-mail, and, at this point, very few patches go through
-Linus directly, so typically you should do your best to -avoid-
-sending him e-mail.
-
-If you have a patch that fixes an exploitable security bug, send that patch
-to security@kernel.org. For severe bugs, a short embargo may be considered
-to allow distributors to get the patch out to users; in such cases,
-obviously, the patch should not be sent to any public lists.
-
-Patches that fix a severe bug in a released kernel should be directed
-toward the stable maintainers by putting a line like this::
-
- Cc: stable@vger.kernel.org
-
-into the sign-off area of your patch (note, NOT an email recipient). You
-should also read
-:ref:`Documentation/stable_kernel_rules.txt <stable_kernel_rules>`
-in addition to this file.
-
-Note, however, that some subsystem maintainers want to come to their own
-conclusions on which patches should go to the stable trees. The networking
-maintainer, in particular, would rather not see individual developers
-adding lines like the above to their patches.
-
-If changes affect userland-kernel interfaces, please send the MAN-PAGES
-maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
-least a notification of the change, so that some information makes its way
-into the manual pages. User-space API changes should also be copied to
-linux-api@vger.kernel.org.
-
-For small patches you may want to CC the Trivial Patch Monkey
-trivial@kernel.org which collects "trivial" patches. Have a look
-into the MAINTAINERS file for its current manager.
-
-Trivial patches must qualify for one of the following rules:
-
-- Spelling fixes in documentation
-- Spelling fixes for errors which could break :manpage:`grep(1)`
-- Warning fixes (cluttering with useless warnings is bad)
-- Compilation fixes (only if they are actually correct)
-- Runtime fixes (only if they actually fix things)
-- Removing use of deprecated functions/macros
-- Contact detail and documentation fixes
-- Non-portable code replaced by portable code (even in arch-specific,
- since people copy, as long as it's trivial)
-- Any fix by the author/maintainer of the file (ie. patch monkey
- in re-transmission mode)
-
-
-
-6) No MIME, no links, no compression, no attachments. Just plain text
-----------------------------------------------------------------------
-
-Linus and other kernel developers need to be able to read and comment
-on the changes you are submitting. It is important for a kernel
-developer to be able to "quote" your changes, using standard e-mail
-tools, so that they may comment on specific portions of your code.
-
-For this reason, all patches should be submitted by e-mail "inline".
-
-.. warning::
-
- Be wary of your editor's word-wrap corrupting your patch,
- if you choose to cut-n-paste your patch.
-
-Do not attach the patch as a MIME attachment, compressed or not.
-Many popular e-mail applications will not always transmit a MIME
-attachment as plain text, making it impossible to comment on your
-code. A MIME attachment also takes Linus a bit more time to process,
-decreasing the likelihood of your MIME-attached change being accepted.
-
-Exception: If your mailer is mangling patches then someone may ask
-you to re-send them using MIME.
-
-See :ref:`Documentation/email-clients.txt <email_clients>`
-for hints about configuring your e-mail client so that it sends your patches
-untouched.
-
-7) E-mail size
---------------
-
-Large changes are not appropriate for mailing lists, and some
-maintainers. If your patch, uncompressed, exceeds 300 kB in size,
-it is preferred that you store your patch on an Internet-accessible
-server, and provide instead a URL (link) pointing to your patch. But note
-that if your patch exceeds 300 kB, it almost certainly needs to be broken up
-anyway.
-
-8) Respond to review comments
------------------------------
-
-Your patch will almost certainly get comments from reviewers on ways in
-which the patch can be improved. You must respond to those comments;
-ignoring reviewers is a good way to get ignored in return. Review comments
-or questions that do not lead to a code change should almost certainly
-bring about a comment or changelog entry so that the next reviewer better
-understands what is going on.
-
-Be sure to tell the reviewers what changes you are making and to thank them
-for their time. Code review is a tiring and time-consuming process, and
-reviewers sometimes get grumpy. Even in that case, though, respond
-politely and address the problems they have pointed out.
-
-
-9) Don't get discouraged - or impatient
----------------------------------------
-
-After you have submitted your change, be patient and wait. Reviewers are
-busy people and may not get to your patch right away.
-
-Once upon a time, patches used to disappear into the void without comment,
-but the development process works more smoothly than that now. You should
-receive comments within a week or so; if that does not happen, make sure
-that you have sent your patches to the right place. Wait for a minimum of
-one week before resubmitting or pinging reviewers - possibly longer during
-busy times like merge windows.
-
-
-10) Include PATCH in the subject
---------------------------------
-
-Due to high e-mail traffic to Linus, and to linux-kernel, it is common
-convention to prefix your subject line with [PATCH]. This lets Linus
-and other kernel developers more easily distinguish patches from other
-e-mail discussions.
-
-
-
-11) Sign your work
-------------------
-
-To improve tracking of who did what, especially with patches that can
-percolate to their final resting place in the kernel through several
-layers of maintainers, we've introduced a "sign-off" procedure on
-patches that are being emailed around.
-
-The sign-off is a simple line at the end of the explanation for the
-patch, which certifies that you wrote it or otherwise have the right to
-pass it on as an open-source patch. The rules are pretty simple: if you
-can certify the below:
-
-Developer's Certificate of Origin 1.1
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-
-then you just add a line saying::
-
- Signed-off-by: Random J Developer <random@developer.example.org>
-
-using your real name (sorry, no pseudonyms or anonymous contributions.)
-
-Some people also put extra tags at the end. They'll just be ignored for
-now, but you can do this to mark internal company procedures or just
-point out some special detail about the sign-off.
-
-If you are a subsystem or branch maintainer, sometimes you need to slightly
-modify patches you receive in order to merge them, because the code is not
-exactly the same in your tree and the submitters'. If you stick strictly to
-rule (c), you should ask the submitter to rediff, but this is a totally
-counter-productive waste of time and energy. Rule (b) allows you to adjust
-the code, but then it is very impolite to change one submitter's code and
-make him endorse your bugs. To solve this problem, it is recommended that
-you add a line between the last Signed-off-by header and yours, indicating
-the nature of your changes. While there is nothing mandatory about this, it
-seems like prepending the description with your mail and/or name, all
-enclosed in square brackets, is noticeable enough to make it obvious that
-you are responsible for last-minute changes. Example::
-
- Signed-off-by: Random J Developer <random@developer.example.org>
- [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
- Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
-
-This practice is particularly helpful if you maintain a stable branch and
-want at the same time to credit the author, track changes, merge the fix,
-and protect the submitter from complaints. Note that under no circumstances
-can you change the author's identity (the From header), as it is the one
-which appears in the changelog.
-
-Special note to back-porters: It seems to be a common and useful practice
-to insert an indication of the origin of a patch at the top of the commit
-message (just after the subject line) to facilitate tracking. For instance,
-here's what we see in a 3.x-stable release::
-
- Date: Tue Oct 7 07:26:38 2014 -0400
-
- libata: Un-break ATA blacklist
-
- commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
-
-And here's what might appear in an older kernel once a patch is backported::
-
- Date: Tue May 13 22:12:27 2008 +0200
-
- wireless, airo: waitbusy() won't delay
-
- [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
-
-Whatever the format, this information provides a valuable help to people
-tracking your trees, and to people trying to troubleshoot bugs in your
-tree.
-
-
-12) When to use Acked-by: and Cc:
----------------------------------
-
-The Signed-off-by: tag indicates that the signer was involved in the
-development of the patch, or that he/she was in the patch's delivery path.
-
-If a person was not directly involved in the preparation or handling of a
-patch but wishes to signify and record their approval of it then they can
-ask to have an Acked-by: line added to the patch's changelog.
-
-Acked-by: is often used by the maintainer of the affected code when that
-maintainer neither contributed to nor forwarded the patch.
-
-Acked-by: is not as formal as Signed-off-by:. It is a record that the acker
-has at least reviewed the patch and has indicated acceptance. Hence patch
-mergers will sometimes manually convert an acker's "yep, looks good to me"
-into an Acked-by: (but note that it is usually better to ask for an
-explicit ack).
-
-Acked-by: does not necessarily indicate acknowledgement of the entire patch.
-For example, if a patch affects multiple subsystems and has an Acked-by: from
-one subsystem maintainer then this usually indicates acknowledgement of just
-the part which affects that maintainer's code. Judgement should be used here.
-When in doubt people should refer to the original discussion in the mailing
-list archives.
-
-If a person has had the opportunity to comment on a patch, but has not
-provided such comments, you may optionally add a ``Cc:`` tag to the patch.
-This is the only tag which might be added without an explicit action by the
-person it names - but it should indicate that this person was copied on the
-patch. This tag documents that potentially interested parties
-have been included in the discussion.
-
-
-13) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
---------------------------------------------------------------------------
-
-The Reported-by tag gives credit to people who find bugs and report them and it
-hopefully inspires them to help us again in the future. Please note that if
-the bug was reported in private, then ask for permission first before using the
-Reported-by tag.
-
-A Tested-by: tag indicates that the patch has been successfully tested (in
-some environment) by the person named. This tag informs maintainers that
-some testing has been performed, provides a means to locate testers for
-future patches, and ensures credit for the testers.
-
-Reviewed-by:, instead, indicates that the patch has been reviewed and found
-acceptable according to the Reviewer's Statement:
-
-Reviewer's statement of oversight
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-By offering my Reviewed-by: tag, I state that:
-
- (a) I have carried out a technical review of this patch to
- evaluate its appropriateness and readiness for inclusion into
- the mainline kernel.
-
- (b) Any problems, concerns, or questions relating to the patch
- have been communicated back to the submitter. I am satisfied
- with the submitter's response to my comments.
-
- (c) While there may be things that could be improved with this
- submission, I believe that it is, at this time, (1) a
- worthwhile modification to the kernel, and (2) free of known
- issues which would argue against its inclusion.
-
- (d) While I have reviewed the patch and believe it to be sound, I
- do not (unless explicitly stated elsewhere) make any
- warranties or guarantees that it will achieve its stated
- purpose or function properly in any given situation.
-
-A Reviewed-by tag is a statement of opinion that the patch is an
-appropriate modification of the kernel without any remaining serious
-technical issues. Any interested reviewer (who has done the work) can
-offer a Reviewed-by tag for a patch. This tag serves to give credit to
-reviewers and to inform maintainers of the degree of review which has been
-done on the patch. Reviewed-by: tags, when supplied by reviewers known to
-understand the subject area and to perform thorough reviews, will normally
-increase the likelihood of your patch getting into the kernel.
-
-A Suggested-by: tag indicates that the patch idea is suggested by the person
-named and ensures credit to the person for the idea. Please note that this
-tag should not be added without the reporter's permission, especially if the
-idea was not posted in a public forum. That said, if we diligently credit our
-idea reporters, they will, hopefully, be inspired to help us again in the
-future.
-
-A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
-is used to make it easy to determine where a bug originated, which can help
-review a bug fix. This tag also assists the stable kernel team in determining
-which stable kernel versions should receive your fix. This is the preferred
-method for indicating a bug fixed by the patch. See :ref:`describe_changes`
-for more details.
-
-
-14) The canonical patch format
-------------------------------
-
-This section describes how the patch itself should be formatted. Note
-that, if you have your patches stored in a ``git`` repository, proper patch
-formatting can be had with ``git format-patch``. The tools cannot create
-the necessary text, though, so read the instructions below anyway.
-
-The canonical patch subject line is::
-
- Subject: [PATCH 001/123] subsystem: summary phrase
-
-The canonical patch message body contains the following:
-
- - A ``from`` line specifying the patch author (only needed if the person
- sending the patch is not the author).
-
- - An empty line.
-
- - The body of the explanation, line wrapped at 75 columns, which will
- be copied to the permanent changelog to describe this patch.
-
- - The ``Signed-off-by:`` lines, described above, which will
- also go in the changelog.
-
- - A marker line containing simply ``---``.
-
- - Any additional comments not suitable for the changelog.
-
- - The actual patch (``diff`` output).
-
-The Subject line format makes it very easy to sort the emails
-alphabetically by subject line - pretty much any email reader will
-support that - since because the sequence number is zero-padded,
-the numerical and alphabetic sort is the same.
-
-The ``subsystem`` in the email's Subject should identify which
-area or subsystem of the kernel is being patched.
-
-The ``summary phrase`` in the email's Subject should concisely
-describe the patch which that email contains. The ``summary
-phrase`` should not be a filename. Do not use the same ``summary
-phrase`` for every patch in a whole patch series (where a ``patch
-series`` is an ordered sequence of multiple, related patches).
-
-Bear in mind that the ``summary phrase`` of your email becomes a
-globally-unique identifier for that patch. It propagates all the way
-into the ``git`` changelog. The ``summary phrase`` may later be used in
-developer discussions which refer to the patch. People will want to
-google for the ``summary phrase`` to read discussion regarding that
-patch. It will also be the only thing that people may quickly see
-when, two or three months later, they are going through perhaps
-thousands of patches using tools such as ``gitk`` or ``git log
---oneline``.
-
-For these reasons, the ``summary`` must be no more than 70-75
-characters, and it must describe both what the patch changes, as well
-as why the patch might be necessary. It is challenging to be both
-succinct and descriptive, but that is what a well-written summary
-should do.
-
-The ``summary phrase`` may be prefixed by tags enclosed in square
-brackets: "Subject: [PATCH <tag>...] <summary phrase>". The tags are
-not considered part of the summary phrase, but describe how the patch
-should be treated. Common tags might include a version descriptor if
-the multiple versions of the patch have been sent out in response to
-comments (i.e., "v1, v2, v3"), or "RFC" to indicate a request for
-comments. If there are four patches in a patch series the individual
-patches may be numbered like this: 1/4, 2/4, 3/4, 4/4. This assures
-that developers understand the order in which the patches should be
-applied and that they have reviewed or applied all of the patches in
-the patch series.
-
-A couple of example Subjects::
-
- Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching
- Subject: [PATCH v2 01/27] x86: fix eflags tracking
-
-The ``from`` line must be the very first line in the message body,
-and has the form:
-
- From: Original Author <author@example.com>
-
-The ``from`` line specifies who will be credited as the author of the
-patch in the permanent changelog. If the ``from`` line is missing,
-then the ``From:`` line from the email header will be used to determine
-the patch author in the changelog.
-
-The explanation body will be committed to the permanent source
-changelog, so should make sense to a competent reader who has long
-since forgotten the immediate details of the discussion that might
-have led to this patch. Including symptoms of the failure which the
-patch addresses (kernel log messages, oops messages, etc.) is
-especially useful for people who might be searching the commit logs
-looking for the applicable patch. If a patch fixes a compile failure,
-it may not be necessary to include _all_ of the compile failures; just
-enough that it is likely that someone searching for the patch can find
-it. As in the ``summary phrase``, it is important to be both succinct as
-well as descriptive.
-
-The ``---`` marker line serves the essential purpose of marking for patch
-handling tools where the changelog message ends.
-
-One good use for the additional comments after the ``---`` marker is for
-a ``diffstat``, to show what files have changed, and the number of
-inserted and deleted lines per file. A ``diffstat`` is especially useful
-on bigger patches. Other comments relevant only to the moment or the
-maintainer, not suitable for the permanent changelog, should also go
-here. A good example of such comments might be ``patch changelogs``
-which describe what has changed between the v1 and v2 version of the
-patch.
-
-If you are going to include a ``diffstat`` after the ``---`` marker, please
-use ``diffstat`` options ``-p 1 -w 70`` so that filenames are listed from
-the top of the kernel source tree and don't use too much horizontal
-space (easily fit in 80 columns, maybe with some indentation). (``git``
-generates appropriate diffstats by default.)
-
-See more details on the proper patch format in the following
-references.
-
-.. _explicit_in_reply_to:
-
-15) Explicit In-Reply-To headers
---------------------------------
-
-It can be helpful to manually add In-Reply-To: headers to a patch
-(e.g., when using ``git send-email``) to associate the patch with
-previous relevant discussion, e.g. to link a bug fix to the email with
-the bug report. However, for a multi-patch series, it is generally
-best to avoid using In-Reply-To: to link to older versions of the
-series. This way multiple versions of the patch don't become an
-unmanageable forest of references in email clients. If a link is
-helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in
-the cover email text) to link to an earlier version of the patch series.
-
-
-16) Sending ``git pull`` requests
----------------------------------
-
-If you have a series of patches, it may be most convenient to have the
-maintainer pull them directly into the subsystem repository with a
-``git pull`` operation. Note, however, that pulling patches from a developer
-requires a higher degree of trust than taking patches from a mailing list.
-As a result, many subsystem maintainers are reluctant to take pull
-requests, especially from new, unknown developers. If in doubt you can use
-the pull request as the cover letter for a normal posting of the patch
-series, giving the maintainer the option of using either.
-
-A pull request should have [GIT] or [PULL] in the subject line. The
-request itself should include the repository name and the branch of
-interest on a single line; it should look something like::
-
- Please pull from
-
- git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
-
- to get these changes:
-
-A pull request should also include an overall message saying what will be
-included in the request, a ``git shortlog`` listing of the patches
-themselves, and a ``diffstat`` showing the overall effect of the patch series.
-The easiest way to get all this information together is, of course, to let
-``git`` do it for you with the ``git request-pull`` command.
-
-Some maintainers (including Linus) want to see pull requests from signed
-commits; that increases their confidence that the request actually came
-from you. Linus, in particular, will not pull from public hosting sites
-like GitHub in the absence of a signed tag.
-
-The first step toward creating such tags is to make a GNUPG key and get it
-signed by one or more core kernel developers. This step can be hard for
-new developers, but there is no way around it. Attending conferences can
-be a good way to find developers who can sign your key.
-
-Once you have prepared a patch series in ``git`` that you wish to have somebody
-pull, create a signed tag with ``git tag -s``. This will create a new tag
-identifying the last commit in the series and containing a signature
-created with your private key. You will also have the opportunity to add a
-changelog-style message to the tag; this is an ideal place to describe the
-effects of the pull request as a whole.
-
-If the tree the maintainer will be pulling from is not the repository you
-are working from, don't forget to push the signed tag explicitly to the
-public tree.
-
-When generating your pull request, use the signed tag as the target. A
-command like this will do the trick::
-
- git request-pull master git://my.public.tree/linux.git my-signed-tag
-
-
-REFERENCES
-**********
-
-Andrew Morton, "The perfect patch" (tpp).
- <http://www.ozlabs.org/~akpm/stuff/tpp.txt>
-
-Jeff Garzik, "Linux kernel patch submission format".
- <http://linux.yyz.us/patch-format.html>
-
-Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
- <http://www.kroah.com/log/linux/maintainer.html>
-
- <http://www.kroah.com/log/linux/maintainer-02.html>
-
- <http://www.kroah.com/log/linux/maintainer-03.html>
-
- <http://www.kroah.com/log/linux/maintainer-04.html>
-
- <http://www.kroah.com/log/linux/maintainer-05.html>
-
- <http://www.kroah.com/log/linux/maintainer-06.html>
-
-NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
- <https://lkml.org/lkml/2005/7/11/336>
-
-Kernel Documentation/CodingStyle:
- :ref:`Documentation/CodingStyle <codingstyle>`
-
-Linus Torvalds's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
-
-Andi Kleen, "On submitting kernel patches"
- Some strategies to get difficult or controversial changes in.
-
- http://halobates.de/on-submitting-patches.pdf
-
+This file has moved to process/submitting-patches.rst
diff --git a/Documentation/VGA-softcursor.txt b/Documentation/VGA-softcursor.txt
deleted file mode 100644
index 70acfbf399eb..000000000000
--- a/Documentation/VGA-softcursor.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Software cursor for VGA by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
-======================= and Martin Mares <mj@atrey.karlin.mff.cuni.cz>
-
- Linux now has some ability to manipulate cursor appearance. Normally, you
-can set the size of hardware cursor (and also work around some ugly bugs in
-those miserable Trident cards--see #define TRIDENT_GLITCH in drivers/video/
-vgacon.c). You can now play a few new tricks: you can make your cursor look
-like a non-blinking red block, make it inverse background of the character it's
-over or to highlight that character and still choose whether the original
-hardware cursor should remain visible or not. There may be other things I have
-never thought of.
-
- The cursor appearance is controlled by a "<ESC>[?1;2;3c" escape sequence
-where 1, 2 and 3 are parameters described below. If you omit any of them,
-they will default to zeroes.
-
- Parameter 1 specifies cursor size (0=default, 1=invisible, 2=underline, ...,
-8=full block) + 16 if you want the software cursor to be applied + 32 if you
-want to always change the background color + 64 if you dislike having the
-background the same as the foreground. Highlights are ignored for the last two
-flags.
-
- The second parameter selects character attribute bits you want to change
-(by simply XORing them with the value of this parameter). On standard VGA,
-the high four bits specify background and the low four the foreground. In both
-groups, low three bits set color (as in normal color codes used by the console)
-and the most significant one turns on highlight (or sometimes blinking--it
-depends on the configuration of your VGA).
-
- The third parameter consists of character attribute bits you want to set.
-Bit setting takes place before bit toggling, so you can simply clear a bit by
-including it in both the set mask and the toggle mask.
-
-Examples:
-=========
-
-To get normal blinking underline, use: echo -e '\033[?2c'
-To get blinking block, use: echo -e '\033[?6c'
-To get red non-blinking block, use: echo -e '\033[?17;0;64c'
diff --git a/Documentation/acpi/DSD-properties-rules.txt b/Documentation/acpi/DSD-properties-rules.txt
new file mode 100644
index 000000000000..3e4862bdad98
--- /dev/null
+++ b/Documentation/acpi/DSD-properties-rules.txt
@@ -0,0 +1,97 @@
+_DSD Device Properties Usage Rules
+----------------------------------
+
+Properties, Property Sets and Property Subsets
+----------------------------------------------
+
+The _DSD (Device Specific Data) configuration object, introduced in ACPI 5.1,
+allows any type of device configuration data to be provided via the ACPI
+namespace. In principle, the format of the data may be arbitrary, but it has to
+be identified by a UUID which must be recognized by the driver processing the
+_DSD output. However, there are generic UUIDs defined for _DSD recognized by
+the ACPI subsystem in the Linux kernel which automatically processes the data
+packages associated with them and makes those data available to device drivers
+as "device properties".
+
+A device property is a data item consisting of a string key and a value (of a
+specific type) associated with it.
+
+In the ACPI _DSD context it is an element of the sub-package following the
+generic Device Properties UUID in the _DSD return package as specified in the
+Device Properties UUID definition document [1].
+
+It also may be regarded as the definition of a key and the associated data type
+that can be returned by _DSD in the Device Properties UUID sub-package for a
+given device.
+
+A property set is a collection of properties applicable to a hardware entity
+like a device. In the ACPI _DSD context it is the set of all properties that
+can be returned in the Device Properties UUID sub-package for the device in
+question.
+
+Property subsets are nested collections of properties. Each of them is
+associated with an additional key (name) allowing the subset to be referred
+to as a whole (and to be treated as a separate entity). The canonical
+representation of property subsets is via the mechanism specified in the
+Hierarchical Properties Extension UUID definition document [2].
+
+Property sets may be hierarchical. That is, a property set may contain
+multiple property subsets, each of which may contain property subsets of
+its own, and so on.
+
+General Validity Rule for Property Sets
+---------------------------------------
+
+Valid property sets must follow the guidance given by the Device Properties UUID
+definition document [1].
+
+_DSD properties are intended to be used in addition to, and not instead of, the
+existing mechanisms defined by the ACPI specification. Therefore, as a rule,
+they should only be used if the ACPI specification does not make direct
+provisions for handling the underlying use case. It generally is invalid to
+return property sets which do not follow that rule from _DSD in data packages
+associated with the Device Properties UUID.
+
+Additional Considerations
+-------------------------
+
+There are cases in which, even if the general rule given above is followed in
+principle, the property set may still not be regarded as a valid one.
+
+For example, that applies to device properties which may cause kernel code
+(either a device driver or a library/subsystem) to access hardware in a way
+possibly leading to a conflict with AML methods in the ACPI namespace. In
+particular, that may happen if the kernel code uses device properties to
+manipulate hardware normally controlled by ACPI methods related to power
+management, like _PSx and _DSW (for device objects) or _ON and _OFF (for power
+resource objects), or by ACPI device disabling/enabling methods, like _DIS and
+_SRS.
+
+In all cases in which kernel code may do something that will confuse AML as a
+result of using device properties, the device properties in question are not
+suitable for the ACPI environment and consequently they cannot belong to a valid
+property set.
+
+Property Sets and Device Tree Bindings
+--------------------------------------
+
+It often is useful to make _DSD return property sets that follow Device Tree
+bindings.
+
+In those cases, however, the above validity considerations must be taken into
+account in the first place and returning invalid property sets from _DSD must be
+avoided. For this reason, it may not be possible to make _DSD return a property
+set following the given DT binding literally and completely. Still, for the
+sake of code re-use, it may make sense to provide as much of the configuration
+data as possible in the form of device properties and complement that with an
+ACPI-specific mechanism suitable for the use case at hand.
+
+In any case, property sets following DT bindings literally should not be
+expected to automatically work in the ACPI environment regardless of their
+contents.
+
+References
+----------
+
+[1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+[2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index a91ec5af52df..209a5eba6b87 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -415,3 +415,12 @@ the "compatible" property in the _DSD or a _CID as long as one of their
ancestors provides a _DSD with a valid "compatible" property. Such device
objects are then simply regarded as additional "blocks" providing hierarchical
configuration information to the driver of the composite ancestor device.
+
+However, PRP0001 can only be returned from either _HID or _CID of a device
+object if all of the properties returned by the _DSD associated with it (either
+the _DSD of the device object itself or the _DSD of its ancestor in the
+"composite device" case described above) can be used in the ACPI environment.
+Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible"
+property returned by it is meaningless.
+
+Refer to DSD-properties-rules.txt for more information.
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index 5aafe0b351a1..2aff0349facd 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -51,6 +51,68 @@ it to 1 marks the GPIO as active low.
In our Bluetooth example the "reset-gpios" refers to the second GpioIo()
resource, second pin in that resource with the GPIO number of 31.
+It is possible to leave holes in the array of GPIOs. This is useful in
+cases like SPI host controllers, where some chip selects may be
+implemented as GPIOs and some as native signals. For example, an SPI host
+controller can have chip selects 0 and 2 implemented as GPIOs and 1 as
+native:
+
+ Package () {
+ "cs-gpios",
+ Package () {
+ ^GPIO, 19, 0, 0, // chip select 0: GPIO
+ 0, // chip select 1: native signal
+ ^GPIO, 20, 0, 0, // chip select 2: GPIO
+ }
+ }
+
+Other supported properties
+--------------------------
+
+The following Device Tree compatible device properties are also supported by
+_DSD device properties for GPIO controllers:
+
+- gpio-hog
+- output-high
+- output-low
+- input
+- line-name
+
+Example:
+
+ Name (_DSD, Package () {
+ // _DSD Hierarchical Properties Extension UUID
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () {"hog-gpio8", "G8PU"}
+ }
+ })
+
+ Name (G8PU, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {"gpio-hog", 1},
+ Package () {"gpios", Package () {8, 0}},
+ Package () {"output-high", 1},
+ Package () {"line-name", "gpio8-pullup"},
+ }
+ })
+
+- gpio-line-names
+
+Example:
+
+ Package () {
+ "gpio-line-names",
+ Package () {
+ "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD", "MUX7_IO",
+ "LVL_C_A1", "MUX0_IO", "SPI1_MISO"
+ }
+ }
+
+See Documentation/devicetree/bindings/gpio/gpio.txt for more information
+about these properties.
+
ACPI GPIO Mappings Provided by Drivers
--------------------------------------
diff --git a/Documentation/acpi/osi.txt b/Documentation/acpi/osi.txt
new file mode 100644
index 000000000000..50cde0ceb9b0
--- /dev/null
+++ b/Documentation/acpi/osi.txt
@@ -0,0 +1,187 @@
+ACPI _OSI and _REV methods
+--------------------------
+
+An ACPI BIOS can use the "Operating System Interfaces" method (_OSI)
+to find out what the operating system supports. For example, if the BIOS
+AML code includes _OSI("XYZ"), the kernel's AML interpreter
+can evaluate that method, look to see if it supports 'XYZ'
+and answer YES or NO to the BIOS.
+
+The ACPI _REV method returns the "Revision of the ACPI specification
+that OSPM supports".
+
+This document explains how and why the BIOS and Linux should use these methods.
+It also explains how and why they are widely misused.
+
+How to use _OSI
+---------------
+
+Linux runs on two groups of machines -- those that are tested by the OEM
+to be compatible with Linux, and those that were never tested with Linux,
+but where Linux was installed to replace the original OS (Windows or OSX).
+
+The larger group is the systems tested to run only Windows. Not only that,
+but many were tested to run with just one specific version of Windows.
+So even though the BIOS may use _OSI to query what version of Windows is running,
+only a single path through the BIOS has actually been tested.
+Experience shows that taking untested paths through the BIOS
+exposes Linux to an entire category of BIOS bugs.
+For this reason, Linux _OSI defaults must continue to claim compatibility
+with all versions of Windows.
+
+But Linux isn't actually compatible with Windows, and the Linux community
+has also been hurt with regressions when Linux adds the latest version of
+Windows to its list of _OSI strings. So it is possible that additional strings
+will be more thoroughly vetted before shipping upstream in the future.
+But it is likely that they will all eventually be added.
+
+What should an OEM do if they want to support Linux and Windows
+using the same BIOS image? Often they need to do something different
+for Linux to deal with how Linux is different from Windows.
+Here the BIOS should ask exactly what it wants to know:
+
+_OSI("Linux-OEM-my_interface_name")
+where 'OEM' is needed if this is an OEM-specific hook,
+and 'my_interface_name' describes the hook, which could be a
+quirk, a bug, or a bug-fix.
+
+In addition, the OEM should send a patch to upstream Linux
+via the linux-acpi@vger.kernel.org mailing list. When that patch
+is checked into Linux, the OS will answer "YES" when the BIOS
+on the OEM's system uses _OSI to ask if the interface is supported
+by the OS. Linux distributors can back-port that patch for Linux
+pre-installs, and it will be included by all distributions that
+re-base to upstream. If the distribution cannot update the kernel binary,
+they can also add an acpi_osi=Linux-OEM-my_interface_name
+cmdline parameter to the boot loader, as needed.
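+
+For example, on a distribution that uses GRUB, that might look like the
+following (a sketch only; the file location and the command used to
+regenerate the boot configuration vary between distributions):
+
+  # /etc/default/grub
+  GRUB_CMDLINE_LINUX_DEFAULT="acpi_osi=Linux-OEM-my_interface_name"
+
+  # then regenerate the boot configuration, e.g.:
+  update-grub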
+
+If the string refers to a feature where the upstream kernel
+eventually grows support, a patch should be sent to remove
+the string when that support is added to the kernel.
+
+That was easy. Read on, to find out how to do it wrong.
+
+Before _OSI, there was _OS
+--------------------------
+
+ACPI 1.0 specified "_OS" as an
+"object that evaluates to a string that identifies the operating system."
+
+The ACPI BIOS flow would include an evaluation of _OS, and the AML
+interpreter in the kernel would return to it a string identifying the OS:
+
+Windows 98, SE: "Microsoft Windows"
+Windows ME: "Microsoft WindowsME: Millennium Edition"
+Windows NT: "Microsoft Windows NT"
+
+The idea was that, on a platform tasked with running multiple OS's,
+the BIOS could use _OS to enable devices that an OS
+might support, or enable quirks or bug workarounds
+necessary to make the platform compatible with that pre-existing OS.
+
+But _OS had fundamental problems. First, the BIOS needed to know the name
+of every possible version of the OS that would run on it, and needed to know
+all the quirks of those OS's. Certainly it would make more sense
+for the BIOS to ask *specific* things of the OS, such as
+"do you support a specific interface?", and thus in ACPI 3.0,
+_OSI was born to replace _OS.
+
+_OS was abandoned, though even today, many BIOS's still look for
+_OS "Microsoft Windows NT", even though it seems somewhat far-fetched
+that anybody would install those old operating systems
+over what came with the machine.
+
+Linux answers "Microsoft Windows NT" to please that BIOS idiom.
+That is the *only* viable strategy, as that is what modern Windows does,
+and so doing otherwise could steer the BIOS down an untested path.
+
+_OSI is born, and immediately misused
+--------------------------------------
+
+With _OSI, the *BIOS* provides the string describing an interface,
+and asks the OS: "YES/NO, are you compatible with this interface?"
+
+E.g. _OSI("3.0 Thermal Model") would return TRUE if the OS knows how
+to deal with the thermal extensions made to the ACPI 3.0 specification.
+An old OS that doesn't know about those extensions would answer FALSE,
+and a new OS may be able to return TRUE.
+
+For an OS-specific interface, the ACPI spec said that the BIOS and the OS
+were to agree on a string of the form "Windows-interface_name".
+
+But two bad things happened. First, the Windows ecosystem used _OSI
+not as designed, but as a direct replacement for _OS -- identifying
+the OS version, rather than an OS supported interface. Indeed, right
+from the start, the ACPI 3.0 spec itself codified this misuse
+in example code using _OSI("Windows 2001").
+
+This misuse was adopted and continues today.
+
+Linux had no choice but to also return TRUE to _OSI("Windows 2001")
+and its successors. To do otherwise would virtually guarantee breaking
+a BIOS that has been tested only with that _OSI returning TRUE.
+
+This strategy is problematic, as Linux is never completely compatible with
+the latest version of Windows, and sometimes it takes more than a year
+to iron out incompatibilities.
+
+Not to be out-done, the Linux community made things worse by returning TRUE
+to _OSI("Linux"). Doing so is even worse than the Windows misuse
+of _OSI, as "Linux" does not even contain any version information.
+_OSI("Linux") led to some BIOS' malfunctioning due to BIOS writer's
+using it in untested BIOS flows. But some OEM's used _OSI("Linux")
+in tested flows to support real Linux features. In 2009, Linux
+removed _OSI("Linux"), and added a cmdline parameter to restore it
+for legacy systems still needed it. Further a BIOS_BUG warning prints
+for all BIOS's that invoke it.
+
+No BIOS should use _OSI("Linux").
+
+The result is a strategy for Linux to maximize compatibility with
+ACPI BIOS that are tested on Windows machines. There is a real risk
+of over-stating that compatibility, but the alternative has often been
+catastrophic failure resulting from the BIOS taking paths that
+were never validated under *any* OS.
+
+Do not use _REV
+---------------
+
+Since _OSI("Linux") went away, some BIOS writers used _REV
+to support Linux and Windows differences in the same BIOS.
+
+_REV was defined in ACPI 1.0 to return the version of ACPI
+supported by the OS and the OS AML interpreter.
+
+Modern Windows returns _REV = 2. Linux used ACPI_CA_SUPPORT_LEVEL,
+which would increment based on the version of the spec supported.
+
+Unfortunately, _REV was also misused. E.g. some BIOS would check
+for _REV = 3, and do something for Linux, but when Linux returned
+_REV = 4, that support broke.
+
+In response to this problem, Linux returns _REV = 2 always,
+from mid-2015 onward. The ACPI specification will also be updated
+to reflect that _REV is deprecated, and always returns 2.
+
+Apple Mac and _OSI("Darwin")
+----------------------------
+
+On Apple's Mac platforms, the ACPI BIOS invokes _OSI("Darwin")
+to determine if the machine is running Apple OSX.
+
+Like Linux's _OSI("*Windows*") strategy, Linux defaults to
+answering YES to _OSI("Darwin") to enable full access
+to the hardware and validated BIOS paths seen by OSX.
+Just like on Windows-tested platforms, this strategy has risks.
+
+Starting in Linux-3.18, the kernel answered YES to _OSI("Darwin")
+for the purpose of enabling Mac Thunderbolt support. Further,
+if the kernel noticed _OSI("Darwin") being invoked, it additionally
+disabled all _OSI("*Windows*") to keep poorly written Mac BIOS
+from going down untested combinations of paths.
+
+The Linux-3.18 change in default caused power regressions on Mac
+laptops, and the 3.18 implementation did not allow changing
+the default via cmdline "acpi_osi=!Darwin". Linux-4.7 fixed
+the ability to use acpi_osi=!Darwin as a workaround, and
+we hope to see Mac Thunderbolt power management support in Linux-4.11.
diff --git a/Documentation/acpi/video_extension.txt b/Documentation/acpi/video_extension.txt
index 78b32ac02466..79bf6a4921be 100644
--- a/Documentation/acpi/video_extension.txt
+++ b/Documentation/acpi/video_extension.txt
@@ -101,6 +101,6 @@ received a notification, it will set the backlight level accordingly. This does
not affect the sending of event to user space, they are always sent to user
space regardless of whether or not the video module controls the backlight level
directly. This behaviour can be controlled through the brightness_switch_enabled
-module parameter as documented in kernel-parameters.txt. It is recommended to
+module parameter as documented in admin-guide/kernel-parameters.rst. It is recommended to
disable this behaviour once a GUI environment starts up and wants to have full
control of the backlight level.
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
new file mode 100644
index 000000000000..1b6dfb2b3adb
--- /dev/null
+++ b/Documentation/admin-guide/README.rst
@@ -0,0 +1,411 @@
+Linux kernel release 4.x <http://kernel.org/>
+=============================================
+
+These are the release notes for Linux version 4. Read them carefully,
+as they tell you what this is all about, explain how to install the
+kernel, and what to do if something goes wrong.
+
+What is Linux?
+--------------
+
+ Linux is a clone of the operating system Unix, written from scratch by
+ Linus Torvalds with assistance from a loosely-knit team of hackers across
+ the Net. It aims towards POSIX and Single UNIX Specification compliance.
+
+ It has all the features you would expect in a modern fully-fledged Unix,
+ including true multitasking, virtual memory, shared libraries, demand
+ loading, shared copy-on-write executables, proper memory management,
+ and multistack networking including IPv4 and IPv6.
+
+ It is distributed under the GNU General Public License - see the
+ accompanying COPYING file for more details.
+
+On what hardware does it run?
+-----------------------------
+
+ Although originally developed first for 32-bit x86-based PCs (386 or higher),
+ today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
+ UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
+ IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
+ Xtensa, Tilera TILE, AVR32, ARC and Renesas M32R architectures.
+
+ Linux is easily portable to most general-purpose 32- or 64-bit architectures
+ as long as they have a paged memory management unit (PMMU) and a port of the
+ GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has
+ also been ported to a number of architectures without a PMMU, although
+ functionality is then obviously somewhat limited.
+ Linux has also been ported to itself. You can now run the kernel as a
+ userspace application - this is called UserMode Linux (UML).
+
+Documentation
+-------------
+
+ - There is a lot of documentation available both in electronic form on
+ the Internet and in books, both Linux-specific and pertaining to
+ general UNIX questions. I'd recommend looking into the documentation
+ subdirectories on any Linux FTP site for the LDP (Linux Documentation
+ Project) books. This README is not meant to be documentation on the
+ system: there are much better sources available.
+
+ - There are various README files in the Documentation/ subdirectory:
+ these typically contain kernel-specific installation notes for some
+ drivers for example. See Documentation/00-INDEX for a list of what
+ is contained in each file. Please read the
+ :ref:`Documentation/process/changes.rst <changes>` file, as it
+ contains information about problems that may result from upgrading
+ your kernel.
+
+ - The Documentation/DocBook/ subdirectory contains several guides for
+ kernel developers and users. These guides can be rendered in a
+ number of formats: PostScript (.ps), PDF, HTML, & man-pages, among others.
+ After installation, ``make psdocs``, ``make pdfdocs``, ``make htmldocs``,
+ or ``make mandocs`` will render the documentation in the requested format.
+
+Installing the kernel source
+----------------------------
+
+ - If you install the full sources, put the kernel tarball in a
+ directory where you have permissions (e.g. your home directory) and
+ unpack it::
+
+ xz -cd linux-4.X.tar.xz | tar xvf -
+
+ Replace "X" with the version number of the latest kernel.
+
+ Do NOT use the /usr/src/linux area! This area has a (usually
+ incomplete) set of kernel headers that are used by the library header
+ files. They should match the library, and not get messed up by
+ whatever the kernel-du-jour happens to be.
+
+ - You can also upgrade between 4.x releases by patching. Patches are
+ distributed in the xz format. To install by patching, get all the
+ newer patch files, enter the top level directory of the kernel source
+ (linux-4.X) and execute::
+
+ xz -cd ../patch-4.x.xz | patch -p1
+
+ Replace "x" with each version bigger than the version "X" of your current
+ source tree, applied **in order**, and you should be ok. You may want to remove
+ the backup files (some-file-name~ or some-file-name.orig), and make sure
+ that there are no failed patches (some-file-name# or some-file-name.rej).
+ If there are, either you or I have made a mistake.
+
+ Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
+ (also known as the -stable kernels) are not incremental but instead apply
+ directly to the base 4.x kernel. For example, if your base kernel is 4.0
+ and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
+ and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
+ want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
+ patch -R) **before** applying the 4.0.3 patch. You can read more on this in
+ :ref:`Documentation/process/applying-patches.rst <applying_patches>`.
+
+ Alternatively, the script patch-kernel can be used to automate this
+ process. It determines the current kernel version and applies any
+ patches found::
+
+ linux/scripts/patch-kernel linux
+
+ The first argument in the command above is the location of the
+ kernel source. Patches are applied from the current directory, but
+ an alternative directory can be specified as the second argument.
+
+ - Make sure you have no stale .o files and dependencies lying around::
+
+ cd linux
+ make mrproper
+
+ You should now have the sources correctly installed.
+
+Software requirements
+---------------------
+
+ Compiling and running the 4.x kernels requires up-to-date
+ versions of various software packages. Consult
+ :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
+ required and how to get updates for these packages. Beware that using
+ excessively old versions of these packages can cause indirect
+ errors that are very difficult to track down, so don't assume that
+ you can just update packages when obvious problems arise during
+ build or operation.
+
+Build directory for the kernel
+------------------------------
+
+ When compiling the kernel, all output files will per default be
+ stored together with the kernel source code.
+ Using the option ``make O=output/dir`` allows you to specify an alternate
+ place for the output files (including .config).
+ Example::
+
+ kernel source code: /usr/src/linux-4.X
+ build directory: /home/name/build/kernel
+
+ To configure and build the kernel, use::
+
+ cd /usr/src/linux-4.X
+ make O=/home/name/build/kernel menuconfig
+ make O=/home/name/build/kernel
+ sudo make O=/home/name/build/kernel modules_install install
+
+ Please note: If the ``O=output/dir`` option is used, then it must be
+ used for all invocations of make.
+
+Configuring the kernel
+----------------------
+
+ Do not skip this step even if you are only upgrading one minor
+ version. New configuration options are added in each release, and
+ odd problems will turn up if the configuration files are not set up
+ as expected. If you want to carry your existing configuration to a
+ new version with minimal work, use ``make oldconfig``, which will
+ only ask you for the answers to new questions.
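+
+ For instance, a common way to start from the configuration of the kernel
+ you are currently running (a sketch; it assumes your distribution installs
+ its kernel configs under /boot, which most do) is::
+
+     cp /boot/config-$(uname -r) .config
+     make oldconfig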
+
+ - Alternative configuration commands are::
+
+ "make config" Plain text interface.
+
+ "make menuconfig" Text based color menus, radiolists & dialogs.
+
+ "make nconfig" Enhanced text based color menus.
+
+ "make xconfig" Qt based configuration tool.
+
+ "make gconfig" GTK+ based configuration tool.
+
+ "make oldconfig" Default all questions based on the contents of
+ your existing ./.config file and asking about
+ new config symbols.
+
+ "make silentoldconfig"
+ Like above, but avoids cluttering the screen
+ with questions already answered.
+ Additionally updates the dependencies.
+
+ "make olddefconfig"
+ Like above, but sets new symbols to their default
+ values without prompting.
+
+ "make defconfig" Create a ./.config file by using the default
+ symbol values from either arch/$ARCH/defconfig
+ or arch/$ARCH/configs/${PLATFORM}_defconfig,
+ depending on the architecture.
+
+ "make ${PLATFORM}_defconfig"
+ Create a ./.config file by using the default
+ symbol values from
+ arch/$ARCH/configs/${PLATFORM}_defconfig.
+ Use "make help" to get a list of all available
+ platforms of your architecture.
+
+ "make allyesconfig"
+ Create a ./.config file by setting symbol
+ values to 'y' as much as possible.
+
+ "make allmodconfig"
+ Create a ./.config file by setting symbol
+ values to 'm' as much as possible.
+
+ "make allnoconfig" Create a ./.config file by setting symbol
+ values to 'n' as much as possible.
+
+ "make randconfig" Create a ./.config file by setting symbol
+ values to random values.
+
+ "make localmodconfig" Create a config based on current config and
+ loaded modules (lsmod). Disables any module
+ option that is not needed for the loaded modules.
+
+ To create a localmodconfig for another machine,
+ store the lsmod of that machine into a file
+ and pass it in as a LSMOD parameter.
+
+ target$ lsmod > /tmp/mylsmod
+ target$ scp /tmp/mylsmod host:/tmp
+
+ host$ make LSMOD=/tmp/mylsmod localmodconfig
+
+ The above also works when cross compiling.
+
+ "make localyesconfig" Similar to localmodconfig, except it will convert
+ all module options to built in (=y) options.
+
+ You can find more information on using the Linux kernel config tools
+ in Documentation/kbuild/kconfig.txt.
+
+ - NOTES on ``make config``:
+
+ - Having unnecessary drivers will make the kernel bigger, and can
+ under some circumstances lead to problems: probing for a
+ nonexistent controller card may confuse your other controllers
+
+ - A kernel with math-emulation compiled in will still use the
+ coprocessor if one is present: the math emulation will just
+ never get used in that case. The kernel will be slightly larger,
+ but will work on different machines regardless of whether they
+ have a math coprocessor or not.
+
+ - The "kernel hacking" configuration details usually result in a
+ bigger or slower kernel (or both), and can even make the kernel
+ less stable by configuring some routines to actively try to
+ break bad code to find kernel problems (kmalloc()). Thus you
+ should probably answer 'n' to the questions for "development",
+ "experimental", or "debugging" features.
+
+Compiling the kernel
+--------------------
+
+ - Make sure you have at least gcc 3.2 available.
+ For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
+
+ Please note that you can still run a.out user programs with this kernel.
+
+ - Do a ``make`` to create a compressed kernel image. It is also
+ possible to do ``make install`` if you have lilo installed to suit the
+ kernel makefiles, but you may want to check your particular lilo setup first.
+
+ To do the actual install, you have to be root, but none of the normal
+ build should require that. Don't take the name of root in vain.
+
+ - If you configured any of the parts of the kernel as ``modules``, you
+ will also have to do ``make modules_install``.
+
+ - Verbose kernel compile/build output:
+
+ Normally, the kernel build system runs in a fairly quiet mode (but not
+ totally silent). However, sometimes you or other kernel developers need
+ to see compile, link, or other commands exactly as they are executed.
+ For this, use "verbose" build mode. This is done by passing
+ ``V=1`` to the ``make`` command, e.g.::
+
+ make V=1 all
+
+ To have the build system also tell the reason for the rebuild of each
+ target, use ``V=2``. The default is ``V=0``.
+
+ - Keep a backup kernel handy in case something goes wrong. This is
+ especially true for the development releases, since each new release
+ contains new code which has not been debugged. Make sure you keep a
+ backup of the modules corresponding to that kernel, as well. If you
+ are installing a new kernel with the same version number as your
+ working kernel, make a backup of your modules directory before you
+ do a ``make modules_install``.
+
+ Alternatively, before compiling, use the kernel config option
+ "LOCALVERSION" to append a unique suffix to the regular kernel version.
+ LOCALVERSION can be set in the "General Setup" menu.
+
+ - In order to boot your new kernel, you'll need to copy the kernel
+ image (e.g. .../linux/arch/x86/boot/bzImage after compilation)
+ to the place where your regular bootable kernel is found.
+
+ - Booting a kernel directly from a floppy without the assistance of a
+ bootloader such as LILO, is no longer supported.
+
+ If you boot Linux from the hard drive, chances are you use LILO, which
+ uses the kernel image as specified in the file /etc/lilo.conf. The
+ kernel image file is usually /vmlinuz, /boot/vmlinuz, /bzImage or
+ /boot/bzImage. To use the new kernel, save a copy of the old image
+ and copy the new image over the old one. Then, you MUST RERUN LILO
+ to update the loading map! If you don't, you won't be able to boot
+ the new kernel image.
+
+ Reinstalling LILO is usually a matter of running /sbin/lilo.
+ You may wish to edit /etc/lilo.conf to specify an entry for your
+ old kernel image (say, /vmlinux.old) in case the new one does not
+ work. See the LILO docs for more information.
+
+ After reinstalling LILO, you should be all set. Shutdown the system,
+ reboot, and enjoy!
+
+ If you ever need to change the default root device, video mode,
+ ramdisk size, etc. in the kernel image, use the ``rdev`` program (or
+ alternatively the LILO boot options when appropriate). No need to
+ recompile the kernel to change these parameters.
+
+ - Reboot with the new kernel and enjoy.
+
+If something goes wrong
+-----------------------
+
+ - If you have problems that seem to be due to kernel bugs, please check
+ the file MAINTAINERS to see if there is a particular person associated
+ with the part of the kernel that you are having trouble with. If there
+ isn't anyone listed there, then the second best thing is to mail
+ them to me (torvalds@linux-foundation.org), and possibly to any other
+ relevant mailing-list or to the newsgroup.
+
+ - In all bug-reports, *please* tell what kernel you are talking about,
+ how to duplicate the problem, and what your setup is (use your common
+ sense). If the problem is new, tell me so, and if the problem is
+ old, please try to tell me when you first noticed it.
+
+ - If the bug results in a message like::
+
+ unable to handle kernel paging request at address C0000010
+ Oops: 0002
+ EIP: 0010:XXXXXXXX
+ eax: xxxxxxxx ebx: xxxxxxxx ecx: xxxxxxxx edx: xxxxxxxx
+ esi: xxxxxxxx edi: xxxxxxxx ebp: xxxxxxxx
+ ds: xxxx es: xxxx fs: xxxx gs: xxxx
+ Pid: xx, process nr: xx
+ xx xx xx xx xx xx xx xx xx xx
+
+ or similar kernel debugging information on your screen or in your
+ system log, please duplicate it *exactly*. The dump may look
+ incomprehensible to you, but it does contain information that may
+ help debugging the problem. The text above the dump is also
+ important: it tells something about why the kernel dumped code (in
+ the above example, it's due to a bad kernel pointer). More information
+ on making sense of the dump is in Documentation/admin-guide/oops-tracing.rst
+
+ - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
+ as is, otherwise you will have to use the ``ksymoops`` program to make
+ sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
+ This utility can be downloaded from
+ ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
+ Alternatively, you can do the dump lookup by hand:
+
+ - In debugging dumps like the above, it helps enormously if you can
+ look up what the EIP value means. The hex value as such doesn't help
+ me or anybody else very much: it will depend on your particular
+ kernel setup. What you should do is take the hex value from the EIP
+ line (ignore the ``0010:``), and look it up in the kernel namelist to
+ see which kernel function contains the offending address.
+
+ To find out the kernel function name, you'll need to find the system
+ binary associated with the kernel that exhibited the symptom. This is
+ the file 'linux/vmlinux'. To extract the namelist and match it against
+ the EIP from the kernel crash, do::
+
+ nm vmlinux | sort | less
+
+ This will give you a list of kernel addresses sorted in ascending
+ order, from which it is simple to find the function that contains the
+ offending address. Note that the address given by the kernel
+ debugging messages will not necessarily match exactly with the
+ function addresses (in fact, that is very unlikely), so you can't
+ just 'grep' the list: the list will, however, give you the starting
+ point of each kernel function, so by looking for the function that
+ has a starting address lower than the one you are searching for but
+ is followed by a function with a higher address you will find the one
+ you want. In fact, it may be a good idea to include a bit of
+ "context" in your problem report, giving a few lines around the
+ interesting one.
+
+ If you for some reason cannot do the above (you have a pre-compiled
+ kernel image or similar), telling me as much about your setup as
+ possible will help. Please read the :ref:`admin-guide/reporting-bugs.rst <reportingbugs>`
+ document for details.
+
+ - Alternatively, you can use gdb on a running kernel. (read-only; i.e. you
+ cannot change values or set break points.) To do this, first compile the
+ kernel with -g; edit arch/x86/Makefile appropriately, then do a ``make
+ clean``. You'll also need to enable CONFIG_PROC_FS (via ``make config``).
+
+ After you've rebooted with the new kernel, do ``gdb vmlinux /proc/kcore``.
+ You can now use all the usual gdb commands. The command to look up the
+ point where your system crashed is ``l *0xXXXXXXXX``. (Replace the XXXes
+ with the EIP value.)
+
+ gdb'ing a non-running kernel currently fails because ``gdb`` (wrongly)
+ disregards the starting offset for which the kernel is compiled.
diff --git a/Documentation/admin-guide/binfmt-misc.rst b/Documentation/admin-guide/binfmt-misc.rst
new file mode 100644
index 000000000000..97b0d7927078
--- /dev/null
+++ b/Documentation/admin-guide/binfmt-misc.rst
@@ -0,0 +1,151 @@
+Kernel Support for miscellaneous (your favourite) Binary Formats v1.1
+=====================================================================
+
+This Kernel feature allows you to invoke almost (for restrictions see below)
+every program by simply typing its name in the shell.
+This includes for example compiled Java(TM), Python or Emacs programs.
+
+To achieve this you must tell binfmt_misc which interpreter has to be invoked
+with which binary. Binfmt_misc recognises the binary-type by matching some bytes
+at the beginning of the file with a magic byte sequence (masking out specified
+bits) you have supplied. Binfmt_misc can also recognise a filename extension
+such as ``.com`` or ``.exe``.
+
+First you must mount binfmt_misc::
+
+ mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
+
+To actually register a new binary type, you have to set up a string looking like
+``:name:type:offset:magic:mask:interpreter:flags`` (where you can choose the
+``:`` to suit your needs) and echo it to ``/proc/sys/fs/binfmt_misc/register``.
+
+Here is what the fields mean:
+
+- ``name``
+ is an identifier string. A new /proc file will be created with this
+ name below ``/proc/sys/fs/binfmt_misc``; it cannot contain slashes ``/`` for
+ obvious reasons.
+- ``type``
+ is the type of recognition. Give ``M`` for magic and ``E`` for extension.
+- ``offset``
+ is the offset of the magic/mask in the file, counted in bytes. This
+ defaults to 0 if you omit it (i.e. you write ``:name:type::magic...``).
+ Ignored when using filename extension matching.
+- ``magic``
+ is the byte sequence binfmt_misc is matching for. The magic string
+ may contain hex-encoded characters like ``\x0a`` or ``\xA4``. Note that you
+ must escape any NUL bytes; parsing halts at the first one. In a shell
+ environment you might have to write ``\\x0a`` to prevent the shell from
+ eating your ``\``.
+ If you chose filename extension matching, this is the extension to be
+ recognised (without the ``.``, the ``\x0a`` specials are not allowed).
+ Extension matching is case sensitive, and slashes ``/`` are not allowed!
+- ``mask``
+ is an (optional, defaults to all 0xff) mask. You can mask out some
+ bits from matching by supplying a string like magic and as long as magic.
+ The mask is anded with the byte sequence of the file. Note that you must
+ escape any NUL bytes; parsing halts at the first one. Ignored when using
+ filename extension matching.
+- ``interpreter``
+ is the program that should be invoked with the binary as first
+ argument (specify the full path)
+- ``flags``
+ is an optional field that controls several aspects of the invocation
+ of the interpreter. It is a string of capital letters, each controls a
+ certain aspect. The following flags are supported:
+
+ ``P`` - preserve-argv[0]
+ Legacy behavior of binfmt_misc is to overwrite
+ the original argv[0] with the full path to the binary. When this
+ flag is included, binfmt_misc will add an argument to the argument
+ vector for this purpose, thus preserving the original ``argv[0]``.
+ e.g. If your interp is set to ``/bin/foo`` and you run ``blah``
+ (which is in ``/usr/local/bin``), then the kernel will execute
+ ``/bin/foo`` with ``argv[]`` set to ``["/bin/foo", "/usr/local/bin/blah", "blah"]``. The interp has to be aware of this so it can
+ execute ``/usr/local/bin/blah``
+ with ``argv[]`` set to ``["blah"]``.
+ ``O`` - open-binary
+ Legacy behavior of binfmt_misc is to pass the full path
+ of the binary to the interpreter as an argument. When this flag is
+ included, binfmt_misc will open the file for reading and pass its
+ descriptor as an argument, instead of the full path, thus allowing
+ the interpreter to execute non-readable binaries. This feature
+ should be used with care - the interpreter has to be trusted not to
+ emit the contents of the non-readable binary.
+ ``C`` - credentials
+ Currently, the behavior of binfmt_misc is to calculate
+ the credentials and security token of the new process according to
+ the interpreter. When this flag is included, these attributes are
+ calculated according to the binary. It also implies the ``O`` flag.
+ This feature should be used with care as the interpreter
+ will run with root permissions when a setuid binary owned by root
+ is run with binfmt_misc.
+ ``F`` - fix binary
+ The usual behaviour of binfmt_misc is to spawn the
+ binary lazily when the misc format file is invoked. However,
+ this doesn't work very well in the face of mount namespaces and
+ changeroots, so the ``F`` mode opens the binary as soon as the
+ emulation is installed and uses the opened image to spawn the
+ emulator, meaning it is always available once installed,
+ regardless of how the environment changes.
+
+
+There are some restrictions:
+
+ - the whole register string may not exceed 1920 characters
+ - the magic must reside in the first 128 bytes of the file, i.e.
+ offset+size(magic) has to be less than 128
+ - the interpreter string may not exceed 127 characters
+
+To use binfmt_misc you have to mount it first. You can mount it with
+``mount -t binfmt_misc none /proc/sys/fs/binfmt_misc`` command, or you can add
+a line ``none /proc/sys/fs/binfmt_misc binfmt_misc defaults 0 0`` to your
+``/etc/fstab`` so it auto mounts on boot.
+
+You may want to add the binary formats in one of your ``/etc/rc`` scripts during
+boot-up. Read the manual of your init program to figure out how to do this
+right.
+
+Think about the order of adding entries! Later added entries are matched first!
+
+
+A few examples (assumed you are in ``/proc/sys/fs/binfmt_misc``):
+
+- enable support for em86 (like binfmt_em86, for Alpha AXP only)::
+
+ echo ':i386:M::\x7fELF\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xff\xff:/bin/em86:' > register
+ echo ':i486:M::\x7fELF\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xff\xff:/bin/em86:' > register
+
+- enable support for packed DOS applications (pre-configured dosemu hdimages)::
+
+ echo ':DEXE:M::\x0eDEX::/usr/bin/dosexec:' > register
+
+- enable support for Windows executables using wine::
+
+ echo ':DOSWin:M::MZ::/usr/local/bin/wine:' > register
+
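+- a hypothetical extension-based entry (not part of the original examples):
+  run Python scripts via their ``.py`` suffix, assuming the interpreter is
+  installed at ``/usr/bin/python3``::
+
+    echo ':py:E::py::/usr/bin/python3:' > register
+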
+For java support see Documentation/admin-guide/java.rst
+
+
+You can enable/disable binfmt_misc or one binary type by echoing 0 (to disable)
+or 1 (to enable) to ``/proc/sys/fs/binfmt_misc/status`` or
+``/proc/.../the_name``.
+Catting the file tells you the current status of ``binfmt_misc/the_entry``.
+
+You can remove one entry or all entries by echoing -1 to ``/proc/.../the_name``
+or ``/proc/sys/fs/binfmt_misc/status``.
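+
+For instance, using the ``DOSWin`` entry registered in the examples above
+(a sketch; substitute whatever entry name you registered)::
+
+    cat /proc/sys/fs/binfmt_misc/DOSWin       # show the current status
+    echo 0 > /proc/sys/fs/binfmt_misc/DOSWin  # disable this entry
+    echo 1 > /proc/sys/fs/binfmt_misc/DOSWin  # re-enable it
+    echo -1 > /proc/sys/fs/binfmt_misc/DOSWin # remove it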
+
+
+Hints
+-----
+
+If you want to pass special arguments to your interpreter, you can
+write a wrapper script for it. See Documentation/admin-guide/java.rst for an
+example.
+
+Your interpreter should NOT look in the PATH for the filename; the kernel
+passes it the full filename (or the file descriptor) to use. Using ``$PATH`` can
+cause unexpected behaviour and can be a security hazard.
+
+
+Richard Günther <rguenth@tat.physik.uni-tuebingen.de>
diff --git a/Documentation/admin-guide/braille-console.rst b/Documentation/admin-guide/braille-console.rst
new file mode 100644
index 000000000000..18e79337dcfd
--- /dev/null
+++ b/Documentation/admin-guide/braille-console.rst
@@ -0,0 +1,38 @@
+Linux Braille Console
+=====================
+
+To get early boot messages on a braille device (before userspace screen
+readers can start), you first need to compile the support for the usual serial
+console (see :ref:`Documentation/admin-guide/serial-console.rst <serial_console>`), and
+for braille device
+(in :menuselection:`Device Drivers --> Accessibility support --> Console on braille device`).
+
+Then you need to specify a ``console=brl`` option on the kernel command line; the
+format is::
+
+ console=brl,serial_options...
+
+where ``serial_options...`` are the same as described in
+:ref:`Documentation/admin-guide/serial-console.rst <serial_console>`.
+
+So for instance you can use ``console=brl,ttyS0`` if the braille device is
+connected to the first serial port, and ``console=brl,ttyS0,115200`` to
+override the baud rate to 115200, etc.
+
+By default, the braille device will just show the last kernel message (console
+mode). To review previous messages, press the Insert key to switch to the VT
+review mode. In review mode, the arrow keys let you browse the VT content,
+the :kbd:`PAGE-UP`/:kbd:`PAGE-DOWN` keys go to the top/bottom of the screen,
+and the :kbd:`HOME` key goes back
+to the cursor, hence providing a very basic screen reviewing facility.
+
+Sound feedback can be obtained by adding the ``braille_console.sound=1`` kernel
+parameter.
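+
+Putting the above together, a kernel command line could for instance contain
+(a sketch that simply combines the options already described)::
+
+    console=brl,ttyS0,115200 braille_console.sound=1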
+
+For simplicity, only one braille console can be enabled, other uses of
+``console=brl,...`` will be discarded. Also note that it does not interfere with
+the console selection mechanism described in
+:ref:`Documentation/admin-guide/serial-console.rst <serial_console>`.
+
+For now, only the VisioBraille device is supported.
+
+Samuel Thibault <samuel.thibault@ens-lyon.org>
diff --git a/Documentation/admin-guide/bug-bisect.rst b/Documentation/admin-guide/bug-bisect.rst
new file mode 100644
index 000000000000..59567da344e8
--- /dev/null
+++ b/Documentation/admin-guide/bug-bisect.rst
@@ -0,0 +1,76 @@
+Bisecting a bug
++++++++++++++++
+
+Last updated: 28 October 2016
+
+Introduction
+============
+
+Always try the latest kernel from kernel.org and build from source. If you are
+not confident in doing that please report the bug to your distribution vendor
+instead of to a kernel developer.
+
+Finding bugs is not always easy. Have a go though. If you can't find it don't
+give up. Report as much as you have found to the relevant maintainer. See
+MAINTAINERS for who that is for the subsystem you have worked on.
+
+Before you submit a bug report read
+:ref:`Documentation/admin-guide/reporting-bugs.rst <reportingbugs>`.
+
+Devices not appearing
+=====================
+
+Often this is caused by udev/systemd. Check that first before blaming it
+on the kernel.
+
+Finding patch that caused a bug
+===============================
+
+Using the provided tools with ``git`` makes finding bugs easy provided the bug
+is reproducible.
+
+Steps to do it:
+
+- build the Kernel from its git source
+- start bisect with [#f1]_::
+
+ $ git bisect start
+
+- mark the broken changeset with::
+
+ $ git bisect bad [commit]
+
+- mark a changeset where the code is known to work with::
+
+ $ git bisect good [commit]
+
+- rebuild the Kernel and test
+- interact with git bisect by using either::
+
+ $ git bisect good
+
+ or::
+
+ $ git bisect bad
+
+ depending on whether the bug happened in the changeset you're testing
+- After some interactions, git bisect will give you the changeset that
+ likely caused the bug.
+
+- For example, if you know that the current version is bad, and version
+ 4.8 is good, you could do::
+
+ $ git bisect start
+ $ git bisect bad # Current version is bad
+ $ git bisect good v4.8
+
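+- If the test can be scripted, the remaining good/bad steps can be automated
+  with ``git bisect run``; a minimal sketch, assuming a hypothetical
+  ``test.sh`` that exits 0 when the kernel is good and 1 when it is bad::
+
+      $ git bisect run ./test.sh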
+
+.. [#f1] You can, optionally, provide both good and bad arguments at git
+ start with ``git bisect start [BAD] [GOOD]``
+
+For further references, please read:
+
+- The man page for ``git-bisect``
+- `Fighting regressions with git bisect <https://www.kernel.org/pub/software/scm/git/docs/git-bisect-lk2009.html>`_
+- `Fully automated bisecting with "git bisect run" <https://lwn.net/Articles/317154>`_
+- `Using Git bisect to figure out when brokenness was introduced <http://webchick.net/node/99>`_
diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst
new file mode 100644
index 000000000000..08c4b1308189
--- /dev/null
+++ b/Documentation/admin-guide/bug-hunting.rst
@@ -0,0 +1,369 @@
+Bug hunting
+===========
+
+Kernel bug reports often come with a stack dump like the one below::
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 1 PID: 28102 at kernel/module.c:1108 module_put+0x57/0x70
+ Modules linked in: dvb_usb_gp8psk(-) dvb_usb dvb_core nvidia_drm(PO) nvidia_modeset(PO) snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hwdep snd_hda_core snd_pcm snd_timer snd soundcore nvidia(PO) [last unloaded: rc_core]
+ CPU: 1 PID: 28102 Comm: rmmod Tainted: P WC O 4.8.4-build.1 #1
+ Hardware name: MSI MS-7309/MS-7309, BIOS V1.12 02/23/2009
+ 00000000 c12ba080 00000000 00000000 c103ed6a c1616014 00000001 00006dc6
+ c1615862 00000454 c109e8a7 c109e8a7 00000009 ffffffff 00000000 f13f6a10
+ f5f5a600 c103ee33 00000009 00000000 00000000 c109e8a7 f80ca4d0 c109f617
+ Call Trace:
+ [<c12ba080>] ? dump_stack+0x44/0x64
+ [<c103ed6a>] ? __warn+0xfa/0x120
+ [<c109e8a7>] ? module_put+0x57/0x70
+ [<c109e8a7>] ? module_put+0x57/0x70
+ [<c103ee33>] ? warn_slowpath_null+0x23/0x30
+ [<c109e8a7>] ? module_put+0x57/0x70
+ [<f80ca4d0>] ? gp8psk_fe_set_frontend+0x460/0x460 [dvb_usb_gp8psk]
+ [<c109f617>] ? symbol_put_addr+0x27/0x50
+ [<f80bc9ca>] ? dvb_usb_adapter_frontend_exit+0x3a/0x70 [dvb_usb]
+ [<f80bb3bf>] ? dvb_usb_exit+0x2f/0xd0 [dvb_usb]
+ [<c13d03bc>] ? usb_disable_endpoint+0x7c/0xb0
+ [<f80bb48a>] ? dvb_usb_device_exit+0x2a/0x50 [dvb_usb]
+ [<c13d2882>] ? usb_unbind_interface+0x62/0x250
+ [<c136b514>] ? __pm_runtime_idle+0x44/0x70
+ [<c13620d8>] ? __device_release_driver+0x78/0x120
+ [<c1362907>] ? driver_detach+0x87/0x90
+ [<c1361c48>] ? bus_remove_driver+0x38/0x90
+ [<c13d1c18>] ? usb_deregister+0x58/0xb0
+ [<c109fbb0>] ? SyS_delete_module+0x130/0x1f0
+ [<c1055654>] ? task_work_run+0x64/0x80
+ [<c1000fa5>] ? exit_to_usermode_loop+0x85/0x90
+ [<c10013f0>] ? do_fast_syscall_32+0x80/0x130
+ [<c1549f43>] ? sysenter_past_esp+0x40/0x6a
+ ---[ end trace 6ebc60ef3981792f ]---
+
+Such stack traces provide enough information to identify the line inside the
+Kernel's source code where the bug happened. Depending on the severity of
+the issue, it may also contain the word **Oops**, as on this one::
+
+ BUG: unable to handle kernel NULL pointer dereference at (null)
+ IP: [<c06969d4>] iret_exc+0x7d0/0xa59
+ *pdpt = 000000002258a001 *pde = 0000000000000000
+ Oops: 0002 [#1] PREEMPT SMP
+ ...
+
+Whether it is an **Oops** or some other sort of stack trace, the offending
+line is usually required to identify and handle the bug. Throughout this
+chapter, we'll use the term "Oops" for all kinds of stack traces that need to
+be analyzed.
+
+.. note::
+
+ ``ksymoops`` is useless on 2.6 or upper. Please use the Oops in its original
+ format (from ``dmesg``, etc). Ignore any references in this or other docs to
+ "decoding the Oops" or "running it through ksymoops".
+ If you post an Oops from 2.6+ that has been run through ``ksymoops``,
+ people will just tell you to repost it.
+
+Where is the Oops message located?
+-------------------------------------
+
+Normally the Oops text is read from the kernel buffers by klogd and
+handed to ``syslogd`` which writes it to a syslog file, typically
+``/var/log/messages`` (depends on ``/etc/syslog.conf``). On systems with
+systemd, it may also be stored by the ``journald`` daemon, and accessed
+by running the ``journalctl`` command.
+
+Sometimes ``klogd`` dies, in which case you can run ``dmesg > file`` to
+read the data from the kernel buffers and save it. Or you can
+``cat /proc/kmsg > file``; however, you have to interrupt it to stop the
+transfer, since ``kmsg`` is a "never ending file".
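+
+For instance (a sketch; the second command assumes a systemd-based system
+with a persistent journal)::
+
+    $ dmesg > oops.txt                # ring buffer of the running kernel
+    $ journalctl -k -b -1 > oops.txt  # kernel messages from the previous boot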
+
+If the machine has crashed so badly that you cannot enter commands or
+the disk is not available then you have three options:
+
+(1) Hand copy the text from the screen and type it in after the machine
+ has restarted. Messy but it is the only option if you have not
+ planned for a crash. Alternatively, you can take a picture of
+ the screen with a digital camera - not nice, but better than
+ nothing. If the messages scroll off the top of the console, you
+ may find that booting with a higher resolution (eg, ``vga=791``)
+ will allow you to read more of the text. (Caveat: This needs ``vesafb``,
+ so won't help for 'early' oopses)
+
+(2) Boot with a serial console (see
+ :ref:`Documentation/admin-guide/serial-console.rst <serial_console>`),
+ run a null modem to a second machine and capture the output there
+ using your favourite communication program. Minicom works well.
+
+(3) Use Kdump (see Documentation/kdump/kdump.txt), and
+ extract the kernel ring buffer from old memory using the dmesg
+ gdbmacro in Documentation/kdump/gdbmacros.txt.
+
+Finding the bug's location
+--------------------------
+
+Reporting a bug works best if you point out the location of the bug in the
+Kernel source file. There are two methods for doing that. Usually, using
+``gdb`` is easier, but the Kernel should be pre-compiled with debug info.
+
+gdb
+^^^
+
+The GNU debugger (``gdb``) is the best way to figure out the exact file and line
+number of the OOPS from the ``vmlinux`` file.
+
+The usage of gdb works best on a kernel compiled with ``CONFIG_DEBUG_INFO``.
+This can be set by running::
+
+ $ ./scripts/config -d COMPILE_TEST -e DEBUG_KERNEL -e DEBUG_INFO
+
+On a kernel compiled with ``CONFIG_DEBUG_INFO``, you can simply copy the
+EIP value from the OOPS::
+
+ EIP: 0060:[<c021e50e>] Not tainted VLI
+
+And use GDB to translate that to human-readable form::
+
+ $ gdb vmlinux
+ (gdb) l *0xc021e50e
+
+If you don't have ``CONFIG_DEBUG_INFO`` enabled, you can use the function
+offset from the OOPS::
+
+ EIP is at vt_ioctl+0xda8/0x1482
+
+And recompile the kernel with ``CONFIG_DEBUG_INFO`` enabled::
+
+ $ ./scripts/config -d COMPILE_TEST -e DEBUG_KERNEL -e DEBUG_INFO
+ $ make vmlinux
+ $ gdb vmlinux
+ (gdb) l *vt_ioctl+0xda8
+ 0x1888 is in vt_ioctl (drivers/tty/vt/vt_ioctl.c:293).
+ 288 {
+ 289 struct vc_data *vc = NULL;
+ 290 int ret = 0;
+ 291
+ 292 console_lock();
+ 293 if (VT_BUSY(vc_num))
+ 294 ret = -EBUSY;
+ 295 else if (vc_num)
+ 296 vc = vc_deallocate(vc_num);
+ 297 console_unlock();
+
+or, if you want to be more verbose::
+
+ (gdb) p vt_ioctl
+ $1 = {int (struct tty_struct *, unsigned int, unsigned long)} 0xae0 <vt_ioctl>
+ (gdb) l *0xae0+0xda8
+
+You could, instead, use the object file::
+
+ $ make drivers/tty/
+ $ gdb drivers/tty/vt/vt_ioctl.o
+ (gdb) l *vt_ioctl+0xda8
+
+If you have a call trace, such as::
+
+ Call Trace:
+ [<ffffffff8802c8e9>] :jbd:log_wait_commit+0xa3/0xf5
+ [<ffffffff810482d9>] autoremove_wake_function+0x0/0x2e
+ [<ffffffff8802770b>] :jbd:journal_stop+0x1be/0x1ee
+ ...
+
+this shows that the problem is likely in the :jbd: module. You can load that module
+in gdb and list the relevant code::
+
+ $ gdb fs/jbd/jbd.ko
+ (gdb) l *log_wait_commit+0xa3
+
+.. note::
+
+ You can also do the same for any function call at the stack trace,
+ like this one::
+
+ [<f80bc9ca>] ? dvb_usb_adapter_frontend_exit+0x3a/0x70 [dvb_usb]
+
+ The position where the above call happened can be seen with::
+
+ $ gdb drivers/media/usb/dvb-usb/dvb-usb.o
+ (gdb) l *dvb_usb_adapter_frontend_exit+0x3a
+
+objdump
+^^^^^^^
+
+To debug a kernel, use objdump and look for the hex offset from the crash
+output to find the valid line of code/assembler. Without debug symbols, you
+will see the assembler code for the routine shown, but if your kernel has
+debug symbols the C code will also be available. (Debug symbols can be enabled
+in the kernel hacking menu of the menu configuration.) For example::
+
+ $ objdump -r -S -l --disassemble net/dccp/ipv4.o
+
+.. note::
+
+ You need to be at the top level of the kernel tree for this to pick up
+ your C files.
+
+If you don't have access to the code you can also debug on some crash dumps
+e.g. crash dump output as shown by Dave Miller::
+
+ EIP is at +0x14/0x4c0
+ ...
+ Code: 44 24 04 e8 6f 05 00 00 e9 e8 fe ff ff 8d 76 00 8d bc 27 00 00
+ 00 00 55 57 56 53 81 ec bc 00 00 00 8b ac 24 d0 00 00 00 8b 5d 08
+ <8b> 83 3c 01 00 00 89 44 24 14 8b 45 28 85 c0 89 44 24 18 0f 85
+
+ Put the bytes into a "foo.s" file like this:
+
+ .text
+ .globl foo
+ foo:
+ .byte .... /* bytes from Code: part of OOPS dump */
+
+ Compile it with "gcc -c -o foo.o foo.s" then look at the output of
+ "objdump --disassemble foo.o".
+
+ Output:
+
+ ip_queue_xmit:
+ push %ebp
+ push %edi
+ push %esi
+ push %ebx
+ sub $0xbc, %esp
+ mov 0xd0(%esp), %ebp ! %ebp = arg0 (skb)
+ mov 0x8(%ebp), %ebx ! %ebx = skb->sk
+ mov 0x13c(%ebx), %eax ! %eax = inet_sk(sk)->opt
+
+Reporting the bug
+-----------------
+
+Once you find where the bug happened, by inspecting its location,
+you could either try to fix it yourself or report it upstream.
+
+In order to report it upstream, you should identify the mailing list
+used for the development of the affected code. This can be done by using
+the ``get_maintainer.pl`` script.
+
+For example, if you find a bug in the gspca driver's sonixj.c file, you can
+get its maintainers with::
+
+ $ ./scripts/get_maintainer.pl -f drivers/media/usb/gspca/sonixj.c
+ Hans Verkuil <hverkuil@xs4all.nl> (odd fixer:GSPCA USB WEBCAM DRIVER,commit_signer:1/1=100%)
+ Mauro Carvalho Chehab <mchehab@kernel.org> (maintainer:MEDIA INPUT INFRASTRUCTURE (V4L/DVB),commit_signer:1/1=100%)
+ Tejun Heo <tj@kernel.org> (commit_signer:1/1=100%)
+ Bhaktipriya Shridhar <bhaktipriya96@gmail.com> (commit_signer:1/1=100%,authored:1/1=100%,added_lines:4/4=100%,removed_lines:9/9=100%)
+ linux-media@vger.kernel.org (open list:GSPCA USB WEBCAM DRIVER)
+ linux-kernel@vger.kernel.org (open list)
+
+Please notice that it will point to:
+
+- The last developers that touched the source code. In the above example,
+ Tejun and Bhaktipriya (in this specific case, neither was really involved
+ in the development of this file);
+- The driver maintainer (Hans Verkuil);
+- The subsystem maintainer (Mauro Carvalho Chehab)
+- The driver and/or subsystem mailing list (linux-media@vger.kernel.org);
+- the Linux Kernel mailing list (linux-kernel@vger.kernel.org).
+
+Usually, the fastest way to have your bug fixed is to report it to the mailing
+list used for the development of the code (the linux-media ML), copying the
+driver maintainer (Hans).
+
+If you are totally stumped as to whom to send the report, and
+``get_maintainer.pl`` didn't provide you anything useful, send it to
+linux-kernel@vger.kernel.org.
+
+Thanks for your help in making Linux as stable as humanly possible.
+
+Fixing the bug
+--------------
+
+If you know programming, you could help us not only by reporting the bug,
+but also by providing us with a solution. After all, open source is about
+sharing what you do, and don't you want to be recognised for your genius?
+
+If you decide to take this way, once you have worked out a fix please submit
+it upstream.
+
+Please do read
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>` though
+to help your code get accepted.
+
+
+---------------------------------------------------------------------------
+
+Notes on Oops tracing with ``klogd``
+------------------------------------
+
+In order to help Linus and the other kernel developers there has been
+substantial support incorporated into ``klogd`` for processing protection
+faults. In order to have full support for address resolution at least
+version 1.3-pl3 of the ``sysklogd`` package should be used.
+
+When a protection fault occurs the ``klogd`` daemon automatically
+translates important addresses in the kernel log messages to their
+symbolic equivalents. This translated kernel message is then
+forwarded through whatever reporting mechanism ``klogd`` is using. The
+protection fault message can be simply cut out of the message files
+and forwarded to the kernel developers.
+
+Two types of address resolution are performed by ``klogd``. The first is
+static translation and the second is dynamic translation. Static
+translation uses the System.map file in much the same manner that
+ksymoops does. In order to do static translation the ``klogd`` daemon
+must be able to find a system map file at daemon initialization time.
+See the klogd man page for information on how ``klogd`` searches for map
+files.
+
+Dynamic address translation is important when kernel loadable modules
+are being used. Since memory for kernel modules is allocated from the
+kernel's dynamic memory pools there are no fixed locations for either
+the start of the module or for functions and symbols in the module.
+
+The kernel supports system calls which allow a program to determine
+which modules are loaded and their location in memory. Using these
+system calls the klogd daemon builds a symbol table which can be used
+to debug a protection fault which occurs in a loadable kernel module.
+
+At the very minimum klogd will provide the name of the module which
+generated the protection fault. There may be additional symbolic
+information available if the developer of the loadable module chose to
+export symbol information from the module.
+
+Since the kernel module environment can be dynamic there must be a
+mechanism for notifying the ``klogd`` daemon when a change in module
+environment occurs. There are command line options available which
+allow klogd to signal the currently executing daemon that symbol
+information should be refreshed. See the ``klogd`` manual page for more
+information.
+
+A patch is included with the sysklogd distribution which modifies the
+``modules-2.0.0`` package to automatically signal klogd whenever a module
+is loaded or unloaded. Applying this patch provides essentially
+seamless support for debugging protection faults which occur with
+kernel loadable modules.
+
+The following is an example of a protection fault in a loadable module
+processed by ``klogd``::
+
+ Aug 29 09:51:01 blizard kernel: Unable to handle kernel paging request at virtual address f15e97cc
+ Aug 29 09:51:01 blizard kernel: current->tss.cr3 = 0062d000, %cr3 = 0062d000
+ Aug 29 09:51:01 blizard kernel: *pde = 00000000
+ Aug 29 09:51:01 blizard kernel: Oops: 0002
+ Aug 29 09:51:01 blizard kernel: CPU: 0
+ Aug 29 09:51:01 blizard kernel: EIP: 0010:[oops:_oops+16/3868]
+ Aug 29 09:51:01 blizard kernel: EFLAGS: 00010212
+ Aug 29 09:51:01 blizard kernel: eax: 315e97cc ebx: 003a6f80 ecx: 001be77b edx: 00237c0c
+ Aug 29 09:51:01 blizard kernel: esi: 00000000 edi: bffffdb3 ebp: 00589f90 esp: 00589f8c
+ Aug 29 09:51:01 blizard kernel: ds: 0018 es: 0018 fs: 002b gs: 002b ss: 0018
+ Aug 29 09:51:01 blizard kernel: Process oops_test (pid: 3374, process nr: 21, stackpage=00589000)
+ Aug 29 09:51:01 blizard kernel: Stack: 315e97cc 00589f98 0100b0b4 bffffed4 0012e38e 00240c64 003a6f80 00000001
+ Aug 29 09:51:01 blizard kernel: 00000000 00237810 bfffff00 0010a7fa 00000003 00000001 00000000 bfffff00
+ Aug 29 09:51:01 blizard kernel: bffffdb3 bffffed4 ffffffda 0000002b 0007002b 0000002b 0000002b 00000036
+ Aug 29 09:51:01 blizard kernel: Call Trace: [oops:_oops_ioctl+48/80] [_sys_ioctl+254/272] [_system_call+82/128]
+ Aug 29 09:51:01 blizard kernel: Code: c7 00 05 00 00 00 eb 08 90 90 90 90 90 90 90 90 89 ec 5d c3
+
+---------------------------------------------------------------------------
+
+::
+
+ Dr. G.W. Wettstein Oncology Research Div. Computing Facility
+ Roger Maris Cancer Center INTERNET: greg@wind.rmcc.com
+ 820 4th St. N.
+ Fargo, ND 58122
+ Phone: 701-234-7556
diff --git a/Documentation/admin-guide/conf.py b/Documentation/admin-guide/conf.py
new file mode 100644
index 000000000000..86f738953799
--- /dev/null
+++ b/Documentation/admin-guide/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Kernel User Documentation'
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'linux-user.tex', 'Linux Kernel User Documentation',
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/admin-guide/devices.rst b/Documentation/admin-guide/devices.rst
new file mode 100644
index 000000000000..7fadc05330dd
--- /dev/null
+++ b/Documentation/admin-guide/devices.rst
@@ -0,0 +1,268 @@
+
+Linux allocated devices (4.x+ version)
+======================================
+
+This list is the Linux Device List, the official registry of allocated
+device numbers and ``/dev`` directory nodes for the Linux operating
+system.
+
+The LaTeX version of this document is no longer maintained, nor is
+the document that used to reside at lanana.org. This version in the
+mainline Linux kernel is the master document. Updates shall be sent
+as patches to the kernel maintainers (see the
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>` document).
+Specifically explore the sections titled "CHAR and MISC DRIVERS", and
+"BLOCK LAYER" in the MAINTAINERS file to find the right maintainers
+to involve for character and block devices.
+
+This document is included by reference into the Filesystem Hierarchy
+Standard (FHS). The FHS is available from http://www.pathname.com/fhs/.
+
+Allocations marked (68k/Amiga) apply to Linux/68k on the Amiga
+platform only. Allocations marked (68k/Atari) apply to Linux/68k on
+the Atari platform only.
+
+This document is in the public domain. The authors request, however,
+that semantically altered versions not be distributed without
+permission of the authors, assuming the authors can be contacted without
+an unreasonable effort.
+
+
+.. attention::
+
+ DEVICE DRIVERS AUTHORS PLEASE READ THIS
+
+ Linux now has extensive support for dynamic allocation of device numbering
+ and can use ``sysfs`` and ``udev`` (``systemd``) to handle the naming needs.
+ There are still some exceptions in the serial and boot device area. Before
+ asking for a device number make sure you actually need one.
+
+ To have a major number allocated, or a minor number in situations
+ where that applies (e.g. busmice), please submit a patch and send to
+ the authors as indicated above.
+
+ Keep the description of the device *in the same format
+ as this list*. The reason for this is that it is the only way we have
+ found to ensure we have all the requisite information to publish your
+ device and avoid conflicts.
+
+ Finally, sometimes we have to play "namespace police." Please don't be
+ offended. We often get submissions for ``/dev`` names that would be bound
+ to cause conflicts down the road. We are trying to avoid getting in a
+ situation where we would have to suffer an incompatible forward
+ change. Therefore, please consult with us **before** you make your
+ device names and numbers in any way public, at least to the point
+ where it would be at all difficult to get them changed.
+
+ Your cooperation is appreciated.
+
+.. include:: devices.txt
+ :literal:
+
+Additional ``/dev/`` directory entries
+--------------------------------------
+
+This section details additional entries that should or may exist in
+the /dev directory. It is preferred that symbolic links use the same
+form (absolute or relative) as is indicated here. Links are
+classified as "hard" or "symbolic" depending on the preferred type of
+link; if possible, the indicated type of link should be used.
+
+Compulsory links
+++++++++++++++++
+
+These links should exist on all systems:
+
+=============== =============== =============== ===============================
+/dev/fd /proc/self/fd symbolic File descriptors
+/dev/stdin fd/0 symbolic stdin file descriptor
+/dev/stdout fd/1 symbolic stdout file descriptor
+/dev/stderr fd/2 symbolic stderr file descriptor
+/dev/nfsd socksys symbolic Required by iBCS-2
+/dev/X0R null symbolic Required by iBCS-2
+=============== =============== =============== ===============================
+
+Note: ``/dev/X0R`` is <letter X>-<digit 0>-<letter R>.
+
+Recommended links
++++++++++++++++++
+
+It is recommended that these links exist on all systems:
+
+
+=============== =============== =============== ===============================
+/dev/core       /proc/kcore     symbolic        Backward compatibility
+/dev/ramdisk    ram0            symbolic        Backward compatibility
+/dev/ftape      qft0            symbolic        Backward compatibility
+/dev/bttv0      video0          symbolic        Backward compatibility
+/dev/radio      radio0          symbolic        Backward compatibility
+/dev/i2o*       /dev/i2o/*      symbolic        Backward compatibility
+/dev/scd?       sr?             hard            Alternate SCSI CD-ROM name
+=============== =============== =============== ===============================
+
+Locally defined links
++++++++++++++++++++++
+
+The following links may be established locally to conform to the
+configuration of the system. This is merely a tabulation of existing
+practice, and does not constitute a recommendation. However, if they
+exist, they should have the following uses.
+
+=============== =============== =============== ===============================
+/dev/mouse      mouse port      symbolic        Current mouse device
+/dev/tape       tape device     symbolic        Current tape device
+/dev/cdrom      CD-ROM device   symbolic        Current CD-ROM device
+/dev/cdwriter   CD-writer       symbolic        Current CD-writer device
+/dev/scanner    scanner         symbolic        Current scanner device
+/dev/modem      modem port      symbolic        Current dialout device
+/dev/root       root device     symbolic        Current root filesystem
+/dev/swap       swap device     symbolic        Current swap device
+=============== =============== =============== ===============================
+
+``/dev/modem`` should not be used for a modem which supports dialin as
+well as dialout, as it tends to cause lock file problems. If it
+exists, ``/dev/modem`` should point to the appropriate primary TTY device
+(the use of the alternate callout devices is deprecated).
+
+For SCSI devices, ``/dev/tape`` and ``/dev/cdrom`` should point to the
+*cooked* devices (``/dev/st*`` and ``/dev/sr*``, respectively), whereas
+``/dev/cdwriter`` and ``/dev/scanner`` should point to the appropriate generic
+SCSI devices (``/dev/sg*``).
+
+``/dev/mouse`` may point to a primary serial TTY device, a hardware mouse
+device, or a socket for a mouse driver program (e.g. ``/dev/gpmdata``).
+
+Sockets and pipes
++++++++++++++++++
+
+Non-transient sockets and named pipes may exist in /dev. Common entries are:
+
+=============== =============== ===============================================
+/dev/printer    socket          lpd local socket
+/dev/log        socket          syslog local socket
+/dev/gpmdata    socket          gpm mouse multiplexer
+=============== =============== ===============================================
+
+Mount points
+++++++++++++
+
+The following names are reserved for mounting special filesystems
+under /dev. These special filesystems provide kernel interfaces that
+cannot be provided with standard device nodes.
+
+=============== =============== ===============================================
+/dev/pts        devpts          PTY slave filesystem
+/dev/shm        tmpfs           POSIX shared memory maintenance access
+=============== =============== ===============================================
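+
+As an illustration only, a minimal sketch of mounting the ``devpts``
+filesystem from user space is shown below; the option string used here
+(``gid=5,mode=620``) is a common convention, not something mandated by
+this document::
+
+    /* Hypothetical sketch: mount devpts on /dev/pts (requires root). */
+    #include <stdio.h>
+    #include <sys/mount.h>
+
+    int main(void)
+    {
+            if (mount("devpts", "/dev/pts", "devpts", 0,
+                      "gid=5,mode=620") < 0) {
+                    perror("mount /dev/pts");
+                    return 1;
+            }
+            return 0;
+    }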
+
+Terminal devices
+----------------
+
+Terminal, or TTY devices are a special class of character devices. A
+terminal device is any device that could act as a controlling terminal
+for a session; this includes virtual consoles, serial ports, and
+pseudoterminals (PTYs).
+
+All terminal devices share a common set of capabilities known as line
+disciplines; these include the common terminal line discipline as well
+as SLIP and PPP modes.
+
+All terminal devices are named similarly; this section explains the
+naming and use of the various types of TTYs. Note that the naming
+conventions include several historical warts; some of these are
+Linux-specific, some were inherited from other systems, and some
+reflect Linux outgrowing a borrowed convention.
+
+A hash mark (``#``) in a device name is used here to indicate a decimal
+number without leading zeroes.
+
+Virtual consoles and the console device
++++++++++++++++++++++++++++++++++++++++
+
+Virtual consoles are full-screen terminal displays on the system video
+monitor. Virtual consoles are named ``/dev/tty#``, with numbering
+starting at ``/dev/tty1``; ``/dev/tty0`` is the current virtual console.
+``/dev/tty0`` is the device that should be used to access the system video
+card on those architectures for which the frame buffer devices
+(``/dev/fb*``) are not applicable. Do not use ``/dev/console``
+for this purpose.
+
+The console device, ``/dev/console``, is the device to which system
+messages should be sent, and on which logins should be permitted in
+single-user mode. Starting with Linux 2.1.71, ``/dev/console`` is managed
+by the kernel; for previous versions it should be a symbolic link to
+either ``/dev/tty0``, a specific virtual console such as ``/dev/tty1``, or to
+a serial port primary (``tty*``, not ``cu*``) device, depending on the
+configuration of the system.
+
+Serial ports
+++++++++++++
+
+Serial ports are RS-232 serial ports and any device which simulates
+one, either in hardware (such as internal modems) or in software (such
+as the ISDN driver). Under Linux, each serial port has two device
+names, the primary or callin device and the alternate or callout one.
+Each kind of device is indicated by a different letter. For any
+letter X, the names of the devices are ``/dev/ttyX#`` and ``/dev/cux#``,
+respectively; for historical reasons, ``/dev/ttyS#`` and ``/dev/ttyC#``
+correspond to ``/dev/cua#`` and ``/dev/cub#``. In the future, it should be
+expected that multiple letters will be used; all letters will be upper
+case for the "tty" device (e.g. ``/dev/ttyDP#``) and lower case for the
+"cu" device (e.g. ``/dev/cudp#``).
+
+The names ``/dev/ttyQ#`` and ``/dev/cuq#`` are reserved for local use.
+
+The alternate devices provide for kernel-based exclusion and somewhat
+different defaults than the primary devices. Their main purpose is to
+allow the use of serial ports with programs whose support for serial
+ports is missing or broken. Their use is deprecated, and they may be
+removed from a future version of Linux.
+
+Arbitration of serial ports is provided by the use of lock files with
+the names ``/var/lock/LCK..ttyX#``. The contents of the lock file should
+be the PID of the locking process as an ASCII number.
+
+It is common practice to install links such as ``/dev/modem``
+which point to serial ports. In order to ensure proper locking in the
+presence of these links, it is recommended that software chase
+symlinks and lock all possible names; additionally, it is recommended
+that a lock file be installed with the corresponding alternate
+device. In order to avoid deadlocks, it is recommended that the locks
+be acquired in the following order, and released in the reverse order:
+
+ 1. The symbolic link name, if any (``/var/lock/LCK..modem``)
+ 2. The "tty" name (``/var/lock/LCK..ttyS2``)
+ 3. The alternate device name (``/var/lock/LCK..cua2``)
+
+In the case of nested symbolic links, the lock files should be
+installed in the order the symlinks are resolved.
+
+Under no circumstances should an application hold a lock while waiting
+for another to be released. In addition, applications which attempt
+to create lock files for the corresponding alternate device names
+should take into account the possibility of being used on a non-serial
+port TTY, for which no alternate device would exist.
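+
+As a minimal sketch of the convention described above, creating a lock
+file with the PID stored as ASCII text might look like the following;
+real dial-out software additionally detects and removes stale locks::
+
+    /* Hypothetical sketch: create /var/lock/LCK..<name> holding our PID.
+     * O_EXCL makes creation fail if another process already holds it. */
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    static int lock_tty(const char *name)        /* e.g. "ttyS2" */
+    {
+            char path[128], buf[16];
+            int fd, len;
+
+            snprintf(path, sizeof(path), "/var/lock/LCK..%s", name);
+            fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0644);
+            if (fd < 0)
+                    return -1;                   /* already locked, or error */
+            len = snprintf(buf, sizeof(buf), "%10d\n", (int)getpid());
+            if (write(fd, buf, len) != len)
+                    perror(path);
+            close(fd);
+            return 0;
+    }
+
+A caller following the ordering above would invoke this first for the
+symbolic link name (if any), then for the "tty" name, and finally for the
+alternate device name, releasing the locks in the reverse order.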
+
+Pseudoterminals (PTYs)
+++++++++++++++++++++++
+
+Pseudoterminals, or PTYs, are used to create login sessions or provide
+other capabilities requiring a TTY line discipline (including SLIP or
+PPP capability) to arbitrary data-generation processes. Each PTY has
+a master side, named ``/dev/pty[p-za-e][0-9a-f]``, and a slave side, named
+``/dev/tty[p-za-e][0-9a-f]``. The kernel arbitrates the use of PTYs by
+allowing each master side to be opened only once.
+
+Once the master side has been opened, the corresponding slave device
+can be used in the same manner as any TTY device. The master and
+slave devices are connected by the kernel, generating the equivalent
+of a bidirectional pipe with TTY capabilities.
+
+Recent versions of the Linux kernel and GNU libc contain support for
+the System V/Unix98 naming scheme for PTYs, which assigns a common
+device, ``/dev/ptmx``, to all the masters (opening it will automatically
+give you a previously unassigned PTY) and a subdirectory, ``/dev/pts``,
+for the slaves; the slaves are named with decimal integers (``/dev/pts/#``
+in our notation). This removes the problem of exhausting the
+namespace and enables the kernel to automatically create the device
+nodes for the slaves on demand using the "devpts" filesystem.
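+
+A minimal sketch of allocating a Unix98 PTY through ``/dev/ptmx`` using
+the standard C library helpers is shown below; it only prints the name of
+the slave device that the kernel created under ``/dev/pts``::
+
+    /* Hypothetical sketch: open /dev/ptmx and report the slave PTY name. */
+    #define _XOPEN_SOURCE 600
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    int main(void)
+    {
+            int master = posix_openpt(O_RDWR | O_NOCTTY);
+            const char *slave;
+
+            if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) {
+                    perror("/dev/ptmx");
+                    return 1;
+            }
+            slave = ptsname(master);             /* e.g. "/dev/pts/3" */
+            printf("slave side: %s\n", slave ? slave : "(unknown)");
+            return 0;
+    }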
diff --git a/Documentation/devices.txt b/Documentation/admin-guide/devices.txt
index 4035eca87144..c9cea2e39c21 100644
--- a/Documentation/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -1,63 +1,8 @@
-
- LINUX ALLOCATED DEVICES (4.x+ version)
-
-This list is the Linux Device List, the official registry of allocated
-device numbers and /dev directory nodes for the Linux operating
-system.
-
-The LaTeX version of this document is no longer maintained, nor is
-the document that used to reside at lanana.org. This version in the
-mainline Linux kernel is the master document. Updates shall be sent
-as patches to the kernel maintainers (see the SubmittingPatches document).
-Specifically explore the sections titled "CHAR and MISC DRIVERS", and
-"BLOCK LAYER" in the MAINTAINERS file to find the right maintainers
-to involve for character and block devices.
-
-This document is included by reference into the Filesystem Hierarchy
-Standard (FHS). The FHS is available from http://www.pathname.com/fhs/.
-
-Allocations marked (68k/Amiga) apply to Linux/68k on the Amiga
-platform only. Allocations marked (68k/Atari) apply to Linux/68k on
-the Atari platform only.
-
-This document is in the public domain. The authors requests, however,
-that semantically altered versions are not distributed without
-permission of the authors, assuming the authors can be contacted without
-an unreasonable effort.
-
-
- **** DEVICE DRIVERS AUTHORS PLEASE READ THIS ****
-
-Linux now has extensive support for dynamic allocation of device numbering
-and can use sysfs and udev (systemd) to handle the naming needs. There are
-still some exceptions in the serial and boot device area. Before asking
-for a device number make sure you actually need one.
-
-To have a major number allocated, or a minor number in situations
-where that applies (e.g. busmice), please submit a patch and send to
-the authors as indicated above.
-
-Keep the description of the device *in the same format
-as this list*. The reason for this is that it is the only way we have
-found to ensure we have all the requisite information to publish your
-device and avoid conflicts.
-
-Finally, sometimes we have to play "namespace police." Please don't be
-offended. We often get submissions for /dev names that would be bound
-to cause conflicts down the road. We are trying to avoid getting in a
-situation where we would have to suffer an incompatible forward
-change. Therefore, please consult with us *before* you make your
-device names and numbers in any way public, at least to the point
-where it would be at all difficult to get them changed.
-
-Your cooperation is appreciated.
-
-
- 0 Unnamed devices (e.g. non-device mounts)
+ 0 Unnamed devices (e.g. non-device mounts)
0 = reserved as null device number
See block major 144, 145, 146 for expansion areas.
- 1 char Memory devices
+ 1 char Memory devices
1 = /dev/mem Physical memory access
2 = /dev/kmem Kernel virtual memory access
3 = /dev/null Null device
@@ -72,7 +17,7 @@ Your cooperation is appreciated.
export the buffered printk records.
12 = /dev/oldmem OBSOLETE - replaced by /proc/vmcore
- 1 block RAM disk
+ 1 block RAM disk
0 = /dev/ram0 First RAM disk
1 = /dev/ram1 Second RAM disk
...
@@ -83,7 +28,7 @@ Your cooperation is appreciated.
by the boot loader; newer kernels use /dev/ram0 for
the initrd.
- 2 char Pseudo-TTY masters
+ 2 char Pseudo-TTY masters
0 = /dev/ptyp0 First PTY master
1 = /dev/ptyp1 Second PTY master
...
@@ -101,7 +46,7 @@ Your cooperation is appreciated.
master multiplex (/dev/ptmx) to acquire a PTY on
demand.
- 2 block Floppy disks
+ 2 block Floppy disks
0 = /dev/fd0 Controller 0, drive 0, autodetect
1 = /dev/fd1 Controller 0, drive 1, autodetect
2 = /dev/fd2 Controller 0, drive 2, autodetect
@@ -158,7 +103,7 @@ Your cooperation is appreciated.
and E for the 3.5" models have been deprecated, since
the drive type is insignificant for these devices.
- 3 char Pseudo-TTY slaves
+ 3 char Pseudo-TTY slaves
0 = /dev/ttyp0 First PTY slave
1 = /dev/ttyp1 Second PTY slave
...
@@ -167,7 +112,7 @@ Your cooperation is appreciated.
These are the old-style (BSD) PTY devices; Unix98
devices are on major 136 and above.
- 3 block First MFM, RLL and IDE hard disk/CD-ROM interface
+ 3 block First MFM, RLL and IDE hard disk/CD-ROM interface
0 = /dev/hda Master: whole disk (or CD-ROM)
64 = /dev/hdb Slave: whole disk (or CD-ROM)
@@ -183,7 +128,7 @@ Your cooperation is appreciated.
Other versions of Linux use partitioning schemes
appropriate to their respective architectures.
- 4 char TTY devices
+ 4 char TTY devices
0 = /dev/tty0 Current virtual console
1 = /dev/tty1 First virtual console
@@ -199,13 +144,13 @@ Your cooperation is appreciated.
number for BSD PTY devices. As of Linux 2.1.115, this
is no longer supported. Use major numbers 2 and 3.
- 4 block Aliases for dynamically allocated major devices to be used
+ 4 block Aliases for dynamically allocated major devices to be used
when its not possible to create the real device nodes
because the root filesystem is mounted read-only.
- 0 = /dev/root
+ 0 = /dev/root
- 5 char Alternate TTY devices
+ 5 char Alternate TTY devices
0 = /dev/tty Current TTY device
1 = /dev/console System console
2 = /dev/ptmx PTY master multiplex
@@ -218,7 +163,7 @@ Your cooperation is appreciated.
the section on terminal devices for more information
on /dev/console.
- 6 char Parallel printer devices
+ 6 char Parallel printer devices
0 = /dev/lp0 Parallel printer on parport0
1 = /dev/lp1 Parallel printer on parport1
...
@@ -227,7 +172,7 @@ Your cooperation is appreciated.
between parallel ports and I/O addresses. Instead,
they are redirected through the parport multiplex layer.
- 7 char Virtual console capture devices
+ 7 char Virtual console capture devices
0 = /dev/vcs Current vc text contents
1 = /dev/vcs1 tty1 text contents
...
@@ -239,7 +184,7 @@ Your cooperation is appreciated.
NOTE: These devices permit both read and write access.
- 7 block Loopback devices
+ 7 block Loopback devices
0 = /dev/loop0 First loop device
1 = /dev/loop1 Second loop device
...
@@ -248,7 +193,7 @@ Your cooperation is appreciated.
associated with block devices. The binding to the
loop devices is handled by mount(8) or losetup(8).
- 8 block SCSI disk devices (0-15)
+ 8 block SCSI disk devices (0-15)
0 = /dev/sda First SCSI disk whole disk
16 = /dev/sdb Second SCSI disk whole disk
32 = /dev/sdc Third SCSI disk whole disk
@@ -259,7 +204,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 9 char SCSI tape devices
+ 9 char SCSI tape devices
0 = /dev/st0 First SCSI tape, mode 0
1 = /dev/st1 Second SCSI tape, mode 0
...
@@ -290,7 +235,7 @@ Your cooperation is appreciated.
ioctl()'s can be used to rewind the tape regardless of
the device used to access it.
- 9 block Metadisk (RAID) devices
+ 9 block Metadisk (RAID) devices
0 = /dev/md0 First metadisk group
1 = /dev/md1 Second metadisk group
...
@@ -298,7 +243,7 @@ Your cooperation is appreciated.
The metadisk driver is used to span a
filesystem across multiple physical disks.
- 10 char Non-serial mice, misc features
+ 10 char Non-serial mice, misc features
0 = /dev/logibm Logitech bus mouse
1 = /dev/psaux PS/2-style mouse port
2 = /dev/inportbm Microsoft Inport bus mouse
@@ -428,22 +373,22 @@ Your cooperation is appreciated.
240-254 Reserved for local use
255 Reserved for MISC_DYNAMIC_MINOR
- 11 char Raw keyboard device (Linux/SPARC only)
+ 11 char Raw keyboard device (Linux/SPARC only)
0 = /dev/kbd Raw keyboard device
- 11 char Serial Mux device (Linux/PA-RISC only)
+ 11 char Serial Mux device (Linux/PA-RISC only)
0 = /dev/ttyB0 First mux port
1 = /dev/ttyB1 Second mux port
...
- 11 block SCSI CD-ROM devices
+ 11 block SCSI CD-ROM devices
0 = /dev/scd0 First SCSI CD-ROM
1 = /dev/scd1 Second SCSI CD-ROM
...
The prefix /dev/sr (instead of /dev/scd) has been deprecated.
- 12 char QIC-02 tape
+ 12 char QIC-02 tape
2 = /dev/ntpqic11 QIC-11, no rewind-on-close
3 = /dev/tpqic11 QIC-11, rewind-on-close
4 = /dev/ntpqic24 QIC-24, no rewind-on-close
@@ -456,9 +401,9 @@ Your cooperation is appreciated.
The device names specified are proposed -- if there
are "standard" names for these devices, please let me know.
- 12 block
+ 12 block
- 13 char Input core
+ 13 char Input core
0 = /dev/input/js0 First joystick
1 = /dev/input/js1 Second joystick
...
@@ -472,10 +417,10 @@ Your cooperation is appreciated.
Each device type has 5 bits (32 minors).
- 13 block Previously used for the XT disk (/dev/xdN)
+ 13 block Previously used for the XT disk (/dev/xdN)
Deleted in kernel v3.9.
- 14 char Open Sound System (OSS)
+ 14 char Open Sound System (OSS)
0 = /dev/mixer Mixer control
1 = /dev/sequencer Audio sequencer
2 = /dev/midi00 First MIDI port
@@ -493,44 +438,44 @@ Your cooperation is appreciated.
34 = /dev/midi02 Third MIDI port
50 = /dev/midi03 Fourth MIDI port
- 14 block
+ 14 block
- 15 char Joystick
+ 15 char Joystick
0 = /dev/js0 First analog joystick
1 = /dev/js1 Second analog joystick
...
128 = /dev/djs0 First digital joystick
129 = /dev/djs1 Second digital joystick
...
- 15 block Sony CDU-31A/CDU-33A CD-ROM
+ 15 block Sony CDU-31A/CDU-33A CD-ROM
0 = /dev/sonycd Sony CDU-31a CD-ROM
- 16 char Non-SCSI scanners
+ 16 char Non-SCSI scanners
0 = /dev/gs4500 Genius 4500 handheld scanner
- 16 block GoldStar CD-ROM
+ 16 block GoldStar CD-ROM
0 = /dev/gscd GoldStar CD-ROM
- 17 char OBSOLETE (was Chase serial card)
+ 17 char OBSOLETE (was Chase serial card)
0 = /dev/ttyH0 First Chase port
1 = /dev/ttyH1 Second Chase port
...
- 17 block Optics Storage CD-ROM
+ 17 block Optics Storage CD-ROM
0 = /dev/optcd Optics Storage CD-ROM
- 18 char OBSOLETE (was Chase serial card - alternate devices)
+ 18 char OBSOLETE (was Chase serial card - alternate devices)
0 = /dev/cuh0 Callout device for ttyH0
1 = /dev/cuh1 Callout device for ttyH1
...
- 18 block Sanyo CD-ROM
+ 18 block Sanyo CD-ROM
0 = /dev/sjcd Sanyo CD-ROM
- 19 char Cyclades serial card
+ 19 char Cyclades serial card
0 = /dev/ttyC0 First Cyclades port
...
31 = /dev/ttyC31 32nd Cyclades port
- 19 block "Double" compressed disk
+ 19 block "Double" compressed disk
0 = /dev/double0 First compressed disk
...
7 = /dev/double7 Eighth compressed disk
@@ -541,15 +486,15 @@ Your cooperation is appreciated.
See the Double documentation for the meaning of the
mirror devices.
- 20 char Cyclades serial card - alternate devices
+ 20 char Cyclades serial card - alternate devices
0 = /dev/cub0 Callout device for ttyC0
...
31 = /dev/cub31 Callout device for ttyC31
- 20 block Hitachi CD-ROM (under development)
+ 20 block Hitachi CD-ROM (under development)
0 = /dev/hitcd Hitachi CD-ROM
- 21 char Generic SCSI access
+ 21 char Generic SCSI access
0 = /dev/sg0 First generic SCSI device
1 = /dev/sg1 Second generic SCSI device
...
@@ -559,7 +504,7 @@ Your cooperation is appreciated.
the system and is counter to standard Linux
device-naming practice.
- 21 block Acorn MFM hard drive interface
+ 21 block Acorn MFM hard drive interface
0 = /dev/mfma First MFM drive whole disk
64 = /dev/mfmb Second MFM drive whole disk
@@ -567,25 +512,25 @@ Your cooperation is appreciated.
Partitions are handled the same way as for IDE disks
(see major number 3).
- 22 char Digiboard serial card
+ 22 char Digiboard serial card
0 = /dev/ttyD0 First Digiboard port
1 = /dev/ttyD1 Second Digiboard port
...
- 22 block Second IDE hard disk/CD-ROM interface
+ 22 block Second IDE hard disk/CD-ROM interface
0 = /dev/hdc Master: whole disk (or CD-ROM)
64 = /dev/hdd Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 23 char Digiboard serial card - alternate devices
+ 23 char Digiboard serial card - alternate devices
0 = /dev/cud0 Callout device for ttyD0
1 = /dev/cud1 Callout device for ttyD1
...
- 23 block Mitsumi proprietary CD-ROM
+ 23 block Mitsumi proprietary CD-ROM
0 = /dev/mcd Mitsumi CD-ROM
- 24 char Stallion serial card
+ 24 char Stallion serial card
0 = /dev/ttyE0 Stallion port 0 card 0
1 = /dev/ttyE1 Stallion port 1 card 0
...
@@ -598,10 +543,10 @@ Your cooperation is appreciated.
192 = /dev/ttyE192 Stallion port 0 card 3
193 = /dev/ttyE193 Stallion port 1 card 3
...
- 24 block Sony CDU-535 CD-ROM
+ 24 block Sony CDU-535 CD-ROM
0 = /dev/cdu535 Sony CDU-535 CD-ROM
- 25 char Stallion serial card - alternate devices
+ 25 char Stallion serial card - alternate devices
0 = /dev/cue0 Callout device for ttyE0
1 = /dev/cue1 Callout device for ttyE1
...
@@ -614,21 +559,21 @@ Your cooperation is appreciated.
192 = /dev/cue192 Callout device for ttyE192
193 = /dev/cue193 Callout device for ttyE193
...
- 25 block First Matsushita (Panasonic/SoundBlaster) CD-ROM
+ 25 block First Matsushita (Panasonic/SoundBlaster) CD-ROM
0 = /dev/sbpcd0 Panasonic CD-ROM controller 0 unit 0
1 = /dev/sbpcd1 Panasonic CD-ROM controller 0 unit 1
2 = /dev/sbpcd2 Panasonic CD-ROM controller 0 unit 2
3 = /dev/sbpcd3 Panasonic CD-ROM controller 0 unit 3
- 26 char
+ 26 char
- 26 block Second Matsushita (Panasonic/SoundBlaster) CD-ROM
+ 26 block Second Matsushita (Panasonic/SoundBlaster) CD-ROM
0 = /dev/sbpcd4 Panasonic CD-ROM controller 1 unit 0
1 = /dev/sbpcd5 Panasonic CD-ROM controller 1 unit 1
2 = /dev/sbpcd6 Panasonic CD-ROM controller 1 unit 2
3 = /dev/sbpcd7 Panasonic CD-ROM controller 1 unit 3
- 27 char QIC-117 tape
+ 27 char QIC-117 tape
0 = /dev/qft0 Unit 0, rewind-on-close
1 = /dev/qft1 Unit 1, rewind-on-close
2 = /dev/qft2 Unit 2, rewind-on-close
@@ -654,29 +599,29 @@ Your cooperation is appreciated.
38 = /dev/nrawqft2 Unit 2, no rewind-on-close, no file marks
39 = /dev/nrawqft3 Unit 3, no rewind-on-close, no file marks
- 27 block Third Matsushita (Panasonic/SoundBlaster) CD-ROM
+ 27 block Third Matsushita (Panasonic/SoundBlaster) CD-ROM
0 = /dev/sbpcd8 Panasonic CD-ROM controller 2 unit 0
1 = /dev/sbpcd9 Panasonic CD-ROM controller 2 unit 1
2 = /dev/sbpcd10 Panasonic CD-ROM controller 2 unit 2
3 = /dev/sbpcd11 Panasonic CD-ROM controller 2 unit 3
- 28 char Stallion serial card - card programming
+ 28 char Stallion serial card - card programming
0 = /dev/staliomem0 First Stallion card I/O memory
1 = /dev/staliomem1 Second Stallion card I/O memory
2 = /dev/staliomem2 Third Stallion card I/O memory
3 = /dev/staliomem3 Fourth Stallion card I/O memory
- 28 char Atari SLM ACSI laser printer (68k/Atari)
+ 28 char Atari SLM ACSI laser printer (68k/Atari)
0 = /dev/slm0 First SLM laser printer
1 = /dev/slm1 Second SLM laser printer
...
- 28 block Fourth Matsushita (Panasonic/SoundBlaster) CD-ROM
+ 28 block Fourth Matsushita (Panasonic/SoundBlaster) CD-ROM
0 = /dev/sbpcd12 Panasonic CD-ROM controller 3 unit 0
1 = /dev/sbpcd13 Panasonic CD-ROM controller 3 unit 1
2 = /dev/sbpcd14 Panasonic CD-ROM controller 3 unit 2
3 = /dev/sbpcd15 Panasonic CD-ROM controller 3 unit 3
- 28 block ACSI disk (68k/Atari)
+ 28 block ACSI disk (68k/Atari)
0 = /dev/ada First ACSI disk whole disk
16 = /dev/adb Second ACSI disk whole disk
32 = /dev/adc Third ACSI disk whole disk
@@ -687,16 +632,16 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15, like SCSI.
- 29 char Universal frame buffer
+ 29 char Universal frame buffer
0 = /dev/fb0 First frame buffer
1 = /dev/fb1 Second frame buffer
...
31 = /dev/fb31 32nd frame buffer
- 29 block Aztech/Orchid/Okano/Wearnes CD-ROM
+ 29 block Aztech/Orchid/Okano/Wearnes CD-ROM
0 = /dev/aztcd Aztech CD-ROM
- 30 char iBCS-2 compatibility devices
+ 30 char iBCS-2 compatibility devices
0 = /dev/socksys Socket access
1 = /dev/spx SVR3 local X interface
32 = /dev/inet/ip Network access
@@ -727,17 +672,17 @@ Your cooperation is appreciated.
/dev/nfsd -> /dev/socksys
/dev/X0R -> /dev/null (? apparently not required ?)
- 30 block Philips LMS CM-205 CD-ROM
+ 30 block Philips LMS CM-205 CD-ROM
0 = /dev/cm205cd Philips LMS CM-205 CD-ROM
/dev/lmscd is an older name for this device. This
driver does not work with the CM-205MS CD-ROM.
- 31 char MPU-401 MIDI
+ 31 char MPU-401 MIDI
0 = /dev/mpu401data MPU-401 data port
1 = /dev/mpu401stat MPU-401 status port
- 31 block ROM/flash memory card
+ 31 block ROM/flash memory card
0 = /dev/rom0 First ROM card (rw)
...
7 = /dev/rom7 Eighth ROM card (rw)
@@ -756,25 +701,25 @@ Your cooperation is appreciated.
devices. The read-only devices (ro) support reading
only.
- 32 char Specialix serial card
+ 32 char Specialix serial card
0 = /dev/ttyX0 First Specialix port
1 = /dev/ttyX1 Second Specialix port
...
- 32 block Philips LMS CM-206 CD-ROM
+ 32 block Philips LMS CM-206 CD-ROM
0 = /dev/cm206cd Philips LMS CM-206 CD-ROM
- 33 char Specialix serial card - alternate devices
+ 33 char Specialix serial card - alternate devices
0 = /dev/cux0 Callout device for ttyX0
1 = /dev/cux1 Callout device for ttyX1
...
- 33 block Third IDE hard disk/CD-ROM interface
+ 33 block Third IDE hard disk/CD-ROM interface
0 = /dev/hde Master: whole disk (or CD-ROM)
64 = /dev/hdf Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 34 char Z8530 HDLC driver
+ 34 char Z8530 HDLC driver
0 = /dev/scc0 First Z8530, first port
1 = /dev/scc1 First Z8530, second port
2 = /dev/scc2 Second Z8530, first port
@@ -785,14 +730,14 @@ Your cooperation is appreciated.
/dev/sc1 for /dev/scc0, /dev/sc2 for /dev/scc1, and so
on.
- 34 block Fourth IDE hard disk/CD-ROM interface
+ 34 block Fourth IDE hard disk/CD-ROM interface
0 = /dev/hdg Master: whole disk (or CD-ROM)
64 = /dev/hdh Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 35 char tclmidi MIDI driver
+ 35 char tclmidi MIDI driver
0 = /dev/midi0 First MIDI port, kernel timed
1 = /dev/midi1 Second MIDI port, kernel timed
2 = /dev/midi2 Third MIDI port, kernel timed
@@ -806,10 +751,10 @@ Your cooperation is appreciated.
130 = /dev/smpte2 Third MIDI port, SMPTE timed
131 = /dev/smpte3 Fourth MIDI port, SMPTE timed
- 35 block Slow memory ramdisk
+ 35 block Slow memory ramdisk
0 = /dev/slram Slow memory ramdisk
- 36 char Netlink support
+ 36 char Netlink support
0 = /dev/route Routing, device updates, kernel to user
1 = /dev/skip enSKIP security cache control
3 = /dev/fwmonitor Firewall packet copies
@@ -817,9 +762,9 @@ Your cooperation is appreciated.
...
31 = /dev/tap15 16th Ethertap device
- 36 block OBSOLETE (was MCA ESDI hard disk)
+ 36 block OBSOLETE (was MCA ESDI hard disk)
- 37 char IDE tape
+ 37 char IDE tape
0 = /dev/ht0 First IDE tape
1 = /dev/ht1 Second IDE tape
...
@@ -829,10 +774,10 @@ Your cooperation is appreciated.
Currently, only one IDE tape drive is supported.
- 37 block Zorro II ramdisk
+ 37 block Zorro II ramdisk
0 = /dev/z2ram Zorro II ramdisk
- 38 char Myricom PCI Myrinet board
+ 38 char Myricom PCI Myrinet board
0 = /dev/mlanai0 First Myrinet board
1 = /dev/mlanai1 Second Myrinet board
...
@@ -841,9 +786,9 @@ Your cooperation is appreciated.
and "user level packet I/O." This board is also
accessible as a standard networking "eth" device.
- 38 block OBSOLETE (was Linux/AP+)
+ 38 block OBSOLETE (was Linux/AP+)
- 39 char ML-16P experimental I/O board
+ 39 char ML-16P experimental I/O board
0 = /dev/ml16pa-a0 First card, first analog channel
1 = /dev/ml16pa-a1 First card, second analog channel
...
@@ -861,20 +806,20 @@ Your cooperation is appreciated.
50 = /dev/ml16pb-c1 Second card, second counter/timer
51 = /dev/ml16pb-c2 Second card, third counter/timer
...
- 39 block
+ 39 block
- 40 char
+ 40 char
- 40 block
+ 40 block
- 41 char Yet Another Micro Monitor
+ 41 char Yet Another Micro Monitor
0 = /dev/yamm Yet Another Micro Monitor
- 41 block
+ 41 block
- 42 char Demo/sample use
+ 42 char Demo/sample use
- 42 block Demo/sample use
+ 42 block Demo/sample use
This number is intended for use in sample code, as
well as a general "example" device number. It
@@ -887,12 +832,12 @@ Your cooperation is appreciated.
IN PARTICULAR, ANY DISTRIBUTION WHICH CONTAINS A
DEVICE DRIVER USING MAJOR NUMBER 42 IS NONCOMPLIANT.
- 43 char isdn4linux virtual modem
+ 43 char isdn4linux virtual modem
0 = /dev/ttyI0 First virtual modem
...
63 = /dev/ttyI63 64th virtual modem
- 43 block Network block devices
+ 43 block Network block devices
0 = /dev/nb0 First network block device
1 = /dev/nb1 Second network block device
...
@@ -904,12 +849,12 @@ Your cooperation is appreciated.
to mounting filesystems over the net, swapping over
the net, implementing block device in userland etc.
- 44 char isdn4linux virtual modem - alternate devices
+ 44 char isdn4linux virtual modem - alternate devices
0 = /dev/cui0 Callout device for ttyI0
...
63 = /dev/cui63 Callout device for ttyI63
- 44 block Flash Translation Layer (FTL) filesystems
+ 44 block Flash Translation Layer (FTL) filesystems
0 = /dev/ftla FTL on first Memory Technology Device
16 = /dev/ftlb FTL on second Memory Technology Device
32 = /dev/ftlc FTL on third Memory Technology Device
@@ -920,7 +865,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the partition
limit is 15 rather than 63 per disk (same as SCSI.)
- 45 char isdn4linux ISDN BRI driver
+ 45 char isdn4linux ISDN BRI driver
0 = /dev/isdn0 First virtual B channel raw data
...
63 = /dev/isdn63 64th virtual B channel raw data
@@ -934,7 +879,7 @@ Your cooperation is appreciated.
255 = /dev/isdninfo ISDN monitor interface
- 45 block Parallel port IDE disk devices
+ 45 block Parallel port IDE disk devices
0 = /dev/pda First parallel port IDE disk
16 = /dev/pdb Second parallel port IDE disk
32 = /dev/pdc Third parallel port IDE disk
@@ -944,21 +889,21 @@ Your cooperation is appreciated.
disks (see major number 3) except that the partition
limit is 15 rather than 63 per disk.
- 46 char Comtrol Rocketport serial card
+ 46 char Comtrol Rocketport serial card
0 = /dev/ttyR0 First Rocketport port
1 = /dev/ttyR1 Second Rocketport port
...
- 46 block Parallel port ATAPI CD-ROM devices
+ 46 block Parallel port ATAPI CD-ROM devices
0 = /dev/pcd0 First parallel port ATAPI CD-ROM
1 = /dev/pcd1 Second parallel port ATAPI CD-ROM
2 = /dev/pcd2 Third parallel port ATAPI CD-ROM
3 = /dev/pcd3 Fourth parallel port ATAPI CD-ROM
- 47 char Comtrol Rocketport serial card - alternate devices
+ 47 char Comtrol Rocketport serial card - alternate devices
0 = /dev/cur0 Callout device for ttyR0
1 = /dev/cur1 Callout device for ttyR1
...
- 47 block Parallel port ATAPI disk devices
+ 47 block Parallel port ATAPI disk devices
0 = /dev/pf0 First parallel port ATAPI disk
1 = /dev/pf1 Second parallel port ATAPI disk
2 = /dev/pf2 Third parallel port ATAPI disk
@@ -967,11 +912,11 @@ Your cooperation is appreciated.
This driver is intended for floppy disks and similar
devices and hence does not support partitioning.
- 48 char SDL RISCom serial card
+ 48 char SDL RISCom serial card
0 = /dev/ttyL0 First RISCom port
1 = /dev/ttyL1 Second RISCom port
...
- 48 block Mylex DAC960 PCI RAID controller; first controller
+ 48 block Mylex DAC960 PCI RAID controller; first controller
0 = /dev/rd/c0d0 First disk, whole disk
8 = /dev/rd/c0d1 Second disk, whole disk
...
@@ -983,11 +928,11 @@ Your cooperation is appreciated.
...
7 = /dev/rd/c?d?p7 Seventh partition
- 49 char SDL RISCom serial card - alternate devices
+ 49 char SDL RISCom serial card - alternate devices
0 = /dev/cul0 Callout device for ttyL0
1 = /dev/cul1 Callout device for ttyL1
...
- 49 block Mylex DAC960 PCI RAID controller; second controller
+ 49 block Mylex DAC960 PCI RAID controller; second controller
0 = /dev/rd/c1d0 First disk, whole disk
8 = /dev/rd/c1d1 Second disk, whole disk
...
@@ -995,19 +940,19 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 50 char Reserved for GLINT
+ 50 char Reserved for GLINT
- 50 block Mylex DAC960 PCI RAID controller; third controller
+ 50 block Mylex DAC960 PCI RAID controller; third controller
0 = /dev/rd/c2d0 First disk, whole disk
8 = /dev/rd/c2d1 Second disk, whole disk
...
248 = /dev/rd/c2d31 32nd disk, whole disk
- 51 char Baycom radio modem OR Radio Tech BIM-XXX-RS232 radio modem
+ 51 char Baycom radio modem OR Radio Tech BIM-XXX-RS232 radio modem
0 = /dev/bc0 First Baycom radio modem
1 = /dev/bc1 Second Baycom radio modem
...
- 51 block Mylex DAC960 PCI RAID controller; fourth controller
+ 51 block Mylex DAC960 PCI RAID controller; fourth controller
0 = /dev/rd/c3d0 First disk, whole disk
8 = /dev/rd/c3d1 Second disk, whole disk
...
@@ -1015,13 +960,13 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 52 char Spellcaster DataComm/BRI ISDN card
+ 52 char Spellcaster DataComm/BRI ISDN card
0 = /dev/dcbri0 First DataComm card
1 = /dev/dcbri1 Second DataComm card
2 = /dev/dcbri2 Third DataComm card
3 = /dev/dcbri3 Fourth DataComm card
- 52 block Mylex DAC960 PCI RAID controller; fifth controller
+ 52 block Mylex DAC960 PCI RAID controller; fifth controller
0 = /dev/rd/c4d0 First disk, whole disk
8 = /dev/rd/c4d1 Second disk, whole disk
...
@@ -1029,7 +974,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 53 char BDM interface for remote debugging MC683xx microcontrollers
+ 53 char BDM interface for remote debugging MC683xx microcontrollers
0 = /dev/pd_bdm0 PD BDM interface on lp0
1 = /dev/pd_bdm1 PD BDM interface on lp1
2 = /dev/pd_bdm2 PD BDM interface on lp2
@@ -1043,7 +988,7 @@ Your cooperation is appreciated.
Domain Interface and ICD is the commercial interface
by P&E.
- 53 block Mylex DAC960 PCI RAID controller; sixth controller
+ 53 block Mylex DAC960 PCI RAID controller; sixth controller
0 = /dev/rd/c5d0 First disk, whole disk
8 = /dev/rd/c5d1 Second disk, whole disk
...
@@ -1051,7 +996,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 54 char Electrocardiognosis Holter serial card
+ 54 char Electrocardiognosis Holter serial card
0 = /dev/holter0 First Holter port
1 = /dev/holter1 Second Holter port
2 = /dev/holter2 Third Holter port
@@ -1060,7 +1005,7 @@ Your cooperation is appreciated.
<mseritan@ottonel.pub.ro> to transfer data from Holter
24-hour heart monitoring equipment.
- 54 block Mylex DAC960 PCI RAID controller; seventh controller
+ 54 block Mylex DAC960 PCI RAID controller; seventh controller
0 = /dev/rd/c6d0 First disk, whole disk
8 = /dev/rd/c6d1 Second disk, whole disk
...
@@ -1068,10 +1013,10 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 55 char DSP56001 digital signal processor
+ 55 char DSP56001 digital signal processor
0 = /dev/dsp56k First DSP56001
- 55 block Mylex DAC960 PCI RAID controller; eighth controller
+ 55 block Mylex DAC960 PCI RAID controller; eighth controller
0 = /dev/rd/c7d0 First disk, whole disk
8 = /dev/rd/c7d1 Second disk, whole disk
...
@@ -1079,42 +1024,42 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
- 56 char Apple Desktop Bus
+ 56 char Apple Desktop Bus
0 = /dev/adb ADB bus control
Additional devices will be added to this number, all
starting with /dev/adb.
- 56 block Fifth IDE hard disk/CD-ROM interface
+ 56 block Fifth IDE hard disk/CD-ROM interface
0 = /dev/hdi Master: whole disk (or CD-ROM)
64 = /dev/hdj Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 57 char Hayes ESP serial card
+ 57 char Hayes ESP serial card
0 = /dev/ttyP0 First ESP port
1 = /dev/ttyP1 Second ESP port
...
- 57 block Sixth IDE hard disk/CD-ROM interface
+ 57 block Sixth IDE hard disk/CD-ROM interface
0 = /dev/hdk Master: whole disk (or CD-ROM)
64 = /dev/hdl Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 58 char Hayes ESP serial card - alternate devices
+ 58 char Hayes ESP serial card - alternate devices
0 = /dev/cup0 Callout device for ttyP0
1 = /dev/cup1 Callout device for ttyP1
...
- 58 block Reserved for logical volume manager
+ 58 block Reserved for logical volume manager
- 59 char sf firewall package
+ 59 char sf firewall package
0 = /dev/firewall Communication with sf kernel module
- 59 block Generic PDA filesystem device
+ 59 block Generic PDA filesystem device
0 = /dev/pda0 First PDA device
1 = /dev/pda1 Second PDA device
...
@@ -1127,17 +1072,17 @@ Your cooperation is appreciated.
NAMING CONFLICT -- PROPOSED REVISED NAME /dev/rpda0 etc
- 60-63 char LOCAL/EXPERIMENTAL USE
+ 60-63 char LOCAL/EXPERIMENTAL USE
- 60-63 block LOCAL/EXPERIMENTAL USE
+ 60-63 block LOCAL/EXPERIMENTAL USE
Allocated for local/experimental use. For devices not
assigned official numbers, these ranges should be
used in order to avoid conflicting with future assignments.
- 64 char ENskip kernel encryption package
+ 64 char ENskip kernel encryption package
0 = /dev/enskip Communication with ENskip kernel module
- 64 block Scramdisk/DriveCrypt encrypted devices
+ 64 block Scramdisk/DriveCrypt encrypted devices
0 = /dev/scramdisk/master Master node for ioctls
1 = /dev/scramdisk/1 First encrypted device
2 = /dev/scramdisk/2 Second encrypted device
@@ -1152,7 +1097,7 @@ Your cooperation is appreciated.
Requested by: andy@scramdisklinux.org
- 65 char Sundance "plink" Transputer boards (obsolete, unused)
+ 65 char Sundance "plink" Transputer boards (obsolete, unused)
0 = /dev/plink0 First plink device
1 = /dev/plink1 Second plink device
2 = /dev/plink2 Third plink device
@@ -1173,7 +1118,7 @@ Your cooperation is appreciated.
This is a commercial driver; contact James Howes
<jth@prosig.demon.co.uk> for information.
- 65 block SCSI disk devices (16-31)
+ 65 block SCSI disk devices (16-31)
0 = /dev/sdq 17th SCSI disk whole disk
16 = /dev/sdr 18th SCSI disk whole disk
32 = /dev/sds 19th SCSI disk whole disk
@@ -1184,12 +1129,12 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 66 char YARC PowerPC PCI coprocessor card
+ 66 char YARC PowerPC PCI coprocessor card
0 = /dev/yppcpci0 First YARC card
1 = /dev/yppcpci1 Second YARC card
...
- 66 block SCSI disk devices (32-47)
+ 66 block SCSI disk devices (32-47)
0 = /dev/sdag 33th SCSI disk whole disk
16 = /dev/sdah 34th SCSI disk whole disk
32 = /dev/sdai 35th SCSI disk whole disk
@@ -1200,12 +1145,12 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 67 char Coda network file system
+ 67 char Coda network file system
0 = /dev/cfs0 Coda cache manager
See http://www.coda.cs.cmu.edu for information about Coda.
- 67 block SCSI disk devices (48-63)
+ 67 block SCSI disk devices (48-63)
0 = /dev/sdaw 49th SCSI disk whole disk
16 = /dev/sdax 50th SCSI disk whole disk
32 = /dev/sday 51st SCSI disk whole disk
@@ -1216,7 +1161,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 68 char CAPI 2.0 interface
+ 68 char CAPI 2.0 interface
0 = /dev/capi20 Control device
1 = /dev/capi20.00 First CAPI 2.0 application
2 = /dev/capi20.01 Second CAPI 2.0 application
@@ -1226,7 +1171,7 @@ Your cooperation is appreciated.
ISDN CAPI 2.0 driver for use with CAPI 2.0
applications; currently supports the AVM B1 card.
- 68 block SCSI disk devices (64-79)
+ 68 block SCSI disk devices (64-79)
0 = /dev/sdbm 65th SCSI disk whole disk
16 = /dev/sdbn 66th SCSI disk whole disk
32 = /dev/sdbo 67th SCSI disk whole disk
@@ -1237,10 +1182,10 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 69 char MA16 numeric accelerator card
+ 69 char MA16 numeric accelerator card
0 = /dev/ma16 Board memory access
- 69 block SCSI disk devices (80-95)
+ 69 block SCSI disk devices (80-95)
0 = /dev/sdcc 81st SCSI disk whole disk
16 = /dev/sdcd 82nd SCSI disk whole disk
32 = /dev/sdce 83th SCSI disk whole disk
@@ -1251,7 +1196,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 70 char SpellCaster Protocol Services Interface
+ 70 char SpellCaster Protocol Services Interface
0 = /dev/apscfg Configuration interface
1 = /dev/apsauth Authentication interface
2 = /dev/apslog Logging interface
@@ -1260,7 +1205,7 @@ Your cooperation is appreciated.
65 = /dev/apsasync Async command interface
128 = /dev/apsmon Monitor interface
- 70 block SCSI disk devices (96-111)
+ 70 block SCSI disk devices (96-111)
0 = /dev/sdcs 97th SCSI disk whole disk
16 = /dev/sdct 98th SCSI disk whole disk
32 = /dev/sdcu 99th SCSI disk whole disk
@@ -1271,7 +1216,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 71 char Computone IntelliPort II serial card
+ 71 char Computone IntelliPort II serial card
0 = /dev/ttyF0 IntelliPort II board 0, port 0
1 = /dev/ttyF1 IntelliPort II board 0, port 1
...
@@ -1289,7 +1234,7 @@ Your cooperation is appreciated.
...
255 = /dev/ttyF255 IntelliPort II board 3, port 63
- 71 block SCSI disk devices (112-127)
+ 71 block SCSI disk devices (112-127)
0 = /dev/sddi 113th SCSI disk whole disk
16 = /dev/sddj 114th SCSI disk whole disk
32 = /dev/sddk 115th SCSI disk whole disk
@@ -1300,7 +1245,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 72 char Computone IntelliPort II serial card - alternate devices
+ 72 char Computone IntelliPort II serial card - alternate devices
0 = /dev/cuf0 Callout device for ttyF0
1 = /dev/cuf1 Callout device for ttyF1
...
@@ -1318,7 +1263,7 @@ Your cooperation is appreciated.
...
255 = /dev/cuf255 Callout device for ttyF255
- 72 block Compaq Intelligent Drive Array, first controller
+ 72 block Compaq Intelligent Drive Array, first controller
0 = /dev/ida/c0d0 First logical drive whole disk
16 = /dev/ida/c0d1 Second logical drive whole disk
...
@@ -1328,7 +1273,7 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 73 char Computone IntelliPort II serial card - control devices
+ 73 char Computone IntelliPort II serial card - control devices
0 = /dev/ip2ipl0 Loadware device for board 0
1 = /dev/ip2stat0 Status device for board 0
4 = /dev/ip2ipl1 Loadware device for board 1
@@ -1338,7 +1283,7 @@ Your cooperation is appreciated.
12 = /dev/ip2ipl3 Loadware device for board 3
13 = /dev/ip2stat3 Status device for board 3
- 73 block Compaq Intelligent Drive Array, second controller
+ 73 block Compaq Intelligent Drive Array, second controller
0 = /dev/ida/c1d0 First logical drive whole disk
16 = /dev/ida/c1d1 Second logical drive whole disk
...
@@ -1348,7 +1293,7 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 74 char SCI bridge
+ 74 char SCI bridge
0 = /dev/SCI/0 SCI device 0
1 = /dev/SCI/1 SCI device 1
...
@@ -1356,7 +1301,7 @@ Your cooperation is appreciated.
Currently for Dolphin Interconnect Solutions' PCI-SCI
bridge.
- 74 block Compaq Intelligent Drive Array, third controller
+ 74 block Compaq Intelligent Drive Array, third controller
0 = /dev/ida/c2d0 First logical drive whole disk
16 = /dev/ida/c2d1 Second logical drive whole disk
...
@@ -1366,14 +1311,14 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 75 char Specialix IO8+ serial card
+ 75 char Specialix IO8+ serial card
0 = /dev/ttyW0 First IO8+ port, first card
1 = /dev/ttyW1 Second IO8+ port, first card
...
8 = /dev/ttyW8 First IO8+ port, second card
...
- 75 block Compaq Intelligent Drive Array, fourth controller
+ 75 block Compaq Intelligent Drive Array, fourth controller
0 = /dev/ida/c3d0 First logical drive whole disk
16 = /dev/ida/c3d1 Second logical drive whole disk
...
@@ -1383,14 +1328,14 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 76 char Specialix IO8+ serial card - alternate devices
+ 76 char Specialix IO8+ serial card - alternate devices
0 = /dev/cuw0 Callout device for ttyW0
1 = /dev/cuw1 Callout device for ttyW1
...
8 = /dev/cuw8 Callout device for ttyW8
...
- 76 block Compaq Intelligent Drive Array, fifth controller
+ 76 block Compaq Intelligent Drive Array, fifth controller
0 = /dev/ida/c4d0 First logical drive whole disk
16 = /dev/ida/c4d1 Second logical drive whole disk
...
@@ -1401,10 +1346,10 @@ Your cooperation is appreciated.
partitions is 15.
- 77 char ComScire Quantum Noise Generator
+ 77 char ComScire Quantum Noise Generator
0 = /dev/qng ComScire Quantum Noise Generator
- 77 block Compaq Intelligent Drive Array, sixth controller
+ 77 block Compaq Intelligent Drive Array, sixth controller
0 = /dev/ida/c5d0 First logical drive whole disk
16 = /dev/ida/c5d1 Second logical drive whole disk
...
@@ -1414,12 +1359,12 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 78 char PAM Software's multimodem boards
+ 78 char PAM Software's multimodem boards
0 = /dev/ttyM0 First PAM modem
1 = /dev/ttyM1 Second PAM modem
...
- 78 block Compaq Intelligent Drive Array, seventh controller
+ 78 block Compaq Intelligent Drive Array, seventh controller
0 = /dev/ida/c6d0 First logical drive whole disk
16 = /dev/ida/c6d1 Second logical drive whole disk
...
@@ -1429,12 +1374,12 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 79 char PAM Software's multimodem boards - alternate devices
+ 79 char PAM Software's multimodem boards - alternate devices
0 = /dev/cum0 Callout device for ttyM0
1 = /dev/cum1 Callout device for ttyM1
...
- 79 block Compaq Intelligent Drive Array, eighth controller
+ 79 block Compaq Intelligent Drive Array, eighth controller
0 = /dev/ida/c7d0 First logical drive whole disk
16 = /dev/ida/c7d1 Second logical drive whole disk
...
@@ -1444,10 +1389,10 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
- 80 char Photometrics AT200 CCD camera
+ 80 char Photometrics AT200 CCD camera
0 = /dev/at200 Photometrics AT200 CCD camera
- 80 block I2O hard disk
+ 80 block I2O hard disk
0 = /dev/i2o/hda First I2O hard disk, whole disk
16 = /dev/i2o/hdb Second I2O hard disk, whole disk
...
@@ -1457,7 +1402,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 81 char video4linux
+ 81 char video4linux
0 = /dev/video0 Video capture/overlay device
...
63 = /dev/video63 Video capture/overlay device
@@ -1475,7 +1420,7 @@ Your cooperation is appreciated.
CONFIG_VIDEO_FIXED_MINOR_RANGES (default n)
configuration option is set.
- 81 block I2O hard disk
+ 81 block I2O hard disk
0 = /dev/i2o/hdq 17th I2O hard disk, whole disk
16 = /dev/i2o/hdr 18th I2O hard disk, whole disk
...
@@ -1485,7 +1430,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 82 char WiNRADiO communications receiver card
+ 82 char WiNRADiO communications receiver card
0 = /dev/winradio0 First WiNRADiO card
1 = /dev/winradio1 Second WiNRADiO card
...
@@ -1493,7 +1438,7 @@ Your cooperation is appreciated.
The driver and documentation may be obtained from
http://www.winradio.com/
- 82 block I2O hard disk
+ 82 block I2O hard disk
0 = /dev/i2o/hdag 33rd I2O hard disk, whole disk
16 = /dev/i2o/hdah 34th I2O hard disk, whole disk
...
@@ -1503,14 +1448,14 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 83 char Matrox mga_vid video driver
- 0 = /dev/mga_vid0 1st video card
+ 83 char Matrox mga_vid video driver
+ 0 = /dev/mga_vid0 1st video card
1 = /dev/mga_vid1 2nd video card
2 = /dev/mga_vid2 3rd video card
...
- 15 = /dev/mga_vid15 16th video card
+ 15 = /dev/mga_vid15 16th video card
- 83 block I2O hard disk
+ 83 block I2O hard disk
0 = /dev/i2o/hdaw 49th I2O hard disk, whole disk
16 = /dev/i2o/hdax 50th I2O hard disk, whole disk
...
@@ -1520,11 +1465,11 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 84 char Ikon 1011[57] Versatec Greensheet Interface
+ 84 char Ikon 1011[57] Versatec Greensheet Interface
0 = /dev/ihcp0 First Greensheet port
1 = /dev/ihcp1 Second Greensheet port
- 84 block I2O hard disk
+ 84 block I2O hard disk
0 = /dev/i2o/hdbm 65th I2O hard disk, whole disk
16 = /dev/i2o/hdbn 66th I2O hard disk, whole disk
...
@@ -1534,13 +1479,13 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 85 char Linux/SGI shared memory input queue
+ 85 char Linux/SGI shared memory input queue
0 = /dev/shmiq Master shared input queue
1 = /dev/qcntl0 First device pushed
2 = /dev/qcntl1 Second device pushed
...
- 85 block I2O hard disk
+ 85 block I2O hard disk
0 = /dev/i2o/hdcc 81st I2O hard disk, whole disk
16 = /dev/i2o/hdcd 82nd I2O hard disk, whole disk
...
@@ -1550,12 +1495,12 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 86 char SCSI media changer
+ 86 char SCSI media changer
0 = /dev/sch0 First SCSI media changer
1 = /dev/sch1 Second SCSI media changer
...
- 86 block I2O hard disk
+ 86 block I2O hard disk
0 = /dev/i2o/hdcs 97th I2O hard disk, whole disk
16 = /dev/i2o/hdct 98th I2O hard disk, whole disk
...
@@ -1565,12 +1510,12 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 87 char Sony Control-A1 stereo control bus
+ 87 char Sony Control-A1 stereo control bus
0 = /dev/controla0 First device on chain
1 = /dev/controla1 Second device on chain
...
- 87 block I2O hard disk
+ 87 block I2O hard disk
0 = /dev/i2o/hddi 113rd I2O hard disk, whole disk
16 = /dev/i2o/hddj 114th I2O hard disk, whole disk
...
@@ -1580,59 +1525,59 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 88 char COMX synchronous serial card
+ 88 char COMX synchronous serial card
0 = /dev/comx0 COMX channel 0
1 = /dev/comx1 COMX channel 1
...
- 88 block Seventh IDE hard disk/CD-ROM interface
+ 88 block Seventh IDE hard disk/CD-ROM interface
0 = /dev/hdm Master: whole disk (or CD-ROM)
64 = /dev/hdn Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 89 char I2C bus interface
+ 89 char I2C bus interface
0 = /dev/i2c-0 First I2C adapter
1 = /dev/i2c-1 Second I2C adapter
...
- 89 block Eighth IDE hard disk/CD-ROM interface
+ 89 block Eighth IDE hard disk/CD-ROM interface
0 = /dev/hdo Master: whole disk (or CD-ROM)
64 = /dev/hdp Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 90 char Memory Technology Device (RAM, ROM, Flash)
+ 90 char Memory Technology Device (RAM, ROM, Flash)
0 = /dev/mtd0 First MTD (rw)
1 = /dev/mtdr0 First MTD (ro)
...
30 = /dev/mtd15 16th MTD (rw)
31 = /dev/mtdr15 16th MTD (ro)
- 90 block Ninth IDE hard disk/CD-ROM interface
+ 90 block Ninth IDE hard disk/CD-ROM interface
0 = /dev/hdq Master: whole disk (or CD-ROM)
64 = /dev/hdr Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 91 char CAN-Bus devices
+ 91 char CAN-Bus devices
0 = /dev/can0 First CAN-Bus controller
1 = /dev/can1 Second CAN-Bus controller
...
- 91 block Tenth IDE hard disk/CD-ROM interface
+ 91 block Tenth IDE hard disk/CD-ROM interface
0 = /dev/hds Master: whole disk (or CD-ROM)
64 = /dev/hdt Slave: whole disk (or CD-ROM)
Partitions are handled the same way as for the first
interface (see major number 3).
- 92 char Reserved for ith Kommunikationstechnik MIC ISDN card
+ 92 char Reserved for ith Kommunikationstechnik MIC ISDN card
- 92 block PPDD encrypted disk driver
+ 92 block PPDD encrypted disk driver
0 = /dev/ppdd0 First encrypted disk
1 = /dev/ppdd1 Second encrypted disk
...
@@ -1641,35 +1586,35 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
- 93 char
+ 93 char
- 93 block NAND Flash Translation Layer filesystem
+ 93 block NAND Flash Translation Layer filesystem
0 = /dev/nftla First NFTL layer
16 = /dev/nftlb Second NFTL layer
...
240 = /dev/nftlp 16th NTFL layer
- 94 char
+ 94 char
- 94 block IBM S/390 DASD block storage
- 0 = /dev/dasda First DASD device, major
- 1 = /dev/dasda1 First DASD device, block 1
- 2 = /dev/dasda2 First DASD device, block 2
- 3 = /dev/dasda3 First DASD device, block 3
- 4 = /dev/dasdb Second DASD device, major
- 5 = /dev/dasdb1 Second DASD device, block 1
- 6 = /dev/dasdb2 Second DASD device, block 2
- 7 = /dev/dasdb3 Second DASD device, block 3
+ 94 block IBM S/390 DASD block storage
+ 0 = /dev/dasda First DASD device, major
+ 1 = /dev/dasda1 First DASD device, block 1
+ 2 = /dev/dasda2 First DASD device, block 2
+ 3 = /dev/dasda3 First DASD device, block 3
+ 4 = /dev/dasdb Second DASD device, major
+ 5 = /dev/dasdb1 Second DASD device, block 1
+ 6 = /dev/dasdb2 Second DASD device, block 2
+ 7 = /dev/dasdb3 Second DASD device, block 3
...
- 95 char IP filter
+ 95 char IP filter
0 = /dev/ipl Filter control device/log file
1 = /dev/ipnat NAT control device/log file
2 = /dev/ipstate State information log file
3 = /dev/ipauth Authentication control device/log file
...
- 96 char Parallel port ATAPI tape devices
+ 96 char Parallel port ATAPI tape devices
0 = /dev/pt0 First parallel port ATAPI tape
1 = /dev/pt1 Second parallel port ATAPI tape
...
@@ -1677,13 +1622,13 @@ Your cooperation is appreciated.
129 = /dev/npt1 Second p.p. ATAPI tape, no rewind
...
- 96 block Inverse NAND Flash Translation Layer
+ 96 block Inverse NAND Flash Translation Layer
0 = /dev/inftla First INFTL layer
16 = /dev/inftlb Second INFTL layer
...
240 = /dev/inftlp 16th INTFL layer
- 97 char Parallel port generic ATAPI interface
+ 97 char Parallel port generic ATAPI interface
0 = /dev/pg0 First parallel port ATAPI device
1 = /dev/pg1 Second parallel port ATAPI device
2 = /dev/pg2 Third parallel port ATAPI device
@@ -1692,14 +1637,14 @@ Your cooperation is appreciated.
These devices support the same API as the generic SCSI
devices.
- 98 char Control and Measurement Device (comedi)
+ 98 char Control and Measurement Device (comedi)
0 = /dev/comedi0 First comedi device
1 = /dev/comedi1 Second comedi device
...
See http://stm.lbl.gov/comedi.
- 98 block User-mode virtual block device
+ 98 block User-mode virtual block device
0 = /dev/ubda First user-mode block device
16 = /dev/udbb Second user-mode block device
...
@@ -1710,26 +1655,26 @@ Your cooperation is appreciated.
This device is used by the user-mode virtual kernel port.
- 99 char Raw parallel ports
+ 99 char Raw parallel ports
0 = /dev/parport0 First parallel port
1 = /dev/parport1 Second parallel port
...
- 99 block JavaStation flash disk
+ 99 block JavaStation flash disk
0 = /dev/jsfd JavaStation flash disk
-100 char Telephony for Linux
+ 100 char Telephony for Linux
0 = /dev/phone0 First telephony device
1 = /dev/phone1 Second telephony device
...
-101 char Motorola DSP 56xxx board
+ 101 char Motorola DSP 56xxx board
0 = /dev/mdspstat Status information
1 = /dev/mdsp1 First DSP board I/O controls
...
16 = /dev/mdsp16 16th DSP board I/O controls
-101 block AMI HyperDisk RAID controller
+ 101 block AMI HyperDisk RAID controller
0 = /dev/amiraid/ar0 First array whole disk
16 = /dev/amiraid/ar1 Second array whole disk
...
@@ -1742,9 +1687,9 @@ Your cooperation is appreciated.
...
15 = /dev/amiraid/ar?p15 15th partition
-102 char
+ 102 char
-102 block Compressed block device
+ 102 block Compressed block device
0 = /dev/cbd/a First compressed block device, whole device
16 = /dev/cbd/b Second compressed block device, whole device
...
@@ -1754,7 +1699,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
-103 char Arla network file system
+ 103 char Arla network file system
0 = /dev/nnpfs0 First NNPFS device
1 = /dev/nnpfs1 Second NNPFS device
@@ -1765,12 +1710,12 @@ Your cooperation is appreciated.
write to <arla-drinkers@stacken.kth.se> or see
http://www.stacken.kth.se/project/arla/
-103 block Audit device
+ 103 block Audit device
0 = /dev/audit Audit device
-104 char Flash BIOS support
+ 104 char Flash BIOS support
-104 block Compaq Next Generation Drive Array, first controller
+ 104 block Compaq Next Generation Drive Array, first controller
0 = /dev/cciss/c0d0 First logical drive, whole disk
16 = /dev/cciss/c0d1 Second logical drive, whole disk
...
@@ -1780,12 +1725,12 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-105 char Comtrol VS-1000 serial controller
+ 105 char Comtrol VS-1000 serial controller
0 = /dev/ttyV0 First VS-1000 port
1 = /dev/ttyV1 Second VS-1000 port
...
-105 block Compaq Next Generation Drive Array, second controller
+ 105 block Compaq Next Generation Drive Array, second controller
0 = /dev/cciss/c1d0 First logical drive, whole disk
16 = /dev/cciss/c1d1 Second logical drive, whole disk
...
@@ -1795,12 +1740,12 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-106 char Comtrol VS-1000 serial controller - alternate devices
+ 106 char Comtrol VS-1000 serial controller - alternate devices
0 = /dev/cuv0 First VS-1000 port
1 = /dev/cuv1 Second VS-1000 port
...
-106 block Compaq Next Generation Drive Array, third controller
+ 106 block Compaq Next Generation Drive Array, third controller
0 = /dev/cciss/c2d0 First logical drive, whole disk
16 = /dev/cciss/c2d1 Second logical drive, whole disk
...
@@ -1810,10 +1755,10 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-107 char 3Dfx Voodoo Graphics device
+ 107 char 3Dfx Voodoo Graphics device
0 = /dev/3dfx Primary 3Dfx graphics device
-107 block Compaq Next Generation Drive Array, fourth controller
+ 107 block Compaq Next Generation Drive Array, fourth controller
0 = /dev/cciss/c3d0 First logical drive, whole disk
16 = /dev/cciss/c3d1 Second logical drive, whole disk
...
@@ -1823,10 +1768,10 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-108 char Device independent PPP interface
+ 108 char Device independent PPP interface
0 = /dev/ppp Device independent PPP interface
-108 block Compaq Next Generation Drive Array, fifth controller
+ 108 block Compaq Next Generation Drive Array, fifth controller
0 = /dev/cciss/c4d0 First logical drive, whole disk
16 = /dev/cciss/c4d1 Second logical drive, whole disk
...
@@ -1836,9 +1781,9 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-109 char Reserved for logical volume manager
+ 109 char Reserved for logical volume manager
-109 block Compaq Next Generation Drive Array, sixth controller
+ 109 block Compaq Next Generation Drive Array, sixth controller
0 = /dev/cciss/c5d0 First logical drive, whole disk
16 = /dev/cciss/c5d1 Second logical drive, whole disk
...
@@ -1848,12 +1793,12 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-110 char miroMEDIA Surround board
+ 110 char miroMEDIA Surround board
0 = /dev/srnd0 First miroMEDIA Surround board
1 = /dev/srnd1 Second miroMEDIA Surround board
...
-110 block Compaq Next Generation Drive Array, seventh controller
+ 110 block Compaq Next Generation Drive Array, seventh controller
0 = /dev/cciss/c6d0 First logical drive, whole disk
16 = /dev/cciss/c6d1 Second logical drive, whole disk
...
@@ -1863,9 +1808,9 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-111 char
+ 111 char
-111 block Compaq Next Generation Drive Array, eighth controller
+ 111 block Compaq Next Generation Drive Array, eighth controller
0 = /dev/cciss/c7d0 First logical drive, whole disk
16 = /dev/cciss/c7d1 Second logical drive, whole disk
...
@@ -1875,7 +1820,7 @@ Your cooperation is appreciated.
DAC960 (see major number 48) except that the limit on
partitions is 15.
-112 char ISI serial card
+ 112 char ISI serial card
0 = /dev/ttyM0 First ISI port
1 = /dev/ttyM1 Second ISI port
...
@@ -1883,7 +1828,7 @@ Your cooperation is appreciated.
There is currently a device-naming conflict between
these and PAM multimodems (major 78).
-112 block IBM iSeries virtual disk
+ 112 block IBM iSeries virtual disk
0 = /dev/iseries/vda First virtual disk, whole disk
8 = /dev/iseries/vdb Second virtual disk, whole disk
...
@@ -1896,17 +1841,17 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 7.
-113 char ISI serial card - alternate devices
+ 113 char ISI serial card - alternate devices
0 = /dev/cum0 Callout device for ttyM0
1 = /dev/cum1 Callout device for ttyM1
...
-113 block IBM iSeries virtual CD-ROM
+ 113 block IBM iSeries virtual CD-ROM
0 = /dev/iseries/vcda First virtual CD-ROM
1 = /dev/iseries/vcdb Second virtual CD-ROM
...
-114 char Picture Elements ISE board
+ 114 char Picture Elements ISE board
0 = /dev/ise0 First ISE board
1 = /dev/ise1 Second ISE board
...
@@ -1919,24 +1864,24 @@ Your cooperation is appreciated.
I/O access to the board, the /dev/isex0 nodes are command
nodes used to control the board.
-114 block IDE BIOS powered software RAID interfaces such as the
- Promise Fastrak
+ 114 block IDE BIOS powered software RAID interfaces such as the
+ Promise Fastrak
- 0 = /dev/ataraid/d0
- 1 = /dev/ataraid/d0p1
- 2 = /dev/ataraid/d0p2
- ...
- 16 = /dev/ataraid/d1
- 17 = /dev/ataraid/d1p1
- 18 = /dev/ataraid/d1p2
- ...
- 255 = /dev/ataraid/d15p15
+ 0 = /dev/ataraid/d0
+ 1 = /dev/ataraid/d0p1
+ 2 = /dev/ataraid/d0p2
+ ...
+ 16 = /dev/ataraid/d1
+ 17 = /dev/ataraid/d1p1
+ 18 = /dev/ataraid/d1p2
+ ...
+ 255 = /dev/ataraid/d15p15
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
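
For example, with the layout above the minor number is 16 * unit +
partition, so partition 2 of the second unit (/dev/ataraid/d1p2,
minor 18) could be created by hand with a command like:

    mknod /dev/ataraid/d1p2 b 114 18
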
-115 char TI link cable devices (115 was formerly the console driver speaker)
+ 115 char TI link cable devices (115 was formerly the console driver speaker)
0 = /dev/tipar0 Parallel cable on first parallel port
...
7 = /dev/tipar7 Parallel cable on seventh parallel port
@@ -1949,28 +1894,28 @@ Your cooperation is appreciated.
...
47 = /dev/tiusb31 32nd USB cable
-115 block NetWare (NWFS) Devices (0-255)
+ 115 block NetWare (NWFS) Devices (0-255)
- The NWFS (NetWare) devices are used to present a
- collection of NetWare Mirror Groups or NetWare
- Partitions as a logical storage segment for
- use in mounting NetWare volumes. A maximum of
- 256 NetWare volumes can be supported in a single
- machine.
+ The NWFS (NetWare) devices are used to present a
+ collection of NetWare Mirror Groups or NetWare
+ Partitions as a logical storage segment for
+ use in mounting NetWare volumes. A maximum of
+ 256 NetWare volumes can be supported in a single
+ machine.
- http://cgfa.telepac.pt/ftp2/kernel.org/linux/kernel/people/jmerkey/nwfs/
+ http://cgfa.telepac.pt/ftp2/kernel.org/linux/kernel/people/jmerkey/nwfs/
- 0 = /dev/nwfs/v0 First NetWare (NWFS) Logical Volume
- 1 = /dev/nwfs/v1 Second NetWare (NWFS) Logical Volume
- 2 = /dev/nwfs/v2 Third NetWare (NWFS) Logical Volume
- ...
- 255 = /dev/nwfs/v255 Last NetWare (NWFS) Logical Volume
+ 0 = /dev/nwfs/v0 First NetWare (NWFS) Logical Volume
+ 1 = /dev/nwfs/v1 Second NetWare (NWFS) Logical Volume
+ 2 = /dev/nwfs/v2 Third NetWare (NWFS) Logical Volume
+ ...
+ 255 = /dev/nwfs/v255 Last NetWare (NWFS) Logical Volume
-116 char Advanced Linux Sound Driver (ALSA)
+ 116 char Advanced Linux Sound Driver (ALSA)
-116 block MicroMemory battery backed RAM adapter (NVRAM)
- Supports 16 boards, 15 partitions each.
- Requested by neilb at cse.unsw.edu.au.
+ 116 block MicroMemory battery backed RAM adapter (NVRAM)
+ Supports 16 boards, 15 partitions each.
+ Requested by neilb at cse.unsw.edu.au.
0 = /dev/umem/d0 Whole of first board
1 = /dev/umem/d0p1 First partition of first board
@@ -1982,7 +1927,7 @@ Your cooperation is appreciated.
...
255 = /dev/umem/d15p15 15th partition of 16th board.
-117 char COSA/SRP synchronous serial card
+ 117 char COSA/SRP synchronous serial card
0 = /dev/cosa0c0 1st board, 1st channel
1 = /dev/cosa0c1 1st board, 2nd channel
...
@@ -1990,147 +1935,147 @@ Your cooperation is appreciated.
17 = /dev/cosa1c1 2nd board, 2nd channel
...
-117 block Enterprise Volume Management System (EVMS)
+ 117 block Enterprise Volume Management System (EVMS)
- The EVMS driver uses a layered, plug-in model to provide
- unparalleled flexibility and extensibility in managing
- storage. This allows for easy expansion or customization
- of various levels of volume management. Requested by
- Mark Peloquin (peloquin at us.ibm.com).
+ The EVMS driver uses a layered, plug-in model to provide
+ unparalleled flexibility and extensibility in managing
+ storage. This allows for easy expansion or customization
+ of various levels of volume management. Requested by
+ Mark Peloquin (peloquin at us.ibm.com).
- Note: EVMS populates and manages all the devnodes in
- /dev/evms.
+ Note: EVMS populates and manages all the devnodes in
+ /dev/evms.
- http://sf.net/projects/evms
+ http://sf.net/projects/evms
- 0 = /dev/evms/block_device EVMS block device
- 1 = /dev/evms/legacyname1 First EVMS legacy device
- 2 = /dev/evms/legacyname2 Second EVMS legacy device
- ...
- Both ranges can grow (down or up) until they meet.
- ...
- 254 = /dev/evms/EVMSname2 Second EVMS native device
- 255 = /dev/evms/EVMSname1 First EVMS native device
+ 0 = /dev/evms/block_device EVMS block device
+ 1 = /dev/evms/legacyname1 First EVMS legacy device
+ 2 = /dev/evms/legacyname2 Second EVMS legacy device
+ ...
+ Both ranges can grow (down or up) until they meet.
+ ...
+ 254 = /dev/evms/EVMSname2 Second EVMS native device
+ 255 = /dev/evms/EVMSname1 First EVMS native device
- Note: legacyname(s) are derived from the normal legacy
- device names. For example, /dev/hda5 would become
- /dev/evms/hda5.
+ Note: legacyname(s) are derived from the normal legacy
+ device names. For example, /dev/hda5 would become
+ /dev/evms/hda5.
-118 char IBM Cryptographic Accelerator
+ 118 char IBM Cryptographic Accelerator
0 = /dev/ica Virtual interface to all IBM Crypto Accelerators
1 = /dev/ica0 IBMCA Device 0
2 = /dev/ica1 IBMCA Device 1
...
-119 char VMware virtual network control
+ 119 char VMware virtual network control
0 = /dev/vnet0 1st virtual network
1 = /dev/vnet1 2nd virtual network
...
-120-127 char LOCAL/EXPERIMENTAL USE
+ 120-127 char LOCAL/EXPERIMENTAL USE
-120-127 block LOCAL/EXPERIMENTAL USE
+ 120-127 block LOCAL/EXPERIMENTAL USE
Allocated for local/experimental use. For devices not
assigned official numbers, these ranges should be
used in order to avoid conflicting with future assignments.
-128-135 char Unix98 PTY masters
+ 128-135 char Unix98 PTY masters
These devices should not have corresponding device
nodes; instead they should be accessed through the
/dev/ptmx cloning interface.
-128 block SCSI disk devices (128-143)
- 0 = /dev/sddy 129th SCSI disk whole disk
- 16 = /dev/sddz 130th SCSI disk whole disk
- 32 = /dev/sdea 131th SCSI disk whole disk
- ...
- 240 = /dev/sden 144th SCSI disk whole disk
+ 128 block SCSI disk devices (128-143)
+ 0 = /dev/sddy 129th SCSI disk whole disk
+ 16 = /dev/sddz 130th SCSI disk whole disk
+ 32 = /dev/sdea 131st SCSI disk whole disk
+ ...
+ 240 = /dev/sden 144th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-129 block SCSI disk devices (144-159)
- 0 = /dev/sdeo 145th SCSI disk whole disk
- 16 = /dev/sdep 146th SCSI disk whole disk
- 32 = /dev/sdeq 147th SCSI disk whole disk
- ...
- 240 = /dev/sdfd 160th SCSI disk whole disk
+ 129 block SCSI disk devices (144-159)
+ 0 = /dev/sdeo 145th SCSI disk whole disk
+ 16 = /dev/sdep 146th SCSI disk whole disk
+ 32 = /dev/sdeq 147th SCSI disk whole disk
+ ...
+ 240 = /dev/sdfd 160th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-130 char (Misc devices)
+ 130 char (Misc devices)
-130 block SCSI disk devices (160-175)
- 0 = /dev/sdfe 161st SCSI disk whole disk
- 16 = /dev/sdff 162nd SCSI disk whole disk
- 32 = /dev/sdfg 163rd SCSI disk whole disk
- ...
- 240 = /dev/sdft 176th SCSI disk whole disk
+ 130 block SCSI disk devices (160-175)
+ 0 = /dev/sdfe 161st SCSI disk whole disk
+ 16 = /dev/sdff 162nd SCSI disk whole disk
+ 32 = /dev/sdfg 163rd SCSI disk whole disk
+ ...
+ 240 = /dev/sdft 176th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-131 block SCSI disk devices (176-191)
- 0 = /dev/sdfu 177th SCSI disk whole disk
- 16 = /dev/sdfv 178th SCSI disk whole disk
- 32 = /dev/sdfw 179th SCSI disk whole disk
- ...
- 240 = /dev/sdgj 192nd SCSI disk whole disk
+ 131 block SCSI disk devices (176-191)
+ 0 = /dev/sdfu 177th SCSI disk whole disk
+ 16 = /dev/sdfv 178th SCSI disk whole disk
+ 32 = /dev/sdfw 179th SCSI disk whole disk
+ ...
+ 240 = /dev/sdgj 192nd SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-132 block SCSI disk devices (192-207)
- 0 = /dev/sdgk 193rd SCSI disk whole disk
- 16 = /dev/sdgl 194th SCSI disk whole disk
- 32 = /dev/sdgm 195th SCSI disk whole disk
- ...
- 240 = /dev/sdgz 208th SCSI disk whole disk
+ 132 block SCSI disk devices (192-207)
+ 0 = /dev/sdgk 193rd SCSI disk whole disk
+ 16 = /dev/sdgl 194th SCSI disk whole disk
+ 32 = /dev/sdgm 195th SCSI disk whole disk
+ ...
+ 240 = /dev/sdgz 208th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-133 block SCSI disk devices (208-223)
- 0 = /dev/sdha 209th SCSI disk whole disk
- 16 = /dev/sdhb 210th SCSI disk whole disk
- 32 = /dev/sdhc 211th SCSI disk whole disk
- ...
- 240 = /dev/sdhp 224th SCSI disk whole disk
+ 133 block SCSI disk devices (208-223)
+ 0 = /dev/sdha 209th SCSI disk whole disk
+ 16 = /dev/sdhb 210th SCSI disk whole disk
+ 32 = /dev/sdhc 211th SCSI disk whole disk
+ ...
+ 240 = /dev/sdhp 224th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-134 block SCSI disk devices (224-239)
- 0 = /dev/sdhq 225th SCSI disk whole disk
- 16 = /dev/sdhr 226th SCSI disk whole disk
- 32 = /dev/sdhs 227th SCSI disk whole disk
- ...
- 240 = /dev/sdif 240th SCSI disk whole disk
+ 134 block SCSI disk devices (224-239)
+ 0 = /dev/sdhq 225th SCSI disk whole disk
+ 16 = /dev/sdhr 226th SCSI disk whole disk
+ 32 = /dev/sdhs 227th SCSI disk whole disk
+ ...
+ 240 = /dev/sdif 240th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-135 block SCSI disk devices (240-255)
- 0 = /dev/sdig 241st SCSI disk whole disk
- 16 = /dev/sdih 242nd SCSI disk whole disk
- 32 = /dev/sdih 243rd SCSI disk whole disk
- ...
- 240 = /dev/sdiv 256th SCSI disk whole disk
+ 135 block SCSI disk devices (240-255)
+ 0 = /dev/sdig 241st SCSI disk whole disk
+ 16 = /dev/sdih 242nd SCSI disk whole disk
+ 32 = /dev/sdii 243rd SCSI disk whole disk
+ ...
+ 240 = /dev/sdiv 256th SCSI disk whole disk
Partitions are handled in the same way as for IDE
disks (see major number 3) except that the limit on
partitions is 15.
-136-143 char Unix98 PTY slaves
+ 136-143 char Unix98 PTY slaves
0 = /dev/pts/0 First Unix98 pseudo-TTY
1 = /dev/pts/1 Second Unix98 pseudo-TTY
...
@@ -2142,7 +2087,7 @@ Your cooperation is appreciated.
*most* distributions the appropriate options are
"mode=0620,gid=<gid of the "tty" group>".)
-136 block Mylex DAC960 PCI RAID controller; ninth controller
+ 136 block Mylex DAC960 PCI RAID controller; ninth controller
0 = /dev/rd/c8d0 First disk, whole disk
8 = /dev/rd/c8d1 Second disk, whole disk
...
@@ -2150,7 +2095,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-137 block Mylex DAC960 PCI RAID controller; tenth controller
+ 137 block Mylex DAC960 PCI RAID controller; tenth controller
0 = /dev/rd/c9d0 First disk, whole disk
8 = /dev/rd/c9d1 Second disk, whole disk
...
@@ -2158,7 +2103,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-138 block Mylex DAC960 PCI RAID controller; eleventh controller
+ 138 block Mylex DAC960 PCI RAID controller; eleventh controller
0 = /dev/rd/c10d0 First disk, whole disk
8 = /dev/rd/c10d1 Second disk, whole disk
...
@@ -2166,7 +2111,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-139 block Mylex DAC960 PCI RAID controller; twelfth controller
+ 139 block Mylex DAC960 PCI RAID controller; twelfth controller
0 = /dev/rd/c11d0 First disk, whole disk
8 = /dev/rd/c11d1 Second disk, whole disk
...
@@ -2174,7 +2119,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-140 block Mylex DAC960 PCI RAID controller; thirteenth controller
+ 140 block Mylex DAC960 PCI RAID controller; thirteenth controller
0 = /dev/rd/c12d0 First disk, whole disk
8 = /dev/rd/c12d1 Second disk, whole disk
...
@@ -2182,7 +2127,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-141 block Mylex DAC960 PCI RAID controller; fourteenth controller
+ 141 block Mylex DAC960 PCI RAID controller; fourteenth controller
0 = /dev/rd/c13d0 First disk, whole disk
8 = /dev/rd/c13d1 Second disk, whole disk
...
@@ -2190,7 +2135,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-142 block Mylex DAC960 PCI RAID controller; fifteenth controller
+ 142 block Mylex DAC960 PCI RAID controller; fifteenth controller
0 = /dev/rd/c14d0 First disk, whole disk
8 = /dev/rd/c14d1 Second disk, whole disk
...
@@ -2198,7 +2143,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-143 block Mylex DAC960 PCI RAID controller; sixteenth controller
+ 143 block Mylex DAC960 PCI RAID controller; sixteenth controller
0 = /dev/rd/c15d0 First disk, whole disk
8 = /dev/rd/c15d1 Second disk, whole disk
...
@@ -2206,7 +2151,7 @@ Your cooperation is appreciated.
Partitions are handled as for major 48.
-144 char Encapsulated PPP
+ 144 char Encapsulated PPP
0 = /dev/pppox0 First PPP over Ethernet
...
63 = /dev/pppox63 64th PPP over Ethernet
@@ -2216,11 +2161,11 @@ Your cooperation is appreciated.
The SST 5136-DN DeviceNet interface driver has been
relocated to major 183 due to an unfortunate conflict.
-144 block Expansion Area #1 for more non-device (e.g. NFS) mounts
+ 144 block Expansion Area #1 for more non-device (e.g. NFS) mounts
0 = mounted device 256
255 = mounted device 511
-145 char SAM9407-based soundcard
+ 145 char SAM9407-based soundcard
0 = /dev/sam0_mixer
1 = /dev/sam0_sequencer
2 = /dev/sam0_midi00
@@ -2241,66 +2186,66 @@ Your cooperation is appreciated.
addons, which are sam9407 specific. OSS can be
operated simultaneously, taking care of the codec.
-145 block Expansion Area #2 for more non-device (e.g. NFS) mounts
+ 145 block Expansion Area #2 for more non-device (e.g. NFS) mounts
0 = mounted device 512
255 = mounted device 767
-146 char SYSTRAM SCRAMNet mirrored-memory network
+ 146 char SYSTRAM SCRAMNet mirrored-memory network
0 = /dev/scramnet0 First SCRAMNet device
1 = /dev/scramnet1 Second SCRAMNet device
...
-146 block Expansion Area #3 for more non-device (e.g. NFS) mounts
+ 146 block Expansion Area #3 for more non-device (e.g. NFS) mounts
0 = mounted device 768
255 = mounted device 1023
-147 char Aureal Semiconductor Vortex Audio device
+ 147 char Aureal Semiconductor Vortex Audio device
0 = /dev/aureal0 First Aureal Vortex
1 = /dev/aureal1 Second Aureal Vortex
...
-147 block Distributed Replicated Block Device (DRBD)
+ 147 block Distributed Replicated Block Device (DRBD)
0 = /dev/drbd0 First DRBD device
1 = /dev/drbd1 Second DRBD device
...
-148 char Technology Concepts serial card
+ 148 char Technology Concepts serial card
0 = /dev/ttyT0 First TCL port
1 = /dev/ttyT1 Second TCL port
...
-149 char Technology Concepts serial card - alternate devices
+ 149 char Technology Concepts serial card - alternate devices
0 = /dev/cut0 Callout device for ttyT0
1 = /dev/cut0 Callout device for ttyT1
...
-150 char Real-Time Linux FIFOs
+ 150 char Real-Time Linux FIFOs
0 = /dev/rtf0 First RTLinux FIFO
1 = /dev/rtf1 Second RTLinux FIFO
...
-151 char DPT I2O SmartRaid V controller
+ 151 char DPT I2O SmartRaid V controller
0 = /dev/dpti0 First DPT I2O adapter
1 = /dev/dpti1 Second DPT I2O adapter
...
-152 char EtherDrive Control Device
+ 152 char EtherDrive Control Device
0 = /dev/etherd/ctl Connect/Disconnect an EtherDrive
1 = /dev/etherd/err Monitor errors
2 = /dev/etherd/raw Raw AoE packet monitor
-152 block EtherDrive Block Devices
+ 152 block EtherDrive Block Devices
0 = /dev/etherd/0 EtherDrive 0
...
255 = /dev/etherd/255 EtherDrive 255
-153 char SPI Bus Interface (sometimes referred to as MicroWire)
+ 153 char SPI Bus Interface (sometimes referred to as MicroWire)
0 = /dev/spi0 First SPI device on the bus
1 = /dev/spi1 Second SPI device on the bus
...
15 = /dev/spi15 Sixteenth SPI device on the bus
-153 block Enhanced Metadisk RAID (EMD) storage units
+ 153 block Enhanced Metadisk RAID (EMD) storage units
0 = /dev/emd/0 First unit
1 = /dev/emd/0p1 Partition 1 on First unit
2 = /dev/emd/0p2 Partition 2 on First unit
@@ -2316,41 +2261,41 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 15.
-154 char Specialix RIO serial card
+ 154 char Specialix RIO serial card
0 = /dev/ttySR0 First RIO port
...
255 = /dev/ttySR255 256th RIO port
-155 char Specialix RIO serial card - alternate devices
+ 155 char Specialix RIO serial card - alternate devices
0 = /dev/cusr0 Callout device for ttySR0
...
255 = /dev/cusr255 Callout device for ttySR255
-156 char Specialix RIO serial card
+ 156 char Specialix RIO serial card
0 = /dev/ttySR256 257th RIO port
...
255 = /dev/ttySR511 512th RIO port
-157 char Specialix RIO serial card - alternate devices
+ 157 char Specialix RIO serial card - alternate devices
0 = /dev/cusr256 Callout device for ttySR256
...
255 = /dev/cusr511 Callout device for ttySR511
-158 char Dialogic GammaLink fax driver
+ 158 char Dialogic GammaLink fax driver
0 = /dev/gfax0 GammaLink channel 0
1 = /dev/gfax1 GammaLink channel 1
...
-159 char RESERVED
+ 159 char RESERVED
-159 block RESERVED
+ 159 block RESERVED
-160 char General Purpose Instrument Bus (GPIB)
+ 160 char General Purpose Instrument Bus (GPIB)
0 = /dev/gpib0 First GPIB bus
1 = /dev/gpib1 Second GPIB bus
...
-160 block Carmel 8-port SATA Disks on First Controller
+ 160 block Carmel 8-port SATA Disks on First Controller
0 = /dev/carmel/0 SATA disk 0 whole disk
1 = /dev/carmel/0p1 SATA disk 0 partition 1
...
@@ -2365,7 +2310,7 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 31.
-161 char IrCOMM devices (IrDA serial/parallel emulation)
+ 161 char IrCOMM devices (IrDA serial/parallel emulation)
0 = /dev/ircomm0 First IrCOMM device
1 = /dev/ircomm1 Second IrCOMM device
...
@@ -2373,7 +2318,7 @@ Your cooperation is appreciated.
17 = /dev/irlpt1 Second IrLPT device
...
-161 block Carmel 8-port SATA Disks on Second Controller
+ 161 block Carmel 8-port SATA Disks on Second Controller
0 = /dev/carmel/8 SATA disk 8 whole disk
1 = /dev/carmel/8p1 SATA disk 8 partition 1
...
@@ -2388,17 +2333,17 @@ Your cooperation is appreciated.
disks (see major number 3) except that the limit on
partitions is 31.
-162 char Raw block device interface
+ 162 char Raw block device interface
0 = /dev/rawctl Raw I/O control device
1 = /dev/raw/raw1 First raw I/O device
2 = /dev/raw/raw2 Second raw I/O device
...
- max minor number of raw device is set by kernel config
- MAX_RAW_DEVS or raw module parameter 'max_raw_devs'
+ max minor number of raw device is set by kernel config
+ MAX_RAW_DEVS or raw module parameter 'max_raw_devs'
-163 char
+ 163 char
-164 char Chase Research AT/PCI-Fast serial card
+ 164 char Chase Research AT/PCI-Fast serial card
0 = /dev/ttyCH0 AT/PCI-Fast board 0, port 0
...
15 = /dev/ttyCH15 AT/PCI-Fast board 0, port 15
@@ -2412,67 +2357,67 @@ Your cooperation is appreciated.
...
63 = /dev/ttyCH63 AT/PCI-Fast board 3, port 15
-165 char Chase Research AT/PCI-Fast serial card - alternate devices
+ 165 char Chase Research AT/PCI-Fast serial card - alternate devices
0 = /dev/cuch0 Callout device for ttyCH0
...
63 = /dev/cuch63 Callout device for ttyCH63
-166 char ACM USB modems
+ 166 char ACM USB modems
0 = /dev/ttyACM0 First ACM modem
1 = /dev/ttyACM1 Second ACM modem
...
-167 char ACM USB modems - alternate devices
+ 167 char ACM USB modems - alternate devices
0 = /dev/cuacm0 Callout device for ttyACM0
1 = /dev/cuacm1 Callout device for ttyACM1
...
-168 char Eracom CSA7000 PCI encryption adaptor
+ 168 char Eracom CSA7000 PCI encryption adaptor
0 = /dev/ecsa0 First CSA7000
1 = /dev/ecsa1 Second CSA7000
...
-169 char Eracom CSA8000 PCI encryption adaptor
+ 169 char Eracom CSA8000 PCI encryption adaptor
0 = /dev/ecsa8-0 First CSA8000
1 = /dev/ecsa8-1 Second CSA8000
...
-170 char AMI MegaRAC remote access controller
+ 170 char AMI MegaRAC remote access controller
0 = /dev/megarac0 First MegaRAC card
1 = /dev/megarac1 Second MegaRAC card
...
-171 char Reserved for IEEE 1394 (Firewire)
+ 171 char Reserved for IEEE 1394 (Firewire)
-172 char Moxa Intellio serial card
+ 172 char Moxa Intellio serial card
0 = /dev/ttyMX0 First Moxa port
1 = /dev/ttyMX1 Second Moxa port
...
127 = /dev/ttyMX127 128th Moxa port
128 = /dev/moxactl Moxa control port
-173 char Moxa Intellio serial card - alternate devices
+ 173 char Moxa Intellio serial card - alternate devices
0 = /dev/cumx0 Callout device for ttyMX0
1 = /dev/cumx1 Callout device for ttyMX1
...
127 = /dev/cumx127 Callout device for ttyMX127
-174 char SmartIO serial card
+ 174 char SmartIO serial card
0 = /dev/ttySI0 First SmartIO port
1 = /dev/ttySI1 Second SmartIO port
...
-175 char SmartIO serial card - alternate devices
+ 175 char SmartIO serial card - alternate devices
0 = /dev/cusi0 Callout device for ttySI0
1 = /dev/cusi1 Callout device for ttySI1
...
-176 char nCipher nFast PCI crypto accelerator
+ 176 char nCipher nFast PCI crypto accelerator
0 = /dev/nfastpci0 First nFast PCI device
1 = /dev/nfastpci1 Second nFast PCI device
...
-177 char TI PCILynx memory spaces
+ 177 char TI PCILynx memory spaces
0 = /dev/pcilynx/aux0 AUX space of first PCILynx card
...
15 = /dev/pcilynx/aux15 AUX space of 16th PCILynx card
@@ -2483,12 +2428,12 @@ Your cooperation is appreciated.
...
47 = /dev/pcilynx/ram15 RAM space of 16th PCILynx card
-178 char Giganet cLAN1xxx virtual interface adapter
+ 178 char Giganet cLAN1xxx virtual interface adapter
0 = /dev/clanvi0 First cLAN adapter
1 = /dev/clanvi1 Second cLAN adapter
...
-179 block MMC block devices
+ 179 block MMC block devices
0 = /dev/mmcblk0 First SD/MMC card
1 = /dev/mmcblk0p1 First partition on first MMC card
8 = /dev/mmcblk1 Second SD/MMC card
@@ -2500,12 +2445,12 @@ Your cooperation is appreciated.
bump the offset between each card to be the configured
value instead of the default 8.
-179 char CCube DVXChip-based PCI products
+ 179 char CCube DVXChip-based PCI products
0 = /dev/dvxirq0 First DVX device
1 = /dev/dvxirq1 Second DVX device
...
-180 char USB devices
+ 180 char USB devices
0 = /dev/usb/lp0 First USB printer
...
15 = /dev/usb/lp15 16th USB printer
@@ -2539,23 +2484,23 @@ Your cooperation is appreciated.
...
209 = /dev/usb/yurex16 16th USB Yurex device
-180 block USB block devices
+ 180 block USB block devices
0 = /dev/uba First USB block device
8 = /dev/ubb Second USB block device
16 = /dev/ubc Third USB block device
- ...
+ ...
-181 char Conrad Electronic parallel port radio clocks
+ 181 char Conrad Electronic parallel port radio clocks
0 = /dev/pcfclock0 First Conrad radio clock
1 = /dev/pcfclock1 Second Conrad radio clock
...
-182 char Picture Elements THR2 binarizer
+ 182 char Picture Elements THR2 binarizer
0 = /dev/pethr0 First THR2 board
1 = /dev/pethr1 Second THR2 board
...
-183 char SST 5136-DN DeviceNet interface
+ 183 char SST 5136-DN DeviceNet interface
0 = /dev/ss5136dn0 First DeviceNet interface
1 = /dev/ss5136dn1 Second DeviceNet interface
...
@@ -2563,12 +2508,12 @@ Your cooperation is appreciated.
This device used to be assigned to major number 144.
It had to be moved due to an unfortunate conflict.
-184 char Picture Elements' video simulator/sender
+ 184 char Picture Elements' video simulator/sender
0 = /dev/pevss0 First sender board
1 = /dev/pevss1 Second sender board
...
-185 char InterMezzo high availability file system
+ 185 char InterMezzo high availability file system
0 = /dev/intermezzo0 First cache manager
1 = /dev/intermezzo1 Second cache manager
...
@@ -2576,48 +2521,48 @@ Your cooperation is appreciated.
See http://web.archive.org/web/20080115195241/
http://inter-mezzo.org/index.html
-186 char Object-based storage control device
+ 186 char Object-based storage control device
0 = /dev/obd0 First obd control device
1 = /dev/obd1 Second obd control device
...
See ftp://ftp.lustre.org/pub/obd for code and information.
-187 char DESkey hardware encryption device
+ 187 char DESkey hardware encryption device
0 = /dev/deskey0 First DES key
1 = /dev/deskey1 Second DES key
...
-188 char USB serial converters
+ 188 char USB serial converters
0 = /dev/ttyUSB0 First USB serial converter
1 = /dev/ttyUSB1 Second USB serial converter
...
-189 char USB serial converters - alternate devices
+ 189 char USB serial converters - alternate devices
0 = /dev/cuusb0 Callout device for ttyUSB0
1 = /dev/cuusb1 Callout device for ttyUSB1
...
-190 char Kansas City tracker/tuner card
+ 190 char Kansas City tracker/tuner card
0 = /dev/kctt0 First KCT/T card
1 = /dev/kctt1 Second KCT/T card
...
-191 char Reserved for PCMCIA
+ 191 char Reserved for PCMCIA
-192 char Kernel profiling interface
+ 192 char Kernel profiling interface
0 = /dev/profile Profiling control device
1 = /dev/profile0 Profiling device for CPU 0
2 = /dev/profile1 Profiling device for CPU 1
...
-193 char Kernel event-tracing interface
+ 193 char Kernel event-tracing interface
0 = /dev/trace Tracing control device
1 = /dev/trace0 Tracing device for CPU 0
2 = /dev/trace1 Tracing device for CPU 1
...
-194 char linVideoStreams (LINVS)
+ 194 char linVideoStreams (LINVS)
0 = /dev/mvideo/status0 Video compression status
1 = /dev/mvideo/stream0 Video stream
2 = /dev/mvideo/frame0 Single compressed frame
@@ -2633,13 +2578,13 @@ Your cooperation is appreciated.
240 = /dev/mvideo/status15 16th device
...
-195 char Nvidia graphics devices
+ 195 char Nvidia graphics devices
0 = /dev/nvidia0 First Nvidia card
1 = /dev/nvidia1 Second Nvidia card
...
255 = /dev/nvidiactl Nvidia card control device
-196 char Tormenta T1 card
+ 196 char Tormenta T1 card
0 = /dev/tor/0 Master control channel for all cards
1 = /dev/tor/1 First DS0
2 = /dev/tor/2 Second DS0
@@ -2649,24 +2594,24 @@ Your cooperation is appreciated.
50 = /dev/tor/50 Second pseudo-channel
...
-197 char OpenTNF tracing facility
+ 197 char OpenTNF tracing facility
0 = /dev/tnf/t0 Trace 0 data extraction
1 = /dev/tnf/t1 Trace 1 data extraction
...
128 = /dev/tnf/status Tracing facility status
130 = /dev/tnf/trace Tracing device
-198 char Total Impact TPMP2 quad coprocessor PCI card
+ 198 char Total Impact TPMP2 quad coprocessor PCI card
0 = /dev/tpmp2/0 First card
1 = /dev/tpmp2/1 Second card
...
-199 char Veritas volume manager (VxVM) volumes
+ 199 char Veritas volume manager (VxVM) volumes
0 = /dev/vx/rdsk/*/* First volume
1 = /dev/vx/rdsk/*/* Second volume
...
-199 block Veritas volume manager (VxVM) volumes
+ 199 block Veritas volume manager (VxVM) volumes
0 = /dev/vx/dsk/*/* First volume
1 = /dev/vx/dsk/*/* Second volume
...
@@ -2674,19 +2619,19 @@ Your cooperation is appreciated.
The namespace in these directories is maintained by
the user space VxVM software.
-200 char Veritas VxVM configuration interface
- 0 = /dev/vx/config Configuration access node
- 1 = /dev/vx/trace Volume i/o trace access node
- 2 = /dev/vx/iod Volume i/o daemon access node
- 3 = /dev/vx/info Volume information access node
- 4 = /dev/vx/task Volume tasks access node
- 5 = /dev/vx/taskmon Volume tasks monitor daemon
+ 200 char Veritas VxVM configuration interface
+ 0 = /dev/vx/config Configuration access node
+ 1 = /dev/vx/trace Volume i/o trace access node
+ 2 = /dev/vx/iod Volume i/o daemon access node
+ 3 = /dev/vx/info Volume information access node
+ 4 = /dev/vx/task Volume tasks access node
+ 5 = /dev/vx/taskmon Volume tasks monitor daemon
-201 char Veritas VxVM dynamic multipathing driver
+ 201 char Veritas VxVM dynamic multipathing driver
0 = /dev/vx/rdmp/* First multipath device
1 = /dev/vx/rdmp/* Second multipath device
...
-201 block Veritas VxVM dynamic multipathing driver
+ 201 block Veritas VxVM dynamic multipathing driver
0 = /dev/vx/dmp/* First multipath device
1 = /dev/vx/dmp/* Second multipath device
...
@@ -2694,28 +2639,28 @@ Your cooperation is appreciated.
The namespace in these directories is maintained by
the user space VxVM software.
-202 char CPU model-specific registers
+ 202 char CPU model-specific registers
0 = /dev/cpu/0/msr MSRs on CPU 0
1 = /dev/cpu/1/msr MSRs on CPU 1
...
-202 block Xen Virtual Block Device
+ 202 block Xen Virtual Block Device
0 = /dev/xvda First Xen VBD whole disk
16 = /dev/xvdb Second Xen VBD whole disk
32 = /dev/xvdc Third Xen VBD whole disk
...
240 = /dev/xvdp Sixteenth Xen VBD whole disk
- Partitions are handled in the same way as for IDE
- disks (see major number 3) except that the limit on
- partitions is 15.
+ Partitions are handled in the same way as for IDE
+ disks (see major number 3) except that the limit on
+ partitions is 15.
-203 char CPU CPUID information
+ 203 char CPU CPUID information
0 = /dev/cpu/0/cpuid CPUID on CPU 0
1 = /dev/cpu/1/cpuid CPUID on CPU 1
...
-204 char Low-density serial ports
+ 204 char Low-density serial ports
0 = /dev/ttyLU0 LinkUp Systems L72xx UART - port 0
1 = /dev/ttyLU1 LinkUp Systems L72xx UART - port 1
2 = /dev/ttyLU2 LinkUp Systems L72xx UART - port 2
@@ -2787,7 +2732,7 @@ Your cooperation is appreciated.
211 = /dev/ttyMAX2 MAX3100 serial port 2
212 = /dev/ttyMAX3 MAX3100 serial port 3
-205 char Low-density serial ports (alternate device)
+ 205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
1 = /dev/culu1 Callout device for ttyLU1
2 = /dev/culu2 Callout device for ttyLU2
@@ -2823,7 +2768,7 @@ Your cooperation is appreciated.
82 = /dev/cuvr0 Callout device for ttyVR0
83 = /dev/cuvr1 Callout device for ttyVR1
-206 char OnStream SC-x0 tape devices
+ 206 char OnStream SC-x0 tape devices
0 = /dev/osst0 First OnStream SCSI tape, mode 0
1 = /dev/osst1 Second OnStream SCSI tape, mode 0
...
@@ -2857,7 +2802,7 @@ Your cooperation is appreciated.
driver as well. The ADR-x0 drives are QIC-157
compliant and don't need osst.
-207 char Compaq ProLiant health feature indicate
+ 207 char Compaq ProLiant health feature indicate
0 = /dev/cpqhealth/cpqw Redirector interface
1 = /dev/cpqhealth/crom EISA CROM
2 = /dev/cpqhealth/cdt Data Table
@@ -2871,17 +2816,17 @@ Your cooperation is appreciated.
10 = /dev/cpqhealth/cram CMOS interface
11 = /dev/cpqhealth/cpci PCI IRQ interface
-208 char User space serial ports
+ 208 char User space serial ports
0 = /dev/ttyU0 First user space serial port
1 = /dev/ttyU1 Second user space serial port
...
-209 char User space serial ports (alternate devices)
+ 209 char User space serial ports (alternate devices)
0 = /dev/cuu0 Callout device for ttyU0
1 = /dev/cuu1 Callout device for ttyU1
...
-210 char SBE, Inc. sync/async serial card
+ 210 char SBE, Inc. sync/async serial card
0 = /dev/sbei/wxcfg0 Configuration device for board 0
1 = /dev/sbei/dld0 Download device for board 0
2 = /dev/sbei/wan00 WAN device, port 0, board 0
@@ -2906,12 +2851,12 @@ Your cooperation is appreciated.
Yes, each board is really spaced 10 (decimal) apart.
-211 char Addinum CPCI1500 digital I/O card
+ 211 char Addinum CPCI1500 digital I/O card
0 = /dev/addinum/cpci1500/0 First CPCI1500 card
1 = /dev/addinum/cpci1500/1 Second CPCI1500 card
...
-212 char LinuxTV.org DVB driver subsystem
+ 212 char LinuxTV.org DVB driver subsystem
0 = /dev/dvb/adapter0/video0 first video decoder of first card
1 = /dev/dvb/adapter0/audio0 first audio decoder of first card
2 = /dev/dvb/adapter0/sec0 (obsolete/unused)
@@ -2929,34 +2874,34 @@ Your cooperation is appreciated.
...
196 = /dev/dvb/adapter3/video0 first video decoder of fourth card
-216 char Bluetooth RFCOMM TTY devices
+ 216 char Bluetooth RFCOMM TTY devices
0 = /dev/rfcomm0 First Bluetooth RFCOMM TTY device
1 = /dev/rfcomm1 Second Bluetooth RFCOMM TTY device
...
-217 char Bluetooth RFCOMM TTY devices (alternate devices)
+ 217 char Bluetooth RFCOMM TTY devices (alternate devices)
0 = /dev/curf0 Callout device for rfcomm0
1 = /dev/curf1 Callout device for rfcomm1
...
-218 char The Logical Company bus Unibus/Qbus adapters
+ 218 char The Logical Company bus Unibus/Qbus adapters
0 = /dev/logicalco/bci/0 First bus adapter
1 = /dev/logicalco/bci/1 Second bus adapter
...
-219 char The Logical Company DCI-1300 digital I/O card
+ 219 char The Logical Company DCI-1300 digital I/O card
0 = /dev/logicalco/dci1300/0 First DCI-1300 card
1 = /dev/logicalco/dci1300/1 Second DCI-1300 card
...
-220 char Myricom Myrinet "GM" board
+ 220 char Myricom Myrinet "GM" board
0 = /dev/myricom/gm0 First Myrinet GM board
1 = /dev/myricom/gmp0 First board "root access"
2 = /dev/myricom/gm1 Second Myrinet GM board
3 = /dev/myricom/gmp1 Second board "root access"
...
-221 char VME bus
+ 221 char VME bus
0 = /dev/bus/vme/m0 First master image
1 = /dev/bus/vme/m1 Second master image
2 = /dev/bus/vme/m2 Third master image
@@ -2971,38 +2916,38 @@ Your cooperation is appreciated.
same interface. For interface documentation see
http://www.vmelinux.org/.
-224 char A2232 serial card
+ 224 char A2232 serial card
0 = /dev/ttyY0 First A2232 port
1 = /dev/ttyY1 Second A2232 port
...
-225 char A2232 serial card (alternate devices)
+ 225 char A2232 serial card (alternate devices)
0 = /dev/cuy0 Callout device for ttyY0
1 = /dev/cuy1 Callout device for ttyY1
...
-226 char Direct Rendering Infrastructure (DRI)
+ 226 char Direct Rendering Infrastructure (DRI)
0 = /dev/dri/card0 First graphics card
1 = /dev/dri/card1 Second graphics card
...
-227 char IBM 3270 terminal Unix tty access
+ 227 char IBM 3270 terminal Unix tty access
1 = /dev/3270/tty1 First 3270 terminal
2 = /dev/3270/tty2 Second 3270 terminal
...
-228 char IBM 3270 terminal block-mode access
+ 228 char IBM 3270 terminal block-mode access
0 = /dev/3270/tub Controlling interface
1 = /dev/3270/tub1 First 3270 terminal
2 = /dev/3270/tub2 Second 3270 terminal
...
-229 char IBM iSeries/pSeries virtual console
+ 229 char IBM iSeries/pSeries virtual console
0 = /dev/hvc0 First console port
1 = /dev/hvc1 Second console port
...
-230 char IBM iSeries virtual tape
+ 230 char IBM iSeries virtual tape
0 = /dev/iseries/vt0 First virtual tape, mode 0
1 = /dev/iseries/vt1 Second virtual tape, mode 0
...
@@ -3033,7 +2978,7 @@ Your cooperation is appreciated.
ioctl()'s can be used to rewind the tape regardless of
the device used to access it.
-231 char InfiniBand
+ 231 char InfiniBand
0 = /dev/infiniband/umad0
1 = /dev/infiniband/umad1
...
@@ -3047,7 +2992,7 @@ Your cooperation is appreciated.
...
159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
-232 char Biometric Devices
+ 232 char Biometric Devices
0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
1 = /dev/biometric/sensor0/iris first iris sensor on first device
2 = /dev/biometric/sensor0/retina first retina sensor on first device
@@ -3060,7 +3005,7 @@ Your cooperation is appreciated.
20 = /dev/biometric/sensor2/fingerprint first fingerprint sensor on third device
...
-233 char PathScale InfiniPath interconnect
+ 233 char PathScale InfiniPath interconnect
0 = /dev/ipath Primary device for programs (any unit)
1 = /dev/ipath0 Access specifically to unit 0
2 = /dev/ipath1 Access specifically to unit 1
@@ -3069,18 +3014,18 @@ Your cooperation is appreciated.
129 = /dev/ipath_sma Device used by Subnet Management Agent
130 = /dev/ipath_diag Device used by diagnostics programs
-234-254 char RESERVED FOR DYNAMIC ASSIGNMENT
+ 234-254 char RESERVED FOR DYNAMIC ASSIGNMENT
Character devices that request a dynamic allocation of major number will
take numbers starting from 254 and downward.
-240-254 block LOCAL/EXPERIMENTAL USE
+ 240-254 block LOCAL/EXPERIMENTAL USE
Allocated for local/experimental use. For devices not
assigned official numbers, these ranges should be
used in order to avoid conflicting with future assignments.
-255 char RESERVED
+ 255 char RESERVED
-255 block RESERVED
+ 255 block RESERVED
This major is reserved to assist the expansion to a
larger number space. No device nodes with this major
@@ -3088,25 +3033,25 @@ Your cooperation is appreciated.
(This is probably not true anymore, but I'll leave it
for now /Torben)
----LARGE MAJORS!!!!!---
+ ---LARGE MAJORS!!!!!---
-256 char Equinox SST multi-port serial boards
+ 256 char Equinox SST multi-port serial boards
0 = /dev/ttyEQ0 First serial port on first Equinox SST board
127 = /dev/ttyEQ127 Last serial port on first Equinox SST board
128 = /dev/ttyEQ128 First serial port on second Equinox SST board
...
1027 = /dev/ttyEQ1027 Last serial port on eighth Equinox SST board
-256 block Resident Flash Disk Flash Translation Layer
+ 256 block Resident Flash Disk Flash Translation Layer
0 = /dev/rfda First RFD FTL layer
16 = /dev/rfdb Second RFD FTL layer
...
240 = /dev/rfdp 16th RFD FTL layer
-257 char Phoenix Technologies Cryptographic Services Driver
+ 257 char Phoenix Technologies Cryptographic Services Driver
0 = /dev/ptlsec Crypto Services Driver
-257 block SSFDC Flash Translation Layer filesystem
+ 257 block SSFDC Flash Translation Layer filesystem
0 = /dev/ssfdca First SSFDC layer
8 = /dev/ssfdcb Second SSFDC layer
16 = /dev/ssfdcc Third SSFDC layer
@@ -3116,210 +3061,21 @@ Your cooperation is appreciated.
48 = /dev/ssfdcg 7th SSFDC layer
56 = /dev/ssfdch 8th SSFDC layer
-258 block ROM/Flash read-only translation layer
+ 258 block ROM/Flash read-only translation layer
0 = /dev/blockrom0 First ROM card's translation layer interface
1 = /dev/blockrom1 Second ROM card's translation layer interface
...
-259 block Block Extended Major
+ 259 block Block Extended Major
Used dynamically to hold additional partition minor
numbers and allow large numbers of partitions per device
-259 char FPGA configuration interfaces
+ 259 char FPGA configuration interfaces
0 = /dev/icap0 First Xilinx internal configuration
1 = /dev/icap1 Second Xilinx internal configuration
-260 char OSD (Object-based-device) SCSI Device
+ 260 char OSD (Object-based-device) SCSI Device
0 = /dev/osd0 First OSD Device
1 = /dev/osd1 Second OSD Device
...
255 = /dev/osd255 256th OSD Device
-
- **** ADDITIONAL /dev DIRECTORY ENTRIES
-
-This section details additional entries that should or may exist in
-the /dev directory. It is preferred that symbolic links use the same
-form (absolute or relative) as is indicated here. Links are
-classified as "hard" or "symbolic" depending on the preferred type of
-link; if possible, the indicated type of link should be used.
-
-
- Compulsory links
-
-These links should exist on all systems:
-
-/dev/fd /proc/self/fd symbolic File descriptors
-/dev/stdin fd/0 symbolic stdin file descriptor
-/dev/stdout fd/1 symbolic stdout file descriptor
-/dev/stderr fd/2 symbolic stderr file descriptor
-/dev/nfsd socksys symbolic Required by iBCS-2
-/dev/X0R null symbolic Required by iBCS-2
-
-Note: /dev/X0R is <letter X>-<digit 0>-<letter R>.
-
- Recommended links
-
-It is recommended that these links exist on all systems:
-
-/dev/core /proc/kcore symbolic Backward compatibility
-/dev/ramdisk ram0 symbolic Backward compatibility
-/dev/ftape qft0 symbolic Backward compatibility
-/dev/bttv0 video0 symbolic Backward compatibility
-/dev/radio radio0 symbolic Backward compatibility
-/dev/i2o* /dev/i2o/* symbolic Backward compatibility
-/dev/scd? sr? hard Alternate SCSI CD-ROM name
-
- Locally defined links
-
-The following links may be established locally to conform to the
-configuration of the system. This is merely a tabulation of existing
-practice, and does not constitute a recommendation. However, if they
-exist, they should have the following uses.
-
-/dev/mouse mouse port symbolic Current mouse device
-/dev/tape tape device symbolic Current tape device
-/dev/cdrom CD-ROM device symbolic Current CD-ROM device
-/dev/cdwriter CD-writer symbolic Current CD-writer device
-/dev/scanner scanner symbolic Current scanner device
-/dev/modem modem port symbolic Current dialout device
-/dev/root root device symbolic Current root filesystem
-/dev/swap swap device symbolic Current swap device
-
-/dev/modem should not be used for a modem which supports dialin as
-well as dialout, as it tends to cause lock file problems. If it
-exists, /dev/modem should point to the appropriate primary TTY device
-(the use of the alternate callout devices is deprecated).
-
-For SCSI devices, /dev/tape and /dev/cdrom should point to the
-``cooked'' devices (/dev/st* and /dev/sr*, respectively), whereas
-/dev/cdwriter and /dev/scanner should point to the appropriate generic
-SCSI devices (/dev/sg*).
-
-/dev/mouse may point to a primary serial TTY device, a hardware mouse
-device, or a socket for a mouse driver program (e.g. /dev/gpmdata).
-
- Sockets and pipes
-
-Non-transient sockets and named pipes may exist in /dev. Common entries are:
-
-/dev/printer socket lpd local socket
-/dev/log socket syslog local socket
-/dev/gpmdata socket gpm mouse multiplexer
-
- Mount points
-
-The following names are reserved for mounting special filesystems
-under /dev. These special filesystems provide kernel interfaces that
-cannot be provided with standard device nodes.
-
-/dev/pts devpts PTY slave filesystem
-/dev/shm tmpfs POSIX shared memory maintenance access
-
- **** TERMINAL DEVICES
-
-Terminal, or TTY devices are a special class of character devices. A
-terminal device is any device that could act as a controlling terminal
-for a session; this includes virtual consoles, serial ports, and
-pseudoterminals (PTYs).
-
-All terminal devices share a common set of capabilities known as line
-disciplines; these include the common terminal line discipline as well
-as SLIP and PPP modes.
-
-All terminal devices are named similarly; this section explains the
-naming and use of the various types of TTYs. Note that the naming
-conventions include several historical warts; some of these are
-Linux-specific, some were inherited from other systems, and some
-reflect Linux outgrowing a borrowed convention.
-
-A hash mark (#) in a device name is used here to indicate a decimal
-number without leading zeroes.
-
- Virtual consoles and the console device
-
-Virtual consoles are full-screen terminal displays on the system video
-monitor. Virtual consoles are named /dev/tty#, with numbering
-starting at /dev/tty1; /dev/tty0 is the current virtual console.
-/dev/tty0 is the device that should be used to access the system video
-card on those architectures for which the frame buffer devices
-(/dev/fb*) are not applicable. Do not use /dev/console
-for this purpose.
-
-The console device, /dev/console, is the device to which system
-messages should be sent, and on which logins should be permitted in
-single-user mode. Starting with Linux 2.1.71, /dev/console is managed
-by the kernel; for previous versions it should be a symbolic link to
-either /dev/tty0, a specific virtual console such as /dev/tty1, or to
-a serial port primary (tty*, not cu*) device, depending on the
-configuration of the system.
-
- Serial ports
-
-Serial ports are RS-232 serial ports and any device which simulates
-one, either in hardware (such as internal modems) or in software (such
-as the ISDN driver.) Under Linux, each serial ports has two device
-names, the primary or callin device and the alternate or callout one.
-Each kind of device is indicated by a different letter. For any
-letter X, the names of the devices are /dev/ttyX# and /dev/cux#,
-respectively; for historical reasons, /dev/ttyS# and /dev/ttyC#
-correspond to /dev/cua# and /dev/cub#. In the future, it should be
-expected that multiple letters will be used; all letters will be upper
-case for the "tty" device (e.g. /dev/ttyDP#) and lower case for the
-"cu" device (e.g. /dev/cudp#).
-
-The names /dev/ttyQ# and /dev/cuq# are reserved for local use.
-
-The alternate devices provide for kernel-based exclusion and somewhat
-different defaults than the primary devices. Their main purpose is to
-allow the use of serial ports with programs with no inherent or broken
-support for serial ports. Their use is deprecated, and they may be
-removed from a future version of Linux.
-
-Arbitration of serial ports is provided by the use of lock files with
-the names /var/lock/LCK..ttyX#. The contents of the lock file should
-be the PID of the locking process as an ASCII number.
-
-It is common practice to install links such as /dev/modem
-which point to serial ports. In order to ensure proper locking in the
-presence of these links, it is recommended that software chase
-symlinks and lock all possible names; additionally, it is recommended
-that a lock file be installed with the corresponding alternate
-device. In order to avoid deadlocks, it is recommended that the locks
-are acquired in the following order, and released in the reverse:
-
- 1. The symbolic link name, if any (/var/lock/LCK..modem)
- 2. The "tty" name (/var/lock/LCK..ttyS2)
- 3. The alternate device name (/var/lock/LCK..cua2)
-
-In the case of nested symbolic links, the lock files should be
-installed in the order the symlinks are resolved.
-
-Under no circumstances should an application hold a lock while waiting
-for another to be released. In addition, applications which attempt
-to create lock files for the corresponding alternate device names
-should take into account the possibility of being used on a non-serial
-port TTY, for which no alternate device would exist.
-
- Pseudoterminals (PTYs)
-
-Pseudoterminals, or PTYs, are used to create login sessions or provide
-other capabilities requiring a TTY line discipline (including SLIP or
-PPP capability) to arbitrary data-generation processes. Each PTY has
-a master side, named /dev/pty[p-za-e][0-9a-f], and a slave side, named
-/dev/tty[p-za-e][0-9a-f]. The kernel arbitrates the use of PTYs by
-allowing each master side to be opened only once.
-
-Once the master side has been opened, the corresponding slave device
-can be used in the same manner as any TTY device. The master and
-slave devices are connected by the kernel, generating the equivalent
-of a bidirectional pipe with TTY capabilities.
-
-Recent versions of the Linux kernels and GNU libc contain support for
-the System V/Unix98 naming scheme for PTYs, which assigns a common
-device, /dev/ptmx, to all the masters (opening it will automatically
-give you a previously unassigned PTY) and a subdirectory, /dev/pts,
-for the slaves; the slaves are named with decimal integers (/dev/pts/#
-in our notation). This removes the problem of exhausting the
-namespace and enables the kernel to automatically create the device
-nodes for the slaves on demand using the "devpts" filesystem.
-
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
new file mode 100644
index 000000000000..88adcfdf5b2b
--- /dev/null
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -0,0 +1,353 @@
+Dynamic debug
++++++++++++++
+
+
+Introduction
+============
+
+This document describes how to use the dynamic debug (dyndbg) feature.
+
+Dynamic debug is designed to allow you to dynamically enable/disable
+kernel code to obtain additional kernel information. Currently, if
+``CONFIG_DYNAMIC_DEBUG`` is set, then all ``pr_debug()``/``dev_dbg()`` and
+``print_hex_dump_debug()``/``print_hex_dump_bytes()`` calls can be dynamically
+enabled per-callsite.
+
+If ``CONFIG_DYNAMIC_DEBUG`` is not set, ``print_hex_dump_debug()`` is just a
+shortcut for ``print_hex_dump(KERN_DEBUG)``.
+
+For ``print_hex_dump_debug()``/``print_hex_dump_bytes()``, the format string
+is its ``prefix_str`` argument if that is a constant string, or ``hexdump``
+if ``prefix_str`` is built dynamically.
+
+Dynamic debug has even more useful features:
+
+ * A simple query language allows turning debugging statements on and
+   off by matching any combination of 0 or 1 of:
+
+ - source filename
+ - function name
+ - line number (including ranges of line numbers)
+ - module name
+ - format string
+
+ * Provides a debugfs control file: ``<debugfs>/dynamic_debug/control``
+ which can be read to display the complete list of known debug
+ statements, to help guide you
+
+Controlling Dynamic Debug Behaviour
+===================================
+
+The behaviour of ``pr_debug()``/``dev_dbg()`` is controlled by writing to a
+control file in the 'debugfs' filesystem. Thus, you must first mount
+the debugfs filesystem in order to make use of this feature.
+Subsequently, we refer to the control file as
+``<debugfs>/dynamic_debug/control``. For example, if you want to enable
+printing from source file ``svcsock.c``, line 1603, you simply do::
+
+ nullarbor:~ # echo 'file svcsock.c line 1603 +p' >
+ <debugfs>/dynamic_debug/control
+
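+To turn the same callsite off again, write the matching query with
+``-p`` (a sketch mirroring the example above)::
+
+  nullarbor:~ # echo 'file svcsock.c line 1603 -p' >
+                                <debugfs>/dynamic_debug/control
+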
+If you make a mistake with the syntax, the write will fail thus::
+
+ nullarbor:~ # echo 'file svcsock.c wtf 1 +p' >
+ <debugfs>/dynamic_debug/control
+ -bash: echo: write error: Invalid argument
+
+Viewing Dynamic Debug Behaviour
+===============================
+
+You can view the currently configured behaviour of all the debug
+statements via::
+
+ nullarbor:~ # cat <debugfs>/dynamic_debug/control
+ # filename:lineno [module]function flags format
+ /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:323 [svcxprt_rdma]svc_rdma_cleanup =_ "SVCRDMA Module Removed, deregister RPC RDMA transport\012"
+ /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:341 [svcxprt_rdma]svc_rdma_init =_ "\011max_inline : %d\012"
+ /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:340 [svcxprt_rdma]svc_rdma_init =_ "\011sq_depth : %d\012"
+ /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:338 [svcxprt_rdma]svc_rdma_init =_ "\011max_requests : %d\012"
+ ...
+
+
+You can also apply standard Unix text manipulation filters to this
+data, e.g.::
+
+ nullarbor:~ # grep -i rdma <debugfs>/dynamic_debug/control | wc -l
+ 62
+
+ nullarbor:~ # grep -i tcp <debugfs>/dynamic_debug/control | wc -l
+ 42
+
+The third column shows the currently enabled flags for each debug
+statement callsite (see below for definitions of the flags). The
+default value, with no flags enabled, is ``=_``. So you can view all
+the debug statement callsites with any non-default flags::
+
+ nullarbor:~ # awk '$3 != "=_"' <debugfs>/dynamic_debug/control
+ # filename:lineno [module]function flags format
+ /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svcsock.c:1603 [sunrpc]svc_send p "svc_process: st_sendto returned %d\012"
+
+Command Language Reference
+==========================
+
+At the lexical level, a command comprises a sequence of words separated
+by spaces or tabs. So these are all equivalent::
+
+  nullarbor:~ # echo 'file svcsock.c line 1603 +p' >
+ <debugfs>/dynamic_debug/control
+  nullarbor:~ # echo '  file  svcsock.c  line  1603 +p ' >
+ <debugfs>/dynamic_debug/control
+ nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+ <debugfs>/dynamic_debug/control
+
+Command submissions are bounded by a write() system call.
+Multiple commands can be written together, separated by ``;`` or ``\n``::
+
+ ~# echo "func pnpacpi_get_resources +p; func pnp_assign_mem +p" \
+ > <debugfs>/dynamic_debug/control
+
+If your query set is big, you can batch them too::
+
+ ~# cat query-batch-file > <debugfs>/dynamic_debug/control
+
+Another way is to use wildcards. The match rule supports ``*`` (matches
+zero or more characters) and ``?`` (matches exactly one character). For
+example, you can match all usb drivers::
+
+ ~# echo "file drivers/usb/* +p" > <debugfs>/dynamic_debug/control
+
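+Similarly, ``?`` stands in for exactly one character; as an illustrative
+sketch (the path pattern here is only an example)::
+
+    ~# echo "file drivers/usb/host/ehci-?.c +p" > <debugfs>/dynamic_debug/control
+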
+At the syntactical level, a command comprises a sequence of match
+specifications, followed by a flags change specification::
+
+ command ::= match-spec* flags-spec
+
+The match-specs are used to choose a subset of the known ``pr_debug()``
+callsites to which to apply the flags-spec. Think of them as a query
+with implicit ANDs between each pair. Note that an empty list of
+match-specs will select all debug statement callsites.
+
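+For instance, two match-specs combine with an implicit AND; a short
+sketch (reusing names from the examples further below) would be::
+
+    ~# echo 'module sunrpc func svc_tcp_accept +p' > <debugfs>/dynamic_debug/control
+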
+A match specification comprises a keyword, which controls the
+attribute of the callsite to be compared, and a value to compare
+against. Possible keywords are::
+
+ match-spec ::= 'func' string |
+ 'file' string |
+ 'module' string |
+ 'format' string |
+ 'line' line-range
+
+ line-range ::= lineno |
+ '-'lineno |
+ lineno'-' |
+ lineno'-'lineno
+
+ lineno ::= unsigned-int
+
+.. note::
+
+   ``line-range`` cannot contain spaces, e.g.
+   "1-30" is a valid range but "1 - 30" is not.
+
+
+The meanings of each keyword are:
+
+func
+ The given string is compared against the function name
+ of each callsite. Example::
+
+ func svc_tcp_accept
+
+file
+ The given string is compared against either the full pathname, the
+ src-root relative pathname, or the basename of the source file of
+ each callsite. Examples::
+
+ file svcsock.c
+ file kernel/freezer.c
+ file /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svcsock.c
+
+module
+ The given string is compared against the module name
+ of each callsite. The module name is the string as
+ seen in ``lsmod``, i.e. without the directory or the ``.ko``
+ suffix and with ``-`` changed to ``_``. Examples::
+
+ module sunrpc
+ module nfsd
+
+format
+ The given string is searched for in the dynamic debug format
+ string. Note that the string does not need to match the
+ entire format, only some part. Whitespace and other
+ special characters can be escaped using C octal character
+ escape ``\ooo`` notation, e.g. the space character is ``\040``.
+ Alternatively, the string can be enclosed in double quote
+ characters (``"``) or single quote characters (``'``).
+ Examples::
+
+ format svcrdma: // many of the NFS/RDMA server pr_debugs
+ format readahead // some pr_debugs in the readahead cache
+ format nfsd:\040SETATTR // one way to match a format with whitespace
+ format "nfsd: SETATTR" // a neater way to match a format with whitespace
+ format 'nfsd: SETATTR' // yet another way to match a format with whitespace
+
+line
+ The given line number or range of line numbers is compared
+ against the line number of each ``pr_debug()`` callsite. A single
+ line number matches the callsite line number exactly. A
+ range of line numbers matches any callsite between the first
+ and last line number inclusive. An empty first number means
+ the first line in the file, and an empty last number means the
+ last line in the file. Examples::
+
+ line 1603 // exactly line 1603
+ line 1600-1605 // the six lines from line 1600 to line 1605
+ line -1605 // the 1605 lines from line 1 to line 1605
+ line 1600- // all lines from line 1600 to the end of the file
+
+The flags specification comprises a change operation followed
+by one or more flag characters. The change operation is one
+of the characters::
+
+ - remove the given flags
+ + add the given flags
+ = set the flags to the given flags
+
+The flags are::
+
+ p Enables the pr_debug() callsite.
+ f Includes the function name in the printed message.
+ l Includes the line number in the printed message.
+ m Includes the module name in the printed message.
+ t Includes the thread ID in messages not generated from interrupt context.
+ _ No flags are set. (Or'd with others on input.)
+
+For ``print_hex_dump_debug()`` and ``print_hex_dump_bytes()``, only the ``p``
+flag is meaningful; the other flags are ignored.
+
+For display, the flags are preceded by ``=``
+(mnemonic: what the flags are currently equal to).
+
+Note the regexp ``^[-+=][flmpt_]+$`` matches a flags specification.
+To clear all flags at once, use ``=_`` or ``-flmpt``.
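+
+For instance, to enable the messages in ``svc_process()`` and also
+include the module name and line number in each message (one possible
+combination of the flags above)::
+
+ nullarbor:~ # echo -n 'func svc_process +pml' >
+ <debugfs>/dynamic_debug/control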
+
+
+Debug messages during Boot Process
+==================================
+
+To activate debug messages for core code and built-in modules during
+the boot process, even before userspace and debugfs exist, use
+``dyndbg="QUERY"``, ``module.dyndbg="QUERY"``, or ``ddebug_query="QUERY"``
+(``ddebug_query`` is obsoleted by ``dyndbg`` and is deprecated). QUERY follows
+the syntax described above, but must not exceed 1023 characters. Your
+bootloader may impose lower limits.
+
+These ``dyndbg`` params are processed just after the ddebug tables are
+processed, as part of the arch_initcall. Thus you can use this boot
+parameter to enable debug messages in any code that runs after this
+arch_initcall.
+
+On an x86 system, for example, ACPI enablement is a subsys_initcall, and::
+
+ dyndbg="file ec.c +p"
+
+will show early Embedded Controller transactions during ACPI setup if
+your machine (typically a laptop) has an Embedded Controller.
+PCI (or other device) initialization is also a good candidate for using
+this boot parameter for debugging purposes.
+
+If the ``foo`` module is not built in, ``foo.dyndbg`` will still be processed
+at boot time, without effect, but will be reprocessed when the module is
+loaded later. ``ddebug_query=`` and bare ``dyndbg=`` are only processed at
+boot.
+
+
+Debug Messages at Module Initialization Time
+============================================
+
+When ``modprobe foo`` is called, modprobe scans ``/proc/cmdline`` for
+``foo.params``, strips ``foo.``, and passes them to the kernel along with
+params given in modprobe args or ``/etc/modprobe.d/*.conf`` files,
+in the following order:
+
+1. parameters given via ``/etc/modprobe.d/*.conf``::
+
+ options foo dyndbg=+pt
+ options foo dyndbg # defaults to +p
+
+2. ``foo.dyndbg`` as given in boot args, ``foo.`` is stripped and passed::
+
+ foo.dyndbg=" func bar +p; func buz +mp"
+
+3. args to modprobe::
+
+ modprobe foo dyndbg==pmf # override previous settings
+
+These ``dyndbg`` queries are applied in order, with last having final say.
+This allows boot args to override or modify those from ``/etc/modprobe.d``
+(sensible, since 1 is system wide, 2 is kernel or boot specific), and
+modprobe args to override both.
+
+In the ``foo.dyndbg="QUERY"`` form, the query must exclude ``module foo``.
+``foo`` is extracted from the param-name, and applied to each query in
+``QUERY``, and only 1 match-spec of each type is allowed.
+
+The ``dyndbg`` option is a "fake" module parameter, which means:
+
+- modules do not need to define it explicitly
+- every module gets it tacitly, whether it uses pr_debug or not
+- it doesn't appear in ``/sys/module/$module/parameters/``.
+ To see it, grep the control file (as shown below), or inspect
+ ``/proc/cmdline``.
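+
+For example, to see the ``dyndbg`` settings that are active for a
+hypothetical module ``foo`` (purely illustrative; substitute your module
+name), grep for its bracketed name in the control file::
+
+ nullarbor:~ # grep '\[foo\]' <debugfs>/dynamic_debug/control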
+
+For ``CONFIG_DYNAMIC_DEBUG`` kernels, any settings given at boot time (or
+enabled by the ``-DDEBUG`` flag during compilation) can be disabled later via
+the debugfs interface if the debug messages are no longer needed::
+
+ echo "module module_name -p" > <debugfs>/dynamic_debug/control
+
+Examples
+========
+
+::
+
+ // enable the message at line 1603 of file svcsock.c
+ nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all the messages in file svcsock.c
+ nullarbor:~ # echo -n 'file svcsock.c +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all the messages in the NFS server module
+ nullarbor:~ # echo -n 'module nfsd +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all 12 messages in the function svc_process()
+ nullarbor:~ # echo -n 'func svc_process +p' >
+ <debugfs>/dynamic_debug/control
+
+ // disable all 12 messages in the function svc_process()
+ nullarbor:~ # echo -n 'func svc_process -p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable messages for NFS calls READ, READLINK, READDIR and READDIR+.
+ nullarbor:~ # echo -n 'format "nfsd: READ" +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable messages in files of which the paths include string "usb"
+ nullarbor:~ # echo -n '*usb* +p' > <debugfs>/dynamic_debug/control
+
+ // enable all messages
+ nullarbor:~ # echo -n '+p' > <debugfs>/dynamic_debug/control
+
+ // add module, function to all enabled messages
+ nullarbor:~ # echo -n '+mf' > <debugfs>/dynamic_debug/control
+
+ // boot-args example, with newlines and comments for readability
+ Kernel command line: ...
+ // see what's going on in dyndbg=value processing
+ dynamic_debug.verbose=1
+ // enable pr_debugs in 2 builtins, #cmt is stripped
+ dyndbg="module params +p #cmt ; module sys +p"
+ // enable pr_debugs in 2 functions in a module loaded later
+ pc87360.dyndbg="func pc87360_init_device +p; func pc87360_find +p"
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
new file mode 100644
index 000000000000..2681cbd24cdd
--- /dev/null
+++ b/Documentation/admin-guide/index.rst
@@ -0,0 +1,68 @@
+The Linux kernel user's and administrator's guide
+=================================================
+
+The following is a collection of user-oriented documents that have been
+added to the kernel over time. There is, as yet, little overall order or
+organization here — this material was not written to be a single, coherent
+document! With luck things will improve quickly over time.
+
+This initial section contains overall information, including the README
+file describing the kernel as a whole, documentation on kernel parameters,
+etc.
+
+.. toctree::
+ :maxdepth: 1
+
+ README
+ kernel-parameters
+ devices
+
+Here is a set of documents aimed at users who are trying to track down
+problems and bugs in particular.
+
+.. toctree::
+ :maxdepth: 1
+
+ reporting-bugs
+ security-bugs
+ bug-hunting
+ bug-bisect
+ tainted-kernels
+ ramoops
+ dynamic-debug-howto
+ init
+
+This is the beginning of a section with information of interest to
+application developers. Documents covering various aspects of the kernel
+ABI will be found here.
+
+.. toctree::
+ :maxdepth: 1
+
+ sysfs-rules
+
+The rest of this manual consists of various unordered guides on how to
+configure specific aspects of kernel behavior to your liking.
+
+.. toctree::
+ :maxdepth: 1
+
+ initrd
+ serial-console
+ braille-console
+ parport
+ md
+ module-signing
+ sysrq
+ unicode
+ vga-softcursor
+ binfmt-misc
+ mono
+ java
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/init.txt b/Documentation/admin-guide/init.rst
index 535ad5e82b98..e89d97f31eaf 100644
--- a/Documentation/init.txt
+++ b/Documentation/admin-guide/init.rst
@@ -5,6 +5,7 @@ OK, so you've got this pretty unintuitive message (currently located
in init/main.c) and are wondering what the H*** went wrong.
Some high-level reasons for failure (listed roughly in order of execution)
to load the init binary are:
+
A) Unable to mount root FS
B) init binary doesn't exist on rootfs
C) broken console device
@@ -12,37 +13,39 @@ D) binary exists but dependencies not available
E) binary cannot be loaded
Detailed explanations:
-0) Set "debug" kernel parameter (in bootloader config file or CONFIG_CMDLINE)
+
+A) Set "debug" kernel parameter (in bootloader config file or CONFIG_CMDLINE)
to get more detailed kernel messages.
-A) make sure you have the correct root FS type
- (and root= kernel parameter points to the correct partition),
+B) make sure you have the correct root FS type
+ (and ``root=`` kernel parameter points to the correct partition),
required drivers such as storage hardware (such as SCSI or USB!)
and filesystem (ext3, jffs2 etc.) are builtin (alternatively as modules,
to be pre-loaded by an initrd)
-C) Possibly a conflict in console= setup --> initial console unavailable.
+C) Possibly a conflict in ``console=`` setup --> initial console unavailable.
E.g. some serial consoles are unreliable due to serial IRQ issues (e.g.
missing interrupt-based configuration).
- Try using a different console= device or e.g. netconsole= .
+ Try using a different ``console=`` device or e.g. ``netconsole=``.
D) e.g. required library dependencies of the init binary such as
- /lib/ld-linux.so.2 missing or broken. Use readelf -d <INIT>|grep NEEDED
- to find out which libraries are required.
+ ``/lib/ld-linux.so.2`` missing or broken. Use
+ ``readelf -d <INIT>|grep NEEDED`` to find out which libraries are required.
E) make sure the binary's architecture matches your hardware.
E.g. i386 vs. x86_64 mismatch, or trying to load x86 on ARM hardware.
In case you tried loading a non-binary file here (shell script?),
you should make sure that the script specifies an interpreter in its shebang
- header line (#!/...) that is fully working (including its library
+ header line (``#!/...``) that is fully working (including its library
dependencies). And before tackling scripts, better first test a simple
- non-script binary such as /bin/sh and confirm its successful execution.
- To find out more, add code to init/main.c to display kernel_execve()s
+ non-script binary such as ``/bin/sh`` and confirm its successful execution.
+ To find out more, add code to ``init/main.c`` to display kernel_execve()s
return values.
Please extend this explanation whenever you find new failure causes
(after all loading the init binary is a CRITICAL and hard transition step
which needs to be made as painless as possible), then submit patch to LKML.
Further TODOs:
-- Implement the various run_init_process() invocations via a struct array
- which can then store the kernel_execve() result value and on failure
- log it all by iterating over _all_ results (very important usability fix).
+
+- Implement the various ``run_init_process()`` invocations via a struct array
+ which can then store the ``kernel_execve()`` result value and on failure
+ log it all by iterating over **all** results (very important usability fix).
- try to make the implementation itself more helpful in general,
e.g. by providing additional error messages at affected places.
diff --git a/Documentation/initrd.txt b/Documentation/admin-guide/initrd.rst
index 4e1839ccb555..a03dabaaf3a3 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/admin-guide/initrd.rst
@@ -2,7 +2,7 @@ Using the initial RAM disk (initrd)
===================================
Written 1996,2000 by Werner Almesberger <werner.almesberger@epfl.ch> and
- Hans Lermen <lermen@fgan.de>
+Hans Lermen <lermen@fgan.de>
initrd provides the capability to load a RAM disk by the boot loader.
@@ -16,7 +16,7 @@ where the kernel comes up with a minimum set of compiled-in drivers, and
where additional modules are loaded from initrd.
This document gives a brief overview of the use of initrd. A more detailed
-discussion of the boot process can be found in [1].
+discussion of the boot process can be found in [#f1]_.
Operation
@@ -27,10 +27,10 @@ When using initrd, the system typically boots as follows:
1) the boot loader loads the kernel and the initial RAM disk
2) the kernel converts initrd into a "normal" RAM disk and
frees the memory used by initrd
- 3) if the root device is not /dev/ram0, the old (deprecated)
+ 3) if the root device is not ``/dev/ram0``, the old (deprecated)
change_root procedure is followed. see the "Obsolete root change
mechanism" section below.
- 4) root device is mounted. if it is /dev/ram0, the initrd image is
+ 4) root device is mounted. if it is ``/dev/ram0``, the initrd image is
then mounted as root
5) /sbin/init is executed (this can be any valid executable, including
shell scripts; it is run with uid 0 and can do basically everything
@@ -38,7 +38,7 @@ When using initrd, the system typically boots as follows:
6) init mounts the "real" root file system
7) init places the root file system at the root directory using the
pivot_root system call
- 8) init execs the /sbin/init on the new root filesystem, performing
+ 8) init execs the ``/sbin/init`` on the new root filesystem, performing
the usual boot sequence
9) the initrd file system is removed
@@ -51,7 +51,7 @@ be accessible.
Boot command-line options
-------------------------
-initrd adds the following new options:
+initrd adds the following new options::
initrd=<path> (e.g. LOADLIN)
@@ -83,36 +83,36 @@ Recent kernels have support for populating a ramdisk from a compressed cpio
archive. On such systems, the creation of a ramdisk image doesn't need to
involve special block devices or loopbacks; you merely create a directory on
disk with the desired initrd content, cd to that directory, and run (as an
-example):
+example)::
-find . | cpio --quiet -H newc -o | gzip -9 -n > /boot/imagefile.img
+ find . | cpio --quiet -H newc -o | gzip -9 -n > /boot/imagefile.img
-Examining the contents of an existing image file is just as simple:
+Examining the contents of an existing image file is just as simple::
-mkdir /tmp/imagefile
-cd /tmp/imagefile
-gzip -cd /boot/imagefile.img | cpio -imd --quiet
+ mkdir /tmp/imagefile
+ cd /tmp/imagefile
+ gzip -cd /boot/imagefile.img | cpio -imd --quiet
Installation
------------
First, a directory for the initrd file system has to be created on the
-"normal" root file system, e.g.
+"normal" root file system, e.g.::
-# mkdir /initrd
+ # mkdir /initrd
-The name is not relevant. More details can be found on the pivot_root(2)
-man page.
+The name is not relevant. More details can be found on the
+:manpage:`pivot_root(2)` man page.
If the root file system is created during the boot procedure (i.e. if
you're building an install floppy), the root file system creation
-procedure should create the /initrd directory.
+procedure should create the ``/initrd`` directory.
If initrd will not be mounted in some cases, its content is still
-accessible if the following device has been created:
+accessible if the following device has been created::
-# mknod /dev/initrd b 1 250
-# chmod 400 /dev/initrd
+ # mknod /dev/initrd b 1 250
+ # chmod 400 /dev/initrd
Second, the kernel has to be compiled with RAM disk support and with
support for the initial RAM disk enabled. Also, at least all components
@@ -131,60 +131,76 @@ kernels, at least three types of devices are suitable for that:
We'll describe the loopback device method:
1) make sure loopback block devices are configured into the kernel
- 2) create an empty file system of the appropriate size, e.g.
- # dd if=/dev/zero of=initrd bs=300k count=1
- # mke2fs -F -m0 initrd
+ 2) create an empty file system of the appropriate size, e.g.::
+
+ # dd if=/dev/zero of=initrd bs=300k count=1
+ # mke2fs -F -m0 initrd
+
(if space is critical, you may want to use the Minix FS instead of Ext2)
- 3) mount the file system, e.g.
- # mount -t ext2 -o loop initrd /mnt
- 4) create the console device:
+ 3) mount the file system, e.g.::
+
+ # mount -t ext2 -o loop initrd /mnt
+
+ 4) create the console device::
+
# mkdir /mnt/dev
# mknod /mnt/dev/console c 5 1
+
5) copy all the files that are needed to properly use the initrd
- environment. Don't forget the most important file, /sbin/init
- Note that /sbin/init's permissions must include "x" (execute).
+ environment. Don't forget the most important file, ``/sbin/init``
+
+ .. note:: ``/sbin/init`` permissions must include "x" (execute).
+
6) correct operation the initrd environment can frequently be tested
- even without rebooting with the command
- # chroot /mnt /sbin/init
+ even without rebooting with the command::
+
+ # chroot /mnt /sbin/init
+
This is of course limited to initrds that do not interfere with the
general system state (e.g. by reconfiguring network interfaces,
 overwriting mounted devices, trying to start already running daemons,
etc. Note however that it is usually possible to use pivot_root in
such a chroot'ed initrd environment.)
- 7) unmount the file system
- # umount /mnt
+ 7) unmount the file system::
+
+ # umount /mnt
+
8) the initrd is now in the file "initrd". Optionally, it can now be
- compressed
- # gzip -9 initrd
+ compressed::
+
+ # gzip -9 initrd
For experimenting with initrd, you may want to take a rescue floppy and
-only add a symbolic link from /sbin/init to /bin/sh. Alternatively, you
-can try the experimental newlib environment [2] to create a small
+only add a symbolic link from ``/sbin/init`` to ``/bin/sh``. Alternatively, you
+can try the experimental newlib environment [#f2]_ to create a small
initrd.
Finally, you have to boot the kernel and load initrd. Almost all Linux
boot loaders support initrd. Since the boot process is still compatible
with an older mechanism, the following boot command line parameters
-have to be given:
+have to be given::
root=/dev/ram0 rw
(rw is only necessary if writing to the initrd file system.)
-With LOADLIN, you simply execute
+With LOADLIN, you simply execute::
LOADLIN <kernel> initrd=<disk_image>
-e.g. LOADLIN C:\LINUX\BZIMAGE initrd=C:\LINUX\INITRD.GZ root=/dev/ram0 rw
-With LILO, you add the option INITRD=<path> to either the global section
-or to the section of the respective kernel in /etc/lilo.conf, and pass
-the options using APPEND, e.g.
+e.g.::
+
+ LOADLIN C:\LINUX\BZIMAGE initrd=C:\LINUX\INITRD.GZ root=/dev/ram0 rw
+
+With LILO, you add the option ``INITRD=<path>`` to either the global section
+or to the section of the respective kernel in ``/etc/lilo.conf``, and pass
+the options using APPEND, e.g.::
image = /bzImage
initrd = /boot/initrd.gz
append = "root=/dev/ram0 rw"
-and run /sbin/lilo
+and run ``/sbin/lilo``
For other boot loaders, please refer to the respective documentation.
@@ -204,33 +220,33 @@ The procedure involves the following steps:
- unmounting the initrd file system and de-allocating the RAM disk
Mounting the new root file system is easy: it just needs to be mounted on
-a directory under the current root. Example:
+a directory under the current root. Example::
-# mkdir /new-root
-# mount -o ro /dev/hda1 /new-root
+ # mkdir /new-root
+ # mount -o ro /dev/hda1 /new-root
The root change is accomplished with the pivot_root system call, which
-is also available via the pivot_root utility (see pivot_root(8) man
-page; pivot_root is distributed with util-linux version 2.10h or higher
-[3]). pivot_root moves the current root to a directory under the new
+is also available via the ``pivot_root`` utility (see :manpage:`pivot_root(8)`
+man page; ``pivot_root`` is distributed with util-linux version 2.10h or higher
+[#f3]_). ``pivot_root`` moves the current root to a directory under the new
root, and puts the new root at its place. The directory for the old root
-must exist before calling pivot_root. Example:
+must exist before calling ``pivot_root``. Example::
-# cd /new-root
-# mkdir initrd
-# pivot_root . initrd
+ # cd /new-root
+ # mkdir initrd
+ # pivot_root . initrd
Now, the init process may still access the old root via its
executable, shared libraries, standard input/output/error, and its
current root directory. All these references are dropped by the
-following command:
+following command::
-# exec chroot . what-follows <dev/console >dev/console 2>&1
+ # exec chroot . what-follows <dev/console >dev/console 2>&1
-Where what-follows is a program under the new root, e.g. /sbin/init
+Where what-follows is a program under the new root, e.g. ``/sbin/init``
If the new root file system will be used with udev and has no valid
-/dev directory, udev must be initialized before invoking chroot in order
-to provide /dev/console.
+``/dev`` directory, udev must be initialized before invoking chroot in order
+to provide ``/dev/console``.
Note: implementation details of pivot_root may change with time. In order
to ensure compatibility, the following points should be observed:
@@ -244,13 +260,13 @@ to ensure compatibility, the following points should be observed:
- use relative paths for dev/console in the exec command
Now, the initrd can be unmounted and the memory allocated by the RAM
-disk can be freed:
+disk can be freed::
-# umount /initrd
-# blockdev --flushbufs /dev/ram0
+ # umount /initrd
+ # blockdev --flushbufs /dev/ram0
It is also possible to use initrd with an NFS-mounted root, see the
-pivot_root(8) man page for details.
+:manpage:`pivot_root(8)` man page for details.
Usage scenarios
@@ -263,21 +279,21 @@ as follows:
1) system boots from floppy or other media with a minimal kernel
(e.g. support for RAM disks, initrd, a.out, and the Ext2 FS) and
loads initrd
- 2) /sbin/init determines what is needed to (1) mount the "real" root FS
+ 2) ``/sbin/init`` determines what is needed to (1) mount the "real" root FS
(i.e. device type, device drivers, file system) and (2) the
distribution media (e.g. CD-ROM, network, tape, ...). This can be
done by asking the user, by auto-probing, or by using a hybrid
approach.
- 3) /sbin/init loads the necessary kernel modules
- 4) /sbin/init creates and populates the root file system (this doesn't
+ 3) ``/sbin/init`` loads the necessary kernel modules
+ 4) ``/sbin/init`` creates and populates the root file system (this doesn't
have to be a very usable system yet)
- 5) /sbin/init invokes pivot_root to change the root file system and
+ 5) ``/sbin/init`` invokes ``pivot_root`` to change the root file system and
execs - via chroot - a program that continues the installation
6) the boot loader is installed
7) the boot loader is configured to load an initrd with the set of
- modules that was used to bring up the system (e.g. /initrd can be
+ modules that was used to bring up the system (e.g. ``/initrd`` can be
modified, then unmounted, and finally, the image is written from
- /dev/ram0 or /dev/rd/0 to a file)
+ ``/dev/ram0`` or ``/dev/rd/0`` to a file)
8) now the system is bootable and additional installation tasks can be
performed
@@ -290,7 +306,7 @@ different hardware configurations in a single administrative domain. In
such cases, it is desirable to generate only a small set of kernels
(ideally only one) and to keep the system-specific part of configuration
information as small as possible. In this case, a common initrd could be
-generated with all the necessary modules. Then, only /sbin/init or a file
+generated with all the necessary modules. Then, only ``/sbin/init`` or a file
read by it would have to be different.
A third scenario is more convenient recovery disks, because information
@@ -301,9 +317,9 @@ auto-detection).
 Last but not least, CD-ROM distributors may use it for better installation
from CD, e.g. by using a boot floppy and bootstrapping a bigger RAM disk
-via initrd from CD; or by booting via a loader like LOADLIN or directly
+via initrd from CD; or by booting via a loader like ``LOADLIN`` or directly
from the CD-ROM, and loading the RAM disk from CD without need of
-floppies.
+floppies.
Obsolete root change mechanism
@@ -316,51 +332,52 @@ continued availability.
It works by mounting the "real" root device (i.e. the one set with rdev
in the kernel image or with root=... at the boot command line) as the
root file system when linuxrc exits. The initrd file system is then
-unmounted, or, if it is still busy, moved to a directory /initrd, if
+unmounted, or, if it is still busy, moved to a directory ``/initrd``, if
such a directory exists on the new root file system.
In order to use this mechanism, you do not have to specify the boot
command options root, init, or rw. (If specified, they will affect
the real root file system, not the initrd environment.)
-
+
If /proc is mounted, the "real" root device can be changed from within
linuxrc by writing the number of the new root FS device to the special
-file /proc/sys/kernel/real-root-dev, e.g.
+file /proc/sys/kernel/real-root-dev, e.g.::
# echo 0x301 >/proc/sys/kernel/real-root-dev
Note that the mechanism is incompatible with NFS and similar file
systems.
-This old, deprecated mechanism is commonly called "change_root", while
-the new, supported mechanism is called "pivot_root".
+This old, deprecated mechanism is commonly called ``change_root``, while
+the new, supported mechanism is called ``pivot_root``.
Mixed change_root and pivot_root mechanism
------------------------------------------
-In case you did not want to use root=/dev/ram0 to trigger the pivot_root
-mechanism, you may create both /linuxrc and /sbin/init in your initrd image.
+In case you did not want to use ``root=/dev/ram0`` to trigger the pivot_root
+mechanism, you may create both ``/linuxrc`` and ``/sbin/init`` in your initrd
+image.
-/linuxrc would contain only the following:
+``/linuxrc`` would contain only the following::
-#! /bin/sh
-mount -n -t proc proc /proc
-echo 0x0100 >/proc/sys/kernel/real-root-dev
-umount -n /proc
+ #! /bin/sh
+ mount -n -t proc proc /proc
+ echo 0x0100 >/proc/sys/kernel/real-root-dev
+ umount -n /proc
Once linuxrc exited, the kernel would mount again your initrd as root,
-this time executing /sbin/init. Again, it would be the duty of this init
-to build the right environment (maybe using the root= device passed on
-the cmdline) before the final execution of the real /sbin/init.
+this time executing ``/sbin/init``. Again, it would be the duty of this init
+to build the right environment (maybe using the ``root=`` device passed on
+the cmdline) before the final execution of the real ``/sbin/init``.
Resources
---------
-[1] Almesberger, Werner; "Booting Linux: The History and the Future"
+.. [#f1] Almesberger, Werner; "Booting Linux: The History and the Future"
http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
-[2] newlib package (experimental), with initrd example
- http://sources.redhat.com/newlib/
-[3] util-linux: Miscellaneous utilities for Linux
- http://www.kernel.org/pub/linux/utils/util-linux/
+.. [#f2] newlib package (experimental), with initrd example
+ https://www.sourceware.org/newlib/
+.. [#f3] util-linux: Miscellaneous utilities for Linux
+ https://www.kernel.org/pub/linux/utils/util-linux/
diff --git a/Documentation/java.txt b/Documentation/admin-guide/java.rst
index 418020584ccc..8744e272e6f8 100644
--- a/Documentation/java.txt
+++ b/Documentation/admin-guide/java.rst
@@ -1,5 +1,5 @@
- Java(tm) Binary Kernel Support for Linux v1.03
- ----------------------------------------------
+Java(tm) Binary Kernel Support for Linux v1.03
+----------------------------------------------
Linux beats them ALL! While all other OS's are TALKING about direct
support of Java Binaries in the OS, Linux is doing it!
@@ -19,70 +19,82 @@ other program after you have done the following:
as the application itself).
2) You have to compile BINFMT_MISC either as a module or into
- the kernel (CONFIG_BINFMT_MISC) and set it up properly.
+ the kernel (``CONFIG_BINFMT_MISC``) and set it up properly.
If you choose to compile it as a module, you will have
to insert it manually with modprobe/insmod, as kmod
- cannot easily be supported with binfmt_misc.
+ cannot easily be supported with binfmt_misc.
Read the file 'binfmt_misc.txt' in this directory to know
more about the configuration process.
3) Add the following configuration items to binfmt_misc
- (you should really have read binfmt_misc.txt now):
- support for Java applications:
+ (you should really have read ``binfmt_misc.txt`` now):
+ support for Java applications::
+
':Java:M::\xca\xfe\xba\xbe::/usr/local/bin/javawrapper:'
- support for executable Jar files:
+
+ support for executable Jar files::
+
':ExecutableJAR:E::jar::/usr/local/bin/jarwrapper:'
- support for Java Applets:
+
+ support for Java Applets::
+
':Applet:E::html::/usr/bin/appletviewer:'
- or the following, if you want to be more selective:
+
+ or the following, if you want to be more selective::
+
':Applet:M::<!--applet::/usr/bin/appletviewer:'
Of course you have to fix the path names. The path/file names given in this
- document match the Debian 2.1 system. (i.e. jdk installed in /usr,
- custom wrappers from this document in /usr/local)
+ document match the Debian 2.1 system. (i.e. jdk installed in ``/usr``,
+ custom wrappers from this document in ``/usr/local``)
Note, that for the more selective applet support you have to modify
- existing html-files to contain <!--applet--> in the first line
- ('<' has to be the first character!) to let this work!
+ existing html-files to contain ``<!--applet-->`` in the first line
+ (``<`` has to be the first character!) to let this work!
For the compiled Java programs you need a wrapper script like the
following (this is because Java is broken in case of the filename
handling), again fix the path names, both in the script and in the
above given configuration string.
- You, too, need the little program after the script. Compile like
- gcc -O2 -o javaclassname javaclassname.c
- and stick it to /usr/local/bin.
+ You, too, need the little program after the script. Compile like::
+
+ gcc -O2 -o javaclassname javaclassname.c
+
+ and stick it to ``/usr/local/bin``.
Both the javawrapper shellscript and the javaclassname program
were supplied by Colin J. Watson <cjw44@cam.ac.uk>.
-====================== Cut here ===================
-#!/bin/bash
-# /usr/local/bin/javawrapper - the wrapper for binfmt_misc/java
+Javawrapper shell script:
+
+.. code-block:: sh
-if [ -z "$1" ]; then
+ #!/bin/bash
+ # /usr/local/bin/javawrapper - the wrapper for binfmt_misc/java
+
+ if [ -z "$1" ]; then
exec 1>&2
echo Usage: $0 class-file
exit 1
-fi
+ fi
-CLASS=$1
-FQCLASS=`/usr/local/bin/javaclassname $1`
-FQCLASSN=`echo $FQCLASS | sed -e 's/^.*\.\([^.]*\)$/\1/'`
-FQCLASSP=`echo $FQCLASS | sed -e 's-\.-/-g' -e 's-^[^/]*$--' -e 's-/[^/]*$--'`
+ CLASS=$1
+ FQCLASS=`/usr/local/bin/javaclassname $1`
+ FQCLASSN=`echo $FQCLASS | sed -e 's/^.*\.\([^.]*\)$/\1/'`
+ FQCLASSP=`echo $FQCLASS | sed -e 's-\.-/-g' -e 's-^[^/]*$--' -e 's-/[^/]*$--'`
-# for example:
-# CLASS=Test.class
-# FQCLASS=foo.bar.Test
-# FQCLASSN=Test
-# FQCLASSP=foo/bar
+ # for example:
+ # CLASS=Test.class
+ # FQCLASS=foo.bar.Test
+ # FQCLASSN=Test
+ # FQCLASSP=foo/bar
-unset CLASSBASE
+ unset CLASSBASE
-declare -i LINKLEVEL=0
+ declare -i LINKLEVEL=0
-while :; do
+ while :; do
if [ "`basename $CLASS .class`" == "$FQCLASSN" ]; then
# See if this directory works straight off
cd -L `dirname $CLASS`
@@ -119,9 +131,9 @@ while :; do
exit 1
fi
CLASS=`ls --color=no -l $CLASS | sed -e 's/^.* \([^ ]*\)$/\1/'`
-done
+ done
-if [ -z "$CLASSBASE" ]; then
+ if [ -z "$CLASSBASE" ]; then
if [ -z "$FQCLASSP" ]; then
GOODNAME=$FQCLASSN.class
else
@@ -131,96 +143,97 @@ if [ -z "$CLASSBASE" ]; then
echo $0:
echo " $FQCLASS should be in a file called $GOODNAME"
exit 1
-fi
+ fi
-if ! echo $CLASSPATH | grep -q "^\(.*:\)*$CLASSBASE\(:.*\)*"; then
+ if ! echo $CLASSPATH | grep -q "^\(.*:\)*$CLASSBASE\(:.*\)*"; then
# class is not in CLASSPATH, so prepend dir of class to CLASSPATH
if [ -z "${CLASSPATH}" ] ; then
export CLASSPATH=$CLASSBASE
else
export CLASSPATH=$CLASSBASE:$CLASSPATH
fi
-fi
-
-shift
-/usr/bin/java $FQCLASS "$@"
-====================== Cut here ===================
-
-
-====================== Cut here ===================
-/* javaclassname.c
- *
- * Extracts the class name from a Java class file; intended for use in a Java
- * wrapper of the type supported by the binfmt_misc option in the Linux kernel.
- *
- * Copyright (C) 1999 Colin J. Watson <cjw44@cam.ac.uk>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <sys/types.h>
-
-/* From Sun's Java VM Specification, as tag entries in the constant pool. */
-
-#define CP_UTF8 1
-#define CP_INTEGER 3
-#define CP_FLOAT 4
-#define CP_LONG 5
-#define CP_DOUBLE 6
-#define CP_CLASS 7
-#define CP_STRING 8
-#define CP_FIELDREF 9
-#define CP_METHODREF 10
-#define CP_INTERFACEMETHODREF 11
-#define CP_NAMEANDTYPE 12
-#define CP_METHODHANDLE 15
-#define CP_METHODTYPE 16
-#define CP_INVOKEDYNAMIC 18
-
-/* Define some commonly used error messages */
-
-#define seek_error() error("%s: Cannot seek\n", program)
-#define corrupt_error() error("%s: Class file corrupt\n", program)
-#define eof_error() error("%s: Unexpected end of file\n", program)
-#define utf8_error() error("%s: Only ASCII 1-255 supported\n", program);
-
-char *program;
-
-long *pool;
-
-u_int8_t read_8(FILE *classfile);
-u_int16_t read_16(FILE *classfile);
-void skip_constant(FILE *classfile, u_int16_t *cur);
-void error(const char *format, ...);
-int main(int argc, char **argv);
-
-/* Reads in an unsigned 8-bit integer. */
-u_int8_t read_8(FILE *classfile)
-{
+ fi
+
+ shift
+ /usr/bin/java $FQCLASS "$@"
+
+javaclassname.c:
+
+.. code-block:: c
+
+ /* javaclassname.c
+ *
+ * Extracts the class name from a Java class file; intended for use in a Java
+ * wrapper of the type supported by the binfmt_misc option in the Linux kernel.
+ *
+ * Copyright (C) 1999 Colin J. Watson <cjw44@cam.ac.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <stdarg.h>
+ #include <sys/types.h>
+
+ /* From Sun's Java VM Specification, as tag entries in the constant pool. */
+
+ #define CP_UTF8 1
+ #define CP_INTEGER 3
+ #define CP_FLOAT 4
+ #define CP_LONG 5
+ #define CP_DOUBLE 6
+ #define CP_CLASS 7
+ #define CP_STRING 8
+ #define CP_FIELDREF 9
+ #define CP_METHODREF 10
+ #define CP_INTERFACEMETHODREF 11
+ #define CP_NAMEANDTYPE 12
+ #define CP_METHODHANDLE 15
+ #define CP_METHODTYPE 16
+ #define CP_INVOKEDYNAMIC 18
+
+ /* Define some commonly used error messages */
+
+ #define seek_error() error("%s: Cannot seek\n", program)
+ #define corrupt_error() error("%s: Class file corrupt\n", program)
+ #define eof_error() error("%s: Unexpected end of file\n", program)
+ #define utf8_error() error("%s: Only ASCII 1-255 supported\n", program);
+
+ char *program;
+
+ long *pool;
+
+ u_int8_t read_8(FILE *classfile);
+ u_int16_t read_16(FILE *classfile);
+ void skip_constant(FILE *classfile, u_int16_t *cur);
+ void error(const char *format, ...);
+ int main(int argc, char **argv);
+
+ /* Reads in an unsigned 8-bit integer. */
+ u_int8_t read_8(FILE *classfile)
+ {
int b = fgetc(classfile);
if(b == EOF)
eof_error();
return (u_int8_t)b;
-}
+ }
-/* Reads in an unsigned 16-bit integer. */
-u_int16_t read_16(FILE *classfile)
-{
+ /* Reads in an unsigned 16-bit integer. */
+ u_int16_t read_16(FILE *classfile)
+ {
int b1, b2;
b1 = fgetc(classfile);
if(b1 == EOF)
@@ -229,11 +242,11 @@ u_int16_t read_16(FILE *classfile)
if(b2 == EOF)
eof_error();
return (u_int16_t)((b1 << 8) | b2);
-}
+ }
-/* Reads in a value from the constant pool. */
-void skip_constant(FILE *classfile, u_int16_t *cur)
-{
+ /* Reads in a value from the constant pool. */
+ void skip_constant(FILE *classfile, u_int16_t *cur)
+ {
u_int16_t len;
int seekerr = 1;
pool[*cur] = ftell(classfile);
@@ -270,19 +283,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
}
if(seekerr)
seek_error();
-}
+ }
-void error(const char *format, ...)
-{
+ void error(const char *format, ...)
+ {
va_list ap;
va_start(ap, format);
vfprintf(stderr, format, ap);
va_end(ap);
exit(1);
-}
+ }
-int main(int argc, char **argv)
-{
+ int main(int argc, char **argv)
+ {
FILE *classfile;
u_int16_t cp_count, i, this_class, classinfo_ptr;
u_int8_t length;
@@ -349,19 +362,19 @@ int main(int argc, char **argv)
free(pool);
fclose(classfile);
return 0;
-}
-====================== Cut here ===================
+ }
+
+jarwrapper::
+ #!/bin/bash
+ # /usr/local/java/bin/jarwrapper - the wrapper for binfmt_misc/jar
-====================== Cut here ===================
-#!/bin/bash
-# /usr/local/java/bin/jarwrapper - the wrapper for binfmt_misc/jar
+ java -jar $1
-java -jar $1
-====================== Cut here ===================
+Now simply ``chmod +x`` the ``.class``, ``.jar`` and/or ``.html`` files you
+want to execute.
-Now simply chmod +x the .class, .jar and/or .html files you want to execute.
To add a Java program to your path best put a symbolic link to the main
.class file into /usr/bin (or another place you like) omitting the .class
extension. The directory containing the original .class file will be
@@ -371,29 +384,36 @@ added to your CLASSPATH during execution.
To test your new setup, enter in the following simple Java app, and name
it "HelloWorld.java":
+.. code-block:: java
+
class HelloWorld {
public static void main(String args[]) {
System.out.println("Hello World!");
}
}
-Now compile the application with:
+Now compile the application with::
+
javac HelloWorld.java
-Set the executable permissions of the binary file, with:
+Set the executable permissions of the binary file, with::
+
chmod 755 HelloWorld.class
-And then execute it:
+And then execute it::
+
./HelloWorld.class
-To execute Java Jar files, simple chmod the *.jar files to include
-the execution bit, then just do
+To execute Java Jar files, simply chmod the ``*.jar`` files to include
+the execution bit, then just do::
+
./Application.jar
-To execute Java Applets, simple chmod the *.html files to include
-the execution bit, then just do
+To execute Java Applets, simply chmod the ``*.html`` files to include
+the execution bit, then just do::
+
./Applet.html
@@ -401,4 +421,3 @@ originally by Brian A. Lantz, brian@lantz.com
heavily edited for binfmt_misc by Richard Günther
new scripts by Colin J. Watson <cjw44@cam.ac.uk>
added executable Jar file support by Kurt Huwig <kurt@iku-netz.de>
-
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
new file mode 100644
index 000000000000..b516164999a8
--- /dev/null
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -0,0 +1,209 @@
+The kernel's command-line parameters
+====================================
+
+The following is a consolidated list of the kernel parameters as
+implemented by the __setup(), core_param() and module_param() macros
+and sorted into English Dictionary order (defined as ignoring all
+punctuation and sorting digits before letters in a case insensitive
+manner), and with descriptions where known.
+
+The kernel parses parameters from the kernel command line up to "--";
+if it doesn't recognize a parameter and it doesn't contain a '.', the
+parameter gets passed to init: parameters with '=' go into init's
+environment, others are passed as command line arguments to init.
+Everything after "--" is passed as an argument to init.
+
+Module parameters can be specified in two ways: via the kernel command
+line with a module name prefix, or via modprobe, e.g.::
+
+ (kernel command line) usbcore.blinkenlights=1
+ (modprobe command line) modprobe usbcore blinkenlights=1
+
+Parameters for modules which are built into the kernel need to be
+specified on the kernel command line. modprobe looks through the
+kernel command line (/proc/cmdline) and collects module parameters
+when it loads a module, so the kernel command line can be used for
+loadable modules too.
+
+Hyphens (dashes) and underscores are equivalent in parameter names, so::
+
+ log_buf_len=1M print-fatal-signals=1
+
+can also be entered as::
+
+ log-buf-len=1M print_fatal_signals=1
+
+Double-quotes can be used to protect spaces in values, e.g.::
+
+ param="spaces in here"
+
+cpu lists:
+----------
+
+Some kernel parameters take a list of CPUs as a value, e.g. isolcpus,
+nohz_full, irqaffinity, rcu_nocbs. The format of this list is:
+
+ <cpu number>,...,<cpu number>
+
+or
+
+ <cpu number>-<cpu number>
+ (must be a positive range in ascending order)
+
+or a mixture
+
+ <cpu number>,...,<cpu number>-<cpu number>
+
+Note that for the special case of a range one can split the range into equal
+sized groups and for each group use some amount from the beginning of that
+group:
+
+ <cpu number>-<cpu number>:<used size>/<group size>
+
+For example, one can add the following parameter to the command line:
+
+ isolcpus=1,2,10-20,100-2000:2/25
+
+where the final item represents CPUs 100,101,125,126,150,151,...
+
+
+
+This document may not be entirely up to date and comprehensive. The command
+"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
+module. Loadable modules, after being loaded into the running kernel, also
+reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
+parameters may be changed at runtime by the command
+``echo -n ${value} > /sys/module/${modulename}/parameters/${parm}``.
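+
+For instance (using ``usbcore`` purely as an illustration; parameter
+names differ from module to module)::
+
+ modinfo -p usbcore
+ cat /sys/module/usbcore/parameters/autosuspend
+ echo -n 5 > /sys/module/usbcore/parameters/autosuspend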
+
+The parameters listed below are only valid if certain kernel build options were
+enabled and if respective hardware is present. The text in square brackets at
+the beginning of each description states the restrictions within which a
+parameter is applicable::
+
+ ACPI ACPI support is enabled.
+ AGP AGP (Accelerated Graphics Port) is enabled.
+ ALSA ALSA sound support is enabled.
+ APIC APIC support is enabled.
+ APM Advanced Power Management support is enabled.
+ ARM ARM architecture is enabled.
+ AVR32 AVR32 architecture is enabled.
+ AX25 Appropriate AX.25 support is enabled.
+ BLACKFIN Blackfin architecture is enabled.
+ CLK Common clock infrastructure is enabled.
+ CMA Contiguous Memory Area support is enabled.
+ DRM Direct Rendering Management support is enabled.
+ DYNAMIC_DEBUG Build in debug messages and enable them at runtime
+ EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
+ EFI EFI Partitioning (GPT) is enabled
+ EIDE EIDE/ATAPI support is enabled.
+ EVM Extended Verification Module
+ FB The frame buffer device is enabled.
+ FTRACE Function tracing enabled.
+ GCOV GCOV profiling is enabled.
+ HW Appropriate hardware is enabled.
+ IA-64 IA-64 architecture is enabled.
+ IMA Integrity measurement architecture is enabled.
+ IOSCHED More than one I/O scheduler is enabled.
+ IP_PNP IP DHCP, BOOTP, or RARP is enabled.
+ IPV6 IPv6 support is enabled.
+ ISAPNP ISA PnP code is enabled.
+ ISDN Appropriate ISDN support is enabled.
+ JOY Appropriate joystick support is enabled.
+ KGDB Kernel debugger support is enabled.
+ KVM Kernel Virtual Machine support is enabled.
+ LIBATA Libata driver is enabled
+ LP Printer support is enabled.
+ LOOP Loopback device support is enabled.
+ M68k M68k architecture is enabled.
+ These options have more detailed description inside of
+ Documentation/m68k/kernel-options.txt.
+ MDA MDA console support is enabled.
+ MIPS MIPS architecture is enabled.
+ MOUSE Appropriate mouse support is enabled.
+ MSI Message Signaled Interrupts (PCI).
+ MTD MTD (Memory Technology Device) support is enabled.
+ NET Appropriate network support is enabled.
+ NUMA NUMA support is enabled.
+ NFS Appropriate NFS support is enabled.
+ OSS OSS sound support is enabled.
+ PV_OPS A paravirtualized kernel is enabled.
+ PARIDE The ParIDE (parallel port IDE) subsystem is enabled.
+ PARISC The PA-RISC architecture is enabled.
+ PCI PCI bus support is enabled.
+ PCIE PCI Express support is enabled.
+ PCMCIA The PCMCIA subsystem is enabled.
+ PNP Plug & Play support is enabled.
+ PPC PowerPC architecture is enabled.
+ PPT Parallel port support is enabled.
+ PS2 Appropriate PS/2 support is enabled.
+ RAM RAM disk support is enabled.
+ S390 S390 architecture is enabled.
+ SCSI Appropriate SCSI support is enabled.
+ A lot of drivers have their options described inside
+ the Documentation/scsi/ sub-directory.
+ SECURITY Different security models are enabled.
+ SELINUX SELinux support is enabled.
+ APPARMOR AppArmor support is enabled.
+ SERIAL Serial support is enabled.
+ SH SuperH architecture is enabled.
+ SMP The kernel is an SMP kernel.
+ SPARC Sparc architecture is enabled.
+ SWSUSP Software suspend (hibernation) is enabled.
+ SUSPEND System suspend states are enabled.
+ TPM TPM drivers are enabled.
+ TS Appropriate touchscreen support is enabled.
+ UMS USB Mass Storage support is enabled.
+ USB USB support is enabled.
+ USBHID USB Human Interface Device support is enabled.
+ V4L Video For Linux support is enabled.
+ VMMIO Driver for memory mapped virtio devices is enabled.
+ VGA The VGA console has been enabled.
+ VT Virtual terminal support is enabled.
+ WDT Watchdog support is enabled.
+ XT IBM PC/XT MFM hard disk support is enabled.
+ X86-32 X86-32, aka i386 architecture is enabled.
+ X86-64 X86-64 architecture is enabled.
+ More X86-64 boot options can be found in
+ Documentation/x86/x86_64/boot-options.txt .
+ X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
+ X86_UV SGI UV support is enabled.
+ XEN Xen support is enabled
+
+In addition, the following text indicates that the option::
+
+ BUGS= Relates to possible processor bugs on the said processor.
+ KNL Is a kernel start-up parameter.
+ BOOT Is a boot loader parameter.
+
+Parameters denoted with BOOT are actually interpreted by the boot
+loader, and have no meaning to the kernel directly.
+Do not modify the syntax of boot loader parameters without extreme
+need or coordination with <Documentation/x86/boot.txt>.
+
+There are also arch-specific kernel-parameters not documented here.
+See for example <Documentation/x86/x86_64/boot-options.txt>.
+
+Note that ALL kernel parameters listed below are CASE SENSITIVE, and that
+a trailing = on the name of any parameter states that that parameter will
+be entered as an environment variable, whereas its absence indicates that
+it will appear as a kernel argument readable via /proc/cmdline by programs
+running once the system is up.
+
+The number of kernel parameters is not limited, but the length of the
+complete command line (parameters including spaces etc.) is limited to
+a fixed number of characters. This limit depends on the architecture
+and is between 256 and 4096 characters. It is defined in the file
+./include/asm/setup.h as COMMAND_LINE_SIZE.
+
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted:
+
+.. include:: kernel-parameters.txt
+ :literal:
+
+Todo
+----
+
+ Add more DRM drivers.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 37babf91f2cb..be2d6d0a03a4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1,202 +1,3 @@
- Kernel Parameters
- ~~~~~~~~~~~~~~~~~
-
-The following is a consolidated list of the kernel parameters as
-implemented by the __setup(), core_param() and module_param() macros
-and sorted into English Dictionary order (defined as ignoring all
-punctuation and sorting digits before letters in a case insensitive
-manner), and with descriptions where known.
-
-The kernel parses parameters from the kernel command line up to "--";
-if it doesn't recognize a parameter and it doesn't contain a '.', the
-parameter gets passed to init: parameters with '=' go into init's
-environment, others are passed as command line arguments to init.
-Everything after "--" is passed as an argument to init.
-
-Module parameters can be specified in two ways: via the kernel command
-line with a module name prefix, or via modprobe, e.g.:
-
- (kernel command line) usbcore.blinkenlights=1
- (modprobe command line) modprobe usbcore blinkenlights=1
-
-Parameters for modules which are built into the kernel need to be
-specified on the kernel command line. modprobe looks through the
-kernel command line (/proc/cmdline) and collects module parameters
-when it loads a module, so the kernel command line can be used for
-loadable modules too.
-
-Hyphens (dashes) and underscores are equivalent in parameter names, so
- log_buf_len=1M print-fatal-signals=1
-can also be entered as
- log-buf-len=1M print_fatal_signals=1
-
-Double-quotes can be used to protect spaces in values, e.g.:
- param="spaces in here"
-
-cpu lists:
-----------
-
-Some kernel parameters take a list of CPUs as a value, e.g. isolcpus,
-nohz_full, irqaffinity, rcu_nocbs. The format of this list is:
-
- <cpu number>,...,<cpu number>
-
-or
-
- <cpu number>-<cpu number>
- (must be a positive range in ascending order)
-
-or a mixture
-
-<cpu number>,...,<cpu number>-<cpu number>
-
-Note that for the special case of a range one can split the range into equal
-sized groups and for each group use some amount from the beginning of that
-group:
-
- <cpu number>-cpu number>:<used size>/<group size>
-
-For example one can add to the command line following parameter:
-
- isolcpus=1,2,10-20,100-2000:2/25
-
-where the final item represents CPUs 100,101,125,126,150,151,...
-
-
-
-This document may not be entirely up to date and comprehensive. The command
-"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
-module. Loadable modules, after being loaded into the running kernel, also
-reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
-parameters may be changed at runtime by the command
-"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
-
-The parameters listed below are only valid if certain kernel build options were
-enabled and if respective hardware is present. The text in square brackets at
-the beginning of each description states the restrictions within which a
-parameter is applicable:
-
- ACPI ACPI support is enabled.
- AGP AGP (Accelerated Graphics Port) is enabled.
- ALSA ALSA sound support is enabled.
- APIC APIC support is enabled.
- APM Advanced Power Management support is enabled.
- ARM ARM architecture is enabled.
- AVR32 AVR32 architecture is enabled.
- AX25 Appropriate AX.25 support is enabled.
- BLACKFIN Blackfin architecture is enabled.
- CLK Common clock infrastructure is enabled.
- CMA Contiguous Memory Area support is enabled.
- DRM Direct Rendering Management support is enabled.
- DYNAMIC_DEBUG Build in debug messages and enable them at runtime
- EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
- EFI EFI Partitioning (GPT) is enabled
- EIDE EIDE/ATAPI support is enabled.
- EVM Extended Verification Module
- FB The frame buffer device is enabled.
- FTRACE Function tracing enabled.
- GCOV GCOV profiling is enabled.
- HW Appropriate hardware is enabled.
- IA-64 IA-64 architecture is enabled.
- IMA Integrity measurement architecture is enabled.
- IOSCHED More than one I/O scheduler is enabled.
- IP_PNP IP DHCP, BOOTP, or RARP is enabled.
- IPV6 IPv6 support is enabled.
- ISAPNP ISA PnP code is enabled.
- ISDN Appropriate ISDN support is enabled.
- JOY Appropriate joystick support is enabled.
- KGDB Kernel debugger support is enabled.
- KVM Kernel Virtual Machine support is enabled.
- LIBATA Libata driver is enabled
- LP Printer support is enabled.
- LOOP Loopback device support is enabled.
- M68k M68k architecture is enabled.
- These options have more detailed description inside of
- Documentation/m68k/kernel-options.txt.
- MDA MDA console support is enabled.
- MIPS MIPS architecture is enabled.
- MOUSE Appropriate mouse support is enabled.
- MSI Message Signaled Interrupts (PCI).
- MTD MTD (Memory Technology Device) support is enabled.
- NET Appropriate network support is enabled.
- NUMA NUMA support is enabled.
- NFS Appropriate NFS support is enabled.
- OSS OSS sound support is enabled.
- PV_OPS A paravirtualized kernel is enabled.
- PARIDE The ParIDE (parallel port IDE) subsystem is enabled.
- PARISC The PA-RISC architecture is enabled.
- PCI PCI bus support is enabled.
- PCIE PCI Express support is enabled.
- PCMCIA The PCMCIA subsystem is enabled.
- PNP Plug & Play support is enabled.
- PPC PowerPC architecture is enabled.
- PPT Parallel port support is enabled.
- PS2 Appropriate PS/2 support is enabled.
- RAM RAM disk support is enabled.
- S390 S390 architecture is enabled.
- SCSI Appropriate SCSI support is enabled.
- A lot of drivers have their options described inside
- the Documentation/scsi/ sub-directory.
- SECURITY Different security models are enabled.
- SELINUX SELinux support is enabled.
- APPARMOR AppArmor support is enabled.
- SERIAL Serial support is enabled.
- SH SuperH architecture is enabled.
- SMP The kernel is an SMP kernel.
- SPARC Sparc architecture is enabled.
- SWSUSP Software suspend (hibernation) is enabled.
- SUSPEND System suspend states are enabled.
- TPM TPM drivers are enabled.
- TS Appropriate touchscreen support is enabled.
- UMS USB Mass Storage support is enabled.
- USB USB support is enabled.
- USBHID USB Human Interface Device support is enabled.
- V4L Video For Linux support is enabled.
- VMMIO Driver for memory mapped virtio devices is enabled.
- VGA The VGA console has been enabled.
- VT Virtual terminal support is enabled.
- WDT Watchdog support is enabled.
- XT IBM PC/XT MFM hard disk support is enabled.
- X86-32 X86-32, aka i386 architecture is enabled.
- X86-64 X86-64 architecture is enabled.
- More X86-64 boot options can be found in
- Documentation/x86/x86_64/boot-options.txt .
- X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
- X86_UV SGI UV support is enabled.
- XEN Xen support is enabled
-
-In addition, the following text indicates that the option:
-
- BUGS= Relates to possible processor bugs on the said processor.
- KNL Is a kernel start-up parameter.
- BOOT Is a boot loader parameter.
-
-Parameters denoted with BOOT are actually interpreted by the boot
-loader, and have no meaning to the kernel directly.
-Do not modify the syntax of boot loader parameters without extreme
-need or coordination with <Documentation/x86/boot.txt>.
-
-There are also arch-specific kernel-parameters not documented here.
-See for example <Documentation/x86/x86_64/boot-options.txt>.
-
-Note that ALL kernel parameters listed below are CASE SENSITIVE, and that
-a trailing = on the name of any parameter states that that parameter will
-be entered as an environment variable, whereas its absence indicates that
-it will appear as a kernel argument readable via /proc/cmdline by programs
-running once the system is up.
-
-The number of kernel parameters is not limited, but the length of the
-complete command line (parameters including spaces etc.) is limited to
-a fixed number of characters. This limit depends on the architecture
-and is between 256 and 4096 characters. It is defined in the file
-./include/asm/setup.h as COMMAND_LINE_SIZE.
-
-Finally, the [KMG] suffix is commonly described after a number of kernel
-parameter values. These 'K', 'M', and 'G' letters represent the _binary_
-multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
-bytes respectively. Such letter suffixes can also be entirely omitted.
-
-
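
A hedged illustration, not part of this patch: two parameters that accept the [KMG] suffixes described above, shown with made-up values::

    log_buf_len=1M crashkernel=256M

Here 1M denotes 2^20 bytes and 256M denotes 256*2^20 bytes.
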
acpi= [HW,ACPI,X86,ARM64]
Advanced Configuration and Power Interface
Format: { force | on | off | strict | noirq | rsdt |
@@ -811,7 +612,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
bits, and "f" is flow control ("r" for RTS or
omit it). Default is "9600n8".
- See Documentation/serial-console.txt for more
+ See Documentation/admin-guide/serial-console.rst for more
information. See
Documentation/networking/netconsole.txt for an
alternative.
@@ -1062,6 +863,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
dscc4.setup= [NET]
+ dump_apple_properties [X86]
+ Dump name and content of EFI device properties on
+ x86 Macs. Useful for driver authors to determine
+ what data is available or for reverse-engineering.
+
dyndbg[="val"] [KNL,DYNAMIC_DEBUG]
module.dyndbg[="val"]
Enable debug messages at boot time. See
@@ -1074,12 +880,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nopku [X86] Disable Memory Protection Keys CPU feature found
in some Intel CPUs.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
-
module.async_probe [KNL]
Enable asynchronous probe on this module.
@@ -1760,6 +1560,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
disable
Do not enable intel_pstate as the default
scaling driver for the supported processors
+ passive
+ Use intel_pstate as a scaling driver, but configure it
+ to work with generic cpufreq governors (instead of
+ enabling its internal governor). This mode cannot be
+ used along with the hardware-managed P-states (HWP)
+ feature.
force
Enable intel_pstate on systems that prohibit it by default
in favor of acpi-cpufreq. Forcing the intel_pstate driver
@@ -1780,6 +1586,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Description Table, specifies preferred power management
profile as "Enterprise Server" or "Performance Server",
then this feature is turned on by default.
+ per_cpu_perf_limits
+	Allow per-logical-CPU P-State performance control limits using
+	the cpufreq sysfs interface
intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
@@ -1958,9 +1767,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kmemcheck=2 (one-shot mode)
Default: 2 (one-shot mode)
- kstack=N [X86] Print N words from the kernel stack
- in oops dumps.
-
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -2235,7 +2041,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt
md= [HW] RAID subsystems devices and level
- See Documentation/md.txt.
+ See Documentation/admin-guide/md.rst.
mdacon= [MDA]
Format: <first>,<last>
@@ -2325,6 +2131,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
memory contents and reserves bad memory
regions that are detected.
+ mem_sleep_default= [SUSPEND] Default system suspend mode:
+ s2idle - Suspend-To-Idle
+ shallow - Power-On Suspend or equivalent (if supported)
+ deep - Suspend-To-RAM or equivalent (if supported)
+ See Documentation/power/states.txt.
+
meye.*= [HW] Set MotionEye Camera parameters
See Documentation/video4linux/meye.txt.
@@ -2401,7 +2213,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
that the amount of memory usable for all allocations
is not too small.
- movable_node [KNL,X86] Boot-time switch to enable the effects
+ movable_node [KNL] Boot-time switch to enable the effects
of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details.
MTD_Partition= [MTD]
@@ -2545,7 +2357,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
will be sent.
The default is to send the implementation identification
information.
-
+
nfs.recover_lost_locks =
[NFSv4] Attempt to recover locks that were lost due
to a lease timeout on the server. Please note that
@@ -2754,6 +2566,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page
fault handling.
+ no-vmw-sched-clock
+ [X86,PV_OPS] Disable paravirtualized VMware scheduler
+ clock and use the default one.
+
no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
steal time is computed, but won't influence scheduler
behaviour
@@ -3235,6 +3051,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
may be specified.
Format: <port>,<port>....
+ powersave=off [PPC] This option disables power saving features.
+ It specifically disables cpuidle and sets the
+ platform machine description specific power_save
+			function to NULL. When idle, the CPU just reduces
+ execution priority.
+
ppc_strict_facility_enable
[PPC] This option catches any kernel floating point,
Altivec, VSX and SPE outside of regions specifically
@@ -3318,7 +3140,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
r128= [HW,DRM]
raid= [HW,RAID]
- See Documentation/md.txt.
+ See Documentation/admin-guide/md.rst.
ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
See Documentation/blockdev/ramdisk.txt.
@@ -3668,13 +3490,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[KNL, SMP] Set scheduler's default relax_domain_level.
See Documentation/cgroup-v1/cpusets.txt.
- relative_sleep_states=
- [SUSPEND] Use sleep state labeling where the deepest
- state available other than hibernation is always "mem".
- Format: { "0" | "1" }
- 0 -- Traditional sleep state labels.
- 1 -- Relative sleep state labels.
-
reserve= [KNL,BUGS] Force the kernel to ignore some iomem area
reservetop= [X86-32]
@@ -3824,12 +3639,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
shapers= [NET]
Maximal number of shapers.
- show_msr= [x86] show boot-time MSR settings
- Format: { <integer> }
- Show boot-time (BIOS-initialized) MSR settings.
- The parameter means the number of CPUs to show,
- for example 1 means boot CPU only.
-
simeth= [IA-64]
simscsi=
@@ -4197,7 +4006,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See also Documentation/input/joystick-parport.txt
udbg-immortal [PPC] When debugging early kernel crashes that
- happen after console_init() and before a proper
+ happen after console_init() and before a proper
 			console driver takes over, this boot option might
help "seeing" what's going on.
@@ -4564,9 +4373,3 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
-
-______________________________________________________________________
-
-TODO:
-
- Add more DRM drivers.
diff --git a/Documentation/md.txt b/Documentation/admin-guide/md.rst
index d6e2fcf27337..e449fb5f277c 100644
--- a/Documentation/md.txt
+++ b/Documentation/admin-guide/md.rst
@@ -1,42 +1,77 @@
-Tools that manage md devices can be found at
- http://www.kernel.org/pub/linux/utils/raid/
-
+RAID arrays
+===========
Boot time assembly of RAID arrays
---------------------------------
+Tools that manage md devices can be found at
+ http://www.kernel.org/pub/linux/utils/raid/
+
+
You can boot with your md device with the following kernel command
lines:
-for old raid arrays without persistent superblocks:
+for old raid arrays without persistent superblocks::
+
md=<md device no.>,<raid level>,<chunk size factor>,<fault level>,dev0,dev1,...,devn
-for raid arrays with persistent superblocks
+for raid arrays with persistent superblocks::
+
md=<md device no.>,dev0,dev1,...,devn
-or, to assemble a partitionable array:
+
+or, to assemble a partitionable array::
+
md=d<md device no.>,dev0,dev1,...,devn
-
-md device no. = the number of the md device ...
- 0 means md0,
- 1 md1,
- 2 md2,
- 3 md3,
- 4 md4
-
-raid level = -1 linear mode
- 0 striped mode
- other modes are only supported with persistent super blocks
-
-chunk size factor = (raid-0 and raid-1 only)
- Set the chunk size as 4k << n.
-
-fault level = totally ignored
-
-dev0-devn: e.g. /dev/hda1,/dev/hdc1,/dev/sda1,/dev/sdb1
-
-A possible loadlin line (Harald Hoyer <HarryH@Royal.Net>) looks like this:
-
-e:\loadlin\loadlin e:\zimage root=/dev/md0 md=0,0,4,0,/dev/hdb2,/dev/hdc3 ro
+
+``md device no.``
++++++++++++++++++
+
+The number of the md device
+
+================= =========
+``md device no.`` device
+================= =========
+ 0 md0
+ 1 md1
+ 2 md2
+ 3 md3
+ 4 md4
+================= =========
+
+``raid level``
+++++++++++++++
+
+level of the RAID array
+
+=============== =============
+``raid level`` level
+=============== =============
+-1 linear mode
+0 striped mode
+=============== =============
+
+other modes are only supported with persistent super blocks
+
+``chunk size factor``
++++++++++++++++++++++
+
+(raid-0 and raid-1 only)
+
+Set the chunk size as 4k << n.
+
+``fault level``
++++++++++++++++
+
+Totally ignored
+
+``dev0`` to ``devn``
+++++++++++++++++++++
+
+e.g. ``/dev/hda1``, ``/dev/hdc1``, ``/dev/sda1``, ``/dev/sdb1``
+
+A possible loadlin line (Harald Hoyer <HarryH@Royal.Net>) looks like this::
+
+ e:\loadlin\loadlin e:\zimage root=/dev/md0 md=0,0,4,0,/dev/hdb2,/dev/hdc3 ro
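
A hedged sketch, not part of the patch: for a hypothetical two-disk striped array with no persistent superblock and 64k chunks (chunk size factor 4, because 4k << 4 = 64k), the equivalent parameters on a boot loader's kernel line might read::

    md=0,0,4,0,/dev/sda1,/dev/sdb1 root=/dev/md0 ro
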
Boot time autodetection of RAID arrays
@@ -45,10 +80,10 @@ Boot time autodetection of RAID arrays
When md is compiled into the kernel (not as module), partitions of
type 0xfd are scanned and automatically assembled into RAID arrays.
This autodetection may be suppressed with the kernel parameter
-"raid=noautodetect". As of kernel 2.6.9, only drives with a type 0
+``raid=noautodetect``. As of kernel 2.6.9, only drives with a type 0
superblock can be autodetected and run at boot time.
-The kernel parameter "raid=partitionable" (or "raid=part") means
+The kernel parameter ``raid=partitionable`` (or ``raid=part``) means
that all auto-detected arrays are assembled as partitionable.
Boot time assembly of degraded/dirty arrays
@@ -56,22 +91,23 @@ Boot time assembly of degraded/dirty arrays
If a raid5 or raid6 array is both dirty and degraded, it could have
undetectable data corruption. This is because the fact that it is
-'dirty' means that the parity cannot be trusted, and the fact that it
+``dirty`` means that the parity cannot be trusted, and the fact that it
is degraded means that some datablocks are missing and cannot reliably
be reconstructed (due to no parity).
For this reason, md will normally refuse to start such an array. This
requires the sysadmin to take action to explicitly start the array
-despite possible corruption. This is normally done with
+despite possible corruption. This is normally done with::
+
mdadm --assemble --force ....
This option is not really available if the array has the root
filesystem on it. In order to support this booting from such an
-array, md supports a module parameter "start_dirty_degraded" which,
+array, md supports a module parameter ``start_dirty_degraded`` which,
 when set to 1, bypasses the checks and allows dirty degraded
arrays to be started.
-So, to boot with a root filesystem of a dirty degraded raid[56], use
+So, to boot with a root filesystem of a dirty degraded raid 5 or 6, use::
md-mod.start_dirty_degraded=1
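
A hedged illustration, not part of the patch: combined with a root filesystem on that array, the complete set of boot parameters might look like::

    root=/dev/md0 ro md-mod.start_dirty_degraded=1
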
@@ -80,30 +116,30 @@ Superblock formats
------------------
The md driver can support a variety of different superblock formats.
-Currently, it supports superblock formats "0.90.0" and the "md-1" format
+Currently, it supports superblock formats ``0.90.0`` and the ``md-1`` format
introduced in the 2.5 development series.
The kernel will autodetect which format superblock is being used.
-Superblock format '0' is treated differently to others for legacy
+Superblock format ``0`` is treated differently to others for legacy
reasons - it is the original superblock format.
General Rules - apply for all superblock formats
------------------------------------------------
-An array is 'created' by writing appropriate superblocks to all
+An array is ``created`` by writing appropriate superblocks to all
devices.
-It is 'assembled' by associating each of these devices with an
+It is ``assembled`` by associating each of these devices with a
particular md virtual device. Once it is completely assembled, it can
be accessed.
An array should be created by a user-space tool. This will write
superblocks to all devices. It will usually mark the array as
-'unclean', or with some devices missing so that the kernel md driver
-can create appropriate redundancy (copying in raid1, parity
-calculation in raid4/5).
+``unclean``, or with some devices missing so that the kernel md driver
+can create appropriate redundancy (copying in raid 1, parity
+calculation in raid 4/5).
When an array is assembled, it is first initialized with the
SET_ARRAY_INFO ioctl. This contains, in particular, a major and minor
@@ -126,13 +162,12 @@ Devices that have failed or are not yet active can be detached from an
array using HOT_REMOVE_DISK.
-Specific Rules that apply to format-0 super block arrays, and
- arrays with no superblock (non-persistent).
--------------------------------------------------------------
+Specific Rules that apply to format-0 super block arrays, and arrays with no superblock (non-persistent)
+--------------------------------------------------------------------------------------------------------
-An array can be 'created' by describing the array (level, chunksize
-etc) in a SET_ARRAY_INFO ioctl. This must have major_version==0 and
-raid_disks != 0.
+An array can be ``created`` by describing the array (level, chunksize
+etc) in a SET_ARRAY_INFO ioctl. This must have ``major_version==0`` and
+``raid_disks != 0``.
Then uninitialized devices can be added with ADD_NEW_DISK. The
structure passed to ADD_NEW_DISK must specify the state of the device
@@ -142,24 +177,26 @@ Once started with RUN_ARRAY, uninitialized spares can be added with
HOT_ADD_DISK.
-
MD devices in sysfs
-------------------
-md devices appear in sysfs (/sys) as regular block devices,
-e.g.
+
+md devices appear in sysfs (``/sys``) as regular block devices,
+e.g.::
+
/sys/block/md0
-Each 'md' device will contain a subdirectory called 'md' which
+Each ``md`` device will contain a subdirectory called ``md`` which
contains further md-specific information about the device.
All md devices contain:
+
level
- a text file indicating the 'raid level'. e.g. raid0, raid1,
+ a text file indicating the ``raid level``. e.g. raid0, raid1,
raid5, linear, multipath, faulty.
If no raid level has been set yet (array is still being
assembled), the value will reflect whatever has been written
to it, which may be a name like the above, or may be a number
- such as '0', '5', etc.
+ such as ``0``, ``5``, etc.
raid_disks
a text file with a simple number indicating the number of devices
@@ -172,10 +209,10 @@ All md devices contain:
A change to this attribute will not be permitted if it would
reduce the size of the array. To reduce the number of drives
in an e.g. raid5, the array size must first be reduced by
- setting the 'array_size' attribute.
+ setting the ``array_size`` attribute.
chunk_size
- This is the size in bytes for 'chunks' and is only relevant to
+ This is the size in bytes for ``chunks`` and is only relevant to
raid levels that involve striping (0,4,5,6,10). The address space
of the array is conceptually divided into chunks and consecutive
chunks are striped onto neighbouring devices.
@@ -183,7 +220,7 @@ All md devices contain:
of 2. This can only be set while assembling an array
layout
- The "layout" for the array for the particular level. This is
+ The ``layout`` for the array for the particular level. This is
 	simply a number that is interpreted differently by different
levels. It can be written while assembling an array.
@@ -193,22 +230,24 @@ All md devices contain:
devices. Writing a number (in Kilobytes) which is less than
the available size will set the size. Any reconfiguration of the
array (e.g. adding devices) will not cause the size to change.
- Writing the word 'default' will cause the effective size of the
+ Writing the word ``default`` will cause the effective size of the
array to be whatever size is actually available based on
- 'level', 'chunk_size' and 'component_size'.
+ ``level``, ``chunk_size`` and ``component_size``.
This can be used to reduce the size of the array before reducing
the number of devices in a raid4/5/6, or to support external
metadata formats which mandate such clipping.
reshape_position
- This is either "none" or a sector number within the devices of
- the array where "reshape" is up to. If this is set, the three
+ This is either ``none`` or a sector number within the devices of
+ the array where ``reshape`` is up to. If this is set, the three
attributes mentioned above (raid_disks, chunk_size, layout) can
potentially have 2 values, an old and a new value. If these
- values differ, reading the attribute returns
+ values differ, reading the attribute returns::
+
new (old)
- and writing will effect the 'new' value, leaving the 'old'
+
+	and writing will affect the ``new`` value, leaving the ``old``
unchanged.
component_size
@@ -223,9 +262,9 @@ All md devices contain:
metadata_version
This indicates the format that is being used to record metadata
about the array. It can be 0.90 (traditional format), 1.0, 1.1,
- 1.2 (newer format in varying locations) or "none" indicating that
+ 1.2 (newer format in varying locations) or ``none`` indicating that
the kernel isn't managing metadata at all.
- Alternately it can be "external:" followed by a string which
+ Alternately it can be ``external:`` followed by a string which
is set by user-space. This indicates that metadata is managed
by a user-space program. Any device failure or other event that
requires a metadata update will cause array activity to be
@@ -233,9 +272,9 @@ All md devices contain:
resync_start
The point at which resync should start. If no resync is needed,
- this will be a very large number (or 'none' since 2.6.30-rc1). At
+ this will be a very large number (or ``none`` since 2.6.30-rc1). At
array creation it will default to 0, though starting the array as
- 'clean' will set it much larger.
+ ``clean`` will set it much larger.
new_dev
This file can be written but not read. The value written should
@@ -246,10 +285,10 @@ All md devices contain:
safe_mode_delay
When an md array has seen no write requests for a certain period
- of time, it will be marked as 'clean'. When another write
- request arrives, the array is marked as 'dirty' before the write
- commences. This is known as 'safe_mode'.
- The 'certain period' is controlled by this file which stores the
+ of time, it will be marked as ``clean``. When another write
+ request arrives, the array is marked as ``dirty`` before the write
+ commences. This is known as ``safe_mode``.
+ The ``certain period`` is controlled by this file which stores the
period as a number of seconds. The default is 200msec (0.200).
Writing a value of 0 disables safemode.
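
A hedged example, assuming a hypothetical array named md0; the value written is a number of seconds::

    # echo 1.5 > /sys/block/md0/md/safe_mode_delay
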
@@ -260,38 +299,50 @@ All md devices contain:
cannot be explicitly set, and some transitions are not allowed.
Select/poll works on this file. All changes except between
- active_idle and active (which can be frequent and are not
- very interesting) are notified. active->active_idle is
- reported if the metadata is externally managed.
+     active_idle and active (which can be frequent and are not
+ very interesting) are notified. active->active_idle is
+ reported if the metadata is externally managed.
clear
No devices, no size, no level
+
Writing is equivalent to STOP_ARRAY ioctl
+
inactive
May have some settings, but array is not active
- all IO results in error
+ all IO results in error
+
When written, doesn't tear down array, but just stops it
+
suspended (not supported yet)
All IO requests will block. The array can be reconfigured.
+
      Writing this, if accepted, will block until the array is quiescent
+
readonly
no resync can happen. no superblocks get written.
- write requests fail
+
+ Write requests fail
+
read-auto
- like readonly, but behaves like 'clean' on a write request.
+ like readonly, but behaves like ``clean`` on a write request.
+
+ clean
+ no pending writes, but otherwise active.
- clean - no pending writes, but otherwise active.
When written to inactive array, starts without resync
+
If a write request arrives then
- if metadata is known, mark 'dirty' and switch to 'active'.
- if not known, block and switch to write-pending
+ if metadata is known, mark ``dirty`` and switch to ``active``.
+ if not known, block and switch to write-pending
+
If written to an active array that has pending writes, then fails.
active
fully active: IO and resync can be happening.
When written to inactive array, starts with resync
write-pending
- clean, but writes are blocked waiting for 'active' to be written.
+ clean, but writes are blocked waiting for ``active`` to be written.
active-idle
like active, but no writes have been seen for a while (safe_mode_delay).
@@ -299,57 +350,71 @@ All md devices contain:
bitmap/location
This indicates where the write-intent bitmap for the array is
stored.
- It can be one of "none", "file" or "[+-]N".
- "file" may later be extended to "file:/file/name"
- "[+-]N" means that many sectors from the start of the metadata.
- This is replicated on all devices. For arrays with externally
- managed metadata, the offset is from the beginning of the
- device.
+
+ It can be one of ``none``, ``file`` or ``[+-]N``.
+ ``file`` may later be extended to ``file:/file/name``
+ ``[+-]N`` means that many sectors from the start of the metadata.
+
+ This is replicated on all devices. For arrays with externally
+ managed metadata, the offset is from the beginning of the
+ device.
+
bitmap/chunksize
The size, in bytes, of the chunk which will be represented by a
single bit. For RAID456, it is a portion of an individual
device. For RAID10, it is a portion of the array. For RAID1, it
is both (they come to the same thing).
+
bitmap/time_base
The time, in seconds, between looking for bits in the bitmap to
be cleared. In the current implementation, a bit will be cleared
- between 2 and 3 times "time_base" after all the covered blocks
+ between 2 and 3 times ``time_base`` after all the covered blocks
are known to be in-sync.
+
bitmap/backlog
When write-mostly devices are active in a RAID1, write requests
to those devices proceed in the background - the filesystem (or
other user of the device) does not have to wait for them.
- 'backlog' sets a limit on the number of concurrent background
+ ``backlog`` sets a limit on the number of concurrent background
writes. If there are more than this, new writes will by
synchronous.
+
bitmap/metadata
- This can be either 'internal' or 'external'.
- 'internal' is the default and means the metadata for the bitmap
- is stored in the first 256 bytes of the allocated space and is
- managed by the md module.
- 'external' means that bitmap metadata is managed externally to
- the kernel (i.e. by some userspace program)
+ This can be either ``internal`` or ``external``.
+
+ ``internal``
+ is the default and means the metadata for the bitmap
+ is stored in the first 256 bytes of the allocated space and is
+ managed by the md module.
+
+ ``external``
+ means that bitmap metadata is managed externally to
+ the kernel (i.e. by some userspace program)
+
bitmap/can_clear
- This is either 'true' or 'false'. If 'true', then bits in the
+ This is either ``true`` or ``false``. If ``true``, then bits in the
bitmap will be cleared when the corresponding blocks are thought
- to be in-sync. If 'false', bits will never be cleared.
- This is automatically set to 'false' if a write happens on a
+ to be in-sync. If ``false``, bits will never be cleared.
+ This is automatically set to ``false`` if a write happens on a
degraded array, or if the array becomes degraded during a write.
When metadata is managed externally, it should be set to true
once the array becomes non-degraded, and this fact has been
recorded in the metadata.
-
-
-
-As component devices are added to an md array, they appear in the 'md'
-directory as new directories named
+
+
+
+As component devices are added to an md array, they appear in the ``md``
+directory as new directories named::
+
dev-XXX
-where XXX is a name that the kernel knows for the device, e.g. hdb1.
+
+where ``XXX`` is a name that the kernel knows for the device, e.g. hdb1.
Each directory contains:
block
- a symlink to the block device in /sys/block, e.g.
+ a symlink to the block device in /sys/block, e.g.::
+
/sys/block/md0/md/dev-hdb1/block -> ../../../../block/hdb/hdb1
super
@@ -358,51 +423,83 @@ Each directory contains:
state
A file recording the current state of the device in the array
- which can be a comma separated list of
- faulty - device has been kicked from active use due to
- a detected fault, or it has unacknowledged bad
- blocks
- in_sync - device is a fully in-sync member of the array
- writemostly - device will only be subject to read
- requests if there are no other options.
- This applies only to raid1 arrays.
- blocked - device has failed, and the failure hasn't been
- acknowledged yet by the metadata handler.
- Writes that would write to this device if
- it were not faulty are blocked.
- spare - device is working, but not a full member.
- This includes spares that are in the process
- of being recovered to
- write_error - device has ever seen a write error.
- want_replacement - device is (mostly) working but probably
- should be replaced, either due to errors or
- due to user request.
- replacement - device is a replacement for another active
- device with same raid_disk.
+ which can be a comma separated list of:
+
+ faulty
+ device has been kicked from active use due to
+ a detected fault, or it has unacknowledged bad
+ blocks
+
+ in_sync
+ device is a fully in-sync member of the array
+
+ writemostly
+ device will only be subject to read
+ requests if there are no other options.
+
+ This applies only to raid1 arrays.
+
+ blocked
+ device has failed, and the failure hasn't been
+ acknowledged yet by the metadata handler.
+
+ Writes that would write to this device if
+ it were not faulty are blocked.
+
+ spare
+ device is working, but not a full member.
+
+ This includes spares that are in the process
+ of being recovered to
+
+ write_error
+ device has ever seen a write error.
+
+ want_replacement
+ device is (mostly) working but probably
+ should be replaced, either due to errors or
+ due to user request.
+
+ replacement
+ device is a replacement for another active
+ device with same raid_disk.
This list may grow in future.
+
This can be written to.
- Writing "faulty" simulates a failure on the device.
- Writing "remove" removes the device from the array.
- Writing "writemostly" sets the writemostly flag.
- Writing "-writemostly" clears the writemostly flag.
- Writing "blocked" sets the "blocked" flag.
- Writing "-blocked" clears the "blocked" flags and allows writes
- to complete and possibly simulates an error.
- Writing "in_sync" sets the in_sync flag.
- Writing "write_error" sets writeerrorseen flag.
- Writing "-write_error" clears writeerrorseen flag.
- Writing "want_replacement" is allowed at any time except to a
- replacement device or a spare. It sets the flag.
- Writing "-want_replacement" is allowed at any time. It clears
- the flag.
- Writing "replacement" or "-replacement" is only allowed before
- starting the array. It sets or clears the flag.
-
-
- This file responds to select/poll. Any change to 'faulty'
- or 'blocked' causes an event.
+
+ Writing ``faulty`` simulates a failure on the device.
+
+ Writing ``remove`` removes the device from the array.
+
+ Writing ``writemostly`` sets the writemostly flag.
+
+ Writing ``-writemostly`` clears the writemostly flag.
+
+ Writing ``blocked`` sets the ``blocked`` flag.
+
+ Writing ``-blocked`` clears the ``blocked`` flags and allows writes
+ to complete and possibly simulates an error.
+
+ Writing ``in_sync`` sets the in_sync flag.
+
+      Writing ``write_error`` sets the writeerrorseen flag.
+
+      Writing ``-write_error`` clears the writeerrorseen flag.
+
+ Writing ``want_replacement`` is allowed at any time except to a
+ replacement device or a spare. It sets the flag.
+
+ Writing ``-want_replacement`` is allowed at any time. It clears
+ the flag.
+
+ Writing ``replacement`` or ``-replacement`` is only allowed before
+ starting the array. It sets or clears the flag.
+
+
+ This file responds to select/poll. Any change to ``faulty``
+ or ``blocked`` causes an event.
errors
An approximate count of read errors that have been detected on
@@ -417,9 +514,9 @@ Each directory contains:
slot
This gives the role that the device has in the array. It will
- either be 'none' if the device is not active in the array
+ either be ``none`` if the device is not active in the array
(i.e. is a spare or has failed) or an integer less than the
- 'raid_disks' number for the array indicating which position
+ ``raid_disks`` number for the array indicating which position
it currently fills. This can only be set while assembling an
array. A device for which this is set is assumed to be working.
@@ -437,7 +534,7 @@ Each directory contains:
written, it will be rejected.
recovery_start
- When the device is not 'in_sync', this records the number of
+ When the device is not ``in_sync``, this records the number of
sectors from the start of the device which are known to be
correct. This is normally zero, but during a recovery
operation it will steadily increase, and if the recovery is
@@ -447,21 +544,21 @@ Each directory contains:
This can be set whenever the device is not an active member of
the array, either before the array is activated, or before
- the 'slot' is set.
+ the ``slot`` is set.
+
+ Setting this to ``none`` is equivalent to setting ``in_sync``.
+ Setting to any other value also clears the ``in_sync`` flag.
- Setting this to 'none' is equivalent to setting 'in_sync'.
- Setting to any other value also clears the 'in_sync' flag.
-
bad_blocks
This gives the list of all known bad blocks in the form of
start address and length (in sectors respectively). If output
is too big to fit in a page, it will be truncated. Writing
- "sector length" to this file adds new acknowledged (i.e.
+ ``sector length`` to this file adds new acknowledged (i.e.
recorded to disk safely) bad blocks.
unacknowledged_bad_blocks
This gives the list of known-but-not-yet-saved-to-disk bad
- blocks in the same form of 'bad_blocks'. If output is too big
+ blocks in the same form of ``bad_blocks``. If output is too big
to fit in a page, it will be truncated. Writing to this file
adds bad blocks without acknowledging them. This is largely
for testing.
@@ -469,16 +566,18 @@ Each directory contains:
An active md device will also contain an entry for each active device
-in the array. These are named
+in the array. These are named::
rdNN
-where 'NN' is the position in the array, starting from 0.
+where ``NN`` is the position in the array, starting from 0.
So for a 3 drive array there will be rd0, rd1, rd2.
-These are symbolic links to the appropriate 'dev-XXX' entry.
-Thus, for example,
+These are symbolic links to the appropriate ``dev-XXX`` entry.
+Thus, for example::
+
cat /sys/block/md*/md/rd*/state
-will show 'in_sync' on every line.
+
+will show ``in_sync`` on every line.
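
A hedged example of inspecting one component device the same way; the device name is hypothetical::

    $ cat /sys/block/md0/md/dev-sda1/state
    in_sync
    $ cat /sys/block/md0/md/dev-sda1/slot
    0
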
@@ -488,50 +587,62 @@ also have
sync_action
a text file that can be used to monitor and control the rebuild
process. It contains one word which can be one of:
- resync - redundancy is being recalculated after unclean
- shutdown or creation
- recover - a hot spare is being built to replace a
- failed/missing device
- idle - nothing is happening
- check - A full check of redundancy was requested and is
- happening. This reads all blocks and checks
- them. A repair may also happen for some raid
- levels.
- repair - A full check and repair is happening. This is
- similar to 'resync', but was requested by the
- user, and the write-intent bitmap is NOT used to
- optimise the process.
+
+ resync
+ redundancy is being recalculated after unclean
+ shutdown or creation
+
+ recover
+ a hot spare is being built to replace a
+ failed/missing device
+
+ idle
+ nothing is happening
+
+   check
+ A full check of redundancy was requested and is
+ happening. This reads all blocks and checks
+ them. A repair may also happen for some raid
+ levels.
+
+ repair
+ A full check and repair is happening. This is
+ similar to ``resync``, but was requested by the
+ user, and the write-intent bitmap is NOT used to
+ optimise the process.
This file is writable, and each of the strings that could be
read are meaningful for writing.
- 'idle' will stop an active resync/recovery etc. There is no
- guarantee that another resync/recovery may not be automatically
- started again, though some event will be needed to trigger
- this.
- 'resync' or 'recovery' can be used to restart the
- corresponding operation if it was stopped with 'idle'.
- 'check' and 'repair' will start the appropriate process
- providing the current state is 'idle'.
+ ``idle`` will stop an active resync/recovery etc. There is no
+ guarantee that another resync/recovery may not be automatically
+ started again, though some event will be needed to trigger
+ this.
+
+ ``resync`` or ``recovery`` can be used to restart the
+ corresponding operation if it was stopped with ``idle``.
+
+ ``check`` and ``repair`` will start the appropriate process
+ providing the current state is ``idle``.
This file responds to select/poll. Any important change in the value
triggers a poll event. Sometimes the value will briefly be
- "recover" if a recovery seems to be needed, but cannot be
- achieved. In that case, the transition to "recover" isn't
+ ``recover`` if a recovery seems to be needed, but cannot be
+ achieved. In that case, the transition to ``recover`` isn't
notified, but the transition away is.
degraded
This contains a count of the number of devices by which the
- arrays is degraded. So an optimal array will show '0'. A
- single failed/missing drive will show '1', etc.
+     array is degraded.  So an optimal array will show ``0``.  A
+ single failed/missing drive will show ``1``, etc.
+
This file responds to select/poll, any increase or decrease
in the count of missing devices will trigger an event.
mismatch_count
- When performing 'check' and 'repair', and possibly when
- performing 'resync', md will count the number of errors that are
- found. The count in 'mismatch_cnt' is the number of sectors
- that were re-written, or (for 'check') would have been
+ When performing ``check`` and ``repair``, and possibly when
+ performing ``resync``, md will count the number of errors that are
+ found. The count in ``mismatch_cnt`` is the number of sectors
+ that were re-written, or (for ``check``) would have been
re-written. As most raid levels work in units of pages rather
than sectors, this may be larger than the number of actual errors
by a factor of the number of sectors in a page.
@@ -542,27 +653,30 @@ also have
would need to check the corresponding blocks. Either individual
numbers or start-end pairs can be written. Multiple numbers
can be separated by a space.
- Note that the numbers are 'bit' numbers, not 'block' numbers.
+
+ Note that the numbers are ``bit`` numbers, not ``block`` numbers.
They should be scaled by the bitmap_chunksize.
- sync_speed_min
- sync_speed_max
- This are similar to /proc/sys/dev/raid/speed_limit_{min,max}
+ sync_speed_min, sync_speed_max
+     These are similar to ``/proc/sys/dev/raid/speed_limit_{min,max}``
however they only apply to the particular array.
- If no value has been written to these, or if the word 'system'
+
+ If no value has been written to these, or if the word ``system``
is written, then the system-wide value is used. If a value,
in kibibytes-per-second is written, then it is used.
+
When the files are read, they show the currently active value
- followed by "(local)" or "(system)" depending on whether it is
+ followed by ``(local)`` or ``(system)`` depending on whether it is
a locally set or system-wide value.
sync_completed
This shows the number of sectors that have been completed of
whatever the current sync_action is, followed by the number of
sectors in total that could need to be processed. The two
- numbers are separated by a '/' thus effectively showing one
+ numbers are separated by a ``/`` thus effectively showing one
value, a fraction of the process that is complete.
- A 'select' on this attribute will return when resync completes,
+
+ A ``select`` on this attribute will return when resync completes,
when it reaches the current sync_max (below) and possibly at
other times.
@@ -570,26 +684,24 @@ also have
This shows the current actual speed, in K/sec, of the current
sync_action. It is averaged over the last 30 seconds.
- suspend_lo
- suspend_hi
+ suspend_lo, suspend_hi
The two values, given as numbers of sectors, indicate a range
within the array where IO will be blocked. This is currently
only supported for raid4/5/6.
- sync_min
- sync_max
+ sync_min, sync_max
The two values, given as numbers of sectors, indicate a range
- within the array where 'check'/'repair' will operate. Must be
- a multiple of chunk_size. When it reaches "sync_max" it will
+ within the array where ``check``/``repair`` will operate. Must be
+ a multiple of chunk_size. When it reaches ``sync_max`` it will
pause, rather than complete.
- You can use 'select' or 'poll' on "sync_completed" to wait for
+ You can use ``select`` or ``poll`` on ``sync_completed`` to wait for
that number to reach sync_max. Then you can either increase
- "sync_max", or can write 'idle' to "sync_action".
+ ``sync_max``, or can write ``idle`` to ``sync_action``.
- The value of 'max' for "sync_max" effectively disables the limit.
+ The value of ``max`` for ``sync_max`` effectively disables the limit.
When a resync is active, the value can only ever be increased,
never decreased.
- The value of '0' is the minimum for "sync_min".
+ The value of ``0`` is the minimum for ``sync_min``.
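
A hedged sketch, not part of the patch: to check only the first gigabyte of each device on a hypothetical md0, assuming the array is currently idle and the values are multiples of the chunk size::

    # echo 0       > /sys/block/md0/md/sync_min
    # echo 2097152 > /sys/block/md0/md/sync_max
    # echo check   > /sys/block/md0/md/sync_action

(2097152 sectors of 512 bytes is 1 GiB.)
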
@@ -598,13 +710,15 @@ personality module that manages it.
These are specific to the implementation of the module and could
change substantially if the implementation changes.
-These currently include
+These currently include:
stripe_cache_size (currently raid5 only)
number of entries in the stripe cache. This is writable, but
there are upper and lower limits (32768, 17). Default is 256.
+
   stripe_cache_active (currently raid5 only)
number of active entries in the stripe cache
+
preread_bypass_threshold (currently raid5 only)
number of times a stripe requiring preread will be bypassed by
a stripe that does not require preread. For fairness defaults
diff --git a/Documentation/module-signing.txt b/Documentation/admin-guide/module-signing.rst
index f0e3361db20c..27e59498b487 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/admin-guide/module-signing.rst
@@ -1,22 +1,21 @@
- ==============================
- KERNEL MODULE SIGNING FACILITY
- ==============================
-
-CONTENTS
-
- - Overview.
- - Configuring module signing.
- - Generating signing keys.
- - Public keys in the kernel.
- - Manually signing modules.
- - Signed modules and stripping.
- - Loading signed modules.
- - Non-valid signatures and unsigned modules.
- - Administering/protecting the private key.
+Kernel module signing facility
+------------------------------
+
+.. CONTENTS
+..
+.. - Overview.
+.. - Configuring module signing.
+.. - Generating signing keys.
+.. - Public keys in the kernel.
+.. - Manually signing modules.
+.. - Signed modules and stripping.
+.. - Loading signed modules.
+.. - Non-valid signatures and unsigned modules.
+.. - Administering/protecting the private key.
========
-OVERVIEW
+Overview
========
The kernel module signing facility cryptographically signs modules during
@@ -36,17 +35,19 @@ SHA-512 (the algorithm is selected by data in the signature).
==========================
-CONFIGURING MODULE SIGNING
+Configuring module signing
==========================
-The module signing facility is enabled by going to the "Enable Loadable Module
-Support" section of the kernel configuration and turning on
+The module signing facility is enabled by going to the
+:menuselection:`Enable Loadable Module Support` section of
+the kernel configuration and turning on::
CONFIG_MODULE_SIG "Module signature verification"
This has a number of options available:
- (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+ (1) :menuselection:`Require modules to be validly signed`
+ (``CONFIG_MODULE_SIG_FORCE``)
This specifies how the kernel should deal with a module that has a
signature for which the key is not known or a module that is unsigned.
@@ -64,35 +65,39 @@ This has a number of options available:
cannot be parsed, it will be rejected out of hand.
- (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+ (2) :menuselection:`Automatically sign all modules`
+ (``CONFIG_MODULE_SIG_ALL``)
If this is on then modules will be automatically signed during the
modules_install phase of a build. If this is off, then the modules must
- be signed manually using:
+ be signed manually using::
scripts/sign-file
- (3) "Which hash algorithm should modules be signed with?"
+ (3) :menuselection:`Which hash algorithm should modules be signed with?`
This presents a choice of which hash algorithm the installation phase will
sign the modules with:
- CONFIG_MODULE_SIG_SHA1 "Sign modules with SHA-1"
- CONFIG_MODULE_SIG_SHA224 "Sign modules with SHA-224"
- CONFIG_MODULE_SIG_SHA256 "Sign modules with SHA-256"
- CONFIG_MODULE_SIG_SHA384 "Sign modules with SHA-384"
- CONFIG_MODULE_SIG_SHA512 "Sign modules with SHA-512"
+ =============================== ==========================================
+ ``CONFIG_MODULE_SIG_SHA1`` :menuselection:`Sign modules with SHA-1`
+ ``CONFIG_MODULE_SIG_SHA224`` :menuselection:`Sign modules with SHA-224`
+ ``CONFIG_MODULE_SIG_SHA256`` :menuselection:`Sign modules with SHA-256`
+ ``CONFIG_MODULE_SIG_SHA384`` :menuselection:`Sign modules with SHA-384`
+ ``CONFIG_MODULE_SIG_SHA512`` :menuselection:`Sign modules with SHA-512`
+ =============================== ==========================================
The algorithm selected here will also be built into the kernel (rather
than being a module) so that modules signed with that algorithm can have
their signatures checked without causing a dependency loop.
- (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY)
+ (4) :menuselection:`File name or PKCS#11 URI of module signing key`
+ (``CONFIG_MODULE_SIG_KEY``)
Setting this option to something other than its default of
- "certs/signing_key.pem" will disable the autogeneration of signing keys
+ ``certs/signing_key.pem`` will disable the autogeneration of signing keys
and allow the kernel modules to be signed with a key of your choosing.
The string provided should identify a file containing both a private key
and its corresponding X.509 certificate in PEM form, or — on systems where
@@ -102,10 +107,11 @@ This has a number of options available:
If the PEM file containing the private key is encrypted, or if the
      PKCS#11 token requires a PIN, this can be provided at build time by
- means of the KBUILD_SIGN_PIN variable.
+ means of the ``KBUILD_SIGN_PIN`` variable.
- (5) "Additional X.509 keys for default system keyring" (CONFIG_SYSTEM_TRUSTED_KEYS)
+ (5) :menuselection:`Additional X.509 keys for default system keyring`
+ (``CONFIG_SYSTEM_TRUSTED_KEYS``)
This option can be set to the filename of a PEM-encoded file containing
additional certificates which will be included in the system keyring by
@@ -116,7 +122,7 @@ packages to the kernel build processes for the tool that does the signing.
=======================
-GENERATING SIGNING KEYS
+Generating signing keys
=======================
Cryptographic keypairs are required to generate and check signatures. A
@@ -126,14 +132,14 @@ it can be deleted or stored securely. The public key gets built into the
kernel so that it can be used to check the signatures as the modules are
loaded.
-Under normal conditions, when CONFIG_MODULE_SIG_KEY is unchanged from its
+Under normal conditions, when ``CONFIG_MODULE_SIG_KEY`` is unchanged from its
default, the kernel build will automatically generate a new keypair using
-openssl if one does not exist in the file:
+openssl if one does not exist in the file::
certs/signing_key.pem
during the building of vmlinux (the public part of the key needs to be built
-into vmlinux) using parameters in the:
+into vmlinux) using parameters in the::
certs/x509.genkey
@@ -142,14 +148,14 @@ file (which is also generated if it does not already exist).
It is strongly recommended that you provide your own x509.genkey file.
Most notably, in the x509.genkey file, the req_distinguished_name section
-should be altered from the default:
+should be altered from the default::
[ req_distinguished_name ]
#O = Unspecified company
CN = Build time autogenerated kernel key
#emailAddress = unspecified.user@unspecified.company
-The generated RSA key size can also be set with:
+The generated RSA key size can also be set with::
[ req ]
default_bits = 4096
@@ -158,23 +164,23 @@ The generated RSA key size can also be set with:
It is also possible to manually generate the key private/public files using the
x509.genkey key generation configuration file in the root node of the Linux
kernel sources tree and the openssl command. The following is an example to
-generate the public/private key files:
+generate the public/private key files::
openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
-config x509.genkey -outform PEM -out kernel_key.pem \
-keyout kernel_key.pem
The full pathname for the resulting kernel_key.pem file can then be specified
-in the CONFIG_MODULE_SIG_KEY option, and the certificate and key therein will
+in the ``CONFIG_MODULE_SIG_KEY`` option, and the certificate and key therein will
be used instead of an autogenerated keypair.
=========================
-PUBLIC KEYS IN THE KERNEL
+Public keys in the kernel
=========================
The kernel contains a ring of public keys that can be viewed by root. They're
-in a keyring called ".system_keyring" that can be seen by:
+in a keyring called ".system_keyring" that can be seen by::
[root@deneb ~]# cat /proc/keys
...
@@ -184,27 +190,27 @@ in a keyring called ".system_keyring" that can be seen by:
Beyond the public key generated specifically for module signing, additional
trusted certificates can be provided in a PEM-encoded file referenced by the
-CONFIG_SYSTEM_TRUSTED_KEYS configuration option.
+``CONFIG_SYSTEM_TRUSTED_KEYS`` configuration option.
Further, the architecture code may take public keys from a hardware store and
add those in also (e.g. from the UEFI key database).
-Finally, it is possible to add additional public keys by doing:
+Finally, it is possible to add additional public keys by doing::
keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
-e.g.:
+e.g.::
keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
Note, however, that the kernel will only permit keys to be added to
-.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+``.system_keyring`` *if* the new key's X.509 wrapper is validly signed by a key
that is already resident in the .system_keyring at the time the key was added.
-=========================
-MANUALLY SIGNING MODULES
-=========================
+========================
+Manually signing modules
+========================
To manually sign a module, use the scripts/sign-file tool available in
the Linux kernel source tree. The script requires 4 arguments:
@@ -214,7 +220,7 @@ the Linux kernel source tree. The script requires 4 arguments:
3. The public key filename
4. The kernel module to be signed
-The following is an example to sign a kernel module:
+The following is an example to sign a kernel module::
scripts/sign-file sha512 kernel-signkey.priv \
kernel-signkey.x509 module.ko
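
A hedged variation on the example above, reusing the same hypothetical file names: if the private key is encrypted, the passphrase can be supplied through the ``KBUILD_SIGN_PIN`` environment variable mentioned elsewhere in this document::

    KBUILD_SIGN_PIN="passphrase" scripts/sign-file sha512 kernel-signkey.priv \
        kernel-signkey.x509 module.ko
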
@@ -228,11 +234,11 @@ $KBUILD_SIGN_PIN environment variable.
============================
-SIGNED MODULES AND STRIPPING
+Signed modules and stripping
============================
A signed module has a digital signature simply appended at the end. The string
-"~Module signature appended~." at the end of the module's file confirms that a
+``~Module signature appended~.`` at the end of the module's file confirms that a
signature is present but it does not confirm that the signature is valid!
Signed modules are BRITTLE as the signature is outside of the defined ELF
@@ -242,19 +248,19 @@ debug information present at the time of signing.
======================
-LOADING SIGNED MODULES
+Loading signed modules
======================
-Modules are loaded with insmod, modprobe, init_module() or finit_module(),
-exactly as for unsigned modules as no processing is done in userspace. The
-signature checking is all done within the kernel.
+Modules are loaded with insmod, modprobe, ``init_module()`` or
+``finit_module()``, exactly as for unsigned modules as no processing is
+done in userspace. The signature checking is all done within the kernel.
=========================================
-NON-VALID SIGNATURES AND UNSIGNED MODULES
+Non-valid signatures and unsigned modules
=========================================
-If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
+If ``CONFIG_MODULE_SIG_FORCE`` is enabled or module.sig_enforce=1 is supplied on
the kernel command line, the kernel will only load validly signed modules
for which it has a public key. Otherwise, it will also load modules that are
unsigned. Any module for which the kernel has a key, but which proves to have
@@ -264,7 +270,7 @@ Any module that has an unparseable signature will be rejected.
=========================================
-ADMINISTERING/PROTECTING THE PRIVATE KEY
+Administering/protecting the private key
=========================================
Since the private key is used to sign modules, viruses and malware could use
@@ -275,5 +281,5 @@ in the root node of the kernel source tree.
If you use the same private key to sign modules for multiple kernel
configurations, you must ensure that the module version information is
sufficient to prevent loading a module into a different kernel. Either
-set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
-kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
+set ``CONFIG_MODVERSIONS=y`` or ensure that each configuration has a different
+kernel release string by changing ``EXTRAVERSION`` or ``CONFIG_LOCALVERSION``.
diff --git a/Documentation/mono.txt b/Documentation/admin-guide/mono.rst
index d01ac6052194..cdddc099af64 100644
--- a/Documentation/mono.txt
+++ b/Documentation/admin-guide/mono.rst
@@ -1,5 +1,5 @@
- Mono(tm) Binary Kernel Support for Linux
- -----------------------------------------
+Mono(tm) Binary Kernel Support for Linux
+-----------------------------------------
To configure Linux to automatically execute Mono-based .NET binaries
(in the form of .exe files) without the need to use the mono CLR
@@ -19,22 +19,24 @@ other program after you have done the following:
http://www.go-mono.com/compiling.html
Once the Mono CLR support has been installed, just check that
- /usr/bin/mono (which could be located elsewhere, for example
- /usr/local/bin/mono) is working.
+ ``/usr/bin/mono`` (which could be located elsewhere, for example
+ ``/usr/local/bin/mono``) is working.
2) You have to compile BINFMT_MISC either as a module or into
- the kernel (CONFIG_BINFMT_MISC) and set it up properly.
+ the kernel (``CONFIG_BINFMT_MISC``) and set it up properly.
If you choose to compile it as a module, you will have
to insert it manually with modprobe/insmod, as kmod
- cannot be easily supported with binfmt_misc.
- Read the file 'binfmt_misc.txt' in this directory to know
+ cannot be easily supported with binfmt_misc.
+ Read the file ``binfmt_misc.txt`` in this directory to know
more about the configuration process.
-3) Add the following entries to /etc/rc.local or similar script
+3) Add the following entries to ``/etc/rc.local`` or similar script
to be run at system startup:
-# Insert BINFMT_MISC module into the kernel
-if [ ! -e /proc/sys/fs/binfmt_misc/register ]; then
+ .. code-block:: sh
+
+ # Insert BINFMT_MISC module into the kernel
+ if [ ! -e /proc/sys/fs/binfmt_misc/register ]; then
/sbin/modprobe binfmt_misc
# Some distributions, like Fedora Core, perform
# the following command automatically when the
@@ -43,24 +45,26 @@ if [ ! -e /proc/sys/fs/binfmt_misc/register ]; then
# Thus, it is possible that the following line
# is not needed at all.
mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
-fi
+ fi
-# Register support for .NET CLR binaries
-if [ -e /proc/sys/fs/binfmt_misc/register ]; then
+ # Register support for .NET CLR binaries
+ if [ -e /proc/sys/fs/binfmt_misc/register ]; then
# Replace /usr/bin/mono with the correct pathname to
# the Mono CLR runtime (usually /usr/local/bin/mono
# when compiling from sources or CVS).
echo ':CLR:M::MZ::/usr/bin/mono:' > /proc/sys/fs/binfmt_misc/register
-else
+ else
echo "No binfmt_misc support"
exit 1
-fi
+ fi
-4) Check that .exe binaries can be ran without the need of a
- wrapper script, simply by launching the .exe file directly
- from a command prompt, for example:
+4) Check that ``.exe`` binaries can be run without the need for a
+ wrapper script, simply by launching the ``.exe`` file directly
+ from a command prompt, for example::
/usr/bin/xsd.exe
- NOTE: If this fails with a permission denied error, check
- that the .exe file has execute permissions.
+ .. note::
+
+ If this fails with a permission denied error, check
+ that the ``.exe`` file has execute permissions.
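
A hedged check, not part of the patch: a successful registration of the ``CLR`` entry above can usually be confirmed by reading it back from binfmt_misc (exact output may differ)::

    $ cat /proc/sys/fs/binfmt_misc/CLR
    enabled
    interpreter /usr/bin/mono
    flags:
    offset 0
    magic 4d5a
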
diff --git a/Documentation/admin-guide/parport.rst b/Documentation/admin-guide/parport.rst
new file mode 100644
index 000000000000..ad3f9b8a11e1
--- /dev/null
+++ b/Documentation/admin-guide/parport.rst
@@ -0,0 +1,286 @@
+Parport
++++++++
+
+The ``parport`` code provides parallel-port support under Linux. This
+includes the ability to share one port between multiple device
+drivers.
+
+You can pass parameters to the ``parport`` code to override its automatic
+detection of your hardware. This is particularly useful if you want
+to use IRQs, since in general these can't be autoprobed successfully.
+By default IRQs are not used even if they **can** be probed. This is
+because there are a lot of people using the same IRQ for their
+parallel port and a sound card or network card.
+
+The ``parport`` code is split into two parts: generic (which deals with
+port-sharing) and architecture-dependent (which deals with actually
+using the port).
+
+
+Parport as modules
+==================
+
+If you load the ``parport`` code as a module, say::
+
+ # insmod parport
+
+to load the generic ``parport`` code. You then must load the
+architecture-dependent code with (for example)::
+
+ # insmod parport_pc io=0x3bc,0x378,0x278 irq=none,7,auto
+
+to tell the ``parport`` code that you want three PC-style ports, one at
+0x3bc with no IRQ, one at 0x378 using IRQ 7, and one at 0x278 with an
+auto-detected IRQ. Currently, PC-style (``parport_pc``), Sun ``bpp``,
+Amiga, Atari, and MFC3 hardware is supported.
+
+PCI parallel I/O card support comes from ``parport_pc``. Base I/O
+addresses should not be specified for supported PCI cards since they
+are automatically detected.
+
+
+modprobe
+--------
+
+If you use modprobe, you will find it useful to add lines as below to a
+configuration file in the ``/etc/modprobe.d/`` directory::
+
+ alias parport_lowlevel parport_pc
+ options parport_pc io=0x378,0x278 irq=7,auto
+
+modprobe will load ``parport_pc`` (with the options ``io=0x378,0x278 irq=7,auto``)
+whenever a parallel port device driver (such as ``lp``) is loaded.
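+
+With lines like these in place, loading a port user by hand should pull
+in the low-level driver automatically; a quick sanity check from the
+shell (a sketch, not specific to any particular setup)::
+
+    # modprobe lp
+    # lsmod | grep parport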
+
+Note that these are example lines only! You shouldn't in general need
+to specify any options to ``parport_pc`` in order to be able to use a
+parallel port.
+
+
+Parport probe [optional]
+------------------------
+
+In 2.2 kernels there was a module called ``parport_probe``, which was used
+for collecting IEEE 1284 device ID information. This has since been
+enhanced and now lives with the IEEE 1284 support. When a parallel
+port is detected, the devices that are connected to it are analysed,
+and information is logged like this::
+
+ parport0: Printer, BJC-210 (Canon)
+
+The probe information is available from files in ``/proc/sys/dev/parport/``.
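+
+For example, once a port has been probed you can read the collected
+device ID information back by hand (a sketch; ``parport0`` assumes the
+first detected port)::
+
+    # cat /proc/sys/dev/parport/parport0/autoprobe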
+
+
+Parport linked into the kernel statically
+=========================================
+
+If you compile the ``parport`` code into the kernel, then you can use
+kernel boot parameters to get the same effect. Add something like the
+following to your LILO command line::
+
+ parport=0x3bc parport=0x378,7 parport=0x278,auto,nofifo
+
+You can have many ``parport=...`` statements, one for each port you want
+to add. Adding ``parport=0`` to the kernel command-line will disable
+parport support entirely. Adding ``parport=auto`` to the kernel
+command-line will make ``parport`` use any IRQ lines or DMA channels that
+it auto-detects.
+
+
+Files in /proc
+==============
+
+If you have configured the ``/proc`` filesystem into your kernel, you will
+see a new directory entry: ``/proc/sys/dev/parport``. In there will be a
+directory entry for each parallel port for which parport is
+configured. In each of those directories are a collection of files
+describing that parallel port.
+
+The ``/proc/sys/dev/parport`` directory tree looks like::
+
+  parport
+  |-- default
+  |   |-- spintime
+  |   `-- timeslice
+  |-- parport0
+  |   |-- autoprobe
+  |   |-- autoprobe0
+  |   |-- autoprobe1
+  |   |-- autoprobe2
+  |   |-- autoprobe3
+  |   |-- devices
+  |   |   |-- active
+  |   |   `-- lp
+  |   |       `-- timeslice
+  |   |-- base-addr
+  |   |-- irq
+  |   |-- dma
+  |   |-- modes
+  |   `-- spintime
+  `-- parport1
+      |-- autoprobe
+      |-- autoprobe0
+      |-- autoprobe1
+      |-- autoprobe2
+      |-- autoprobe3
+      |-- devices
+      |   |-- active
+      |   `-- ppa
+      |       `-- timeslice
+      |-- base-addr
+      |-- irq
+      |-- dma
+      |-- modes
+      `-- spintime
+
+.. tabularcolumns:: |p{4.0cm}|p{13.5cm}|
+
+======================= =======================================================
+File Contents
+======================= =======================================================
+``devices/active`` A list of the device drivers using that port. A "+"
+ will appear by the name of the device currently using
+ the port (it might not appear against any). The
+ string "none" means that there are no device drivers
+ using that port.
+
+``base-addr`` Parallel port's base address, or addresses if the port
+ has more than one in which case they are separated
+ with tabs. These values might not have any sensible
+ meaning for some ports.
+
+``irq`` Parallel port's IRQ, or -1 if none is being used.
+
+``dma`` Parallel port's DMA channel, or -1 if none is being
+ used.
+
+``modes`` Parallel port's hardware modes, comma-separated,
+ meaning:
+
+ - PCSPP
+ PC-style SPP registers are available.
+
+ - TRISTATE
+ Port is bidirectional.
+
+ - COMPAT
+ Hardware acceleration for printers is
+ available and will be used.
+
+ - EPP
+ Hardware acceleration for EPP protocol
+ is available and will be used.
+
+ - ECP
+ Hardware acceleration for ECP protocol
+ is available and will be used.
+
+ - DMA
+ DMA is available and will be used.
+
+ Note that the current implementation will only take
+ advantage of COMPAT and ECP modes if it has an IRQ
+ line to use.
+
+``autoprobe`` Any IEEE-1284 device ID information that has been
+ acquired from the (non-IEEE 1284.3) device.
+
+``autoprobe[0-3]`` IEEE 1284 device ID information retrieved from
+ daisy-chain devices that conform to IEEE 1284.3.
+
+``spintime`` The number of microseconds to busy-loop while waiting
+ for the peripheral to respond. You might find that
+ adjusting this improves performance, depending on your
+ peripherals. This is a port-wide setting, i.e. it
+ applies to all devices on a particular port.
+
+``timeslice`` The number of milliseconds that a device driver is
+ allowed to keep a port claimed for. This is advisory,
+ and a driver can ignore it if it must.
+
+``default/*`` The defaults for spintime and timeslice. When a new
+ port is registered, it picks up the default spintime.
+ When a new device is registered, it picks up the
+ default timeslice.
+======================= =======================================================
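+
+These entries are ordinary procfs files, so a port's setup can be
+inspected, and the tunables adjusted, straight from the shell (a sketch
+assuming the port is ``parport0``; pick values that suit your hardware)::
+
+    # cat /proc/sys/dev/parport/parport0/irq
+    # cat /proc/sys/dev/parport/parport0/modes
+    # echo 500 > /proc/sys/dev/parport/parport0/spintime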
+
+Device drivers
+==============
+
+Once the parport code is initialised, you can attach device drivers to
+specific ports. Normally this happens automatically; if the lp driver
+is loaded it will create one lp device for each port found. You can
+override this, though, by using parameters either when you load the lp
+driver::
+
+ # insmod lp parport=0,2
+
+or on the LILO command line::
+
+ lp=parport0 lp=parport2
+
+Both the above examples would inform lp that you want ``/dev/lp0`` to be
+the first parallel port, and ``/dev/lp1`` to be the **third** parallel port,
+with no lp device associated with the second port (parport1). Note
+that this is different to the way older kernels worked; there used to
+be a static association between the I/O port address and the device
+name, so ``/dev/lp0`` was always the port at 0x3bc. This is no longer the
+case - if you only have one port, it will default to being ``/dev/lp0``,
+regardless of base address.
+
+Also:
+
+ * If you selected the IEEE 1284 support at compile time, you can say
+ ``lp=auto`` on the kernel command line, and lp will create devices
+ only for those ports that seem to have printers attached.
+
+ * If you give PLIP the ``timid`` parameter, either with ``plip=timid`` on
+ the command line, or with ``insmod plip timid=1`` when using modules,
+ it will avoid any ports that seem to be in use by other devices.
+
+ * IRQ autoprobing works only for a few port types at the moment.
+
+Reporting printer problems with parport
+=======================================
+
+If you are having problems printing, please go through these steps to
+try to narrow down where the problem area is.
+
+When reporting problems with parport, really you need to give all of
+the messages that ``parport_pc`` spits out when it initialises. There are
+several code paths:
+
+- polling
+- interrupt-driven, protocol in software
+- interrupt-driven, protocol in hardware using PIO
+- interrupt-driven, protocol in hardware using DMA
+
+The kernel messages that ``parport_pc`` logs give an indication of which
+code path is being used. (They could be a lot better actually..)
+
+For normal printer protocol, having IEEE 1284 modes enabled or not
+should not make a difference.
+
+To turn off the 'protocol in hardware' code paths, disable
+``CONFIG_PARPORT_PC_FIFO``. Note that when they are enabled they are not
+necessarily **used**; it depends on whether the hardware is available,
+enabled by the BIOS, and detected by the driver.
+
+So, to start with, disable ``CONFIG_PARPORT_PC_FIFO``, and load ``parport_pc``
+with ``irq=none``. See if printing works then. It really should,
+because this is the simplest code path.
+
+If that works fine, try with ``io=0x378 irq=7`` (adjust for your
+hardware), to make it use interrupt-driven in-software protocol.
+
+If **that** works fine, then one of the hardware modes isn't working
+right. Enable ``CONFIG_PARPORT_PC_FIFO`` (no, it isn't a module option,
+and yes, it should be), set the port to ECP mode in the BIOS and note
+the DMA channel, and try with::
+
+ io=0x378 irq=7 dma=none (for PIO)
+ io=0x378 irq=7 dma=3 (for DMA)
+
+----------
+
+philb@gnu.org
+tim@cyberelk.net
diff --git a/Documentation/ramoops.txt b/Documentation/admin-guide/ramoops.rst
index 26b9f31cf65a..4efd7ce77565 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/admin-guide/ramoops.rst
@@ -5,34 +5,37 @@ Sergiu Iordache <sergiu@chromium.org>
Updated: 17 November 2011
-0. Introduction
+Introduction
+------------
Ramoops is an oops/panic logger that writes its logs to RAM before the system
crashes. It works by logging oopses and panics in a circular buffer. Ramoops
needs a system with persistent RAM so that the content of that area can
survive after a restart.
-1. Ramoops concepts
+Ramoops concepts
+----------------
Ramoops uses a predefined memory area to store the dump. The start and size
and type of the memory area are set using three variables:
- * "mem_address" for the start
- * "mem_size" for the size. The memory size will be rounded down to a
- power of two.
- * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
-
-Typically the default value of mem_type=0 should be used as that sets the pstore
-mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
-pgprot_noncached, which only works on some platforms. This is because pstore
+
+ * ``mem_address`` for the start
+ * ``mem_size`` for the size. The memory size will be rounded down to a
+ power of two.
+ * ``mem_type`` to specify the memory type (default is ``pgprot_writecombine``).
+
+Typically the default value of ``mem_type=0`` should be used as that sets the pstore
+mapping to ``pgprot_writecombine``. Setting ``mem_type=1`` attempts to use
+``pgprot_noncached``, which only works on some platforms. This is because pstore
depends on atomic operations. At least on ARM, pgprot_noncached causes the
memory to be mapped strongly ordered, and atomic operations on strongly ordered
memory are implementation defined, and won't work on many ARMs such as omaps.
-The memory area is divided into "record_size" chunks (also rounded down to
-power of two) and each oops/panic writes a "record_size" chunk of
+The memory area is divided into ``record_size`` chunks (also rounded down to
+power of two) and each oops/panic writes a ``record_size`` chunk of
information.
-Dumping both oopses and panics can be done by setting 1 in the "dump_oops"
+Dumping both oopses and panics can be done by setting 1 in the ``dump_oops``
variable while setting 0 in that variable dumps only the panics.
The module uses a counter to record multiple dumps but the counter gets reset
@@ -43,7 +46,8 @@ This might be useful when a hardware reset was used to bring the machine back
to life (i.e. a watchdog triggered). In such cases, RAM may be somewhat
corrupt, but usually it is restorable.
-2. Setting the parameters
+Setting the parameters
+----------------------
Setting the ramoops parameters can be done in several different manners:
@@ -52,12 +56,13 @@ Setting the ramoops parameters can be done in several different manners:
boot and then use the reserved memory for ramoops. For example, assuming a
machine with > 128 MB of memory, the following kernel command line will tell
the kernel to use only the first 128 MB of memory, and place ECC-protected
- ramoops region at 128 MB boundary:
- "mem=128M ramoops.mem_address=0x8000000 ramoops.ecc=1"
+ ramoops region at 128 MB boundary::
+
+ mem=128M ramoops.mem_address=0x8000000 ramoops.ecc=1
B. Use Device Tree bindings, as described in
- Documentation/device-tree/bindings/reserved-memory/ramoops.txt.
- For example:
+ ``Documentation/devicetree/bindings/reserved-memory/ramoops.txt``.
+ For example::
reserved-memory {
#address-cells = <2>;
@@ -75,58 +80,63 @@ Setting the ramoops parameters can be done in several different manners:
C. Use a platform device and set the platform data. The parameters can then
be set through that platform data. An example of doing that is:
-#include <linux/pstore_ram.h>
-[...]
+ .. code-block:: c
+
+ #include <linux/pstore_ram.h>
+ [...]
-static struct ramoops_platform_data ramoops_data = {
+ static struct ramoops_platform_data ramoops_data = {
.mem_size = <...>,
.mem_address = <...>,
.mem_type = <...>,
.record_size = <...>,
.dump_oops = <...>,
.ecc = <...>,
-};
+ };
-static struct platform_device ramoops_dev = {
+ static struct platform_device ramoops_dev = {
.name = "ramoops",
.dev = {
.platform_data = &ramoops_data,
},
-};
+ };
-[... inside a function ...]
-int ret;
+ [... inside a function ...]
+ int ret;
-ret = platform_device_register(&ramoops_dev);
-if (ret) {
+ ret = platform_device_register(&ramoops_dev);
+ if (ret) {
printk(KERN_ERR "unable to register platform device\n");
return ret;
-}
+ }
You can specify either RAM memory or peripheral devices' memory. However, when
specifying RAM, be sure to reserve the memory by issuing memblock_reserve()
-very early in the architecture code, e.g.:
+very early in the architecture code, e.g.::
-#include <linux/memblock.h>
+ #include <linux/memblock.h>
-memblock_reserve(ramoops_data.mem_address, ramoops_data.mem_size);
+ memblock_reserve(ramoops_data.mem_address, ramoops_data.mem_size);
-3. Dump format
+Dump format
+-----------
-The data dump begins with a header, currently defined as "====" followed by a
+The data dump begins with a header, currently defined as ``====`` followed by a
timestamp and a new line. The dump then continues with the actual data.
-4. Reading the data
+Reading the data
+----------------
The dump data can be read from the pstore filesystem. The format for these
-files is "dmesg-ramoops-N", where N is the record number in memory. To delete
+files is ``dmesg-ramoops-N``, where N is the record number in memory. To delete
a stored record from RAM, simply unlink the respective pstore file.
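+
+A minimal sketch of reading and then clearing a record, assuming pstore
+is mounted at its usual location::
+
+    # mount -t pstore pstore /sys/fs/pstore    # if not mounted already
+    # cat /sys/fs/pstore/dmesg-ramoops-0
+    # rm /sys/fs/pstore/dmesg-ramoops-0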
-5. Persistent function tracing
+Persistent function tracing
+---------------------------
Persistent function tracing might be useful for debugging software or hardware
-related hangs. The functions call chain log is stored in a "ftrace-ramoops"
-file. Here is an example of usage:
+related hangs. The functions call chain log is stored in a ``ftrace-ramoops``
+file. Here is an example of usage::
# mount -t debugfs debugfs /sys/kernel/debug/
# echo 1 > /sys/kernel/debug/pstore/record_ftrace
diff --git a/REPORTING-BUGS b/Documentation/admin-guide/reporting-bugs.rst
index 914baf9cf5fa..26b60b419652 100644
--- a/REPORTING-BUGS
+++ b/Documentation/admin-guide/reporting-bugs.rst
@@ -1,3 +1,8 @@
+.. _reportingbugs:
+
+Reporting bugs
+++++++++++++++
+
Background
==========
@@ -50,12 +55,13 @@ maintainer replies to you, make sure to 'Reply-all' in order to keep the
public mailing list(s) in the email thread.
If you know which driver is causing issues, you can pass one of the driver
-files to the get_maintainer.pl script:
+files to the get_maintainer.pl script::
+
perl scripts/get_maintainer.pl -f <filename>
If it is a security bug, please copy the Security Contact listed in the
MAINTAINERS file. They can help coordinate bugfix and disclosure. See
-Documentation/SecurityBugs for more information.
+:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>` for more information.
If you can't figure out which subsystem caused the issue, you should file
a bug in kernel.org bugzilla and send email to
@@ -69,8 +75,9 @@ Tips for reporting bugs
If you haven't reported a bug before, please read:
-http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
-http://www.catb.org/esr/faqs/smart-questions.html
+ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
+
+ http://www.catb.org/esr/faqs/smart-questions.html
It's REALLY important to report bugs that seem unrelated as separate email
threads or separate bugzilla entries. If you report several unrelated
@@ -87,7 +94,7 @@ step-by-step instructions for how a user can trigger the bug.
If the failure includes an "OOPS:", take a picture of the screen, capture
a netconsole trace, or type the message from your screen into the bug
-report. Please read "Documentation/oops-tracing.txt" before posting your
+report. Please read "Documentation/admin-guide/oops-tracing.rst" before posting your
bug report. This explains what you should do with the "Oops" information
to make it useful to the recipient.
@@ -99,34 +106,34 @@ relevant to your bug, feel free to exclude it.
First run the ver_linux script included as scripts/ver_linux, which
reports the version of some important subsystems. Run this script with
-the command "sh scripts/ver_linux".
+the command ``awk -f scripts/ver_linux``.
Use that information to fill in all fields of the bug report form, and
post it to the mailing list with a subject of "PROBLEM: <one line
-summary from [1.]>" for easy identification by the developers.
-
-[1.] One line summary of the problem:
-[2.] Full description of the problem/report:
-[3.] Keywords (i.e., modules, networking, kernel):
-[4.] Kernel information
-[4.1.] Kernel version (from /proc/version):
-[4.2.] Kernel .config file:
-[5.] Most recent kernel version which did not have the bug:
-[6.] Output of Oops.. message (if applicable) with symbolic information
- resolved (see Documentation/oops-tracing.txt)
-[7.] A small shell script or example program which triggers the
- problem (if possible)
-[8.] Environment
-[8.1.] Software (add the output of the ver_linux script here)
-[8.2.] Processor information (from /proc/cpuinfo):
-[8.3.] Module information (from /proc/modules):
-[8.4.] Loaded driver and hardware information (/proc/ioports, /proc/iomem)
-[8.5.] PCI information ('lspci -vvv' as root)
-[8.6.] SCSI information (from /proc/scsi/scsi)
-[8.7.] Other information that might be relevant to the problem
- (please look in /proc and include all information that you
- think to be relevant):
-[X.] Other notes, patches, fixes, workarounds:
+summary from [1.]>" for easy identification by the developers::
+
+ [1.] One line summary of the problem:
+ [2.] Full description of the problem/report:
+ [3.] Keywords (i.e., modules, networking, kernel):
+ [4.] Kernel information
+ [4.1.] Kernel version (from /proc/version):
+ [4.2.] Kernel .config file:
+ [5.] Most recent kernel version which did not have the bug:
+ [6.] Output of Oops.. message (if applicable) with symbolic information
+ resolved (see Documentation/admin-guide/oops-tracing.rst)
+ [7.] A small shell script or example program which triggers the
+ problem (if possible)
+ [8.] Environment
+ [8.1.] Software (add the output of the ver_linux script here)
+ [8.2.] Processor information (from /proc/cpuinfo):
+ [8.3.] Module information (from /proc/modules):
+ [8.4.] Loaded driver and hardware information (/proc/ioports, /proc/iomem)
+ [8.5.] PCI information ('lspci -vvv' as root)
+ [8.6.] SCSI information (from /proc/scsi/scsi)
+ [8.7.] Other information that might be relevant to the problem
+ (please look in /proc and include all information that you
+ think to be relevant):
+ [X.] Other notes, patches, fixes, workarounds:
Follow up
@@ -153,7 +160,8 @@ Expectations for kernel maintainers
Linux kernel maintainers are busy, overworked human beings. Some times
they may not be able to address your bug in a day, a week, or two weeks.
If they don't answer your email, they may be on vacation, or at a Linux
-conference. Check the conference schedule at LWN.net for more info:
+conference. Check the conference schedule at https://LWN.net for more info:
+
https://lwn.net/Calendar/
In general, kernel maintainers take 1 to 5 business days to respond to
diff --git a/Documentation/SecurityBugs b/Documentation/admin-guide/security-bugs.rst
index 342d769834f6..4f7414cad586 100644
--- a/Documentation/SecurityBugs
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -8,8 +8,8 @@ like to know when a security bug is found so that it can be fixed and
disclosed as quickly as possible. Please report security bugs to the
Linux kernel security team.
-1) Contact
-----------
+Contact
+-------
The Linux kernel security team can be contacted by email at
<security@kernel.org>. This is a private list of security officers
@@ -19,12 +19,12 @@ area maintainers to understand and fix the security vulnerability.
As it is with any bug, the more information provided the easier it
will be to diagnose and fix. Please review the procedure outlined in
-REPORTING-BUGS if you are unclear about what information is helpful.
+admin-guide/reporting-bugs.rst if you are unclear about what information is helpful.
Any exploit code is very helpful and will not be released without
consent from the reporter unless it has already been made public.
-2) Disclosure
--------------
+Disclosure
+----------
The goal of the Linux kernel security team is to work with the
bug submitter to bug resolution as well as disclosure. We prefer
@@ -39,8 +39,8 @@ disclosure is from immediate (esp. if it's already publicly known)
to a few weeks. As a basic default policy, we expect report date to
disclosure date to be on the order of 7 days.
-3) Non-disclosure agreements
-----------------------------
+Non-disclosure agreements
+-------------------------
The Linux kernel security team is not a formal body and therefore unable
to enter any non-disclosure agreements.
diff --git a/Documentation/serial-console.txt b/Documentation/admin-guide/serial-console.rst
index 9a7bc8b3f479..a8d1e36b627a 100644
--- a/Documentation/serial-console.txt
+++ b/Documentation/admin-guide/serial-console.rst
@@ -1,15 +1,21 @@
- Linux Serial Console
+.. _serial_console:
+
+Linux Serial Console
+====================
To use a serial port as console you need to compile the support into your
kernel - by default it is not compiled in. For PC style serial ports
-it's the config option next to "Standard/generic (dumb) serial support".
+it's the following config option:
+
+:menuselection:`Character devices --> Serial drivers --> 8250/16550 and compatible serial support --> Console on 8250/16550 and compatible serial port`
+
You must compile serial support into the kernel and not as a module.
It is possible to specify multiple devices for console output. You can
define a new kernel command line option to select which device(s) to
use for console output.
-The format of this option is:
+The format of this option is::
console=device,options
@@ -28,11 +34,11 @@ The format of this option is:
You can specify multiple console= options on the kernel command line.
Output will appear on all of them. The last device will be used when
-you open /dev/console. So, for example:
+you open ``/dev/console``. So, for example::
console=ttyS1,9600 console=tty0
-defines that opening /dev/console will get you the current foreground
+defines that opening ``/dev/console`` will get you the current foreground
virtual console, and kernel messages will appear on both the VGA
console and the 2nd serial port (ttyS1 or COM2) at 9600 baud.
@@ -44,61 +50,61 @@ first looks for a VGA card and then for a serial port. So if you don't
have a VGA card in your system the first serial port will automatically
become the console.
-You will need to create a new device to use /dev/console. The official
-/dev/console is now character device 5,1.
+You will need to create a new device to use ``/dev/console``. The official
+``/dev/console`` is now character device 5,1.
(You can also use a network device as a console. See
-Documentation/networking/netconsole.txt for information on that.)
+``Documentation/networking/netconsole.txt`` for information on that.)
-Here's an example that will use /dev/ttyS1 (COM2) as the console.
+Here's an example that will use ``/dev/ttyS1`` (COM2) as the console.
Replace the sample values as needed.
-1. Create /dev/console (real console) and /dev/tty0 (master virtual
- console):
+1. Create ``/dev/console`` (real console) and ``/dev/tty0`` (master virtual
+ console)::
- cd /dev
- rm -f console tty0
- mknod -m 622 console c 5 1
- mknod -m 622 tty0 c 4 0
+ cd /dev
+ rm -f console tty0
+ mknod -m 622 console c 5 1
+ mknod -m 622 tty0 c 4 0
2. LILO can also take input from a serial device. This is a very
useful option. To tell LILO to use the serial port:
- In lilo.conf (global section):
+ In lilo.conf (global section)::
- serial = 1,9600n8 (ttyS1, 9600 bd, no parity, 8 bits)
+ serial = 1,9600n8 (ttyS1, 9600 bd, no parity, 8 bits)
3. Adjust to kernel flags for the new kernel,
- again in lilo.conf (kernel section)
+ again in lilo.conf (kernel section)::
- append = "console=ttyS1,9600"
+ append = "console=ttyS1,9600"
4. Make sure a getty runs on the serial port so that you can login to
it once the system is done booting. This is done by adding a line
- like this to /etc/inittab (exact syntax depends on your getty):
+ like this to ``/etc/inittab`` (exact syntax depends on your getty)::
- S1:23:respawn:/sbin/getty -L ttyS1 9600 vt100
+ S1:23:respawn:/sbin/getty -L ttyS1 9600 vt100
-5. Init and /etc/ioctl.save
+5. Init and ``/etc/ioctl.save``
- Sysvinit remembers its stty settings in a file in /etc, called
- `/etc/ioctl.save'. REMOVE THIS FILE before using the serial
+ Sysvinit remembers its stty settings in a file in ``/etc``, called
+ ``/etc/ioctl.save``. REMOVE THIS FILE before using the serial
console for the first time, because otherwise init will probably
set the baudrate to 38400 (baudrate of the virtual console).
-6. /dev/console and X
+6. ``/dev/console`` and X
Programs that want to do something with the virtual console usually
- open /dev/console. If you have created the new /dev/console device,
+ open ``/dev/console``. If you have created the new ``/dev/console`` device,
and your console is NOT the virtual console some programs will fail.
Those are programs that want to access the VT interface, and use
- /dev/console instead of /dev/tty0. Some of those programs are:
+ ``/dev/console`` instead of ``/dev/tty0``. Some of those programs are::
- Xfree86, svgalib, gpm, SVGATextMode
+ Xfree86, svgalib, gpm, SVGATextMode
It should be fixed in modern versions of these programs though.
- Note that if you boot without a console= option (or with
- console=/dev/tty0), /dev/console is the same as /dev/tty0. In that
- case everything will still work.
+ Note that if you boot without a ``console=`` option (or with
+ ``console=/dev/tty0``), ``/dev/console`` is the same as ``/dev/tty0``.
+ In that case everything will still work.
7. Thanks
diff --git a/Documentation/admin-guide/sysfs-rules.rst b/Documentation/admin-guide/sysfs-rules.rst
new file mode 100644
index 000000000000..abad33526aca
--- /dev/null
+++ b/Documentation/admin-guide/sysfs-rules.rst
@@ -0,0 +1,192 @@
+Rules on how to access information in sysfs
+===========================================
+
+The kernel-exported sysfs exports internal kernel implementation details
+and depends on internal kernel structures and layout. It is agreed upon
+by the kernel developers that the Linux kernel does not provide a stable
+internal API. Therefore, there are aspects of the sysfs interface that
+may not be stable across kernel releases.
+
+To minimize the risk of breaking users of sysfs, which are in most cases
+low-level userspace applications, with a new kernel release, the users
+of sysfs must follow some rules to use an as-abstract-as-possible way to
+access this filesystem. The current udev and HAL programs already
+implement this and users are encouraged to plug, if possible, into the
+abstractions these programs provide instead of accessing sysfs directly.
+
+But if you really do want or need to access sysfs directly, please follow
+the following rules and then your programs should work with future
+versions of the sysfs interface.
+
+- Do not use libsysfs
+ It makes assumptions about sysfs which are not true. Its API does not
+ offer any abstraction, it exposes all the kernel driver-core
+ implementation details in its own API. Therefore it is not better than
+ reading directories and opening the files yourself.
+ Also, it is not actively maintained, in the sense of reflecting the
+ current kernel development. The goal of providing a stable interface
+ to sysfs has failed; it causes more problems than it solves. It
+ violates many of the rules in this document.
+
+- sysfs is always at ``/sys``
+ Parsing ``/proc/mounts`` is a waste of time. Other mount points are a
+ system configuration bug you should not try to solve. For test cases,
+ possibly support a ``SYSFS_PATH`` environment variable to overwrite the
+ application's behavior, but never try to search for sysfs. Never try
+ to mount it, if you are not an early boot script.
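+
+  A tiny sketch of honouring such an override from a shell helper
+  (``SYSFS_PATH`` handling only; the attribute path is just an example)::
+
+      : "${SYSFS_PATH:=/sys}"
+      cat "$SYSFS_PATH/class/net/eth0/address"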
+
+- devices are only "devices"
+ There are no such things as class, bus, or physical devices,
+ interfaces, and the like that you can rely on in userspace. Everything is
+ simply a "device". Class-, bus-, physical, ... types are just
+ kernel implementation details which should not be expected by
+ applications that look for devices in sysfs.
+
+ The properties of a device are:
+
+ - devpath (``/devices/pci0000:00/0000:00:1d.1/usb2/2-2/2-2:1.0``)
+
+ - identical to the DEVPATH value in the event sent from the kernel
+ at device creation and removal
+ - the unique key to the device at that point in time
+ - the kernel's path to the device directory without the leading
+ ``/sys``, and always starting with a slash
+ - all elements of a devpath must be real directories. Symlinks
+ pointing to /sys/devices must always be resolved to their real
+ target and the target path must be used to access the device.
+ That way the devpath to the device matches the devpath of the
+ kernel used at event time.
+ - using or exposing symlink values as elements in a devpath string
+ is a bug in the application
+
+ - kernel name (``sda``, ``tty``, ``0000:00:1f.2``, ...)
+
+ - a directory name, identical to the last element of the devpath
+ - applications need to handle spaces and characters like ``!`` in
+ the name
+
+ - subsystem (``block``, ``tty``, ``pci``, ...)
+
+ - simple string, never a path or a link
+ - retrieved by reading the "subsystem"-link and using only the
+ last element of the target path
+
+ - driver (``tg3``, ``ata_piix``, ``uhci_hcd``)
+
+ - a simple string, which may contain spaces, never a path or a
+ link
+ - it is retrieved by reading the "driver"-link and using only the
+ last element of the target path
+ - devices which do not have "driver"-link just do not have a
+ driver; copying the driver value in a child device context is a
+ bug in the application
+
+ - attributes
+
+ - the files in the device directory or files below subdirectories
+ of the same device directory
+ - accessing attributes reached by a symlink pointing to another device,
+ like the "device"-link, is a bug in the application
+
+ Everything else is just a kernel driver-core implementation detail
+ that should not be assumed to be stable across kernel releases.
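+
+  As a rough illustration of those properties (a sketch; ``sda`` is just
+  an example name), they can be collected from a shell like this::
+
+      dev=$(readlink -f /sys/block/sda)    # resolve symlinks to /sys/devices/...
+      devpath=${dev#/sys}                  # kernel devpath, starts with a slash
+      name=$(basename "$dev")              # kernel name
+      subsystem=$(basename "$(readlink "$dev/subsystem")")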
+
+- Properties of parent devices never belong in a child device.
+ Always look at the parent devices themselves for determining device
+ context properties. If the device ``eth0`` or ``sda`` does not have a
+ "driver"-link, then this device does not have a driver. Its value is empty.
+ Never copy any property of the parent-device into a child-device. Parent
+ device properties may change dynamically without any notice to the
+ child device.
+
+- Hierarchy in a single device tree
+ There is only one valid place in sysfs where hierarchy can be examined,
+ and this is below ``/sys/devices``.
+ It is planned that all device directories will end up in the tree
+ below this directory.
+
+- Classification by subsystem
+ There are currently three places for classification of devices:
+ ``/sys/block``, ``/sys/class`` and ``/sys/bus``. It is planned that these will
+ not contain any device directories themselves, but only flat lists of
+ symlinks pointing to the unified ``/sys/devices`` tree.
+ All three places have completely different rules on how to access
+ device information. It is planned to merge all three
+ classification directories into one place at ``/sys/subsystem``,
+ following the layout of the bus directories. All buses and
+ classes, including the converted block subsystem, will show up
+ there.
+ The devices belonging to a subsystem will create a symlink in the
+ "devices" directory at ``/sys/subsystem/<name>/devices``.
+
+ If ``/sys/subsystem`` exists, ``/sys/bus``, ``/sys/class`` and ``/sys/block``
+ can be ignored. If it does not exist, you always have to scan all three
+ places, as the kernel is free to move a subsystem from one place to
+ the other, as long as the devices are still reachable by the same
+ subsystem name.
+
+ Assuming ``/sys/class/<subsystem>`` and ``/sys/bus/<subsystem>``, or
+ ``/sys/block`` and ``/sys/class/block`` are not interchangeable is a bug in
+ the application.
+
+- Block
+ The converted block subsystem at ``/sys/class/block`` or
+ ``/sys/subsystem/block`` will contain the links for disks and partitions
+ at the same level, never in a hierarchy. Assuming the block subsystem to
+ contain only disks and not partition devices in the same flat list is
+ a bug in the application.
+
+- "device"-link and <subsystem>:<kernel name>-links
+ Never depend on the "device"-link. The "device"-link is a workaround
+ for the old layout, where class devices are not created in
+ ``/sys/devices/`` like the bus devices. If the link-resolving of a
+ device directory does not end in ``/sys/devices/``, you can use the
+ "device"-link to find the parent devices in ``/sys/devices/``. That is the
+ single valid use of the "device"-link; it must never appear in any
+ path as an element. Assuming the existence of the "device"-link for
+ a device in ``/sys/devices/`` is a bug in the application.
+ Accessing ``/sys/class/net/eth0/device`` is a bug in the application.
+
+ Never depend on the class-specific links back to the ``/sys/class``
+ directory. These links are also a workaround for the design mistake
+ that class devices are not created in ``/sys/devices``. If a device
+ directory does not contain directories for child devices, these links
+ may be used to find the child devices in ``/sys/class``. That is the single
+ valid use of these links; they must never appear in any path as an
+ element. Assuming the existence of these links for devices which are
+ real child device directories in the ``/sys/devices`` tree is a bug in
+ the application.
+
+ It is planned to remove all these links when all class device
+ directories live in ``/sys/devices``.
+
+- Position of devices along device chain can change.
+ Never depend on a specific parent device position in the devpath,
+ or the chain of parent devices. The kernel is free to insert devices into
+ the chain. You must always request the parent device you are looking for
+ by its subsystem value. You need to walk up the chain until you find
+ the device that matches the expected subsystem. Depending on a specific
+ position of a parent device or exposing relative paths using ``../`` to
+ access the chain of parents is a bug in the application.
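+
+  A rough sketch of walking up the chain until a parent with the wanted
+  subsystem is found (``eth0`` and ``pci`` are only example values)::
+
+      dev=$(readlink -f /sys/class/net/eth0)
+      while [ "$dev" != "/sys/devices" ]; do
+          subsys=$(basename "$(readlink "$dev/subsystem" 2>/dev/null)")
+          [ "$subsys" = "pci" ] && { echo "parent: $dev"; break; }
+          dev=$(dirname "$dev")
+      done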
+
+- When reading and writing sysfs device attribute files, avoid dependency
+ on specific error codes wherever possible. This minimizes coupling to
+ the error handling implementation within the kernel.
+
+ In general, failures to read or write sysfs device attributes shall
+ propagate errors wherever possible. Common errors include, but are not
+ limited to:
+
+ ``-EIO``: The read or store operation is not supported, typically
+ returned by the sysfs system itself if the read or store pointer
+ is ``NULL``.
+
+ ``-ENXIO``: The read or store operation failed
+
+ Error codes will not be changed without good reason, and should a change
+ to error codes result in user-space breakage, it will be fixed, or the
+ offending change will be reverted.
+
+ Userspace applications can, however, expect the format and contents of
+ the attribute files to remain consistent in the absence of a version
+ attribute change in the context of a given attribute.
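+
+A hedged example of following that advice from a script: check for
+failure rather than for a particular errno (``eth0`` and the ``speed``
+attribute are only examples; ``speed`` can legitimately fail to read on
+some devices, e.g. while the link is down)::
+
+    if ! speed=$(cat /sys/class/net/eth0/speed 2>/dev/null); then
+        echo "speed attribute not readable right now" >&2
+    fi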
diff --git a/Documentation/admin-guide/sysrq.rst b/Documentation/admin-guide/sysrq.rst
new file mode 100644
index 000000000000..d1712ea2d314
--- /dev/null
+++ b/Documentation/admin-guide/sysrq.rst
@@ -0,0 +1,289 @@
+Linux Magic System Request Key Hacks
+====================================
+
+Documentation for sysrq.c
+
+What is the magic SysRq key?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is a 'magical' key combo you can hit which the kernel will respond to
+regardless of whatever else it is doing, unless it is completely locked up.
+
+How do I enable the magic SysRq key?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You need to say "yes" to 'Magic SysRq key (``CONFIG_MAGIC_SYSRQ``)' when
+configuring the kernel. When running a kernel with SysRq compiled in,
+``/proc/sys/kernel/sysrq`` controls the functions allowed to be invoked via
+the SysRq key. The default value in this file is set by the
+``CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE`` config symbol, which itself defaults
+to 1. Here is the list of possible values in ``/proc/sys/kernel/sysrq``:
+
+ - 0 - disable sysrq completely
+ - 1 - enable all functions of sysrq
+ - >1 - bitmask of allowed sysrq functions (see below for detailed function
+ description)::
+
+ 2 = 0x2 - enable control of console logging level
+ 4 = 0x4 - enable control of keyboard (SAK, unraw)
+ 8 = 0x8 - enable debugging dumps of processes etc.
+ 16 = 0x10 - enable sync command
+ 32 = 0x20 - enable remount read-only
+ 64 = 0x40 - enable signalling of processes (term, kill, oom-kill)
+ 128 = 0x80 - allow reboot/poweroff
+ 256 = 0x100 - allow nicing of all RT tasks
+
+You can set the value in the file by the following command::
+
+ echo "number" >/proc/sys/kernel/sysrq
+
+The number may be written here either as decimal or as hexadecimal
+with the 0x prefix. CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE must always be
+written in hexadecimal.
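+
+For example, to allow only the sync, remount read-only and
+reboot/poweroff functions, add up their bits
+(``0x10 + 0x20 + 0x80 = 0xb0``, i.e. 176) and write that value::
+
+    echo 176 > /proc/sys/kernel/sysrq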
+
+Note that the value of ``/proc/sys/kernel/sysrq`` influences only the invocation
+via a keyboard. Invocation of any operation via ``/proc/sysrq-trigger`` is
+always allowed (by a user with admin privileges).
+
+How do I use the magic SysRq key?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On x86 - You press the key combo :kbd:`ALT-SysRq-<command key>`.
+
+.. note::
+ Some
+ keyboards may not have a key labeled 'SysRq'. The 'SysRq' key is
+ also known as the 'Print Screen' key. Also some keyboards cannot
+ handle so many keys being pressed at the same time, so you might
+ have better luck with press :kbd:`Alt`, press :kbd:`SysRq`,
+ release :kbd:`SysRq`, press :kbd:`<command key>`, release everything.
+
+On SPARC - You press :kbd:`ALT-STOP-<command key>`, I believe.
+
+On the serial console (PC style standard serial ports only)
+ You send a ``BREAK``, then within 5 seconds a command key. Sending
+ ``BREAK`` twice is interpreted as a normal BREAK.
+
+On PowerPC
+ Press :kbd:`ALT - Print Screen` (or :kbd:`F13`) - :kbd:`<command key>`,
+ :kbd:`Print Screen` (or :kbd:`F13`) - :kbd:`<command key>` may suffice.
+
+On other
+ If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+On all
+ write a character to ``/proc/sysrq-trigger``, e.g.::
+
+ echo t > /proc/sysrq-trigger
+
+What are the 'command' keys?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+=========== ===================================================================
+Command Function
+=========== ===================================================================
+``b`` Will immediately reboot the system without syncing or unmounting
+ your disks.
+
+``c`` Will perform a system crash by a NULL pointer dereference.
+ A crashdump will be taken if configured.
+
+``d`` Shows all locks that are held.
+
+``e`` Send a SIGTERM to all processes, except for init.
+
+``f`` Will call the oom killer to kill a memory hog process, but do not
+ panic if nothing can be killed.
+
+``g`` Used by kgdb (kernel debugger)
+
+``h`` Will display help (actually any key other than those listed
+ here will display help, but ``h`` is easy to remember :-)
+
+``i`` Send a SIGKILL to all processes, except for init.
+
+``j`` Forcibly "Just thaw it" - filesystems frozen by the FIFREEZE ioctl.
+
+``k`` Secure Access Key (SAK) Kills all programs on the current virtual
+ console. NOTE: See important comments below in SAK section.
+
+``l`` Shows a stack backtrace for all active CPUs.
+
+``m`` Will dump current memory info to your console.
+
+``n`` Used to make RT tasks nice-able
+
+``o`` Will shut your system off (if configured and supported).
+
+``p`` Will dump the current registers and flags to your console.
+
+``q`` Will dump per CPU lists of all armed hrtimers (but NOT regular
+ timer_list timers) and detailed information about all
+ clockevent devices.
+
+``r`` Turns off keyboard raw mode and sets it to XLATE.
+
+``s`` Will attempt to sync all mounted filesystems.
+
+``t`` Will dump a list of current tasks and their information to your
+ console.
+
+``u`` Will attempt to remount all mounted filesystems read-only.
+
+``v`` Forcefully restores framebuffer console
+``v`` Causes ETM buffer dump [ARM-specific]
+
+``w`` Dumps tasks that are in uninterruptible (blocked) state.
+
+``x`` Used by xmon interface on ppc/powerpc platforms.
+ Show global PMU Registers on sparc64.
+ Dump all TLB entries on MIPS.
+
+``y`` Show global CPU Registers [SPARC-64 specific]
+
+``z`` Dump the ftrace buffer
+
+``0``-``9`` Sets the console log level, controlling which kernel messages
+ will be printed to your console. (``0``, for example would make
+ it so that only emergency messages like PANICs or OOPSes would
+ make it to your console.)
+=========== ===================================================================
+
+Okay, so what can I use them for?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Well, ``unraw(r)`` is very handy when your X server or a svgalib program crashes.
+
+``sak(k)`` (Secure Access Key) is useful when you want to be sure there is no
+trojan program running at console which could grab your password
+when you would try to login. It will kill all programs on given console,
+thus letting you make sure that the login prompt you see is actually
+the one from init, not some trojan program.
+
+.. important::
+
+ It is not a true SAK like the one in a
+ C2-compliant system, and it should not be mistaken for
+ such.
+
+It seems others find it useful as (System Attention Key) which is
+useful when you want to exit a program that will not let you switch consoles.
+(For example, X or a svgalib program.)
+
+``reboot(b)`` is good when you're unable to shut down. But you should also
+``sync(s)`` and ``umount(u)`` first.
+
+``crash(c)`` can be used to manually trigger a crashdump when the system is hung.
+Note that this just triggers a crash if there is no dump mechanism available.
+
+``sync(s)`` is great when your system is locked up, it allows you to sync your
+disks and will certainly lessen the chance of data loss and fscking. Note
+that the sync hasn't taken place until you see the "OK" and "Done" appear
+on the screen. (If the kernel is really in strife, you may not ever get the
+OK or Done message...)
+
+``umount(u)`` is basically useful in the same ways as ``sync(s)``. I generally
+``sync(s)``, ``umount(u)``, then ``reboot(b)`` when my system locks. It's saved
+me many a fsck. Again, the unmount (remount read-only) hasn't taken place until
+you see the "OK" and "Done" message appear on the screen.
+
+The loglevels ``0``-``9`` are useful when your console is being flooded with
+kernel messages you do not want to see. Selecting ``0`` will prevent all but
+the most urgent kernel messages from reaching your console. (They will
+still be logged if syslogd/klogd are alive, though.)
+
+``term(e)`` and ``kill(i)`` are useful if you have some sort of runaway process
+you are unable to kill any other way, especially if it's spawning other
+processes.
+
+``just thaw it(j)`` is useful if your system becomes unresponsive due to a
+frozen (probably root) filesystem via the FIFREEZE ioctl.
+
+Sometimes SysRq seems to get 'stuck' after using it, what can I do?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+That happens to me, also. I've found that tapping shift, alt, and control
+on both sides of the keyboard, and hitting an invalid sysrq sequence again
+will fix the problem. (i.e., something like :kbd:`alt-sysrq-z`). Switching to
+another virtual console (:kbd:`ALT+Fn`) and then back again should also help.
+
+I hit SysRq, but nothing seems to happen, what's wrong?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are some keyboards that produce a different keycode for SysRq than the
+pre-defined value of 99 (see ``KEY_SYSRQ`` in ``include/linux/input.h``), or
+which don't have a SysRq key at all. In these cases, run ``showkey -s`` to find
+an appropriate scancode sequence, and use ``setkeycodes <sequence> 99`` to map
+this sequence to the usual SysRq code (e.g., ``setkeycodes e05b 99``). It's
+probably best to put this command in a boot script. Oh, and by the way, you
+exit ``showkey`` by not typing anything for ten seconds.
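+
+Putting the two commands quoted above together (``e05b`` is just the
+example scancode from this text; substitute whatever ``showkey -s``
+reports for your keyboard)::
+
+    showkey -s              # press the would-be SysRq key, note the scancode
+    setkeycodes e05b 99     # map that scancode to the SysRq keycode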
+
+I want to add SysRQ key events to a module, how does it work?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to register a basic function with the table, you must first include
+the header ``include/linux/sysrq.h``, this will define everything else you need.
+Next, you must create a ``sysrq_key_op`` struct, and populate it with A) the key
+handler function you will use, B) a help_msg string, that will print when SysRQ
+prints help, and C) an action_msg string, that will print right before your
+handler is called. Your handler must conform to the prototype in 'sysrq.h'.
+
+After the ``sysrq_key_op`` is created, you can call the kernel function
+``register_sysrq_key(int key, struct sysrq_key_op *op_p);`` this will
+register the operation pointed to by ``op_p`` at table key 'key',
+if that slot in the table is blank. At module unload time, you must call
+the function ``unregister_sysrq_key(int key, struct sysrq_key_op *op_p)``, which
+will remove the key op pointed to by 'op_p' from the key 'key', if and only if
+it is currently registered in that slot. This is in case the slot has been
+overwritten since you registered it.
+
+The Magic SysRQ system works by registering key operations against a key op
+lookup table, which is defined in 'drivers/tty/sysrq.c'. This key table has
+a number of operations registered into it at compile time, but is mutable,
+and 2 functions are exported for interface to it::
+
+ register_sysrq_key and unregister_sysrq_key.
+
+Of course, never ever leave an invalid pointer in the table. I.e., when
+your module that called register_sysrq_key() exits, it must call
+unregister_sysrq_key() to clean up the sysrq key table entry that it used.
+Null pointers in the table are always safe. :)
+
+If for some reason you feel the need to call the handle_sysrq function from
+within a function called by handle_sysrq, you must be aware that you are in
+a lock (you are also in an interrupt handler, which means don't sleep!), so
+you must call ``__handle_sysrq_nolock`` instead.
+
+When I hit a SysRq key combination only the header appears on the console?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sysrq output is subject to the same console loglevel control as all
+other console output. This means that if the kernel was booted 'quiet'
+as is common on distro kernels the output may not appear on the actual
+console, even though it will appear in the dmesg buffer, and be accessible
+via the dmesg command and to the consumers of ``/proc/kmsg``. As a specific
+exception the header line from the sysrq command is passed to all console
+consumers as if the current loglevel was maximum. If only the header
+is emitted it is almost certain that the kernel loglevel is too low.
+Should you require the output on the console channel then you will need
+to temporarily up the console loglevel using :kbd:`alt-sysrq-8` or::
+
+ echo 8 > /proc/sysrq-trigger
+
+Remember to return the loglevel to normal after triggering the sysrq
+command you are interested in.
+
+I have more questions, who can I ask?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Just ask them on the linux-kernel mailing list:
+ linux-kernel@vger.kernel.org
+
+Credits
+~~~~~~~
+
+Written by Mydraal <vulpyne@vulpyne.net>
+Updated by Adam Sulmicki <adam@cfar.umd.edu>
+Updated by Jeremy M. Dolan <jmd@turbogeek.org> 2001/01/28 10:15:59
+Added to by Crutcher Dunnavant <crutcher+kernel@datastacks.com>
diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst
new file mode 100644
index 000000000000..1df03b5cb02f
--- /dev/null
+++ b/Documentation/admin-guide/tainted-kernels.rst
@@ -0,0 +1,59 @@
+Tainted kernels
+---------------
+
+Some oops reports contain the string **'Tainted: '** after the program
+counter. This indicates that the kernel has been tainted by some
+mechanism. The string is followed by a series of position-sensitive
+characters, each representing a particular tainted value.
+
+ 1) ``G`` if all modules loaded have a GPL or compatible license, ``P`` if
+ any proprietary module has been loaded. Modules without a
+ ``MODULE_LICENSE`` or with a ``MODULE_LICENSE`` that is not recognised by
+ insmod as GPL compatible are assumed to be proprietary.
+
+ 2) ``F`` if any module was force loaded by ``insmod -f``, ``' '`` if all
+ modules were loaded normally.
+
+ 3) ``S`` if the oops occurred on an SMP kernel running on hardware that
+ hasn't been certified as safe to run multiprocessor.
+ Currently this occurs only on various Athlons that are not
+ SMP capable.
+
+ 4) ``R`` if a module was force unloaded by ``rmmod -f``, ``' '`` if all
+ modules were unloaded normally.
+
+ 5) ``M`` if any processor has reported a Machine Check Exception,
+ ``' '`` if no Machine Check Exceptions have occurred.
+
+ 6) ``B`` if a page-release function has found a bad page reference or
+ some unexpected page flags.
+
+ 7) ``U`` if a user or user application specifically requested that the
+ Tainted flag be set, ``' '`` otherwise.
+
+ 8) ``D`` if the kernel has died recently, i.e. there was an OOPS or BUG.
+
+ 9) ``A`` if the ACPI table has been overridden.
+
+ 10) ``W`` if a warning has previously been issued by the kernel.
+ (Though some warnings may set more specific taint flags.)
+
+ 11) ``C`` if a staging driver has been loaded.
+
+ 12) ``I`` if the kernel is working around a severe bug in the platform
+ firmware (BIOS or similar).
+
+ 13) ``O`` if an externally-built ("out-of-tree") module has been loaded.
+
+ 14) ``E`` if an unsigned module has been loaded in a kernel supporting
+ module signature.
+
+ 15) ``L`` if a soft lockup has previously occurred on the system.
+
+ 16) ``K`` if the kernel has been live patched.
+
+The primary reason for the **'Tainted: '** string is to tell kernel
+debuggers if this is a clean kernel or if anything unusual has
+occurred. Tainting is permanent: even if an offending module is
+unloaded, the tainted value remains to indicate that the kernel is not
+trustworthy.
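+
+The same information is available from a running kernel as a bitmask in
+procfs; a value of ``0`` means the kernel is not tainted::
+
+    $ cat /proc/sys/kernel/tainted
+    0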
diff --git a/Documentation/unicode.txt b/Documentation/admin-guide/unicode.rst
index 4a33f81cadb1..7425a3351321 100644
--- a/Documentation/unicode.txt
+++ b/Documentation/admin-guide/unicode.rst
@@ -1,12 +1,16 @@
+Unicode support
+===============
+
Last update: 2005-01-17, version 1.4
This file is maintained by H. Peter Anvin <unicode@lanana.org> as part
of the Linux Assigned Names And Numbers Authority (LANANA) project.
The current version can be found at:
- http://www.lanana.org/docs/unicode/unicode.txt
+ http://www.lanana.org/docs/unicode/unicode.txt
- ------------------------
+Introduction
+------------
The Linux kernel code has been rewritten to use Unicode to map
characters to fonts. By downloading a single Unicode-to-font table,
@@ -16,12 +20,14 @@ the font as indicated.
This changes the semantics of the eight-bit character tables subtly.
The four character tables are now:
+=============== =============================== ================
Map symbol Map name Escape code (G0)
-
+=============== =============================== ================
LAT1_MAP Latin-1 (ISO 8859-1) ESC ( B
GRAF_MAP DEC VT100 pseudographics ESC ( 0
IBMPC_MAP IBM code page 437 ESC ( U
USER_MAP User defined ESC ( K
+=============== =============================== ================
In particular, ESC ( U is no longer "straight to font", since the font
might be completely different than the IBM character set. This
@@ -55,10 +61,12 @@ In addition, the following characters not present in Unicode 1.1.4
have been defined; these are used by the DEC VT graphics map. [v1.2]
THIS USE IS OBSOLETE AND SHOULD NO LONGER BE USED; PLEASE SEE BELOW.
+====== ======================================
U+F800 DEC VT GRAPHICS HORIZONTAL LINE SCAN 1
U+F801 DEC VT GRAPHICS HORIZONTAL LINE SCAN 3
U+F803 DEC VT GRAPHICS HORIZONTAL LINE SCAN 7
U+F804 DEC VT GRAPHICS HORIZONTAL LINE SCAN 9
+====== ======================================
The DEC VT220 uses a 6x10 character matrix, and these characters form
a smooth progression in the DEC VT graphics character set. I have
@@ -74,10 +82,12 @@ keyboard symbols that are unlikely to ever be added to Unicode proper
since they are horribly vendor-specific. This, of course, is an
excellent example of horrible design.
+====== ======================================
U+F810 KEYBOARD SYMBOL FLYING FLAG
U+F811 KEYBOARD SYMBOL PULLDOWN MENU
U+F812 KEYBOARD SYMBOL OPEN APPLE
U+F813 KEYBOARD SYMBOL SOLID APPLE
+====== ======================================
Klingon language support
------------------------
@@ -99,8 +109,10 @@ of the dingbats/symbols/forms type and this is a language, I have
located it at the end, on a 16-cell boundary in keeping with standard
Unicode practice.
-NOTE: This range is now officially managed by the ConScript Unicode
-Registry. The normative reference is at:
+.. note::
+
+ This range is now officially managed by the ConScript Unicode
+ Registry. The normative reference is at:
http://www.evertype.com/standards/csur/klingon.html
@@ -112,6 +124,7 @@ However, since the set of symbols appear to be consistent throughout,
with only the actual shapes being different, in keeping with standard
Unicode practice these differences are considered font variants.
+====== =======================================================
U+F8D0 KLINGON LETTER A
U+F8D1 KLINGON LETTER B
U+F8D2 KLINGON LETTER CH
@@ -155,6 +168,7 @@ U+F8F9 KLINGON DIGIT NINE
U+F8FD KLINGON COMMA
U+F8FE KLINGON FULL STOP
U+F8FF KLINGON SYMBOL FOR EMPIRE
+====== =======================================================
Other Fictional and Artificial Scripts
--------------------------------------
diff --git a/Documentation/admin-guide/vga-softcursor.rst b/Documentation/admin-guide/vga-softcursor.rst
new file mode 100644
index 000000000000..f52175457e60
--- /dev/null
+++ b/Documentation/admin-guide/vga-softcursor.rst
@@ -0,0 +1,62 @@
+Software cursor for VGA
+=======================
+
+by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
+and Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+
+Linux now has some ability to manipulate cursor appearance. Normally,
+you can only set the size of the hardware cursor. You can now play a few
+new tricks: you can make your cursor look like a non-blinking red block,
+make it show the inverse background of the character it's over, or
+highlight that character, and still choose whether the original hardware
+cursor should remain visible or not. There may be other things I have
+never thought of.
+
+The cursor appearance is controlled by a ``<ESC>[?1;2;3c`` escape sequence
+where 1, 2 and 3 are parameters described below. If you omit any of them,
+they will default to zeroes.
+
+first parameter
+ specifies cursor size::
+
+ 0=default
+ 1=invisible
+ 2=underline,
+ ...
+ 8=full block
+ + 16 if you want the software cursor to be applied
+ + 32 if you want to always change the background color
+ + 64 if you dislike having the background the same as the
+ foreground.
+
+ Highlights are ignored for the last two flags.
+
+second parameter
+ selects character attribute bits you want to change
+ (by simply XORing them with the value of this parameter). On standard
+ VGA, the high four bits specify background and the low four the
+ foreground. In both groups, low three bits set color (as in normal
+ color codes used by the console) and the most significant one turns
+ on highlight (or sometimes blinking -- it depends on the configuration
+ of your VGA).
+
+third parameter
+ consists of character attribute bits you want to set.
+
+ Bit setting takes place before bit toggling, so you can simply clear a
+ bit by including it in both the set mask and the toggle mask.
+
+Examples
+--------
+
+To get normal blinking underline, use::
+
+ echo -e '\033[?2c'
+
+To get blinking block, use::
+
+ echo -e '\033[?6c'
+
+To get red non-blinking block, use::
+
+ echo -e '\033[?17;0;64c'
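The same escape sequences can of course be emitted from a small C program as
well as from the shell. The sketch below is illustrative only (it is not part
of the patch); it reproduces the last example above, with the first parameter
composed from the flag table in the new file::

    #include <stdio.h>

    int main(void)
    {
            int size   = 1 + 16; /* invisible hardware cursor + software cursor */
            int toggle = 0;      /* second parameter: no attribute bits XORed */
            int set    = 64;     /* third parameter: set attribute bit 0x40,
                                  * i.e. a red background on a standard VGA */

            /* Same effect as: echo -e '\033[?17;0;64c' */
            printf("\033[?%d;%d;%dc", size, toggle, set);
            return 0;
    }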
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 83c1df2fc758..259f00af3ab3 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -51,7 +51,7 @@ As an alternative, the boot loader can pass the relevant 'console='
option to the kernel via the tagged lists specifying the port, and
serial format options as described in
- Documentation/kernel-parameters.txt.
+ Documentation/admin-guide/kernel-parameters.rst.
3. Detect the machine type
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
deleted file mode 100644
index 2f2c6cdd73c0..000000000000
--- a/Documentation/assoc_array.txt
+++ /dev/null
@@ -1,574 +0,0 @@
- ========================================
- GENERIC ASSOCIATIVE ARRAY IMPLEMENTATION
- ========================================
-
-Contents:
-
- - Overview.
-
- - The public API.
- - Edit script.
- - Operations table.
- - Manipulation functions.
- - Access functions.
- - Index key form.
-
- - Internal workings.
- - Basic internal tree layout.
- - Shortcuts.
- - Splitting and collapsing nodes.
- - Non-recursive iteration.
- - Simultaneous alteration and iteration.
-
-
-========
-OVERVIEW
-========
-
-This associative array implementation is an object container with the following
-properties:
-
- (1) Objects are opaque pointers. The implementation does not care where they
- point (if anywhere) or what they point to (if anything).
-
- [!] NOTE: Pointers to objects _must_ be zero in the least significant bit.
-
- (2) Objects do not need to contain linkage blocks for use by the array. This
- permits an object to be located in multiple arrays simultaneously.
- Rather, the array is made up of metadata blocks that point to objects.
-
- (3) Objects require index keys to locate them within the array.
-
- (4) Index keys must be unique. Inserting an object with the same key as one
- already in the array will replace the old object.
-
- (5) Index keys can be of any length and can be of different lengths.
-
- (6) Index keys should encode the length early on, before any variation due to
- length is seen.
-
- (7) Index keys can include a hash to scatter objects throughout the array.
-
- (8) The array can iterated over. The objects will not necessarily come out in
- key order.
-
- (9) The array can be iterated over whilst it is being modified, provided the
- RCU readlock is being held by the iterator. Note, however, under these
- circumstances, some objects may be seen more than once. If this is a
- problem, the iterator should lock against modification. Objects will not
- be missed, however, unless deleted.
-
-(10) Objects in the array can be looked up by means of their index key.
-
-(11) Objects can be looked up whilst the array is being modified, provided the
- RCU readlock is being held by the thread doing the look up.
-
-The implementation uses a tree of 16-pointer nodes internally that are indexed
-on each level by nibbles from the index key in the same manner as in a radix
-tree. To improve memory efficiency, shortcuts can be emplaced to skip over
-what would otherwise be a series of single-occupancy nodes. Further, nodes
-pack leaf object pointers into spare space in the node rather than making an
-extra branch until as such time an object needs to be added to a full node.
-
-
-==============
-THE PUBLIC API
-==============
-
-The public API can be found in <linux/assoc_array.h>. The associative array is
-rooted on the following structure:
-
- struct assoc_array {
- ...
- };
-
-The code is selected by enabling CONFIG_ASSOCIATIVE_ARRAY.
-
-
-EDIT SCRIPT
------------
-
-The insertion and deletion functions produce an 'edit script' that can later be
-applied to effect the changes without risking ENOMEM. This retains the
-preallocated metadata blocks that will be installed in the internal tree and
-keeps track of the metadata blocks that will be removed from the tree when the
-script is applied.
-
-This is also used to keep track of dead blocks and dead objects after the
-script has been applied so that they can be freed later. The freeing is done
-after an RCU grace period has passed - thus allowing access functions to
-proceed under the RCU read lock.
-
-The script appears as outside of the API as a pointer of the type:
-
- struct assoc_array_edit;
-
-There are two functions for dealing with the script:
-
- (1) Apply an edit script.
-
- void assoc_array_apply_edit(struct assoc_array_edit *edit);
-
- This will perform the edit functions, interpolating various write barriers
- to permit accesses under the RCU read lock to continue. The edit script
- will then be passed to call_rcu() to free it and any dead stuff it points
- to.
-
- (2) Cancel an edit script.
-
- void assoc_array_cancel_edit(struct assoc_array_edit *edit);
-
- This frees the edit script and all preallocated memory immediately. If
- this was for insertion, the new object is _not_ released by this function,
- but must rather be released by the caller.
-
-These functions are guaranteed not to fail.
-
-
-OPERATIONS TABLE
-----------------
-
-Various functions take a table of operations:
-
- struct assoc_array_ops {
- ...
- };
-
-This points to a number of methods, all of which need to be provided:
-
- (1) Get a chunk of index key from caller data:
-
- unsigned long (*get_key_chunk)(const void *index_key, int level);
-
- This should return a chunk of caller-supplied index key starting at the
- *bit* position given by the level argument. The level argument will be a
- multiple of ASSOC_ARRAY_KEY_CHUNK_SIZE and the function should return
- ASSOC_ARRAY_KEY_CHUNK_SIZE bits. No error is possible.
-
-
- (2) Get a chunk of an object's index key.
-
- unsigned long (*get_object_key_chunk)(const void *object, int level);
-
- As the previous function, but gets its data from an object in the array
- rather than from a caller-supplied index key.
-
-
- (3) See if this is the object we're looking for.
-
- bool (*compare_object)(const void *object, const void *index_key);
-
- Compare the object against an index key and return true if it matches and
- false if it doesn't.
-
-
- (4) Diff the index keys of two objects.
-
- int (*diff_objects)(const void *object, const void *index_key);
-
- Return the bit position at which the index key of the specified object
- differs from the given index key or -1 if they are the same.
-
-
- (5) Free an object.
-
- void (*free_object)(void *object);
-
- Free the specified object. Note that this may be called an RCU grace
- period after assoc_array_apply_edit() was called, so synchronize_rcu() may
- be necessary on module unloading.
-
-
-MANIPULATION FUNCTIONS
-----------------------
-
-There are a number of functions for manipulating an associative array:
-
- (1) Initialise an associative array.
-
- void assoc_array_init(struct assoc_array *array);
-
- This initialises the base structure for an associative array. It can't
- fail.
-
-
- (2) Insert/replace an object in an associative array.
-
- struct assoc_array_edit *
- assoc_array_insert(struct assoc_array *array,
- const struct assoc_array_ops *ops,
- const void *index_key,
- void *object);
-
- This inserts the given object into the array. Note that the least
- significant bit of the pointer must be zero as it's used to type-mark
- pointers internally.
-
- If an object already exists for that key then it will be replaced with the
- new object and the old one will be freed automatically.
-
- The index_key argument should hold index key information and is
- passed to the methods in the ops table when they are called.
-
- This function makes no alteration to the array itself, but rather returns
- an edit script that must be applied. -ENOMEM is returned in the case of
- an out-of-memory error.
-
- The caller should lock exclusively against other modifiers of the array.
-
-
- (3) Delete an object from an associative array.
-
- struct assoc_array_edit *
- assoc_array_delete(struct assoc_array *array,
- const struct assoc_array_ops *ops,
- const void *index_key);
-
- This deletes an object that matches the specified data from the array.
-
- The index_key argument should hold index key information and is
- passed to the methods in the ops table when they are called.
-
- This function makes no alteration to the array itself, but rather returns
- an edit script that must be applied. -ENOMEM is returned in the case of
- an out-of-memory error. NULL will be returned if the specified object is
- not found within the array.
-
- The caller should lock exclusively against other modifiers of the array.
-
-
- (4) Delete all objects from an associative array.
-
- struct assoc_array_edit *
- assoc_array_clear(struct assoc_array *array,
- const struct assoc_array_ops *ops);
-
- This deletes all the objects from an associative array and leaves it
- completely empty.
-
- This function makes no alteration to the array itself, but rather returns
- an edit script that must be applied. -ENOMEM is returned in the case of
- an out-of-memory error.
-
- The caller should lock exclusively against other modifiers of the array.
-
-
- (5) Destroy an associative array, deleting all objects.
-
- void assoc_array_destroy(struct assoc_array *array,
- const struct assoc_array_ops *ops);
-
- This destroys the contents of the associative array and leaves it
- completely empty. It is not permitted for another thread to be traversing
- the array under the RCU read lock at the same time as this function is
- destroying it as no RCU deferral is performed on memory release -
- something that would require memory to be allocated.
-
- The caller should lock exclusively against other modifiers and accessors
- of the array.
-
-
- (6) Garbage collect an associative array.
-
- int assoc_array_gc(struct assoc_array *array,
- const struct assoc_array_ops *ops,
- bool (*iterator)(void *object, void *iterator_data),
- void *iterator_data);
-
- This iterates over the objects in an associative array and passes each one
- to iterator(). If iterator() returns true, the object is kept. If it
- returns false, the object will be freed. If the iterator() function
- returns true, it must perform any appropriate refcount incrementing on the
- object before returning.
-
- The internal tree will be packed down if possible as part of the iteration
- to reduce the number of nodes in it.
-
- The iterator_data is passed directly to iterator() and is otherwise
- ignored by the function.
-
- The function will return 0 if successful and -ENOMEM if there wasn't
- enough memory.
-
- It is possible for other threads to iterate over or search the array under
- the RCU read lock whilst this function is in progress. The caller should
- lock exclusively against other modifiers of the array.
-
-
-ACCESS FUNCTIONS
-----------------
-
-There are two functions for accessing an associative array:
-
- (1) Iterate over all the objects in an associative array.
-
- int assoc_array_iterate(const struct assoc_array *array,
- int (*iterator)(const void *object,
- void *iterator_data),
- void *iterator_data);
-
- This passes each object in the array to the iterator callback function.
- iterator_data is private data for that function.
-
- This may be used on an array at the same time as the array is being
- modified, provided the RCU read lock is held. Under such circumstances,
- it is possible for the iteration function to see some objects twice. If
- this is a problem, then modification should be locked against. The
- iteration algorithm should not, however, miss any objects.
-
- The function will return 0 if no objects were in the array or else it will
- return the result of the last iterator function called. Iteration stops
- immediately if any call to the iteration function results in a non-zero
- return.
-
-
- (2) Find an object in an associative array.
-
- void *assoc_array_find(const struct assoc_array *array,
- const struct assoc_array_ops *ops,
- const void *index_key);
-
- This walks through the array's internal tree directly to the object
- specified by the index key..
-
- This may be used on an array at the same time as the array is being
- modified, provided the RCU read lock is held.
-
- The function will return the object if found (and set *_type to the object
- type) or will return NULL if the object was not found.
-
-
-INDEX KEY FORM
---------------
-
-The index key can be of any form, but since the algorithms aren't told how long
-the key is, it is strongly recommended that the index key includes its length
-very early on before any variation due to the length would have an effect on
-comparisons.
-
-This will cause leaves with different length keys to scatter away from each
-other - and those with the same length keys to cluster together.
-
-It is also recommended that the index key begin with a hash of the rest of the
-key to maximise scattering throughout keyspace.
-
-The better the scattering, the wider and lower the internal tree will be.
-
-Poor scattering isn't too much of a problem as there are shortcuts and nodes
-can contain mixtures of leaves and metadata pointers.
-
-The index key is read in chunks of machine word. Each chunk is subdivided into
-one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and
-on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is
-unlikely that more than one word of any particular index key will have to be
-used.
-
-
-=================
-INTERNAL WORKINGS
-=================
-
-The associative array data structure has an internal tree. This tree is
-constructed of two types of metadata blocks: nodes and shortcuts.
-
-A node is an array of slots. Each slot can contain one of four things:
-
- (*) A NULL pointer, indicating that the slot is empty.
-
- (*) A pointer to an object (a leaf).
-
- (*) A pointer to a node at the next level.
-
- (*) A pointer to a shortcut.
-
-
-BASIC INTERNAL TREE LAYOUT
---------------------------
-
-Ignoring shortcuts for the moment, the nodes form a multilevel tree. The index
-key space is strictly subdivided by the nodes in the tree and nodes occur on
-fixed levels. For example:
-
- Level: 0 1 2 3
- =============== =============== =============== ===============
- NODE D
- NODE B NODE C +------>+---+
- +------>+---+ +------>+---+ | | 0 |
- NODE A | | 0 | | | 0 | | +---+
- +---+ | +---+ | +---+ | : :
- | 0 | | : : | : : | +---+
- +---+ | +---+ | +---+ | | f |
- | 1 |---+ | 3 |---+ | 7 |---+ +---+
- +---+ +---+ +---+
- : : : : | 8 |---+
- +---+ +---+ +---+ | NODE E
- | e |---+ | f | : : +------>+---+
- +---+ | +---+ +---+ | 0 |
- | f | | | f | +---+
- +---+ | +---+ : :
- | NODE F +---+
- +------>+---+ | f |
- | 0 | NODE G +---+
- +---+ +------>+---+
- : : | | 0 |
- +---+ | +---+
- | 6 |---+ : :
- +---+ +---+
- : : | f |
- +---+ +---+
- | f |
- +---+
-
-In the above example, there are 7 nodes (A-G), each with 16 slots (0-f).
-Assuming no other meta data nodes in the tree, the key space is divided thusly:
-
- KEY PREFIX NODE
- ========== ====
- 137* D
- 138* E
- 13[0-69-f]* C
- 1[0-24-f]* B
- e6* G
- e[0-57-f]* F
- [02-df]* A
-
-So, for instance, keys with the following example index keys will be found in
-the appropriate nodes:
-
- INDEX KEY PREFIX NODE
- =============== ======= ====
- 13694892892489 13 C
- 13795289025897 137 D
- 13889dde88793 138 E
- 138bbb89003093 138 E
- 1394879524789 12 C
- 1458952489 1 B
- 9431809de993ba - A
- b4542910809cd - A
- e5284310def98 e F
- e68428974237 e6 G
- e7fffcbd443 e F
- f3842239082 - A
-
-To save memory, if a node can hold all the leaves in its portion of keyspace,
-then the node will have all those leaves in it and will not have any metadata
-pointers - even if some of those leaves would like to be in the same slot.
-
-A node can contain a heterogeneous mix of leaves and metadata pointers.
-Metadata pointers must be in the slots that match their subdivisions of key
-space. The leaves can be in any slot not occupied by a metadata pointer. It
-is guaranteed that none of the leaves in a node will match a slot occupied by a
-metadata pointer. If the metadata pointer is there, any leaf whose key matches
-the metadata key prefix must be in the subtree that the metadata pointer points
-to.
-
-In the above example list of index keys, node A will contain:
-
- SLOT CONTENT INDEX KEY (PREFIX)
- ==== =============== ==================
- 1 PTR TO NODE B 1*
- any LEAF 9431809de993ba
- any LEAF b4542910809cd
- e PTR TO NODE F e*
- any LEAF f3842239082
-
-and node B:
-
- 3 PTR TO NODE C 13*
- any LEAF 1458952489
-
-
-SHORTCUTS
----------
-
-Shortcuts are metadata records that jump over a piece of keyspace. A shortcut
-is a replacement for a series of single-occupancy nodes ascending through the
-levels. Shortcuts exist to save memory and to speed up traversal.
-
-It is possible for the root of the tree to be a shortcut - say, for example,
-the tree contains at least 17 nodes all with key prefix '1111'. The insertion
-algorithm will insert a shortcut to skip over the '1111' keyspace in a single
-bound and get to the fourth level where these actually become different.
-
-
-SPLITTING AND COLLAPSING NODES
-------------------------------
-
-Each node has a maximum capacity of 16 leaves and metadata pointers. If the
-insertion algorithm finds that it is trying to insert a 17th object into a
-node, that node will be split such that at least two leaves that have a common
-key segment at that level end up in a separate node rooted on that slot for
-that common key segment.
-
-If the leaves in a full node and the leaf that is being inserted are
-sufficiently similar, then a shortcut will be inserted into the tree.
-
-When the number of objects in the subtree rooted at a node falls to 16 or
-fewer, then the subtree will be collapsed down to a single node - and this will
-ripple towards the root if possible.
-
-
-NON-RECURSIVE ITERATION
------------------------
-
-Each node and shortcut contains a back pointer to its parent and the number of
-slot in that parent that points to it. None-recursive iteration uses these to
-proceed rootwards through the tree, going to the parent node, slot N + 1 to
-make sure progress is made without the need for a stack.
-
-The backpointers, however, make simultaneous alteration and iteration tricky.
-
-
-SIMULTANEOUS ALTERATION AND ITERATION
--------------------------------------
-
-There are a number of cases to consider:
-
- (1) Simple insert/replace. This involves simply replacing a NULL or old
- matching leaf pointer with the pointer to the new leaf after a barrier.
- The metadata blocks don't change otherwise. An old leaf won't be freed
- until after the RCU grace period.
-
- (2) Simple delete. This involves just clearing an old matching leaf. The
- metadata blocks don't change otherwise. The old leaf won't be freed until
- after the RCU grace period.
-
- (3) Insertion replacing part of a subtree that we haven't yet entered. This
- may involve replacement of part of that subtree - but that won't affect
- the iteration as we won't have reached the pointer to it yet and the
- ancestry blocks are not replaced (the layout of those does not change).
-
- (4) Insertion replacing nodes that we're actively processing. This isn't a
- problem as we've passed the anchoring pointer and won't switch onto the
- new layout until we follow the back pointers - at which point we've
- already examined the leaves in the replaced node (we iterate over all the
- leaves in a node before following any of its metadata pointers).
-
- We might, however, re-see some leaves that have been split out into a new
- branch that's in a slot further along than we were at.
-
- (5) Insertion replacing nodes that we're processing a dependent branch of.
- This won't affect us until we follow the back pointers. Similar to (4).
-
- (6) Deletion collapsing a branch under us. This doesn't affect us because the
- back pointers will get us back to the parent of the new node before we
- could see the new node. The entire collapsed subtree is thrown away
- unchanged - and will still be rooted on the same slot, so we shouldn't
- process it a second time as we'll go back to slot + 1.
-
-Note:
-
- (*) Under some circumstances, we need to simultaneously change the parent
- pointer and the parent slot pointer on a node (say, for example, we
- inserted another node before it and moved it up a level). We cannot do
- this without locking against a read - so we have to replace that node too.
-
- However, when we're changing a shortcut into a node this isn't a problem
- as shortcuts only have one slot and so the parent slot number isn't used
- when traversing backwards over one. This means that it's okay to change
- the slot number first - provided suitable barriers are used to make sure
- the parent slot number is read after the back pointer.
-
-Obsolete blocks and leaves are freed up after an RCU grace period has passed,
-so as long as anyone doing walking or iteration holds the RCU read lock, the
-old superstructure should not go away on them.
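The text above (recast as core-api/assoc_array.rst later in this patch) never
shows a complete caller. As a rough sketch only, with a hypothetical ``my_ops``
table and object type, and with the exclusive locking the text asks for
elided, the insert-then-apply pattern looks like this (error handling assumes
the usual ERR_PTR convention)::

    #include <linux/assoc_array.h>
    #include <linux/err.h>

    /* Hypothetical caller: 'my_ops' and 'struct my_object' are not part of
     * the kernel API; they stand in for whatever the user of the array
     * defines.
     */
    static int my_insert(struct assoc_array *array, struct my_object *obj)
    {
            struct assoc_array_edit *edit;

            edit = assoc_array_insert(array, &my_ops, obj->key, obj);
            if (IS_ERR(edit))
                    return PTR_ERR(edit);   /* typically -ENOMEM */

            assoc_array_apply_edit(edit);   /* publishes obj; dead blocks are
                                             * freed after an RCU grace period */
            return 0;
    }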
diff --git a/Documentation/bad_memory.txt b/Documentation/bad_memory.txt
deleted file mode 100644
index df8416213202..000000000000
--- a/Documentation/bad_memory.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-March 2008
-Jan-Simon Moeller, dl9pf@gmx.de
-
-
-How to deal with bad memory e.g. reported by memtest86+ ?
-#########################################################
-
-There are three possibilities I know of:
-
-1) Reinsert/swap the memory modules
-
-2) Buy new modules (best!) or try to exchange the memory
- if you have spare-parts
-
-3) Use BadRAM or memmap
-
-This Howto is about number 3) .
-
-
-BadRAM
-######
-BadRAM is the actively developed and available as kernel-patch
-here: http://rick.vanrein.org/linux/badram/
-
-For more details see the BadRAM documentation.
-
-memmap
-######
-
-memmap is already in the kernel and usable as kernel-parameter at
-boot-time. Its syntax is slightly strange and you may need to
-calculate the values by yourself!
-
-Syntax to exclude a memory area (see kernel-parameters.txt for details):
-memmap=<size>$<address>
-
-Example: memtest86+ reported here errors at address 0x18691458, 0x18698424 and
- some others. All had 0x1869xxxx in common, so I chose a pattern of
- 0x18690000,0xffff0000.
-
-With the numbers of the example above:
-memmap=64K$0x18690000
- or
-memmap=0x10000$0x18690000
-
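The 0xffff0000 masking step described above can also be done programmatically;
a throwaway helper (not part of any kernel interface) might look like this::

    #include <stdio.h>

    int main(void)
    {
            /* Address reported by memtest86+, as in the example above. */
            unsigned long bad  = 0x18691458UL;
            unsigned long base = bad & 0xffff0000UL; /* round down to 64K */

            /* Equivalent to memmap=64K$0x18690000 */
            printf("memmap=0x10000$0x%lx\n", base);
            return 0;
    }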
diff --git a/Documentation/basic_profiling.txt b/Documentation/basic_profiling.txt
deleted file mode 100644
index 8764e9f70821..000000000000
--- a/Documentation/basic_profiling.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-These instructions are deliberately very basic. If you want something clever,
-go read the real docs ;-) Please don't add more stuff, but feel free to
-correct my mistakes ;-) (mbligh@aracnet.com)
-Thanks to John Levon, Dave Hansen, et al. for help writing this.
-
-<test> is the thing you're trying to measure.
-Make sure you have the correct System.map / vmlinux referenced!
-
-It is probably easiest to use "make install" for linux and hack
-/sbin/installkernel to copy vmlinux to /boot, in addition to vmlinuz,
-config, System.map, which are usually installed by default.
-
-Readprofile
------------
-A recent readprofile command is needed for 2.6, such as found in util-linux
-2.12a, which can be downloaded from:
-
-http://www.kernel.org/pub/linux/utils/util-linux/
-
-Most distributions will ship it already.
-
-Add "profile=2" to the kernel command line.
-
-clear readprofile -r
- <test>
-dump output readprofile -m /boot/System.map > captured_profile
-
-Oprofile
---------
-
-Get the source (see Changes for required version) from
-http://oprofile.sourceforge.net/ and add "idle=poll" to the kernel command
-line.
-
-Configure with CONFIG_PROFILING=y and CONFIG_OPROFILE=y & reboot on new kernel
-
-./configure --with-kernel-support
-make install
-
-For superior results, be sure to enable the local APIC. If opreport sees
-a 0Hz CPU, APIC was not on. Be aware that idle=poll may mean a performance
-penalty.
-
-One time setup:
- opcontrol --setup --vmlinux=/boot/vmlinux
-
-clear opcontrol --reset
-start opcontrol --start
- <test>
-stop opcontrol --stop
-dump output opreport > output_file
-
-To only report on the kernel, run opreport -l /boot/vmlinux > output_file
-
-A reset is needed to clear old statistics, which survive a reboot.
-
diff --git a/Documentation/binfmt_misc.txt b/Documentation/binfmt_misc.txt
deleted file mode 100644
index ec83bbce547a..000000000000
--- a/Documentation/binfmt_misc.txt
+++ /dev/null
@@ -1,131 +0,0 @@
- Kernel Support for miscellaneous (your favourite) Binary Formats v1.1
- =====================================================================
-
-This Kernel feature allows you to invoke almost (for restrictions see below)
-every program by simply typing its name in the shell.
-This includes for example compiled Java(TM), Python or Emacs programs.
-
-To achieve this you must tell binfmt_misc which interpreter has to be invoked
-with which binary. Binfmt_misc recognises the binary-type by matching some bytes
-at the beginning of the file with a magic byte sequence (masking out specified
-bits) you have supplied. Binfmt_misc can also recognise a filename extension
-aka '.com' or '.exe'.
-
-First you must mount binfmt_misc:
- mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
-
-To actually register a new binary type, you have to set up a string looking like
-:name:type:offset:magic:mask:interpreter:flags (where you can choose the ':'
-upon your needs) and echo it to /proc/sys/fs/binfmt_misc/register.
-
-Here is what the fields mean:
- - 'name' is an identifier string. A new /proc file will be created with this
- name below /proc/sys/fs/binfmt_misc; cannot contain slashes '/' for obvious
- reasons.
- - 'type' is the type of recognition. Give 'M' for magic and 'E' for extension.
- - 'offset' is the offset of the magic/mask in the file, counted in bytes. This
- defaults to 0 if you omit it (i.e. you write ':name:type::magic...'). Ignored
- when using filename extension matching.
- - 'magic' is the byte sequence binfmt_misc is matching for. The magic string
- may contain hex-encoded characters like \x0a or \xA4. Note that you must
- escape any NUL bytes; parsing halts at the first one. In a shell environment
- you might have to write \\x0a to prevent the shell from eating your \.
- If you chose filename extension matching, this is the extension to be
- recognised (without the '.', the \x0a specials are not allowed). Extension
- matching is case sensitive, and slashes '/' are not allowed!
- - 'mask' is an (optional, defaults to all 0xff) mask. You can mask out some
- bits from matching by supplying a string like magic and as long as magic.
- The mask is anded with the byte sequence of the file. Note that you must
- escape any NUL bytes; parsing halts at the first one. Ignored when using
- filename extension matching.
- - 'interpreter' is the program that should be invoked with the binary as first
- argument (specify the full path)
- - 'flags' is an optional field that controls several aspects of the invocation
- of the interpreter. It is a string of capital letters, each controls a
- certain aspect. The following flags are supported -
- 'P' - preserve-argv[0]. Legacy behavior of binfmt_misc is to overwrite
- the original argv[0] with the full path to the binary. When this
- flag is included, binfmt_misc will add an argument to the argument
- vector for this purpose, thus preserving the original argv[0].
- e.g. If your interp is set to /bin/foo and you run `blah` (which is
- in /usr/local/bin), then the kernel will execute /bin/foo with
- argv[] set to ["/bin/foo", "/usr/local/bin/blah", "blah"]. The
- interp has to be aware of this so it can execute /usr/local/bin/blah
- with argv[] set to ["blah"].
- 'O' - open-binary. Legacy behavior of binfmt_misc is to pass the full path
- of the binary to the interpreter as an argument. When this flag is
- included, binfmt_misc will open the file for reading and pass its
- descriptor as an argument, instead of the full path, thus allowing
- the interpreter to execute non-readable binaries. This feature
- should be used with care - the interpreter has to be trusted not to
- emit the contents of the non-readable binary.
- 'C' - credentials. Currently, the behavior of binfmt_misc is to calculate
- the credentials and security token of the new process according to
- the interpreter. When this flag is included, these attributes are
- calculated according to the binary. It also implies the 'O' flag.
- This feature should be used with care as the interpreter
- will run with root permissions when a setuid binary owned by root
- is run with binfmt_misc.
- 'F' - fix binary. The usual behaviour of binfmt_misc is to spawn the
- binary lazily when the misc format file is invoked. However,
- this doesn't work very well in the face of mount namespaces and
- changeroots, so the F mode opens the binary as soon as the
- emulation is installed and uses the opened image to spawn the
- emulator, meaning it is always available once installed,
- regardless of how the environment changes.
-
-
-There are some restrictions:
- - the whole register string may not exceed 1920 characters
- - the magic must reside in the first 128 bytes of the file, i.e.
- offset+size(magic) has to be less than 128
- - the interpreter string may not exceed 127 characters
-
-To use binfmt_misc you have to mount it first. You can mount it with
-"mount -t binfmt_misc none /proc/sys/fs/binfmt_misc" command, or you can add
-a line "none /proc/sys/fs/binfmt_misc binfmt_misc defaults 0 0" to your
-/etc/fstab so it auto mounts on boot.
-
-You may want to add the binary formats in one of your /etc/rc scripts during
-boot-up. Read the manual of your init program to figure out how to do this
-right.
-
-Think about the order of adding entries! Later added entries are matched first!
-
-
-A few examples (assumed you are in /proc/sys/fs/binfmt_misc):
-
-- enable support for em86 (like binfmt_em86, for Alpha AXP only):
- echo ':i386:M::\x7fELF\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xff\xff:/bin/em86:' > register
- echo ':i486:M::\x7fELF\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xff\xff:/bin/em86:' > register
-
-- enable support for packed DOS applications (pre-configured dosemu hdimages):
- echo ':DEXE:M::\x0eDEX::/usr/bin/dosexec:' > register
-
-- enable support for Windows executables using wine:
- echo ':DOSWin:M::MZ::/usr/local/bin/wine:' > register
-
-For java support see Documentation/java.txt
-
-
-You can enable/disable binfmt_misc or one binary type by echoing 0 (to disable)
-or 1 (to enable) to /proc/sys/fs/binfmt_misc/status or /proc/.../the_name.
-Catting the file tells you the current status of binfmt_misc/the entry.
-
-You can remove one entry or all entries by echoing -1 to /proc/.../the_name
-or /proc/sys/fs/binfmt_misc/status.
-
-
-HINTS:
-======
-
-If you want to pass special arguments to your interpreter, you can
-write a wrapper script for it. See Documentation/java.txt for an
-example.
-
-Your interpreter should NOT look in the PATH for the filename; the kernel
-passes it the full filename (or the file descriptor) to use. Using $PATH can
-cause unexpected behaviour and can be a security hazard.
-
-
-Richard Günther <rguenth@tat.physik.uni-tuebingen.de>
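The registration format documented above can equally be driven from C. The
snippet below is a sketch only, using a made-up extension rule that hands
``*.py`` files to ``/usr/bin/python3``::

    #include <stdio.h>

    int main(void)
    {
            /* Equivalent to:
             *   echo ':py:E::py::/usr/bin/python3:' > /proc/sys/fs/binfmt_misc/register
             */
            FILE *f = fopen("/proc/sys/fs/binfmt_misc/register", "w");

            if (!f) {
                    perror("register");
                    return 1;
            }
            fputs(":py:E::py::/usr/bin/python3:", f);
            return fclose(f) ? 1 : 0;
    }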
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 918e1e0d0e78..01ddeaf64b0f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -348,7 +348,7 @@ Drivers can now specify a request prepare function (q->prep_rq_fn) that the
block layer would invoke to pre-build device commands for a given request,
or perform other preparatory processing for the request. This is routine is
called by elv_next_request(), i.e. typically just before servicing a request.
-(The prepare function would not be called for requests that have REQ_DONTPREP
+(The prepare function would not be called for requests that have RQF_DONTPREP
enabled)
Aside:
@@ -553,8 +553,8 @@ struct request {
struct request_list *rl;
}
-See the rq_flag_bits definitions for an explanation of the various flags
-available. Some bits are used by the block layer or i/o scheduler.
+See the req_ops and req_flag_bits definitions for an explanation of the various
+flags available. Some bits are used by the block layer or i/o scheduler.
The behaviour of the various sector counts are almost the same as before,
except that since we have multi-segment bios, current_nr_sectors refers
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index 1e4f835a659d..895bd3813115 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -240,11 +240,11 @@ All cfq queues doing synchronous sequential IO go on to sync-idle tree.
On this tree we idle on each queue individually.
All synchronous non-sequential queues go on sync-noidle tree. Also any
-request which are marked with REQ_NOIDLE go on this service tree. On this
-tree we do not idle on individual queues instead idle on the whole group
-of queues or the tree. So if there are 4 queues waiting for IO to dispatch
-we will idle only once last queue has dispatched the IO and there is
-no more IO on this service tree.
+synchronous write request which is not marked with REQ_IDLE goes on this
+service tree. On this tree we do not idle on individual queues instead idle
+on the whole group of queues or the tree. So if there are 4 queues waiting
+for IO to dispatch we will idle only once last queue has dispatched the IO
+and there is no more IO on this service tree.
All async writes go on async service tree. There is no idling on async
queues.
@@ -257,17 +257,17 @@ tree idling provides isolation with buffered write queues on async tree.
FAQ
===
-Q1. Why to idle at all on queues marked with REQ_NOIDLE.
+Q1. Why idle at all on queues not marked with REQ_IDLE?
-A1. We only do tree idle (all queues on sync-noidle tree) on queues marked
- with REQ_NOIDLE. This helps in providing isolation with all the sync-idle
+A1. We only do tree idle (all queues on sync-noidle tree) on queues not marked
+ with REQ_IDLE. This helps in providing isolation with all the sync-idle
queues. Otherwise in presence of many sequential readers, other
synchronous IO might not get fair share of disk.
For example, if there are 10 sequential readers doing IO and they get
- 100ms each. If a REQ_NOIDLE request comes in, it will be scheduled
- roughly after 1 second. If after completion of REQ_NOIDLE request we
- do not idle, and after a couple of milli seconds a another REQ_NOIDLE
+ 100ms each. If a !REQ_IDLE request comes in, it will be scheduled
+ roughly after 1 second. If, after completion of a !REQ_IDLE request, we
+ do not idle, and after a couple of milliseconds another !REQ_IDLE
request comes in, again it will be scheduled after 1second. Repeat it
and notice how a workload can lose its disk share and suffer due to
multiple sequential readers.
@@ -276,16 +276,16 @@ A1. We only do tree idle (all queues on sync-noidle tree) on queues marked
context of fsync, and later some journaling data is written. Journaling
data comes in only after fsync has finished its IO (atleast for ext4
that seemed to be the case). Now if one decides not to idle on fsync
- thread due to REQ_NOIDLE, then next journaling write will not get
+ thread due to !REQ_IDLE, then next journaling write will not get
scheduled for another second. A process doing small fsync, will suffer
badly in presence of multiple sequential readers.
- Hence doing tree idling on threads using REQ_NOIDLE flag on requests
+ Hence doing tree idling on threads using !REQ_IDLE flag on requests
provides isolation from multiple sequential readers and at the same
time we do not idle on individual threads.
-Q2. When to specify REQ_NOIDLE
-A2. I would think whenever one is doing synchronous write and not expecting
+Q2. When to specify REQ_IDLE
+A2. I would think whenever one is doing synchronous write and expecting
more writes to be dispatched from same context soon, should be able
- to specify REQ_NOIDLE on writes and that probably should work well for
+ to specify REQ_IDLE on writes and that probably should work well for
most of the cases.
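As a rough illustration of the renamed flag (this fragment is not part of the
patch and assumes a bio that has already been set up for the target device), a
submitter that expects more synchronous writes from the same context soon
would tag its I/O so that CFQ keeps idling on its queue::

    bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
    submit_bio(bio);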
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index d8880ca30af4..3140dbd860d8 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -72,4 +72,4 @@ use_per_node_hctx=[0/1]: Default: 0
queue for each CPU node in the system.
use_lightnvm=[0/1]: Default: 0
- Register device with LightNVM. Requires blk-mq to be used.
+ Register device with LightNVM. Requires blk-mq and CONFIG_NVM to be enabled.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2a3904030dea..51642159aedb 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -58,6 +58,20 @@ When read, this file shows the total number of block IO polls and how
many returned success. Writing '0' to this file will disable polling
for this device. Writing any non-zero value will enable this feature.
+io_poll_delay (RW)
+------------------
+If polling is enabled, this controls what kind of polling will be
+performed. It defaults to -1, which is classic polling. In this mode,
+the CPU will repeatedly ask for completions without giving up any time.
+If set to 0, a hybrid polling mode is used, where the kernel will attempt
+to make an educated guess at when the IO will complete. Based on this
+guess, the kernel will put the process issuing IO to sleep for an amount
+of time, before entering a classic poll loop. This mode might be a
+little slower than pure classic polling, but it will be more efficient.
+If set to a value larger than 0, the kernel will put the process issuing
+IO to sleep for this many microseconds before entering classic
+polling.
+
iostats (RW)
-------------
This file is used to control (on/off) the iostats accounting of the
@@ -169,5 +183,14 @@ This is the number of bytes the device can write in a single write-same
command. A value of '0' means write-same is not supported by this
device.
+wb_lat_usec (RW)
+----------------
+If the device is registered for writeback throttling, then this file shows
+the target minimum read latency. If this latency is exceeded in a given
+window of time (see wb_window_usec), then the writeback throttling will start
+scaling back writes. Writing a value of '0' to this file disables the
+feature. Writing a value of '-1' to this file resets the value to the
+default setting.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
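Both of the new attributes are ordinary sysfs files, so they can be set from
an init script or from C. A minimal sketch follows; the device name and the
values are examples only, not recommendations::

    #include <stdio.h>

    static int write_attr(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* 0 selects the hybrid polling mode described above. */
            write_attr("/sys/block/nvme0n1/queue/io_poll_delay", "0");
            /* Target a 2 ms minimum read latency for writeback throttling. */
            write_attr("/sys/block/nvme0n1/queue/wb_lat_usec", "2000");
            return 0;
    }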
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index b79d0a13e7cd..3a5477cc456e 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -184,7 +184,7 @@ infrequently used and the primary purpose of Smart Array controllers is to
act as a RAID controller for disk drives, so the vast majority of commands
are allocated for disk devices. However, if you have more than a few tape
drives attached to a smart array, the default number of commands may not be
-enought (for example, if you have 8 tape drives, you could only rewind 6
+enough (for example, if you have 8 tape drives, you could only rewind 6
at one time with the default number of commands.) The cciss_tape_cmds module
parameter allows more commands (up to 16 more) to be allocated for use by
tape drives. For example:
diff --git a/Documentation/blockdev/ramdisk.txt b/Documentation/blockdev/ramdisk.txt
index fe2ef978d85a..501e12e0323e 100644
--- a/Documentation/blockdev/ramdisk.txt
+++ b/Documentation/blockdev/ramdisk.txt
@@ -14,7 +14,7 @@ Contents:
The RAM disk driver is a way to use main system memory as a block device. It
is required for initrd, an initial filesystem used if you need to load modules
-in order to access the root filesystem (see Documentation/initrd.txt). It can
+in order to access the root filesystem (see Documentation/admin-guide/initrd.rst). It can
also be used for a temporary filesystem for crypto work, since the contents
are erased on reboot.
diff --git a/Documentation/braille-console.txt b/Documentation/braille-console.txt
deleted file mode 100644
index d0d042c2fd5e..000000000000
--- a/Documentation/braille-console.txt
+++ /dev/null
@@ -1,34 +0,0 @@
- Linux Braille Console
-
-To get early boot messages on a braille device (before userspace screen
-readers can start), you first need to compile the support for the usual serial
-console (see serial-console.txt), and for braille device (in Device Drivers -
-Accessibility).
-
-Then you need to specify a console=brl, option on the kernel command line, the
-format is:
-
- console=brl,serial_options...
-
-where serial_options... are the same as described in serial-console.txt
-
-So for instance you can use console=brl,ttyS0 if the braille device is connected
-to the first serial port, and console=brl,ttyS0,115200 to override the baud rate
-to 115200, etc.
-
-By default, the braille device will just show the last kernel message (console
-mode). To review previous messages, press the Insert key to switch to the VT
-review mode. In review mode, the arrow keys permit to browse in the VT content,
-page up/down keys go at the top/bottom of the screen, and the home key goes back
-to the cursor, hence providing very basic screen reviewing facility.
-
-Sound feedback can be obtained by adding the braille_console.sound=1 kernel
-parameter.
-
-For simplicity, only one braille console can be enabled, other uses of
-console=brl,... will be discarded. Also note that it does not interfere with
-the console selection mechanism described in serial-console.txt
-
-For now, only the VisioBraille device is supported.
-
-Samuel Thibault <samuel.thibault@ens-lyon.org>
diff --git a/Documentation/cgroup-v1/00-INDEX b/Documentation/cgroup-v1/00-INDEX
index 106885ad670d..13e0c85e7b35 100644
--- a/Documentation/cgroup-v1/00-INDEX
+++ b/Documentation/cgroup-v1/00-INDEX
@@ -8,7 +8,7 @@ cpuacct.txt
- CPU Accounting Controller; account CPU usage for groups of tasks.
cpusets.txt
- documents the cpusets feature; assign CPUs and Mem to a set of tasks.
-devices.txt
+admin-guide/devices.rst
- Device Whitelist Controller; description, interface and security.
freezer-subsystem.txt
- checkpointing; rationale to not use signals, interface.
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index 88951b179262..4a824d232472 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -161,7 +161,7 @@ The producer will look something like this:
unsigned long head = buffer->head;
/* The spin_unlock() and next spin_lock() provide needed ordering. */
- unsigned long tail = ACCESS_ONCE(buffer->tail);
+ unsigned long tail = READ_ONCE(buffer->tail);
if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
/* insert one item into the buffer */
@@ -222,7 +222,7 @@ This will instruct the CPU to make sure the index is up to date before reading
the new item, and then it shall make sure the CPU has finished reading the item
before it writes the new tail pointer, which will erase the item.
-Note the use of ACCESS_ONCE() and smp_load_acquire() to read the
+Note the use of READ_ONCE() and smp_load_acquire() to read the
opposition index. This prevents the compiler from discarding and
reloading its cached value - which some compilers will do across
smp_read_barrier_depends(). This isn't strictly needed if you can
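For readers without the kernel macros to hand, the same idea can be sketched
in userspace with C11 atomics. This is an analogue, not the kernel code: the
acquire load stands in for READ_ONCE() plus the ordering the text gets from
the spin_unlock()/spin_lock() pair, and the release store plays the role of
smp_store_release()::

    #include <stdatomic.h>

    #define RING_SIZE 16
    #define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

    struct ring {
            int buf[RING_SIZE];
            _Atomic unsigned long head;     /* written by the producer */
            _Atomic unsigned long tail;     /* written by the consumer */
    };

    /* Single-producer side, mirroring the producer shown above. */
    static int produce(struct ring *r, int item)
    {
            unsigned long head = atomic_load_explicit(&r->head,
                                                      memory_order_relaxed);
            /* Read the consumer's index exactly once (READ_ONCE analogue). */
            unsigned long tail = atomic_load_explicit(&r->tail,
                                                      memory_order_acquire);

            if (CIRC_SPACE(head, tail, RING_SIZE) < 1)
                    return 0;                       /* buffer full */

            r->buf[head & (RING_SIZE - 1)] = item;  /* insert the item */

            /* Publish the item before the new head index becomes visible. */
            atomic_store_explicit(&r->head, head + 1, memory_order_release);
            return 1;
    }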
diff --git a/Documentation/conf.py b/Documentation/conf.py
index bf6f310e5170..1ac958c0333d 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -34,10 +34,10 @@ from load_config import loadConfig
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include', 'cdomain']
+extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain']
# The name of the math extension changed on Sphinx 1.4
-if minor > 3:
+if major == 1 and minor > 3:
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
@@ -136,7 +136,7 @@ pygments_style = 'sphinx'
todo_include_todos = False
primary_domain = 'C'
-highlight_language = 'guess'
+highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
@@ -332,18 +332,32 @@ latex_elements = {
'''
}
+# Fix reference escape troubles with Sphinx 1.4.x
+if major == 1 and minor > 3:
+ latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
+
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
+ ('doc-guide/index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
+ 'The kernel development community', 'manual'),
+ ('admin-guide/index', 'linux-user.tex', 'Linux Kernel User Documentation',
+ 'The kernel development community', 'manual'),
+ ('core-api/index', 'core-api.tex', 'The kernel core API manual',
+ 'The kernel development community', 'manual'),
+ ('driver-api/index', 'driver-api.tex', 'The kernel driver API manual',
+ 'The kernel development community', 'manual'),
('kernel-documentation', 'kernel-documentation.tex', 'The Linux Kernel Documentation',
'The kernel development community', 'manual'),
- ('development-process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
+ ('process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
'The kernel development community', 'manual'),
('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
'The kernel development community', 'manual'),
('media/index', 'media.tex', 'Linux Media Subsystem Documentation',
'The kernel development community', 'manual'),
+ ('security/index', 'security.tex', 'The kernel security subsystem manual',
+ 'The kernel development community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
diff --git a/Documentation/core-api/assoc_array.rst b/Documentation/core-api/assoc_array.rst
new file mode 100644
index 000000000000..d83cfff9ea43
--- /dev/null
+++ b/Documentation/core-api/assoc_array.rst
@@ -0,0 +1,551 @@
+========================================
+Generic Associative Array Implementation
+========================================
+
+Overview
+========
+
+This associative array implementation is an object container with the following
+properties:
+
+1. Objects are opaque pointers. The implementation does not care where they
+ point (if anywhere) or what they point to (if anything).
+
+   .. note:: Pointers to objects _must_ be zero in the least significant bit.
+
+2. Objects do not need to contain linkage blocks for use by the array. This
+ permits an object to be located in multiple arrays simultaneously.
+ Rather, the array is made up of metadata blocks that point to objects.
+
+3. Objects require index keys to locate them within the array.
+
+4. Index keys must be unique. Inserting an object with the same key as one
+ already in the array will replace the old object.
+
+5. Index keys can be of any length and can be of different lengths.
+
+6. Index keys should encode the length early on, before any variation due to
+ length is seen.
+
+7. Index keys can include a hash to scatter objects throughout the array.
+
+8. The array can be iterated over. The objects will not necessarily come out in
+ key order.
+
+9. The array can be iterated over whilst it is being modified, provided the
+ RCU readlock is being held by the iterator. Note, however, under these
+ circumstances, some objects may be seen more than once. If this is a
+ problem, the iterator should lock against modification. Objects will not
+ be missed, however, unless deleted.
+
+10. Objects in the array can be looked up by means of their index key.
+
+11. Objects can be looked up whilst the array is being modified, provided the
+ RCU readlock is being held by the thread doing the look up.
+
+The implementation uses a tree of 16-pointer nodes internally that are indexed
+on each level by nibbles from the index key in the same manner as in a radix
+tree. To improve memory efficiency, shortcuts can be emplaced to skip over
+what would otherwise be a series of single-occupancy nodes. Further, nodes
+pack leaf object pointers into spare space in the node rather than making an
+extra branch until such time as an object needs to be added to a full node.
+
+
+The Public API
+==============
+
+The public API can be found in ``<linux/assoc_array.h>``. The associative
+array is rooted on the following structure::
+
+ struct assoc_array {
+ ...
+ };
+
+The code is selected by enabling ``CONFIG_ASSOCIATIVE_ARRAY`` with::
+
+ ./scripts/config -e ASSOCIATIVE_ARRAY
+
+
+Edit Script
+-----------
+
+The insertion and deletion functions produce an 'edit script' that can later be
+applied to effect the changes without risking ``ENOMEM``. This retains the
+preallocated metadata blocks that will be installed in the internal tree and
+keeps track of the metadata blocks that will be removed from the tree when the
+script is applied.
+
+This is also used to keep track of dead blocks and dead objects after the
+script has been applied so that they can be freed later. The freeing is done
+after an RCU grace period has passed - thus allowing access functions to
+proceed under the RCU read lock.
+
+The script appears outside of the API as a pointer of the type::
+
+ struct assoc_array_edit;
+
+There are two functions for dealing with the script:
+
+1. Apply an edit script::
+
+ void assoc_array_apply_edit(struct assoc_array_edit *edit);
+
+This will perform the edit functions, interpolating various write barriers
+to permit accesses under the RCU read lock to continue. The edit script
+will then be passed to ``call_rcu()`` to free it and any dead stuff it points
+to.
+
+2. Cancel an edit script::
+
+ void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+
+This frees the edit script and all preallocated memory immediately. If
+this was for insertion, the new object is _not_ released by this function,
+but must rather be released by the caller.
+
+These functions are guaranteed not to fail.
+
+
+Operations Table
+----------------
+
+Various functions take a table of operations::
+
+ struct assoc_array_ops {
+ ...
+ };
+
+This points to a number of methods, all of which need to be provided:
+
+1. Get a chunk of index key from caller data::
+
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+This should return a chunk of caller-supplied index key starting at the
+*bit* position given by the level argument. The level argument will be a
+multiple of ``ASSOC_ARRAY_KEY_CHUNK_SIZE`` and the function should return
+``ASSOC_ARRAY_KEY_CHUNK_SIZE`` bits. No error is possible.
+
+
+2. Get a chunk of an object's index key::
+
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+As the previous function, but gets its data from an object in the array
+rather than from a caller-supplied index key.
+
+
+3. See if this is the object we're looking for::
+
+ bool (*compare_object)(const void *object, const void *index_key);
+
+Compare the object against an index key and return ``true`` if it matches and
+``false`` if it doesn't.
+
+
+4. Diff the index keys of two objects::
+
+ int (*diff_objects)(const void *object, const void *index_key);
+
+Return the bit position at which the index key of the specified object
+differs from the given index key or -1 if they are the same.
+
+
+5. Free an object::
+
+ void (*free_object)(void *object);
+
+Free the specified object. Note that this may be called an RCU grace period
+after ``assoc_array_apply_edit()`` was called, so ``synchronize_rcu()`` may be
+necessary on module unloading.
+
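None of these methods are provided by the kernel itself, so a caller has to
supply all five. The sketch below is purely illustrative and is not part of
the patch: ``struct my_object`` is invented for this example and simply
carries a fixed two-word binary key::

    #include <linux/assoc_array.h>
    #include <linux/bitops.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct my_object {
            unsigned long key[2];           /* the index key: two chunks */
            /* ... payload ... */
    };

    static unsigned long my_get_key_chunk(const void *index_key, int level)
    {
            const unsigned long *key = index_key;

            return key[level / ASSOC_ARRAY_KEY_CHUNK_SIZE];
    }

    static unsigned long my_get_object_key_chunk(const void *object, int level)
    {
            const struct my_object *obj = object;

            return obj->key[level / ASSOC_ARRAY_KEY_CHUNK_SIZE];
    }

    static bool my_compare_object(const void *object, const void *index_key)
    {
            const struct my_object *obj = object;

            return memcmp(obj->key, index_key, sizeof(obj->key)) == 0;
    }

    static int my_diff_objects(const void *object, const void *index_key)
    {
            const struct my_object *obj = object;
            const unsigned long *key = index_key;
            int i;

            for (i = 0; i < 2; i++)
                    if (obj->key[i] != key[i])
                            return i * ASSOC_ARRAY_KEY_CHUNK_SIZE +
                                    __ffs(obj->key[i] ^ key[i]);
            return -1;      /* keys are identical */
    }

    static void my_free_object(void *object)
    {
            kfree(object);
    }

    static const struct assoc_array_ops my_ops = {
            .get_key_chunk          = my_get_key_chunk,
            .get_object_key_chunk   = my_get_object_key_chunk,
            .compare_object         = my_compare_object,
            .diff_objects           = my_diff_objects,
            .free_object            = my_free_object,
    };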
+
+Manipulation Functions
+----------------------
+
+There are a number of functions for manipulating an associative array:
+
+1. Initialise an associative array::
+
+ void assoc_array_init(struct assoc_array *array);
+
+This initialises the base structure for an associative array. It can't fail.
+
+
+2. Insert/replace an object in an associative array::
+
+ struct assoc_array_edit *
+ assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+
+This inserts the given object into the array. Note that the least
+significant bit of the pointer must be zero as it's used to type-mark
+pointers internally.
+
+If an object already exists for that key then it will be replaced with the
+new object and the old one will be freed automatically.
+
+The ``index_key`` argument should hold index key information and is
+passed to the methods in the ops table when they are called.
+
+This function makes no alteration to the array itself, but rather returns
+an edit script that must be applied. ``-ENOMEM`` is returned in the case of
+an out-of-memory error.
+
+The caller should lock exclusively against other modifiers of the array.
+
+
+3. Delete an object from an associative array::
+
+ struct assoc_array_edit *
+ assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+This deletes an object that matches the specified data from the array.
+
+The ``index_key`` argument should hold index key information and is
+passed to the methods in the ops table when they are called.
+
+This function makes no alteration to the array itself, but rather returns
+an edit script that must be applied. ``-ENOMEM`` is returned in the case of
+an out-of-memory error. ``NULL`` will be returned if the specified object is
+not found within the array.
+
+The caller should lock exclusively against other modifiers of the array.
+
+
+4. Delete all objects from an associative array::
+
+ struct assoc_array_edit *
+ assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+This deletes all the objects from an associative array and leaves it
+completely empty.
+
+This function makes no alteration to the array itself, but rather returns
+an edit script that must be applied. ``-ENOMEM`` is returned in the case of
+an out-of-memory error.
+
+The caller should lock exclusively against other modifiers of the array.
+
+
+5. Destroy an associative array, deleting all objects::
+
+ void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+
+This destroys the contents of the associative array and leaves it
+completely empty. It is not permitted for another thread to be traversing
+the array under the RCU read lock at the same time as this function is
+destroying it as no RCU deferral is performed on memory release -
+something that would require memory to be allocated.
+
+The caller should lock exclusively against other modifiers and accessors
+of the array.
+
+
+6. Garbage collect an associative array::
+
+ int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+This iterates over the objects in an associative array and passes each one to
+``iterator()``. If ``iterator()`` returns ``true``, the object is kept. If it
+returns ``false``, the object will be freed. If the ``iterator()`` function
+returns ``true``, it must perform any appropriate refcount incrementing on the
+object before returning.
+
+The internal tree will be packed down if possible as part of the iteration
+to reduce the number of nodes in it.
+
+The ``iterator_data`` is passed directly to ``iterator()`` and is otherwise
+ignored by the function.
+
+The function will return ``0`` if successful and ``-ENOMEM`` if there wasn't
+enough memory.
+
+It is possible for other threads to iterate over or search the array under
+the RCU read lock whilst this function is in progress. The caller should
+lock exclusively against other modifiers of the array.
+
+
+Access Functions
+----------------
+
+There are two functions for accessing an associative array:
+
+1. Iterate over all the objects in an associative array::
+
+ int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+
+This passes each object in the array to the iterator callback function.
+``iterator_data`` is private data for that function.
+
+This may be used on an array at the same time as the array is being
+modified, provided the RCU read lock is held. Under such circumstances,
+it is possible for the iteration function to see some objects twice. If
+this is a problem, then modification should be locked against. The
+iteration algorithm should not, however, miss any objects.
+
+The function will return ``0`` if no objects were in the array or else it will
+return the result of the last iterator function called. Iteration stops
+immediately if any call to the iteration function results in a non-zero
+return.
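+
+For instance, a minimal sketch that counts the objects in ``my_array`` (the
+count may include duplicates if the array is modified concurrently, as noted
+above)::
+
+    static int count_object(const void *object, void *iterator_data)
+    {
+            size_t *count = iterator_data;
+
+            (*count)++;
+            return 0;       /* a non-zero return would stop the iteration */
+    }
+
+    size_t count = 0;
+
+    rcu_read_lock();
+    assoc_array_iterate(&my_array, count_object, &count);
+    rcu_read_unlock();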
+
+
+2. Find an object in an associative array::
+
+ void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+
+This walks through the array's internal tree directly to the object
+specified by the index key.
+
+This may be used on an array at the same time as the array is being
+modified, provided the RCU read lock is held.
+
+The function will return the object if found, or ``NULL`` if the object was
+not found.
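+
+A minimal sketch of a lookup under the RCU read lock (``my_array``, ``my_ops``,
+``key`` and the object's ``usage`` counter are assumptions made for the
+example)::
+
+    struct my_object *obj;
+
+    rcu_read_lock();
+    obj = assoc_array_find(&my_array, &my_ops, key);
+    if (obj)
+            atomic_inc(&obj->usage);   /* pin it before leaving the RCU section */
+    rcu_read_unlock();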
+
+
+Index Key Form
+--------------
+
+The index key can be of any form, but since the algorithms aren't told how long
+the key is, it is strongly recommended that the index key include its length
+early on, before any part of the key that varies with that length can affect
+comparisons.
+
+This will cause leaves with different length keys to scatter away from each
+other - and those with the same length keys to cluster together.
+
+It is also recommended that the index key begin with a hash of the rest of the
+key to maximise scattering throughout keyspace.
+
+The better the scattering, the wider and lower the internal tree will be.
+
+Poor scattering isn't too much of a problem as there are shortcuts and nodes
+can contain mixtures of leaves and metadata pointers.
+
+The index key is read in machine-word-sized chunks. Each chunk is subdivided into
+one nibble (4 bits) per level, so on a 32-bit CPU this is good for 8 levels and
+on a 64-bit CPU, 16 levels. Unless the scattering is really poor, it is
+unlikely that more than one word of any particular index key will have to be
+used.
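+
+As an illustration of these recommendations (the structure and its fields are
+hypothetical, not part of the API), an index key blob might place the hash
+first and the length immediately after it::
+
+    struct my_index_key {
+            u32     hash;   /* hash of the rest of the key: maximises scattering */
+            u16     len;    /* key length: separates different-length keys early */
+            u8      data[]; /* the actual key payload */
+    };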
+
+
+Internal Workings
+=================
+
+The associative array data structure has an internal tree. This tree is
+constructed of two types of metadata blocks: nodes and shortcuts.
+
+A node is an array of slots. Each slot can contain one of four things:
+
+* A NULL pointer, indicating that the slot is empty.
+* A pointer to an object (a leaf).
+* A pointer to a node at the next level.
+* A pointer to a shortcut.
+
+
+Basic Internal Tree Layout
+--------------------------
+
+Ignoring shortcuts for the moment, the nodes form a multilevel tree. The index
+key space is strictly subdivided by the nodes in the tree and nodes occur on
+fixed levels. For example::
+
+ Level: 0 1 2 3
+ =============== =============== =============== ===============
+ NODE D
+ NODE B NODE C +------>+---+
+ +------>+---+ +------>+---+ | | 0 |
+ NODE A | | 0 | | | 0 | | +---+
+ +---+ | +---+ | +---+ | : :
+ | 0 | | : : | : : | +---+
+ +---+ | +---+ | +---+ | | f |
+ | 1 |---+ | 3 |---+ | 7 |---+ +---+
+ +---+ +---+ +---+
+ : : : : | 8 |---+
+ +---+ +---+ +---+ | NODE E
+ | e |---+ | f | : : +------>+---+
+ +---+ | +---+ +---+ | 0 |
+ | f | | | f | +---+
+ +---+ | +---+ : :
+ | NODE F +---+
+ +------>+---+ | f |
+ | 0 | NODE G +---+
+ +---+ +------>+---+
+ : : | | 0 |
+ +---+ | +---+
+ | 6 |---+ : :
+ +---+ +---+
+ : : | f |
+ +---+ +---+
+ | f |
+ +---+
+
+In the above example, there are 7 nodes (A-G), each with 16 slots (0-f).
+Assuming no other metadata nodes in the tree, the key space is divided as
+follows::
+
+ KEY PREFIX NODE
+ ========== ====
+ 137* D
+ 138* E
+ 13[0-69-f]* C
+ 1[0-24-f]* B
+ e6* G
+ e[0-57-f]* F
+ [02-df]* A
+
+So, for instance, keys with the following example index keys will be found in
+the appropriate nodes::
+
+ INDEX KEY PREFIX NODE
+ =============== ======= ====
+ 13694892892489 13 C
+ 13795289025897 137 D
+ 13889dde88793 138 E
+ 138bbb89003093 138 E
+ 1394879524789 13 C
+ 1458952489 1 B
+ 9431809de993ba - A
+ b4542910809cd - A
+ e5284310def98 e F
+ e68428974237 e6 G
+ e7fffcbd443 e F
+ f3842239082 - A
+
+To save memory, if a node can hold all the leaves in its portion of keyspace,
+then the node will have all those leaves in it and will not have any metadata
+pointers - even if some of those leaves would like to be in the same slot.
+
+A node can contain a heterogeneous mix of leaves and metadata pointers.
+Metadata pointers must be in the slots that match their subdivisions of key
+space. The leaves can be in any slot not occupied by a metadata pointer. It
+is guaranteed that none of the leaves in a node will match a slot occupied by a
+metadata pointer. If the metadata pointer is there, any leaf whose key matches
+the metadata key prefix must be in the subtree that the metadata pointer points
+to.
+
+In the above example list of index keys, node A will contain::
+
+ SLOT CONTENT INDEX KEY (PREFIX)
+ ==== =============== ==================
+ 1 PTR TO NODE B 1*
+ any LEAF 9431809de993ba
+ any LEAF b4542910809cd
+ e PTR TO NODE F e*
+ any LEAF f3842239082
+
+and node B::
+
+ 3 PTR TO NODE C 13*
+ any LEAF 1458952489
+
+
+Shortcuts
+---------
+
+Shortcuts are metadata records that jump over a piece of keyspace. A shortcut
+is a replacement for a series of single-occupancy nodes ascending through the
+levels. Shortcuts exist to save memory and to speed up traversal.
+
+It is possible for the root of the tree to be a shortcut - say, for example,
+the tree contains at least 17 objects, all with key prefix ``1111``. The
+insertion algorithm will insert a shortcut to skip over the ``1111`` keyspace
+in a single bound and get to the fourth level where these actually become
+different.
+
+
+Splitting And Collapsing Nodes
+------------------------------
+
+Each node has a maximum capacity of 16 leaves and metadata pointers. If the
+insertion algorithm finds that it is trying to insert a 17th object into a
+node, that node will be split such that at least two leaves that have a common
+key segment at that level end up in a separate node rooted on that slot for
+that common key segment.
+
+If the leaves in a full node and the leaf that is being inserted are
+sufficiently similar, then a shortcut will be inserted into the tree.
+
+When the number of objects in the subtree rooted at a node falls to 16 or
+fewer, then the subtree will be collapsed down to a single node - and this will
+ripple towards the root if possible.
+
+
+Non-Recursive Iteration
+-----------------------
+
+Each node and shortcut contains a back pointer to its parent and the number of
+the slot in that parent that points to it. Non-recursive iteration uses these
+to proceed rootwards through the tree, moving on to slot N + 1 of the parent
+node so that progress is made without the need for a stack.
+
+The backpointers, however, make simultaneous alteration and iteration tricky.
+
+
+Simultaneous Alteration And Iteration
+-------------------------------------
+
+There are a number of cases to consider:
+
+1. Simple insert/replace. This involves simply replacing a NULL or old
+ matching leaf pointer with the pointer to the new leaf after a barrier.
+ The metadata blocks don't change otherwise. An old leaf won't be freed
+ until after the RCU grace period.
+
+2. Simple delete. This involves just clearing an old matching leaf. The
+ metadata blocks don't change otherwise. The old leaf won't be freed until
+ after the RCU grace period.
+
+3. Insertion replacing part of a subtree that we haven't yet entered. This
+ may involve replacement of part of that subtree - but that won't affect
+ the iteration as we won't have reached the pointer to it yet and the
+ ancestry blocks are not replaced (the layout of those does not change).
+
+4. Insertion replacing nodes that we're actively processing. This isn't a
+ problem as we've passed the anchoring pointer and won't switch onto the
+ new layout until we follow the back pointers - at which point we've
+ already examined the leaves in the replaced node (we iterate over all the
+ leaves in a node before following any of its metadata pointers).
+
+ We might, however, re-see some leaves that have been split out into a new
+ branch that's in a slot further along than we were at.
+
+5. Insertion replacing nodes that we're processing a dependent branch of.
+ This won't affect us until we follow the back pointers. Similar to (4).
+
+6. Deletion collapsing a branch under us. This doesn't affect us because the
+ back pointers will get us back to the parent of the new node before we
+ could see the new node. The entire collapsed subtree is thrown away
+ unchanged - and will still be rooted on the same slot, so we shouldn't
+ process it a second time as we'll go back to slot + 1.
+
+.. note::
+
+ Under some circumstances, we need to simultaneously change the parent
+ pointer and the parent slot pointer on a node (say, for example, we
+ inserted another node before it and moved it up a level). We cannot do
+ this without locking against a read - so we have to replace that node too.
+
+ However, when we're changing a shortcut into a node this isn't a problem
+ as shortcuts only have one slot and so the parent slot number isn't used
+ when traversing backwards over one. This means that it's okay to change
+ the slot number first - provided suitable barriers are used to make sure
+ the parent slot number is read after the back pointer.
+
+Obsolete blocks and leaves are freed up after an RCU grace period has passed,
+so as long as anyone doing walking or iteration holds the RCU read lock, the
+old superstructure should not go away on them.
diff --git a/Documentation/atomic_ops.txt b/Documentation/core-api/atomic_ops.rst
index c9d1cacb4395..55e43f1c80de 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/core-api/atomic_ops.rst
@@ -1,36 +1,42 @@
- Semantics and Behavior of Atomic and
- Bitmask Operations
+=======================================================
+Semantics and Behavior of Atomic and Bitmask Operations
+=======================================================
- David S. Miller
+:Author: David S. Miller
- This document is intended to serve as a guide to Linux port
+This document is intended to serve as a guide to Linux port
maintainers on how to implement atomic counter, bitops, and spinlock
interfaces properly.
- The atomic_t type should be defined as a signed integer and
+Atomic Type And Operations
+==========================
+
+The atomic_t type should be defined as a signed integer and
the atomic_long_t type as a signed long integer. Also, they should
be made opaque such that any kind of cast to a normal C integer type
-will fail. Something like the following should suffice:
+will fail. Something like the following should suffice::
typedef struct { int counter; } atomic_t;
typedef struct { long counter; } atomic_long_t;
Historically, counter has been declared volatile. This is now discouraged.
-See Documentation/volatile-considered-harmful.txt for the complete rationale.
+See :ref:`Documentation/process/volatile-considered-harmful.rst
+<volatile_considered_harmful>` for the complete rationale.
local_t is very similar to atomic_t. If the counter is per CPU and only
updated by one CPU, local_t is probably more appropriate. Please see
-Documentation/local_ops.txt for the semantics of local_t.
+:ref:`Documentation/core-api/local_ops.rst <local_ops>` for the semantics of
+local_t.
The first operations to implement for atomic_t's are the initializers and
-plain reads.
+plain reads. ::
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
-The first macro is used in definitions, such as:
+The first macro is used in definitions, such as::
-static atomic_t my_counter = ATOMIC_INIT(1);
+ static atomic_t my_counter = ATOMIC_INIT(1);
The initializer is atomic in that the return values of the atomic operations
are guaranteed to be correct reflecting the initialized value if the
@@ -38,10 +44,10 @@ initializer is used before runtime. If the initializer is used at runtime, a
proper implicit or explicit read memory barrier is needed before reading the
value with atomic_read from another thread.
-As with all of the atomic_ interfaces, replace the leading "atomic_"
-with "atomic_long_" to operate on atomic_long_t.
+As with all of the ``atomic_`` interfaces, replace the leading ``atomic_``
+with ``atomic_long_`` to operate on atomic_long_t.
-The second interface can be used at runtime, as in:
+The second interface can be used at runtime, as in::
struct foo { atomic_t counter; };
...
@@ -59,7 +65,7 @@ been set with this operation or set with another operation. A proper implicit
or explicit memory barrier is needed before the value set with the operation
is guaranteed to be readable with atomic_read from another thread.
-Next, we have:
+Next, we have::
#define atomic_read(v) ((v)->counter)
@@ -73,36 +79,37 @@ initialization by any other thread is visible yet, so the user of the
interface must take care of that with a proper implicit or explicit memory
barrier.
-*** WARNING: atomic_read() and atomic_set() DO NOT IMPLY BARRIERS! ***
+.. warning::
-Some architectures may choose to use the volatile keyword, barriers, or inline
-assembly to guarantee some degree of immediacy for atomic_read() and
-atomic_set(). This is not uniformly guaranteed, and may change in the future,
-so all users of atomic_t should treat atomic_read() and atomic_set() as simple
-C statements that may be reordered or optimized away entirely by the compiler
-or processor, and explicitly invoke the appropriate compiler and/or memory
-barrier for each use case. Failure to do so will result in code that may
-suddenly break when used with different architectures or compiler
-optimizations, or even changes in unrelated code which changes how the
-compiler optimizes the section accessing atomic_t variables.
+ ``atomic_read()`` and ``atomic_set()`` DO NOT IMPLY BARRIERS!
-*** YOU HAVE BEEN WARNED! ***
+ Some architectures may choose to use the volatile keyword, barriers, or
+ inline assembly to guarantee some degree of immediacy for atomic_read()
+ and atomic_set(). This is not uniformly guaranteed, and may change in
+ the future, so all users of atomic_t should treat atomic_read() and
+ atomic_set() as simple C statements that may be reordered or optimized
+ away entirely by the compiler or processor, and explicitly invoke the
+ appropriate compiler and/or memory barrier for each use case. Failure
+ to do so will result in code that may suddenly break when used with
+ different architectures or compiler optimizations, or even changes in
+ unrelated code which changes how the compiler optimizes the section
+ accessing atomic_t variables.
Properly aligned pointers, longs, ints, and chars (and unsigned
equivalents) may be atomically loaded from and stored to in the same
-sense as described for atomic_read() and atomic_set(). The ACCESS_ONCE()
-macro should be used to prevent the compiler from using optimizations
-that might otherwise optimize accesses out of existence on the one hand,
-or that might create unsolicited accesses on the other.
+sense as described for atomic_read() and atomic_set(). The READ_ONCE()
+and WRITE_ONCE() macros should be used to prevent the compiler from using
+optimizations that might otherwise optimize accesses out of existence on
+the one hand, or that might create unsolicited accesses on the other.
-For example consider the following code:
+For example consider the following code::
while (a > 0)
do_something();
If the compiler can prove that do_something() does not store to the
variable a, then the compiler is within its rights transforming this to
-the following:
+the following::
tmp = a;
if (a > 0)
@@ -110,14 +117,14 @@ the following:
do_something();
If you don't want the compiler to do this (and you probably don't), then
-you should use something like the following:
+you should use something like the following::
- while (ACCESS_ONCE(a) < 0)
+ while (READ_ONCE(a) > 0)
do_something();
Alternatively, you could place a barrier() call in the loop.
-For another example, consider the following code:
+For another example, consider the following code::
tmp_a = a;
do_something_with(tmp_a);
@@ -125,7 +132,7 @@ For another example, consider the following code:
If the compiler can prove that do_something_with() does not store to the
variable a, then the compiler is within its rights to manufacture an
-additional load as follows:
+additional load as follows::
tmp_a = a;
do_something_with(tmp_a);
@@ -139,15 +146,15 @@ The compiler would be likely to manufacture this additional load if
do_something_with() was an inline function that made very heavy use
of registers: reloading from variable a could save a flush to the
stack and later reload. To prevent the compiler from attacking your
-code in this manner, write the following:
+code in this manner, write the following::
- tmp_a = ACCESS_ONCE(a);
+ tmp_a = READ_ONCE(a);
do_something_with(tmp_a);
do_something_else_with(tmp_a);
For a final example, consider the following code, assuming that the
variable a is set at boot time before the second CPU is brought online
-and never changed later, so that memory barriers are not needed:
+and never changed later, so that memory barriers are not needed::
if (a)
b = 9;
@@ -155,7 +162,7 @@ and never changed later, so that memory barriers are not needed:
b = 42;
The compiler is within its rights to manufacture an additional store
-by transforming the above code into the following:
+by transforming the above code into the following::
b = 42;
if (a)
@@ -163,20 +170,22 @@ by transforming the above code into the following:
This could come as a fatal surprise to other code running concurrently
that expected b to never have the value 42 if a was zero. To prevent
-the compiler from doing this, write something like:
+the compiler from doing this, write something like::
if (a)
- ACCESS_ONCE(b) = 9;
+ WRITE_ONCE(b, 9);
else
- ACCESS_ONCE(b) = 42;
+ WRITE_ONCE(b, 42);
Don't even -think- about doing this without proper use of memory barriers,
locks, or atomic operations if variable a can change at runtime!
-*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! ***
+.. warning::
+
+ ``READ_ONCE()`` AND ``WRITE_ONCE()`` DO NOT IMPLY BARRIERS!
Now, we move onto the atomic operation interfaces typically implemented with
-the help of assembly code.
+the help of assembly code. ::
void atomic_add(int i, atomic_t *v);
void atomic_sub(int i, atomic_t *v);
@@ -192,7 +201,7 @@ One very important aspect of these two routines is that they DO NOT
require any explicit memory barriers. They need only perform the
atomic_t counter update in an SMP safe manner.
-Next, we have:
+Next, we have::
int atomic_inc_return(atomic_t *v);
int atomic_dec_return(atomic_t *v);
@@ -214,7 +223,7 @@ If the atomic instructions used in an implementation provide explicit
memory barrier semantics which satisfy the above requirements, that is
fine as well.
-Let's move on:
+Let's move on::
int atomic_add_return(int i, atomic_t *v);
int atomic_sub_return(int i, atomic_t *v);
@@ -224,7 +233,7 @@ explicit counter adjustment is given instead of the implicit "1".
This means that like atomic_{inc,dec}_return(), the memory barrier
semantics are required.
-Next:
+Next::
int atomic_inc_and_test(atomic_t *v);
int atomic_dec_and_test(atomic_t *v);
@@ -234,13 +243,13 @@ given atomic counter. They return a boolean indicating whether the
resulting counter value was zero or not.
Again, these primitives provide explicit memory barrier semantics around
-the atomic operation.
+the atomic operation::
int atomic_sub_and_test(int i, atomic_t *v);
This is identical to atomic_dec_and_test() except that an explicit
decrement is given instead of the implicit "1". This primitive must
-provide explicit memory barrier semantics around the operation.
+provide explicit memory barrier semantics around the operation::
int atomic_add_negative(int i, atomic_t *v);
@@ -249,7 +258,7 @@ is return which indicates whether the resulting counter value is negative.
This primitive must provide explicit memory barrier semantics around
the operation.
-Then:
+Then::
int atomic_xchg(atomic_t *v, int new);
@@ -257,14 +266,14 @@ This performs an atomic exchange operation on the atomic variable v, setting
the given new value. It returns the old value that the atomic variable v had
just before the operation.
-atomic_xchg must provide explicit memory barriers around the operation.
+atomic_xchg must provide explicit memory barriers around the operation. ::
int atomic_cmpxchg(atomic_t *v, int old, int new);
This performs an atomic compare exchange operation on the atomic value v,
with the given old and new values. Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
-other accesses of *v are performed through atomic_xxx operations.
+other accesses of \*v are performed through atomic_xxx operations.
atomic_cmpxchg must provide explicit memory barriers around the operation,
although if the comparison fails then no memory ordering guarantees are
@@ -273,7 +282,7 @@ required.
The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.
-Finally:
+Finally::
int atomic_add_unless(atomic_t *v, int a, int u);
@@ -289,12 +298,12 @@ atomic_inc_not_zero, equivalent to atomic_add_unless(v, 1, 0)
If a caller requires memory barrier semantics around an atomic_t
operation which does not return a value, a set of interfaces are
-defined which accomplish this:
+defined which accomplish this::
void smp_mb__before_atomic(void);
void smp_mb__after_atomic(void);
-For example, smp_mb__before_atomic() can be used like so:
+For example, smp_mb__before_atomic() can be used like so::
obj->dead = 1;
smp_mb__before_atomic();
@@ -315,67 +324,69 @@ atomic_t implementation above can have disastrous results. Here is
an example, which follows a pattern occurring frequently in the Linux
kernel. It is the use of atomic counters to implement reference
counting, and it works such that once the counter falls to zero it can
-be guaranteed that no other entity can be accessing the object:
-
-static void obj_list_add(struct obj *obj, struct list_head *head)
-{
- obj->active = 1;
- list_add(&obj->list, head);
-}
-
-static void obj_list_del(struct obj *obj)
-{
- list_del(&obj->list);
- obj->active = 0;
-}
-
-static void obj_destroy(struct obj *obj)
-{
- BUG_ON(obj->active);
- kfree(obj);
-}
-
-struct obj *obj_list_peek(struct list_head *head)
-{
- if (!list_empty(head)) {
- struct obj *obj;
+be guaranteed that no other entity can be accessing the object::
+
+ static void obj_list_add(struct obj *obj, struct list_head *head)
+ {
+ obj->active = 1;
+ list_add(&obj->list, head);
+ }
+
+ static void obj_list_del(struct obj *obj)
+ {
+ list_del(&obj->list);
+ obj->active = 0;
+ }
- obj = list_entry(head->next, struct obj, list);
- atomic_inc(&obj->refcnt);
- return obj;
+ static void obj_destroy(struct obj *obj)
+ {
+ BUG_ON(obj->active);
+ kfree(obj);
}
- return NULL;
-}
-void obj_poke(void)
-{
- struct obj *obj;
+ struct obj *obj_list_peek(struct list_head *head)
+ {
+ if (!list_empty(head)) {
+ struct obj *obj;
+
+ obj = list_entry(head->next, struct obj, list);
+ atomic_inc(&obj->refcnt);
+ return obj;
+ }
+ return NULL;
+ }
+
+ void obj_poke(void)
+ {
+ struct obj *obj;
+
+ spin_lock(&global_list_lock);
+ obj = obj_list_peek(&global_list);
+ spin_unlock(&global_list_lock);
- spin_lock(&global_list_lock);
- obj = obj_list_peek(&global_list);
- spin_unlock(&global_list_lock);
+ if (obj) {
+ obj->ops->poke(obj);
+ if (atomic_dec_and_test(&obj->refcnt))
+ obj_destroy(obj);
+ }
+ }
+
+ void obj_timeout(struct obj *obj)
+ {
+ spin_lock(&global_list_lock);
+ obj_list_del(obj);
+ spin_unlock(&global_list_lock);
- if (obj) {
- obj->ops->poke(obj);
if (atomic_dec_and_test(&obj->refcnt))
obj_destroy(obj);
}
-}
-
-void obj_timeout(struct obj *obj)
-{
- spin_lock(&global_list_lock);
- obj_list_del(obj);
- spin_unlock(&global_list_lock);
- if (atomic_dec_and_test(&obj->refcnt))
- obj_destroy(obj);
-}
+.. note::
-(This is a simplification of the ARP queue management in the
- generic neighbour discover code of the networking. Olaf Kirch
- found a bug wrt. memory barriers in kfree_skb() that exposed
- the atomic_t memory barrier requirements quite clearly.)
+ This is a simplification of the ARP queue management in the generic
+ neighbour discovery code of the networking subsystem. Olaf Kirch found a bug
+ wrt. memory barriers in kfree_skb() that exposed the atomic_t memory barrier
+ requirements quite clearly.
Given the above scheme, it must be the case that the obj->active
update done by the obj list deletion be visible to other processors
@@ -383,7 +394,7 @@ before the atomic counter decrement is performed.
Otherwise, the counter could fall to zero, yet obj->active would still
be set, thus triggering the assertion in obj_destroy(). The error
-sequence looks like this:
+sequence looks like this::
cpu 0 cpu 1
obj_poke() obj_timeout()
@@ -420,6 +431,10 @@ same scheme.
Another note is that the atomic_t operations returning values are
extremely slow on an old 386.
+
+Atomic Bitmask
+==============
+
We will now cover the atomic bitmask operations. You will find that
their SMP and memory barrier semantics are similar in shape and scope
to the atomic_t ops above.
@@ -427,7 +442,7 @@ to the atomic_t ops above.
Native atomic bit operations are defined to operate on objects aligned
to the size of an "unsigned long" C data type, and are least of that
size. The endianness of the bits within each "unsigned long" are the
-native endianness of the cpu.
+native endianness of the cpu. ::
void set_bit(unsigned long nr, volatile unsigned long *addr);
void clear_bit(unsigned long nr, volatile unsigned long *addr);
@@ -437,7 +452,7 @@ These routines set, clear, and change, respectively, the bit number
indicated by "nr" on the bit mask pointed to by "ADDR".
They must execute atomically, yet there are no implicit memory barrier
-semantics required of these interfaces.
+semantics required of these interfaces. ::
int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
@@ -466,7 +481,7 @@ must provide explicit memory barrier semantics around their execution.
All memory operations before the atomic bit operation call must be
made visible globally before the atomic bit operation is made visible.
Likewise, the atomic bit operation must be visible globally before any
-subsequent memory operation is made visible. For example:
+subsequent memory operation is made visible. For example::
obj->dead = 1;
if (test_and_set_bit(0, &obj->flags))
@@ -479,7 +494,7 @@ done by test_and_set_bit() becomes visible. Likewise, the atomic
memory operation done by test_and_set_bit() must become visible before
"obj->killed = 1;" is visible.
-Finally there is the basic operation:
+Finally there is the basic operation::
int test_bit(unsigned long nr, __const__ volatile unsigned long *addr);
@@ -488,13 +503,13 @@ pointed to by "addr".
If explicit memory barriers are required around {set,clear}_bit() (which do
not return a value, and thus does not need to provide memory barrier
-semantics), two interfaces are provided:
+semantics), two interfaces are provided::
void smp_mb__before_atomic(void);
void smp_mb__after_atomic(void);
They are used as follows, and are akin to their atomic_t operation
-brothers:
+brothers::
/* All memory operations before this call will
* be globally visible before the clear_bit().
@@ -511,7 +526,7 @@ There are two special bitops with lock barrier semantics (acquire/release,
same as spinlocks). These operate in the same way as their non-_lock/unlock
postfixed variants, except that they are to provide acquire/release semantics,
respectively. This means they can be used for bit_spin_trylock and
-bit_spin_unlock type operations without specifying any more barriers.
+bit_spin_unlock type operations without specifying any more barriers. ::
int test_and_set_bit_lock(unsigned long nr, unsigned long *addr);
void clear_bit_unlock(unsigned long nr, unsigned long *addr);
@@ -526,7 +541,7 @@ provided. They are used in contexts where some other higher-level SMP
locking scheme is being used to protect the bitmask, and thus less
expensive non-atomic operations may be used in the implementation.
They have names similar to the above bitmask operation interfaces,
-except that two underscores are prefixed to the interface name.
+except that two underscores are prefixed to the interface name. ::
void __set_bit(unsigned long nr, volatile unsigned long *addr);
void __clear_bit(unsigned long nr, volatile unsigned long *addr);
@@ -542,9 +557,11 @@ The routines xchg() and cmpxchg() must provide the same exact
memory-barrier semantics as the atomic and bit operations returning
values.
-Note: If someone wants to use xchg(), cmpxchg() and their variants,
-linux/atomic.h should be included rather than asm/cmpxchg.h, unless
-the code is in arch/* and can take care of itself.
+.. note::
+
+ If someone wants to use xchg(), cmpxchg() and their variants,
+ linux/atomic.h should be included rather than asm/cmpxchg.h, unless the
+ code is in arch/* and can take care of itself.
Spinlocks and rwlocks have memory barrier expectations as well.
The rule to follow is simple:
@@ -558,7 +575,7 @@ The rule to follow is simple:
Which finally brings us to _atomic_dec_and_lock(). There is an
architecture-neutral version implemented in lib/dec_and_lock.c,
-but most platforms will wish to optimize this in assembler.
+but most platforms will wish to optimize this in assembler. ::
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
@@ -573,7 +590,7 @@ sure the spinlock operation is globally visible before any
subsequent memory operation.
We can demonstrate this operation more clearly if we define
-an abstract atomic operation:
+an abstract atomic operation::
long cas(long *mem, long old, long new);
@@ -584,48 +601,48 @@ an abstract atomic operation:
3) Regardless, the current value at "mem" is returned.
As an example usage, here is what an atomic counter update
-might look like:
+might look like::
-void example_atomic_inc(long *counter)
-{
- long old, new, ret;
+ void example_atomic_inc(long *counter)
+ {
+ long old, new, ret;
- while (1) {
- old = *counter;
- new = old + 1;
+ while (1) {
+ old = *counter;
+ new = old + 1;
- ret = cas(counter, old, new);
- if (ret == old)
- break;
- }
-}
-
-Let's use cas() in order to build a pseudo-C atomic_dec_and_lock():
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
- long old, new, ret;
- int went_to_zero;
-
- went_to_zero = 0;
- while (1) {
- old = atomic_read(atomic);
- new = old - 1;
- if (new == 0) {
- went_to_zero = 1;
- spin_lock(lock);
- }
- ret = cas(atomic, old, new);
- if (ret == old)
- break;
- if (went_to_zero) {
- spin_unlock(lock);
- went_to_zero = 0;
+ ret = cas(counter, old, new);
+ if (ret == old)
+ break;
}
}
- return went_to_zero;
-}
+Let's use cas() in order to build a pseudo-C atomic_dec_and_lock()::
+
+ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+ {
+ long old, new, ret;
+ int went_to_zero;
+
+ went_to_zero = 0;
+ while (1) {
+ old = atomic_read(atomic);
+ new = old - 1;
+ if (new == 0) {
+ went_to_zero = 1;
+ spin_lock(lock);
+ }
+ ret = cas(atomic, old, new);
+ if (ret == old)
+ break;
+ if (went_to_zero) {
+ spin_unlock(lock);
+ went_to_zero = 0;
+ }
+ }
+
+ return went_to_zero;
+ }
Now, as far as memory barriers go, as long as spin_lock()
strictly orders all subsequent memory operations (including
@@ -635,6 +652,7 @@ Said another way, _atomic_dec_and_lock() must guarantee that
a counter dropping to zero is never made visible before the
spinlock being acquired.
-Note that this also means that for the case where the counter
-is not dropping to zero, there are no memory ordering
-requirements.
+.. note::
+
+ This also means that for the case where the counter is not
+ dropping to zero, there are no memory ordering requirements.
diff --git a/Documentation/core-api/conf.py b/Documentation/core-api/conf.py
new file mode 100644
index 000000000000..db1f7659f3da
--- /dev/null
+++ b/Documentation/core-api/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Core-API Documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'core-api.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/core-api/debug-objects.rst b/Documentation/core-api/debug-objects.rst
new file mode 100644
index 000000000000..ac926fd55a64
--- /dev/null
+++ b/Documentation/core-api/debug-objects.rst
@@ -0,0 +1,310 @@
+============================================
+The object-lifetime debugging infrastructure
+============================================
+
+:Author: Thomas Gleixner
+
+Introduction
+============
+
+debugobjects is a generic infrastructure to track the lifetime of
+kernel objects and validate the operations on those.
+
+debugobjects is useful to check for the following error patterns:
+
+- Activation of uninitialized objects
+
+- Initialization of active objects
+
+- Usage of freed/destroyed objects
+
+debugobjects does not change the data structure of the real object, so it
+can be compiled in with minimal runtime impact and enabled on demand
+with a kernel command line option.
+
+Howto use debugobjects
+======================
+
+A kernel subsystem needs to provide a data structure which describes the
+object type and add calls into the debug code at appropriate places. The
+data structure to describe the object type needs at minimum the name of
+the object type. Optional functions can and should be provided to fix up
+detected problems so the kernel can continue to work and the debug
+information can be retrieved from a live system instead of requiring
+hardcore debugging with serial consoles and stack trace transcripts from
+the monitor.
+
+The debug calls provided by debugobjects are:
+
+- debug_object_init
+
+- debug_object_init_on_stack
+
+- debug_object_activate
+
+- debug_object_deactivate
+
+- debug_object_destroy
+
+- debug_object_free
+
+- debug_object_assert_init
+
+Each of these functions takes the address of the real object and a
+pointer to the object type specific debug description structure.
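+
+A rough sketch of how a subsystem might tie these together is shown below.
+``struct my_timer``, ``my_timer_stop()`` and the descriptor name are invented
+for the example and only the fixup_init hook is shown::
+
+    #include <linux/debugobjects.h>
+
+    static struct debug_obj_descr my_timer_debug_descr;
+
+    /* Called when debug_object_init() hits a problem, e.g. re-initialization
+     * of an active object; repair the damage and report success. */
+    static bool my_timer_fixup_init(void *addr, enum debug_obj_state state)
+    {
+            struct my_timer *timer = addr;
+
+            switch (state) {
+            case ODEBUG_STATE_ACTIVE:
+                    my_timer_stop(timer);
+                    debug_object_init(timer, &my_timer_debug_descr);
+                    return true;
+            default:
+                    return false;
+            }
+    }
+
+    static struct debug_obj_descr my_timer_debug_descr = {
+            .name           = "my_timer",
+            .fixup_init     = my_timer_fixup_init,
+    };
+
+    void my_timer_init(struct my_timer *timer)
+    {
+            debug_object_init(timer, &my_timer_debug_descr);
+            /* ... the real initialization of *timer goes here ... */
+    }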
+
+Each detected error is reported in the statistics and a limited number
+of errors are printk'ed including a full stack trace.
+
+The statistics are available via /sys/kernel/debug/debug_objects/stats.
+They provide information about the number of warnings and the number of
+successful fixups along with information about the usage of the internal
+tracking objects and the state of the internal tracking objects pool.
+
+Debug functions
+===============
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_init
+
+This function is called whenever the initialization function of a real
+object is called.
+
+When the real object is already tracked by debugobjects it is checked,
+whether the object can be initialized. Initializing is not allowed for
+active and destroyed objects. When debugobjects detects an error, then
+it calls the fixup_init function of the object type description
+structure if provided by the caller. The fixup function can correct the
+problem before the real initialization of the object happens. E.g. it
+can deactivate an active object in order to prevent damage to the
+subsystem.
+
+When the real object is not yet tracked by debugobjects, debugobjects
+allocates a tracker object for the real object and sets the tracker
+object state to ODEBUG_STATE_INIT. It verifies that the object is not
+on the caller's stack. If it is on the caller's stack then a limited
+number of warnings including a full stack trace is printk'ed. The
+calling code must use debug_object_init_on_stack() and remove the
+object before leaving the function which allocated it. See next section.
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_init_on_stack
+
+This function is called whenever the initialization function of a real
+object which resides on the stack is called.
+
+When the real object is already tracked by debugobjects it is checked,
+whether the object can be initialized. Initializing is not allowed for
+active and destroyed objects. When debugobjects detects an error, then
+it calls the fixup_init function of the object type description
+structure if provided by the caller. The fixup function can correct the
+problem before the real initialization of the object happens. E.g. it
+can deactivate an active object in order to prevent damage to the
+subsystem.
+
+When the real object is not yet tracked by debugobjects debugobjects
+allocates a tracker object for the real object and sets the tracker
+object state to ODEBUG_STATE_INIT. It verifies that the object is on
+the caller's stack.
+
+An object which is on the stack must be removed from the tracker by
+calling debug_object_free() before the function which allocates the
+object returns. Otherwise we keep track of stale objects.
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_activate
+
+This function is called whenever the activation function of a real
+object is called.
+
+When the real object is already tracked by debugobjects it is checked,
+whether the object can be activated. Activating is not allowed for
+active and destroyed objects. When debugobjects detects an error, then
+it calls the fixup_activate function of the object type description
+structure if provided by the caller. The fixup function can correct the
+problem before the real activation of the object happens. E.g. it can
+deactivate an active object in order to prevent damage to the subsystem.
+
+When the real object is not yet tracked by debugobjects then the
+fixup_activate function is called if available. This is necessary to
+allow the legitimate activation of statically allocated and initialized
+objects. The fixup function checks whether the object is valid and calls
+the debug_object_init() function to initialize the tracking of this
+object.
+
+When the activation is legitimate, then the state of the associated
+tracker object is set to ODEBUG_STATE_ACTIVE.
+
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_deactivate
+
+This function is called whenever the deactivation function of a real
+object is called.
+
+When the real object is tracked by debugobjects it is checked, whether
+the object can be deactivated. Deactivating is not allowed for untracked
+or destroyed objects.
+
+When the deactivation is legitimate, then the state of the associated
+tracker object is set to ODEBUG_STATE_INACTIVE.
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_destroy
+
+This function is called to mark an object destroyed. This is useful to
+prevent the usage of invalid objects, which are still available in
+memory: either statically allocated objects or objects which are freed
+later.
+
+When the real object is tracked by debugobjects it is checked, whether
+the object can be destroyed. Destruction is not allowed for active and
+destroyed objects. When debugobjects detects an error, then it calls the
+fixup_destroy function of the object type description structure if
+provided by the caller. The fixup function can correct the problem
+before the real destruction of the object happens. E.g. it can
+deactivate an active object in order to prevent damage to the subsystem.
+
+When the destruction is legitimate, then the state of the associated
+tracker object is set to ODEBUG_STATE_DESTROYED.
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_free
+
+This function is called before an object is freed.
+
+When the real object is tracked by debugobjects it is checked, whether
+the object can be freed. Free is not allowed for active objects. When
+debugobjects detects an error, then it calls the fixup_free function of
+the object type description structure if provided by the caller. The
+fixup function can correct the problem before the real free of the
+object happens. E.g. it can deactivate an active object in order to
+prevent damage to the subsystem.
+
+Note that debug_object_free removes the object from the tracker. Later
+usage of the object is detected by the other debug checks.
+
+
+.. kernel-doc:: lib/debugobjects.c
+ :functions: debug_object_assert_init
+
+This function is called to assert that an object has been initialized.
+
+When the real object is not tracked by debugobjects, it calls
+fixup_assert_init of the object type description structure provided by
+the caller, with the hardcoded object state ODEBUG_STATE_NOTAVAILABLE. The
+fixup function can correct the problem by calling debug_object_init
+and other specific initializing functions.
+
+When the real object is already tracked by debugobjects it is ignored.
+
+Fixup functions
+===============
+
+Debug object type description structure
+---------------------------------------
+
+.. kernel-doc:: include/linux/debugobjects.h
+ :internal:
+
+fixup_init
+-----------
+
+This function is called from the debug code whenever a problem in
+debug_object_init is detected. The function takes the address of the
+object and the state which is currently recorded in the tracker.
+
+Called from debug_object_init when the object state is:
+
+- ODEBUG_STATE_ACTIVE
+
+The function returns true when the fixup was successful, otherwise
+false. The return value is used to update the statistics.
+
+Note that the function needs to call the debug_object_init() function
+again after the damage has been repaired in order to keep the state
+consistent.
+
+fixup_activate
+---------------
+
+This function is called from the debug code whenever a problem in
+debug_object_activate is detected.
+
+Called from debug_object_activate when the object state is:
+
+- ODEBUG_STATE_NOTAVAILABLE
+
+- ODEBUG_STATE_ACTIVE
+
+The function returns true when the fixup was successful, otherwise
+false. The return value is used to update the statistics.
+
+Note that the function needs to call the debug_object_activate()
+function again after the damage has been repaired in order to keep the
+state consistent.
+
+The activation of statically initialized objects is a special case. When
+debug_object_activate() has no tracked object for this object address
+then fixup_activate() is called with object state
+ODEBUG_STATE_NOTAVAILABLE. The fixup function needs to check whether
+this is a legitimate case of a statically initialized object or not. In
+case it is, it calls debug_object_init() and debug_object_activate()
+to make the object known to the tracker and mark it active. In this case
+the function should return false because this is not a real fixup.
+
+fixup_destroy
+--------------
+
+This function is called from the debug code whenever a problem in
+debug_object_destroy is detected.
+
+Called from debug_object_destroy when the object state is:
+
+- ODEBUG_STATE_ACTIVE
+
+The function returns true when the fixup was successful, otherwise
+false. The return value is used to update the statistics.
+
+fixup_free
+-----------
+
+This function is called from the debug code whenever a problem in
+debug_object_free is detected. Further it can be called from the debug
+checks in kfree/vfree, when an active object is detected from the
+debug_check_no_obj_freed() sanity checks.
+
+Called from debug_object_free() or debug_check_no_obj_freed() when
+the object state is:
+
+- ODEBUG_STATE_ACTIVE
+
+The function returns true when the fixup was successful, otherwise
+false. The return value is used to update the statistics.
+
+fixup_assert_init
+-------------------
+
+This function is called from the debug code whenever a problem in
+debug_object_assert_init is detected.
+
+Called from debug_object_assert_init() with a hardcoded state
+ODEBUG_STATE_NOTAVAILABLE when the object is not found in the debug
+bucket.
+
+The function returns true when the fixup was successful, otherwise
+false. The return value is used to update the statistics.
+
+Note, this function should make sure debug_object_init() is called
+before returning.
+
+The handling of statically initialized objects is a special case. The
+fixup function should check if this is a legitimate case of a statically
+initialized object or not. In this case only debug_object_init()
+should be called to make the object known to the tracker. Then the
+function should return false because this is not a real fixup.
+
+Known Bugs And Assumptions
+==========================
+
+None (knock on wood).
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
new file mode 100644
index 000000000000..2872ca1a52f1
--- /dev/null
+++ b/Documentation/core-api/index.rst
@@ -0,0 +1,33 @@
+======================
+Core API Documentation
+======================
+
+This is the beginning of a manual for core kernel APIs. The conversion
+(and writing!) of documents for this manual is much appreciated!
+
+Core utilities
+==============
+
+.. toctree::
+ :maxdepth: 1
+
+ assoc_array
+ atomic_ops
+ local_ops
+ workqueue
+
+Interfaces for kernel debugging
+===============================
+
+.. toctree::
+ :maxdepth: 1
+
+ debug-objects
+ tracepoint
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/core-api/local_ops.rst b/Documentation/core-api/local_ops.rst
new file mode 100644
index 000000000000..1062ddba62c7
--- /dev/null
+++ b/Documentation/core-api/local_ops.rst
@@ -0,0 +1,206 @@
+
+.. _local_ops:
+
+=================================================
+Semantics and Behavior of Local Atomic Operations
+=================================================
+
+:Author: Mathieu Desnoyers
+
+
+This document explains the purpose of the local atomic operations, how
+to implement them for any given architecture and shows how they can be used
+properly. It also stresses the precautions that must be taken when reading
+those local variables across CPUs when the order of memory writes matters.
+
+.. note::
+
+ Note that ``local_t`` based operations are not recommended for general
+ kernel use. Please use the ``this_cpu`` operations instead unless there is
+ really a special purpose. Most uses of ``local_t`` in the kernel have been
+ replaced by ``this_cpu`` operations. ``this_cpu`` operations combine the
+ relocation with ``local_t``-like semantics in a single instruction and
+ yield more compact and faster executing code.
+
+
+Purpose of local atomic operations
+==================================
+
+Local atomic operations are meant to provide fast and highly reentrant per CPU
+counters. They minimize the performance cost of standard atomic operations by
+removing the LOCK prefix and memory barriers normally required to synchronize
+across CPUs.
+
+Having fast per CPU atomic counters is interesting in many cases: it does not
+require disabling interrupts to protect from interrupt handlers and it permits
+coherent counters in NMI handlers. It is especially useful for tracing purposes
+and for various performance monitoring counters.
+
+Local atomic operations only guarantee variable modification atomicity wrt the
+CPU which owns the data. Therefore, care must be taken to make sure that only one
+CPU writes to the ``local_t`` data. This is done by using per cpu data and
+making sure that we modify it from within a preemption safe context. It is
+however permitted to read ``local_t`` data from any CPU: it will then appear to
+be written out of order wrt other memory writes by the owner CPU.
+
+
+Implementation for a given architecture
+=======================================
+
+It can be done by slightly modifying the standard atomic operations: only
+their UP variant must be kept. It typically means removing LOCK prefix (on
+i386 and x86_64) and any SMP synchronization barrier. If the architecture does
+not have a different behavior between SMP and UP, including
+``asm-generic/local.h`` in your architecture's ``local.h`` is sufficient.
+
+The ``local_t`` type is defined as an opaque ``signed long`` by embedding an
+``atomic_long_t`` inside a structure. This is made so a cast from this type to
+a ``long`` fails. The definition looks like::
+
+ typedef struct { atomic_long_t a; } local_t;
+
+
+Rules to follow when using local atomic operations
+==================================================
+
+* Variables touched by local ops must be per cpu variables.
+* *Only* the CPU owner of these variables must write to them.
+* This CPU can use local ops from any context (process, irq, softirq, nmi, ...)
+ to update its ``local_t`` variables.
+* Preemption (or interrupts) must be disabled when using local ops in
+ process context to make sure the process won't be migrated to a
+ different CPU between getting the per-cpu variable and doing the
+ actual local op.
+* When using local ops in interrupt context, no special care must be
+ taken on a mainline kernel, since they will run on the local CPU with
+ preemption already disabled. I suggest, however, to explicitly
+ disable preemption anyway to make sure it will still work correctly on
+ -rt kernels.
+* Reading the local cpu variable will provide the current copy of the
+ variable.
+* Reads of these variables can be done from any CPU, because updates to
+ "``long``", aligned, variables are always atomic. Since no memory
+ synchronization is done by the writer CPU, an outdated copy of the
+ variable can be read when reading some *other* cpu's variables.
+
+
+How to use local atomic operations
+==================================
+
+::
+
+ #include <linux/percpu.h>
+ #include <asm/local.h>
+
+ static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
+
+
+Counting
+========
+
+Counting is done on all the bits of a signed long.
+
+In preemptible context, use ``get_cpu_var()`` and ``put_cpu_var()`` around
+local atomic operations: it makes sure that preemption is disabled around write
+access to the per cpu variable. For instance::
+
+ local_inc(&get_cpu_var(counters));
+ put_cpu_var(counters);
+
+If you are already in a preemption-safe context, you can use
+``this_cpu_ptr()`` instead::
+
+ local_inc(this_cpu_ptr(&counters));
+
+
+
+Reading the counters
+====================
+
+Those local counters can be read from foreign CPUs to sum the count. Note that
+the data seen by local_read across CPUs must be considered to be out of order
+relative to other memory writes happening on the CPU that owns the data::
+
+ long sum = 0;
+ for_each_online_cpu(cpu)
+ sum += local_read(&per_cpu(counters, cpu));
+
+If you want to use a remote local_read to synchronize access to a resource
+between CPUs, explicit ``smp_wmb()`` and ``smp_rmb()`` memory barriers must be used
+respectively on the writer and the reader CPUs. It would be the case if you use
+the ``local_t`` variable as a counter of bytes written in a buffer: there should
+be a ``smp_wmb()`` between the buffer write and the counter increment and also a
+``smp_rmb()`` between the counter read and the buffer read.
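+
+A hedged sketch of that pairing (the buffer, its size and the helper functions
+are made up for the example; only the barrier placement matters)::
+
+    static DEFINE_PER_CPU(local_t, bytes_written) = LOCAL_INIT(0);
+    static char buf[4096];
+
+    /* Writer: runs on the CPU that owns bytes_written. */
+    static void writer_publish(const void *data, size_t len, size_t off)
+    {
+            memcpy(buf + off, data, len);
+            smp_wmb();      /* buffer contents visible before the counter */
+            local_add(len, this_cpu_ptr(&bytes_written));
+    }
+
+    /* Reader: may run on any CPU. */
+    static long reader_snapshot(int cpu, void *dst)
+    {
+            long n = local_read(&per_cpu(bytes_written, cpu));
+
+            smp_rmb();      /* counter read before the buffer contents */
+            memcpy(dst, buf, n);
+            return n;
+    }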
+
+
+Here is a sample module which implements a basic per cpu counter using
+``local.h``::
+
+ /* test-local.c
+ *
+ * Sample module for local.h usage.
+ */
+
+
+ #include <asm/local.h>
+ #include <linux/module.h>
+ #include <linux/timer.h>
+
+ static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
+
+ static struct timer_list test_timer;
+
+ /* IPI called on each CPU. */
+ static void test_each(void *info)
+ {
+ /* Increment the counter from a non preemptible context */
+ printk("Increment on cpu %d\n", smp_processor_id());
+ local_inc(this_cpu_ptr(&counters));
+
+ /* This is what incrementing the variable would look like within a
+ * preemptible context (it disables preemption) :
+ *
+ * local_inc(&get_cpu_var(counters));
+ * put_cpu_var(counters);
+ */
+ }
+
+ static void do_test_timer(unsigned long data)
+ {
+ int cpu;
+
+ /* Increment the counters */
+ on_each_cpu(test_each, NULL, 1);
+ /* Read all the counters */
+ printk("Counters read from CPU %d\n", smp_processor_id());
+ for_each_online_cpu(cpu) {
+ printk("Read : CPU %d, count %ld\n", cpu,
+ local_read(&per_cpu(counters, cpu)));
+ }
+ del_timer(&test_timer);
+ test_timer.expires = jiffies + 1000;
+ add_timer(&test_timer);
+ }
+
+ static int __init test_init(void)
+ {
+ /* initialize the timer that will increment the counter */
+ init_timer(&test_timer);
+ test_timer.function = do_test_timer;
+ test_timer.expires = jiffies + 1;
+ add_timer(&test_timer);
+
+ return 0;
+ }
+
+ static void __exit test_exit(void)
+ {
+ del_timer_sync(&test_timer);
+ }
+
+ module_init(test_init);
+ module_exit(test_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Mathieu Desnoyers");
+ MODULE_DESCRIPTION("Local Atomic Ops");
diff --git a/Documentation/core-api/tracepoint.rst b/Documentation/core-api/tracepoint.rst
new file mode 100644
index 000000000000..6b44bec0de43
--- /dev/null
+++ b/Documentation/core-api/tracepoint.rst
@@ -0,0 +1,55 @@
+===============================
+The Linux Kernel Tracepoint API
+===============================
+
+:Author: Jason Baron
+:Author: William Cohen
+
+Introduction
+============
+
+Tracepoints are static probe points that are located in strategic points
+throughout the kernel. 'Probes' register/unregister with tracepoints via
+a callback mechanism. The 'probes' are strictly typed functions that are
+passed a unique set of parameters defined by each tracepoint.
+
+From this simple callback mechanism, 'probes' can be used to profile,
+debug, and understand kernel behavior. There are a number of tools that
+provide a framework for using 'probes'. These tools include Systemtap,
+ftrace, and LTTng.
+
+Tracepoints are defined in a number of header files via various macros.
+Thus, the purpose of this document is to provide a clear accounting of
+the available tracepoints. The intention is to understand not only what
+tracepoints are available but also to understand where future
+tracepoints might be added.
+
+The API presented has functions of the form:
+``trace_tracepointname(function parameters)``. These are the tracepoint
+callbacks that are found throughout the code. Registering and
+unregistering probes with these callback sites is covered in the
+``Documentation/trace/*`` directory.
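+
+As a rough, hedged sketch of the probe side, assuming the ``irq_handler_entry``
+tracepoint from ``include/trace/events/irq.h`` is available to the module; the
+probe prototype must match the tracepoint's ``TP_PROTO`` plus a leading
+``void *`` data argument::
+
+    #include <linux/interrupt.h>
+    #include <linux/module.h>
+    #include <trace/events/irq.h>
+
+    /* Probe: receives the registered data pointer plus the tracepoint args. */
+    static void probe_irq_handler_entry(void *data, int irq,
+                                        struct irqaction *action)
+    {
+            pr_info("irq %d handled by %s\n", irq, action->name);
+    }
+
+    static int __init my_probe_init(void)
+    {
+            return register_trace_irq_handler_entry(probe_irq_handler_entry,
+                                                    NULL);
+    }
+
+    static void __exit my_probe_exit(void)
+    {
+            unregister_trace_irq_handler_entry(probe_irq_handler_entry, NULL);
+            tracepoint_synchronize_unregister();
+    }
+
+    module_init(my_probe_init);
+    module_exit(my_probe_exit);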
+
+IRQ
+===
+
+.. kernel-doc:: include/trace/events/irq.h
+ :internal:
+
+SIGNAL
+======
+
+.. kernel-doc:: include/trace/events/signal.h
+ :internal:
+
+Block IO
+========
+
+.. kernel-doc:: include/trace/events/block.h
+ :internal:
+
+Workqueue
+=========
+
+.. kernel-doc:: include/trace/events/workqueue.h
+ :internal:
diff --git a/Documentation/workqueue.txt b/Documentation/core-api/workqueue.rst
index c49e3178178d..ffdec94fbca1 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/core-api/workqueue.rst
@@ -1,21 +1,14 @@
-
+====================================
Concurrency Managed Workqueue (cmwq)
+====================================
-September, 2010 Tejun Heo <tj@kernel.org>
- Florian Mickler <florian@mickler.org>
-
-CONTENTS
-
-1. Introduction
-2. Why cmwq?
-3. The Design
-4. Application Programming Interface (API)
-5. Example Execution Scenarios
-6. Guidelines
-7. Debugging
+:Date: September, 2010
+:Author: Tejun Heo <tj@kernel.org>
+:Author: Florian Mickler <florian@mickler.org>
-1. Introduction
+Introduction
+============
There are many cases where an asynchronous process execution context
is needed and the workqueue (wq) API is the most commonly used
@@ -32,7 +25,8 @@ there is no work item left on the workqueue the worker becomes idle.
When a new work item gets queued, the worker begins executing again.
-2. Why cmwq?
+Why cmwq?
+=========
In the original wq implementation, a multi threaded (MT) wq had one
worker thread per CPU and a single threaded (ST) wq had one worker
@@ -71,7 +65,8 @@ focus on the following goals.
the API users don't need to worry about such details.
-3. The Design
+The Design
+==========
In order to ease the asynchronous execution of functions a new
abstraction, the work item, is introduced.
@@ -102,7 +97,7 @@ aspects of the way the work items are executed by setting flags on the
workqueue they are putting the work item on. These flags include
things like CPU locality, concurrency limits, priority and more. To
get a detailed overview refer to the API description of
-alloc_workqueue() below.
+``alloc_workqueue()`` below.
When a work item is queued to a workqueue, the target worker-pool is
determined according to the queue parameters and workqueue attributes
@@ -136,7 +131,7 @@ them.
For unbound workqueues, the number of backing pools is dynamic.
Unbound workqueue can be assigned custom attributes using
-apply_workqueue_attrs() and workqueue will automatically create
+``apply_workqueue_attrs()`` and workqueue will automatically create
backing worker pools matching the attributes. The responsibility of
regulating concurrency level is on the users. There is also a flag to
mark a bound wq to ignore the concurrency management. Please refer to
@@ -151,94 +146,95 @@ pressure. Else it is possible that the worker-pool deadlocks waiting
for execution contexts to free up.
-4. Application Programming Interface (API)
+Application Programming Interface (API)
+=======================================
-alloc_workqueue() allocates a wq. The original create_*workqueue()
-functions are deprecated and scheduled for removal. alloc_workqueue()
-takes three arguments - @name, @flags and @max_active. @name is the
-name of the wq and also used as the name of the rescuer thread if
-there is one.
+``alloc_workqueue()`` allocates a wq. The original
+``create_*workqueue()`` functions are deprecated and scheduled for
+removal. ``alloc_workqueue()`` takes three arguments - ``@name``,
+``@flags`` and ``@max_active``. ``@name`` is the name of the wq and
+is also used as the name of the rescuer thread if there is one.
A wq no longer manages execution resources but serves as a domain for
-forward progress guarantee, flush and work item attributes. @flags
-and @max_active control how work items are assigned execution
+forward progress guarantee, flush and work item attributes. ``@flags``
+and ``@max_active`` control how work items are assigned execution
resources, scheduled and executed.
-@flags:
-
- WQ_UNBOUND
-
- Work items queued to an unbound wq are served by the special
- worker-pools which host workers which are not bound to any
- specific CPU. This makes the wq behave as a simple execution
- context provider without concurrency management. The unbound
- worker-pools try to start execution of work items as soon as
- possible. Unbound wq sacrifices locality but is useful for
- the following cases.
-
- * Wide fluctuation in the concurrency level requirement is
- expected and using bound wq may end up creating large number
- of mostly unused workers across different CPUs as the issuer
- hops through different CPUs.
-
- * Long running CPU intensive workloads which can be better
- managed by the system scheduler.
-
- WQ_FREEZABLE
-
- A freezable wq participates in the freeze phase of the system
- suspend operations. Work items on the wq are drained and no
- new work item starts execution until thawed.
-
- WQ_MEM_RECLAIM
-
- All wq which might be used in the memory reclaim paths _MUST_
- have this flag set. The wq is guaranteed to have at least one
- execution context regardless of memory pressure.
-
- WQ_HIGHPRI
- Work items of a highpri wq are queued to the highpri
- worker-pool of the target cpu. Highpri worker-pools are
- served by worker threads with elevated nice level.
-
- Note that normal and highpri worker-pools don't interact with
- each other. Each maintain its separate pool of workers and
- implements concurrency management among its workers.
-
- WQ_CPU_INTENSIVE
-
- Work items of a CPU intensive wq do not contribute to the
- concurrency level. In other words, runnable CPU intensive
- work items will not prevent other work items in the same
- worker-pool from starting execution. This is useful for bound
- work items which are expected to hog CPU cycles so that their
- execution is regulated by the system scheduler.
-
- Although CPU intensive work items don't contribute to the
- concurrency level, start of their executions is still
- regulated by the concurrency management and runnable
- non-CPU-intensive work items can delay execution of CPU
- intensive work items.
-
- This flag is meaningless for unbound wq.
-
-Note that the flag WQ_NON_REENTRANT no longer exists as all workqueues
-are now non-reentrant - any work item is guaranteed to be executed by
-at most one worker system-wide at any given time.
-
-@max_active:
-
-@max_active determines the maximum number of execution contexts per
-CPU which can be assigned to the work items of a wq. For example,
-with @max_active of 16, at most 16 work items of the wq can be
+``flags``
+---------
+
+``WQ_UNBOUND``
+ Work items queued to an unbound wq are served by the special
+ worker-pools which host workers which are not bound to any
+ specific CPU. This makes the wq behave as a simple execution
+ context provider without concurrency management. The unbound
+ worker-pools try to start execution of work items as soon as
+ possible. Unbound wq sacrifices locality but is useful for
+ the following cases.
+
+ * Wide fluctuation in the concurrency level requirement is
+ expected and using bound wq may end up creating large number
+ of mostly unused workers across different CPUs as the issuer
+ hops through different CPUs.
+
+ * Long running CPU intensive workloads which can be better
+ managed by the system scheduler.
+
+``WQ_FREEZABLE``
+ A freezable wq participates in the freeze phase of the system
+ suspend operations. Work items on the wq are drained and no
+ new work item starts execution until thawed.
+
+``WQ_MEM_RECLAIM``
+ All wq which might be used in the memory reclaim paths **MUST**
+ have this flag set. The wq is guaranteed to have at least one
+ execution context regardless of memory pressure.
+
+``WQ_HIGHPRI``
+ Work items of a highpri wq are queued to the highpri
+ worker-pool of the target cpu. Highpri worker-pools are
+ served by worker threads with elevated nice level.
+
+ Note that normal and highpri worker-pools don't interact with
+ each other. Each maintains its separate pool of workers and
+ implements concurrency management among its workers.
+
+``WQ_CPU_INTENSIVE``
+ Work items of a CPU intensive wq do not contribute to the
+ concurrency level. In other words, runnable CPU intensive
+ work items will not prevent other work items in the same
+ worker-pool from starting execution. This is useful for bound
+ work items which are expected to hog CPU cycles so that their
+ execution is regulated by the system scheduler.
+
+ Although CPU intensive work items don't contribute to the
+ concurrency level, start of their executions is still
+ regulated by the concurrency management and runnable
+ non-CPU-intensive work items can delay execution of CPU
+ intensive work items.
+
+ This flag is meaningless for unbound wq.
+
+Note that the flag ``WQ_NON_REENTRANT`` no longer exists as all
+workqueues are now non-reentrant - any work item is guaranteed to be
+executed by at most one worker system-wide at any given time.
+
+
+``max_active``
+--------------
+
+``@max_active`` determines the maximum number of execution contexts
+per CPU which can be assigned to the work items of a wq. For example,
+with ``@max_active`` of 16, at most 16 work items of the wq can be
executing at the same time per CPU.
-Currently, for a bound wq, the maximum limit for @max_active is 512
-and the default value used when 0 is specified is 256. For an unbound
-wq, the limit is higher of 512 and 4 * num_possible_cpus(). These
-values are chosen sufficiently high such that they are not the
-limiting factor while providing protection in runaway cases.
+Currently, for a bound wq, the maximum limit for ``@max_active`` is
+512 and the default value used when 0 is specified is 256. For an
+unbound wq, the limit is the higher of 512 and 4 *
+``num_possible_cpus()``. These values are chosen sufficiently high
+such that they are not the limiting factor while providing protection
+in runaway cases.
The number of active work items of a wq is usually regulated by the
users of the wq, more specifically, by how many work items the users
@@ -247,13 +243,14 @@ throttling the number of active work items, specifying '0' is
recommended.
Some users depend on the strict execution ordering of ST wq. The
-combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
-behavior. Work items on such wq are always queued to the unbound
-worker-pools and only one work item can be active at any given time thus
-achieving the same ordering property as ST wq.
+combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` is used to
+achieve this behavior. Work items on such wq are always queued to the
+unbound worker-pools and only one work item can be active at any given
+time thus achieving the same ordering property as ST wq.
-5. Example Execution Scenarios
+Example Execution Scenarios
+===========================
The following example execution scenarios try to illustrate how cmwq
behave under different configurations.
@@ -265,7 +262,7 @@ behave under different configurations.
Ignoring all other tasks, works and processing overhead, and assuming
simple FIFO scheduling, the following is one highly simplified version
-of possible sequences of events with the original wq.
+of possible sequences of events with the original wq. ::
TIME IN MSECS EVENT
0 w0 starts and burns CPU
@@ -279,7 +276,7 @@ of possible sequences of events with the original wq.
40 w2 sleeps
50 w2 wakes up and finishes
-And with cmwq with @max_active >= 3,
+And with cmwq with ``@max_active`` >= 3, ::
TIME IN MSECS EVENT
0 w0 starts and burns CPU
@@ -293,7 +290,7 @@ And with cmwq with @max_active >= 3,
20 w1 wakes up and finishes
25 w2 wakes up and finishes
-If @max_active == 2,
+If ``@max_active`` == 2, ::
TIME IN MSECS EVENT
0 w0 starts and burns CPU
@@ -308,7 +305,7 @@ If @max_active == 2,
35 w2 wakes up and finishes
Now, let's assume w1 and w2 are queued to a different wq q1 which has
-WQ_CPU_INTENSIVE set,
+``WQ_CPU_INTENSIVE`` set, ::
TIME IN MSECS EVENT
0 w0 starts and burns CPU
@@ -322,13 +319,15 @@ WQ_CPU_INTENSIVE set,
25 w2 wakes up and finishes
-6. Guidelines
+Guidelines
+==========
-* Do not forget to use WQ_MEM_RECLAIM if a wq may process work items
- which are used during memory reclaim. Each wq with WQ_MEM_RECLAIM
- set has an execution context reserved for it. If there is
- dependency among multiple work items used during memory reclaim,
- they should be queued to separate wq each with WQ_MEM_RECLAIM.
+* Do not forget to use ``WQ_MEM_RECLAIM`` if a wq may process work
+ items which are used during memory reclaim. Each wq with
+ ``WQ_MEM_RECLAIM`` set has an execution context reserved for it. If
+ there is a dependency among multiple work items used during memory
+ reclaim, they should be queued to separate wq each with
+ ``WQ_MEM_RECLAIM``.
* Unless strict ordering is required, there is no need to use ST wq.
@@ -337,30 +336,31 @@ WQ_CPU_INTENSIVE set,
well under the default limit.
* A wq serves as a domain for forward progress guarantee
- (WQ_MEM_RECLAIM, flush and work item attributes. Work items which
- are not involved in memory reclaim and don't need to be flushed as a
- part of a group of work items, and don't require any special
- attribute, can use one of the system wq. There is no difference in
- execution characteristics between using a dedicated wq and a system
- wq.
+ (``WQ_MEM_RECLAIM``), flush and work item attributes. Work items
+ which are not involved in memory reclaim and don't need to be
+ flushed as a part of a group of work items, and don't require any
+ special attribute, can use one of the system wq. There is no
+ difference in execution characteristics between using a dedicated wq
+ and a system wq.
* Unless work items are expected to consume a huge amount of CPU
cycles, using a bound wq is usually beneficial due to the increased
level of locality in wq operations and work item execution.
-7. Debugging
+Debugging
+=========
Because the work functions are executed by generic worker threads
there are a few tricks needed to shed some light on misbehaving
workqueue users.
-Worker threads show up in the process list as:
+Worker threads show up in the process list as: ::
-root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1]
-root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2]
-root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0]
-root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0]
+ root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1]
+ root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2]
+ root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0]
+ root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0]
If kworkers are going crazy (using too much cpu), there are two types
of possible problems:
@@ -368,7 +368,7 @@ of possible problems:
1. Something being scheduled in rapid succession
2. A single work item that consumes lots of cpu cycles
-The first one can be tracked using tracing:
+The first one can be tracked using tracing: ::
$ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event
$ cat /sys/kernel/debug/tracing/trace_pipe > out.txt
@@ -380,9 +380,15 @@ the output and the offender can be determined with the work item
function.
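+
+A rough sketch for digging through that capture, assuming the event's
+output contains a ``function=`` field (the exact format may vary
+between kernel versions)::
+
+    grep workqueue_queue_work out.txt | grep -o 'function=[^ ]*' | sort | uniq -c | sort -rn | head
+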
For the second type of problems it should be possible to just check
-the stack trace of the offending worker thread.
+the stack trace of the offending worker thread. ::
$ cat /proc/THE_OFFENDING_KWORKER/stack
The work item's function should be trivially visible in the stack
trace.
+
+
+Kernel Inline Documentation Reference
+======================================
+
+.. kernel-doc:: include/linux/workqueue.h
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index 8d9773f23550..3c355f6ad834 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -44,11 +44,17 @@ the stats driver insertion.
total 0
drwxr-xr-x 2 root root 0 May 14 16:06 .
drwxr-xr-x 3 root root 0 May 14 15:58 ..
+--w------- 1 root root 4096 May 14 16:06 reset
-r--r--r-- 1 root root 4096 May 14 16:06 time_in_state
-r--r--r-- 1 root root 4096 May 14 16:06 total_trans
-r--r--r-- 1 root root 4096 May 14 16:06 trans_table
--------------------------------------------------------------------------------
+- reset
+Write-only attribute that can be used to reset the stat counters. This can be
+useful for evaluating system behaviour under different governors without the
+need for a reboot.
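+
+For example (a sketch, assuming cpu0 and that writing any value clears
+the counters):
+
+# echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/stats/reset
+# cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state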
+
- time_in_state
This gives the amount of time spent in each of the frequencies supported by
this CPU. The cat output will have "<frequency> <time>" pair in each line, which
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index e6bd1e6512a5..1953994ef5e6 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -48,7 +48,7 @@ In addition to the frequency-controlling interfaces provided by the cpufreq
core, the driver provides its own sysfs files to control the P-State selection.
These files have been added to /sys/devices/system/cpu/intel_pstate/.
Any changes made to these files are applicable to all CPUs (even in a
-multi-package system).
+multi-package system; refer to the later section on "Per-CPU limits").
max_perf_pct: Limits the maximum P-State that will be requested by
the driver. It states it as a percentage of the available performance. The
@@ -120,13 +120,57 @@ frequency is fictional for Intel Core processors. Even if the scaling
driver selects a single P-State, the actual frequency the processor
will run at is selected by the processor itself.
+Per-CPU limits
+
+The kernel command line option "intel_pstate=per_cpu_perf_limits" forces
+the intel_pstate driver to use per-CPU performance limits. When it is set,
+the sysfs control interface described above is subject to limitations.
+- The following controls are not available for both read and write
+ /sys/devices/system/cpu/intel_pstate/max_perf_pct
+ /sys/devices/system/cpu/intel_pstate/min_perf_pct
+- The following controls can be used to set performance limits, as far as the
+architecture of the processor permits:
+ /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq
+ /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq
+ /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
+- The user can still observe the turbo percentage and the number of P-States from:
+ /sys/devices/system/cpu/intel_pstate/turbo_pct
+ /sys/devices/system/cpu/intel_pstate/num_pstates
+- The user can still read and write the system-wide turbo status:
+ /sys/devices/system/cpu/intel_pstate/no_turbo
+
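+For example, with "intel_pstate=per_cpu_perf_limits" on the kernel
+command line, an individual CPU can be capped like this (a sketch; the
+frequency is in kHz and the value is only illustrative):
+
+# echo 1200000 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_max_freq
+# cat /sys/devices/system/cpu/intel_pstate/turbo_pct
+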
+Support of energy performance hints
+It is possible to provide hints to the HWP algorithms in the processor
+to be more performance centric or more energy centric. When the driver
+is using HWP, two additional cpufreq sysfs attributes are presented for
+each logical CPU.
+These attributes are:
+ - energy_performance_available_preferences
+ - energy_performance_preference
+
+To get the list of supported hints:
+$ cat energy_performance_available_preferences
+ default performance balance_performance balance_power power
+
+The current preference can be read or changed via the cpufreq sysfs
+attribute "energy_performance_preference". Reading from this attribute
+will display the current effective setting. The user can write any of
+the valid preference strings to this attribute, and can always restore
+the power-on default by writing "default".
+
+Since threads can migrate to different CPUs, it is possible that the
+new CPU has a different energy performance preference than the previous
+one. To avoid such issues, either pin threads to specific CPUs or set
+the same energy performance preference value on all CPUs, as in the
+sketch below.
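+
+A sketch of applying one hint to every CPU (assuming HWP is active and
+the attribute is present for each policy):
+
+for p in /sys/devices/system/cpu/cpu*/cpufreq/energy_performance_preference; do
+        echo balance_power > "$p"
+done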
+
Tuning Intel P-State driver
-When HWP mode is not used, debugfs files have also been added to allow the
-tuning of the internal governor algorithm. These files are located at
-/sys/kernel/debug/pstate_snb/. The algorithm uses a PID (Proportional
-Integral Derivative) controller. The PID tunable parameters are:
+When performance can be tuned using a PID (Proportional Integral
+Derivative) controller, debugfs files are provided for adjusting performance.
+They are presented under:
+/sys/kernel/debug/pstate_snb/
+The PID tunable parameters are:
deadband
d_gain_pct
i_gain_pct
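+
+A quick way to inspect the current values of these tunables (a sketch;
+the directory is only present when the PID-based algorithm, not HWP, is
+in use):
+
+# grep . /sys/kernel/debug/pstate_snb/*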
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index dd68821c22d4..d02e8a451872 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -84,9 +84,9 @@ are added or removed anytime. Trimming it accurately for your system needs
upfront can save some boot time memory. See below for how we use heuristics
in x86_64 case to keep this under check.
-cpu_online_mask: Bitmap of all CPUs currently online. Its set in __cpu_up()
-after a cpu is available for kernel scheduling and ready to receive
-interrupts from devices. Its cleared when a cpu is brought down using
+cpu_online_mask: Bitmap of all CPUs currently online. It's set in __cpu_up()
+after a CPU is available for kernel scheduling and ready to receive
+interrupts from devices. It's cleared when a CPU is brought down using
__cpu_disable(), before which all OS services including interrupts are
migrated to another target CPU.
@@ -181,7 +181,7 @@ To support physical addition/removal, one would need some BIOS hooks and
the platform should have something like an attention button in PCI hotplug.
CONFIG_ACPI_HOTPLUG_CPU enables ACPI support for physical add/remove of CPUs.
-Q: How do i logically offline a CPU?
+Q: How do I logically offline a CPU?
A: Do the following.
#echo 0 > /sys/devices/system/cpu/cpuX/online
@@ -191,15 +191,15 @@ Once the logical offline is successful, check
#cat /proc/interrupts
You should now not see the CPU that you removed. Also online file will report
-the state as 0 when a cpu if offline and 1 when its online.
+the state as 0 when a CPU is offline and 1 when it's online.
#To display the current cpu state.
#cat /sys/devices/system/cpu/cpuX/online
-Q: Why can't i remove CPU0 on some systems?
+Q: Why can't I remove CPU0 on some systems?
A: Some architectures may have some special dependency on a certain CPU.
-For e.g in IA64 platforms we have ability to sent platform interrupts to the
+For example, on IA64 platforms we have the ability to send platform interrupts to the
OS. a.k.a Corrected Platform Error Interrupts (CPEI). In current ACPI
specifications, we didn't have a way to change the target CPU. Hence if the
current ACPI version doesn't support such re-direction, we disable that CPU
@@ -231,7 +231,7 @@ either by CONFIG_BOOTPARAM_HOTPLUG_CPU0 or by kernel parameter cpu0_hotplug.
--Fenghua Yu <fenghua.yu@intel.com>
-Q: How do i find out if a particular CPU is not removable?
+Q: How do I find out if a particular CPU is not removable?
A: Depending on the implementation, some architectures may show this by the
absence of the "online" file. This is done if it can be determined ahead of
time that this CPU cannot be removed.
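+
+A minimal check, following the convention above (a sketch; replace cpuX
+with the CPU of interest):
+
+#test -f /sys/devices/system/cpu/cpuX/online || echo "cpuX is not removable"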
@@ -250,7 +250,7 @@ A: The following happen, listed in no particular order :-)
- All processes are migrated away from this outgoing CPU to new CPUs.
The new CPU is chosen from each process' current cpuset, which may be
a subset of all online CPUs.
-- All interrupts targeted to this CPU is migrated to a new CPU
+- All interrupts targeted to this CPU are migrated to a new CPU
- timers/bottom half/task lets are also migrated to a new CPU
- Once all services are migrated, kernel calls an arch specific routine
__cpu_disable() to perform arch specific cleanup.
@@ -259,10 +259,10 @@ A: The following happen, listed in no particular order :-)
CPU is being offlined).
"It is expected that each service cleans up when the CPU_DOWN_PREPARE
- notifier is called, when CPU_DEAD is called its expected there is nothing
+ notifier is called, when CPU_DEAD is called it's expected there is nothing
running on behalf of this CPU that was offlined"
-Q: If i have some kernel code that needs to be aware of CPU arrival and
+Q: If I have some kernel code that needs to be aware of CPU arrival and
departure, how to i arrange for proper notification?
A: This is what you would need in your kernel code to receive notifications.
@@ -311,7 +311,7 @@ things will happen if a notifier in path sent a BAD notify code.
Q: I don't see my action being called for all CPUs already up and running?
A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
- If you need to perform some action for each cpu already in the system, then
+ If you need to perform some action for each CPU already in the system, then
do this:
for_each_online_cpu(i) {
@@ -363,8 +363,8 @@ A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
callbacks as well as initialize the already online CPUs.
-Q: If i would like to develop cpu hotplug support for a new architecture,
- what do i need at a minimum?
+Q: If I would like to develop CPU hotplug support for a new architecture,
+ what do I need at a minimum?
A: The following are what is required for CPU hotplug infrastructure to work
correctly.
@@ -382,8 +382,8 @@ A: The following are what is required for CPU hotplug infrastructure to work
per_cpu state to be set, to ensure the processor
dead routine is called to be sure positively.
-Q: I need to ensure that a particular cpu is not removed when there is some
- work specific to this cpu is in progress.
+Q: I need to ensure that a particular CPU is not removed when there is some
+ work specific to this CPU in progress.
A: There are two ways. If your code can be run in interrupt context, use
smp_call_function_single(), otherwise use work_on_cpu(). Note that
work_on_cpu() is slow, and can fail due to out of memory:
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index beda682e8d77..45d943fcae5b 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -44,12 +44,9 @@ one block while the former can operate on an arbitrary amount of data,
subject to block size requirements (i.e., non-stream ciphers can only
process multiples of blocks).
-Support for hardware crypto devices via an asynchronous interface is
-under development.
-
Here's an example of how to use the API:
- #include <crypto/ahash.h>
+ #include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
diff --git a/Documentation/dev-tools/conf.py b/Documentation/dev-tools/conf.py
new file mode 100644
index 000000000000..7faafa3f7888
--- /dev/null
+++ b/Documentation/dev-tools/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Development tools for the kernel"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'dev-tools.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
index 19eedfea8800..69a7d90c320a 100644
--- a/Documentation/dev-tools/gcov.rst
+++ b/Documentation/dev-tools/gcov.rst
@@ -201,7 +201,9 @@ Appendix A: gather_on_build.sh
------------------------------
Sample script to gather coverage meta files on the build machine
-(see 6a)::
+(see 6a):
+
+.. code-block:: sh
#!/bin/bash
@@ -232,7 +234,9 @@ Appendix B: gather_on_test.sh
-----------------------------
Sample script to gather coverage data files on the test machine
-(see 6b)::
+(see 6b):
+
+.. code-block:: sh
#!/bin/bash -e
diff --git a/Documentation/dev-tools/tools.rst b/Documentation/dev-tools/index.rst
index 824ae8e54dd5..07d881147ef3 100644
--- a/Documentation/dev-tools/tools.rst
+++ b/Documentation/dev-tools/index.rst
@@ -23,3 +23,11 @@ whole; patches welcome!
kmemleak
kmemcheck
gdb-kernel-debugging
+
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index aca0e27ca197..2c41b713841f 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -24,7 +24,9 @@ Profiling data will only become accessible once debugfs has been mounted::
mount -t debugfs none /sys/kernel/debug
-The following program demonstrates kcov usage from within a test program::
+The following program demonstrates kcov usage from within a test program:
+
+.. code-block:: c
#include <stdio.h>
#include <stddef.h>
diff --git a/Documentation/development-process/index.rst b/Documentation/development-process/index.rst
deleted file mode 100644
index c37475d91090..000000000000
--- a/Documentation/development-process/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Linux Kernel Development Documentation
-======================================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- development-process
diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
index a07b5927f4a8..4b1d22a44ce4 100644
--- a/Documentation/device-mapper/delay.txt
+++ b/Documentation/device-mapper/delay.txt
@@ -16,12 +16,12 @@ Example scripts
[[
#!/bin/sh
# Create device delaying rw operation for 500ms
-echo "0 `blockdev --getsize $1` delay $1 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
]]
[[
#!/bin/sh
# Create device delaying only write operation for 500ms and
# splitting reads and writes to different devices $1 $2
-echo "0 `blockdev --getsize $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
]]
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 692171fe9da0..ff1f87bf26e8 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -21,13 +21,30 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
/proc/crypto contains supported crypto modes
<key>
- Key used for encryption. It is encoded as a hexadecimal number.
+ Key used for encryption. It is encoded either as a hexadecimal number
+ or it can be passed as <key_string> prefixed with a single colon
+ character (':') for keys residing in the kernel keyring service.
You can only use key sizes that are valid for the selected cipher
in combination with the selected iv mode.
Note that for some iv modes the key string can contain additional
keys (for example IV seed) so the key contains more parts concatenated
into a single string.
+<key_string>
+ The kernel keyring key is identified by a string in the following format:
+ <key_size>:<key_type>:<key_description>.
+
+<key_size>
+ The encryption key size in bytes. The kernel key payload size must match
+ the value passed in <key_size>.
+
+<key_type>
+ Either 'logon' or 'user' kernel key type.
+
+<key_description>
+ The kernel keyring key description the crypt target should look for
+ when loading a key of <key_type>.
+
<keycount>
Multi-key compatibility mode. You can define <keycount> keys and
then sectors are encrypted according to their offsets (sector 0 uses key0;
@@ -85,7 +102,13 @@ https://gitlab.com/cryptsetup/cryptsetup
[[
#!/bin/sh
# Create a crypt device using dmsetup
-dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+dmsetup create crypt1 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup when encryption key is stored in keyring service
+dmsetup create crypt2 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 :32:logon:my_prefix:my_key 0 $1 0"
]]
[[
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index c75b64a85859..5e3786fd9ea7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -17,7 +17,7 @@ The target is named "raid" and it accepts the following parameters:
raid0 RAID0 striping (no resilience)
raid1 RAID1 mirroring
raid4 RAID4 with dedicated last parity disk
- raid5_n RAID5 with dedicated last parity disk suporting takeover
+ raid5_n RAID5 with dedicated last parity disk supporting takeover
Same as raid4
-Transitory layout
raid5_la RAID5 left asymmetric
@@ -36,7 +36,7 @@ The target is named "raid" and it accepts the following parameters:
- rotating parity N (right-to-left) with data continuation
raid6_n_6 RAID6 with dedicate parity disks
- parity and Q-syndrome on the last 2 disks;
- laylout for takeover from/to raid4/raid5_n
+ layout for takeover from/to raid4/raid5_n
raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk
- layout for takeover from raid5_la from/to raid6
raid6_ra_6 Same as "raid5_ra" dedicated last Q-syndrome disk
@@ -137,8 +137,8 @@ The target is named "raid" and it accepts the following parameters:
device removal (negative value) or device addition (positive
value) to any reshape supporting raid levels 4/5/6 and 10.
RAID levels 4/5/6 allow for addition of devices (metadata
- and data device tupel), raid10_near and raid10_offset only
- allow for device addtion. raid10_far does not support any
+ and data device tuple), raid10_near and raid10_offset only
+ allow for device addition. raid10_far does not support any
reshaping at all.
A minimum of devices have to be kept to enforce resilience,
which is 3 devices for raid4/5 and 4 devices for raid6.
@@ -242,6 +242,10 @@ recovery. Here is a fuller description of the individual fields:
in RAID1/10 or wrong parity values found in RAID4/5/6.
This value is valid only after a "check" of the array
is performed. A healthy array has a 'mismatch_cnt' of 0.
+ <data_offset> The current data offset to the start of the user data on
+ each component device of a raid set (see the respective
+ raid parameter to support out-of-place reshaping).
+
Message Interface
-----------------
diff --git a/Documentation/device-mapper/linear.txt b/Documentation/device-mapper/linear.txt
index d5307d380a45..7cb98d89d3f8 100644
--- a/Documentation/device-mapper/linear.txt
+++ b/Documentation/device-mapper/linear.txt
@@ -16,15 +16,15 @@ Example scripts
[[
#!/bin/sh
# Create an identity mapping for a device
-echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity
+echo "0 `blockdev --getsz $1` linear $1 0" | dmsetup create identity
]]
[[
#!/bin/sh
# Join 2 devices together
-size1=`blockdev --getsize $1`
-size2=`blockdev --getsize $2`
+size1=`blockdev --getsz $1`
+size2=`blockdev --getsz $2`
echo "0 $size1 linear $1 0
$size1 $size2 linear $2 0" | dmsetup create joined
]]
@@ -44,7 +44,7 @@ if (!defined($dev)) {
die("Please specify a device.\n");
}
-my $dev_size = `blockdev --getsize $dev`;
+my $dev_size = `blockdev --getsz $dev`;
my $extents = int($dev_size / $extent_size) -
(($dev_size % $extent_size) ? 1 : 0);
diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt
index 45f3b91ea4c3..07ec492cceee 100644
--- a/Documentation/device-mapper/striped.txt
+++ b/Documentation/device-mapper/striped.txt
@@ -37,9 +37,9 @@ if (!$num_devs) {
die("Specify at least one device\n");
}
-$min_dev_size = `blockdev --getsize $devs[0]`;
+$min_dev_size = `blockdev --getsz $devs[0]`;
for ($i = 1; $i < $num_devs; $i++) {
- my $this_size = `blockdev --getsize $devs[$i]`;
+ my $this_size = `blockdev --getsz $devs[$i]`;
$min_dev_size = ($min_dev_size < $this_size) ?
$min_dev_size : $this_size;
}
diff --git a/Documentation/device-mapper/switch.txt b/Documentation/device-mapper/switch.txt
index 424835e57f27..5bd4831db4a8 100644
--- a/Documentation/device-mapper/switch.txt
+++ b/Documentation/device-mapper/switch.txt
@@ -123,7 +123,7 @@ Assume that you have volumes vg1/switch0 vg1/switch1 vg1/switch2 with
the same size.
Create a switch device with 64kB region size:
- dmsetup create switch --table "0 `blockdev --getsize /dev/vg1/switch0`
+ dmsetup create switch --table "0 `blockdev --getsz /dev/vg1/switch0`
switch 3 128 0 /dev/vg1/switch0 0 /dev/vg1/switch1 0 /dev/vg1/switch2 0"
Set mappings for the first 7 entries to point to devices switch0, switch1,
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index ef5fbe9a77c7..ad440a2b8051 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -38,6 +38,11 @@ to deliver its interrupts via SPIs.
architecturally-defined reset values. Only supported for 32-bit
systems which follow the ARMv7 architected reset values.
+- arm,no-tick-in-suspend : The main counter does not tick when the system is in
+ low-power system suspend on some SoCs. This behavior does not match the
+ Architecture Reference Manual's specification that the system counter "must
+ be implemented in an always-on power domain."
+
Example:
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index ab318a56fca2..b6e810c2781a 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -148,11 +148,12 @@ Example:
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
-#include "skeleton.dtsi"
/ {
model = "ARM RealView PB1176 with device tree";
compatible = "arm,realview-pb1176";
+ #address-cells = <1>;
+ #size-cells = <1>;
soc {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
new file mode 100644
index 000000000000..7809fbe0cdb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -0,0 +1,236 @@
+==========================================
+ARM CPUs capacity bindings
+==========================================
+
+==========================================
+1 - Introduction
+==========================================
+
+ARM systems may be configured to have CPUs with different power/performance
+characteristics within the same chip. In this case, additional information has
+to be made available to the kernel for it to be aware of such differences and
+make decisions accordingly.
+
+==========================================
+2 - CPU capacity definition
+==========================================
+
+CPU capacity is a number that gives the scheduler information about CPU
+heterogeneity. Such heterogeneity can come from micro-architectural differences
+(e.g., ARM big.LITTLE systems) or the maximum frequency at which CPUs can run
+(e.g., SMP systems with multiple frequency domains). Heterogeneity in this
+context is about differing performance characteristics; this binding tries to
+capture a first-order approximation of the relative performance of CPUs.
+
+CPU capacities are obtained by running a suitable benchmark. This binding makes
+no guarantees on the validity or suitability of any particular benchmark; the
+final capacity should, however, be:
+
+* A "single-threaded" or CPU affine benchmark
+* Divided by the running frequency of the CPU executing the benchmark
+* Not subject to dynamic frequency scaling of the CPU
+
+For the time being, however, we advise using the Dhrystone benchmark. The
+above thus becomes:
+
+CPU capacities are obtained by running the Dhrystone benchmark on each CPU at
+max frequency (with caches enabled). The obtained DMIPS score is then divided
+by the frequency (in MHz) at which the benchmark has been run, so that
+DMIPS/MHz are obtained. Such values are then normalized w.r.t. the highest
+score obtained in the system.
+
+==========================================
+3 - capacity-dmips-mhz
+==========================================
+
+capacity-dmips-mhz is an optional cpu node [1] property: a u32 value
+representing CPU capacity expressed in normalized DMIPS/MHz. At boot time, the
+maximum frequency available to the CPU is then used to calculate the capacity
+value internally used by the kernel.
+
+The capacity-dmips-mhz property is all-or-nothing: if it is specified for a
+cpu node, it has to be specified for every other cpu node, or the system will
+fall back to the default capacity value for every CPU. If cpufreq is not
+available, final capacities are calculated by directly using capacity-dmips-
+mhz values (normalized w.r.t. the highest value found while parsing the DT).
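+
+Once the system has booted, the capacity values derived from this
+property can be inspected from user space (a sketch; the cpu_capacity
+sysfs attribute may not be present on every kernel):
+
+    grep . /sys/devices/system/cpu/cpu*/cpu_capacity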
+
+===========================================
+4 - Examples
+===========================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters):
+capacity-dmips-mhz values are scaled w.r.t. 1024 (cpu@0 and cpu@1);
+supposing cluster0@max-freq=1100 and cluster1@max-freq=850,
+final capacities are 1024 for cluster0 and 446 for cluster1
+
+cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&A57_0>;
+ };
+ core1 {
+ cpu = <&A57_1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&A53_0>;
+ };
+ core1 {
+ cpu = <&A53_1>;
+ };
+ core2 {
+ cpu = <&A53_2>;
+ };
+ core3 {
+ cpu = <&A53_3>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "arm,psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0010000>;
+ local-timer-stop;
+ entry-latency-us = <100>;
+ exit-latency-us = <250>;
+ min-residency-us = <150>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+ entry-latency-us = <800>;
+ exit-latency-us = <700>;
+ min-residency-us = <2500>;
+ };
+ };
+
+ A57_0: cpu@0 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+ A57_1: cpu@1 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x0 0x1>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A57_L2>;
+ clocks = <&scpi_dvfs 0>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+ A53_0: cpu@100 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x100>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_1: cpu@101 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x101>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_2: cpu@102 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x102>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A53_3: cpu@103 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x103>;
+ device_type = "cpu";
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ clocks = <&scpi_dvfs 1>;
+ cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+ capacity-dmips-mhz = <578>;
+ };
+
+ A57_L2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ A53_L2: l2-cache1 {
+ compatible = "cache";
+ };
+};
+
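+The cluster1 capacity in Example 1 can be checked by hand with a short
+shell sketch using the numbers above:
+
+    awk 'BEGIN { printf "%d\n", 578 * 850 * 1024 / (1024 * 1100) }'    # prints 446
+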
+Example 2 (ARM 32-bit, 4-cpu system, two clusters,
+ cpus 0,1@1GHz, cpus 2,3@500MHz):
+capacity-dmips-mhz values are scaled w.r.t. 2 (cpu@0 and cpu@1); this means
+that cpu@0 and cpu@1 are twice as fast as cpu@2 and cpu@3 (at the same frequency)
+
+cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ capacity-dmips-mhz = <2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ capacity-dmips-mhz = <2>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x100>;
+ capacity-dmips-mhz = <1>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0x101>;
+ capacity-dmips-mhz = <1>;
+ };
+};
+
+===========================================
+5 - References
+===========================================
+
+[1] ARM Linux Kernel documentation - CPUs bindings
+ Documentation/devicetree/bindings/arm/cpus.txt
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index e6782d50cbcd..c1dcf4cade2e 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -241,6 +241,14 @@ nodes to be present and contain the properties described below.
# List of phandles to idle state nodes supported
by this cpu [3].
+ - capacity-dmips-mhz
+ Usage: Optional
+ Value type: <u32>
+ Definition:
+ # u32 value representing CPU capacity [4] in
+ DMIPS/MHz, relative to highest capacity-dmips-mhz
+ in the system.
+
- rockchip,pmu
Usage: optional for systems that have an "enable-method"
property value of "rockchip,rk3066-smp"
@@ -464,3 +472,5 @@ cpus {
[2] arm/msm/qcom,kpss-acc.txt
[3] ARM Linux kernel documentation - idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt
+[4] ARM Linux kernel documentation - cpu capacity bindings
+ Documentation/devicetree/bindings/arm/cpu-capacity.txt
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
index 63379b04e052..1dc80f8811fe 100644
--- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
@@ -79,7 +79,7 @@ Required Properties:
Input clocks for fsys clock controller:
- oscclk
- sclk_ufs_mphy
- - div_aclk_fsys_200
+ - aclk_fsys_200
- sclk_pcie_100_fsys
- sclk_ufsunipro_fsys
- sclk_mmc2_fsys
@@ -104,6 +104,10 @@ Required Properties:
- sclk_decon_tv_vclk_disp
- aclk_disp_333
+ Input clocks for audio clock controller:
+ - oscclk
+ - fout_aud_pll
+
Input clocks for bus0 clock controller:
- aclk_bus0_400
@@ -235,7 +239,7 @@ Example 2: Examples of clock controller nodes are listed below.
clock-names = "oscclk",
"sclk_ufs_mphy",
- "div_aclk_fsys_200",
+ "aclk_fsys_200",
"sclk_pcie_100_fsys",
"sclk_ufsunipro_fsys",
"sclk_mmc2_fsys",
@@ -245,7 +249,7 @@ Example 2: Examples of clock controller nodes are listed below.
"sclk_usbdrd30_fsys";
clocks = <&xxti>,
<&cmu_cpif CLK_SCLK_UFS_MPHY>,
- <&cmu_top CLK_DIV_ACLK_FSYS_200>,
+ <&cmu_top CLK_ACLK_FSYS_200>,
<&cmu_top CLK_SCLK_PCIE_100_FSYS>,
<&cmu_top CLK_SCLK_UFSUNIPRO_FSYS>,
<&cmu_top CLK_SCLK_MMC2_FSYS>,
@@ -297,6 +301,9 @@ Example 2: Examples of clock controller nodes are listed below.
compatible = "samsung,exynos5433-cmu-aud";
reg = <0x114c0000 0x0b04>;
#clock-cells = <1>;
+
+ clock-names = "oscclk", "fout_aud_pll";
+ clocks = <&xxti>, <&cmu_top CLK_FOUT_AUD_PLL>;
};
cmu_bus0: clock-controller@13600000 {
diff --git a/Documentation/devicetree/bindings/clock/hi3519-crg.txt b/Documentation/devicetree/bindings/clock/hisi-crg.txt
index acd1f235d548..cc60b3d423f3 100644
--- a/Documentation/devicetree/bindings/clock/hi3519-crg.txt
+++ b/Documentation/devicetree/bindings/clock/hisi-crg.txt
@@ -1,7 +1,7 @@
-* Hisilicon Hi3519 Clock and Reset Generator(CRG)
+* HiSilicon Clock and Reset Generator(CRG)
-The Hi3519 CRG module provides clock and reset signals to various
-controllers within the SoC.
+The CRG module provides clock and reset signals to various
+modules within the SoC.
This binding uses the following bindings:
Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -10,7 +10,11 @@ This binding uses the following bindings:
Required Properties:
- compatible: should be one of the following.
- - "hisilicon,hi3519-crg" - controller compatible with Hi3519 SoC.
+ - "hisilicon,hi3516cv300-crg"
+ - "hisilicon,hi3516cv300-sysctrl"
+ - "hisilicon,hi3519-crg"
+ - "hisilicon,hi3798cv200-crg"
+ - "hisilicon,hi3798cv200-sysctrl"
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/clock/oxnas,stdclk.txt b/Documentation/devicetree/bindings/clock/oxnas,stdclk.txt
index 208cca6ac4ec..b652f3fb7796 100644
--- a/Documentation/devicetree/bindings/clock/oxnas,stdclk.txt
+++ b/Documentation/devicetree/bindings/clock/oxnas,stdclk.txt
@@ -5,22 +5,15 @@ Please also refer to clock-bindings.txt in this directory for common clock
bindings usage.
Required properties:
-- compatible: Should be "oxsemi,ox810se-stdclk"
+- compatible: For OX810SE, should be "oxsemi,ox810se-stdclk"
+ For OX820, should be "oxsemi,ox820-stdclk"
- #clock-cells: 1, see below
Parent node should have the following properties :
-- compatible: Should be "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
-
-For OX810SE, the clock indices are :
- - 0: LEON
- - 1: DMA_SGDMA
- - 2: CIPHER
- - 3: SATA
- - 4: AUDIO
- - 5: USBMPH
- - 6: ETHA
- - 7: PCIA
- - 8: NAND
+- compatible: For OX810SE, should be
+ "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+ For OX820, should be
+ "oxsemi,ox820-sys-ctrl", "syscon", "simple-mfd"
example:
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 3cf44217068e..5b4dfc1ea54f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -14,6 +14,7 @@ Required properties :
"qcom,gcc-msm8974"
"qcom,gcc-msm8974pro"
"qcom,gcc-msm8974pro-ac"
+ "qcom,gcc-msm8994"
"qcom,gcc-msm8996"
"qcom,gcc-mdm9615"
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
new file mode 100644
index 000000000000..87d3714b956a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -0,0 +1,37 @@
+Qualcomm RPM Clock Controller Binding
+------------------------------------------------
+The RPM is a dedicated hardware engine for managing the shared
+SoC resources in order to keep the lowest power profile. It
+communicates with other hardware subsystems via shared memory
+and accepts clock requests, aggregates the requests and turns
+the clocks on/off or scales them on demand.
+
+Required properties :
+- compatible : shall contain only one of the following. The generic
+ compatible "qcom,rpmcc" should be also included.
+
+ "qcom,rpmcc-msm8916", "qcom,rpmcc"
+ "qcom,rpmcc-apq8064", "qcom,rpmcc"
+
+- #clock-cells : shall contain 1
+
+Example:
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index 394d725ac7e0..c46919412953 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -13,6 +13,8 @@ They provide the following functionalities:
Required Properties:
- compatible: Must be one of:
+ - "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
+ - "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
- "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W)
@@ -22,8 +24,9 @@ Required Properties:
- clocks: References to external parent clocks, one entry for each entry in
clock-names
- clock-names: List of external parent clock names. Valid names are:
- - "extal" (r8a7795, r8a7796)
+ - "extal" (r8a7743, r8a7745, r8a7795, r8a7796)
- "extalr" (r8a7795, r8a7796)
+ - "usb_extal" (r8a7743, r8a7745)
- #clock-cells: Must be 2
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk1108-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk1108-cru.txt
new file mode 100644
index 000000000000..4da126116cf0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk1108-cru.txt
@@ -0,0 +1,59 @@
+* Rockchip RK1108 Clock and Reset Unit
+
+The RK1108 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk1108-cru"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+ If missing, pll rates are not changeable, due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk1108-cru.h headers and can be
+used in device tree sources. Similar macros exist for the reset sources in
+these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "ext_vip" - external VIP clock - optional
+ - "ext_i2s" - external I2S clock - optional
+ - "ext_gmac" - external GMAC clock - optional
+ - "hdmiphy" - external clock input derived from HDMI PHY - optional
+ - "usbphy" - external clock input derived from USB PHY - optional
+
+Example: Clock controller node:
+
+ cru: cru@20200000 {
+ compatible = "rockchip,rk1108-cru";
+ reg = <0x20200000 0x1000>;
+ rockchip,grf = <&grf>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+Example: UART controller node that consumes the clock generated by the clock
+ controller:
+
+ uart0: serial@10230000 {
+ compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+ reg = <0x10230000 0x100>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&cru SCLK_UART0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
index c209de6cfadb..0532d815dae3 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
+++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
@@ -7,7 +7,9 @@ Please refer to clock-bindings.txt for common clock controller binding usage.
Please also refer to reset.txt for common reset controller binding usage.
Required properties:
-- compatible: Should be "st,stm32f42xx-rcc"
+- compatible: Should be:
+ "st,stm32f42xx-rcc"
+ "st,stm32f469-rcc"
- reg: should be register base and length as documented in the
datasheet
- #reset-cells: 1, see below
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index 3868458a5feb..74d44a4273f2 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -7,6 +7,7 @@ Required properties :
- "allwinner,sun8i-a23-ccu"
- "allwinner,sun8i-a33-ccu"
- "allwinner,sun8i-h3-ccu"
+ - "allwinner,sun50i-a64-ccu"
- reg: Must contain the registers base address and length
- clocks: phandle to the oscillators feeding the CCU. Two are needed:
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
new file mode 100644
index 000000000000..af2385795d78
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -0,0 +1,78 @@
+Broadcom AVS mail box and interrupt register bindings
+=====================================================
+
+A total of three DT nodes are required. One node (brcm,avs-cpu-data-mem)
+references the mailbox register used to communicate with the AVS CPU[1]. The
+second node (brcm,avs-cpu-l2-intr) is required to trigger an interrupt on
+the AVS CPU. The interrupt tells the AVS CPU that it needs to process a
+command sent to it by a driver. Interrupting the AVS CPU is mandatory for
+commands to be processed.
+
+The interface also requires a reference to the AVS host interrupt controller,
+so a driver can react to interrupts generated by the AVS CPU whenever a command
+has been processed. See [2] for more information on the brcm,l2-intc node.
+
+[1] The AVS CPU is an independent co-processor that runs proprietary
+firmware. On some SoCs, this firmware supports DFS and DVFS in addition to
+Adaptive Voltage Scaling.
+
+[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+
+
+Node brcm,avs-cpu-data-mem
+--------------------------
+
+Required properties:
+- compatible: must include: brcm,avs-cpu-data-mem and
+ should include: one of brcm,bcm7271-avs-cpu-data-mem or
+ brcm,bcm7268-avs-cpu-data-mem
+- reg: Specifies base physical address and size of the registers.
+- interrupts: The interrupt that the AVS CPU will use to interrupt the host
+ when a command has completed.
+- interrupt-parent: The interrupt controller the above interrupt is routed
+ through.
+- interrupt-names: The name of the interrupt used to interrupt the host.
+
+Optional properties:
+- None
+
+Node brcm,avs-cpu-l2-intr
+-------------------------
+
+Required properties:
+- compatible: must include: brcm,avs-cpu-l2-intr and
+ should include: one of brcm,bcm7271-avs-cpu-l2-intr or
+ brcm,bcm7268-avs-cpu-l2-intr
+- reg: Specifies base physical address and size of the registers.
+
+Optional properties:
+- None
+
+
+Example
+=======
+
+ avs_host_l2_intc: interrupt-controller@f04d1200 {
+ #interrupt-cells = <1>;
+ compatible = "brcm,l2-intc";
+ interrupt-parent = <&intc>;
+ reg = <0xf04d1200 0x48>;
+ interrupt-controller;
+ interrupts = <0x0 0x19 0x0>;
+ interrupt-names = "avs";
+ };
+
+ avs-cpu-data-mem@f04c4000 {
+ compatible = "brcm,bcm7271-avs-cpu-data-mem",
+ "brcm,avs-cpu-data-mem";
+ reg = <0xf04c4000 0x60>;
+ interrupts = <0x1a>;
+ interrupt-parent = <&avs_host_l2_intc>;
+ interrupt-names = "sw_intr";
+ };
+
+ avs-cpu-l2-intr@f04d1100 {
+ compatible = "brcm,bcm7271-avs-cpu-l2-intr",
+ "brcm,avs-cpu-l2-intr";
+ reg = <0xf04d1100 0x10>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index adeca34c5a33..10a425f451fc 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -123,6 +123,9 @@ PROPERTIES
EXAMPLE
+
+iMX6QDL/SX requires four clocks
+
crypto@300000 {
compatible = "fsl,sec-v4.0";
fsl,sec-era = <2>;
@@ -139,6 +142,23 @@ EXAMPLE
clock-names = "mem", "aclk", "ipg", "emi_slow";
};
+
+iMX6UL only requires three clocks
+
+ crypto: caam@2140000 {
+ compatible = "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x2140000 0x3c000>;
+ ranges = <0 0x2140000 0x3c000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&clks IMX6UL_CLK_CAAM_MEM>,
+ <&clks IMX6UL_CLK_CAAM_ACLK>,
+ <&clks IMX6UL_CLK_CAAM_IPG>;
+ clock-names = "mem", "aclk", "ipg";
+ };
+
=====================================================================
Job Ring (JR) Node
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
new file mode 100644
index 000000000000..00f74bad1e95
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -0,0 +1,112 @@
+Amlogic Meson Display Controller
+================================
+
+The Amlogic Meson Display controller is composed of several components
+that are going to be documented below:
+
+DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
+ | vd1 _______ _____________ _________________ | |
+D |-------| |----| | | | | HDMI PLL |
+D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
+R |-------| |----| Processing | | | | |
+ | osd2 | | | |---| Enci ----------|----|-----VDAC------|
+R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
+A | osd1 | | | Blenders | | Encl ----------|----|---------------|
+M |-------|______|----|____________| |________________| | |
+___|__________________________________________________________|_______________|
+
+
+VIU: Video Input Unit
+---------------------
+
+The Video Input Unit is in charge of the pixel scanout from the DDR memory.
+It fetches the frame addresses, strides and parameters from the "Canvas" memory.
+This part is also in charge of the CSC (Colorspace Conversion).
+It can handle 2 OSD Planes and 2 Video Planes.
+
+VPP: Video Post Processing
+--------------------------
+
+The Video Post Processing is in charge of the scaling and blending of the
+various planes into a single pixel stream.
+There is a special "pre-blending" used by the video planes with a dedicated
+scaler and a "post-blending" to merge with the OSD Planes.
+One of the OSD planes also has a dedicated scaler.
+
+VENC: Video Encoders
+--------------------
+
+The VENC is composed of multiple pixel encoders:
+ - ENCI : Interlace Video encoder for CVBS and Interlace HDMI
+ - ENCP : Progressive Video Encoder for HDMI
+ - ENCL : LCD LVDS Encoder
+The VENC Unit gets a Pixel Clock (VCLK) from a dedicated HDMI PLL and clock
+tree and provides the scanout clock to the VPP and VIU.
+The ENCI is connected to a single VDAC for Composite Output.
+The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
+
+Device Tree Bindings:
+---------------------
+
+VPU: Video Processing Unit
+--------------------------
+
+Required properties:
+- compatible: value should be different for each SoC family as :
+ - GXBB (S905) : "amlogic,meson-gxbb-vpu"
+ - GXL (S905X, S905D) : "amlogic,meson-gxl-vpu"
+ - GXM (S912) : "amlogic,meson-gxm-vpu"
+ followed by the common "amlogic,meson-gx-vpu"
+- reg: base address and size of the following memory-mapped regions:
+ - vpu
+ - hhi
+ - dmc
+- reg-names: should contain the names of the previous memory regions
+- interrupts: should contain the VENC Vsync interrupt number
+
+Required nodes:
+
+The connections to the VPU output video ports are modeled using the OF graph
+bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+The following table lists for each supported model the port number
+corresponding to each VPU output.
+
+ Port 0 Port 1
+-----------------------------------------
+ S905 (GXBB) CVBS VDAC HDMI-TX
+ S905X (GXL) CVBS VDAC HDMI-TX
+ S905D (GXL) CVBS VDAC HDMI-TX
+ S912 (GXM) CVBS VDAC HDMI-TX
+
+Example:
+
+tv-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ tv_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+};
+
+vpu: vpu@d0100000 {
+ compatible = "amlogic,meson-gxbb-vpu";
+ reg = <0x0 0xd0100000 0x0 0x100000>,
+ <0x0 0xc883c000 0x0 0x1000>,
+ <0x0 0xc8838000 0x0 0x1000>;
+ reg-names = "vpu", "hhi", "dmc";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CVBS VDAC output port */
+ port@0 {
+ reg = <0>;
+
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&tv_connector_in>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index a5ea451e67fc..e2768703ac2b 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -43,6 +43,13 @@ Required properties for DPI:
- port: Port node with a single endpoint connecting to the panel
device, as defined in [1]
+Required properties for VEC:
+- compatible: Should be "brcm,bcm2835-vec"
+- reg: Physical base address and length of the registers
+- clocks: The core clock the unit runs on
+- interrupts: The interrupt number
+ See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
Required properties for V3D:
- compatible: Should be "brcm,bcm2835-v3d"
- reg: Physical base address and length of the V3D's registers
@@ -92,6 +99,13 @@ dpi: dpi@7e208000 {
};
};
+vec: vec@7e806000 {
+ compatible = "brcm,bcm2835-vec";
+ reg = <0x7e806000 0x1000>;
+ clocks = <&clocks BCM2835_CLOCK_VEC>;
+ interrupts = <2 27>;
+};
+
v3d: v3d@7ec00000 {
compatible = "brcm,bcm2835-v3d";
reg = <0x7ec00000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
index 003bc246a270..164cbb15f04c 100644
--- a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
@@ -16,6 +16,8 @@ graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for RGB input
- Video port 1 for VGA output
+Optional properties:
+- vdd-supply: Power supply for DAC
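+
+A hedged fragment showing the supply hookup (the regulator label below is an
+illustrative assumption, not part of this binding):
+
+	vga-bridge {
+		compatible = "dumb-vga-dac";
+		vdd-supply = <&reg_vga_vdd>;	/* assumed fixed regulator */
+		/* video ports 0 and 1 as described above */
+	};
+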
Example
-------
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
index dc1452f0d5d8..5e9a84d6e5f1 100644
--- a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
@@ -19,7 +19,9 @@ Required properties:
Optional properties
- reg-io-width: width of the registers (1 or 4 bytes); defaults to 1 if not present
-- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing.
+               If the property is omitted, the functionally reduced I2C bus
+               controller built into DW HDMI is used for EDID probing instead.
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,tfp410.txt b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
index 2cbe32a3d0bb..54d7e31525ec 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,tfp410.txt
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
@@ -6,10 +6,15 @@ Required properties:
Optional properties:
- powerdown-gpios: power-down gpio
+- reg: I2C address. If and only if it is present, the device node should be
+       placed inside the I2C controller node that the TFP410's I2C interface
+       is connected to (see the sketch below).
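+
+For the I2C-connected case, a minimal sketch (the i2c0 label, the 0x38 address
+and the power-down GPIO are illustrative assumptions; the required video ports
+are omitted for brevity):
+
+	&i2c0 {
+		tfp410: dvi-encoder@38 {
+			compatible = "ti,tfp410";
+			reg = <0x38>;
+			powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+		};
+	};
+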
Required nodes:
-- Video port 0 for DPI input
-- Video port 1 for DVI output
+- Video port 0 for DPI input [1].
+- Video port 1 for DVI output [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example
-------
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
new file mode 100644
index 000000000000..8e5b30b87754
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ht16k33.txt
@@ -0,0 +1,42 @@
+Holtek ht16k33 RAM mapping 16*8 LED controller with keyscan
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible: "holtek,ht16k33"
+- reg: I2C slave address of the chip.
+- interrupt-parent: A phandle pointing to the interrupt controller
+ serving the interrupt for this chip.
+- interrupts: Interrupt specification for the key pressed interrupt.
+- refresh-rate-hz: Display update interval in Hz.
+- debounce-delay-ms: Debouncing interval time in milliseconds.
+- linux,keymap: The keymap for keys as described in the binding
+ document (devicetree/bindings/input/matrix-keymap.txt).
+
+Optional properties:
+- linux,no-autorepeat: Disable keyrepeat.
+- default-brightness-level: Initial brightness level [0-15] (default: 15).
+
+Example:
+
+&i2c1 {
+ ht16k33: ht16k33@70 {
+ compatible = "holtek,ht16k33";
+ reg = <0x70>;
+ refresh-rate-hz = <20>;
+ debounce-delay-ms = <50>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+ linux,keymap = <
+ MATRIX_KEY(2, 0, KEY_F6)
+ MATRIX_KEY(3, 0, KEY_F8)
+ MATRIX_KEY(4, 0, KEY_F10)
+ MATRIX_KEY(5, 0, KEY_F4)
+ MATRIX_KEY(6, 0, KEY_F2)
+ MATRIX_KEY(2, 1, KEY_F5)
+ MATRIX_KEY(3, 1, KEY_F7)
+ MATRIX_KEY(4, 1, KEY_F9)
+ MATRIX_KEY(5, 1, KEY_F3)
+ MATRIX_KEY(6, 1, KEY_F1)
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/mxsfb.txt b/Documentation/devicetree/bindings/display/mxsfb.txt
index 96ec5179c8a0..472e1ea6c591 100644
--- a/Documentation/devicetree/bindings/display/mxsfb.txt
+++ b/Documentation/devicetree/bindings/display/mxsfb.txt
@@ -1,20 +1,57 @@
* Freescale MXS LCD Interface (LCDIF)
+New bindings:
+=============
Required properties:
-- compatible: Should be "fsl,<chip>-lcdif". Supported chips include
- imx23 and imx28.
-- reg: Address and length of the register set for lcdif
-- interrupts: Should contain lcdif interrupts
-- display : phandle to display node (see below for details)
+- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
+ Should be "fsl,imx28-lcdif" for i.MX28.
+ Should be "fsl,imx6sx-lcdif" for i.MX6SX.
+- reg: Address and length of the register set for LCDIF
+- interrupts: Should contain LCDIF interrupt
+- clocks: A list of phandle + clock-specifier pairs, one for each
+ entry in 'clock-names'.
+- clock-names: A list of clock names. For MXSFB it should contain:
+ - "pix" for the LCDIF block clock
+	- (i.MX6SX only) "axi", "disp_axi" for the bus interface clocks
+
+Required sub-nodes:
+ - port: The connection to an encoder chip.
+
+Example:
+
+ lcdif1: display-controller@2220000 {
+ compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
+ reg = <0x02220000 0x4000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
+ <&clks IMX6SX_CLK_LCDIF_APB>,
+ <&clks IMX6SX_CLK_DISPLAY_AXI>;
+ clock-names = "pix", "axi", "disp_axi";
+
+ port {
+ parallel_out: endpoint {
+ remote-endpoint = <&panel_in_parallel>;
+ };
+ };
+ };
+
+Deprecated bindings:
+====================
+Required properties:
+- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
+ Should be "fsl,imx28-lcdif" for i.MX28.
+- reg: Address and length of the register set for LCDIF
+- interrupts: Should contain LCDIF interrupts
+- display: phandle to display node (see below for details)
* display node
Required properties:
-- bits-per-pixel : <16> for RGB565, <32> for RGB888/666.
-- bus-width : number of data lines. Could be <8>, <16>, <18> or <24>.
+- bits-per-pixel: <16> for RGB565, <32> for RGB888/666.
+- bus-width: number of data lines. Could be <8>, <16>, <18> or <24>.
Required sub-node:
-- display-timings : Refer to binding doc display-timing.txt for details.
+- display-timings: Refer to binding doc display-timing.txt for details.
Examples:
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
new file mode 100644
index 000000000000..3afc76747824
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g133han01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
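+
+A minimal sketch of such a panel node (the power-supply and backlight phandles
+are illustrative assumptions; see simple-panel.txt for the full set of
+supported properties):
+
+	panel {
+		compatible = "auo,g133han01";
+		power-supply = <&panel_vcc>;	/* assumed regulator */
+		backlight = <&backlight_lvds>;	/* assumed backlight node */
+	};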
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
new file mode 100644
index 000000000000..ed657c2141d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g185han01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
new file mode 100644
index 000000000000..cbd9da3f03b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,t215hvn01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
new file mode 100644
index 000000000000..dd22685d2adc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa070wp03xg"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index e1d4a0b59612..81a75893d1b8 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -32,6 +32,14 @@ optional properties:
- active low = drive pixel data on falling edge/
sample data on rising edge
- ignored = ignored
+ - syncclk-active: with
+ - active high = drive sync on rising edge/
+ sample sync on falling edge of pixel
+ clock
+ - active low = drive sync on falling edge/
+ sample sync on rising edge of pixel
+ clock
+ - omitted = same configuration as pixelclk-active
- interlaced (bool): boolean to enable interlaced mode
- doublescan (bool): boolean to enable doublescan mode
- doubleclk (bool): boolean to enable doubleclock mode
diff --git a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt b/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
new file mode 100644
index 000000000000..17bcd017c678
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
@@ -0,0 +1,7 @@
+New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
+
+Required properties:
+- compatible: should be "nvd,9128"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt
new file mode 100644
index 000000000000..0f57c3143506
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq150x1lg11.txt
@@ -0,0 +1,36 @@
+Sharp 15" LQ150X1LG11 XGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq150x1lg11"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+
+Optional properties:
+- backlight: phandle of the backlight device
+- rlud-gpios: a single GPIO for the RL/UD (rotate 180 degrees) pin.
+- sellvds-gpios: a single GPIO for the SELLVDS pin.
+
+If rlud-gpios and/or sellvds-gpios are not specified, the RL/UD and/or SELLVDS
+pins are assumed to be handled appropriately by the hardware.
+
+Example:
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 100000>; /* VBR */
+
+ brightness-levels = <0 20 40 60 80 100>;
+ default-brightness-level = <2>;
+
+ power-supply = <&vdd_12v_reg>; /* VDD */
+ enable-gpios = <&gpio 42 GPIO_ACTIVE_HIGH>; /* XSTABY */
+ };
+
+ panel {
+ compatible = "sharp,lq150x1lg11";
+
+ power-supply = <&vcc_3v3_reg>; /* VCC */
+
+ backlight = <&backlight>;
+ rlud-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; /* RL/UD */
+ sellvds-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; /* SELLVDS */
+ };
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 0d30e42e40be..1a02f099a0ff 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -6,9 +6,11 @@ Required Properties:
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
+ - "renesas,du-r8a7792" for R8A7792 (R-Car V2H) compatible DU
- "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
- "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
- "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
+ - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
- reg: A list of base address and length of each memory resource, one for
each entry in the reg-names property.
@@ -25,10 +27,10 @@ Required Properties:
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- - R8A779[01345] use one functional clock per channel and one clock per LVDS
- encoder (if available). The functional clocks must be named "du.x" with
- "x" being the channel numerical index. The LVDS clocks must be named
- "lvds.x" with "x" being the LVDS encoder numerical index.
+ - R8A779[0123456] use one functional clock per channel and one clock per
+ LVDS encoder (if available). The functional clocks must be named "du.x"
+ with "x" being the channel numerical index. The LVDS clocks must be
+ named "lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
@@ -47,9 +49,11 @@ corresponding to each DU output.
R8A7779 (H1) DPAD 0 DPAD 1 - -
R8A7790 (H2) DPAD LVDS 0 LVDS 1 -
R8A7791 (M2-W) DPAD LVDS 0 - -
+ R8A7792 (V2H) DPAD 0 DPAD 1 - -
R8A7793 (M2-N) DPAD LVDS 0 - -
R8A7794 (E2) DPAD 0 DPAD 1 - -
R8A7795 (H3) DPAD HDMI 0 HDMI 1 LVDS
+ R8A7796 (M3-W) DPAD HDMI LVDS -
Example: R8A7790 (R-Car H2) DU
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b95696d748c7..b82c00449468 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -28,6 +28,8 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
* allwinner,sun5i-a13-tcon
+ * allwinner,sun6i-a31-tcon
+ * allwinner,sun6i-a31s-tcon
* allwinner,sun8i-a33-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@@ -50,7 +52,7 @@ Required properties:
second the block connected to the TCON channel 1 (usually the TV
encoder)
-On the A13, there is one more clock required:
+On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
DRC
@@ -64,6 +66,8 @@ adaptive backlight control.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun6i-a31-drc
+ * allwinner,sun6i-a31s-drc
* allwinner,sun8i-a33-drc
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -87,6 +91,7 @@ system.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
+ * allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the frontend and backend
@@ -117,6 +122,7 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
+ * allwinner,sun6i-a31-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -142,6 +148,8 @@ extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
+ * allwinner,sun6i-a31-display-engine
+ * allwinner,sun6i-a31s-display-engine
* allwinner,sun8i-a33-display-engine
- allwinner,pipelines: list of phandle to the display engine
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index a83abd79c55c..6fddb4f4f71a 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -1,7 +1,9 @@
Device-Tree bindings for tilcdc DRM driver
Required properties:
- - compatible: value should be "ti,am33xx-tilcdc".
+ - compatible: value should be one of the following:
+ - "ti,am33xx-tilcdc" for AM335x based boards
+ - "ti,da850-tilcdc" for DA850/AM18x/OMAP-L138 based boards
- interrupts: the interrupt number
- reg: base address and size of the LCDC device
@@ -51,7 +53,7 @@ Optional nodes:
Example:
fb: fb@4830e000 {
- compatible = "ti,am33xx-tilcdc";
+ compatible = "ti,am33xx-tilcdc", "ti,da850-tilcdc";
reg = <0x4830e000 0x1000>;
interrupt-parent = <&intc>;
interrupts = <36>;
diff --git a/Documentation/devicetree/bindings/display/zte,vou.txt b/Documentation/devicetree/bindings/display/zte,vou.txt
new file mode 100644
index 000000000000..740e5bd2e4f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/zte,vou.txt
@@ -0,0 +1,84 @@
+ZTE VOU Display Controller
+
+This is a display controller found on the ZTE ZX296718 SoC. It includes multiple
+Graphic Layers (GL) and Video Layers (VL), two Mixers/Channels, and a few blocks
+handling scaling, color space conversion etc. VOU also integrates the support
+for typical output devices, like HDMI, TV Encoder, VGA, and RGB LCD.
+
+* Master VOU node
+
+It must be the parent node of all the sub-device nodes.
+
+Required properties:
+ - compatible: should be "zte,zx296718-vou"
+ - #address-cells: should be <1>
+ - #size-cells: should be <1>
+ - ranges: list of address translations between VOU and sub-devices
+
+* VOU DPC device
+
+Required properties:
+ - compatible: should be "zte,zx296718-dpc"
+ - reg: Physical base address and length of DPC register regions, one for each
+ entry in 'reg-names'
+ - reg-names: The names of register regions. The following regions are required:
+ "osd"
+ "timing_ctrl"
+ "dtrc"
+ "vou_ctrl"
+ "otfppu"
+ - interrupts: VOU DPC interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "aclk"
+ "ppu_wclk"
+ "main_wclk"
+ "aux_wclk"
+
+* HDMI output device
+
+Required properties:
+ - compatible: should be "zte,zx296718-hdmi"
+ - reg: Physical base address and length of the HDMI device IO region
+ - interrupts : HDMI interrupt number to CPU
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. The following clocks are required:
+ "osc_cec"
+ "osc_clk"
+ "xclk"
+
+Example:
+
+vou: vou@1440000 {
+ compatible = "zte,zx296718-vou";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1440000 0x10000>;
+
+ dpc: dpc@0 {
+ compatible = "zte,zx296718-dpc";
+ reg = <0x0000 0x1000>, <0x1000 0x1000>,
+ <0x5000 0x1000>, <0x6000 0x1000>,
+ <0xa000 0x1000>;
+ reg-names = "osd", "timing_ctrl",
+ "dtrc", "vou_ctrl",
+ "otfppu";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
+ <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
+ clock-names = "aclk", "ppu_wclk",
+ "main_wclk", "aux_wclk";
+ };
+
+ hdmi: hdmi@c000 {
+ compatible = "zte,zx296718-hdmi";
+ reg = <0xc000 0x4000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&topcrm HDMI_OSC_CEC>,
+ <&topcrm HDMI_OSC_CLK>,
+ <&topcrm HDMI_XCLK>;
+ clock-names = "osc_cec", "osc_clk", "xclk";
+ };
+};
diff --git a/Documentation/devicetree/bindings/dma/nbpfaxi.txt b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
index d5e2522b9ec1..d2e1e62e346a 100644
--- a/Documentation/devicetree/bindings/dma/nbpfaxi.txt
+++ b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
@@ -23,6 +23,14 @@ Required properties
#define NBPF_SLAVE_RQ_LEVEL 4
Optional properties:
+- max-burst-mem-read: limit burst size for memory reads
+ (DMA_MEM_TO_MEM/DMA_MEM_TO_DEV) to this value, specified in bytes, rather
+ than using the maximum burst size allowed by the hardware's buffer size.
+- max-burst-mem-write: limit burst size for memory writes
+ (DMA_DEV_TO_MEM/DMA_MEM_TO_MEM) to this value, specified in bytes, rather
+ than using the maximum burst size allowed by the hardware's buffer size.
+ If both max-burst-mem-read and max-burst-mem-write are set, DMA_MEM_TO_MEM
+ will use the lower value.
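+
+A hedged sketch of how these limits might appear on a controller node (the node
+and the byte values are illustrative; the remaining properties follow the
+required list above):
+
+	dma-controller@e6700020 {
+		/* compatible, reg, interrupts, #dma-cells, ... as required above */
+		max-burst-mem-read = <128>;	/* bytes */
+		max-burst-mem-write = <64>;	/* bytes; DMA_MEM_TO_MEM uses the lower value */
+	};
+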
You can use dma-channels and dma-requests as described in dma.txt, although they
won't be used, this information is derived from the compatibility string.
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index fd5618bd8fbc..55492c264d17 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -5,13 +5,13 @@ memcpy and memset capabilities. It has been designed for virtualized
environments.
Each HIDMA HW instance consists of multiple DMA channels. These channels
-share the same bandwidth. The bandwidth utilization can be parititioned
+share the same bandwidth. The bandwidth utilization can be partitioned
among channels based on the priority and weight assignments.
There are only two priority levels and 15 weight assignments possible.
Other parameters here determine how much of the system bus this HIDMA
-instance can use like maximum read/write request and and number of bytes to
+instance can use, such as maximum read/write request size and number of bytes to
read/write in a single burst.
Main node required properties:
@@ -47,12 +47,18 @@ When the OS is not in control of the management interface (i.e. it's a guest),
the channel nodes appear on their own, not under a management node.
Required properties:
-- compatible: must contain "qcom,hidma-1.0"
+- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1"
+  for MSI-capable HW.
- reg: Addresses for the transfer and event channel
- interrupts: Should contain the event interrupt
- desc-count: Number of asynchronous requests this channel can handle
- iommus: requires an iommu node
+Optional properties for MSI:
+- msi-parent : See the generic MSI binding described in
+ devicetree/bindings/interrupt-controller/msi.txt for a description of the
+ msi-parent property.
+
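+A hedged sketch of an MSI-capable guest channel node (the unit addresses,
+interrupt number and the gic_its label are illustrative assumptions):
+
+	hidma_24: dma-controller@5aa0000 {
+		compatible = "qcom,hidma-1.1";
+		reg = <0x5aa0000 0x2000>,	/* transfer channel */
+		      <0x5ab0000 0x2000>;	/* event channel */
+		interrupts = <0 389 0>;
+		desc-count = <10>;
+		/* iommus = <...>; as required above */
+		msi-parent = <&gic_its>;	/* see the generic MSI binding */
+	};
+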
Example:
Hypervisor OS configuration:
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5f2ce669789a..3316a9c2e638 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -24,6 +24,7 @@ Required Properties:
- "renesas,dmac-r8a7793" (R-Car M2-N)
- "renesas,dmac-r8a7794" (R-Car E2)
- "renesas,dmac-r8a7795" (R-Car H3)
+ - "renesas,dmac-r8a7796" (R-Car M3-W)
- reg: base address and length of the registers block for the DMAC
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 0f5583293c9c..4775c66f4508 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -27,6 +27,8 @@ Optional properties:
that services interrupts for this device
- is_private: The device channels should be marked as private and not used by the
  general purpose DMA channel allocator. False if not passed.
+- multi-block: Multi block transfers supported by hardware. Array property with
+ one cell per channel. 0: not supported, 1 (default): supported.
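+
+A hedged sketch of the new property for a controller with four channels (the
+node and the per-channel values are illustrative):
+
+	dmahost: dma@fc000000 {
+		/* compatible, reg, interrupts, ... as per this binding */
+		dma-channels = <4>;
+		multi-block = <1 1 0 0>;	/* channels 0 and 1 support multi-block transfers */
+	};
+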
Example:
diff --git a/Documentation/devicetree/bindings/dma/st_fdma.txt b/Documentation/devicetree/bindings/dma/st_fdma.txt
new file mode 100644
index 000000000000..495d853c569b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/st_fdma.txt
@@ -0,0 +1,87 @@
+* STMicroelectronics Flexible Direct Memory Access Device Tree bindings
+
+The FDMA is a general-purpose direct memory access controller capable of
+supporting 16 independent DMA channels. It accepts up to 32 DMA requests.
+The FDMA is based on a Slim processor which requires firmware.
+
+* FDMA Controller
+
+Required properties:
+- compatible : Should be one of
+ - st,stih407-fdma-mpe31-11, "st,slim-rproc";
+ - st,stih407-fdma-mpe31-12, "st,slim-rproc";
+ - st,stih407-fdma-mpe31-13, "st,slim-rproc";
+- reg : Should contain an entry for each name in reg-names
+- reg-names : Must contain "slimcore", "dmem", "peripherals", "imem" entries
+- interrupts : Should contain one interrupt shared by all channels
+- dma-channels : Number of channels supported by the controller
+- #dma-cells : Must be <3>. See DMA client section below
+- clocks : Must contain an entry for each clock
+See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+
+Example:
+
+ fdma0: dma-controller@8e20000 {
+ compatible = "st,stih407-fdma-mpe31-11", "st,slim-rproc";
+ reg = <0x8e20000 0x8000>,
+ <0x8e30000 0x3000>,
+ <0x8e37000 0x1000>,
+ <0x8e38000 0x8000>;
+ reg-names = "slimcore", "dmem", "peripherals", "imem";
+ clocks = <&clk_s_c0_flexgen CLK_FDMA>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_NONE>;
+ dma-channels = <16>;
+ #dma-cells = <3>;
+ };
+
+* DMA client
+
+Required properties:
+- dmas: Comma separated list of dma channel requests
+- dma-names: Names of the aforementioned requested channels
+
+Each dmas request consists of 4 cells:
+1. A phandle pointing to the FDMA controller
+2. The request line number
+3. A 32bit mask specifying the transfer configuration
+   (see include/linux/platform_data/dma-st-fdma.h)
+ -bit 2-0: Holdoff value, dreq will be masked for
+ 0x0: 0-0.5us
+ 0x1: 0.5-1us
+ 0x2: 1-1.5us
+ -bit 17: data swap
+ 0x0: disabled
+ 0x1: enabled
+ -bit 21: Increment Address
+ 0x0: no address increment between transfers
+ 0x1: increment address between transfers
+ -bit 22: 2 STBus Initiator Coprocessor interface
+ 0x0: high priority port
+ 0x1: low priority port
+4. transfers type
+ 0 free running
+ 1 paced
+
+Example:
+
+ sti_uni_player2: sti-uni-player@2 {
+ compatible = "st,sti-uni-player";
+ status = "disabled";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+ assigned-clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+ assigned-clock-parents = <&clk_s_d0_quadfs 2>;
+ assigned-clock-rates = <50000000>;
+ reg = <0x8D82000 0x158>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
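+		/* cells: FDMA phandle, request line 4, config mask 0x0, paced transfer */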
+ dmas = <&fdma0 4 0 1>;
+ dai-name = "Uni Player #1 (DAC)";
+ dma-names = "tx";
+ st,uniperiph-id = <2>;
+ st,version = <5>;
+ st,mode = "PCM";
+ };
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index af0b903de293..dfc14f71e81f 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -5,7 +5,10 @@ connected to a GPIO pin.
Required properties:
- compatible: Should be "linux,extcon-usb-gpio"
+
+At least one of id-gpio or vbus-gpio must be present. Both can be present as well.
- id-gpio: gpio for USB ID pin. See gpio binding.
+- vbus-gpio: gpio for USB VBUS pin.
Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
extcon_usb1 {
diff --git a/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
new file mode 100644
index 000000000000..817a8d4bf903
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
@@ -0,0 +1,16 @@
+Altera FPGA To SDRAM Bridge Driver
+
+Required properties:
+- compatible : Should contain "altr,socfpga-fpga2sdram-bridge"
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup
+ 1 if driver should enable bridge at startup
+ Default is to leave bridge in current state.
+
+Example:
+ fpga_bridge3: fpga-bridge@ffc25080 {
+ compatible = "altr,socfpga-fpga2sdram-bridge";
+ reg = <0xffc25080 0x4>;
+ bridge-enable = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
new file mode 100644
index 000000000000..f8e288c71b2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
@@ -0,0 +1,23 @@
+Altera Freeze Bridge Controller Driver
+
+The Altera Freeze Bridge Controller manages one or more freeze bridges.
+The controller can freeze/disable the bridges which prevents signal
+changes from passing through the bridge. The controller can also
+unfreeze/enable the bridges which allows traffic to pass through the
+bridge normally.
+
+Required properties:
+- compatible : Should contain "altr,freeze-bridge-controller"
+- regs : base address and size for freeze bridge module
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup
+ 1 if driver should enable bridge at startup
+ Default is to leave bridge in current state.
+
+Example:
+ freeze-controller@100000450 {
+ compatible = "altr,freeze-bridge-controller";
+ regs = <0x1000 0x10>;
+ bridge-enable = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
new file mode 100644
index 000000000000..6406f9337eeb
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
@@ -0,0 +1,39 @@
+Altera FPGA/HPS Bridge Driver
+
+Required properties:
+- reg : base address and size for AXI bridge module
+- compatible : Should contain one of:
+ "altr,socfpga-lwhps2fpga-bridge",
+ "altr,socfpga-hps2fpga-bridge", or
+ "altr,socfpga-fpga2hps-bridge"
+- resets : Phandle and reset specifier for this bridge's reset
+- clocks : Clocks used by this module.
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup.
+ 1 if driver should enable bridge at startup.
+ Default is to leave bridge in its current state.
+
+Example:
+ fpga_bridge0: fpga-bridge@ff400000 {
+ compatible = "altr,socfpga-lwhps2fpga-bridge";
+ reg = <0xff400000 0x100000>;
+ resets = <&rst LWHPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+ bridge-enable = <0>;
+ };
+
+ fpga_bridge1: fpga-bridge@ff500000 {
+ compatible = "altr,socfpga-hps2fpga-bridge";
+ reg = <0xff500000 0x10000>;
+ resets = <&rst HPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+ bridge-enable = <1>;
+ };
+
+ fpga_bridge2: fpga-bridge@ff600000 {
+ compatible = "altr,socfpga-fpga2hps-bridge";
+ reg = <0xff600000 0x100000>;
+ resets = <&rst FPGA2HPS_RESET>;
+ clocks = <&l4_main_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt
new file mode 100644
index 000000000000..2fd8e7a84734
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt
@@ -0,0 +1,19 @@
+Altera SOCFPGA Arria10 FPGA Manager
+
+Required properties:
+- compatible : should contain "altr,socfpga-a10-fpga-mgr"
+- reg : base address and size for memory mapped io.
+ - The first index is for FPGA manager register access.
+ - The second index is for writing FPGA configuration data.
+- resets : Phandle and reset specifier for the device's reset.
+- clocks : Clocks used by the device.
+
+Example:
+
+ fpga_mgr: fpga-mgr@ffd03000 {
+ compatible = "altr,socfpga-a10-fpga-mgr";
+ reg = <0xffd03000 0x100
+ 0xffcfe400 0x20>;
+ clocks = <&l4_mp_clk>;
+ resets = <&rst FPGAMGR_RESET>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
new file mode 100644
index 000000000000..3b32ba15a717
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -0,0 +1,494 @@
+FPGA Region Device Tree Binding
+
+Alan Tull 2016
+
+ CONTENTS
+ - Introduction
+ - Terminology
+ - Sequence
+ - FPGA Region
+ - Supported Use Models
+ - Device Tree Examples
+ - Constraints
+
+
+Introduction
+============
+
+FPGA Regions represent FPGAs and partial reconfiguration regions of FPGAs in
+the Device Tree. FPGA Regions provide a way to program FPGAs under device tree
+control.
+
+This device tree binding document hits some of the high points of FPGA usage and
+attempts to include terminology used by both major FPGA manufacturers. This
+document isn't a replacement for any manufacturer's specifications for FPGA
+usage.
+
+
+Terminology
+===========
+
+Full Reconfiguration
+ * The entire FPGA is programmed.
+
+Partial Reconfiguration (PR)
+ * A section of an FPGA is reprogrammed while the rest of the FPGA is not
+ affected.
+ * Not all FPGAs support PR.
+
+Partial Reconfiguration Region (PRR)
+ * Also called a "reconfigurable partition"
+ * A PRR is a specific section of an FPGA reserved for reconfiguration.
+ * A base (or static) FPGA image may create a set of PRRs that later may
+ be independently reprogrammed many times.
+ * The size and specific location of each PRR is fixed.
+ * The connections at the edge of each PRR are fixed. The image that is loaded
+ into a PRR must fit and must use a subset of the region's connections.
+ * The busses within the FPGA are split such that each region gets its own
+ branch that may be gated independently.
+
+Persona
+ * Also called a "partial bit stream"
+ * An FPGA image that is designed to be loaded into a PRR. There may be
+   any number of personas designed to fit into a PRR, but only one at a time
+ may be loaded.
+ * A persona may create more regions.
+
+FPGA Bridge
+ * FPGA Bridges gate bus signals between a host and FPGA.
+ * FPGA Bridges should be disabled while the FPGA is being programmed to
+ prevent spurious signals on the cpu bus and to the soft logic.
+ * FPGA bridges may be actual hardware or soft logic on an FPGA.
+ * During Full Reconfiguration, hardware bridges between the host and FPGA
+ will be disabled.
+ * During Partial Reconfiguration of a specific region, that region's bridge
+ will be used to gate the busses. Traffic to other regions is not affected.
+ * In some implementations, the FPGA Manager transparently handles gating the
+ buses, eliminating the need to show the hardware FPGA bridges in the
+ device tree.
+ * An FPGA image may create a set of reprogrammable regions, each having its
+ own bridge and its own split of the busses in the FPGA.
+
+FPGA Manager
+ * An FPGA Manager is a hardware block that programs an FPGA under the control
+ of a host processor.
+
+Base Image
+ * Also called the "static image"
+ * An FPGA image that is designed to do full reconfiguration of the FPGA.
+ * A base image may set up a set of partial reconfiguration regions that may
+ later be reprogrammed.
+
+ ---------------- ----------------------------------
+ | Host CPU | | FPGA |
+ | | | |
+ | ----| | ----------- -------- |
+ | | H | | |==>| Bridge0 |<==>| PRR0 | |
+ | | W | | | ----------- -------- |
+ | | | | | |
+ | | B |<=====>|<==| ----------- -------- |
+ | | R | | |==>| Bridge1 |<==>| PRR1 | |
+ | | I | | | ----------- -------- |
+ | | D | | | |
+ | | G | | | ----------- -------- |
+ | | E | | |==>| Bridge2 |<==>| PRR2 | |
+ | ----| | ----------- -------- |
+ | | | |
+ ---------------- ----------------------------------
+
+Figure 1: An FPGA set up with a base image that created three regions. Each
+region (PRR0-2) gets its own split of the busses that is independently gated by
+a soft logic bridge (Bridge0-2) in the FPGA. The contents of each PRR can be
+reprogrammed independently while the rest of the system continues to function.
+
+
+Sequence
+========
+
+When a DT overlay that targets an FPGA Region is applied, the FPGA Region will
+do the following:
+
+ 1. Disable appropriate FPGA bridges.
+ 2. Program the FPGA using the FPGA manager.
+ 3. Enable the FPGA bridges.
+ 4. The Device Tree overlay is accepted into the live tree.
+ 5. Child devices are populated.
+
+When the overlay is removed, the child nodes will be removed and the FPGA Region
+will disable the bridges.
+
+
+FPGA Region
+===========
+
+FPGA Regions represent FPGAs and FPGA PR regions in the device tree. An FPGA
+Region brings together the elements needed to program the FPGA on a running
+system and to add the child devices:
+
+ * FPGA Manager
+ * FPGA Bridges
+ * image-specific information needed to do the programming.
+ * child nodes
+
+The intended use is that a Device Tree overlay (DTO) can be used to reprogram an
+FPGA while an operating system is running.
+
+An FPGA Region that exists in the live Device Tree reflects the current state.
+If the live tree shows a "firmware-name" property or child nodes under an FPGA
+Region, the FPGA has already been programmed. A DTO that targets an FPGA Region
+and adds the "firmware-name" property is taken as a request to reprogram the
+FPGA. After reprogramming is successful, the overlay is accepted into the live
+tree.
+
+The base FPGA Region in the device tree represents the FPGA and supports full
+reconfiguration. It must include a phandle to an FPGA Manager. The base
+FPGA region will be the child of one of the hardware bridges (the bridge that
+allows register access) between the cpu and the FPGA. If there is more than
+one bridge to control during FPGA programming, the region will also contain a
+list of phandles to the additional hardware FPGA Bridges.
+
+For partial reconfiguration (PR), each PR region will have an FPGA Region.
+These FPGA regions are children of FPGA bridges which are then children of the
+base FPGA region. The "Full Reconfiguration to add PRRs" example below shows
+this.
+
+If an FPGA Region does not specify an FPGA Manager, it will inherit the FPGA
+Manager specified by its ancestor FPGA Region. This supports both the case
+where the same FPGA Manager is used for all of an FPGA as well as the case where
+a different FPGA Manager is used for each region.
+
+FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents
+shutting down bridges that are upstream from the other active regions while one
+region is getting reconfigured (see Figure 1 above). During PR, the FPGA's
+hardware bridges remain enabled. The PR regions' bridges will be FPGA bridges
+within the static image of the FPGA.
+
+Required properties:
+- compatible : should contain "fpga-region"
+- fpga-mgr : should contain a phandle to an FPGA Manager. Child FPGA Regions
+	inherit this property from their ancestor regions. An fpga-mgr property
+ in a region will override any inherited FPGA manager.
+- #address-cells, #size-cells, ranges : must be present to handle address space
+ mapping for child nodes.
+
+Optional properties:
+- firmware-name : should contain the name of an FPGA image file located on the
+ firmware search path. If this property shows up in a live device tree
+ it indicates that the FPGA has already been programmed with this image.
+	If this property is in an overlay targeting an FPGA region, it is a
+ request to program the FPGA with that image.
+- fpga-bridges : should contain a list of phandles to FPGA Bridges that must be
+ controlled during FPGA programming along with the parent FPGA bridge.
+ This property is optional if the FPGA Manager handles the bridges.
+	If the fpga-region is the child of an fpga-bridge, the list should not
+ contain the parent bridge.
+- partial-fpga-config : boolean, set if partial reconfiguration is to be done,
+ otherwise full reconfiguration is done.
+- external-fpga-config : boolean, set if the FPGA has already been configured
+ prior to OS boot up.
+- region-unfreeze-timeout-us : The maximum time in microseconds to wait for
+ bridges to successfully become enabled after the region has been
+ programmed.
+- region-freeze-timeout-us : The maximum time in microseconds to wait for
+ bridges to successfully become disabled before the region has been
+ programmed.
+- child nodes : devices in the FPGA after programming.
+
+In the example below, when an overlay is applied targeting fpga-region0,
+fpga_mgr is used to program the FPGA. Two bridges are controlled during
+programming: the parent fpga_bridge0 and fpga_bridge1. Because the region is
+the child of fpga_bridge0, only fpga_bridge1 needs to be specified in the
+fpga-bridges property. During programming, these bridges are disabled, the
+firmware specified in the overlay is loaded to the FPGA using the FPGA manager
+specified in the region. If FPGA programming succeeds, the bridges are
+reenabled and the overlay makes it into the live device tree. The child devices
+are then populated. If FPGA programming fails, the bridges are left disabled
+and the overlay is rejected. The overlay's ranges property maps the lwhps
+bridge's region (0xff200000) and the hps bridge's region (0xc0000000) for use by
+the two child devices.
+
+Example:
+Base tree contains:
+
+ fpga_mgr: fpga-mgr@ff706000 {
+ compatible = "altr,socfpga-fpga-mgr";
+ reg = <0xff706000 0x1000
+ 0xffb90000 0x20>;
+ interrupts = <0 175 4>;
+ };
+
+ fpga_bridge0: fpga-bridge@ff400000 {
+ compatible = "altr,socfpga-lwhps2fpga-bridge";
+ reg = <0xff400000 0x100000>;
+ resets = <&rst LWHPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ fpga_region0: fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr>;
+ };
+ };
+
+ fpga_bridge1: fpga-bridge@ff500000 {
+ compatible = "altr,socfpga-hps2fpga-bridge";
+ reg = <0xff500000 0x10000>;
+ resets = <&rst HPS2FPGA_RESET>;
+ clocks = <&l4_main_clk>;
+ };
+
+Overlay contains:
+
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "soc_system.rbf";
+ fpga-bridges = <&fpga_bridge1>;
+ ranges = <0x20000 0xff200000 0x100000>,
+ <0x0 0xc0000000 0x20000000>;
+
+ gpio@10040 {
+ compatible = "altr,pio-1.0";
+ reg = <0x10040 0x20>;
+ altr,gpio-bank-width = <4>;
+ #gpio-cells = <2>;
+ clocks = <2>;
+ gpio-controller;
+ };
+
+ onchip-memory {
+ device_type = "memory";
+ compatible = "altr,onchipmem-15.1";
+ reg = <0x0 0x10000>;
+ };
+ };
+ };
+};
+
+
+Supported Use Models
+====================
+
+In all cases the live DT must have the FPGA Manager, FPGA Bridges (if any), and
+an FPGA Region. The target of the Device Tree Overlay is the FPGA Region. Some
+uses are specific to an FPGA device.
+
+ * No FPGA Bridges
+ In this case, the FPGA Manager which programs the FPGA also handles the
+ bridges behind the scenes. No FPGA Bridge devices are needed for full
+ reconfiguration.
+
+ * Full reconfiguration with hardware bridges
+ In this case, there are hardware bridges between the processor and FPGA that
+ need to be controlled during full reconfiguration. Before the overlay is
+   applied, the live DT must include the FPGA Manager, FPGA Bridges, and an
+ FPGA Region. The FPGA Region is the child of the bridge that allows
+ register access to the FPGA. Additional bridges may be listed in a
+ fpga-bridges property in the FPGA region or in the device tree overlay.
+
+ * Partial reconfiguration with bridges in the FPGA
+   In this case, the FPGA will have one or more PRRs that may be programmed
+ separately while the rest of the FPGA can remain active. To manage this,
+ bridges need to exist in the FPGA that can gate the buses going to each FPGA
+ region while the buses are enabled for other sections. Before any partial
+ reconfiguration can be done, a base FPGA image must be loaded which includes
+   PRRs with FPGA bridges. The device tree should have an FPGA region for each
+ PRR.
+
+Device Tree Examples
+====================
+
+The intention of this section is to give some simple examples, focusing on
+the placement of the elements detailed above, especially:
+ * FPGA Manager
+ * FPGA Bridges
+ * FPGA Region
+ * ranges
+ * target-path or target
+
+For the purposes of this section, I'm dividing the Device Tree into two parts,
+each with its own requirements. The two parts are:
+ * The live DT prior to the overlay being added
+ * The DT overlay
+
+The live Device Tree must contain an FPGA Region, an FPGA Manager, and any FPGA
+Bridges. The FPGA Region's "fpga-mgr" property specifies the manager by phandle
+to handle programming the FPGA. If the FPGA Region is the child of another FPGA
+Region, the parent's FPGA Manager is used. If FPGA Bridges need to be involved,
+they are specified in the FPGA Region by the "fpga-bridges" property. During
+FPGA programming, the FPGA Region will disable the bridges that are in its
+"fpga-bridges" list and will re-enable them after FPGA programming has
+succeeded.
+
+The Device Tree Overlay will contain:
+ * "target-path" or "target"
+   The insertion point where the contents of the overlay will go into the
+ live tree. target-path is a full path, while target is a phandle.
+ * "ranges"
+ The address space mapping from processor to FPGA bus(ses).
+ * "firmware-name"
+ Specifies the name of the FPGA image file on the firmware search
+ path. The search path is described in the firmware class documentation.
+ * "partial-fpga-config"
+   This property is a boolean and should be present if partial reconfiguration
+ is to be done.
+ * child nodes corresponding to hardware that will be loaded in this region of
+ the FPGA.
+
+Device Tree Example: Full Reconfiguration without Bridges
+=========================================================
+
+Live Device Tree contains:
+ fpga_mgr0: fpga-mgr@f8007000 {
+ compatible = "xlnx,zynq-devcfg-1.0";
+ reg = <0xf8007000 0x100>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 8 4>;
+ clocks = <&clkc 12>;
+ clock-names = "ref_clk";
+ syscon = <&slcr>;
+ };
+
+ fpga_region0: fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr0>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "zynq-gpio.bin";
+
+ gpio1: gpio@40000000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = <0x40000000 0x10000>;
+ gpio-controller;
+ #gpio-cells = <0x2>;
+ xlnx,gpio-width= <0x6>;
+ };
+ };
+};
+
+Device Tree Example: Full Reconfiguration to add PRRs
+======================================================
+
+The base FPGA Region is specified similarly to the first example above.
+
+This example programs the FPGA to have two regions that can later be partially
+configured. Each region has its own bridge in the FPGA fabric.
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "base.rbf";
+
+ fpga-bridge@4400 {
+ compatible = "altr,freeze-bridge";
+ reg = <0x4400 0x10>;
+
+ fpga_region1: fpga-region1 {
+ compatible = "fpga-region";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+ };
+
+ fpga-bridge@4420 {
+ compatible = "altr,freeze-bridge";
+ reg = <0x4420 0x10>;
+
+ fpga_region2: fpga-region2 {
+ compatible = "fpga-region";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ ranges;
+ };
+ };
+ };
+ };
+};
+
+Device Tree Example: Partial Reconfiguration
+============================================
+
+This example reprograms one of the PRRs set up in the previous example.
+
+The sequence that occurs when this overlay is applied is similar to the above;
+the only differences are that the FPGA is partially reconfigured due to the
+"partial-fpga-config" boolean and the only bridge that is controlled during
+programming is the FPGA-based bridge of fpga_region1.
+
+/dts-v1/ /plugin/;
+/ {
+ fragment@0 {
+ target = <&fpga_region1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ firmware-name = "soc_image2.rbf";
+ partial-fpga-config;
+
+ gpio@10040 {
+ compatible = "altr,pio-1.0";
+ reg = <0x10040 0x20>;
+ clocks = <0x2>;
+ altr,gpio-bank-width = <0x4>;
+ resetvalue = <0x0>;
+ #gpio-cells = <0x2>;
+ gpio-controller;
+ };
+ };
+ };
+};
+
+Constraints
+===========
+
+It is beyond the scope of this document to fully describe all the FPGA design
+constraints required to make partial reconfiguration work[1] [2] [3], but a few
+deserve quick mention.
+
+A persona must have boundary connections that line up with those of the partition
+or region it is designed to go into.
+
+During programming, transactions through those connections must be stopped and
+the connections must be held at a fixed logic level. This can be achieved by
+FPGA Bridges that exist on the FPGA fabric prior to the partial reconfiguration.
+
+--
+[1] www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug_partrecon.pdf
+[2] tspace.library.utoronto.ca/bitstream/1807/67932/1/Byma_Stuart_A_201411_MAS_thesis.pdf
+[3] http://www.xilinx.com/support/documentation/sw_manuals/xilinx14_1/ug702.pdf
diff --git a/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt b/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt
deleted file mode 100644
index c809acb9c71b..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-sx150x.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-SEMTECH SX150x GPIO expander bindings
-
-
-Required properties:
-
-- compatible: should be "semtech,sx1506q",
- "semtech,sx1508q",
- "semtech,sx1509q",
- "semtech,sx1502q".
-
-- reg: The I2C slave address for this device.
-
-- interrupt-parent: phandle of the parent interrupt controller.
-
-- interrupts: Interrupt specifier for the controllers interrupt.
-
-- #gpio-cells: Should be 2. The first cell is the GPIO number and the
- second cell is used to specify optional parameters:
- bit 0: polarity (0: normal, 1: inverted)
-
-- gpio-controller: Marks the device as a GPIO controller.
-
-- interrupt-controller: Marks the device as a interrupt controller.
-
-The GPIO expander can optionally be used as an interrupt controller, in
-which case it uses the default two cell specifier as described in
-Documentation/devicetree/bindings/interrupt-controller/interrupts.txt.
-
-Example:
-
- i2c_gpio_expander@20{
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "semtech,sx1506q";
- reg = <0x20>;
- interrupt-parent = <&gpio_1>;
- interrupts = <16 0>;
-
- gpio-controller;
- interrupt-controller;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio_oxnas.txt b/Documentation/devicetree/bindings/gpio/gpio_oxnas.txt
index 928ed4f43907..966514744df4 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_oxnas.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio_oxnas.txt
@@ -3,7 +3,7 @@
Please refer to gpio.txt for generic information regarding GPIO bindings.
Required properties:
- - compatible: "oxsemi,ox810se-gpio"
+ - compatible: "oxsemi,ox810se-gpio" or "oxsemi,ox820-gpio"
- reg: Base address and length for the device.
- interrupts: The port interrupt shared by all pins.
- gpio-controller: Marks the port as GPIO controller.
diff --git a/Documentation/devicetree/bindings/hwmon/mcp3021.txt b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
new file mode 100644
index 000000000000..294318ba6914
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
@@ -0,0 +1,21 @@
+mcp3021 properties
+
+Required properties:
+- compatible: Must be one of the following:
+ - "microchip,mcp3021" for mcp3021
+ - "microchip,mcp3221" for mcp3221
+- reg: I2C address
+
+Optional properties:
+
+- reference-voltage-microvolt
+ Reference voltage in microvolt (uV)
+
+Example:
+
+mcp3021@4d {
+ compatible = "microchip,mcp3021";
+ reg = <0x4d>;
+
+ reference-voltage-microvolt = <4500000>; /* 4.5 V */
+};
diff --git a/Documentation/devicetree/bindings/hwmon/tmp108.txt b/Documentation/devicetree/bindings/hwmon/tmp108.txt
new file mode 100644
index 000000000000..8c4b10df86d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/tmp108.txt
@@ -0,0 +1,14 @@
+TMP108 temperature sensor
+-------------------------
+
+This device supports I2C only.
+
+Required node properties:
+- compatible : "ti,tmp108"
+- reg : the I2C address of the device. This is 0x48, 0x49, 0x4a, or 0x4b.
+
+Example:
+ tmp108@48 {
+ compatible = "ti,tmp108";
+ reg = <0x48>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index fbbad6446741..df720ca00fcf 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -39,11 +39,13 @@ dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
dlg,da9063 DA9063: system PMIC for quad-core application processors
domintech,dmard09 DMARD09: 3-axis Accelerometer
+domintech,dmard10 DMARD10: 3-axis Accelerometer
epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma7660 MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
fsl,mpl3115 MPL3115: Absolute Digital Pressure Sensor
fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller
@@ -57,6 +59,7 @@ maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
mc,rv3029c2 Real Time Clock Module with I2C-Bus
mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
+memsic,mxc6225 MEMSIC 2-axis 8-bit digital accelerometer
microchip,mcp4531-502 Microchip 7-bit Single I2C Digital Potentiometer (5k)
microchip,mcp4531-103 Microchip 7-bit Single I2C Digital Potentiometer (10k)
microchip,mcp4531-503 Microchip 7-bit Single I2C Digital Potentiometer (50k)
@@ -121,6 +124,11 @@ microchip,mcp4662-502 Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem
microchip,mcp4662-103 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
microchip,mcp4662-503 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
microchip,mcp4662-104 Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,tc654 PWM Fan Speed Controller With Fan Fault Detection
+microchip,tc655 PWM Fan Speed Controller With Fan Fault Detection
+miramems,da226 MiraMEMS DA226 2-axis 14-bit digital accelerometer
+miramems,da280 MiraMEMS DA280 3-axis 14-bit digital accelerometer
+miramems,da311 MiraMEMS DA311 3-axis 12-bit digital accelerometer
national,lm63 Temperature sensor with integrated fan control
national,lm75 I2C TEMP SENSOR
national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
@@ -146,6 +154,7 @@ ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
sgx,vz89x SGX Sensortech VZ89X Sensors
sii,s35390a 2-wire CMOS real-time clock
+silabs,si7020 Relative Humidity and Temperature Sensors
skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
st,24c256 i2c serial eeprom (24cxx)
st,m41t00 Serial real-time clock (RTC)
diff --git a/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
new file mode 100644
index 000000000000..27544bdd4478
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
@@ -0,0 +1,54 @@
+Bindings for ADC envelope detector using a DAC and a comparator
+
+The DAC is used to find the peak level of an alternating voltage input
+signal by a binary search using the output of a comparator wired to
+an interrupt pin. Like so:
+ _
+ | \
+ input +------>-------|+ \
+ | \
+ .-------. | }---.
+ | | | / |
+ | dac|-->--|- / |
+ | | |_/ |
+ | | |
+ | | |
+ | irq|------<-------'
+ | |
+ '-------'
+
+Required properties:
+- compatible: Should be "axentia,tse850-envelope-detector"
+- io-channels: Channel node of the dac to be used for comparator input.
+- io-channel-names: Should be "dac".
+- interrupt specification for one client interrupt,
+ see ../../interrupt-controller/interrupts.txt for details.
+- interrupt-names: Should be "comp".
+
+Example:
+
+ &i2c {
+ dpot: mcp4651-104@28 {
+ compatible = "microchip,mcp4651-104";
+ reg = <0x28>;
+ #io-channel-cells = <1>;
+ };
+ };
+
+ dac: dac {
+ compatible = "dpot-dac";
+ vref-supply = <&reg_3v3>;
+ io-channels = <&dpot 0>;
+ io-channel-names = "dpot";
+ #io-channel-cells = <1>;
+ };
+
+ envelope-detector {
+ compatible = "axentia,tse850-envelope-detector";
+ io-channels = <&dac 0>;
+ io-channel-names = "dac";
+
+ interrupt-parent = <&gpio>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "comp";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
new file mode 100644
index 000000000000..49ed82e89870
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -0,0 +1,83 @@
+STMicroelectronics STM32 ADC device driver
+
+STM32 ADC is a successive approximation analog-to-digital converter.
+It has several multiplexed input channels. Conversions can be performed
+in single, continuous, scan or discontinuous mode. Result of the ADC is
+stored in a left-aligned or right-aligned 32-bit data register.
+Conversions can be launched in software or using hardware triggers.
+
+The analog watchdog feature allows the application to detect if the input
+voltage goes beyond the user-defined, higher or lower thresholds.
+
+Each STM32 ADC block can have up to 3 ADC instances.
+
+Each instance supports two contexts to manage conversions, each one has its
+own configurable sequence and trigger:
+- regular conversion can be done in sequence, running in background
+- injected conversions have higher priority, and so have the ability to
+  interrupt the regular conversion sequence (triggered either in SW or HW).
+  The regular sequence is resumed if it has been interrupted.
+
+Contents of a stm32 adc root node:
+-----------------------------------
+Required properties:
+- compatible: Should be "st,stm32f4-adc-core".
+- reg: Offset and length of the ADC block register set.
+- interrupts: Must contain the interrupt for ADC block.
+- clocks: Clock for the analog circuitry (common to all ADCs).
+- clock-names: Must be "adc".
+- interrupt-controller: Identifies the controller node as interrupt-parent
+- vref-supply: Phandle to the vref input analog reference voltage.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- A pinctrl state named "default" for each ADC channel may be defined to set
+  the inX ADC pins to analog-input mode on the external pins.
+
+Contents of a stm32 adc child node:
+-----------------------------------
+An ADC block node should contain at least one subnode, representing an
+ADC instance available on the machine.
+
+Required properties:
+- compatible: Should be "st,stm32f4-adc".
+- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
+- clocks: Input clock private to this ADC instance.
+- interrupt-parent: Phandle to the parent interrupt controller.
+- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
+ 2 for adc@200).
+- st,adc-channels: List of single-ended channels muxed for this ADC.
+ It can have up to 16 channels, numbered from 0 to 15 (resp. for in0..in15).
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
+ Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+Example:
+ adc: adc@40012000 {
+ compatible = "st,stm32f4-adc-core";
+ reg = <0x40012000 0x400>;
+ interrupts = <18>;
+ clocks = <&rcc 0 168>;
+ clock-names = "adc";
+ vref-supply = <&reg_vref>;
+ interrupt-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc3_in8_pin>;
+
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "st,stm32f4-adc";
+ #io-channel-cells = <1>;
+ reg = <0x0>;
+ clocks = <&rcc 0 168>;
+ interrupt-parent = <&adc>;
+ interrupts = <0>;
+ st,adc-channels = <8>;
+ };
+ ...
+ other adc child nodes follow...
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
index 9ed2315781e4..3d25011f0c99 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible: Should be "ti,adc141s626" or "ti,adc161s626"
- reg: spi chip select number for the device
+ - vdda-supply: supply voltage to VDDA pin
Recommended properties:
- spi-max-frequency: Definition as per
@@ -11,6 +12,7 @@ Recommended properties:
Example:
adc@0 {
compatible = "ti,adc161s626";
+ vdda-supply = <&vdda_fixed>;
reg = <0>;
spi-max-frequency = <4300000>;
};
diff --git a/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
new file mode 100644
index 000000000000..fdf47a01bfef
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
@@ -0,0 +1,41 @@
+Bindings for DAC emulation using a digital potentiometer
+
+It is assumed that the dpot is used as a voltage divider between the
+current dpot wiper setting and the maximum resistance of the dpot. The
+divided voltage is provided by a vref regulator.
+
+ .------.
+ .-----------. | |
+ | vref |--' .---.
+ | regulator |--. | |
+ '-----------' | | d |
+ | | p |
+ | | o | wiper
+ | | t |<---------+
+ | | |
+ | '---' dac output voltage
+ | |
+ '------+------------+
+
+Required properties:
+- compatible: Should be "dpot-dac"
+- vref-supply: The regulator supplying the voltage divider.
+- io-channels: Channel node of the dpot to be used for the voltage division.
+- io-channel-names: Should be "dpot".
+
+Example:
+
+ &i2c {
+ dpot: mcp4651-503@28 {
+ compatible = "microchip,mcp4651-503";
+ reg = <0x28>;
+ #io-channel-cells = <1>;
+ };
+ };
+
+ dac {
+ compatible = "dpot-dac";
+ vref-supply = <&reg_3v3>;
+ io-channels = <&dpot 0>;
+ io-channel-names = "dpot";
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/mcp4725.txt b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
new file mode 100644
index 000000000000..1bc6c093fbfe
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
@@ -0,0 +1,35 @@
+Microchip mcp4725 and mcp4726 DAC device driver
+
+Required properties:
+ - compatible: Must be "microchip,mcp4725" or "microchip,mcp4726"
+ - reg: Should contain the DAC I2C address
+ - vdd-supply: Phandle to the Vdd power supply. This supply is used as a
+ voltage reference on mcp4725. It is used as a voltage reference on
+ mcp4726 if there is no vref-supply specified.
+
+Optional properties (valid only for mcp4726):
+ - vref-supply: Optional phandle to the Vref power supply. Vref pin is
+ used as a voltage reference when this supply is specified.
+ - microchip,vref-buffered: Boolean to enable buffering of the external
+ Vref pin. This boolean is not valid without the vref-supply. Quoting
+ the datasheet: This is offered in cases where the reference voltage
+ does not have the current capability not to drop its voltage when
+ connected to the internal resistor ladder circuit.
+
+Examples:
+
+ /* simple mcp4725 */
+ mcp4725@60 {
+ compatible = "microchip,mcp4725";
+ reg = <0x60>;
+ vdd-supply = <&vdac_vdd>;
+ };
+
+ /* mcp4726 with the buffered external reference voltage */
+ mcp4726@60 {
+ compatible = "microchip,mcp4726";
+ reg = <0x60>;
+ vdd-supply = <&vdac_vdd>;
+ vref-supply = <&vdac_vref>;
+ microchip,vref-buffered;
+ };
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
new file mode 100644
index 000000000000..b0d3b59966bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -0,0 +1,46 @@
+Invensense MPU-3050 Gyroscope device tree bindings
+
+Required properties:
+ - compatible : should be "invensense,mpu3050"
+ - reg : the I2C address of the sensor
+
+Optional properties:
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : interrupt mapping for the trigger interrupt from the
+ internal oscillator. The following IRQ modes are supported:
+ IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
+ IRQ_TYPE_LEVEL_LOW. The driver should detect and configure the hardware
+ for the desired interrupt type.
+ - vdd-supply : supply regulator for the main power voltage.
+ - vlogic-supply : supply regulator for the signal voltage.
+ - mount-matrix : see iio/mount-matrix.txt
+
+Optional subnodes:
+ - The MPU-3050 will pass through and forward the I2C signals from the
+   incoming I2C bus, or alternatively drive traffic to a slave device (usually
+   an accelerometer) on its own initiative. Therefore it supports an i2c-gate
+   subnode. For details see: i2c/i2c-gate.txt
+
+Example:
+
+mpu3050@68 {
+ compatible = "invensense,mpu3050";
+ reg = <0x68>;
+ interrupt-parent = <&foo>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&bar>;
+ vlogic-supply = <&baz>;
+
+ /* External I2C interface */
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fnord@18 {
+ compatible = "fnord";
+ reg = <0x18>;
+ interrupt-parent = <&foo>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
new file mode 100644
index 000000000000..b20ab9c12080
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -0,0 +1,22 @@
+* HTS221 STM humidity + temperature sensor
+
+Required properties:
+- compatible: should be "st,hts221"
+- reg: i2c address of the sensor / spi cs line
+
+Optional properties:
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ. It should be configured with
+ flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt
+ client node bindings.
+
+Example:
+
+hts221@5f {
+ compatible = "st,hts221";
+ reg = <0x5f>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
new file mode 100644
index 000000000000..425ab459e209
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -0,0 +1,28 @@
+* ISL 29018/29023/29035 I2C ALS, Proximity, and Infrared sensor
+
+Required properties:
+
+ - compatible: Should be one of
+ "isil,isl29018"
+ "isil,isl29023"
+ "isil,isl29035"
+ - reg: the I2C address of the device
+
+Optional properties:
+
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client
+ node bindings.
+
+ - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
new file mode 100644
index 000000000000..8e2066c83f70
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -0,0 +1,26 @@
+* TAOS TSL 2580/2581/2583 ALS sensor
+
+Required properties:
+
+ - compatible: Should be one of
+ "amstaos,tsl2580"
+ "amstaos,tsl2581"
+ "amstaos,tsl2583"
+ - reg: the I2C address of the device
+
+Optional properties:
+
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client
+ node bindings.
+
+ - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+tsl2581@29 {
+ compatible = "amstaos,tsl2581";
+ reg = <0x29>;
+};
diff --git a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
new file mode 100644
index 000000000000..b9b621e94cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
@@ -0,0 +1,30 @@
+* Texas Instruments LMP91000 potentiostat
+
+http://www.ti.com/lit/ds/symlink/lmp91000.pdf
+
+Required properties:
+
+ - compatible: should be "ti,lmp91000"
+ - reg: the I2C address of the device
+ - io-channels: the phandle of the iio provider
+
+ - ti,external-tia-resistor: if the property ti,tia-gain-ohm is not defined this
+ needs to be set to signal that an external resistor value is being used.
+
+Optional properties:
+
+ - ti,tia-gain-ohm: ohm value of the internal resistor for the transimpedance
+ amplifier. Must be 2750, 3500, 7000, 14000, 35000, 120000, or 350000 ohms.
+
+ - ti,rload-ohm: ohm value of the internal resistor load applied to the gas
+ sensor. Must be 10, 33, 50, or 100 (default) ohms.
+
+Example:
+
+lmp91000@48 {
+ compatible = "ti,lmp91000";
+ reg = <0x48>;
+	ti,tia-gain-ohm = <7000>;
+	ti,rload-ohm = <100>;
+ io-channels = <&adc>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index e41fe340162b..c040c9ad1889 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -42,6 +42,7 @@ Accelerometers:
- st,lsm303agr-accel
- st,lis2dh12-accel
- st,h3lis331dl-accel
+- st,lng2dm-accel
Gyroscopes:
- st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
index fbbacd958240..6f28969af9dc 100644
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
Required properties:
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
- reg: physical address and size of the registers
Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
Example:
ibt@1e789140 {
- compatible = "aspeed,ast2400-bt-bmc";
+ compatible = "aspeed,ast2400-ibt-bmc";
reg = <0x1e789140 0x18>;
interrupts = <8>;
};
diff --git a/Documentation/devicetree/bindings/leds/pca963x.txt b/Documentation/devicetree/bindings/leds/pca963x.txt
index dafbe9931c2b..dfbdb123a9bf 100644
--- a/Documentation/devicetree/bindings/leds/pca963x.txt
+++ b/Documentation/devicetree/bindings/leds/pca963x.txt
@@ -7,6 +7,9 @@ Optional properties:
- nxp,totem-pole : use totem pole (push-pull) instead of open-drain (pca9632 defaults
to open-drain, newer chips to totem pole)
- nxp,hw-blink : use hardware blinking instead of software blinking
+- nxp,period-scale : In some configurations, the chip blinks faster than expected.
+ This parameter provides a scaling ratio (fixed point, decimal divided
+ by 1000) to compensate, e.g. 1300=1.3x and 750=0.75x.
Each led is represented as a sub-node of the nxp,pca963x device.
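As an illustrative sketch only (the I2C address, child LED layout and the scaling value are hypothetical), the new nxp,period-scale property sits alongside the other chip-level properties:

	pca9632: led-controller@62 {
		compatible = "nxp,pca9632";
		reg = <0x62>;
		/* hardware blinks ~1.3x faster than requested, so scale periods */
		nxp,period-scale = <1300>;

		red@0 {
			label = "red";
			reg = <0>;
		};
	};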
diff --git a/Documentation/devicetree/bindings/mfd/lp873x.txt b/Documentation/devicetree/bindings/mfd/lp873x.txt
index 52766c2035f7..ae9cf39bd101 100644
--- a/Documentation/devicetree/bindings/mfd/lp873x.txt
+++ b/Documentation/devicetree/bindings/mfd/lp873x.txt
@@ -7,6 +7,9 @@ Required properties:
- #gpio-cells: Should be two. The first cell is the pin number and
the second cell is used to specify flags.
See ../gpio/gpio.txt for more information.
+ - xxx-in-supply: Phandle to parent supply node of each regulator
+ populated under regulators node. xxx can be
+ buck0, buck1, ldo0 or ldo1.
- regulators: List of child nodes that specify the regulator
initialization data.
Example:
@@ -17,6 +20,11 @@ pmic: lp8733@60 {
gpio-controller;
#gpio-cells = <2>;
+ buck0-in-supply = <&vsys_3v3>;
+ buck1-in-supply = <&vsys_3v3>;
+ ldo0-in-supply = <&vsys_3v3>;
+ ldo1-in-supply = <&vsys_3v3>;
+
regulators {
lp8733_buck0: buck0 {
regulator-name = "lp8733-buck0";
diff --git a/Documentation/devicetree/bindings/mfd/max77620.txt b/Documentation/devicetree/bindings/mfd/max77620.txt
index 2ad44f7e4880..9c16d51cc15b 100644
--- a/Documentation/devicetree/bindings/mfd/max77620.txt
+++ b/Documentation/devicetree/bindings/mfd/max77620.txt
@@ -106,6 +106,18 @@ Here supported time periods by device in microseconds are as follows:
MAX77620 supports 40, 80, 160, 320, 640, 1280, 2560 and 5120 microseconds.
MAX20024 supports 20, 40, 80, 160, 320, 640, 1280 and 2540 microseconds.
+- maxim,power-ok-control: configure the MPOK (map power OK) bit.
+	1: enable POK (Power OK) control of the nRST_IO and GPIO1
+	   POK functions.
+	0: disable POK control.
+	If the property is missing, the MPOK bit is not configured.
+	When POK mapping is enabled for GPIO1/nRST_IO, the GPIO1/nRST_IO
+	pins are HIGH only while all rails that have POK control enabled
+	are HIGH; if any of those rails goes down, GPIO1/nRST_IO goes LOW.
+	This property is valid for the MAX20024 only.
+
For DT binding details of different sub modules like GPIO, pincontrol,
regulator, power, please refer respective device-tree binding document
under their respective sub-system directories.
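A minimal, hypothetical sketch (the I2C address, interrupt wiring and the enclosing node are placeholders) of how maxim,power-ok-control could be set on a MAX20024:

	pmic@3c {
		compatible = "maxim,max20024";
		reg = <0x3c>;
		interrupt-parent = <&gpio>;
		interrupts = <86 IRQ_TYPE_LEVEL_LOW>;

		/* let POK drive the nRST_IO and GPIO1 POK functions */
		maxim,power-ok-control = <1>;
	};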
diff --git a/Documentation/devicetree/bindings/mfd/tps65086.txt b/Documentation/devicetree/bindings/mfd/tps65086.txt
index d3705612a846..9cfa886fe99f 100644
--- a/Documentation/devicetree/bindings/mfd/tps65086.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65086.txt
@@ -23,7 +23,7 @@ Required properties:
defined below.
Optional regulator properties:
- - ti,regulator-step-size-25mv : This is applicable for buck[1,2,6], set this
+ - ti,regulator-step-size-25mv : This is applicable for buck[1-6], set this
if the regulator is factory set with a 25mv
step voltage mapping.
- ti,regulator-decay : This is applicable for buck[1-6], set this if
diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt b/Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt
new file mode 100644
index 000000000000..7f95ec400863
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt
@@ -0,0 +1,32 @@
+Amlogic SD / eMMC controller for S905/GXBB family SoCs
+
+The MMC 5.1 compliant host controller on Amlogic provides the
+interface for SD, eMMC and SDIO devices.
+
+This file documents the properties in addition to those available in
+the MMC core bindings, documented by mmc.txt.
+
+Required properties:
+- compatible : contains one of:
+ - "amlogic,meson-gx-mmc"
+ - "amlogic,meson-gxbb-mmc"
+ - "amlogic,meson-gxl-mmc"
+ - "amlogic,meson-gxm-mmc"
+- clocks : A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
+- clock-names: Should contain the following:
+ "core" - Main peripheral bus clock
+ "clkin0" - Parent clock of internal mux
+ "clkin1" - Other parent clock of internal mux
+  The driver has an internal mux clock which switches between clkin0 and clkin1 depending on the
+ clock rate requested by the MMC core.
+
+Example:
+
+ sd_emmc_a: mmc@70000 {
+ compatible = "amlogic,meson-gxbb-mmc";
+ reg = <0x0 0x70000 0x0 0x2000>;
+ interrupts = < GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, <&clkc CLKID_FCLK_DIV2>;
+ clock-names = "core", "clkin0", "clkin1";
+ pinctrl-0 = <&emmc_pins>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt b/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
index be56d2bd474a..954561d09a8e 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
@@ -7,6 +7,15 @@ Required properties:
- compatible : Should be one of the following
"brcm,bcm2835-sdhci"
"brcm,sdhci-iproc-cygnus"
+ "brcm,sdhci-iproc"
+
+Use "brcm,bcm2835-sdhci" for the Raspberry Pi.
+
+Use "brcm,sdhci-iproc-cygnus" for Broadcom SDHCI controllers
+restricted to 32-bit host accesses to the SDHCI registers.
+
+Use "brcm,sdhci-iproc" for Broadcom SDHCI controllers that allow standard
+8-, 16- and 32-bit host accesses to the SDHCI registers.
- clocks : The clock feeding the SDHCI controller.
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
index ff611fa66871..e4ba92aa035e 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
@@ -8,11 +8,14 @@ Required properties:
- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
fallback. Examples with <soctype> are:
+ - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
- "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
+ - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
- "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
- "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
- "renesas,mmcif-r8a7793" for the MMCIF found in r8a7793 SoCs
- "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
+ - "renesas,mmcif-sh73a0" for the MMCIF found in sh73a0 SoCs
- clocks: reference to the functional clock
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
new file mode 100644
index 000000000000..750374fc9d94
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
@@ -0,0 +1,30 @@
+* Cadence SD/SDIO/eMMC Host Controller
+
+Required properties:
+- compatible: should be "cdns,sd4hc".
+- reg: offset and length of the register set for the device.
+- interrupts: a single interrupt specifier.
+- clocks: phandle to the input clock.
+
+Optional properties:
+For eMMC configuration, supported speed modes are not indicated by the SDHCI
+Capabilities Register. Instead, the following properties should be specified
+if supported. See mmc.txt for details.
+- mmc-ddr-1_8v
+- mmc-ddr-1_2v
+- mmc-hs200-1_8v
+- mmc-hs200-1_2v
+- mmc-hs400-1_8v
+- mmc-hs400-1_2v
+
+Example:
+ emmc: sdhci@5a000000 {
+ compatible = "cdns,sd4hc";
+ reg = <0x5a000000 0x400>;
+ interrupts = <0 78 4>;
+ clocks = <&clk 4>;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 485483a63d8c..0576264eab5e 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -17,6 +17,7 @@ Required properties:
"iface" - Main peripheral bus clock (PCLK/HCLK - AHB Bus clock) (required)
"core" - SDC MMC clock (MCLK) (required)
"bus" - SDCC bus voter clock (optional)
+ "xo" - TCXO clock (optional)
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci.txt b/Documentation/devicetree/bindings/mmc/sdhci.txt
new file mode 100644
index 000000000000..1c95a1a555c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci.txt
@@ -0,0 +1,13 @@
+Properties specific to SD host controllers. For properties shared by MMC
+host controllers, refer to the mmc bindings [1].
+
+ [1] Documentation/devicetree/bindings/mmc/mmc.txt
+
+Optional properties:
+- sdhci-caps-mask: The sdhci capabilities register is incorrect. This 64-bit
+  property corresponds to the bits in the sdhci capability register. If a bit
+  is set in the mask then that bit is incorrect in the register and should be
+  turned off, before applying sdhci-caps.
+- sdhci-caps: The sdhci capabilities register is incorrect. This 64-bit
+  property corresponds to the bits in the sdhci capability register. If a
+  bit is set in the property then that bit should be turned on.
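A sketch of how these two properties could be combined in a host controller node (the controller compatible, address and the particular capability bits are purely illustrative):

	mmc@12510000 {
		compatible = "vendor,example-sdhci";	/* hypothetical controller */
		reg = <0x12510000 0x100>;
		/* clear a capability bit that is wrong in hardware ... */
		sdhci-caps-mask = <0x0 0x00200000>;
		/* ... and force the intended capability bit on instead */
		sdhci-caps = <0x0 0x00100000>;
	};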
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 4e00e859e885..7fd17c3da116 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -43,6 +43,9 @@ Optional properties:
reset signal present internally in some host controller IC designs.
See Documentation/devicetree/bindings/reset/reset.txt for details.
+* reset-names: name of the reset line requested via the "resets" property.
+  Must be "reset". (It is used together with the "resets" property.)
+
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
@@ -56,8 +59,9 @@ Optional properties:
is specified and the ciu clock is specified then we'll try to set the ciu
clock to this at probe time.
-* clock-freq-min-max: Minimum and Maximum clock frequency for card output
+* clock-freq-min-max (DEPRECATED): Minimum and Maximum clock frequency for card output
clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default.
+ (Use the "max-frequency" instead of "clock-freq-min-max".)
* num-slots: specifies the number of slots supported by the controller.
The number of physical slots actually used could be equal or less than the
@@ -71,11 +75,6 @@ Optional properties:
* card-detect-delay: Delay in milli-seconds before detecting card after card
insert event. The default value is 0.
-* supports-highspeed (DEPRECATED): Enables support for high speed cards (up to 50MHz)
- (use "cap-mmc-highspeed" or "cap-sd-highspeed" instead)
-
-* broken-cd: as documented in mmc core bindings.
-
* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
specified we'll defer probe until we can find this regulator.
@@ -103,6 +102,8 @@ board specific portions as listed below.
interrupts = <0 75 0>;
#address-cells = <1>;
#size-cells = <0>;
+ resets = <&rst 20>;
+ reset-names = "reset";
};
[board specific internal DMA resources]
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 13df9c2399c3..a1650edfd2b7 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -11,8 +11,8 @@ optional bindings can be used.
Required properties:
- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
- "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
"renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
+ "renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC
"renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
"renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
"renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
index ba5ecc1041a5..2fefa1a44afd 100644
--- a/Documentation/devicetree/bindings/net/brcm,amac.txt
+++ b/Documentation/devicetree/bindings/net/brcm,amac.txt
@@ -2,11 +2,17 @@ Broadcom AMAC Ethernet Controller Device Tree Bindings
-------------------------------------------------------------
Required properties:
- - compatible: "brcm,amac" or "brcm,nsp-amac"
- - reg: Address and length of the GMAC registers,
- Address and length of the GMAC IDM registers
- - reg-names: Names of the registers. Must have both "amac_base" and
- "idm_base"
+ - compatible: "brcm,amac"
+ "brcm,nsp-amac"
+ "brcm,ns2-amac"
+ - reg: Address and length of the register set for the device. It
+ contains the information of registers in the same order as
+ described by reg-names
+ - reg-names: Names of the registers.
+ "amac_base": Address and length of the GMAC registers
+ "idm_base": Address and length of the GMAC IDM registers
+ "nicpm_base": Address and length of the NIC Port Manager
+ registers (required for Northstar2)
- interrupts: Interrupt number
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 8d40ab27bc8c..06bb7cc334c8 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -10,6 +10,7 @@ Required properties:
"renesas,can-r8a7793" if CAN controller is a part of R8A7793 SoC.
"renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
"renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
+ "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
"renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
"renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device.
"renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
@@ -24,11 +25,12 @@ Required properties:
- pinctrl-0: pin control group to be used for this controller.
- pinctrl-names: must be "default".
-Required properties for "renesas,can-r8a7795" compatible:
-In R8A7795 SoC, "clkp2" can be CANFD clock. This is a div6 clock and can be
-used by both CAN and CAN FD controller at the same time. It needs to be scaled
-to maximum frequency if any of these controllers use it. This is done using
-the below properties.
+Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
+compatible:
+In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
+and can be used by both CAN and CAN FD controller at the same time. It needs to
+be scaled to maximum frequency if any of these controllers use it. This is done
+using the below properties:
- assigned-clocks: phandle of clkp2(CANFD) clock.
- assigned-clock-rates: maximum frequency of this clock.
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
index 22a6f10bab05..93c3a6ae32f9 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -5,13 +5,14 @@ Required properties:
- compatible: Must contain one or more of the following:
- "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller.
- "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
+ - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3) compatible controller.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first, followed by the
family-specific and/or generic versions.
- reg: physical base address and size of the R-Car CAN FD register map.
-- interrupts: interrupt specifier for the Global & Channel interrupts
+- interrupts: interrupt specifiers for the Channel & Global interrupts
- clocks: phandles and clock specifiers for 3 clock inputs.
- clock-names: 3 clock input name strings: "fck", "canfd", "can_clk".
- pinctrl-0: pin control group to be used for this controller.
@@ -23,11 +24,12 @@ The name of the child nodes are "channel0" and "channel1" respectively. Each
child node supports the "status" property only, which is used to
enable/disable the respective channel.
-Required properties for "renesas,r8a7795-canfd" compatible:
-In R8A7795 SoC, canfd clock is a div6 clock and can be used by both CAN
-and CAN FD controller at the same time. It needs to be scaled to maximum
-frequency if any of these controllers use it. This is done using the
-below properties.
+Required properties for "renesas,r8a7795-canfd" and "renesas,r8a7796-canfd"
+compatible:
+In R8A7795 and R8A7796 SoCs, canfd clock is a div6 clock and can be used by both
+CAN and CAN FD controller at the same time. It needs to be scaled to maximum
+frequency if any of these controllers use it. This is done using the below
+properties:
- assigned-clocks: phandle of canfd clock.
- assigned-clock-rates: maximum frequency of this clock.
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 5ad439f30135..ebda7c93453a 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -20,8 +20,6 @@ Required properties:
- slaves : Specifies number for slaves
- active_slave : Specifies the slave to use for time stamping,
ethtool and SIOCGMIIPHY
-- cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds
-- cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds
Optional properties:
- ti,hwmods : Must be "cpgmac0"
@@ -35,7 +33,11 @@ Optional properties:
For example in dra72x-evm, pcf gpio has to be
driven low so that cpsw slave 0 and phy data
lines are connected via mux.
-
+- cpts_clock_mult : Numerator to convert input clock ticks into nanoseconds
+- cpts_clock_shift : Denominator to convert input clock ticks into nanoseconds
+			Mult and shift will be calculated based on the CPTS
+			rftclk frequency if the cpts_clock_shift and
+			cpts_clock_mult properties are not provided.
Slave Properties:
Required properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7629189398aa..b3dd6b40e0de 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -14,22 +14,42 @@ The properties described here are those specific to Marvell devices.
Additional required and optional properties can be found in dsa.txt.
Required properties:
-- compatible : Should be one of "marvell,mv88e6085",
+- compatible : Should be one of "marvell,mv88e6085" or
+ "marvell,mv88e6190"
- reg : Address on the MII bus for the switch.
Optional properties:
- reset-gpios : Should be a gpio specifier for a reset line
-
+- interrupt-parent : Parent interrupt controller
+- interrupts : Interrupt from the switch
+- interrupt-controller : Indicates the switch is itself an interrupt
+ controller. This is used for the PHY interrupts.
+- #interrupt-cells = <2>  : Controller uses two cells, number and flag
+- mdio                    : Container of the PHY and devices on the switch's
+                            MDIO bus
Example:
mdio {
#address-cells = <1>;
#size-cells = <0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
switch0: switch@0 {
compatible = "marvell,mv88e6085";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
};
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index e1d76812419c..05150957ecfd 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -9,10 +9,26 @@ The following properties are common to the Ethernet controllers:
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in ePAPR).
-- phy-mode: string, operation mode of the PHY interface; supported values are
- "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
- "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a
- de-facto standard property;
+- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
+ standard property; supported values are:
+ * "mii"
+ * "gmii"
+ * "sgmii"
+ * "qsgmii"
+ * "tbi"
+ * "rev-mii"
+ * "rmii"
+ * "rgmii" (RX and TX delays are added by the MAC when required)
+ * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the
+ MAC should not add the RX or TX delays in this case)
+ * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC
+ should not add an RX delay in this case)
+ * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC
+    should not add a TX delay in this case)
+ * "rtbi"
+ * "smii"
+ * "xgmii"
+ * "trgmii"
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
- phy-handle: phandle, specifies a reference to a node representing a PHY
device; this property is described in ePAPR and so preferred;
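For illustration (the MAC compatible, register range and PHY phandle are placeholders), a board whose PHY inserts both delays internally would describe the link as:

	ethernet@f0b60000 {
		compatible = "vendor,example-mac";	/* hypothetical MAC */
		reg = <0xf0b60000 0x10000>;
		phy-mode = "rgmii-id";	/* PHY adds RX and TX delays, MAC adds none */
		phy-handle = <&phy0>;
	};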
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
index 75d398bb1fbb..063c02da018a 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
@@ -1,7 +1,12 @@
Hisilicon hix5hd2 gmac controller
Required properties:
-- compatible: should be "hisilicon,hix5hd2-gmac".
+- compatible: should contain one of the following SoC strings:
+ * "hisilicon,hix5hd2-gemac"
+ * "hisilicon,hi3798cv200-gemac"
+  and one of the following version strings:
+ * "hisilicon,hisi-gemac-v1"
+ * "hisilicon,hisi-gemac-v2"
- reg: specifies base physical address(s) and size of the device registers.
The first region is the MAC register base and size.
The second region is external interface control register.
@@ -12,6 +17,16 @@ Required properties:
- phy-handle: see ethernet.txt [1].
- mac-address: see ethernet.txt [1].
- clocks: clock phandle and specifier pair.
+- clock-names: contain the clock names "mac_core" (required) and "mac_ifc" (optional).
+- resets: should contain the phandles to the MAC core reset signal (optional),
+  the MAC interface reset signal (optional)
+  and the PHY reset signal (optional).
+- reset-names: contain the reset signal names "mac_core" (optional),
+  "mac_ifc" (optional) and "phy" (optional).
+- hisilicon,phy-reset-delays-us: triplet of delays, required if a PHY reset signal is given.
+  The 1st cell is the reset pre-delay in microseconds.
+  The 2nd cell is the reset pulse width in microseconds.
+  The 3rd cell is the reset post-delay in microseconds.
- PHY subnode: inherits from phy binding [2]
@@ -20,15 +35,19 @@ Required properties:
Example:
gmac0: ethernet@f9840000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hi3798cv200-gemac", "hisilicon,hisi-gemac-v2";
reg = <0xf9840000 0x1000>,<0xf984300c 0x4>;
interrupts = <0 71 4>;
#address-cells = <1>;
#size-cells = <0>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
phy-handle = <&phy2>;
mac-address = [00 00 00 00 00 00];
- clocks = <&clock HIX5HD2_MAC0_CLK>;
+ clocks = <&crg HISTB_ETH0_MAC_CLK>, <&crg HISTB_ETH0_MACIF_CLK>;
+ clock-names = "mac_core", "mac_ifc";
+ resets = <&crg 0xcc 8>, <&crg 0xcc 10>, <&crg 0xcc 12>;
+ reset-names = "mac_core", "mac_ifc", "phy";
+ hisilicon,phy-reset-delays-us = <10000 10000 30000>;
phy2: ethernet-phy@2 {
reg = <2>;
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
index d4b7f2e49984..abfbeecbcf39 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
@@ -45,6 +45,12 @@ Required properties:
depends on the hardware user manual.
- port-mode-offset: is offset of port mode field for each port in dsaf. Its
value depends on the hardware user manual.
+- mc-mac-mask: mask of the multicast address; determines how each bit of the
+  multicast address is matched:
+  1 means the bit is matched precisely, i.e. the TCAM checks this bit of the
+  MAC address.
+  0 means the bit is matched fuzzily, i.e. the TCAM does not care about this
+  bit of the MAC address.
[1] Documentation/devicetree/bindings/net/phy.txt
@@ -74,10 +80,12 @@ dsaf0: dsa@c7000000 {
reg = 0;
phy-handle = <&phy0>;
serdes-syscon = <&serdes>;
+ mc-mac-mask = [ff f0 00 00 00 00];
};
port@1 {
reg = 1;
serdes-syscon = <&serdes>;
+ mc-mac-mask = [ff f0 00 00 00 00];
};
};
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index 73be8970815e..7aa840c8768d 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -1,7 +1,10 @@
-* Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+* Marvell Armada 370 / Armada XP / Armada 3700 Ethernet Controller (NETA)
Required properties:
-- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
+- compatible: could be one of the following:
+ "marvell,armada-370-neta"
+ "marvell,armada-xp-neta"
+ "marvell,armada-3700-neta"
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
- phy: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
and
- phy-handle: See ethernet.txt file in the same directory.
+ - phy-mode: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
index 8516929c7251..065e8bdb957d 100644
--- a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
+++ b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
@@ -3,7 +3,7 @@ Properties for an MDIO bus multiplexer controlled by a memory-mapped device
This is a special case of a MDIO bus multiplexer. A memory-mapped device,
like an FPGA, is used to control which child bus is connected. The mdio-mux
node must be a child of the memory-mapped device. The driver currently only
-supports devices with eight-bit registers.
+supports devices with 8, 16 or 32-bit registers.
Required properties in addition to the generic multiplexer properties:
@@ -11,7 +11,7 @@ Required properties in addition to the generic multiplexer properties:
- reg : integer, contains the offset of the register that controls the bus
multiplexer. The size field in the 'reg' property is the size of
- register, and must therefore be 1.
+ register, and must therefore be 1, 2, or 4.
- mux-mask : integer, contains an eight-bit mask that specifies which
bits in the register control the actual bus multiplexer. The
diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
index 99c7eb0a00c8..bdefefc66594 100644
--- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
+++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
@@ -6,22 +6,27 @@ Required properties:
Documentation/devicetree/bindings/net/phy.txt
Optional properties:
-- vsc8531,vddmac : The vddmac in mV.
+- vsc8531,vddmac	  : The vddmac in mV. Allowed values are listed
+			    in the first row of Table 1 (below).
+ This property is only used in combination
+ with the 'edge-slowdown' property.
+ Default value is 3300.
- vsc8531,edge-slowdown : % the edge should be slowed down relative to
- the fastest possible edge time. Native sign
- need not enter.
+ the fastest possible edge time.
Edge rate sets the drive strength of the MAC
- interface output signals. Changing the drive
- strength will affect the edge rate of the output
- signal. The goal of this setting is to help
- reduce electrical emission (EMI) by being able
- to reprogram drive strength and in effect slow
- down the edge rate if desired. Table 1 shows the
- impact to the edge rate per VDDMAC supply for each
- drive strength setting.
- Ref: Table:1 - Edge rate change below.
-
-Note: see dt-bindings/net/mscc-phy-vsc8531.h for applicable values
+ interface output signals. Changing the
+ drive strength will affect the edge rate of
+ the output signal. The goal of this setting
+ is to help reduce electrical emission (EMI)
+ by being able to reprogram drive strength
+ and in effect slow down the edge rate if
+ desired.
+ To adjust the edge-slowdown, the 'vddmac'
+ must be specified. Table 1 lists the
+ supported edge-slowdown values for a given
+ 'vddmac'.
+ Default value is 0%.
+ Ref: Table:1 - Edge rate change (below).
Table: 1 - Edge rate change
----------------------------------------------------------------|
@@ -29,23 +34,23 @@ Table: 1 - Edge rate change
| |
| 3300 mV 2500 mV 1800 mV 1500 mV |
|---------------------------------------------------------------|
-| Default Deafult Default Default |
+| 0% 0% 0% 0% |
| (Fastest) (recommended) (recommended) |
|---------------------------------------------------------------|
-| -2% -3% -5% -6% |
+| 2% 3% 5% 6% |
|---------------------------------------------------------------|
-| -4% -6% -9% -14% |
+| 4% 6% 9% 14% |
|---------------------------------------------------------------|
-| -7% -10% -16% -21% |
+| 7% 10% 16% 21% |
|(recommended) (recommended) |
|---------------------------------------------------------------|
-| -10% -14% -23% -29% |
+| 10% 14% 23% 29% |
|---------------------------------------------------------------|
-| -17% -23% -35% -42% |
+| 17% 23% 35% 42% |
|---------------------------------------------------------------|
-| -29% -37% -52% -58% |
+| 29% 37% 52% 58% |
|---------------------------------------------------------------|
-| -53% -63% -76% -77% |
+| 53% 63% 76% 77% |
| (slowest) |
|---------------------------------------------------------------|
@@ -54,5 +59,5 @@ Example:
vsc8531_0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0570";
vsc8531,vddmac = <3300>;
- vsc8531,edge-slowdown = <21>;
+ vsc8531,edge-slowdown = <7>;
};
diff --git a/Documentation/devicetree/bindings/net/oxnas-dwmac.txt b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt
new file mode 100644
index 000000000000..df0534e2eda1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt
@@ -0,0 +1,39 @@
+* Oxford Semiconductor OXNAS DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file stmmac.txt in the current directory with the
+following changes.
+
+Required properties on all platforms:
+
+- compatible: For the OX820 SoC, it should be :
+ - "oxsemi,ox820-dwmac" to select glue
+ - "snps,dwmac-3.512" to select IP version.
+
+- clocks: Should contain phandles to the following clocks
+- clock-names: Should contain the following:
+ - "stmmaceth" for the host clock - see stmmac.txt
+ - "gmac" for the peripheral gate clock
+
+- oxsemi,sys-ctrl: a phandle to the system controller syscon node
+
+Example :
+
+etha: ethernet@40400000 {
+ compatible = "oxsemi,ox820-dwmac", "snps,dwmac-3.512";
+ reg = <0x40400000 0x2000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ mac-address = [000000000000]; /* Filled in by U-Boot */
+ phy-mode = "rgmii";
+
+ clocks = <&stdclk CLK_820_ETHA>, <&gmacclk>;
+ clock-names = "gmac", "stmmaceth";
+ resets = <&reset RESET_MAC>;
+
+ /* Regmap for sys registers */
+ oxsemi,sys-ctrl = <&sys>;
+
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index bc1c3c8bf8fa..54749b60a466 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -35,6 +35,12 @@ Optional Properties:
- broken-turn-around: If set, indicates the PHY device does not correctly
release the turn around line low at the end of a MDIO transaction.
+- enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to
+ compensate for the board being designed with the lanes swapped.
+
+- eee-broken-modes: Bits to clear in the MDIO_AN_EEE_ADV register to
+ disable EEE broken modes.
+
Example:
ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index e77e167593db..309e37eb7c7c 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -13,3 +13,5 @@ Optional properties:
16-bit access only.
- power-gpios: GPIO to control the PWRDWN pin
- reset-gpios: GPIO to control the RESET pin
+- pxa-u16-align4 : Boolean, enable the workaround that forces all
+  u16 writes to be 32-bit aligned
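A sketch of a node using this workaround (the address range and interrupt number are hypothetical, for illustration only):

	ethernet@c0000000 {
		compatible = "smsc,lan91c111";
		reg = <0xc0000000 0x100000>;
		interrupts = <40>;
		/* PXA bus quirk: force u16 writes to be 32-bit aligned */
		pxa-u16-align4;
	};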
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 41b49e6075f5..128da752fec9 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -1,7 +1,7 @@
* STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
Required properties:
-- compatible: Should be "snps,dwmac-<ip_version>" "snps,dwmac"
+- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
For backwards compatibility: "st,spear600-gmac" is also supported.
- reg: Address and length of the register set for the device
- interrupt-parent: Should be the phandle for the interrupt controller
@@ -34,7 +34,13 @@ Optional properties:
platforms.
- tx-fifo-depth: See ethernet.txt file in the same directory
- rx-fifo-depth: See ethernet.txt file in the same directory
-- snps,pbl Programmable Burst Length
+- snps,pbl Programmable Burst Length (tx and rx)
+- snps,txpbl Tx Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA tx will use this value rather than snps,pbl.
+- snps,rxpbl Rx Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA rx will use this value rather than snps,pbl.
+- snps,no-pbl-x8 Don't multiply the pbl/txpbl/rxpbl values by 8.
+ For core rev < 3.50, don't multiply the values by 4.
- snps,aal Address-Aligned Beats
- snps,fixed-burst Program the DMA to use the fixed burst mode
- snps,mixed-burst Program the DMA to use the mixed burst mode
@@ -50,6 +56,8 @@ Optional properties:
- snps,ps-speed: port selection speed that can be passed to the core when
PCS is supported. For example, this is used in case of SGMII
and MAC2MAC connection.
+- snps,tso: this enables the TSO feature otherwise it will be managed by
+ MAC HW capability register. Only for GMAC4 and newer.
- AXI BUS Mode parameters: below the list of all the parameters to program the
AXI register inside the DMA module:
- snps,lpi_en: enable Low Power Interface
@@ -62,8 +70,6 @@ Optional properties:
- snps,fb: fixed-burst
- snps,mb: mixed-burst
- snps,rb: rebuild INCRx Burst
- - snps,tso: this enables the TSO feature otherwise it will be managed by
- MAC HW capability register.
- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
Examples:
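Purely as an illustrative sketch (phandles, addresses and interrupt numbers are placeholders, not taken from a real board), the per-direction burst-length properties described above might be combined like this:

	gmac0: ethernet@e0800000 {
		compatible = "snps,dwmac-3.70a", "snps,dwmac";
		reg = <0xe0800000 0x8000>;
		interrupt-parent = <&vic1>;
		interrupts = <24>;
		interrupt-names = "macirq";
		phy-mode = "gmii";

		snps,txpbl = <32>;	/* TX DMA burst length */
		snps,rxpbl = <8>;	/* RX DMA burst length */
		snps,no-pbl-x8;		/* do not multiply the values by 8 */
	};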
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 5d21141a68b5..85bf945b898f 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -9,6 +9,18 @@ Required properties:
- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
for applicable values
+Optional properties:
+ - ti,min-output-impedance - MAC Interface Impedance control to set
+ the programmable output impedance to
+ minimum value (35 ohms).
+ - ti,max-output-impedance - MAC Interface Impedance control to set
+ the programmable output impedance to
+ maximum value (70 ohms).
+
+Note: ti,min-output-impedance and ti,max-output-impedance are mutually
+ exclusive. When both properties are present ti,max-output-impedance
+ takes precedence.
+
Default child nodes are standard Ethernet PHY device
nodes as described in Documentation/devicetree/bindings/net/phy.txt
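A minimal sketch of a PHY node selecting the maximum output impedance (the MDIO address is arbitrary, and the required delay/FIFO properties are only indicated in the comment rather than spelled out):

	ethernet-phy@0 {
		reg = <0>;
		/* required ti,rx-internal-delay, ti,tx-internal-delay and
		 * ti,fifo-depth properties omitted for brevity */
		ti,max-output-impedance;	/* program 70 ohm output impedance */
	};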
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
index c421aba0a5bc..980b16df74c3 100644
--- a/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt
+++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
@@ -1,8 +1,8 @@
-Marvell 8897/8997 (sd8897/sd8997) SDIO devices
+Marvell 8897/8997 (sd8897/sd8997/pcie8997) SDIO/PCIE devices
------
-This node provides properties for controlling the marvell sdio wireless device.
-The node is expected to be specified as a child node to the SDIO controller that
+This node provides properties for controlling the Marvell SDIO/PCIE wireless device.
+The node is expected to be specified as a child node to the SDIO/PCIE controller that
connects the device to the system.
Required properties:
@@ -10,6 +10,8 @@ Required properties:
- compatible : should be one of the following:
* "marvell,sd8897"
* "marvell,sd8997"
+ * "pci11ab,2b42"
+ * "pci1b4b,2b42"
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt
new file mode 100644
index 000000000000..b7396c8c271c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt
@@ -0,0 +1,48 @@
+* Qualcomm Atheros ath9k wireless devices
+
+This node provides properties for configuring the ath9k wireless device. The
+node is expected to be specified as a child node of the PCI controller to
+which the wireless chip is connected.
+
+Required properties:
+- compatible: For PCI and PCIe devices this should be an identifier following
+ the format as defined in "PCI Bus Binding to Open Firmware"
+ Revision 2.1. One of the possible formats is "pciVVVV,DDDD"
+ where VVVV is the PCI vendor ID and DDDD is PCI device ID.
+ Typically QCA's PCI vendor ID 168c is used while the PCI device
+ ID depends on the chipset - see the following (possibly
+ incomplete) list:
+ - 0023 for AR5416
+ - 0024 for AR5418
+ - 0027 for AR9160
+ - 0029 for AR9220 and AR9223
+ - 002a for AR9280 and AR9283
+ - 002b for AR9285
+ - 002c for AR2427
+ - 002d for AR9227
+ - 002e for AR9287
+ - 0030 for AR9380, AR9381 and AR9382
+ - 0032 for AR9485
+ - 0033 for AR9580 and AR9590
+ - 0034 for AR9462
+ - 0036 for AR9565
+ - 0037 for AR9485
+- reg: Address and length of the register set for the device.
+
+Optional properties:
+- qca,no-eeprom: Indicates that there is no physical EEPROM connected to the
+ ath9k wireless chip (in this case the calibration /
+ EEPROM data will be loaded from userspace using the
+ kernel firmware loader).
+- mac-address: See ethernet.txt in the parent directory
+- local-mac-address: See ethernet.txt in the parent directory
+
+
+In this example, the node is defined as child node of the PCI controller:
+&pci0 {
+ wifi@168c,002d {
+ compatible = "pci168c,002d";
+ reg = <0x7000 0 0 0 0x1000>;
+ qca,no-eeprom;
+ };
+};
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
new file mode 100644
index 000000000000..6462e12d8de6
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
@@ -0,0 +1,17 @@
+Broadcom OTP memory controller
+
+Required Properties:
+- compatible: "brcm,ocotp" for the first generation Broadcom OTPC which is used
+ in Cygnus and supports 32-bit read/write. Use "brcm,ocotp-v2" for the second
+ generation Broadcom OTPC which is used in SoCs such as Stingray and supports
+ 64-bit read/write.
+- reg: Base address of the OTP controller.
+- brcm,ocotp-size: Amount of memory available, in 32 bit words
+
+Example:
+
+otp: otp@0301c800 {
+ compatible = "brcm,ocotp";
+ reg = <0x0301c800 0x2c>;
+ brcm,ocotp-size = <2048>;
+};
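+
+A sketch of a second generation controller node (the base address here is an
+illustrative placeholder, not taken from a datasheet):
+
+otp_v2: otp@3404c800 {
+	compatible = "brcm,ocotp-v2";
+	reg = <0x3404c800 0x2c>;
+	brcm,ocotp-size = <2048>;
+};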
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
new file mode 100644
index 000000000000..853b6a754644
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
@@ -0,0 +1,20 @@
+* NXP LPC18xx OTP memory
+
+Internal OTP (One Time Programmable) memory for NXP LPC18xx/43xx devices.
+
+Required properties:
+ - compatible: Should be "nxp,lpc1850-otp"
+ - reg: Must contain an entry with the physical base address and length
+ for each entry in reg-names.
+ - #address-cells: must be set to 1.
+ - #size-cells: must be set to 1.
+
+See nvmem.txt for more information.
+
+Example:
+ otp: otp@40045000 {
+ compatible = "nxp,lpc1850-otp";
+ reg = <0x40045000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index ee91cbdd95ee..9f5ca4457b5f 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -86,8 +86,14 @@ Optional properties:
Single entry is for target voltage and three entries are for <target min max>
voltages.
- Entries for multiple regulators must be present in the same order as
- regulators are specified in device's DT node.
+ Entries for multiple regulators shall be provided in the same field, separated
+ by angle brackets <>. The OPP binding doesn't provide any provisions to
+ relate the values to their power supplies or the order in which the supplies
+ need to be configured; that is left to the implementation-specific
+ binding.
+
+ Entries for all regulators shall be of the same size, i.e. either all use a
+ single value or triplets.
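+
+ As a sketch (the frequency and voltage values below are purely illustrative,
+ for a hypothetical platform with two supplies), an OPP entry using triplets
+ for both regulators could look like:
+
+	opp@1000000000 {
+		opp-hz = /bits/ 64 <1000000000>;
+		opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+				<960000 965000 975000>; /* Supply 1 */
+	};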
- opp-microvolt-<name>: Named opp-microvolt property. This is exactly similar to
the above opp-microvolt property, but allows multiple voltage ranges to be
@@ -104,10 +110,13 @@ Optional properties:
Should only be set if opp-microvolt is set for the OPP.
- Entries for multiple regulators must be present in the same order as
- regulators are specified in device's DT node. If this property isn't required
- for few regulators, then this should be marked as zero for them. If it isn't
- required for any regulator, then this property need not be present.
+ Entries for multiple regulators shall be provided in the same field, separated
+ by angle brackets <>. If current values aren't required for a regulator,
+ then it shall be filled with 0. If current values aren't required for any of
+ the regulators, then this field is not required. The OPP binding doesn't
+ provide any provisions to relate the values to their power supplies or the
+ order in which the supplies need to be configured; that is left to the
+ implementation-specific binding.
- opp-microamp-<name>: Named opp-microamp property. Similar to
opp-microvolt-<name> property, but for microamp instead.
@@ -386,10 +395,12 @@ Example 4: Handling multiple regulators
/ {
cpus {
cpu@0 {
- compatible = "arm,cortex-a7";
+ compatible = "vendor,cpu-type";
...
- cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
+ vcc0-supply = <&cpu_supply0>;
+ vcc1-supply = <&cpu_supply1>;
+ vcc2-supply = <&cpu_supply2>;
operating-points-v2 = <&cpu0_opp_table>;
};
};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
index ba67b39939c1..71aeda1ca055 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -26,13 +26,16 @@ Required properties:
- "sys"
- "legacy"
- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: Must include the following names
- "core"
- "mgmt"
- "mgmt-sticky"
- "pipe"
+ - "pm"
+ - "aclk"
+ - "pclk"
- pinctrl-names : The pin control state names
- pinctrl-0: The "default" pinctrl state
- #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
phys = <&pcie_phy>;
phy-names = "pcie-phy";
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
index 9da5ea234154..5fa73b9d20f5 100644
--- a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
@@ -1,4 +1,4 @@
-* Amlogic USB2 PHY
+* Amlogic Meson8b and GXBB USB2 PHY
Required properties:
- compatible: Depending on the platform this should be one of:
@@ -16,10 +16,10 @@ Optional properties:
Example:
-usb0_phy: usb_phy@0 {
+usb0_phy: usb-phy@c0000000 {
compatible = "amlogic,meson-gxbb-usb2-phy";
#phy-cells = <0>;
- reg = <0x0 0x0 0x0 0x20>;
+ reg = <0x0 0xc0000000 0x0 0x20>;
resets = <&reset RESET_USB_OTG>;
clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
clock-names = "usb_general", "usb";
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 1685821eea41..de1378b4efad 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -28,6 +28,20 @@ Required properties:
- reg: Should contain the register physical address and length for the
pin controller.
+- clocks: phandle to the clocks feeding the pin controller:
+ - "apb": the gated APB parent clock
+ - "hosc": the high frequency oscillator in the system
+ - "losc": the low frequency oscillator in the system
+
+Note: For backward compatibility reasons, the hosc and losc clocks are only
+required if you need to use the optional input-debounce property. Any new
+device tree should set them.
+
+Optional properties:
+ - input-debounce: Array of debouncing periods in microseconds. One period per
+ irq bank found in the controller. 0 if no setup required.
+
+
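+A minimal sketch of these properties inside the pin controller node (the clock
+phandles and debounce periods are illustrative, and a clock-names property
+carrying the names listed above is assumed):
+
+	clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>;
+	clock-names = "apb", "hosc", "losc";
+	input-debounce = <200 400>;
+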
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices.
@@ -37,6 +51,22 @@ pins it needs, and how they should be configured, with regard to muxer
configuration, drive strength and pullups. If one of these options is
not set, its actual value will be unspecified.
+This driver supports the generic pin multiplexing and configuration
+bindings. For details on each property, you can refer to
+./pinctrl-bindings.txt; a short sketch follows the property lists below.
+
+Required sub-node properties:
+ - pins
+ - function
+
+Optional sub-node properties:
+ - bias-disable
+ - bias-pull-up
+ - bias-pull-down
+ - drive-strength
+
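+A minimal sketch of such a sub-node (the pin and function names are
+illustrative and must match those exposed by the driver for the given SoC):
+
+	uart0_pins: uart0-pins {
+		pins = "PB22", "PB23";
+		function = "uart0";
+		bias-pull-up;
+	};
+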
+*** Deprecated pin configuration and multiplexing binding
+
Required subnode-properties:
- allwinner,pins: List of strings containing the pin name.
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
index b7a93e80a302..9a8a45d9d8ab 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
@@ -98,6 +98,8 @@ DRIVE_STRENGTH (3 << 5): indicate the drive strength of the pin using the
01 - Low
10 - Medium
11 - High
+OUTPUT (1 << 7): indicate this pin needs to be configured as an output.
+OUTPUT_VAL (1 << 8): output value (1 = high, 0 = low) - see the sketch after
+this list.
DEBOUNCE (1 << 16): indicate this pin needs debounce.
DEBOUNCE_VAL (0x3fff << 17): debounce value.
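+
+A sketch of a pin entry driving a GPIO high using these flags (the bank, pin
+number and the AT91_PINCTRL_OUTPUT / AT91_PINCTRL_OUTPUT_VAL() macros from
+<dt-bindings/pinctrl/at91.h> are assumptions for illustration; the raw values
+(1 << 7) and (1 << 8) can be used instead):
+
+	pinctrl_usb_pwr: usb-pwr {
+		atmel,pins =
+			<AT91_PIOA 21 AT91_PERIPH_GPIO
+			 (AT91_PINCTRL_OUTPUT | AT91_PINCTRL_OUTPUT_VAL(1))>;
+	};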
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index fe7fe0b03cfb..2392557ede27 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -7,6 +7,8 @@ Required properties for the root node:
"amlogic,meson8b-aobus-pinctrl"
"amlogic,meson-gxbb-periphs-pinctrl"
"amlogic,meson-gxbb-aobus-pinctrl"
+ "amlogic,meson-gxl-periphs-pinctrl"
+ "amlogic,meson-gxl-aobus-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt
index d6074321f730..09e81a95bbfd 100644
--- a/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/oxnas,pinctrl.txt
@@ -9,7 +9,7 @@ used for a specific device or function. This node represents configurations of
pins, optional function, and optional mux related configuration.
Required properties for pin controller node:
- - compatible: "oxsemi,ox810se-pinctrl"
+ - compatible: "oxsemi,ox810se-pinctrl" or "oxsemi,ox820-pinctrl"
- oxsemi,sys-ctrl: a phandle to the system controller syscon node
Required properties for pin configuration sub-nodes:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index b73c96d24f59..bf3f7b014724 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -97,6 +97,11 @@ For example:
};
== Pin controller devices ==
+Required properties: See the pin controller driver specific documentation
+
+Optional properties:
+#pinctrl-cells: Number of pin control cells in addition to the index within the
+ pin controller device instance
Pin controller devices should contain the pin configuration nodes that client
devices reference.
@@ -119,7 +124,8 @@ For example:
The contents of each of those pin configuration child nodes is defined
entirely by the binding for the individual pin controller device. There
-exists no common standard for this content.
+exists no common standard for this content. The pinctrl framework only
+provides generic helper bindings that the pin controller driver can use.
The pin configuration nodes need not be direct children of the pin controller
device; they may be grandchildren, for example. Whether this is legal, and
@@ -156,6 +162,42 @@ state_2_node_a {
pins = "mfio29", "mfio30";
};
+Optionally an alternative binding can be used if more suitable depending on the
+pin controller hardware. For hardware where there is a large number of identical
+pin controller instances, naming each pin and function can easily become
+unmaintainable. This is especially the case if the same controller is used for
+different pins and functions depending on the SoC revision and packaging.
+
+For cases like this, the pin controller driver may use pinctrl-pin-array helper
+binding with a hardware based index and a number of pin configuration values:
+
+pincontroller {
+ ... /* Standard DT properties for the device itself elided */
+ #pinctrl-cells = <2>;
+
+ state_0_node_a {
+ pinctrl-pin-array = <
+ 0 A_DELAY_PS(0) G_DELAY_PS(120)
+ 4 A_DELAY_PS(0) G_DELAY_PS(360)
+ ...
+ >;
+ };
+ ...
+};
+
+Above, #pinctrl-cells specifies the number of value cells in addition to the
+index of the registers. This is similar to the interrupts-extended binding with
+one exception: there is no need to specify the phandle for each entry, as it
+is already known because the defined pins are always children of the pin
+controller node. Further, having the phandle point to another pin controller
+would not currently work, as the pinctrl framework uses named modes to group
+pins for each pin control device.
+
+The index for pinctrl-pin-array must relate to the hardware for the pinctrl
+registers, and must not be a virtual index of pin instances. The reason for
+this is to avoid having to remap the index between the dts files and the pin
+controller driver when it changes.
+
== Generic pin configuration node content ==
Many data items that are represented in a pin configuration node are common
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
index caf297bee1fb..c28d4eb83b76 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
@@ -35,6 +35,15 @@ Optional properties:
- ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode.
Selection primary or secondary function associated to GPADC_START
and SYSEN2 pin/pad for DVFS2 interface
+- ti,palmas-override-powerhold: This is applicable for PMICs for which
+ GPIO7 is configured in POWERHOLD mode, which has higher priority
+ than the DEV_ON bit and keeps the PMIC supplies on even after the
+ DEV_ON bit is turned off. This property enables the driver to
+ override the POWERHOLD value on GPIO7 so as to turn off the PMIC in
+ power-off scenarios. When ti,palmas-override-powerhold is set, the
+ GPIO_7 field should never be muxed to anything else; it should be
+ set to POWERHOLD by default, and only in power-off scenarios will
+ the driver override the mux value.
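+
+ A minimal sketch (the label of the Palmas pinmux node is illustrative and
+ depends on the board dts):
+
+	&palmas_pinmux {
+		ti,palmas-override-powerhold;
+	};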
This binding uses the following generic properties as defined in
pinctrl-bindings.txt:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
new file mode 100644
index 000000000000..bf76867168e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
@@ -0,0 +1,74 @@
+SEMTECH SX150x GPIO expander bindings
+
+Please refer to pinctrl-bindings.txt, ../gpio/gpio.txt, and
+../interrupt-controller/interrupts.txt for generic information regarding
+pin controller, GPIO, and interrupt bindings.
+
+Required properties:
+- compatible: should be one of :
+ "semtech,sx1501q",
+ "semtech,sx1502q",
+ "semtech,sx1503q",
+ "semtech,sx1504q",
+ "semtech,sx1505q",
+ "semtech,sx1506q",
+ "semtech,sx1507q",
+ "semtech,sx1508q",
+ "semtech,sx1509q".
+
+- reg: The I2C slave address for this device.
+
+- #gpio-cells: Should be 2. The first cell is the GPIO number and the
+ second cell is used to specify optional parameters:
+ bit 0: polarity (0: normal, 1: inverted)
+
+- gpio-controller: Marks the device as a GPIO controller.
+
+Optional properties :
+- interrupt-parent: phandle of the parent interrupt controller.
+
+- interrupts: Interrupt specifier for the controller's interrupt.
+
+- interrupt-controller: Marks the device as an interrupt controller.
+
+- semtech,probe-reset: Will trigger a reset of the GPIO expander on probe,
+ only for sx1507q, sx1508q and sx1509q
+
+The GPIO expander can optionally be used as an interrupt controller, in
+which case it uses the default two cell specifier.
+
+Required properties for pin configuration sub-nodes:
+ - pins: List of pins to which the configuration applies.
+
+Optional properties for pin configuration sub-nodes:
+----------------------------------------------------
+ - bias-disable: disable any pin bias, except the OSCIO pin
+ - bias-pull-up: pull up the pin, except the OSCIO pin
+ - bias-pull-down: pull down the pin, except the OSCIO pin
+ - bias-pull-pin-default: use pin-default pull state, except the OSCIO pin
+ - drive-push-pull: drive actively high and low
+ - drive-open-drain: drive with open drain (only for sx1507q, sx1508q and sx1509q, and not for the OSCIO pin)
+ - output-low: set the pin to output mode with low level
+ - output-high: set the pin to output mode with high level
+
+Example:
+
+ i2c0gpio-expander@20{
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "semtech,sx1506q";
+ reg = <0x20>;
+ interrupt-parent = <&gpio_1>;
+ interrupts = <16 0>;
+
+ gpio-controller;
+ interrupt-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio1_cfg_pins>;
+
+ gpio1_cfg_pins: gpio1-cfg {
+ pins = "gpio1";
+ bias-pull-up;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
new file mode 100644
index 000000000000..13cd629f896e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
@@ -0,0 +1,177 @@
+Qualcomm MSM8994 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8994 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain one of:
+ "qcom,msm8992-pinctrl",
+ "qcom,msm8994-pinctrl".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+
+ Valid pins are:
+ gpio0-gpio145
+ Supports mux, bias and drive-strength
+
+ sdc1_clk, sdc1_cmd, sdc1_data, sdc1_rclk, sdc2_clk,
+ sdc2_cmd, sdc2_data
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ audio_ref_clk, blsp_i2c1, blsp_i2c2, blsp_i2c3, blsp_i2c4, blsp_i2c5,
+ blsp_i2c6, blsp_i2c7, blsp_i2c8, blsp_i2c9, blsp_i2c10, blsp_i2c11,
+ blsp_i2c12, blsp_spi1, blsp_spi1_cs1, blsp_spi1_cs2, blsp_spi1_cs3,
+ blsp_spi2, blsp_spi2_cs1, blsp_spi2_cs2, blsp_spi2_cs3, blsp_spi3,
+ blsp_spi4, blsp_spi5, blsp_spi6, blsp_spi7, blsp_spi8, blsp_spi9,
+ blsp_spi10, blsp_spi10_cs1, blsp_spi10_cs2, blsp_spi10_cs3, blsp_spi11,
+ blsp_spi12, blsp_uart1, blsp_uart2, blsp_uart3, blsp_uart4, blsp_uart5,
+ blsp_uart6, blsp_uart7, blsp_uart8, blsp_uart9, blsp_uart10, blsp_uart11,
+ blsp_uart12, blsp_uim1, blsp_uim2, blsp_uim3, blsp_uim4, blsp_uim5,
+ blsp_uim6, blsp_uim7, blsp_uim8, blsp_uim9, blsp_uim10, blsp_uim11,
+ blsp_uim12, blsp11_i2c_scl_b, blsp11_i2c_sda_b, blsp11_uart_rx_b,
+ blsp11_uart_tx_b, cam_mclk0, cam_mclk1, cam_mclk2, cam_mclk3,
+ cci_async_in0, cci_async_in1, cci_async_in2, cci_i2c0, cci_i2c1,
+ cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
+ gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a, gcc_gp2_clk_b, gcc_gp3_clk_a,
+ gcc_gp3_clk_b, gp_mn, gp_pdm0, gp_pdm1, gp_pdm2, gp0_clk,
+ gp1_clk, gps_tx, gsm_tx, hdmi_cec, hdmi_ddc, hdmi_hpd, hdmi_rcv,
+ mdp_vsync, mss_lte, nav_pps, nav_tsync, qdss_cti_trig_in_a,
+ qdss_cti_trig_in_b, qdss_cti_trig_in_c, qdss_cti_trig_in_d,
+ qdss_cti_trig_out_a, qdss_cti_trig_out_b, qdss_cti_trig_out_c,
+ qdss_cti_trig_out_d, qdss_traceclk_a, qdss_traceclk_b, qdss_tracectl_a,
+ qdss_tracectl_b, qdss_tracedata_a, qdss_tracedata_b, qua_mi2s, pci_e0,
+ pci_e1, pri_mi2s, sdc4, sec_mi2s, slimbus, spkr_i2s, ter_mi2s, tsif1,
+ tsif2, uim_batt_alarm, uim1, uim2, uim3, uim4, gpio
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ msmgpio: pinctrl@fd510000 {
+ compatible = "qcom,msm8994-pinctrl";
+ reg = <0xfd510000 0x4000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1_uart2_default: blsp1_uart2_default {
+ pinmux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index c68b9554561f..4722bc61a1a2 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -19,10 +19,11 @@ The pins are grouped into up to 5 individual pin banks which need to be
defined as gpio sub-nodes of the pinmux controller.
Required properties for iomux controller:
- - compatible: one of "rockchip,rk2928-pinctrl", "rockchip,rk3066a-pinctrl"
- "rockchip,rk3066b-pinctrl", "rockchip,rk3188-pinctrl"
- "rockchip,rk3228-pinctrl", "rockchip,rk3288-pinctrl"
- "rockchip,rk3368-pinctrl", "rockchip,rk3399-pinctrl"
+ - compatible: one of "rockchip,rk1108-pinctrl", "rockchip,rk2928-pinctrl"
+ "rockchip,rk3066a-pinctrl", "rockchip,rk3066b-pinctrl"
+ "rockchip,rk3188-pinctrl", "rockchip,rk3228-pinctrl"
+ "rockchip,rk3288-pinctrl", "rockchip,rk3368-pinctrl"
+ "rockchip,rk3399-pinctrl"
- rockchip,grf: phandle referencing a syscon providing the
"general register files"
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index d49e22d2a8b5..1baf19eecabf 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -19,11 +19,30 @@ Required Properties:
- "samsung,exynos5260-pinctrl": for Exynos5260 compatible pin-controller.
- "samsung,exynos5410-pinctrl": for Exynos5410 compatible pin-controller.
- "samsung,exynos5420-pinctrl": for Exynos5420 compatible pin-controller.
+ - "samsung,exynos5433-pinctrl": for Exynos5433 compatible pin-controller.
- "samsung,exynos7-pinctrl": for Exynos7 compatible pin-controller.
- reg: Base address of the pin controller hardware module and length of
the address space it occupies.
+ - reg: Second base address of the pin controller if the specific registers
+ of the pin controller are separated into a different base address.
+
+ Eg: GPF[1-5] of Exynos5433 are separated into two base addresses.
+ - First base address is for GPAx and GPF[1-5] external interrupt
+ registers.
+ - Second base address is for GPF[1-5] pinctrl registers.
+
+ pinctrl_0: pinctrl@10580000 {
+ compatible = "samsung,exynos5433-pinctrl";
+ reg = <0x10580000 0x1a20>, <0x11090000 0x100>;
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos7-wakeup-eint";
+ interrupts = <0 16 0>;
+ };
+ };
+
- Pin banks as child nodes: Pin banks of the controller are represented by child
nodes of the controller node. Bank name is taken from name of the node. Each
bank node must contain following properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index f9753c416974..b24583aa34c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,11 +14,6 @@ Required properies:
- #size-cells : The value of this property must be 1
- ranges : defines mapping between pin controller node (parent) to
gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
- which includes IRQ mux selection register, and the offset of the IRQ mux
- selection register.
- pins-are-numbered: Specify the subnodes are using numbered pinmux to
specify pins.
@@ -37,6 +32,11 @@ Required properties:
Optional properties:
- reset: : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+ GPIO interrupts are forwarded.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+ which includes IRQ mux selection register, and the offset of the IRQ mux
+ selection register.
Example:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/pinctrl/ti,da850-pupd.txt b/Documentation/devicetree/bindings/pinctrl/ti,da850-pupd.txt
new file mode 100644
index 000000000000..7f2980567c9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ti,da850-pupd.txt
@@ -0,0 +1,55 @@
+* Pin configuration for TI DA850/OMAP-L138/AM18x
+
+These SoCs have a separate controller for setting bias (internal pullup/down).
+Bias can only be selected for groups rather than individual pins.
+
+Required Properties:
+
+ - compatible: Must be "ti,da850-pupd"
+ - reg: Base address and length of the memory resource used by the pullup/down
+ controller hardware module.
+
+The controller node also acts as a container for pin group configuration nodes.
+The names of these groups are ignored.
+
+Pin Group Node Properties:
+
+- groups: An array of strings, each string containing the name of a pin group.
+ Valid names are "cp0".."cp31".
+
+The pin configuration parameters use the generic pinconf bindings defined in
+pinctrl-bindings.txt in this directory. The supported parameters are
+bias-disable, bias-pull-up, bias-pull-down.
+
+
+Example
+-------
+
+In common dtsi file:
+
+ pinconf: pin-controller@22c00c {
+ compatible = "ti,da850-pupd";
+ reg = <0x22c00c 0x8>;
+ };
+
+In board-specific file:
+
+ &pinconf {
+ pinctrl-0 = <&pinconf_bias_groups>;
+ pinctrl-names = "default";
+
+ pinconf_bias_groups: bias-groups {
+ pull-up {
+ groups = "cp30", "cp31";
+ bias-pull-up;
+ };
+ pull-down {
+ groups = "cp29", "cp28";
+ bias-pull-down;
+ };
+ disable {
+ groups = "cp27", "cp26";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt
new file mode 100644
index 000000000000..eefc7ed22ca2
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/domain-idle-state.txt
@@ -0,0 +1,33 @@
+PM Domain Idle State Node:
+
+A domain idle state node represents the state parameters that will be used to
+select the state when there are no active components in the domain.
+
+The state node has the following parameters -
+
+- compatible:
+ Usage: Required
+ Value type: <string>
+ Definition: Must be "domain-idle-state".
+
+- entry-latency-us
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: u32 value representing worst case latency in
+ microseconds required to enter the idle state.
+ The exit-latency-us duration may be guaranteed
+ only after entry-latency-us has passed.
+
+- exit-latency-us
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: u32 value representing worst case latency
+ in microseconds required to exit the idle state.
+
+- min-residency-us
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: u32 value representing minimum residency duration
+ in microseconds after which the idle state will yield
+ power benefits after overcoming the overhead of entering
+ the idle state.
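+
+Example (mirroring the idle states used in the power_domain.txt examples; the
+latency and residency values are illustrative):
+
+	DOMAIN_RET: state@0 {
+		compatible = "domain-idle-state";
+		reg = <0x0>;
+		entry-latency-us = <1000>;
+		exit-latency-us = <2000>;
+		min-residency-us = <10000>;
+	};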
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 025b5e7df61c..723e1ad937da 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -29,6 +29,15 @@ Optional properties:
specified by this binding. More details about power domain specifier are
available in the next section.
+- domain-idle-states : A phandle of an idle-state that shall be used as a
+ generic domain power state. The idle state definitions are
+ compatible with domain-idle-state specified in [1].
+ The domain-idle-state property reflects the idle state of this PM domain and
+ not the idle states of the devices or sub-domains in the PM domain. Devices
+ and sub-domains have their own idle-states independent of the parent
+ domain's idle states. In the absence of this property, the domain would be
+ considered as capable of being powered-on or powered-off.
+
Example:
power: power-controller@12340000 {
@@ -59,6 +68,38 @@ The nodes above define two power controllers: 'parent' and 'child'.
Domains created by the 'child' power controller are subdomains of '0' power
domain provided by the 'parent' power controller.
+Example 3:
+ parent: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <0>;
+ domain-idle-states = <&DOMAIN_RET>, <&DOMAIN_PWR_DN>;
+ };
+
+ child: power-controller@12341000 {
+ compatible = "foo,power-controller";
+ reg = <0x12341000 0x1000>;
+ power-domains = <&parent 0>;
+ #power-domain-cells = <0>;
+ domain-idle-states = <&DOMAIN_PWR_DN>;
+ };
+
+ DOMAIN_RET: state@0 {
+ compatible = "domain-idle-state";
+ reg = <0x0>;
+ entry-latency-us = <1000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <10000>;
+ };
+
+ DOMAIN_PWR_DN: state@1 {
+ compatible = "domain-idle-state";
+ reg = <0x1>;
+ entry-latency-us = <5000>;
+ exit-latency-us = <8000>;
+ min-residency-us = <7000>;
+ };
+
==PM domain consumers==
Required properties:
@@ -76,3 +117,5 @@ Example:
The node above defines a typical PM domain consumer device, which is located
inside a PM domain with index 0 of a power controller represented by a node
with the label "power".
+
+[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
index fb6fb31bc4c4..cf573e85b11d 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -3,7 +3,7 @@ BCM2835 PWM controller (Raspberry Pi controller)
Required properties:
- compatible: should be "brcm,bcm2835-pwm"
- reg: physical base address and length of the controller's registers
-- clock: This clock defines the base clock frequency of the PWM hardware
+- clocks: This clock defines the base clock frequency of the PWM hardware
  system; the period and the duty_cycle of the PWM signal are multiples of
  the base period.
- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
diff --git a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
new file mode 100644
index 000000000000..fa7849d67836
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
@@ -0,0 +1,21 @@
+Hisilicon PWM controller
+
+Required properties:
+- compatible: should contain one SoC specific compatible string.
+ The supported SoC specific strings include:
+ "hisilicon,hi3516cv300-pwm"
+ "hisilicon,hi3519v100-pwm"
+- reg: physical base address and length of the controller's registers.
+- clocks: phandle and clock specifier of the PWM reference clock.
+- resets: phandle and reset specifier for the PWM controller reset.
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+ the cells format.
+
+Example:
+ pwm: pwm@12130000 {
+ compatible = "hisilicon,hi3516cv300-pwm";
+ reg = <0x12130000 0x10000>;
+ clocks = <&crg_ctrl HI3516CV300_PWM_CLK>;
+ resets = <&crg_ctrl 0x38 0>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
index 3aeba9f86ed8..bf85aa9ad6a7 100644
--- a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
@@ -59,7 +59,7 @@ Any property defined as part of the core regulator binding can also be used.
Continuous Voltage With Enable GPIO Example:
pwm_regulator {
- compatible = "pwm-regulator;
+ compatible = "pwm-regulator";
pwms = <&pwm1 0 8448 0>;
enable-gpios = <&gpio0 23 GPIO_ACTIVE_HIGH>;
regulator-min-microvolt = <1016000>;
@@ -76,7 +76,7 @@ Continuous Voltage With Enable GPIO Example:
Voltage Table Example:
pwm_regulator {
- compatible = "pwm-regulator;
+ compatible = "pwm-regulator";
pwms = <&pwm1 0 8448 0>;
regulator-min-microvolt = <1016000>;
regulator-max-microvolt = <1114000>;
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
new file mode 100644
index 000000000000..b85885a298d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -0,0 +1,98 @@
+Qualcomm ADSP Peripheral Image Loader
+
+This document defines the binding for a component that loads and boots firmware
+on the Qualcomm ADSP Hexagon core.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,msm8974-adsp-pil"
+ "qcom,msm8996-adsp-pil"
+
+- interrupts-extended:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must list the watchdog, fatal, ready, handover and
+ stop-ack IRQs
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the xo clock to be held on behalf of the
+ booting Hexagon core
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "xo"
+
+- cx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulator to be held on behalf of the
+ booting Hexagon core
+
+- memory-region:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the ADSP
+
+- qcom,smem-states:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the smem state for requesting the ADSP to
+ shut down
+
+- qcom,smem-state-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "stop"
+
+
+= SUBNODES
+The adsp node may have a subnode named "smd-edge" that describes the SMD edge,
+channels and devices related to the ADSP. See ../soc/qcom/qcom,smd.txt for
+details on how to describe the SMD edge.
+
+
+= EXAMPLE
+The following example describes the resources needed to boot control the
+ADSP, as it is found on MSM8974 boards.
+
+ adsp {
+ compatible = "qcom,msm8974-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmcc RPM_CXO_CLK>;
+ clock-names = "xo";
+
+ cx-supply = <&pm8841_s2>;
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <0 156 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 8>;
+ qcom,smd-edge = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
index 0d2361ebe3d7..d420f84ddfb0 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
@@ -60,8 +60,8 @@ on the Qualcomm WCNSS core.
see ../reserved-memory/reserved-memory.txt
= SUBNODES
-A single subnode of the WCNSS PIL describes the attached rf module and its
-resource dependencies.
+A required subnode of the WCNSS PIL is used to describe the attached rf module
+and its resource dependencies. It is described by the following properties:
- compatible:
Usage: required
@@ -90,6 +90,11 @@ resource dependencies.
Definition: reference to the regulators to be held on behalf of the
booting of the WCNSS core
+
+The wcnss node can also have a subnode named "smd-edge" that describes the SMD
+edge, channels and devices related to the WCNSS.
+See ../soc/qcom/qcom,smd.txt for details on how to describe the SMD edge.
+
= EXAMPLE
The following example describes the resources needed to boot control the WCNSS,
with attached WCN3680, as it is commonly found on MSM8974 boards.
@@ -129,4 +134,25 @@ pronto@fb204000 {
vddpa-supply = <&pm8941_l19>;
vdddig-supply = <&pm8941_s3>;
};
+
+ smd-edge {
+ interrupts = <0 142 1>;
+
+ qcom,ipc = <&apcs 8 17>;
+ qcom,smd-edge = <6>;
+ qcom,remote-pid = <4>;
+
+ label = "pronto";
+
+ wcnss {
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,mmio = <&pronto>;
+
+ bt {
+ compatible = "qcom,wcnss-bt";
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/reserved-memory/ramoops.txt b/Documentation/devicetree/bindings/reserved-memory/ramoops.txt
index e81f821a2135..0eba562fe5c6 100644
--- a/Documentation/devicetree/bindings/reserved-memory/ramoops.txt
+++ b/Documentation/devicetree/bindings/reserved-memory/ramoops.txt
@@ -46,3 +46,6 @@ Optional properties:
(defaults to buffered mappings)
- no-dump-oops: if present, only dump panics (defaults to panics and oops)
+
+- flags: if present, pass ramoops behavioral flags (defaults to 0,
+ see include/linux/pstore_ram.h RAMOOPS_FLAG_* for flag values).
diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.txt b/Documentation/devicetree/bindings/reset/renesas,rst.txt
new file mode 100644
index 000000000000..fe5e0f37b3c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/renesas,rst.txt
@@ -0,0 +1,37 @@
+DT bindings for the Renesas R-Car and RZ/G Reset Controllers
+
+The R-Car and RZ/G Reset Controllers provide reset control, and implement the
+following functions:
+ - Latching of the levels on mode pins when PRESET# is negated,
+ - Mode monitoring register,
+ - Reset control of peripheral devices (on R-Car Gen1),
+ - Watchdog timer (on R-Car Gen1),
+ - Register-based reset control and boot address registers for the various CPU
+ cores (on R-Car Gen2 and Gen3, and on RZ/G).
+
+
+Required properties:
+ - compatible: Should be
+ - "renesas,<soctype>-reset-wdt" for R-Car Gen1,
+ - "renesas,<soctype>-rst" for R-Car Gen2 and Gen3, and RZ/G
+ Examples with soctypes are:
+ - "renesas,r8a7743-rst" (RZ/G1M)
+ - "renesas,r8a7745-rst" (RZ/G1E)
+ - "renesas,r8a7778-reset-wdt" (R-Car M1A)
+ - "renesas,r8a7779-reset-wdt" (R-Car H1)
+ - "renesas,r8a7790-rst" (R-Car H2)
+ - "renesas,r8a7791-rst" (R-Car M2-W)
+ - "renesas,r8a7792-rst" (R-Car V2H
+ - "renesas,r8a7793-rst" (R-Car M2-N)
+ - "renesas,r8a7794-rst" (R-Car E2)
+ - "renesas,r8a7795-rst" (R-Car H3)
+ - "renesas,r8a7796-rst" (R-Car M3-W)
+ - reg: Address start and address range for the device.
+
+
+Example:
+
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7795-rst";
+ reg = <0 0xe6160000 0 0x0200>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 6a62acd86953..471477299ece 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -1,4 +1,4 @@
-OMAP SoC HWRNG Module
+OMAP SoC and Inside-Secure HWRNG Module
Required properties:
@@ -6,11 +6,13 @@ Required properties:
RNG versions:
- "ti,omap2-rng" for OMAP2.
- "ti,omap4-rng" for OMAP4, OMAP5 and AM33XX.
+ - "inside-secure,safexcel-eip76" for SoCs with EIP76 IP block
Note that these two versions are incompatible.
- ti,hwmods: Name of the hwmod associated with the RNG module
- reg : Offset and length of the register set for the module
- interrupts : the interrupt number for the RNG module.
- Only used for "ti,omap4-rng".
+ Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
+- clocks: the trng clock source
Example:
/* AM335x */
@@ -20,3 +22,11 @@ rng: rng@48310000 {
reg = <0x48310000 0x2000>;
interrupts = <111>;
};
+
+/* SafeXcel IP-76 */
+trng: rng@f2760000 {
+ compatible = "inside-secure,safexcel-eip76";
+ reg = <0xf2760000 0x7d>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpm_syscon0 1 25>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
index ddef330d2709..1ad4c1c2b3b3 100644
--- a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
+++ b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
@@ -1,7 +1,7 @@
* Maxim DS3231 Real Time Clock
Required properties:
-see: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+see: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
Optional property:
- #clock-cells: Should be 1.
diff --git a/Documentation/devicetree/bindings/rtc/pcf8563.txt b/Documentation/devicetree/bindings/rtc/pcf8563.txt
index 72f6d2c9665e..086c998c5561 100644
--- a/Documentation/devicetree/bindings/rtc/pcf8563.txt
+++ b/Documentation/devicetree/bindings/rtc/pcf8563.txt
@@ -3,7 +3,7 @@
Philips PCF8563/Epson RTC8564 Real Time Clock
Required properties:
-see: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+see: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
Optional property:
- #clock-cells: Should be 0.
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index bf2411f366e5..2a42a323fa1a 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -6,6 +6,7 @@ Main node required properties:
- compatible : value should be as follows:
(a) "hisilicon,hip05-sas-v1" for v1 hw in hip05 chipset
(b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
+ (c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
- sas-addr : array of 8 bytes for host SAS address
- reg : Address and length of the SAS register
- hisilicon,sas-syscon: phandle of syscon used for sas control
diff --git a/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt b/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt
new file mode 100644
index 000000000000..d89f99971368
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt
@@ -0,0 +1,41 @@
+* Device Tree Bindings for IBM Virtual Trusted Platform Module(vtpm)
+
+Required properties:
+
+- compatible : property name that conveys the platform architecture
+ identifiers, as 'IBM,vtpm'
+- device_type : specifies type of virtual device
+- interrupts : property specifying the interrupt source number and
+ sense code associated with this virtual I/O Adapter
+- ibm,my-drc-index : integer index for the connector between the device
+ and its parent - present only if Dynamic
+ Reconfiguration(DR) Connector is enabled
+- ibm,#dma-address-cells: specifies the number of cells that are used to
+ encode the physical address field of dma-window
+ properties
+- ibm,#dma-size-cells : specifies the number of cells that are used to
+ encode the size field of dma-window properties
+- ibm,my-dma-window : specifies DMA window associated with this virtual
+ IOA
+- ibm,loc-code : specifies the unique and persistent location code
+ associated with this virtual I/O Adapter
+- linux,sml-base : 64-bit base address of the reserved memory allocated
+ for the firmware event log
+- linux,sml-size : size of the memory allocated for the firmware event log
+
+Example (IBM Virtual Trusted Platform Module)
+---------------------------------------------
+
+ vtpm@30000003 {
+ ibm,#dma-size-cells = <0x2>;
+ compatible = "IBM,vtpm";
+ device_type = "IBM,vtpm";
+ ibm,my-drc-index = <0x30000003>;
+ ibm,#dma-address-cells = <0x2>;
+ linux,sml-base = <0xc60e 0x0>;
+ interrupts = <0xa0003 0x0>;
+ ibm,my-dma-window = <0x10000003 0x0 0x0 0x0 0x10000000>;
+ ibm,loc-code = "U8286.41A.10082DV-V3-C3";
+ reg = <0x30000003>;
+ linux,sml-size = <0xbce10200>;
+ };
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
new file mode 100644
index 000000000000..8cb638b7e89c
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
@@ -0,0 +1,21 @@
+* Device Tree Bindings for I2C based Trusted Platform Module(TPM)
+
+Required properties:
+
+- compatible : 'manufacturer,model', eg. nuvoton,npct650
+- label : human readable string describing the device, eg. "tpm"
+- linux,sml-base : 64-bit base address of the reserved memory allocated for
+ the firmware event log
+- linux,sml-size : size of the memory allocated for the firmware event log
+
+Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
+----------------------------------------------------------
+
+tpm@57 {
+ reg = <0x57>;
+ label = "tpm";
+ compatible = "nuvoton,npct650", "nuvoton,npct601";
+ linux,sml-base = <0x7f 0xfd450000>;
+ linux,sml-size = <0x10000>;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
new file mode 100644
index 000000000000..41d740545189
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
@@ -0,0 +1,25 @@
+Trusted Computing Group MMIO Trusted Platform Module
+
+The TCG defines a multi-vendor standard for accessing a TPM chip; this
+is the standard protocol defined to access the TPM via MMIO. Typically
+this interface will be implemented over Intel's LPC bus.
+
+Refer to the 'TCG PC Client Specific TPM Interface Specification (TIS)' TCG
+publication for the specification.
+
+Required properties:
+
+- compatible: should contain a string below for the chip, followed by
+ "tcg,tpm-tis-mmio". Valid chip strings are:
+ * "atmel,at97sc3204"
+- reg: The location of the MMIO registers, should be at least 0x5000 bytes
+- interrupt-parent/interrupts: An optional interrupt indicating command completion.
+
+Example:
+
+ tpm_tis@90000 {
+ compatible = "atmel,at97sc3204", "tcg,tpm-tis-mmio";
+ reg = <0x90000 0x5000>;
+ interrupt-parent = <&EIC0>;
+ interrupts = <1 2>;
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/guts.txt b/Documentation/devicetree/bindings/soc/fsl/guts.txt
index b71b2039e112..07adca914d3d 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/guts.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/guts.txt
@@ -25,6 +25,9 @@ Recommended properties:
- fsl,liodn-bits : Indicates the number of defined bits in the LIODN
registers, for those SOCs that have a PAMU device.
+ - little-endian : Indicates that the global utilities block is little
+ endian. The default is big endian.
+
Examples:
global-utilities@e0000 { /* global utilities block */
compatible = "fsl,mpc8548-guts";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
index 97d9b3e1bf39..ea1dc75ec9ea 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
@@ -43,6 +43,13 @@ The edge is described by the following properties:
Definition: the identifier for the remote processor as known by the rest
of the system.
+- label:
+ Usage: optional
+ Value type: <string>
+ Definition: name of the edge, used for debugging and identification
+ purposes. The node name will be used if this is not
+ present.
+
= SMD DEVICES
In turn, subnodes of the "edges" represent devices tied to SMD channels on that
diff --git a/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
new file mode 100644
index 000000000000..5b9b38f578bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
@@ -0,0 +1,88 @@
+Devicetree bindings for the Axentia TSE-850 audio complex
+
+Required properties:
+ - compatible: "axentia,tse850-pcm5142"
+ - axentia,ssc-controller: The phandle of the atmel SSC controller used as
+ cpu dai.
+ - axentia,audio-codec: The phandle of the PCM5142 codec.
+ - axentia,add-gpios: gpio specifier that controls the mixer.
+ - axentia,loop1-gpios: gpio specifier that controls loop relays on channel 1.
+ - axentia,loop2-gpios: gpio specifier that controls loop relays on channel 2.
+ - axentia,ana-supply: Regulator that supplies the output amplifier. Must
+ support voltages in the 2V - 20V range, in 1V steps.
+
+The schematics explaining the gpios are as follows:
+
+ loop1 relays
+ IN1 +---o +------------+ o---+ OUT1
+ \ /
+ + +
+ | / |
+ +--o +--. |
+ | add | |
+ | V |
+ | .---. |
+ DAC +----------->|Sum|---+
+ | '---' |
+ | |
+ + +
+
+ IN2 +---o--+------------+--o---+ OUT2
+ loop2 relays
+
+The 'loop1' gpio pin controls two relays, which are either in loop position,
+meaning that input and output are directly connected, or they are in mixer
+position, meaning that the signal is passed through the 'Sum' mixer. Similarly
+for 'loop2'.
+
+In the above, the 'loop1' relays are inactive, thus feeding IN1 to the mixer
+(if 'add' is active) and feeding the mixer output to OUT1. The 'loop2' relays
+are active, short-cutting the TSE-850 from channel 2. IN1, IN2, OUT1 and OUT2
+are TSE-850 connectors and DAC is the PCB name of the (filtered) output from
+the PCM5142 codec.
+
+Example:
+
+ &i2c {
+ codec: pcm5142@4c {
+ compatible = "ti,pcm5142";
+
+ reg = <0x4c>;
+
+ AVDD-supply = <&reg_3v3>;
+ DVDD-supply = <&reg_3v3>;
+ CPVDD-supply = <&reg_3v3>;
+
+ clocks = <&sck>;
+
+ pll-in = <3>;
+ pll-out = <6>;
+ };
+ };
+
+ ana: ana-reg {
+ compatible = "pwm-regulator";
+
+ regulator-name = "ANA";
+
+ pwms = <&pwm0 2 1000 PWM_POLARITY_INVERTED>;
+ pwm-dutycycle-unit = <1000>;
+ pwm-dutycycle-range = <100 1000>;
+
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <20000000>;
+ regulator-ramp-delay = <1000>;
+ };
+
+ sound {
+ compatible = "axentia,tse850-pcm5142";
+
+ axentia,ssc-controller = <&ssc0>;
+ axentia,audio-codec = <&codec>;
+
+ axentia,add-gpios = <&pioA 8 GPIO_ACTIVE_LOW>;
+ axentia,loop1-gpios = <&pioA 10 GPIO_ACTIVE_LOW>;
+ axentia,loop2-gpios = <&pioA 11 GPIO_ACTIVE_LOW>;
+
+ axentia,ana-supply = <&ana>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/cs35l34.txt b/Documentation/devicetree/bindings/sound/cs35l34.txt
new file mode 100644
index 000000000000..b218ead2e68e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l34.txt
@@ -0,0 +1,64 @@
+CS35L34 Speaker Amplifier
+
+Required properties:
+
+ - compatible : "cirrus,cs35l34"
+
+ - reg : the I2C address of the device for I2C.
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ - cirrus,boost-vtge-millivolt : Boost Voltage Value. Configures the boost
+ converter's output voltage in mV. The range is from VP to 8V with
+ increments of 100mV.
+
+ - cirrus,boost-nanohenry: Inductor value for boost converter. The value is
+ in nH and can be one of 1000nH, 1100nH, 1200nH, 1500nH, or 2200nH.
+
+Optional properties:
+
+ - reset-gpios: GPIO used to reset the amplifier.
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from CS35L34 are delivered.
+ - interrupts : IRQ line info for the CS35L34.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,boost-peak-milliamp : Boost converter peak current limit in mA. The
+ range starts at 1200mA and goes to a maximum of 3840mA with increments of
+ 80mA. The default value is 2480mA.
+
+ - cirrus,i2s-sdinloc : ADSP SDIN I2S channel location. Indicates whether the
+ received mono data is in the left or right portion of the I2S frame
+ according to the AD0 pin or directly via this configuration.
+ 0x0 (Default) = Selected by AD0 input (if AD0 = LOW, use left channel),
+ 0x2 = Left,
+ 0x1 = Selected by the inversion of the AD0 input (if AD0 = LOW, use right
+ channel),
+ 0x3 = Right.
+
+ - cirrus,gain-zc-disable: Boolean property. If set, the gain change will take
+ effect without waiting for a zero cross.
+
+ - cirrus,tdm-rising-edge: Boolean property. If set, data is on the rising edge of
+ SCLK. Otherwise, data is on the falling edge of SCLK.
+
+
+Example:
+
+cs35l34: cs35l34@40 {
+ compatible = "cirrus,cs35l34";
+ reg = <0x40>;
+
+ interrupt-parent = <&gpio8>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&gpio 10 0>;
+
+ cirrus,boost-vtge-millivolt = <8000>; /* 8V */
+ cirrus,boost-ind-nanohenry = <1000>; /* 1uH */
+ cirrus,boost-peak-milliamp = <3000>; /* 3A */
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
new file mode 100644
index 000000000000..9a2c5e2423d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -0,0 +1,110 @@
+CS42L42 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs42l42"
+
+ - reg : the I2C address of the device for I2C.
+
+ - VP-supply, VCP-supply, VD_FILT-supply, VL-supply, VA-supply :
+ power supplies for the device, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the codec starts.
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from the CS42L42 are delivered.
+
+ - interrupts : IRQ line info for the CS42L42.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,ts-inv : Boolean property. For jacks that invert the tip sense
+ polarity. Normal jacks will short tip sense pin to HS1 when headphones are
+ plugged in and leave tip sense floating when not plugged in. Inverting jacks
+ short tip sense when unplugged and float when plugged in.
+
+ 0 = (Default) Non-inverted
+ 1 = Inverted
+
+ - cirrus,ts-dbnc-rise : Debounce the rising edge of TIP_SENSE_PLUG. With no
+ debounce, the tip sense pin might be noisy on a plug event.
+
+ 0 - 0ms,
+ 1 - 125ms,
+ 2 - 250ms,
+ 3 - 500ms,
+ 4 - 750ms,
+ 5 - (Default) 1s,
+ 6 - 1.25s,
+ 7 - 1.5s,
+
+ - cirrus,ts-dbnc-fall : Debounce the falling edge of TIP_SENSE_UNPLUG.
+ With no debounce, the tip sense pin might be noisy on an unplug event.
+
+ 0 - 0ms,
+ 1 - 125ms,
+ 2 - 250ms,
+ 3 - 500ms,
+ 4 - 750ms,
+ 5 - (Default) 1s,
+ 6 - 1.25s,
+ 7 - 1.5s,
+
+ - cirrus,btn-det-init-dbnce : This sets how long the driver sleeps after
+ enabling button detection interrupts. After auto-detection and before
+ servicing button interrupts, the HS bias needs time to settle. If you
+ don't wait, there is a possibility of erroneous button interrupts.
+
+ 0ms - 200ms,
+ Default = 100ms
+
+ - cirrus,btn-det-event-dbnce : This sets how long the driver delays after
+ receiving a button press interrupt. With level detect interrupts, you want
+ to wait a small amount of time to make sure the button press is making a
+ clean connection with the bias resistors.
+
+ 0ms - 20ms,
+ Default = 10ms
+
+ - cirrus,bias-lvls : For a level-detect headset button scheme, each button
+ will bias the mic pin to a certain voltage. To determine which button was
+ pressed, the driver will compare this biased voltage to sequential,
+ decreasing voltages and will stop when a comparator is tripped,
+ indicating a comparator voltage < bias voltage. This value represents a
+ percentage of the internally generated HS bias voltage. For different
+ hardware setups, a designer might want to tweak this. This is an array of
+ descending values for the comparator voltage.
+
+ Array of 4 values
+ Each 0-63
+ < x1 x2 x3 x4 >
+ Default = < 15 8 4 1>
+
+
+Example:
+
+cs42l42: cs42l42@48 {
+ compatible = "cirrus,cs42l42";
+ reg = <0x48>;
+ VA-supply = <&dummy_vreg>;
+ VP-supply = <&dummy_vreg>;
+ VCP-supply = <&dummy_vreg>;
+ VD_FILT-supply = <&dummy_vreg>;
+ VL-supply = <&dummy_vreg>;
+
+ reset-gpios = <&axi_gpio_0 1 0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <55 8>;
+
+ cirrus,ts-inv = <0x00>;
+ cirrus,ts-dbnc-rise = <0x05>;
+ cirrus,ts-dbnc-fall = <0x00>;
+ cirrus,btn-det-init-dbnce = <100>;
+ cirrus,btn-det-event-dbnce = <10>;
+ cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
+ cirrus,hs-bias-ramp-rate = <0x02>;
+}; \ No newline at end of file
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
index 55b53e1fd72c..e0b6165c9cfc 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
@@ -43,7 +43,7 @@ mcbsp0: mcbsp@1d10000 {
<0x00310000 0x1000>;
reg-names = "mpu", "dat";
interrupts = <97 98>;
- interrupts-names = "rx", "tx";
+ interrupt-names = "rx", "tx";
dmas = <&edma0 3 1
&edma0 2 1>;
dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
index fd40c852d7c7..462b04e8209f 100644
--- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
+++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
@@ -12,7 +12,7 @@ Required properties:
Optional properties:
- ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Needs to be present if the board is capable of detecting jack
insertion, removal.
Available audio endpoints for the audio-routing table:
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index d9d8635ff94c..6a4aadc4ce06 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -44,8 +44,7 @@ Required dai-link subnodes:
Required CPU/CODEC subnodes properties:
-link-name : Name of the dai link.
--sound-dai : phandle and port of CPU/CODEC
--capture-dai : phandle and port of CPU/CODEC
+-sound-dai : phandle/s and port of CPU/CODEC
Example:
@@ -73,7 +72,7 @@ sound: sound {
sound-dai = <&lpass MI2S_PRIMARY>;
};
codec {
- sound-dai = <&wcd_codec 0>;
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
new file mode 100644
index 000000000000..ccb401cfef9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -0,0 +1,85 @@
+msm8916 analog audio CODEC
+
+Bindings for the codec analog IP which is integrated in the pm8916 PMIC.
+
+## Bindings for codec core on pmic:
+
+Required properties
+ - compatible = "qcom,pm8916-wcd-analog-codec";
+ - reg: represents the slave base address provided to the peripheral.
+ - interrupt-parent : The parent interrupt controller.
+ - interrupts: List of interrupts in given SPMI peripheral.
+ - interrupt-names: Names for the above list of interrupts, in the same
+ order. The supported interrupt names are:
+ "cdc_spk_cnp_int" - Speaker click and pop interrupt.
+ "cdc_spk_clip_int" - Speaker clip interrupt.
+ "cdc_spk_ocp_int" - Speaker over current protect interrupt.
+ "mbhc_ins_rem_det1" - jack insert removal detect interrupt 1.
+ "mbhc_but_rel_det" - button release interrupt.
+ "mbhc_but_press_det" - button press event
+ "mbhc_ins_rem_det" - jack insert removal detect interrupt.
+ "mbhc_switch_int" - multi button headset interrupt.
+ "cdc_ear_ocp_int" - Earphone over current protect interrupt.
+ "cdc_hphr_ocp_int" - Headphone R over current protect interrupt.
+ "cdc_hphl_ocp_det" - Headphone L over current protect interrupt.
+ "cdc_ear_cnp_int" - earphone cnp interrupt.
+ "cdc_hphr_cnp_int" - hphr click and pop interrupt.
+ "cdc_hphl_cnp_int" - hphl click and pop interrupt.
+
+ - clocks: Handle to mclk.
+ - clock-names: should be "mclk"
+ - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node.
+ - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node.
+ - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node.
+
+Optional Properties:
+- qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor
+ connected.
+- qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor
+ connected.
+
+Example:
+
+spmi_bus {
+ ...
+ audio-codec@f000{
+ compatible = "qcom,pm8916-wcd-analog-codec";
+ reg = <0xf000 0x200>;
+ reg-names = "pmic-codec-core";
+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "mclk";
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x1 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x2 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x3 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x4 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x5 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x6 IRQ_TYPE_NONE>,
+ <0x1 0xf0 0x7 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x0 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x1 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x2 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x3 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x4 IRQ_TYPE_NONE>,
+ <0x1 0xf1 0x5 IRQ_TYPE_NONE>;
+ interrupt-names = "cdc_spk_cnp_int",
+ "cdc_spk_clip_int",
+ "cdc_spk_ocp_int",
+ "mbhc_ins_rem_det1",
+ "mbhc_but_rel_det",
+ "mbhc_but_press_det",
+ "mbhc_ins_rem_det",
+ "mbhc_switch_int",
+ "cdc_ear_ocp_int",
+ "cdc_hphr_ocp_int",
+ "cdc_hphl_ocp_det",
+ "cdc_ear_cnp_int",
+ "cdc_hphr_cnp_int",
+ "cdc_hphl_cnp_int";
+ vdd-cdc-io-supply = <&pm8916_l5>;
+ vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+ vdd-micbias-supply = <&pm8916_l13>;
+ #sound-dai-cells = <1>;
+ };
+};
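
If the board fits external capacitors on the micbias pins, the new optional
booleans described above would simply be added inside the audio-codec node,
for example (an illustrative fragment):

        qcom,micbias1-ext-cap;  /* external cap fitted on micbias1 */
        qcom,micbias2-ext-cap;  /* external cap fitted on micbias2 */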
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
new file mode 100644
index 000000000000..1c8e4cb25176
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
@@ -0,0 +1,20 @@
+msm8916 digital audio CODEC
+
+## Bindings for codec core in lpass:
+
+Required properties
+ - compatible = "qcom,msm8916-wcd-digital-codec";
+ - reg: address space for lpass codec.
+ - clocks: Handle to mclk and ahbclk
+ - clock-names: should be "mclk", "ahbix-clk".
+
+Example:
+
+audio-codec@771c000{
+ compatible = "qcom,msm8916-wcd-digital-codec";
+ reg = <0x0771c000 0x400>;
+ clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>,
+ <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "ahbix-clk", "mclk";
+ #sound-dai-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index 9cabfc18cb57..929ca6756b02 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -13,6 +13,9 @@ Optional properties:
- clocks: The phandle of the master clock to the CODEC
- clock-names: Should be "mclk"
+- realtek,dmic-init-delay-ms
+ Set the DMIC initial delay (in ms) to wait until it is ready (as shown below).
+
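
For illustration, a minimal rt5514 node sketch using the new property; the I2C
address and the 20 ms value are assumptions, not taken from this binding:

        rt5514: codec@57 {
                compatible = "realtek,rt5514";
                reg = <0x57>;                           /* assumed I2C address */
                clocks = <&mclk>;
                clock-names = "mclk";
                realtek,dmic-init-delay-ms = <20>;      /* assumed settle time */
        };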
Pins on the device (for linking into audio routes) for RT5514:
* DMIC1L
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index 7d3c974c6e2e..70eaeaed2b18 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -1,10 +1,10 @@
-RT5663/RT5668 audio CODEC
+RT5663 audio CODEC
This device supports I2C only.
Required properties:
-- compatible : One of "realtek,rt5663" or "realtek,rt5668".
+- compatible : "realtek,rt5663".
- reg : The I2C address of the device.
@@ -12,7 +12,7 @@ Required properties:
Optional properties:
-Pins on the device (for linking into audio routes) for RT5663/RT5668:
+Pins on the device (for linking into audio routes) for RT5663:
* IN1P
* IN1N
diff --git a/Documentation/devicetree/bindings/sound/rt5665.txt b/Documentation/devicetree/bindings/sound/rt5665.txt
new file mode 100755
index 000000000000..419c89219681
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5665.txt
@@ -0,0 +1,68 @@
+RT5665/RT5666/RT5668 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : One of "realtek,rt5665", "realtek,rt5666" or "realtek,rt5668".
+
+- reg : The I2C address of the device.
+
+- interrupts : The CODEC's interrupt output.
+
+Optional properties:
+
+- realtek,in1-differential
+- realtek,in2-differential
+- realtek,in3-differential
+- realtek,in4-differential
+ Boolean. Indicates that the MIC1/2/3/4 inputs are differential, rather than single-ended.
+
+- realtek,dmic1-data-pin
+ 0: dmic1 is not used
+ 1: using GPIO4 pin as dmic1 data pin
+ 2: using IN2N pin as dmic1 data pin
+
+- realtek,dmic2-data-pin
+ 0: dmic2 is not used
+ 1: using GPIO5 pin as dmic2 data pin
+ 2: using IN2P pin as dmic2 data pin
+
+- realtek,jd-src
+ 0: No JD is used
+ 1: using JD1 as JD source
+
+- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+
+Pins on the device (for linking into audio routes) for RT5665/RT5666/RT5668:
+
+ * DMIC L1
+ * DMIC R1
+ * DMIC L2
+ * DMIC R2
+ * IN1P
+ * IN1N
+ * IN2P
+ * IN2N
+ * IN3P
+ * IN3N
+ * IN4P
+ * IN4N
+ * HPOL
+ * HPOR
+ * LOUTL
+ * LOUTR
+ * MONOOUT
+ * PDML
+ * PDMR
+
+Example:
+
+rt5665 {
+ compatible = "realtek,rt5665";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(W, 3) GPIO_ACTIVE_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
new file mode 100644
index 000000000000..94442e5673b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
@@ -0,0 +1,38 @@
+Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
+
+Required properties:
+
+ - compatible : "samsung,tm2-audio"
+ - model : the user-visible name of this sound complex
+ - audio-codec : the phandle of the wm5110 audio codec node,
+ as described in ../mfd/arizona.txt
+ - i2s-controller : the phandle of the I2S controller
+ - audio-amplifier : the phandle of the MAX98504 amplifier
+ - samsung,audio-routing : a list of the connections between audio components;
+ each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source; valid names for sources and sinks are the
+ WM5110's and MAX98504's pins and the jacks on the
+ board: HP, SPK, Main Mic, Sub Mic, Third Mic,
+ Headset Mic
+ - mic-bias-gpios : GPIO pin that enables the Main Mic bias regulator
+
+
+Example:
+
+sound {
+ compatible = "samsung,tm2-audio";
+ audio-codec = <&wm5110>;
+ i2s-controller = <&i2s0>;
+ audio-amplifier = <&max98504>;
+ mic-bias-gpios = <&gpr3 2 0>;
+ model = "wm5110";
+ samsung,audio-routing =
+ "HP", "HPOUT1L",
+ "HP", "HPOUT1R",
+ "SPK", "SPKOUT",
+ "SPKOUT", "HPOUT2L",
+ "SPKOUT", "HPOUT2R",
+ "Main Mic", "MICBIAS2",
+ "IN1R", "Main Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
index 0dce690f78f5..3033bd8aab0f 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
@@ -1,8 +1,12 @@
* Allwinner A10 Codec
Required properties:
-- compatible: must be either "allwinner,sun4i-a10-codec" or
- "allwinner,sun7i-a20-codec"
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun4i-a10-codec"
+ - "allwinner,sun6i-a31-codec"
+ - "allwinner,sun7i-a20-codec"
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
- reg: must contain the registers location and length
- interrupts: must contain the codec interrupt
- dmas: DMA channels for tx and rx dma. See the DMA client binding,
@@ -17,6 +21,43 @@ Required properties:
Optional properties:
- allwinner,pa-gpios: gpio to enable external amplifier
+Required properties for the following compatibles:
+ - "allwinner,sun6i-a31-codec"
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
+- resets: phandle to the reset control for this device
+- allwinner,audio-routing: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names include:
+
+ Audio pins on the SoC:
+ "HP"
+ "HPCOM"
+ "LINEIN"
+ "LINEOUT" (not on sun8i-a23)
+ "MIC1"
+ "MIC2"
+ "MIC3" (sun6i-a31 only)
+
+ Microphone biases from the SoC:
+ "HBIAS"
+ "MBIAS"
+
+ Board connectors:
+ "Headphone"
+ "Headset Mic"
+ "Line In"
+ "Line Out"
+ "Mic"
+ "Speaker"
+
+Required properties for the following compatibles:
+ - "allwinner,sun8i-a23-codec"
+ - "allwinner,sun8i-h3-codec"
+- allwinner,codec-analog-controls: A phandle to the codec analog controls
+ block in the PRCM.
+
Example:
codec: codec@01c22c00 {
#sound-dai-cells = <0>;
@@ -28,3 +69,23 @@ codec: codec@01c22c00 {
dmas = <&dma 0 19>, <&dma 0 19>;
dma-names = "rx", "tx";
};
+
+codec: codec@01c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun6i-a31-codec";
+ reg = <0x01c22c00 0x98>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
+ clock-names = "apb", "codec";
+ resets = <&ccu RST_APB1_CODEC>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Speaker", "LINEOUT",
+ "LINEIN", "Line In",
+ "MIC1", "MBIAS",
+ "MIC1", "Mic",
+ "MIC2", "HBIAS",
+ "MIC2", "Headset Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
new file mode 100644
index 000000000000..779b735781ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
@@ -0,0 +1,16 @@
+* Allwinner Codec Analog Controls
+
+Required properties:
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun8i-a23-codec-analog"
+ - "allwinner,sun8i-h3-codec-analog"
+
+Required properties if not a sub-node of the PRCM node:
+- reg: must contain the registers location and length
+
+Example:
+prcm: prcm@01f01400 {
+ codec_analog: codec-analog {
+ compatible = "allwinner,sun8i-a23-codec-analog";
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 9340d2ddcc54..6fbba562eaa7 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -12,6 +12,7 @@ Required properties:
"ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP)
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
"ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
+ "ti,tlv320dac3101" - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP)
- reg - <int> - I2C slave address
- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
diff --git a/Documentation/devicetree/bindings/sound/wm8580.txt b/Documentation/devicetree/bindings/sound/wm8580.txt
index 7d9821f348da..78fce9b14954 100644
--- a/Documentation/devicetree/bindings/sound/wm8580.txt
+++ b/Documentation/devicetree/bindings/sound/wm8580.txt
@@ -1,10 +1,10 @@
-WM8580 audio CODEC
+WM8580 and WM8581 audio CODEC
This device supports I2C only.
Required properties:
- - compatible : "wlf,wm8580"
+ - compatible : "wlf,wm8580", "wlf,wm8581"
- reg : the I2C address of the device.
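
A minimal node sketch for the newly documented WM8581 variant; the I2C address
below is illustrative only:

        wm8581: codec@1a {
                compatible = "wlf,wm8581";
                reg = <0x1a>;           /* illustrative I2C address */
        };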
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index aa005c1d10d9..da6614c63796 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -10,6 +10,7 @@ Required properties:
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
"renesas,msiof-r8a7794" (R-Car E2)
+ "renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
- reg : A list of offsets and lengths of the register sets for
the device.
diff --git a/Documentation/devicetree/bindings/spi/spi-armada-3700.txt b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
new file mode 100644
index 000000000000..1564aa8c02cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
@@ -0,0 +1,25 @@
+* Marvell Armada 3700 SPI Controller
+
+Required Properties:
+
+- compatible: should be "marvell,armada-3700-spi"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The interrupt number. The interrupt specifier format depends on
+ the interrupt controller and its driver.
+- clocks: Must contain the clock source, usually from the North Bridge clocks.
+- num-cs: The number of chip selects supported by this SPI controller.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+Example:
+
+ spi0: spi@10600 {
+ compatible = "marvell,armada-3700-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10600 0x5d>;
+ clocks = <&nb_perih_clk 7>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ num-cs = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
new file mode 100644
index 000000000000..225ace1d0c65
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -0,0 +1,19 @@
+* Freescale Low Power SPI (LPSPI) for i.MX
+
+Required properties:
+- compatible :
+ - "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
+- reg : address and length of the lpspi master registers
+- interrupt-parent : core interrupt controller
+- interrupts : lpspi interrupt
+- clocks : lpspi clock specifier
+
+Examples:
+
+lpspi2: lpspi@40290000 {
+ compatible = "fsl,imx7ulp-spi";
+ reg = <0x40290000 0x10000>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7ULP_CLK_LPSPI2>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-sun6i.txt b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
index 21de73db6a05..2ec99b86b622 100644
--- a/Documentation/devicetree/bindings/spi/spi-sun6i.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
@@ -1,7 +1,7 @@
-Allwinner A31 SPI controller
+Allwinner A31/H3 SPI controller
Required properties:
-- compatible: Should be "allwinner,sun6i-a31-spi".
+- compatible: Should be "allwinner,sun6i-a31-spi" or "allwinner,sun8i-h3-spi".
- reg: Should contain register location and length.
- interrupts: Should contain interrupt.
- clocks: phandle to the clocks feeding the SPI controller. Two are
@@ -12,6 +12,11 @@ Required properties:
- resets: phandle to the reset controller asserting this device in
reset
+Optional properties:
+- dmas: DMA specifiers for rx and tx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request names should include "rx" and "tx" if present.
+
Example:
spi1: spi@01c69000 {
@@ -22,3 +27,19 @@ spi1: spi@01c69000 {
clock-names = "ahb", "mod";
resets = <&ahb1_rst 21>;
};
+
+spi0: spi@01c68000 {
+ compatible = "allwinner,sun8i-h3-spi";
+ reg = <0x01c68000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 23>, <&dma 23>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ resets = <&ccu RST_BUS_SPI0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 7d44eae7ab0b..274058c583dd 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -3,7 +3,7 @@
I. For patch submitters
- 0) Normal patch submission rules from Documentation/SubmittingPatches
+ 0) Normal patch submission rules from Documentation/process/submitting-patches.rst
applies.
1) The Documentation/ portion of the patch should be a separate patch.
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index 08efe6bc2193..43003aec94bd 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -22,10 +22,13 @@ Required properties:
TSADC controller.
- pinctrl-2 : The "sleep" pinctrl state, it will be in for suspend.
- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+
+Optional properties:
- rockchip,hw-tshut-temp : The hardware-controlled shutdown temperature value.
- rockchip,hw-tshut-mode : The hardware-controlled shutdown mode 0:CRU 1:GPIO.
- rockchip,hw-tshut-polarity : The hardware-controlled active polarity 0:LOW
1:HIGH.
+- rockchip,grf : The phandle of the syscon node for the general register file.
Example:
tsadc: tsadc@ff280000 {
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 070baf4d7d97..b6b5130e5f65 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,8 +7,11 @@ To bind UFS PHY with UFS host controller, the controller node should
contain a phandle reference to UFS PHY node.
Required properties:
-- compatible : compatible list, contains "qcom,ufs-phy-qmp-20nm"
- or "qcom,ufs-phy-qmp-14nm" according to the relevant phy in use.
+- compatible : compatible list, contains one of the following -
+ "qcom,ufs-phy-qmp-20nm" for 20nm ufs phy,
+ "qcom,ufs-phy-qmp-14nm" for legacy 14nm ufs phy,
+ "qcom,msm8996-ufs-phy-qmp-14nm" for 14nm ufs phy
+ present on MSM8996 chipset.
- reg : should contain PHY register address space (mandatory),
- reg-names : indicates various resources passed to driver (via reg property) by name.
Required "reg-names" is "phy_mem".
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
new file mode 100644
index 000000000000..ccb844aba7d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -0,0 +1,43 @@
+TI DA8xx MUSB
+~~~~~~~~~~~~~
+For DA8xx/OMAP-L1x/AM17xx/AM18xx platforms.
+
+Required properties:
+~~~~~~~~~~~~~~~~~~~~
+ - compatible : Should be set to "ti,da830-musb".
+
+ - reg: Offset and length of the USB controller register set.
+
+ - interrupts: The USB interrupt number.
+
+ - interrupt-names: Should be set to "mc".
+
+ - dr_mode: The USB operation mode. Should be one of "host", "peripheral" or "otg".
+
+ - phys: Phandle for the PHY device
+
+ - phy-names: Should be "usb-phy"
+
+Optional properties:
+~~~~~~~~~~~~~~~~~~~~
+ - vbus-supply: Phandle to a regulator providing the USB bus power.
+
+Example:
+ usb_phy: usb-phy {
+ compatible = "ti,da830-usb-phy";
+ #phy-cells = <0>;
+ status = "okay";
+ };
+ usb0: usb@200000 {
+ compatible = "ti,da830-musb";
+ reg = <0x00200000 0x10000>;
+ interrupts = <58>;
+ interrupt-names = "mc";
+
+ dr_mode = "host";
+ vbus-supply = <&usb_vbus>;
+ phys = <&usb_phy 0>;
+ phy-names = "usb-phy";
+
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 2c30a5479069..6c7c2bce6d0c 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -12,6 +12,7 @@ Required properties:
- "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
- "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
- "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
+ - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
- snps,dwc2: A generic DWC2 USB controller with default parameters.
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
@@ -25,11 +26,13 @@ Optional properties:
Refer to phy/phy-bindings.txt for generic phy consumer properties
- dr_mode: shall be one of "host", "peripheral" and "otg"
Refer to usb/generic.txt
-- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+Deprecated properties:
+- g-use-dma: gadget DMA mode is automatically detected
+
Example:
usb@101c0000 {
diff --git a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
new file mode 100644
index 000000000000..e049d199bf0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
@@ -0,0 +1,87 @@
+The device node for Mediatek USB3.0 DRD controller
+
+Required properties:
+ - compatible : should be "mediatek,mt8173-mtu3"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for device IP and "ippc" for IP port control
+ - interrupts : interrupt used by the device IP
+ - power-domains : a phandle to USB power domain node to control USB's
+ mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+ entry in clock-names
+ - clock-names : must contain "sys_ck" for the clock of the controller;
+ "wakeup_deb_p0" and "wakeup_deb_p1" are optional; they
+ depend on "mediatek,enable-wakeup"
+ - phys : a list of phandle + phy specifier pairs
+ - dr_mode : should be one of "host", "peripheral" or "otg",
+ refer to usb/generic.txt
+
+Optional properties:
+ - #address-cells, #size-cells : should be '2' if the device has sub-nodes
+ with 'reg' property
+ - ranges : allows valid 1:1 translation between child's address space and
+ parent's address space
+ - extcon : external connector for vbus and idpin change detection, needed
+ when dual-role mode is supported.
+ - vbus-supply : reference to the VBUS regulator, needed when dual-role
+ mode is supported.
+ - pinctrl-names : a pinctrl state named "default" must be defined;
+ "id_float" and "id_ground" are optional and depend on
+ "mediatek,enable-manual-drd" (see the sketch after the example below)
+ - pinctrl-0 : pin control group
+ See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+ - maximum-speed : valid arguments are "super-speed", "high-speed" and
+ "full-speed"; refer to usb/generic.txt
+ - enable-manual-drd : supports manual dual-role switching via debugfs; usually
+ used when the receptacle is Type-A and dual-role mode is also wanted.
+ - mediatek,enable-wakeup : supports ip sleep wakeup used by host mode
+ - mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
+ control register, it depends on "mediatek,enable-wakeup".
+
+Sub-nodes:
+The xhci node should be added as a subnode of mtu3, as shown in the following
+example, if host mode is enabled. The DT binding details of xhci can be found in:
+Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+
+Example:
+ssusb: usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3";
+ reg = <0 0x11271000 0 0x3000>,
+ <0 0x11280700 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&phy_port0 PHY_TYPE_USB3>,
+ <&phy_port1 PHY_TYPE_USB2>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>,
+ <&pericfg CLK_PERI_USB0>,
+ <&pericfg CLK_PERI_USB1>;
+ clock-names = "sys_ck",
+ "wakeup_deb_p0",
+ "wakeup_deb_p1";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ mediatek,enable-wakeup;
+ mediatek,syscon-wakeup = <&pericfg>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host: xhci@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ status = "disabled";
+ };
+};
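
When "enable-manual-drd" is used, the optional pinctrl states mentioned above
could be wired up roughly as follows (a sketch; the pin group labels are
hypothetical):

        pinctrl-names = "default", "id_float", "id_ground";
        pinctrl-0 = <&usb_default_pins>;        /* hypothetical pin groups */
        pinctrl-1 = <&usb_id_float_pins>;
        pinctrl-2 = <&usb_id_ground_pins>;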
diff --git a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
index b3a7ffa48852..2a930bd52b94 100644
--- a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
@@ -2,10 +2,18 @@ MT8173 xHCI
The device node for Mediatek SOC USB3.0 host controller
+There are two scenarios: the first one only supports the xHCI driver;
+the second one supports dual-role mode, with the host based on the xHCI
+driver. To take backward compatibility into account, the bindings are
+divided into two parts.
+
+1st: only supports xHCI driver
+------------------------------------------------------------------------
+
Required properties:
- compatible : should contain "mediatek,mt8173-xhci"
- - reg : specifies physical base address and size of the registers,
- the first one for MAC, the second for IPPC
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC and "ippc" for IP port control
- interrupts : interrupt used by the controller
- power-domains : a phandle to USB power domain node to control USB's
mtcmos
@@ -27,12 +35,16 @@ Optional properties:
control register, it depends on "mediatek,wakeup-src".
- vbus-supply : reference to the VBUS regulator;
- usb3-lpm-capable : supports USB3.0 LPM
+ - pinctrl-names : a pinctrl state named "default" must be defined
+ - pinctrl-0 : pin control group
+ See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Example:
usb30: usb@11270000 {
compatible = "mediatek,mt8173-xhci";
reg = <0 0x11270000 0 0x1000>,
<0 0x11280700 0 0x0100>;
+ reg-names = "mac", "ippc";
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
clocks = <&topckgen CLK_TOP_USB30_SEL>,
@@ -49,3 +61,41 @@ usb30: usb@11270000 {
mediatek,syscon-wakeup = <&pericfg>;
mediatek,wakeup-src = <1>;
};
+
+2nd: dual-role mode with xHCI driver
+------------------------------------------------------------------------
+
+In this case, xhci is added as a subnode of mtu3. An example and the DT binding
+details of mtu3 can be found in:
+Documentation/devicetree/bindings/usb/mtu3.txt
+
+Required properties:
+ - compatible : should contain "mediatek,mt8173-xhci"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC
+ - interrupts : interrupt used by the host controller
+ - power-domains : a phandle to USB power domain node to control USB's
+ mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+ entry in clock-names
+ - clock-names : must be
+ "sys_ck": for clock of xHCI MAC
+
+Optional properties:
+ - vbus-supply : reference to the VBUS regulator;
+ - usb3-lpm-capable : supports USB3.0 LPM
+
+Example:
+usb30: usb@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ usb3-lpm-capable;
+};
diff --git a/Documentation/devicetree/bindings/usb/ohci-da8xx.txt b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
new file mode 100644
index 000000000000..2dc8f67eda39
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
@@ -0,0 +1,23 @@
+DA8XX USB OHCI controller
+
+Required properties:
+
+ - compatible: Should be "ti,da830-ohci"
+ - reg: Should contain one register range i.e. start and length
+ - interrupts: Description of the interrupt line
+ - phys: Phandle for the PHY device
+ - phy-names: Should be "usb-phy"
+
+Optional properties:
+ - vbus-supply: phandle of regulator that controls vbus power / over-current
+
+Example:
+
+ohci: usb@0225000 {
+ compatible = "ti,da830-ohci";
+ reg = <0x225000 0x1000>;
+ interrupts = <59>;
+ phys = <&usb_phy 1>;
+ phy-names = "usb-phy";
+ vbus-supply = <&reg_usb_ohci>;
+};
diff --git a/Documentation/devicetree/bindings/usb/s3c2410-usb.txt b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
new file mode 100644
index 000000000000..e45b38ce2986
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
@@ -0,0 +1,22 @@
+Samsung S3C2410 and compatible SoC USB controller
+
+OHCI
+
+Required properties:
+ - compatible: should be "samsung,s3c2410-ohci" for USB host controller
+ - reg: address and length of the controller memory mapped region
+ - interrupts: interrupt number for the USB OHCI controller
+ - clocks: Should reference the bus and host clocks
+ - clock-names: Should contain two strings
+ "usb-bus-host" for the USB bus clock
+ "usb-host" for the USB host clock
+
+Example:
+
+usb0: ohci@49000000 {
+ compatible = "samsung,s3c2410-ohci";
+ reg = <0x49000000 0x100>;
+ interrupts = <0 0 26 3>;
+ clocks = <&clocks UCLK>, <&clocks HCLK_USBH>;
+ clock-names = "usb-bus-host", "usb-host";
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 966885c636d0..0b7d8576001c 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -11,6 +11,7 @@ Required properties:
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
- "renesas,xhci-r8a7795" for r8a7795 SoC
+ - "renesas,xhci-r8a7796" for r8a7796 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
- "xhci-platform" (deprecated)
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index f0a48ea78659..6e25c912a5c2 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,9 +24,11 @@ ampire Ampire Co., Ltd.
ams AMS AG
amstaos AMS-Taos Inc.
analogix Analogix Semiconductor, Inc.
+andestech Andes Technology Corporation
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
+aries Aries Embedded GmbH
arm ARM Ltd.
armadeus ARMadeus Systems SARL
arrow Arrow Electronics
@@ -39,6 +41,7 @@ auo AU Optronics Corporation
auvidea Auvidea GmbH
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
+axentia Axentia Technologies AB
axis Axis Communications AB
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
@@ -126,6 +129,7 @@ hitex Hitex Development Tools
holt Holt Integrated Circuits, Inc.
honeywell Honeywell
hp Hewlett Packard
+holtek Holtek Semiconductor, Inc.
i2se I2SE GmbH
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -135,6 +139,7 @@ infineon Infineon Technologies
inforce Inforce Computing
ingenic Ingenic Semiconductor
innolux Innolux Corporation
+inside-secure INSIDE Secure
intel Intel Corporation
intercontrol Inter Control Group
invensense InvenSense Inc.
@@ -158,18 +163,22 @@ lg LG Corporation
linux Linux-specific binding
lltc Linear Technology Corporation
lsi LSI Corp. (LSI Logic)
+macnica Macnica Americas
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
+mcube mCube
meas Measurement Specialties
mediatek MediaTek Inc.
melexis Melexis N.V.
melfas MELFAS Inc.
+memsic MEMSIC Inc.
merrii Merrii Technology Co., Ltd.
micrel Micrel Inc.
microchip Microchip Technology Inc.
microcrystal Micro Crystal AG
micron Micron Technology Inc.
minix MINIX Technology Ltd.
+miramems MiraMEMS Sensing Technology Co., Ltd.
mitsubishi Mitsubishi Electric Corporation
mosaixtech Mosaix Technologies, Inc.
moxa Moxa
@@ -180,6 +189,7 @@ mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
mundoreader Mundo Reader S.L.
murata Murata Manufacturing Co., Ltd.
mxicy Macronix International Co., Ltd.
+myir MYIR Tech Limited
national National Semiconductor
nec NEC LCD Technologies, Ltd.
neonode Neonode Inc.
@@ -187,12 +197,15 @@ netgear NETGEAR
netlogic Broadcom Corporation (formerly NetLogic Microsystems)
netxeon Shenzhen Netxeon Technology CO., LTD
newhaven Newhaven Display International
+ni National Instruments
nintendo Nintendo
nokia Nokia
nuvoton Nuvoton Technology Corporation
+nvd New Vision Display
nvidia NVIDIA
nxp NXP Semiconductors
okaya Okaya Electric America, Inc.
+oki Oki Electric Industry Co., Ltd.
olimex OLIMEX Ltd.
onion Onion Corporation
onnn ON Semiconductor Corp.
@@ -231,6 +244,7 @@ richtek Richtek Technology Corporation
ricoh Ricoh Co. Ltd.
rockchip Fuzhou Rockchip Electronics Co., Ltd
samsung Samsung Semiconductor
+samtec Samtec/Softing company
sandisk Sandisk Corporation
sbs Smart Battery System
schindler Schindler
@@ -275,6 +289,7 @@ tcg Trusted Computing Group
tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
+terasic Terasic Inc.
thine THine Electronics, Inc.
ti Texas Instruments
tlm Trusted Logic Mobility
@@ -289,6 +304,7 @@ tronfy Tronfy
tronsmart Tronsmart
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
+udoo Udoo
uniwest United Western Technologies Corp (UniWest)
upisemi uPI Semiconductor Corp.
urt United Radiant Technology Corporation
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 000000000000..9409d9c6a260
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+ - compatible: "sil,sii8620"
+ - reg: i2c address of the bridge
+ - cvcc10-supply: Digital Core Supply Voltage (1.0V)
+ - iovcc18-supply: I/O Supply Voltage (1.8V)
+ - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - reset-gpios: gpio specifier of RESET pin
+ - clocks, clock-names: specification and name of "xtal" clock
+ - video interfaces: the device node can contain a video interface port
+ node for the HDMI encoder, according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ sii8620@39 {
+ reg = <0x39>;
+ compatible = "sil,sii8620";
+ cvcc10-supply = <&ldo36_reg>;
+ iovcc18-supply = <&ldo34_reg>;
+ interrupt-parent = <&gpf0>;
+ interrupts = <2 0>;
+ reset-gpios = <&gpv7 0 0>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "xtal";
+
+ port {
+ mhl_to_hdmi: endpoint {
+ remote-endpoint = <&hdmi_to_mhl>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 3f1437fbca6b..280d283304bb 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -974,6 +974,13 @@ compatibility.
4Gb. Some vendors prefer splitting those ranges into smaller
segments, but the kernel doesn't care.
+ Additional properties:
+
+ - hotpluggable : The presence of this property provides an explicit
+ hint to the operating system that this memory may potentially be
+ removed later. The kernel can take this into consideration when
+ doing nonmovable allocations and when laying out memory zones.
+
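
For illustration, a memory node carrying the new "hotpluggable" hint might look
like this (the address range is an example only):

        memory@100000000 {
                device_type = "memory";
                reg = <0x1 0x00000000 0x0 0x80000000>;  /* example range only */
                hotpluggable;
        };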
e) The /chosen node
This node is a bit "special". Normally, that's where Open Firmware
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 9e33189745f0..c72b4563de10 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -37,8 +37,8 @@ The slave DMA usage consists of following steps:
2. Set slave and controller specific parameters
Next step is always to pass some specific information to the DMA
- driver. Most of the generic information which a slave DMA can use
- is in struct dma_slave_config. This allows the clients to specify
+ driver. Most of the generic information which a slave DMA can use
+ is in struct dma_slave_config. This allows the clients to specify
DMA direction, DMA addresses, bus widths, DMA burst lengths etc
for the peripheral.
@@ -52,7 +52,7 @@ The slave DMA usage consists of following steps:
struct dma_slave_config *config)
Please see the dma_slave_config structure definition in dmaengine.h
- for a detailed explanation of the struct members. Please note
+ for a detailed explanation of the struct members. Please note
that the 'direction' member will be going away as it duplicates the
direction given in the prepare call.
@@ -101,7 +101,7 @@ The slave DMA usage consists of following steps:
desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
Once a descriptor has been obtained, the callback information can be
- added and the descriptor must then be submitted. Some DMA engine
+ added and the descriptor must then be submitted. Some DMA engine
drivers may hold a spinlock between a successful preparation and
submission so it is important that these two operations are closely
paired.
@@ -138,7 +138,7 @@ The slave DMA usage consists of following steps:
activity via other DMA engine calls not covered in this document.
dmaengine_submit() will not start the DMA operation, it merely adds
- it to the pending queue. For this, see step 5, dma_async_issue_pending.
+ it to the pending queue. For this, see step 5, dma_async_issue_pending.
5. Issue pending DMA requests and wait for callback notification
@@ -184,13 +184,13 @@ Further APIs:
3. int dmaengine_resume(struct dma_chan *chan)
- Resume a previously paused DMA channel. It is invalid to resume a
+ Resume a previously paused DMA channel. It is invalid to resume a
channel which is not currently paused.
4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
- This can be used to check the status of the channel. Please see
+ This can be used to check the status of the channel. Please see
the documentation in include/linux/dmaengine.h for a more complete
description of this API.
@@ -200,7 +200,7 @@ Further APIs:
Note:
Not all DMA engine drivers can return reliable information for
- a running DMA channel. It is recommended that DMA engine users
+ a running DMA channel. It is recommended that DMA engine users
pause or stop (via dmaengine_terminate_all()) the channel before
using this API.
diff --git a/Documentation/dmaengine/dmatest.txt b/Documentation/dmaengine/dmatest.txt
index dd77a81bdb80..fb683c72dea8 100644
--- a/Documentation/dmaengine/dmatest.txt
+++ b/Documentation/dmaengine/dmatest.txt
@@ -34,7 +34,7 @@ command:
% ls -1 /sys/class/dma/
Once started a message like "dmatest: Started 1 threads using dma0chan0" is
-emitted. After that only test failure messages are reported until the test
+emitted. After that only test failure messages are reported until the test
stops.
Note that running a new test will not stop any in progress test.
@@ -43,11 +43,11 @@ The following command returns the state of the test.
% cat /sys/module/dmatest/parameters/run
To wait for test completion userpace can poll 'run' until it is false, or use
-the wait parameter. Specifying 'wait=1' when loading the module causes module
+the wait parameter. Specifying 'wait=1' when loading the module causes module
initialization to pause until a test run has completed, while reading
/sys/module/dmatest/parameters/wait waits for any running test to complete
-before returning. For example, the following scripts wait for 42 tests
-to complete before exiting. Note that if 'iterations' is set to 'infinite' then
+before returning. For example, the following scripts wait for 42 tests
+to complete before exiting. Note that if 'iterations' is set to 'infinite' then
waiting is disabled.
Example:
@@ -81,7 +81,7 @@ Example of output:
The message format is unified across the different types of errors. A number in
the parens represents additional information, e.g. error code, error counter,
-or status. A test thread also emits a summary line at completion listing the
+or status. A test thread also emits a summary line at completion listing the
number of tests executed, number that failed, and a result code.
Example:
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index c4fd47540b31..e33bc1c8ed2c 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -384,7 +384,7 @@ where to put them)
- The descriptor should be prepared for reuse by invoking
dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
- dmaengine_desc_set_reuse() will succeed only when channel support
- reusable descriptor as exhibited by capablities
+ reusable descriptor as exhibited by capabilities
- As a consequence, if a device driver wants to skip the dma_map_sg() and
dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
it can resubmit the transfer right after its completion.
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
index 413ef9cfaa4d..0736d44b5438 100644
--- a/Documentation/dmaengine/pxa_dma.txt
+++ b/Documentation/dmaengine/pxa_dma.txt
@@ -29,7 +29,7 @@ Constraints
d) Bandwidth guarantee
The PXA architecture has 4 levels of DMAs priorities : high, normal, low.
- The high prorities get twice as much bandwidth as the normal, which get twice
+ The high priorities get twice as much bandwidth as the normal, which get twice
as much as the low priorities.
A driver should be able to request a priority, especially the real-time
ones such as pxa_camera with (big) throughputs.
diff --git a/Documentation/doc-guide/conf.py b/Documentation/doc-guide/conf.py
new file mode 100644
index 000000000000..fd3731182d5a
--- /dev/null
+++ b/Documentation/doc-guide/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Kernel Documentation Guide'
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/doc-guide/docbook.rst b/Documentation/doc-guide/docbook.rst
new file mode 100644
index 000000000000..d8bf04308b43
--- /dev/null
+++ b/Documentation/doc-guide/docbook.rst
@@ -0,0 +1,90 @@
+DocBook XML [DEPRECATED]
+========================
+
+.. attention::
+
+ This section describes the deprecated DocBook XML toolchain. Please do not
+ create new DocBook XML template files. Please consider converting existing
+ DocBook XML templates files to Sphinx/reStructuredText.
+
+Converting DocBook to Sphinx
+----------------------------
+
+Over time, we expect all of the documents under ``Documentation/DocBook`` to be
+converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
+enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
+which uses ``pandoc`` under the hood. For example::
+
+ $ cd Documentation/sphinx
+ $ ./tmplcvt ../DocBook/in.tmpl ../out.rst
+
+Then edit the resulting rst files to fix any remaining issues, and add the
+document in the ``toctree`` in ``Documentation/index.rst``.
+
+Components of the kernel-doc system
+-----------------------------------
+
+Many places in the source tree have extractable documentation in the form of
+block comments above functions. The components of this system are:
+
+- ``scripts/kernel-doc``
+
+ This is a perl script that hunts for the block comments and can mark them up
+ directly into reStructuredText, DocBook, man, text, and HTML. (No, not
+ texinfo.)
+
+- ``Documentation/DocBook/*.tmpl``
+
+ These are XML template files, which are normal XML files with special
+ place-holders for where the extracted documentation should go.
+
+- ``scripts/docproc.c``
+
+ This is a program for converting XML template files into XML files. When a
+ file is referenced it is searched for symbols exported (EXPORT_SYMBOL), to be
+ able to distinguish between internal and external functions.
+
+ It invokes kernel-doc, giving it the list of functions that are to be
+ documented.
+
+ Additionally it is used to scan the XML template files to locate all the files
+ referenced herein. This is used to generate dependency information as used by
+ make.
+
+- ``Makefile``
+
+ The targets 'xmldocs', 'psdocs', 'pdfdocs', and 'htmldocs' are used to build
+ DocBook XML files, PostScript files, PDF files, and html files in
+ Documentation/DocBook. The older target 'sgmldocs' is equivalent to 'xmldocs'.
+
+- ``Documentation/DocBook/Makefile``
+
+ This is where C files are associated with SGML templates.
+
+How to use kernel-doc comments in DocBook XML template files
+------------------------------------------------------------
+
+DocBook XML template files (\*.tmpl) are like normal XML files, except that they
+can contain escape sequences where extracted documentation should be inserted.
+
+``!E<filename>`` is replaced by the documentation, in ``<filename>``, for
+functions that are exported using ``EXPORT_SYMBOL``: the function list is
+collected from files listed in ``Documentation/DocBook/Makefile``.
+
+``!I<filename>`` is replaced by the documentation for functions that are **not**
+exported using ``EXPORT_SYMBOL``.
+
+``!D<filename>`` is used to name additional files to search for functions
+exported using ``EXPORT_SYMBOL``.
+
+``!F<filename> <function [functions...]>`` is replaced by the documentation, in
+``<filename>``, for the functions listed.
+
+``!P<filename> <section title>`` is replaced by the contents of the ``DOC:``
+section titled ``<section title>`` from ``<filename>``. Spaces are allowed in
+``<section title>``; do not quote the ``<section title>``.
+
+``!C<filename>`` is replaced by nothing, but makes the tools check that all DOC:
+sections and documented functions, symbols, etc. are used. This makes sense to
+use when you use ``!F`` or ``!P`` only and want to verify that all documentation
+is included.
diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst
new file mode 100644
index 000000000000..6fff4024606e
--- /dev/null
+++ b/Documentation/doc-guide/index.rst
@@ -0,0 +1,20 @@
+.. _doc_guide:
+
+=================================
+How to write kernel documentation
+=================================
+
+.. toctree::
+ :maxdepth: 1
+
+ sphinx.rst
+ kernel-doc.rst
+ parse-headers.rst
+ docbook.rst
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/kernel-documentation.rst b/Documentation/doc-guide/kernel-doc.rst
index 10cc7ddb6235..b32e4813ff6f 100644
--- a/Documentation/kernel-documentation.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -1,228 +1,3 @@
-==========================
-Linux Kernel Documentation
-==========================
-
-Introduction
-============
-
-The Linux kernel uses `Sphinx`_ to generate pretty documentation from
-`reStructuredText`_ files under ``Documentation``. To build the documentation in
-HTML or PDF formats, use ``make htmldocs`` or ``make pdfdocs``. The generated
-documentation is placed in ``Documentation/output``.
-
-.. _Sphinx: http://www.sphinx-doc.org/
-.. _reStructuredText: http://docutils.sourceforge.net/rst.html
-
-The reStructuredText files may contain directives to include structured
-documentation comments, or kernel-doc comments, from source files. Usually these
-are used to describe the functions and types and design of the code. The
-kernel-doc comments have some special structure and formatting, but beyond that
-they are also treated as reStructuredText.
-
-There is also the deprecated DocBook toolchain to generate documentation from
-DocBook XML template files under ``Documentation/DocBook``. The DocBook files
-are to be converted to reStructuredText, and the toolchain is slated to be
-removed.
-
-Finally, there are thousands of plain text documentation files scattered around
-``Documentation``. Some of these will likely be converted to reStructuredText
-over time, but the bulk of them will remain in plain text.
-
-Sphinx Build
-============
-
-The usual way to generate the documentation is to run ``make htmldocs`` or
-``make pdfdocs``. There are also other formats available, see the documentation
-section of ``make help``. The generated documentation is placed in
-format-specific subdirectories under ``Documentation/output``.
-
-To generate documentation, Sphinx (``sphinx-build``) must obviously be
-installed. For prettier HTML output, the Read the Docs Sphinx theme
-(``sphinx_rtd_theme``) is used if available. For PDF output, ``rst2pdf`` is also
-needed. All of these are widely available and packaged in distributions.
-
-To pass extra options to Sphinx, you can use the ``SPHINXOPTS`` make
-variable. For example, use ``make SPHINXOPTS=-v htmldocs`` to get more verbose
-output.
-
-To remove the generated documentation, run ``make cleandocs``.
-
-Writing Documentation
-=====================
-
-Adding new documentation can be as simple as:
-
-1. Add a new ``.rst`` file somewhere under ``Documentation``.
-2. Refer to it from the Sphinx main `TOC tree`_ in ``Documentation/index.rst``.
-
-.. _TOC tree: http://www.sphinx-doc.org/en/stable/markup/toctree.html
-
-This is usually good enough for simple documentation (like the one you're
-reading right now), but for larger documents it may be advisable to create a
-subdirectory (or use an existing one). For example, the graphics subsystem
-documentation is under ``Documentation/gpu``, split to several ``.rst`` files,
-and has a separate ``index.rst`` (with a ``toctree`` of its own) referenced from
-the main index.
-
-See the documentation for `Sphinx`_ and `reStructuredText`_ on what you can do
-with them. In particular, the Sphinx `reStructuredText Primer`_ is a good place
-to get started with reStructuredText. There are also some `Sphinx specific
-markup constructs`_.
-
-.. _reStructuredText Primer: http://www.sphinx-doc.org/en/stable/rest.html
-.. _Sphinx specific markup constructs: http://www.sphinx-doc.org/en/stable/markup/index.html
-
-Specific guidelines for the kernel documentation
-------------------------------------------------
-
-Here are some specific guidelines for the kernel documentation:
-
-* Please don't go overboard with reStructuredText markup. Keep it simple.
-
-* Please stick to this order of heading adornments:
-
- 1. ``=`` with overline for document title::
-
- ==============
- Document title
- ==============
-
- 2. ``=`` for chapters::
-
- Chapters
- ========
-
- 3. ``-`` for sections::
-
- Section
- -------
-
- 4. ``~`` for subsections::
-
- Subsection
- ~~~~~~~~~~
-
- Although RST doesn't mandate a specific order ("Rather than imposing a fixed
- number and order of section title adornment styles, the order enforced will be
- the order as encountered."), having the higher levels the same overall makes
- it easier to follow the documents.
-
-
-the C domain
-------------
-
-The `Sphinx C Domain`_ (name c) is suited for documentation of C API. E.g. a
-function prototype:
-
-.. code-block:: rst
-
- .. c:function:: int ioctl( int fd, int request )
-
-The C domain of the kernel-doc has some additional features. E.g. you can
-*rename* the reference name of a function with a common name like ``open`` or
-``ioctl``:
-
-.. code-block:: rst
-
- .. c:function:: int ioctl( int fd, int request )
- :name: VIDIOC_LOG_STATUS
-
-The func-name (e.g. ioctl) remains in the output but the ref-name changed from
-``ioctl`` to ``VIDIOC_LOG_STATUS``. The index entry for this function is also
-changed to ``VIDIOC_LOG_STATUS`` and the function can now referenced by:
-
-.. code-block:: rst
-
- :c:func:`VIDIOC_LOG_STATUS`
-
-
-list tables
------------
-
-We recommend the use of *list table* formats. The *list table* formats are
-double-stage lists. Compared to the ASCII-art they might not be as
-comfortable for
-readers of the text files. Their advantage is that they are easy to
-create or modify and that the diff of a modification is much more meaningful,
-because it is limited to the modified content.
-
-The ``flat-table`` is a double-stage list similar to the ``list-table`` with
-some additional features:
-
-* column-span: with the role ``cspan`` a cell can be extended through
- additional columns
-
-* row-span: with the role ``rspan`` a cell can be extended through
- additional rows
-
-* auto span rightmost cell of a table row over the missing cells on the right
- side of that table-row. With Option ``:fill-cells:`` this behavior can
- changed from *auto span* to *auto fill*, which automatically inserts (empty)
- cells instead of spanning the last cell.
-
-options:
-
-* ``:header-rows:`` [int] count of header rows
-* ``:stub-columns:`` [int] count of stub columns
-* ``:widths:`` [[int] [int] ... ] widths of columns
-* ``:fill-cells:`` instead of auto-spanning missing cells, insert missing cells
-
-roles:
-
-* ``:cspan:`` [int] additional columns (*morecols*)
-* ``:rspan:`` [int] additional rows (*morerows*)
-
-The example below shows how to use this markup. The first level of the staged
-list is the *table-row*. In the *table-row* there is only one markup allowed,
-the list of the cells in this *table-row*. Exceptions are *comments* ( ``..`` )
-and *targets* (e.g. a ref to ``:ref:`last row <last row>``` / :ref:`last row
-<last row>`).
-
-.. code-block:: rst
-
- .. flat-table:: table title
- :widths: 2 1 1 3
-
- * - head col 1
- - head col 2
- - head col 3
- - head col 4
-
- * - column 1
- - field 1.1
- - field 1.2 with autospan
-
- * - column 2
- - field 2.1
- - :rspan:`1` :cspan:`1` field 2.2 - 3.3
-
- * .. _`last row`:
-
- - column 3
-
-Rendered as:
-
- .. flat-table:: table title
- :widths: 2 1 1 3
-
- * - head col 1
- - head col 2
- - head col 3
- - head col 4
-
- * - column 1
- - field 1.1
- - field 1.2 with autospan
-
- * - column 2
- - field 2.1
- - :rspan:`1` :cspan:`1` field 2.2 - 3.3
-
- * .. _`last row`:
-
- - column 3
-
-
Including kernel-doc comments
=============================
@@ -484,7 +259,10 @@ span multiple lines. The continuation lines may contain indentation.
In-line member documentation comments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The structure members may also be documented in-line within the definition::
+The structure members may also be documented in-line within the definition.
+There are two styles: single-line comments, where both the opening ``/**`` and
+closing ``*/`` are on the same line, and multi-line comments, where they are
+each on a line of their own, like all other kernel-doc comments::
/**
* struct foo - Brief description.
@@ -502,6 +280,8 @@ The structure members may also be documented in-line within the definition::
* Here, the member description may contain several paragraphs.
*/
int baz;
+ /** @foobar: Single line description. */
+ int foobar;
}
Private members
@@ -586,94 +366,3 @@ file.
Data structures visible in kernel include files should also be documented using
kernel-doc formatted comments.
-
-DocBook XML [DEPRECATED]
-========================
-
-.. attention::
-
- This section describes the deprecated DocBook XML toolchain. Please do not
- create new DocBook XML template files. Please consider converting existing
- DocBook XML templates files to Sphinx/reStructuredText.
-
-Converting DocBook to Sphinx
-----------------------------
-
-Over time, we expect all of the documents under ``Documentation/DocBook`` to be
-converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
-enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
-which uses ``pandoc`` under the hood. For example::
-
- $ cd Documentation/sphinx
- $ ./tmplcvt ../DocBook/in.tmpl ../out.rst
-
-Then edit the resulting rst files to fix any remaining issues, and add the
-document in the ``toctree`` in ``Documentation/index.rst``.
-
-Components of the kernel-doc system
------------------------------------
-
-Many places in the source tree have extractable documentation in the form of
-block comments above functions. The components of this system are:
-
-- ``scripts/kernel-doc``
-
- This is a perl script that hunts for the block comments and can mark them up
- directly into reStructuredText, DocBook, man, text, and HTML. (No, not
- texinfo.)
-
-- ``Documentation/DocBook/*.tmpl``
-
- These are XML template files, which are normal XML files with special
- place-holders for where the extracted documentation should go.
-
-- ``scripts/docproc.c``
-
- This is a program for converting XML template files into XML files. When a
- file is referenced it is searched for symbols exported (EXPORT_SYMBOL), to be
- able to distinguish between internal and external functions.
-
- It invokes kernel-doc, giving it the list of functions that are to be
- documented.
-
- Additionally it is used to scan the XML template files to locate all the files
- referenced herein. This is used to generate dependency information as used by
- make.
-
-- ``Makefile``
-
- The targets 'xmldocs', 'psdocs', 'pdfdocs', and 'htmldocs' are used to build
- DocBook XML files, PostScript files, PDF files, and html files in
- Documentation/DocBook. The older target 'sgmldocs' is equivalent to 'xmldocs'.
-
-- ``Documentation/DocBook/Makefile``
-
- This is where C files are associated with SGML templates.
-
-How to use kernel-doc comments in DocBook XML template files
-------------------------------------------------------------
-
-DocBook XML template files (\*.tmpl) are like normal XML files, except that they
-can contain escape sequences where extracted documentation should be inserted.
-
-``!E<filename>`` is replaced by the documentation, in ``<filename>``, for
-functions that are exported using ``EXPORT_SYMBOL``: the function list is
-collected from files listed in ``Documentation/DocBook/Makefile``.
-
-``!I<filename>`` is replaced by the documentation for functions that are **not**
-exported using ``EXPORT_SYMBOL``.
-
-``!D<filename>`` is used to name additional files to search for functions
-exported using ``EXPORT_SYMBOL``.
-
-``!F<filename> <function [functions...]>`` is replaced by the documentation, in
-``<filename>``, for the functions listed.
-
-``!P<filename> <section title>`` is replaced by the contents of the ``DOC:``
-section titled ``<section title>`` from ``<filename>``. Spaces are allowed in
-``<section title>``; do not quote the ``<section title>``.
-
-``!C<filename>`` is replaced by nothing, but makes the tools check that all DOC:
-sections and documented functions, symbols, etc. are used. This makes sense to
-use when you use ``!F`` or ``!P`` only and want to verify that all documentation
-is included.
diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst
new file mode 100644
index 000000000000..96a0423d5dba
--- /dev/null
+++ b/Documentation/doc-guide/parse-headers.rst
@@ -0,0 +1,192 @@
+===========================
+Including uAPI header files
+===========================
+
+Sometimes, it is useful to include header files and C code examples in
+order to describe the userspace API and to generate cross-references
+between the code and the documentation. Adding cross-references for
+userspace API files has an additional advantage: Sphinx will generate
+warnings if a symbol is not found in the documentation. That helps to keep
+the uAPI documentation in sync with the Kernel changes.
+The :ref:`parse_headers.pl <parse_headers>` script provides a way to
+generate such cross-references. It has to be called from a Makefile while
+building the documentation. Please see ``Documentation/media/Makefile`` for
+an example of how to use it inside the Kernel tree.
+
+.. _parse_headers:
+
+parse_headers.pl
+^^^^^^^^^^^^^^^^
+
+NAME
+****
+
+
+parse_headers.pl - parse a C file, in order to identify functions, structs,
+enums and defines and create cross-references to a Sphinx book.
+
+
+SYNOPSIS
+********
+
+
+\ **parse_headers.pl**\ [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
+
+Where <options> can be: --debug, --help or --man.
+
+
+OPTIONS
+*******
+
+
+
+\ **--debug**\
+
+ Put the script in verbose mode, useful for debugging.
+
+
+
+\ **--usage**\
+
+ Prints a brief help message and exits.
+
+
+
+\ **--help**\
+
+ Prints a more detailed help message and exits.
+
+
+DESCRIPTION
+***********
+
+
+Convert a C header or source file (C_FILE) into reStructuredText, included
+via a ``.. parsed-literal::`` block with cross-references to the
+documentation files that describe the API. It accepts an optional
+EXCEPTIONS_FILE which describes what elements will be either ignored or
+pointed to a non-default reference.
+
+The output is written to OUT_FILE.
+
+It is capable of identifying defines, functions, structs, typedefs,
+enums and enum symbols, and of creating cross-references for all of them.
+It is also capable of distinguishing a #define used for specifying a Linux
+ioctl.
+
+The EXCEPTIONS_FILE contains two types of statements: \ **ignore**\ or \ **replace**\ .
+
+The syntax for the ignore tag is:
+
+
+ignore \ **type**\ \ **name**\
+
+The \ **ignore**\ means that it won't generate cross references for a
+\ **name**\ symbol of type \ **type**\ .
+
+The syntax for the replace tag is:
+
+
+replace \ **type**\ \ **name**\ \ **new_value**\
+
+The \ **replace**\ means that it will generate cross references for a
+\ **name**\ symbol of type \ **type**\ , but, instead of using the default
+replacement rule, it will use \ **new_value**\ .
+
+For both statements, \ **type**\ can be either one of the following:
+
+
+\ **ioctl**\
+
+ The ignore or replace statement will apply to ioctl definitions like:
+
+ #define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+
+
+
+\ **define**\
+
+ The ignore or replace statement will apply to any other #define found
+ at C_FILE.
+
+
+
+\ **typedef**\
+
+ The ignore or replace statement will apply to typedef statements at C_FILE.
+
+
+
+\ **struct**\
+
+ The ignore or replace statement will apply to the name of struct statements
+ at C_FILE.
+
+
+
+\ **enum**\
+
+ The ignore or replace statement will apply to the name of enum statements
+ at C_FILE.
+
+
+
+\ **symbol**\
+
+ The ignore or replace statement will apply to the name of enum symbols
+ (enum values) at C_FILE.
+
+ For replace statements, \ **new_value**\ will automatically use :c:type:
+ references for \ **typedef**\ , \ **enum**\ and \ **struct**\ types. It will use :ref:
+ for \ **ioctl**\ , \ **define**\ and \ **symbol**\ types. The type of reference can
+ also be explicitly defined in the replace statement.
+
+
+
+EXAMPLES
+********
+
+
+ignore define _VIDEODEV2_H
+
+
+Ignore a #define _VIDEODEV2_H at the C_FILE.
+
+ignore symbol PRIVATE
+
+
+On an enum like:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+It won't generate cross-references for \ **PRIVATE**\ .
+
+replace symbol BAR1 :c:type:\`foo\`
+replace symbol BAR2 :c:type:\`foo\`
+
+
+On an enum like:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+It will make the BAR1 and BAR2 enum symbols cross-reference the foo
+symbol in the C domain.
+
+
+BUGS
+****
+
+
+Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+
+
+COPYRIGHT
+*********
+
+
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+
+License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
new file mode 100644
index 000000000000..96fe7ccb2c67
--- /dev/null
+++ b/Documentation/doc-guide/sphinx.rst
@@ -0,0 +1,219 @@
+Introduction
+============
+
+The Linux kernel uses `Sphinx`_ to generate pretty documentation from
+`reStructuredText`_ files under ``Documentation``. To build the documentation in
+HTML or PDF formats, use ``make htmldocs`` or ``make pdfdocs``. The generated
+documentation is placed in ``Documentation/output``.
+
+.. _Sphinx: http://www.sphinx-doc.org/
+.. _reStructuredText: http://docutils.sourceforge.net/rst.html
+
+The reStructuredText files may contain directives to include structured
+documentation comments, or kernel-doc comments, from source files. Usually these
+are used to describe the functions and types and design of the code. The
+kernel-doc comments have some special structure and formatting, but beyond that
+they are also treated as reStructuredText.
+
+There is also the deprecated DocBook toolchain to generate documentation from
+DocBook XML template files under ``Documentation/DocBook``. The DocBook files
+are to be converted to reStructuredText, and the toolchain is slated to be
+removed.
+
+Finally, there are thousands of plain text documentation files scattered around
+``Documentation``. Some of these will likely be converted to reStructuredText
+over time, but the bulk of them will remain in plain text.
+
+Sphinx Build
+============
+
+The usual way to generate the documentation is to run ``make htmldocs`` or
+``make pdfdocs``. There are also other formats available, see the documentation
+section of ``make help``. The generated documentation is placed in
+format-specific subdirectories under ``Documentation/output``.
+
+To generate documentation, Sphinx (``sphinx-build``) must obviously be
+installed. For prettier HTML output, the Read the Docs Sphinx theme
+(``sphinx_rtd_theme``) is used if available. For PDF output, ``rst2pdf`` is also
+needed. All of these are widely available and packaged in distributions.
+
+To pass extra options to Sphinx, you can use the ``SPHINXOPTS`` make
+variable. For example, use ``make SPHINXOPTS=-v htmldocs`` to get more verbose
+output.
+
+To remove the generated documentation, run ``make cleandocs``.
+
+Writing Documentation
+=====================
+
+Adding new documentation can be as simple as:
+
+1. Add a new ``.rst`` file somewhere under ``Documentation``.
+2. Refer to it from the Sphinx main `TOC tree`_ in ``Documentation/index.rst``.
+
+.. _TOC tree: http://www.sphinx-doc.org/en/stable/markup/toctree.html
+
+This is usually good enough for simple documentation (like the one you're
+reading right now), but for larger documents it may be advisable to create a
+subdirectory (or use an existing one). For example, the graphics subsystem
+documentation is under ``Documentation/gpu``, split into several ``.rst`` files,
+and has a separate ``index.rst`` (with a ``toctree`` of its own) referenced from
+the main index.
+
+See the documentation for `Sphinx`_ and `reStructuredText`_ on what you can do
+with them. In particular, the Sphinx `reStructuredText Primer`_ is a good place
+to get started with reStructuredText. There are also some `Sphinx specific
+markup constructs`_.
+
+.. _reStructuredText Primer: http://www.sphinx-doc.org/en/stable/rest.html
+.. _Sphinx specific markup constructs: http://www.sphinx-doc.org/en/stable/markup/index.html
+.. _Sphinx C Domain: http://www.sphinx-doc.org/en/stable/domains.html
+
+Specific guidelines for the kernel documentation
+------------------------------------------------
+
+Here are some specific guidelines for the kernel documentation:
+
+* Please don't go overboard with reStructuredText markup. Keep it simple.
+
+* Please stick to this order of heading adornments:
+
+ 1. ``=`` with overline for document title::
+
+ ==============
+ Document title
+ ==============
+
+ 2. ``=`` for chapters::
+
+ Chapters
+ ========
+
+ 3. ``-`` for sections::
+
+ Section
+ -------
+
+ 4. ``~`` for subsections::
+
+ Subsection
+ ~~~~~~~~~~
+
+ Although RST doesn't mandate a specific order ("Rather than imposing a fixed
+ number and order of section title adornment styles, the order enforced will be
+ the order as encountered."), keeping the higher levels consistent across
+ documents makes them easier to follow.
+
+
+the C domain
+------------
+
+The `Sphinx C Domain`_ (name c) is suited for documentation of C API. E.g. a
+function prototype:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+
+The C domain of the kernel-doc has some additional features. E.g. you can
+*rename* the reference name of a function with a common name like ``open`` or
+``ioctl``:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+The func-name (e.g. ioctl) remains in the output but the ref-name is changed
+from ``ioctl`` to ``VIDIOC_LOG_STATUS``. The index entry for this function is
+also changed to ``VIDIOC_LOG_STATUS`` and the function can now be referenced by:
+
+.. code-block:: rst
+
+ :c:func:`VIDIOC_LOG_STATUS`
+
+
+list tables
+-----------
+
+We recommend the use of *list table* formats. The *list table* formats are
+double-stage lists. Compared to ASCII-art tables they might not be as
+comfortable for readers of the plain text files. Their advantage is that they
+are easy to create or modify and that the diff of a modification is much more
+meaningful, because it is limited to the modified content.
+
+The ``flat-table`` is a double-stage list similar to the ``list-table`` with
+some additional features:
+
+* column-span: with the role ``cspan`` a cell can be extended through
+ additional columns
+
+* row-span: with the role ``rspan`` a cell can be extended through
+ additional rows
+
+* auto-span: the rightmost cell of a table row is automatically extended over
+ the missing cells on the right side of that table-row. With the option
+ ``:fill-cells:`` this behavior can be changed from *auto span* to
+ *auto fill*, which automatically inserts (empty) cells instead of spanning
+ the last cell.
+
+options:
+
+* ``:header-rows:`` [int] count of header rows
+* ``:stub-columns:`` [int] count of stub columns
+* ``:widths:`` [[int] [int] ... ] widths of columns
+* ``:fill-cells:`` instead of auto-spanning missing cells, insert missing cells
+
+roles:
+
+* ``:cspan:`` [int] additional columns (*morecols*)
+* ``:rspan:`` [int] additional rows (*morerows*)
+
+The example below shows how to use this markup. The first level of the staged
+list is the *table-row*. In the *table-row* there is only one markup allowed,
+the list of the cells in this *table-row*. Exceptions are *comments* ( ``..`` )
+and *targets* (e.g. a ref to ``:ref:`last row <last row>``` / :ref:`last row
+<last row>`).
+
+.. code-block:: rst
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`last row`:
+
+ - column 3
+
+Rendered as:
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`last row`:
+
+ - column 3
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 5385cba941d2..a23edccd2059 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -139,7 +139,6 @@ hpet_example
hugepage-mmap
hugepage-shm
ihex2fw
-ikconfig.h*
inat-tables.c
initramfs_list
int16.c
diff --git a/Documentation/80211/cfg80211.rst b/Documentation/driver-api/80211/cfg80211.rst
index b1e149ea6fee..b1e149ea6fee 100644
--- a/Documentation/80211/cfg80211.rst
+++ b/Documentation/driver-api/80211/cfg80211.rst
diff --git a/Documentation/80211/conf.py b/Documentation/driver-api/80211/conf.py
index 20c7c275ef4a..4424b4b0b9c3 100644
--- a/Documentation/80211/conf.py
+++ b/Documentation/driver-api/80211/conf.py
@@ -3,3 +3,8 @@
project = "Linux 802.11 Driver Developer's Guide"
tags.add("subproject")
+
+latex_documents = [
+ ('index', '80211.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/80211/index.rst b/Documentation/driver-api/80211/index.rst
index 90bba476f442..af210859d3e1 100644
--- a/Documentation/80211/index.rst
+++ b/Documentation/driver-api/80211/index.rst
@@ -9,7 +9,7 @@ Linux 802.11 Driver Developer's Guide
mac80211
mac80211-advanced
-.. only:: subproject
+.. only:: subproject and html
Indices
=======
diff --git a/Documentation/80211/introduction.rst b/Documentation/driver-api/80211/introduction.rst
index 4938fa87691c..4938fa87691c 100644
--- a/Documentation/80211/introduction.rst
+++ b/Documentation/driver-api/80211/introduction.rst
diff --git a/Documentation/80211/mac80211-advanced.rst b/Documentation/driver-api/80211/mac80211-advanced.rst
index 70a89b2163c2..70a89b2163c2 100644
--- a/Documentation/80211/mac80211-advanced.rst
+++ b/Documentation/driver-api/80211/mac80211-advanced.rst
diff --git a/Documentation/80211/mac80211.rst b/Documentation/driver-api/80211/mac80211.rst
index 85a8335e80b6..85a8335e80b6 100644
--- a/Documentation/80211/mac80211.rst
+++ b/Documentation/driver-api/80211/mac80211.rst
diff --git a/Documentation/driver-api/conf.py b/Documentation/driver-api/conf.py
new file mode 100644
index 000000000000..202726d20088
--- /dev/null
+++ b/Documentation/driver-api/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "The Linux driver implementer's API guide"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'driver-api.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
new file mode 100644
index 000000000000..5f5713448703
--- /dev/null
+++ b/Documentation/driver-api/device_link.rst
@@ -0,0 +1,279 @@
+============
+Device links
+============
+
+By default, the driver core only enforces dependencies between devices
+that are borne out of a parent/child relationship within the device
+hierarchy: When suspending, resuming or shutting down the system, devices
+are ordered based on this relationship, i.e. children are always suspended
+before their parent, and the parent is always resumed before its children.
+
+Sometimes there is a need to represent device dependencies beyond the
+mere parent/child relationship, e.g. between siblings, and have the
+driver core automatically take care of them.
+
+Secondly, the driver core by default does not enforce any driver presence
+dependencies, i.e. that one device must be bound to a driver before
+another one can probe or function correctly.
+
+Often these two dependency types come together, so a device depends on
+another one both with regards to driver presence *and* with regards to
+suspend/resume and shutdown ordering.
+
+Device links allow representation of such dependencies in the driver core.
+
+In its standard form, a device link combines *both* dependency types:
+It guarantees correct suspend/resume and shutdown ordering between a
+"supplier" device and its "consumer" devices, and it guarantees driver
+presence on the supplier. The consumer devices are not probed before the
+supplier is bound to a driver, and they're unbound before the supplier
+is unbound.
+
+When driver presence on the supplier is irrelevant and only correct
+suspend/resume and shutdown ordering is needed, the device link may
+simply be set up with the ``DL_FLAG_STATELESS`` flag. In other words,
+enforcing driver presence on the supplier is optional.
+
+Another optional feature is runtime PM integration: By setting the
+``DL_FLAG_PM_RUNTIME`` flag on addition of the device link, the PM core
+is instructed to runtime resume the supplier and keep it active
+whenever and for as long as the consumer is runtime resumed.
+
+Usage
+=====
+
+The earliest point in time when device links can be added is after
+:c:func:`device_add()` has been called for the supplier and
+:c:func:`device_initialize()` has been called for the consumer.
+
+It is legal to add them later, but care must be taken that the system
+remains in a consistent state: E.g. a device link cannot be added in
+the midst of a suspend/resume transition, so either commencement of
+such a transition needs to be prevented with :c:func:`lock_system_sleep()`,
+or the device link needs to be added from a function which is guaranteed
+not to run in parallel to a suspend/resume transition, such as from a
+device ``->probe`` callback or a boot-time PCI quirk.
+
+Another example for an inconsistent state would be a device link that
+represents a driver presence dependency, yet is added from the consumer's
+``->probe`` callback while the supplier hasn't probed yet: Had the driver
+core known about the device link earlier, it wouldn't have probed the
+consumer in the first place. The onus is thus on the consumer to check
+presence of the supplier after adding the link, and defer probing on
+non-presence.
+
+If a device link is added in the ``->probe`` callback of the supplier or
+consumer driver, it is typically deleted in its ``->remove`` callback for
+symmetry. That way, if the driver is compiled as a module, the device
+link is added on module load and orderly deleted on unload. The same
+restrictions that apply to device link addition (e.g. exclusion of a
+parallel suspend/resume transition) apply equally to deletion.
+
+Several flags may be specified on device link addition, two of which
+have already been mentioned above: ``DL_FLAG_STATELESS`` to express that no
+driver presence dependency is needed (but only correct suspend/resume and
+shutdown ordering) and ``DL_FLAG_PM_RUNTIME`` to express that runtime PM
+integration is desired.
+
+Two other flags are specifically targeted at use cases where the device
+link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE``
+can be specified to runtime resume the supplier upon addition of the
+device link. ``DL_FLAG_AUTOREMOVE`` causes the device link to be automatically
+purged when the consumer fails to probe or later unbinds. This obviates
+the need to explicitly delete the link in the ``->remove`` callback or in
+the error path of the ``->probe`` callback.
+
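+As a minimal sketch of the pattern described above (not taken from a real
+driver): the consumer's ``->probe`` callback adds the link with runtime PM
+integration and automatic removal, then defers if the supplier has not been
+bound yet. The ``find_supplier_device()`` helper is hypothetical; how the
+supplier is located is bus or firmware specific::
+
+    static int consumer_probe(struct device *dev)
+    {
+            /* Hypothetical helper returning the supplier's struct device. */
+            struct device *supplier = find_supplier_device(dev);
+            struct device_link *link;
+
+            /*
+             * Runtime resume the supplier now, keep it active while the
+             * consumer is active, and drop the link automatically if
+             * probing fails or the consumer later unbinds.
+             */
+            link = device_link_add(dev, supplier,
+                                   DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE |
+                                   DL_FLAG_AUTOREMOVE);
+            if (!link)
+                    return -EINVAL;
+
+            /* The supplier may not have probed yet; defer in that case. */
+            if (!supplier->driver)
+                    return -EPROBE_DEFER;
+
+            return 0;
+    }
+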
+Limitations
+===========
+
+Driver authors should be aware that a driver presence dependency (i.e. when
+``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of
+the consumer to be deferred indefinitely. This can become a problem if the
+consumer is required to probe before a certain initcall level is reached.
+Worse, if the supplier driver is blacklisted or missing, the consumer will
+never be probed.
+
+Sometimes drivers depend on optional resources. They are able to operate
+in a degraded mode (reduced feature set or performance) when those resources
+are not present. An example is an SPI controller that can use a DMA engine
+or work in PIO mode. The controller can determine presence of the optional
+resources at probe time but on non-presence there is no way to know whether
+they will become available in the near future (due to a supplier driver
+probing) or never. Consequently it cannot be determined whether to defer
+probing or not. It would be possible to notify drivers when optional
+resources become available after probing, but it would come at a high cost
+for drivers as switching between modes of operation at runtime based on the
+availability of such resources would be much more complex than a mechanism
+based on probe deferral. In any case optional resources are beyond the
+scope of device links.
+
+Examples
+========
+
+* An MMU device exists alongside a busmaster device, both are in the same
+ power domain. The MMU implements DMA address translation for the busmaster
+ device and shall be runtime resumed and kept active whenever and as long
+ as the busmaster device is active. The busmaster device's driver shall
+ not bind before the MMU is bound. To achieve this, a device link with
+ runtime PM integration is added from the busmaster device (consumer)
+ to the MMU device (supplier). The effect with regards to runtime PM
+ is the same as if the MMU was the parent of the master device.
+
+ The fact that both devices share the same power domain would normally
+ suggest usage of a :c:type:`struct dev_pm_domain` or :c:type:`struct
+ generic_pm_domain`, however these are not independent devices that
+ happen to share a power switch, but rather the MMU device serves the
+ busmaster device and is useless without it. A device link creates a
+ synthetic hierarchical relationship between the devices and is thus
+ more apt.
+
+* A Thunderbolt host controller comprises a number of PCIe hotplug ports
+ and an NHI device to manage the PCIe switch. On resume from system sleep,
+ the NHI device needs to re-establish PCI tunnels to attached devices
+ before the hotplug ports can resume. If the hotplug ports were children
+ of the NHI, this resume order would automatically be enforced by the
+ PM core, but unfortunately they're aunts. The solution is to add
+ device links from the hotplug ports (consumers) to the NHI device
+ (supplier). A driver presence dependency is not necessary for this
+ use case.
+
+* Discrete GPUs in hybrid graphics laptops often feature an HDA controller
+ for HDMI/DP audio. In the device hierarchy the HDA controller is a sibling
+ of the VGA device, yet both share the same power domain and the HDA
+ controller is only ever needed when an HDMI/DP display is attached to the
+ VGA device. A device link from the HDA controller (consumer) to the
+ VGA device (supplier) aptly represents this relationship.
+
+* ACPI allows definition of a device start order by way of _DEP objects.
+ A classical example is when ACPI power management methods on one device
+ are implemented in terms of I\ :sup:`2`\ C accesses and require a specific
+ I\ :sup:`2`\ C controller to be present and functional for the power
+ management of the device in question to work.
+
+* In some SoCs a functional dependency exists from display, video codec and
+ video processing IP cores on transparent memory access IP cores that handle
+ burst access and compression/decompression.
+
+Alternatives
+============
+
+* A :c:type:`struct dev_pm_domain` can be used to override the bus,
+ class or device type callbacks. It is intended for devices sharing
+ a single on/off switch; however, it does not guarantee a specific
+ suspend/resume ordering, which needs to be implemented separately.
+ It also does not by itself track the runtime PM status of the involved
+ devices and turn off the power switch only when all of them are runtime
+ suspended. Furthermore it cannot be used to enforce a specific shutdown
+ ordering or a driver presence dependency.
+
+* A :c:type:`struct generic_pm_domain` is a lot more heavyweight than a
+ device link and does not allow for shutdown ordering or driver presence
+ dependencies. It also cannot be used on ACPI systems.
+
+Implementation
+==============
+
+The device hierarchy, which -- as the name implies -- is a tree,
+becomes a directed acyclic graph once device links are added.
+
+Ordering of these devices during suspend/resume is determined by the
+dpm_list. During shutdown it is determined by the devices_kset. With
+no device links present, the two lists are flattened, one-dimensional
+representations of the device tree such that a device is placed behind
+all its ancestors. That is achieved by traversing the ACPI namespace
+or OpenFirmware device tree top-down and appending devices to the lists
+as they are discovered.
+
+Once device links are added, the lists need to satisfy the additional
+constraint that a device is placed behind all its suppliers, recursively.
+To ensure this, upon addition of the device link the consumer and the
+entire sub-graph below it (all children and consumers of the consumer)
+are moved to the end of the list. (Call to :c:func:`device_reorder_to_tail()`
+from :c:func:`device_link_add()`.)
+
+To prevent introduction of dependency loops into the graph, it is
+verified upon device link addition that the supplier is not dependent
+on the consumer or any children or consumers of the consumer.
+(Call to :c:func:`device_is_dependent()` from :c:func:`device_link_add()`.)
+If that constraint is violated, :c:func:`device_link_add()` will return
+``NULL`` and a ``WARNING`` will be logged.
+
+Notably this also prevents the addition of a device link from a parent
+device to a child. However the converse is allowed, i.e. a device link
+from a child to a parent. Since the driver core already guarantees
+correct suspend/resume and shutdown ordering between parent and child,
+such a device link only makes sense if a driver presence dependency is
+needed on top of that. In this case driver authors should weigh
+carefully if a device link is at all the right tool for the purpose.
+A more suitable approach might be to simply use deferred probing or
+add a device flag causing the parent driver to be probed before the
+child one.
+
+State machine
+=============
+
+.. kernel-doc:: include/linux/device.h
+ :functions: device_link_state
+
+::
+
+ .=============================.
+ | |
+ v |
+ DORMANT <=> AVAILABLE <=> CONSUMER_PROBE => ACTIVE
+ ^ |
+ | |
+ '============ SUPPLIER_UNBIND <============'
+
+* The initial state of a device link is automatically determined by
+ :c:func:`device_link_add()` based on the driver presence on the supplier
+ and consumer. If the link is created before any devices are probed, it
+ is set to ``DL_STATE_DORMANT``.
+
+* When a supplier device is bound to a driver, links to its consumers
+ progress to ``DL_STATE_AVAILABLE``.
+ (Call to :c:func:`device_links_driver_bound()` from
+ :c:func:`driver_bound()`.)
+
+* Before a consumer device is probed, presence of supplier drivers is
+ verified by checking that links to suppliers are in ``DL_STATE_AVAILABLE``
+ state. The state of the links is updated to ``DL_STATE_CONSUMER_PROBE``.
+ (Call to :c:func:`device_links_check_suppliers()` from
+ :c:func:`really_probe()`.)
+ This prevents the supplier from unbinding.
+ (Call to :c:func:`wait_for_device_probe()` from
+ :c:func:`device_links_unbind_consumers()`.)
+
+* If the probe fails, links to suppliers revert back to ``DL_STATE_AVAILABLE``.
+ (Call to :c:func:`device_links_no_driver()` from :c:func:`really_probe()`.)
+
+* If the probe succeeds, links to suppliers progress to ``DL_STATE_ACTIVE``.
+ (Call to :c:func:`device_links_driver_bound()` from :c:func:`driver_bound()`.)
+
+* When the consumer's driver is later on removed, links to suppliers revert
+ back to ``DL_STATE_AVAILABLE``.
+ (Call to :c:func:`__device_links_no_driver()` from
+ :c:func:`device_links_driver_cleanup()`, which in turn is called from
+ :c:func:`__device_release_driver()`.)
+
+* Before a supplier's driver is removed, links to consumers that are not
+ bound to a driver are updated to ``DL_STATE_SUPPLIER_UNBIND``.
+ (Call to :c:func:`device_links_busy()` from
+ :c:func:`__device_release_driver()`.)
+ This prevents the consumers from binding.
+ (Call to :c:func:`device_links_check_suppliers()` from
+ :c:func:`really_probe()`.)
+ Consumers that are bound are freed from their driver; consumers that are
+ probing are waited for until they are done.
+ (Call to :c:func:`device_links_unbind_consumers()` from
+ :c:func:`__device_release_driver()`.)
+ Once all links to consumers are in ``DL_STATE_SUPPLIER_UNBIND`` state,
+ the supplier driver is released and the links revert to ``DL_STATE_DORMANT``.
+ (Call to :c:func:`device_links_driver_cleanup()` from
+ :c:func:`__device_release_driver()`.)
+
+API
+===
+
+.. kernel-doc:: drivers/base/core.c
+ :functions: device_link_add device_link_del
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
new file mode 100644
index 000000000000..a9b457a4b949
--- /dev/null
+++ b/Documentation/driver-api/dma-buf.rst
@@ -0,0 +1,73 @@
+Buffer Sharing and Synchronization
+==================================
+
+The dma-buf subsystem provides the framework for sharing buffers for
+hardware (DMA) access across multiple device drivers and subsystems, and
+for synchronizing asynchronous hardware access.
+
+This is used, for example, by drm "prime" multi-GPU support, but is of
+course not limited to GPU use cases.
+
+The three main components of this are: (1) dma-buf, representing a
+sg_table and exposed to userspace as a file descriptor to allow passing
+between devices, (2) fence, which provides a mechanism to signal when
+one device has finished access, and (3) reservation, which manages the
+shared or exclusive fence(s) associated with the buffer.
+
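+As a rough sketch of the importer side only (error handling trimmed, and the
+``import_buffer()`` helper is made up for illustration), a driver that
+received a buffer as a file descriptor might attach to and map it like this::
+
+    #include <linux/dma-buf.h>
+    #include <linux/err.h>
+
+    static struct sg_table *import_buffer(struct device *dev, int fd,
+                                          struct dma_buf_attachment **att)
+    {
+            struct dma_buf *buf;
+
+            buf = dma_buf_get(fd);            /* take a reference via the fd */
+            if (IS_ERR(buf))
+                    return ERR_CAST(buf);
+
+            *att = dma_buf_attach(buf, dev);  /* tell the exporter who maps it */
+            if (IS_ERR(*att)) {
+                    dma_buf_put(buf);
+                    return ERR_CAST(*att);
+            }
+
+            /* Returns an sg_table the device can DMA to/from. */
+            return dma_buf_map_attachment(*att, DMA_BIDIRECTIONAL);
+    }
+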
+Shared DMA Buffers
+------------------
+
+.. kernel-doc:: drivers/dma-buf/dma-buf.c
+ :export:
+
+.. kernel-doc:: include/linux/dma-buf.h
+ :internal:
+
+Reservation Objects
+-------------------
+
+.. kernel-doc:: drivers/dma-buf/reservation.c
+ :doc: Reservation Object Overview
+
+.. kernel-doc:: drivers/dma-buf/reservation.c
+ :export:
+
+.. kernel-doc:: include/linux/reservation.h
+ :internal:
+
+DMA Fences
+----------
+
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
+ :export:
+
+.. kernel-doc:: include/linux/dma-fence.h
+ :internal:
+
+Seqno Hardware Fences
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/seqno-fence.c
+ :export:
+
+.. kernel-doc:: include/linux/seqno-fence.h
+ :internal:
+
+DMA Fence Array
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence-array.c
+ :export:
+
+.. kernel-doc:: include/linux/dma-fence-array.h
+ :internal:
+
+DMA Fence uABI/Sync File
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/sync_file.c
+ :export:
+
+.. kernel-doc:: include/linux/sync_file.h
+ :internal:
+
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 8e259c5d0322..a528178a54a5 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -16,11 +16,23 @@ available subsections can be seen below.
basics
infrastructure
+ dma-buf
+ device_link
message-based
sound
frame-buffer
input
+ usb
spi
i2c
hsi
miscellaneous
+ vme
+ 80211/index
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 5d50d6733db3..0bb0b5fc9512 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -46,76 +46,6 @@ Device Drivers Base
.. kernel-doc:: drivers/base/bus.c
:export:
-Buffer Sharing and Synchronization
-----------------------------------
-
-The dma-buf subsystem provides the framework for sharing buffers for
-hardware (DMA) access across multiple device drivers and subsystems, and
-for synchronizing asynchronous hardware access.
-
-This is used, for example, by drm "prime" multi-GPU support, but is of
-course not limited to GPU use cases.
-
-The three main components of this are: (1) dma-buf, representing a
-sg_table and exposed to userspace as a file descriptor to allow passing
-between devices, (2) fence, which provides a mechanism to signal when
-one device as finished access, and (3) reservation, which manages the
-shared or exclusive fence(s) associated with the buffer.
-
-dma-buf
-~~~~~~~
-
-.. kernel-doc:: drivers/dma-buf/dma-buf.c
- :export:
-
-.. kernel-doc:: include/linux/dma-buf.h
- :internal:
-
-reservation
-~~~~~~~~~~~
-
-.. kernel-doc:: drivers/dma-buf/reservation.c
- :doc: Reservation Object Overview
-
-.. kernel-doc:: drivers/dma-buf/reservation.c
- :export:
-
-.. kernel-doc:: include/linux/reservation.h
- :internal:
-
-fence
-~~~~~
-
-.. kernel-doc:: drivers/dma-buf/fence.c
- :export:
-
-.. kernel-doc:: include/linux/fence.h
- :internal:
-
-.. kernel-doc:: drivers/dma-buf/seqno-fence.c
- :export:
-
-.. kernel-doc:: include/linux/seqno-fence.h
- :internal:
-
-.. kernel-doc:: drivers/dma-buf/fence-array.c
- :export:
-
-.. kernel-doc:: include/linux/fence-array.h
- :internal:
-
-.. kernel-doc:: drivers/dma-buf/reservation.c
- :export:
-
-.. kernel-doc:: include/linux/reservation.h
- :internal:
-
-.. kernel-doc:: drivers/dma-buf/sync_file.c
- :export:
-
-.. kernel-doc:: include/linux/sync_file.h
- :internal:
-
Device Drivers DMA Management
-----------------------------
diff --git a/Documentation/driver-api/usb.rst b/Documentation/driver-api/usb.rst
new file mode 100644
index 000000000000..851cc40b66b5
--- /dev/null
+++ b/Documentation/driver-api/usb.rst
@@ -0,0 +1,748 @@
+===========================
+The Linux-USB Host Side API
+===========================
+
+Introduction to USB on Linux
+============================
+
+A Universal Serial Bus (USB) is used to connect a host, such as a PC or
+workstation, to a number of peripheral devices. USB uses a tree
+structure, with the host as the root (the system's master), hubs as
+interior nodes, and peripherals as leaves (and slaves). Modern PCs
+support several such trees of USB devices, usually
+a few USB 3.0 (5 GBit/s) or USB 3.1 (10 GBit/s) and some legacy
+USB 2.0 (480 MBit/s) busses just in case.
+
+That master/slave asymmetry was designed-in for a number of reasons, one
+being ease of use. It is not physically possible to mistake upstream and
+downstream, or it does not matter (with a type C plug, or when the cable is
+built into the peripheral). Also, the host software doesn't need to deal with
+distributed auto-configuration since the pre-designated master node
+manages all that.
+
+Kernel developers added USB support to Linux early in the 2.2 kernel
+series and have been developing it further since then. Besides support
+for each new generation of USB, various host controllers gained support,
+new drivers for peripherals have been added and advanced features for latency
+measurement and improved power management introduced.
+
+Linux can run inside USB devices as well as on the hosts that control
+the devices. But USB device drivers running inside those peripherals
+don't do the same things as the ones running inside hosts, so they've
+been given a different name: *gadget drivers*. This document does not
+cover gadget drivers.
+
+USB Host-Side API Model
+=======================
+
+Host-side drivers for USB devices talk to the "usbcore" APIs. There are
+two. One is intended for *general-purpose* drivers (exposed through
+driver frameworks), and the other is for drivers that are *part of the
+core*. Such core drivers include the *hub* driver (which manages trees
+of USB devices) and several different kinds of *host controller
+drivers*, which control individual busses.
+
+The device model seen by USB drivers is relatively complex.
+
+- USB supports four kinds of data transfers (control, bulk, interrupt,
+ and isochronous). Two of them (control and bulk) use bandwidth as
+ it's available, while the other two (interrupt and isochronous) are
+ scheduled to provide guaranteed bandwidth.
+
+- The device description model includes one or more "configurations"
+ per device, only one of which is active at a time. Devices are supposed
+ to be capable of operating at lower than their top
+ speeds and may provide a BOS descriptor showing the lowest speed they
+ remain fully operational at.
+
+- From USB 3.0 on configurations have one or more "functions", which
+ provide a common functionality and are grouped together for purposes
+ of power management.
+
+- Configurations or functions have one or more "interfaces", each of which may have
+ "alternate settings". Interfaces may be standardized by USB "Class"
+ specifications, or may be specific to a vendor or device.
+
+ USB device drivers actually bind to interfaces, not devices. Think of
+ them as "interface drivers", though you may not see many devices
+ where the distinction is important. *Most USB devices are simple,
+ with only one function, one configuration, one interface, and one alternate
+ setting.*
+
+- Interfaces have one or more "endpoints", each of which supports one
+ type and direction of data transfer such as "bulk out" or "interrupt
+ in". The entire configuration may have up to sixteen endpoints in
+ each direction, allocated as needed among all the interfaces.
+
+- Data transfer on USB is packetized; each endpoint has a maximum
+ packet size. Drivers must often be aware of conventions such as
+ flagging the end of bulk transfers using "short" (including zero
+ length) packets.
+
+- The Linux USB API supports synchronous calls for control and bulk
+ messages. It also supports asynchronous calls for all kinds of data
+ transfer, using request structures called "URBs" (USB Request
+ Blocks).
+
+Accordingly, the USB Core API exposed to device drivers covers quite a
+lot of territory. You'll probably need to consult the USB 3.0
+specification, available online from www.usb.org at no cost, as well as
+class or device specifications.
+
+The only host-side drivers that actually touch hardware (reading/writing
+registers, handling IRQs, and so on) are the HCDs. In theory, all HCDs
+provide the same functionality through the same API. In practice, that's
+becoming more true, but there are still differences
+that crop up especially with fault handling on the less common controllers.
+Different controllers don't
+necessarily report the same aspects of failures, and recovery from
+faults (including software-induced ones like unlinking an URB) isn't yet
+fully consistent. Device driver authors should make a point of doing
+disconnect testing (while the device is active) with each different host
+controller driver, to make sure drivers don't have bugs of their own as
+well as to make sure they aren't relying on some HCD-specific behavior.
+
+USB-Standard Types
+==================
+
+In ``<linux/usb/ch9.h>`` you will find the USB data types defined in
+chapter 9 of the USB specification. These data types are used throughout
+USB, and in APIs including this host side API, gadget APIs, and usbfs.
+
+.. kernel-doc:: include/linux/usb/ch9.h
+ :internal:
+
+Host-Side Data Types and Macros
+===============================
+
+The host side API exposes several layers to drivers, some of which are
+more necessary than others. These support lifecycle models for host side
+drivers and devices, and support passing buffers through usbcore to some
+HCD that performs the I/O for the device driver.
+
+.. kernel-doc:: include/linux/usb.h
+ :internal:
+
+USB Core APIs
+=============
+
+There are two basic I/O models in the USB API. The most elemental one is
+asynchronous: drivers submit requests in the form of an URB, and the
+URB's completion callback handles the next step. All USB transfer types
+support that model, although there are special cases for control URBs
+(which always have setup and status stages, but may not have a data
+stage) and isochronous URBs (which allow large packets and include
+per-packet fault reports). Built on top of that is synchronous API
+support, where a driver calls a routine that allocates one or more URBs,
+submits them, and waits until they complete. There are synchronous
+wrappers for single-buffer control and bulk transfers (which are awkward
+to use in some driver disconnect scenarios), and for scatterlist based
+streaming i/o (bulk or interrupt).
+
+USB drivers need to provide buffers that can be used for DMA, although
+they don't necessarily need to provide the DMA mapping themselves. There
+are APIs to use when allocating DMA buffers, which can prevent use
+of bounce buffers on some systems. In some cases, drivers may be able to
+rely on 64-bit DMA to eliminate another kind of bounce buffer.
+
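+As a rough, self-contained sketch of the asynchronous model together with a
+DMA-friendly buffer (the function names ``my_bulk_complete()`` and
+``my_send_bulk()`` and the endpoint number are made up for illustration)::
+
+    #include <linux/usb.h>
+
+    static void my_bulk_complete(struct urb *urb)
+    {
+            /*
+             * urb->status reports success or the fault; urb->actual_length
+             * says how much was transferred. Resubmit or clean up here.
+             */
+    }
+
+    static int my_send_bulk(struct usb_device *udev, unsigned int ep, int len)
+    {
+            struct urb *urb;
+            void *buf;
+
+            urb = usb_alloc_urb(0, GFP_KERNEL);
+            if (!urb)
+                    return -ENOMEM;
+
+            /* DMA-consistent buffer; urb->transfer_dma is filled in for us. */
+            buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &urb->transfer_dma);
+            if (!buf) {
+                    usb_free_urb(urb);
+                    return -ENOMEM;
+            }
+
+            usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
+                              buf, len, my_bulk_complete, NULL);
+            urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+            return usb_submit_urb(urb, GFP_KERNEL);  /* completes asynchronously */
+    }
+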
+.. kernel-doc:: drivers/usb/core/urb.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/message.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/file.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/driver.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/usb.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/hub.c
+ :export:
+
+Host Controller APIs
+====================
+
+These APIs are only for use by host controller drivers, most of which
+implement standard register interfaces such as XHCI, EHCI, OHCI, or UHCI. UHCI
+was one of the first interfaces, designed by Intel and also used by VIA;
+it doesn't do much in hardware. OHCI was designed later, to have the
+hardware do more work (bigger transfers, tracking protocol state, and so
+on). EHCI was designed with USB 2.0; its design has features that
+resemble OHCI (hardware does much more work) as well as UHCI (some parts
+of ISO support, TD list processing). XHCI was designed with USB 3.0. It
+continues to shift support for functionality into hardware.
+
+There are host controllers other than the "big three", although most PCI
+based controllers (and a few non-PCI based ones) use one of those
+interfaces. Not all host controllers use DMA; some use PIO, and there is
+also a simulator and a virtual host controller to pipe USB over the network.
+
+The same basic APIs are available to drivers for all those controllers.
+For historical reasons they are in two layers: :c:type:`struct
+usb_bus <usb_bus>` is a rather thin layer that became available
+in the 2.2 kernels, while :c:type:`struct usb_hcd <usb_hcd>`
+is a more featureful layer
+that lets HCDs share common code, to shrink driver size and
+significantly reduce hcd-specific behaviors.
+
+.. kernel-doc:: drivers/usb/core/hcd.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/hcd-pci.c
+ :export:
+
+.. kernel-doc:: drivers/usb/core/buffer.c
+ :internal:
+
+The USB Filesystem (usbfs)
+==========================
+
+This chapter presents the Linux *usbfs*. You may prefer to avoid writing
+new kernel code for your USB driver; that's the problem that usbfs set
+out to solve. User mode device drivers are usually packaged as
+applications or libraries, and may use usbfs through some programming
+library that wraps it. Such libraries include
+`libusb <http://libusb.sourceforge.net>`__ for C/C++, and
+`jUSB <http://jUSB.sourceforge.net>`__ for Java.
+
+ **Note**
+
+ This particular documentation is incomplete, especially with respect
+ to the asynchronous mode. As of kernel 2.5.66 the code and this
+ (new) documentation need to be cross-reviewed.
+
+Configure usbfs into Linux kernels by enabling the *USB filesystem*
+option (CONFIG_USB_DEVICEFS), and you get basic support for user mode
+USB device drivers. Until relatively recently it was often (confusingly)
+called *usbdevfs* although it wasn't solving what *devfs* was. Every USB
+device will appear in usbfs, regardless of whether or not it has a
+kernel driver.
+
+What files are in "usbfs"?
+--------------------------
+
+Conventionally mounted at ``/proc/bus/usb``, usbfs features include:
+
+- ``/proc/bus/usb/devices`` ... a text file showing each of the USB
+ devices known to the kernel, and their configuration descriptors.
+ You can also poll() this to learn about new devices.
+
+- ``/proc/bus/usb/BBB/DDD`` ... magic files exposing each device's
+ configuration descriptors, and supporting a series of ioctls for
+ making device requests, including I/O to devices. (Purely for access
+ by programs.)
+
+Each bus is given a number (BBB) based on when it was enumerated; within
+each bus, each device is given a similar number (DDD). Those BBB/DDD
+paths are not "stable" identifiers; expect them to change even if you
+always leave the devices plugged in to the same hub port. *Don't even
+think of saving these in application configuration files.* Stable
+identifiers are available, for user mode applications that want to use
+them. HID and networking devices expose these stable IDs, so that for
+example you can be sure that you told the right UPS to power down its
+second server. "usbfs" doesn't (yet) expose those IDs.
+
+Mounting and Access Control
+---------------------------
+
+There are a number of mount options for usbfs, which will be of most
+interest to you if you need to override the default access control
+policy. That policy is that only root may read or write device files
+(``/proc/bus/usb/BBB/DDD``) although anyone may read the ``devices`` or
+``drivers`` files. I/O requests to the device also need the
+CAP_SYS_RAWIO capability.
+
+The significance of that is that by default, all user mode device
+drivers need super-user privileges. You can change modes or ownership in
+a driver setup when the device hotplugs, or maybe just start the driver
+right then, as a privileged server (or some activity within one). That's
+the most secure approach for multi-user systems, but for single user
+systems ("trusted" by that user) it's more convenient just to grant
+everyone all access (using the *devmode=0666* option) so the driver can
+start whenever it's needed.
+
+The mount options for usbfs, usable in /etc/fstab or in command line
+invocations of *mount*, are:
+
+*busgid*\ =NNNNN
+ Controls the GID used for the /proc/bus/usb/BBB directories.
+ (Default: 0)
+
+*busmode*\ =MMM
+ Controls the file mode used for the /proc/bus/usb/BBB directories.
+ (Default: 0555)
+
+*busuid*\ =NNNNN
+ Controls the UID used for the /proc/bus/usb/BBB directories.
+ (Default: 0)
+
+*devgid*\ =NNNNN
+ Controls the GID used for the /proc/bus/usb/BBB/DDD files. (Default:
+ 0)
+
+*devmode*\ =MMM
+ Controls the file mode used for the /proc/bus/usb/BBB/DDD files.
+ (Default: 0644)
+
+*devuid*\ =NNNNN
+ Controls the UID used for the /proc/bus/usb/BBB/DDD files. (Default:
+ 0)
+
+*listgid*\ =NNNNN
+ Controls the GID used for the /proc/bus/usb/devices and drivers
+ files. (Default: 0)
+
+*listmode*\ =MMM
+ Controls the file mode used for the /proc/bus/usb/devices and
+ drivers files. (Default: 0444)
+
+*listuid*\ =NNNNN
+ Controls the UID used for the /proc/bus/usb/devices and drivers
+ files. (Default: 0)
+
+Note that many Linux distributions hard-wire the mount options for usbfs
+in their init scripts, such as ``/etc/rc.d/rc.sysinit``, rather than
+making it easy to set this per-system policy in ``/etc/fstab``.
+
+/proc/bus/usb/devices
+---------------------
+
+This file is handy for status viewing tools in user mode, which can scan
+the text format and ignore most of it. More detailed device status
+(including class and vendor status) is available from device-specific
+files. For information about the current format of this file, see the
+``Documentation/usb/proc_usb_info.txt`` file in your Linux kernel
+sources.
+
+This file, in combination with the poll() system call, can also be used
+to detect when devices are added or removed:
+
+::
+
+ int fd;
+ struct pollfd pfd;
+
+ fd = open("/proc/bus/usb/devices", O_RDONLY);
+ pfd = (struct pollfd){ .fd = fd, .events = POLLIN };
+ for (;;) {
+ /* The first time through, this call will return immediately. */
+ poll(&pfd, 1, -1);
+
+ /* To see what's changed, compare the file's previous and current
+ contents or scan the filesystem. (Scanning is more precise.) */
+ }
+
+Note that this behavior is intended to be used for informational and
+debug purposes. It would be more appropriate to use programs such as
+udev or HAL to initialize a device or start a user-mode helper program,
+for instance.
+
+/proc/bus/usb/BBB/DDD
+---------------------
+
+Use these files in one of these basic ways:
+
+*They can be read,* producing first the device descriptor (18 bytes) and
+then the descriptors for the current configuration. See the USB 2.0 spec
+for details about those binary data formats. You'll need to convert most
+multibyte values from little endian format to your native host byte
+order, although a few of the fields in the device descriptor (both of
+the BCD-encoded fields, and the vendor and product IDs) will be
+byteswapped for you. Note that configuration descriptors include
+descriptors for interfaces, altsettings, endpoints, and maybe additional
+class descriptors.
+
+*Perform USB operations* using *ioctl()* requests to make endpoint I/O
+requests (synchronously or asynchronously) or manage the device. These
+requests need the CAP_SYS_RAWIO capability, as well as filesystem
+access permissions. Only one ioctl request can be made on one of these
+device files at a time. This means that if you are synchronously reading
+an endpoint from one thread, you won't be able to write to a different
+endpoint from another thread until the read completes. This works for
+*half duplex* protocols, but otherwise you'd use asynchronous I/O
+requests.
+
+Life Cycle of User Mode Drivers
+-------------------------------
+
+Such a driver first needs to find a device file for a device it knows
+how to handle. Maybe it was told about it because a ``/sbin/hotplug``
+event handling agent chose that driver to handle the new device. Or
+maybe it's an application that scans all the /proc/bus/usb device files,
+and ignores most devices. In either case, it should :c:func:`read()`
+all the descriptors from the device file, and check them against what it
+knows how to handle. It might just reject everything except a particular
+vendor and product ID, or need a more complex policy.
+
+Never assume there will only be one such device on the system at a time!
+If your code can't handle more than one device at a time, at least
+detect when there's more than one, and have your users choose which
+device to use.
+
+Once your user mode driver knows what device to use, it interacts with
+it in either of two styles. The simple style is to make only control
+requests; some devices don't need more complex interactions than those.
+(An example might be software using vendor-specific control requests for
+some initialization or configuration tasks, with a kernel driver for the
+rest.)
+
+More likely, you need a more complex style driver: one using non-control
+endpoints, reading or writing data and claiming exclusive use of an
+interface. *Bulk* transfers are easiest to use, but only their sibling
+*interrupt* transfers work with low speed devices. Both interrupt and
+*isochronous* transfers offer service guarantees because their bandwidth
+is reserved. Such "periodic" transfers are awkward to use through usbfs,
+unless you're using the asynchronous calls. However, interrupt transfers
+can also be used in a synchronous "one shot" style.
+
+Your user-mode driver should never need to worry about cleaning up
+request state when the device is disconnected, although it should close
+its open file descriptors as soon as it starts seeing the ENODEV errors.
+
+The ioctl() Requests
+--------------------
+
+To use these ioctls, you need to include the following headers in your
+userspace program:
+
+::
+
+ #include <linux/usb.h>
+ #include <linux/usbdevice_fs.h>
+ #include <asm/byteorder.h>
+
+The standard USB device model requests, from "Chapter 9" of the USB 2.0
+specification, are automatically included from the ``<linux/usb/ch9.h>``
+header.
+
+Unless noted otherwise, the ioctl requests described here will update
+the modification time on the usbfs file to which they are applied
+(unless they fail). A return of zero indicates success; otherwise, a
+standard USB error code is returned. (These are documented in
+``Documentation/usb/error-codes.txt`` in your kernel sources.)
+
+Each of these files multiplexes access to several I/O streams, one per
+endpoint. Each device has one control endpoint (endpoint zero) which
+supports a limited RPC-style access. Devices are configured by
+hub_wq (in the kernel) setting a device-wide *configuration* that
+affects things like power consumption and basic functionality. The
+endpoints are part of USB *interfaces*, which may have *altsettings*
+affecting things like which endpoints are available. Many devices only
+have a single configuration and interface, so drivers for them will
+ignore configurations and altsettings.
+
+Management/Status Requests
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A number of usbfs requests don't deal very directly with device I/O.
+They mostly relate to device management and status. These are all
+synchronous requests.
+
+USBDEVFS_CLAIMINTERFACE
+ This is used to force usbfs to claim a specific interface, which has
+ not previously been claimed by usbfs or any other kernel driver. The
+ ioctl parameter is an integer holding the number of the interface
+ (bInterfaceNumber from descriptor).
+
+ Note that if your driver doesn't claim an interface before trying to
+ use one of its endpoints, and no other driver has bound to it, then
+ the interface is automatically claimed by usbfs.
+
+ This claim will be released by a RELEASEINTERFACE ioctl, or by
+ closing the file descriptor. File modification time is not updated
+ by this request.
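+
+ A hypothetical claim of interface zero could look like this (a minimal
+ sketch; "fd" is an open /proc/bus/usb/BBB/DDD file descriptor):
+
+ ::
+
+     unsigned int interface = 0;    /* bInterfaceNumber */
+
+     if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &interface) < 0)
+         perror("USBDEVFS_CLAIMINTERFACE");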
+
+USBDEVFS_CONNECTINFO
+ Says whether the device is lowspeed. The ioctl parameter points to a
+ structure like this:
+
+ ::
+
+ struct usbdevfs_connectinfo {
+ unsigned int devnum;
+ unsigned char slow;
+ };
+
+ File modification time is not updated by this request.
+
+ *You can't tell whether a "not slow" device is connected at high
+ speed (480 MBit/sec) or just full speed (12 MBit/sec).* You should
+ already know the devnum value; it's the DDD value of the device file
+ name.
+
+USBDEVFS_GETDRIVER
+ Returns the name of the kernel driver bound to a given interface (a
+ string). Parameter is a pointer to this structure, which is
+ modified:
+
+ ::
+
+ struct usbdevfs_getdriver {
+ unsigned int interface;
+ char driver[USBDEVFS_MAXDRIVERNAME + 1];
+ };
+
+ File modification time is not updated by this request.
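+
+ For example, a sketch of a query for interface zero (assuming "fd" is
+ an open device file):
+
+ ::
+
+     struct usbdevfs_getdriver gd;
+
+     gd.interface = 0;
+     if (ioctl(fd, USBDEVFS_GETDRIVER, &gd) == 0)
+         printf("interface 0 is bound to %s\n", gd.driver);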
+
+USBDEVFS_IOCTL
+ Passes a request from userspace through to a kernel driver that has
+ an ioctl entry in the *struct usb_driver* it registered.
+
+ ::
+
+ struct usbdevfs_ioctl {
+ int ifno;
+ int ioctl_code;
+ void *data;
+ };
+
+ /* user mode call looks like this.
+ * 'request' becomes the driver->ioctl() 'code' parameter.
+ * the size of 'param' is encoded in 'request', and that data
+ * is copied to or from the driver->ioctl() 'buf' parameter.
+ */
+ static int
+ usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
+ {
+ struct usbdevfs_ioctl wrapper;
+
+ wrapper.ifno = ifno;
+ wrapper.ioctl_code = request;
+ wrapper.data = param;
+
+ return ioctl (fd, USBDEVFS_IOCTL, &wrapper);
+ }
+
+ File modification time is not updated by this request.
+
+ This request lets kernel drivers talk to user mode code through
+ filesystem operations even when they don't create a character or
+ block special device. It's also been used to do things like ask
+ devices what device special file should be used. Two pre-defined
+ ioctls are used to disconnect and reconnect kernel drivers, so that
+ user mode code can completely manage binding and configuration of
+ devices.
+
+USBDEVFS_RELEASEINTERFACE
+ This is used to release the claim usbfs made on an interface, either
+ implicitly or because of a USBDEVFS_CLAIMINTERFACE call, before the
+ file descriptor is closed. The ioctl parameter is an integer holding
+ the number of the interface (bInterfaceNumber from descriptor). File
+ modification time is not updated by this request.
+
+ **Warning**
+
+ *No security check is made to ensure that the task which made
+ the claim is the one which is releasing it. This means that one user
+ mode driver may interfere with other ones.*
+
+USBDEVFS_RESETEP
+ Resets the data toggle value for an endpoint (bulk or interrupt) to
+ DATA0. The ioctl parameter is an integer endpoint number (1 to 15,
+ as identified in the endpoint descriptor), with USB_DIR_IN added
+ if the device's endpoint sends data to the host.
+
+ **Warning**
+
+ *Avoid using this request. It should probably be removed.* Using
+ it typically means the device and driver will lose toggle
+ synchronization. If you really lost synchronization, you likely
+ need to completely handshake with the device, using a request
+ like CLEAR_HALT or SET_INTERFACE.
+
+USBDEVFS_DROP_PRIVILEGES
+ This is used to relinquish the ability to do certain operations
+ which are considered to be privileged on a usbfs file descriptor.
+ This includes claiming arbitrary interfaces, resetting a device on
+ which there are currently claimed interfaces from other users, and
+ issuing USBDEVFS_IOCTL calls. The ioctl parameter is a 32 bit mask
+ of interfaces the user is allowed to claim on this file descriptor.
+ You may issue this ioctl more than once to further narrow the mask.
+
+Synchronous I/O Support
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Synchronous requests involve the kernel blocking until the user mode
+request completes, either by finishing successfully or by reporting an
+error. In most cases this is the simplest way to use usbfs, although as
+noted above it does prevent performing I/O to more than one endpoint at
+a time.
+
+USBDEVFS_BULK
+ Issues a bulk read or write request to the device. The ioctl
+ parameter is a pointer to this structure:
+
+ ::
+
+ struct usbdevfs_bulktransfer {
+ unsigned int ep;
+ unsigned int len;
+ unsigned int timeout; /* in milliseconds */
+ void *data;
+ };
+
+ The "ep" value identifies a bulk endpoint number (1 to 15, as
+ identified in an endpoint descriptor), masked with USB_DIR_IN when
+ referring to an endpoint which sends data to the host from the
+ device. The length of the data buffer is identified by "len"; recent
+ kernels support requests up to about 128 KBytes. *FIXME say how read
+ length is returned, and how short reads are handled.*
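+
+ As a rough example, a synchronous bulk IN transfer might be issued
+ like this (a minimal sketch; the endpoint number and buffer size are
+ assumptions):
+
+ ::
+
+     struct usbdevfs_bulktransfer bulk;
+     unsigned char buf[512];
+     int ret;
+
+     bulk.ep = 1 | USB_DIR_IN;    /* bulk IN endpoint 1 */
+     bulk.len = sizeof buf;
+     bulk.timeout = 1000;         /* milliseconds */
+     bulk.data = buf;
+
+     ret = ioctl(fd, USBDEVFS_BULK, &bulk);
+     if (ret < 0)
+         perror("USBDEVFS_BULK");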
+
+USBDEVFS_CLEAR_HALT
+ Clears endpoint halt (stall) and resets the endpoint toggle. This is
+ only meaningful for bulk or interrupt endpoints. The ioctl parameter
+ is an integer endpoint number (1 to 15, as identified in an endpoint
+ descriptor), masked with USB_DIR_IN when referring to an endpoint
+ which sends data to the host from the device.
+
+ Use this on bulk or interrupt endpoints which have stalled,
+ returning *-EPIPE* status to a data transfer request. Do not issue
+ the control request directly, since that could invalidate the host's
+ record of the data toggle.
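+
+ For example (a minimal sketch; a stalled bulk IN endpoint 1 is
+ assumed):
+
+ ::
+
+     unsigned int ep = 1 | USB_DIR_IN;
+
+     if (ioctl(fd, USBDEVFS_CLEAR_HALT, &ep) < 0)
+         perror("USBDEVFS_CLEAR_HALT");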
+
+USBDEVFS_CONTROL
+ Issues a control request to the device. The ioctl parameter points
+ to a structure like this:
+
+ ::
+
+ struct usbdevfs_ctrltransfer {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+ __u32 timeout; /* in milliseconds */
+ void *data;
+ };
+
+ The first eight bytes of this structure are the contents of the
+ SETUP packet to be sent to the device; see the USB 2.0 specification
+ for details. The bRequestType value is composed by combining a
+ USB_TYPE_\* value, a USB_DIR_\* value, and a USB_RECIP_\*
+ value (from *<linux/usb.h>*). If wLength is nonzero, it describes
+ the length of the data buffer, which is either written to the device
+ (USB_DIR_OUT) or read from the device (USB_DIR_IN).
+
+ At this writing, you can't transfer more than 4 KBytes of data to or
+ from a device; usbfs has a limit, and some host controller drivers
+ have a limit. (That's not usually a problem.) *Also* there's no way
+ to say it's not OK to get a short read back from the device.
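+
+ As an illustration, a standard GET_STATUS request directed to the
+ device might be built like this (a minimal sketch):
+
+ ::
+
+     struct usbdevfs_ctrltransfer ctrl;
+     unsigned char status[2];
+
+     ctrl.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+     ctrl.bRequest = USB_REQ_GET_STATUS;
+     ctrl.wValue = 0;
+     ctrl.wIndex = 0;
+     ctrl.wLength = sizeof status;
+     ctrl.timeout = 1000;    /* milliseconds */
+     ctrl.data = status;
+
+     if (ioctl(fd, USBDEVFS_CONTROL, &ctrl) < 0)
+         perror("USBDEVFS_CONTROL");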
+
+USBDEVFS_RESET
+ Does a USB level device reset. The ioctl parameter is ignored. After
+ the reset, this rebinds all device interfaces. File modification
+ time is not updated by this request.
+
+ **Warning**
+
+ *Avoid using this call* until some usbcore bugs get fixed, since
+ it does not fully synchronize device, interface, and driver (not
+ just usbfs) state.
+
+USBDEVFS_SETINTERFACE
+ Sets the alternate setting for an interface. The ioctl parameter is
+ a pointer to a structure like this:
+
+ ::
+
+ struct usbdevfs_setinterface {
+ unsigned int interface;
+ unsigned int altsetting;
+ };
+
+ File modification time is not updated by this request.
+
+ Those struct members are from some interface descriptor applying to
+ the current configuration. The interface number is the
+ bInterfaceNumber value, and the altsetting number is the
+ bAlternateSetting value. (This resets each endpoint in the
+ interface.)
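+
+ For example, selecting altsetting 1 on interface 0 could look like
+ this (a minimal sketch):
+
+ ::
+
+     struct usbdevfs_setinterface setif;
+
+     setif.interface = 0;     /* bInterfaceNumber */
+     setif.altsetting = 1;    /* bAlternateSetting */
+     if (ioctl(fd, USBDEVFS_SETINTERFACE, &setif) < 0)
+         perror("USBDEVFS_SETINTERFACE");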
+
+USBDEVFS_SETCONFIGURATION
+ Issues the :c:func:`usb_set_configuration()` call for the
+ device. The parameter is an integer holding the number of a
+ configuration (bConfigurationValue from descriptor). File
+ modification time is not updated by this request.
+
+ **Warning**
+
+ *Avoid using this call* until some usbcore bugs get fixed, since
+ it does not fully synchronize device, interface, and driver (not
+ just usbfs) state.
+
+Asynchronous I/O Support
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, there are situations where it may be important to
+initiate concurrent operations from user mode code. This is particularly
+important for periodic transfers (interrupt and isochronous), but it can
+be used for other kinds of USB requests too. In such cases, the
+asynchronous requests described here are essential. Rather than
+submitting one request and having the kernel block until it completes,
+submission and waiting for completion are separate steps.
+
+These requests are packaged into a structure that resembles the URB used
+by kernel device drivers. (No POSIX Async I/O support here, sorry.) It
+identifies the endpoint type (USBDEVFS_URB_TYPE_\*), endpoint
+(number, masked with USB_DIR_IN as appropriate), buffer and length,
+and a user "context" value serving to uniquely identify each request.
+(It's usually a pointer to per-request data.) Flags can modify requests
+(not as many as supported for kernel drivers).
+
+Each request can specify a realtime signal number (between SIGRTMIN and
+SIGRTMAX, inclusive) to request a signal be sent when the request
+completes.
+
+When usbfs returns these urbs, the status value is updated, and the
+buffer may have been modified. Except for isochronous transfers, the
+actual_length is updated to say how many bytes were transferred; if the
+USBDEVFS_URB_DISABLE_SPD flag is set ("short packets are not OK") and
+fewer bytes were read than were requested, then you get an error report.
+
+::
+
+ struct usbdevfs_iso_packet_desc {
+ unsigned int length;
+ unsigned int actual_length;
+ unsigned int status;
+ };
+
+ struct usbdevfs_urb {
+ unsigned char type;
+ unsigned char endpoint;
+ int status;
+ unsigned int flags;
+ void *buffer;
+ int buffer_length;
+ int actual_length;
+ int start_frame;
+ int number_of_packets;
+ int error_count;
+ unsigned int signr;
+ void *usercontext;
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
+ };
+
+For these asynchronous requests, the file modification time reflects
+when the request was initiated. This contrasts with the synchronous
+requests, where it reflects when requests complete.
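+
+As a rough sketch of the flow (the endpoint, buffer size, and error
+handling here are assumptions, not part of the usbfs API description),
+submitting one bulk URB and reaping its completion looks like this:
+
+::
+
+    struct usbdevfs_urb urb, *reaped;
+    unsigned char buf[64];
+
+    memset(&urb, 0, sizeof urb);
+    urb.type = USBDEVFS_URB_TYPE_BULK;
+    urb.endpoint = 1 | USB_DIR_IN;
+    urb.buffer = buf;
+    urb.buffer_length = sizeof buf;
+    urb.usercontext = &urb;    /* anything that identifies this request */
+
+    if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0)
+        perror("USBDEVFS_SUBMITURB");
+
+    /* Blocks until one of the submitted URBs completes. */
+    if (ioctl(fd, USBDEVFS_REAPURB, &reaped) < 0)
+        perror("USBDEVFS_REAPURB");
+    else
+        printf("%d bytes, status %d\n", reaped->actual_length, reaped->status);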
+
+USBDEVFS_DISCARDURB
+ *TBS* File modification time is not updated by this request.
+
+USBDEVFS_DISCSIGNAL
+ *TBS* File modification time is not updated by this request.
+
+USBDEVFS_REAPURB
+ *TBS* File modification time is not updated by this request.
+
+USBDEVFS_REAPURBNDELAY
+ *TBS* File modification time is not updated by this request.
+
+USBDEVFS_SUBMITURB
+ *TBS*
diff --git a/Documentation/vme_api.txt b/Documentation/driver-api/vme.rst
index 90006550f485..89776fb3c8bd 100644
--- a/Documentation/vme_api.txt
+++ b/Documentation/driver-api/vme.rst
@@ -1,13 +1,15 @@
- VME Device Driver API
- =====================
+VME Device Drivers
+==================
Driver registration
-===================
+-------------------
As with other subsystems within the Linux kernel, VME device drivers register
with the VME subsystem, typically called from the devices init routine. This is
achieved via a call to the following function:
+.. code-block:: c
+
int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
If driver registration is successful this function returns zero, if an error
@@ -17,6 +19,8 @@ A pointer to a structure of type 'vme_driver' must be provided to the
registration function. Along with ndevs, which is the number of devices your
driver is able to support. The structure is as follows:
+.. code-block:: c
+
struct vme_driver {
struct list_head node;
const char *name;
@@ -38,6 +42,8 @@ with the driver. The match function should return 1 if a device should be
probed and 0 otherwise. This example match function (from vme_user.c) limits
the number of devices probed to one:
+.. code-block:: c
+
#define USER_BUS_MAX 1
...
static int vme_user_match(struct vme_dev *vdev)
@@ -51,6 +57,8 @@ The '.probe' element should contain a pointer to the probe routine. The
probe routine is passed a 'struct vme_dev' pointer as an argument. The
'struct vme_dev' structure looks like the following:
+.. code-block:: c
+
struct vme_dev {
int num;
struct vme_bridge *bridge;
@@ -66,11 +74,13 @@ dev->bridge->num.
A function is also provided to unregister the driver from the VME core and is
usually called from the device driver's exit routine:
+.. code-block:: c
+
void vme_unregister_driver (struct vme_driver *driver);
Resource management
-===================
+-------------------
Once a driver has registered with the VME core the provided match routine will
be called the number of times specified during the registration. If a match
@@ -86,6 +96,8 @@ specific window or DMA channel (which may be used by a different driver) this
driver allows a resource to be assigned based on the required attributes of the
driver in question:
+.. code-block:: c
+
struct vme_resource * vme_master_request(struct vme_dev *dev,
u32 aspace, u32 cycle, u32 width);
@@ -112,6 +124,8 @@ Functions are also provided to free window allocations once they are no longer
required. These functions should be passed the pointer to the resource provided
during resource allocation:
+.. code-block:: c
+
void vme_master_free(struct vme_resource *res);
void vme_slave_free(struct vme_resource *res);
@@ -120,7 +134,7 @@ during resource allocation:
Master windows
-==============
+--------------
Master windows provide access from the local processor[s] out onto the VME bus.
The number of windows available and the available access modes is dependent on
@@ -128,11 +142,13 @@ the underlying chipset. A window must be configured before it can be used.
Master window configuration
----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once a master window has been assigned the following functions can be used to
configure it and retrieve the current settings:
+.. code-block:: c
+
int vme_master_set (struct vme_resource *res, int enabled,
unsigned long long base, unsigned long long size, u32 aspace,
u32 cycle, u32 width);
@@ -149,11 +165,13 @@ These functions return 0 on success or an error code should the call fail.
Master window access
---------------------
+~~~~~~~~~~~~~~~~~~~~
The following functions can be used to read from and write to configured master
windows. These functions return the number of bytes copied:
+.. code-block:: c
+
ssize_t vme_master_read(struct vme_resource *res, void *buf,
size_t count, loff_t offset);
@@ -164,6 +182,8 @@ In addition to simple reads and writes, a function is provided to do a
read-modify-write transaction. This function returns the original value of the
VME bus location :
+.. code-block:: c
+
unsigned int vme_master_rmw (struct vme_resource *res,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset);
@@ -175,12 +195,14 @@ the value of swap is written the specified offset.
Parts of a VME window can be mapped into user space memory using the following
function:
+.. code-block:: c
+
int vme_master_mmap(struct vme_resource *resource,
struct vm_area_struct *vma)
Slave windows
-=============
+-------------
Slave windows provide devices on the VME bus access into mapped portions of the
local memory. The number of windows available and the access modes that can be
@@ -189,11 +211,13 @@ it can be used.
Slave window configuration
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
Once a slave window has been assigned the following functions can be used to
configure it and retrieve the current settings:
+.. code-block:: c
+
int vme_slave_set (struct vme_resource *res, int enabled,
unsigned long long base, unsigned long long size,
dma_addr_t mem, u32 aspace, u32 cycle);
@@ -210,13 +234,15 @@ These functions return 0 on success or an error code should the call fail.
Slave window buffer allocation
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions are provided to allow the user to allocate and free a contiguous
buffers which will be accessible by the VME bridge. These functions do not have
to be used, other methods can be used to allocate a buffer, though care must be
taken to ensure that they are contiguous and accessible by the VME bridge:
+.. code-block:: c
+
void * vme_alloc_consistent(struct vme_resource *res, size_t size,
dma_addr_t *mem);
@@ -225,14 +251,14 @@ taken to ensure that they are contiguous and accessible by the VME bridge:
Slave window access
--------------------
+~~~~~~~~~~~~~~~~~~~
Slave windows map local memory onto the VME bus, the standard methods for
accessing memory should be used.
DMA channels
-============
+------------
The VME DMA transfer provides the ability to run link-list DMA transfers. The
API introduces the concept of DMA lists. Each DMA list is a link-list which can
@@ -241,29 +267,35 @@ executed, reused and destroyed.
List Management
----------------
+~~~~~~~~~~~~~~~
The following functions are provided to create and destroy DMA lists. Execution
of a list will not automatically destroy the list, thus enabling a list to be
reused for repetitive tasks:
+.. code-block:: c
+
struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
int vme_dma_list_free(struct vme_dma_list *list);
List Population
----------------
+~~~~~~~~~~~~~~~
An item can be added to a list using the following function ( the source and
destination attributes need to be created before calling this function, this is
covered under "Transfer Attributes"):
+.. code-block:: c
+
int vme_dma_list_add(struct vme_dma_list *list,
struct vme_dma_attr *src, struct vme_dma_attr *dest,
size_t count);
-NOTE: The detailed attributes of the transfers source and destination
+.. note::
+
+ The detailed attributes of the transfer's source and destination
are not checked until an entry is added to a DMA list, the request
for a DMA channel purely checks the directions in which the
controller is expected to transfer data. As a result it is
@@ -271,7 +303,7 @@ NOTE: The detailed attributes of the transfers source and destination
source or destination is in an unsupported VME address space.
Transfer Attributes
--------------------
+~~~~~~~~~~~~~~~~~~~
The attributes for the source and destination are handled separately from adding
an item to a list. This is due to the diverse attributes required for each type
@@ -280,33 +312,43 @@ and pattern sources and destinations (where appropriate):
Pattern source:
+.. code-block:: c
+
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
PCI source or destination:
+.. code-block:: c
+
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
VME source or destination:
+.. code-block:: c
+
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
u32 aspace, u32 cycle, u32 width);
The following function should be used to free an attribute:
+.. code-block:: c
+
void vme_dma_free_attribute(struct vme_dma_attr *attr);
List Execution
---------------
+~~~~~~~~~~~~~~
The following function queues a list for execution. The function will return
once the list has been executed:
+.. code-block:: c
+
int vme_dma_list_exec(struct vme_dma_list *list);
Interrupts
-==========
+----------
The VME API provides functions to attach and detach callbacks to specific VME
level and status ID combinations and for the generation of VME interrupts with
@@ -314,13 +356,15 @@ specific VME level and status IDs.
Attaching Interrupt Handlers
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following functions can be used to attach and free a specific VME level and
status ID combination. Any given combination can only be assigned a single
callback function. A void pointer parameter is provided, the value of which is
passed to the callback function, the use of this pointer is user undefined:
+.. code-block:: c
+
int vme_irq_request(struct vme_dev *dev, int level, int statid,
void (*callback)(int, int, void *), void *priv);
@@ -329,31 +373,37 @@ passed to the callback function, the use of this pointer is user undefined:
The callback parameters are as follows. Care must be taken in writing a callback
function, callback functions run in interrupt context:
+.. code-block:: c
+
void callback(int level, int statid, void *priv);
Interrupt Generation
---------------------
+~~~~~~~~~~~~~~~~~~~~
The following function can be used to generate a VME interrupt at a given VME
level and VME status ID:
+.. code-block:: c
+
int vme_irq_generate(struct vme_dev *dev, int level, int statid);
Location monitors
-=================
+-----------------
The VME API provides the following functionality to configure the location
monitor.
Location Monitor Management
----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following functions are provided to request the use of a block of location
monitors and to free them after they are no longer required:
+.. code-block:: c
+
struct vme_resource * vme_lm_request(struct vme_dev *dev);
void vme_lm_free(struct vme_resource * res);
@@ -362,15 +412,19 @@ Each block may provide a number of location monitors, monitoring adjacent
locations. The following function can be used to determine how many locations
are provided:
+.. code-block:: c
+
int vme_lm_count(struct vme_resource * res);
Location Monitor Configuration
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once a bank of location monitors has been allocated, the following functions
are provided to configure the location and mode of the location monitor:
+.. code-block:: c
+
int vme_lm_set(struct vme_resource *res, unsigned long long base,
u32 aspace, u32 cycle);
@@ -379,12 +433,14 @@ are provided to configure the location and mode of the location monitor:
Location Monitor Use
---------------------
+~~~~~~~~~~~~~~~~~~~~
The following functions allow a callback to be attached and detached from each
location monitor location. Each location monitor can monitor a number of
adjacent locations:
+.. code-block:: c
+
int vme_lm_attach(struct vme_resource *res, int num,
void (*callback)(void *));
@@ -392,22 +448,27 @@ adjacent locations:
The callback function is declared as follows.
+.. code-block:: c
+
void callback(void *data);
Slot Detection
-==============
+--------------
This function returns the slot ID of the provided bridge.
+.. code-block:: c
+
int vme_slot_num(struct vme_dev *dev);
Bus Detection
-=============
+-------------
This function returns the bus ID of the provided bridge.
- int vme_bus_num(struct vme_dev *dev);
+.. code-block:: c
+
+	int vme_bus_num(struct vme_dev *dev);
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 167070895498..ca9d1eb46bc0 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -332,6 +332,10 @@ MEM
MFD
devm_mfd_add_devices()
+PER-CPU MEM
+ devm_alloc_percpu()
+ devm_free_percpu()
+
PCI
pcim_enable_device() : after success, all PCI ops become managed
pcim_pin_device() : keep PCI device enabled after release
diff --git a/Documentation/dynamic-debug-howto.txt b/Documentation/dynamic-debug-howto.txt
deleted file mode 100644
index 9417871b8758..000000000000
--- a/Documentation/dynamic-debug-howto.txt
+++ /dev/null
@@ -1,340 +0,0 @@
-
-Introduction
-============
-
-This document describes how to use the dynamic debug (dyndbg) feature.
-
-Dynamic debug is designed to allow you to dynamically enable/disable
-kernel code to obtain additional kernel information. Currently, if
-CONFIG_DYNAMIC_DEBUG is set, then all pr_debug()/dev_dbg() and
-print_hex_dump_debug()/print_hex_dump_bytes() calls can be dynamically
-enabled per-callsite.
-
-If CONFIG_DYNAMIC_DEBUG is not set, print_hex_dump_debug() is just
-shortcut for print_hex_dump(KERN_DEBUG).
-
-For print_hex_dump_debug()/print_hex_dump_bytes(), format string is
-its 'prefix_str' argument, if it is constant string; or "hexdump"
-in case 'prefix_str' is build dynamically.
-
-Dynamic debug has even more useful features:
-
- * Simple query language allows turning on and off debugging
- statements by matching any combination of 0 or 1 of:
-
- - source filename
- - function name
- - line number (including ranges of line numbers)
- - module name
- - format string
-
- * Provides a debugfs control file: <debugfs>/dynamic_debug/control
- which can be read to display the complete list of known debug
- statements, to help guide you
-
-Controlling dynamic debug Behaviour
-===================================
-
-The behaviour of pr_debug()/dev_dbg()s are controlled via writing to a
-control file in the 'debugfs' filesystem. Thus, you must first mount
-the debugfs filesystem, in order to make use of this feature.
-Subsequently, we refer to the control file as:
-<debugfs>/dynamic_debug/control. For example, if you want to enable
-printing from source file 'svcsock.c', line 1603 you simply do:
-
-nullarbor:~ # echo 'file svcsock.c line 1603 +p' >
- <debugfs>/dynamic_debug/control
-
-If you make a mistake with the syntax, the write will fail thus:
-
-nullarbor:~ # echo 'file svcsock.c wtf 1 +p' >
- <debugfs>/dynamic_debug/control
--bash: echo: write error: Invalid argument
-
-Viewing Dynamic Debug Behaviour
-===========================
-
-You can view the currently configured behaviour of all the debug
-statements via:
-
-nullarbor:~ # cat <debugfs>/dynamic_debug/control
-# filename:lineno [module]function flags format
-/usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:323 [svcxprt_rdma]svc_rdma_cleanup =_ "SVCRDMA Module Removed, deregister RPC RDMA transport\012"
-/usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:341 [svcxprt_rdma]svc_rdma_init =_ "\011max_inline : %d\012"
-/usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:340 [svcxprt_rdma]svc_rdma_init =_ "\011sq_depth : %d\012"
-/usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svc_rdma.c:338 [svcxprt_rdma]svc_rdma_init =_ "\011max_requests : %d\012"
-...
-
-
-You can also apply standard Unix text manipulation filters to this
-data, e.g.
-
-nullarbor:~ # grep -i rdma <debugfs>/dynamic_debug/control | wc -l
-62
-
-nullarbor:~ # grep -i tcp <debugfs>/dynamic_debug/control | wc -l
-42
-
-The third column shows the currently enabled flags for each debug
-statement callsite (see below for definitions of the flags). The
-default value, with no flags enabled, is "=_". So you can view all
-the debug statement callsites with any non-default flags:
-
-nullarbor:~ # awk '$3 != "=_"' <debugfs>/dynamic_debug/control
-# filename:lineno [module]function flags format
-/usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svcsock.c:1603 [sunrpc]svc_send p "svc_process: st_sendto returned %d\012"
-
-
-Command Language Reference
-==========================
-
-At the lexical level, a command comprises a sequence of words separated
-by spaces or tabs. So these are all equivalent:
-
-nullarbor:~ # echo -c 'file svcsock.c line 1603 +p' >
- <debugfs>/dynamic_debug/control
-nullarbor:~ # echo -c ' file svcsock.c line 1603 +p ' >
- <debugfs>/dynamic_debug/control
-nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
- <debugfs>/dynamic_debug/control
-
-Command submissions are bounded by a write() system call.
-Multiple commands can be written together, separated by ';' or '\n'.
-
- ~# echo "func pnpacpi_get_resources +p; func pnp_assign_mem +p" \
- > <debugfs>/dynamic_debug/control
-
-If your query set is big, you can batch them too:
-
- ~# cat query-batch-file > <debugfs>/dynamic_debug/control
-
-A another way is to use wildcard. The match rule support '*' (matches
-zero or more characters) and '?' (matches exactly one character).For
-example, you can match all usb drivers:
-
- ~# echo "file drivers/usb/* +p" > <debugfs>/dynamic_debug/control
-
-At the syntactical level, a command comprises a sequence of match
-specifications, followed by a flags change specification.
-
-command ::= match-spec* flags-spec
-
-The match-spec's are used to choose a subset of the known pr_debug()
-callsites to which to apply the flags-spec. Think of them as a query
-with implicit ANDs between each pair. Note that an empty list of
-match-specs will select all debug statement callsites.
-
-A match specification comprises a keyword, which controls the
-attribute of the callsite to be compared, and a value to compare
-against. Possible keywords are:
-
-match-spec ::= 'func' string |
- 'file' string |
- 'module' string |
- 'format' string |
- 'line' line-range
-
-line-range ::= lineno |
- '-'lineno |
- lineno'-' |
- lineno'-'lineno
-// Note: line-range cannot contain space, e.g.
-// "1-30" is valid range but "1 - 30" is not.
-
-lineno ::= unsigned-int
-
-The meanings of each keyword are:
-
-func
- The given string is compared against the function name
- of each callsite. Example:
-
- func svc_tcp_accept
-
-file
- The given string is compared against either the full pathname, the
- src-root relative pathname, or the basename of the source file of
- each callsite. Examples:
-
- file svcsock.c
- file kernel/freezer.c
- file /usr/src/packages/BUILD/sgi-enhancednfs-1.4/default/net/sunrpc/svcsock.c
-
-module
- The given string is compared against the module name
- of each callsite. The module name is the string as
- seen in "lsmod", i.e. without the directory or the .ko
- suffix and with '-' changed to '_'. Examples:
-
- module sunrpc
- module nfsd
-
-format
- The given string is searched for in the dynamic debug format
- string. Note that the string does not need to match the
- entire format, only some part. Whitespace and other
- special characters can be escaped using C octal character
- escape \ooo notation, e.g. the space character is \040.
- Alternatively, the string can be enclosed in double quote
- characters (") or single quote characters (').
- Examples:
-
- format svcrdma: // many of the NFS/RDMA server pr_debugs
- format readahead // some pr_debugs in the readahead cache
- format nfsd:\040SETATTR // one way to match a format with whitespace
- format "nfsd: SETATTR" // a neater way to match a format with whitespace
- format 'nfsd: SETATTR' // yet another way to match a format with whitespace
-
-line
- The given line number or range of line numbers is compared
- against the line number of each pr_debug() callsite. A single
- line number matches the callsite line number exactly. A
- range of line numbers matches any callsite between the first
- and last line number inclusive. An empty first number means
- the first line in the file, an empty line number means the
- last number in the file. Examples:
-
- line 1603 // exactly line 1603
- line 1600-1605 // the six lines from line 1600 to line 1605
- line -1605 // the 1605 lines from line 1 to line 1605
- line 1600- // all lines from line 1600 to the end of the file
-
-The flags specification comprises a change operation followed
-by one or more flag characters. The change operation is one
-of the characters:
-
- - remove the given flags
- + add the given flags
- = set the flags to the given flags
-
-The flags are:
-
- p enables the pr_debug() callsite.
- f Include the function name in the printed message
- l Include line number in the printed message
- m Include module name in the printed message
- t Include thread ID in messages not generated from interrupt context
- _ No flags are set. (Or'd with others on input)
-
-For print_hex_dump_debug() and print_hex_dump_bytes(), only 'p' flag
-have meaning, other flags ignored.
-
-For display, the flags are preceded by '='
-(mnemonic: what the flags are currently equal to).
-
-Note the regexp ^[-+=][flmpt_]+$ matches a flags specification.
-To clear all flags at once, use "=_" or "-flmpt".
-
-
-Debug messages during Boot Process
-==================================
-
-To activate debug messages for core code and built-in modules during
-the boot process, even before userspace and debugfs exists, use
-dyndbg="QUERY", module.dyndbg="QUERY", or ddebug_query="QUERY"
-(ddebug_query is obsoleted by dyndbg, and deprecated). QUERY follows
-the syntax described above, but must not exceed 1023 characters. Your
-bootloader may impose lower limits.
-
-These dyndbg params are processed just after the ddebug tables are
-processed, as part of the arch_initcall. Thus you can enable debug
-messages in all code run after this arch_initcall via this boot
-parameter.
-
-On an x86 system for example ACPI enablement is a subsys_initcall and
- dyndbg="file ec.c +p"
-will show early Embedded Controller transactions during ACPI setup if
-your machine (typically a laptop) has an Embedded Controller.
-PCI (or other devices) initialization also is a hot candidate for using
-this boot parameter for debugging purposes.
-
-If foo module is not built-in, foo.dyndbg will still be processed at
-boot time, without effect, but will be reprocessed when module is
-loaded later. dyndbg_query= and bare dyndbg= are only processed at
-boot.
-
-
-Debug Messages at Module Initialization Time
-============================================
-
-When "modprobe foo" is called, modprobe scans /proc/cmdline for
-foo.params, strips "foo.", and passes them to the kernel along with
-params given in modprobe args or /etc/modprob.d/*.conf files,
-in the following order:
-
-1. # parameters given via /etc/modprobe.d/*.conf
- options foo dyndbg=+pt
- options foo dyndbg # defaults to +p
-
-2. # foo.dyndbg as given in boot args, "foo." is stripped and passed
- foo.dyndbg=" func bar +p; func buz +mp"
-
-3. # args to modprobe
- modprobe foo dyndbg==pmf # override previous settings
-
-These dyndbg queries are applied in order, with last having final say.
-This allows boot args to override or modify those from /etc/modprobe.d
-(sensible, since 1 is system wide, 2 is kernel or boot specific), and
-modprobe args to override both.
-
-In the foo.dyndbg="QUERY" form, the query must exclude "module foo".
-"foo" is extracted from the param-name, and applied to each query in
-"QUERY", and only 1 match-spec of each type is allowed.
-
-The dyndbg option is a "fake" module parameter, which means:
-
-- modules do not need to define it explicitly
-- every module gets it tacitly, whether they use pr_debug or not
-- it doesn't appear in /sys/module/$module/parameters/
- To see it, grep the control file, or inspect /proc/cmdline.
-
-For CONFIG_DYNAMIC_DEBUG kernels, any settings given at boot-time (or
-enabled by -DDEBUG flag during compilation) can be disabled later via
-the sysfs interface if the debug messages are no longer needed:
-
- echo "module module_name -p" > <debugfs>/dynamic_debug/control
-
-Examples
-========
-
-// enable the message at line 1603 of file svcsock.c
-nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
- <debugfs>/dynamic_debug/control
-
-// enable all the messages in file svcsock.c
-nullarbor:~ # echo -n 'file svcsock.c +p' >
- <debugfs>/dynamic_debug/control
-
-// enable all the messages in the NFS server module
-nullarbor:~ # echo -n 'module nfsd +p' >
- <debugfs>/dynamic_debug/control
-
-// enable all 12 messages in the function svc_process()
-nullarbor:~ # echo -n 'func svc_process +p' >
- <debugfs>/dynamic_debug/control
-
-// disable all 12 messages in the function svc_process()
-nullarbor:~ # echo -n 'func svc_process -p' >
- <debugfs>/dynamic_debug/control
-
-// enable messages for NFS calls READ, READLINK, READDIR and READDIR+.
-nullarbor:~ # echo -n 'format "nfsd: READ" +p' >
- <debugfs>/dynamic_debug/control
-
-// enable messages in files of which the paths include string "usb"
-nullarbor:~ # echo -n '*usb* +p' > <debugfs>/dynamic_debug/control
-
-// enable all messages
-nullarbor:~ # echo -n '+p' > <debugfs>/dynamic_debug/control
-
-// add module, function to all enabled messages
-nullarbor:~ # echo -n '+mf' > <debugfs>/dynamic_debug/control
-
-// boot-args example, with newlines and comments for readability
-Kernel command line: ...
- // see whats going on in dyndbg=value processing
- dynamic_debug.verbose=1
- // enable pr_debugs in 2 builtins, #cmt is stripped
- dyndbg="module params +p #cmt ; module sys +p"
- // enable pr_debugs in 2 functions in a module loaded later
- pc87360.dyndbg="func pc87360_init_device +p; func pc87360_find +p"
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 14cdc101d165..69e2387ca278 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -447,7 +447,6 @@ prototypes:
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
@@ -557,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must
not block. If it's not possible to reach a page without blocking,
filesystem should skip it. Filesystem should use do_set_pte() to setup
page table entry. Pointer to entry associated with the page is passed in
-"pte" field in fault_env structure. Pointers to entries for other offsets
+"pte" field in vm_fault structure. Pointers to entries for other offsets
should be calculated relative to "pte".
->page_mkwrite() is called when a previously read-only pte is
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index 8ec9136aae56..3828e85345ae 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -174,7 +174,7 @@ among other things. For that, it needs a type.
void (*release)(struct config_item *);
int (*allow_link)(struct config_item *src,
struct config_item *target);
- int (*drop_link)(struct config_item *src,
+ void (*drop_link)(struct config_item *src,
struct config_item *target);
};
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 23d18b8a49d5..a7e6e14aeb08 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -58,22 +58,22 @@ Implementation Tips for Filesystem Writers
Filesystem support consists of
- adding support to mark inodes as being DAX by setting the S_DAX flag in
i_flags
-- implementing the direct_IO address space operation, and calling
- dax_do_io() instead of blockdev_direct_IO() if S_DAX is set
+- implementing ->read_iter and ->write_iter operations which use dax_iomap_rw()
+ when inode has S_DAX flag set
- implementing an mmap file operation for DAX files which sets the
VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
- include handlers for fault, pmd_fault and page_mkwrite (which should
- probably call dax_fault(), dax_pmd_fault() and dax_mkwrite(), passing the
- appropriate get_block() callback)
-- calling dax_truncate_page() instead of block_truncate_page() for DAX files
-- calling dax_zero_page_range() instead of zero_user() for DAX files
+ include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
+ handlers should probably call dax_iomap_fault() (for fault and page_mkwrite
+ handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate
+ iomap operations.
+- calling iomap_zero_range() passing appropriate iomap operations instead of
+ block_truncate_page() for DAX files
- ensuring that there is sufficient locking between reads, writes,
truncates and page faults
-The get_block() callback passed to the DAX functions may return
-uninitialised extents. If it does, it must ensure that simultaneous
-calls to get_block() (for example by a page-fault racing with a read()
-or a write()) work correctly.
+The iomap handlers for allocating blocks must make sure that allocated blocks
+are zeroed out and converted to written extents before being returned to avoid
+exposure of uninitialized data through mmap.
These filesystems may be used for inspiration:
- ext2: see Documentation/filesystems/ext2.txt
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6c0108eb0137..3698ed3146e3 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -351,14 +351,13 @@ nouid32 Disables 32-bit UIDs and GIDs. This is for
interoperability with older kernels which only
store and expect 16-bit values.
-block_validity This options allows to enables/disables the in-kernel
+block_validity(*) These options enable or disable the in-kernel
noblock_validity facility for tracking filesystem metadata blocks
- within internal data structures. This allows multi-
- block allocator and other routines to quickly locate
- extents which might overlap with filesystem metadata
- blocks. This option is intended for debugging
- purposes and since it negatively affects the
- performance, it is off by default.
+ within internal data structures. This allows multi-
+ block allocator and other routines to notice
+ bugs or corrupted allocation bitmaps which cause
+ blocks to be allocated which overlap with
+ filesystem metadata blocks.
dioread_lock Controls whether or not ext4 should use the DIO read
dioread_nolock locking. If the dioread_nolock option is specified
diff --git a/Documentation/filesystems/locks.txt b/Documentation/filesystems/locks.txt
index 2cf81082581d..5368690f412e 100644
--- a/Documentation/filesystems/locks.txt
+++ b/Documentation/filesystems/locks.txt
@@ -19,7 +19,7 @@ forever.
This should not cause problems for anybody, since everybody using a
2.1.x kernel should have updated their C library to a suitable version
-anyway (see the file "Documentation/Changes".)
+anyway (see the file "Documentation/process/changes.rst".)
1.2 Allow Mixed Locks Again
---------------------------
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 0b2883b17d4c..5efae00f6c7f 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -11,7 +11,7 @@ Updated 2006 by Horms <horms@verge.net.au>
In order to use a diskless system, such as an X-terminal or printer server
for example, it is necessary for the root filesystem to be present on a
non-disk device. This may be an initramfs (see Documentation/filesystems/
-ramfs-rootfs-initramfs.txt), a ramdisk (see Documentation/initrd.txt) or a
+ramfs-rootfs-initramfs.txt), a ramdisk (see Documentation/admin-guide/initrd.rst) or a
filesystem mounted via NFS. The following text describes on how to use NFS
for the root filesystem. For the rest of this text 'client' means the
diskless system, and 'server' means the NFS server.
@@ -284,7 +284,7 @@ They depend on various facilities being available:
"kernel <relative-path-below /tftpboot>". The nfsroot parameters
are passed to the kernel by adding them to the "append" line.
It is common to use serial console in conjunction with pxeliunx,
- see Documentation/serial-console.txt for more information.
+ see Documentation/admin-guide/serial-console.rst for more information.
For more information on isolinux, including how to create bootdisks
for prebuilt kernels, see http://syslinux.zytor.com/
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 74329fd0add2..72624a16b792 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -191,6 +191,7 @@ read the file /proc/PID/status:
CapPrm: 0000000000000000
CapEff: 0000000000000000
CapBnd: ffffffffffffffff
+ NoNewPrivs: 0
Seccomp: 0
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1
@@ -262,6 +263,7 @@ Table 1-2: Contents of the status files (as of 4.1)
CapPrm bitmap of permitted capabilities
CapEff bitmap of effective capabilities
CapBnd bitmap of capabilities bounding set
+ NoNewPrivs no_new_privs, like prctl(PR_GET_NO_NEW_PRIV, ...)
Seccomp seccomp mode, like prctl(PR_GET_SECCOMP, ...)
Cpus_allowed mask of CPUs on which this process may run
Cpus_allowed_list Same as previous, but in "list format"
@@ -1305,7 +1307,16 @@ second). The meanings of the columns are as follows, from left to right:
- nice: niced processes executing in user mode
- system: processes executing in kernel mode
- idle: twiddling thumbs
-- iowait: waiting for I/O to complete
+- iowait: In a word, iowait stands for waiting for I/O to complete. But there
+ are several problems:
+ 1. The CPU will not wait for I/O to complete; iowait is the time that a task
+ is waiting for I/O to complete. When a CPU goes into the idle state for an
+ outstanding task's I/O, another task will be scheduled on this CPU.
+ 2. On a multi-core CPU, the task waiting for I/O to complete is not running
+ on any CPU, so the iowait of each CPU is difficult to calculate.
+ 3. The value of the iowait field in /proc/stat will decrease in certain
+ conditions.
+ So, iowait is not reliable when read from /proc/stat.
- irq: servicing interrupts
- softirq: servicing softirqs
- steal: involuntary wait
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index d619c8d71966..b5039a00caaf 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -828,7 +828,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
- int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index c2d44e6e117b..3b9b5c149f32 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -51,13 +51,6 @@ default behaviour.
CRC enabled filesystems always use the attr2 format, and so
will reject the noattr2 mount option if it is set.
- barrier (*)
- nobarrier
- Enables/disables the use of block layer write barriers for
- writes into the journal and for data integrity operations.
- This allows for drive level write caching to be enabled, for
- devices that support write barriers.
-
discard
nodiscard (*)
Enable/disable the issuing of commands to let the block
@@ -228,7 +221,10 @@ default behaviour.
Deprecated Mount Options
========================
-None at present.
+ Name Removal Schedule
+ ---- ----------------
+ barrier no earlier than v4.15
+ nobarrier no earlier than v4.15
Removed Mount Options
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
index ce3e84fa9023..86ee5078fd03 100644
--- a/Documentation/fpga/fpga-mgr.txt
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -18,31 +18,37 @@ API Functions:
To program the FPGA from a file or from a buffer:
-------------------------------------------------
- int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+ int fpga_mgr_buf_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count);
Load the FPGA from an image which exists as a buffer in memory.
- int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+ int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name);
Load the FPGA from an image which exists as a file. The image file must be on
-the firmware search path (see the firmware class documentation).
-
-For both these functions, flags == 0 for normal full reconfiguration or
-FPGA_MGR_PARTIAL_RECONFIG for partial reconfiguration. If successful, the FPGA
-ends up in operating mode. Return 0 on success or a negative error code.
+the firmware search path (see the firmware class documentation). If successful,
+the FPGA ends up in operating mode. Return 0 on success or a negative error
+code.
+An FPGA design contained in an FPGA image file will likely have particulars that
+affect how the image is programmed to the FPGA. These are contained in struct
+fpga_image_info. Currently the only such particular is a single flag bit
+indicating whether the image is for full or partial reconfiguration.
To get/put a reference to a FPGA manager:
-----------------------------------------
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+ struct fpga_manager *fpga_mgr_get(struct device *dev);
+
+Given a DT node or device, get an exclusive reference to a FPGA manager.
void fpga_mgr_put(struct fpga_manager *mgr);
-Given a DT node, get an exclusive reference to a FPGA manager or release
-the reference.
+Release the reference.
To register or unregister the low level FPGA-specific driver:
@@ -70,8 +76,11 @@ struct device_node *mgr_node = ...
char *buf = ...
int count = ...
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
/* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
int ret;
@@ -79,7 +88,7 @@ int ret;
struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
/* Load the buffer to the FPGA */
-ret = fpga_mgr_buf_load(mgr, flags, buf, count);
+ret = fpga_mgr_buf_load(mgr, &info, buf, count);
/* Release the FPGA manager */
fpga_mgr_put(mgr);
@@ -96,8 +105,11 @@ struct device_node *mgr_node = ...
/* FPGA image is in this file which is in the firmware search path */
const char *path = "fpga-image-9.rbf"
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
/* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
int ret;
@@ -105,7 +117,7 @@ int ret;
struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
/* Get the firmware image (path) and load it to the FPGA */
-ret = fpga_mgr_firmware_load(mgr, flags, path);
+ret = fpga_mgr_firmware_load(mgr, &info, path);
/* Release the FPGA manager */
fpga_mgr_put(mgr);
@@ -157,7 +169,10 @@ The programming sequence is:
2. .write (may be called once or multiple times)
3. .write_complete
-The .write_init function will prepare the FPGA to receive the image data.
+The .write_init function will prepare the FPGA to receive the image data. The
+buffer passed into .write_init will be at most .initial_header_size bytes long;
+if the whole bitstream is not immediately available then the core code will
+buffer up at least this much before starting.
The .write function writes a buffer to the FPGA. The buffer may be contain the
whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
diff --git a/Documentation/frv/booting.txt b/Documentation/frv/booting.txt
index 9bdf4b46e741..cd9dc1dfb144 100644
--- a/Documentation/frv/booting.txt
+++ b/Documentation/frv/booting.txt
@@ -119,7 +119,7 @@ separated by spaces:
253:0 Device with major 253 and minor 0
Authoritative information can be found in
- "Documentation/kernel-parameters.txt".
+ "Documentation/admin-guide/kernel-parameters.rst".
(*) rw
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index 368d5a294d89..747c721776ed 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -175,8 +175,8 @@ The IRQ portions of the GPIO block are implemented using an irqchip, using
the header <linux/irq.h>. So basically such a driver is utilizing two sub-
systems simultaneously: gpio and irq.
-RT_FULL: GPIO driver should not use spinlock_t or any sleepable APIs
-(like PM runtime) as part of its irq_chip implementation on -RT.
+RT_FULL: a realtime compliant GPIO driver should not use spinlock_t or any
+sleepable APIs (like PM runtime) as part of its irq_chip implementation.
- spinlock_t should be replaced with raw_spinlock_t [1].
- If sleepable APIs have to be used, these can be done from the .irq_bus_lock()
and .irq_bus_unlock() callbacks, as these are the only slowpath callbacks
@@ -185,33 +185,32 @@ RT_FULL: GPIO driver should not use spinlock_t or any sleepable APIs
GPIO irqchips usually fall in one of two categories:
* CHAINED GPIO irqchips: these are usually the type that is embedded on
- an SoC. This means that there is a fast IRQ handler for the GPIOs that
+ an SoC. This means that there is a fast IRQ flow handler for the GPIOs that
gets called in a chain from the parent IRQ handler, most typically the
- system interrupt controller. This means the GPIO irqchip is registered
- using irq_set_chained_handler() or the corresponding
- gpiochip_set_chained_irqchip() helper function, and the GPIO irqchip
- handler will be called immediately from the parent irqchip, while
- holding the IRQs disabled. The GPIO irqchip will then end up calling
- something like this sequence in its interrupt handler:
-
- static irqreturn_t tc3589x_gpio_irq(int irq, void *data)
+ system interrupt controller. This means that the GPIO irqchip handler will
+ be called immediately from the parent irqchip, while holding the IRQs
+ disabled. The GPIO irqchip will then end up calling something like this
+ sequence in its interrupt handler:
+
+ static irqreturn_t foo_gpio_irq(int irq, void *data)
chained_irq_enter(...);
generic_handle_irq(...);
chained_irq_exit(...);
Chained GPIO irqchips typically can NOT set the .can_sleep flag on
- struct gpio_chip, as everything happens directly in the callbacks.
+ struct gpio_chip, as everything happens directly in the callbacks: no
+ slow bus traffic like I2C can be used.
RT_FULL: Note, chained IRQ handlers will not be forced threaded on -RT.
As a result, spinlock_t or any sleepable APIs (like PM runtime) can't be used
in chained IRQ handler.
- if required (and if it can't be converted to the nested threaded GPIO irqchip)
- - chained IRQ handler can be converted to generic irq handler and this way
- it will be threaded IRQ handler on -RT and hard IRQ handler on non-RT
+ If required (and if it can't be converted to the nested threaded GPIO irqchip)
+ a chained IRQ handler can be converted to generic irq handler and this way
+ it will be a threaded IRQ handler on -RT and a hard IRQ handler on non-RT
(for example, see [3]).
Known W/A: The generic_handle_irq() is expected to be called with IRQ disabled,
- so IRQ core will complain if it will be called from IRQ handler which is
- forced thread. The "fake?" raw lock can be used to W/A this problem:
+ so the IRQ core will complain if it is called from an IRQ handler which is
+ forced to a thread. The "fake?" raw lock can be used to W/A this problem:
raw_spinlock_t wa_lock;
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
@@ -243,7 +242,7 @@ GPIO irqchips usually fall in one of two categories:
by the driver. The hallmark of this driver is to call something like
this in its interrupt handler:
- static irqreturn_t tc3589x_gpio_irq(int irq, void *data)
+ static irqreturn_t foo_gpio_irq(int irq, void *data)
...
handle_nested_irq(irq);
@@ -256,23 +255,31 @@ associated irqdomain and resource allocation callbacks, the gpiolib has
some helpers that can be enabled by selecting the GPIOLIB_IRQCHIP Kconfig
symbol:
-* gpiochip_irqchip_add(): adds an irqchip to a gpiochip. It will pass
+* gpiochip_irqchip_add(): adds a chained irqchip to a gpiochip. It will pass
the struct gpio_chip* for the chip to all IRQ callbacks, so the callbacks
need to embed the gpio_chip in its state container and obtain a pointer
to the container using container_of().
(See Documentation/driver-model/design-patterns.txt)
- If there is a need to exclude certain GPIOs from the IRQ domain, one can
- set .irq_need_valid_mask of the gpiochip before gpiochip_add_data() is
- called. This allocates .irq_valid_mask with as many bits set as there are
- GPIOs in the chip. Drivers can exclude GPIOs by clearing bits from this
- mask. The mask must be filled in before gpiochip_irqchip_add() is called.
+* gpiochip_irqchip_add_nested(): adds a nested irqchip to a gpiochip.
+ Apart from that it works exactly like the chained irqchip.
* gpiochip_set_chained_irqchip(): sets up a chained irq handler for a
gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler
data. (Notice handler data, since the irqchip data is likely used by the
- parent irqchip!) This is for the chained type of chip. This is also used
- to set up a nested irqchip if NULL is passed as handler.
+ parent irqchip!).
+
+* gpiochip_set_nested_irqchip(): sets up a nested irq handler for a
+ gpio_chip from a parent IRQ. As the parent IRQ has usually been
+ explicitly requested by the driver, this does very little more than
+ mark all the child IRQs as having the other IRQ as parent.
+
+If there is a need to exclude certain GPIOs from the IRQ domain, you can
+set .irq_need_valid_mask of the gpiochip before gpiochip_add_data() is
+called. This allocates an .irq_valid_mask with as many bits set as there
+are GPIOs in the chip. Drivers can exclude GPIOs by clearing bits from this
+mask. The mask must be filled in before gpiochip_irqchip_add() or
+gpiochip_irqchip_add_nested() is called.
To use the helpers please keep the following in mind:
@@ -323,6 +330,9 @@ When implementing an irqchip inside a GPIO driver, these two functions should
typically be called in the .startup() and .shutdown() callbacks from the
irqchip.
+When using the gpiolib irqchip helpers, these callbacks are automatically
+assigned.
+
Real-Time compliance for GPIO IRQ chips
---------------------------------------
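As a rough sketch of the chained pattern and the GPIOLIB_IRQCHIP helpers
discussed in the hunk above, a hypothetical driver might wire things up as
follows; the "foo" names, the status register offset and the irq_chip passed
in are invented for illustration::

    #include <linux/bitops.h>
    #include <linux/gpio/driver.h>
    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqchip/chained_irq.h>
    #include <linux/irqdomain.h>

    #define FOO_IRQ_STATUS 0x10         /* hypothetical status register */

    struct foo_gpio {
            struct gpio_chip chip;
            void __iomem *base;         /* hypothetical register block */
    };

    /* Chained flow handler: called from the parent irqchip with IRQs off */
    static void foo_gpio_irq_handler(struct irq_desc *desc)
    {
            struct gpio_chip *chip = irq_desc_get_handler_data(desc);
            struct irq_chip *parent_chip = irq_desc_get_chip(desc);
            struct foo_gpio *foo = gpiochip_get_data(chip);
            unsigned long pending;
            unsigned int offset;

            chained_irq_enter(parent_chip, desc);

            pending = readl(foo->base + FOO_IRQ_STATUS);
            for_each_set_bit(offset, &pending, chip->ngpio)
                    generic_handle_irq(irq_find_mapping(chip->irqdomain,
                                                        offset));

            chained_irq_exit(parent_chip, desc);
    }

    /* Registration using the helpers described above */
    static int foo_gpio_add_irqchip(struct foo_gpio *foo,
                                    struct irq_chip *irqchip, int parent_irq)
    {
            int ret;

            /* Allocates the irqdomain and hooks up the IRQ callbacks */
            ret = gpiochip_irqchip_add(&foo->chip, irqchip, 0,
                                       handle_level_irq, IRQ_TYPE_NONE);
            if (ret)
                    return ret;

            /* Install foo_gpio_irq_handler as the parent's flow handler */
            gpiochip_set_chained_irqchip(&foo->chip, irqchip, parent_irq,
                                         foo_gpio_irq_handler);
            return 0;
    }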
diff --git a/Documentation/gpu/conf.py b/Documentation/gpu/conf.py
index 6314d1708230..1757b040fb32 100644
--- a/Documentation/gpu/conf.py
+++ b/Documentation/gpu/conf.py
@@ -3,3 +3,8 @@
project = "Linux GPU Driver Developer's Guide"
tags.add("subproject")
+
+latex_documents = [
+ ('index', 'gpu.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 37284bcc7764..e35920db1f4c 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -143,6 +143,9 @@ Device Instance and Driver Handling
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:export:
+.. kernel-doc:: include/drm/drm_drv.h
+ :internal:
+
Driver Load
-----------
@@ -350,6 +353,23 @@ how the ioctl is allowed to be called.
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:export:
+
+Misc Utilities
+==============
+
+Printer
+-------
+
+.. kernel-doc:: include/drm/drm_print.h
+ :doc: print
+
+.. kernel-doc:: include/drm/drm_print.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_print.c
+ :export:
+
+
Legacy Support Code
===================
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index bb4254d19cbb..03040aa14fe8 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -63,6 +63,9 @@ Atomic State Reset and Initialization
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:doc: atomic state reset and initialization
+Helper Functions Reference
+--------------------------
+
.. kernel-doc:: include/drm/drm_atomic_helper.h
:internal:
@@ -261,14 +264,6 @@ Plane Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
:export:
-Tile group
-==========
-
-# FIXME: This should probably be moved into a property documentation section
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
- :doc: Tile group
-
Auxiliary Modeset Helpers
=========================
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c105d2..0c9abdc0ee31 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -15,25 +15,24 @@ be setup by initializing the following fields.
- struct drm_mode_config_funcs \*funcs;
Mode setting functions.
-Modeset Base Object Abstraction
-===============================
+Mode Configuration
-.. kernel-doc:: include/drm/drm_mode_object.h
- :internal:
+KMS Core Structures and Functions
+=================================
-.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
:export:
-KMS Data Structures
-===================
-
-.. kernel-doc:: include/drm/drm_crtc.h
+.. kernel-doc:: include/drm/drm_mode_config.h
:internal:
-KMS API Functions
-=================
+Modeset Base Object Abstraction
+===============================
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+.. kernel-doc:: include/drm/drm_mode_object.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
:export:
Atomic Mode Setting Function Reference
@@ -45,6 +44,15 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: include/drm/drm_atomic.h
:internal:
+CRTC Abstraction
+================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_crtc.h
+ :internal:
+
Frame Buffer Abstraction
========================
@@ -63,52 +71,17 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
+.. kernel-doc:: include/drm/drm_fourcc.h
+ :internal:
+
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
Dumb Buffer Objects
===================
-The KMS API doesn't standardize backing storage object creation and
-leaves it to driver-specific ioctls. Furthermore actually creating a
-buffer object even for GEM-based drivers is done through a
-driver-specific ioctl - GEM only has a common userspace interface for
-sharing and destroying objects. While not an issue for full-fledged
-graphics stacks that include device-specific userspace components (in
-libdrm for instance), this limit makes DRM-based early boot graphics
-unnecessarily complex.
-
-Dumb objects partly alleviate the problem by providing a standard API to
-create dumb buffers suitable for scanout, which can then be used to
-create KMS frame buffers.
-
-To support dumb objects drivers must implement the dumb_create,
-dumb_destroy and dumb_map_offset operations.
-
-- int (\*dumb_create)(struct drm_file \*file_priv, struct
- drm_device \*dev, struct drm_mode_create_dumb \*args);
- The dumb_create operation creates a driver object (GEM or TTM
- handle) suitable for scanout based on the width, height and depth
- from the struct :c:type:`struct drm_mode_create_dumb
- <drm_mode_create_dumb>` argument. It fills the argument's
- handle, pitch and size fields with a handle for the newly created
- object and its line pitch and size in bytes.
-
-- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle);
- The dumb_destroy operation destroys a dumb object created by
- dumb_create.
-
-- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
- drm_device \*dev, uint32_t handle, uint64_t \*offset);
- The dumb_map_offset operation associates an mmap fake offset with
- the object given by the handle and returns it. Drivers must use the
- :c:func:`drm_gem_create_mmap_offset()` function to associate
- the fake offset as described in ?.
-
-Note that dumb objects may not be used for gpu acceleration, as has been
-attempted on some ARM embedded platforms. Such drivers really must have
-a hardware-specific ioctl to allocate suitable buffer objects.
+.. kernel-doc:: drivers/gpu/drm/drm_dumb_buffers.c
+ :doc: overview
Plane Abstraction
=================
@@ -215,7 +188,7 @@ Connectors state change detection must be cleanup up with a call to
Output discovery and initialization example
-------------------------------------------
-::
+.. code-block:: c
void intel_crt_init(struct drm_device *dev)
{
@@ -287,6 +260,12 @@ Property Types and Blob Property Support
.. kernel-doc:: drivers/gpu/drm/drm_property.c
:export:
+Standard Connector Properties
+-----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: standard connector properties
+
Plane Composition Properties
----------------------------
@@ -308,6 +287,18 @@ Color Management Properties
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:export:
+Tile Group Property
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Tile group
+
+Explicit Fencing Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :doc: explicit fencing properties
+
Existing KMS Properties
-----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index bca808535dfd..cb5daffcd6be 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -45,7 +45,7 @@ the radeon_ttm.c file for an example of usage.
The ttm_global_reference structure is made up of several fields:
-::
+.. code-block:: c
struct ttm_global_reference {
enum ttm_global_types global_type;
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 1ba301cebe16..de3ac9f90f8f 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -216,3 +216,9 @@ interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons these
interfaces can be rather substantial. Hence every driver has its own
chapter.
+
+Testing and validation
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
+ :doc: CRC ABI
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 87aaffc22920..117d2ab7a5f7 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
+Intel GVT-g Host Support(vGPU device model)
+-------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :doc: Intel GVT-g host support
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :internal:
+
Display Hardware Handling
=========================
@@ -180,7 +189,7 @@ Display Refresh Rate Switching (DRRS)
DPIO
----
-.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
:doc: DPIO
CSR firmware support for DMC
@@ -249,19 +258,19 @@ Global GTT views
GTT Fences and Swizzling
------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:internal:
Global GTT Fence Handling
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: fence register handling
Hardware Tiling and Swizzling Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
:doc: tiling swizzling details
Object Tiling IOCTLs
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index be0dafcf5556..367d7c36b8e9 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -14,7 +14,7 @@ Linux GPU Driver Developer's Guide
vga-switcheroo
vgaarbiter
-.. only:: subproject
+.. only:: subproject and html
Indices
=======
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.txt
index ef9d74947f5c..2505ae67e2b6 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.txt
@@ -23,7 +23,6 @@ Each hardware monitoring driver must #include <linux/hwmon.h> and, in most
cases, <linux/hwmon-sysfs.h>. linux/hwmon.h declares the following
register/unregister functions:
-struct device *hwmon_device_register(struct device *dev);
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -38,36 +37,31 @@ struct device *
hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const struct attribute_group **extra_groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
- const char *name,
- void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **groups);
+ const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
-hwmon_device_register registers a hardware monitoring device. The parameter
-of this function is a pointer to the parent device.
-This function returns a pointer to the newly created hardware monitoring device
-or PTR_ERR for failure. If this registration function is used, hardware
-monitoring sysfs attributes are expected to have been created and attached to
-the parent device prior to calling hwmon_device_register. A name attribute must
-have been created by the caller.
-
-hwmon_device_register_with_groups is similar to hwmon_device_register. However,
-it has additional parameters. The name parameter is a pointer to the hwmon
-device name. The registration function wil create a name sysfs attribute
-pointing to this name. The drvdata parameter is the pointer to the local
-driver data. hwmon_device_register_with_groups will attach this pointer
-to the newly allocated hwmon device. The pointer can be retrieved by the driver
-using dev_get_drvdata() on the hwmon device pointer. The groups parameter is
+hwmon_device_register_with_groups registers a hardware monitoring device.
+The first parameter of this function is a pointer to the parent device.
+The name parameter is a pointer to the hwmon device name. The registration
+function will create a name sysfs attribute pointing to this name.
+The drvdata parameter is the pointer to the local driver data.
+hwmon_device_register_with_groups will attach this pointer to the newly
+allocated hwmon device. The pointer can be retrieved by the driver using
+dev_get_drvdata() on the hwmon device pointer. The groups parameter is
a pointer to a list of sysfs attribute groups. The list must be NULL terminated.
hwmon_device_register_with_groups creates the hwmon device with name attribute
as well as all sysfs attributes attached to the hwmon device.
+This function returns a pointer to the newly created hardware monitoring device
+or PTR_ERR for failure.
devm_hwmon_device_register_with_groups is similar to
hwmon_device_register_with_groups. However, it is device managed, meaning the
@@ -87,13 +81,13 @@ hwmon_device_unregister deregisters a registered hardware monitoring device.
The parameter of this function is the pointer to the registered hardware
monitoring device structure. This function must be called from the driver
remove function if the hardware monitoring device was registered with
-hwmon_device_register, hwmon_device_register_with_groups, or
-hwmon_device_register_with_info.
+hwmon_device_register_with_groups or hwmon_device_register_with_info.
devm_hwmon_device_unregister does not normally have to be called. It is only
needed for error handling, and only needed if the driver probe fails after
-the call to devm_hwmon_device_register_with_groups and if the automatic
-(device managed) removal would be too late.
+the call to devm_hwmon_device_register_with_groups or
+hwmon_device_register_with_info and if the automatic (device managed)
+removal would be too late.
Using devm_hwmon_device_register_with_info()
--------------------------------------------
@@ -106,9 +100,9 @@ const char *name Device name
void *drvdata Driver private data
const struct hwmon_chip_info *info
Pointer to chip description.
-const struct attribute_group **groups
- Null-terminated list of additional sysfs attribute
- groups.
+const struct attribute_group **extra_groups
+ Null-terminated list of additional non-standard
+ sysfs attribute groups.
This function returns a pointer to the created hardware monitoring device
on success and a negative error code for failure.
@@ -160,7 +154,7 @@ It contains following fields:
* type: The hardware monitoring sensor type.
Supported sensor types are
* hwmon_chip A virtual sensor type, used to describe attributes
- which apply to the entire chip.
+ * which are not bound to a specific input or output
* hwmon_temp Temperature sensor
* hwmon_in Voltage sensor
* hwmon_curr Current sensor
@@ -293,9 +287,9 @@ Driver-provided sysfs attributes
If the hardware monitoring device is registered with
hwmon_device_register_with_info or devm_hwmon_device_register_with_info,
-it is most likely not necessary to provide sysfs attributes. Only non-standard
-sysfs attributes need to be provided when one of those registration functions
-is used.
+it is most likely not necessary to provide sysfs attributes. Only additional
+non-standard sysfs attributes need to be provided when one of those registration
+functions is used.
The header file linux/hwmon-sysfs.h provides a number of useful macros to
declare and use hardware monitoring sysfs attributes.
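To make the groups-based registration above concrete, here is a hedged sketch
of a hypothetical "foo" driver with a single temperature attribute; only the
devm_hwmon_device_register_with_groups() call itself follows the documented
API, while the names and the stub value are invented::

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwmon.h>
    #include <linux/hwmon-sysfs.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/sysfs.h>

    struct foo_data {
            struct device *dev;
            /* ... driver state ... */
    };

    static ssize_t show_temp(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            /* A real driver would read the sensor; 42 degrees C as a stub */
            return sprintf(buf, "%d\n", 42000);
    }
    static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);

    static struct attribute *foo_attrs[] = {
            &sensor_dev_attr_temp1_input.dev_attr.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(foo);

    static int foo_register(struct device *dev)
    {
            struct foo_data *data;
            struct device *hwmon_dev;

            data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            data->dev = dev;

            /* The core creates the name attribute and attaches foo_groups */
            hwmon_dev = devm_hwmon_device_register_with_groups(dev, "foo",
                                                               data,
                                                               foo_groups);
            return PTR_ERR_OR_ZERO(hwmon_dev);
    }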
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
index 57f60307accc..f88221b46153 100644
--- a/Documentation/hwmon/submitting-patches
+++ b/Documentation/hwmon/submitting-patches
@@ -10,10 +10,10 @@ increase the chances of your change being accepted.
----------
* It should be unnecessary to mention, but please read and follow
- Documentation/SubmitChecklist
- Documentation/SubmittingDrivers
- Documentation/SubmittingPatches
- Documentation/CodingStyle
+ Documentation/process/submit-checklist.rst
+ Documentation/process/submitting-drivers.rst
+ Documentation/process/submitting-patches.rst
+ Documentation/process/coding-style.rst
* Please run your patch through 'checkpatch --strict'. There should be no
errors, no warnings, and few if any check messages. If there are any
diff --git a/Documentation/hwmon/tc654 b/Documentation/hwmon/tc654
new file mode 100644
index 000000000000..91a2843f5f98
--- /dev/null
+++ b/Documentation/hwmon/tc654
@@ -0,0 +1,31 @@
+Kernel driver tc654
+===================
+
+Supported chips:
+ * Microchip TC654 and TC655
+ Prefix: 'tc654'
+ Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
+
+Authors:
+ Chris Packham <chris.packham@alliedtelesis.co.nz>
+ Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
+
+Description
+-----------
+This driver implements support for the Microchip TC654 and TC655.
+
+The TC654 uses the 2-wire interface compatible with the SMBUS 2.0
+specification. The TC654 has two (2) inputs for measuring fan RPM and
+one (1) PWM output which can be used for fan control.
+
+Configuration Notes
+-------------------
+Ordinarily the pwm1_mode ABI is used for controlling the pwm output
+mode. However, for this chip the output is always pwm, and the
+pwm1_mode determines if the pwm output is controlled via the pwm1 value
+or via the Vin analog input.
+
+
+Setting pwm1_mode to 1 will cause the pwm output to be driven based on
+the pwm1 value. Setting pwm1_mode to 0 will cause the pwm output to be
+driven based on the Vin input.
diff --git a/Documentation/hwmon/tmp108 b/Documentation/hwmon/tmp108
new file mode 100644
index 000000000000..25802df23010
--- /dev/null
+++ b/Documentation/hwmon/tmp108
@@ -0,0 +1,36 @@
+Kernel driver tmp108
+====================
+
+Supported chips:
+ * Texas Instruments TMP108
+ Prefix: 'tmp108'
+ Addresses scanned: none
+ Datasheet: http://www.ti.com/product/tmp108
+
+Author:
+ John Muir <john@jmuir.com>
+
+Description
+-----------
+
+The Texas Instruments TMP108 implements one temperature sensor. An alert pin
+can be set when temperatures exceed minimum or maximum values plus or minus a
+hysteresis value. (This driver does not support interrupts for the alert pin,
+and the device runs in comparator mode.)
+
+The sensor is accurate to 0.75C over the range of -25 to +85 C, and to 1.0
+degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The
+operating temperature has a minimum of -55 C and a maximum of +150 C.
+Hysteresis values can be set to 0, 1, 2, or 4C.
+
+The TMP108 has a programmable update rate that can select between 8, 4, 1, and
+0.5 Hz.
+
+By default the TMP108 reads the temperature continuously. To conserve power,
+the TMP108 has a one-shot mode where the device is normally shut-down. When a
+one shot is requested the temperature is read, the result can be retrieved,
+and then the device is shut down automatically. (This driver only supports
+continuous mode.)
+
+The driver provides the common sysfs-interface for temperatures (see
+Documentation/hwmon/sysfs-interface under Temperatures).
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology
index e0aefeece551..1a014fede0b7 100644
--- a/Documentation/i2c/i2c-topology
+++ b/Documentation/i2c/i2c-topology
@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
This is a good topology.
- .--------.
+ .--------.
.----------. .--| dev D1 |
| parent- |--' '--------'
.--| locked | .--------.
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
This is a good topology.
- .--------.
+ .--------.
.----------. .--| dev D1 |
| mux- |--' '--------'
.--| locked | .--------.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index c53d089455a4..2bd8fdc9207c 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -3,21 +3,69 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
-Welcome to The Linux Kernel's documentation!
-============================================
+Welcome to The Linux Kernel's documentation
+===========================================
-Contents:
+This is the top level of the kernel's documentation tree. Kernel
+documentation, like the kernel itself, is very much a work in progress;
+that is especially true as we work to integrate our many scattered
+documents into a coherent whole. Please note that improvements to the
+documentation are welcome; join the linux-doc list at vger.kernel.org if
+you want to help out.
+
+User-oriented documentation
+---------------------------
+
+The following manuals are written for *users* of the kernel — those who are
+trying to get it to work optimally on a given system.
+
+.. toctree::
+ :maxdepth: 2
+
+ admin-guide/index
+
+Introduction to kernel development
+----------------------------------
+
+These manuals contain overall information about how to develop the kernel.
+The kernel community is quite large, with thousands of developers
+contributing over the course of a year. As with any large community,
+knowing how things are done will make the process of getting your changes
+merged much easier.
+
+.. toctree::
+ :maxdepth: 2
+
+ process/index
+ dev-tools/index
+ doc-guide/index
+
+Kernel API documentation
+------------------------
+
+These books get into the details of how specific kernel subsystems work
+from the point of view of a kernel developer. Much of the information here
+is taken directly from the kernel source, with supplemental material added
+as needed (or at least as we managed to add it — probably *not* all that is
+needed).
.. toctree::
:maxdepth: 2
- kernel-documentation
- development-process/index
- dev-tools/tools
driver-api/index
+ core-api/index
media/index
gpu/index
- 80211/index
+ security/index
+ sound/index
+
+Korean translations
+-------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ translations/ko_KR/index
Indices and tables
==================
diff --git a/Documentation/isdn/README b/Documentation/isdn/README
index cfb1884342ee..32d4e80c2c03 100644
--- a/Documentation/isdn/README
+++ b/Documentation/isdn/README
@@ -332,7 +332,7 @@ README for the ISDN-subsystem
4. Device-inodes
The major and minor numbers and their names are described in
- Documentation/devices.txt. The major numbers are:
+ Documentation/admin-guide/devices.rst. The major numbers are:
43 for the ISDN-tty's.
44 for the ISDN-callout-tty's.
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 069fcb3eef6e..262722d8867b 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -113,6 +113,34 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.
+- weak reverse dependencies: "imply" <symbol> ["if" <expr>]
+ This is similar to "select" as it enforces a lower limit on another
+ symbol except that the "implied" symbol's value may still be set to n
+ from a direct dependency or with a visible prompt.
+
+ Given the following example:
+
+ config FOO
+ tristate
+ imply BAZ
+
+ config BAZ
+ tristate
+ depends on BAR
+
+ The following values are possible:
+
+ FOO BAR BAZ's default choice for BAZ
+ --- --- ------------- --------------
+ n y n N/m/y
+ m y m M/y/n
+ y y y Y/n
+ y n * N
+
+ This is useful e.g. with multiple drivers that want to indicate their
+ ability to hook into a secondary subsystem while allowing the user to
+ configure that subsystem out without also having to unset these drivers.
+
- limiting menu display: "visible if" <expr>
This attribute is only applicable to menu blocks, if the condition is
false, the menu block is not displayed to the user (the symbols
@@ -481,6 +509,7 @@ historical issues resolved through these different solutions.
b) Match dependency semantics:
b1) Swap all "select FOO" to "depends on FOO" or,
b2) Swap all "depends on FOO" to "select FOO"
+ c) Consider the use of "imply" instead of "select"
The resolution to a) can be tested with the sample Kconfig file
Documentation/kbuild/Kconfig.recursion-issue-01 through the removal
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 062e3af271b7..104740ea0041 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -1,5 +1,5 @@
NOTE: this document is outdated and will eventually be removed. See
-Documentation/kernel-documentation.rst for current information.
+Documentation/doc-guide/ for current information.
kernel-doc nano-HOWTO
=====================
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index bbc3a8b8cff4..df31e30b6a02 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -264,7 +264,7 @@ To reduce its OS jitter, do at least one of the following:
kthreads from being created in the first place.
2. Boot with "nosoftlockup=0", which will also prevent these kthreads
from being created. Other related watchdog and softlockup boot
- parameters may be found in Documentation/kernel-parameters.txt
+ parameters may be found in Documentation/admin-guide/kernel-parameters.rst
and Documentation/watchdog/watchdog-parameters.txt.
3. Echo a zero to /proc/sys/kernel/watchdog to disable the
watchdog timer.
diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt
index 54bee77fa728..e5c7254e73d7 100644
--- a/Documentation/kselftest.txt
+++ b/Documentation/kselftest.txt
@@ -70,6 +70,17 @@ To install selftests in an user specified location:
$ cd tools/testing/selftests
$ ./kselftest_install.sh install_dir
+Running installed selftests
+===========================
+
+The Kselftest install, as well as the Kselftest tarball, provides a script
+named "run_kselftest.sh" to run the tests.
+
+You can simply do the following to run the installed Kselftests. Please
+note some tests will require root privileges.
+
+$ cd kselftest
+$ ./run_kselftest.sh
Contributing new tests
======================
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
index 0dbbd279c9b9..0961a060fc4d 100644
--- a/Documentation/leds/leds-lp5523.txt
+++ b/Documentation/leds/leds-lp5523.txt
@@ -34,8 +34,8 @@ There are two ways to run LED patterns.
Control interface for the engines:
x is 1 .. 3
enginex_mode : disabled, load, run
- enginex_load : microcode load (visible only in load mode)
- enginex_leds : led mux control (visible only in load mode)
+ enginex_load : microcode load
+ enginex_leds : led mux control
cd /sys/class/leds/lp5523:channel2/device
echo "load" > engine3_mode
diff --git a/Documentation/leds/uleds.txt b/Documentation/leds/uleds.txt
new file mode 100644
index 000000000000..13e375a580f9
--- /dev/null
+++ b/Documentation/leds/uleds.txt
@@ -0,0 +1,36 @@
+Userspace LEDs
+==============
+
+The uleds driver supports userspace LEDs. This can be useful for testing
+triggers and can also be used to implement virtual LEDs.
+
+
+Usage
+=====
+
+When the driver is loaded, a character device is created at /dev/uleds. To
+create a new LED class device, open /dev/uleds and write a uleds_user_dev
+structure to it (found in kernel public header file linux/uleds.h).
+
+ #define LED_MAX_NAME_SIZE 64
+
+ struct uleds_user_dev {
+ char name[LED_MAX_NAME_SIZE];
+ };
+
+A new LED class device will be created with the name given. The name can be
+any valid sysfs device node name, but consider using the LED class naming
+convention of "devicename:color:function".
+
+The current brightness is found by reading a single byte from the character
+device. Values are unsigned: 0 to 255. Reading will block until the brightness
+changes. The device node can also be polled to notify when the brightness value
+changes.
+
+The LED class device will be removed when the open file handle to /dev/uleds
+is closed.
+
+Multiple LED class devices are created by opening additional file handles to
+/dev/uleds.
+
+See tools/leds/uledmon.c for an example userspace program.
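A short userspace sketch of the flow described above (create one virtual LED,
then print brightness updates) might look like the following; the LED name is
just an example, and the int-sized brightness read follows the referenced
tools/leds/uledmon.c sample::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/uleds.h>

    int main(void)
    {
            struct uleds_user_dev dev = { .name = "uleds:green:example" };
            int brightness;
            int fd;

            fd = open("/dev/uleds", O_RDWR);
            if (fd < 0) {
                    perror("open /dev/uleds");
                    return 1;
            }

            /* Creates /sys/class/leds/uleds:green:example */
            if (write(fd, &dev, sizeof(dev)) != sizeof(dev)) {
                    perror("write");
                    return 1;
            }

            /* Each read blocks until the brightness changes */
            while (read(fd, &brightness, sizeof(brightness)) ==
                   sizeof(brightness))
                    printf("brightness: %d\n", brightness);

            /* Closing the fd (on exit) removes the LED class device */
            return 0;
    }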
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 6c43f6ebee8d..f5967316deb9 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -87,7 +87,7 @@ The theory about how to apply functions a safe way is rather complex.
The aim is to define a so-called consistency model. It attempts to define
conditions when the new implementation could be used so that the system
stays consistent. The theory is not yet finished. See the discussion at
-http://thread.gmane.org/gmane.linux.kernel/1823033/focus=1828189
+https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz
The current consistency model is very simple. It guarantees that either
the old or the new function is called. But various functions get redirected
diff --git a/Documentation/local_ops.txt b/Documentation/local_ops.txt
deleted file mode 100644
index 407576a23317..000000000000
--- a/Documentation/local_ops.txt
+++ /dev/null
@@ -1,191 +0,0 @@
- Semantics and Behavior of Local Atomic Operations
-
- Mathieu Desnoyers
-
-
- This document explains the purpose of the local atomic operations, how
-to implement them for any given architecture and shows how they can be used
-properly. It also stresses on the precautions that must be taken when reading
-those local variables across CPUs when the order of memory writes matters.
-
-Note that local_t based operations are not recommended for general kernel use.
-Please use the this_cpu operations instead unless there is really a special purpose.
-Most uses of local_t in the kernel have been replaced by this_cpu operations.
-this_cpu operations combine the relocation with the local_t like semantics in
-a single instruction and yield more compact and faster executing code.
-
-
-* Purpose of local atomic operations
-
-Local atomic operations are meant to provide fast and highly reentrant per CPU
-counters. They minimize the performance cost of standard atomic operations by
-removing the LOCK prefix and memory barriers normally required to synchronize
-across CPUs.
-
-Having fast per CPU atomic counters is interesting in many cases : it does not
-require disabling interrupts to protect from interrupt handlers and it permits
-coherent counters in NMI handlers. It is especially useful for tracing purposes
-and for various performance monitoring counters.
-
-Local atomic operations only guarantee variable modification atomicity wrt the
-CPU which owns the data. Therefore, care must taken to make sure that only one
-CPU writes to the local_t data. This is done by using per cpu data and making
-sure that we modify it from within a preemption safe context. It is however
-permitted to read local_t data from any CPU : it will then appear to be written
-out of order wrt other memory writes by the owner CPU.
-
-
-* Implementation for a given architecture
-
-It can be done by slightly modifying the standard atomic operations : only
-their UP variant must be kept. It typically means removing LOCK prefix (on
-i386 and x86_64) and any SMP synchronization barrier. If the architecture does
-not have a different behavior between SMP and UP, including asm-generic/local.h
-in your architecture's local.h is sufficient.
-
-The local_t type is defined as an opaque signed long by embedding an
-atomic_long_t inside a structure. This is made so a cast from this type to a
-long fails. The definition looks like :
-
-typedef struct { atomic_long_t a; } local_t;
-
-
-* Rules to follow when using local atomic operations
-
-- Variables touched by local ops must be per cpu variables.
-- _Only_ the CPU owner of these variables must write to them.
-- This CPU can use local ops from any context (process, irq, softirq, nmi, ...)
- to update its local_t variables.
-- Preemption (or interrupts) must be disabled when using local ops in
- process context to make sure the process won't be migrated to a
- different CPU between getting the per-cpu variable and doing the
- actual local op.
-- When using local ops in interrupt context, no special care must be
- taken on a mainline kernel, since they will run on the local CPU with
- preemption already disabled. I suggest, however, to explicitly
- disable preemption anyway to make sure it will still work correctly on
- -rt kernels.
-- Reading the local cpu variable will provide the current copy of the
- variable.
-- Reads of these variables can be done from any CPU, because updates to
- "long", aligned, variables are always atomic. Since no memory
- synchronization is done by the writer CPU, an outdated copy of the
- variable can be read when reading some _other_ cpu's variables.
-
-
-* How to use local atomic operations
-
-#include <linux/percpu.h>
-#include <asm/local.h>
-
-static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
-
-
-* Counting
-
-Counting is done on all the bits of a signed long.
-
-In preemptible context, use get_cpu_var() and put_cpu_var() around local atomic
-operations : it makes sure that preemption is disabled around write access to
-the per cpu variable. For instance :
-
- local_inc(&get_cpu_var(counters));
- put_cpu_var(counters);
-
-If you are already in a preemption-safe context, you can use
-this_cpu_ptr() instead.
-
- local_inc(this_cpu_ptr(&counters));
-
-
-
-* Reading the counters
-
-Those local counters can be read from foreign CPUs to sum the count. Note that
-the data seen by local_read across CPUs must be considered to be out of order
-relatively to other memory writes happening on the CPU that owns the data.
-
- long sum = 0;
- for_each_online_cpu(cpu)
- sum += local_read(&per_cpu(counters, cpu));
-
-If you want to use a remote local_read to synchronize access to a resource
-between CPUs, explicit smp_wmb() and smp_rmb() memory barriers must be used
-respectively on the writer and the reader CPUs. It would be the case if you use
-the local_t variable as a counter of bytes written in a buffer : there should
-be a smp_wmb() between the buffer write and the counter increment and also a
-smp_rmb() between the counter read and the buffer read.
-
-
-Here is a sample module which implements a basic per cpu counter using local.h.
-
---- BEGIN ---
-/* test-local.c
- *
- * Sample module for local.h usage.
- */
-
-
-#include <asm/local.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-
-static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
-
-static struct timer_list test_timer;
-
-/* IPI called on each CPU. */
-static void test_each(void *info)
-{
- /* Increment the counter from a non preemptible context */
- printk("Increment on cpu %d\n", smp_processor_id());
- local_inc(this_cpu_ptr(&counters));
-
- /* This is what incrementing the variable would look like within a
- * preemptible context (it disables preemption) :
- *
- * local_inc(&get_cpu_var(counters));
- * put_cpu_var(counters);
- */
-}
-
-static void do_test_timer(unsigned long data)
-{
- int cpu;
-
- /* Increment the counters */
- on_each_cpu(test_each, NULL, 1);
- /* Read all the counters */
- printk("Counters read from CPU %d\n", smp_processor_id());
- for_each_online_cpu(cpu) {
- printk("Read : CPU %d, count %ld\n", cpu,
- local_read(&per_cpu(counters, cpu)));
- }
- del_timer(&test_timer);
- test_timer.expires = jiffies + 1000;
- add_timer(&test_timer);
-}
-
-static int __init test_init(void)
-{
- /* initialize the timer that will increment the counter */
- init_timer(&test_timer);
- test_timer.function = do_test_timer;
- test_timer.expires = jiffies + 1;
- add_timer(&test_timer);
-
- return 0;
-}
-
-static void __exit test_exit(void)
-{
- del_timer_sync(&test_timer);
-}
-
-module_init(test_init);
-module_exit(test_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Local Atomic Ops");
---- END ---
diff --git a/Documentation/lockup-watchdogs.txt b/Documentation/lockup-watchdogs.txt
index 4a6e33e1af61..c8b8378513d6 100644
--- a/Documentation/lockup-watchdogs.txt
+++ b/Documentation/lockup-watchdogs.txt
@@ -11,7 +11,7 @@ details), without giving other tasks a chance to run. The current
stack trace is displayed upon detection and, by default, the system
will stay locked up. Alternatively, the kernel can be configured to
panic; a sysctl, "kernel.softlockup_panic", a kernel parameter,
-"softlockup_panic" (see "Documentation/kernel-parameters.txt" for
+"softlockup_panic" (see "Documentation/admin-guide/kernel-parameters.rst" for
details), and a compile option, "BOOTPARAM_SOFTLOCKUP_PANIC", are
provided for this.
@@ -23,7 +23,7 @@ upon detection and the system will stay locked up unless the default
behavior is changed, which can be done through a sysctl,
'hardlockup_panic', a compile time knob, "BOOTPARAM_HARDLOCKUP_PANIC",
and a kernel parameter, "nmi_watchdog"
-(see "Documentation/kernel-parameters.txt" for details).
+(see "Documentation/admin-guide/kernel-parameters.rst" for details).
The panic option can be used in combination with panic_timeout (this
timeout is set through the confusingly named "kernel.panic" sysctl),
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index eaf32a1fd0b1..79d21246c75a 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -139,7 +139,7 @@ follows:
PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=-2
Authoritative information can be found in
-"Documentation/kernel-parameters.txt".
+"Documentation/admin-guide/kernel-parameters.rst".
2.2) ro, rw
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
deleted file mode 100644
index 28befed9f610..000000000000
--- a/Documentation/magic-number.txt
+++ /dev/null
@@ -1,158 +0,0 @@
-This file is a registry of magic numbers which are in use. When you
-add a magic number to a structure, you should also add it to this
-file, since it is best if the magic numbers used by various structures
-are unique.
-
-It is a *very* good idea to protect kernel data structures with magic
-numbers. This allows you to check at run time whether (a) a structure
-has been clobbered, or (b) you've passed the wrong structure to a
-routine. This last is especially useful --- particularly when you are
-passing pointers to structures via a void * pointer. The tty code,
-for example, does this frequently to pass driver-specific and line
-discipline-specific structures back and forth.
-
-The way to use magic numbers is to declare then at the beginning of
-the structure, like so:
-
-struct tty_ldisc {
- int magic;
- ...
-};
-
-Please follow this discipline when you are adding future enhancements
-to the kernel! It has saved me countless hours of debugging,
-especially in the screwy cases where an array has been overrun and
-structures following the array have been overwritten. Using this
-discipline, these cases get detected quickly and safely.
-
- Theodore Ts'o
- 31 Mar 94
-
-The magic table is current to Linux 2.1.55.
-
- Michael Chastain
- <mailto:mec@shout.net>
- 22 Sep 1997
-
-Now it should be up to date with Linux 2.1.112. Because
-we are in feature freeze time it is very unlikely that
-something will change before 2.2.x. The entries are
-sorted by number field.
-
- Krzysztof G. Baranowski
- <mailto: kgb@knm.org.pl>
- 29 Jul 1998
-
-Updated the magic table to Linux 2.5.45. Right over the feature freeze,
-but it is possible that some new magic numbers will sneak into the
-kernel before 2.6.x yet.
-
- Petr Baudis
- <pasky@ucw.cz>
- 03 Nov 2002
-
-Updated the magic table to Linux 2.5.74.
-
- Fabian Frederick
- <ffrederick@users.sourceforge.net>
- 09 Jul 2003
-
-
-Magic Name Number Structure File
-===========================================================================
-PG_MAGIC 'P' pg_{read,write}_hdr include/linux/pg.h
-CMAGIC 0x0111 user include/linux/a.out.h
-MKISS_DRIVER_MAGIC 0x04bf mkiss_channel drivers/net/mkiss.h
-HDLC_MAGIC 0x239e n_hdlc drivers/char/n_hdlc.c
-APM_BIOS_MAGIC 0x4101 apm_user arch/x86/kernel/apm_32.c
-CYCLADES_MAGIC 0x4359 cyclades_port include/linux/cyclades.h
-DB_MAGIC 0x4442 fc_info drivers/net/iph5526_novram.c
-DL_MAGIC 0x444d fc_info drivers/net/iph5526_novram.c
-FASYNC_MAGIC 0x4601 fasync_struct include/linux/fs.h
-FF_MAGIC 0x4646 fc_info drivers/net/iph5526_novram.c
-ISICOM_MAGIC 0x4d54 isi_port include/linux/isicom.h
-PTY_MAGIC 0x5001 drivers/char/pty.c
-PPP_MAGIC 0x5002 ppp include/linux/if_pppvar.h
-SERIAL_MAGIC 0x5301 async_struct include/linux/serial.h
-SSTATE_MAGIC 0x5302 serial_state include/linux/serial.h
-SLIP_MAGIC 0x5302 slip drivers/net/slip.h
-STRIP_MAGIC 0x5303 strip drivers/net/strip.c
-X25_ASY_MAGIC 0x5303 x25_asy drivers/net/x25_asy.h
-SIXPACK_MAGIC 0x5304 sixpack drivers/net/hamradio/6pack.h
-AX25_MAGIC 0x5316 ax_disp drivers/net/mkiss.h
-TTY_MAGIC 0x5401 tty_struct include/linux/tty.h
-MGSL_MAGIC 0x5401 mgsl_info drivers/char/synclink.c
-TTY_DRIVER_MAGIC 0x5402 tty_driver include/linux/tty_driver.h
-MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c
-TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h
-USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h
-FULL_DUPLEX_MAGIC 0x6969 drivers/net/ethernet/dec/tulip/de2104x.c
-USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c
-RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c
-USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h
-CG_MAGIC 0x00090255 ufs_cylinder_group include/linux/ufs_fs.h
-RPORT_MAGIC 0x00525001 r_port drivers/char/rocket_int.h
-LSEMAGIC 0x05091998 lse drivers/fc4/fc.c
-GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str drivers/scsi/gdth_ioctl.h
-RIEBL_MAGIC 0x09051990 drivers/net/atarilance.c
-NBD_REQUEST_MAGIC 0x12560953 nbd_request include/linux/nbd.h
-RED_MAGIC2 0x170fc2a5 (any) mm/slab.c
-BAYCOM_MAGIC 0x19730510 baycom_state drivers/net/baycom_epp.c
-ISDN_X25IFACE_MAGIC 0x1e75a2b9 isdn_x25iface_proto_data
- drivers/isdn/isdn_x25iface.h
-ECP_MAGIC 0x21504345 cdkecpsig include/linux/cdk.h
-LSOMAGIC 0x27091997 lso drivers/fc4/fc.c
-LSMAGIC 0x2a3b4d2a ls drivers/fc4/fc.c
-WANPIPE_MAGIC 0x414C4453 sdla_{dump,exec} include/linux/wanpipe.h
-CS_CARD_MAGIC 0x43525553 cs_card sound/oss/cs46xx.c
-LABELCL_MAGIC 0x4857434c labelcl_info_s include/asm/ia64/sn/labelcl.h
-ISDN_ASYNC_MAGIC 0x49344C01 modem_info include/linux/isdn.h
-CTC_ASYNC_MAGIC 0x49344C01 ctc_tty_info drivers/s390/net/ctctty.c
-ISDN_NET_MAGIC 0x49344C02 isdn_net_local_s drivers/isdn/i4l/isdn_net_lib.h
-SAVEKMSG_MAGIC2 0x4B4D5347 savekmsg arch/*/amiga/config.c
-CS_STATE_MAGIC 0x4c4f4749 cs_state sound/oss/cs46xx.c
-SLAB_C_MAGIC 0x4f17a36d kmem_cache mm/slab.c
-COW_MAGIC 0x4f4f4f4d cow_header_v1 arch/um/drivers/ubd_user.c
-I810_CARD_MAGIC 0x5072696E i810_card sound/oss/i810_audio.c
-TRIDENT_CARD_MAGIC 0x5072696E trident_card sound/oss/trident.c
-ROUTER_MAGIC 0x524d4157 wan_device [in wanrouter.h pre 3.9]
-SAVEKMSG_MAGIC1 0x53415645 savekmsg arch/*/amiga/config.c
-GDA_MAGIC 0x58464552 gda arch/mips/include/asm/sn/gda.h
-RED_MAGIC1 0x5a2cf071 (any) mm/slab.c
-EEPROM_MAGIC_VALUE 0x5ab478d2 lanai_dev drivers/atm/lanai.c
-HDLCDRV_MAGIC 0x5ac6e778 hdlcdrv_state include/linux/hdlcdrv.h
-PCXX_MAGIC 0x5c6df104 channel drivers/char/pcxx.h
-KV_MAGIC 0x5f4b565f kernel_vars_s arch/mips/include/asm/sn/klkernvars.h
-I810_STATE_MAGIC 0x63657373 i810_state sound/oss/i810_audio.c
-TRIDENT_STATE_MAGIC 0x63657373 trient_state sound/oss/trident.c
-M3_CARD_MAGIC 0x646e6f50 m3_card sound/oss/maestro3.c
-FW_HEADER_MAGIC 0x65726F66 fw_header drivers/atm/fore200e.h
-SLOT_MAGIC 0x67267321 slot drivers/hotplug/cpqphp.h
-SLOT_MAGIC 0x67267322 slot drivers/hotplug/acpiphp.h
-LO_MAGIC 0x68797548 nbd_device include/linux/nbd.h
-OPROFILE_MAGIC 0x6f70726f super_block drivers/oprofile/oprofilefs.h
-M3_STATE_MAGIC 0x734d724d m3_state sound/oss/maestro3.c
-VMALLOC_MAGIC 0x87654320 snd_alloc_track sound/core/memory.c
-KMALLOC_MAGIC 0x87654321 snd_alloc_track sound/core/memory.c
-PWC_MAGIC 0x89DC10AB pwc_device drivers/usb/media/pwc.h
-NBD_REPLY_MAGIC 0x96744668 nbd_reply include/linux/nbd.h
-ENI155_MAGIC 0xa54b872d midway_eprom drivers/atm/eni.h
-CODA_MAGIC 0xC0DAC0DA coda_file_info fs/coda/coda_fs_i.h
-DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram drivers/scsi/gdth.h
-YAM_MAGIC 0xF10A7654 yam_port drivers/net/hamradio/yam.c
-CCB_MAGIC 0xf2691ad2 ccb drivers/scsi/ncr53c8xx.c
-QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry drivers/scsi/arm/queue.c
-QUEUE_MAGIC_USED 0xf7e1cc33 queue_entry drivers/scsi/arm/queue.c
-HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
-NMI_MAGIC 0x48414d4d455201 nmi_s arch/mips/include/asm/sn/nmi.h
-
-Note that there are also defined special per-driver magic numbers in sound
-memory management. See include/sound/sndmagic.h for complete list of them. Many
-OSS sound drivers have their magic numbers constructed from the soundcard PCI
-ID - these are not listed here as well.
-
-IrDA subsystem also uses large number of own magic numbers, see
-include/net/irda/irda.h for a complete list of them.
-
-HFS is another larger user of magic numbers - you can find them in
-fs/hfs/hfs.h.
diff --git a/Documentation/media/.gitignore b/Documentation/media/.gitignore
new file mode 100644
index 000000000000..08b21de3ef94
--- /dev/null
+++ b/Documentation/media/.gitignore
@@ -0,0 +1,3 @@
+*.pdf
+# Files generated from *.dot
+uapi/v4l/pipeline.svg
diff --git a/Documentation/media/Makefile b/Documentation/media/Makefile
index a7fb35291f6c..4d8e2ff378c4 100644
--- a/Documentation/media/Makefile
+++ b/Documentation/media/Makefile
@@ -1,23 +1,60 @@
-# Generate the *.h.rst files from uAPI headers
+# Rules to convert DOT and SVG to Sphinx images
+
+SRC_DIR=$(srctree)/Documentation/media
+
+DOTS = \
+ uapi/v4l/pipeline.dot \
+
+IMAGES = \
+ typical_media_device.svg \
+ uapi/dvb/dvbstb.svg \
+ uapi/v4l/bayer.svg \
+ uapi/v4l/constraints.svg \
+ uapi/v4l/crop.svg \
+ uapi/v4l/fieldseq_bt.svg \
+ uapi/v4l/fieldseq_tb.svg \
+ uapi/v4l/nv12mt.svg \
+ uapi/v4l/nv12mt_example.svg \
+ uapi/v4l/pipeline.svg \
+ uapi/v4l/selection.svg \
+ uapi/v4l/subdev-image-processing-full.svg \
+ uapi/v4l/subdev-image-processing-scaling-multi-source.svg \
+ uapi/v4l/subdev-image-processing-crop.svg \
+ uapi/v4l/vbi_525.svg \
+ uapi/v4l/vbi_625.svg \
+ uapi/v4l/vbi_hsync.svg \
+
+DOTTGT := $(patsubst %.dot,%.svg,$(DOTS))
+IMGDOT := $(patsubst %,$(SRC_DIR)/%,$(DOTTGT))
+
+IMGTGT := $(patsubst %.svg,%.pdf,$(IMAGES))
+IMGPDF := $(patsubst %,$(SRC_DIR)/%,$(IMGTGT))
+
+cmd = $(echo-cmd) $(cmd_$(1))
+
+quiet_cmd_genpdf = GENPDF $2
+ cmd_genpdf = convert $2 $3
+
+quiet_cmd_gendot = DOT $2
+ cmd_gendot = dot -Tsvg $2 > $3
+
+%.pdf: %.svg
+ @$(call cmd,genpdf,$<,$@)
+
+%.svg: %.dot
+ @$(call cmd,gendot,$<,$@)
+
+# Rules to convert a .h file to inline RST documentation
PARSER = $(srctree)/Documentation/sphinx/parse-headers.pl
UAPI = $(srctree)/include/uapi/linux
KAPI = $(srctree)/include/linux
-SRC_DIR=$(srctree)/Documentation/media
FILES = audio.h.rst ca.h.rst dmx.h.rst frontend.h.rst net.h.rst video.h.rst \
videodev2.h.rst media.h.rst cec.h.rst lirc.h.rst
TARGETS := $(addprefix $(BUILDDIR)/, $(FILES))
-.PHONY: all
-all: $(BUILDDIR) ${TARGETS}
-
-$(BUILDDIR):
- $(Q)mkdir -p $@
-
-# Rule to convert a .h file to inline RST documentation
-
gen_rst = \
echo ${PARSER} $< $@ $(SRC_DIR)/$(notdir $@).exceptions; \
${PARSER} $< $@ $(SRC_DIR)/$(notdir $@).exceptions
@@ -57,5 +94,18 @@ $(BUILDDIR)/cec.h.rst: ${KAPI}/cec.h ${PARSER} $(SRC_DIR)/cec.h.rst.exceptions
$(BUILDDIR)/lirc.h.rst: ${UAPI}/lirc.h ${PARSER} $(SRC_DIR)/lirc.h.rst.exceptions
@$($(quiet)gen_rst)
-cleandocs:
- -rm ${TARGETS}
+# Media build rules
+
+.PHONY: all html epub xml latex
+
+all: $(IMGDOT) $(BUILDDIR) ${TARGETS}
+html: all
+epub: all
+xml: all
+latex: $(IMGPDF) all
+
+clean:
+ -rm -f $(DOTTGT) $(IMGTGT) ${TARGETS} 2>/dev/null
+
+$(BUILDDIR):
+ $(Q)mkdir -p $@
diff --git a/Documentation/media/dvb-drivers/intro.rst b/Documentation/media/dvb-drivers/intro.rst
index 7681835ea76d..d6eeb2708b9b 100644
--- a/Documentation/media/dvb-drivers/intro.rst
+++ b/Documentation/media/dvb-drivers/intro.rst
@@ -1,5 +1,5 @@
-Introdution
-===========
+Introduction
+============
The main development site and GIT repository for these
drivers is https://linuxtv.org.
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
index e347a3e7bdef..7f8f0af620ce 100644
--- a/Documentation/media/index.rst
+++ b/Documentation/media/index.rst
@@ -1,11 +1,6 @@
Linux Media Subsystem Documentation
===================================
-.. Sphinx 1.4.x has a definition for DUrole that doesn't work on alltt blocks
-.. raw:: latex
-
- \renewcommand*{\DUrole}[2]{ #2 }
-
Contents:
.. toctree::
diff --git a/Documentation/media/intro.rst b/Documentation/media/intro.rst
index f6086c159772..8f7490c9a8ef 100644
--- a/Documentation/media/intro.rst
+++ b/Documentation/media/intro.rst
@@ -13,8 +13,8 @@ A typical media device hardware is shown at :ref:`typical_media_device`.
.. _typical_media_device:
-.. figure:: media_api_files/typical_media_device.*
- :alt: typical_media_device.svg
+.. figure:: typical_media_device.*
+ :alt: typical_media_device.pdf / typical_media_device.svg
:align: center
Typical Media Device
diff --git a/Documentation/media/media_api_files/typical_media_device.pdf b/Documentation/media/media_api_files/typical_media_device.pdf
deleted file mode 100644
index d000d802b20f..000000000000
--- a/Documentation/media/media_api_files/typical_media_device.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/media_api_files/typical_media_device.svg b/Documentation/media/typical_media_device.svg
index f0c82f72c4b6..f0c82f72c4b6 100644
--- a/Documentation/media/media_api_files/typical_media_device.svg
+++ b/Documentation/media/typical_media_device.svg
diff --git a/Documentation/media/uapi/dvb/dvbstb.svg b/Documentation/media/uapi/dvb/dvbstb.svg
new file mode 100644
index 000000000000..c4140fb518af
--- /dev/null
+++ b/Documentation/media/uapi/dvb/dvbstb.svg
@@ -0,0 +1,651 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.2"
+ width="237.70221mm"
+ height="126.28221mm"
+ viewBox="0 0 23770.221 12628.221"
+ preserveAspectRatio="xMidYMid"
+ xml:space="preserve"
+ id="svg2"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="dvbstb.svg"
+ style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+ id="metadata519"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview517"
+ showgrid="false"
+ inkscape:zoom="1.0818519"
+ inkscape:cx="411.31718"
+ inkscape:cy="274.87517"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" /><defs
+ class="ClipPathGroup"
+ id="defs4" /><defs
+ id="defs9" /><defs
+ id="defs90" /><defs
+ id="defs113" /><defs
+ class="TextShapeIndex"
+ id="defs124" /><defs
+ class="EmbeddedBulletChars"
+ id="defs128" /><defs
+ class="TextEmbeddedBitmaps"
+ id="defs157" /><rect
+ class="BoundingBox"
+ x="5355.1108"
+ y="13.111"
+ width="18403"
+ height="9603"
+ id="rect197"
+ style="fill:none;stroke:none" /><path
+ d="m 14556.111,9614.111 -9200,0 0,-9600 18400,0 0,9600 -9200,0 z"
+ id="path199"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 14556.111,9614.111 -9200,0 0,-9600 18400,0 0,9600 -9200,0 z"
+ id="path201"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><rect
+ class="BoundingBox"
+ x="13.111"
+ y="4013.1111"
+ width="4544"
+ height="2403"
+ id="rect206"
+ style="fill:none;stroke:none" /><path
+ d="m 2285.111,6414.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path208"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 2285.111,6414.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path210"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text212"
+ y="-4585.8892"
+ x="-2443.8889"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan214"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="1281.111"
+ y="5435.1108"
+ id="tspan216"><tspan
+ id="tspan218"
+         style="fill:#000000;stroke:none">Antenna</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="6213.1108"
+ y="1813.111"
+ width="4544"
+ height="2403"
+ id="rect223"
+ style="fill:none;stroke:none" /><path
+ d="m 8485.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path225"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 8485.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path227"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text229"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan231"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="7217.1108"
+ y="3235.1111"
+ id="tspan233"><tspan
+ id="tspan235"
+ style="fill:#000000;stroke:none">Frontend</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="12113.111"
+ y="1813.111"
+ width="4544"
+ height="2403"
+ id="rect240"
+ style="fill:none;stroke:none" /><path
+ d="m 14385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path242"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 14385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path244"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text246"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan248"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="13944.111"
+ y="3235.1111"
+ id="tspan250"><tspan
+ id="tspan252"
+ style="fill:#000000;stroke:none">CA</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="18113.111"
+ y="1813.111"
+ width="4544"
+ height="2403"
+ id="rect257"
+ style="fill:none;stroke:none" /><path
+ d="m 20385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path259"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 20385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path261"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text263"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan265"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="19384.111"
+ y="3235.1111"
+ id="tspan267"><tspan
+ id="tspan269"
+ style="fill:#000000;stroke:none">Demux</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="6113.1108"
+ y="5813.1108"
+ width="4544"
+ height="2403"
+ id="rect274"
+ style="fill:none;stroke:none" /><path
+ d="m 8385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path276"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 8385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path278"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text280"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan282"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="7733.1108"
+ y="7235.1108"
+ id="tspan284"><tspan
+ id="tspan286"
+ style="fill:#000000;stroke:none">SEC</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="12213.111"
+ y="5813.1108"
+ width="4544"
+ height="2403"
+ id="rect291"
+ style="fill:none;stroke:none" /><path
+ d="m 14485.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path293"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 14485.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path295"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text297"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan299"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="13676.111"
+ y="7235.1108"
+ id="tspan301"><tspan
+ id="tspan303"
+ style="fill:#000000;stroke:none">Audio</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="18113.111"
+ y="5813.1108"
+ width="4544"
+ height="2403"
+ id="rect308"
+ style="fill:none;stroke:none" /><path
+ d="m 20385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path310"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 20385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path312"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text314"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan316"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="19583.111"
+ y="7235.1108"
+ id="tspan318"><tspan
+ id="tspan320"
+ style="fill:#000000;stroke:none">Video</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="15213.111"
+ y="10213.111"
+ width="4544"
+ height="2403"
+ id="rect325"
+ style="fill:none;stroke:none" /><path
+ d="m 17485.111,12614.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path327"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 17485.111,12614.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
+ id="path329"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><text
+ class="TextShape"
+ id="text331"
+ x="-2443.8889"
+ y="-4585.8892"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan333"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="17076.111"
+ y="11635.111"
+ id="tspan335"><tspan
+ id="tspan337"
+ style="fill:#000000;stroke:none">TV</tspan></tspan></tspan></text>
+<rect
+ class="BoundingBox"
+ x="4555.1108"
+ y="3014.1111"
+ width="1661"
+ height="2202"
+ id="rect342"
+ style="fill:none;stroke:none" /><path
+ d="m 4556.111,5214.111 1400,-1857"
+ id="path344"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 6215.111,3014.111 -391,269 240,181 151,-450 z"
+ id="path346"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="4555.1108"
+ y="5213.1108"
+ width="1561"
+ height="1802"
+ id="rect351"
+ style="fill:none;stroke:none" /><path
+ d="m 4556.111,5214.111 1277,1475"
+ id="path353"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 6115.111,7014.111 -181,-438 -227,196 408,242 z"
+ id="path355"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="10755.111"
+ y="2864.1111"
+ width="1361"
+ height="301"
+ id="rect360"
+ style="fill:none;stroke:none" /><path
+ d="m 10756.111,3014.111 929,0"
+ id="path362"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 12115.111,3014.111 -450,-150 0,300 450,-150 z"
+ id="path364"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="16655.111"
+ y="2864.1111"
+ width="1461"
+ height="301"
+ id="rect369"
+ style="fill:none;stroke:none" /><path
+ d="m 16656.111,3014.111 1029,0"
+ id="path371"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18115.111,3014.111 -450,-150 0,300 450,-150 z"
+ id="path373"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="20235.111"
+ y="4213.1108"
+ width="301"
+ height="1602"
+ id="rect378"
+ style="fill:none;stroke:none" /><path
+ d="m 20385.111,4214.111 0,1170"
+ id="path380"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 20385.111,5814.111 150,-450 -300,0 150,450 z"
+ id="path382"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="17485.111"
+ y="8213.1113"
+ width="2902"
+ height="2002"
+ id="rect387"
+ style="fill:none;stroke:none" /><path
+ d="m 20385.111,8214.111 -2546,1756"
+ id="path389"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17485.111,10214.111 456,-132 -171,-247 -285,379 z"
+ id="path391"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="14484.111"
+ y="8213.1113"
+ width="3002"
+ height="2002"
+ id="rect396"
+ style="fill:none;stroke:none" /><path
+ d="m 14485.111,8214.111 2642,1761"
+ id="path398"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17485.111,10214.111 -291,-374 -167,249 458,125 z"
+ id="path400"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /><rect
+ class="BoundingBox"
+ x="14485.111"
+ y="4213.1108"
+ width="5902"
+ height="1629"
+ id="rect405"
+ style="fill:none;stroke:none" /><path
+ d="m 20385.111,4214.111 -51,14"
+ id="path407"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 20283.111,4242.111 -52,14"
+ id="path409"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 20180.111,4270.111 -51,13"
+ id="path411"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 20078.111,4297.111 -52,14"
+ id="path413"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19975.111,4325.111 -51,14"
+ id="path415"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19873.111,4353.111 -52,14"
+ id="path417"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19770.111,4381.111 -51,14"
+ id="path419"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19668.111,4409.111 -52,13"
+ id="path421"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19565.111,4436.111 -51,14"
+ id="path423"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19463.111,4464.111 -52,14"
+ id="path425"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19360.111,4492.111 -51,14"
+ id="path427"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19258.111,4520.111 -52,14"
+ id="path429"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19155.111,4547.111 -51,14"
+ id="path431"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 19053.111,4575.111 -52,14"
+ id="path433"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18950.111,4603.111 -51,14"
+ id="path435"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18848.111,4631.111 -51,14"
+ id="path437"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18745.111,4659.111 -51,14"
+ id="path439"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18643.111,4686.111 -51,14"
+ id="path441"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18540.111,4714.111 -51,14"
+ id="path443"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18438.111,4742.111 -51,14"
+ id="path445"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18335.111,4770.111 -51,14"
+ id="path447"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18233.111,4798.111 -51,14"
+ id="path449"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18130.111,4825.111 -51,14"
+ id="path451"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 18028.111,4853.111 -51,14"
+ id="path453"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17925.111,4881.111 -51,14"
+ id="path455"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17823.111,4909.111 -51,14"
+ id="path457"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17720.111,4937.111 -51,13"
+ id="path459"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17618.111,4964.111 -51,14"
+ id="path461"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17516.111,4992.111 -52,14"
+ id="path463"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17413.111,5020.111 -51,14"
+ id="path465"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17311.111,5048.111 -52,14"
+ id="path467"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17208.111,5076.111 -51,13"
+ id="path469"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17106.111,5103.111 -52,14"
+ id="path471"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 17003.111,5131.111 -51,14"
+ id="path473"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16901.111,5159.111 -52,14"
+ id="path475"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16798.111,5187.111 -51,14"
+ id="path477"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16696.111,5214.111 -52,14"
+ id="path479"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16593.111,5242.111 -51,14"
+ id="path481"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16491.111,5270.111 -52,14"
+ id="path483"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16388.111,5298.111 -51,14"
+ id="path485"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16286.111,5326.111 -52,14"
+ id="path487"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16183.111,5353.111 -51,14"
+ id="path489"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 16081.111,5381.111 -51,14"
+ id="path491"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15978.111,5409.111 -51,14"
+ id="path493"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15876.111,5437.111 -51,14"
+ id="path495"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15773.111,5465.111 -51,14"
+ id="path497"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15671.111,5492.111 -51,14"
+ id="path499"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15568.111,5520.111 -51,14"
+ id="path501"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15466.111,5548.111 -51,14"
+ id="path503"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15363.111,5576.111 -51,14"
+ id="path505"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15261.111,5604.111 -51,13"
+ id="path507"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15158.111,5631.111 -51,14"
+ id="path509"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 15056.111,5659.111 -51,14"
+ id="path511"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 14953.111,5687.111 -51,14"
+ id="path513"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000" /><path
+ d="m 14485.111,5814.111 474,27 -79,-290 -395,263 z"
+ id="path515"
+ inkscape:connector-curvature="0"
+ style="fill:#000000;stroke:none" /></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/dvb/intro.rst b/Documentation/media/uapi/dvb/intro.rst
index b61081d00a9f..2ed5c23102b4 100644
--- a/Documentation/media/uapi/dvb/intro.rst
+++ b/Documentation/media/uapi/dvb/intro.rst
@@ -55,8 +55,8 @@ Overview
.. _stb_components:
-.. figure:: intro_files/dvbstb.*
- :alt: dvbstb.pdf / dvbstb.png
+.. figure:: dvbstb.*
+ :alt: dvbstb.pdf / dvbstb.svg
:align: center
Components of a DVB card/STB
diff --git a/Documentation/media/uapi/dvb/intro_files/dvbstb.pdf b/Documentation/media/uapi/dvb/intro_files/dvbstb.pdf
deleted file mode 100644
index 0fa75d90c3eb..000000000000
--- a/Documentation/media/uapi/dvb/intro_files/dvbstb.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/dvb/intro_files/dvbstb.png b/Documentation/media/uapi/dvb/intro_files/dvbstb.png
deleted file mode 100644
index 9b8f372e7afd..000000000000
--- a/Documentation/media/uapi/dvb/intro_files/dvbstb.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/bayer.svg b/Documentation/media/uapi/v4l/bayer.svg
new file mode 100644
index 000000000000..fbd4cfb5e6bf
--- /dev/null
+++ b/Documentation/media/uapi/v4l/bayer.svg
@@ -0,0 +1,984 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.2"
+ width="164.15334mm"
+ height="46.771107mm"
+ viewBox="0 0 16415.333 4677.1107"
+ preserveAspectRatio="xMidYMid"
+ xml:space="preserve"
+ id="svg2"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="bayer.svg"
+ style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+ id="metadata652"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview650"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="2.4000866"
+ inkscape:cx="290.82284"
+ inkscape:cy="82.862197"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" /><defs
+ class="ClipPathGroup"
+ id="defs4" /><defs
+ id="defs9" /><defs
+ id="defs82" /><defs
+ id="defs105" /><defs
+ class="TextShapeIndex"
+ id="defs116" /><defs
+ class="EmbeddedBulletChars"
+ id="defs120" /><defs
+ class="TextEmbeddedBitmaps"
+ id="defs149" /><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g186"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id6"><rect
+ class="BoundingBox"
+ x="3299"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect189"
+ style="fill:none;stroke:none" /><path
+ d="m 3950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path191"
+ inkscape:connector-curvature="0"
+ style="fill:#0000ff;stroke:none" /><path
+ d="m 3950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path193"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text195"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan197"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="3739"
+ y="4021"
+ id="tspan199"><tspan
+ id="tspan201"
+ style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g203"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id7"><rect
+ class="BoundingBox"
+ x="4599"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect206"
+ style="fill:none;stroke:none" /><path
+ d="m 5250,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path208"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 5250,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path210"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text212"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan214"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="5003"
+ y="4021"
+ id="tspan216"><tspan
+ id="tspan218"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g220"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id8"><rect
+ class="BoundingBox"
+ x="3299"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect223"
+ style="fill:none;stroke:none" /><path
+ d="m 3950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path225"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 3950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path227"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text229"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan231"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="3703"
+ y="5221"
+ id="tspan233"><tspan
+ id="tspan235"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g237"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id9"><rect
+ class="BoundingBox"
+ x="4599"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect240"
+ style="fill:none;stroke:none" /><path
+ d="m 5250,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path242"
+ inkscape:connector-curvature="0"
+ style="fill:#ff0000;stroke:none" /><path
+ d="m 5250,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path244"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text246"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan248"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="5022"
+ y="5221"
+ id="tspan250"><tspan
+ id="tspan252"
+ style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g254"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id10"><rect
+ class="BoundingBox"
+ x="5999"
+ y="3299"
+ width="1003"
+ height="1003"
+ id="rect257"
+ style="fill:none;stroke:none" /><path
+ d="m 6500,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path259"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#0000ff" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g261"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id11"><rect
+ class="BoundingBox"
+ x="4699"
+ y="5699"
+ width="1003"
+ height="1003"
+ id="rect264"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path266"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.TextShape"
+ id="g268"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id12"><rect
+ class="BoundingBox"
+ x="4000"
+ y="6900"
+ width="2374"
+ height="963"
+ id="rect271"
+ style="fill:none;stroke:none" /><text
+ class="TextShape"
+ id="text273"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan275"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="4250"
+ y="7601"
+ id="tspan277"><tspan
+ id="tspan279"
+ style="fill:#000000;stroke:none">BGGR</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g281"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id13"><rect
+ class="BoundingBox"
+ x="8799"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect284"
+ style="fill:none;stroke:none" /><path
+ d="m 9450,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path286"
+ inkscape:connector-curvature="0"
+ style="fill:#0000ff;stroke:none" /><path
+ d="m 9450,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path288"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text290"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan292"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="9239"
+ y="4021"
+ id="tspan294"><tspan
+ id="tspan296"
+ style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g298"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id14"><rect
+ class="BoundingBox"
+ x="7499"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect301"
+ style="fill:none;stroke:none" /><path
+ d="m 8150,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path303"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 8150,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path305"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text307"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan309"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="7903"
+ y="4021"
+ id="tspan311"><tspan
+ id="tspan313"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g315"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id15"><rect
+ class="BoundingBox"
+ x="8799"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect318"
+ style="fill:none;stroke:none" /><path
+ d="m 9450,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path320"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 9450,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path322"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text324"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan326"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="9203"
+ y="5221"
+ id="tspan328"><tspan
+ id="tspan330"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g332"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id16"><rect
+ class="BoundingBox"
+ x="7499"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect335"
+ style="fill:none;stroke:none" /><path
+ d="m 8150,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path337"
+ inkscape:connector-curvature="0"
+ style="fill:#ff0000;stroke:none" /><path
+ d="m 8150,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path339"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text341"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan343"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="7922"
+ y="5221"
+ id="tspan345"><tspan
+ id="tspan347"
+ style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.TextShape"
+ id="g349"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id17"><rect
+ class="BoundingBox"
+ x="8200"
+ y="6900"
+ width="2374"
+ height="963"
+ id="rect352"
+ style="fill:none;stroke:none" /><text
+ class="TextShape"
+ id="text354"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan356"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="8450"
+ y="7601"
+ id="tspan358"><tspan
+ id="tspan360"
+ style="fill:#000000;stroke:none">GBRG</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g362"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id18"><rect
+ class="BoundingBox"
+ x="17299"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect365"
+ style="fill:none;stroke:none" /><path
+ d="m 17950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path367"
+ inkscape:connector-curvature="0"
+ style="fill:#0000ff;stroke:none" /><path
+ d="m 17950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path369"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text371"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan373"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="17739"
+ y="5221"
+ id="tspan375"><tspan
+ id="tspan377"
+ style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g379"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id19"><rect
+ class="BoundingBox"
+ x="17299"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect382"
+ style="fill:none;stroke:none" /><path
+ d="m 17950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path384"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 17950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path386"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text388"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan390"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="17703"
+ y="4021"
+ id="tspan392"><tspan
+ id="tspan394"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g396"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id20"><rect
+ class="BoundingBox"
+ x="15999"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect399"
+ style="fill:none;stroke:none" /><path
+ d="m 16650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path401"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 16650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path403"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text405"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan407"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="16403"
+ y="5221"
+ id="tspan409"><tspan
+ id="tspan411"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g413"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id21"><rect
+ class="BoundingBox"
+ x="15999"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect416"
+ style="fill:none;stroke:none" /><path
+ d="m 16650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path418"
+ inkscape:connector-curvature="0"
+ style="fill:#ff0000;stroke:none" /><path
+ d="m 16650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path420"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text422"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan424"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="16422"
+ y="4021"
+ id="tspan426"><tspan
+ id="tspan428"
+ style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.TextShape"
+ id="g430"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id22"><rect
+ class="BoundingBox"
+ x="16700"
+ y="6900"
+ width="2374"
+ height="963"
+ id="rect433"
+ style="fill:none;stroke:none" /><text
+ class="TextShape"
+ id="text435"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan437"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="16950"
+ y="7601"
+ id="tspan439"><tspan
+ id="tspan441"
+ style="fill:#000000;stroke:none">RGGB</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g443"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id23"><rect
+ class="BoundingBox"
+ x="11699"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect446"
+ style="fill:none;stroke:none" /><path
+ d="m 12350,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path448"
+ inkscape:connector-curvature="0"
+ style="fill:#0000ff;stroke:none" /><path
+ d="m 12350,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path450"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text452"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan454"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="12139"
+ y="5221"
+ id="tspan456"><tspan
+ id="tspan458"
+ style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g460"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id24"><rect
+ class="BoundingBox"
+ x="11699"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect463"
+ style="fill:none;stroke:none" /><path
+ d="m 12350,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path465"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 12350,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path467"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text469"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan471"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="12103"
+ y="4021"
+ id="tspan473"><tspan
+ id="tspan475"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g477"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id25"><rect
+ class="BoundingBox"
+ x="12999"
+ y="4399"
+ width="1303"
+ height="1203"
+ id="rect480"
+ style="fill:none;stroke:none" /><path
+ d="m 13650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path482"
+ inkscape:connector-curvature="0"
+ style="fill:#00cc00;stroke:none" /><path
+ d="m 13650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path484"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text486"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan488"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="13403"
+ y="5221"
+ id="tspan490"><tspan
+ id="tspan492"
+ style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g494"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id26"><rect
+ class="BoundingBox"
+ x="12999"
+ y="3199"
+ width="1303"
+ height="1203"
+ id="rect497"
+ style="fill:none;stroke:none" /><path
+ d="m 13650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path499"
+ inkscape:connector-curvature="0"
+ style="fill:#ff0000;stroke:none" /><path
+ d="m 13650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
+ id="path501"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text503"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan505"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="13422"
+ y="4021"
+ id="tspan507"><tspan
+ id="tspan509"
+ style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.TextShape"
+ id="g511"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id27"><rect
+ class="BoundingBox"
+ x="12400"
+ y="6900"
+ width="2374"
+ height="963"
+ id="rect514"
+ style="fill:none;stroke:none" /><text
+ class="TextShape"
+ id="text516"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan518"
+ style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="12650"
+ y="7601"
+ id="tspan520"><tspan
+ id="tspan522"
+ style="fill:#000000;stroke:none">GRBG</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g524"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id28"><rect
+ class="BoundingBox"
+ x="5999"
+ y="5699"
+ width="1003"
+ height="1003"
+ id="rect527"
+ style="fill:none;stroke:none" /><path
+ d="m 6500,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path529"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#0000ff" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g531"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id29"><rect
+ class="BoundingBox"
+ x="3399"
+ y="5699"
+ width="1003"
+ height="1003"
+ id="rect534"
+ style="fill:none;stroke:none" /><path
+ d="m 3900,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path536"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#0000ff" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g538"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id30"><rect
+ class="BoundingBox"
+ x="5999"
+ y="4499"
+ width="1003"
+ height="1003"
+ id="rect541"
+ style="fill:none;stroke:none" /><path
+ d="m 6500,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path543"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g545"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id31"><rect
+ class="BoundingBox"
+ x="7599"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect548"
+ style="fill:none;stroke:none" /><path
+ d="m 8100,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path550"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g552"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id32"><rect
+ class="BoundingBox"
+ x="10199"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect555"
+ style="fill:none;stroke:none" /><path
+ d="m 10700,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path557"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g559"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id33"><rect
+ class="BoundingBox"
+ x="8899"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect562"
+ style="fill:none;stroke:none" /><path
+ d="m 9400,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path564"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#0000ff" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g566"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id34"><rect
+ class="BoundingBox"
+ x="10199"
+ y="4499"
+ width="1003"
+ height="1003"
+ id="rect569"
+ style="fill:none;stroke:none" /><path
+ d="m 10700,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path571"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g573"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id35"><rect
+ class="BoundingBox"
+ x="10199"
+ y="3299"
+ width="1003"
+ height="1003"
+ id="rect576"
+ style="fill:none;stroke:none" /><path
+ d="m 10700,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path578"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g580"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id36"><rect
+ class="BoundingBox"
+ x="14399"
+ y="3299"
+ width="1003"
+ height="1003"
+ id="rect583"
+ style="fill:none;stroke:none" /><path
+ d="m 14900,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path585"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g587"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id37"><rect
+ class="BoundingBox"
+ x="14399"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect590"
+ style="fill:none;stroke:none" /><path
+ d="m 14900,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path592"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g594"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id38"><rect
+ class="BoundingBox"
+ x="11799"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect597"
+ style="fill:none;stroke:none" /><path
+ d="m 12300,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path599"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g601"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id39"><rect
+ class="BoundingBox"
+ x="14399"
+ y="4499"
+ width="1003"
+ height="1003"
+ id="rect604"
+ style="fill:none;stroke:none" /><path
+ d="m 14900,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path606"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#0000ff" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g608"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id40"><rect
+ class="BoundingBox"
+ x="13099"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect611"
+ style="fill:none;stroke:none" /><path
+ d="m 13600,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path613"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g615"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id41"><rect
+ class="BoundingBox"
+ x="16099"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect618"
+ style="fill:none;stroke:none" /><path
+ d="m 16600,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path620"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g622"
+ transform="translate(-3398.7778,-3185.889)"><g
+ id="id42"><rect
+ class="BoundingBox"
+ x="18799"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect625"
+ style="fill:none;stroke:none" /><path
+ d="m 19300,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path627"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g629"
+ transform="translate(-3398.7778,-3185.889)"><g
+ id="id43"><rect
+ class="BoundingBox"
+ x="18799"
+ y="3299"
+ width="1003"
+ height="1003"
+ id="rect632"
+ style="fill:none;stroke:none" /><path
+ d="m 19300,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path634"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g636"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id44"><rect
+ class="BoundingBox"
+ x="17399"
+ y="5799"
+ width="1003"
+ height="1003"
+ id="rect639"
+ style="fill:none;stroke:none" /><path
+ d="m 17900,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path641"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g643"
+ transform="translate(-3398.7778,-3185.889)"><g
+ id="id45"><rect
+ class="BoundingBox"
+ x="18799"
+ y="4499"
+ width="1003"
+ height="1003"
+ id="rect646"
+ style="fill:none;stroke:none" /><path
+ d="m 19300,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
+ id="path648"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#00cc00" /></g></g></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/constraints.svg b/Documentation/media/uapi/v4l/constraints.svg
new file mode 100644
index 000000000000..f710ee46b1f8
--- /dev/null
+++ b/Documentation/media/uapi/v4l/constraints.svg
@@ -0,0 +1,346 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.2"
+ width="249.00998mm"
+ height="143.00999mm"
+ viewBox="0 0 24900.998 14300.999"
+ preserveAspectRatio="xMidYMid"
+ xml:space="preserve"
+ id="svg2"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="constraints.svg"
+ style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+ id="metadata325"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview323"
+ showgrid="false"
+ inkscape:zoom="1.0818519"
+ inkscape:cx="270.29272"
+ inkscape:cy="249.83854"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" /><defs
+ class="ClipPathGroup"
+ id="defs4"><marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker6261"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Mend"><path
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path6263"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6125"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always"><path
+ id="path6127"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker6001"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Mend"
+ inkscape:collect="always"><path
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path6003"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5693"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always"><path
+ id="path5695"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5575"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Mend"
+ inkscape:collect="always"><path
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path5577"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5469"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always"><path
+ id="path5471"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5259"
+ style="overflow:visible"
+ inkscape:isstock="true"><path
+ id="path5261"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" /></marker><marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true"><path
+ id="path4241"
+ style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" /></marker></defs><defs
+ id="defs9" /><defs
+ id="defs100" /><defs
+ id="defs123" /><defs
+ class="TextShapeIndex"
+ id="defs134" /><defs
+ class="EmbeddedBulletChars"
+ id="defs138" /><defs
+ class="TextEmbeddedBitmaps"
+ id="defs167" /><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g204"
+ transform="translate(-1350,-3250)"><g
+ id="id6"><rect
+ class="BoundingBox"
+ x="1350"
+ y="3250"
+ width="24901"
+ height="14301"
+ id="rect207"
+ style="fill:none;stroke:none" /><path
+ d="m 13800,17500 -12400,0 0,-14200 24800,0 0,14200 -12400,0 z"
+ id="path209"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 13800,17500 -12400,0 0,-14200 24800,0 0,14200 -12400,0 z"
+ id="path211"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000;stroke-width:100;stroke-linejoin:round" /><text
+ class="TextShape"
+ id="text213"><tspan
+ class="TextParagraph"
+ font-size="846px"
+ font-weight="400"
+ id="tspan215"
+ style="font-weight:400;font-size:846px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="1652"
+ y="17093"
+ id="tspan217"><tspan
+ id="tspan219"
+ style="fill:#ff0000;stroke:none" /><tspan
+ id="tspan221"
+ style="fill:#ff0000;stroke:none">V4L2_SEL_FLAG_GE</tspan></tspan></tspan></text>
+</g></g><rect
+ class="BoundingBox"
+ x="3000"
+ y="2200"
+ width="18101"
+ height="10101"
+ id="rect226"
+ style="fill:none;stroke:none" /><path
+ d="m 12050,12250 -9000,0 0,-10000 18000,0 0,10000 -9000,0 z"
+ id="path228"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 12050,12250 -9000,0 0,-10000 18000,0 0,10000 -9000,0 z"
+ id="path230"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000;stroke-width:100;stroke-linejoin:round" /><text
+ class="TextShape"
+ id="text232"
+ x="-1350"
+ y="-3250"><tspan
+ class="TextParagraph"
+ font-size="987px"
+ font-weight="400"
+ id="tspan234"
+ style="font-weight:400;font-size:987px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="3227"
+ y="11503"
+ id="tspan236"><tspan
+ id="tspan238"
+ style="fill:#000000;stroke:none" /><tspan
+ id="tspan240"
+ style="fill:#000000;stroke:none">ORIGINAL</tspan></tspan></tspan></text>
+<g
+ class="com.sun.star.drawing.CustomShape"
+ id="g242"
+ transform="translate(-1350,-3250)"><g
+ id="id8"><rect
+ class="BoundingBox"
+ x="7050"
+ y="7950"
+ width="7901"
+ height="5501"
+ id="rect245"
+ style="fill:none;stroke:none" /><path
+ d="m 11000,13400 -3900,0 0,-5400 7800,0 0,5400 -3900,0 z"
+ id="path247"
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;stroke:none" /><path
+ d="m 11000,13400 -3900,0 0,-5400 7800,0 0,5400 -3900,0 z"
+ id="path249"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4;stroke-width:100;stroke-linejoin:round" /><text
+ class="TextShape"
+ id="text251"><tspan
+ class="TextParagraph"
+ font-size="776px"
+ font-weight="400"
+ id="tspan253"
+ style="font-weight:400;font-size:776px;font-family:'Liberation Sans', sans-serif"><tspan
+ class="TextPosition"
+ x="7228"
+ y="10969"
+ id="tspan255"><tspan
+ id="tspan257"
+ style="fill:#000080;stroke:none">V4L2_SEL_FLAG_LE</tspan></tspan></tspan></text>
+</g></g><rect
+ class="BoundingBox"
+ x="13700"
+ y="7100"
+ width="7101"
+ height="101"
+ id="rect262"
+ style="fill:none;stroke:none" /><path
+ d="m 20750,7150 -7000,0"
+ id="path264"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000080;stroke-width:99.99134064;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)" /><rect
+ class="BoundingBox"
+ x="3400"
+ y="7100"
+ width="2101"
+ height="101"
+ id="rect269"
+ style="fill:none;stroke:none" /><path
+ d="m 3450,7150 2000,0"
+ id="path271"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5575)" /><rect
+ class="BoundingBox"
+ x="9800"
+ y="2900"
+ width="101"
+ height="1501"
+ id="rect276"
+ style="fill:none;stroke:none" /><path
+ d="m 9850,2950 0,1400"
+ id="path278"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5259)" /><rect
+ class="BoundingBox"
+ x="9600"
+ y="10600"
+ width="101"
+ height="1301"
+ id="rect283"
+ style="fill:none;stroke:none" /><path
+ d="m 9650,11850 0,-1200"
+ id="path285"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5469)" /><rect
+ class="BoundingBox"
+ x="450"
+ y="6850"
+ width="2051"
+ height="601"
+ id="rect290"
+ style="fill:none;stroke:none" /><path
+ d="m 2450,7150 -2000.8696,0"
+ id="path292"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000;stroke-width:132.48202515;stroke-linejoin:round;marker-end:url(#marker6125)" /><rect
+ class="BoundingBox"
+ x="21600"
+ y="6750"
+ width="2651"
+ height="601"
+ id="rect299"
+ style="fill:none;stroke:none" /><path
+ d="m 21650,7050 2522.609,0"
+ id="path301"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000;stroke-width:120.40660858;stroke-linejoin:round;marker-end:url(#marker6001)" /><rect
+ class="BoundingBox"
+ x="9550"
+ y="550"
+ width="601"
+ height="1451"
+ id="rect308"
+ style="fill:none;stroke:none" /><path
+ d="m 9836.957,1950 0,-1453.0435"
+ id="path310"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000;stroke-width:164.03721619;stroke-linejoin:round;marker-end:url(#marker6261)" /><rect
+ class="BoundingBox"
+ x="9350"
+ y="12500"
+ width="601"
+ height="1451"
+ id="rect317"
+ style="fill:none;stroke:none" /><path
+ d="m 9650,12550 0,1505.217"
+ id="path319"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff0000;stroke-width:166.95626831;stroke-linejoin:round;marker-end:url(#marker5693)" /></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/crop.rst b/Documentation/media/uapi/v4l/crop.rst
index 3ea733a8eef8..be58894c9c89 100644
--- a/Documentation/media/uapi/v4l/crop.rst
+++ b/Documentation/media/uapi/v4l/crop.rst
@@ -53,8 +53,8 @@ Cropping Structures
.. _crop-scale:
-.. figure:: crop_files/crop.*
- :alt: crop.pdf / crop.gif
+.. figure:: crop.*
+ :alt: crop.pdf / crop.svg
:align: center
Image Cropping, Insertion and Scaling
diff --git a/Documentation/media/uapi/v4l/crop.svg b/Documentation/media/uapi/v4l/crop.svg
new file mode 100644
index 000000000000..dc9a471bae6b
--- /dev/null
+++ b/Documentation/media/uapi/v4l/crop.svg
@@ -0,0 +1,281 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="208.59436mm"
+ height="95.859146mm"
+ viewBox="0 0 739.11388 339.6584"
+ sodipodi:docname="crop.svg"><metadata
+ id="metadata8"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs6"><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath44"><path
+ d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3051.62,250.48 8.19,17.01 -46.93,23.31 29.61,-25.515 -38.12,8.505 47.25,-23.31 z m -1559.25,800.73 -8.5,-17.01 46.93,-23.31 -29.29,25.2 37.8,-8.19 -46.94,23.31 z"
+ id="path46"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath64"><path
+ d="m 0,0 0,1895 4118,0 0,-1626 -1,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -4,0 0,1 -4,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 1,0 0,3 1,0 0,2 1,0 0,2 1,0 L 4118,0 0,0 Z m 4074,272 0,-1 1,0 0,1 -1,0 z m -1486,743 0,-1 1,0 0,1 -1,0 z m -2,1 0,-1 1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,2 2,0 0,-1 4,0 0,-1 5,0 0,-1 4,0 0,-1 5,0 0,-1 5,0 0,-1 4,0 0,-1 3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 z"
+ id="path66"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath84"><path
+ d="m 0,0 0,1895 4118,0 0,-136 -3,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 5,0 0,-1 6,0 0,-1 7,0 0,-1 6,0 0,-1 6,0 0,-1 4,0 0,-1 -1,0 0,-1 -3,0 0,-1 -3,0 0,-1 -2,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 7,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 3,0 L 4118,0 0,0 Z m 2552,1599 0,-1 2,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 -4,0 0,1 -7,0 0,1 -6,0 0,1 -7,0 0,1 -6,0 0,1 -3,0 0,1 2,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 -7,0 0,-1 -12,0 0,-1 -11,0 0,-1 -11,0 0,-1 -4,0 0,-1 1,0 0,-12 1,0 0,-4 z"
+ id="path86"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath104"><path
+ d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3056.98,1740.43 -1.58,18.9 -52.6,-4.72 38.74,-6.3 -36.85,-12.6 52.29,4.72 z m -1570.28,-123.79 1.58,-18.9 52.6,4.72 -38.43,5.99 36.54,12.91 -52.29,-4.72 z"
+ id="path106"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath></defs><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview4"
+ showgrid="false"
+ inkscape:zoom="0.99625351"
+ inkscape:cx="-73.227122"
+ inkscape:cy="114.17568"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g10"
+ units="mm"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" /><g
+ id="g10"
+ inkscape:groupmode="layer"
+ inkscape:label="crop"
+ transform="matrix(1.25,0,0,-1.25,-0.35237428,339.91141)"><path
+ d="m 411.08474,0.37218832 0,271.38551168 -37.17867,0 0,-1.94649 -205.3611,1.94649 0,-2.58046 -155.911966,0 0,-1.90192 17.977663,-264.3219406 -17.977663,0 0,-2.58119108 193.135216,0 0,2.58119108 -137.934309,0 0,264.3219406 333.650679,0 0,-264.3219406 -177.10547,0 0,-1.947197 149.52695,0 0,-0.63399408"
+ style="fill:#7f7f7f;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path16"
+ inkscape:connector-curvature="0" /><path
+ d="m 411.08474,0.37218832 0,271.38551168 -37.17867,0 0,-1.94649 -205.3611,1.94649 0,-2.58046 -155.911966,0 0,-1.90192 17.977663,-264.3219406 -17.977663,0 0,-2.58119108 193.135216,0 0,2.58119108 -137.934309,0 0,264.3219406 333.650679,0 0,-264.3219406 -177.10547,0 0,-1.947197 149.52695,0 0,-0.63399408 37.17867,0 z"
+ style="fill:none;stroke:#000000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path18"
+ inkscape:connector-curvature="0" /><path
+ d="m 40.256106,6.8024838 366.979524,0 0,253.4078762 -366.979524,0 0,-253.4078762 z"
+ style="fill:none;stroke:#ff0000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path20"
+ inkscape:connector-curvature="0" /><path
+ d="m 77.434066,7.4364707 316.352284,0 0,244.4416893 -316.352284,0 0,-244.4416893 z"
+ style="fill:none;stroke:#0000ff;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path22"
+ inkscape:connector-curvature="0" /><path
+ d="m 214.41669,149.49157 152.83195,0 0,81.51075 -152.83195,0 0,-81.51075 z"
+ style="fill:none;stroke:#00b000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path24"
+ inkscape:connector-curvature="0" /><path
+ d="m 438.57126,37.414285 152.83194,0 0,213.966445 -152.83194,0 0,-213.966445 z"
+ style="fill:none;stroke:#d100d1;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path26"
+ inkscape:connector-curvature="0" /><path
+ d="m 0.45168907,271.7577 168.09328093,0 0,-2.58046 -155.911966,0 0,-1.90192 17.977663,0 0,-264.3219406 -17.977663,0 0,-2.58119108 -12.18131493,0"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path28"
+ inkscape:connector-curvature="0" /><path
+ d="m 0.45168907,271.7577 168.09328093,0 0,-2.58046 -155.911966,0 0,-1.90192 17.977663,0 0,-264.3219406 -17.977663,0 0,-2.58119108 -12.18131493,0 0,271.38551168 z"
+ style="fill:none;stroke:#000000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path30"
+ inkscape:connector-curvature="0" /><path
+ d="m 373.90607,0.37218832 0,0.63399408 -149.52695,0 0,1.947197 -18.6109,0 0,-2.58119108"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path32"
+ inkscape:connector-curvature="0" /><path
+ d="m 373.90607,0.37218832 0,0.63399408 -149.52695,0 0,1.947197 -18.6109,0 0,-2.58119108 168.13785,0 z"
+ style="fill:none;stroke:#000000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path34"
+ inkscape:connector-curvature="0" /><path
+ d="m 205.76822,271.7577 168.13785,0 0,-1.94649 -149.52695,0 0,-2.53589 -18.6109,0"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path36"
+ inkscape:connector-curvature="0" /><path
+ d="m 205.76822,271.7577 168.13785,0 0,-1.94649 -149.52695,0 0,-2.53589 -18.6109,0 0,4.48238 z"
+ style="fill:none;stroke:#000000;stroke-width:0.33962813;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path38"
+ inkscape:connector-curvature="0" /><g
+ id="g40"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
+ clip-path="url(#clipPath44)"
+ id="g42"><path
+ inkscape:connector-curvature="0"
+ id="path48"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 1492.37,1040.5 3051.62,260.875" /></g></g><g
+ id="g50"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
+ inkscape:connector-curvature="0"
+ id="path52"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path54"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01 z" /><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path58"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01 z" /></g><g
+ id="g60"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
+ clip-path="url(#clipPath64)"
+ id="g62"><path
+ inkscape:connector-curvature="0"
+ id="path68"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 2555.5,1040.5 4114.75,260.875" /></g></g><g
+ id="g70"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
+ inkscape:connector-curvature="0"
+ id="path72"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path74"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01 z" /><path
+ inkscape:connector-curvature="0"
+ id="path76"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path78"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01 z" /></g><g
+ id="g80"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
+ clip-path="url(#clipPath84)"
+ id="g82"><path
+ inkscape:connector-curvature="0"
+ id="path88"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2555.5,1607.5 1559.25,141.75" /></g></g><g
+ id="g90"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
+ inkscape:connector-curvature="0"
+ id="path92"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path94"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9 z" /><path
+ inkscape:connector-curvature="0"
+ id="path96"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path98"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9 z" /></g><g
+ id="g100"
+ transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
+ clip-path="url(#clipPath104)"
+ id="g102"><path
+ inkscape:connector-curvature="0"
+ id="path108"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1492.37,1607.5 1559.25,141.75" /></g></g><path
+ d="m 221.11869,232.99481 -5.25292,-1.85592 5.52462,-0.86111 -0.2717,2.71703"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path112"
+ inkscape:connector-curvature="0" /><path
+ d="m 221.11869,232.99481 -5.25292,-1.85592 5.52462,-0.86111 -0.2717,2.71703 z"
+ style="fill:none;stroke:#000000;stroke-width:0.67925626;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path114"
+ inkscape:connector-curvature="0" /><path
+ d="m 431.8247,249.43353 5.29748,1.81135 -5.56918,0.90567 0.2717,-2.71702"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path116"
+ inkscape:connector-curvature="0" /><path
+ d="m 431.8247,249.43353 5.29748,1.81135 -5.56918,0.90567 0.2717,-2.71702 z"
+ style="fill:none;stroke:#000000;stroke-width:0.67925626;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path118"
+ inkscape:connector-curvature="0" /><g
+ id="g120"
+ transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
+ transform="matrix(1,0,0,-1,204.52,9.07751)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#d10000;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text122"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 52.212196 55.890137 59.568073"
+ y="0"
+ sodipodi:role="line"
+ id="tspan124">v4l2_cropcap.bounds</tspan></text>
+</g><g
+ id="g126"
+ transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
+ transform="matrix(1,0,0,-1,58.5175,166.42)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#0000d1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text128"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 50.373226 52.576019 56.25396 59.561459"
+ y="0"
+ sodipodi:role="line"
+ id="tspan130">v4l2_cropcap.defrect</tspan></text>
+</g><g
+ id="g132"
+ transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
+ transform="matrix(1,0,0,-1,153.49,152.245)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#008f00;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text134"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 30.514996"
+ y="0"
+ sodipodi:role="line"
+ id="tspan136">v4l2_crop.c</tspan></text>
+</g><g
+ id="g138"
+ transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
+ transform="matrix(1,0,0,-1,309.415,30.34)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#b000b0;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text140"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 17.648821 21.326759 23.529554 29.03985 32.717789"
+ y="0"
+ sodipodi:role="line"
+ id="tspan142">v4l2_format</tspan></text>
+</g><text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-99.291145"
+ y="-239.49893"
+ id="text3396"
+ sodipodi:linespacing="125%"
+ transform="scale(1,-1)"><tspan
+ sodipodi:role="line"
+ id="tspan3398"
+ x="-99.291145"
+ y="-239.49893"></tspan><tspan
+ sodipodi:role="line"
+ x="-99.291145"
+ y="-199.49893"
+ id="tspan3400" /></text>
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/crop_files/crop.gif b/Documentation/media/uapi/v4l/crop_files/crop.gif
deleted file mode 100644
index 3b9e7d836d4b..000000000000
--- a/Documentation/media/uapi/v4l/crop_files/crop.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/crop_files/crop.pdf b/Documentation/media/uapi/v4l/crop_files/crop.pdf
deleted file mode 100644
index c9fb81cd32f3..000000000000
--- a/Documentation/media/uapi/v4l/crop_files/crop.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi.rst b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
index b82d837e4ff1..baf5f2483927 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
@@ -221,8 +221,8 @@ and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does
.. _vbi-hsync:
-.. figure:: dev-raw-vbi_files/vbi_hsync.*
- :alt: vbi_hsync.pdf / vbi_hsync.gif
+.. figure:: vbi_hsync.*
+ :alt: vbi_hsync.pdf / vbi_hsync.svg
:align: center
**Figure 4.1. Line synchronization**
@@ -230,8 +230,8 @@ and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does
.. _vbi-525:
-.. figure:: dev-raw-vbi_files/vbi_525.*
- :alt: vbi_525.pdf / vbi_525.gif
+.. figure:: vbi_525.*
+ :alt: vbi_525.pdf / vbi_525.svg
:align: center
**Figure 4.2. ITU-R 525 line numbering (M/NTSC and M/PAL)**
@@ -240,8 +240,8 @@ and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does
.. _vbi-625:
-.. figure:: dev-raw-vbi_files/vbi_625.*
- :alt: vbi_625.pdf / vbi_625.gif
+.. figure:: vbi_625.*
+ :alt: vbi_625.pdf / vbi_625.svg
:align: center
**Figure 4.3. ITU-R 625 line numbering**
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.gif b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.gif
deleted file mode 100644
index 5580b690d504..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf
deleted file mode 100644
index 0bae28385dfa..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.gif b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.gif
deleted file mode 100644
index 34e3251983c4..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf
deleted file mode 100644
index bf29b95dcd08..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.gif b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.gif
deleted file mode 100644
index b02434d3b356..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.pdf b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.pdf
deleted file mode 100644
index 200b668189bf..000000000000
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_hsync.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst
index fb4d0d45b216..cd2870180208 100644
--- a/Documentation/media/uapi/v4l/dev-subdev.rst
+++ b/Documentation/media/uapi/v4l/dev-subdev.rst
@@ -99,8 +99,8 @@ the video sensor and the host image processing hardware.
.. _pipeline-scaling:
-.. figure:: dev-subdev_files/pipeline.*
- :alt: pipeline.pdf / pipeline.png
+.. figure:: pipeline.*
+ :alt: pipeline.pdf / pipeline.svg
:align: center
Image Format Negotiation on Pipelines
@@ -404,8 +404,8 @@ selection will refer to the sink pad format dimensions instead.
.. _subdev-image-processing-crop:
-.. figure:: dev-subdev_files/subdev-image-processing-crop.*
- :alt: subdev-image-processing-crop.svg
+.. figure:: subdev-image-processing-crop.*
+ :alt: subdev-image-processing-crop.pdf / subdev-image-processing-crop.svg
:align: center
**Figure 4.5. Image processing in subdevs: simple crop example**
@@ -421,8 +421,8 @@ pad.
.. _subdev-image-processing-scaling-multi-source:
-.. figure:: dev-subdev_files/subdev-image-processing-scaling-multi-source.*
- :alt: subdev-image-processing-scaling-multi-source.svg
+.. figure:: subdev-image-processing-scaling-multi-source.*
+ :alt: subdev-image-processing-scaling-multi-source.pdf / subdev-image-processing-scaling-multi-source.svg
:align: center
**Figure 4.6. Image processing in subdevs: scaling with multiple sources**
@@ -437,8 +437,8 @@ an area at location specified by the source crop rectangle from it.
.. _subdev-image-processing-full:
-.. figure:: dev-subdev_files/subdev-image-processing-full.*
- :alt: subdev-image-processing-full.svg
+.. figure:: subdev-image-processing-full.*
+ :alt: subdev-image-processing-full.pdf / subdev-image-processing-full.svg
:align: center
**Figure 4.7. Image processing in subdevs: scaling and composition with multiple sinks and sources**
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.pdf b/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.pdf
deleted file mode 100644
index ee3e37f04b6a..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.png b/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.png
deleted file mode 100644
index f19b86c2c24d..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/pipeline.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.pdf b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.pdf
deleted file mode 100644
index 29a806f839b4..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.svg
deleted file mode 100644
index 18b0f5de9ed2..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-crop.svg
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="43cm" height="10cm" viewBox="-194 128 844 196" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="469.774" height="193"/>
- <g>
- <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="63.5" y="211" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="63.5" y="211" width="94" height="77"/>
- </g>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="74.5" y="227.75">
- <tspan x="74.5" y="227.75">sink</tspan>
- <tspan x="74.5" y="243.75">crop</tspan>
- <tspan x="74.5" y="259.75">selection</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
- <tspan x="29.5" y="158"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
- <tspan x="8.53836" y="157.914">sink media</tspan>
- <tspan x="8.53836" y="173.914">bus format</tspan>
- </text>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="349.774" y="155">
- <tspan x="349.774" y="155">source media</tspan>
- <tspan x="349.774" y="171">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="350.488" y="190.834" width="93.2863" height="75.166"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="350.488" y="190.834" width="93.2863" height="75.166"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="266" x2="63.5" y2="288"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="190.834" x2="63.5" y2="211"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="266" x2="157.5" y2="288"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="190.834" x2="157.5" y2="211"/>
- <g>
- <ellipse style="fill: #ffffff" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="481.6" y1="219.984" x2="637.934" y2="220.012"/>
- <polygon style="fill: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="506.908" y="209.8">
- <tspan x="506.908" y="209.8">pad 1 (source)</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
- <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
- <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
- </text>
-</svg>
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.pdf b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.pdf
deleted file mode 100644
index b78a8e8f6b35..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.svg
deleted file mode 100644
index 3322cf4c0093..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-full.svg
+++ /dev/null
@@ -1,163 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="59cm" height="18cm" viewBox="-186 71 1178 346" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <g>
- <rect style="fill: #ffffff" x="318.9" y="129" width="208.1" height="249"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #ff765a" x="318.9" y="129" width="208.1" height="249"/>
- </g>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-2" y="73" width="806" height="343"/>
- <g>
- <ellipse style="fill: #ffffff" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <ellipse style="fill: #ffffff" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.5" y1="167" x2="-30.7361" y2="166.729"/>
- <polygon style="fill: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.732" y1="205.184" x2="980.066" y2="205.212"/>
- <polygon style="fill: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139.96" y="155">
- <tspan x="-139.96" y="155">pad 0 (sink)</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.04" y="195">
- <tspan x="849.04" y="195">pad 2 (source)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="5.5" y="120" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="5.5" y="120" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="62.5" y="136" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="62.5" y="136" width="94" height="77"/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="30.5" y="89">
- <tspan x="30.5" y="89"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="9.53836" y="88.9138">
- <tspan x="9.53836" y="88.9138">sink media</tspan>
- <tspan x="9.53836" y="104.914">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="62.5" y2="213"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="62.5" y2="136"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="156.5" y2="213"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="156.5" y2="136"/>
- <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
- <tspan x="334.704" y="149.442">sink compose</tspan>
- <tspan x="334.704" y="165.442">selection (scaling)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="409.322" y="194.565" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="409.322" y="194.565" width="100.186" height="71.4523"/>
- </g>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="689.5" y="105.128">
- <tspan x="689.5" y="105.128">source media</tspan>
- <tspan x="689.5" y="121.128">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="688.488" y="173.834" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="688.488" y="173.834" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="245.286" x2="409.322" y2="266.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="173.834" x2="409.322" y2="194.565"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="245.286" x2="509.508" y2="266.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="173.834" x2="509.508" y2="194.565"/>
- <text style="fill: #ff765a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="325" y="103">
- <tspan x="325" y="103">sink compose</tspan>
- <tspan x="325" y="119">bounds selection</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.098" y1="341.8" x2="-30.3343" y2="341.529"/>
- <polygon style="fill: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139" y="329">
- <tspan x="-139" y="329">pad 1 (sink)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="7.80824" y="292.8" width="112.092" height="82.2"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="7.80824" y="292.8" width="112.092" height="82.2"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="52.9" y="314.8" width="58.1" height="50.2"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="52.9" y="314.8" width="58.1" height="50.2"/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="31.9" y="259.8">
- <tspan x="31.9" y="259.8"></tspan>
- </text>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="251.9" x2="52.9" y2="314.8"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="316" x2="52.9" y2="365"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="316" x2="111" y2="365"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="251.9" x2="111" y2="314.8"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="358.9" y="251.9" width="75.1" height="64.1"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="443.262" y="284.466" width="64.738" height="48.534"/>
- <g>
- <rect style="fill: #ffffff" x="693.428" y="324.734" width="63.572" height="49.266"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="693.428" y="324.734" width="63.572" height="49.266"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="374" x2="443.262" y2="333"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="324.734" x2="443.262" y2="284.466"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="374" x2="508" y2="333"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="324.734" x2="508" y2="284.466"/>
- <g>
- <ellipse style="fill: #ffffff" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.94" y1="343.984" x2="980.274" y2="344.012"/>
- <polygon style="fill: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.248" y="333.8">
- <tspan x="849.248" y="333.8">pad 3 (source)</tspan>
- </text>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="197" y="91">
- <tspan x="197" y="91">sink</tspan>
- <tspan x="197" y="107">crop</tspan>
- <tspan x="197" y="123">selection</tspan>
- </text>
- <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="553" y="95">
- <tspan x="553" y="95">source</tspan>
- <tspan x="553" y="111">crop</tspan>
- <tspan x="553" y="127">selection</tspan>
- </text>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="211" y1="132" x2="166.21" y2="135.287"/>
- <polygon style="fill: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="209" y1="131" x2="115.581" y2="306.209"/>
- <polygon style="fill: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.492" y1="133.214" x2="514.916" y2="186.469"/>
- <polygon style="fill: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.072" y1="133.787" x2="510.618" y2="275.089"/>
- <polygon style="fill: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
- </g>
-</svg>
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.pdf b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.pdf
deleted file mode 100644
index 8f7a95b6eb4d..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.svg
deleted file mode 100644
index 2340c0f8bc92..000000000000
--- a/Documentation/media/uapi/v4l/dev-subdev_files/subdev-image-processing-scaling-multi-source.svg
+++ /dev/null
@@ -1,116 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="59cm" height="17cm" viewBox="-194 128 1179 330" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="806" height="327"/>
- <g>
- <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="49.5" y="204" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="49.5" y="204" width="94" height="77"/>
- </g>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="60" y="224">
- <tspan x="60" y="224">sink</tspan>
- <tspan x="60" y="240">crop</tspan>
- <tspan x="60" y="256">selection</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
- <tspan x="29.5" y="158"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
- <tspan x="8.53836" y="157.914">sink media</tspan>
- <tspan x="8.53836" y="173.914">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="49.5" y2="281"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="49.5" y2="204"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="143.5" y2="281"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="143.5" y2="204"/>
- <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
- <tspan x="334.704" y="149.442">sink compose</tspan>
- <tspan x="334.704" y="165.442">selection (scaling)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="382.322" y="199.565" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="382.322" y="199.565" width="100.186" height="71.4523"/>
- </g>
- <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="543.322" y="149.442">
- <tspan x="543.322" y="149.442">source</tspan>
- <tspan x="543.322" y="165.442">crop</tspan>
- <tspan x="543.322" y="181.442">selection</tspan>
- </text>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="691.5" y="157.128">
- <tspan x="691.5" y="157.128">source media</tspan>
- <tspan x="691.5" y="173.128">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="690.488" y="225.834" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="690.488" y="225.834" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="297.286" x2="382.322" y2="271.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="225.834" x2="382.322" y2="199.565"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="297.286" x2="482.508" y2="271.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="225.834" x2="482.508" y2="199.565"/>
- <g>
- <ellipse style="fill: #ffffff" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="816.6" y1="249.984" x2="972.934" y2="250.012"/>
- <polygon style="fill: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="841.908" y="239.8">
- <tspan x="841.908" y="239.8">pad 1 (source)</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
- <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
- <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
- </text>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="389.822" y="276.666" width="100.186" height="71.4523"/>
- <g>
- <rect style="fill: #ffffff" x="689.988" y="345.934" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="689.988" y="345.934" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="417.386" x2="389.822" y2="348.118"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="345.934" x2="389.822" y2="276.666"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="417.386" x2="490.008" y2="348.118"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="345.934" x2="490.008" y2="276.666"/>
- <g>
- <ellipse style="fill: #ffffff" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="814.1" y1="384.084" x2="970.434" y2="384.112"/>
- <polygon style="fill: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="839.408" y="373.9">
- <tspan x="839.408" y="373.9">pad 2 (source)</tspan>
- </text>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546" y1="191" x2="492.157" y2="198.263"/>
- <polygon style="fill: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546.908" y1="190.725" x2="495.383" y2="268.548"/>
- <polygon style="fill: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
- </g>
-</svg>
diff --git a/Documentation/media/uapi/v4l/diff-v4l.rst b/Documentation/media/uapi/v4l/diff-v4l.rst
index 76b2ecab8657..8209eeb63dd2 100644
--- a/Documentation/media/uapi/v4l/diff-v4l.rst
+++ b/Documentation/media/uapi/v4l/diff-v4l.rst
@@ -648,12 +648,12 @@ microcode programming. A new interface for MPEG compression and playback
devices is documented in :ref:`extended-controls`.
.. [#f1]
- According to Documentation/devices.txt these should be symbolic links
+ According to Documentation/admin-guide/devices.rst these should be symbolic links
to ``/dev/video0``. Note the original bttv interface is not
compatible with V4L or V4L2.
.. [#f2]
- According to ``Documentation/devices.txt`` a symbolic link to
+ According to ``Documentation/admin-guide/devices.rst`` a symbolic link to
``/dev/radio0``.
.. [#f3]
diff --git a/Documentation/media/uapi/v4l/field-order.rst b/Documentation/media/uapi/v4l/field-order.rst
index 50779a67c3fd..e05fb1041363 100644
--- a/Documentation/media/uapi/v4l/field-order.rst
+++ b/Documentation/media/uapi/v4l/field-order.rst
@@ -141,8 +141,8 @@ enum v4l2_field
Field Order, Top Field First Transmitted
========================================
-.. figure:: field-order_files/fieldseq_tb.*
- :alt: fieldseq_tb.pdf / fieldseq_tb.gif
+.. figure:: fieldseq_tb.*
+ :alt: fieldseq_tb.pdf / fieldseq_tb.svg
:align: center
@@ -151,7 +151,7 @@ Field Order, Top Field First Transmitted
Field Order, Bottom Field First Transmitted
===========================================
-.. figure:: field-order_files/fieldseq_bt.*
- :alt: fieldseq_bt.pdf / fieldseq_bt.gif
+.. figure:: fieldseq_bt.*
+ :alt: fieldseq_bt.pdf / fieldseq_bt.svg
:align: center
diff --git a/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.gif b/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.gif
deleted file mode 100644
index 60e8569a76c9..000000000000
--- a/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.pdf b/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.pdf
deleted file mode 100644
index 26598b23f80d..000000000000
--- a/Documentation/media/uapi/v4l/field-order_files/fieldseq_bt.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.gif b/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.gif
deleted file mode 100644
index 718492f1cfc7..000000000000
--- a/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.pdf b/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.pdf
deleted file mode 100644
index 4965b22ddb3a..000000000000
--- a/Documentation/media/uapi/v4l/field-order_files/fieldseq_tb.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/fieldseq_bt.svg b/Documentation/media/uapi/v4l/fieldseq_bt.svg
new file mode 100644
index 000000000000..b195301771ce
--- /dev/null
+++ b/Documentation/media/uapi/v4l/fieldseq_bt.svg
@@ -0,0 +1,2613 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg3619"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="198.48296mm"
+ height="211.89406mm"
+ viewBox="0 0 703.28606 750.80571"
+ sodipodi:docname="fieldseq_bt.svg"><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview3621"
+ showgrid="false"
+ units="mm"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="1.0721815"
+ inkscape:cx="8.4175633"
+ inkscape:cy="371.67214"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g3627" /><metadata
+ id="metadata3625"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs3623"><clipPath
+ id="clipPath4301"
+ clipPathUnits="userSpaceOnUse"><path
+ style="clip-rule:evenodd"
+ inkscape:connector-curvature="0"
+ id="path4303"
+ d="M 0,6040 0,0 l 5650,0 0,6040 -5650,0 z m 4786.76,-99.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
+ transform="matrix(1.25,0,0,-1.25,-1.0537,751.94632)"
+ inkscape:label="fieldseq_bt"
+ inkscape:groupmode="layer"
+ id="g3627"><path
+ d="m 188.622,346.001 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3631"
+ inkscape:connector-curvature="0" /><path
+ d="m 375.693,346.001 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3633"
+ inkscape:connector-curvature="0" /><path
+ d="m 282.157,346.001 93.5355,0 0,42.516 -93.5355,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3635"
+ inkscape:connector-curvature="0" /><path
+ d="m 469.228,346.001 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3637"
+ inkscape:connector-curvature="0" /><path
+ d="m 95.0867,346.001 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3639"
+ inkscape:connector-curvature="0" /><path
+ d="m 1.55156,346.001 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3641"
+ inkscape:connector-curvature="0" /><path
+ d="m 95.0867,482.052 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3643"
+ inkscape:connector-curvature="0" /><path
+ d="m 469.228,482.052 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3645"
+ inkscape:connector-curvature="0" /><path
+ d="m 1.55156,414.027 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3647"
+ inkscape:connector-curvature="0" /><path
+ d="m 188.622,414.027 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3649"
+ inkscape:connector-curvature="0" /><path
+ d="m 375.693,414.027 93.5352,0 0,42.516 -93.5352,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3651"
+ inkscape:connector-curvature="0" /><path
+ d="m 469.228,214.201 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3653"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3655"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3657"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3659"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3661"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3663"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3665"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3667"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3669"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3671"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3673"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3675"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3677"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3679"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3681"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3683"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3685"
+ inkscape:connector-curvature="0" /><path
+ d="m 282.157,482.052 93.5355,0 0,42.516 -93.5355,0 0,-42.516 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3687"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,222.704 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3689"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,222.704 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3691"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,231.207 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3693"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,231.207 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3695"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,226.956 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3697"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,226.956 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3699"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,239.711 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3701"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,239.711 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3703"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,235.459 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3705"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,235.459 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3707"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,248.214 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3709"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,248.214 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3711"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,243.962 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3713"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,243.962 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3715"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,256.717 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3717"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,256.717 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3719"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,252.466 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3721"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,252.466 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3723"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,265.22 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3725"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,265.22 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3727"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,260.969 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3729"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,260.969 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3731"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,273.723 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3733"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,273.723 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3735"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,269.472 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3737"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,269.472 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3739"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,277.975 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3741"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,277.975 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3743"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,218.453 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3745"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,218.453 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3747"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,282.227 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3749"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,282.227 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3751"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3753"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3755"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3757"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3759"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3761"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3763"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3765"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3767"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3769"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3771"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3773"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3775"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3777"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3779"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3781"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3783"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3785"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3787"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3789"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3791"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3793"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3795"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3797"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3799"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3801"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3803"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3805"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3807"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3809"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3811"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3813"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3815"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3817"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3819"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3821"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3823"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3825"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3827"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3829"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3831"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3833"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3835"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3837"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3839"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3841"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3843"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3845"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3847"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,380.014 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3849"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,380.014 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3851"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,371.511 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3853"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,371.511 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3855"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,363.007 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3857"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,363.007 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3859"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,354.504 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3861"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,354.504 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3863"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,358.755 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3865"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,358.755 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3867"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,350.252 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3869"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,350.252 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3871"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,367.259 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3873"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,367.259 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3875"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,375.762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3877"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,375.762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3879"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3881"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3883"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3885"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3887"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3889"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3891"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3893"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3895"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3897"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3899"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3901"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3903"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3905"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3907"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3909"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3911"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3913"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,375.762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3915"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3917"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,367.259 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3919"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3921"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,358.755 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3923"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3925"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,350.252 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3927"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3929"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,380.014 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3931"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3933"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,371.511 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3935"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3937"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,363.007 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3939"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3941"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,354.504 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3943"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3945"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3947"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3949"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3951"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3953"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3955"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3957"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3959"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3961"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3963"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3965"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3967"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3969"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3971"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3973"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3975"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3977"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3979"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3981"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3983"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3985"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3987"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3989"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3991"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3993"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3995"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path3997"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path3999"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4001"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4003"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4005"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4007"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4009"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,448.039 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4011"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4013"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,439.536 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4015"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4017"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,431.033 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4019"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4021"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,422.53 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4023"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4025"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,443.788 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4027"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4029"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,435.284 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4031"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4033"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,426.781 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4035"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4037"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,418.278 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4039"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,511.813 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4041"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,511.813 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4043"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,503.31 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4045"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,503.31 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4047"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,494.807 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4049"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,494.807 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4051"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,486.304 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4053"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,486.304 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4055"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,516.065 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4057"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,516.065 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4059"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,507.562 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4061"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,507.562 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4063"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,499.059 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4065"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,499.059 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4067"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,490.555 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4069"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,490.555 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4071"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,516.065 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4073"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,516.065 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4075"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,507.562 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4077"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,507.562 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4079"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,499.059 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4081"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,499.059 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4083"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,490.555 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4085"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,490.555 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4087"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,494.807 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4089"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,494.807 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4091"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,486.304 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4093"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,486.304 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4095"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,503.31 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4097"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,503.31 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4099"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,511.813 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4101"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,511.813 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4103"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,511.813 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4105"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,511.813 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4107"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,503.31 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4109"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,503.31 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4111"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,494.807 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4113"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,494.807 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4115"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,486.304 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4117"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,486.304 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4119"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,516.065 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4121"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,516.065 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4123"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,507.562 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4125"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,507.562 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4127"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,499.059 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4129"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,499.059 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4131"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,490.555 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4133"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,490.555 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4135"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4137"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4139"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4141"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4143"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4145"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4147"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4149"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4151"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4153"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4155"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4157"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4159"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4161"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4163"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4165"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4167"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,579.839 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4169"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,579.839 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4171"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,571.336 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4173"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,571.336 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4175"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,562.832 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4177"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,562.832 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4179"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,554.329 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4181"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,554.329 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4183"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,558.581 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4185"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,558.581 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4187"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,550.078 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4189"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,550.078 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4191"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,567.084 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4193"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,567.084 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4195"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,575.587 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4197"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,575.587 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4199"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4201"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4203"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4205"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4207"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4209"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4211"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4213"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4215"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4217"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4219"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4221"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4223"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4225"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4227"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4229"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4231"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4233"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4235"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4237"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4239"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4241"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4243"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4245"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4247"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4249"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4251"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4253"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4255"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4257"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4259"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4261"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4263"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4265"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,579.839 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4267"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4269"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,571.336 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4271"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4273"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,562.832 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4275"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4277"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,554.329 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4279"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4281"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,575.587 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4283"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4285"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,567.084 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4287"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4289"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,558.581 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4291"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4293"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,550.078 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4295"
+ inkscape:connector-curvature="0" /><g
+ transform="scale(0.1,0.1)"
+ id="g4297"
+ style=""><g
+ id="g4299"
+ clip-path="url(#clipPath4301)"
+ style=""><path
+ d="m 3778.18,5968.45 1105.42,0"
+ style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4305"
+ inkscape:connector-curvature="0" /></g></g><path
+ d="m 478.676,594.011 8.503,2.834 -8.503,2.835 0,-5.669"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4307"
+ inkscape:connector-curvature="0" /><path
+ d="m 478.676,594.011 8.503,2.834 -8.503,2.835 0,-5.669 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4309"
+ inkscape:connector-curvature="0" /><path
+ d="m 95.0867,1.62109 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4311"
+ inkscape:connector-curvature="0" /><path
+ d="m 282.157,1.62109 93.5355,0 0,76.5289 -93.5355,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4313"
+ inkscape:connector-curvature="0" /><path
+ d="m 469.228,1.62109 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4315"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,10.1242 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4317"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,10.1242 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4319"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,18.6277 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4321"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,18.6277 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4323"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,27.1309 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4325"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,27.1309 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4327"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,35.634 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4329"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,35.634 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4331"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,5.87266 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4333"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,5.87266 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4335"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,14.3762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4337"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,14.3762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4339"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,22.8793 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4341"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,22.8793 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4343"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,31.3824 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4345"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,31.3824 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4347"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,10.1242 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4349"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,10.1242 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4351"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,18.6277 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4353"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,18.6277 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4355"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,27.1309 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4357"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,27.1309 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4359"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,35.634 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4361"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,35.634 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4363"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,5.87266 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4365"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,5.87266 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4367"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,14.3762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4369"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,14.3762 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4371"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,22.8793 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4373"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,22.8793 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4375"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,31.3824 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4377"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,31.3824 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4379"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,69.6469 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4381"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,69.6469 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4383"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,65.3953 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4385"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,65.3953 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4387"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,61.1438 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4389"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,61.1438 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4391"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,56.8922 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4393"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,56.8922 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4395"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,52.6402 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4397"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,52.6402 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4399"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,48.3887 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4401"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,48.3887 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4403"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,44.1371 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4405"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,44.1371 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4407"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,5.87266 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4409"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,5.87266 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4411"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,10.1242 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4413"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,10.1242 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4415"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,14.3762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4417"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,14.3762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4419"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,18.6277 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4421"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,18.6277 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4423"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,22.8793 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4425"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,22.8793 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4427"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,27.1309 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4429"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,27.1309 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4431"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,31.3824 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4433"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,31.3824 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4435"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,39.8855 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4437"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,39.8855 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4439"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,35.634 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4441"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,35.634 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4443"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,39.8855 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4445"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,39.8855 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4447"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,48.3887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4449"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,48.3887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4451"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,56.8922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4453"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,56.8922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4455"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,65.3953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4457"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,65.3953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4459"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,44.1371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4461"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,44.1371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4463"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,52.6402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4465"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,52.6402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4467"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,61.1438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4469"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,61.1438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4471"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,69.6469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4473"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,69.6469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4475"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,39.8855 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4477"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,39.8855 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4479"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,48.3887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4481"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,48.3887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4483"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,56.8922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4485"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,56.8922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4487"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,65.3953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4489"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,65.3953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4491"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,44.1371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4493"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,44.1371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4495"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,52.6402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4497"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,52.6402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4499"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,61.1438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4501"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,61.1438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4503"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,69.6469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4505"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,69.6469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4507"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4509"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4511"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4513"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4515"
+ inkscape:connector-curvature="0" /><path
+ d="m 188.622,107.911 93.5352,0 0,76.5285 -93.5352,0 0,-76.5285 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4517"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4519"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4521"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4523"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4525"
+ inkscape:connector-curvature="0" /><path
+ d="m 375.693,107.911 93.5352,0 0,76.5285 -93.5352,0 0,-76.5285 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4527"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4529"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,116.414 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4531"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4533"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,112.163 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4535"
+ inkscape:connector-curvature="0" /><path
+ d="m 1.55156,107.911 93.5352,0 0,76.5285 -93.5352,0 0,-76.5285 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4537"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4539"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4541"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4543"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4545"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4547"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4549"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4551"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4553"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4555"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4557"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4559"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4561"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4563"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4565"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4567"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4569"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4571"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4573"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4575"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4577"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4579"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4581"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4583"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4585"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4587"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4589"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4591"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4593"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4595"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4597"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4599"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4601"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4603"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4605"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4607"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4609"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4611"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4613"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4615"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4617"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4619"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4621"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4623"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4625"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4627"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4629"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4631"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4633"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4635"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4637"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4639"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4641"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4643"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4645"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4647"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4649"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4651"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,175.937 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4653"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4655"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,167.434 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4657"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4659"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,158.93 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4661"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4663"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,150.427 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4665"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4667"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,141.924 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4669"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4671"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,133.421 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4673"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4675"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,124.918 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4677"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4679"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,171.685 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4681"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4683"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,163.182 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4685"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4687"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,154.679 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4689"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4691"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,146.176 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4693"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4695"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,137.672 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4697"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4699"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,129.169 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4701"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4703"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,120.666 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4705"
+ inkscape:connector-curvature="0" /><path
+ d="m 95.0867,214.201 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4707"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,222.704 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4709"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,222.704 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4711"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,231.207 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4713"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,231.207 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4715"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,226.956 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4717"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,226.956 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4719"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,239.711 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4721"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,239.711 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4723"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,235.459 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4725"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,235.459 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4727"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,248.214 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4729"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,248.214 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4731"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,243.962 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4733"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,243.962 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4735"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,256.717 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4737"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,256.717 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4739"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,252.466 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4741"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,252.466 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4743"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,265.22 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4745"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,265.22 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4747"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,260.969 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4749"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,260.969 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4751"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,273.723 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4753"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,273.723 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4755"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,269.472 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4757"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,269.472 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4759"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,277.975 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4761"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,277.975 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4763"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,218.453 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4765"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,218.453 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4767"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,282.227 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4769"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,282.227 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4771"
+ inkscape:connector-curvature="0" /><path
+ d="m 282.157,214.201 93.5355,0 0,76.5289 -93.5355,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4773"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,277.975 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4775"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,277.975 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4777"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,282.227 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4779"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,282.227 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4781"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,273.723 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4783"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,273.723 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4785"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,248.214 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4787"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,248.214 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4789"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,239.711 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4791"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,239.711 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4793"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,231.207 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4795"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,231.207 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4797"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,222.704 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4799"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,222.704 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4801"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,269.472 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4803"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,269.472 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4805"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,256.717 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4807"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,256.717 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4809"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,265.22 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4811"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,265.22 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4813"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,260.969 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4815"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,260.969 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4817"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,252.466 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4819"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,252.466 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4821"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,243.962 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4823"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,243.962 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4825"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,235.459 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4827"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,235.459 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4829"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,226.956 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4831"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,226.956 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4833"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,218.453 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path4835"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,218.453 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4837"
+ inkscape:connector-curvature="0" /><text
+ y="-533.07098"
+ x="5.8031301"
+ id="text4841"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4843"
+ sodipodi:role="line"
+ y="-533.07098"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
+ id="tspan4845"
+ sodipodi:role="line"
+ y="-465.04559"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan4847"
+ sodipodi:role="line"
+ y="-397.0202"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+<text
+ y="-316.23969"
+ x="103.58983"
+ id="text4849"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4851"
+ sodipodi:role="line"
+ y="-316.23969"
+ x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan4853"
+ sodipodi:role="line"
+ y="-328.99481"
+ x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
+<text
+ y="-592.59381"
+ x="5.8034"
+ id="text4855"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4857"
+ sodipodi:role="line"
+ y="-592.59381"
+ x="5.8034 13.134789 19.806232 29.801399 36.472843 43.144287 47.139954 53.811398 56.475174 59.810898 66.482346 70.478004 77.149452 83.820892 87.816566 91.152283 94.488007 101.15945 107.83089 111.16662 114.50233 121.17377 131.16895 134.50468 137.84041 140.50418 147.17563 149.8394 156.51085 159.84657 163.1823 165.84607 169.84174 175.84123 179.17696 182.51268 185.8484 189.84407 196.51552 203.18695 209.18646 219.18163 221.8454 225.18112 228.51685 235.18829 241.85973 245.19545 249.19112 255.86256 259.19827 265.86972 269.20544 272.54117 282.53635 285.87207 294.53534 301.86673 309.87006 318.53336">Temporal order, bottom field first transmitted (e.g. M/NTSC)</tspan></text>
+<text
+ y="-316.23981"
+ x="290.6604"
+ id="text4859"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4861"
+ sodipodi:role="line"
+ y="-316.23981"
+ x="290.6604 296.16284 300.74957 305.3363 309.92303 314.50977 319.55023 321.8436 327.34601 331.93274 337.88889 342.47565 347.51608 353.9342 477.73062 483.23306 487.81979 492.40652 496.99326 501.57999 506.62045 508.91382 514.41626 519.00299 524.95911 529.5459 534.5863 541.00446">V4L2_FIELD_TOPV4L2_FIELD_TOP</tspan></text>
+<text
+ y="-299.23349"
+ x="5.8034"
+ id="text4863"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4865"
+ sodipodi:role="line"
+ y="-299.23349"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan><tspan
+ id="tspan4867"
+ sodipodi:role="line"
+ y="-192.9435"
+ x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan><tspan
+ id="tspan4869"
+ sodipodi:role="line"
+ y="-86.653496"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
+<text
+ y="-533.07098"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464"
+ id="text4592"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4594"
+ sodipodi:role="line"
+ y="-533.07098"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
+<text
+ y="-465.04559"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054"
+ id="text4596"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4598"
+ sodipodi:role="line"
+ y="-465.04559"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
+<text
+ y="-397.0202"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963"
+ id="text4600"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4602"
+ sodipodi:role="line"
+ y="-397.0202"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+<text
+ y="-299.23349"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012"
+ id="text5862"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5864"
+ sodipodi:role="line"
+ y="-299.23349"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan></text>
+<text
+ y="-192.9435"
+ x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001"
+ id="text5866"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5868"
+ sodipodi:role="line"
+ y="-192.9435"
+ x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan></text>
+<text
+ y="-86.653496"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579"
+ id="text5870"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5872"
+ sodipodi:role="line"
+ y="-86.653496"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
+<text
+ y="-316.23969"
+ x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659"
+ id="text7144"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan7146"
+ sodipodi:role="line"
+ y="-316.23969"
+ x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan></text>
+<text
+ y="-328.99481"
+ x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322"
+ id="text7148"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan7150"
+ sodipodi:role="line"
+ y="-328.99481"
+ x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
+</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/fieldseq_tb.svg b/Documentation/media/uapi/v4l/fieldseq_tb.svg
new file mode 100644
index 000000000000..6a7b10ad4ab8
--- /dev/null
+++ b/Documentation/media/uapi/v4l/fieldseq_tb.svg
@@ -0,0 +1,2607 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg5543"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="198.48296mm"
+ height="210.39415mm"
+ viewBox="0 0 703.28607 745.49109"
+ sodipodi:docname="fieldseq_tb.svg"><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview5545"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ units="mm"
+ inkscape:zoom="1.0733333"
+ inkscape:cx="9.8391479"
+ inkscape:cy="370.19322"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g5551" /><metadata
+ id="metadata5549"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs5547"><clipPath
+ id="clipPath6753"
+ clipPathUnits="userSpaceOnUse"><path
+ style="clip-rule:evenodd"
+ inkscape:connector-curvature="0"
+ id="path6755"
+ d="M 0,6000 0,0 l 5660,0 0,6000 -5660,0 z m 4786.76,-102.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
+ transform="matrix(1.25,0,0,-1.25,-1.0537,746.57119)"
+ inkscape:label="fieldseq_tb"
+ inkscape:groupmode="layer"
+ id="g5551"><path
+ d="m 379.944,571.287 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5555"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,571.287 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5557"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,562.784 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5559"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,562.784 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5561"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,554.281 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5563"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,554.281 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5565"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,545.778 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5567"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,545.778 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5569"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,575.539 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5571"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,575.539 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5573"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,567.036 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5575"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,567.036 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5577"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,558.532 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5579"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,558.532 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5581"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,550.029 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5583"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,550.029 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5585"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,575.539 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5587"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,575.539 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5589"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,567.036 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5591"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,567.036 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5593"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,558.532 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5595"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,558.532 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5597"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,550.029 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5599"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,550.029 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5601"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,571.287 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5603"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,571.287 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5605"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,562.784 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path5607"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,562.784 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path5609"
+ [Generated SVG figure body, omitted here for brevity: several hundred Inkscape-produced <path> elements, each bar emitted twice (a solid evenodd fill followed by a thin black outline, stroke-width 0.3543). The bars are horizontal strips 85.032 units wide and 4.25 units tall, stacked in columns and enclosed in larger black-stroked boxes (stroke-width 1.4172), with fills #d10000 (red), #0000d1 (blue), #008f00 (green), and #ffff00 (yellow); element ids run path5611 onward.]
+ id="path6139"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,503.262 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6141"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,503.262 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6143"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,499.01 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6145"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,499.01 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6147"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,494.759 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6149"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,494.759 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6151"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,490.507 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6153"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,490.507 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6155"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,486.255 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6157"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,486.255 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6159"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,482.004 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6161"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,482.004 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6163"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6165"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6167"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6169"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6171"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6173"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6175"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6177"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6179"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6181"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6183"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6185"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6187"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6189"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6191"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6193"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6195"
+ inkscape:connector-curvature="0" /><path
+ d="m 1.55156,107.863 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6197"
+ inkscape:connector-curvature="0" /><path
+ d="m 188.622,107.863 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6199"
+ inkscape:connector-curvature="0" /><path
+ d="m 375.693,107.863 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6201"
+ inkscape:connector-curvature="0" /><path
+ d="m 95.0867,1.57266 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6203"
+ inkscape:connector-curvature="0" /><path
+ d="m 282.157,1.57266 93.5355,0 0,76.5289 -93.5355,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6205"
+ inkscape:connector-curvature="0" /><path
+ d="m 469.228,1.57266 93.5352,0 0,76.5289 -93.5352,0 0,-76.5289 z"
+ style="fill:none;stroke:#000000;stroke-width:1.41719997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6207"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,65.3469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6209"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,65.3469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6211"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,56.8438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6213"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,56.8438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6215"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,48.3402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6217"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,48.3402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6219"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,39.8371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6221"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,39.8371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6223"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,69.5984 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6225"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,69.5984 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6227"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,61.0953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6229"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,61.0953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6231"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,52.5922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6233"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,52.5922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6235"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,44.0887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6237"
+ inkscape:connector-curvature="0" /><path
+ d="m 473.479,44.0887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6239"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,65.3469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6241"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,65.3469 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6243"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,56.8438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6245"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,56.8438 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6247"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,48.3402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6249"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,48.3402 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6251"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,39.8371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6253"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,39.8371 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6255"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,69.5984 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6257"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,69.5984 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6259"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,61.0953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6261"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,61.0953 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6263"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,52.5922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6265"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,52.5922 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6267"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,44.0887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6269"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,44.0887 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6271"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,277.927 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6273"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,277.927 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6275"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,269.423 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6277"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,269.423 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6279"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,260.92 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6281"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,260.92 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6283"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,252.417 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6285"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,252.417 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6287"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,243.914 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6289"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,243.914 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6291"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,235.411 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6293"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,235.411 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6295"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,226.907 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6297"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,226.907 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6299"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,218.404 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6301"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,218.404 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6303"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,69.5984 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6305"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,69.5984 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6307"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,65.3469 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6309"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,65.3469 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6311"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,61.0953 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6313"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,61.0953 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6315"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,56.8438 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6317"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,56.8438 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6319"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,52.5922 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6321"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,52.5922 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6323"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,48.3402 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6325"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,48.3402 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6327"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,44.0887 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6329"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,44.0887 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6331"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,39.8371 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6333"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,39.8371 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6335"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,571.287 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6337"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,571.287 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6339"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,567.036 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6341"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,567.036 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6343"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,562.784 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6345"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,562.784 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6347"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,558.532 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6349"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,558.532 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6351"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,554.281 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6353"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,554.281 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6355"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,550.029 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6357"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,550.029 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6359"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,545.778 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6361"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,545.778 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6363"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,443.739 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6365"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,443.739 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6367"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,439.488 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6369"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,439.488 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6371"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,435.236 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6373"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,435.236 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6375"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,430.984 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6377"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,430.984 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6379"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,426.733 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6381"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,426.733 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6383"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,422.481 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6385"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,422.481 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6387"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,418.23 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6389"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,418.23 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6391"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,413.978 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6393"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,413.978 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6395"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,375.714 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6397"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,375.714 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6399"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,371.462 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6401"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,371.462 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6403"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,367.211 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6405"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,367.211 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6407"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,362.959 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6409"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,362.959 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6411"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,358.707 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6413"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,358.707 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6415"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,354.455 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6417"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,354.455 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6419"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,350.204 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6421"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,350.204 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6423"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,345.952 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6425"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,345.952 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6427"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,273.675 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6429"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,273.675 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6431"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,265.172 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6433"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,265.172 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6435"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,256.669 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6437"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,256.669 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6439"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,248.166 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6441"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,248.166 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6443"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,239.662 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6445"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,239.662 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6447"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,231.159 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6449"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,231.159 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6451"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,222.656 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6453"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,222.656 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6455"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,214.153 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6457"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,214.153 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6459"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,35.5855 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6461"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,35.5855 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6463"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,31.334 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6465"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,31.334 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6467"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,27.0824 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6469"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,27.0824 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6471"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,22.8309 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6473"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,22.8309 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6475"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,18.5793 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6477"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,18.5793 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6479"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,14.3277 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6481"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,14.3277 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6483"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,10.0762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6485"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,10.0762 85.0316,0 0,4.25156 -85.0316,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6487"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,5.82422 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6489"
+ inkscape:connector-curvature="0" /><path
+ d="m 286.409,5.82422 85.0316,0 0,4.25195 -85.0316,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6491"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6493"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6495"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6497"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6499"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6501"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6503"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6505"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6507"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6509"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6511"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6513"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6515"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6517"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6519"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#008f00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6521"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6523"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6525"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6527"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6529"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6531"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6533"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6535"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6537"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6539"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6541"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6543"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6545"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6547"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6549"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6551"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6553"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6555"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6557"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6559"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6561"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6563"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6565"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6567"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6569"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6571"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6573"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6575"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6577"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6579"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6581"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6583"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6585"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6587"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6589"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,175.888 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6591"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6593"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,167.385 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6595"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6597"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,158.882 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6599"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6601"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,150.379 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6603"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6605"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,141.876 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6607"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6609"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,133.372 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6611"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6613"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,124.869 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6615"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#d10000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6617"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,116.366 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6619"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6621"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,375.714 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6623"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6625"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,367.211 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6627"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6629"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,358.707 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6631"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6633"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,350.204 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6635"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6637"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,371.462 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6639"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6641"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,362.959 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6643"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6645"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,354.455 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6647"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6649"
+ inkscape:connector-curvature="0" /><path
+ d="m 99.3383,345.952 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6651"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6653"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6655"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6657"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6659"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6661"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6663"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6665"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6667"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6669"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6671"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6673"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6675"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6677"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6679"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#0000d1;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6681"
+ inkscape:connector-curvature="0" /><path
+ d="m 192.873,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6683"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6685"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6687"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6689"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6691"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6693"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6695"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6697"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6699"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6701"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6703"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6705"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6707"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6709"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6711"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6713"
+ inkscape:connector-curvature="0" /><path
+ d="m 379.944,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6715"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6717"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,171.637 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6719"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6721"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,163.134 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6723"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6725"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,154.63 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6727"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6729"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,146.127 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6731"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6733"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,137.624 85.032,0 0,4.25195 -85.032,0 0,-4.25195 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6735"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6737"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,129.121 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6739"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6741"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,120.618 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6743"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:#ffff00;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6745"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.80312,112.114 85.032,0 0,4.25156 -85.032,0 0,-4.25156 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6747"
+ inkscape:connector-curvature="0" /><g
+ transform="scale(0.1,0.1)"
+ id="g6749"
+ style=""><g
+ id="g6751"
+ clip-path="url(#clipPath6753)"
+ style=""><path
+ d="m 3778.18,5925.45 1105.42,0"
+ style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6757"
+ inkscape:connector-curvature="0" /></g></g><path
+ d="m 478.676,589.711 8.503,2.834 -8.503,2.835 0,-5.669"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path6759"
+ inkscape:connector-curvature="0" /><path
+ d="m 478.676,589.711 8.503,2.834 -8.503,2.835 0,-5.669 z"
+ style="fill:none;stroke:#000000;stroke-width:0.35429999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6761"
+ inkscape:connector-curvature="0" /><text
+ y="-528.771"
+ x="5.8031301"
+ id="text6765"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan6767"
+ sodipodi:role="line"
+ y="-528.771"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
+ id="tspan6769"
+ sodipodi:role="line"
+ y="-460.74561"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan6771"
+ sodipodi:role="line"
+ y="-392.72021"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+<text
+ y="-324.69479"
+ x="10.05469"
+ id="text6773"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan6775"
+ sodipodi:role="line"
+ y="-324.69479"
+ x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan><tspan
+ id="tspan6777"
+ sodipodi:role="line"
+ y="-311.9397"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
+<text
+ y="-588.2937"
+ x="5.8031301"
+ id="text6779"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan6781"
+ sodipodi:role="line"
+ y="-588.2937"
+ x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan><tspan
+ id="tspan6783"
+ sodipodi:role="line"
+ y="-86.604706"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan><tspan
+ id="tspan6785"
+ sodipodi:role="line"
+ y="-192.89471"
+ x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan><tspan
+ id="tspan6787"
+ sodipodi:role="line"
+ y="-294.93271"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
+<text
+ y="-528.771"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464"
+ id="text4583"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4585"
+ sodipodi:role="line"
+ y="-528.771"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
+<text
+ y="-460.74561"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054"
+ id="text4587"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4589"
+ sodipodi:role="line"
+ y="-460.74561"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
+<text
+ y="-392.72021"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963"
+ id="text4591"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan4593"
+ sodipodi:role="line"
+ y="-392.72021"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+<text
+ y="-588.2937"
+ x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096"
+ id="text5847"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5849"
+ sodipodi:role="line"
+ y="-588.2937"
+ x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan></text>
+<text
+ y="-86.604706"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357"
+ id="text5851"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5853"
+ sodipodi:role="line"
+ y="-86.604706"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan></text>
+<text
+ y="-192.89471"
+ x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286"
+ id="text5855"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5857"
+ sodipodi:role="line"
+ y="-192.89471"
+ x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan></text>
+<text
+ y="-294.93271"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791"
+ id="text5859"
+ style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan5861"
+ sodipodi:role="line"
+ y="-294.93271"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
+<text
+ y="-324.69479"
+ x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048"
+ id="text7131"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan7133"
+ sodipodi:role="line"
+ y="-324.69479"
+ x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan></text>
+<text
+ y="-311.9397"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624"
+ id="text7135"
+ style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan7137"
+ sodipodi:role="line"
+ y="-311.9397"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
+</g></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/nv12mt.svg b/Documentation/media/uapi/v4l/nv12mt.svg
new file mode 100644
index 000000000000..21fcccda9723
--- /dev/null
+++ b/Documentation/media/uapi/v4l/nv12mt.svg
@@ -0,0 +1,450 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.2"
+ width="96.282211mm"
+ height="28.282219mm"
+ viewBox="0 0 9628.2211 2828.2219"
+ preserveAspectRatio="xMidYMid"
+ xml:space="preserve"
+ id="svg2"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="nv12mt.svg"
+ style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+ id="metadata383"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview381"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="4.0919524"
+ inkscape:cx="170.57872"
+ inkscape:cy="50.106293"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" /><defs
+ class="ClipPathGroup"
+ id="defs4"><clipPath
+ id="presentation_clip_path"
+ clipPathUnits="userSpaceOnUse"><rect
+ x="0"
+ y="0"
+ width="28000"
+ height="21000"
+ id="rect7" /></clipPath></defs><defs
+ id="defs9" /><defs
+ id="defs80" /><defs
+ id="defs103" /><defs
+ class="TextShapeIndex"
+ id="defs114" /><defs
+ class="EmbeddedBulletChars"
+ id="defs118" /><defs
+ class="TextEmbeddedBitmaps"
+ id="defs147" /><g
+ class="SlideGroup"
+ id="g177"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="g179"><g
+ id="id1"
+ class="Slide"
+ clip-path="url(#presentation_clip_path)"><g
+ class="Page"
+ id="g182"><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g184"><g
+ id="id6"><rect
+ class="BoundingBox"
+ x="3299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect187"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path189"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text191"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan193"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="4121"
+ id="tspan195"><tspan
+ id="tspan197"
+ style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g199"><g
+ id="id7"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect202"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path204"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g206"><g
+ id="id8"><rect
+ class="BoundingBox"
+ x="8099"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect209"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path211"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text213"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan215"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="4121"
+ id="tspan217"><tspan
+ id="tspan219"
+ style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g221"><g
+ id="id9"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect224"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path226"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text228"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan230"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="4121"
+ id="tspan232"><tspan
+ id="tspan234"
+ style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g236"><g
+ id="id10"><rect
+ class="BoundingBox"
+ x="10499"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect239"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path241"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text243"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan245"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="4121"
+ id="tspan247"><tspan
+ id="tspan249"
+ style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g251"><g
+ id="id11"><rect
+ class="BoundingBox"
+ x="3299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect254"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path256"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text258"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan260"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="5521"
+ id="tspan262"><tspan
+ id="tspan264"
+ style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g266"><g
+ id="id12"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect269"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path271"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g273"><g
+ id="id13"><rect
+ class="BoundingBox"
+ x="8099"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect276"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path278"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text280"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan282"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="5521"
+ id="tspan284"><tspan
+ id="tspan286"
+ style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g288"><g
+ id="id14"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect291"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path293"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text295"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan297"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="5521"
+ id="tspan299"><tspan
+ id="tspan301"
+ style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g303"><g
+ id="id15"><rect
+ class="BoundingBox"
+ x="10499"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect306"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path308"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text310"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan312"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="5521"
+ id="tspan314"><tspan
+ id="tspan316"
+ style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g318"><g
+ id="id16"><rect
+ class="BoundingBox"
+ x="5199"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect321"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,4000 970,0"
+ id="path323"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,4000 -450,-150 0,300 450,-150 z"
+ id="path325"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g327"><g
+ id="id17"><rect
+ class="BoundingBox"
+ x="5000"
+ y="4299"
+ width="1202"
+ height="802"
+ id="rect330"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,4300 -842,561"
+ id="path332"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,5100 458,-125 -167,-249 -291,374 z"
+ id="path334"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g336"><g
+ id="id18"><rect
+ class="BoundingBox"
+ x="5399"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect339"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,5400 770,0"
+ id="path341"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,5400 -450,-150 0,300 450,-150 z"
+ id="path343"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g345"><g
+ id="id19"><rect
+ class="BoundingBox"
+ x="7599"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect348"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,5400 770,0"
+ id="path350"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,5400 -450,-150 0,300 450,-150 z"
+ id="path352"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g354"><g
+ id="id20"><rect
+ class="BoundingBox"
+ x="9799"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect357"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,5400 970,0"
+ id="path359"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,5400 -450,-150 0,300 450,-150 z"
+ id="path361"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g363"><g
+ id="id21"><rect
+ class="BoundingBox"
+ x="9900"
+ y="4200"
+ width="1202"
+ height="802"
+ id="rect366"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,5000 -842,-561"
+ id="path368"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,4200 291,374 167,-249 -458,-125 z"
+ id="path370"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g372"><g
+ id="id22"><rect
+ class="BoundingBox"
+ x="9999"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect375"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,4000 970,0"
+ id="path377"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,4000 -450,-150 0,300 450,-150 z"
+ id="path379"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g></g></g></g></g></svg>
diff --git a/Documentation/media/uapi/v4l/nv12mt_example.svg b/Documentation/media/uapi/v4l/nv12mt_example.svg
new file mode 100644
index 000000000000..d65d989ee73b
--- /dev/null
+++ b/Documentation/media/uapi/v4l/nv12mt_example.svg
@@ -0,0 +1,1589 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.2"
+ width="144.28223mm"
+ height="70.282227mm"
+ viewBox="0 0 14428.222 7028.2226"
+ preserveAspectRatio="xMidYMid"
+ xml:space="preserve"
+ id="svg2"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="nv12mt_example.svg"
+ style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+ id="metadata953"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview951"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="2.7306359"
+ inkscape:cx="255.61812"
+ inkscape:cy="124.51576"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" /><defs
+ class="ClipPathGroup"
+ id="defs4" /><defs
+ id="defs9" /><defs
+ id="defs84" /><defs
+ id="defs107" /><defs
+ class="TextShapeIndex"
+ id="defs118" /><defs
+ class="EmbeddedBulletChars"
+ id="defs122" /><defs
+ class="TextEmbeddedBitmaps"
+ id="defs151" /><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g188"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id6"><rect
+ class="BoundingBox"
+ x="3299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect191"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path193"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text195"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan197"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="4121"
+ id="tspan199"><tspan
+ id="tspan201"
+ style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g203"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id7"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect206"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path208"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g210"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id8"><rect
+ class="BoundingBox"
+ x="8099"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect213"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path215"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text217"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan219"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="4121"
+ id="tspan221"><tspan
+ id="tspan223"
+ style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g225"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id9"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect228"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path230"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text232"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan234"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="4121"
+ id="tspan236"><tspan
+ id="tspan238"
+ style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g240"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id10"><rect
+ class="BoundingBox"
+ x="10499"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect243"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path245"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text247"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan249"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="4121"
+ id="tspan251"><tspan
+ id="tspan253"
+ style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g255"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id11"><rect
+ class="BoundingBox"
+ x="12899"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect258"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path260"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g262"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id12"><rect
+ class="BoundingBox"
+ x="15299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect265"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path267"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text269"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan271"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16325"
+ y="4121"
+ id="tspan273"><tspan
+ id="tspan275"
+ style="fill:#000000;stroke:none">9</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g277"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id13"><rect
+ class="BoundingBox"
+ x="12899"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect280"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path282"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text284"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan286"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13925"
+ y="4121"
+ id="tspan288"><tspan
+ id="tspan290"
+ style="fill:#000000;stroke:none">8</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g292"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id14"><rect
+ class="BoundingBox"
+ x="3299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect295"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path297"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text299"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan301"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="5521"
+ id="tspan303"><tspan
+ id="tspan305"
+ style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g307"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id15"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect310"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path312"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g314"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id16"><rect
+ class="BoundingBox"
+ x="8099"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect317"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path319"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text321"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan323"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="5521"
+ id="tspan325"><tspan
+ id="tspan327"
+ style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g329"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id17"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect332"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path334"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text336"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan338"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="5521"
+ id="tspan340"><tspan
+ id="tspan342"
+ style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g344"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id18"><rect
+ class="BoundingBox"
+ x="10499"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect347"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path349"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text351"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan353"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="5521"
+ id="tspan355"><tspan
+ id="tspan357"
+ style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g359"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id19"><rect
+ class="BoundingBox"
+ x="12899"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect362"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path364"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g366"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id20"><rect
+ class="BoundingBox"
+ x="15299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect369"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path371"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text373"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan375"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="5521"
+ id="tspan377"><tspan
+ id="tspan379"
+ style="fill:#000000;stroke:none">11</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g381"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id21"><rect
+ class="BoundingBox"
+ x="12899"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect384"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path386"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text388"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan390"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="5521"
+ id="tspan392"><tspan
+ id="tspan394"
+ style="fill:#000000;stroke:none">10</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g396"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id22"><rect
+ class="BoundingBox"
+ x="3299"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect399"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path401"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text403"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan405"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="6921"
+ id="tspan407"><tspan
+ id="tspan409"
+ style="fill:#000000;stroke:none">12</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g411"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id23"><rect
+ class="BoundingBox"
+ x="5699"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect414"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path416"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g418"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id24"><rect
+ class="BoundingBox"
+ x="8099"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect421"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path423"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text425"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan427"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="6921"
+ id="tspan429"><tspan
+ id="tspan431"
+ style="fill:#000000;stroke:none">18</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g433"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id25"><rect
+ class="BoundingBox"
+ x="5699"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect436"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path438"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text440"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan442"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="6921"
+ id="tspan444"><tspan
+ id="tspan446"
+ style="fill:#000000;stroke:none">13</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g448"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id26"><rect
+ class="BoundingBox"
+ x="10499"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect451"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path453"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text455"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan457"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="6921"
+ id="tspan459"><tspan
+ id="tspan461"
+ style="fill:#000000;stroke:none">19</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g463"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id27"><rect
+ class="BoundingBox"
+ x="12899"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect466"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path468"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g470"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id28"><rect
+ class="BoundingBox"
+ x="15299"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect473"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path475"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text477"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan479"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="6921"
+ id="tspan481"><tspan
+ id="tspan483"
+ style="fill:#000000;stroke:none">21</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g485"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id29"><rect
+ class="BoundingBox"
+ x="12899"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect488"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path490"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text492"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan494"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="6921"
+ id="tspan496"><tspan
+ id="tspan498"
+ style="fill:#000000;stroke:none">20</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g500"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id30"><rect
+ class="BoundingBox"
+ x="3299"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect503"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path505"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text507"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan509"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="8321"
+ id="tspan511"><tspan
+ id="tspan513"
+ style="fill:#000000;stroke:none">14</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g515"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id31"><rect
+ class="BoundingBox"
+ x="5699"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect518"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path520"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g522"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id32"><rect
+ class="BoundingBox"
+ x="8099"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect525"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path527"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text529"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan531"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="8321"
+ id="tspan533"><tspan
+ id="tspan535"
+ style="fill:#000000;stroke:none">16</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g537"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id33"><rect
+ class="BoundingBox"
+ x="5699"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect540"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path542"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text544"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan546"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="8321"
+ id="tspan548"><tspan
+ id="tspan550"
+ style="fill:#000000;stroke:none">15</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g552"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id34"><rect
+ class="BoundingBox"
+ x="10499"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect555"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path557"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text559"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan561"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="8321"
+ id="tspan563"><tspan
+ id="tspan565"
+ style="fill:#000000;stroke:none">17</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g567"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id35"><rect
+ class="BoundingBox"
+ x="12899"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect570"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path572"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g574"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id36"><rect
+ class="BoundingBox"
+ x="15299"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect577"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path579"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text581"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan583"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="8321"
+ id="tspan585"><tspan
+ id="tspan587"
+ style="fill:#000000;stroke:none">23</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g589"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id37"><rect
+ class="BoundingBox"
+ x="12899"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect592"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path594"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text596"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan598"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="8321"
+ id="tspan600"><tspan
+ id="tspan602"
+ style="fill:#000000;stroke:none">22</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g604"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id38"><rect
+ class="BoundingBox"
+ x="3299"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect607"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path609"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text611"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan613"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="9721"
+ id="tspan615"><tspan
+ id="tspan617"
+ style="fill:#000000;stroke:none">24</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g619"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id39"><rect
+ class="BoundingBox"
+ x="5699"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect622"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path624"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g626"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id40"><rect
+ class="BoundingBox"
+ x="8099"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect629"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path631"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text633"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan635"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="9721"
+ id="tspan637"><tspan
+ id="tspan639"
+ style="fill:#000000;stroke:none">26</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g641"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id41"><rect
+ class="BoundingBox"
+ x="5699"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect644"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path646"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text648"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan650"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="9721"
+ id="tspan652"><tspan
+ id="tspan654"
+ style="fill:#000000;stroke:none">25</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g656"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id42"><rect
+ class="BoundingBox"
+ x="10499"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect659"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path661"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text663"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan665"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="9721"
+ id="tspan667"><tspan
+ id="tspan669"
+ style="fill:#000000;stroke:none">27</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g671"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id43"><rect
+ class="BoundingBox"
+ x="12899"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect674"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path676"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g678"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id44"><rect
+ class="BoundingBox"
+ x="15299"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect681"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path683"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text685"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan687"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="9721"
+ id="tspan689"><tspan
+ id="tspan691"
+ style="fill:#000000;stroke:none">29</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g693"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id45"><rect
+ class="BoundingBox"
+ x="12899"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect696"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path698"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text700"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan702"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="9721"
+ id="tspan704"><tspan
+ id="tspan706"
+ style="fill:#000000;stroke:none">28</tspan></tspan></tspan></text>
+</g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g708"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id46"><rect
+ class="BoundingBox"
+ x="5199"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect711"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,4000 970,0"
+ id="path713"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,4000 -450,-150 0,300 450,-150 z"
+ id="path715"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g717"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id47"><rect
+ class="BoundingBox"
+ x="5000"
+ y="4299"
+ width="1202"
+ height="802"
+ id="rect720"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,4300 -842,561"
+ id="path722"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,5100 458,-125 -167,-249 -291,374 z"
+ id="path724"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g726"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id48"><rect
+ class="BoundingBox"
+ x="5399"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect729"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,5400 770,0"
+ id="path731"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,5400 -450,-150 0,300 450,-150 z"
+ id="path733"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g735"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id49"><rect
+ class="BoundingBox"
+ x="7599"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect738"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,5400 770,0"
+ id="path740"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,5400 -450,-150 0,300 450,-150 z"
+ id="path742"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g744"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id50"><rect
+ class="BoundingBox"
+ x="9799"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect747"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,5400 970,0"
+ id="path749"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,5400 -450,-150 0,300 450,-150 z"
+ id="path751"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g753"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id51"><rect
+ class="BoundingBox"
+ x="9900"
+ y="4200"
+ width="1202"
+ height="802"
+ id="rect756"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,5000 -842,-561"
+ id="path758"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,4200 291,374 167,-249 -458,-125 z"
+ id="path760"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g762"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id52"><rect
+ class="BoundingBox"
+ x="9999"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect765"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,4000 970,0"
+ id="path767"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,4000 -450,-150 0,300 450,-150 z"
+ id="path769"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g771"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id53"><rect
+ class="BoundingBox"
+ x="12399"
+ y="3850"
+ width="1202"
+ height="301"
+ id="rect774"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,4000 770,0"
+ id="path776"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,4000 -450,-150 0,300 450,-150 z"
+ id="path778"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g780"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id54"><rect
+ class="BoundingBox"
+ x="14799"
+ y="3850"
+ width="1202"
+ height="301"
+ id="rect783"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,4000 770,0"
+ id="path785"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,4000 -450,-150 0,300 450,-150 z"
+ id="path787"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g789"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id55"><rect
+ class="BoundingBox"
+ x="14400"
+ y="4399"
+ width="1402"
+ height="602"
+ id="rect792"
+ style="fill:none;stroke:none" /><path
+ d="m 15800,4400 -1005,431"
+ id="path794"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 14400,5000 473,-39 -118,-276 -355,315 z"
+ id="path796"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g798"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id56"><rect
+ class="BoundingBox"
+ x="14599"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect801"
+ style="fill:none;stroke:none" /><path
+ d="m 14600,5400 970,0"
+ id="path803"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,5400 -450,-150 0,300 450,-150 z"
+ id="path805"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g807"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id57"><rect
+ class="BoundingBox"
+ x="5199"
+ y="6550"
+ width="1402"
+ height="301"
+ id="rect810"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,6700 970,0"
+ id="path812"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,6700 -450,-150 0,300 450,-150 z"
+ id="path814"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g816"
+ transform="translate(-3285.889,-3129.4446)"><g
+ id="id58"><rect
+ class="BoundingBox"
+ x="5000"
+ y="6999"
+ width="1202"
+ height="802"
+ id="rect819"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,7000 -842,561"
+ id="path821"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,7800 458,-125 -167,-249 -291,374 z"
+ id="path823"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g825"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id59"><rect
+ class="BoundingBox"
+ x="5399"
+ y="7950"
+ width="1202"
+ height="301"
+ id="rect828"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,8100 770,0"
+ id="path830"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,8100 -450,-150 0,300 450,-150 z"
+ id="path832"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g834"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id60"><rect
+ class="BoundingBox"
+ x="7599"
+ y="7950"
+ width="1202"
+ height="301"
+ id="rect837"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,8100 770,0"
+ id="path839"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,8100 -450,-150 0,300 450,-150 z"
+ id="path841"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g843"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id61"><rect
+ class="BoundingBox"
+ x="9799"
+ y="7950"
+ width="1402"
+ height="301"
+ id="rect846"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,8100 970,0"
+ id="path848"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,8100 -450,-150 0,300 450,-150 z"
+ id="path850"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g852"
+ transform="translate(-3285.889,-3129.4446)"><g
+ id="id62"><rect
+ class="BoundingBox"
+ x="9900"
+ y="6900"
+ width="1202"
+ height="802"
+ id="rect855"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,7700 -842,-561"
+ id="path857"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,6900 291,374 167,-249 -458,-125 z"
+ id="path859"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g861"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id63"><rect
+ class="BoundingBox"
+ x="9999"
+ y="6550"
+ width="1402"
+ height="301"
+ id="rect864"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,6700 970,0"
+ id="path866"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,6700 -450,-150 0,300 450,-150 z"
+ id="path868"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g870"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id64"><rect
+ class="BoundingBox"
+ x="12399"
+ y="6550"
+ width="1202"
+ height="301"
+ id="rect873"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,6700 770,0"
+ id="path875"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,6700 -450,-150 0,300 450,-150 z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g879"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id65"><rect
+ class="BoundingBox"
+ x="14799"
+ y="6550"
+ width="1202"
+ height="301"
+ id="rect882"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,6700 770,0"
+ id="path884"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,6700 -450,-150 0,300 450,-150 z"
+ id="path886"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g888"
+ transform="translate(-3285.889,-3129.4446)"><g
+ id="id66"><rect
+ class="BoundingBox"
+ x="14400"
+ y="7099"
+ width="1402"
+ height="602"
+ id="rect891"
+ style="fill:none;stroke:none" /><path
+ d="m 15800,7100 -1005,431"
+ id="path893"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 14400,7700 473,-39 -118,-276 -355,315 z"
+ id="path895"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g897"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id67"><rect
+ class="BoundingBox"
+ x="14599"
+ y="7950"
+ width="1402"
+ height="301"
+ id="rect900"
+ style="fill:none;stroke:none" /><path
+ d="m 14600,8100 970,0"
+ id="path902"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,8100 -450,-150 0,300 450,-150 z"
+ id="path904"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g906"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id68"><rect
+ class="BoundingBox"
+ x="5399"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect909"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,9600 770,0"
+ id="path911"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,9600 -450,-150 0,300 450,-150 z"
+ id="path913"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g915"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id69"><rect
+ class="BoundingBox"
+ x="7599"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect918"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,9600 770,0"
+ id="path920"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,9600 -450,-150 0,300 450,-150 z"
+ id="path922"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g924"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id70"><rect
+ class="BoundingBox"
+ x="9999"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect927"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,9600 770,0"
+ id="path929"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,9600 -450,-150 0,300 450,-150 z"
+ id="path931"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g933"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id71"><rect
+ class="BoundingBox"
+ x="12399"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect936"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,9600 770,0"
+ id="path938"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,9600 -450,-150 0,300 450,-150 z"
+ id="path940"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g942"
+ transform="translate(-3285.889,-3185.889)"><g
+ id="id72"><rect
+ class="BoundingBox"
+ x="14799"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect945"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,9600 770,0"
+ id="path947"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,9600 -450,-150 0,300 450,-150 z"
+ id="path949"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g></svg>
diff --git a/Documentation/media/uapi/v4l/pipeline.dot b/Documentation/media/uapi/v4l/pipeline.dot
new file mode 100644
index 000000000000..02d7fcf12b26
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pipeline.dot
@@ -0,0 +1,12 @@
+digraph board {
+	rankdir=TB
+	colorscheme=x11
+	scaler [label="{<scaler_0> 0} | Host\nScaler | {<scaler_1> 1} ", shape=Mrecord, style=filled, fillcolor=lightblue]
+	frontend [label="{<frontend_0> 0} | Host\nFrontend | {<frontend_1> 1}", shape=Mrecord, style=filled, fillcolor=lightblue]
+	sensor [label="Sensor | {<sensor_0> 0}", shape=Mrecord, style=filled, fillcolor=aquamarine]
+	io [label="{<io_0> 0} | V4L I/O", shape=Mrecord, style=filled, fillcolor=aquamarine]
+
+	sensor:sensor_0 -> frontend:frontend_0 [color=blue, label="HQ: 2592x1968\nHS: 1296x984"]
+	frontend:frontend_1 -> scaler:scaler_0 [color=blue, label="HQ: 2592x1968\nHS: 1296x984"]
+	scaler:scaler_1 -> io:io_0 [color=blue, label="HQ: 1280x720\nHS: 1280x720"]
+}
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
index 9f250a1df2f6..32d0c8743460 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
@@ -33,8 +33,8 @@ Layout of macroblocks in memory is presented in the following figure.
 
 .. _nv12mt:
 
-.. figure:: pixfmt-nv12mt_files/nv12mt.*
-    :alt: nv12mt.png
+.. figure:: nv12mt.*
+    :alt: nv12mt.pdf / nv12mt.svg
     :align: center
 
     V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
@@ -50,8 +50,8 @@ interleaved. Height of the buffer is aligned to 32.
 
 .. _nv12mt_ex:
 
-.. figure:: pixfmt-nv12mt_files/nv12mt_example.*
-    :alt: nv12mt_example.png
+.. figure:: nv12mt_example.*
+    :alt: nv12mt_example.pdf / nv12mt_example.svg
     :align: center
 
     Example V4L2_PIX_FMT_NV12MT memory layout of macroblocks
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png
deleted file mode 100644
index 41401860fb73..000000000000
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png
deleted file mode 100644
index 7775f5d7cc46..000000000000
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/selection-api-003.rst b/Documentation/media/uapi/v4l/selection-api-003.rst
index 15cb3b79f12c..21686f93c38f 100644
--- a/Documentation/media/uapi/v4l/selection-api-003.rst
+++ b/Documentation/media/uapi/v4l/selection-api-003.rst
@@ -7,8 +7,8 @@ Selection targets
 
 .. _sel-targets-capture:
 
-.. figure:: selection-api-003_files/selection.*
-    :alt: selection.png
+.. figure:: selection.*
+    :alt: selection.pdf / selection.svg
     :align: center
 
     Cropping and composing targets
diff --git a/Documentation/media/uapi/v4l/selection-api-003_files/selection.png b/Documentation/media/uapi/v4l/selection-api-003_files/selection.png
deleted file mode 100644
index bfc523eae570..000000000000
--- a/Documentation/media/uapi/v4l/selection-api-003_files/selection.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/selection.svg b/Documentation/media/uapi/v4l/selection.svg
new file mode 100644
index 000000000000..d309187af967
--- /dev/null
+++ b/Documentation/media/uapi/v4l/selection.svg
@@ -0,0 +1,5812 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="4226.3345"
+ height="1686.8481"
+ id="svg2"
+ sodipodi:version="0.32"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="selection.svg"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape"
+ version="1.0"
+ style="display:inline;enable-background:new"
+ inkscape:export-filename="/home/cheeseness/Documents/LCA09/mascot/tuz_final.png"
+ inkscape:export-xdpi="100.03588"
+ inkscape:export-ydpi="100.03588">
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ gridtolerance="10000"
+ guidetolerance="10"
+ objecttolerance="10"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.32297491"
+ inkscape:cx="2113.1672"
+ inkscape:cy="843.42407"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer16"
+ showgrid="false"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ showguides="false"
+ inkscape:guide-bbox="true"
+ units="mm"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" />
+ <defs
+ id="defs4">
+ <pattern
+ inkscape:collect="always"
+ xlink:href="#Strips1_1"
+ id="pattern5557"
+ patternTransform="matrix(5.4431804,0,0,10.10048,1808.3554,-48.222348)" />
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7188"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <pattern
+ inkscape:isstock="true"
+ inkscape:stockid="Stripes 1:1"
+ id="Strips1_1"
+ patternTransform="translate(0,0) scale(10,10)"
+ height="1"
+ width="2"
+ patternUnits="userSpaceOnUse"
+ inkscape:collect="always">
+ <rect
+ id="rect5945"
+ height="2"
+ width="1"
+ y="-0.5"
+ x="0"
+ style="fill:#f815bb;stroke:none" />
+ </pattern>
+ <linearGradient
+ id="linearGradient10954"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#d9f90b;stop-opacity:1;"
+ offset="0"
+ id="stop10956" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient9165"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#000000;stop-opacity:0.31330472;"
+ offset="0"
+ id="stop9167" />
+ </linearGradient>
+ <filter
+ inkscape:collect="always"
+ x="-0.084654994"
+ width="1.16931"
+ y="-0.36592469"
+ height="1.7318494"
+ id="filter11361">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="4.5740586"
+ id="feGaussianBlur11363" />
+ </filter>
+ <linearGradient
+ id="linearGradient7622">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop7624" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop7626" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4113">
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="0"
+ id="stop4115" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop4117" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient3660">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop3662" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3664" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3627">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop3629" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop3631" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient2843">
+ <stop
+ id="stop2845"
+ offset="0"
+ style="stop-color:#000000;stop-opacity:1;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0.02188784"
+ id="stop2847" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0.75866222"
+ id="stop2849" />
+ <stop
+ id="stop2851"
+ offset="0.88508981"
+ style="stop-color:#232323;stop-opacity:1;" />
+ <stop
+ id="stop2853"
+ offset="1"
+ style="stop-color:#595959;stop-opacity:1;" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient8964">
+ <stop
+ style="stop-color:#1a1a1a;stop-opacity:1;"
+ offset="0"
+ id="stop8966" />
+ <stop
+ style="stop-color:#1a1a1a;stop-opacity:0;"
+ offset="1"
+ id="stop8968" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient8952">
+ <stop
+ style="stop-color:#0a0c0c;stop-opacity:1;"
+ offset="0"
+ id="stop8954" />
+ <stop
+ style="stop-color:#1f2727;stop-opacity:0;"
+ offset="1"
+ id="stop8956" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient8430">
+ <stop
+ style="stop-color:#1e2323;stop-opacity:1;"
+ offset="0"
+ id="stop8432" />
+ <stop
+ id="stop8438"
+ offset="0.55992389"
+ style="stop-color:#181d1d;stop-opacity:1;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop8434" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient8398">
+ <stop
+ style="stop-color:#283131;stop-opacity:0;"
+ offset="0"
+ id="stop8400" />
+ <stop
+ id="stop8402"
+ offset="0.5125587"
+ style="stop-color:#1e2424;stop-opacity:0;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop8404" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4870">
+ <stop
+ style="stop-color:#c7bd80;stop-opacity:1;"
+ offset="0"
+ id="stop4872" />
+ <stop
+ style="stop-color:#c7bd80;stop-opacity:0;"
+ offset="1"
+ id="stop4874" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient4862">
+ <stop
+ style="stop-color:#e2e2e2;stop-opacity:1;"
+ offset="0"
+ id="stop4864" />
+ <stop
+ style="stop-color:#e2e2e2;stop-opacity:0;"
+ offset="1"
+ id="stop4866" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4478">
+ <stop
+ style="stop-color:#f9eed3;stop-opacity:1;"
+ offset="0"
+ id="stop4480" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4482" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4106">
+ <stop
+ style="stop-color:#d9e002;stop-opacity:1;"
+ offset="0"
+ id="stop4108" />
+ <stop
+ id="stop4114"
+ offset="0.5"
+ style="stop-color:#a9ae01;stop-opacity:1;" />
+ <stop
+ style="stop-color:#717501;stop-opacity:1;"
+ offset="1"
+ id="stop4110" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4084">
+ <stop
+ style="stop-color:#7d7d00;stop-opacity:1;"
+ offset="0"
+ id="stop4086" />
+ <stop
+ id="stop4088"
+ offset="0.3636601"
+ style="stop-color:#c6c700;stop-opacity:1;" />
+ <stop
+ style="stop-color:#f6f800;stop-opacity:1;"
+ offset="1"
+ id="stop4090" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4041">
+ <stop
+ id="stop4043"
+ offset="0"
+ style="stop-color:#ffff00;stop-opacity:1;" />
+ <stop
+ id="stop4045"
+ offset="1"
+ style="stop-color:#ffff00;stop-opacity:0;" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4025">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4027" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4031" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4013">
+ <stop
+ style="stop-color:#ffff00;stop-opacity:1;"
+ offset="0"
+ id="stop4015" />
+ <stop
+ style="stop-color:#b2b200;stop-opacity:1;"
+ offset="1"
+ id="stop4017" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3985">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop3987" />
+ <stop
+ style="stop-color:#1d1d1d;stop-opacity:1;"
+ offset="1"
+ id="stop3989" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3961">
+ <stop
+ style="stop-color:#283131;stop-opacity:0;"
+ offset="0"
+ id="stop3963" />
+ <stop
+ id="stop3965"
+ offset="0.5"
+ style="stop-color:#1e2424;stop-opacity:1;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop3967" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3951">
+ <stop
+ id="stop3953"
+ offset="0"
+ style="stop-color:#344040;stop-opacity:1;" />
+ <stop
+ style="stop-color:#222929;stop-opacity:1;"
+ offset="0.5"
+ id="stop3955" />
+ <stop
+ id="stop3957"
+ offset="1"
+ style="stop-color:#000000;stop-opacity:1;" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3909">
+ <stop
+ style="stop-color:#283131;stop-opacity:1;"
+ offset="0"
+ id="stop3911" />
+ <stop
+ id="stop3917"
+ offset="0.5"
+ style="stop-color:#1e2424;stop-opacity:1;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop3913" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3537">
+ <stop
+ style="stop-color:#ada469;stop-opacity:1;"
+ offset="0"
+ id="stop3539" />
+ <stop
+ id="stop3545"
+ offset="0.81132078"
+ style="stop-color:#ada469;stop-opacity:1;" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="1"
+ id="stop3541" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3317">
+ <stop
+ style="stop-color:#cfc690;stop-opacity:1"
+ offset="0"
+ id="stop3319" />
+ <stop
+ id="stop3321"
+ offset="0.21161865"
+ style="stop-color:#afa775;stop-opacity:1;" />
+ <stop
+ id="stop3323"
+ offset="0.53408515"
+ style="stop-color:#615c3a;stop-opacity:1;" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0.76504093"
+ id="stop3325" />
+ <stop
+ id="stop3327"
+ offset="1"
+ style="stop-color:#403518;stop-opacity:1;" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3317"
+ id="radialGradient3315"
+ cx="543.6698"
+ cy="147.3131"
+ fx="543.6698"
+ fy="147.3131"
+ r="47.863216"
+ gradientTransform="matrix(2.1382256,0,0,2.3382884,-77.03847,-101.68704)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3537"
+ id="radialGradient3543"
+ cx="385"
+ cy="237.00504"
+ fx="385"
+ fy="237.00504"
+ r="86.928574"
+ gradientTransform="matrix(1,0,0,0.8562038,0,34.080427)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3909"
+ id="radialGradient3915"
+ cx="418.30365"
+ cy="342.47794"
+ fx="418.30365"
+ fy="342.47794"
+ r="131.4509"
+ gradientTransform="matrix(1.3957347,0.6211056,-0.4244067,0.9537174,-15.061913,-227.96711)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3951"
+ id="radialGradient3933"
+ cx="397.16388"
+ cy="336.95245"
+ fx="397.16388"
+ fy="336.95245"
+ r="36.75"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.9449972,2.4894837e-7,-2.4894833e-7,1.9449969,-375.31868,-318.41912)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3961"
+ id="linearGradient3959"
+ x1="398.21429"
+ y1="343.52289"
+ x2="379.28571"
+ y2="265.30862"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)" />
+ <filter
+ inkscape:collect="always"
+ id="filter3981"
+ x="-0.30000001"
+ width="1.6"
+ y="-0.30000001"
+ height="1.6">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2"
+ id="feGaussianBlur3983" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3985"
+ id="radialGradient3991"
+ cx="402.48898"
+ cy="317.23578"
+ fx="402.48898"
+ fy="317.23578"
+ r="23.714285"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(4.3776616,0,0,4.3776616,-1358.3025,-1070.7357)" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3999">
+ <path
+ style="display:inline;opacity:1;fill:#f5ff04;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 12.61031,-67.04463 3.21429,-93.92857 -9.43424,-26.99328 -34.96741,-59.12448 -66.42857,-69.64285 -31.03327,-10.37532 -65.01776,-4.84837 -84.28571,5.71428 z"
+ id="path4001"
+ sodipodi:nodetypes="czzczzzzc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4013"
+ id="radialGradient4056"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
+ cx="228.81355"
+ cy="440.26971"
+ fx="228.81355"
+ fy="440.26971"
+ r="119.17509" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4041"
+ id="radialGradient4060"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
+ cx="275.4422"
+ cy="335.34866"
+ fx="275.4422"
+ fy="335.34866"
+ r="36.75" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4025"
+ id="radialGradient4062"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
+ cx="275.4422"
+ cy="335.34866"
+ fx="275.4422"
+ fy="335.34866"
+ r="36.75" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4084"
+ id="linearGradient4082"
+ gradientUnits="userSpaceOnUse"
+ x1="182.35046"
+ y1="256.11136"
+ x2="145.53348"
+ y2="542.20502" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath4100">
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.9000755px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 265.93541,126.68393 -18.76721,168.86308 174.10543,-73.12068 61.9544,88.65883 57.8844,-31.9903 -37.53442,-180.059677 -237.6426,27.648747 z"
+ id="path4102"
+ sodipodi:nodetypes="ccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4106"
+ id="radialGradient4112"
+ cx="250.22678"
+ cy="475.09763"
+ fx="250.22678"
+ fy="475.09763"
+ r="95.98877"
+ gradientTransform="matrix(1.2259004,-0.7077739,0.1413989,0.2449102,322.22326,608.91815)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient4484"
+ x1="412.08926"
+ y1="404.91574"
+ x2="417.375"
+ y2="401.82648"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient4486"
+ x1="411.91071"
+ y1="404.91577"
+ x2="417.375"
+ y2="401.82648"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient4488"
+ x1="411.91071"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient4490"
+ x1="412.08926"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient4492"
+ x1="411.73212"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4862"
+ id="radialGradient4868"
+ cx="429.56738"
+ cy="377.42877"
+ fx="429.56738"
+ fy="377.42877"
+ r="72.079735"
+ gradientTransform="matrix(1,0,0,0.618034,0,144.16496)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4870"
+ id="radialGradient4876"
+ cx="437.6991"
+ cy="391.21735"
+ fx="437.6991"
+ fy="391.21735"
+ r="36.611931"
+ gradientTransform="matrix(1,0,0,0.618034,0,149.43174)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4013"
+ id="radialGradient3585"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
+ cx="228.81355"
+ cy="440.26971"
+ fx="228.81355"
+ fy="440.26971"
+ r="119.17509" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4084"
+ id="linearGradient3587"
+ gradientUnits="userSpaceOnUse"
+ x1="182.35046"
+ y1="256.11136"
+ x2="145.53348"
+ y2="542.20502" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8514">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8516"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8604">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8606"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8610">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8612"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8616">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8618"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8622">
+ <path
+ style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 821.64329,477.88997 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
+ id="path8624"
+ sodipodi:nodetypes="czzzzzzczczczczzzc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8642">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ id="path8644"
+ sodipodi:nodetypes="czzcczcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8658">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0b0b0b;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -99.77493,25.9619 -142.85715,35.7143 -43.08222,9.7524 -117.26443,34.816 -156.91262,27.2654 -39.64818,-7.5506 -89.51595,-64.4083 -89.51595,-64.4083 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ id="path8660"
+ sodipodi:nodetypes="czzzcczzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8802"
+ x="-0.35311759"
+ width="1.7062352"
+ y="-0.1817714"
+ height="1.3635428">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="48.038491"
+ id="feGaussianBlur8804" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8806"
+ x="-0.61142862"
+ width="2.2228572"
+ y="-0.14930232"
+ height="1.2986046">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="37.830213"
+ id="feGaussianBlur8808" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8810"
+ x="-0.23519406"
+ width="1.4703881"
+ y="-0.24500646"
+ height="1.4900129">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="58.328041"
+ id="feGaussianBlur8812" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8814"
+ x="-0.20466694"
+ width="1.4093339"
+ y="-0.29007819"
+ height="1.5801564">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="22.300169"
+ id="feGaussianBlur8816" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8818"
+ x="-0.34381232"
+ width="1.6876246"
+ y="-0.18433961"
+ height="1.3686792">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="34.542167"
+ id="feGaussianBlur8820" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8822"
+ x="-0.2742857"
+ width="1.5485713"
+ y="-0.21333334"
+ height="1.4266667">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="11.313708"
+ id="feGaussianBlur8824" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8826"
+ x="-0.25894088"
+ width="1.5178818"
+ y="-0.2236412"
+ height="1.4472824">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.631544"
+ id="feGaussianBlur8828" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8856"
+ x="-0.3253231"
+ width="1.6506462"
+ y="-0.19013336"
+ height="1.3802667">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="28.712591"
+ id="feGaussianBlur8858" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8860"
+ x="-0.38093024"
+ width="1.7618605"
+ y="-0.17518716"
+ height="1.3503743">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.304015"
+ id="feGaussianBlur8862" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8888"
+ x="-0.2112188"
+ width="1.4224375"
+ y="-0.16808605"
+ height="1.3361721">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="8.3693583"
+ id="feGaussianBlur8890" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8892"
+ x="-0.18692794"
+ width="1.3738559"
+ y="-0.23646873"
+ height="1.4729375">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="31.21228"
+ id="feGaussianBlur8894" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8906">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8908"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8940"
+ x="-0.25152978"
+ width="1.5030596"
+ y="-0.053035267"
+ height="1.1060705">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="13.024603"
+ id="feGaussianBlur8942" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8952"
+ id="linearGradient8958"
+ x1="609.31244"
+ y1="239.46866"
+ x2="560.83142"
+ y2="262.86206"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8964"
+ id="linearGradient8970"
+ x1="603.84064"
+ y1="627.85303"
+ x2="616.24396"
+ y2="585.42664"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)" />
+ <filter
+ inkscape:collect="always"
+ id="filter9020"
+ x="-0.32861114"
+ width="1.6572223"
+ y="-0.182"
+ height="1.364">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="20.912684"
+ id="feGaussianBlur9022" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9024"
+ x="-0.55453134"
+ width="2.1090627"
+ y="-0.51434779"
+ height="2.0286956">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="20.912684"
+ id="feGaussianBlur9026" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9044"
+ x="-0.32631579"
+ width="1.6526316"
+ y="-0.84545463"
+ height="2.6909094">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="21.92031"
+ id="feGaussianBlur9046" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9048"
+ x="-0.40879121"
+ width="1.8175824"
+ y="-0.71538466"
+ height="2.4307692">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="21.92031"
+ id="feGaussianBlur9050" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter3587"
+ x="-0.1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="8.881432"
+ id="feGaussianBlur3589" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3602">
+ <path
+ sodipodi:nodetypes="czzzzzzczczczczzzc"
+ id="path3604"
+ d="m 647.61204,540.04601 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
+ style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4120"
+ x="-0.2770822"
+ width="1.5541644"
+ y="-0.32482043"
+ height="1.6496409">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.956289"
+ id="feGaussianBlur4122" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3631">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3633"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3665">
+ <path
+ sodipodi:nodetypes="czzcczcc"
+ id="path3667"
+ d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3677">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3679"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter3898">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.892985"
+ id="feGaussianBlur3900" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4130"
+ x="-0.49509686"
+ width="1.9901937"
+ y="-0.26708817"
+ height="1.5341763">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.730622"
+ id="feGaussianBlur4132" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4141"
+ x="-0.40611032"
+ width="1.8122206"
+ y="-0.30260596"
+ height="1.6052119">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="9.8586086"
+ id="feGaussianBlur4143" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath4177">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path4179"
+ d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4185">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="3.6164709"
+ id="feGaussianBlur4187" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4105">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="3.8640966"
+ id="feGaussianBlur4107" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath2833">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#292929;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ id="path2835"
+ sodipodi:nodetypes="czzzcczzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2843"
+ id="linearGradient2841"
+ gradientUnits="userSpaceOnUse"
+ x1="347.89655"
+ y1="1070.2124"
+ x2="275.58191"
+ y2="867.97992" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3627"
+ id="linearGradient3688"
+ gradientUnits="userSpaceOnUse"
+ x1="699.32867"
+ y1="269.76755"
+ x2="698.97504"
+ y2="346.1351" />
+ <mask
+ maskUnits="userSpaceOnUse"
+ id="mask3684">
+ <ellipse
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient3688);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.43724918px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path3686"
+ transform="translate(-174.03125,62.156036)"
+ cx="579.474"
+ cy="260.57516"
+ rx="192.6866"
+ ry="164.04877" />
+ </mask>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3622">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 266.27183,924.57186 c -1.40727,18.80121 -1.1449,32.75103 2.08174,49.30328 3.22665,16.55234 16.40608,45.90736 20.3344,63.18376 3.92622,17.2671 2.69413,38.3096 -12.45944,51.1482 -15.31761,12.9774 -42.05127,21.5989 -67.8323,15.7338 -25.78106,-5.8653 -69.54907,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045183,939.86194 41.867508,909.43681 27.689836,879.01169 29.207903,872.71824 33.747793,863.90708 24.381071,839.38658 21.334081,813.84027 0.03533552,788.33044 30.360815,791.44488 43.915625,815.28677 60.161025,835.47019 54.631129,787.39416 42.10631,771.05369 31.787073,744.74589 c 29.994295,6.08166 50.57936,31.8724 63.979783,72.7125 9.554154,-3.91791 18.237764,-9.37294 30.187414,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.687255,-101.06994 53.441965,5.67033 83.657025,80.63932 78.971425,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24896,-38.34702 -21.04781,-76.8679 -3.65971,-118.64818 0,0 48.28678,65.43687 54.38966,85.80577 6.10287,20.36891 1.51881,38.70052 1.51881,38.70052 0,0 16.95957,31.08529 20.29392,51.09413 3.3731,20.24135 -3.53269,59.10332 -4.94582,77.98324 z"
+ id="path3624"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3636">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3638"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3660"
+ id="linearGradient3666"
+ x1="1255.7386"
+ y1="667.09216"
+ x2="893.69995"
+ y2="858.01099"
+ gradientUnits="userSpaceOnUse" />
+ <filter
+ inkscape:collect="always"
+ id="filter3779"
+ x="-0.087980822"
+ width="1.1759616"
+ y="-0.17728332"
+ height="1.3545666">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="16.340344"
+ id="feGaussianBlur3781" />
+ </filter>
+ <filter
+ id="filter3785"
+ inkscape:label="White Fur">
+ <feTurbulence
+ id="feTurbulence3787"
+ type="fractalNoise"
+ baseFrequency="0.24044943820224721"
+ numOctaves="10"
+ seed="655"
+ result="result0" />
+ <feDisplacementMap
+ id="feDisplacementMap3789"
+ in="SourceGraphic"
+ in2="result0"
+ scale="62"
+ xChannelSelector="B"
+ yChannelSelector="G" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter3677">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2.0397518"
+ id="feGaussianBlur3679" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3722">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3724"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3986">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3988"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3992">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3994"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3998">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 178.21428,274.14789 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401287 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.3574 -122.78647,50.053 -187.06988,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path4000"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4002"
+ x="-0.24334238"
+ width="1.4866848"
+ y="-0.39104807"
+ height="1.7820961">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="14.589518"
+ id="feGaussianBlur4004" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4010"
+ x="-0.14577261"
+ width="1.2915452"
+ y="-0.23523259"
+ height="1.4704652">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="4.4442907"
+ id="feGaussianBlur4012" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4053">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.6062947"
+ id="feGaussianBlur4055" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4079">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="6.5887624"
+ id="feGaussianBlur4081" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4083">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.5052066"
+ id="feGaussianBlur4085" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4113"
+ id="radialGradient4119"
+ cx="296.33783"
+ cy="427.17749"
+ fx="296.33783"
+ fy="427.17749"
+ r="19.704132"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(2.9797125,0,0,2.9797125,-599.28727,-827.0855)" />
+ <filter
+ inkscape:collect="always"
+ id="filter6949"
+ x="-0.10294895"
+ width="1.2058979"
+ y="-0.34224695"
+ height="1.6844939">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6951" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6953"
+ x="-0.098320946"
+ width="1.1966419"
+ y="-0.19750816"
+ height="1.3950163">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6955" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6957"
+ x="-0.098213427"
+ width="1.1964267"
+ y="-0.19838208"
+ height="1.3967642">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6959" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6961"
+ x="-0.09919104"
+ width="1.1983821"
+ y="-0.22643611"
+ height="1.4528722">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6963" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6965"
+ x="-0.099081434"
+ width="1.1981629"
+ y="-0.22529824"
+ height="1.4505965">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6967" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6969"
+ x="-0.10450897"
+ width="1.2090179"
+ y="-0.40468886"
+ height="1.8093777">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6971" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6973"
+ x="-0.10330495"
+ width="1.2066098"
+ y="-0.36439717"
+ height="1.7287945">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6975" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6977"
+ x="-0.10224481"
+ width="1.2044896"
+ y="-0.32371372"
+ height="1.6474274">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6979" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6981"
+ x="-0.10052545"
+ width="1.2010509"
+ y="-0.2742162"
+ height="1.5484324">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6983" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6985"
+ x="-0.098428868"
+ width="1.1968577"
+ y="-0.20853186"
+ height="1.4170637">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6987" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6989"
+ x="-0.098428868"
+ width="1.1968577"
+ y="-0.20287035"
+ height="1.4057407">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6991" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6993"
+ x="-0.098213255"
+ width="1.1964265"
+ y="-0.19838208"
+ height="1.3967642">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6995" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6997">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6999" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7001">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur7003" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7285"
+ x="-0.030884685"
+ width="1.0617694"
+ y="-0.10267408"
+ height="1.2053483">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7287" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7289">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7291" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7293">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7295" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7297">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7299" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7301">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7303" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7305">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7307" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7309">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7311" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7313">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7315" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7317">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7319" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7321">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7323" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7325"
+ x="-0.031352691"
+ width="1.0627054"
+ y="-0.12140666"
+ height="1.2428133">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7327" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7329"
+ x="-0.030991485"
+ width="1.061983"
+ y="-0.10931916"
+ height="1.2186383">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7331" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7333">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7335" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7337">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7339" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7345">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.7233839"
+ id="feGaussianBlur7347" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7421">
+ <path
+ sodipodi:type="inkscape:offset"
+ inkscape:radius="0"
+ inkscape:original="M 1111.4062 -285.9375 L 1107.4688 -284.0625 C 1107.4283 -284.05228 1107.3692 -284.04201 1107.3438 -284.03125 C 1106.925 -283.8184 1107.1791 -283.93067 1106.6875 -283.71875 C 1106.2014 -283.50919 1104.9499 -283.13456 1102.5938 -282.25 C 1099.2626 -280.99942 1096.7895 -280.10016 1095.5938 -279.1875 C 1094.0576 -279.16623 1091.8733 -278.95419 1089.9375 -278.46875 C 1086.956 -277.72108 1085.0823 -277.29474 1083.1875 -276.875 C 1081.2927 -276.45527 1081.512 -276.23281 1080.3125 -276 C 1079.0159 -275.74833 1078.5911 -276.00899 1074.875 -275.21875 C 1071.3851 -274.4766 1065.9802 -273.28768 1064.7188 -272.53125 C 1063.1348 -272.71203 1060.8513 -272.85303 1058.875 -272.5625 C 1055.8346 -272.11554 1053.9588 -271.88974 1052.0312 -271.65625 C 1051.3758 -271.57687 1050.9902 -271.45547 1050.6875 -271.375 C 1050.2613 -271.24334 1050.0017 -271.11498 1049.3125 -271.03125 C 1048.0009 -270.87188 1047.5503 -271.18808 1043.7812 -270.75 C 1040.2273 -270.33691 1034.7758 -269.47718 1033.5312 -268.8125 C 1031.9322 -269.10979 1029.6735 -269.34669 1027.6875 -269.15625 C 1024.6287 -268.86293 1022.7155 -268.67226 1020.7812 -268.5 C 1018.847 -268.32773 1019.0926 -268.07763 1017.875 -267.96875 C 1016.5588 -267.85105 1016.1152 -268.13238 1012.3438 -267.71875 C 1008.8017 -267.3303 1003.3359 -266.50948 1002.0625 -265.84375 C 1000.4636 -266.13844 998.1753 -266.35076 996.1875 -266.15625 C 993.12921 -265.857 991.2463 -265.67601 989.3125 -265.5 C 988.65501 -265.44015 988.27245 -265.32144 987.96875 -265.25 C 987.54105 -265.13104 987.28525 -265.03193 986.59375 -264.96875 C 985.27775 -264.84849 984.834 -265.16363 981.0625 -264.75 C 977.50631 -264.35998 972.0569 -263.51084 970.8125 -262.84375 C 969.21381 -263.13793 966.95265 -263.36747 964.96875 -263.15625 C 961.91305 -262.83092 959.9947 -262.63001 958.0625 -262.4375 C 956.13031 -262.24499 956.37275 -261.99662 955.15625 -261.875 C 953.84137 -261.74353 953.3932 -262.03954 949.625 -261.59375 C 946.08611 -261.17509 940.6473 -260.30158 939.375 -259.625 C 937.77741 -259.90604 935.51505 -260.04543 933.53125 -259.8125 C 930.47927 -259.45413 928.58625 -259.24464 926.65625 -259.03125 C 926.00007 -258.95869 925.6156 -258.85856 925.3125 -258.78125 C 924.88571 -258.65402 924.6276 -258.51405 923.9375 -258.4375 C 922.62411 -258.29181 922.17015 -258.61152 918.40625 -258.125 C 914.85737 -257.66624 909.4276 -256.70598 908.1875 -256 C 906.59441 -256.24424 904.3537 -256.38135 902.375 -256.125 C 899.32741 -255.73018 897.4243 -255.47655 895.5 -255.21875 C 893.57571 -254.96096 893.7739 -254.72522 892.5625 -254.5625 C 891.25301 -254.3866 890.8153 -254.66688 887.0625 -254.09375 C 883.53821 -253.55551 878.1393 -252.39458 876.875 -251.65625 C 875.28751 -251.85979 873.0295 -251.91098 871.0625 -251.5625 C 868.03631 -251.02638 866.1636 -250.70081 864.25 -250.375 C 863.59941 -250.26423 863.2363 -250.10406 862.9375 -250 C 862.51681 -249.83512 862.27405 -249.6687 861.59375 -249.53125 C 860.29905 -249.26966 859.86665 -249.53745 856.15625 -248.71875 C 852.65777 -247.9468 847.31035 -246.33582 846.09375 -245.5 C 844.53085 -245.57745 842.33625 -245.41472 840.40625 -244.90625 C 837.43387 -244.12312 835.58855 -243.67416 833.71875 -243.15625 C 831.84875 -242.63835 832.0521 -242.38897 830.875 -242.0625 C 829.60251 -241.7096 829.17795 -241.95541 825.53125 -240.875 C 822.10657 -239.86037 816.88185 -237.94183 815.65625 -237.03125 C 814.11747 -237.01851 811.93645 -236.75903 810.03125 -236.15625 C 807.10027 -235.22891 805.2809 -234.69783 803.4375 -234.09375 C 802.81071 -233.88837 802.44585 -233.70117 
802.15625 -233.5625 C 801.74867 -233.34889 801.50295 -233.15375 800.84375 -232.9375 C 799.58925 -232.52596 799.1576 -232.74846 795.5625 -231.5 C 792.17261 -230.32283 786.96755 -228.2863 785.78125 -227.34375 C 784.25737 -227.28408 782.1312 -226.94888 780.25 -226.28125 C 777.35261 -225.25296 775.55095 -224.60577 773.71875 -223.96875 C 771.88655 -223.33174 772.0909 -223.12021 770.9375 -222.71875 C 769.69071 -222.28479 769.27395 -222.51903 765.71875 -221.15625 C 762.38005 -219.87645 757.23165 -217.6737 756.03125 -216.6875 C 754.52407 -216.57981 752.39555 -216.1887 750.53125 -215.46875 C 747.66307 -214.36115 745.90735 -213.68719 744.09375 -213 C 743.47705 -212.76637 743.0973 -212.55797 742.8125 -212.40625 C 742.81251 -212.40625 742.8125 -212.37673 742.8125 -212.375 L 734.8125 -209.1875 L 736.625 -194.46875 C 736.36701 -194.52956 742.8125 -191.15625 742.8125 -191.15625 C 743.03891 -191.30093 743.26145 -191.42886 743.53125 -191.53125 C 744.61177 -191.94123 745.70285 -191.74702 749.53125 -193.21875 C 753.35977 -194.69049 754.7553 -195.22373 755.4375 -195.625 C 756.11711 -196.02478 757.04925 -196.50437 757.65625 -197.15625 C 759.48317 -197.294 761.22705 -197.64948 762.59375 -198.15625 C 765.56175 -199.25677 767.4691 -199.96244 769.375 -200.625 C 771.28081 -201.28754 771.72915 -202.03987 772.78125 -202.40625 C 773.87287 -202.78636 774.97635 -202.57163 778.84375 -203.9375 C 782.71115 -205.30336 784.1269 -205.76458 784.8125 -206.15625 C 785.51361 -206.55677 786.5133 -207.08923 787.125 -207.75 C 789.09581 -207.80466 790.94195 -208.13463 792.40625 -208.625 C 795.40777 -209.63008 797.3324 -210.24671 799.25 -210.875 C 800.78861 -211.3791 801.42415 -211.92177 802.15625 -212.3125 C 802.38647 -212.44681 802.63215 -212.56623 802.90625 -212.65625 C 804.00457 -213.01673 805.0877 -212.73762 809 -213.96875 C 812.91231 -215.19988 814.366 -215.6417 815.0625 -216 C 815.75641 -216.35697 816.6926 -216.79261 817.3125 -217.40625 C 819.17771 -217.42891 820.94835 -217.67308 822.34375 -218.09375 C 825.37415 -219.00729 827.33615 -219.52385 829.28125 -220.0625 C 831.22637 -220.60114 831.70745 -221.32702 832.78125 -221.625 C 833.89527 -221.93415 835.00125 -221.61761 838.96875 -222.65625 C 842.93625 -223.69488 844.38625 -224.08898 845.09375 -224.40625 C 845.82855 -224.73584 846.90765 -225.15997 847.53125 -225.78125 C 849.52907 -225.66525 851.3887 -225.80134 852.875 -226.15625 C 855.95311 -226.89125 857.9584 -227.25719 859.9375 -227.65625 C 861.52541 -227.97643 862.1818 -228.4468 862.9375 -228.75 C 863.17501 -228.8568 863.4044 -228.94276 863.6875 -229 C 864.82091 -229.22919 865.99215 -228.79107 870.03125 -229.5 C 874.07067 -230.20893 875.5315 -230.42709 876.25 -230.6875 C 876.96581 -230.94694 877.95435 -231.25474 878.59375 -231.78125 C 880.51795 -231.54176 882.34165 -231.55672 883.78125 -231.78125 C 886.90767 -232.26887 888.9358 -232.48192 890.9375 -232.75 C 892.93921 -233.01807 893.42625 -233.69514 894.53125 -233.84375 C 895.67767 -233.99793 896.8071 -233.54218 900.875 -234.0625 C 904.94281 -234.58282 906.43525 -234.75823 907.15625 -235 C 907.89337 -235.24714 908.95435 -235.58623 909.59375 -236.125 C 911.64375 -235.78947 913.56745 -235.72704 915.09375 -235.90625 C 918.23595 -236.27521 920.27375 -236.46561 922.28125 -236.6875 C 923.89207 -236.86552 924.5459 -237.2957 925.3125 -237.53125 C 925.55341 -237.61677 925.80655 -237.68685 926.09375 -237.71875 C 927.24345 -237.84647 928.39505 -237.3721 932.46875 -237.84375 C 936.54245 -238.3154 938.0278 -238.45435 938.75 -238.6875 C 939.46941 -238.91977 940.45025 -239.16096 941.09375 
-239.65625 C 943.03005 -239.32279 944.8638 -239.25201 946.3125 -239.40625 C 949.45851 -239.7412 951.49 -239.92484 953.5 -240.125 C 955.50991 -240.32514 955.98415 -240.95139 957.09375 -241.0625 C 958.24485 -241.17778 959.39025 -240.69744 963.46875 -241.125 C 967.54725 -241.55256 969.05765 -241.68709 969.78125 -241.90625 C 970.52047 -242.13011 971.57685 -242.4195 972.21875 -242.9375 C 974.27575 -242.53883 976.2206 -242.4441 977.75 -242.59375 C 980.89871 -242.90185 982.9258 -243.067 984.9375 -243.25 C 986.55151 -243.39682 987.20055 -243.81055 987.96875 -244.03125 C 988.21005 -244.11211 988.4623 -244.16116 988.75 -244.1875 C 989.90211 -244.29295 991.0429 -243.79475 995.125 -244.1875 C 999.20711 -244.58025 1000.7139 -244.71834 1001.4375 -244.9375 C 1002.1584 -245.15583 1003.1371 -245.3852 1003.7812 -245.875 C 1005.7193 -245.52501 1007.5501 -245.42062 1009 -245.5625 C 1012.1487 -245.8706 1014.1758 -246.03575 1016.1875 -246.21875 C 1018.1991 -246.40174 1018.7017 -247.05677 1019.8125 -247.15625 C 1020.9648 -247.25948 1022.1047 -246.77142 1026.1875 -247.15625 C 1030.2704 -247.54107 1031.7762 -247.65725 1032.5 -247.875 C 1033.2393 -248.09743 1034.2956 -248.38949 1034.9375 -248.90625 C 1036.9949 -248.50448 1038.9404 -248.40292 1040.4688 -248.5625 C 1043.6153 -248.89102 1045.6458 -249.0852 1047.6562 -249.28125 C 1049.2692 -249.43854 1049.9219 -249.91273 1050.6875 -250.15625 C 1050.9282 -250.24429 1051.1507 -250.27762 1051.4375 -250.3125 C 1052.5858 -250.4522 1053.7542 -249.97259 1057.8125 -250.5625 C 1061.8708 -251.15242 1063.3743 -251.33964 1064.0938 -251.59375 C 1064.8104 -251.84691 1065.7684 -252.15182 1066.4062 -252.6875 C 1068.3259 -252.47556 1070.1262 -252.53609 1071.5625 -252.78125 C 1074.6816 -253.31365 1076.6741 -253.70986 1078.6562 -254.09375 C 1080.6383 -254.47762 1081.1305 -255.1334 1082.2188 -255.375 C 1083.3475 -255.62566 1084.489 -255.25871 1088.4688 -256.25 C 1092.4483 -257.24127 1093.8983 -257.6693 1094.5938 -258.03125 C 1095.316 -258.40725 1096.3555 -258.90183 1096.9688 -259.5625 C 1098.9317 -259.57454 1100.7625 -259.85355 1102.1875 -260.40625 C 1105.1387 -261.55085 1107.0607 -262.27567 1108.875 -263.15625 C 1110.3307 -263.86277 1111.1941 -264.85828 1111.4062 -265.15625 C 1111.6185 -265.4542 1111.5051 -265.8848 1111.5312 -265.90625 C 1111.5742 -265.94148 1111.8716 -266.00028 1112.0312 -266.34375 C 1112.8902 -268.19082 1114.3544 -271.97139 1114.4688 -272.65625 C 1114.5825 -273.33839 1114.6368 -274.00902 1114.6875 -274.40625 C 1114.7169 -274.63575 1114.5404 -275.28515 1114.5625 -275.34375 C 1114.5934 -275.42579 1114.8508 -275.59432 1114.9062 -275.84375 C 1115.1725 -277.04206 1114.9953 -278.05111 1114.7812 -279.46875 C 1114.5673 -280.88638 1113.8096 -284.08338 1113.1562 -284.9375 C 1112.4973 -285.79922 1111.9314 -285.94801 1111.4062 -285.9375 z "
+ style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ id="path7423"
+ d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 1.8125,14.71875 c -0.25799,-0.0608 6.1875,3.3125 6.1875,3.3125 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 1.61175,-0.87937 2.21875,-1.53125 
1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 
2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
+ transform="translate(0.08004571,-0.03125)" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter7578"
+ x="-0.08160872"
+ width="1.1632174"
+ y="-0.22659944"
+ height="1.4531989">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2.437399"
+ id="feGaussianBlur7580" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7594"
+ x="-0.040804356"
+ width="1.0816087"
+ y="-0.11329972"
+ height="1.2265995">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.2186995"
+ id="feGaussianBlur7596" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7606">
+ <path
+ id="path7608"
+ d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
+ style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter7610"
+ x="-0.021942979"
+ width="1.0438859"
+ y="-0.10017137"
+ height="1.2003427">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.57530213"
+ id="feGaussianBlur7612" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7616">
+ <path
+ id="path7618"
+ d="m 205.47016,-408.97318 -0.0901,0.002 c -1.44563,0.78566 -6.69921,1.14335 -7.27625,9.03857 -0.54134,7.40553 13.78595,18.09566 15.85433,19.24481 1.66964,0.92764 3.48475,1.65551 5.27345,2.09115 l 1.41867,0.31834 c 1.90027,0.34514 3.70042,0.41015 5.15031,0.26563 3.1486,-0.31384 5.16386,-0.57031 7.16789,-0.8152 2.00402,-0.24488 2.5407,-0.86478 3.64319,-1.02999 1.14342,-0.17143 2.25659,0.27414 6.29577,-0.43753 4.03888,-0.71169 5.51507,-1.03768 6.23419,-1.3503 0.74664,-0.32479 1.81806,-0.74575 2.47589,-1.3621 1.95897,0.12471 3.80476,-0.0261 5.2648,-0.47819 3.02376,-0.93627 4.99157,-1.52544 6.8628,-2.27751 1.50138,-0.60342 2.43202,-1.53636 2.66436,-1.81883 0.23254,-0.28245 0.14951,-0.71989 0.17694,-0.73948 0.0453,-0.0322 0.34622,-0.0701 0.52926,-0.40161 0.98557,-1.78276 2.70955,-5.45215 2.87137,-6.12738 0.16094,-0.67257 0.26182,-1.33778 0.34007,-1.73051 0.0453,-0.22691 -0.0855,-0.88701 -0.0594,-0.94393 0.0365,-0.0797 0.30505,-0.22988 0.37769,-0.47485 0.34913,-1.17686 0.24274,-2.19578 0.1278,-3.6249 -0.11463,-1.42909 -0.64781,-4.6711 -1.24013,-5.56865 -0.38017,-0.57646 -0.74215,-0.84625 -1.09026,-0.98459 -0.0657,-0.0219 -0.12035,-0.0553 -0.1827,-0.0754 -0.0207,-0.005 -0.0418,2.3e-4 -0.0623,-0.004 -0.30559,-0.097 -0.59597,-0.20067 -1.13816,-0.29875 -0.97557,-0.1765 -2.40074,-0.42766 -3.94175,-0.52584 -0.51366,-0.0327 -1.0379,-0.0247 -1.56523,-0.0153 -3.57201,0.0636 -9.05695,0.3611 -10.30707,0.90928 -1.60587,-0.44697 -3.89597,-0.88576 -5.89053,-0.8812 -3.07195,0.007 -4.97947,-0.005 -6.92207,-0.0134 -1.94251,-0.009 -1.70908,0.27025 -2.9318,0.26518 -1.32192,-0.005 -1.76701,-0.34717 -5.55562,-0.29393 -3.55782,0.05 -9.02796,0.36522 -10.30706,0.90927 -1.60614,-0.44747 -3.89367,-0.88575 -5.89043,-0.88118 -3.07215,0.007 -4.98175,0.0265 -6.92426,0.0177 -0.66059,-0.003 -1.0485,0.051 -1.35359,0.0934 z"
+ style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient7622"
+ id="linearGradient7708"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(-19.091883,4.2426407)"
+ x1="774.97668"
+ y1="-211.87105"
+ x2="755.11584"
+ y2="-202.67865" />
+ <mask
+ maskUnits="userSpaceOnUse"
+ id="mask7704">
+ <path
+ style="fill:url(#linearGradient7708);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 718.40812,-224.31217 33.25,56 275.99998,-24 159.5,-48.25 -66.5,-82.75 -402.24998,99 z"
+ id="path7706"
+ inkscape:connector-curvature="0" />
+ </mask>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8430"
+ id="radialGradient7904"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(-0.3324832,0.9022288,-0.9582407,-0.3531242,305.29227,19.909497)"
+ cx="142.95833"
+ cy="107.09234"
+ fx="142.95833"
+ fy="107.09234"
+ r="66.981766" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3317"
+ id="radialGradient7906"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.0036478,-1.0345492e-7,1.7124628e-7,1.6613125,-160.53487,-96.205369)"
+ cx="317.78754"
+ cy="129.65378"
+ fx="317.78754"
+ fy="129.65378"
+ r="47.863216" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8398"
+ id="radialGradient7908"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(2.0747661,-0.1577957,0.2382425,3.1325183,-550.77432,-65.728909)"
+ cx="325.30847"
+ cy="80.909554"
+ fx="325.30847"
+ fy="80.909554"
+ r="26.937988" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8209">
+ <path
+ sodipodi:nodetypes="czcc"
+ id="path8211"
+ d="m 734.03125,519.49186 c 0,0 16.75513,37.01806 28.70141,53.95395 11.94629,16.93589 52.72716,56.04605 52.72716,56.04605 l 0.59717,-138.58975"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#1a1a1a;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8225">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.661912"
+ id="feGaussianBlur8227" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8333">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="7.18"
+ id="feGaussianBlur8335" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8338">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8340"
+ d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8354">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="6.82"
+ id="feGaussianBlur8356" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8359">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8361"
+ d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8379"
+ x="-0.14413793"
+ width="1.288276"
+ y="-0.10278689"
+ height="1.2055738">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="7.389266"
+ id="feGaussianBlur8381" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8392">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8394"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8404"
+ x="-0.090268657"
+ width="1.1805373"
+ y="-0.10250848"
+ height="1.205017">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="5.3457272"
+ id="feGaussianBlur8406" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8417">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8419"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ x="-0.084654994"
+ width="1.16931"
+ y="-0.36592469"
+ height="1.7318494"
+ id="filter11361-3">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="4.5740586"
+ id="feGaussianBlur11363-6" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8430"
+ id="radialGradient7904-7"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(-0.3324832,0.9022288,-0.9582407,-0.3531242,305.29227,19.909497)"
+ cx="142.95833"
+ cy="107.09234"
+ fx="142.95833"
+ fy="107.09234"
+ r="66.981766" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3317"
+ id="radialGradient7906-6"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.0036478,-1.0345492e-7,1.7124628e-7,1.6613125,-160.53487,-96.205369)"
+ cx="317.78754"
+ cy="129.65378"
+ fx="317.78754"
+ fy="129.65378"
+ r="47.863216" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8398"
+ id="radialGradient7908-0"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(2.0747661,-0.1577957,0.2382425,3.1325183,-550.77432,-65.728909)"
+ cx="325.30847"
+ cy="80.909554"
+ fx="325.30847"
+ fy="80.909554"
+ r="26.937988" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8658-06">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0b0b0b;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -99.77493,25.9619 -142.85715,35.7143 -43.08222,9.7524 -117.26443,34.816 -156.91262,27.2654 -39.64818,-7.5506 -89.51595,-64.4083 -89.51595,-64.4083 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ id="path8660-2"
+ sodipodi:nodetypes="czzzcczzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8888-6"
+ x="-0.2112188"
+ width="1.4224375"
+ y="-0.16808605"
+ height="1.3361721">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="8.3693583"
+ id="feGaussianBlur8890-1" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath2833-2">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#292929;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ id="path2835-3"
+ sodipodi:nodetypes="czzzcczzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8892-7"
+ x="-0.18692794"
+ width="1.3738559"
+ y="-0.23646873"
+ height="1.4729375">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="31.21228"
+ id="feGaussianBlur8894-5" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3665-9">
+ <path
+ sodipodi:nodetypes="czzcczcc"
+ id="path3667-2"
+ d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8856-2"
+ x="-0.3253231"
+ width="1.6506462"
+ y="-0.19013336"
+ height="1.3802667">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="28.712591"
+ id="feGaussianBlur8858-8" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8642-9">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ id="path8644-7"
+ sodipodi:nodetypes="czzcczcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8860-3"
+ x="-0.38093024"
+ width="1.7618605"
+ y="-0.17518716"
+ height="1.3503743">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.304015"
+ id="feGaussianBlur8862-6" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7616-1">
+ <path
+ id="path7618-2"
+ d="m 205.47016,-408.97318 -0.0901,0.002 c -1.44563,0.78566 -6.69921,1.14335 -7.27625,9.03857 -0.54134,7.40553 13.78595,18.09566 15.85433,19.24481 1.66964,0.92764 3.48475,1.65551 5.27345,2.09115 l 1.41867,0.31834 c 1.90027,0.34514 3.70042,0.41015 5.15031,0.26563 3.1486,-0.31384 5.16386,-0.57031 7.16789,-0.8152 2.00402,-0.24488 2.5407,-0.86478 3.64319,-1.02999 1.14342,-0.17143 2.25659,0.27414 6.29577,-0.43753 4.03888,-0.71169 5.51507,-1.03768 6.23419,-1.3503 0.74664,-0.32479 1.81806,-0.74575 2.47589,-1.3621 1.95897,0.12471 3.80476,-0.0261 5.2648,-0.47819 3.02376,-0.93627 4.99157,-1.52544 6.8628,-2.27751 1.50138,-0.60342 2.43202,-1.53636 2.66436,-1.81883 0.23254,-0.28245 0.14951,-0.71989 0.17694,-0.73948 0.0453,-0.0322 0.34622,-0.0701 0.52926,-0.40161 0.98557,-1.78276 2.70955,-5.45215 2.87137,-6.12738 0.16094,-0.67257 0.26182,-1.33778 0.34007,-1.73051 0.0453,-0.22691 -0.0855,-0.88701 -0.0594,-0.94393 0.0365,-0.0797 0.30505,-0.22988 0.37769,-0.47485 0.34913,-1.17686 0.24274,-2.19578 0.1278,-3.6249 -0.11463,-1.42909 -0.64781,-4.6711 -1.24013,-5.56865 -0.38017,-0.57646 -0.74215,-0.84625 -1.09026,-0.98459 -0.0657,-0.0219 -0.12035,-0.0553 -0.1827,-0.0754 -0.0207,-0.005 -0.0418,2.3e-4 -0.0623,-0.004 -0.30559,-0.097 -0.59597,-0.20067 -1.13816,-0.29875 -0.97557,-0.1765 -2.40074,-0.42766 -3.94175,-0.52584 -0.51366,-0.0327 -1.0379,-0.0247 -1.56523,-0.0153 -3.57201,0.0636 -9.05695,0.3611 -10.30707,0.90928 -1.60587,-0.44697 -3.89597,-0.88576 -5.89053,-0.8812 -3.07195,0.007 -4.97947,-0.005 -6.92207,-0.0134 -1.94251,-0.009 -1.70908,0.27025 -2.9318,0.26518 -1.32192,-0.005 -1.76701,-0.34717 -5.55562,-0.29393 -3.55782,0.05 -9.02796,0.36522 -10.30706,0.90927 -1.60614,-0.44747 -3.89367,-0.88575 -5.89043,-0.88118 -3.07215,0.007 -4.98175,0.0265 -6.92426,0.0177 -0.66059,-0.003 -1.0485,0.051 -1.35359,0.0934 z"
+ style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter7610-9"
+ x="-0.021942979"
+ width="1.0438859"
+ y="-0.10017137"
+ height="1.2003427">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.57530213"
+ id="feGaussianBlur7612-3" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7606-1">
+ <path
+ id="path7608-9"
+ d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
+ style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter7578-4"
+ x="-0.08160872"
+ width="1.1632174"
+ y="-0.22659944"
+ height="1.4531989">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2.437399"
+ id="feGaussianBlur7580-7" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7594-8"
+ x="-0.040804356"
+ width="1.0816087"
+ y="-0.11329972"
+ height="1.2265995">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.2186995"
+ id="feGaussianBlur7596-4" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8616-5">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8618-0"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8940-3"
+ x="-0.25152978"
+ width="1.5030596"
+ y="-0.053035267"
+ height="1.1060705">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="13.024603"
+ id="feGaussianBlur8942-6" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8209-6">
+ <path
+ sodipodi:nodetypes="czcc"
+ id="path8211-3"
+ d="m 734.03125,519.49186 c 0,0 16.75513,37.01806 28.70141,53.95395 11.94629,16.93589 52.72716,56.04605 52.72716,56.04605 l 0.59717,-138.58975"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#1a1a1a;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8822-2"
+ x="-0.2742857"
+ width="1.5485713"
+ y="-0.21333334"
+ height="1.4266667">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="11.313708"
+ id="feGaussianBlur8824-0" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3998-6">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 178.21428,274.14789 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401287 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.3574 -122.78647,50.053 -187.06988,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path4000-1"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter3677-5">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2.0397518"
+ id="feGaussianBlur3679-5" />
+ </filter>
+ <filter
+ id="filter3785-4"
+ inkscape:label="White Fur">
+ <feTurbulence
+ id="feTurbulence3787-7"
+ type="fractalNoise"
+ baseFrequency="0.24044943820224721"
+ numOctaves="10"
+ seed="655"
+ result="result0" />
+ <feDisplacementMap
+ id="feDisplacementMap3789-65"
+ in="SourceGraphic"
+ in2="result0"
+ scale="62"
+ xChannelSelector="B"
+ yChannelSelector="G" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8604-69">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8606-3"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8802-7"
+ x="-0.35311759"
+ width="1.7062352"
+ y="-0.1817714"
+ height="1.3635428">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="48.038491"
+ id="feGaussianBlur8804-4" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3317"
+ id="radialGradient3315-5"
+ cx="543.6698"
+ cy="147.3131"
+ fx="543.6698"
+ fy="147.3131"
+ r="47.863216"
+ gradientTransform="matrix(2.1382256,0,0,2.3382884,-77.03847,-101.68704)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3537"
+ id="radialGradient3543-4"
+ cx="385"
+ cy="237.00504"
+ fx="385"
+ fy="237.00504"
+ r="86.928574"
+ gradientTransform="matrix(1,0,0,0.8562038,0,34.080427)"
+ gradientUnits="userSpaceOnUse" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath4100-3">
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.9000755px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 265.93541,126.68393 -18.76721,168.86308 174.10543,-73.12068 61.9544,88.65883 57.8844,-31.9903 -37.53442,-180.059677 -237.6426,27.648747 z"
+ id="path4102-0"
+ sodipodi:nodetypes="ccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4120-7"
+ x="-0.2770822"
+ width="1.5541644"
+ y="-0.32482043"
+ height="1.6496409">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.956289"
+ id="feGaussianBlur4122-8" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3961"
+ id="radialGradient3915-6"
+ cx="418.30365"
+ cy="342.47794"
+ fx="418.30365"
+ fy="342.47794"
+ r="131.4509"
+ gradientTransform="matrix(1.3957347,0.6211056,-0.4244067,0.9537174,-15.061913,-227.96711)"
+ gradientUnits="userSpaceOnUse" />
+ <mask
+ maskUnits="userSpaceOnUse"
+ id="mask3684-3">
+ <ellipse
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient3688);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.43724918px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path3686-1"
+ transform="translate(-174.03125,62.156036)"
+ cx="579.474"
+ cy="260.57516"
+ rx="192.6866"
+ ry="164.04877" />
+ </mask>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3951"
+ id="radialGradient3933-8"
+ cx="397.16388"
+ cy="336.95245"
+ fx="397.16388"
+ fy="336.95245"
+ r="36.75"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.9449972,2.4894837e-7,-2.4894833e-7,1.9449969,-375.31868,-318.41912)" />
+ <filter
+ inkscape:collect="always"
+ id="filter8806-6"
+ x="-0.61142862"
+ width="2.2228572"
+ y="-0.14930232"
+ height="1.2986046">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="37.830213"
+ id="feGaussianBlur8808-4" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter8826-9"
+ x="-0.25894088"
+ width="1.5178818"
+ y="-0.2236412"
+ height="1.4472824">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="19.631544"
+ id="feGaussianBlur8828-5" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3985"
+ id="radialGradient3991-0"
+ cx="402.48898"
+ cy="317.23578"
+ fx="402.48898"
+ fy="317.23578"
+ r="23.714285"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(4.3776616,0,0,4.3776616,-1358.3025,-1070.7357)" />
+ <filter
+ inkscape:collect="always"
+ id="filter3981-7"
+ x="-0.30000001"
+ width="1.6"
+ y="-0.30000001"
+ height="1.6">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="2"
+ id="feGaussianBlur3983-1" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4106"
+ id="radialGradient4112-7"
+ cx="250.22678"
+ cy="475.09763"
+ fx="250.22678"
+ fy="475.09763"
+ r="95.98877"
+ gradientTransform="matrix(1.2259004,-0.7077739,0.1413989,0.2449102,322.22326,608.91815)"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4013"
+ id="radialGradient3585-2"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
+ cx="228.81355"
+ cy="440.26971"
+ fx="228.81355"
+ fy="440.26971"
+ r="119.17509" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3999-0">
+ <path
+ style="display:inline;opacity:1;fill:#f5ff04;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 12.61031,-67.04463 3.21429,-93.92857 -9.43424,-26.99328 -34.96741,-59.12448 -66.42857,-69.64285 -31.03327,-10.37532 -65.01776,-4.84837 -84.28571,5.71428 z"
+ id="path4001-61"
+ sodipodi:nodetypes="czzczzzzc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4041"
+ id="radialGradient4060-5"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
+ cx="275.4422"
+ cy="335.34866"
+ fx="275.4422"
+ fy="335.34866"
+ r="36.75" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient7622"
+ id="radialGradient4062-9"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
+ cx="275.4422"
+ cy="335.34866"
+ fx="275.4422"
+ fy="335.34866"
+ r="36.75" />
+ <filter
+ inkscape:collect="always"
+ id="filter4079-1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="6.5887624"
+ id="feGaussianBlur4081-1" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4013"
+ id="radialGradient4056-5"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
+ cx="228.81355"
+ cy="440.26971"
+ fx="228.81355"
+ fy="440.26971"
+ r="119.17509" />
+ <filter
+ inkscape:collect="always"
+ id="filter4083-9">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.5052066"
+ id="feGaussianBlur4085-7" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4113"
+ id="radialGradient4119-7"
+ cx="296.33783"
+ cy="427.17749"
+ fx="296.33783"
+ fy="427.17749"
+ r="19.704132"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(2.9797125,0,0,2.9797125,-599.28727,-827.0855)" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4862"
+ id="radialGradient4868-3"
+ cx="429.56738"
+ cy="377.42877"
+ fx="429.56738"
+ fy="377.42877"
+ r="72.079735"
+ gradientTransform="matrix(1,0,0,0.618034,0,144.16496)"
+ gradientUnits="userSpaceOnUse" />
+ <filter
+ inkscape:collect="always"
+ id="filter4002-6"
+ x="-0.24334238"
+ width="1.4866848"
+ y="-0.39104807"
+ height="1.7820961">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="14.589518"
+ id="feGaussianBlur4004-3" />
+ </filter>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4870"
+ id="radialGradient4876-9"
+ cx="437.6991"
+ cy="391.21735"
+ fx="437.6991"
+ fy="391.21735"
+ r="36.611931"
+ gradientTransform="matrix(1,0,0,0.618034,0,149.43174)"
+ gradientUnits="userSpaceOnUse" />
+ <filter
+ inkscape:collect="always"
+ id="filter4010-1"
+ x="-0.14577261"
+ width="1.2915452"
+ y="-0.23523259"
+ height="1.4704652">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="4.4442907"
+ id="feGaussianBlur4012-2" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4053-9">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.6062947"
+ id="feGaussianBlur4055-3" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8514-8">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8516-8"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8814-5"
+ x="-0.20466694"
+ width="1.4093339"
+ y="-0.29007819"
+ height="1.5801564">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="22.300169"
+ id="feGaussianBlur8816-0" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8610-9">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8612-6"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8810-3"
+ x="-0.23519406"
+ width="1.4703881"
+ y="-0.24500646"
+ height="1.4900129">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="58.328041"
+ id="feGaussianBlur8812-8" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8622-5">
+ <path
+ style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 821.64329,477.88997 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
+ id="path8624-61"
+ sodipodi:nodetypes="czzzzzzczczczczzzc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8818-1"
+ x="-0.34381232"
+ width="1.6876246"
+ y="-0.18433961"
+ height="1.3686792">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="34.542167"
+ id="feGaussianBlur8820-5" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8906-9">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path8908-8"
+ sodipodi:nodetypes="cscccccccccccc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3602-4">
+ <path
+ sodipodi:nodetypes="czzzzzzczczczczzzc"
+ id="path3604-8"
+ d="m 647.61204,540.04601 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
+ style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter3587-1"
+ x="-0.1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="8.881432"
+ id="feGaussianBlur3589-0" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3992-4">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3994-4"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter3779-4"
+ x="-0.087980822"
+ width="1.1759616"
+ y="-0.17728332"
+ height="1.3545666">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="16.340344"
+ id="feGaussianBlur3781-4" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3986-7">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3988-6"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3722-3">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
+ id="path3724-1"
+ sodipodi:nodetypes="czzzzzzzzcc"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8225-7">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.661912"
+ id="feGaussianBlur8227-5" />
+ </filter>
+ <mask
+ maskUnits="userSpaceOnUse"
+ id="mask7704-9">
+ <path
+ style="fill:url(#linearGradient7708);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 718.40812,-224.31217 33.25,56 275.99998,-24 159.5,-48.25 -66.5,-82.75 -402.24998,99 z"
+ id="path7706-6"
+ inkscape:connector-curvature="0" />
+ </mask>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath7421-7">
+ <path
+ sodipodi:type="inkscape:offset"
+ inkscape:radius="0"
+ inkscape:original="M 1111.4062 -285.9375 L 1107.4688 -284.0625 C 1107.4283 -284.05228 1107.3692 -284.04201 1107.3438 -284.03125 C 1106.925 -283.8184 1107.1791 -283.93067 1106.6875 -283.71875 C 1106.2014 -283.50919 1104.9499 -283.13456 1102.5938 -282.25 C 1099.2626 -280.99942 1096.7895 -280.10016 1095.5938 -279.1875 C 1094.0576 -279.16623 1091.8733 -278.95419 1089.9375 -278.46875 C 1086.956 -277.72108 1085.0823 -277.29474 1083.1875 -276.875 C 1081.2927 -276.45527 1081.512 -276.23281 1080.3125 -276 C 1079.0159 -275.74833 1078.5911 -276.00899 1074.875 -275.21875 C 1071.3851 -274.4766 1065.9802 -273.28768 1064.7188 -272.53125 C 1063.1348 -272.71203 1060.8513 -272.85303 1058.875 -272.5625 C 1055.8346 -272.11554 1053.9588 -271.88974 1052.0312 -271.65625 C 1051.3758 -271.57687 1050.9902 -271.45547 1050.6875 -271.375 C 1050.2613 -271.24334 1050.0017 -271.11498 1049.3125 -271.03125 C 1048.0009 -270.87188 1047.5503 -271.18808 1043.7812 -270.75 C 1040.2273 -270.33691 1034.7758 -269.47718 1033.5312 -268.8125 C 1031.9322 -269.10979 1029.6735 -269.34669 1027.6875 -269.15625 C 1024.6287 -268.86293 1022.7155 -268.67226 1020.7812 -268.5 C 1018.847 -268.32773 1019.0926 -268.07763 1017.875 -267.96875 C 1016.5588 -267.85105 1016.1152 -268.13238 1012.3438 -267.71875 C 1008.8017 -267.3303 1003.3359 -266.50948 1002.0625 -265.84375 C 1000.4636 -266.13844 998.1753 -266.35076 996.1875 -266.15625 C 993.12921 -265.857 991.2463 -265.67601 989.3125 -265.5 C 988.65501 -265.44015 988.27245 -265.32144 987.96875 -265.25 C 987.54105 -265.13104 987.28525 -265.03193 986.59375 -264.96875 C 985.27775 -264.84849 984.834 -265.16363 981.0625 -264.75 C 977.50631 -264.35998 972.0569 -263.51084 970.8125 -262.84375 C 969.21381 -263.13793 966.95265 -263.36747 964.96875 -263.15625 C 961.91305 -262.83092 959.9947 -262.63001 958.0625 -262.4375 C 956.13031 -262.24499 956.37275 -261.99662 955.15625 -261.875 C 953.84137 -261.74353 953.3932 -262.03954 949.625 -261.59375 C 946.08611 -261.17509 940.6473 -260.30158 939.375 -259.625 C 937.77741 -259.90604 935.51505 -260.04543 933.53125 -259.8125 C 930.47927 -259.45413 928.58625 -259.24464 926.65625 -259.03125 C 926.00007 -258.95869 925.6156 -258.85856 925.3125 -258.78125 C 924.88571 -258.65402 924.6276 -258.51405 923.9375 -258.4375 C 922.62411 -258.29181 922.17015 -258.61152 918.40625 -258.125 C 914.85737 -257.66624 909.4276 -256.70598 908.1875 -256 C 906.59441 -256.24424 904.3537 -256.38135 902.375 -256.125 C 899.32741 -255.73018 897.4243 -255.47655 895.5 -255.21875 C 893.57571 -254.96096 893.7739 -254.72522 892.5625 -254.5625 C 891.25301 -254.3866 890.8153 -254.66688 887.0625 -254.09375 C 883.53821 -253.55551 878.1393 -252.39458 876.875 -251.65625 C 875.28751 -251.85979 873.0295 -251.91098 871.0625 -251.5625 C 868.03631 -251.02638 866.1636 -250.70081 864.25 -250.375 C 863.59941 -250.26423 863.2363 -250.10406 862.9375 -250 C 862.51681 -249.83512 862.27405 -249.6687 861.59375 -249.53125 C 860.29905 -249.26966 859.86665 -249.53745 856.15625 -248.71875 C 852.65777 -247.9468 847.31035 -246.33582 846.09375 -245.5 C 844.53085 -245.57745 842.33625 -245.41472 840.40625 -244.90625 C 837.43387 -244.12312 835.58855 -243.67416 833.71875 -243.15625 C 831.84875 -242.63835 832.0521 -242.38897 830.875 -242.0625 C 829.60251 -241.7096 829.17795 -241.95541 825.53125 -240.875 C 822.10657 -239.86037 816.88185 -237.94183 815.65625 -237.03125 C 814.11747 -237.01851 811.93645 -236.75903 810.03125 -236.15625 C 807.10027 -235.22891 805.2809 -234.69783 803.4375 -234.09375 C 802.81071 -233.88837 802.44585 -233.70117 
802.15625 -233.5625 C 801.74867 -233.34889 801.50295 -233.15375 800.84375 -232.9375 C 799.58925 -232.52596 799.1576 -232.74846 795.5625 -231.5 C 792.17261 -230.32283 786.96755 -228.2863 785.78125 -227.34375 C 784.25737 -227.28408 782.1312 -226.94888 780.25 -226.28125 C 777.35261 -225.25296 775.55095 -224.60577 773.71875 -223.96875 C 771.88655 -223.33174 772.0909 -223.12021 770.9375 -222.71875 C 769.69071 -222.28479 769.27395 -222.51903 765.71875 -221.15625 C 762.38005 -219.87645 757.23165 -217.6737 756.03125 -216.6875 C 754.52407 -216.57981 752.39555 -216.1887 750.53125 -215.46875 C 747.66307 -214.36115 745.90735 -213.68719 744.09375 -213 C 743.47705 -212.76637 743.0973 -212.55797 742.8125 -212.40625 C 742.81251 -212.40625 742.8125 -212.37673 742.8125 -212.375 L 734.8125 -209.1875 L 736.625 -194.46875 C 736.36701 -194.52956 742.8125 -191.15625 742.8125 -191.15625 C 743.03891 -191.30093 743.26145 -191.42886 743.53125 -191.53125 C 744.61177 -191.94123 745.70285 -191.74702 749.53125 -193.21875 C 753.35977 -194.69049 754.7553 -195.22373 755.4375 -195.625 C 756.11711 -196.02478 757.04925 -196.50437 757.65625 -197.15625 C 759.48317 -197.294 761.22705 -197.64948 762.59375 -198.15625 C 765.56175 -199.25677 767.4691 -199.96244 769.375 -200.625 C 771.28081 -201.28754 771.72915 -202.03987 772.78125 -202.40625 C 773.87287 -202.78636 774.97635 -202.57163 778.84375 -203.9375 C 782.71115 -205.30336 784.1269 -205.76458 784.8125 -206.15625 C 785.51361 -206.55677 786.5133 -207.08923 787.125 -207.75 C 789.09581 -207.80466 790.94195 -208.13463 792.40625 -208.625 C 795.40777 -209.63008 797.3324 -210.24671 799.25 -210.875 C 800.78861 -211.3791 801.42415 -211.92177 802.15625 -212.3125 C 802.38647 -212.44681 802.63215 -212.56623 802.90625 -212.65625 C 804.00457 -213.01673 805.0877 -212.73762 809 -213.96875 C 812.91231 -215.19988 814.366 -215.6417 815.0625 -216 C 815.75641 -216.35697 816.6926 -216.79261 817.3125 -217.40625 C 819.17771 -217.42891 820.94835 -217.67308 822.34375 -218.09375 C 825.37415 -219.00729 827.33615 -219.52385 829.28125 -220.0625 C 831.22637 -220.60114 831.70745 -221.32702 832.78125 -221.625 C 833.89527 -221.93415 835.00125 -221.61761 838.96875 -222.65625 C 842.93625 -223.69488 844.38625 -224.08898 845.09375 -224.40625 C 845.82855 -224.73584 846.90765 -225.15997 847.53125 -225.78125 C 849.52907 -225.66525 851.3887 -225.80134 852.875 -226.15625 C 855.95311 -226.89125 857.9584 -227.25719 859.9375 -227.65625 C 861.52541 -227.97643 862.1818 -228.4468 862.9375 -228.75 C 863.17501 -228.8568 863.4044 -228.94276 863.6875 -229 C 864.82091 -229.22919 865.99215 -228.79107 870.03125 -229.5 C 874.07067 -230.20893 875.5315 -230.42709 876.25 -230.6875 C 876.96581 -230.94694 877.95435 -231.25474 878.59375 -231.78125 C 880.51795 -231.54176 882.34165 -231.55672 883.78125 -231.78125 C 886.90767 -232.26887 888.9358 -232.48192 890.9375 -232.75 C 892.93921 -233.01807 893.42625 -233.69514 894.53125 -233.84375 C 895.67767 -233.99793 896.8071 -233.54218 900.875 -234.0625 C 904.94281 -234.58282 906.43525 -234.75823 907.15625 -235 C 907.89337 -235.24714 908.95435 -235.58623 909.59375 -236.125 C 911.64375 -235.78947 913.56745 -235.72704 915.09375 -235.90625 C 918.23595 -236.27521 920.27375 -236.46561 922.28125 -236.6875 C 923.89207 -236.86552 924.5459 -237.2957 925.3125 -237.53125 C 925.55341 -237.61677 925.80655 -237.68685 926.09375 -237.71875 C 927.24345 -237.84647 928.39505 -237.3721 932.46875 -237.84375 C 936.54245 -238.3154 938.0278 -238.45435 938.75 -238.6875 C 939.46941 -238.91977 940.45025 -239.16096 941.09375 
-239.65625 C 943.03005 -239.32279 944.8638 -239.25201 946.3125 -239.40625 C 949.45851 -239.7412 951.49 -239.92484 953.5 -240.125 C 955.50991 -240.32514 955.98415 -240.95139 957.09375 -241.0625 C 958.24485 -241.17778 959.39025 -240.69744 963.46875 -241.125 C 967.54725 -241.55256 969.05765 -241.68709 969.78125 -241.90625 C 970.52047 -242.13011 971.57685 -242.4195 972.21875 -242.9375 C 974.27575 -242.53883 976.2206 -242.4441 977.75 -242.59375 C 980.89871 -242.90185 982.9258 -243.067 984.9375 -243.25 C 986.55151 -243.39682 987.20055 -243.81055 987.96875 -244.03125 C 988.21005 -244.11211 988.4623 -244.16116 988.75 -244.1875 C 989.90211 -244.29295 991.0429 -243.79475 995.125 -244.1875 C 999.20711 -244.58025 1000.7139 -244.71834 1001.4375 -244.9375 C 1002.1584 -245.15583 1003.1371 -245.3852 1003.7812 -245.875 C 1005.7193 -245.52501 1007.5501 -245.42062 1009 -245.5625 C 1012.1487 -245.8706 1014.1758 -246.03575 1016.1875 -246.21875 C 1018.1991 -246.40174 1018.7017 -247.05677 1019.8125 -247.15625 C 1020.9648 -247.25948 1022.1047 -246.77142 1026.1875 -247.15625 C 1030.2704 -247.54107 1031.7762 -247.65725 1032.5 -247.875 C 1033.2393 -248.09743 1034.2956 -248.38949 1034.9375 -248.90625 C 1036.9949 -248.50448 1038.9404 -248.40292 1040.4688 -248.5625 C 1043.6153 -248.89102 1045.6458 -249.0852 1047.6562 -249.28125 C 1049.2692 -249.43854 1049.9219 -249.91273 1050.6875 -250.15625 C 1050.9282 -250.24429 1051.1507 -250.27762 1051.4375 -250.3125 C 1052.5858 -250.4522 1053.7542 -249.97259 1057.8125 -250.5625 C 1061.8708 -251.15242 1063.3743 -251.33964 1064.0938 -251.59375 C 1064.8104 -251.84691 1065.7684 -252.15182 1066.4062 -252.6875 C 1068.3259 -252.47556 1070.1262 -252.53609 1071.5625 -252.78125 C 1074.6816 -253.31365 1076.6741 -253.70986 1078.6562 -254.09375 C 1080.6383 -254.47762 1081.1305 -255.1334 1082.2188 -255.375 C 1083.3475 -255.62566 1084.489 -255.25871 1088.4688 -256.25 C 1092.4483 -257.24127 1093.8983 -257.6693 1094.5938 -258.03125 C 1095.316 -258.40725 1096.3555 -258.90183 1096.9688 -259.5625 C 1098.9317 -259.57454 1100.7625 -259.85355 1102.1875 -260.40625 C 1105.1387 -261.55085 1107.0607 -262.27567 1108.875 -263.15625 C 1110.3307 -263.86277 1111.1941 -264.85828 1111.4062 -265.15625 C 1111.6185 -265.4542 1111.5051 -265.8848 1111.5312 -265.90625 C 1111.5742 -265.94148 1111.8716 -266.00028 1112.0312 -266.34375 C 1112.8902 -268.19082 1114.3544 -271.97139 1114.4688 -272.65625 C 1114.5825 -273.33839 1114.6368 -274.00902 1114.6875 -274.40625 C 1114.7169 -274.63575 1114.5404 -275.28515 1114.5625 -275.34375 C 1114.5934 -275.42579 1114.8508 -275.59432 1114.9062 -275.84375 C 1115.1725 -277.04206 1114.9953 -278.05111 1114.7812 -279.46875 C 1114.5673 -280.88638 1113.8096 -284.08338 1113.1562 -284.9375 C 1112.4973 -285.79922 1111.9314 -285.94801 1111.4062 -285.9375 z "
+ style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ id="path7423-8"
+ d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 1.8125,14.71875 c -0.25799,-0.0608 6.1875,3.3125 6.1875,3.3125 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 1.61175,-0.87937 2.21875,-1.53125 
1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 
2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
+ transform="translate(0.08004571,-0.03125)" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter7001-5">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur7003-7" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6949-4"
+ x="-0.10294895"
+ width="1.2058979"
+ y="-0.34224695"
+ height="1.6844939">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6951-1" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6961-8"
+ x="-0.09919104"
+ width="1.1983821"
+ y="-0.22643611"
+ height="1.4528722">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6963-5" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6957-9"
+ x="-0.098213427"
+ width="1.1964267"
+ y="-0.19838208"
+ height="1.3967642">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6959-7" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6997-5">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6999-3" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6953-8"
+ x="-0.098320946"
+ width="1.1966419"
+ y="-0.19750816"
+ height="1.3950163">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6955-8" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6993-3"
+ x="-0.098213255"
+ width="1.1964265"
+ y="-0.19838208"
+ height="1.3967642">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6995-1" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6989-8"
+ x="-0.098428868"
+ width="1.1968577"
+ y="-0.20287035"
+ height="1.4057407">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6991-9" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6985-6"
+ x="-0.098428868"
+ width="1.1968577"
+ y="-0.20853186"
+ height="1.4170637">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6987-4" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6965-3"
+ x="-0.099081434"
+ width="1.1981629"
+ y="-0.22529824"
+ height="1.4505965">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6967-3" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6981-3"
+ x="-0.10052545"
+ width="1.2010509"
+ y="-0.2742162"
+ height="1.5484324">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6983-8" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6977-6"
+ x="-0.10224481"
+ width="1.2044896"
+ y="-0.32371372"
+ height="1.6474274">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6979-0" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6973-4"
+ x="-0.10330495"
+ width="1.2066098"
+ y="-0.36439717"
+ height="1.7287945">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6975-8" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter6969-8"
+ x="-0.10450897"
+ width="1.2090179"
+ y="-0.40468886"
+ height="1.8093777">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.1675612"
+ id="feGaussianBlur6971-8" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7345-9">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="1.7233839"
+ id="feGaussianBlur7347-7" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7333-7">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7335-6" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7285-4"
+ x="-0.030884685"
+ width="1.0617694"
+ y="-0.10267408"
+ height="1.2053483">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7287-3" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7289-0">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7291-3" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7293-0">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7295-9" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7337-2">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7339-5" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7297-4">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7299-0" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7301-5">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7303-9" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7305-4">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7307-6" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7309-9">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7311-2" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7313-2">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7315-4" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7317-7">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7319-7" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7321-5">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7323-4" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7329-8"
+ x="-0.030991485"
+ width="1.061983"
+ y="-0.10931916"
+ height="1.2186383">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7331-1" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter7325-2"
+ x="-0.031352691"
+ width="1.0627054"
+ y="-0.12140666"
+ height="1.2428133">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.35026836"
+ id="feGaussianBlur7327-8" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9048-9"
+ x="-0.40879121"
+ width="1.8175824"
+ y="-0.71538466"
+ height="2.4307692">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="21.92031"
+ id="feGaussianBlur9050-3" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3631-6">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3633-8"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3677-0">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3679-2"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter3898-1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.892985"
+ id="feGaussianBlur3900-0" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3622-5">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 266.27183,924.57186 c -1.40727,18.80121 -1.1449,32.75103 2.08174,49.30328 3.22665,16.55234 16.40608,45.90736 20.3344,63.18376 3.92622,17.2671 2.69413,38.3096 -12.45944,51.1482 -15.31761,12.9774 -42.05127,21.5989 -67.8323,15.7338 -25.78106,-5.8653 -69.54907,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045183,939.86194 41.867508,909.43681 27.689836,879.01169 29.207903,872.71824 33.747793,863.90708 24.381071,839.38658 21.334081,813.84027 0.03533552,788.33044 30.360815,791.44488 43.915625,815.28677 60.161025,835.47019 54.631129,787.39416 42.10631,771.05369 31.787073,744.74589 c 29.994295,6.08166 50.57936,31.8724 63.979783,72.7125 9.554154,-3.91791 18.237764,-9.37294 30.187414,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.687255,-101.06994 53.441965,5.67033 83.657025,80.63932 78.971425,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24896,-38.34702 -21.04781,-76.8679 -3.65971,-118.64818 0,0 48.28678,65.43687 54.38966,85.80577 6.10287,20.36891 1.51881,38.70052 1.51881,38.70052 0,0 16.95957,31.08529 20.29392,51.09413 3.3731,20.24135 -3.53269,59.10332 -4.94582,77.98324 z"
+ id="path3624-1"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter9024-1"
+ x="-0.55453134"
+ width="2.1090627"
+ y="-0.51434779"
+ height="2.0286956">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="20.912684"
+ id="feGaussianBlur9026-0" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9020-8"
+ x="-0.32861114"
+ width="1.6572223"
+ y="-0.182"
+ height="1.364">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="20.912684"
+ id="feGaussianBlur9022-5" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter9044-0"
+ x="-0.32631579"
+ width="1.6526316"
+ y="-0.84545463"
+ height="2.6909094">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="21.92031"
+ id="feGaussianBlur9046-6" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath4177-4">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path4179-6"
+ d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4105-2">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="3.8640966"
+ id="feGaussianBlur4107-5" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4130-8"
+ x="-0.49509686"
+ width="1.9901937"
+ y="-0.26708817"
+ height="1.5341763">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="10.730622"
+ id="feGaussianBlur4132-6" />
+ </filter>
+ <filter
+ inkscape:collect="always"
+ id="filter4141-2"
+ x="-0.40611032"
+ width="1.8122206"
+ y="-0.30260596"
+ height="1.6052119">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="9.8586086"
+ id="feGaussianBlur4143-8" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8338-4">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8340-7"
+ d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8333-2">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="7.18"
+ id="feGaussianBlur8335-4" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8359-0">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8361-6"
+ d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8354-2">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="6.82"
+ id="feGaussianBlur8356-9" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath3636-90">
+ <path
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ id="path3638-8"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter4185-1">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="3.6164709"
+ id="feGaussianBlur4187-3" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8392-1">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8394-1"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8379-0"
+ x="-0.14413793"
+ width="1.288276"
+ y="-0.10278689"
+ height="1.2055738">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="7.389266"
+ id="feGaussianBlur8381-3" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath8417-4">
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8419-03"
+ d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter8404-9"
+ x="-0.090268657"
+ width="1.1805373"
+ y="-0.10250848"
+ height="1.205017">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="5.3457272"
+ id="feGaussianBlur8406-1" />
+ </filter>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient2843"
+ id="linearGradient6951"
+ gradientUnits="userSpaceOnUse"
+ x1="347.89655"
+ y1="1070.2124"
+ x2="275.58191"
+ y2="867.97992" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8964"
+ id="linearGradient6953"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)"
+ x1="603.84064"
+ y1="627.85303"
+ x2="616.24396"
+ y2="585.42664" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient8952"
+ id="linearGradient6955"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)"
+ x1="609.31244"
+ y1="239.46866"
+ x2="560.83142"
+ y2="262.86206" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3961"
+ id="linearGradient6957"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="translate(450.03125,73.843964)"
+ x1="398.21429"
+ y1="343.52289"
+ x2="379.28571"
+ y2="265.30862" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4084"
+ id="linearGradient6959"
+ gradientUnits="userSpaceOnUse"
+ x1="182.35046"
+ y1="256.11136"
+ x2="145.53348"
+ y2="542.20502" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4084"
+ id="linearGradient6961"
+ gradientUnits="userSpaceOnUse"
+ x1="182.35046"
+ y1="256.11136"
+ x2="145.53348"
+ y2="542.20502" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient6963"
+ gradientUnits="userSpaceOnUse"
+ x1="412.08926"
+ y1="404.91574"
+ x2="417.375"
+ y2="401.82648" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient6965"
+ gradientUnits="userSpaceOnUse"
+ x1="411.91071"
+ y1="404.91577"
+ x2="417.375"
+ y2="401.82648" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient6967"
+ gradientUnits="userSpaceOnUse"
+ x1="411.91071"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient6969"
+ gradientUnits="userSpaceOnUse"
+ x1="412.08926"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4478"
+ id="linearGradient6971"
+ gradientUnits="userSpaceOnUse"
+ x1="411.73212"
+ y1="405.54077"
+ x2="417.375"
+ y2="401.82648" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3660"
+ id="linearGradient6973"
+ gradientUnits="userSpaceOnUse"
+ x1="1255.7386"
+ y1="667.09216"
+ x2="893.69995"
+ y2="858.01099" />
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath6975">
+ <rect
+ id="rect6977"
+ width="440"
+ height="376"
+ x="547.99994"
+ y="205.32277"
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new" />
+ </clipPath>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path7188-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ </defs>
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:groupmode="layer"
+ id="layer1"
+ inkscape:label="Shadow"
+ transform="translate(48.571445,195.53053)" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer20"
+ inkscape:label="New Ear"
+ transform="translate(48.571445,195.53053)" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer21"
+ inkscape:label="Rendered2"
+ style="display:inline"
+ transform="translate(48.571445,195.53053)" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer15"
+ inkscape:label="Feet"
+ style="display:inline"
+ transform="translate(48.571445,195.53053)" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer16"
+ inkscape:label="Left Foot"
+ style="display:inline"
+ transform="translate(48.571445,195.53053)">
+ <rect
+ style="display:inline;opacity:1;fill:#a8a8a8;fill-opacity:1;stroke:#000000;stroke-width:20.89992332;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
+ id="rect6676-3-7-5"
+ width="1876.7191"
+ height="1562.9667"
+ x="-38.121483"
+ y="-86.153076" />
+ <rect
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:#000000;stroke-width:20.92477036;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
+ id="rect6676-3-7"
+ width="1878.7875"
+ height="1564.9603"
+ x="2288.5129"
+ y="-84.10511" />
+ <rect
+ style="display:inline;opacity:1;fill:#a8a8a8;fill-opacity:1;stroke:#f83615;stroke-width:20.39127541;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
+ id="rect6676-3"
+ width="1833.4282"
+ height="1522.9458"
+ x="2309.7717"
+ y="-62.567806" />
+ <g
+ id="g4303">
+ <path
+ inkscape:export-ydpi="142.10527"
+ inkscape:export-xdpi="142.10527"
+ inkscape:export-filename="/home/cheeseness/Documents/LCA09/mascot/tuz_new.png"
+ transform="matrix(10.726753,0,0,10.726753,-2882.1235,-4565.4583)"
+ sodipodi:nodetypes="cccccccccsccccccccccc"
+ id="path10326"
+ d="m 304.64285,526.6479 c -10,0.35715 -18.21428,2.85714 -18.21428,2.85714 l 7.5,6.07143 10.35714,3.57143 16.07143,0.35714 22.5,-5.35714 7.85714,1.07143 20.35715,-2.14286 -10.35715,6.78572 c 5.45923,-1.02361 17.39329,3.56911 9.64286,5.35714 -1.74,0.40142 13.92857,-4.64285 13.92857,-4.64285 l 2.5,-4.64287 3.57143,-9.28571 11.42857,0 18.21428,-4.64286 3.57144,-4.99999 -16.07144,1.07142 -12.14285,2.14286 -14.64286,-5 -70.6921,16.70774 -5.37933,-5.27917 z"
+ style="display:inline;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter11361);enable-background:new"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="matrix(0.71084,-0.1937433,0.262963,0.9648058,503.68027,136.48399)"
+ id="g7882"
+ style="display:inline;opacity:1;enable-background:new">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7904);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 245.12255,100.05344 c 0,0 -47.12811,-31.646921 -67.21465,-35.800939 -20.03792,-4.143963 -38.4729,-3.317578 -51.93364,13.607323 -13.46074,16.924901 -12.07739,61.265196 -13.53554,86.969546 -1.45815,25.70435 2.54945,70.17701 17.6046,88.66552 15.05516,18.4885 45.88634,13.58502 49.92695,21.4137 2.21283,4.28736 65.15228,-174.85515 65.15228,-174.85515 z"
+ id="path7876"
+ sodipodi:nodetypes="czzzzcc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7906);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 135.37935,82.017807 c 0,0 26.34355,1.938783 37.63307,13.903188 11.41494,12.097335 13.73457,21.331515 15.29586,37.734585 1.56337,16.42499 -0.84957,28.41812 -7.81382,36.03734 -6.96425,7.61922 -1.00429,19.58332 -25.91605,12.07107 -24.91176,-7.51225 -27.03224,-27.78298 -26.51523,-46.30475 0.51721,-18.52898 7.31617,-53.441433 7.31617,-53.441433 z"
+ id="path7878"
+ sodipodi:nodetypes="czzzzzc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:1;fill:url(#radialGradient7908);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 135.648,81.927211 c 0,0 -4.64465,16.365075 0.58825,28.563099 5.48794,12.79254 27.22425,44.26007 27.22425,54.65565 l 22.65625,-5 c 2.54218,-6.96644 3.21052,-15.75206 2.1875,-26.5 -1.56129,-16.40307 -3.8663,-25.62141 -15.28125,-37.718749 -9.65488,-10.232047 -31.59311,-13.374857 -37.375,-14 z"
+ id="path7880"
+ sodipodi:nodetypes="czccssc"
+ inkscape:connector-curvature="0" />
+ </g>
+ <path
+ sodipodi:nodetypes="czzzcczzcc"
+ id="path7917"
+ d="m 845.03125,1154.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.2857 c 0,0 85.88551,-16.2009 112.14285,-33.5714 26.25735,-17.3705 45.58238,-49.66602 59.28572,-71.42861 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="ccccc"
+ clip-path="url(#clipPath8658)"
+ id="path7919"
+ d="m 332.34019,898.38549 -32.73181,-61.29956 -37.61734,45.10646 c 2.17675,1.31711 5.77425,-20.85603 45.6004,-64.41708 l 24.74875,80.61018 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8888);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="cccccc"
+ clip-path="url(#clipPath2833)"
+ id="path7923"
+ d="m 200.81833,863.03015 146.3711,-51.61879 243.95184,226.27414 -241.83052,140.0072 -181.01934,-87.6813 32.52692,-226.98125 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient2841);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8892);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzcczcc"
+ id="path7921"
+ d="m 642.88839,640.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57145 -62.5,123.57145 l 76.07143,18.2143 c 0,0 11.80712,-12.8234 31.07142,-46.07146 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3665)"
+ sodipodi:nodetypes="ccccccc"
+ id="path7925"
+ d="m 430.28131,381.94122 c -7.07106,2.82843 -236.18124,32.15181 -236.18124,32.15181 l -39.63961,359.83304 90.19849,92.63961 52.3259,-114.5513 100.46804,-186.39192 32.82842,-183.68124 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.4;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8856);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzcc"
+ id="path7927"
+ d="m 969.67051,1164.0346 c 0,0 23.25628,11.3937 36.06779,20.4761 12.6974,9.0015 29.4724,24.6491 41.6924,37.3605 12.3055,12.8002 20.1127,22.5987 41.5327,24.1608 21.4322,1.5629 53.2824,-8.7876 73.296,-24.6642 20.0135,-15.8766 45.6469,-69.2328 45.6469,-69.2328 l -127.1608,-143.0717"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="ccccc"
+ clip-path="url(#clipPath8642)"
+ id="path7929"
+ d="M 331.34019,641.50471 216.17367,835.36467 260.2153,925.96265 357.79603,732.21539 331.34019,641.50471 Z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8860);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <g
+ inkscape:transform-center-y="-28.255779"
+ inkscape:transform-center-x="-347.89063"
+ transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
+ id="g7931"
+ style="display:inline;opacity:1;enable-background:new">
+ <path
+ id="path7933"
+ d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
+ style="display:inline;opacity:1;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="matrix(0.9975712,-0.06965428,0.06965428,0.9975712,872.72062,140.02502)"
+ id="g7935"
+ style="display:inline;filter:url(#filter7610);enable-background:new"
+ clip-path="url(#clipPath7616)">
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 229.94262,-409.12268 c -3.55781,0.05 -9.0242,0.36009 -10.30334,0.90414 -1.60609,-0.44747 -3.90316,-0.88131 -5.89995,-0.87674 -3.07199,0.007 -4.96469,0.009 -6.90727,0 -0.66047,-0.003 -1.04759,0.0672 -1.35267,0.10959 0,0 0,1.09593 0,1.09593 0.11972,-0.17947 0.39252,-0.69046 0.94975,-0.76715 0.74758,-0.10289 5.16928,-0.15123 7.31019,-0.1096 1.7746,0.0345 4.45523,0.27427 6.38921,0.95895 0.3214,0.11378 0.61925,0.27378 0.89219,0.41097 1.96342,0.98693 7.94336,4.30154 7.94336,4.30154 0,0 -6.63275,-3.94768 -7.48287,-4.43853 -0.20331,-0.11739 -0.57464,-0.25769 -1.03609,-0.41098 1.22063,-0.44779 5.07597,-0.61971 7.82823,-0.71235 3.0245,-0.10182 3.34776,-0.0896 5.41069,0.19179 2.12931,0.29043 3.33851,0.60276 3.33851,0.60276 -1e-5,0 -0.0784,-0.64118 1.03609,-0.79455 0.74757,-0.10289 5.16929,-0.15123 7.31019,-0.1096 2.0695,0.0403 5.36605,0.40716 7.2814,1.36992 1.00332,0.50433 3.03564,1.56863 4.79535,2.53571 l 0.0956,-0.0194 c 0,0 -3.58034,-2.16242 -4.43047,-2.65327 -0.20331,-0.11739 -0.57463,-0.25769 -1.03609,-0.41098 1.22062,-0.44779 5.04719,-0.61971 7.79945,-0.71235 3.0245,-0.10182 3.34775,-0.0896 5.41069,0.19179 1.95316,0.2664 3.01292,0.53006 3.19461,0.57536 0,0 -0.0271,-0.31146 -0.0271,-0.31146 -0.40903,-0.13645 -0.71424,-0.23335 -1.40038,-0.35748 -1.30081,-0.23533 -3.39912,-0.60156 -5.50857,-0.56398 -3.57195,0.0636 -9.05328,0.35596 -10.30334,0.90414 -1.60583,-0.44695 -3.87662,-0.8813 -5.87117,-0.87674 -3.07199,0.007 -4.99348,0.009 -6.93605,0 -1.94256,-0.009 -1.71268,0.27907 -2.93558,0.27398 -1.32191,-0.005 -1.76612,-0.35463 -5.55459,-0.30138 0,0 0,0 0,0"
+ id="path7937"
+ sodipodi:nodetypes="ccssscsssscscsscsssccscssccsscssscc"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="cssccsscc"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 206.1989,-407.47878 c 1.92021,0.81706 4.57715,2.19283 6.15897,3.39739 1.58184,1.20456 2.90757,1.77368 5.55459,3.91795 0.88557,0.71738 1.74865,1.34985 2.59193,1.92174 l 0.54057,-0.19011 c -0.71323,-0.48339 -1.46776,-1.02031 -2.26909,-1.62203 -2.82223,-2.11921 -3.62655,-2.80973 -6.01507,-4.27414 -2.38854,-1.4644 -4.09948,-2.36576 -6.5619,-3.1508 0,0 0,0 0,0"
+ id="path7939"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="csccscc"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 237.79963,-407.47878 c 1.92021,0.81706 4.60594,2.19283 6.18775,3.39739 0.81307,0.61916 1.55849,1.07042 2.45046,1.65401 l 0.649,-0.11666 c -0.79831,-0.57637 -1.57177,-1.09435 -2.69653,-1.78394 -2.38854,-1.4644 -4.12826,-2.36576 -6.59068,-3.1508 0,0 0,0 0,0"
+ id="path7941"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g7943"
+ clip-path="url(#clipPath7606)">
+ <path
+ sodipodi:nodetypes="czzzzzzzzzzzzzz"
+ id="path7945"
+ d="m 1056.25,-278.80481 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
+ style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7578);enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzzzzzzzzzzz"
+ id="path7947"
+ d="m 1058.5,-275.42981 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
+ style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7594);enable-background:new"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ <path
+ sodipodi:nodetypes="cscccccccccccc"
+ id="path7949"
+ d="m 628.24553,347.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.29583 116.94091,-84.69468 185.93466,-91.46542 86.92239,-11.0168 184.91267,17.94007 233.37138,95.40128 54.124,75.7333 56.6747,172.53912 80.612,259.52795 29.4378,127.1276 54.7791,256.21414 60.3922,386.85035 -3.0634,78.18185 -8.4263,165.18417 -60.5032,228.13417 -48.0265,50.3574 -122.7864,50.053 -187.06985,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94 -95.73019,-113.47867 -97.2794,-186.01962 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#101414;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8616)"
+ sodipodi:nodetypes="ccccczzzcc"
+ id="path7951"
+ d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 -38.61152,-142.95988 -39.24264,-189.11984 -0.63145,-46.18445 10.83034,-108.60786 30.67767,-158.29647 20.04835,-50.19188 36.89674,-44.84642 42.12489,-92.59293 5.22815,-47.74651 -17.4264,-149.39192 -17.4264,-149.39192 l -55.86144,39.59798 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8940);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czcc"
+ id="path7953"
+ d="m 1010.0312,655.49186 c 0,0 16.7552,37.01806 28.7015,53.95395 11.9462,16.93589 52.7271,56.04605 52.7271,56.04605 l 52.5972,-127.58975"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient8970);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8209)"
+ sodipodi:nodetypes="cccc"
+ id="path7955"
+ d="m 730.31998,536.56864 c 0,8.48528 42.54774,58.46803 42.54774,58.46803 l 12.60659,-28.76954 -55.15433,-29.69849 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.07999998;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8822);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <g
+ clip-path="url(#clipPath3998)"
+ id="g7957"
+ style="display:inline;opacity:1;enable-background:new"
+ transform="translate(450.03125,73.843964)">
+ <g
+ id="g7959"
+ style="filter:url(#filter3677)"
+ transform="translate(-174.03125,62.156036)">
+ <g
+ style="filter:url(#filter3785)"
+ id="g7961">
+ <path
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
+ id="path7963"
+ sodipodi:nodetypes="czzzzzzzzzz"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect7965"
+ width="381.83765"
+ height="181.01935"
+ x="343.6539"
+ y="412.60312" />
+ </g>
+ <g
+ style="filter:url(#filter3785)"
+ id="g7967">
+ <path
+ transform="translate(174.03125,-62.156036)"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
+ id="path7969"
+ sodipodi:nodetypes="czzzcc"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect7971"
+ width="207.8894"
+ height="162.63455"
+ x="702.86414"
+ y="344.82138" />
+ </g>
+ </g>
+ <g
+ id="g7973"
+ style="display:inline;opacity:0.18000004;enable-background:new"
+ transform="translate(-174.03125,62.156036)">
+ <g
+ style="filter:url(#filter3785)"
+ id="g7975">
+ <path
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
+ id="path7977"
+ sodipodi:nodetypes="czzzzzzzzzz"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect7979"
+ width="381.83765"
+ height="181.01935"
+ x="343.6539"
+ y="412.60312" />
+ </g>
+ <g
+ style="filter:url(#filter3785)"
+ id="g7981">
+ <path
+ transform="translate(174.03125,-62.156036)"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
+ id="path7983"
+ sodipodi:nodetypes="czzzcc"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect7985"
+ width="207.8894"
+ height="162.63455"
+ x="702.86414"
+ y="344.82138" />
+ </g>
+ </g>
+ </g>
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="cccccscc"
+ clip-path="url(#clipPath8604)"
+ id="path7987"
+ d="M 582.65599,-7.4183011 695.79307,78.848726 804.68752,337.64981 842.87128,545.5392 963.07944,637.46308 c 0,0 -12.72793,-287.08535 -19.799,-313.95541 C 936.20938,296.63761 793.37381,-69.643698 793.37381,-69.643698 L 582.65599,-7.4183011 Z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8802);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzzzc"
+ id="path7989"
+ d="m 964.13839,239.599 c 0,0 8.67732,10.89662 24.10715,11.96428 15.42986,1.06766 49.72166,-39.95267 70.17856,-52.14285 20.4793,-12.20353 47.0464,-26.60225 63.9286,-20.35714 16.8821,6.2451 22.1578,26.43609 27.8571,48.03571 5.6994,21.59961 6.7186,61.81389 -2.6785,92.85715 -9.3972,31.04325 -50.5033,73.10375 -65.3572,103.39285 -14.8539,30.2891 -11.6071,39.82143 -11.6071,39.82143"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient8958);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzc"
+ id="path7991"
+ d="m 1124.4955,207.63471 c -15.8928,-0.89286 -49.7188,12.10583 -66.0714,24.28572 -16.4386,12.2439 -29.2209,24.1144 -29.2857,52.14285 -0.065,28.20604 13.1191,39.07641 29.1071,46.96429 15.988,7.88789 33.6862,7.11928 51.9643,-11.78571 18.2782,-18.905 14.2857,-111.60715 14.2857,-111.60715 z"
+ style="display:inline;opacity:1;fill:url(#radialGradient3315);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <ellipse
+ clip-path="url(#clipPath4100)"
+ transform="matrix(0.9434749,-0.1239943,0.1440089,1.0957669,451.94827,134.5988)"
+ id="path7993"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:url(#radialGradient3543);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4120);enable-background:accumulate"
+ cx="385"
+ cy="237.00504"
+ rx="86.428574"
+ ry="73.928574" />
+ <path
+ mask="url(#mask3684)"
+ sodipodi:nodetypes="csczzc"
+ id="path7995"
+ d="m 527.60588,407.44884 c 0,0 -122.04144,38.40348 -187.51434,9.63181 -65.47289,-28.77166 -74.37725,-124.71847 -74.37725,-124.71847 0,0 73.38158,-80.50393 129.92078,-83.61476 55.82705,-3.07164 90.57386,20.14332 114.87001,65.85171 24.352,45.81348 17.1008,132.84971 17.1008,132.84971 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3915);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czcc"
+ id="path7997"
+ d="m 772.17411,393.349 c 0,0 36.21754,-27.38247 51.60714,-35.89286 15.17734,-8.39301 25.71428,-11.60714 35.89285,-11.60714 l -15.53571,66.96428"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient3959);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <circle
+ transform="translate(449.49554,74.915393)"
+ id="path7999"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3933);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ cx="409.28571"
+ cy="306.64789"
+ r="36.25" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8616)"
+ sodipodi:nodetypes="cccccccccc"
+ id="path8001"
+ d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 8.68161,-72.36764 -31.24264,-223.11984 l 17.67767,-69.29647 72.12489,-138.59293 -42.4264,-158.39192 -55.86144,39.59798 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.3;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8806);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzcc"
+ id="path8003"
+ d="m 635.21025,581.13004 c -14.14214,12.72792 39.23347,34.58015 76.36753,24.04163 37.13406,-10.53852 104.64487,-35.56437 103.23759,-79.19596 -1.40728,-43.63158 -76.36753,-128.69343 -76.36753,-128.69343 L 635.21025,581.13004 Z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8826);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <circle
+ transform="translate(449.67411,74.915393)"
+ id="path8005"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3991);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ cx="410"
+ cy="306.64789"
+ r="23.214285" />
+ <circle
+ transform="translate(451.99554,73.486821)"
+ id="path8007"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3981);enable-background:accumulate"
+ cx="414.28571"
+ cy="303.07648"
+ r="7.5" />
+ <path
+ sodipodi:nodetypes="czzzczc"
+ id="path8009"
+ d="m 789.31696,478.349 c 0,0 7.02281,19.56859 -1.07143,35 -8.09424,15.43141 -42.32317,38.98822 -67.49999,50 -25.30972,11.06991 -85.473,32.96393 -101.78572,41.96428 -16.46148,9.08243 -18.21428,12.67857 -18.21428,12.67857 0,0 -7.14693,-19.06441 28.74999,-51.7857 36.17211,-32.97214 142.02712,-48.0495 159.82143,-87.85715 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4112);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="translate(780.74553,74.55825)"
+ id="g8011"
+ style="display:inline;opacity:1;enable-background:new">
+ <path
+ style="display:inline;opacity:1;fill:url(#radialGradient3585);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
+ id="path8013"
+ sodipodi:nodetypes="czzczzzszc"
+ clip-path="url(#clipPath3999)"
+ transform="translate(-329.81481,0)"
+ inkscape:connector-curvature="0" />
+ <ellipse
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4060);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8015"
+ transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-166.62245,2.387362)"
+ cx="183.57143"
+ cy="338.07648"
+ rx="64.715881"
+ ry="134.00607" />
+ <ellipse
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4062);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8017"
+ transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-162.19388,-18.755495)"
+ cx="183.57143"
+ cy="338.07648"
+ rx="64.715881"
+ ry="134.00607" />
+ <path
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient3587);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4079);enable-background:new"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
+ id="path8019"
+ sodipodi:nodetypes="czzczzzszc"
+ clip-path="url(#clipPath3999)"
+ transform="translate(-329.81481,3e-7)"
+ inkscape:connector-curvature="0" />
+ </g>
+ <circle
+ transform="translate(452.55663,72.581273)"
+ id="path8021"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ cx="310.71429"
+ cy="398.07648"
+ r="19.704132" />
+ <circle
+ transform="translate(450.55663,72.581273)"
+ id="path8023"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4056);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4082);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4083);enable-background:accumulate"
+ cx="310.71429"
+ cy="398.07648"
+ r="19.704132" />
+ <circle
+ transform="translate(450.55663,72.581273)"
+ id="path8025"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4119);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ cx="310.71429"
+ cy="398.07648"
+ r="19.704132" />
+ <ellipse
+ inkscape:transform-center-y="-3.6935079"
+ inkscape:transform-center-x="-47.231976"
+ transform="matrix(0.9969564,-0.07796167,0.07796167,0.9969564,436.61877,125.29509)"
+ id="path8027"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4868);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4002);enable-background:accumulate"
+ cx="429.56738"
+ cy="377.42877"
+ rx="72.079735"
+ ry="44.547726" />
+ <ellipse
+ inkscape:transform-center-y="-13.056625"
+ inkscape:transform-center-x="-20.955902"
+ transform="matrix(1.4357951,-0.06999104,0.06999104,1.4357951,235.18065,-63.86546)"
+ id="path8029"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4876);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4010);enable-background:accumulate"
+ cx="437.6991"
+ cy="391.21735"
+ rx="36.611931"
+ ry="22.627417" />
+ <g
+ style="display:inline;opacity:1;filter:url(#filter4053);enable-background:new"
+ id="g8031"
+ transform="translate(450.03125,73.843964)">
+ <circle
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4484);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8033"
+ cx="413.66071"
+ cy="401.82648"
+ r="3.2142856" />
+ <circle
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4486);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8035"
+ transform="translate(13.125009,8.1249913)"
+ cx="413.66071"
+ cy="401.82648"
+ r="3.2142856" />
+ <circle
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4488);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8037"
+ transform="translate(32.946437,7.4999913)"
+ cx="413.66071"
+ cy="401.82648"
+ r="3.2142856" />
+ <circle
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4490);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8039"
+ transform="translate(24.910723,-10.267866)"
+ cx="413.66071"
+ cy="401.82648"
+ r="3.2142856" />
+ <circle
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4492);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8041"
+ transform="translate(47.589294,-0.6250087)"
+ cx="413.66071"
+ cy="401.82648"
+ r="3.2142856" />
+ </g>
+ <path
+ sodipodi:nodetypes="ccccccccc"
+ id="path8043"
+ d="m 896.20301,482.92837 c 0.98509,4.35008 4.53707,6.17948 7.38673,7.89182 4.46068,2.51292 6.52016,1.52211 9.15451,-0.75761 1.60195,-1.92117 10.68311,-4.69865 15.59423,-7.07107 4.32961,-1.45891 8.9033,-5.35873 13.38452,-8.33376 3.39514,-1.62724 5.34664,0.35464 7.82868,1.01015 2.94412,0.71661 4.41117,2.17175 6.06092,3.53554 2.39616,1.17519 -0.9279,3.14313 3.283,4.29314 1.19091,0.21794 2.41695,0.57645 3.28299,-0.50507"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="cccccccccccc"
+ id="path8045"
+ d="m 910.85021,475.35223 c 2.31494,-0.032 3.17778,0.64253 5.49271,-0.82075 3.45564,-3.08113 5.40254,-3.14477 7.95495,-4.41942 3.02657,-1.31523 6.5357,8.15169 10.10153,9.84899 2.39509,-0.82142 1.28914,1.79379 1.45209,2.65165 0.0571,2.64684 2.80694,3.67806 4.35628,5.42957 3.31604,2.25549 7.37523,6.29546 11.11168,5.3033 6.44525,-2.93107 10.27922,-1.28146 16.28871,-7.38674 0.70405,-1.18134 -0.58425,-6.8946 3.09359,-7.19734 2.52399,0.25338 4.16667,0.0502 6.06092,0.56822 5.441,2.11719 7.73778,6.45 14.71034,7.95495 6.1829,0.96639 7.61264,3.79426 13.88959,5.05076"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="ccccccccccccc"
+ id="path8047"
+ d="m 876.98133,483.52197 c 2.39858,-0.7938 6.10613,4.1921 8.17313,7.04568 0.59281,2.67952 1.15377,5.48645 0.75761,12.12183 0.78513,2.41754 2.68049,3.03095 4.79823,3.283 3.11745,-0.53678 5.87669,-1.3243 7.3236,-3.03046 1.8716,-1.94167 5.31253,2.39394 8.08122,4.04061 3.61009,1.91209 7.77378,1.97886 11.8693,2.27284 1.70358,-0.23064 2.3704,4.51515 3.28299,8.08123 0.38414,4.37806 -0.88544,6.89569 -1.76776,9.84898 -0.2943,2.49655 2.9885,3.52974 6.31345,4.54569 3.18244,0.74124 6.54424,1.66184 9.09137,1.76777 5.14186,0.87491 8.08874,2.69052 12.12183,4.04061 2.23914,0.81655 3.26019,2.24216 4.54569,3.53553"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8514)"
+ id="path8049"
+ d="m 332,187.69519 c 0,0 57.5,-25.5 57.5,-28 0,-2.5 5.5,-52 5.5,-52 0,0 91,-48.500001 91.5,-50.500001 0.5,-2 86,-62.0000004 86,-62.0000004 L 386.5,17.195189 311,123.19519 l 21,64.5 z"
+ style="display:inline;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter8814);enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path8051"
+ d="m 1697.2846,722.5514 c 0,0 -115.9655,73.5391 -123.0365,77.78174 -7.0711,4.24264 -230.5169,137.17872 -230.5169,137.17872 l 4.2427,39.59798 216.3747,-100.40917 117.3797,-101.82337 15.5563,-52.3259 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="cccccscccc"
+ clip-path="url(#clipPath8610)"
+ id="path8053"
+ d="m 528.91587,556.85291 c -5.65685,-1.41421 -181.01933,74.95332 -181.01933,74.95332 l -33.94113,181.01934 51.09546,193.94823 257.2031,67.6813 c 0,0 206.47518,152.735 212.13203,148.4924 5.65686,-4.2426 168.2914,-193.7473 168.2914,-193.7473 L 842.87128,845.35248 796.20224,667.16157 528.91587,556.85291 Z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzzzczczczczzzc"
+ id="path8055"
+ d="m 1097.6433,613.88997 c 0,0 22.6195,-6.50681 35.7427,-5.87273 13.1233,0.63409 30.6416,1.93862 43.7089,12.18619 13.0673,10.24756 25.0677,27.14007 34.1124,58.36965 9.0446,31.22958 1.6983,99.25201 -6.1761,143.34735 -7.8743,44.09534 -28.2651,106.11298 -45,140 -16.7348,33.88702 -49.7977,77.49517 -60.5694,89.87617 -11.3642,13.062 -56.2059,36.4262 -79.4306,42.2667 5.3034,-10.6066 48.8998,-50.5889 35,-60.7143 -14.0189,-10.2123 -45.76,45.9824 -84.2931,29.0332 21.38231,-13.1321 41.7794,-51.1861 34.0406,-66.59448 -7.84024,-15.61039 -30.70492,48.75758 -93.53553,37.01288 30.05204,-27.5267 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.2932 -60.46175,54.2932 0,0 -2.8219,-41.70123 13.7732,-68.60737 16.63935,-26.97787 79.65297,-81.61527 99.55308,-111.70342 19.9002,-30.08814 33.6126,-66.00902 42.1355,-92.51794 8.5228,-26.50892 15.8009,-77.09954 15.8009,-77.09954"
+ style="display:inline;opacity:1;fill:#0c0c0c;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="cccccccccc"
+ clip-path="url(#clipPath8622)"
+ id="path8057"
+ d="m 770.74639,609.17881 -50.91169,97.58074 -79.90307,111.01576 34.64824,71.41778 42.42641,79.19597 72.12489,-45.25484 14.14214,-192.33305 21.2132,-138.59292 -14.14214,-90.15612 -39.59798,107.12668 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8818);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ sodipodi:nodetypes="cczcccccc"
+ clip-path="url(#clipPath8906)"
+ id="path8059"
+ d="m 295,846.19519 6.64488,-68.92285 c 0,0 90.31951,89.00457 162.35512,122.92285 72.03561,33.91828 308,62 308,62 l 154,-26 -36,162.00001 -286,26 -298,-89 -11,-189.00001 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ clip-path="url(#clipPath3602)"
+ sodipodi:nodetypes="cccccccccccc"
+ id="path8061"
+ d="m 405.79629,845.99023 74.95332,65.05383 2.49963,16.8804 19.40336,10.15891 6.49204,23.05109 31.70905,-8.3711 14.84924,48.08324 c 12.25652,12.7279 89.79344,-113.1097 55.86143,38.1838 l -60.81118,16.2635 -89.20292,-94.69286 -62.82503,-53.79963 7.07106,-60.81118 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3587);enable-background:new"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzzzzzcc"
+ id="path8063"
+ d="m 1159.317,918.349 c 54.2857,-1.42857 126.035,-15.05199 170,-26.78572 44.0527,-11.75714 125.8863,-36.34724 175.357,-57.85714 49.3393,-21.45272 113.6038,-59.2816 154.2859,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7142,-33.57143 8.3691,22.36779 -16.4069,56.32562 -37.8571,81.07143 -21.6042,24.9234 -52.7314,52.70533 -98.9287,89.28571 -46.1973,36.58038 -156.0825,101.58463 -212.8571,128.5714 -57.066,27.1254 -128.2033,58.2385 -172.1428,72.5001 -43.9395,14.2616 -131.4286,31.0714 -131.4286,31.0714 L 1159.317,918.349 Z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ clip-path="url(#clipPath3992)"
+ sodipodi:nodetypes="czczzcc"
+ id="path8065"
+ d="m 1241.5965,652.95007 c 0,0 -64.7215,54.33706 -145.6639,98.99494 -82.0244,45.25484 -284.25704,93.3381 -284.25704,93.3381 0,0 -15.10137,21.05196 45.25489,28.28428 60.35626,7.23232 224.08195,-53.30069 278.60015,-96.16654 54.5182,-42.86585 120.2081,-111.72286 120.2081,-111.72286 l -14.1422,-12.72792 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:url(#linearGradient3666);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3779);enable-background:accumulate"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <g
+ clip-path="url(#clipPath3986)"
+ id="g8067"
+ style="display:inline;opacity:1;enable-background:new"
+ transform="translate(450.03125,73.843964)">
+ <g
+ id="g8069"
+ style="filter:url(#filter3677)"
+ transform="translate(-174.03125,62.156036)">
+ <g
+ id="g8071"
+ style="filter:url(#filter3785)">
+ <path
+ transform="translate(174.03125,-62.156036)"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
+ id="path8073"
+ sodipodi:nodetypes="czzccccc"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8075"
+ width="333.75412"
+ height="309.71277"
+ x="1197.8389"
+ y="486.14224" />
+ </g>
+ </g>
+ <g
+ id="g8077"
+ style="display:inline;opacity:0.18000004;enable-background:new"
+ transform="translate(-174.03125,62.156036)">
+ <g
+ id="g8079"
+ style="filter:url(#filter3785)">
+ <path
+ transform="translate(174.03125,-62.156036)"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
+ id="path8081"
+ sodipodi:nodetypes="czzccccc"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8083"
+ width="333.75412"
+ height="309.71277"
+ x="1197.8389"
+ y="486.14224" />
+ </g>
+ </g>
+ </g>
+ <path
+ sodipodi:nodetypes="cssssccccccssssssssccssssssccssssc"
+ clip-path="url(#clipPath3722)"
+ id="path8085"
+ d="m 1264.1875,605 c -4.4911,0.73268 -8.157,3.45509 -11.9375,6.40625 -10.0813,7.86976 -28.1695,34.42524 -48.0312,50.46875 -39.8674,32.20316 -103.996,69.97701 -152.5626,91.09375 -48.614,21.13738 -130.54122,45.81801 -174.31245,57.5 -43.39821,11.58246 -115.04403,25.13107 -168.25,26.53125 l -4.5625,0.125 -2,4.125 -92.84375,192.125 -6.5,13.4688 14.65625,-2.8438 c 0,0 87.26968,-16.6514 132.34375,-31.2812 44.7252,-14.51667 115.79086,-45.66683 173.03125,-72.87505 C 980.82199,912.46306 1090.1551,847.86412 1137.5,810.375 c 46.3608,-36.70982 77.8049,-64.71682 99.9375,-90.25 10.9011,-12.576 22.7448,-27.53144 31.0313,-42.75 8.2864,-15.21856 19.1597,-44.21808 13.6874,-58.84375 -1.2177,-3.25474 -2.5514,-6.0613 -4.5937,-8.5 -2.0423,-2.4387 -8.4747,-1.57199 -8.5625,-5.03125 -0.2098,-8.26482 -3.3155,-0.24423 -4.8125,0 z m 2.1563,15.21875 c 0.4148,0.58574 1.0311,1.55766 1.7812,3.5625 2.8968,7.74213 -1.4407,31.89875 -8.8125,45.4375 -7.3718,13.53875 -22.6384,28.92394 -33.1875,41.09375 -21.0754,24.31356 -51.9037,51.86156 -97.9375,88.3125 -45.0496,35.67159 -155.46033,101.09459 -211.40625,127.6875 -56.89173,27.04249 -128.09616,58.1184 -171.25,72.125 -36.36491,11.8031 -95.84471,23.8338 -115.71875,27.7813 L 714.09375,851.75 c 54.70691,-2.0493 123.79259,-15.21635 167.125,-26.78125 44.33422,-11.83225 126.07865,-36.33633 176.40625,-58.21875 50.112,-21.78871 112.5344,-61.16816 154.0312,-94.6875 20.6464,-16.67721 41.7449,-42.54588 49.8126,-48.84375 2.437,-1.90242 4.0806,-2.6358 4.875,-3 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.83300003;fill:#050505;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:15;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;filter:url(#filter8225);enable-background:accumulate"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <g
+ inkscape:transform-center-y="-12.859654"
+ inkscape:transform-center-x="-185.09603"
+ transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
+ mask="url(#mask7704)"
+ id="g8087"
+ style="display:inline;opacity:1;enable-background:new">
+ <path
+ sodipodi:nodetypes="ccssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssscccccssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssssssssc"
+ id="path8089"
+ d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 -12.4759,3.49189 7.92966,19.27772 c -0.59163,1.97357 12.54624,-4.73836 12.54624,-4.73836 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 
1.61175,-0.87937 2.21875,-1.53125 1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 
0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
+ style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ transform="translate(0.08004571,-0.03125)"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g8091"
+ clip-path="url(#clipPath7421)">
+ <path
+ sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7001);enable-background:new"
+ d="m 1107.409,-284.04961 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88717,-0.50701 -5.875,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0725 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0774 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,10e-6 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 10e-6,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0532 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 10e-6,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0728,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 10e-6,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0773,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.4497,-0.12571 6.375,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -10e-5,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 
-1.0313,-0.3125 1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -1e-4,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
+ id="path8093"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6949);enable-background:new"
+ d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
+ id="path8095"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6961);enable-background:new"
+ d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
+ id="path8097"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6957);enable-background:new"
+ d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
+ id="path8099"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6997);enable-background:new"
+ d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.4428,3.93956 1.5794,5.38995 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.9442,0.42864 -1.2366,0.49452 0.531,0.18589 0.8908,0.21322 0.9524,1.05768 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.9524,0.18199 -1.288,0.0511 z"
+ id="path8101"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6953);enable-background:new"
+ d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
+ id="path8103"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6993);enable-background:new"
+ d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
+ id="path8105"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6989);enable-background:new"
+ d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
+ id="path8107"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6985);enable-background:new"
+ d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
+ id="path8109"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6965);enable-background:new"
+ d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
+ id="path8111"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6981);enable-background:new"
+ d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
+ id="path8113"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6977);enable-background:new"
+ d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
+ id="path8115"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6973);enable-background:new"
+ d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
+ id="path8117"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6969);enable-background:new"
+ d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
+ id="path8119"
+ inkscape:connector-curvature="0" />
+ <g
+ style="fill:#ffffff;fill-opacity:1;filter:url(#filter7345)"
+ id="g8121">
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 744.9375,-212.11731 c 0,0 7.22229,-3.22318 9.0625,-3.5 1.84021,-0.27682 3.35225,-0.003 6,0.5625 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 5.3125,3.5625 5.3125,3.5625 0,0 -7.14644,-2.78019 -10.1875,-3.5625 -3.04106,-0.78231 -7.64461,-2.08374 -10.375,-2.3125 -2.73039,-0.22876 -11,1.875 -11,1.875 z"
+ id="path8123"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 735.46875,-206.95416 c 0,0 3.65979,-2.22318 5.5,-2.5 1.84021,-0.27682 3.66475,0.24677 6.3125,0.8125 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 6.5625,2.125 6.5625,2.125 0,0 -8.39644,-1.34269 -11.4375,-2.125 -3.04106,-0.78231 -7.95711,-2.33374 -10.6875,-2.5625 -2.73039,-0.22876 -7.4375,0.875 -7.4375,0.875 z"
+ id="path8125"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 759.85042,-217.61116 c 0,0 8.5437,-3.29857 10.39778,-3.45786 1.85409,-0.1593 3.64166,0.4792 6.2481,1.21208 2.60644,0.73288 8.57724,2.76594 10.95036,4.07925 2.37312,1.31331 6.41417,2.53782 6.41417,2.53782 0,0 -8.29413,-1.87365 -11.27931,-2.84767 -2.98519,-0.97402 -7.79269,-2.83478 -10.50302,-3.23662 -2.71033,-0.40184 -12.22808,1.713 -12.22808,1.713 z"
+ id="path8127"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 775.19813,-223.2266 c 0,0 7.77133,-2.78244 9.62831,-2.90349 1.85697,-0.12104 3.631,0.55417 6.22178,1.34062 2.59077,0.78645 8.5184,2.94217 10.86394,4.30412 2.34555,1.36195 6.36049,2.6695 6.36049,2.6695 0,0 -8.25373,-2.04423 -11.21821,-3.07958 -2.96447,-1.03535 -7.73259,-2.99481 -10.43406,-3.45243 -2.70147,-0.45763 -11.42225,1.12126 -11.42225,1.12126 z"
+ id="path8129"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 789.64298,-227.95417 c 0,0 8.68256,-3.52031 10.54154,-3.60535 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.3006,1.78871 -12.3006,1.78871 z"
+ id="path8131"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.852145"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 804.49513,-233.32948 c 0,0 7.80756,-2.58281 9.66654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -11.4256,0.85121 -11.4256,0.85121 z"
+ id="path8133"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.852145"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 819.55763,-237.57948 c 0,0 8.55756,-2.58281 10.41654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.1756,0.85121 -12.1756,0.85121 z"
+ id="path8135"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.852145"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 836.23395,-242.60125 c 0,0 6.96702,-1.98723 8.82784,-1.96757 1.86081,0.0197 3.57873,0.82702 6.10265,1.80705 2.52393,0.98 8.27166,3.57758 10.50756,5.11291 2.2359,1.53535 6.14053,3.14261 6.14053,3.14261 0,0 -8.07561,-2.66222 -10.95336,-3.91866 -2.87774,-1.25645 -7.48412,-3.5707 -10.14328,-4.23121 -2.65915,-0.66049 -10.48194,0.0549 -10.48194,0.0549 z"
+ id="path8137"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.64141"
+ inkscape:transform-center-y="-4.9269042"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 850.73028,-246.00461 c 0,0 7.68784,-2.02768 9.54782,-1.96854 1.85997,0.0592 3.56038,0.90279 6.06293,1.93616 2.50255,1.03334 8.19387,3.75232 10.39668,5.33475 2.20282,1.58245 6.07245,3.2722 6.07245,3.2722 0,0 -8.01729,-2.83298 -10.86772,-4.15022 -2.85043,-1.31723 -7.40666,-3.72872 -10.0512,-4.4455 -2.64454,-0.71678 -11.16096,0.0211 -11.16096,0.0211 z"
+ id="path8139"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.55068"
+ inkscape:transform-center-y="-5.1542119"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 864.82496,-249.21081 c 0,0 8.16952,-1.96906 10.02688,-1.85396 1.85735,0.11512 3.53158,1.00956 6.0019,2.11779 2.47031,1.10821 8.0772,3.99727 10.23138,5.64531 2.15418,1.64804 5.9712,3.45352 5.9712,3.45352 0,0 -7.92839,-3.07306 -10.73787,-4.4755 -2.80949,-1.40244 -7.29106,-3.94999 -9.91283,-4.74606 -2.62176,-0.79606 -11.58066,-0.1411 -11.58066,-0.1411 z"
+ id="path8141"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.41151"
+ inkscape:transform-center-y="-5.4740887"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 881.38485,-251.60282 c 0,0 8.08536,-1.90809 9.93837,-1.73664 1.853,0.17147 3.4993,1.11633 5.93482,2.29908 2.43553,1.18271 7.95209,4.2407 10.05523,5.95339 2.10314,1.7127 5.86357,3.63326 5.86357,3.63326 0,0 -7.8314,-3.3124 -10.597,-4.7995 -2.76561,-1.48712 -7.16775,-4.16959 -9.76414,-5.04491 -2.59637,-0.87531 -11.43085,-0.30468 -11.43085,-0.30468 z"
+ id="path8143"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.258805"
+ inkscape:transform-center-y="-5.79376"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 896.58415,-254.34724 c 0,0 7.64166,-1.4277 9.49547,-1.26515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-0.76897 -10.99774,-0.76897 z"
+ id="path8145"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 911.45328,-255.98544 c 0,0 8.64166,-1.5527 10.49547,-1.39015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.99774,-0.64397 -11.99774,-0.64397 z"
+ id="path8147"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 927.70328,-258.29794 c 0,0 7.64166,-0.8652 9.49547,-0.70265 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-1.33147 -10.99774,-1.33147 z"
+ id="path8149"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 942.82828,-259.48544 c 0,0 8.57916,-1.4902 10.43297,-1.32765 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.93524,-0.70647 -11.93524,-0.70647 z"
+ id="path8151"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 959.07828,-261.54794 c 0,0 7.82916,-0.8027 9.68297,-0.64015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.18524,-1.39397 -11.18524,-1.39397 z"
+ id="path8153"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 974.45328,-262.79794 c 0,0 8.39166,-1.1777 10.24547,-1.01515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08376,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.84721,-3.27474 -10.61993,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.74774,-1.01897 -11.74774,-1.01897 z"
+ id="path8155"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 990.64078,-264.86044 c 0,0 6.89166,-0.9902 8.74547,-0.82765 1.85385,0.16256 3.50465,1.0995 5.94575,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.60053,-0.86282 -10.24772,-1.20647 -10.24772,-1.20647 z"
+ id="path8157"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1007.7658,-265.79794 c 0,0 6.8291,-1.1777 8.683,-1.01515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -10.1852,-1.01897 -10.1852,-1.01897 z"
+ id="path8159"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1023.8908,-267.79794 c 0,0 6.0791,-0.4277 7.933,-0.26515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.4352,-1.76897 -9.4352,-1.76897 z"
+ id="path8161"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1039.7033,-269.17294 c 0,0 6.4541,-0.6777 8.308,-0.51515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.8102,-1.51897 -9.8102,-1.51897 z"
+ id="path8163"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.28378"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1055.2718,-271.03319 c 0,0 5.4976,-0.90945 7.3578,-0.85348 1.8601,0.056 3.5619,0.89674 6.0661,1.92586 2.5044,1.0291 8.2003,3.7384 10.4058,5.31709 2.2055,1.57871 6.078,3.2619 6.078,3.2619 0,0 -8.022,-2.81939 -10.8748,-4.13178 -2.8526,-1.31238 -7.4129,-3.71613 -10.0587,-4.42843 -2.6457,-0.71228 -8.9742,-1.09116 -8.9742,-1.09116 z"
+ id="path8165"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.55813"
+ inkscape:transform-center-y="-5.1360724"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1072.7007,-273.48537 c 0,0 4.5472,-1.15581 6.408,-1.18621 1.8607,-0.0304 3.5996,0.73049 6.1489,1.64231 2.5494,0.91177 8.3649,3.35386 10.6414,4.8285 2.2763,1.47468 6.2227,2.97636 6.2227,2.97636 0,0 -8.1442,-2.44411 -11.0547,-3.62272 -2.9105,-1.1786 -7.5774,-3.36815 -10.2534,-3.95691 -2.6759,-0.58875 -8.1129,-0.68133 -8.1129,-0.68133 z"
+ id="path8167"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.74758"
+ inkscape:transform-center-y="-4.6370147"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1087.1585,-276.5244 c 0,0 5.96,-1.77355 7.8202,-1.83024 1.86,-0.0567 3.6096,0.67955 6.1715,1.55525 2.562,0.87566 2.5226,0.85713 5.3335,1.49015 2.7969,0.62986 7.0767,1.51313 7.0767,1.51313 0,0 -3.6155,-0.0163 -6.7923,-0.46614 -3.1155,-0.44119 -7.3743,-1.69825 -10.0584,-2.24913 -2.6839,-0.55088 -9.5512,-0.013 -9.5512,-0.013 z"
+ id="path8169"
+ sodipodi:nodetypes="czzzczzc"
+ inkscape:transform-center-x="13.79933"
+ inkscape:transform-center-y="-4.4842392"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 1099.25,-279.92981 c 0.1612,0.26862 11.2081,-4.60046 12.1875,-4.6875 0.9794,-0.087 2,3.125 2,3.125 0,0 -0.7751,-1.50434 -2.875,-1.0625 -2.0999,0.44184 -11.3009,2.67141 -11.3125,2.625 z"
+ id="path8171"
+ sodipodi:nodetypes="czczc"
+ inkscape:connector-curvature="0" />
+ </g>
+ <path
+ sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7333);enable-background:new"
+ d="m 1107.4532,-284.0938 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88718,-0.50701 -5.87501,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0726 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0775 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,1e-5 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 1e-5,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0533 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 1e-5,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0727,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 1e-5,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0772,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.44971,-0.12571 6.37501,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -1e-4,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 -1.0313,-0.3125 
1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -10e-5,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
+ id="path8173"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7285);enable-background:new"
+ d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
+ id="path8175"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7289);enable-background:new"
+ d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
+ id="path8177"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7293);enable-background:new"
+ d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
+ id="path8179"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7337);enable-background:new"
+ d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.2218,3.85117 1.3584,5.30156 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.7232,0.51703 -1.0156,0.58291 0.531,0.18589 0.6698,0.12483 0.7314,0.96929 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.7314,0.27038 -1.067,0.13944 z"
+ id="path8181"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7297);enable-background:new"
+ d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
+ id="path8183"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7301);enable-background:new"
+ d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
+ id="path8185"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7305);enable-background:new"
+ d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
+ id="path8187"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7309);enable-background:new"
+ d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
+ id="path8189"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7313);enable-background:new"
+ d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
+ id="path8191"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7317);enable-background:new"
+ d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
+ id="path8193"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7321);enable-background:new"
+ d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
+ id="path8195"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7329);enable-background:new"
+ d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
+ id="path8197"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7325);enable-background:new"
+ d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
+ id="path8199"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ <path
+ sodipodi:nodetypes="ccccccccc"
+ id="path8201"
+ d="m 863.87812,475.6679 c 1.64212,-3.218 3.51781,-5.73529 4.86136,-9.84898 0.79872,-3.65789 3.31204,-2.03073 7.26047,-8.3969 1.40193,-2.2395 5.47653,0.39136 8.9651,-2.39911 1.27072,-0.80319 2.88488,-0.40431 4.48256,-0.0631 3.76539,1.31896 5.82576,3.70355 8.33376,5.80837 6.13906,5.97023 20.53414,7.94327 23.48604,6.31346 1.43405,-2.90474 7.88128,-5.40888 12.37437,-11.11168 0.74811,-1.12267 11.72936,-8.74446 14.64721,-6.56599"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="ccccccccccc"
+ id="path8203"
+ d="m 888.50059,465.25071 c 7.36341,-3.23297 13.8109,-8.9084 20.70813,-13.38452 3.31057,-1.96954 6.86983,3.21601 10.796,3.59866 2.29773,-0.21813 3.7129,1.20259 5.68211,1.6415 5.15636,1.31779 2.39793,3.86488 9.97526,6.43972 6.15561,1.7204 8.9074,-6.79847 14.89975,-7.3236 4.87739,-0.50299 8.09892,-0.31603 11.61675,-0.25254 3.92696,0.13889 4.07855,-3.4976 6.06092,-5.3033 2.98056,-2.80522 7.15561,-1.84972 10.14485,-4.7409 1.01754,-1.38468 1.95458,-3.01085 2.73459,-5.10809 0.88201,-2.00034 3.04006,0.30598 4.79823,1.26269"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path8994"
+ d="m 403.27922,1056.3058 56.56854,-42.4264 72.12489,14.1421 -46.66904,52.3259 -53.74012,7.0711 -28.28427,-31.1127 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9048);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path4189"
+ d="m 542.27183,1060.5719 c -1.40727,18.8012 -1.1449,32.751 2.08174,49.3033 3.22666,16.5523 16.40609,45.9073 20.33441,63.1837 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1483 -15.31761,12.9774 -42.05128,21.5989 -67.83231,15.7337 -25.78105,-5.8652 -69.54907,-49.2234 -88.59019,-70.2283 -19.11214,-21.0833 -63.76086,-93.8506 -77.93853,-124.2758 -14.17767,-30.4251 -12.65961,-36.7186 -8.11972,-45.52972 -9.36672,-24.5205 -12.41371,-50.06681 -33.71245,-75.57664 30.32547,3.11444 43.88028,26.95633 60.12568,47.13975 -5.52989,-48.07603 -18.05471,-64.4165 -28.37395,-90.7243 29.9943,6.08165 50.57936,31.87239 63.97979,72.7125 9.55415,-3.91791 18.23776,-9.37294 30.18741,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.68725,-101.06994 53.44196,5.67033 83.65702,80.63932 78.97142,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34702 -21.04781,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38966,85.80578 6.10287,20.3689 1.51881,38.70051 1.51881,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24138 -3.53269,59.10328 -4.94582,77.98328 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccccccccccccccccc"
+ id="path4191"
+ d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)"
+ sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
+ clip-path="url(#clipPath3677)"
+ id="path4193"
+ d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.84562,77.83981 -43.7404,101.4864 -6.60491,13.1304 -18.52833,57.4859 -31.12335,76.2465 -12.59502,18.7605 -28.53137,39.7673 -37.17204,44.4209 -21.49052,11.5742 -44.55594,25.5059 -60.61889,18.0895 -14.37486,-6.637 -23.03969,-21.1927 -23.81407,-37.7433 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 6.76466,-22.55271 10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898);enable-background:new"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3622)"
+ id="g3617"
+ style="display:inline;enable-background:new">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024);enable-background:accumulate"
+ d="m -15.66751,843.48852 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
+ id="path4195"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-52.200498,74.09707)"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020);enable-background:accumulate"
+ d="m 118.70648,859.93048 -55.154328,-46.66904 -43.84062,36.76955 33.94113,53.74011 -13.596814,85.46203 -39.44536579,28.29217 -41.01220021,11.3137 -2.82842,46.669 56.56854,25.4559 18.943987,-69.65 23.45655,-58.85663 46.347541,-72.61491 16.62,-39.91188 z"
+ id="path4197"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-46.92842,75.511284)"
+ sodipodi:nodetypes="ccccccccccccc"
+ inkscape:connector-curvature="0" />
+ </g>
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,229.07158,211.51128)"
+ id="path4199"
+ d="m -70.82184,932.58397 60.81118,-26.87005 100.40916,31.1127 -63.63961,31.11269 -82.02438,-16.97056 -15.55635,-18.38478 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)"
+ clip-path="url(#clipPath4177)"
+ sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
+ id="path4201"
+ d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -7.17516,44.14234 -8.78421,66.21911 -8.78379,2.34171 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 4.66299,24.40472 3.63165,36.35334 -7.06405,45.03355 -22.14231,87.36194 -35.95355,130.6798 -12.07476,32.9493 -27.3742,58.8525 -47.88808,87.2015 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -44.20676,10.8769 -62.59956,0.046 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 4.93886,-28.5121 4.4106,-43.4918 -0.53824,-15.2629 -2.29135,-30.5647 -6.54261,-46.8663 -4.25126,-16.30162 -9.04325,-24.91794 -16.11906,-41.57338 -7.24111,-17.04456 -15.07015,-36.74863 -18.20542,-56.28842 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4105);enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccc"
+ id="path4203"
+ d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccc"
+ id="path4205"
+ d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8338)"
+ style="display:inline;filter:url(#filter8333);enable-background:new"
+ id="g8317">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
+ id="path4209"
+ sodipodi:nodetypes="ccccc"
+ clip-path="none"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8315"
+ width="182"
+ height="177"
+ x="-55"
+ y="757.19519" />
+ </g>
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8359)"
+ style="display:inline;filter:url(#filter8354);enable-background:new"
+ id="g8346">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
+ id="path4207"
+ sodipodi:nodetypes="ccccccc"
+ clip-path="none"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8344"
+ width="165"
+ height="176"
+ x="-22"
+ y="696.19519" />
+ </g>
+ <path
+ sodipodi:nodetypes="czzzzzzcccccccccczczz"
+ id="path8848"
+ d="m 1036.164,1071.8338 c 6.7941,18.9028 10.4937,33.2997 11.8903,51.2119 1.3966,17.9123 -3.7827,51.8008 -2.9005,70.6561 0.8818,18.8452 8.1337,40.099 27.3446,48.9689 19.4189,8.9658 49.3193,10.2113 74.1199,-3.1456 24.8006,-13.357 57.401,-70.3255 70.9742,-97.3087 13.6239,-27.0839 38.7611,-114.4974 44.6608,-149.76859 5.8998,-35.27121 2.5506,-41.30077 -4.6174,-49.05549 2.6403,-27.84015 -1.4998,-54.93543 13.1096,-87.18618 -30.249,11.8257 -37.3823,40.1607 -48.3189,65.50508 -8.0009,-50.93293 0.2092,-71.27319 3.3189,-101.21936 -29.0647,14.77791 -42.8615,47.11402 -45,92.85714 -10.9239,-1.3042 -21.3914,-4.43423 -33.5714,-0.71429 -0.264,-46.02334 -1.4635,-76.88941 8.9106,-114.20649 -53.2554,21.02686 -62.9472,106.5941 -56.0535,112.77792 -10.8828,0.535 -21.371,-1.2973 -32.8571,2.85715 0.6389,-42.57135 -0.2605,-84.90861 -30,-122.85715 0,0 -30.958,80.92234 -31.4286,103.57143 -0.4705,22.64909 9.4516,40.16588 9.4516,40.16588 0,0 -8.568,36.74051 -6.2986,58.23223 2.2959,21.74142 20.4429,59.67622 27.2655,78.65812 z"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccccccccccccccccc"
+ id="path3635"
+ d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
+ clip-path="url(#clipPath3677)"
+ id="path3669"
+ d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.79272,123.1659 -43.6875,146.8125 -6.60491,13.1304 -18.02998,33.8957 -30.625,52.6563 -12.59502,18.7605 -27.35933,35.5338 -36,40.1874 -21.49052,11.5742 -48.7808,10.2602 -64.84375,2.8438 -14.37486,-6.637 -20.53812,-23.4494 -21.3125,-40 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 6.76466,-22.55271 10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898);enable-background:new"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3636)"
+ id="g3628">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024);enable-background:accumulate"
+ d="m 824.48651,818.48242 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
+ id="path8988"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020);enable-background:accumulate"
+ d="m 964.49365,855.25197 -55.15433,-46.66904 -43.84062,36.76955 33.94113,53.74011 7.07106,66.46804 -50.91168,35.35537 -41.0122,11.3137 -2.82842,46.669 56.56854,25.4559 63.63961,-76.3676 24.04163,-94.75227 8.48528,-57.98276 z"
+ id="path8990"
+ inkscape:connector-curvature="0" />
+ </g>
+ <path
+ id="path8992"
+ d="m 1045.3322,1043.5779 60.8112,-26.8701 100.4091,31.1127 -63.6396,31.1127 -82.0244,-16.9706 -15.5563,-18.3847 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ clip-path="url(#clipPath4177)"
+ sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
+ id="path4149"
+ d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -3.2972,44.17323 -4.90625,66.25 -1.31238,1.37679 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 3.12294,24.83149 2.0916,36.78011 -7.06405,45.03355 -21.76553,88.37934 -35.57677,131.69714 -12.07476,32.9493 -30.7197,63.08 -51.23358,91.429 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -43.57595,5.1744 -61.96875,-5.6562 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 2.37211,-27.8008 7.91747,-42.8053 5.54535,-15.0045 2.47105,-31.3317 -1.78021,-47.6333 -4.25126,-16.3016 -12.17903,-26.26002 -21.82158,-42.20417 -9.64255,-15.94415 -17.6369,-36.03734 -20.77217,-55.57713 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4185);enable-background:new"
+ transform="translate(450.03125,73.843964)"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccc"
+ id="path3902"
+ d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="translate(276,136)"
+ clip-path="url(#clipPath3631)"
+ sodipodi:nodetypes="cccccccc"
+ id="path4135"
+ d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141);enable-background:accumulate"
+ inkscape:connector-curvature="0" />
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8392)"
+ style="filter:url(#filter8379)"
+ id="g8367">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
+ id="path4145"
+ sodipodi:nodetypes="ccccccc"
+ clip-path="none"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8365"
+ width="123.03658"
+ height="172.53406"
+ x="877.51953"
+ y="650.19098" />
+ </g>
+ <g
+ transform="translate(276,136)"
+ clip-path="url(#clipPath8417)"
+ style="filter:url(#filter8404)"
+ id="g8400">
+ <path
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
+ id="path4147"
+ sodipodi:nodetypes="ccccc"
+ clip-path="none"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="rect8398"
+ width="142.12846"
+ height="125.1579"
+ x="924.89569"
+ y="677.06104" />
+ </g>
+ </g>
+ <rect
+ id="rect6247"
+ width="440"
+ height="376"
+ x="548"
+ y="205.32275"
+ style="opacity:1;fill:none;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <rect
+ style="opacity:1;fill:none;fill-opacity:1;stroke:#f83615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6676"
+ width="1684"
+ height="1292"
+ x="56"
+ y="53.322754" />
+ <rect
+ style="opacity:1;fill:url(#pattern5557);fill-opacity:1;stroke:#f815bb;stroke-width:13.34657478;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6731"
+ width="522.56604"
+ height="1182.4679"
+ x="3493.3721"
+ y="87.178711" />
+ <g
+ id="g7477"
+ transform="matrix(0.53474256,0,0,1,1882.7509,3.0962157)">
+ <rect
+ y="99.705269"
+ x="3039.4895"
+ height="902.66437"
+ width="818.51605"
+ id="rect6979"
+ style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <g
+ clip-path="url(#clipPath6975)"
+ id="g4303-9"
+ style="display:inline;enable-background:new"
+ transform="matrix(1.8806916,0,0,2.3994874,1997.8763,-394.32602)">
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter11361-3);enable-background:new"
+ d="m 304.64285,526.6479 c -10,0.35715 -18.21428,2.85714 -18.21428,2.85714 l 7.5,6.07143 10.35714,3.57143 16.07143,0.35714 22.5,-5.35714 7.85714,1.07143 20.35715,-2.14286 -10.35715,6.78572 c 5.45923,-1.02361 17.39329,3.56911 9.64286,5.35714 -1.74,0.40142 13.92857,-4.64285 13.92857,-4.64285 l 2.5,-4.64287 3.57143,-9.28571 11.42857,0 18.21428,-4.64286 3.57144,-4.99999 -16.07144,1.07142 -12.14285,2.14286 -14.64286,-5 -70.6921,16.70774 -5.37933,-5.27917 z"
+ id="path10326-6"
+ sodipodi:nodetypes="cccccccccsccccccccccc"
+ transform="matrix(10.726753,0,0,10.726753,-2882.1235,-4565.4583)"
+ inkscape:export-filename="/home/cheeseness/Documents/LCA09/mascot/tuz_new.png"
+ inkscape:export-xdpi="142.10527"
+ inkscape:export-ydpi="142.10527" />
+ <g
+ style="display:inline;opacity:1;enable-background:new"
+ id="g7882-9"
+ transform="matrix(0.71084,-0.1937433,0.262963,0.9648058,503.68027,136.48399)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzzcc"
+ id="path7876-3"
+ d="m 245.12255,100.05344 c 0,0 -47.12811,-31.646921 -67.21465,-35.800939 -20.03792,-4.143963 -38.4729,-3.317578 -51.93364,13.607323 -13.46074,16.924901 -12.07739,61.265196 -13.53554,86.969546 -1.45815,25.70435 2.54945,70.17701 17.6046,88.66552 15.05516,18.4885 45.88634,13.58502 49.92695,21.4137 2.21283,4.28736 65.15228,-174.85515 65.15228,-174.85515 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7904-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzzzc"
+ id="path7878-3"
+ d="m 135.37935,82.017807 c 0,0 26.34355,1.938783 37.63307,13.903188 11.41494,12.097335 13.73457,21.331515 15.29586,37.734585 1.56337,16.42499 -0.84957,28.41812 -7.81382,36.03734 -6.96425,7.61922 -1.00429,19.58332 -25.91605,12.07107 -24.91176,-7.51225 -27.03224,-27.78298 -26.51523,-46.30475 0.51721,-18.52898 7.31617,-53.441433 7.31617,-53.441433 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7906-6);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czccssc"
+ id="path7880-8"
+ d="m 135.648,81.927211 c 0,0 -4.64465,16.365075 0.58825,28.563099 5.48794,12.79254 27.22425,44.26007 27.22425,54.65565 l 22.65625,-5 c 2.54218,-6.96644 3.21052,-15.75206 2.1875,-26.5 -1.56129,-16.40307 -3.8663,-25.62141 -15.28125,-37.718749 -9.65488,-10.232047 -31.59311,-13.374857 -37.375,-14 z"
+ style="display:inline;opacity:1;fill:url(#radialGradient7908-0);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 845.03125,1154.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.2857 c 0,0 85.88551,-16.2009 112.14285,-33.5714 26.25735,-17.3705 45.58238,-49.66602 59.28572,-71.42861 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
+ id="path7917-0"
+ sodipodi:nodetypes="czzzcczzcc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8888-6);enable-background:accumulate"
+ d="m 332.34019,898.38549 -32.73181,-61.29956 -37.61734,45.10646 c 2.17675,1.31711 5.77425,-20.85603 45.6004,-64.41708 l 24.74875,80.61018 z"
+ id="path7919-5"
+ clip-path="url(#clipPath8658-06)"
+ sodipodi:nodetypes="ccccc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6951);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8892-7);enable-background:accumulate"
+ d="m 200.81833,863.03015 146.3711,-51.61879 243.95184,226.27414 -241.83052,140.0072 -181.01934,-87.6813 32.52692,-226.98125 z"
+ id="path7923-6"
+ clip-path="url(#clipPath2833-2)"
+ sodipodi:nodetypes="cccccc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 642.88839,640.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57145 -62.5,123.57145 l 76.07143,18.2143 c 0,0 11.80712,-12.8234 31.07142,-46.07146 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
+ id="path7921-6"
+ sodipodi:nodetypes="czzcczcc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.4;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8856-2);enable-background:accumulate"
+ d="m 430.28131,381.94122 c -7.07106,2.82843 -236.18124,32.15181 -236.18124,32.15181 l -39.63961,359.83304 90.19849,92.63961 52.3259,-114.5513 100.46804,-186.39192 32.82842,-183.68124 z"
+ id="path7925-4"
+ sodipodi:nodetypes="ccccccc"
+ clip-path="url(#clipPath3665-9)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 969.67051,1164.0346 c 0,0 23.25628,11.3937 36.06779,20.4761 12.6974,9.0015 29.4724,24.6491 41.6924,37.3605 12.3055,12.8002 20.1127,22.5987 41.5327,24.1608 21.4322,1.5629 53.2824,-8.7876 73.296,-24.6642 20.0135,-15.8766 45.6469,-69.2328 45.6469,-69.2328 l -127.1608,-143.0717"
+ id="path7927-0"
+ sodipodi:nodetypes="czzzzcc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8860-3);enable-background:accumulate"
+ d="M 331.34019,641.50471 216.17367,835.36467 260.2153,925.96265 357.79603,732.21539 331.34019,641.50471 Z"
+ id="path7929-0"
+ clip-path="url(#clipPath8642-9)"
+ sodipodi:nodetypes="ccccc"
+ transform="translate(276,136)" />
+ <g
+ style="display:inline;opacity:1;enable-background:new"
+ id="g7931-4"
+ transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
+ inkscape:transform-center-x="-347.89063"
+ inkscape:transform-center-y="-28.255779">
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
+ id="path7933-6" />
+ <g
+ clip-path="url(#clipPath7616-1)"
+ style="display:inline;filter:url(#filter7610-9);enable-background:new"
+ id="g7935-2"
+ transform="matrix(0.9975712,-0.06965428,0.06965428,0.9975712,872.72062,140.02502)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccssscsssscscsscsssccscssccsscssscc"
+ id="path7937-6"
+ d="m 229.94262,-409.12268 c -3.55781,0.05 -9.0242,0.36009 -10.30334,0.90414 -1.60609,-0.44747 -3.90316,-0.88131 -5.89995,-0.87674 -3.07199,0.007 -4.96469,0.009 -6.90727,0 -0.66047,-0.003 -1.04759,0.0672 -1.35267,0.10959 0,0 0,1.09593 0,1.09593 0.11972,-0.17947 0.39252,-0.69046 0.94975,-0.76715 0.74758,-0.10289 5.16928,-0.15123 7.31019,-0.1096 1.7746,0.0345 4.45523,0.27427 6.38921,0.95895 0.3214,0.11378 0.61925,0.27378 0.89219,0.41097 1.96342,0.98693 7.94336,4.30154 7.94336,4.30154 0,0 -6.63275,-3.94768 -7.48287,-4.43853 -0.20331,-0.11739 -0.57464,-0.25769 -1.03609,-0.41098 1.22063,-0.44779 5.07597,-0.61971 7.82823,-0.71235 3.0245,-0.10182 3.34776,-0.0896 5.41069,0.19179 2.12931,0.29043 3.33851,0.60276 3.33851,0.60276 -1e-5,0 -0.0784,-0.64118 1.03609,-0.79455 0.74757,-0.10289 5.16929,-0.15123 7.31019,-0.1096 2.0695,0.0403 5.36605,0.40716 7.2814,1.36992 1.00332,0.50433 3.03564,1.56863 4.79535,2.53571 l 0.0956,-0.0194 c 0,0 -3.58034,-2.16242 -4.43047,-2.65327 -0.20331,-0.11739 -0.57463,-0.25769 -1.03609,-0.41098 1.22062,-0.44779 5.04719,-0.61971 7.79945,-0.71235 3.0245,-0.10182 3.34775,-0.0896 5.41069,0.19179 1.95316,0.2664 3.01292,0.53006 3.19461,0.57536 0,0 -0.0271,-0.31146 -0.0271,-0.31146 -0.40903,-0.13645 -0.71424,-0.23335 -1.40038,-0.35748 -1.30081,-0.23533 -3.39912,-0.60156 -5.50857,-0.56398 -3.57195,0.0636 -9.05328,0.35596 -10.30334,0.90414 -1.60583,-0.44695 -3.87662,-0.8813 -5.87117,-0.87674 -3.07199,0.007 -4.99348,0.009 -6.93605,0 -1.94256,-0.009 -1.71268,0.27907 -2.93558,0.27398 -1.32191,-0.005 -1.76612,-0.35463 -5.55459,-0.30138 0,0 0,0 0,0"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path7939-7"
+ d="m 206.1989,-407.47878 c 1.92021,0.81706 4.57715,2.19283 6.15897,3.39739 1.58184,1.20456 2.90757,1.77368 5.55459,3.91795 0.88557,0.71738 1.74865,1.34985 2.59193,1.92174 l 0.54057,-0.19011 c -0.71323,-0.48339 -1.46776,-1.02031 -2.26909,-1.62203 -2.82223,-2.11921 -3.62655,-2.80973 -6.01507,-4.27414 -2.38854,-1.4644 -4.09948,-2.36576 -6.5619,-3.1508 0,0 0,0 0,0"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ sodipodi:nodetypes="cssccsscc" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path7941-5"
+ d="m 237.79963,-407.47878 c 1.92021,0.81706 4.60594,2.19283 6.18775,3.39739 0.81307,0.61916 1.55849,1.07042 2.45046,1.65401 l 0.649,-0.11666 c -0.79831,-0.57637 -1.57177,-1.09435 -2.69653,-1.78394 -2.38854,-1.4644 -4.12826,-2.36576 -6.59068,-3.1508 0,0 0,0 0,0"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ sodipodi:nodetypes="csccscc" />
+ </g>
+ <g
+ clip-path="url(#clipPath7606-1)"
+ id="g7943-6">
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7578-4);enable-background:new"
+ d="m 1056.25,-278.80481 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
+ id="path7945-9"
+ sodipodi:nodetypes="czzzzzzzzzzzzzz" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7594-8);enable-background:new"
+ d="m 1058.5,-275.42981 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
+ id="path7947-8"
+ sodipodi:nodetypes="czzzzzzzzzzzzzz" />
+ </g>
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#101414;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 628.24553,347.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.29583 116.94091,-84.69468 185.93466,-91.46542 86.92239,-11.0168 184.91267,17.94007 233.37138,95.40128 54.124,75.7333 56.6747,172.53912 80.612,259.52795 29.4378,127.1276 54.7791,256.21414 60.3922,386.85035 -3.0634,78.18185 -8.4263,165.18417 -60.5032,228.13417 -48.0265,50.3574 -122.7864,50.053 -187.06985,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94 -95.73019,-113.47867 -97.2794,-186.01962 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
+ id="path7949-7"
+ sodipodi:nodetypes="cscccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8940-3);enable-background:accumulate"
+ d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 -38.61152,-142.95988 -39.24264,-189.11984 -0.63145,-46.18445 10.83034,-108.60786 30.67767,-158.29647 20.04835,-50.19188 36.89674,-44.84642 42.12489,-92.59293 5.22815,-47.74651 -17.4264,-149.39192 -17.4264,-149.39192 l -55.86144,39.59798 z"
+ id="path7951-2"
+ sodipodi:nodetypes="ccccczzzcc"
+ clip-path="url(#clipPath8616-5)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6953);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 1010.0312,655.49186 c 0,0 16.7552,37.01806 28.7015,53.95395 11.9462,16.93589 52.7271,56.04605 52.7271,56.04605 l 52.5972,-127.58975"
+ id="path7953-8"
+ sodipodi:nodetypes="czcc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.07999998;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8822-2);enable-background:accumulate"
+ d="m 730.31998,536.56864 c 0,8.48528 42.54774,58.46803 42.54774,58.46803 l 12.60659,-28.76954 -55.15433,-29.69849 z"
+ id="path7955-2"
+ sodipodi:nodetypes="cccc"
+ clip-path="url(#clipPath8209-6)"
+ transform="translate(276,136)" />
+ <g
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;opacity:1;enable-background:new"
+ id="g7957-9"
+ clip-path="url(#clipPath3998-6)">
+ <g
+ transform="translate(-174.03125,62.156036)"
+ style="filter:url(#filter3677-5)"
+ id="g7959-9">
+ <g
+ id="g7961-6"
+ style="filter:url(#filter3785-4)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzzzzzzzz"
+ id="path7963-0"
+ d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <rect
+ y="412.60312"
+ x="343.6539"
+ height="181.01935"
+ width="381.83765"
+ id="rect7965-2"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <g
+ id="g7967-7"
+ style="filter:url(#filter3785-4)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzcc"
+ id="path7969-6"
+ d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ transform="translate(174.03125,-62.156036)" />
+ <rect
+ y="344.82138"
+ x="702.86414"
+ height="162.63455"
+ width="207.8894"
+ id="rect7971-1"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ </g>
+ <g
+ transform="translate(-174.03125,62.156036)"
+ style="display:inline;opacity:0.18000004;enable-background:new"
+ id="g7973-3">
+ <g
+ id="g7975-2"
+ style="filter:url(#filter3785-4)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzzzzzzzz"
+ id="path7977-1"
+ d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <rect
+ y="412.60312"
+ x="343.6539"
+ height="181.01935"
+ width="381.83765"
+ id="rect7979-5"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <g
+ id="g7981-9"
+ style="filter:url(#filter3785-4)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzcc"
+ id="path7983-9"
+ d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ transform="translate(174.03125,-62.156036)" />
+ <rect
+ y="344.82138"
+ x="702.86414"
+ height="162.63455"
+ width="207.8894"
+ id="rect7985-1"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ </g>
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8802-7);enable-background:accumulate"
+ d="M 582.65599,-7.4183011 695.79307,78.848726 804.68752,337.64981 842.87128,545.5392 963.07944,637.46308 c 0,0 -12.72793,-287.08535 -19.799,-313.95541 C 936.20938,296.63761 793.37381,-69.643698 793.37381,-69.643698 L 582.65599,-7.4183011 Z"
+ id="path7987-4"
+ clip-path="url(#clipPath8604-69)"
+ sodipodi:nodetypes="cccccscc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6955);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 964.13839,239.599 c 0,0 8.67732,10.89662 24.10715,11.96428 15.42986,1.06766 49.72166,-39.95267 70.17856,-52.14285 20.4793,-12.20353 47.0464,-26.60225 63.9286,-20.35714 16.8821,6.2451 22.1578,26.43609 27.8571,48.03571 5.6994,21.59961 6.7186,61.81389 -2.6785,92.85715 -9.3972,31.04325 -50.5033,73.10375 -65.3572,103.39285 -14.8539,30.2891 -11.6071,39.82143 -11.6071,39.82143"
+ id="path7989-9"
+ sodipodi:nodetypes="czzzzzzc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:url(#radialGradient3315-5);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1124.4955,207.63471 c -15.8928,-0.89286 -49.7188,12.10583 -66.0714,24.28572 -16.4386,12.2439 -29.2209,24.1144 -29.2857,52.14285 -0.065,28.20604 13.1191,39.07641 29.1071,46.96429 15.988,7.88789 33.6862,7.11928 51.9643,-11.78571 18.2782,-18.905 14.2857,-111.60715 14.2857,-111.60715 z"
+ id="path7991-1"
+ sodipodi:nodetypes="czzzzc" />
+ <ellipse
+ ry="73.928574"
+ rx="86.428574"
+ cy="237.00504"
+ cx="385"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:url(#radialGradient3543-4);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4120-7);enable-background:accumulate"
+ id="path7993-0"
+ transform="matrix(0.9434749,-0.1239943,0.1440089,1.0957669,451.94827,134.5988)"
+ clip-path="url(#clipPath4100-3)" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3915-6);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 527.60588,407.44884 c 0,0 -122.04144,38.40348 -187.51434,9.63181 -65.47289,-28.77166 -74.37725,-124.71847 -74.37725,-124.71847 0,0 73.38158,-80.50393 129.92078,-83.61476 55.82705,-3.07164 90.57386,20.14332 114.87001,65.85171 24.352,45.81348 17.1008,132.84971 17.1008,132.84971 z"
+ id="path7995-7"
+ sodipodi:nodetypes="csczzc"
+ mask="url(#mask3684-3)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6957);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 772.17411,393.349 c 0,0 36.21754,-27.38247 51.60714,-35.89286 15.17734,-8.39301 25.71428,-11.60714 35.89285,-11.60714 l -15.53571,66.96428"
+ id="path7997-5"
+ sodipodi:nodetypes="czcc" />
+ <circle
+ r="36.25"
+ cy="306.64789"
+ cx="409.28571"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3933-8);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path7999-8"
+ transform="translate(449.49554,74.915393)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.3;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8806-6);enable-background:accumulate"
+ d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 8.68161,-72.36764 -31.24264,-223.11984 l 17.67767,-69.29647 72.12489,-138.59293 -42.4264,-158.39192 -55.86144,39.59798 z"
+ id="path8001-7"
+ sodipodi:nodetypes="cccccccccc"
+ clip-path="url(#clipPath8616-5)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8826-9);enable-background:accumulate"
+ d="m 635.21025,581.13004 c -14.14214,12.72792 39.23347,34.58015 76.36753,24.04163 37.13406,-10.53852 104.64487,-35.56437 103.23759,-79.19596 -1.40728,-43.63158 -76.36753,-128.69343 -76.36753,-128.69343 L 635.21025,581.13004 Z"
+ id="path8003-0"
+ sodipodi:nodetypes="czzcc" />
+ <circle
+ r="23.214285"
+ cy="306.64789"
+ cx="410"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3991-0);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8005-4"
+ transform="translate(449.67411,74.915393)" />
+ <circle
+ r="7.5"
+ cy="303.07648"
+ cx="414.28571"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3981-7);enable-background:accumulate"
+ id="path8007-8"
+ transform="translate(451.99554,73.486821)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4112-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 789.31696,478.349 c 0,0 7.02281,19.56859 -1.07143,35 -8.09424,15.43141 -42.32317,38.98822 -67.49999,50 -25.30972,11.06991 -85.473,32.96393 -101.78572,41.96428 -16.46148,9.08243 -18.21428,12.67857 -18.21428,12.67857 0,0 -7.14693,-19.06441 28.74999,-51.7857 36.17211,-32.97214 142.02712,-48.0495 159.82143,-87.85715 z"
+ id="path8009-0"
+ sodipodi:nodetypes="czzzczc" />
+ <g
+ style="display:inline;opacity:1;enable-background:new"
+ id="g8011-4"
+ transform="translate(780.74553,74.55825)">
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(-329.81481,0)"
+ clip-path="url(#clipPath3999-0)"
+ sodipodi:nodetypes="czzczzzszc"
+ id="path8013-2"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
+ style="display:inline;opacity:1;fill:url(#radialGradient3585-2);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <ellipse
+ ry="134.00607"
+ rx="64.715881"
+ cy="338.07648"
+ cx="183.57143"
+ transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-166.62245,2.387362)"
+ id="path8015-9"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4060-5);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <ellipse
+ ry="134.00607"
+ rx="64.715881"
+ cy="338.07648"
+ cx="183.57143"
+ transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-162.19388,-18.755495)"
+ id="path8017-6"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4062-9);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(-329.81481,3e-7)"
+ clip-path="url(#clipPath3999-0)"
+ sodipodi:nodetypes="czzczzzszc"
+ id="path8019-1"
+ d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
+ style="display:inline;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6959);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4079-1);enable-background:new" />
+ </g>
+ <circle
+ r="19.704132"
+ cy="398.07648"
+ cx="310.71429"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8021-0"
+ transform="translate(452.55663,72.581273)" />
+ <circle
+ r="19.704132"
+ cy="398.07648"
+ cx="310.71429"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4056-5);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6961);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4083-9);enable-background:accumulate"
+ id="path8023-4"
+ transform="translate(450.55663,72.581273)" />
+ <circle
+ r="19.704132"
+ cy="398.07648"
+ cx="310.71429"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4119-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ id="path8025-2"
+ transform="translate(450.55663,72.581273)" />
+ <ellipse
+ ry="44.547726"
+ rx="72.079735"
+ cy="377.42877"
+ cx="429.56738"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4868-3);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4002-6);enable-background:accumulate"
+ id="path8027-2"
+ transform="matrix(0.9969564,-0.07796167,0.07796167,0.9969564,436.61877,125.29509)"
+ inkscape:transform-center-x="-47.231976"
+ inkscape:transform-center-y="-3.6935079" />
+ <ellipse
+ ry="22.627417"
+ rx="36.611931"
+ cy="391.21735"
+ cx="437.6991"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4876-9);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4010-1);enable-background:accumulate"
+ id="path8029-2"
+ transform="matrix(1.4357951,-0.06999104,0.06999104,1.4357951,235.18065,-63.86546)"
+ inkscape:transform-center-x="-20.955902"
+ inkscape:transform-center-y="-13.056625" />
+ <g
+ transform="translate(450.03125,73.843964)"
+ id="g8031-0"
+ style="display:inline;opacity:1;filter:url(#filter4053-9);enable-background:new">
+ <circle
+ r="3.2142856"
+ cy="401.82648"
+ cx="413.66071"
+ id="path8033-5"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6963);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <circle
+ r="3.2142856"
+ cy="401.82648"
+ cx="413.66071"
+ transform="translate(13.125009,8.1249913)"
+ id="path8035-5"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6965);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <circle
+ r="3.2142856"
+ cy="401.82648"
+ cx="413.66071"
+ transform="translate(32.946437,7.4999913)"
+ id="path8037-2"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6967);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <circle
+ r="3.2142856"
+ cy="401.82648"
+ cx="413.66071"
+ transform="translate(24.910723,-10.267866)"
+ id="path8039-9"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6969);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <circle
+ r="3.2142856"
+ cy="401.82648"
+ cx="413.66071"
+ transform="translate(47.589294,-0.6250087)"
+ id="path8041-0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6971);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 896.20301,482.92837 c 0.98509,4.35008 4.53707,6.17948 7.38673,7.89182 4.46068,2.51292 6.52016,1.52211 9.15451,-0.75761 1.60195,-1.92117 10.68311,-4.69865 15.59423,-7.07107 4.32961,-1.45891 8.9033,-5.35873 13.38452,-8.33376 3.39514,-1.62724 5.34664,0.35464 7.82868,1.01015 2.94412,0.71661 4.41117,2.17175 6.06092,3.53554 2.39616,1.17519 -0.9279,3.14313 3.283,4.29314 1.19091,0.21794 2.41695,0.57645 3.28299,-0.50507"
+ id="path8043-2"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 910.85021,475.35223 c 2.31494,-0.032 3.17778,0.64253 5.49271,-0.82075 3.45564,-3.08113 5.40254,-3.14477 7.95495,-4.41942 3.02657,-1.31523 6.5357,8.15169 10.10153,9.84899 2.39509,-0.82142 1.28914,1.79379 1.45209,2.65165 0.0571,2.64684 2.80694,3.67806 4.35628,5.42957 3.31604,2.25549 7.37523,6.29546 11.11168,5.3033 6.44525,-2.93107 10.27922,-1.28146 16.28871,-7.38674 0.70405,-1.18134 -0.58425,-6.8946 3.09359,-7.19734 2.52399,0.25338 4.16667,0.0502 6.06092,0.56822 5.441,2.11719 7.73778,6.45 14.71034,7.95495 6.1829,0.96639 7.61264,3.79426 13.88959,5.05076"
+ id="path8045-8"
+ sodipodi:nodetypes="cccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 876.98133,483.52197 c 2.39858,-0.7938 6.10613,4.1921 8.17313,7.04568 0.59281,2.67952 1.15377,5.48645 0.75761,12.12183 0.78513,2.41754 2.68049,3.03095 4.79823,3.283 3.11745,-0.53678 5.87669,-1.3243 7.3236,-3.03046 1.8716,-1.94167 5.31253,2.39394 8.08122,4.04061 3.61009,1.91209 7.77378,1.97886 11.8693,2.27284 1.70358,-0.23064 2.3704,4.51515 3.28299,8.08123 0.38414,4.37806 -0.88544,6.89569 -1.76776,9.84898 -0.2943,2.49655 2.9885,3.52974 6.31345,4.54569 3.18244,0.74124 6.54424,1.66184 9.09137,1.76777 5.14186,0.87491 8.08874,2.69052 12.12183,4.04061 2.23914,0.81655 3.26019,2.24216 4.54569,3.53553"
+ id="path8047-3"
+ sodipodi:nodetypes="ccccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter8814-5);enable-background:new"
+ d="m 332,187.69519 c 0,0 57.5,-25.5 57.5,-28 0,-2.5 5.5,-52 5.5,-52 0,0 91,-48.500001 91.5,-50.500001 0.5,-2 86,-62.0000004 86,-62.0000004 L 386.5,17.195189 311,123.19519 l 21,64.5 z"
+ id="path8049-8"
+ clip-path="url(#clipPath8514-8)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 1697.2846,722.5514 c 0,0 -115.9655,73.5391 -123.0365,77.78174 -7.0711,4.24264 -230.5169,137.17872 -230.5169,137.17872 l 4.2427,39.59798 216.3747,-100.40917 117.3797,-101.82337 15.5563,-52.3259 z"
+ id="path8051-0" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810-3);enable-background:accumulate"
+ d="m 528.91587,556.85291 c -5.65685,-1.41421 -181.01933,74.95332 -181.01933,74.95332 l -33.94113,181.01934 51.09546,193.94823 257.2031,67.6813 c 0,0 206.47518,152.735 212.13203,148.4924 5.65686,-4.2426 168.2914,-193.7473 168.2914,-193.7473 L 842.87128,845.35248 796.20224,667.16157 528.91587,556.85291 Z"
+ id="path8053-4"
+ clip-path="url(#clipPath8610-9)"
+ sodipodi:nodetypes="cccccscccc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:#0c0c0c;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1097.6433,613.88997 c 0,0 22.6195,-6.50681 35.7427,-5.87273 13.1233,0.63409 30.6416,1.93862 43.7089,12.18619 13.0673,10.24756 25.0677,27.14007 34.1124,58.36965 9.0446,31.22958 1.6983,99.25201 -6.1761,143.34735 -7.8743,44.09534 -28.2651,106.11298 -45,140 -16.7348,33.88702 -49.7977,77.49517 -60.5694,89.87617 -11.3642,13.062 -56.2059,36.4262 -79.4306,42.2667 5.3034,-10.6066 48.8998,-50.5889 35,-60.7143 -14.0189,-10.2123 -45.76,45.9824 -84.2931,29.0332 21.38231,-13.1321 41.7794,-51.1861 34.0406,-66.59448 -7.84024,-15.61039 -30.70492,48.75758 -93.53553,37.01288 30.05204,-27.5267 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.2932 -60.46175,54.2932 0,0 -2.8219,-41.70123 13.7732,-68.60737 16.63935,-26.97787 79.65297,-81.61527 99.55308,-111.70342 19.9002,-30.08814 33.6126,-66.00902 42.1355,-92.51794 8.5228,-26.50892 15.8009,-77.09954 15.8009,-77.09954"
+ id="path8055-0"
+ sodipodi:nodetypes="czzzzzzczczczczzzc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8818-1);enable-background:accumulate"
+ d="m 770.74639,609.17881 -50.91169,97.58074 -79.90307,111.01576 34.64824,71.41778 42.42641,79.19597 72.12489,-45.25484 14.14214,-192.33305 21.2132,-138.59292 -14.14214,-90.15612 -39.59798,107.12668 z"
+ id="path8057-9"
+ clip-path="url(#clipPath8622-5)"
+ sodipodi:nodetypes="cccccccccc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810-3);enable-background:accumulate"
+ d="m 295,846.19519 6.64488,-68.92285 c 0,0 90.31951,89.00457 162.35512,122.92285 72.03561,33.91828 308,62 308,62 l 154,-26 -36,162.00001 -286,26 -298,-89 -11,-189.00001 z"
+ id="path8059-1"
+ clip-path="url(#clipPath8906-9)"
+ sodipodi:nodetypes="cczcccccc"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3587-1);enable-background:new"
+ d="m 405.79629,845.99023 74.95332,65.05383 2.49963,16.8804 19.40336,10.15891 6.49204,23.05109 31.70905,-8.3711 14.84924,48.08324 c 12.25652,12.7279 89.79344,-113.1097 55.86143,38.1838 l -60.81118,16.2635 -89.20292,-94.69286 -62.82503,-53.79963 7.07106,-60.81118 z"
+ id="path8061-9"
+ sodipodi:nodetypes="cccccccccccc"
+ clip-path="url(#clipPath3602-4)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
+ d="m 1159.317,918.349 c 54.2857,-1.42857 126.035,-15.05199 170,-26.78572 44.0527,-11.75714 125.8863,-36.34724 175.357,-57.85714 49.3393,-21.45272 113.6038,-59.2816 154.2859,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7142,-33.57143 8.3691,22.36779 -16.4069,56.32562 -37.8571,81.07143 -21.6042,24.9234 -52.7314,52.70533 -98.9287,89.28571 -46.1973,36.58038 -156.0825,101.58463 -212.8571,128.5714 -57.066,27.1254 -128.2033,58.2385 -172.1428,72.5001 -43.9395,14.2616 -131.4286,31.0714 -131.4286,31.0714 L 1159.317,918.349 Z"
+ id="path8063-6"
+ sodipodi:nodetypes="czzzzzzzzcc" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:url(#linearGradient6973);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3779-4);enable-background:accumulate"
+ d="m 1241.5965,652.95007 c 0,0 -64.7215,54.33706 -145.6639,98.99494 -82.0244,45.25484 -284.25704,93.3381 -284.25704,93.3381 0,0 -15.10137,21.05196 45.25489,28.28428 60.35626,7.23232 224.08195,-53.30069 278.60015,-96.16654 54.5182,-42.86585 120.2081,-111.72286 120.2081,-111.72286 l -14.1422,-12.72792 z"
+ id="path8065-2"
+ sodipodi:nodetypes="czczzcc"
+ clip-path="url(#clipPath3992-4)" />
+ <g
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;opacity:1;enable-background:new"
+ id="g8067-5"
+ clip-path="url(#clipPath3986-7)">
+ <g
+ transform="translate(-174.03125,62.156036)"
+ style="filter:url(#filter3677-5)"
+ id="g8069-4">
+ <g
+ style="filter:url(#filter3785-4)"
+ id="g8071-4">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzccccc"
+ id="path8073-9"
+ d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ transform="translate(174.03125,-62.156036)" />
+ <rect
+ y="486.14224"
+ x="1197.8389"
+ height="309.71277"
+ width="333.75412"
+ id="rect8075-9"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ </g>
+ <g
+ transform="translate(-174.03125,62.156036)"
+ style="display:inline;opacity:0.18000004;enable-background:new"
+ id="g8077-3">
+ <g
+ style="filter:url(#filter3785-4)"
+ id="g8079-6">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzccccc"
+ id="path8081-0"
+ d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
+ style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ transform="translate(174.03125,-62.156036)" />
+ <rect
+ y="486.14224"
+ x="1197.8389"
+ height="309.71277"
+ width="333.75412"
+ id="rect8083-5"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ </g>
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.83300003;fill:#050505;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:15;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;filter:url(#filter8225-7);enable-background:accumulate"
+ d="m 1264.1875,605 c -4.4911,0.73268 -8.157,3.45509 -11.9375,6.40625 -10.0813,7.86976 -28.1695,34.42524 -48.0312,50.46875 -39.8674,32.20316 -103.996,69.97701 -152.5626,91.09375 -48.614,21.13738 -130.54122,45.81801 -174.31245,57.5 -43.39821,11.58246 -115.04403,25.13107 -168.25,26.53125 l -4.5625,0.125 -2,4.125 -92.84375,192.125 -6.5,13.4688 14.65625,-2.8438 c 0,0 87.26968,-16.6514 132.34375,-31.2812 44.7252,-14.51667 115.79086,-45.66683 173.03125,-72.87505 C 980.82199,912.46306 1090.1551,847.86412 1137.5,810.375 c 46.3608,-36.70982 77.8049,-64.71682 99.9375,-90.25 10.9011,-12.576 22.7448,-27.53144 31.0313,-42.75 8.2864,-15.21856 19.1597,-44.21808 13.6874,-58.84375 -1.2177,-3.25474 -2.5514,-6.0613 -4.5937,-8.5 -2.0423,-2.4387 -8.4747,-1.57199 -8.5625,-5.03125 -0.2098,-8.26482 -3.3155,-0.24423 -4.8125,0 z m 2.1563,15.21875 c 0.4148,0.58574 1.0311,1.55766 1.7812,3.5625 2.8968,7.74213 -1.4407,31.89875 -8.8125,45.4375 -7.3718,13.53875 -22.6384,28.92394 -33.1875,41.09375 -21.0754,24.31356 -51.9037,51.86156 -97.9375,88.3125 -45.0496,35.67159 -155.46033,101.09459 -211.40625,127.6875 -56.89173,27.04249 -128.09616,58.1184 -171.25,72.125 -36.36491,11.8031 -95.84471,23.8338 -115.71875,27.7813 L 714.09375,851.75 c 54.70691,-2.0493 123.79259,-15.21635 167.125,-26.78125 44.33422,-11.83225 126.07865,-36.33633 176.40625,-58.21875 50.112,-21.78871 112.5344,-61.16816 154.0312,-94.6875 20.6464,-16.67721 41.7449,-42.54588 49.8126,-48.84375 2.437,-1.90242 4.0806,-2.6358 4.875,-3 z"
+ id="path8085-0"
+ clip-path="url(#clipPath3722-3)"
+ sodipodi:nodetypes="cssssccccccssssssssccssssssccssssc" />
+ <g
+ style="display:inline;opacity:1;enable-background:new"
+ id="g8087-2"
+ mask="url(#mask7704-9)"
+ transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
+ inkscape:transform-center-x="-185.09603"
+ inkscape:transform-center-y="-12.859654">
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(0.08004571,-0.03125)"
+ style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 -12.4759,3.49189 7.92966,19.27772 c -0.59163,1.97357 12.54624,-4.73836 12.54624,-4.73836 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 
1.61175,-0.87937 2.21875,-1.53125 1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 
0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
+ id="path8089-9"
+ sodipodi:nodetypes="ccssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssscccccssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssssssssc" />
+ <g
+ clip-path="url(#clipPath7421-7)"
+ id="g8091-4">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8093-3"
+ d="m 1107.409,-284.04961 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88717,-0.50701 -5.875,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0725 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0774 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,10e-6 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 10e-6,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0532 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 10e-6,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0728,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 10e-6,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0773,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.4497,-0.12571 6.375,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -10e-5,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 
-1.0313,-0.3125 1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -1e-4,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7001-5);enable-background:new"
+ sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8095-5"
+ d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6949-4);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8097-1"
+ d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6961-8);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8099-7"
+ d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6957-9);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8101-4"
+ d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.4428,3.93956 1.5794,5.38995 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.9442,0.42864 -1.2366,0.49452 0.531,0.18589 0.8908,0.21322 0.9524,1.05768 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.9524,0.18199 -1.288,0.0511 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6997-5);enable-background:new"
+ sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8103-3"
+ d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6953-8);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8105-1"
+ d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6993-3);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8107-4"
+ d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6989-8);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8109-6"
+ d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6985-6);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8111-9"
+ d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6965-3);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8113-4"
+ d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6981-3);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8115-2"
+ d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6977-6);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8117-2"
+ d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6973-4);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8119-6"
+ d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
+ style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6969-8);enable-background:new" />
+ <g
+ id="g8121-4"
+ style="fill:#ffffff;fill-opacity:1;filter:url(#filter7345-9)">
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8123-1"
+ d="m 744.9375,-212.11731 c 0,0 7.22229,-3.22318 9.0625,-3.5 1.84021,-0.27682 3.35225,-0.003 6,0.5625 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 5.3125,3.5625 5.3125,3.5625 0,0 -7.14644,-2.78019 -10.1875,-3.5625 -3.04106,-0.78231 -7.64461,-2.08374 -10.375,-2.3125 -2.73039,-0.22876 -11,1.875 -11,1.875 z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8125-2"
+ d="m 735.46875,-206.95416 c 0,0 3.65979,-2.22318 5.5,-2.5 1.84021,-0.27682 3.66475,0.24677 6.3125,0.8125 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 6.5625,2.125 6.5625,2.125 0,0 -8.39644,-1.34269 -11.4375,-2.125 -3.04106,-0.78231 -7.95711,-2.33374 -10.6875,-2.5625 -2.73039,-0.22876 -7.4375,0.875 -7.4375,0.875 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8127-8"
+ d="m 759.85042,-217.61116 c 0,0 8.5437,-3.29857 10.39778,-3.45786 1.85409,-0.1593 3.64166,0.4792 6.2481,1.21208 2.60644,0.73288 8.57724,2.76594 10.95036,4.07925 2.37312,1.31331 6.41417,2.53782 6.41417,2.53782 0,0 -8.29413,-1.87365 -11.27931,-2.84767 -2.98519,-0.97402 -7.79269,-2.83478 -10.50302,-3.23662 -2.71033,-0.40184 -12.22808,1.713 -12.22808,1.713 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8129-8"
+ d="m 775.19813,-223.2266 c 0,0 7.77133,-2.78244 9.62831,-2.90349 1.85697,-0.12104 3.631,0.55417 6.22178,1.34062 2.59077,0.78645 8.5184,2.94217 10.86394,4.30412 2.34555,1.36195 6.36049,2.6695 6.36049,2.6695 0,0 -8.25373,-2.04423 -11.21821,-3.07958 -2.96447,-1.03535 -7.73259,-2.99481 -10.43406,-3.45243 -2.70147,-0.45763 -11.42225,1.12126 -11.42225,1.12126 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:transform-center-x="13.852145"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8131-9"
+ d="m 789.64298,-227.95417 c 0,0 8.68256,-3.52031 10.54154,-3.60535 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.3006,1.78871 -12.3006,1.78871 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:transform-center-x="13.852145"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8133-2"
+ d="m 804.49513,-233.32948 c 0,0 7.80756,-2.58281 9.66654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -11.4256,0.85121 -11.4256,0.85121 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.3190906"
+ inkscape:transform-center-x="13.852145"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8135-8"
+ d="m 819.55763,-237.57948 c 0,0 8.55756,-2.58281 10.41654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.1756,0.85121 -12.1756,0.85121 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.9269042"
+ inkscape:transform-center-x="13.64141"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8137-8"
+ d="m 836.23395,-242.60125 c 0,0 6.96702,-1.98723 8.82784,-1.96757 1.86081,0.0197 3.57873,0.82702 6.10265,1.80705 2.52393,0.98 8.27166,3.57758 10.50756,5.11291 2.2359,1.53535 6.14053,3.14261 6.14053,3.14261 0,0 -8.07561,-2.66222 -10.95336,-3.91866 -2.87774,-1.25645 -7.48412,-3.5707 -10.14328,-4.23121 -2.65915,-0.66049 -10.48194,0.0549 -10.48194,0.0549 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.1542119"
+ inkscape:transform-center-x="13.55068"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8139-8"
+ d="m 850.73028,-246.00461 c 0,0 7.68784,-2.02768 9.54782,-1.96854 1.85997,0.0592 3.56038,0.90279 6.06293,1.93616 2.50255,1.03334 8.19387,3.75232 10.39668,5.33475 2.20282,1.58245 6.07245,3.2722 6.07245,3.2722 0,0 -8.01729,-2.83298 -10.86772,-4.15022 -2.85043,-1.31723 -7.40666,-3.72872 -10.0512,-4.4455 -2.64454,-0.71678 -11.16096,0.0211 -11.16096,0.0211 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.4740887"
+ inkscape:transform-center-x="13.41151"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8141-6"
+ d="m 864.82496,-249.21081 c 0,0 8.16952,-1.96906 10.02688,-1.85396 1.85735,0.11512 3.53158,1.00956 6.0019,2.11779 2.47031,1.10821 8.0772,3.99727 10.23138,5.64531 2.15418,1.64804 5.9712,3.45352 5.9712,3.45352 0,0 -7.92839,-3.07306 -10.73787,-4.4755 -2.80949,-1.40244 -7.29106,-3.94999 -9.91283,-4.74606 -2.62176,-0.79606 -11.58066,-0.1411 -11.58066,-0.1411 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.79376"
+ inkscape:transform-center-x="13.258805"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8143-8"
+ d="m 881.38485,-251.60282 c 0,0 8.08536,-1.90809 9.93837,-1.73664 1.853,0.17147 3.4993,1.11633 5.93482,2.29908 2.43553,1.18271 7.95209,4.2407 10.05523,5.95339 2.10314,1.7127 5.86357,3.63326 5.86357,3.63326 0,0 -7.8314,-3.3124 -10.597,-4.7995 -2.76561,-1.48712 -7.16775,-4.16959 -9.76414,-5.04491 -2.59637,-0.87531 -11.43085,-0.30468 -11.43085,-0.30468 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8145-3"
+ d="m 896.58415,-254.34724 c 0,0 7.64166,-1.4277 9.49547,-1.26515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-0.76897 -10.99774,-0.76897 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8147-8"
+ d="m 911.45328,-255.98544 c 0,0 8.64166,-1.5527 10.49547,-1.39015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.99774,-0.64397 -11.99774,-0.64397 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8149-3"
+ d="m 927.70328,-258.29794 c 0,0 7.64166,-0.8652 9.49547,-0.70265 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-1.33147 -10.99774,-1.33147 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8151-3"
+ d="m 942.82828,-259.48544 c 0,0 8.57916,-1.4902 10.43297,-1.32765 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.93524,-0.70647 -11.93524,-0.70647 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8153-3"
+ d="m 959.07828,-261.54794 c 0,0 7.82916,-0.8027 9.68297,-0.64015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.18524,-1.39397 -11.18524,-1.39397 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8155-8"
+ d="m 974.45328,-262.79794 c 0,0 8.39166,-1.1777 10.24547,-1.01515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08376,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.84721,-3.27474 -10.61993,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.74774,-1.01897 -11.74774,-1.01897 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8157-0"
+ d="m 990.64078,-264.86044 c 0,0 6.89166,-0.9902 8.74547,-0.82765 1.85385,0.16256 3.50465,1.0995 5.94575,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.60053,-0.86282 -10.24772,-1.20647 -10.24772,-1.20647 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8159-4"
+ d="m 1007.7658,-265.79794 c 0,0 6.8291,-1.1777 8.683,-1.01515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -10.1852,-1.01897 -10.1852,-1.01897 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8161-7"
+ d="m 1023.8908,-267.79794 c 0,0 6.0791,-0.4277 7.933,-0.26515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.4352,-1.76897 -9.4352,-1.76897 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.7433893"
+ inkscape:transform-center-x="13.28378"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8163-6"
+ d="m 1039.7033,-269.17294 c 0,0 6.4541,-0.6777 8.308,-0.51515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.8102,-1.51897 -9.8102,-1.51897 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-5.1360724"
+ inkscape:transform-center-x="13.55813"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8165-8"
+ d="m 1055.2718,-271.03319 c 0,0 5.4976,-0.90945 7.3578,-0.85348 1.8601,0.056 3.5619,0.89674 6.0661,1.92586 2.5044,1.0291 8.2003,3.7384 10.4058,5.31709 2.2055,1.57871 6.078,3.2619 6.078,3.2619 0,0 -8.022,-2.81939 -10.8748,-4.13178 -2.8526,-1.31238 -7.4129,-3.71613 -10.0587,-4.42843 -2.6457,-0.71228 -8.9742,-1.09116 -8.9742,-1.09116 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.6370147"
+ inkscape:transform-center-x="13.74758"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8167-9"
+ d="m 1072.7007,-273.48537 c 0,0 4.5472,-1.15581 6.408,-1.18621 1.8607,-0.0304 3.5996,0.73049 6.1489,1.64231 2.5494,0.91177 8.3649,3.35386 10.6414,4.8285 2.2763,1.47468 6.2227,2.97636 6.2227,2.97636 0,0 -8.1442,-2.44411 -11.0547,-3.62272 -2.9105,-1.1786 -7.5774,-3.36815 -10.2534,-3.95691 -2.6759,-0.58875 -8.1129,-0.68133 -8.1129,-0.68133 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="-4.4842392"
+ inkscape:transform-center-x="13.79933"
+ sodipodi:nodetypes="czzzczzc"
+ id="path8169-0"
+ d="m 1087.1585,-276.5244 c 0,0 5.96,-1.77355 7.8202,-1.83024 1.86,-0.0567 3.6096,0.67955 6.1715,1.55525 2.562,0.87566 2.5226,0.85713 5.3335,1.49015 2.7969,0.62986 7.0767,1.51313 7.0767,1.51313 0,0 -3.6155,-0.0163 -6.7923,-0.46614 -3.1155,-0.44119 -7.3743,-1.69825 -10.0584,-2.24913 -2.6839,-0.55088 -9.5512,-0.013 -9.5512,-0.013 z"
+ style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czczc"
+ id="path8171-6"
+ d="m 1099.25,-279.92981 c 0.1612,0.26862 11.2081,-4.60046 12.1875,-4.6875 0.9794,-0.087 2,3.125 2,3.125 0,0 -0.7751,-1.50434 -2.875,-1.0625 -2.0999,0.44184 -11.3009,2.67141 -11.3125,2.625 z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ id="path8173-8"
+ d="m 1107.4532,-284.0938 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88718,-0.50701 -5.87501,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0726 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0775 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,1e-5 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 1e-5,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0533 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 1e-5,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0727,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 1e-5,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0772,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.44971,-0.12571 6.37501,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -1e-4,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 -1.0313,-0.3125 
1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -10e-5,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7333-7);enable-background:new"
+ sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8175-7"
+ d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7285-4);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8177-9"
+ d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7289-0);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8179-0"
+ d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7293-0);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8181-3"
+ d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.2218,3.85117 1.3584,5.30156 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.7232,0.51703 -1.0156,0.58291 0.531,0.18589 0.6698,0.12483 0.7314,0.96929 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.7314,0.27038 -1.067,0.13944 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7337-2);enable-background:new"
+ sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8183-3"
+ d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7297-4);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8185-3"
+ d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7301-5);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8187-7"
+ d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7305-4);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8189-3"
+ d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7309-9);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8191-2"
+ d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7313-2);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8193-6"
+ d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7317-7);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8195-5"
+ d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7321-5);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8197-2"
+ d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7329-8);enable-background:new" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8199-6"
+ d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
+ style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7325-2);enable-background:new" />
+ </g>
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 863.87812,475.6679 c 1.64212,-3.218 3.51781,-5.73529 4.86136,-9.84898 0.79872,-3.65789 3.31204,-2.03073 7.26047,-8.3969 1.40193,-2.2395 5.47653,0.39136 8.9651,-2.39911 1.27072,-0.80319 2.88488,-0.40431 4.48256,-0.0631 3.76539,1.31896 5.82576,3.70355 8.33376,5.80837 6.13906,5.97023 20.53414,7.94327 23.48604,6.31346 1.43405,-2.90474 7.88128,-5.40888 12.37437,-11.11168 0.74811,-1.12267 11.72936,-8.74446 14.64721,-6.56599"
+ id="path8201-5"
+ sodipodi:nodetypes="ccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 888.50059,465.25071 c 7.36341,-3.23297 13.8109,-8.9084 20.70813,-13.38452 3.31057,-1.96954 6.86983,3.21601 10.796,3.59866 2.29773,-0.21813 3.7129,1.20259 5.68211,1.6415 5.15636,1.31779 2.39793,3.86488 9.97526,6.43972 6.15561,1.7204 8.9074,-6.79847 14.89975,-7.3236 4.87739,-0.50299 8.09892,-0.31603 11.61675,-0.25254 3.92696,0.13889 4.07855,-3.4976 6.06092,-5.3033 2.98056,-2.80522 7.15561,-1.84972 10.14485,-4.7409 1.01754,-1.38468 1.95458,-3.01085 2.73459,-5.10809 0.88201,-2.00034 3.04006,0.30598 4.79823,1.26269"
+ id="path8203-8"
+ sodipodi:nodetypes="ccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9048-9);enable-background:accumulate"
+ d="m 403.27922,1056.3058 56.56854,-42.4264 72.12489,14.1421 -46.66904,52.3259 -53.74012,7.0711 -28.28427,-31.1127 z"
+ id="path8994-7" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 542.27183,1060.5719 c -1.40727,18.8012 -1.1449,32.751 2.08174,49.3033 3.22666,16.5523 16.40609,45.9073 20.33441,63.1837 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1483 -15.31761,12.9774 -42.05128,21.5989 -67.83231,15.7337 -25.78105,-5.8652 -69.54907,-49.2234 -88.59019,-70.2283 -19.11214,-21.0833 -63.76086,-93.8506 -77.93853,-124.2758 -14.17767,-30.4251 -12.65961,-36.7186 -8.11972,-45.52972 -9.36672,-24.5205 -12.41371,-50.06681 -33.71245,-75.57664 30.32547,3.11444 43.88028,26.95633 60.12568,47.13975 -5.52989,-48.07603 -18.05471,-64.4165 -28.37395,-90.7243 29.9943,6.08165 50.57936,31.87239 63.97979,72.7125 9.55415,-3.91791 18.23776,-9.37294 30.18741,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.68725,-101.06994 53.44196,5.67033 83.65702,80.63932 78.97142,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34702 -21.04781,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38966,85.80578 6.10287,20.3689 1.51881,38.70051 1.51881,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24138 -3.53269,59.10328 -4.94582,77.98328 z"
+ id="path4189-9"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587-1);enable-background:accumulate"
+ d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
+ id="path4191-6"
+ sodipodi:nodetypes="cccccccccccccccccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898-1);enable-background:new"
+ d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.84562,77.83981 -43.7404,101.4864 -6.60491,13.1304 -18.52833,57.4859 -31.12335,76.2465 -12.59502,18.7605 -28.53137,39.7673 -37.17204,44.4209 -21.49052,11.5742 -44.55594,25.5059 -60.61889,18.0895 -14.37486,-6.637 -23.03969,-21.1927 -23.81407,-37.7433 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 
6.76466,-22.55271 10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
+ id="path4193-0"
+ clip-path="url(#clipPath3677-0)"
+ sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)" />
+ <g
+ style="display:inline;enable-background:new"
+ id="g3617-4"
+ clip-path="url(#clipPath3622-5)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-52.200498,74.09707)"
+ id="path4195-1"
+ d="m -15.66751,843.48852 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024-1);enable-background:accumulate" />
+ <path
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccccccccc"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-46.92842,75.511284)"
+ id="path4197-0"
+ d="m 118.70648,859.93048 -55.154328,-46.66904 -43.84062,36.76955 33.94113,53.74011 -13.596814,85.46203 -39.44536579,28.29217 -41.01220021,11.3137 -2.82842,46.669 56.56854,25.4559 18.943987,-69.65 23.45655,-58.85663 46.347541,-72.61491 16.62,-39.91188 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020-8);enable-background:accumulate" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044-0);enable-background:accumulate"
+ d="m -70.82184,932.58397 60.81118,-26.87005 100.40916,31.1127 -63.63961,31.11269 -82.02438,-16.97056 -15.55635,-18.38478 z"
+ id="path4199-4"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,229.07158,211.51128)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4105-2);enable-background:new"
+ d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -7.17516,44.14234 -8.78421,66.21911 -8.78379,2.34171 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 4.66299,24.40472 3.63165,36.35334 -7.06405,45.03355 -22.14231,87.36194 -35.95355,130.6798 -12.07476,32.9493 -27.3742,58.8525 -47.88808,87.2015 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -44.20676,10.8769 -62.59956,0.046 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 4.93886,-28.5121 4.4106,-43.4918 -0.53824,-15.2629 -2.29135,-30.5647 -6.54261,-46.8663 -4.25126,-16.30162 -9.04325,-24.91794 -16.11906,-41.57338 -7.24111,-17.04456 -15.07015,-36.74863 -18.20542,-56.28842 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
+ id="path4201-8"
+ sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
+ clip-path="url(#clipPath4177-4)"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130-8);enable-background:accumulate"
+ d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
+ id="path4203-7"
+ sodipodi:nodetypes="cccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141-2);enable-background:accumulate"
+ d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
+ id="path4205-0"
+ sodipodi:nodetypes="cccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
+ <g
+ id="g8317-8"
+ style="display:inline;filter:url(#filter8333-2);enable-background:new"
+ clip-path="url(#clipPath8338-4)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
+ clip-path="none"
+ sodipodi:nodetypes="ccccc"
+ id="path4209-6"
+ d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <rect
+ y="757.19519"
+ x="-55"
+ height="177"
+ width="182"
+ id="rect8315-2"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <g
+ id="g8346-4"
+ style="display:inline;filter:url(#filter8354-2);enable-background:new"
+ clip-path="url(#clipPath8359-0)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
+ clip-path="none"
+ sodipodi:nodetypes="ccccccc"
+ id="path4207-7"
+ d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <rect
+ y="696.19519"
+ x="-22"
+ height="176"
+ width="165"
+ id="rect8344-9"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ d="m 1036.164,1071.8338 c 6.7941,18.9028 10.4937,33.2997 11.8903,51.2119 1.3966,17.9123 -3.7827,51.8008 -2.9005,70.6561 0.8818,18.8452 8.1337,40.099 27.3446,48.9689 19.4189,8.9658 49.3193,10.2113 74.1199,-3.1456 24.8006,-13.357 57.401,-70.3255 70.9742,-97.3087 13.6239,-27.0839 38.7611,-114.4974 44.6608,-149.76859 5.8998,-35.27121 2.5506,-41.30077 -4.6174,-49.05549 2.6403,-27.84015 -1.4998,-54.93543 13.1096,-87.18618 -30.249,11.8257 -37.3823,40.1607 -48.3189,65.50508 -8.0009,-50.93293 0.2092,-71.27319 3.3189,-101.21936 -29.0647,14.77791 -42.8615,47.11402 -45,92.85714 -10.9239,-1.3042 -21.3914,-4.43423 -33.5714,-0.71429 -0.264,-46.02334 -1.4635,-76.88941 8.9106,-114.20649 -53.2554,21.02686 -62.9472,106.5941 -56.0535,112.77792 -10.8828,0.535 -21.371,-1.2973 -32.8571,2.85715 0.6389,-42.57135 -0.2605,-84.90861 -30,-122.85715 0,0 -30.958,80.92234 -31.4286,103.57143 -0.4705,22.64909 9.4516,40.16588 9.4516,40.16588 0,0 -8.568,36.74051 -6.2986,58.23223 2.2959,21.74142 20.4429,59.67622 27.2655,78.65812 z"
+ id="path8848-3"
+ sodipodi:nodetypes="czzzzzzcccccccccczczz" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587-1);enable-background:accumulate"
+ d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
+ id="path3635-9"
+ sodipodi:nodetypes="cccccccccccccccccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898-1);enable-background:new"
+ d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.79272,123.1659 -43.6875,146.8125 -6.60491,13.1304 -18.02998,33.8957 -30.625,52.6563 -12.59502,18.7605 -27.35933,35.5338 -36,40.1874 -21.49052,11.5742 -48.7808,10.2602 -64.84375,2.8438 -14.37486,-6.637 -20.53812,-23.4494 -21.3125,-40 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 6.76466,-22.55271 
10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
+ id="path3669-2"
+ clip-path="url(#clipPath3677-0)"
+ sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc" />
+ <g
+ id="g3628-8"
+ clip-path="url(#clipPath3636-90)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8988-3"
+ d="m 824.48651,818.48242 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024-1);enable-background:accumulate" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path8990-0"
+ d="m 964.49365,855.25197 -55.15433,-46.66904 -43.84062,36.76955 33.94113,53.74011 7.07106,66.46804 -50.91168,35.35537 -41.0122,11.3137 -2.82842,46.669 56.56854,25.4559 63.63961,-76.3676 24.04163,-94.75227 8.48528,-57.98276 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020-8);enable-background:accumulate" />
+ </g>
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044-0);enable-background:accumulate"
+ d="m 1045.3322,1043.5779 60.8112,-26.8701 100.4091,31.1127 -63.6396,31.1127 -82.0244,-16.9706 -15.5563,-18.3847 z"
+ id="path8992-1" />
+ <path
+ inkscape:connector-curvature="0"
+ transform="translate(450.03125,73.843964)"
+ style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4185-1);enable-background:new"
+ d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -3.2972,44.17323 -4.90625,66.25 -1.31238,1.37679 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 3.12294,24.83149 2.0916,36.78011 -7.06405,45.03355 -21.76553,88.37934 -35.57677,131.69714 -12.07476,32.9493 -30.7197,63.08 -51.23358,91.429 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -43.57595,5.1744 -61.96875,-5.6562 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 2.37211,-27.8008 7.91747,-42.8053 5.54535,-15.0045 2.47105,-31.3317 -1.78021,-47.6333 -4.25126,-16.3016 -12.17903,-26.26002 -21.82158,-42.20417 -9.64255,-15.94415 -17.6369,-36.03734 -20.77217,-55.57713 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
+ id="path4149-7"
+ sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
+ clip-path="url(#clipPath4177-4)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130-8);enable-background:accumulate"
+ d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
+ id="path3902-8"
+ sodipodi:nodetypes="cccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="translate(276,136)" />
+ <path
+ inkscape:connector-curvature="0"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141-2);enable-background:accumulate"
+ d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
+ id="path4135-9"
+ sodipodi:nodetypes="cccccccc"
+ clip-path="url(#clipPath3631-6)"
+ transform="translate(276,136)" />
+ <g
+ id="g8367-1"
+ style="filter:url(#filter8379-0)"
+ clip-path="url(#clipPath8392-1)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ clip-path="none"
+ sodipodi:nodetypes="ccccccc"
+ id="path4145-5"
+ d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <rect
+ y="650.19098"
+ x="877.51953"
+ height="172.53406"
+ width="123.03658"
+ id="rect8365-4"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ <g
+ id="g8400-9"
+ style="filter:url(#filter8404-9)"
+ clip-path="url(#clipPath8417-4)"
+ transform="translate(276,136)">
+ <path
+ inkscape:connector-curvature="0"
+ clip-path="none"
+ sodipodi:nodetypes="ccccc"
+ id="path4147-2"
+ d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ <rect
+ y="677.06104"
+ x="924.89569"
+ height="125.1579"
+ width="142.12846"
+ id="rect8398-5"
+ style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
+ </g>
+ </g>
+ </g>
+ <path
+ style="fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:17.84425545;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Send)"
+ d="M 544.23337,203.09259 3443.746,100.92806"
+ id="path7167"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="display:inline;fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:18;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Send-4);enable-background:new"
+ d="M 527.91203,584.39421 3442.4188,1000.8355"
+ id="path7167-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#f83615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="80.219048"
+ y="107.38741"
+ id="text8200"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan8202"
+ x="80.219048"
+ y="107.38741"
+ style="font-size:50px;fill:#f83615;fill-opacity:1">CROP_DEFAULT</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f80000;fill-opacity:0;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="3861.3669"
+ y="1281.7198"
+ id="text8200-4"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105877,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5"
+ x="3861.3669"
+ y="1281.7198"
+ style="font-size:56.64243317px;fill:#f80000;fill-opacity:0">COMPOSE_PADDED</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f8d615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="3615.1545"
+ y="49.156631"
+ id="text8200-4-9"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105877,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3"
+ x="3615.1545"
+ y="49.156631"
+ style="font-size:50px;fill:#f8d615;fill-opacity:1">COMPOSE_ACTIVE</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f83615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="2429.1526"
+ y="-3.1657715"
+ id="text8200-4-5"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105878,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-7"
+ x="2429.1526"
+ y="-3.1657715"
+ style="font-size:49.99999958px;fill:#f83615;fill-opacity:1">COMPOSE_DEFAULT</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f815bb;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="3681.5449"
+ y="1289.9539"
+ id="text8200-4-9-3"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105877,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6"
+ x="3681.5449"
+ y="1289.9539"
+ style="font-size:50px;fill:#f815bb;fill-opacity:1">COMPOSE_PADDED</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:50px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new;"
+ x="2438.0618"
+ y="1368.4291"
+ id="text8200-4-9-3-5"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105877,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6-3"
+ x="2438.0618"
+ y="1368.4291"
+ style="font-size:50px;fill:#000000;fill-opacity:1;">COMPOSE_BONDS</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="8.0815096"
+ y="1438.8961"
+ id="text8200-4-9-3-5-6"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6-3-2"
+ x="8.0815096"
+ y="1438.8961"
+ style="font-size:50px;fill:#000000;fill-opacity:1">CROP_BONDS</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="1455.4426"
+ y="-26.808125"
+ id="text8200-4-9-3-5-6-9"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6-3-2-1"
+ x="1455.4426"
+ y="-26.808125"
+ style="font-size:50px;fill:#000000;fill-opacity:1">overscan area</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f8d615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="179.63055"
+ y="385.38785"
+ id="text8200-4-9-2"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-7"
+ x="179.63055"
+ y="385.38785"
+ style="font-size:50px;fill:#f8d615;fill-opacity:1">CROP_ACTIVE</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="636.67419"
+ y="-138.84549"
+ id="text8200-4-9-3-5-6-0"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6-3-2-9"
+ x="636.67419"
+ y="-138.84549"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:70px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';fill:#000000;fill-opacity:1">DATA SOURCE</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
+ x="3178.7151"
+ y="-129.06131"
+ id="text8200-4-9-3-5-6-0-3"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96105877,1.0405191)"><tspan
+ sodipodi:role="line"
+ id="tspan8202-5-3-6-3-2-9-6"
+ x="3178.7151"
+ y="-129.06131"
+ style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:69.99999978px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';fill:#000000;fill-opacity:1">DATA SINK</tspan></text>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot7469"
+ style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:sans-serif;font-style:normal;font-weight:normal;font-size:57.5px;line-height:125%;letter-spacing:0px;word-spacing:0px;"><flowRegion
+ id="flowRegion7471"><rect
+ id="rect7473"
+ width="4297.5474"
+ height="1851.537"
+ x="-52.635666"
+ y="70.623535"
+ style="font-size:57.5px;" /></flowRegion><flowPara
+ id="flowPara7475"></flowPara></flowRoot> </g>
+</svg>
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index e144370f62a0..d6152c907b8b 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1514,23 +1514,28 @@ be named ``MEDIA_BUS_FMT_SRGGB10_2X8_PADHI_LE``.
.. _bayer-patterns:
-.. figure:: subdev-formats_files/bayer.*
- :alt: bayer.png
+.. figure:: bayer.*
+ :alt: bayer.pdf / bayer.svg
:align: center
**Figure 4.8 Bayer Patterns**
-
-
The following table lists existing packed Bayer formats. The data
organization is given as an example for the first pixel only.
+.. HACK: ideally, we would be using adjustbox here. However, Sphinx
+.. behaves badly here: if the table has more than 30 columns, it
+.. switches to longtable, and there is no way to override that.
+
+
.. raw:: latex
- \newline\newline\begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{7.6cm}|p{1.6cm}|p{0.7cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.3cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _v4l2-mbus-pixelcode-bayer:
@@ -2314,7 +2319,7 @@ organization is given as an example for the first pixel only.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
Packed YUV Formats
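
The hunk above drops the adjustbox wrapper because Sphinx emits a longtable for tables wider than roughly 30 columns, and a longtable cannot be wrapped in a box; shrinking the font and the column padding inside a group works for both table kinds. A minimal, self-contained LaTeX sketch of the two strategies (illustrative only, not part of the patch; the example table contents are invented):

    % Old approach being removed: scale an ordinary tabular to the column width.
    \documentclass{article}
    \usepackage{adjustbox}
    \usepackage{longtable}
    \begin{document}

    \begin{adjustbox}{width=\columnwidth}
      \begin{tabular}{|l|l|l|}
        \hline Identifier & Code & Bit 9 \\ \hline
      \end{tabular}
    \end{adjustbox}

    % New approach being added: keep \tiny and the smaller \tabcolsep local to a
    % group; this also works when Sphinx generates a longtable instead.
    \begingroup
      \tiny
      \setlength{\tabcolsep}{2pt}
      \begin{longtable}{|l|l|l|}
        \hline Identifier & Code & Bit 9 \\ \hline
      \end{longtable}
    \endgroup

    \end{document}
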
diff --git a/Documentation/media/uapi/v4l/subdev-formats_files/bayer.png b/Documentation/media/uapi/v4l/subdev-formats_files/bayer.png
deleted file mode 100644
index 9b15fb22e817..000000000000
--- a/Documentation/media/uapi/v4l/subdev-formats_files/bayer.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
new file mode 100644
index 000000000000..ba02e6f6214d
--- /dev/null
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
@@ -0,0 +1,313 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="43cm"
+ height="10cm"
+ viewBox="-194 128 844 196"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="subdev-image-processing-crop.svg">
+ <metadata
+ id="metadata100">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs98" />
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview96"
+ showgrid="false"
+ inkscape:zoom="0.3649199"
+ inkscape:cx="767.29168"
+ inkscape:cy="177.16535"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x="-8"
+ y="130"
+ width="469.774"
+ height="193"
+ id="rect4" />
+ <g
+ id="g6"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="4.5"
+ y="189"
+ width="159"
+ height="104"
+ id="rect8" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+ x="4.5"
+ y="189"
+ width="159"
+ height="104"
+ id="rect10" />
+ </g>
+ <g
+ id="g12"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="63.5"
+ y="211"
+ width="94"
+ height="77"
+ id="rect14" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x="63.5"
+ y="211"
+ width="94"
+ height="77"
+ id="rect16" />
+ </g>
+ <text
+ style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="74.5"
+ y="227.75"
+ id="text18">
+ <tspan
+ x="74.5"
+ y="227.75"
+ id="tspan20"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+ <tspan
+ x="74.5"
+ y="243.75"
+ id="tspan22"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+ <tspan
+ x="74.5"
+ y="259.75"
+ id="tspan24"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+ </text>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="29.5"
+ y="158"
+ id="text26">
+ <tspan
+ x="29.5"
+ y="158"
+ id="tspan28"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+ </text>
+ <text
+ style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="8.53836"
+ y="157.914"
+ id="text30">
+ <tspan
+ x="8.53836"
+ y="157.914"
+ id="tspan32"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+ <tspan
+ x="8.53836"
+ y="173.914"
+ id="tspan34"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <text
+ style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="349.774"
+ y="155"
+ id="text36">
+ <tspan
+ x="349.774"
+ y="155"
+ id="tspan38"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+ <tspan
+ x="349.774"
+ y="171"
+ id="tspan40"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <g
+ id="g42"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="350.488"
+ y="190.834"
+ width="93.2863"
+ height="75.166"
+ id="rect44" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
+ x="350.488"
+ y="190.834"
+ width="93.2863"
+ height="75.166"
+ id="rect46" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="350.488"
+ y1="266"
+ x2="63.5"
+ y2="288"
+ id="line48" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="350.488"
+ y1="190.834"
+ x2="63.5"
+ y2="211"
+ id="line50" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="443.774"
+ y1="266"
+ x2="157.5"
+ y2="288"
+ id="line52" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="443.774"
+ y1="190.834"
+ x2="157.5"
+ y2="211"
+ id="line54" />
+ <g
+ id="g56"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="473.1"
+ cy="219.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse58" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="473.1"
+ cy="219.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse60" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="473.1"
+ cy="219.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse62" />
+ </g>
+ <g
+ id="g64"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="481.6"
+ y1="219.984"
+ x2="637.934"
+ y2="220.012"
+ id="line66" />
+ <polygon
+ style="fill:#000000"
+ points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "
+ id="polygon68" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "
+ id="polygon70" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="506.908"
+ y="209.8"
+ id="text72">
+ <tspan
+ x="506.908"
+ y="209.8"
+ id="tspan74"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (source)</tspan>
+ </text>
+ <g
+ id="g76"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse78" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse80" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse82" />
+ </g>
+ <g
+ id="g84"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="-192.398"
+ y1="241.8"
+ x2="-38.6343"
+ y2="241.529"
+ id="line86" />
+ <polygon
+ style="fill:#000000"
+ points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+ id="polygon88" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+ id="polygon90" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="-147.858"
+ y="229.8"
+ id="text92">
+ <tspan
+ x="-147.858"
+ y="229.8"
+ id="tspan94"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+ </text>
+</svg>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
new file mode 100644
index 000000000000..c82291a4493e
--- /dev/null
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
@@ -0,0 +1,769 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="59cm"
+ height="18cm"
+ viewBox="-186 71 1178 346"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="subdev-image-processing-full.svg">
+ <metadata
+ id="metadata260">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs258" />
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview256"
+ showgrid="false"
+ inkscape:zoom="0.26595857"
+ inkscape:cx="1052.7956"
+ inkscape:cy="318.89764"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" />
+ <g
+ id="g4"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="318.9"
+ y="129"
+ width="208.1"
+ height="249"
+ id="rect6" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#ff765a"
+ x="318.9"
+ y="129"
+ width="208.1"
+ height="249"
+ id="rect8" />
+ </g>
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x="-2"
+ y="73"
+ width="806"
+ height="343"
+ id="rect10" />
+ <g
+ id="g12"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="-12.5"
+ cy="166.712"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse14" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-12.5"
+ cy="166.712"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse16" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-12.5"
+ cy="166.712"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse18" />
+ </g>
+ <g
+ id="g20"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="815.232"
+ cy="205.184"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse22" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="815.232"
+ cy="205.184"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse24" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="815.232"
+ cy="205.184"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse26" />
+ </g>
+ <g
+ id="g28"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="-184.5"
+ y1="167"
+ x2="-30.7361"
+ y2="166.729"
+ id="line30" />
+ <polygon
+ style="fill:#000000"
+ points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "
+ id="polygon32" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "
+ id="polygon34" />
+ </g>
+ <g
+ id="g36"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="823.732"
+ y1="205.184"
+ x2="980.066"
+ y2="205.212"
+ id="line38" />
+ <polygon
+ style="fill:#000000"
+ points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "
+ id="polygon40" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "
+ id="polygon42" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="-139.96"
+ y="155"
+ id="text44">
+ <tspan
+ x="-139.96"
+ y="155"
+ id="tspan46"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+ </text>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="849.04"
+ y="195"
+ id="text48">
+ <tspan
+ x="849.04"
+ y="195"
+ id="tspan50"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 2 (source)</tspan>
+ </text>
+ <g
+ id="g52"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="5.5"
+ y="120"
+ width="159"
+ height="104"
+ id="rect54" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+ x="5.5"
+ y="120"
+ width="159"
+ height="104"
+ id="rect56" />
+ </g>
+ <g
+ id="g58"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="62.5"
+ y="136"
+ width="94"
+ height="77"
+ id="rect60" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x="62.5"
+ y="136"
+ width="94"
+ height="77"
+ id="rect62" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="30.5"
+ y="89"
+ id="text64">
+ <tspan
+ x="30.5"
+ y="89"
+ id="tspan66"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+ </text>
+ <text
+ style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="9.53836"
+ y="88.9138"
+ id="text68">
+ <tspan
+ x="9.53836"
+ y="88.9138"
+ id="tspan70"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+ <tspan
+ x="9.53836"
+ y="104.914"
+ id="tspan72"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <g
+ id="g74"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="333.644"
+ y="185.65"
+ width="165.2"
+ height="172.478"
+ id="rect76" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
+ x="333.644"
+ y="185.65"
+ width="165.2"
+ height="172.478"
+ id="rect78" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="333.644"
+ y1="358.128"
+ x2="62.5"
+ y2="213"
+ id="line80" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="333.644"
+ y1="185.65"
+ x2="62.5"
+ y2="136"
+ id="line82" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="498.844"
+ y1="358.128"
+ x2="156.5"
+ y2="213"
+ id="line84" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="498.844"
+ y1="185.65"
+ x2="156.5"
+ y2="136"
+ id="line86" />
+ <text
+ style="fill:#00ff00;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="334.704"
+ y="149.442"
+ id="text88">
+ <tspan
+ x="334.704"
+ y="149.442"
+ id="tspan90"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+ <tspan
+ x="334.704"
+ y="165.442"
+ id="tspan92"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection (scaling)</tspan>
+ </text>
+ <g
+ id="g94"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="409.322"
+ y="194.565"
+ width="100.186"
+ height="71.4523"
+ id="rect96" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x="409.322"
+ y="194.565"
+ width="100.186"
+ height="71.4523"
+ id="rect98" />
+ </g>
+ <text
+ style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="689.5"
+ y="105.128"
+ id="text100">
+ <tspan
+ x="689.5"
+ y="105.128"
+ id="tspan102"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+ <tspan
+ x="689.5"
+ y="121.128"
+ id="tspan104"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <g
+ id="g106"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="688.488"
+ y="173.834"
+ width="100.186"
+ height="71.4523"
+ id="rect108" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
+ x="688.488"
+ y="173.834"
+ width="100.186"
+ height="71.4523"
+ id="rect110" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="688.488"
+ y1="245.286"
+ x2="409.322"
+ y2="266.018"
+ id="line112" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="688.488"
+ y1="173.834"
+ x2="409.322"
+ y2="194.565"
+ id="line114" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="788.674"
+ y1="245.286"
+ x2="509.508"
+ y2="266.018"
+ id="line116" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="788.674"
+ y1="173.834"
+ x2="509.508"
+ y2="194.565"
+ id="line118" />
+ <text
+ style="fill:#ff765a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="325"
+ y="103"
+ id="text120">
+ <tspan
+ x="325"
+ y="103"
+ id="tspan122"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+ <tspan
+ x="325"
+ y="119"
+ id="tspan124"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bounds selection</tspan>
+ </text>
+ <g
+ id="g126"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="-12.0982"
+ cy="341.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse128" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-12.0982"
+ cy="341.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse130" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-12.0982"
+ cy="341.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse132" />
+ </g>
+ <g
+ id="g134"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="-184.098"
+ y1="341.8"
+ x2="-30.3343"
+ y2="341.529"
+ id="line136" />
+ <polygon
+ style="fill:#000000"
+ points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "
+ id="polygon138" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "
+ id="polygon140" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="-139"
+ y="329"
+ id="text142">
+ <tspan
+ x="-139"
+ y="329"
+ id="tspan144"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (sink)</tspan>
+ </text>
+ <g
+ id="g146"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="7.80824"
+ y="292.8"
+ width="112.092"
+ height="82.2"
+ id="rect148" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+ x="7.80824"
+ y="292.8"
+ width="112.092"
+ height="82.2"
+ id="rect150" />
+ </g>
+ <g
+ id="g152"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="52.9"
+ y="314.8"
+ width="58.1"
+ height="50.2"
+ id="rect154" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x="52.9"
+ y="314.8"
+ width="58.1"
+ height="50.2"
+ id="rect156" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="31.9"
+ y="259.8"
+ id="text158">
+ <tspan
+ x="31.9"
+ y="259.8"
+ id="tspan160"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+ </text>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="358.9"
+ y1="251.9"
+ x2="52.9"
+ y2="314.8"
+ id="line162" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="358.9"
+ y1="316"
+ x2="52.9"
+ y2="365"
+ id="line164" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="434"
+ y1="316"
+ x2="111"
+ y2="365"
+ id="line166" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="434"
+ y1="251.9"
+ x2="111"
+ y2="314.8"
+ id="line168" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
+ x="358.9"
+ y="251.9"
+ width="75.1"
+ height="64.1"
+ id="rect170" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x="443.262"
+ y="284.466"
+ width="64.738"
+ height="48.534"
+ id="rect172" />
+ <g
+ id="g174"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="693.428"
+ y="324.734"
+ width="63.572"
+ height="49.266"
+ id="rect176" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
+ x="693.428"
+ y="324.734"
+ width="63.572"
+ height="49.266"
+ id="rect178" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="693.428"
+ y1="374"
+ x2="443.262"
+ y2="333"
+ id="line180" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="693.428"
+ y1="324.734"
+ x2="443.262"
+ y2="284.466"
+ id="line182" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="757"
+ y1="374"
+ x2="508"
+ y2="333"
+ id="line184" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="757"
+ y1="324.734"
+ x2="508"
+ y2="284.466"
+ id="line186" />
+ <g
+ id="g188"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="815.44"
+ cy="343.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse190" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="815.44"
+ cy="343.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse192" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="815.44"
+ cy="343.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse194" />
+ </g>
+ <g
+ id="g196"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="823.94"
+ y1="343.984"
+ x2="980.274"
+ y2="344.012"
+ id="line198" />
+ <polygon
+ style="fill:#000000"
+ points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "
+ id="polygon200" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "
+ id="polygon202" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="849.248"
+ y="333.8"
+ id="text204">
+ <tspan
+ x="849.248"
+ y="333.8"
+ id="tspan206"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 3 (source)</tspan>
+ </text>
+ <text
+ style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="197"
+ y="91"
+ id="text208">
+ <tspan
+ x="197"
+ y="91"
+ id="tspan210"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+ <tspan
+ x="197"
+ y="107"
+ id="tspan212"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+ <tspan
+ x="197"
+ y="123"
+ id="tspan214"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+ </text>
+ <text
+ style="fill:#a020f0;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="553"
+ y="95"
+ id="text216">
+ <tspan
+ x="553"
+ y="95"
+ id="tspan218"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source</tspan>
+ <tspan
+ x="553"
+ y="111"
+ id="tspan220"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+ <tspan
+ x="553"
+ y="127"
+ id="tspan222"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+ </text>
+ <g
+ id="g224"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x1="211"
+ y1="132"
+ x2="166.21"
+ y2="135.287"
+ id="line226" />
+ <polygon
+ style="fill:#0000ff"
+ points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "
+ id="polygon228" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "
+ id="polygon230" />
+ </g>
+ <g
+ id="g232"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x1="209"
+ y1="131"
+ x2="115.581"
+ y2="306.209"
+ id="line234" />
+ <polygon
+ style="fill:#0000ff"
+ points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "
+ id="polygon236" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "
+ id="polygon238" />
+ </g>
+ <g
+ id="g240"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x1="550.492"
+ y1="133.214"
+ x2="514.916"
+ y2="186.469"
+ id="line242" />
+ <polygon
+ style="fill:#a020f0"
+ points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "
+ id="polygon244" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "
+ id="polygon246" />
+ </g>
+ <g
+ id="g248"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x1="550.072"
+ y1="133.787"
+ x2="510.618"
+ y2="275.089"
+ id="line250" />
+ <polygon
+ style="fill:#a020f0"
+ points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "
+ id="polygon252" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "
+ id="polygon254" />
+ </g>
+</svg>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
new file mode 100644
index 000000000000..e7b3786f8a9b
--- /dev/null
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
@@ -0,0 +1,560 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="59cm"
+ height="17cm"
+ viewBox="-194 128 1179 330"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="subdev-image-processing-scaling-multi-source.svg">
+ <metadata
+ id="metadata186">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs184" />
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview182"
+ showgrid="false"
+ inkscape:zoom="0.26595857"
+ inkscape:cx="1052.7956"
+ inkscape:cy="301.1811"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x="-8"
+ y="130"
+ width="806"
+ height="327"
+ id="rect4" />
+ <g
+ id="g6"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="4.5"
+ y="189"
+ width="159"
+ height="104"
+ id="rect8" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+ x="4.5"
+ y="189"
+ width="159"
+ height="104"
+ id="rect10" />
+ </g>
+ <g
+ id="g12"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="49.5"
+ y="204"
+ width="94"
+ height="77"
+ id="rect14" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+ x="49.5"
+ y="204"
+ width="94"
+ height="77"
+ id="rect16" />
+ </g>
+ <text
+ style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="60"
+ y="224"
+ id="text18">
+ <tspan
+ x="60"
+ y="224"
+ id="tspan20"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+ <tspan
+ x="60"
+ y="240"
+ id="tspan22"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+ <tspan
+ x="60"
+ y="256"
+ id="tspan24"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+ </text>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="29.5"
+ y="158"
+ id="text26">
+ <tspan
+ x="29.5"
+ y="158"
+ id="tspan28"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+ </text>
+ <text
+ style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="8.53836"
+ y="157.914"
+ id="text30">
+ <tspan
+ x="8.53836"
+ y="157.914"
+ id="tspan32"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+ <tspan
+ x="8.53836"
+ y="173.914"
+ id="tspan34"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <g
+ id="g36"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="333.644"
+ y="185.65"
+ width="165.2"
+ height="172.478"
+ id="rect38" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
+ x="333.644"
+ y="185.65"
+ width="165.2"
+ height="172.478"
+ id="rect40" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="333.644"
+ y1="358.128"
+ x2="49.5"
+ y2="281"
+ id="line42" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="333.644"
+ y1="185.65"
+ x2="49.5"
+ y2="204"
+ id="line44" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="498.844"
+ y1="358.128"
+ x2="143.5"
+ y2="281"
+ id="line46" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="498.844"
+ y1="185.65"
+ x2="143.5"
+ y2="204"
+ id="line48" />
+ <text
+ style="fill:#00ff00;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="334.704"
+ y="149.442"
+ id="text50">
+ <tspan
+ x="334.704"
+ y="149.442"
+ id="tspan52"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+ <tspan
+ x="334.704"
+ y="165.442"
+ id="tspan54"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection (scaling)</tspan>
+ </text>
+ <g
+ id="g56"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="382.322"
+ y="199.565"
+ width="100.186"
+ height="71.4523"
+ id="rect58" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x="382.322"
+ y="199.565"
+ width="100.186"
+ height="71.4523"
+ id="rect60" />
+ </g>
+ <text
+ style="fill:#a020f0;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="543.322"
+ y="149.442"
+ id="text62">
+ <tspan
+ x="543.322"
+ y="149.442"
+ id="tspan64"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source</tspan>
+ <tspan
+ x="543.322"
+ y="165.442"
+ id="tspan66"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+ <tspan
+ x="543.322"
+ y="181.442"
+ id="tspan68"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+ </text>
+ <text
+ style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="691.5"
+ y="157.128"
+ id="text70">
+ <tspan
+ x="691.5"
+ y="157.128"
+ id="tspan72"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+ <tspan
+ x="691.5"
+ y="173.128"
+ id="tspan74"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+ </text>
+ <g
+ id="g76"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="690.488"
+ y="225.834"
+ width="100.186"
+ height="71.4523"
+ id="rect78" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
+ x="690.488"
+ y="225.834"
+ width="100.186"
+ height="71.4523"
+ id="rect80" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="690.488"
+ y1="297.286"
+ x2="382.322"
+ y2="271.018"
+ id="line82" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="690.488"
+ y1="225.834"
+ x2="382.322"
+ y2="199.565"
+ id="line84" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="790.674"
+ y1="297.286"
+ x2="482.508"
+ y2="271.018"
+ id="line86" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="790.674"
+ y1="225.834"
+ x2="482.508"
+ y2="199.565"
+ id="line88" />
+ <g
+ id="g90"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="808.1"
+ cy="249.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse92" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="808.1"
+ cy="249.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse94" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="808.1"
+ cy="249.984"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse96" />
+ </g>
+ <g
+ id="g98"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="816.6"
+ y1="249.984"
+ x2="972.934"
+ y2="250.012"
+ id="line100" />
+ <polygon
+ style="fill:#000000"
+ points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "
+ id="polygon102" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "
+ id="polygon104" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="841.908"
+ y="239.8"
+ id="text106">
+ <tspan
+ x="841.908"
+ y="239.8"
+ id="tspan108"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (source)</tspan>
+ </text>
+ <g
+ id="g110"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse112" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse114" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="-20.3982"
+ cy="241.512"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse116" />
+ </g>
+ <g
+ id="g118"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="-192.398"
+ y1="241.8"
+ x2="-38.6343"
+ y2="241.529"
+ id="line120" />
+ <polygon
+ style="fill:#000000"
+ points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+ id="polygon122" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+ id="polygon124" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="-147.858"
+ y="229.8"
+ id="text126">
+ <tspan
+ x="-147.858"
+ y="229.8"
+ id="tspan128"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+ </text>
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x="389.822"
+ y="276.666"
+ width="100.186"
+ height="71.4523"
+ id="rect130" />
+ <g
+ id="g132"
+ style="">
+ <rect
+ style="fill:#ffffff"
+ x="689.988"
+ y="345.934"
+ width="100.186"
+ height="71.4523"
+ id="rect134" />
+ <rect
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
+ x="689.988"
+ y="345.934"
+ width="100.186"
+ height="71.4523"
+ id="rect136" />
+ </g>
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="689.988"
+ y1="417.386"
+ x2="389.822"
+ y2="348.118"
+ id="line138" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="689.988"
+ y1="345.934"
+ x2="389.822"
+ y2="276.666"
+ id="line140" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="790.174"
+ y1="417.386"
+ x2="490.008"
+ y2="348.118"
+ id="line142" />
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
+ x1="790.174"
+ y1="345.934"
+ x2="490.008"
+ y2="276.666"
+ id="line144" />
+ <g
+ id="g146"
+ style="">
+ <ellipse
+ style="fill:#ffffff"
+ cx="805.6"
+ cy="384.084"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse148" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="805.6"
+ cy="384.084"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse150" />
+ <ellipse
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ cx="805.6"
+ cy="384.084"
+ rx="8.5"
+ ry="8.5"
+ id="ellipse152" />
+ </g>
+ <g
+ id="g154"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ x1="814.1"
+ y1="384.084"
+ x2="970.434"
+ y2="384.112"
+ id="line156" />
+ <polygon
+ style="fill:#000000"
+ points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "
+ id="polygon158" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+ points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "
+ id="polygon160" />
+ </g>
+ <text
+ style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
+ x="839.408"
+ y="373.9"
+ id="text162">
+ <tspan
+ x="839.408"
+ y="373.9"
+ id="tspan164"
+ style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 2 (source)</tspan>
+ </text>
+ <g
+ id="g166"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x1="546"
+ y1="191"
+ x2="492.157"
+ y2="198.263"
+ id="line168" />
+ <polygon
+ style="fill:#a020f0"
+ points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "
+ id="polygon170" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "
+ id="polygon172" />
+ </g>
+ <g
+ id="g174"
+ style="">
+ <line
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ x1="546.908"
+ y1="190.725"
+ x2="495.383"
+ y2="268.548"
+ id="line176" />
+ <polygon
+ style="fill:#a020f0"
+ points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "
+ id="polygon178" />
+ <polygon
+ style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+ points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "
+ id="polygon180" />
+ </g>
+</svg>
diff --git a/Documentation/media/uapi/v4l/vbi_525.svg b/Documentation/media/uapi/v4l/vbi_525.svg
new file mode 100644
index 000000000000..b05f7777ccf8
--- /dev/null
+++ b/Documentation/media/uapi/v4l/vbi_525.svg
@@ -0,0 +1,811 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="208.73068mm"
+ height="51.395489mm"
+ viewBox="0 0 739.59691 182.11"
+ sodipodi:docname="vbi_525.svg"><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview4"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="1.5350601"
+ inkscape:cx="264.23387"
+ inkscape:cy="44.916942"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g10"
+ units="mm" /><metadata
+ id="metadata8"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs6"><clipPath
+ id="clipPath20"
+ clipPathUnits="userSpaceOnUse"><path
+ inkscape:connector-curvature="0"
+ id="path22"
+ d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 1359,0 0,1 -1359,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1363,0 0,1 -1363,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1367,0 0,1 -1367,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1371,0 0,1 -1371,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1375,0 0,1 -1375,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1379,0 0,1 -1379,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1383,0 0,1 -1383,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1387,0 0,1 -1387,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1390,0 0,1 -1390,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1386,0 0,1 -1386,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1382,0 0,1 -1382,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1378,0 0,1 -1378,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1374,0 0,1 -1374,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1370,0 0,1 -1370,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1366,0 0,1 -1366,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1362,0 0,1 -1362,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1358,0 0,1 -1358,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z" /></clipPath><clipPath
+ id="clipPath98"
+ clipPathUnits="userSpaceOnUse"><path
+ inkscape:connector-curvature="0"
+ id="path100"
+ d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 1360,0 0,1 -1360,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1364,0 0,1 -1364,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1368,0 0,1 -1368,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1372,0 0,1 -1372,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1376,0 0,1 -1376,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1380,0 0,1 -1380,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1384,0 0,1 -1384,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1388,0 0,1 -1388,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1389,0 0,1 -1389,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1385,0 0,1 -1385,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1381,0 0,1 -1381,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1377,0 0,1 -1377,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1373,0 0,1 -1373,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1369,0 0,1 -1369,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1365,0 0,1 -1365,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1361,0 0,1 -1361,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1357,0 0,1 -1357,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z" /></clipPath></defs><g
+ transform="matrix(0.125,0,0,-0.125,-87.571875,638.05691)"
+ inkscape:label="vbi_525"
+ inkscape:groupmode="layer"
+ id="g10"><g
+ transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
+ id="g12"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path14"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,3974.45 0,-85.05" /></g><g
+ transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
+ id="g16"
+ style=""><g
+ clip-path="url(#clipPath20)"
+ id="g18"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path24"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,3931.93 113.4,0" /></g></g><g
+ transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
+ id="g26"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path28"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path30"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9 z" /><path
+ inkscape:connector-curvature="0"
+ id="path32"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4683.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path34"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4400.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path36"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4116.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path38"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3833.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path40"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3549.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path42"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3266.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path44"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2982.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path46"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2699.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path48"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2415.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path50"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2132.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path52"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1848.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path54"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1565.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path58"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 998.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path60"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 714.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path62"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4683.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path64"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4400.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path66"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4116.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path68"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3833.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path70"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3549.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path72"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3266.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path74"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2982.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path76"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2699.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path78"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2415.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path80"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2132.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path82"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1848.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path84"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1565.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path86"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path88"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 998.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path90"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 714.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path92"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4598.15 0,-85.05" /></g><g
+ transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
+ id="g94"
+ style=""><g
+ clip-path="url(#clipPath98)"
+ id="g96"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path102"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4555.63 113.4,0" /></g></g><path
+ d="m 1547.8322,4815.7637 49.1401,12.2851 -49.1401,12.285 0,-24.5701"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path106"
+ inkscape:connector-curvature="0" /><path
+ d="m 1547.8322,4815.7637 49.1401,12.2851 -49.1401,12.285 0,-24.5701 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path108"
+ inkscape:connector-curvature="0" /><path
+ d="m 1456.104,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path110"
+ inkscape:connector-curvature="0" /><path
+ d="m 1824.6548,5030.7452 0,73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path112"
+ inkscape:connector-curvature="0" /><path
+ d="m 2193.2055,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path114"
+ inkscape:connector-curvature="0" /><path
+ d="m 2561.7563,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path116"
+ inkscape:connector-curvature="0" /><path
+ d="m 2930.307,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path118"
+ inkscape:connector-curvature="0" /><path
+ d="m 3298.8578,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path120"
+ inkscape:connector-curvature="0" /><path
+ d="m 3667.4085,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path122"
+ inkscape:connector-curvature="0" /><path
+ d="m 4035.9593,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path124"
+ inkscape:connector-curvature="0" /><path
+ d="m 4404.51,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path126"
+ inkscape:connector-curvature="0" /><path
+ d="m 4773.0608,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path128"
+ inkscape:connector-curvature="0" /><path
+ d="m 5141.6115,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path130"
+ inkscape:connector-curvature="0" /><path
+ d="m 5510.1623,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path132"
+ inkscape:connector-curvature="0" /><path
+ d="m 5878.713,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path134"
+ inkscape:connector-curvature="0" /><path
+ d="m 1456.104,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path136"
+ inkscape:connector-curvature="0" /><path
+ d="m 1824.6548,4920.1799 0,73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path138"
+ inkscape:connector-curvature="0" /><path
+ d="m 2193.2055,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path140"
+ inkscape:connector-curvature="0" /><path
+ d="m 2561.7563,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path142"
+ inkscape:connector-curvature="0" /><path
+ d="m 2930.307,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path144"
+ inkscape:connector-curvature="0" /><path
+ d="m 3298.8578,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path146"
+ inkscape:connector-curvature="0" /><path
+ d="m 3667.4085,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path148"
+ inkscape:connector-curvature="0" /><path
+ d="m 4035.9593,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path150"
+ inkscape:connector-curvature="0" /><path
+ d="m 4404.51,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path152"
+ inkscape:connector-curvature="0" /><path
+ d="m 4773.0608,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path154"
+ inkscape:connector-curvature="0" /><path
+ d="m 5141.6115,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path156"
+ inkscape:connector-curvature="0" /><path
+ d="m 5510.1623,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path158"
+ inkscape:connector-curvature="0" /><path
+ d="m 5878.713,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path160"
+ inkscape:connector-curvature="0" /><path
+ d="m 719.00254,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path162"
+ inkscape:connector-curvature="0" /><path
+ d="m 719.00254,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path164"
+ inkscape:connector-curvature="0" /><path
+ d="m 1087.5533,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path166"
+ inkscape:connector-curvature="0" /><path
+ d="m 1087.5533,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path168"
+ inkscape:connector-curvature="0" /><path
+ d="m 700.575,4735.9046 18.42754,0 0,-110.5653 36.85507,0 0,110.5653 18.42754,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path170"
+ inkscape:connector-curvature="0" /><path
+ d="m 774.28515,4680.6285 18.42754,0 0,110.5652 -18.42754,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path172"
+ inkscape:connector-curvature="0" /><path
+ d="m 792.71269,4735.9046 18.42753,0 0,92.1442 18.42754,-18.4341 18.42754,36.8551 18.42754,-36.8551 18.42753,36.8551 18.42754,-55.2761 18.42754,55.2761 18.42754,-18.421 18.42753,55.2761 18.42754,-55.2761 18.42754,18.421 18.4275,36.8551 18.4276,-92.1312 18.4275,55.2761 18.4275,-55.2761 0,-55.2891"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path174"
+ inkscape:connector-curvature="0" /><path
+ d="m 1069.1257,4735.9046 18.4276,0 0,-110.5653 36.8551,0 0,110.5653 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path176"
+ inkscape:connector-curvature="0" /><path
+ d="m 1142.8294,4680.6285 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path178"
+ inkscape:connector-curvature="0" /><path
+ d="m 1456.104,4735.9046 0,-110.5653 18.4211,0 0,110.5653 165.8543,0 0,-110.5653 18.421,0 0,110.5653 165.8544,0 0,-110.5653 18.421,0 0,110.5653 165.8544,0 0,-110.5653 18.421,0 0,110.5653 165.8543,0 0,-110.5653 18.4211,0 0,110.5653 165.8543,0 0,-110.5653 18.421,0 0,110.5653 165.8544,0 0,-110.5653 147.4203,0 0,110.5653 36.8551,0 0,-110.5653 147.4203,0 0,110.5653 36.855,0 0,-110.5653 147.4203,0 0,110.5653 36.8551,0 0,-110.5653 147.4203,0 0,110.5653 36.8551,0 0,-110.5653 147.4203,0 0,110.5653 36.855,0 0,-110.5653 147.4203,0 0,110.5653 36.8551,0 0,-110.5653 18.4211,0 0,110.5653 165.8543,0 0,-110.5653 18.421,0 0,110.5653 165.8544,0 0,-110.5653 18.421,0 0,110.5653 165.8543,0 0,-110.5653 18.4211,0 0,110.5653 165.8543,0 0,-110.5653 18.4211,0 0,110.5653 165.8543,0 0,-110.5653 18.421,0 0,110.5653 165.8544,0 0,-110.5653 36.855,0 0,110.5653 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path180"
+ inkscape:connector-curvature="0" /><path
+ d="m 1161.2634,4735.9046 294.8406,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path182"
+ inkscape:connector-curvature="0" /><path
+ d="m 6320.9739,4735.9046 18.421,0 0,92.1442 18.4341,-18.4341 18.421,73.7102 18.4341,-55.2761 18.421,36.855 18.434,-18.434 18.4211,36.8551 18.434,-36.8551 18.421,36.8551 18.4341,-36.8551 18.421,18.434 18.4341,-36.855 18.421,36.855 18.434,-36.855 18.4211,-18.4341 0,-73.7101 18.434,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path184"
+ inkscape:connector-curvature="0" /><path
+ d="m 4828.3369,4680.6285 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path186"
+ inkscape:connector-curvature="0" /><path
+ d="m 4846.7709,4735.9046 294.8406,0 0,-110.5653 36.8551,0 0,110.5653 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path188"
+ inkscape:connector-curvature="0" /><path
+ d="m 5196.8876,4680.6285 18.4276,0 0,110.5652 -18.4276,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path190"
+ inkscape:connector-curvature="0" /><path
+ d="m 5565.4384,4680.6285 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path192"
+ inkscape:connector-curvature="0" /><path
+ d="m 5933.9891,4680.6285 18.4276,0 0,110.5652 -18.4276,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path194"
+ inkscape:connector-curvature="0" /><path
+ d="m 5952.4232,4735.9046 294.8406,0 0,-110.5653 36.855,0 0,110.5653 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path196"
+ inkscape:connector-curvature="0" /><path
+ d="m 6302.5399,4680.6285 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path198"
+ inkscape:connector-curvature="0" /><path
+ d="m 5786.5688,4735.9046 92.1442,0 0,-110.5653 36.8551,0 0,110.5653 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path200"
+ inkscape:connector-curvature="0" /><path
+ d="m 5805.0029,4791.1937 -36.8551,-110.5652"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path202"
+ inkscape:connector-curvature="0" /><path
+ d="m 5583.8724,4735.9046 165.8414,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path204"
+ inkscape:connector-curvature="0" /><path
+ d="m 5768.1478,4791.1937 -36.8551,-110.5652"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path206"
+ inkscape:connector-curvature="0" /><path
+ d="m 5215.3217,4735.9046 294.8406,0 0,-110.5653 36.855,0 0,110.5653 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path208"
+ inkscape:connector-curvature="0" /><path
+ d="m 6247.2638,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path210"
+ inkscape:connector-curvature="0" /><path
+ d="m 6247.2638,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path212"
+ inkscape:connector-curvature="0" /><path
+ d="m 6615.8145,5104.4553 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path214"
+ inkscape:connector-curvature="0" /><path
+ d="m 6615.8145,4993.8901 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path216"
+ inkscape:connector-curvature="0" /><path
+ d="m 700.575,3925.0929 18.42754,0 0,-110.5652 36.85507,0 0,110.5652 18.42754,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path218"
+ inkscape:connector-curvature="0" /><path
+ d="m 774.28515,3869.8168 18.42754,0 0,110.5652 -18.42754,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path220"
+ inkscape:connector-curvature="0" /><path
+ d="m 1142.8294,3869.8168 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path222"
+ inkscape:connector-curvature="0" /><path
+ d="m 792.71269,3925.0929 18.42753,0 0,110.5652 18.42754,-36.855 18.42754,18.434 18.42754,-36.8551 18.42753,36.8551 0,-92.1442 202.70293,0 0,-110.5652 36.8551,0 0,110.5652 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path224"
+ inkscape:connector-curvature="0" /><path
+ d="m 1161.2634,3925.0929 110.5653,0 0,-110.5652 18.421,0 0,110.5652 165.8543,0 0,-110.5652 18.4211,0 0,110.5652 165.8543,0 0,-110.5652 18.421,0 0,110.5652 165.8544,0 0,-110.5652 18.421,0 0,110.5652 165.8544,0 0,-110.5652 18.421,0 0,110.5652 165.8543,0 0,-110.5652 18.4211,0 0,110.5652 165.8543,0 0,-110.5652 147.4203,0 0,110.5652 36.8551,0 0,-110.5652 147.4203,0 0,110.5652 36.8551,0 0,-110.5652 147.4203,0 0,110.5652 36.855,0 0,-110.5652 147.4203,0 0,110.5652 36.8551,0 0,-110.5652 147.4203,0 0,110.5652 36.8551,0 0,-110.5652 147.4203,0 0,110.5652 36.855,0 0,-110.5652 18.4211,0 0,110.5652 165.8543,0 0,-110.5652 18.4211,0 0,110.5652 165.8543,0 0,-110.5652 18.421,0 0,110.5652 165.8544,0 0,-110.5652 18.421,0 0,110.5652 165.8543,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path226"
+ inkscape:connector-curvature="0" /><path
+ d="m 4220.2346,3925.0929 0,-110.5652 18.4211,0 0,110.5652 165.8543,0 0,-110.5652 18.4211,0 0,110.5652 55.2891,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path228"
+ inkscape:connector-curvature="0" /><path
+ d="m 4478.2202,3925.0929 294.8406,0 0,-110.5652 36.855,0 0,110.5652 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path230"
+ inkscape:connector-curvature="0" /><path
+ d="m 5952.4232,3925.0929 128.9862,0 0,73.7102 18.4341,36.855 18.421,-18.421 18.434,36.8551 18.4211,-36.8551 18.434,18.421 18.421,-36.855 18.4341,18.434 18.421,-36.8551 0,-55.2891 18.4341,0 0,-110.5652 36.855,0 0,110.5652 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path232"
+ inkscape:connector-curvature="0" /><path
+ d="m 6302.5399,3869.8168 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path234"
+ inkscape:connector-curvature="0" /><path
+ d="m 6320.9739,3925.0929 18.421,0 0,73.7102 18.4341,36.855 18.421,-36.855 18.4341,36.855 18.421,-18.421 18.434,0 18.4211,-36.8551 18.434,55.2761 18.421,-18.421 0,36.8551 18.4341,-18.4341 18.421,18.4341 18.4341,-36.8551 18.421,18.421 18.434,-55.2761 18.4211,18.4211 0,-73.7102 18.434,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path236"
+ inkscape:connector-curvature="0" /><path
+ d="m 5933.9891,3869.8168 18.4276,0 0,110.5652 -18.4276,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path238"
+ inkscape:connector-curvature="0" /><path
+ d="m 5786.5688,3925.0929 92.1442,0 0,-110.5652 36.8551,0 0,110.5652 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path240"
+ inkscape:connector-curvature="0" /><path
+ d="m 5805.0029,3980.382 -36.8551,-110.5652"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path242"
+ inkscape:connector-curvature="0" /><path
+ d="m 5768.1478,3980.382 -36.8551,-110.5652"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path244"
+ inkscape:connector-curvature="0" /><path
+ d="m 4828.3369,3869.8168 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path246"
+ inkscape:connector-curvature="0" /><path
+ d="m 5583.8724,3925.0929 165.8414,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path248"
+ inkscape:connector-curvature="0" /><path
+ d="m 4846.7709,3925.0929 294.8406,0 0,-110.5652 36.8551,0 0,110.5652 18.421,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path250"
+ inkscape:connector-curvature="0" /><path
+ d="m 5196.8876,3869.8168 18.4276,0 0,110.5652 -18.4276,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path252"
+ inkscape:connector-curvature="0" /><path
+ d="m 5215.3217,3925.0929 294.8406,0 0,-110.5652 36.855,0 0,110.5652 18.4211,0"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path254"
+ inkscape:connector-curvature="0" /><path
+ d="m 5565.4384,3869.8168 18.4275,0 0,110.5652 -18.4275,0 0,-110.5652 z"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path256"
+ inkscape:connector-curvature="0" /><path
+ d="m 6247.2638,4293.6437 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path258"
+ inkscape:connector-curvature="0" /><path
+ d="m 6247.2638,4183.0784 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path260"
+ inkscape:connector-curvature="0" /><path
+ d="m 6615.8145,4293.6437 0,-73.7102"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path262"
+ inkscape:connector-curvature="0" /><path
+ d="m 6615.8145,4183.0784 0,-73.7101"
+ style="fill:none;stroke:#000000;stroke-width:3.07125616;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path264"
+ inkscape:connector-curvature="0" /><text
+ y="-4035.6582"
+ x="1621.9453"
+ id="text268"
+ style="font-variant:normal;font-weight:normal;font-size:61.42512512px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan270"
+ sodipodi:role="line"
+ y="-4035.6582"
+ x="1621.9453 1642.3999 1676.5522">(1)</tspan></text>
+<text
+ y="-4127.7959"
+ x="4199.7334"
+ id="text272"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan274"
+ sodipodi:role="line"
+ y="-4127.7959"
+ x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan><tspan
+ id="tspan276"
+ sodipodi:role="line"
+ y="-4238.3613"
+ x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan><tspan
+ id="tspan278"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan><tspan
+ id="tspan280"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan><tspan
+ id="tspan282"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="841.81781 882.8006 923.78326">524</tspan><tspan
+ id="tspan284"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="841.81781 882.8006 923.78326">261</tspan><tspan
+ id="tspan286"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="1210.3684 1251.3512 1292.3342">525</tspan><tspan
+ id="tspan288"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="1210.3684 1251.3512 1292.3342">262</tspan><tspan
+ id="tspan290"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6022.0161 6062.999">22</tspan><tspan
+ id="tspan292"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6022.0161 6062.999">22</tspan><tspan
+ id="tspan294"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6390.5669 6431.5498">23</tspan><tspan
+ id="tspan296"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6390.5669 6431.5498">23</tspan><tspan
+ id="tspan298"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6001.5244 6042.5068 6083.4902">285</tspan><tspan
+ id="tspan300"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6022.0156 6062.9985">22</tspan><tspan
+ id="tspan302"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6370.0747 6411.0571 6452.04">286</tspan><tspan
+ id="tspan304"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6390.5664 6431.5493">23</tspan><tspan
+ id="tspan306"
+ sodipodi:role="line"
+ y="-4459.4922"
+ x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan><tspan
+ id="tspan308"
+ sodipodi:role="line"
+ y="-3648.6809"
+ x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
+<text
+ y="-4127.7959"
+ x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732"
+ id="text3632"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3634"
+ sodipodi:role="line"
+ y="-4127.7959"
+ x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan></text>
+<text
+ y="-4238.3613"
+ x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732"
+ id="text3636"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3638"
+ sodipodi:role="line"
+ y="-4238.3613"
+ x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan></text>
+<text
+ y="-5049.1729"
+ x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164"
+ id="text3640"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3642"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan></text>
+<text
+ y="-4938.6074"
+ x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296"
+ id="text3644"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3646"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan></text>
+<text
+ y="-5049.1729"
+ x="841.81781 882.8006 923.78326"
+ id="text3648"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3650"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="841.81781 882.8006 923.78326">524</tspan></text>
+<text
+ y="-4938.6074"
+ x="841.81781 882.8006 923.78326"
+ id="text3652"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3654"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="841.81781 882.8006 923.78326">261</tspan></text>
+<text
+ y="-5049.1729"
+ x="1210.3684 1251.3512 1292.3342"
+ id="text3656"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3658"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="1210.3684 1251.3512 1292.3342">525</tspan></text>
+<text
+ y="-4938.6074"
+ x="1210.3684 1251.3512 1292.3342"
+ id="text3660"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3662"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="1210.3684 1251.3512 1292.3342">262</tspan></text>
+<text
+ y="-5049.1729"
+ x="6022.0161 6062.999"
+ id="text3664"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3666"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6022.0161 6062.999">22</tspan></text>
+<text
+ y="-4938.6074"
+ x="6022.0161 6062.999"
+ id="text3668"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3670"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6022.0161 6062.999">22</tspan></text>
+<text
+ y="-5049.1729"
+ x="6390.5669 6431.5498"
+ id="text3672"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3674"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6390.5669 6431.5498">23</tspan></text>
+<text
+ y="-4938.6074"
+ x="6390.5669 6431.5498"
+ id="text3676"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3678"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6390.5669 6431.5498">23</tspan></text>
+<text
+ y="-4238.3623"
+ x="6001.5244 6042.5068 6083.4902"
+ id="text3680"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3682"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6001.5244 6042.5068 6083.4902">285</tspan></text>
+<text
+ y="-4127.7964"
+ x="6022.0156 6062.9985"
+ id="text3684"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3686"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6022.0156 6062.9985">22</tspan></text>
+<text
+ y="-4238.3623"
+ x="6370.0747 6411.0571 6452.04"
+ id="text3688"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3690"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6370.0747 6411.0571 6452.04">286</tspan></text>
+<text
+ y="-4127.7964"
+ x="6390.5664 6431.5493"
+ id="text3692"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3694"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6390.5664 6431.5493">23</tspan></text>
+<text
+ y="-4459.4922"
+ x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365"
+ id="text3696"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3698"
+ sodipodi:role="line"
+ y="-4459.4922"
+ x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan></text>
+<text
+ y="-3648.6809"
+ x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463"
+ id="text3700"
+ style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"><tspan
+ id="tspan3702"
+ sodipodi:role="line"
+ y="-3648.6809"
+ x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
+</g></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vbi_625.svg b/Documentation/media/uapi/v4l/vbi_625.svg
new file mode 100644
index 000000000000..c117ddb7bf7e
--- /dev/null
+++ b/Documentation/media/uapi/v4l/vbi_625.svg
@@ -0,0 +1,858 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="209.46608mm"
+ height="51.576824mm"
+ viewBox="0 0 742.20265 182.75252"
+ sodipodi:docname="vbi_625.svg"><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview4"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="1.5350601"
+ inkscape:cx="288.91482"
+ inkscape:cy="170.67667"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g10"
+ units="mm" /><metadata
+ id="metadata8"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs6"><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath20"><path
+ d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 2211,0 0,1 -2211,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2215,0 0,1 -2215,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2219,0 0,1 -2219,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2223,0 0,1 -2223,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2227,0 0,1 -2227,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2231,0 0,1 -2231,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2235,0 0,1 -2235,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2239,0 0,1 -2239,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2240,0 0,1 -2240,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2236,0 0,1 -2236,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2232,0 0,1 -2232,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2228,0 0,1 -2228,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2224,0 0,1 -2224,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2220,0 0,1 -2220,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2216,0 0,1 -2216,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2212,0 0,1 -2212,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2208,0 0,1 -2208,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z"
+ id="path22"
+ inkscape:connector-curvature="0" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath98"><path
+ d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z"
+ id="path100"
+ inkscape:connector-curvature="0" /></clipPath></defs><g
+ id="g10"
+ inkscape:groupmode="layer"
+ inkscape:label="vbi_625"
+ transform="matrix(0.125,0,0,-0.125,-87.571875,638.69874)"><g
+ id="g12"
+ transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
+ style=""><path
+ d="m 2132.25,4598.15 0,-85.05"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path14"
+ inkscape:connector-curvature="0" /></g><g
+ id="g16"
+ transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
+ style=""><g
+ id="g18"
+ clip-path="url(#clipPath20)"
+ style=""><path
+ d="m 2132.25,4555.63 113.4,0"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path24"
+ inkscape:connector-curvature="0" /></g></g><path
+ inkscape:connector-curvature="0"
+ id="path28"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2660.3649,4819.881 49.3132,12.3283 -49.3132,12.3283 0,-24.6566" /><path
+ inkscape:connector-curvature="0"
+ id="path30"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2660.3649,4819.881 49.3132,12.3283 -49.3132,12.3283 0,-24.6566 z" /><path
+ inkscape:connector-curvature="0"
+ id="path32"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1828.6151,4924.6651 0,73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path34"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2198.4643,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path36"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2568.3136,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path38"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2938.1628,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path40"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3308.012,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path42"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3677.8612,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path44"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4047.7105,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path46"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4417.5597,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path48"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4787.4089,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path50"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5157.2581,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path52"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5527.1073,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path54"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5896.9566,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1458.7659,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path58"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1828.6151,5035.6199 0,73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path60"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2198.4643,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path62"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2568.3136,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path64"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2938.1628,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path66"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3308.012,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path68"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3677.8612,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path70"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4047.7105,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path72"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4417.5597,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path74"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4787.4089,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path76"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5157.2581,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path78"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5527.1073,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path80"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5896.9566,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path82"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1458.7659,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path84"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 719.06744,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path86"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 719.06744,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path88"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1088.9167,4998.635 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path90"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1088.9167,5109.5897 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path92"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2568.3136,4074.0119 0,-110.9548" /><g
+ id="g94"
+ transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
+ style=""><g
+ id="g96"
+ clip-path="url(#clipPath98)"
+ style=""><path
+ d="m 2132.25,3931.93 113.4,0"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path102"
+ inkscape:connector-curvature="0" /></g></g><path
+ inkscape:connector-curvature="0"
+ id="path106"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2660.365,4006.2129 49.3132,12.3283 -49.3132,12.3283 0,-24.6566" /><path
+ inkscape:connector-curvature="0"
+ id="path108"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2660.365,4006.2129 49.3132,12.3283 -49.3132,12.3283 0,-24.6566 z" /><path
+ inkscape:connector-curvature="0"
+ id="path110"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6266.806,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path112"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5896.9567,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path114"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5527.1075,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path116"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5157.2583,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path118"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4787.409,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path120"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4417.5598,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path122"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4047.7106,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path124"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3677.8613,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path126"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3308.0121,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path128"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2938.1629,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path130"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2568.3136,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path132"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2198.4644,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path134"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1828.6152,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path136"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1458.7659,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path138"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6266.806,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path140"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5896.9567,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path142"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5527.1075,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path144"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5157.2583,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path146"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4787.409,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path148"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4417.5598,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path150"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4047.7106,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path152"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3677.8613,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path154"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3308.0121,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path156"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2938.1629,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path158"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2568.3136,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path160"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2198.4644,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path162"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1828.6152,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path164"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1458.7659,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path166"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 719.06746,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path168"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 719.06746,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path170"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1088.9167,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path172"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1088.9167,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path174"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1144.3876,4684.2698 18.4924,0 0,110.9548 -18.4924,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path176"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1070.4242,4739.7407 18.4925,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path178"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 700.575,4739.7407 18.49246,0 0,-110.9548 36.98492,0 0,110.9548 18.49247,0" /><path
+ inkscape:connector-curvature="0"
+ id="path180"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 809.886,4739.7407 3.28754,0" /><path
+ inkscape:connector-curvature="0"
+ id="path182"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 809.886,4739.7407 3.28754,0" /><path
+ inkscape:connector-curvature="0"
+ id="path184"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 774.54485,4684.2698 18.49246,0 0,110.9548 -18.49246,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path186"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 793.03731,4739.7407 18.49246,0 18.49246,73.9698 18.49246,-36.9849 18.49246,92.4688 18.49247,-55.4839 18.49246,18.499 18.49246,-36.9849 18.49246,55.4708 18.49246,-73.9698 18.49246,55.4839 18.49247,-73.9699 18.49241,110.9548 18.4925,-92.4688 18.4925,55.4839 18.4924,-55.4839 0,-36.9849" /><path
+ inkscape:connector-curvature="0"
+ id="path188"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1162.8865,4739.7407 18.486,0 0,92.4688 18.499,-18.499 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 18.499,-55.4708 18.4859,55.4708 18.499,-18.4859 18.486,55.4708 18.499,-55.4708 18.4859,18.4859 18.499,36.9849 18.4859,-73.9698 18.499,36.9849 18.4859,-55.4708 0,-55.4839 18.499,0 0,-110.9548 36.985,0 0,110.9548 18.4859,0 0,55.4839 18.499,36.9849 18.4859,-36.9849 18.499,55.4708 18.4859,-18.4859 18.499,55.4708 18.486,-18.4859 0,-129.4537 18.4989,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.985,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.486,0 0,110.9548 166.4386,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 36.9849,0 0,110.9548 332.8643,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path190"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 700.575,3926.0723 18.49246,0 0,-110.9547 36.98492,0 0,110.9547 18.49247,0" /><path
+ inkscape:connector-curvature="0"
+ id="path192"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 774.54485,3870.6015 18.49246,0 0,110.9547 -18.49246,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path194"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 793.03731,3926.0723 18.49246,0 0,92.4689 18.49246,-36.985 18.49246,73.9699 18.49246,-73.9699 18.49247,73.9699 18.49246,-36.9849 18.49246,18.4859 18.49246,-73.9698 18.49246,55.4839 18.49246,-18.499 18.49247,36.9849 18.49241,-73.9698 18.4925,92.4688 18.4925,-92.4688 18.4924,55.4839 0,-92.4689" /><path
+ inkscape:connector-curvature="0"
+ id="path196"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1070.4242,3926.0723 18.4925,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path198"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1144.3876,3870.6015 18.4924,0 0,110.9547 -18.4924,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path200"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1162.8865,3926.0723 18.486,0 0,55.4839 18.499,92.4558 18.4859,-55.4708 36.9849,36.9849 18.499,-55.4839 18.4859,18.499 18.499,-36.985 18.486,55.4709 18.499,-18.4859 18.4859,-36.985 18.499,18.486 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 0,-110.9548 18.499,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.985,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 351.3633,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path202"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5453.1376,4795.2246 -36.9849,-110.9548" /><path
+ inkscape:connector-curvature="0"
+ id="path204"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5434.6387,4739.7407 92.4688,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path206"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5582.5784,4684.2698 18.4924,0 0,110.9548 -18.4924,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path208"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5601.0773,4739.7407 295.8794,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path210"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5952.4276,4684.2698 18.4924,0 0,110.9548 -18.4924,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path212"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5970.9266,4739.7407 129.4407,0 0,147.9396 18.499,-36.9849 18.4859,36.9849 18.499,-36.9849 18.4859,18.499 18.499,-36.9849 18.4859,36.9849 18.499,-36.9849 18.486,-18.499 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path214"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4842.8799,4684.2698 18.4924,0 0,110.9548 -18.4924,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path216"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4861.3789,4739.7407 295.8794,0 0,-110.9548 36.9849,0 0,110.9548 18.4859,0" /><path
+ inkscape:connector-curvature="0"
+ id="path218"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5212.7291,4684.2698 18.4925,0 0,110.9548 -18.4925,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path220"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5231.2281,4739.7407 166.4256,0" /><path
+ inkscape:connector-curvature="0"
+ id="path222"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5416.1527,4795.2246 -36.9849,-110.9548" /><path
+ inkscape:connector-curvature="0"
+ id="path224"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4473.0307,3870.6015 18.4924,0 0,110.9547 -18.4924,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path226"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4491.5296,3926.0723 295.8794,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path228"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4842.8799,3870.6015 18.4924,0 0,110.9547 -18.4924,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path230"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4861.3789,3926.0723 295.8794,0 0,-110.9547 36.9849,0 0,110.9547 18.4859,0" /><path
+ inkscape:connector-curvature="0"
+ id="path232"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5212.7291,3870.6015 18.4925,0 0,110.9547 -18.4925,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path234"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5416.1527,3981.5562 -36.9849,-110.9547" /><path
+ inkscape:connector-curvature="0"
+ id="path236"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5231.2281,3926.0723 166.4256,0" /><path
+ inkscape:connector-curvature="0"
+ id="path238"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5453.1376,3981.5562 -36.9849,-110.9547" /><path
+ inkscape:connector-curvature="0"
+ id="path240"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5434.6387,3926.0723 92.4688,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path242"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5582.5784,3870.6015 18.4924,0 0,110.9547 -18.4924,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path244"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5601.0773,3926.0723 295.8794,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ inkscape:connector-curvature="0"
+ id="path246"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5952.4276,3870.6015 18.4924,0 0,110.9547 -18.4924,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path248"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 5970.9266,3926.0723 18.4859,0 0,73.9699 18.499,36.9849 18.4859,-36.9849 18.499,36.9849 18.486,-18.4859 18.4989,0 18.486,-36.985 18.499,55.4709 18.4859,-18.4859 0,36.9849 18.499,-18.499 18.4859,18.499 18.499,-36.9849 18.4859,18.4859 18.499,-55.4709 18.486,18.486 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path250"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6248.307,4739.7407 18.499,0 0,-110.9548 36.9849,0 0,110.9548 18.4859,0" /><path
+ inkscape:connector-curvature="0"
+ id="path252"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6248.307,3926.0723 18.499,0 0,-110.9547 36.9849,0 0,110.9547 18.4859,0" /><path
+ inkscape:connector-curvature="0"
+ id="path254"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6322.2768,3870.6015 18.4925,0 0,110.9547 -18.4925,0 0,-110.9547 z" /><path
+ inkscape:connector-curvature="0"
+ id="path256"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6322.2768,4684.2698 18.4925,0 0,110.9548 -18.4925,0 0,-110.9548 z" /><path
+ inkscape:connector-curvature="0"
+ id="path258"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6340.7758,4739.7407 18.4859,0 0,55.4839 18.499,55.4708 18.486,-92.4558 18.4989,55.4709 18.486,0 18.499,55.4839 18.4859,-73.9698 18.499,36.9849 18.4859,-36.9849 36.985,73.9698 18.4989,-36.9849 18.486,36.9849 18.499,-55.4839 18.4859,36.9849 0,-110.9547 18.499,0" /><path
+ inkscape:connector-curvature="0"
+ id="path260"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6340.7758,3926.0723 18.4859,0 0,129.4538 18.499,-18.499 18.486,18.499 18.4989,-36.9849 18.486,18.4859 18.499,-36.9849 18.4859,18.499 18.499,-18.499 18.4859,55.4839 18.499,-18.499 18.486,36.9849 18.4989,-73.9698 18.486,55.4839 18.499,-55.4839 18.4859,36.9849 0,-110.9548 18.499,0" /><path
+ inkscape:connector-curvature="0"
+ id="path262"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6636.6552,4184.9668 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path264"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6636.6552,4295.9216 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path266"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6266.806,4998.6351 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path268"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6266.806,5109.5899 0,-73.9699" /><path
+ inkscape:connector-curvature="0"
+ id="path270"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6636.6552,4998.6351 0,-73.9698" /><path
+ inkscape:connector-curvature="0"
+ id="path272"
+ style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 6636.6552,5109.5899 0,-73.9699" /><text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text276"
+ x="3550.4165"
+ y="-4462.3472"><tspan
+ x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
+ y="-4462.3472"
+ sodipodi:role="line"
+ id="tspan278">1st field</tspan><tspan
+ x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan280">1456783231231131022</tspan><tspan
+ x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan282">1456783262562462322</tspan><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan284">308</tspan><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan286">621</tspan><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan288">309</tspan><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan290">622</tspan><tspan
+ x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
+ y="-3648.6792"
+ sodipodi:role="line"
+ id="tspan292">2nd field</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:61.64153671px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text294"
+ x="2734.751"
+ y="-4037.021"><tspan
+ x="2734.751 2755.2776 2789.5503"
+ y="-4037.021"
+ sodipodi:role="line"
+ id="tspan296">(1)</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text298"
+ x="4951.772"
+ y="-4129.4834"><tspan
+ x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan300">765432313312311</tspan><tspan
+ x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan302">336335321320319318317316315314313312311</tspan><tspan
+ x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan304">182223309</tspan><tspan
+ x="842.30634 883.43323 924.56055"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan306">309</tspan><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan308">310</tspan><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan310">310</tspan><tspan
+ x="6410.605 6451.7319"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan312">24</tspan><tspan
+ x="6390.041 6431.1685 6472.2954"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan314">337</tspan><tspan
+ x="6040.7559 6081.8833"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan316">23</tspan><tspan
+ x="6040.7559 6081.8833"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan318">23</tspan><tspan
+ x="6410.605 6451.7324"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan320">24</tspan><tspan
+ x="6410.605 6451.7324"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan322">24</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3671"
+ x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
+ y="-4462.3472"><tspan
+ x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
+ y="-4462.3472"
+ sodipodi:role="line"
+ id="tspan3673">1st field</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3675"
+ x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
+ y="-4943.1509"><tspan
+ x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3677">1456783231231131022</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3679"
+ x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
+ y="-5054.1064"><tspan
+ x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3681">1456783262562462322</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3683"
+ x="842.29962 883.42694 924.55408"
+ y="-4943.1509"><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3685">308</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3687"
+ x="842.29962 883.42694 924.55408"
+ y="-5054.1064"><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3689">621</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3691"
+ x="1212.1489 1253.276 1294.4033"
+ y="-4943.1509"><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3693">309</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3695"
+ x="1212.1489 1253.276 1294.4033"
+ y="-5054.1064"><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3697">622</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text3699"
+ x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
+ y="-3648.6792"><tspan
+ x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
+ y="-3648.6792"
+ sodipodi:role="line"
+ id="tspan3701">2nd field</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4083"
+ x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4129.4834"><tspan
+ x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4085">765432313312311</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4087"
+ x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4240.4385"><tspan
+ x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4089">336335321320319318317316315314313312311</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4091"
+ x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
+ y="-4129.4834"><tspan
+ x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4093">182223309</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4095"
+ x="842.30634 883.43323 924.56055"
+ y="-4240.4385"><tspan
+ x="842.30634 883.43323 924.56055"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4097">309</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4099"
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4129.4834"><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4101">310</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4103"
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4240.4385"><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4105">310</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4107"
+ x="6410.605 6451.7319"
+ y="-4129.4834"><tspan
+ x="6410.605 6451.7319"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4109">24</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4111"
+ x="6390.041 6431.1685 6472.2954"
+ y="-4240.4385"><tspan
+ x="6390.041 6431.1685 6472.2954"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4113">337</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4115"
+ x="6040.7559 6081.8833"
+ y="-4943.1504"><tspan
+ x="6040.7559 6081.8833"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan4117">23</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4119"
+ x="6040.7559 6081.8833"
+ y="-5054.106"><tspan
+ x="6040.7559 6081.8833"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan4121">23</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4123"
+ x="6410.605 6451.7324"
+ y="-4943.1504"><tspan
+ x="6410.605 6451.7324"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan4125">24</tspan></text>
+<text
+ transform="scale(1,-1)"
+ style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ id="text4127"
+ x="6410.605 6451.7324"
+ y="-5054.106"><tspan
+ x="6410.605 6451.7324"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan4129">24</tspan></text>
+</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vbi_hsync.svg b/Documentation/media/uapi/v4l/vbi_hsync.svg
new file mode 100644
index 000000000000..4d5c0b4f146e
--- /dev/null
+++ b/Documentation/media/uapi/v4l/vbi_hsync.svg
@@ -0,0 +1,313 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ xml:space="preserve"
+ width="192.39857mm"
+ height="146.83536mm"
+ viewBox="0 0 681.72724 520.28277"
+ sodipodi:docname="vbi_hsync.svg"><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="997"
+ id="namedview4"
+ showgrid="false"
+ inkscape:zoom="1.5350601"
+ inkscape:cx="131.95463"
+ inkscape:cy="428.88132"
+ inkscape:window-x="1920"
+ inkscape:window-y="30"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g10"
+ units="mm"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" /><metadata
+ id="metadata8"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ id="defs6"><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath30"><path
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 187.184,836.05 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z m 689.189,-19.278 0,19.278 -48.516,0 38.556,-9.639 -38.556,-9.639 48.516,0 z"
+ id="path32"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath52"><path
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 804.08,79.3887 0,19.2778 -48.516,0 38.556,-9.6389 -38.556,-9.6389 48.516,0 z m -703.647,19.2778 0,-19.2778 48.517,0 -38.556,9.6389 38.556,9.6389 -48.517,0 z"
+ id="path54"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath94"><path
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 471.535,195.057 0,19.278 -48.516,0 38.555,-9.639 -38.555,-9.639 48.516,0 z m -284.351,19.278 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z"
+ id="path96"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath></defs><g
+ id="g10"
+ inkscape:groupmode="layer"
+ inkscape:label="vbi_hsync"
+ transform="matrix(1.25,0,0,-1.25,-0.3625824,520.79867)"><g
+ id="g14"
+ transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path16"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 32.9604,580.617 4.04346,493.866" /><path
+ inkscape:connector-curvature="0"
+ id="path18"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 192.004,855.328 0,-665.091" /><path
+ inkscape:connector-curvature="0"
+ id="path20"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 466.715,392.656 0,-202.419" /><path
+ inkscape:connector-curvature="0"
+ id="path22"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 799.261,508.324 0,-433.7549" /><path
+ inkscape:connector-curvature="0"
+ id="path24"
+ style="fill:none;stroke:#000000;stroke-width:4.81949997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 857.095,537.241 231.335,0" /></g><g
+ id="g26"
+ transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
+ style=""><g
+ clip-path="url(#clipPath30)"
+ id="g28"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path34"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 871.553,826.411 -679.549,0" /></g></g><g
+ id="g36"
+ transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path38"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278" /><path
+ inkscape:connector-curvature="0"
+ id="path40"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278 z" /><path
+ inkscape:connector-curvature="0"
+ id="path42"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278" /><path
+ inkscape:connector-curvature="0"
+ id="path44"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278 z" /><path
+ inkscape:connector-curvature="0"
+ id="path46"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1073.97,493.866 28.92,86.751" /></g><g
+ id="g48"
+ transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
+ style=""><g
+ clip-path="url(#clipPath52)"
+ id="g50"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 105.253,89.0276 694.008,0" /></g></g><path
+ d="m 52.91205,34.475403 -13.891817,-3.472918 13.891817,-3.472918 0,6.945836"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path60"
+ inkscape:connector-curvature="0" /><path
+ d="m 52.91205,34.475403 -13.891817,-3.472918 13.891817,-3.472918 0,6.945836 z"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path62"
+ inkscape:connector-curvature="0" /><path
+ d="m 271.4765,27.529567 13.89182,3.472918 -13.89182,3.472918 0,-6.945836"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path64"
+ inkscape:connector-curvature="0" /><path
+ d="m 271.4765,27.529567 13.89182,3.472918 -13.89182,3.472918 0,-6.945836 z"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path66"
+ inkscape:connector-curvature="0" /><path
+ d="m 5.9113292,192.49483 31.2565888,0 0,-10.41887 26.046978,0 10.418863,-93.769765 88.560511,0 10.41887,93.769765 10.41886,0"
+ style="fill:none;stroke:#000000;stroke-width:1.73647714;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path68"
+ inkscape:connector-curvature="0" /><path
+ d="m 162.19427,88.306195 260.47086,0"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path70"
+ inkscape:connector-curvature="0" /><path
+ d="m 37.167918,182.07596 0,-156.282907"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path72"
+ inkscape:connector-curvature="0" /><path
+ d="m 235.12632,182.07596 52.09431,0 0,10.41887 20.83773,0 10.41886,208.3769 83.35162,0"
+ style="fill:none;stroke:#000000;stroke-width:1.73647714;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path74"
+ inkscape:connector-curvature="0" /><path
+ d="m 391.4089,192.49483 31.25623,0"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path76"
+ inkscape:connector-curvature="0" /><path
+ d="m 401.82884,400.87173 20.83629,0"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path78"
+ inkscape:connector-curvature="0" /><path
+ d="m 328.89608,192.49483 10.41887,208.3769"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path80"
+ inkscape:connector-curvature="0" /><path
+ d="m 349.73381,192.49483 10.41886,208.3769"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path82"
+ inkscape:connector-curvature="0" /><path
+ d="m 370.57262,192.49483 10.41634,208.3769"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path84"
+ inkscape:connector-curvature="0" /><path
+ d="m 396.61887,385.24541 10.41995,31.25623"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path86"
+ inkscape:connector-curvature="0" /><path
+ d="m 183.032,135.19126 52.09432,0 0,93.76977 -52.09432,0 0,-93.76977 z"
+ style="fill:none;stroke:#000000;stroke-width:1.73647714;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path88"
+ inkscape:connector-curvature="0" /><g
+ id="g90"
+ transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
+ style=""><g
+ clip-path="url(#clipPath94)"
+ id="g92"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path98"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 192.004,204.696 274.711,0" /></g></g><path
+ d="m 84.168639,76.151036 -13.891817,-3.472955 13.891817,-3.472954 0,6.945909"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path102"
+ inkscape:connector-curvature="0" /><path
+ d="m 84.168639,76.151036 -13.891817,-3.472955 13.891817,-3.472954 0,6.945909 z"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path104"
+ inkscape:connector-curvature="0" /><path
+ d="m 151.65975,69.205127 13.89146,3.472954 -13.89146,3.472955 0,-6.945909"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ id="path106"
+ inkscape:connector-curvature="0" /><path
+ d="m 151.65975,69.205127 13.89146,3.472954 -13.89146,3.472955 0,-6.945909 z"
+ style="fill:none;stroke:#000000;stroke-width:0.86823857;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path108"
+ inkscape:connector-curvature="0" /><text
+ id="text112"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="438.29504"
+ y="-187.28558"><tspan
+ id="tspan114"
+ sodipodi:role="line"
+ y="-187.28558"
+ x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan><tspan
+ id="tspan116"
+ sodipodi:role="line"
+ y="-83.096947"
+ x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan><tspan
+ id="tspan118"
+ sodipodi:role="line"
+ y="-395.66284"
+ x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
+<text
+ id="text120"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="159.88258"
+ y="-270.63647"><tspan
+ id="tspan122"
+ sodipodi:role="line"
+ y="-270.63647"
+ x="159.88258 172.61443 179.55339 186.49236 198.07812 209.66391">offset</tspan></text>
+<text
+ id="text124"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="46.973549"
+ y="-46.630745"><tspan
+ id="tspan126"
+ sodipodi:role="line"
+ y="-46.630745"
+ x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan><tspan
+ id="tspan128"
+ sodipodi:role="line"
+ y="-4.9552913"
+ x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
+<text
+ id="text3473"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319"
+ y="-46.630745"><tspan
+ id="tspan3475"
+ sodipodi:role="line"
+ y="-46.630745"
+ x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan></text>
+<text
+ id="text3477"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788"
+ y="-4.9552913"><tspan
+ id="tspan3479"
+ sodipodi:role="line"
+ y="-4.9552913"
+ x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
+<text
+ id="text3607"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234"
+ y="-187.28558"><tspan
+ id="tspan3609"
+ sodipodi:role="line"
+ y="-187.28558"
+ x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan></text>
+<text
+ id="text3611"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636"
+ y="-83.096947"><tspan
+ id="tspan3613"
+ sodipodi:role="line"
+ y="-83.096947"
+ x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan></text>
+<text
+ id="text3615"
+ style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
+ transform="scale(1,-1)"
+ x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533"
+ y="-395.66284"><tspan
+ id="tspan3617"
+ sodipodi:role="line"
+ y="-395.66284"
+ x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
+</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index 3145a9166bad..deb1f6fb473b 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -129,8 +129,8 @@ Selection targets and flags are documented in
.. _sel-const-adjust:
-.. figure:: vidioc-g-selection_files/constraints.*
- :alt: constraints.png
+.. figure:: constraints.*
+ :alt: constraints.pdf / constraints.svg
:align: center
Size adjustments with constraint flags.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection_files/constraints.png b/Documentation/media/uapi/v4l/vidioc-g-selection_files/constraints.png
deleted file mode 100644
index 20228d2c5504..000000000000
--- a/Documentation/media/uapi/v4l/vidioc-g-selection_files/constraints.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index 7abc1c9a261b..bc63b12efafd 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -304,10 +304,10 @@ bug. It is very helpful if you can tell where exactly it broke
With a hard freeze you probably doesn't find anything in the logfiles.
The only way to capture any kernel messages is to hook up a serial
console and let some terminal application log the messages. /me uses
-screen. See Documentation/serial-console.txt for details on setting
+screen. See Documentation/admin-guide/serial-console.rst for details on setting
up a serial console.
-Read Documentation/oops-tracing.txt to learn how to get any useful
+Read Documentation/admin-guide/oops-tracing.rst to learn how to get any useful
information out of a register+stack dump printed by the kernel on
protection faults (so-called "kernel oops").
diff --git a/Documentation/media/v4l-drivers/cafe_ccic.rst b/Documentation/media/v4l-drivers/cafe_ccic.rst
index b98eb3b7cb4a..94f0f58ebe37 100644
--- a/Documentation/media/v4l-drivers/cafe_ccic.rst
+++ b/Documentation/media/v4l-drivers/cafe_ccic.rst
@@ -3,8 +3,8 @@ The cafe_ccic driver
Author: Jonathan Corbet <corbet@lwn.net>
-Introdution
------------
+Introduction
+------------
"cafe_ccic" is a driver for the Marvell 88ALP01 "cafe" CMOS camera
controller. This is the controller found in first-generation OLPC systems,
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index 0d7cb955aa01..5de846d3ecc0 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -324,7 +324,7 @@ guarantee that the memory block contains only migratable pages.
Now, a boot option for making a memory block which consists of migratable pages
is supported. By specifying "kernelcore=" or "movablecore=" boot option, you can
create ZONE_MOVABLE...a zone which is just used for movable pages.
-(See also Documentation/kernel-parameters.txt)
+(See also Documentation/admin-guide/kernel-parameters.rst)
Assume the system has "TOTAL" amount of memory at boot time, this boot option
creates ZONE_MOVABLE as following.
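
As a rough illustration of the boot options mentioned above (the sizes are
arbitrary examples, not recommendations), one of them would simply be appended
to the kernel command line, e.g.:

    kernelcore=512M

kernelcore= specifies how much memory must remain usable for unmovable kernel
allocations, while movablecore= works the other way around and specifies the
amount of memory to place into ZONE_MOVABLE.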
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 8a8d3d96f6c6..ccf94677b240 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -32,7 +32,7 @@ compatible interfaces. Once found, it will create subfolders in
the /sys directories of each supported interface, e.g.
# ls /sys/class/net/eth0/batman_adv/
-# iface_status mesh_iface
+# elp_interval iface_status mesh_iface throughput_override
If an interface does not have the "batman_adv" subfolder it prob-
ably is not supported. Not supported interfaces are: loopback,
@@ -71,17 +71,19 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-#aggregated_ogms distributed_arp_table gw_sel_class orig_interval
-#ap_isolation fragmentation hop_penalty routing_algo
-#bonding gw_bandwidth isolation_mark vlan0
-#bridge_loop_avoidance gw_mode log_level
+# aggregated_ogms fragmentation isolation_mark routing_algo
+# ap_isolation gw_bandwidth log_level vlan0
+# bonding gw_mode multicast_mode
+# bridge_loop_avoidance gw_sel_class network_coding
+# distributed_arp_table hop_penalty orig_interval
There is a special folder for debugging information:
# ls /sys/kernel/debug/batman_adv/bat0/
-# bla_backbone_table log transtable_global
-# bla_claim_table originators transtable_local
-# gateways socket
+# bla_backbone_table log neighbors transtable_local
+# bla_claim_table mcast_flags originators
+# dat_cache nc socket
+# gateways nc_nodes transtable_global
Some of the files contain all sort of status information regard-
ing the mesh network. For example, you can view the table of
@@ -159,13 +161,16 @@ file in debugfs
The additional debug output is by default disabled. It can be en-
abled during run time. Following log_levels are defined:
-0 - All debug output disabled
-1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable messages related to route added / changed / deleted
-4 - Enable messages related to translation table operations
-8 - Enable messages related to bridge loop avoidance
-16 - Enable messaged related to DAT, ARP snooping and parsing
-31 - Enable all messages
+ 0 - All debug output disabled
+ 1 - Enable messages related to routing / flooding / broadcasting
+ 2 - Enable messages related to route added / changed / deleted
+ 4 - Enable messages related to translation table operations
+ 8 - Enable messages related to bridge loop avoidance
+ 16 - Enable messages related to DAT, ARP snooping and parsing
+ 32 - Enable messages related to network coding
+ 64 - Enable messages related to multicast
+128 - Enable messages related to throughput meter
+255 - Enable all messages
The debug output can be changed at runtime using the file
/sys/class/net/bat0/mesh/log_level. e.g.
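
As an illustrative sketch only (the interface name bat0 and the chosen bits
are examples): the log_level values above form a bitmask and can be added
together, so enabling the routing/flooding messages together with the
route-change messages (1 + 2) could look like:

    # echo 3 > /sys/class/net/bat0/mesh/log_level

Writing 0 disables the additional debug output again.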
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07cf1a9a..63912ef34606 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
Switch tagging protocols
------------------------
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
well. The different protocols are implemented in:
net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
net/dsa/tag_dsa.c: Marvell's original DSA tag
net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
The exact format of the tag protocol is vendor specific, but in general, they
all contain something which:
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index aa69ccc481db..c4114346f054 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -4,20 +4,20 @@
Introduction
============
-The IEEE 802.15.4 working group focuses on standardization of bottom
-two layers: Medium Access Control (MAC) and Physical (PHY). And there
+The IEEE 802.15.4 working group focuses on standardization of the bottom
+two layers: Medium Access Control (MAC) and Physical access (PHY). And there
are mainly two options available for upper layers:
- ZigBee - proprietary protocol from the ZigBee Alliance
- 6LoWPAN - IPv6 networking over low rate personal area networks
-The linux-wpan project goal is to provide a complete implementation
+The goal of the Linux-wpan project is to provide a complete implementation
of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
of protocols for organizing Low-Rate Wireless Personal Area Networks.
The stack is composed of three main parts:
- IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API,
- the generic Linux networking stack to transfer IEEE 802.15.4 messages
- and a special protocol over genetlink for configuration/management
+ the generic Linux networking stack to transfer IEEE 802.15.4 data
+ messages and a special protocol over netlink for configuration/management
- MAC - provides access to shared channel and reliable data delivery
- PHY - represents device drivers
@@ -33,15 +33,13 @@ include/net/af_ieee802154.h header or in the special header
in the userspace package (see either http://wpan.cakelab.org/ or the
git tree at https://github.com/linux-wpan/wpan-tools).
-One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
-
Kernel side
=============
Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
1) 'HardMAC'. The MAC layer is implemented in the device itself, the device
- exports MLME and data API.
+ exports a management (e.g. MLME) and data API.
2) 'SoftMAC' or just radio. These types of devices are just radio transceivers
possibly with some kinds of acceleration like automatic CRC computation and
comparation, automagic ACK handling, address matching, etc.
@@ -106,7 +104,7 @@ Fake drivers
In addition there is a driver available which simulates a real device with
SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
-provides possibility to test and debug stack without usage of real hardware.
+provides a possibility to test and debug the stack without usage of real hardware.
See sources in drivers/net/ieee802154 folder for more details.
@@ -125,17 +123,15 @@ to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
relatively large IPv6 and UDP headers down to (in the best case) several bytes.
-In Semptember 2011 the standard update was published - [RFC6282].
+In September 2011 the standard update was published - [RFC6282].
It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
used in this Linux implementation.
All the code related to 6lowpan you may find in files: net/6lowpan/*
and net/ieee802154/6lowpan/*
-To setup 6lowpan interface you need (busybox release > 1.17.0):
-1. Add IEEE802.15.4 interface and initialize PANid;
+To setup a 6LoWPAN interface you need:
+1. Add IEEE802.15.4 interface and set channel and PAN ID;
2. Add 6lowpan interface by command like:
# ip link add link wpan0 name lowpan0 type lowpan
-3. Set MAC (if needs):
- # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
-4. Bring up 'lowpan0' interface
+3. Bring up 'lowpan0' interface
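
Purely as a sketch of the three steps above (device names, channel and PAN ID
are made-up examples, and the iwpan tool from the wpan-tools package mentioned
earlier is assumed), the sequence could look like:

    # iwpan phy phy0 set channel 0 26
    # iwpan dev wpan0 set pan_id 0xbeef
    # ip link add link wpan0 name lowpan0 type lowpan
    # ip link set wpan0 up
    # ip link set lowpan0 up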
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3db8c67d2c8d..7dd65c9cf707 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -610,8 +610,13 @@ tcp_syn_retries - INTEGER
with the current initial RTO of 1second. With this the final timeout
for an active TCP connection attempt will happen after 127seconds.
-tcp_timestamps - BOOLEAN
- Enable timestamps as defined in RFC1323.
+tcp_timestamps - INTEGER
+	Enable timestamps as defined in RFC1323.
+ 0: Disabled.
+ 1: Enable timestamps as defined in RFC1323 and use random offset for
+ each connection rather than only using the current time.
+ 2: Like 1, but without random offsets.
+ Default: 1
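
For reference, these knobs can also be set programmatically by writing the
value to the corresponding file under /proc/sys; a minimal C sketch,
equivalent to 'sysctl -w net.ipv4.tcp_timestamps=1' (the same pattern applies
to the other integer knobs in this file):

  #include <stdio.h>

  int set_tcp_timestamps(int val)
  {
      FILE *f = fopen("/proc/sys/net/ipv4/tcp_timestamps", "w");

      if (!f)
          return -1;
      fprintf(f, "%d\n", val);
      return fclose(f);
  }
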
tcp_min_tso_segs - INTEGER
Minimal number of segments per TSO frame.
@@ -967,6 +972,21 @@ igmp_qrv - INTEGER
Default: 2 (as specified by RFC2236 8.1)
Minimum: 1 (as specified by RFC6636 4.5)
+force_igmp_version - INTEGER
+ 0 - (default) No enforcement of an IGMP version; IGMPv1/v2 fallback
+     is allowed. The kernel will switch back to IGMPv3 mode once all
+     IGMPv1/v2 Querier Present timers have expired.
+ 1 - Enforce the use of IGMP version 1. Will also reply with an IGMPv1
+     report if an IGMPv2/v3 query is received.
+ 2 - Enforce the use of IGMP version 2. Will fall back to IGMPv1 if an
+     IGMPv1 query message is received. Will reply with a report if an
+     IGMPv3 query is received.
+ 3 - Enforce the use of IGMP version 3. Behaves the same as the
+     default 0.
+
+ Note: this is not the same as force_mld_version, because the Security
+ Considerations of IGMPv3 (RFC3376) do not clearly state that messages
+ of other versions may be ignored completely, as MLDv2 (RFC3810) does.
+ Keeping this value at its default of 0 is therefore recommended.
+
conf/interface/* changes special settings per interface (where
"interface" is the name of your network interface)
@@ -1714,6 +1734,15 @@ drop_unsolicited_na - BOOLEAN
By default this is turned off.
+enhanced_dad - BOOLEAN
+ Include a nonce option in the IPv6 neighbor solicitation messages used for
+ duplicate address detection per RFC7527. A received DAD NS will only signal
+ a duplicate address if the nonce is different. This avoids any false
+ detection of duplicates due to loopback of the NS messages that we send.
+ The nonce option will be sent on an interface unless both of
+ conf/{all,interface}/enhanced_dad are set to FALSE.
+ Default: TRUE
+
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index 4650a00ed012..9bc271cdc9a8 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask.
The following debug mask bits are available:
-PPPOL2TP_MSG_DEBUG verbose debug (if compiled in)
-PPPOL2TP_MSG_CONTROL userspace - kernel interface
-PPPOL2TP_MSG_SEQ sequence numbers handling
-PPPOL2TP_MSG_DATA data packets
+L2TP_MSG_DEBUG verbose debug (if compiled in)
+L2TP_MSG_CONTROL userspace - kernel interface
+L2TP_MSG_SEQ sequence numbers handling
+L2TP_MSG_DATA data packets
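
As a rough userspace sketch, assuming 'fd' is an already-established
PPPoX/L2TP session socket and that the SOL_PPPOL2TP/PPPOL2TP_SO_DEBUG socket
option from linux/if_pppol2tp.h is used to install the mask:

  #include <sys/socket.h>
  #include <linux/if_pppol2tp.h>    /* SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG */
  #include <linux/l2tp.h>           /* L2TP_MSG_* flags */

  int l2tp_enable_debug(int fd)
  {
      int mask = L2TP_MSG_CONTROL | L2TP_MSG_SEQ;

      return setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
                        &mask, sizeof(mask));
  }
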
If enabled, files under a l2tp debugfs directory can be used to dump
kernel state about L2TP tunnels and sessions. To access it, the
diff --git a/Documentation/networking/mac80211_hwsim/README b/Documentation/networking/mac80211_hwsim/README
index 24ac91d56698..3566a725d19c 100644
--- a/Documentation/networking/mac80211_hwsim/README
+++ b/Documentation/networking/mac80211_hwsim/README
@@ -60,7 +60,7 @@ modprobe mac80211_hwsim
hostapd hostapd.conf
# Run wpa_supplicant (station) for wlan1
-wpa_supplicant -Dwext -iwlan1 -c wpa_supplicant.conf
+wpa_supplicant -Dnl80211 -iwlan1 -c wpa_supplicant.conf
More test cases are available in hostap.git:
diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt
index 30409a36e95d..296ea00fd3eb 100644
--- a/Documentation/networking/netconsole.txt
+++ b/Documentation/networking/netconsole.txt
@@ -200,7 +200,7 @@ priority messages to the console. You can change this at runtime using:
or by specifying "debug" on the kernel command line at boot, to send
all kernel messages to the console. A specific value for this parameter
can also be set using the "loglevel" kernel boot option. See the
-dmesg(8) man page and Documentation/kernel-parameters.txt for details.
+dmesg(8) man page and Documentation/admin-guide/kernel-parameters.rst for details.
Netconsole was designed to be as instantaneous as possible, to
enable the logging of even the most critical kernel bugs. It works
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..247a30ba8e17 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
- http://git.kernel.org/?p=linux/kernel/git/davem/net.git
- http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
- http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
- http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.
@@ -136,14 +136,14 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
Q: I see a network patch and I think it should be backported to stable.
Should I request it via "stable@vger.kernel.org" like the references in
- the kernel's Documentation/stable_kernel_rules.txt file say?
+ the kernel's Documentation/process/stable-kernel-rules.rst file say?
A: No, not for networking. Check the stable queues as per above 1st to see
if it is already queued. If not, then send a mail to netdev, listing
the upstream commit ID and why you think it should be a stable candidate.
Before you jump to go do the above, do note that the normal stable rules
- in Documentation/stable_kernel_rules.txt still apply. So you need to
+ in Documentation/process/stable-kernel-rules.rst still apply. So you need to
explicitly indicate why it is a critical fix and exactly what users are
impacted. In addition, you need to convince yourself that you _really_
think it has been overlooked, vs. having been considered and rejected.
@@ -165,7 +165,7 @@ A: No. See above answer. In short, if you think it really belongs in
If you think there is some valid information relating to it being in
stable that does _not_ belong in the commit log, then use the three
- dash marker line as described in Documentation/SubmittingPatches to
+ dash marker line as described in Documentation/process/submitting-patches.rst to
temporarily embed that information into the patch that you send.
Q: Someone said that the comment style and coding convention is different
@@ -220,5 +220,5 @@ A: Attention to detail. Re-read your own work as if you were the
If it is your first patch, mail it to yourself so you can test apply
it to an unpatched tree to confirm infrastructure didn't mangle it.
- Finally, go back and read Documentation/SubmittingPatches to be
+ Finally, go back and read Documentation/process/submitting-patches.rst to be
sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..497d668288f9 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
- default 15
-
- This option is only relevant when "reliable connection tracking
- events" are used. Normally, ctnetlink is "lossy", that is,
- events are normally dropped when userspace listeners can't keep up.
-
- Userspace can request "reliable event mode". When this mode is
- active, the conntrack will only be destroyed after the event was
- delivered. If event delivery fails, the kernel periodically
- re-tries to send the event to userspace.
-
- This is the maximum interval the kernel should use when re-trying
- to deliver the destroy event.
-
- A higher number means there will be fewer delivery retries and it
- will take longer for a backlog to be processed.
-
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.
@@ -80,10 +62,13 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
protocols.
nf_conntrack_helper - BOOLEAN
- 0 - disabled
- not 0 - enabled (default)
+ 0 - disabled (default)
+ not 0 - enabled
Enable automatic conntrack helper assignment.
+ If disabled it is required to set up iptables rules to assign
+ helpers to connections. See the CT target description in the
+ iptables-extensions(8) man page for further information.
nf_conntrack_icmp_timeout - INTEGER (seconds)
default 30
@@ -111,6 +96,17 @@ nf_conntrack_max - INTEGER
Size of connection tracking table. Default value is
nf_conntrack_buckets value * 4.
+nf_conntrack_default_on - BOOLEAN
+ 0 - don't register conntrack in new net namespaces
+ 1 - register conntrack in new net namespaces (default)
+
+ This controls whether newly created network namespaces have connection
+ tracking enabled by default. It will be enabled automatically
+ regardless of this setting if the new net namespace requires
+ connection tracking, e.g. when NAT rules are created.
+ This setting is only visible in the initial user namespace; it has no
+ effect on existing namespaces.
+
nf_conntrack_tcp_be_liberal - BOOLEAN
0 - disabled (default)
not 0 - enabled
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 7ab9404a8412..16f90d817224 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -65,6 +65,83 @@ The MDIO bus
drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
+(RG)MII/electrical interface considerations
+
+ The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
+ electrical signal interface using a synchronous 125MHz clock signal and several
+ data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
+ between the clock line (RXC or TXC) and the data lines to let the PHY (clock
+ sink) have enough setup and hold times to sample the data lines correctly. The
+ PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
+ the PHY driver, and optionally the MAC driver, implement the required delay. The
+ values of phy_interface_t must be understood from the perspective of the PHY
+ device itself, leading to the following:
+
+ * PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any
+ internal delay by itself; it assumes that either the Ethernet MAC (if capable)
+ or the PCB traces insert the correct 1.5-2ns delay
+
+ * PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay
+ for the transmit data lines (TXD[3:0]) processed by the PHY device
+
+ * PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay
+ for the receive data lines (RXD[3:0]) processed by the PHY device
+
+ * PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for
+ both transmit AND receive data lines from/to the PHY device
+
+ Whenever possible, use the PHY side RGMII delay for these reasons:
+
+ * PHY devices may offer sub-nanosecond granularity in how they allow a
+ receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such
+ precision may be required to account for differences in PCB trace lengths
+
+ * PHY devices are typically qualified for a large range of applications
+ (industrial, medical, automotive...), and they provide a constant and
+ reliable delay across temperature/pressure/voltage ranges
+
+ * PHY device drivers in PHYLIB being reusable by nature, being able to
+ correctly configure a specified delay enables more designs with similar delay
+ requirements to operate correctly
+
+ For cases where the PHY is not capable of providing this delay, but the
+ Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
+ should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be
+ configured correctly in order to provide the required transmit and/or receive
+ side delay from the perspective of the PHY device. Conversely, if the Ethernet
+ MAC driver looks at the phy_interface_t value, for any other mode but
+ PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are
+ disabled.
+
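As a rough sketch of how an Ethernet MAC driver might act on this (the foo_*
structure and helpers are hypothetical placeholders; only the phy_interface_t
values come from the PHY library):

  /* foo_* names are placeholders for the MAC driver's own register helpers */
  static void foo_mac_apply_rgmii_delays(struct foo_mac_priv *priv,
                                         struct phy_device *phydev)
  {
      switch (phydev->interface) {
      case PHY_INTERFACE_MODE_RGMII:
          /* The PHY adds no delay: the MAC (or the board) must provide it */
          foo_mac_set_tx_delay(priv, true);
          foo_mac_set_rx_delay(priv, true);
          break;
      default:
          /* RGMII_ID/TXID/RXID and non-RGMII modes: keep MAC delays off */
          foo_mac_set_tx_delay(priv, false);
          foo_mac_set_rx_delay(priv, false);
          break;
      }
  }
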
+ In case neither the Ethernet MAC, nor the PHY are capable of providing the
+ required delays, as defined per the RGMII standard, several options may be
+ available:
+
+ * Some SoCs may offer a pin pad/mux/controller capable of configuring a given
+ set of pins' strength, delays, and voltage; this may be a suitable
+ option to insert the expected 2ns RGMII delay.
+
+ * Modifying the PCB design to include a fixed delay (e.g: using a specifically
+ designed serpentine), which may not require software configuration at all.
+
+Common problems with RGMII delay mismatch
+
+ When there is an RGMII delay mismatch between the Ethernet MAC and the PHY, this
+ will most likely result in the clock and data line signals being unstable when
+ the PHY or MAC takes a snapshot of these signals to translate them into logical
+ 1 or 0 states and reconstruct the data being transmitted/received. Typical
+ symptoms include:
+
+ * Transmission/reception partially works, and there is frequent or occasional
+ packet loss observed
+
+ * Ethernet MAC may report some or all packets ingressing with an FCS/CRC error,
+ or just discard them all
+
+ * Switching to lower speeds such as 10/100Mbits/sec makes the problem go away
+ (since there is enough setup/hold time in that case)
+
+
Connecting to a PHY
Sometime during startup, the network driver needs to establish a connection
@@ -127,8 +204,9 @@ Letting the PHY Abstraction Layer do Everything
values pruned from them which don't make sense for your controller (a 10/100
controller may be connected to a gigabit capable PHY, so you would need to
mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
- for these bitfields. Note that you should not SET any bits, or the PHY may
- get put into an unsupported state.
+ for these bitfields. Note that you should not SET any bits, except the
+ SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get
+ put into an unsupported state.
Lastly, once the controller is ready to handle network traffic, you call
phy_start(phydev). This tells the PAL that you are ready, and configures the
@@ -139,6 +217,19 @@ Letting the PHY Abstraction Layer do Everything
When you want to disconnect from the network (even if just briefly), you call
phy_stop(phydev).
+Pause frames / flow control
+
+ The PHY does not participate directly in flow control/pause frames except by
+ making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in
+ MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC
+ controller supports such a thing. Since flow control/pause frames generation
+ involves the Ethernet MAC driver, it is recommended that this driver takes care
+ of properly indicating advertisement and support for such features by setting
+ the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done
+ either before or after phy_connect() and/or as a result of implementing the
+ ethtool::set_pauseparam feature.
+
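A minimal sketch of the above from a MAC driver's open/probe path, assuming
'ndev', 'phy_name' and foo_adjust_link() are the driver's own (hypothetical)
objects; only the phylib and ethtool symbols are real:

  phydev = phy_connect(ndev, phy_name, &foo_adjust_link,
                       PHY_INTERFACE_MODE_RGMII);
  if (IS_ERR(phydev))
      return PTR_ERR(phydev);

  /* Advertise that the MAC can generate/honor pause frames */
  phydev->supported |= SUPPORTED_Pause | SUPPORTED_AsymPause;
  phydev->advertising |= ADVERTISED_Pause | ADVERTISED_AsymPause;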
+
Keeping Close Tabs on the PAL
It is possible that the PAL's built-in state machine needs a little help to
@@ -251,39 +342,8 @@ Writing a PHY driver
PHY_BASIC_FEATURES, but you can look in include/mii.h for other
features.
- Each driver consists of a number of function pointers:
-
- soft_reset: perform a PHY software reset
- config_init: configures PHY into a sane state after a reset.
- For instance, a Davicom PHY requires descrambling disabled.
- probe: Allocate phy->priv, optionally refuse to bind.
- PHY may not have been reset or had fixups run yet.
- suspend/resume: power management
- config_aneg: Changes the speed/duplex/negotiation settings
- aneg_done: Determines the auto-negotiation result
- read_status: Reads the current speed/duplex/negotiation settings
- ack_interrupt: Clear a pending interrupt
- did_interrupt: Checks if the PHY generated an interrupt
- config_intr: Enable or disable interrupts
- remove: Does any driver take-down
- ts_info: Queries about the HW timestamping status
- match_phy_device: used for Clause 45 capable PHYs to match devices
- in package and ensure they are compatible
- hwtstamp: Set the PHY HW timestamping configuration
- rxtstamp: Requests a receive timestamp at the PHY level for a 'skb'
- txtsamp: Requests a transmit timestamp at the PHY level for a 'skb'
- set_wol: Enable Wake-on-LAN at the PHY level
- get_wol: Get the Wake-on-LAN status at the PHY level
- link_change_notify: called to inform the core is about to change the
- link state, can be used to work around bogus PHY between state changes
- read_mmd_indirect: Read PHY MMD indirect register
- write_mmd_indirect: Write PHY MMD indirect register
- module_info: Get the size and type of an EEPROM contained in an plug-in
- module
- module_eeprom: Get EEPROM information of a plug-in module
- get_sset_count: Get number of strings sets that get_strings will count
- get_strings: Get strings from requested objects (statistics)
- get_stats: Get the extended statistics from the PHY device
+ Each driver consists of a number of function pointers, documented
+ in include/linux/phy.h under the phy_driver structure.
Of these, only config_aneg and read_status are required to be
assigned by the driver code. The rest are optional. Also, it is
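
For illustration only, a minimal (hypothetical) PHY driver that relies on the
generic phylib helpers could be declared roughly as follows:

  static struct phy_driver foo_phy_driver[] = { {
      .phy_id       = 0x00112233,       /* hypothetical PHY ID */
      .phy_id_mask  = 0xfffffff0,
      .name         = "Foo example PHY",
      .features     = PHY_BASIC_FEATURES,
      .config_aneg  = genphy_config_aneg,   /* required */
      .read_status  = genphy_read_status,   /* required */
  } };
  module_phy_driver(foo_phy_driver);
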
@@ -347,3 +407,22 @@ Board Fixups
The stubs set one of the two matching criteria, and set the other one to
match anything.
+ When phy_register_fixup() or *_for_uid()/*_for_id() is called from a module,
+ the fixup must be unregistered and any allocated memory freed when the module
+ is unloaded.
+
+ Call one of the following functions before unloading the module.
+
+ int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_id(const char *phy_id);
+
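A rough sketch of pairing registration and unregistration in a module; the
PHY UID/mask values and the foo_phy_fixup() callback are placeholders:

  static int __init foo_fixup_init(void)
  {
      return phy_register_fixup_for_uid(0x00112233, 0xfffffff0,
                                        foo_phy_fixup);
  }

  static void __exit foo_fixup_exit(void)
  {
      phy_unregister_fixup_for_uid(0x00112233, 0xfffffff0);
  }

  module_init(foo_fixup_init);
  module_exit(foo_fixup_exit);
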
+Standards
+
+ IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two:
+ http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf
+
+ RGMII v1.3:
+ http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf
+
+ RGMII v2.0:
+ http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf
diff --git a/Documentation/networking/seg6-sysctl.txt b/Documentation/networking/seg6-sysctl.txt
new file mode 100644
index 000000000000..bdbde23b19cb
--- /dev/null
+++ b/Documentation/networking/seg6-sysctl.txt
@@ -0,0 +1,18 @@
+/proc/sys/net/conf/<iface>/seg6_* variables:
+
+seg6_enabled - BOOL
+ Accept or drop SR-enabled IPv6 packets on this interface.
+
+ Relevant packets are those with SRH present and DA = local.
+
+ 0 - disabled (default)
+ not 0 - enabled
+
+seg6_require_hmac - INTEGER
+ Define HMAC policy for ingress SR-enabled packets on this interface.
+
+ -1 - Ignore HMAC field
+ 0 - Accept SR packets without HMAC, validate SR packets with HMAC
+ 1 - Drop SR packets without HMAC, validate SR packets with HMAC
+
+ Default is 0.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index e226f8925c9e..2bb07078f535 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -28,8 +28,6 @@ CONFIG_STMMAC_PCI: is to enable the pci driver.
2) Driver parameters list:
debug: message level (0: no output, 16: all);
phyaddr: to manually provide the physical address to the PHY device;
- dma_rxsize: DMA rx ring size;
- dma_txsize: DMA tx ring size;
buf_sz: DMA buffer size;
tc: control the HW FIFO threshold;
watchdog: transmit timeout (in milliseconds);
@@ -40,31 +38,31 @@ CONFIG_STMMAC_PCI: is to enable the pci driver.
3) Command line options
Driver parameters can be also passed in command line by using:
- stmmaceth=dma_rxsize:128,dma_txsize:512
+	stmmaceth=watchdog:100,chain_mode:1
4) Driver information and notes
4.1) Transmit process
The xmit method is invoked when the kernel needs to transmit a packet; it sets
-the descriptors in the ring and informs the DMA engine that there is a packet
+the descriptors in the ring and informs the DMA engine, that there is a packet
ready to be transmitted.
By default, the driver sets the NETIF_F_SG bit in the features field of the
-net_device structure enabling the scatter-gather feature. This is true on
+net_device structure, enabling the scatter-gather feature. This is true on
chips and configurations where the checksum can be done in hardware.
-Once the controller has finished transmitting the packet, napi will be
+Once the controller has finished transmitting the packet, a timer will be
scheduled to release the transmit resources.
4.2) Receive process
When one or more packets are received, an interrupt happens. The interrupts
-are not queued so the driver has to scan all the descriptors in the ring during
+are not queued, so the driver has to scan all the descriptors in the ring during
the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work
+This is based on NAPI, so the interrupt handler signals only if there is work
to be done, and it exits.
Then the poll method will be scheduled at some future point.
The incoming packets are stored, by the DMA, in a list of pre-allocated socket
buffers in order to avoid the memcpy (zero-copy).
-4.3) Interrupt Mitigation
+4.3) Interrupt mitigation
The driver is able to mitigate the number of its DMA interrupts
using NAPI for the reception on chips older than the 3.50.
New chips have an HW RX-Watchdog used for this mitigation.
@@ -88,19 +86,20 @@ the list, hence creating the explicit chaining in the descriptor itself,
whereas such explicit chaining is not possible in RING mode.
4.5.1) Extended descriptors
- The extended descriptors give us information about the Ethernet payload
- when it is carrying PTP packets or TCP/UDP/ICMP over IP.
- These are not available on GMAC Synopsys chips older than the 3.50.
- At probe time the driver will decide if these can be actually used.
- This support also is mandatory for PTPv2 because the extra descriptors
- are used for saving the hardware timestamps and Extended Status.
+The extended descriptors give us information about the Ethernet payload
+when it is carrying PTP packets or TCP/UDP/ICMP over IP.
+These are not available on GMAC Synopsys chips older than the 3.50.
+At probe time the driver will decide if these can be actually used.
+This support also is mandatory for PTPv2 because the extra descriptors
+are used for saving the hardware timestamps and Extended Status.
4.6) Ethtool support
Ethtool is supported.
For example, driver statistics (including RMON), internal errors can be taken
using:
- # ethtool -S ethX command
+  # ethtool -S ethX
4.7) Jumbo and Segmentation Offloading
Jumbo frames are supported and tested for the GMAC.
@@ -153,8 +152,10 @@ Where:
o dma_cfg: internal DMA parameters
o pbl: the Programmable Burst Length is maximum number of beats to
be transferred in one DMA transaction.
- GMAC also enables the 4xPBL by default.
- o fixed_burst/mixed_burst/burst_len
+ GMAC also enables the 4xPBL by default. (8xPBL for GMAC 3.50 and newer)
+ o txpbl/rxpbl: GMAC and newer cores support independent DMA PBL values for tx/rx.
+ o pblx8: Enable 8xPBL (4xPBL for core rev < 3.50). Enabled by default.
+ o fixed_burst/mixed_burst/aal
o clk_csr: fixed CSR Clock range selection.
o has_gmac: uses the GMAC core.
o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -206,16 +207,24 @@ tuned according to the HW capabilities.
struct stmmac_dma_cfg {
int pbl;
+ int txpbl;
+ int rxpbl;
+ bool pblx8;
int fixed_burst;
- int burst_len_supported;
+ int mixed_burst;
+ bool aal;
};
Where:
- o pbl: Programmable Burst Length
+ o pbl: Programmable Burst Length (tx and rx)
+ o txpbl: Transmit Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA tx will use this value rather than pbl.
+ o rxpbl: Receive Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA rx will use this value rather than pbl.
+ o pblx8: Enable 8xPBL (4xPBL for core rev < 3.50). Enabled by default.
o fixed_burst: program the DMA to use the fixed burst mode
- o burst_len: this is the value we put in the register
- supported values are provided as macros in
- linux/stmmac.h header file.
+ o mixed_burst: program the DMA to use the mixed burst mode
+ o aal: Address-Aligned Beats
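
Purely as an illustration, a platform glue driver might populate this
structure along the following lines (the foo_ name and the values are
arbitrary examples, not recommendations):

  static struct stmmac_dma_cfg foo_dma_cfg = {
      .pbl         = 8,      /* used for both tx and rx unless overridden */
      .pblx8       = true,   /* 8xPBL (4xPBL on cores older than 3.50) */
      .fixed_burst = 1,
      .aal         = true,   /* request address-aligned beats */
  };
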
---
@@ -275,11 +284,11 @@ Please see the following document:
Documentation/devicetree/bindings/net/stmmac.txt
4.11) This is a summary of the content of some relevant files:
- o stmmac_main.c: to implement the main network device driver;
- o stmmac_mdio.c: to provide mdio functions;
- o stmmac_pci: this the PCI driver;
- o stmmac_platform.c: this the platform driver (OF supported)
- o stmmac_ethtool.c: to implement the ethtool support;
+ o stmmac_main.c: implements the main network device driver;
+ o stmmac_mdio.c: provides MDIO functions;
+ o stmmac_pci: this is the PCI driver;
+ o stmmac_platform.c: this is the platform driver (OF supported);
+ o stmmac_ethtool.c: implements the ethtool support;
o stmmac.h: private driver structure;
o common.h: common definitions and VFTs;
o mmc_core.c/mmc.h: Management MAC Counters;
@@ -381,12 +390,12 @@ In addition to the basic timestamp features mentioned in IEEE 1588-2002
Timestamps, new GMAC cores support the advanced timestamp features.
IEEE 1588-2008 that can be enabled when configure the Kernel.
-8) SGMII/RGMII supports
+8) SGMII/RGMII support
New GMAC devices provide own way to manage RGMII/SGMII.
This information is available at run-time by looking at the
HW capability register. This means that the stmmac can manage
-auto-negotiation and link status w/o using the PHYLIB stuff
+auto-negotiation and link status without using PHYLIB.
In fact, the HW provides a subset of extended registers to
restart the ANE, verify Full/Half duplex mode and Speed.
-Also thanks to these registers it is possible to look at the
+Thanks to these registers, it is possible to look at the
Auto-negotiated Link Partner Ability.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 671cccf0dcd2..96f50694a748 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -182,6 +182,16 @@ SOF_TIMESTAMPING_OPT_TSONLY:
the timestamp even if sysctl net.core.tstamp_allow_data is 0.
This option disables SOF_TIMESTAMPING_OPT_CMSG.
+SOF_TIMESTAMPING_OPT_STATS:
+
+ Optional stats that are obtained along with the transmit timestamps.
+ It must be used together with SOF_TIMESTAMPING_OPT_TSONLY. When the
+ transmit timestamp is available, the stats are available in a
+ separate control message of type SCM_TIMESTAMPING_OPT_STATS, as a
+ list of TLVs (struct nlattr), one per statistic type. These stats allow the
+ application to associate various transport layer stats with
+ the transmit timestamps, such as how long a certain block of
+ data was limited by the peer's receive window.
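
A rough sketch of consuming such a control message, assuming the caller has
already found a cmsg with cmsg_level SOL_SOCKET and cmsg_type
SCM_TIMESTAMPING_OPT_STATS on the socket's error queue (the TLV types
themselves come from the uapi headers):

  #include <linux/netlink.h>      /* struct nlattr, NLA_HDRLEN, NLA_ALIGN() */
  #include <sys/socket.h>         /* struct cmsghdr, CMSG_DATA(), CMSG_LEN() */
  #include <stdio.h>

  void dump_opt_stats(struct cmsghdr *cmsg)
  {
      struct nlattr *nla = (struct nlattr *)CMSG_DATA(cmsg);
      int len = (int)(cmsg->cmsg_len - CMSG_LEN(0));

      while (len >= (int)sizeof(*nla) &&
             nla->nla_len >= sizeof(*nla) && nla->nla_len <= len) {
          printf("stat type %u, %u byte(s) of payload\n",
                 (unsigned int)nla->nla_type,
                 (unsigned int)(nla->nla_len - NLA_HDRLEN));
          len -= NLA_ALIGN(nla->nla_len);
          nla = (struct nlattr *)((char *)nla + NLA_ALIGN(nla->nla_len));
      }
  }
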
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 97282da82b75..ad3dead052a4 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -364,7 +364,7 @@ steps you should take:
- The contents of your report will vary a lot depending upon the
problem. If it's a kernel crash then you should refer to the
- REPORTING-BUGS file.
+ admin-guide/reporting-bugs.rst file.
But for most problems it is useful to provide the following:
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
deleted file mode 100644
index f3ac05cc23e4..000000000000
--- a/Documentation/oops-tracing.txt
+++ /dev/null
@@ -1,279 +0,0 @@
-NOTE: ksymoops is useless on 2.6. Please use the Oops in its original format
-(from dmesg, etc). Ignore any references in this or other docs to "decoding
-the Oops" or "running it through ksymoops". If you post an Oops from 2.6 that
-has been run through ksymoops, people will just tell you to repost it.
-
-Quick Summary
--------------
-
-Find the Oops and send it to the maintainer of the kernel area that seems to be
-involved with the problem. Don't worry too much about getting the wrong person.
-If you are unsure send it to the person responsible for the code relevant to
-what you were doing. If it occurs repeatably try and describe how to recreate
-it. That's worth even more than the oops.
-
-If you are totally stumped as to whom to send the report, send it to
-linux-kernel@vger.kernel.org. Thanks for your help in making Linux as
-stable as humanly possible.
-
-Where is the Oops?
-----------------------
-
-Normally the Oops text is read from the kernel buffers by klogd and
-handed to syslogd which writes it to a syslog file, typically
-/var/log/messages (depends on /etc/syslog.conf). Sometimes klogd dies,
-in which case you can run dmesg > file to read the data from the kernel
-buffers and save it. Or you can cat /proc/kmsg > file, however you
-have to break in to stop the transfer, kmsg is a "never ending file".
-If the machine has crashed so badly that you cannot enter commands or
-the disk is not available then you have three options :-
-
-(1) Hand copy the text from the screen and type it in after the machine
- has restarted. Messy but it is the only option if you have not
- planned for a crash. Alternatively, you can take a picture of
- the screen with a digital camera - not nice, but better than
- nothing. If the messages scroll off the top of the console, you
- may find that booting with a higher resolution (eg, vga=791)
- will allow you to read more of the text. (Caveat: This needs vesafb,
- so won't help for 'early' oopses)
-
-(2) Boot with a serial console (see Documentation/serial-console.txt),
- run a null modem to a second machine and capture the output there
- using your favourite communication program. Minicom works well.
-
-(3) Use Kdump (see Documentation/kdump/kdump.txt),
- extract the kernel ring buffer from old memory with using dmesg
- gdbmacro in Documentation/kdump/gdbmacros.txt.
-
-
-Full Information
-----------------
-
-NOTE: the message from Linus below applies to 2.4 kernel. I have preserved it
-for historical reasons, and because some of the information in it still
-applies. Especially, please ignore any references to ksymoops.
-
-From: Linus Torvalds <torvalds@osdl.org>
-
-How to track down an Oops.. [originally a mail to linux-kernel]
-
-The main trick is having 5 years of experience with those pesky oops
-messages ;-)
-
-Actually, there are things you can do that make this easier. I have two
-separate approaches:
-
- gdb /usr/src/linux/vmlinux
- gdb> disassemble <offending_function>
-
-That's the easy way to find the problem, at least if the bug-report is
-well made (like this one was - run through ksymoops to get the
-information of which function and the offset in the function that it
-happened in).
-
-Oh, it helps if the report happens on a kernel that is compiled with the
-same compiler and similar setups.
-
-The other thing to do is disassemble the "Code:" part of the bug report:
-ksymoops will do this too with the correct tools, but if you don't have
-the tools you can just do a silly program:
-
- char str[] = "\xXX\xXX\xXX...";
- main(){}
-
-and compile it with gcc -g and then do "disassemble str" (where the "XX"
-stuff are the values reported by the Oops - you can just cut-and-paste
-and do a replace of spaces to "\x" - that's what I do, as I'm too lazy
-to write a program to automate this all).
-
-Alternatively, you can use the shell script in scripts/decodecode.
-Its usage is: decodecode < oops.txt
-
-The hex bytes that follow "Code:" may (in some architectures) have a series
-of bytes that precede the current instruction pointer as well as bytes at and
-following the current instruction pointer. In some cases, one instruction
-byte or word is surrounded by <> or (), as in "<86>" or "(f00d)". These
-<> or () markings indicate the current instruction pointer. Example from
-i386, split into multiple lines for readability:
-
-Code: f9 0f 8d f9 00 00 00 8d 42 0c e8 dd 26 11 c7 a1 60 ea 2b f9 8b 50 08 a1
-64 ea 2b f9 8d 34 82 8b 1e 85 db 74 6d 8b 15 60 ea 2b f9 <8b> 43 04 39 42 54
-7e 04 40 89 42 54 8b 43 04 3b 05 00 f6 52 c0
-
-Finally, if you want to see where the code comes from, you can do
-
- cd /usr/src/linux
- make fs/buffer.s # or whatever file the bug happened in
-
-and then you get a better idea of what happens than with the gdb
-disassembly.
-
-Now, the trick is just then to combine all the data you have: the C
-sources (and general knowledge of what it _should_ do), the assembly
-listing and the code disassembly (and additionally the register dump you
-also get from the "oops" message - that can be useful to see _what_ the
-corrupted pointers were, and when you have the assembler listing you can
-also match the other registers to whatever C expressions they were used
-for).
-
-Essentially, you just look at what doesn't match (in this case it was the
-"Code" disassembly that didn't match with what the compiler generated).
-Then you need to find out _why_ they don't match. Often it's simple - you
-see that the code uses a NULL pointer and then you look at the code and
-wonder how the NULL pointer got there, and if it's a valid thing to do
-you just check against it..
-
-Now, if somebody gets the idea that this is time-consuming and requires
-some small amount of concentration, you're right. Which is why I will
-mostly just ignore any panic reports that don't have the symbol table
-info etc looked up: it simply gets too hard to look it up (I have some
-programs to search for specific patterns in the kernel code segment, and
-sometimes I have been able to look up those kinds of panics too, but
-that really requires pretty good knowledge of the kernel just to be able
-to pick out the right sequences etc..)
-
-_Sometimes_ it happens that I just see the disassembled code sequence
-from the panic, and I know immediately where it's coming from. That's when
-I get worried that I've been doing this for too long ;-)
-
- Linus
-
-
----------------------------------------------------------------------------
-Notes on Oops tracing with klogd:
-
-In order to help Linus and the other kernel developers there has been
-substantial support incorporated into klogd for processing protection
-faults. In order to have full support for address resolution at least
-version 1.3-pl3 of the sysklogd package should be used.
-
-When a protection fault occurs the klogd daemon automatically
-translates important addresses in the kernel log messages to their
-symbolic equivalents. This translated kernel message is then
-forwarded through whatever reporting mechanism klogd is using. The
-protection fault message can be simply cut out of the message files
-and forwarded to the kernel developers.
-
-Two types of address resolution are performed by klogd. The first is
-static translation and the second is dynamic translation. Static
-translation uses the System.map file in much the same manner that
-ksymoops does. In order to do static translation the klogd daemon
-must be able to find a system map file at daemon initialization time.
-See the klogd man page for information on how klogd searches for map
-files.
-
-Dynamic address translation is important when kernel loadable modules
-are being used. Since memory for kernel modules is allocated from the
-kernel's dynamic memory pools there are no fixed locations for either
-the start of the module or for functions and symbols in the module.
-
-The kernel supports system calls which allow a program to determine
-which modules are loaded and their location in memory. Using these
-system calls the klogd daemon builds a symbol table which can be used
-to debug a protection fault which occurs in a loadable kernel module.
-
-At the very minimum klogd will provide the name of the module which
-generated the protection fault. There may be additional symbolic
-information available if the developer of the loadable module chose to
-export symbol information from the module.
-
-Since the kernel module environment can be dynamic there must be a
-mechanism for notifying the klogd daemon when a change in module
-environment occurs. There are command line options available which
-allow klogd to signal the currently executing daemon that symbol
-information should be refreshed. See the klogd manual page for more
-information.
-
-A patch is included with the sysklogd distribution which modifies the
-modules-2.0.0 package to automatically signal klogd whenever a module
-is loaded or unloaded. Applying this patch provides essentially
-seamless support for debugging protection faults which occur with
-kernel loadable modules.
-
-The following is an example of a protection fault in a loadable module
-processed by klogd:
----------------------------------------------------------------------------
-Aug 29 09:51:01 blizard kernel: Unable to handle kernel paging request at virtual address f15e97cc
-Aug 29 09:51:01 blizard kernel: current->tss.cr3 = 0062d000, %cr3 = 0062d000
-Aug 29 09:51:01 blizard kernel: *pde = 00000000
-Aug 29 09:51:01 blizard kernel: Oops: 0002
-Aug 29 09:51:01 blizard kernel: CPU: 0
-Aug 29 09:51:01 blizard kernel: EIP: 0010:[oops:_oops+16/3868]
-Aug 29 09:51:01 blizard kernel: EFLAGS: 00010212
-Aug 29 09:51:01 blizard kernel: eax: 315e97cc ebx: 003a6f80 ecx: 001be77b edx: 00237c0c
-Aug 29 09:51:01 blizard kernel: esi: 00000000 edi: bffffdb3 ebp: 00589f90 esp: 00589f8c
-Aug 29 09:51:01 blizard kernel: ds: 0018 es: 0018 fs: 002b gs: 002b ss: 0018
-Aug 29 09:51:01 blizard kernel: Process oops_test (pid: 3374, process nr: 21, stackpage=00589000)
-Aug 29 09:51:01 blizard kernel: Stack: 315e97cc 00589f98 0100b0b4 bffffed4 0012e38e 00240c64 003a6f80 00000001
-Aug 29 09:51:01 blizard kernel: 00000000 00237810 bfffff00 0010a7fa 00000003 00000001 00000000 bfffff00
-Aug 29 09:51:01 blizard kernel: bffffdb3 bffffed4 ffffffda 0000002b 0007002b 0000002b 0000002b 00000036
-Aug 29 09:51:01 blizard kernel: Call Trace: [oops:_oops_ioctl+48/80] [_sys_ioctl+254/272] [_system_call+82/128]
-Aug 29 09:51:01 blizard kernel: Code: c7 00 05 00 00 00 eb 08 90 90 90 90 90 90 90 90 89 ec 5d c3
----------------------------------------------------------------------------
-
-Dr. G.W. Wettstein Oncology Research Div. Computing Facility
-Roger Maris Cancer Center INTERNET: greg@wind.rmcc.com
-820 4th St. N.
-Fargo, ND 58122
-Phone: 701-234-7556
-
-
----------------------------------------------------------------------------
-Tainted kernels:
-
-Some oops reports contain the string 'Tainted: ' after the program
-counter. This indicates that the kernel has been tainted by some
-mechanism. The string is followed by a series of position-sensitive
-characters, each representing a particular tainted value.
-
- 1: 'G' if all modules loaded have a GPL or compatible license, 'P' if
- any proprietary module has been loaded. Modules without a
- MODULE_LICENSE or with a MODULE_LICENSE that is not recognised by
- insmod as GPL compatible are assumed to be proprietary.
-
- 2: 'F' if any module was force loaded by "insmod -f", ' ' if all
- modules were loaded normally.
-
- 3: 'S' if the oops occurred on an SMP kernel running on hardware that
- hasn't been certified as safe to run multiprocessor.
- Currently this occurs only on various Athlons that are not
- SMP capable.
-
- 4: 'R' if a module was force unloaded by "rmmod -f", ' ' if all
- modules were unloaded normally.
-
- 5: 'M' if any processor has reported a Machine Check Exception,
- ' ' if no Machine Check Exceptions have occurred.
-
- 6: 'B' if a page-release function has found a bad page reference or
- some unexpected page flags.
-
- 7: 'U' if a user or user application specifically requested that the
- Tainted flag be set, ' ' otherwise.
-
- 8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
-
- 9: 'A' if the ACPI table has been overridden.
-
- 10: 'W' if a warning has previously been issued by the kernel.
- (Though some warnings may set more specific taint flags.)
-
- 11: 'C' if a staging driver has been loaded.
-
- 12: 'I' if the kernel is working around a severe bug in the platform
- firmware (BIOS or similar).
-
- 13: 'O' if an externally-built ("out-of-tree") module has been loaded.
-
- 14: 'E' if an unsigned module has been loaded in a kernel supporting
- module signature.
-
- 15: 'L' if a soft lockup has previously occurred on the system.
-
- 16: 'K' if the kernel has been live patched.
-
-The primary reason for the 'Tainted: ' string is to tell kernel
-debuggers if this is a clean kernel or if anything unusual has
-occurred. Tainting is permanent: even if an offending module is
-unloaded, the tainted value remains to indicate that the kernel is not
-trustworthy.
diff --git a/Documentation/parport.txt b/Documentation/parport.txt
deleted file mode 100644
index c208e4366c03..000000000000
--- a/Documentation/parport.txt
+++ /dev/null
@@ -1,267 +0,0 @@
-The `parport' code provides parallel-port support under Linux. This
-includes the ability to share one port between multiple device
-drivers.
-
-You can pass parameters to the parport code to override its automatic
-detection of your hardware. This is particularly useful if you want
-to use IRQs, since in general these can't be autoprobed successfully.
-By default IRQs are not used even if they _can_ be probed. This is
-because there are a lot of people using the same IRQ for their
-parallel port and a sound card or network card.
-
-The parport code is split into two parts: generic (which deals with
-port-sharing) and architecture-dependent (which deals with actually
-using the port).
-
-
-Parport as modules
-==================
-
-If you load the parport code as a module, say
-
- # insmod parport
-
-to load the generic parport code. You then must load the
-architecture-dependent code with (for example):
-
- # insmod parport_pc io=0x3bc,0x378,0x278 irq=none,7,auto
-
-to tell the parport code that you want three PC-style ports, one at
-0x3bc with no IRQ, one at 0x378 using IRQ 7, and one at 0x278 with an
-auto-detected IRQ. Currently, PC-style (parport_pc), Sun `bpp',
-Amiga, Atari, and MFC3 hardware is supported.
-
-PCI parallel I/O card support comes from parport_pc. Base I/O
-addresses should not be specified for supported PCI cards since they
-are automatically detected.
-
-
-modprobe
---------
-
-If you use modprobe , you will find it useful to add lines as below to a
-configuration file in /etc/modprobe.d/ directory:.
-
- alias parport_lowlevel parport_pc
- options parport_pc io=0x378,0x278 irq=7,auto
-
-modprobe will load parport_pc (with the options "io=0x378,0x278 irq=7,auto")
-whenever a parallel port device driver (such as lp) is loaded.
-
-Note that these are example lines only! You shouldn't in general need
-to specify any options to parport_pc in order to be able to use a
-parallel port.
-
-
-Parport probe [optional]
--------------
-
-In 2.2 kernels there was a module called parport_probe, which was used
-for collecting IEEE 1284 device ID information. This has now been
-enhanced and now lives with the IEEE 1284 support. When a parallel
-port is detected, the devices that are connected to it are analysed,
-and information is logged like this:
-
- parport0: Printer, BJC-210 (Canon)
-
-The probe information is available from files in /proc/sys/dev/parport/.
-
-
-Parport linked into the kernel statically
-=========================================
-
-If you compile the parport code into the kernel, then you can use
-kernel boot parameters to get the same effect. Add something like the
-following to your LILO command line:
-
- parport=0x3bc parport=0x378,7 parport=0x278,auto,nofifo
-
-You can have many `parport=...' statements, one for each port you want
-to add. Adding `parport=0' to the kernel command-line will disable
-parport support entirely. Adding `parport=auto' to the kernel
-command-line will make parport use any IRQ lines or DMA channels that
-it auto-detects.
-
-
-Files in /proc
-==============
-
-If you have configured the /proc filesystem into your kernel, you will
-see a new directory entry: /proc/sys/dev/parport. In there will be a
-directory entry for each parallel port for which parport is
-configured. In each of those directories are a collection of files
-describing that parallel port.
-
-The /proc/sys/dev/parport directory tree looks like:
-
-parport
-|-- default
-| |-- spintime
-| `-- timeslice
-|-- parport0
-| |-- autoprobe
-| |-- autoprobe0
-| |-- autoprobe1
-| |-- autoprobe2
-| |-- autoprobe3
-| |-- devices
-| | |-- active
-| | `-- lp
-| | `-- timeslice
-| |-- base-addr
-| |-- irq
-| |-- dma
-| |-- modes
-| `-- spintime
-`-- parport1
- |-- autoprobe
- |-- autoprobe0
- |-- autoprobe1
- |-- autoprobe2
- |-- autoprobe3
- |-- devices
- | |-- active
- | `-- ppa
- | `-- timeslice
- |-- base-addr
- |-- irq
- |-- dma
- |-- modes
- `-- spintime
-
-
-File: Contents:
-
-devices/active A list of the device drivers using that port. A "+"
- will appear by the name of the device currently using
- the port (it might not appear against any). The
- string "none" means that there are no device drivers
- using that port.
-
-base-addr Parallel port's base address, or addresses if the port
- has more than one in which case they are separated
- with tabs. These values might not have any sensible
- meaning for some ports.
-
-irq Parallel port's IRQ, or -1 if none is being used.
-
-dma Parallel port's DMA channel, or -1 if none is being
- used.
-
-modes Parallel port's hardware modes, comma-separated,
- meaning:
-
- PCSPP PC-style SPP registers are available.
- TRISTATE Port is bidirectional.
- COMPAT Hardware acceleration for printers is
- available and will be used.
- EPP Hardware acceleration for EPP protocol
- is available and will be used.
- ECP Hardware acceleration for ECP protocol
- is available and will be used.
- DMA DMA is available and will be used.
-
- Note that the current implementation will only take
- advantage of COMPAT and ECP modes if it has an IRQ
- line to use.
-
-autoprobe Any IEEE-1284 device ID information that has been
- acquired from the (non-IEEE 1284.3) device.
-
-autoprobe[0-3] IEEE 1284 device ID information retrieved from
- daisy-chain devices that conform to IEEE 1284.3.
-
-spintime The number of microseconds to busy-loop while waiting
- for the peripheral to respond. You might find that
- adjusting this improves performance, depending on your
- peripherals. This is a port-wide setting, i.e. it
- applies to all devices on a particular port.
-
-timeslice The number of milliseconds that a device driver is
- allowed to keep a port claimed for. This is advisory,
- and driver can ignore it if it must.
-
-default/* The defaults for spintime and timeslice. When a new
- port is registered, it picks up the default spintime.
- When a new device is registered, it picks up the
- default timeslice.
-
-Device drivers
-==============
-
-Once the parport code is initialised, you can attach device drivers to
-specific ports. Normally this happens automatically; if the lp driver
-is loaded it will create one lp device for each port found. You can
-override this, though, by using parameters either when you load the lp
-driver:
-
- # insmod lp parport=0,2
-
-or on the LILO command line:
-
- lp=parport0 lp=parport2
-
-Both the above examples would inform lp that you want /dev/lp0 to be
-the first parallel port, and /dev/lp1 to be the _third_ parallel port,
-with no lp device associated with the second port (parport1). Note
-that this is different to the way older kernels worked; there used to
-be a static association between the I/O port address and the device
-name, so /dev/lp0 was always the port at 0x3bc. This is no longer the
-case - if you only have one port, it will default to being /dev/lp0,
-regardless of base address.
-
-Also:
-
- * If you selected the IEEE 1284 support at compile time, you can say
- `lp=auto' on the kernel command line, and lp will create devices
- only for those ports that seem to have printers attached.
-
- * If you give PLIP the `timid' parameter, either with `plip=timid' on
- the command line, or with `insmod plip timid=1' when using modules,
- it will avoid any ports that seem to be in use by other devices.
-
- * IRQ autoprobing works only for a few port types at the moment.
-
-Reporting printer problems with parport
-=======================================
-
-If you are having problems printing, please go through these steps to
-try to narrow down where the problem area is.
-
-When reporting problems with parport, really you need to give all of
-the messages that parport_pc spits out when it initialises. There are
-several code paths:
-
-o polling
-o interrupt-driven, protocol in software
-o interrupt-driven, protocol in hardware using PIO
-o interrupt-driven, protocol in hardware using DMA
-
-The kernel messages that parport_pc logs give an indication of which
-code path is being used. (They could be a lot better actually..)
-
-For normal printer protocol, having IEEE 1284 modes enabled or not
-should not make a difference.
-
-To turn off the 'protocol in hardware' code paths, disable
-CONFIG_PARPORT_PC_FIFO. Note that when they are enabled they are not
-necessarily _used_; it depends on whether the hardware is available,
-enabled by the BIOS, and detected by the driver.
-
-So, to start with, disable CONFIG_PARPORT_PC_FIFO, and load parport_pc
-with 'irq=none'. See if printing works then. It really should,
-because this is the simplest code path.
-
-If that works fine, try with 'io=0x378 irq=7' (adjust for your
-hardware), to make it use interrupt-driven in-software protocol.
-
-If _that_ works fine, then one of the hardware modes isn't working
-right. Enable CONFIG_PARPORT_PC_FIFO (no, it isn't a module option,
-and yes, it should be), set the port to ECP mode in the BIOS and note
-the DMA channel, and try with:
-
- io=0x378 irq=7 dma=none (for PIO)
- io=0x378 irq=7 dma=3 (for DMA)
---
-philb@gnu.org
-tim@cyberelk.net
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index ad04cc8097ed..7cb6085839f3 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -6,7 +6,7 @@ basic-pm-debugging.txt
- Debugging suspend and resume
charger-manager.txt
- Battery charger management.
-devices.txt
+admin-guide/devices.rst
- How drivers interact with system-wide power management
drivers-testing.txt
- Testing suspend and resume support in device drivers
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 8ba6625fdd63..73ddea39a9ce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -607,7 +607,9 @@ individually. Instead, a set of devices sharing a power resource can be put
into a low-power state together at the same time by turning off the shared
power resource. Of course, they also need to be put into the full-power state
together, by turning the shared power resource on. A set of devices with this
-property is often referred to as a power domain.
+property is often referred to as a power domain. A power domain may also be
+nested inside another power domain. The nested domain is referred to as the
+sub-domain of the parent domain.
Support for power domains is provided through the pm_domain field of struct
device. This field is a pointer to an object of type struct dev_pm_domain,
@@ -629,6 +631,16 @@ support for power domains into subsystem-level callbacks, for example by
modifying the platform bus type. Other platforms need not implement it or take
it into account in any way.
+Devices may be defined as IRQ-safe which indicates to the PM core that their
+runtime PM callbacks may be invoked with disabled interrupts (see
+Documentation/power/runtime_pm.txt for more information). If an IRQ-safe
+device belongs to a PM domain, the runtime PM of the domain will be
+disallowed, unless the domain itself is defined as IRQ-safe. However, it
+makes sense to define a PM domain as IRQ-safe only if all the devices in it
+are IRQ-safe. Moreover, if an IRQ-safe domain has a parent domain, the runtime
+PM of the parent is only allowed if the parent itself is IRQ-safe too with the
+additional restriction that all child domains of an IRQ-safe parent must also
+be IRQ-safe.
Device Low Power (suspend) States
---------------------------------
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 44558882aa60..85c746cbab2c 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -8,7 +8,7 @@ management. Based on previous work by Patrick Mochel <mochel@transmeta.com>
This document only covers the aspects of power management specific to PCI
devices. For general description of the kernel's interfaces related to device
-power management refer to Documentation/power/devices.txt and
+power management refer to Documentation/power/admin-guide/devices.rst and
Documentation/power/runtime_pm.txt.
---------------------------------------------------------------------------
@@ -417,7 +417,7 @@ pm->runtime_idle() callback.
2.4. System-Wide Power Transitions
----------------------------------
There are a few different types of system-wide power transitions, described in
-Documentation/power/devices.txt. Each of them requires devices to be handled
+Documentation/power/admin-guide/devices.rst. Each of them requires devices to be handled
in a specific way and the PM core executes subsystem-level power management
callbacks for this purpose. They are executed in phases such that each phase
involves executing the same subsystem-level callback for every device belonging
@@ -623,7 +623,7 @@ System restore requires a hibernation image to be loaded into memory and the
pre-hibernation memory contents to be restored before the pre-hibernation system
activity can be resumed.
-As described in Documentation/power/devices.txt, the hibernation image is loaded
+As described in Documentation/power/admin-guide/devices.rst, the hibernation image is loaded
into memory by a fresh instance of the kernel, called the boot kernel, which in
turn is loaded and run by a boot loader in the usual way. After the boot kernel
has loaded the image, it needs to replace its own code and data with the code
@@ -677,7 +677,7 @@ controlling the runtime power management of their devices.
At the time of this writing there are two ways to define power management
callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/power/devices.txt, and the
+dev_pm_ops structure described in Documentation/power/admin-guide/devices.rst, and the
"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
.resume() callbacks from struct pci_driver are used. The legacy approach,
however, doesn't allow one to define runtime power management callbacks and is
@@ -1046,5 +1046,5 @@ PCI Local Bus Specification, Rev. 3.0
PCI Bus Power Management Interface Specification, Rev. 1.2
Advanced Configuration and Power Interface (ACPI) Specification, Rev. 3.0b
PCI Express Base Specification, Rev. 2.0
-Documentation/power/devices.txt
+Documentation/power/admin-guide/devices.rst
Documentation/power/runtime_pm.txt
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 1fd1fbe9ce95..4870980e967e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -674,7 +674,7 @@ left in runtime suspend. If that happens, the PM core will not execute any
system suspend and resume callbacks for all of those devices, except for the
complete callback, which is then entirely responsible for handling the device
as appropriate. This only applies to system suspend transitions that are not
-related to hibernation (see Documentation/power/devices.txt for more
+related to hibernation (see Documentation/power/admin-guide/devices.rst for more
information).
The PM core does its best to reduce the probability of race conditions between
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 50f3ef9177c1..8a39ce45d8a0 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -8,25 +8,43 @@ for each state.
The states are represented by strings that can be read or written to the
/sys/power/state file. Those strings may be "mem", "standby", "freeze" and
-"disk", where the last one always represents hibernation (Suspend-To-Disk) and
-the meaning of the remaining ones depends on the relative_sleep_states command
-line argument.
-
-For relative_sleep_states=1, the strings "mem", "standby" and "freeze" label the
-available non-hibernation sleep states from the deepest to the shallowest,
-respectively. In that case, "mem" is always present in /sys/power/state,
-because there is at least one non-hibernation sleep state in every system. If
-the given system supports two non-hibernation sleep states, "standby" is present
-in /sys/power/state in addition to "mem". If the system supports three
-non-hibernation sleep states, "freeze" will be present in /sys/power/state in
-addition to "mem" and "standby".
-
-For relative_sleep_states=0, which is the default, the following descriptions
-apply.
-
-state: Suspend-To-Idle
+"disk", where the last three always represent Power-On Suspend (if supported),
+Suspend-To-Idle and hibernation (Suspend-To-Disk), respectively.
+
+The meaning of the "mem" string is controlled by the /sys/power/mem_sleep file.
+It contains strings representing the available modes of system suspend that may
+be triggered by writing "mem" to /sys/power/state. These modes are "s2idle"
+(Suspend-To-Idle), "shallow" (Power-On Suspend) and "deep" (Suspend-To-RAM).
+The "s2idle" mode is always available, while the other ones are only available
+if supported by the platform (if not supported, the strings representing them
+are not present in /sys/power/mem_sleep). The string representing the suspend
+mode that will be used next is enclosed in square brackets.  Writing one of
+the other strings present in /sys/power/mem_sleep to it changes the suspend
+mode to the one represented by that string.
+
+Consequently, there are two ways to cause the system to go into the
+Suspend-To-Idle sleep state. The first one is to write "freeze" directly to
+/sys/power/state. The second one is to write "s2idle" to /sys/power/mem_sleep
+and then to write "mem" to /sys/power/state. Similarly, there are two ways
+to cause the system to go into the Power-On Suspend sleep state (the strings to
+write to the control files in that case are "standby" or "shallow" and "mem",
+respectively) if that state is supported by the platform. In turn, there is
+only one way to cause the system to go into the Suspend-To-RAM state (write
+"deep" into /sys/power/mem_sleep and "mem" into /sys/power/state).
+
+The default suspend mode (i.e. the one to be used without writing anything into
+/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
+"s2idle", but it can be overridden by the value of the "mem_sleep_default"
+parameter in the kernel command line. On some ACPI-based systems, depending on
+the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
+is supported.
+
+The properties of all of the sleep states are described below.
+
+
+State: Suspend-To-Idle
ACPI state: S0
-Label: "freeze"
+Label: "s2idle" ("freeze")
This state is a generic, pure software, light-weight, system sleep state.
It allows more energy to be saved relative to runtime idle by freezing user
@@ -35,13 +53,13 @@ lower-power than available at run time), such that the processors can
spend more time in their idle states.
This state can be used for platforms without Power-On Suspend/Suspend-to-RAM
-support, or it can be used in addition to Suspend-to-RAM (memory sleep)
-to provide reduced resume latency. It is always supported.
+support, or it can be used in addition to Suspend-to-RAM to provide reduced
+resume latency. It is always supported.
State: Standby / Power-On Suspend
ACPI State: S1
-Label: "standby"
+Label: "shallow" ("standby")
This state, if supported, offers moderate, though real, power savings, while
providing a relatively low-latency transition back to a working system. No
@@ -58,7 +76,7 @@ state.
State: Suspend-to-RAM
ACPI State: S3
-Label: "mem"
+Label: "deep"
This state, if supported, offers significant power savings as everything in the
system is put into a low-power state, except for memory, which should be placed
diff --git a/Documentation/power/swsusp-dmcrypt.txt b/Documentation/power/swsusp-dmcrypt.txt
index 59931b46ff7e..b802fbfd95ef 100644
--- a/Documentation/power/swsusp-dmcrypt.txt
+++ b/Documentation/power/swsusp-dmcrypt.txt
@@ -8,7 +8,7 @@ Some prerequisites:
You know how dm-crypt works. If not, visit the following web page:
http://www.saout.de/misc/dm-crypt/
You have read Documentation/power/swsusp.txt and understand it.
-You did read Documentation/initrd.txt and know how an initrd works.
+You did read Documentation/admin-guide/initrd.rst and know how an initrd works.
You know how to create or how to modify an initrd.
Now your system is properly set up, your disk is encrypted except for
diff --git a/Documentation/development-process/1.Intro.rst b/Documentation/process/1.Intro.rst
index 22642b3fe903..e782ae2eef58 100644
--- a/Documentation/development-process/1.Intro.rst
+++ b/Documentation/process/1.Intro.rst
@@ -1,5 +1,5 @@
-Introdution
-===========
+Introduction
+============
Executive summary
-----------------
diff --git a/Documentation/development-process/2.Process.rst b/Documentation/process/2.Process.rst
index ce5561bb3f8e..ce5561bb3f8e 100644
--- a/Documentation/development-process/2.Process.rst
+++ b/Documentation/process/2.Process.rst
diff --git a/Documentation/development-process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst
index af2c0af931d6..af2c0af931d6 100644
--- a/Documentation/development-process/3.Early-stage.rst
+++ b/Documentation/process/3.Early-stage.rst
diff --git a/Documentation/development-process/4.Coding.rst b/Documentation/process/4.Coding.rst
index 9d5cef996f7f..2a728d898fc5 100644
--- a/Documentation/development-process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -22,7 +22,7 @@ Coding style
************
The kernel has long had a standard coding style, described in
-Documentation/CodingStyle. For much of that time, the policies described
+Documentation/process/coding-style.rst. For much of that time, the policies described
in that file were taken as being, at most, advisory. As a result, there is
a substantial amount of code in the kernel which does not meet the coding
style guidelines. The presence of that code leads to two independent
@@ -343,7 +343,7 @@ user-space developers to know what they are working with. See
Documentation/ABI/README for a description of how this documentation should
be formatted and what information needs to be provided.
-The file Documentation/kernel-parameters.txt describes all of the kernel's
+The file Documentation/admin-guide/kernel-parameters.rst describes all of the kernel's
boot-time parameters. Any patch which adds new parameters should add the
appropriate entries to this file.
@@ -358,8 +358,8 @@ them, as appropriate, for externally-available functions. Even in areas
which have not been so documented, there is no harm in adding kerneldoc
comments for the future; indeed, this can be a useful activity for
beginning kernel developers. The format of these comments, along with some
-information on how to create kerneldoc templates can be found in the file
-Documentation/kernel-documentation.rst.
+information on how to create kerneldoc templates can be found at
+:ref:`Documentation/doc-guide/ <doc_guide>`.
Anybody who reads through a significant amount of existing kernel code will
note that, often, comments are most notable by their absence. Once again,
diff --git a/Documentation/development-process/5.Posting.rst b/Documentation/process/5.Posting.rst
index b511ddf7e82a..1b7728b19ea7 100644
--- a/Documentation/development-process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -9,8 +9,8 @@ kernel. Unsurprisingly, the kernel development community has evolved a set
of conventions and procedures which are used in the posting of patches;
following them will make life much easier for everybody involved. This
document will attempt to cover these expectations in reasonable detail;
-more information can also be found in the files SubmittingPatches,
-SubmittingDrivers, and SubmitChecklist in the kernel documentation
+more information can also be found in the files process/submitting-patches.rst,
+process/submitting-drivers.rst, and process/submit-checklist.rst in the kernel documentation
directory.
@@ -198,7 +198,7 @@ pass it to diff with the "-X" option.
The tags mentioned above are used to describe how various developers have
been associated with the development of this patch. They are described in
-detail in the SubmittingPatches document; what follows here is a brief
+detail in the process/submitting-patches.rst document; what follows here is a brief
summary. Each of these lines has the format:
::
@@ -210,7 +210,7 @@ The tags in common use are:
- Signed-off-by: this is a developer's certification that he or she has
the right to submit the patch for inclusion into the kernel. It is an
agreement to the Developer's Certificate of Origin, the full text of
- which can be found in Documentation/SubmittingPatches. Code without a
+ which can be found in Documentation/process/submitting-patches.rst. Code without a
proper signoff cannot be merged into the mainline.
- Acked-by: indicates an agreement by another developer (often a
@@ -221,7 +221,7 @@ The tags in common use are:
it to work.
- Reviewed-by: the named developer has reviewed the patch for correctness;
- see the reviewer's statement in Documentation/SubmittingPatches for more
+ see the reviewer's statement in Documentation/process/submitting-patches.rst for more
detail.
- Reported-by: names a user who reported a problem which is fixed by this
@@ -248,7 +248,7 @@ take care of:
be examined in any detail. If there is any doubt at all, mail the patch
to yourself and convince yourself that it shows up intact.
- Documentation/email-clients.txt has some helpful hints on making
+ Documentation/process/email-clients.rst has some helpful hints on making
specific mail clients work for sending patches.
- Are you sure your patch is free of silly mistakes? You should always
diff --git a/Documentation/development-process/6.Followthrough.rst b/Documentation/process/6.Followthrough.rst
index a173cd5f93d2..a173cd5f93d2 100644
--- a/Documentation/development-process/6.Followthrough.rst
+++ b/Documentation/process/6.Followthrough.rst
diff --git a/Documentation/development-process/7.AdvancedTopics.rst b/Documentation/process/7.AdvancedTopics.rst
index 81d61c5d62dd..172733cff097 100644
--- a/Documentation/development-process/7.AdvancedTopics.rst
+++ b/Documentation/process/7.AdvancedTopics.rst
@@ -176,5 +176,3 @@ security issues, duplication of code found elsewhere, adequate
documentation, adverse effects on performance, user-space ABI changes, etc.
All types of review, if they lead to better code going into the kernel, are
welcome and worthwhile.
-
-
diff --git a/Documentation/development-process/8.Conclusion.rst b/Documentation/process/8.Conclusion.rst
index 23ec7cbc2d2b..1c7f54cd0261 100644
--- a/Documentation/development-process/8.Conclusion.rst
+++ b/Documentation/process/8.Conclusion.rst
@@ -5,9 +5,9 @@ For more information
There are numerous sources of information on Linux kernel development and
related topics. First among those will always be the Documentation
-directory found in the kernel source distribution. The top-level HOWTO
-file is an important starting point; SubmittingPatches and
-SubmittingDrivers are also something which all kernel developers should
+directory found in the kernel source distribution. The top-level process/howto.rst
+file is an important starting point; process/submitting-patches.rst and
+process/submitting-drivers.rst are also something which all kernel developers should
read. Many internal kernel APIs are documented using the kerneldoc
mechanism; "make htmldocs" or "make pdfdocs" can be used to generate those
documents in HTML or PDF format (though the version of TeX shipped by some
diff --git a/Documentation/adding-syscalls.txt b/Documentation/process/adding-syscalls.rst
index bbb31e091b28..8cc25a06f353 100644
--- a/Documentation/adding-syscalls.txt
+++ b/Documentation/process/adding-syscalls.rst
@@ -3,7 +3,7 @@ Adding a New System Call
This document describes what's involved in adding a new system call to the
Linux kernel, over and above the normal submission advice in
-Documentation/SubmittingPatches.
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`.
System Call Alternatives
@@ -19,30 +19,33 @@ interface.
object, it may make more sense to create a new filesystem or device. This
also makes it easier to encapsulate the new functionality in a kernel module
rather than requiring it to be built into the main kernel.
+
- If the new functionality involves operations where the kernel notifies
userspace that something has happened, then returning a new file
descriptor for the relevant object allows userspace to use
- poll/select/epoll to receive that notification.
- - However, operations that don't map to read(2)/write(2)-like operations
- have to be implemented as ioctl(2) requests, which can lead to a
- somewhat opaque API.
+ ``poll``/``select``/``epoll`` to receive that notification.
+ - However, operations that don't map to
+ :manpage:`read(2)`/:manpage:`write(2)`-like operations
+ have to be implemented as :manpage:`ioctl(2)` requests, which can lead
+ to a somewhat opaque API.
+
- If you're just exposing runtime system information, a new node in sysfs
- (see Documentation/filesystems/sysfs.txt) or the /proc filesystem may be
- more appropriate. However, access to these mechanisms requires that the
+ (see ``Documentation/filesystems/sysfs.txt``) or the ``/proc`` filesystem may
+ be more appropriate. However, access to these mechanisms requires that the
relevant filesystem is mounted, which might not always be the case (e.g.
in a namespaced/sandboxed/chrooted environment). Avoid adding any API to
debugfs, as this is not considered a 'production' interface to userspace.
- If the operation is specific to a particular file or file descriptor, then
- an additional fcntl(2) command option may be more appropriate. However,
- fcntl(2) is a multiplexing system call that hides a lot of complexity, so
+ an additional :manpage:`fcntl(2)` command option may be more appropriate. However,
+ :manpage:`fcntl(2)` is a multiplexing system call that hides a lot of complexity, so
this option is best for when the new function is closely analogous to
- existing fcntl(2) functionality, or the new functionality is very simple
+ existing :manpage:`fcntl(2)` functionality, or the new functionality is very simple
(for example, getting/setting a simple flag related to a file descriptor).
- If the operation is specific to a particular task or process, then an
- additional prctl(2) command option may be more appropriate. As with
- fcntl(2), this system call is a complicated multiplexor so is best reserved
- for near-analogs of existing prctl() commands or getting/setting a simple
- flag related to a process.
+ additional :manpage:`prctl(2)` command option may be more appropriate. As
+ with :manpage:`fcntl(2)`, this system call is a complicated multiplexor so
+ is best reserved for near-analogs of existing ``prctl()`` commands or
+ getting/setting a simple flag related to a process.
Designing the API: Planning for Extension
@@ -54,15 +57,16 @@ interface on the kernel mailing list, and it's important to plan for future
extensions of the interface.
(The syscall table is littered with historical examples where this wasn't done,
-together with the corresponding follow-up system calls -- eventfd/eventfd2,
-dup2/dup3, inotify_init/inotify_init1, pipe/pipe2, renameat/renameat2 -- so
+together with the corresponding follow-up system calls --
+``eventfd``/``eventfd2``, ``dup2``/``dup3``, ``inotify_init``/``inotify_init1``,
+``pipe``/``pipe2``, ``renameat``/``renameat2`` -- so
learn from the history of the kernel and plan for extensions from the start.)
For simpler system calls that only take a couple of arguments, the preferred
way to allow for future extensibility is to include a flags argument to the
system call. To make sure that userspace programs can safely use flags
between kernel versions, check whether the flags value holds any unknown
-flags, and reject the system call (with EINVAL) if it does:
+flags, and reject the system call (with ``EINVAL``) if it does::
if (flags & ~(THING_FLAG1 | THING_FLAG2 | THING_FLAG3))
return -EINVAL;
@@ -72,7 +76,7 @@ flags, and reject the system call (with EINVAL) if it does:
For more sophisticated system calls that involve a larger number of arguments,
it's preferred to encapsulate the majority of the arguments into a structure
that is passed in by pointer. Such a structure can cope with future extension
-by including a size argument in the structure:
+by including a size argument in the structure::
struct xyzzy_params {
u32 size; /* userspace sets p->size = sizeof(struct xyzzy_params) */
@@ -81,19 +85,19 @@ by including a size argument in the structure:
u64 param_3;
};
-As long as any subsequently added field, say param_4, is designed so that a
+As long as any subsequently added field, say ``param_4``, is designed so that a
zero value gives the previous behaviour, then this allows both directions of
version mismatch:
- To cope with a later userspace program calling an older kernel, the kernel
code should check that any memory beyond the size of the structure that it
- expects is zero (effectively checking that param_4 == 0).
+ expects is zero (effectively checking that ``param_4 == 0``).
- To cope with an older userspace program calling a newer kernel, the kernel
code can zero-extend a smaller instance of the structure (effectively
- setting param_4 = 0).
+ setting ``param_4 = 0``).
-See perf_event_open(2) and the perf_copy_attr() function (in
-kernel/events/core.c) for an example of this approach.
+See :manpage:`perf_event_open(2)` and the ``perf_copy_attr()`` function (in
+``kernel/events/core.c``) for an example of this approach.
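+
+A rough sketch of a kernel-side helper covering both directions of the
+mismatch might look like the following (the helper name is purely
+illustrative, and the size is read from the structure's leading field as
+described above)::
+
+        static int xyzzy_copy_params(struct xyzzy_params *kp,
+                                     const void __user *uparams)
+        {
+                u32 ksize = sizeof(*kp), usize, i;
+                char c;
+
+                if (get_user(usize, (const u32 __user *)uparams))
+                        return -EFAULT;
+
+                memset(kp, 0, ksize);   /* zero-extends older userspace callers */
+                if (copy_from_user(kp, uparams, min(usize, ksize)))
+                        return -EFAULT;
+
+                /* Trailing bytes the kernel does not know about must be zero. */
+                for (i = ksize; i < usize; i++) {
+                        if (get_user(c, (const char __user *)uparams + i))
+                                return -EFAULT;
+                        if (c)
+                                return -E2BIG;
+                }
+                return 0;
+        }
+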
Designing the API: Other Considerations
@@ -104,57 +108,60 @@ should use a file descriptor as the handle for that object -- don't invent a
new type of userspace object handle when the kernel already has mechanisms and
well-defined semantics for using file descriptors.
-If your new xyzzy(2) system call does return a new file descriptor, then the
-flags argument should include a value that is equivalent to setting O_CLOEXEC
-on the new FD. This makes it possible for userspace to close the timing
-window between xyzzy() and calling fcntl(fd, F_SETFD, FD_CLOEXEC), where an
-unexpected fork() and execve() in another thread could leak a descriptor to
+If your new :manpage:`xyzzy(2)` system call does return a new file descriptor,
+then the flags argument should include a value that is equivalent to setting
+``O_CLOEXEC`` on the new FD. This makes it possible for userspace to close
+the timing window between ``xyzzy()`` and calling
+``fcntl(fd, F_SETFD, FD_CLOEXEC)``, where an unexpected ``fork()`` and
+``execve()`` in another thread could leak a descriptor to
the exec'ed program. (However, resist the temptation to re-use the actual value
-of the O_CLOEXEC constant, as it is architecture-specific and is part of a
-numbering space of O_* flags that is fairly full.)
+of the ``O_CLOEXEC`` constant, as it is architecture-specific and is part of a
+numbering space of ``O_*`` flags that is fairly full.)
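+
+For instance, the new call's own flag space might reserve a bit for this
+purpose (the names and the value below are purely illustrative)::
+
+        #define XYZZY_CLOEXEC   0x01    /* set close-on-exec on the returned fd */
+
+        fd = xyzzy(path, XYZZY_CLOEXEC);
+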
If your system call returns a new file descriptor, you should also consider
-what it means to use the poll(2) family of system calls on that file
+what it means to use the :manpage:`poll(2)` family of system calls on that file
descriptor. Making a file descriptor ready for reading or writing is the
normal way for the kernel to indicate to userspace that an event has
occurred on the corresponding kernel object.
-If your new xyzzy(2) system call involves a filename argument:
+If your new :manpage:`xyzzy(2)` system call involves a filename argument::
int sys_xyzzy(const char __user *path, ..., unsigned int flags);
-you should also consider whether an xyzzyat(2) version is more appropriate:
+you should also consider whether an :manpage:`xyzzyat(2)` version is more appropriate::
int sys_xyzzyat(int dfd, const char __user *path, ..., unsigned int flags);
This allows more flexibility for how userspace specifies the file in question;
in particular it allows userspace to request the functionality for an
-already-opened file descriptor using the AT_EMPTY_PATH flag, effectively giving
-an fxyzzy(3) operation for free:
+already-opened file descriptor using the ``AT_EMPTY_PATH`` flag, effectively
+giving an :manpage:`fxyzzy(3)` operation for free::
- xyzzyat(AT_FDCWD, path, ..., 0) is equivalent to xyzzy(path,...)
- xyzzyat(fd, "", ..., AT_EMPTY_PATH) is equivalent to fxyzzy(fd, ...)
-(For more details on the rationale of the *at() calls, see the openat(2) man
-page; for an example of AT_EMPTY_PATH, see the fstatat(2) man page.)
-
-If your new xyzzy(2) system call involves a parameter describing an offset
-within a file, make its type loff_t so that 64-bit offsets can be supported
-even on 32-bit architectures.
-
-If your new xyzzy(2) system call involves privileged functionality, it needs
-to be governed by the appropriate Linux capability bit (checked with a call to
-capable()), as described in the capabilities(7) man page. Choose an existing
-capability bit that governs related functionality, but try to avoid combining
-lots of only vaguely related functions together under the same bit, as this
-goes against capabilities' purpose of splitting the power of root. In
-particular, avoid adding new uses of the already overly-general CAP_SYS_ADMIN
-capability.
-
-If your new xyzzy(2) system call manipulates a process other than the calling
-process, it should be restricted (using a call to ptrace_may_access()) so that
-only a calling process with the same permissions as the target process, or
-with the necessary capabilities, can manipulate the target process.
+(For more details on the rationale of the \*at() calls, see the
+:manpage:`openat(2)` man page; for an example of AT_EMPTY_PATH, see the
+:manpage:`fstatat(2)` man page.)
+
+If your new :manpage:`xyzzy(2)` system call involves a parameter describing an
+offset within a file, make its type ``loff_t`` so that 64-bit offsets can be
+supported even on 32-bit architectures.
+
+If your new :manpage:`xyzzy(2)` system call involves privileged functionality,
+it needs to be governed by the appropriate Linux capability bit (checked with
+a call to ``capable()``), as described in the :manpage:`capabilities(7)` man
+page. Choose an existing capability bit that governs related functionality,
+but try to avoid combining lots of only vaguely related functions together
+under the same bit, as this goes against capabilities' purpose of splitting
+the power of root. In particular, avoid adding new uses of the already
+overly-general ``CAP_SYS_ADMIN`` capability.
+
+If your new :manpage:`xyzzy(2)` system call manipulates a process other than
+the calling process, it should be restricted (using a call to
+``ptrace_may_access()``) so that only a calling process with the same
+permissions as the target process, or with the necessary capabilities, can
+manipulate the target process.
Finally, be aware that some non-x86 architectures have an easier time if
system call parameters that are explicitly 64-bit fall on odd-numbered
@@ -175,7 +182,7 @@ distinct commits (each of which is described further below):
- Wiring up of the new system call for one particular architecture, usually
x86 (including all of x86_64, x86_32 and x32).
- A demonstration of the use of the new system call in userspace via a
- selftest in tools/testing/selftests/.
+ selftest in ``tools/testing/selftests/``.
- A draft man-page for the new system call, either as plain text in the
cover letter, or as a patch to the (separate) man-pages repository.
@@ -186,24 +193,24 @@ be cc'ed to linux-api@vger.kernel.org.
Generic System Call Implementation
----------------------------------
-The main entry point for your new xyzzy(2) system call will be called
-sys_xyzzy(), but you add this entry point with the appropriate
-SYSCALL_DEFINEn() macro rather than explicitly. The 'n' indicates the number
-of arguments to the system call, and the macro takes the system call name
+The main entry point for your new :manpage:`xyzzy(2)` system call will be called
+``sys_xyzzy()``, but you add this entry point with the appropriate
+``SYSCALL_DEFINEn()`` macro rather than explicitly. The 'n' indicates the
+number of arguments to the system call, and the macro takes the system call name
followed by the (type, name) pairs for the parameters as arguments. Using
this macro allows metadata about the new system call to be made available for
other tools.
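
For example, a hypothetical two-argument call could be defined as::

        SYSCALL_DEFINE2(xyzzy, const char __user *, path, unsigned int, flags)
        {
                /* ... implementation ... */
                return 0;
        }
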
The new entry point also needs a corresponding function prototype, in
-include/linux/syscalls.h, marked as asmlinkage to match the way that system
-calls are invoked:
+``include/linux/syscalls.h``, marked as asmlinkage to match the way that system
+calls are invoked::
asmlinkage long sys_xyzzy(...);
Some architectures (e.g. x86) have their own architecture-specific syscall
tables, but several other architectures share a generic syscall table. Add your
new system call to the generic list by adding an entry to the list in
-include/uapi/asm-generic/unistd.h:
+``include/uapi/asm-generic/unistd.h``::
#define __NR_xyzzy 292
__SYSCALL(__NR_xyzzy, sys_xyzzy)
@@ -212,30 +219,30 @@ Also update the __NR_syscalls count to reflect the additional system call, and
note that if multiple new system calls are added in the same merge window,
your new syscall number may get adjusted to resolve conflicts.
-The file kernel/sys_ni.c provides a fallback stub implementation of each system
-call, returning -ENOSYS. Add your new system call here too:
+The file ``kernel/sys_ni.c`` provides a fallback stub implementation of each
+system call, returning ``-ENOSYS``. Add your new system call here too::
cond_syscall(sys_xyzzy);
Your new kernel functionality, and the system call that controls it, should
-normally be optional, so add a CONFIG option (typically to init/Kconfig) for
-it. As usual for new CONFIG options:
+normally be optional, so add a ``CONFIG`` option (typically to
+``init/Kconfig``) for it. As usual for new ``CONFIG`` options:
- Include a description of the new functionality and system call controlled
by the option.
- Make the option depend on EXPERT if it should be hidden from normal users.
- Make any new source files implementing the function dependent on the CONFIG
- option in the Makefile (e.g. "obj-$(CONFIG_XYZZY_SYSCALL) += xyzzy.c").
+ option in the Makefile (e.g. ``obj-$(CONFIG_XYZZY_SYSCALL) += xyzzy.c``).
- Double check that the kernel still builds with the new CONFIG option turned
off.
To summarize, you need a commit that includes:
- - CONFIG option for the new function, normally in init/Kconfig
- - SYSCALL_DEFINEn(xyzzy, ...) for the entry point
- - corresponding prototype in include/linux/syscalls.h
- - generic table entry in include/uapi/asm-generic/unistd.h
- - fallback stub in kernel/sys_ni.c
+ - ``CONFIG`` option for the new function, normally in ``init/Kconfig``
+ - ``SYSCALL_DEFINEn(xyzzy, ...)`` for the entry point
+ - corresponding prototype in ``include/linux/syscalls.h``
+ - generic table entry in ``include/uapi/asm-generic/unistd.h``
+ - fallback stub in ``kernel/sys_ni.c``
x86 System Call Implementation
@@ -244,11 +251,11 @@ x86 System Call Implementation
To wire up your new system call for x86 platforms, you need to update the
master syscall tables. Assuming your new system call isn't special in some
way (see below), this involves a "common" entry (for x86_64 and x32) in
-arch/x86/entry/syscalls/syscall_64.tbl:
+arch/x86/entry/syscalls/syscall_64.tbl::
333 common xyzzy sys_xyzzy
-and an "i386" entry in arch/x86/entry/syscalls/syscall_32.tbl:
+and an "i386" entry in ``arch/x86/entry/syscalls/syscall_32.tbl``::
380 i386 xyzzy sys_xyzzy
@@ -267,48 +274,49 @@ However, there are a couple of situations where a compatibility layer is
needed to cope with size differences between 32-bit and 64-bit.
The first is if the 64-bit kernel also supports 32-bit userspace programs, and
-so needs to parse areas of (__user) memory that could hold either 32-bit or
+so needs to parse areas of (``__user``) memory that could hold either 32-bit or
64-bit values. In particular, this is needed whenever a system call argument
is:
- a pointer to a pointer
- - a pointer to a struct containing a pointer (e.g. struct iovec __user *)
- - a pointer to a varying sized integral type (time_t, off_t, long, ...)
+ - a pointer to a struct containing a pointer (e.g. ``struct iovec __user *``)
+ - a pointer to a varying sized integral type (``time_t``, ``off_t``,
+ ``long``, ...)
- a pointer to a struct containing a varying sized integral type.
The second situation that requires a compatibility layer is if one of the
system call's arguments has a type that is explicitly 64-bit even on a 32-bit
-architecture, for example loff_t or __u64. In this case, a value that arrives
-at a 64-bit kernel from a 32-bit application will be split into two 32-bit
-values, which then need to be re-assembled in the compatibility layer.
+architecture, for example ``loff_t`` or ``__u64``. In this case, a value that
+arrives at a 64-bit kernel from a 32-bit application will be split into two
+32-bit values, which then need to be re-assembled in the compatibility layer.
(Note that a system call argument that's a pointer to an explicit 64-bit type
-does *not* need a compatibility layer; for example, splice(2)'s arguments of
-type loff_t __user * do not trigger the need for a compat_ system call.)
+does **not** need a compatibility layer; for example, :manpage:`splice(2)`'s arguments of
+type ``loff_t __user *`` do not trigger the need for a ``compat_`` system call.)
-The compatibility version of the system call is called compat_sys_xyzzy(), and
-is added with the COMPAT_SYSCALL_DEFINEn() macro, analogously to
+The compatibility version of the system call is called ``compat_sys_xyzzy()``,
+and is added with the ``COMPAT_SYSCALL_DEFINEn()`` macro, analogously to
SYSCALL_DEFINEn. This version of the implementation runs as part of a 64-bit
kernel, but expects to receive 32-bit parameter values and does whatever is
-needed to deal with them. (Typically, the compat_sys_ version converts the
-values to 64-bit versions and either calls on to the sys_ version, or both of
+needed to deal with them. (Typically, the ``compat_sys_`` version converts the
+values to 64-bit versions and either calls on to the ``sys_`` version, or both of
them call a common inner implementation function.)
The compat entry point also needs a corresponding function prototype, in
-include/linux/compat.h, marked as asmlinkage to match the way that system
-calls are invoked:
+``include/linux/compat.h``, marked as asmlinkage to match the way that system
+calls are invoked::
asmlinkage long compat_sys_xyzzy(...);
If the system call involves a structure that is laid out differently on 32-bit
-and 64-bit systems, say struct xyzzy_args, then the include/linux/compat.h
-header file should also include a compat version of the structure (struct
-compat_xyzzy_args) where each variable-size field has the appropriate compat_
-type that corresponds to the type in struct xyzzy_args. The
-compat_sys_xyzzy() routine can then use this compat_ structure to parse the
-arguments from a 32-bit invocation.
+and 64-bit systems, say ``struct xyzzy_args``, then the include/linux/compat.h
+header file should also include a compat version of the structure (``struct
+compat_xyzzy_args``) where each variable-size field has the appropriate
+``compat_`` type that corresponds to the type in ``struct xyzzy_args``. The
+``compat_sys_xyzzy()`` routine can then use this ``compat_`` structure to
+parse the arguments from a 32-bit invocation.
-For example, if there are fields:
+For example, if there are fields::
struct xyzzy_args {
const char __user *ptr;
@@ -317,7 +325,7 @@ For example, if there are fields:
/* ... */
};
-in struct xyzzy_args, then struct compat_xyzzy_args would have:
+in struct xyzzy_args, then struct compat_xyzzy_args would have::
struct compat_xyzzy_args {
compat_uptr_t ptr;
@@ -327,18 +335,19 @@ in struct xyzzy_args, then struct compat_xyzzy_args would have:
};
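
The compat entry point can then widen the 32-bit values and hand them to a
common implementation, for instance (``do_xyzzy()`` is a purely illustrative
helper name)::

        COMPAT_SYSCALL_DEFINE1(xyzzy, struct compat_xyzzy_args __user *, uargs)
        {
                struct compat_xyzzy_args args;

                if (copy_from_user(&args, uargs, sizeof(args)))
                        return -EFAULT;

                /* compat_ptr() turns the compat_uptr_t into a native pointer */
                return do_xyzzy(compat_ptr(args.ptr));
        }
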
The generic system call list also needs adjusting to allow for the compat
-version; the entry in include/uapi/asm-generic/unistd.h should use
-__SC_COMP rather than __SYSCALL:
+version; the entry in ``include/uapi/asm-generic/unistd.h`` should use
+``__SC_COMP`` rather than ``__SYSCALL``::
#define __NR_xyzzy 292
__SC_COMP(__NR_xyzzy, sys_xyzzy, compat_sys_xyzzy)
To summarize, you need:
- - a COMPAT_SYSCALL_DEFINEn(xyzzy, ...) for the compat entry point
- - corresponding prototype in include/linux/compat.h
- - (if needed) 32-bit mapping struct in include/linux/compat.h
- - instance of __SC_COMP not __SYSCALL in include/uapi/asm-generic/unistd.h
+ - a ``COMPAT_SYSCALL_DEFINEn(xyzzy, ...)`` for the compat entry point
+ - corresponding prototype in ``include/linux/compat.h``
+ - (if needed) 32-bit mapping struct in ``include/linux/compat.h``
+ - instance of ``__SC_COMP`` not ``__SYSCALL`` in
+ ``include/uapi/asm-generic/unistd.h``
Compatibility System Calls (x86)
@@ -347,9 +356,9 @@ Compatibility System Calls (x86)
To wire up the x86 architecture of a system call with a compatibility version,
the entries in the syscall tables need to be adjusted.
-First, the entry in arch/x86/entry/syscalls/syscall_32.tbl gets an extra
+First, the entry in ``arch/x86/entry/syscalls/syscall_32.tbl`` gets an extra
column to indicate that a 32-bit userspace program running on a 64-bit kernel
-should hit the compat entry point:
+should hit the compat entry point::
380 i386 xyzzy sys_xyzzy compat_sys_xyzzy
@@ -359,8 +368,8 @@ should either match the 64-bit version or the 32-bit version.
If there's a pointer-to-a-pointer involved, the decision is easy: x32 is
ILP32, so the layout should match the 32-bit version, and the entry in
-arch/x86/entry/syscalls/syscall_64.tbl is split so that x32 programs hit the
-compatibility wrapper:
+``arch/x86/entry/syscalls/syscall_64.tbl`` is split so that x32 programs hit
+the compatibility wrapper::
333 64 xyzzy sys_xyzzy
...
@@ -384,8 +393,9 @@ stack the same and most of the registers the same as before the system call,
and with the same virtual memory space.
However, a few system calls do things differently. They might return to a
-different location (rt_sigreturn) or change the memory space (fork/vfork/clone)
-or even architecture (execve/execveat) of the program.
+different location (``rt_sigreturn``) or change the memory space
+(``fork``/``vfork``/``clone``) or even architecture (``execve``/``execveat``)
+of the program.
To allow for this, the kernel implementation of the system call may need to
save and restore additional registers to the kernel stack, allowing complete
@@ -395,31 +405,31 @@ This is arch-specific, but typically involves defining assembly entry points
that save/restore additional registers and invoke the real system call entry
point.
-For x86_64, this is implemented as a stub_xyzzy entry point in
-arch/x86/entry/entry_64.S, and the entry in the syscall table
-(arch/x86/entry/syscalls/syscall_64.tbl) is adjusted to match:
+For x86_64, this is implemented as a ``stub_xyzzy`` entry point in
+``arch/x86/entry/entry_64.S``, and the entry in the syscall table
+(``arch/x86/entry/syscalls/syscall_64.tbl``) is adjusted to match::
333 common xyzzy stub_xyzzy
The equivalent for 32-bit programs running on a 64-bit kernel is normally
-called stub32_xyzzy and implemented in arch/x86/entry/entry_64_compat.S,
+called ``stub32_xyzzy`` and implemented in ``arch/x86/entry/entry_64_compat.S``,
with the corresponding syscall table adjustment in
-arch/x86/entry/syscalls/syscall_32.tbl:
+``arch/x86/entry/syscalls/syscall_32.tbl``::
380 i386 xyzzy sys_xyzzy stub32_xyzzy
If the system call needs a compatibility layer (as in the previous section)
-then the stub32_ version needs to call on to the compat_sys_ version of the
-system call rather than the native 64-bit version. Also, if the x32 ABI
+then the ``stub32_`` version needs to call on to the ``compat_sys_`` version
+of the system call rather than the native 64-bit version. Also, if the x32 ABI
implementation is not common with the x86_64 version, then its syscall
-table will also need to invoke a stub that calls on to the compat_sys_
+table will also need to invoke a stub that calls on to the ``compat_sys_``
version.
For completeness, it's also nice to set up a mapping so that user-mode Linux
still works -- its syscall table will reference stub_xyzzy, but the UML build
-doesn't include arch/x86/entry/entry_64.S implementation (because UML
+doesn't include ``arch/x86/entry/entry_64.S`` implementation (because UML
simulates registers etc). Fixing this is as simple as adding a #define to
-arch/x86/um/sys_call_table_64.c:
+``arch/x86/um/sys_call_table_64.c``::
#define stub_xyzzy sys_xyzzy
@@ -432,9 +442,9 @@ occasional exception that may need updating for your particular system call.
The audit subsystem is one such special case; it includes (arch-specific)
functions that classify some special types of system call -- specifically
-file open (open/openat), program execution (execve/exeveat) or socket
-multiplexor (socketcall) operations. If your new system call is analogous to
-one of these, then the audit system should be updated.
+file open (``open``/``openat``), program execution (``execve``/``execveat``) or
+socket multiplexor (``socketcall``) operations. If your new system call is
+analogous to one of these, then the audit system should be updated.
More generally, if there is an existing system call that is analogous to your
new system call, it's worth doing a kernel-wide grep for the existing system
@@ -447,10 +457,10 @@ Testing
A new system call should obviously be tested; it is also useful to provide
reviewers with a demonstration of how user space programs will use the system
call. A good way to combine these aims is to include a simple self-test
-program in a new directory under tools/testing/selftests/.
+program in a new directory under ``tools/testing/selftests/``.
For a new system call, there will obviously be no libc wrapper function and so
-the test will need to invoke it using syscall(); also, if the system call
+the test will need to invoke it using ``syscall()``; also, if the system call
involves a new userspace-visible structure, the corresponding header will need
to be installed to compile the test.
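
A minimal test might therefore look something like the following, where
``__NR_xyzzy`` and the arguments are placeholders for the new call::

        #include <unistd.h>
        #include <sys/syscall.h>

        int main(void)
        {
                long ret = syscall(__NR_xyzzy, "/tmp/testfile", 0);

                return (ret == 0) ? 0 : 1;
        }
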
@@ -461,6 +471,7 @@ and x32 (-mx32) ABI program.
For more extensive and thorough testing of new functionality, you should also
consider adding tests to the Linux Test Project, or to the xfstests project
for filesystem-related changes.
+
- https://linux-test-project.github.io/
- git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
@@ -487,12 +498,14 @@ References and Sources
arguments: https://lwn.net/Articles/311630/
- Pair of LWN articles from David Drysdale that describe the system call
implementation paths in detail for v3.14:
+
- https://lwn.net/Articles/604287/
- https://lwn.net/Articles/604515/
+
- Architecture-specific requirements for system calls are discussed in the
- syscall(2) man-page:
+ :manpage:`syscall(2)` man-page:
http://man7.org/linux/man-pages/man2/syscall.2.html#NOTES
- - Collated emails from Linus Torvalds discussing the problems with ioctl():
+ - Collated emails from Linus Torvalds discussing the problems with ``ioctl()``:
http://yarchive.net/comp/linux/ioctl.html
- "How to not invent kernel interfaces", Arnd Bergmann,
http://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
@@ -507,17 +520,19 @@ References and Sources
commit: https://lkml.org/lkml/2014/11/19/254
- Suggestion from Greg Kroah-Hartman that it's good for new system calls to
come with a man-page & selftest: https://lkml.org/lkml/2014/3/19/710
- - Discussion from Michael Kerrisk of new system call vs. prctl(2) extension:
+ - Discussion from Michael Kerrisk of new system call vs. :manpage:`prctl(2)` extension:
https://lkml.org/lkml/2014/6/3/411
- Suggestion from Ingo Molnar that system calls that involve multiple
arguments should encapsulate those arguments in a struct, which includes a
size field for future extensibility: https://lkml.org/lkml/2015/7/30/117
- Numbering oddities arising from (re-)use of O_* numbering space flags:
+
- commit 75069f2b5bfb ("vfs: renumber FMODE_NONOTIFY and add to uniqueness
check")
- commit 12ed2e36c98a ("fanotify: FMODE_NONOTIFY and __O_SYNC in sparc
conflict")
- commit bb458c644a59 ("Safer ABI for O_TMPFILE")
+
- Discussion from Matthew Wilcox about restrictions on 64-bit arguments:
https://lkml.org/lkml/2008/12/12/187
- Recommendation from Greg Kroah-Hartman that unknown flags should be
diff --git a/Documentation/applying-patches.txt b/Documentation/process/applying-patches.rst
index 02ce4924468e..87825cf96f33 100644
--- a/Documentation/applying-patches.txt
+++ b/Documentation/process/applying-patches.rst
@@ -9,6 +9,10 @@ Original by:
Last update:
2016-09-14
+.. note::
+
+ This document is obsolete. In most cases, rather than using ``patch``
+ manually, you'll almost certainly want to look at using Git instead.
A frequently asked question on the Linux Kernel Mailing List is how to apply
a patch to the kernel or, more specifically, what base kernel a patch for
@@ -427,7 +431,7 @@ The -mm patches are experimental patches released by Andrew Morton.
In the past, the -mm tree was also used to test subsystem patches, but this
function is now done via the
-:ref:`linux-next <https://www.kernel.org/doc/man-pages/linux-next.html>`
+`linux-next <https://www.kernel.org/doc/man-pages/linux-next.html>`_
tree. The subsystem maintainers push their patches first to linux-next,
and, during the merge window, send them directly to Linus.
@@ -462,4 +466,3 @@ the kernel.
Thank you's to Randy Dunlap, Rolf Eike Beer, Linus Torvalds, Bodo Eggert,
Johannes Stezenbach, Grant Coady, Pavel Machek and others that I may have
forgotten for their reviews and contributions to this document.
-
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
new file mode 100644
index 000000000000..56ce66114665
--- /dev/null
+++ b/Documentation/process/changes.rst
@@ -0,0 +1,485 @@
+.. _changes:
+
+Minimal requirements to compile the Kernel
+++++++++++++++++++++++++++++++++++++++++++
+
+Intro
+=====
+
+This document is designed to provide a list of the minimum levels of
+software necessary to run the 4.x kernels.
+
+This document is originally based on my "Changes" file for 2.0.x kernels
+and therefore owes credit to the same people as that file (Jared Mauch,
+Axel Boldt, Alessandro Sigala, and countless other users all over the
+'net).
+
+Current Minimal Requirements
+****************************
+
+Upgrade to at **least** these software revisions before thinking you've
+encountered a bug! If you're unsure what version you're currently
+running, the suggested command should tell you.
+
+Again, keep in mind that this list assumes you are already functionally
+running a Linux kernel. Also, not all tools are necessary on all
+systems; obviously, if you don't have any ISDN hardware, for example,
+you probably needn't concern yourself with isdn4k-utils.
+
+====================== =============== ========================================
+        Program        Minimal version       Command to check the version
+====================== =============== ========================================
+GNU C                  3.2             gcc --version
+GNU make               3.80            make --version
+binutils               2.12            ld -v
+util-linux             2.10o           fdformat --version
+module-init-tools      0.9.10          depmod -V
+e2fsprogs              1.41.4          e2fsck -V
+jfsutils               1.1.3           fsck.jfs -V
+reiserfsprogs          3.6.3           reiserfsck -V
+xfsprogs               2.6.0           xfs_db -V
+squashfs-tools         4.0             mksquashfs -version
+btrfs-progs            0.18            btrfsck
+pcmciautils            004             pccardctl -V
+quota-tools            3.09            quota -V
+PPP                    2.4.0           pppd --version
+isdn4k-utils           3.1pre1         isdnctrl 2>&1|grep version
+nfs-utils              1.0.5           showmount --version
+procps                 3.2.0           ps --version
+oprofile               0.9             oprofiled --version
+udev                   081             udevd --version
+grub                   0.93            grub --version || grub-install --version
+mcelog                 0.6             mcelog --version
+iptables               1.4.2           iptables -V
+openssl & libcrypto    1.0.0           openssl version
+bc                     1.06.95         bc --version
+Sphinx\ [#f1]_         1.2             sphinx-build --version
+====================== =============== ========================================
+
+.. [#f1] Sphinx is needed only to build the Kernel documentation
+
+Kernel compilation
+******************
+
+GCC
+---
+
+The gcc version requirements may vary depending on the type of CPU in your
+computer.
+
+Make
+----
+
+You will need GNU make 3.80 or later to build the kernel.
+
+Binutils
+--------
+
+Linux on IA-32 has recently switched from using ``as86`` to using ``gas`` for
+assembling the 16-bit boot code, removing the need for ``as86`` to compile
+your kernel. This change does, however, mean that you need a recent
+release of binutils.
+
+Perl
+----
+
+You will need perl 5 and the following modules: ``Getopt::Long``,
+``Getopt::Std``, ``File::Basename``, and ``File::Find`` to build the kernel.
+
+BC
+--
+
+You will need bc to build kernels 3.10 and higher.
+
+
+OpenSSL
+-------
+
+Module signing and external certificate handling use the OpenSSL program and
+crypto library to do key creation and signature generation.
+
+You will need openssl to build kernels 3.7 and higher if module signing is
+enabled. You will also need openssl development packages to build kernels 4.3
+and higher.
+
+
+System utilities
+****************
+
+Architectural changes
+---------------------
+
+DevFS has been obsoleted in favour of udev
+(http://www.kernel.org/pub/linux/utils/kernel/hotplug/)
+
+32-bit UID support is now in place. Have fun!
+
+Linux documentation for functions is transitioning to inline
+documentation via specially-formatted comments near their
+definitions in the source. These comments can be combined with the
+SGML templates in the Documentation/DocBook directory to make DocBook
+files, which can then be converted by DocBook stylesheets to PostScript,
+HTML, PDF files, and several other formats. In order to convert from
+DocBook format to a format of your choice, you'll need to install Jade as
+well as the desired DocBook stylesheets.
+
+Util-linux
+----------
+
+New versions of util-linux provide ``fdisk`` support for larger disks,
+support new options to mount, recognize more supported partition
+types, have an ``fdformat`` which works with 2.4 kernels, and similar goodies.
+You'll probably want to upgrade.
+
+Ksymoops
+--------
+
+If the unthinkable happens and your kernel oopses, you may need the
+ksymoops tool to decode it, but in most cases you don't.
+It is generally preferred to build the kernel with ``CONFIG_KALLSYMS`` so
+that it produces readable dumps that can be used as-is (this also
+produces better output than ksymoops). If for some reason your kernel
+is not built with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
+reproduce the Oops with that option, then you can still decode that Oops
+with ksymoops.
+
+Module-Init-Tools
+-----------------
+
+A new module loader is now in the kernel; using it requires
+``module-init-tools``.  It is backward compatible with the 2.4.x series
+kernels.
+
+Mkinitrd
+--------
+
+These changes to the ``/lib/modules`` file tree layout also require that
+mkinitrd be upgraded.
+
+E2fsprogs
+---------
+
+The latest version of ``e2fsprogs`` fixes several bugs in fsck and
+debugfs. Obviously, it's a good idea to upgrade.
+
+JFSutils
+--------
+
+The ``jfsutils`` package contains the utilities for the file system.
+The following utilities are available:
+
+- ``fsck.jfs`` - initiate replay of the transaction log, and check
+ and repair a JFS formatted partition.
+
+- ``mkfs.jfs`` - create a JFS formatted partition.
+
+- other file system utilities are also available in this package.
+
+Reiserfsprogs
+-------------
+
+The reiserfsprogs package should be used for reiserfs-3.6.x
+(Linux kernels 2.4.x). It is a combined package and contains working
+versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and
+``reiserfsck``. These utils work on both i386 and alpha platforms.
+
+Xfsprogs
+--------
+
+The latest version of ``xfsprogs`` contains ``mkfs.xfs``, ``xfs_db``, and the
+``xfs_repair`` utilities, among others, for the XFS filesystem. It is
+architecture independent and any version from 2.0.0 onward should
+work correctly with this version of the XFS kernel code (2.6.0 or
+later is recommended, due to some significant improvements).
+
+PCMCIAutils
+-----------
+
+PCMCIAutils replaces ``pcmcia-cs``. It properly sets up
+PCMCIA sockets at system startup and loads the appropriate modules
+for 16-bit PCMCIA devices if the kernel is modularized and the hotplug
+subsystem is used.
+
+Quota-tools
+-----------
+
+Support for 32 bit uid's and gid's is required if you want to use
+the newer version 2 quota format. Quota-tools version 3.07 and
+newer has this support. Use the recommended version or newer
+from the table above.
+
+Intel IA32 microcode
+--------------------
+
+A driver has been added to allow updating of Intel IA32 microcode,
+accessible as a normal (misc) character device. If you are not using
+udev you may need to::
+
+ mkdir /dev/cpu
+ mknod /dev/cpu/microcode c 10 184
+ chmod 0644 /dev/cpu/microcode
+
+as root before you can use this. You'll probably also want to
+get the user-space microcode_ctl utility to use with this.
+
+udev
+----
+
+``udev`` is a userspace application for populating ``/dev`` dynamically with
+only entries for devices actually present. ``udev`` replaces the basic
+functionality of devfs, while allowing persistent device naming for
+devices.
+
+FUSE
+----
+
+Needs libfuse 2.4.0 or later. Absolute minimum is 2.3.0 but mount
+options ``direct_io`` and ``kernel_cache`` won't work.
+
+Networking
+**********
+
+General changes
+---------------
+
+If you have advanced network configuration needs, you should probably
+consider using the network tools from ip-route2.
+
+Packet Filter / NAT
+-------------------
+
+The packet filtering and NAT code uses the same tools as the previous 2.4.x
+kernel series (iptables). It still includes backwards-compatibility modules
+for 2.2.x-style ipchains and 2.0.x-style ipfwadm.
+
+PPP
+---
+
+The PPP driver has been restructured to support multilink and to
+enable it to operate over diverse media layers. If you use PPP,
+upgrade pppd to at least 2.4.0.
+
+If you are not using udev, you must have the device file /dev/ppp
+which can be made by::
+
+ mknod /dev/ppp c 108 0
+
+as root.
+
+Isdn4k-utils
+------------
+
+Due to changes in the length of the phone number field, isdn4k-utils
+needs to be recompiled or (preferably) upgraded.
+
+NFS-utils
+---------
+
+In ancient (2.4 and earlier) kernels, the nfs server needed to know
+about any client that expected to be able to access files via NFS. This
+information would be given to the kernel by ``mountd`` when the client
+mounted the filesystem, or by ``exportfs`` at system startup. exportfs
+would take information about active clients from ``/var/lib/nfs/rmtab``.
+
+This approach is quite fragile as it depends on rmtab being correct
+which is not always easy, particularly when trying to implement
+fail-over. Even when the system is working well, ``rmtab`` suffers from
+getting lots of old entries that never get removed.
+
+With modern kernels we have the option of having the kernel tell mountd
+when it gets a request from an unknown host, and mountd can give
+appropriate export information to the kernel. This removes the
+dependency on ``rmtab`` and means that the kernel only needs to know about
+currently active clients.
+
+To enable this new functionality, you need to::
+
+ mount -t nfsd nfsd /proc/fs/nfsd
+
+before running exportfs or mountd. It is recommended that all NFS
+services be protected from the internet-at-large by a firewall where
+that is possible.
+
+mcelog
+------
+
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when ``CONFIG_X86_MCE`` is enabled. Machine check events are errors
+reported by the CPU. Processing them is strongly encouraged.
+
+Kernel documentation
+********************
+
+Sphinx
+------
+
+The ReST markups currently used by the Documentation/ files are meant to be
+built with ``Sphinx`` version 1.2 or later. If you want to build PDF
+output, it is recommended to use version 1.4.6.
+
+.. note::
+
+   Please note that, for PDF and LaTeX output, you'll also need ``XeLaTeX``
+   version 3.14159265.  Depending on the distribution, you may also need
+   to install a series of ``texlive`` packages that provide the minimal
+   set of functionality required for ``XeLaTeX`` to work.
+
+Other tools
+-----------
+
+In order to produce documentation from DocBook, you'll also need ``xmlto``.
+Please note, however, that we're currently migrating all documents to use
+``Sphinx``.
+
+Getting updated software
+========================
+
+Kernel compilation
+******************
+
+gcc
+---
+
+- <ftp://ftp.gnu.org/gnu/gcc/>
+
+Make
+----
+
+- <ftp://ftp.gnu.org/gnu/make/>
+
+Binutils
+--------
+
+- <ftp://ftp.kernel.org/pub/linux/devel/binutils/>
+
+OpenSSL
+-------
+
+- <https://www.openssl.org/>
+
+System utilities
+****************
+
+Util-linux
+----------
+
+- <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
+
+Ksymoops
+--------
+
+- <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+
+Module-Init-Tools
+-----------------
+
+- <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
+
+Mkinitrd
+--------
+
+- <https://code.launchpad.net/initrd-tools/main>
+
+E2fsprogs
+---------
+
+- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
+
+JFSutils
+--------
+
+- <http://jfs.sourceforge.net/>
+
+Reiserfsprogs
+-------------
+
+- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
+
+Xfsprogs
+--------
+
+- <ftp://oss.sgi.com/projects/xfs/>
+
+Pcmciautils
+-----------
+
+- <ftp://ftp.kernel.org/pub/linux/utils/kernel/pcmcia/>
+
+Quota-tools
+-----------
+
+- <http://sourceforge.net/projects/linuxquota/>
+
+DocBook Stylesheets
+-------------------
+
+- <http://sourceforge.net/projects/docbook/files/docbook-dsssl/>
+
+XMLTO XSLT Frontend
+-------------------
+
+- <http://cyberelk.net/tim/xmlto/>
+
+Intel P6 microcode
+------------------
+
+- <https://downloadcenter.intel.com/>
+
+udev
+----
+
+- <http://www.freedesktop.org/software/systemd/man/udev.html>
+
+FUSE
+----
+
+- <http://sourceforge.net/projects/fuse>
+
+mcelog
+------
+
+- <http://www.mcelog.org/>
+
+Networking
+**********
+
+PPP
+---
+
+- <ftp://ftp.samba.org/pub/ppp/>
+
+Isdn4k-utils
+------------
+
+- <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/>
+
+NFS-utils
+---------
+
+- <http://sourceforge.net/project/showfiles.php?group_id=14>
+
+Iptables
+--------
+
+- <http://www.iptables.org/downloads.html>
+
+Ip-route2
+---------
+
+- <https://www.kernel.org/pub/linux/utils/net/iproute2/>
+
+OProfile
+--------
+
+- <http://oprofile.sf.net/download/>
+
+NFS-Utils
+---------
+
+- <http://nfs.sourceforge.net/>
+
+Kernel documentation
+********************
+
+Sphinx
+------
+
+- <http://www.sphinx-doc.org/>
diff --git a/Documentation/CodeOfConflict b/Documentation/process/code-of-conflict.rst
index 49a8ecc157a2..47b6de763203 100644
--- a/Documentation/CodeOfConflict
+++ b/Documentation/process/code-of-conflict.rst
@@ -19,7 +19,8 @@ please contact the Linux Foundation's Technical Advisory Board at
will work to resolve the issue to the best of their ability. For more
information on who is on the Technical Advisory Board and what their
role is, please see:
- http://www.linuxfoundation.org/projects/linux/tab
+
+ - http://www.linuxfoundation.org/projects/linux/tab
As a reviewer of code, please strive to keep things civil and focused on
the technical issues involved. We are all humans, and frustrations can
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
new file mode 100644
index 000000000000..d20d52a4d812
--- /dev/null
+++ b/Documentation/process/coding-style.rst
@@ -0,0 +1,1062 @@
+.. _codingstyle:
+
+Linux kernel coding style
+=========================
+
+This is a short document describing the preferred coding style for the
+linux kernel. Coding style is very personal, and I won't **force** my
+views on anybody, but this is what goes for anything that I have to be
+able to maintain, and I'd prefer it for most other things too. Please
+at least consider the points made here.
+
+First off, I'd suggest printing out a copy of the GNU coding standards,
+and NOT read it. Burn them, it's a great symbolic gesture.
+
+Anyway, here goes:
+
+
+1) Indentation
+--------------
+
+Tabs are 8 characters, and thus indentations are also 8 characters.
+There are heretic movements that try to make indentations 4 (or even 2!)
+characters deep, and that is akin to trying to define the value of PI to
+be 3.
+
+Rationale: The whole idea behind indentation is to clearly define where
+a block of control starts and ends. Especially when you've been looking
+at your screen for 20 straight hours, you'll find it a lot easier to see
+how the indentation works if you have large indentations.
+
+Now, some people will claim that having 8-character indentations makes
+the code move too far to the right, and makes it hard to read on a
+80-character terminal screen. The answer to that is that if you need
+more than 3 levels of indentation, you're screwed anyway, and should fix
+your program.
+
+In short, 8-char indents make things easier to read, and have the added
+benefit of warning you when you're nesting your functions too deep.
+Heed that warning.
+
+The preferred way to ease multiple indentation levels in a switch statement is
+to align the ``switch`` and its subordinate ``case`` labels in the same column
+instead of ``double-indenting`` the ``case`` labels. E.g.:
+
+.. code-block:: c
+
+ switch (suffix) {
+ case 'G':
+ case 'g':
+ mem <<= 30;
+ break;
+ case 'M':
+ case 'm':
+ mem <<= 20;
+ break;
+ case 'K':
+ case 'k':
+ mem <<= 10;
+ /* fall through */
+ default:
+ break;
+ }
+
+Don't put multiple statements on a single line unless you have
+something to hide:
+
+.. code-block:: c
+
+ if (condition) do_this;
+ do_something_everytime;
+
+Don't put multiple assignments on a single line either. Kernel coding style
+is super simple. Avoid tricky expressions.
+
+Outside of comments and documentation, and except in Kconfig, spaces are never
+used for indentation, and the above example is deliberately broken.
+
+Get a decent editor and don't leave whitespace at the end of lines.
+
+
+2) Breaking long lines and strings
+----------------------------------
+
+Coding style is all about readability and maintainability using commonly
+available tools.
+
+The limit on the length of lines is 80 columns and this is a strongly
+preferred limit.
+
+Statements longer than 80 columns will be broken into sensible chunks, unless
+exceeding 80 columns significantly increases readability and does not hide
+information. Descendants are always substantially shorter than the parent and
+are placed substantially to the right. The same applies to function headers
+with a long argument list. However, never break user-visible strings such as
+printk messages, because that breaks the ability to grep for them.
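+
+For example (the function and flag names below are invented for
+illustration), a long call is broken after a comma, with the continuation
+placed well to the right, while a user-visible string is left intact:
+
+.. code-block:: c
+
+	ret = frob_device_register(parent, &frob_descriptor,
+				   FROB_DEFAULT_FLAGS);
+
+	printk(KERN_WARNING "frob: user-visible strings stay on one line so they can be grepped\n");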
+
+
+3) Placing Braces and Spaces
+----------------------------
+
+The other issue that always comes up in C styling is the placement of
+braces. Unlike the indent size, there are few technical reasons to
+choose one placement strategy over the other, but the preferred way, as
+shown to us by the prophets Kernighan and Ritchie, is to put the opening
+brace last on the line, and put the closing brace first, thusly:
+
+.. code-block:: c
+
+ if (x is true) {
+ we do y
+ }
+
+This applies to all non-function statement blocks (if, switch, for,
+while, do). E.g.:
+
+.. code-block:: c
+
+ switch (action) {
+ case KOBJ_ADD:
+ return "add";
+ case KOBJ_REMOVE:
+ return "remove";
+ case KOBJ_CHANGE:
+ return "change";
+ default:
+ return NULL;
+ }
+
+However, there is one special case, namely functions: they have the
+opening brace at the beginning of the next line, thus:
+
+.. code-block:: c
+
+ int function(int x)
+ {
+ body of function
+ }
+
+Heretic people all over the world have claimed that this inconsistency
+is ... well ... inconsistent, but all right-thinking people know that
+(a) K&R are **right** and (b) K&R are right. Besides, functions are
+special anyway (you can't nest them in C).
+
+Note that the closing brace is empty on a line of its own, **except** in
+the cases where it is followed by a continuation of the same statement,
+ie a ``while`` in a do-statement or an ``else`` in an if-statement, like
+this:
+
+.. code-block:: c
+
+ do {
+ body of do-loop
+ } while (condition);
+
+and
+
+.. code-block:: c
+
+ if (x == y) {
+ ..
+ } else if (x > y) {
+ ...
+ } else {
+ ....
+ }
+
+Rationale: K&R.
+
+Also, note that this brace-placement also minimizes the number of empty
+(or almost empty) lines, without any loss of readability. Thus, as the
+supply of new-lines on your screen is not a renewable resource (think
+25-line terminal screens here), you have more empty lines to put
+comments on.
+
+Do not unnecessarily use braces where a single statement will do.
+
+.. code-block:: c
+
+ if (condition)
+ action();
+
+and
+
+.. code-block:: c
+
+ if (condition)
+ do_this();
+ else
+ do_that();
+
+This does not apply if only one branch of a conditional statement is a single
+statement; in the latter case use braces in both branches:
+
+.. code-block:: c
+
+ if (condition) {
+ do_this();
+ do_that();
+ } else {
+ otherwise();
+ }
+
+3.1) Spaces
+***********
+
+Linux kernel style for use of spaces depends (mostly) on
+function-versus-keyword usage. Use a space after (most) keywords. The
+notable exceptions are sizeof, typeof, alignof, and __attribute__, which look
+somewhat like functions (and are usually used with parentheses in Linux,
+although they are not required in the language, as in: ``sizeof info`` after
+``struct fileinfo info;`` is declared).
+
+So use a space after these keywords::
+
+ if, switch, case, for, do, while
+
+but not with sizeof, typeof, alignof, or __attribute__. E.g.,
+
+.. code-block:: c
+
+
+ s = sizeof(struct file);
+
+Do not add spaces around (inside) parenthesized expressions. This example is
+**bad**:
+
+.. code-block:: c
+
+
+ s = sizeof( struct file );
+
+When declaring pointer data or a function that returns a pointer type, the
+preferred use of ``*`` is adjacent to the data name or function name and not
+adjacent to the type name. Examples:
+
+.. code-block:: c
+
+
+ char *linux_banner;
+ unsigned long long memparse(char *ptr, char **retptr);
+ char *match_strdup(substring_t *s);
+
+Use one space around (on each side of) most binary and ternary operators,
+such as any of these::
+
+ = + - < > * / % | & ^ <= >= == != ? :
+
+but no space after unary operators::
+
+ & * + - ~ ! sizeof typeof alignof __attribute__ defined
+
+no space before the postfix increment & decrement unary operators::
+
+ ++ --
+
+no space after the prefix increment & decrement unary operators::
+
+ ++ --
+
+and no space around the ``.`` and ``->`` structure member operators.
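+
+Putting these rules together (a small made-up snippet):
+
+.. code-block:: c
+
+	len = (a + b) * scale;		/* spaces around binary operators */
+	count++;			/* no space before the postfix ++ */
+	if (!ptr)			/* no space after the unary ! */
+		return -EINVAL;
+	total = s.bytes + p->bytes;	/* no space around . and -> */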
+
+Do not leave trailing whitespace at the ends of lines. Some editors with
+``smart`` indentation will insert whitespace at the beginning of new lines as
+appropriate, so you can start typing the next line of code right away.
+However, some such editors do not remove the whitespace if you end up not
+putting a line of code there, such as if you leave a blank line. As a result,
+you end up with lines containing trailing whitespace.
+
+Git will warn you about patches that introduce trailing whitespace, and can
+optionally strip the trailing whitespace for you; however, if applying a series
+of patches, this may make later patches in the series fail by changing their
+context lines.
+
+
+4) Naming
+---------
+
+C is a Spartan language, and so should your naming be. Unlike Modula-2
+and Pascal programmers, C programmers do not use cute names like
+ThisVariableIsATemporaryCounter. A C programmer would call that
+variable ``tmp``, which is much easier to write, and not the least more
+difficult to understand.
+
+HOWEVER, while mixed-case names are frowned upon, descriptive names for
+global variables are a must. To call a global function ``foo`` is a
+shooting offense.
+
+GLOBAL variables (to be used only if you **really** need them) need to
+have descriptive names, as do global functions. If you have a function
+that counts the number of active users, you should call that
+``count_active_users()`` or similar, you should **not** call it ``cntusr()``.
+
+Encoding the type of a function into the name (so-called Hungarian
+notation) is brain damaged - the compiler knows the types anyway and can
+check those, and it only confuses the programmer. No wonder MicroSoft
+makes buggy programs.
+
+LOCAL variable names should be short, and to the point. If you have
+some random integer loop counter, it should probably be called ``i``.
+Calling it ``loop_counter`` is non-productive, if there is no chance of it
+being mis-understood. Similarly, ``tmp`` can be just about any type of
+variable that is used to hold a temporary value.
+
+If you are afraid to mix up your local variable names, you have another
+problem, which is called the function-growth-hormone-imbalance syndrome.
+See chapter 6 (Functions).
+
+
+5) Typedefs
+-----------
+
+Please don't use things like ``vps_t``.
+It's a **mistake** to use typedef for structures and pointers. When you see a
+
+.. code-block:: c
+
+
+ vps_t a;
+
+in the source, what does it mean?
+In contrast, if it says
+
+.. code-block:: c
+
+ struct virtual_container *a;
+
+you can actually tell what ``a`` is.
+
+Lots of people think that typedefs ``help readability``. Not so. They are
+useful only for:
+
+ (a) totally opaque objects (where the typedef is actively used to **hide**
+ what the object is).
+
+ Example: ``pte_t`` etc. opaque objects that you can only access using
+ the proper accessor functions.
+
+ .. note::
+
+ Opaqueness and ``accessor functions`` are not good in themselves.
+ The reason we have them for things like pte_t etc. is that there
+ really is absolutely **zero** portably accessible information there.
+
+ (b) Clear integer types, where the abstraction **helps** avoid confusion
+ whether it is ``int`` or ``long``.
+
+ u8/u16/u32 are perfectly fine typedefs, although they fit into
+ category (d) better than here.
+
+ .. note::
+
+ Again - there needs to be a **reason** for this. If something is
+ ``unsigned long``, then there's no reason to do
+
+ typedef unsigned long myflags_t;
+
+ but if there is a clear reason for why it under certain circumstances
+ might be an ``unsigned int`` and under other configurations might be
+ ``unsigned long``, then by all means go ahead and use a typedef.
+
+ (c) when you use sparse to literally create a **new** type for
+ type-checking.
+
+ (d) New types which are identical to standard C99 types, in certain
+ exceptional circumstances.
+
+ Although it would only take a short amount of time for the eyes and
+ brain to become accustomed to the standard types like ``uint32_t``,
+ some people object to their use anyway.
+
+ Therefore, the Linux-specific ``u8/u16/u32/u64`` types and their
+ signed equivalents which are identical to standard types are
+ permitted -- although they are not mandatory in new code of your
+ own.
+
+ When editing existing code which already uses one or the other set
+ of types, you should conform to the existing choices in that code.
+
+ (e) Types safe for use in userspace.
+
+ In certain structures which are visible to userspace, we cannot
+ require C99 types and cannot use the ``u32`` form above. Thus, we
+ use __u32 and similar types in all structures which are shared
+ with userspace.
+
+Maybe there are other cases too, but the rule should basically be to NEVER
+EVER use a typedef unless you can clearly match one of those rules.
+
+In general, a pointer, or a struct that has elements that can reasonably
+be directly accessed should **never** be a typedef.
+
+
+6) Functions
+------------
+
+Functions should be short and sweet, and do just one thing. They should
+fit on one or two screenfuls of text (the ISO/ANSI screen size is 80x24,
+as we all know), and do one thing and do that well.
+
+The maximum length of a function is inversely proportional to the
+complexity and indentation level of that function. So, if you have a
+conceptually simple function that is just one long (but simple)
+case-statement, where you have to do lots of small things for a lot of
+different cases, it's OK to have a longer function.
+
+However, if you have a complex function, and you suspect that a
+less-than-gifted first-year high-school student might not even
+understand what the function is all about, you should adhere to the
+maximum limits all the more closely. Use helper functions with
+descriptive names (you can ask the compiler to in-line them if you think
+it's performance-critical, and it will probably do a better job of it
+than you would have done).
+
+Another measure of the function is the number of local variables. They
+shouldn't exceed 5-10, or you're doing something wrong. Re-think the
+function, and split it into smaller pieces. A human brain can
+generally easily keep track of about 7 different things, anything more
+and it gets confused. You know you're brilliant, but maybe you'd like
+to understand what you did 2 weeks from now.
+
+In source files, separate functions with one blank line. If the function is
+exported, the **EXPORT** macro for it should follow immediately after the
+closing function brace line. E.g.:
+
+.. code-block:: c
+
+ int system_is_up(void)
+ {
+ return system_state == SYSTEM_RUNNING;
+ }
+ EXPORT_SYMBOL(system_is_up);
+
+In function prototypes, include parameter names with their data types.
+Although this is not required by the C language, it is preferred in Linux
+because it is a simple way to add valuable information for the reader.
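+
+For example (a made-up prototype):
+
+.. code-block:: c
+
+	/* preferred: the parameter names carry information */
+	int foo_set_speed(struct foo_device *dev, unsigned int speed_hz);
+
+	/* discouraged: the types alone say much less */
+	int foo_set_speed(struct foo_device *, unsigned int);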
+
+
+7) Centralized exiting of functions
+-----------------------------------
+
+Albeit deprecated by some people, the equivalent of the goto statement is
+used frequently by compilers in the form of the unconditional jump instruction.
+
+The goto statement comes in handy when a function exits from multiple
+locations and some common work such as cleanup has to be done. If there is no
+cleanup needed then just return directly.
+
+Choose label names which say what the goto does or why the goto exists. An
+example of a good name could be ``out_free_buffer:`` if the goto frees ``buffer``.
+Avoid using GW-BASIC names like ``err1:`` and ``err2:``, as you would have to
+renumber them if you ever add or remove exit paths, and they make correctness
+difficult to verify anyway.
+
+The rationale for using gotos is:
+
+- unconditional statements are easier to understand and follow
+- nesting is reduced
+- errors by not updating individual exit points when making
+ modifications are prevented
+- saves the compiler work to optimize redundant code away ;)
+
+.. code-block:: c
+
+ int fun(int a)
+ {
+ int result = 0;
+ char *buffer;
+
+ buffer = kmalloc(SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (condition1) {
+ while (loop1) {
+ ...
+ }
+ result = 1;
+ goto out_free_buffer;
+ }
+ ...
+ out_free_buffer:
+ kfree(buffer);
+ return result;
+ }
+
+A common type of bug to be aware of is ``one err bugs`` which look like this:
+
+.. code-block:: c
+
+ err:
+ kfree(foo->bar);
+ kfree(foo);
+ return ret;
+
+The bug in this code is that on some exit paths ``foo`` is NULL. Normally the
+fix for this is to split it up into two error labels ``err_free_bar:`` and
+``err_free_foo:``:
+
+.. code-block:: c
+
+ err_free_bar:
+ kfree(foo->bar);
+ err_free_foo:
+ kfree(foo);
+ return ret;
+
+Ideally you should simulate errors to test all exit paths.
+
+
+8) Commenting
+-------------
+
+Comments are good, but there is also a danger of over-commenting. NEVER
+try to explain HOW your code works in a comment: it's much better to
+write the code so that the **working** is obvious, and it's a waste of
+time to explain badly written code.
+
+Generally, you want your comments to tell WHAT your code does, not HOW.
+Also, try to avoid putting comments inside a function body: if the
+function is so complex that you need to separately comment parts of it,
+you should probably go back to chapter 6 for a while. You can make
+small comments to note or warn about something particularly clever (or
+ugly), but try to avoid excess. Instead, put the comments at the head
+of the function, telling people what it does, and possibly WHY it does
+it.
+
+When commenting the kernel API functions, please use the kernel-doc format.
+See the files at :ref:`Documentation/doc-guide/ <doc_guide>` and
+``scripts/kernel-doc`` for details.
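+
+A minimal kernel-doc comment looks roughly like this (the function is
+hypothetical; see the documents above for the full syntax):
+
+.. code-block:: c
+
+	/**
+	 * foo_enable() - Enable the foo device.
+	 * @foo: Device to be enabled.
+	 *
+	 * Enable the device clock and bring the hardware out of reset.
+	 * The caller must hold the device lock.
+	 *
+	 * Return: 0 on success or a negative errno on failure.
+	 */
+	int foo_enable(struct foo *foo);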
+
+The preferred style for long (multi-line) comments is:
+
+.. code-block:: c
+
+ /*
+ * This is the preferred style for multi-line
+ * comments in the Linux kernel source code.
+ * Please use it consistently.
+ *
+ * Description: A column of asterisks on the left side,
+ * with beginning and ending almost-blank lines.
+ */
+
+For files in net/ and drivers/net/ the preferred style for long (multi-line)
+comments is a little different.
+
+.. code-block:: c
+
+ /* The preferred comment style for files in net/ and drivers/net
+ * looks like this.
+ *
+ * It is nearly the same as the generally preferred comment style,
+ * but there is no initial almost-blank line.
+ */
+
+It's also important to comment data, whether they are basic types or derived
+types. To this end, use just one data declaration per line (no commas for
+multiple data declarations). This leaves you room for a small comment on each
+item, explaining its use.
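+
+For example (the field names are invented for illustration):
+
+.. code-block:: c
+
+	int level;		/* verbosity level, 0 = quiet */
+	unsigned long flags;	/* FOO_* status bits */
+	void *private_data;	/* owned by the low-level driver */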
+
+
+9) You've made a mess of it
+---------------------------
+
+That's OK, we all do. You've probably been told by your long-time Unix
+user helper that ``GNU emacs`` automatically formats the C sources for
+you, and you've noticed that yes, it does do that, but the defaults it
+uses are less than desirable (in fact, they are worse than random
+typing - an infinite number of monkeys typing into GNU emacs would never
+make a good program).
+
+So, you can either get rid of GNU emacs, or change it to use saner
+values. To do the latter, you can stick the following in your .emacs file:
+
+.. code-block:: none
+
+ (defun c-lineup-arglist-tabs-only (ignored)
+ "Line up argument lists by tabs, not spaces"
+ (let* ((anchor (c-langelem-pos c-syntactic-element))
+ (column (c-langelem-2nd-pos c-syntactic-element))
+ (offset (- (1+ column) anchor))
+ (steps (floor offset c-basic-offset)))
+ (* (max steps 1)
+ c-basic-offset)))
+
+ (add-hook 'c-mode-common-hook
+ (lambda ()
+ ;; Add kernel style
+ (c-add-style
+ "linux-tabs-only"
+ '("linux" (c-offsets-alist
+ (arglist-cont-nonempty
+ c-lineup-gcc-asm-reg
+ c-lineup-arglist-tabs-only))))))
+
+ (add-hook 'c-mode-hook
+ (lambda ()
+ (let ((filename (buffer-file-name)))
+ ;; Enable kernel mode for the appropriate files
+ (when (and filename
+ (string-match (expand-file-name "~/src/linux-trees")
+ filename))
+ (setq indent-tabs-mode t)
+ (setq show-trailing-whitespace t)
+ (c-set-style "linux-tabs-only")))))
+
+This will make emacs go better with the kernel coding style for C
+files below ``~/src/linux-trees``.
+
+But even if you fail in getting emacs to do sane formatting, not
+everything is lost: use ``indent``.
+
+Now, again, GNU indent has the same brain-dead settings that GNU emacs
+has, which is why you need to give it a few command line options.
+However, that's not too bad, because even the makers of GNU indent
+recognize the authority of K&R (the GNU people aren't evil, they are
+just severely misguided in this matter), so you just give indent the
+options ``-kr -i8`` (stands for ``K&R, 8 character indents``), or use
+``scripts/Lindent``, which indents in the latest style.
+
+``indent`` has a lot of options, and especially when it comes to comment
+re-formatting you may want to take a look at the man page. But
+remember: ``indent`` is not a fix for bad programming.
+
+
+10) Kconfig configuration files
+-------------------------------
+
+For all of the Kconfig* configuration files throughout the source tree,
+the indentation is somewhat different. Lines under a ``config`` definition
+are indented with one tab, while help text is indented an additional two
+spaces. Example::
+
+ config AUDIT
+ bool "Auditing support"
+ depends on NET
+ help
+ Enable auditing infrastructure that can be used with another
+ kernel subsystem, such as SELinux (which requires this for
+ logging of avc messages output). Does not do system-call
+ auditing without CONFIG_AUDITSYSCALL.
+
+Seriously dangerous features (such as write support for certain
+filesystems) should advertise this prominently in their prompt string::
+
+ config ADFS_FS_RW
+ bool "ADFS write support (DANGEROUS)"
+ depends on ADFS_FS
+ ...
+
+For full documentation on the configuration files, see the file
+Documentation/kbuild/kconfig-language.txt.
+
+
+11) Data structures
+-------------------
+
+Data structures that have visibility outside the single-threaded
+environment they are created and destroyed in should always have
+reference counts. In the kernel, garbage collection doesn't exist (and
+outside the kernel garbage collection is slow and inefficient), which
+means that you absolutely **have** to reference count all your uses.
+
+Reference counting means that you can avoid locking, and allows multiple
+users to have access to the data structure in parallel - and not having
+to worry about the structure suddenly going away from under them just
+because they slept or did something else for a while.
+
+Note that locking is **not** a replacement for reference counting.
+Locking is used to keep data structures coherent, while reference
+counting is a memory management technique. Usually both are needed, and
+they are not to be confused with each other.
+
+Many data structures can indeed have two levels of reference counting,
+when there are users of different ``classes``. The subclass count counts
+the number of subclass users, and decrements the global count just once
+when the subclass count goes to zero.
+
+Examples of this kind of ``multi-level-reference-counting`` can be found in
+memory management (``struct mm_struct``: mm_users and mm_count), and in
+filesystem code (``struct super_block``: s_count and s_active).
+
+Remember: if another thread can find your data structure, and you don't
+have a reference count on it, you almost certainly have a bug.
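+
+A minimal sketch of reference counting using the kernel's generic
+``struct kref`` helpers (``struct foo`` is hypothetical; the allocation path
+would call ``kref_init()``, and a real lookup path also needs locking):
+
+.. code-block:: c
+
+	struct foo {
+		struct kref refcount;
+		/* ... payload ... */
+	};
+
+	static void foo_release(struct kref *kref)
+	{
+		struct foo *foo = container_of(kref, struct foo, refcount);
+
+		kfree(foo);
+	}
+
+	static void foo_get(struct foo *foo)
+	{
+		kref_get(&foo->refcount);
+	}
+
+	static void foo_put(struct foo *foo)
+	{
+		kref_put(&foo->refcount, foo_release);
+	}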
+
+
+12) Macros, Enums and RTL
+-------------------------
+
+Names of macros defining constants and labels in enums are capitalized.
+
+.. code-block:: c
+
+ #define CONSTANT 0x12345
+
+Enums are preferred when defining several related constants.
+
+CAPITALIZED macro names are appreciated but macros resembling functions
+may be named in lower case.
+
+Generally, inline functions are preferable to macros resembling functions.
+
+Macros with multiple statements should be enclosed in a do - while block:
+
+.. code-block:: c
+
+ #define macrofun(a, b, c) \
+ do { \
+ if (a == 5) \
+ do_this(b, c); \
+ } while (0)
+
+Things to avoid when using macros:
+
+1) macros that affect control flow:
+
+.. code-block:: c
+
+ #define FOO(x) \
+ do { \
+ if (blah(x) < 0) \
+ return -EBUGGERED; \
+ } while (0)
+
+is a **very** bad idea. It looks like a function call but exits the ``calling``
+function; don't break the internal parsers of those who will read the code.
+
+2) macros that depend on having a local variable with a magic name:
+
+.. code-block:: c
+
+ #define FOO(val) bar(index, val)
+
+might look like a good thing, but it's confusing as hell when one reads the
+code and it's prone to breakage from seemingly innocent changes.
+
+3) macros with arguments that are used as l-values: FOO(x) = y; will
+bite you if somebody e.g. turns FOO into an inline function.
+
+4) forgetting about precedence: macros defining constants using expressions
+must enclose the expression in parentheses. Beware of similar issues with
+macros using parameters.
+
+.. code-block:: c
+
+ #define CONSTANT 0x4000
+ #define CONSTEXP (CONSTANT | 3)
+
+5) namespace collisions when defining local variables in macros resembling
+functions:
+
+.. code-block:: c
+
+ #define FOO(x) \
+ ({ \
+ typeof(x) ret; \
+ ret = calc_ret(x); \
+ (ret); \
+ })
+
+ret is a common name for a local variable - __foo_ret is less likely
+to collide with an existing variable.
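+
+The same macro, rewritten with a less collision-prone internal name:
+
+.. code-block:: c
+
+	#define FOO(x)				\
+	({					\
+		typeof(x) __foo_ret;		\
+		__foo_ret = calc_ret(x);	\
+		(__foo_ret);			\
+	})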
+
+The cpp manual deals with macros exhaustively. The gcc internals manual also
+covers RTL which is used frequently with assembly language in the kernel.
+
+
+13) Printing kernel messages
+----------------------------
+
+Kernel developers like to be seen as literate. Do mind the spelling
+of kernel messages to make a good impression. Do not use crippled
+words like ``dont``; use ``do not`` or ``don't`` instead. Make the messages
+concise, clear, and unambiguous.
+
+Kernel messages do not have to be terminated with a period.
+
+Printing numbers in parentheses (%d) adds no value and should be avoided.
+
+There are a number of driver model diagnostic macros in <linux/device.h>
+which you should use to make sure messages are matched to the right device
+and driver, and are tagged with the right level: dev_err(), dev_warn(),
+dev_info(), and so forth. For messages that aren't associated with a
+particular device, <linux/printk.h> defines pr_notice(), pr_info(),
+pr_warn(), pr_err(), etc.
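+
+For instance (the device, messages and variables are invented for
+illustration):
+
+.. code-block:: c
+
+	dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
+	pr_info("frob: using %u buffers\n", nbuffers);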
+
+Coming up with good debugging messages can be quite a challenge; and once
+you have them, they can be a huge help for remote troubleshooting. However
+debug message printing is handled differently than printing other non-debug
+messages. While the other pr_XXX() functions print unconditionally,
+pr_debug() does not; it is compiled out by default, unless either DEBUG is
+defined or CONFIG_DYNAMIC_DEBUG is set. That is true for dev_dbg() also,
+and a related convention uses VERBOSE_DEBUG to add dev_vdbg() messages to
+the ones already enabled by DEBUG.
+
+Many subsystems have Kconfig debug options to turn on -DDEBUG in the
+corresponding Makefile; in other cases specific files #define DEBUG. And
+when a debug message should be unconditionally printed, such as if it is
+already inside a debug-related #ifdef section, printk(KERN_DEBUG ...) can be
+used.
+
+
+14) Allocating memory
+---------------------
+
+The kernel provides the following general purpose memory allocators:
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc(). Please refer to the API documentation for further information
+about them.
+
+The preferred form for passing a size of a struct is the following:
+
+.. code-block:: c
+
+ p = kmalloc(sizeof(*p), ...);
+
+The alternative form where struct name is spelled out hurts readability and
+introduces an opportunity for a bug when the pointer variable type is changed
+but the corresponding sizeof that is passed to a memory allocator is not.
+
+Casting the return value which is a void pointer is redundant. The conversion
+from void pointer to any other pointer type is guaranteed by the C programming
+language.
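+
+So, assuming ``p`` is declared as ``struct foo *p;``, write:
+
+.. code-block:: c
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);			/* good */
+	p = (struct foo *)kmalloc(sizeof(*p), GFP_KERNEL);	/* redundant cast */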
+
+The preferred form for allocating an array is the following:
+
+.. code-block:: c
+
+ p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+.. code-block:: c
+
+ p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
+
+
+15) The inline disease
+----------------------
+
+There appears to be a common misperception that gcc has a magic "make me
+faster" speedup option called ``inline``. While the use of inlines can be
+appropriate (for example as a means of replacing macros, see Chapter 12), it
+very often is not. Abundant use of the inline keyword leads to a much bigger
+kernel, which in turn slows the system as a whole down, due to a bigger
+icache footprint for the CPU and simply because there is less memory
+available for the pagecache. Just think about it; a pagecache miss causes a
+disk seek, which easily takes 5 milliseconds. There are a LOT of cpu cycles
+that can go into these 5 milliseconds.
+
+A reasonable rule of thumb is not to use inline for functions that have more
+than 3 lines of code in them. An exception to this rule is the case where
+a parameter is known to be a compile-time constant, and as a result of this
+constantness you *know* the compiler will be able to optimize most of your
+function away at compile time. For a good example of this latter case, see
+the kmalloc() inline function.
+
+Often people argue that adding inline to functions that are static and used
+only once is always a win since there is no space tradeoff. While this is
+technically correct, gcc is capable of inlining these automatically without
+help, and the maintenance issue of removing the inline when a second user
+appears outweighs the potential value of the hint that tells gcc to do
+something it would have done anyway.
+
+
+16) Function return values and names
+------------------------------------
+
+Functions can return values of many different kinds, and one of the
+most common is a value indicating whether the function succeeded or
+failed. Such a value can be represented as an error-code integer
+(-Exxx = failure, 0 = success) or a ``succeeded`` boolean (0 = failure,
+non-zero = success).
+
+Mixing up these two sorts of representations is a fertile source of
+difficult-to-find bugs. If the C language included a strong distinction
+between integers and booleans then the compiler would find these mistakes
+for us... but it doesn't. To help prevent such bugs, always follow this
+convention::
+
+ If the name of a function is an action or an imperative command,
+ the function should return an error-code integer. If the name
+ is a predicate, the function should return a "succeeded" boolean.
+
+For example, ``add work`` is a command, and the add_work() function returns 0
+for success or -EBUSY for failure. In the same way, ``PCI device present`` is
+a predicate, and the pci_dev_present() function returns 1 if it succeeds in
+finding a matching device or 0 if it doesn't.
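+
+Expressed as prototypes (sketched here purely to illustrate the naming
+convention):
+
+.. code-block:: c
+
+	/* imperative name: returns 0 on success or a negative error code */
+	int add_work(struct work_struct *work);
+
+	/* predicate name: returns 1 if a matching device is found, 0 if not */
+	int pci_dev_present(const struct pci_device_id *ids);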
+
+All EXPORTed functions must respect this convention, and so should all
+public functions. Private (static) functions need not, but it is
+recommended that they do.
+
+Functions whose return value is the actual result of a computation, rather
+than an indication of whether the computation succeeded, are not subject to
+this rule. Generally they indicate failure by returning some out-of-range
+result. Typical examples would be functions that return pointers; they use
+NULL or the ERR_PTR mechanism to report failure.
+
+
+17) Don't re-invent the kernel macros
+-------------------------------------
+
+The header file include/linux/kernel.h contains a number of macros that
+you should use, rather than explicitly coding some variant of them yourself.
+For example, if you need to calculate the length of an array, take advantage
+of the macro
+
+.. code-block:: c
+
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+Similarly, if you need to calculate the size of some structure member, use
+
+.. code-block:: c
+
+ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
+There are also min() and max() macros that do strict type checking if you
+need them. Feel free to peruse that header file to see what else is already
+defined that you shouldn't reproduce in your code.
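+
+For example, assuming both ``len`` and ``avail`` are of type ``size_t``:
+
+.. code-block:: c
+
+	size_t chunk = min(len, avail);	/* min() type-checks its arguments */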
+
+
+18) Editor modelines and other cruft
+------------------------------------
+
+Some editors can interpret configuration information embedded in source files,
+indicated with special markers. For example, emacs interprets lines marked
+like this:
+
+.. code-block:: c
+
+ -*- mode: c -*-
+
+Or like this:
+
+.. code-block:: c
+
+ /*
+ Local Variables:
+ compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
+ End:
+ */
+
+Vim interprets markers that look like this:
+
+.. code-block:: c
+
+ /* vim:set sw=8 noet */
+
+Do not include any of these in source files. People have their own personal
+editor configurations, and your source files should not override them. This
+includes markers for indentation and mode configuration. People may use their
+own custom mode, or may have some other magic method for making indentation
+work correctly.
+
+
+19) Inline assembly
+-------------------
+
+In architecture-specific code, you may need to use inline assembly to interface
+with CPU or platform functionality. Don't hesitate to do so when necessary.
+However, don't use inline assembly gratuitously when C can do the job. You can
+and should poke hardware from C when possible.
+
+Consider writing simple helper functions that wrap common bits of inline
+assembly, rather than repeatedly writing them with slight variations. Remember
+that inline assembly can use C parameters.
+
+Large, non-trivial assembly functions should go in .S files, with corresponding
+C prototypes defined in C header files. The C prototypes for assembly
+functions should use ``asmlinkage``.
+
+You may need to mark your asm statement as volatile, to prevent GCC from
+removing it if GCC doesn't notice any side effects. You don't always need to
+do so, though, and doing so unnecessarily can limit optimization.
+
+When writing a single inline assembly statement containing multiple
+instructions, put each instruction on a separate line in a separate quoted
+string, and end each string except the last with ``\n\t`` to properly indent the
+next instruction in the assembly output:
+
+.. code-block:: c
+
+ asm ("magic %reg1, #42\n\t"
+ "more_magic %reg2, %reg3"
+ : /* outputs */ : /* inputs */ : /* clobbers */);
+
+
+20) Conditional Compilation
+---------------------------
+
+Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
+files; doing so makes code harder to read and logic harder to follow. Instead,
+use such conditionals in a header file defining functions for use in those .c
+files, providing no-op stub versions in the #else case, and then call those
+functions unconditionally from .c files. The compiler will avoid generating
+any code for the stub calls, producing identical results, but the logic will
+remain easy to follow.
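+
+A sketch of this pattern, for a hypothetical ``CONFIG_FOO`` option:
+
+.. code-block:: c
+
+	/* in a header file */
+	#ifdef CONFIG_FOO
+	int foo_init(struct foo_device *dev);
+	#else
+	static inline int foo_init(struct foo_device *dev)
+	{
+		return 0;
+	}
+	#endif /* CONFIG_FOO */
+
+	/* in a .c file, called unconditionally */
+	err = foo_init(dev);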
+
+Prefer to compile out entire functions, rather than portions of functions or
+portions of expressions. Rather than putting an ifdef in an expression, factor
+out part or all of the expression into a separate helper function and apply the
+conditional to that function.
+
+If you have a function or variable which may potentially go unused in a
+particular configuration, and the compiler would warn about its definition
+going unused, mark the definition as __maybe_unused rather than wrapping it in
+a preprocessor conditional. (However, if a function or variable *always* goes
+unused, delete it.)
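+
+For instance (a hypothetical suspend handler that is only referenced when
+power management is enabled):
+
+.. code-block:: c
+
+	static int __maybe_unused foo_suspend(struct device *dev)
+	{
+		/* only referenced from a PM ops table in some configurations */
+		return 0;
+	}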
+
+Within code, where possible, use the IS_ENABLED macro to convert a Kconfig
+symbol into a C boolean expression, and use it in a normal C conditional:
+
+.. code-block:: c
+
+ if (IS_ENABLED(CONFIG_SOMETHING)) {
+ ...
+ }
+
+The compiler will constant-fold the conditional away, and include or exclude
+the block of code just as with an #ifdef, so this will not add any runtime
+overhead. However, this approach still allows the C compiler to see the code
+inside the block, and check it for correctness (syntax, types, symbol
+references, etc). Thus, you still have to use an #ifdef if the code inside the
+block references symbols that will not exist if the condition is not met.
+
+At the end of any non-trivial #if or #ifdef block (more than a few lines),
+place a comment after the #endif on the same line, noting the conditional
+expression used. For instance:
+
+.. code-block:: c
+
+ #ifdef CONFIG_SOMETHING
+ ...
+ #endif /* CONFIG_SOMETHING */
+
+
+Appendix I) References
+----------------------
+
+The C Programming Language, Second Edition
+by Brian W. Kernighan and Dennis M. Ritchie.
+Prentice Hall, Inc., 1988.
+ISBN 0-13-110362-8 (paperback), 0-13-110370-9 (hardback).
+
+The Practice of Programming
+by Brian W. Kernighan and Rob Pike.
+Addison-Wesley, Inc., 1999.
+ISBN 0-201-61586-X.
+
+GNU manuals - where in compliance with K&R and this text - for cpp, gcc,
+gcc internals and indent, all available from http://www.gnu.org/manual/
+
+WG14 is the international standardization working group for the programming
+language C, URL: http://www.open-std.org/JTC1/SC22/WG14/
+
+Kernel process/coding-style.rst, by greg@kroah.com at OLS 2002:
+http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
diff --git a/Documentation/development-process/conf.py b/Documentation/process/conf.py
index 4b4a12dace02..1b01a80ad9ce 100644
--- a/Documentation/development-process/conf.py
+++ b/Documentation/process/conf.py
@@ -5,6 +5,6 @@ project = 'Linux Kernel Development Documentation'
tags.add("subproject")
latex_documents = [
- ('index', 'development-process.tex', 'Linux Kernel Development Documentation',
+ ('index', 'process.tex', 'Linux Kernel Development Documentation',
'The kernel development community', 'manual'),
]
diff --git a/Documentation/development-process/development-process.rst b/Documentation/process/development-process.rst
index bd1399f7202a..61c627e41ba8 100644
--- a/Documentation/development-process/development-process.rst
+++ b/Documentation/process/development-process.rst
@@ -26,4 +26,3 @@ development (or, indeed, free software development in general). While
there is some technical material here, this is very much a process-oriented
discussion which does not require a deep knowledge of kernel programming to
understand.
-
diff --git a/Documentation/email-clients.txt b/Documentation/process/email-clients.rst
index ac892b30815e..ac892b30815e 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/process/email-clients.rst
diff --git a/Documentation/HOWTO b/Documentation/process/howto.rst
index 5f042349f987..1260f60d4cb9 100644
--- a/Documentation/HOWTO
+++ b/Documentation/process/howto.rst
@@ -90,19 +90,19 @@ required reading:
what is necessary to do to configure and build the kernel. People
who are new to the kernel should start here.
- :ref:`Documentation/Changes <changes>`
+ :ref:`Documentation/process/changes.rst <changes>`
This file gives a list of the minimum levels of various software
packages that are necessary to build and run the kernel
successfully.
- :ref:`Documentation/CodingStyle <codingstyle>`
+ :ref:`Documentation/process/coding-style.rst <codingstyle>`
This describes the Linux kernel coding style, and some of the
rationale behind it. All new code is expected to follow the
guidelines in this document. Most maintainers will only accept
patches if these rules are followed, and many people will only
review code if it is in the proper style.
- :ref:`Documentation/SubmittingPatches <submittingpatches>` and :ref:`Documentation/SubmittingDrivers <submittingdrivers>`
+ :ref:`Documentation/process/submitting-patches.rst <submittingpatches>` and :ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
These files describe in explicit detail how to successfully create
and send a patch, including (but not limited to):
@@ -122,7 +122,7 @@ required reading:
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
- :ref:`Documentation/stable_api_nonsense.txt <stable_api_nonsense>`
+ :ref:`Documentation/process/stable-api-nonsense.rst <stable_api_nonsense>`
This file describes the rationale behind the conscious decision to
not have a stable API within the kernel, including things like:
@@ -135,29 +135,29 @@ required reading:
philosophy and is very important for people moving to Linux from
development on other Operating Systems.
- :ref:`Documentation/SecurityBugs <securitybugs>`
+ :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
If you feel you have found a security problem in the Linux kernel,
please follow the steps in this document to help notify the kernel
developers, and help solve the issue.
- :ref:`Documentation/ManagementStyle <managementstyle>`
+ :ref:`Documentation/process/management-style.rst <managementstyle>`
This document describes how Linux kernel maintainers operate and the
shared ethos behind their methodologies. This is important reading
for anyone new to kernel development (or anyone simply curious about
it), as it resolves a lot of common misconceptions and confusion
about the unique behavior of kernel maintainers.
- :ref:`Documentation/stable_kernel_rules.txt <stable_kernel_rules>`
+ :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
This file describes the rules on how the stable kernel releases
happen, and what to do if you want to get a change into one of these
releases.
- :ref:`Documentation/kernel-docs.txt <kernel_docs>`
+ :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
A list of external documentation that pertains to kernel
development. Please consult this list if you do not find what you
are looking for within the in-kernel documentation.
- :ref:`Documentation/applying-patches.txt <applying_patches>`
+ :ref:`Documentation/process/applying-patches.rst <applying_patches>`
A good introduction describing exactly what a patch is and how to
apply it to the different development branches of the kernel.
@@ -254,7 +254,8 @@ branches. These different branches are:
- the 4.x -next kernel tree for integration tests
4.x kernel tree
------------------
+~~~~~~~~~~~~~~~
+
4.x kernels are maintained by Linus Torvalds, and can be found on
https://kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
process is as follows:
@@ -266,15 +267,16 @@ process is as follows:
is using git (the kernel's source management tool, more information
can be found at https://git-scm.com/) but plain patches are also just
fine.
- - After two weeks a -rc1 kernel is released it is now possible to push
- only patches that do not include new features that could affect the
- stability of the whole kernel. Please note that a whole new driver
- (or filesystem) might be accepted after -rc1 because there is no
- risk of causing regressions with such a change as long as the change
- is self-contained and does not affect areas outside of the code that
- is being added. git can be used to send patches to Linus after -rc1
- is released, but the patches need to also be sent to a public
- mailing list for review.
+ - After two weeks a -rc1 kernel is released and the focus is on making the
+ new kernel as rock solid as possible. Most of the patches at this point
+ should fix a regression. Bugs that have always existed are not
+ regressions, so only push these kinds of fixes if they are important.
+ Please note that a whole new driver (or filesystem) might be accepted
+ after -rc1 because there is no risk of causing regressions with such a
+ change as long as the change is self-contained and does not affect areas
+ outside of the code that is being added. git can be used to send
+ patches to Linus after -rc1 is released, but the patches need to also be
+ sent to a public mailing list for review.
- A new -rc is released whenever Linus deems the current git tree to
be in a reasonably sane state adequate for testing. The goal is to
release a new -rc kernel every week.
@@ -289,7 +291,8 @@ mailing list about kernel releases:
preconceived timeline."*
4.x.y -stable kernel tree
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
Kernels with 3-part versions are -stable kernels. They contain
relatively small and critical fixes for security problems or significant
regressions discovered in a given 4.x kernel.
@@ -307,12 +310,13 @@ two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
instantly.
-The file Documentation/stable_kernel_rules.txt in the kernel tree
+The file Documentation/process/stable-kernel-rules.rst in the kernel tree
documents what kinds of changes are acceptable for the -stable tree, and
how the release process works.
4.x -git patches
-----------------
+~~~~~~~~~~~~~~~~
+
These are daily snapshots of Linus' kernel tree which are managed in a
git repository (hence the name.) These patches are usually released
daily and represent the current state of Linus' tree. They are more
@@ -320,7 +324,8 @@ experimental than -rc kernels since they are generated automatically
without even a cursory glance to see if they are sane.
Subsystem Specific kernel trees and patches
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
The maintainers of the various kernel subsystems --- and also many
kernel subsystem developers --- expose their current state of
development in source repositories. That way, others can see what is
@@ -344,7 +349,8 @@ accepted, or rejected. Most of these patchwork sites are listed at
https://patchwork.kernel.org/.
4.x -next kernel tree for integration tests
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Before updates from subsystem trees are merged into the mainline 4.x
tree, they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
@@ -366,7 +372,7 @@ tool. For details on how to use the kernel bugzilla, please see:
https://bugzilla.kernel.org/page.cgi?id=faq.html
-The file REPORTING-BUGS in the main kernel source directory has a good
+The file admin-guide/reporting-bugs.rst in the main kernel source directory has a good
template for how to report a possible kernel bug, and details what kind
of information is needed by the kernel developers to help track down the
problem.
@@ -440,7 +446,7 @@ add your statements between the individual quoted sections instead of
writing at the top of the mail.
If you add patches to your mail, make sure they are plain readable text
-as stated in Documentation/SubmittingPatches.
+as stated in Documentation/process/submitting-patches.rst.
Kernel developers don't want to deal with
attachments or compressed patches; they may want to comment on
individual lines of your patch, which works only that way. Make sure you
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
new file mode 100644
index 000000000000..10aa6920709a
--- /dev/null
+++ b/Documentation/process/index.rst
@@ -0,0 +1,57 @@
+.. raw:: latex
+
+ \renewcommand\thesection*
+ \renewcommand\thesubsection*
+
+
+Working with the kernel development community
+=============================================
+
+So you want to be a Linux kernel developer? Welcome! While there is a lot
+to be learned about the kernel in a technical sense, it is also important
+to learn about how our community works. Reading these documents will make
+it much easier for you to get your changes merged with a minimum of
+trouble.
+
+Below are the essential guides that every developer should read.
+
+.. toctree::
+ :maxdepth: 1
+
+ howto
+ code-of-conflict
+ development-process
+ submitting-patches
+ coding-style
+ email-clients
+
+Other guides to the community that are of interest to most developers are:
+
+.. toctree::
+ :maxdepth: 1
+
+ changes
+ submitting-drivers
+ stable-api-nonsense
+ management-style
+ stable-kernel-rules
+ submit-checklist
+ kernel-docs
+
+These are some overall technical guides that have been put here for now for
+lack of a better place.
+
+.. toctree::
+ :maxdepth: 1
+
+ applying-patches
+ adding-syscalls
+ magic-number
+ volatile-considered-harmful
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/kernel-docs.txt b/Documentation/process/kernel-docs.rst
index 05a7857a4a83..05a7857a4a83 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/process/kernel-docs.rst
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
new file mode 100644
index 000000000000..c74199f60c6c
--- /dev/null
+++ b/Documentation/process/magic-number.rst
@@ -0,0 +1,164 @@
+Linux magic numbers
+===================
+
+This file is a registry of magic numbers which are in use. When you
+add a magic number to a structure, you should also add it to this
+file, since it is best if the magic numbers used by various structures
+are unique.
+
+It is a **very** good idea to protect kernel data structures with magic
+numbers. This allows you to check at run time whether (a) a structure
+has been clobbered, or (b) you've passed the wrong structure to a
+routine. This last is especially useful --- particularly when you are
+passing pointers to structures via a void * pointer. The tty code,
+for example, does this frequently to pass driver-specific and line
+discipline-specific structures back and forth.
+
+The way to use magic numbers is to declare them at the beginning of
+the structure, like so::
+
+ struct tty_ldisc {
+ int magic;
+ ...
+ };
+
+Please follow this discipline when you are adding future enhancements
+to the kernel! It has saved me countless hours of debugging,
+especially in the screwy cases where an array has been overrun and
+structures following the array have been overwritten. Using this
+discipline, these cases get detected quickly and safely.
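+
+A typical run-time check then looks like this (sketched; the exact error
+handling will vary)::
+
+	if (ldisc->magic != TTY_LDISC_MAGIC) {
+		printk(KERN_ERR "bad tty_ldisc magic number\n");
+		return -EINVAL;
+	}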
+
+Changelog::
+
+ Theodore Ts'o
+ 31 Mar 94
+
+ The magic table is current to Linux 2.1.55.
+
+ Michael Chastain
+ <mailto:mec@shout.net>
+ 22 Sep 1997
+
+ Now it should be up to date with Linux 2.1.112. Because
+ we are in feature freeze time it is very unlikely that
+ something will change before 2.2.x. The entries are
+ sorted by number field.
+
+ Krzysztof G. Baranowski
+ <mailto: kgb@knm.org.pl>
+ 29 Jul 1998
+
+ Updated the magic table to Linux 2.5.45. Right over the feature freeze,
+ but it is possible that some new magic numbers will sneak into the
+ kernel before 2.6.x yet.
+
+ Petr Baudis
+ <pasky@ucw.cz>
+ 03 Nov 2002
+
+ Updated the magic table to Linux 2.5.74.
+
+ Fabian Frederick
+ <ffrederick@users.sourceforge.net>
+ 09 Jul 2003
+
+
+===================== ================ ======================== ==========================================
+Magic Name Number Structure File
+===================== ================ ======================== ==========================================
+PG_MAGIC 'P' pg_{read,write}_hdr ``include/linux/pg.h``
+CMAGIC 0x0111 user ``include/linux/a.out.h``
+MKISS_DRIVER_MAGIC 0x04bf mkiss_channel ``drivers/net/mkiss.h``
+HDLC_MAGIC 0x239e n_hdlc ``drivers/char/n_hdlc.c``
+APM_BIOS_MAGIC 0x4101 apm_user ``arch/x86/kernel/apm_32.c``
+CYCLADES_MAGIC 0x4359 cyclades_port ``include/linux/cyclades.h``
+DB_MAGIC 0x4442 fc_info ``drivers/net/iph5526_novram.c``
+DL_MAGIC 0x444d fc_info ``drivers/net/iph5526_novram.c``
+FASYNC_MAGIC 0x4601 fasync_struct ``include/linux/fs.h``
+FF_MAGIC 0x4646 fc_info ``drivers/net/iph5526_novram.c``
+ISICOM_MAGIC 0x4d54 isi_port ``include/linux/isicom.h``
+PTY_MAGIC 0x5001 ``drivers/char/pty.c``
+PPP_MAGIC 0x5002 ppp ``include/linux/if_pppvar.h``
+SERIAL_MAGIC 0x5301 async_struct ``include/linux/serial.h``
+SSTATE_MAGIC 0x5302 serial_state ``include/linux/serial.h``
+SLIP_MAGIC 0x5302 slip ``drivers/net/slip.h``
+STRIP_MAGIC 0x5303 strip ``drivers/net/strip.c``
+X25_ASY_MAGIC 0x5303 x25_asy ``drivers/net/x25_asy.h``
+SIXPACK_MAGIC 0x5304 sixpack ``drivers/net/hamradio/6pack.h``
+AX25_MAGIC 0x5316 ax_disp ``drivers/net/mkiss.h``
+TTY_MAGIC 0x5401 tty_struct ``include/linux/tty.h``
+MGSL_MAGIC 0x5401 mgsl_info ``drivers/char/synclink.c``
+TTY_DRIVER_MAGIC 0x5402 tty_driver ``include/linux/tty_driver.h``
+MGSLPC_MAGIC 0x5402 mgslpc_info ``drivers/char/pcmcia/synclink_cs.c``
+TTY_LDISC_MAGIC 0x5403 tty_ldisc ``include/linux/tty_ldisc.h``
+USB_SERIAL_MAGIC 0x6702 usb_serial ``drivers/usb/serial/usb-serial.h``
+FULL_DUPLEX_MAGIC 0x6969 ``drivers/net/ethernet/dec/tulip/de2104x.c``
+USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth ``drivers/usb/class/bluetty.c``
+RFCOMM_TTY_MAGIC 0x6d02 ``net/bluetooth/rfcomm/tty.c``
+USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port ``drivers/usb/serial/usb-serial.h``
+CG_MAGIC 0x00090255 ufs_cylinder_group ``include/linux/ufs_fs.h``
+RPORT_MAGIC 0x00525001 r_port ``drivers/char/rocket_int.h``
+LSEMAGIC 0x05091998 lse ``drivers/fc4/fc.c``
+GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str ``drivers/scsi/gdth_ioctl.h``
+RIEBL_MAGIC 0x09051990 ``drivers/net/atarilance.c``
+NBD_REQUEST_MAGIC 0x12560953 nbd_request ``include/linux/nbd.h``
+RED_MAGIC2 0x170fc2a5 (any) ``mm/slab.c``
+BAYCOM_MAGIC 0x19730510 baycom_state ``drivers/net/baycom_epp.c``
+ISDN_X25IFACE_MAGIC 0x1e75a2b9 isdn_x25iface_proto_data ``drivers/isdn/isdn_x25iface.h``
+ECP_MAGIC 0x21504345 cdkecpsig ``include/linux/cdk.h``
+LSOMAGIC 0x27091997 lso ``drivers/fc4/fc.c``
+LSMAGIC 0x2a3b4d2a ls ``drivers/fc4/fc.c``
+WANPIPE_MAGIC 0x414C4453 sdla_{dump,exec} ``include/linux/wanpipe.h``
+CS_CARD_MAGIC 0x43525553 cs_card ``sound/oss/cs46xx.c``
+LABELCL_MAGIC 0x4857434c labelcl_info_s ``include/asm/ia64/sn/labelcl.h``
+ISDN_ASYNC_MAGIC 0x49344C01 modem_info ``include/linux/isdn.h``
+CTC_ASYNC_MAGIC 0x49344C01 ctc_tty_info ``drivers/s390/net/ctctty.c``
+ISDN_NET_MAGIC 0x49344C02 isdn_net_local_s ``drivers/isdn/i4l/isdn_net_lib.h``
+SAVEKMSG_MAGIC2 0x4B4D5347 savekmsg ``arch/*/amiga/config.c``
+CS_STATE_MAGIC 0x4c4f4749 cs_state ``sound/oss/cs46xx.c``
+SLAB_C_MAGIC 0x4f17a36d kmem_cache ``mm/slab.c``
+COW_MAGIC 0x4f4f4f4d cow_header_v1 ``arch/um/drivers/ubd_user.c``
+I810_CARD_MAGIC 0x5072696E i810_card ``sound/oss/i810_audio.c``
+TRIDENT_CARD_MAGIC 0x5072696E trident_card ``sound/oss/trident.c``
+ROUTER_MAGIC 0x524d4157 wan_device [in ``wanrouter.h`` pre 3.9]
+SAVEKMSG_MAGIC1 0x53415645 savekmsg ``arch/*/amiga/config.c``
+GDA_MAGIC 0x58464552 gda ``arch/mips/include/asm/sn/gda.h``
+RED_MAGIC1 0x5a2cf071 (any) ``mm/slab.c``
+EEPROM_MAGIC_VALUE 0x5ab478d2 lanai_dev ``drivers/atm/lanai.c``
+HDLCDRV_MAGIC 0x5ac6e778 hdlcdrv_state ``include/linux/hdlcdrv.h``
+PCXX_MAGIC 0x5c6df104 channel ``drivers/char/pcxx.h``
+KV_MAGIC 0x5f4b565f kernel_vars_s ``arch/mips/include/asm/sn/klkernvars.h``
+I810_STATE_MAGIC 0x63657373 i810_state ``sound/oss/i810_audio.c``
+TRIDENT_STATE_MAGIC 0x63657373 trient_state ``sound/oss/trident.c``
+M3_CARD_MAGIC 0x646e6f50 m3_card ``sound/oss/maestro3.c``
+FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fore200e.h``
+SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
+SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
+LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
+OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
+M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
+VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
+KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
+PWC_MAGIC 0x89DC10AB pwc_device ``drivers/usb/media/pwc.h``
+NBD_REPLY_MAGIC 0x96744668 nbd_reply ``include/linux/nbd.h``
+ENI155_MAGIC 0xa54b872d midway_eprom ``drivers/atm/eni.h``
+CODA_MAGIC 0xC0DAC0DA coda_file_info ``fs/coda/coda_fs_i.h``
+DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram ``drivers/scsi/gdth.h``
+YAM_MAGIC 0xF10A7654 yam_port ``drivers/net/hamradio/yam.c``
+CCB_MAGIC 0xf2691ad2 ccb ``drivers/scsi/ncr53c8xx.c``
+QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry ``drivers/scsi/arm/queue.c``
+QUEUE_MAGIC_USED 0xf7e1cc33 queue_entry ``drivers/scsi/arm/queue.c``
+HTB_CMAGIC 0xFEFAFEF1 htb_class ``net/sched/sch_htb.c``
+NMI_MAGIC 0x48414d4d455201 nmi_s ``arch/mips/include/asm/sn/nmi.h``
+===================== ================ ======================== ==========================================
+
+Note that special per-driver magic numbers are also defined in sound memory
+management. See ``include/sound/sndmagic.h`` for a complete list of them. Many
+OSS sound drivers have their magic numbers constructed from the soundcard PCI
+ID - these are not listed here.
+
+The IrDA subsystem also uses a large number of its own magic numbers; see
+``include/net/irda/irda.h`` for a complete list of them.
+
+HFS is another large user of magic numbers - you can find them in
+``fs/hfs/hfs.h``.
diff --git a/Documentation/ManagementStyle b/Documentation/process/management-style.rst
index dea2e66c9a10..45595fd8a66b 100644
--- a/Documentation/ManagementStyle
+++ b/Documentation/process/management-style.rst
@@ -5,7 +5,7 @@ Linux kernel management style
This is a short document describing the preferred (or made up, depending
on who you ask) management style for the linux kernel. It's meant to
-mirror the CodingStyle document to some degree, and mainly written to
+mirror the process/coding-style.rst document to some degree, and mainly written to
avoid answering [#f1]_ the same (or similar) questions over and over again.
Management style is very personal and much harder to quantify than
diff --git a/Documentation/stable_api_nonsense.txt b/Documentation/process/stable-api-nonsense.rst
index 24f5aeecee91..24f5aeecee91 100644
--- a/Documentation/stable_api_nonsense.txt
+++ b/Documentation/process/stable-api-nonsense.rst
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/process/stable-kernel-rules.rst
index 4d82e31b7958..11ec2d93a5e0 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -27,7 +27,7 @@ Rules on what kind of patches are accepted, and which ones are not, into the
- It cannot contain any "trivial" fixes in it (spelling changes,
whitespace cleanups, etc).
- It must follow the
- :ref:`Documentation/SubmittingPatches <submittingpatches>`
+ :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
rules.
- It or an equivalent fix must already exist in Linus' tree (upstream).
@@ -40,7 +40,7 @@ Procedure for submitting patches to the -stable tree
Documentation/networking/netdev-FAQ.txt
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
- :ref:`Documentation/SecurityBugs <securitybugs>`.
+ :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
For all other submissions, choose one of the following procedures
-----------------------------------------------------------------
diff --git a/Documentation/SubmitChecklist b/Documentation/process/submit-checklist.rst
index 894289b22b15..a0d9d34bfb6d 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/process/submit-checklist.rst
@@ -7,7 +7,7 @@ Here are some basic things that developers should do if they want to see their
kernel patch submissions accepted more quickly.
These are all above and beyond the documentation that is provided in
-:ref:`Documentation/SubmittingPatches <submittingpatches>`
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
and elsewhere regarding submitting Linux kernel patches.
@@ -31,7 +31,7 @@ and elsewhere regarding submitting Linux kernel patches.
tends to use ``unsigned long`` for 64-bit quantities.
5) Check your patch for general style as detailed in
- :ref:`Documentation/CodingStyle <codingstyle>`.
+ :ref:`Documentation/process/coding-style.rst <codingstyle>`.
Check for trivial violations with the patch style checker prior to
submission (``scripts/checkpatch.pl``).
You should be able to justify all violations that remain in
@@ -78,7 +78,7 @@ and elsewhere regarding submitting Linux kernel patches.
16) All new ``/proc`` entries are documented under ``Documentation/``
17) All new kernel boot parameters are documented in
- ``Documentation/kernel-parameters.txt``.
+ ``Documentation/admin-guide/kernel-parameters.rst``.
18) All new module parameters are documented with ``MODULE_PARM_DESC()``
diff --git a/Documentation/SubmittingDrivers b/Documentation/process/submitting-drivers.rst
index 252b77a23fad..afb82ee0cbea 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/process/submitting-drivers.rst
@@ -8,7 +8,15 @@ various kernel trees. Note that if you are interested in video card drivers
you should probably talk to XFree86 (http://www.xfree86.org/) and/or X.Org
(http://x.org/) instead.
-Also read the Documentation/SubmittingPatches document.
+.. note::
+
+ This document is old and has seen little maintenance in recent years; it
+ should probably be updated or, perhaps better, just deleted. Most of
+ what is here can be found in the other development documents anyway.
+
+ Oh, and we don't really recommend submitting changes to XFree86 :)
+
+Also read the Documentation/process/submitting-patches.rst document.
Allocating Device Numbers
@@ -19,7 +27,7 @@ by the Linux assigned name and number authority (currently this is
Torben Mathiasen). The site is http://www.lanana.org/. This
also deals with allocating numbers for devices that are not going to
be submitted to the mainstream kernel.
-See Documentation/devices.txt for more information on this.
+See Documentation/admin-guide/devices.rst for more information on this.
If you don't use assigned numbers then when your device is submitted it will
be given an assigned number even if that is different from values you may
@@ -73,7 +81,7 @@ Interfaces:
Code:
Please use the Linux style of code formatting as documented
- in :ref:`Documentation/CodingStyle <codingStyle>`.
+ in :ref:`Documentation/process/coding-style.rst <codingStyle>`.
If you have sections of code
that need to be in other formats, for example because they
are shared with a windows driver kit and you want to
@@ -109,7 +117,7 @@ PM support:
anything. For the driver testing instructions see
Documentation/power/drivers-testing.txt and for a relatively
complete overview of the power management issues related to
- drivers see Documentation/power/devices.txt .
+ drivers see Documentation/power/admin-guide/devices.rst .
Control:
In general if there is active maintenance of a driver by
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
new file mode 100644
index 000000000000..3e10719fee35
--- /dev/null
+++ b/Documentation/process/submitting-patches.rst
@@ -0,0 +1,836 @@
+.. _submittingpatches:
+
+Submitting patches: the essential guide to getting your code into the kernel
+============================================================================
+
+For a person or company who wishes to submit a change to the Linux
+kernel, the process can sometimes be daunting if you're not familiar
+with "the system." This text is a collection of suggestions which
+can greatly increase the chances of your change being accepted.
+
+This document contains a large number of suggestions in a relatively terse
+format. For detailed information on how the kernel development process
+works, see :ref:`Documentation/process <development_process_main>`.
+Also, read :ref:`Documentation/process/submit-checklist.rst <submitchecklist>`
+for a list of items to check before
+submitting code. If you are submitting a driver, also read
+:ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`;
+for device tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.txt.
+
+Many of these steps describe the default behavior of the ``git`` version
+control system; if you use ``git`` to prepare your patches, you'll find much
+of the mechanical work done for you, though you'll still need to prepare
+and document a sensible set of patches. In general, use of ``git`` will make
+your life as a kernel developer easier.
+
+0) Obtain a current source tree
+-------------------------------
+
+If you do not have a repository with the current kernel source handy, use
+``git`` to obtain one. You'll want to start with the mainline repository,
+which can be grabbed with::
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+Note, however, that you may not want to develop against the mainline tree
+directly. Most subsystem maintainers run their own trees and want to see
+patches prepared against those trees. See the **T:** entry for the subsystem
+in the MAINTAINERS file to find that tree, or simply ask the maintainer if
+the tree is not listed there.
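+
+As an illustrative sketch only (the URL, remote name, and branch below are
+placeholders; substitute the ones given by the subsystem's **T:** entry)::
+
+  git remote add subsystem git://git.kernel.org/pub/scm/linux/kernel/git/example/subsystem.git
+  git fetch subsystem
+  git checkout -b my-feature subsystem/master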
+
+It is still possible to download kernel releases via tarballs (as described
+in the next section), but that is the hard way to do kernel development.
+
+1) ``diff -up``
+---------------
+
+If you must generate your patches by hand, use ``diff -up`` or ``diff -uprN``
+to create patches. Git generates patches in this form by default; if
+you're using ``git``, you can skip this section entirely.
+
+All changes to the Linux kernel occur in the form of patches, as
+generated by :manpage:`diff(1)`. When creating your patch, make sure to
+create it in "unified diff" format, as supplied by the ``-u`` argument
+to :manpage:`diff(1)`.
+Also, please use the ``-p`` argument which shows which C function each
+change is in - that makes the resultant ``diff`` a lot easier to read.
+Patches should be based in the root kernel source directory,
+not in any lower subdirectory.
+
+To create a patch for a single file, it is often sufficient to do::
+
+ SRCTREE=linux
+ MYFILE=drivers/net/mydriver.c
+
+ cd $SRCTREE
+ cp $MYFILE $MYFILE.orig
+ vi $MYFILE # make your change
+ cd ..
+ diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
+
+To create a patch for multiple files, you should unpack a "vanilla",
+or unmodified kernel source tree, and generate a ``diff`` against your
+own source tree. For example::
+
+ MYSRC=/devel/linux
+
+ tar xvfz linux-3.19.tar.gz
+ mv linux-3.19 linux-3.19-vanilla
+ diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
+ linux-3.19-vanilla $MYSRC > /tmp/patch
+
+``dontdiff`` is a list of files which are generated by the kernel during
+the build process, and should be ignored in any :manpage:`diff(1)`-generated
+patch.
+
+Make sure your patch does not include any extra files which do not
+belong in a patch submission. Make sure to review your patch -after-
+generating it with :manpage:`diff(1)`, to ensure accuracy.
+
+If your changes produce a lot of deltas, you need to split them into
+individual patches which modify things in logical stages; see
+:ref:`split_changes`. This will facilitate review by other kernel developers,
+very important if you want your patch accepted.
+
+If you're using ``git``, ``git rebase -i`` can help you with this process. If
+you're not using ``git``, ``quilt`` <http://savannah.nongnu.org/projects/quilt>
+is another popular alternative.
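+
+If you are using ``git``, the manual ``diff`` procedure above can be replaced
+by a single command; a sketch, assuming your work is a series of commits on
+top of ``origin/master``::
+
+  git format-patch -o /tmp/patches origin/master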
+
+.. _describe_changes:
+
+2) Describe your changes
+------------------------
+
+Describe your problem. Whether your patch is a one-line bug fix or
+5000 lines of a new feature, there must be an underlying problem that
+motivated you to do this work. Convince the reviewer that there is a
+problem worth fixing and that it makes sense for them to read past the
+first paragraph.
+
+Describe user-visible impact. Straight up crashes and lockups are
+pretty convincing, but not all bugs are that blatant. Even if the
+problem was spotted during code review, describe the impact you think
+it can have on users. Keep in mind that the majority of Linux
+installations run kernels from secondary stable trees or
+vendor/product-specific trees that cherry-pick only specific patches
+from upstream, so include anything that could help route your change
+downstream: provoking circumstances, excerpts from dmesg, crash
+descriptions, performance regressions, latency spikes, lockups, etc.
+
+Quantify optimizations and trade-offs. If you claim improvements in
+performance, memory consumption, stack footprint, or binary size,
+include numbers that back them up. But also describe non-obvious
+costs. Optimizations usually aren't free but trade-offs between CPU,
+memory, and readability; or, when it comes to heuristics, between
+different workloads. Describe the expected downsides of your
+optimization so that the reviewer can weigh costs against benefits.
+
+Once the problem is established, describe what you are actually doing
+about it in technical detail. It's important to describe the change
+in plain English for the reviewer to verify that the code is behaving
+as you intend it to.
+
+The maintainer will thank you if you write your patch description in a
+form which can be easily pulled into Linux's source code management
+system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
+
+Solve only one problem per patch. If your description starts to get
+long, that's a sign that you probably need to split up your patch.
+See :ref:`split_changes`.
+
+When you submit or resubmit a patch or patch series, include the
+complete patch description and justification for it. Don't just
+say that this is version N of the patch (series). Don't expect the
+subsystem maintainer to refer back to earlier patch versions or referenced
+URLs to find the patch description and put that into the patch.
+I.e., the patch (series) and its description should be self-contained.
+This benefits both the maintainers and reviewers. Some reviewers
+probably didn't even receive earlier versions of the patch.
+
+Describe your changes in imperative mood, e.g. "make xyzzy do frotz"
+instead of "[This patch] makes xyzzy do frotz" or "[I] changed xyzzy
+to do frotz", as if you are giving orders to the codebase to change
+its behaviour.
+
+If the patch fixes a logged bug entry, refer to that bug entry by
+number and URL. If the patch follows from a mailing list discussion,
+give a URL to the mailing list archive; use the https://lkml.kernel.org/
+redirector with a ``Message-Id``, to ensure that the links cannot become
+stale.
+
+However, try to make your explanation understandable without external
+resources. In addition to giving a URL to a mailing list archive or
+bug, summarize the relevant points of the discussion that led to the
+patch as submitted.
+
+If you want to refer to a specific commit, don't just refer to the
+SHA-1 ID of the commit. Please also include the oneline summary of
+the commit, to make it easier for reviewers to know what it is about.
+Example::
+
+ Commit e21d2170f36602ae2708 ("video: remove unnecessary
+ platform_set_drvdata()") removed the unnecessary
+ platform_set_drvdata(), but left the variable "dev" unused,
+ delete it.
+
+You should also be sure to use at least the first twelve characters of the
+SHA-1 ID. The kernel repository holds a *lot* of objects, making
+collisions with shorter IDs a real possibility. Bear in mind that, even if
+there is no collision with your six-character ID now, that condition may
+change five years from now.
+
+If your patch fixes a bug in a specific commit, e.g. you found an issue using
+``git bisect``, please use the 'Fixes:' tag with the first 12 characters of
+the SHA-1 ID, and the one line summary. For example::
+
+ Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
+
+The following ``git config`` settings can be used to add a pretty format for
+outputting the above style in the ``git log`` or ``git show`` commands::
+
+ [core]
+ abbrev = 12
+ [pretty]
+ fixes = Fixes: %h (\"%s\")
+
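+With the above configuration in place, a matching ``Fixes:`` line can be
+generated directly; a usage sketch, reusing the commit from the example
+above::
+
+  git log -1 --pretty=fixes e21d2170f366
+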
+.. _split_changes:
+
+3) Separate your changes
+------------------------
+
+Separate each **logical change** into a separate patch.
+
+For example, if your changes include both bug fixes and performance
+enhancements for a single driver, separate those changes into two
+or more patches. If your changes include an API update, and a new
+driver which uses that new API, separate those into two patches.
+
+On the other hand, if you make a single change to numerous files,
+group those changes into a single patch. Thus a single logical change
+is contained within a single patch.
+
+The point to remember is that each patch should make an easily understood
+change that can be verified by reviewers. Each patch should be justifiable
+on its own merits.
+
+If one patch depends on another patch in order for a change to be
+complete, that is OK. Simply note **"this patch depends on patch X"**
+in your patch description.
+
+When dividing your change into a series of patches, take special care to
+ensure that the kernel builds and runs properly after each patch in the
+series. Developers using ``git bisect`` to track down a problem can end up
+splitting your patch series at any point; they will not thank you if you
+introduce bugs in the middle.
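+
+One way to check this (a sketch, assuming the series sits on top of
+``origin/master``) is to rebuild the tree at every commit in the series::
+
+  git rebase -x "make -j$(nproc)" origin/master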
+
+If you cannot condense your patch set into a smaller set of patches,
+then only post, say, 15 or so at a time and wait for review and integration.
+
+
+
+4) Style-check your changes
+---------------------------
+
+Check your patch for basic style violations, details of which can be
+found in
+:ref:`Documentation/process/coding-style.rst <codingstyle>`.
+Failure to do so simply wastes
+the reviewers' time and will get your patch rejected, probably
+without even being read.
+
+One significant exception is when moving code from one file to
+another -- in this case you should not modify the moved code at all in
+the same patch which moves it. This clearly delineates the act of
+moving the code and your changes. This greatly aids review of the
+actual differences and allows tools to better track the history of
+the code itself.
+
+Check your patches with the patch style checker prior to submission
+(scripts/checkpatch.pl). Note, though, that the style checker should be
+viewed as a guide, not as a replacement for human judgment. If your code
+looks better with a violation, then it's probably best left alone.
+
+The checker reports at three levels:
+ - ERROR: things that are very likely to be wrong
+ - WARNING: things requiring careful review
+ - CHECK: things requiring thought
+
+You should be able to justify all violations that remain in your
+patch.
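+
+A typical invocation looks like this (the patch file name is only a
+placeholder)::
+
+  ./scripts/checkpatch.pl --strict 0001-my-change.patch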
+
+
+5) Select the recipients for your patch
+---------------------------------------
+
+You should always copy the appropriate subsystem maintainer(s) on any patch
+to code that they maintain; look through the MAINTAINERS file and the
+source code revision history to see who those maintainers are. The
+script scripts/get_maintainer.pl can be very useful at this step. If you
+cannot find a maintainer for the subsystem you are working on, Andrew
+Morton (akpm@linux-foundation.org) serves as a maintainer of last resort.
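+
+For example (the patch file name is only a placeholder)::
+
+  ./scripts/get_maintainer.pl 0001-my-change.patch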
+
+You should also normally choose at least one mailing list to receive a copy
+of your patch set. linux-kernel@vger.kernel.org functions as a list of
+last resort, but the volume on that list has caused a number of developers
+to tune it out. Look in the MAINTAINERS file for a subsystem-specific
+list; your patch will probably get more attention there. Please do not
+spam unrelated lists, though.
+
+Many kernel-related lists are hosted on vger.kernel.org; you can find a
+list of them at http://vger.kernel.org/vger-lists.html. There are
+kernel-related lists hosted elsewhere as well, though.
+
+Do not send more than 15 patches at once to the vger mailing lists!!!
+
+Linus Torvalds is the final arbiter of all changes accepted into the
+Linux kernel. His e-mail address is <torvalds@linux-foundation.org>.
+He gets a lot of e-mail, and, at this point, very few patches go through
+Linus directly, so typically you should do your best to -avoid-
+sending him e-mail.
+
+If you have a patch that fixes an exploitable security bug, send that patch
+to security@kernel.org. For severe bugs, a short embargo may be considered
+to allow distributors to get the patch out to users; in such cases,
+obviously, the patch should not be sent to any public lists.
+
+Patches that fix a severe bug in a released kernel should be directed
+toward the stable maintainers by putting a line like this::
+
+ Cc: stable@vger.kernel.org
+
+into the sign-off area of your patch (note, NOT an email recipient). You
+should also read
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+in addition to this file.
+
+Note, however, that some subsystem maintainers want to come to their own
+conclusions on which patches should go to the stable trees. The networking
+maintainer, in particular, would rather not see individual developers
+adding lines like the above to their patches.
+
+If changes affect userland-kernel interfaces, please send the MAN-PAGES
+maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
+least a notification of the change, so that some information makes its way
+into the manual pages. User-space API changes should also be copied to
+linux-api@vger.kernel.org.
+
+For small patches you may want to CC the Trivial Patch Monkey
+trivial@kernel.org which collects "trivial" patches. Have a look
+into the MAINTAINERS file for its current manager.
+
+Trivial patches must qualify for one of the following rules:
+
+- Spelling fixes in documentation
+- Spelling fixes for errors which could break :manpage:`grep(1)`
+- Warning fixes (cluttering with useless warnings is bad)
+- Compilation fixes (only if they are actually correct)
+- Runtime fixes (only if they actually fix things)
+- Removing use of deprecated functions/macros
+- Contact detail and documentation fixes
+- Non-portable code replaced by portable code (even in arch-specific,
+ since people copy, as long as it's trivial)
+- Any fix by the author/maintainer of the file (ie. patch monkey
+ in re-transmission mode)
+
+
+
+6) No MIME, no links, no compression, no attachments. Just plain text
+----------------------------------------------------------------------
+
+Linus and other kernel developers need to be able to read and comment
+on the changes you are submitting. It is important for a kernel
+developer to be able to "quote" your changes, using standard e-mail
+tools, so that they may comment on specific portions of your code.
+
+For this reason, all patches should be submitted by e-mail "inline".
+
+.. warning::
+
+ Be wary of your editor's word-wrap corrupting your patch,
+ if you choose to cut-n-paste your patch.
+
+Do not attach the patch as a MIME attachment, compressed or not.
+Many popular e-mail applications will not always transmit a MIME
+attachment as plain text, making it impossible to comment on your
+code. A MIME attachment also takes Linus a bit more time to process,
+decreasing the likelihood of your MIME-attached change being accepted.
+
+Exception: If your mailer is mangling patches then someone may ask
+you to re-send them using MIME.
+
+See :ref:`Documentation/process/email-clients.rst <email_clients>`
+for hints about configuring your e-mail client so that it sends your patches
+untouched.
+
+7) E-mail size
+--------------
+
+Large changes are not appropriate for mailing lists, and some maintainers
+will not deal with them there. If your patch, uncompressed, exceeds 300 kB
+in size,
+it is preferred that you store your patch on an Internet-accessible
+server, and provide instead a URL (link) pointing to your patch. But note
+that if your patch exceeds 300 kB, it almost certainly needs to be broken up
+anyway.
+
+8) Respond to review comments
+-----------------------------
+
+Your patch will almost certainly get comments from reviewers on ways in
+which the patch can be improved. You must respond to those comments;
+ignoring reviewers is a good way to get ignored in return. Review comments
+or questions that do not lead to a code change should almost certainly
+bring about a comment or changelog entry so that the next reviewer better
+understands what is going on.
+
+Be sure to tell the reviewers what changes you are making and to thank them
+for their time. Code review is a tiring and time-consuming process, and
+reviewers sometimes get grumpy. Even in that case, though, respond
+politely and address the problems they have pointed out.
+
+
+9) Don't get discouraged - or impatient
+---------------------------------------
+
+After you have submitted your change, be patient and wait. Reviewers are
+busy people and may not get to your patch right away.
+
+Once upon a time, patches used to disappear into the void without comment,
+but the development process works more smoothly than that now. You should
+receive comments within a week or so; if that does not happen, make sure
+that you have sent your patches to the right place. Wait for a minimum of
+one week before resubmitting or pinging reviewers - possibly longer during
+busy times like merge windows.
+
+
+10) Include PATCH in the subject
+--------------------------------
+
+Due to high e-mail traffic to Linus, and to linux-kernel, it is common
+convention to prefix your subject line with [PATCH]. This lets Linus
+and other kernel developers more easily distinguish patches from other
+e-mail discussions.
+
+
+
+11) Sign your work - the Developer's Certificate of Origin
+----------------------------------------------------------
+
+To improve tracking of who did what, especially with patches that can
+percolate to their final resting place in the kernel through several
+layers of maintainers, we've introduced a "sign-off" procedure on
+patches that are being emailed around.
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below:
+
+Developer's Certificate of Origin 1.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+ (c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+ (d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
+then you just add a line saying::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+
+using your real name (sorry, no pseudonyms or anonymous contributions.)
+
+Some people also put extra tags at the end. They'll just be ignored for
+now, but you can do this to mark internal company procedures or just
+point out some special detail about the sign-off.
+
+If you are a subsystem or branch maintainer, sometimes you need to slightly
+modify patches you receive in order to merge them, because the code is not
+exactly the same in your tree and the submitters'. If you stick strictly to
+rule (c), you should ask the submitter to rediff, but this is a totally
+counter-productive waste of time and energy. Rule (b) allows you to adjust
+the code, but then it is very impolite to change one submitter's code and
+make him endorse your bugs. To solve this problem, it is recommended that
+you add a line between the last Signed-off-by header and yours, indicating
+the nature of your changes. While there is nothing mandatory about this, it
+seems like prepending the description with your mail and/or name, all
+enclosed in square brackets, is noticeable enough to make it obvious that
+you are responsible for last-minute changes. Example::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+ [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
+ Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
+
+This practice is particularly helpful if you maintain a stable branch and
+want at the same time to credit the author, track changes, merge the fix,
+and protect the submitter from complaints. Note that under no circumstances
+can you change the author's identity (the From header), as it is the one
+which appears in the changelog.
+
+Special note to back-porters: It seems to be a common and useful practice
+to insert an indication of the origin of a patch at the top of the commit
+message (just after the subject line) to facilitate tracking. For instance,
+here's what we see in a 3.x-stable release::
+
+ Date: Tue Oct 7 07:26:38 2014 -0400
+
+ libata: Un-break ATA blacklist
+
+ commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
+
+And here's what might appear in an older kernel once a patch is backported::
+
+ Date: Tue May 13 22:12:27 2008 +0200
+
+ wireless, airo: waitbusy() won't delay
+
+ [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
+
+Whatever the format, this information provides a valuable help to people
+tracking your trees, and to people trying to troubleshoot bugs in your
+tree.
+
+
+12) When to use Acked-by: and Cc:
+---------------------------------
+
+The Signed-off-by: tag indicates that the signer was involved in the
+development of the patch, or that he/she was in the patch's delivery path.
+
+If a person was not directly involved in the preparation or handling of a
+patch but wishes to signify and record their approval of it then they can
+ask to have an Acked-by: line added to the patch's changelog.
+
+Acked-by: is often used by the maintainer of the affected code when that
+maintainer neither contributed to nor forwarded the patch.
+
+Acked-by: is not as formal as Signed-off-by:. It is a record that the acker
+has at least reviewed the patch and has indicated acceptance. Hence patch
+mergers will sometimes manually convert an acker's "yep, looks good to me"
+into an Acked-by: (but note that it is usually better to ask for an
+explicit ack).
+
+Acked-by: does not necessarily indicate acknowledgement of the entire patch.
+For example, if a patch affects multiple subsystems and has an Acked-by: from
+one subsystem maintainer then this usually indicates acknowledgement of just
+the part which affects that maintainer's code. Judgement should be used here.
+When in doubt people should refer to the original discussion in the mailing
+list archives.
+
+If a person has had the opportunity to comment on a patch, but has not
+provided such comments, you may optionally add a ``Cc:`` tag to the patch.
+This is the only tag which might be added without an explicit action by the
+person it names - but it should indicate that this person was copied on the
+patch. This tag documents that potentially interested parties
+have been included in the discussion.
+
+
+13) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+--------------------------------------------------------------------------
+
+The Reported-by tag gives credit to people who find bugs and report them and it
+hopefully inspires them to help us again in the future. Please note that if
+the bug was reported in private, then ask for permission first before using the
+Reported-by tag.
+
+A Tested-by: tag indicates that the patch has been successfully tested (in
+some environment) by the person named. This tag informs maintainers that
+some testing has been performed, provides a means to locate testers for
+future patches, and ensures credit for the testers.
+
+Reviewed-by:, instead, indicates that the patch has been reviewed and found
+acceptable according to the Reviewer's Statement:
+
+Reviewer's statement of oversight
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By offering my Reviewed-by: tag, I state that:
+
+ (a) I have carried out a technical review of this patch to
+ evaluate its appropriateness and readiness for inclusion into
+ the mainline kernel.
+
+ (b) Any problems, concerns, or questions relating to the patch
+ have been communicated back to the submitter. I am satisfied
+ with the submitter's response to my comments.
+
+ (c) While there may be things that could be improved with this
+ submission, I believe that it is, at this time, (1) a
+ worthwhile modification to the kernel, and (2) free of known
+ issues which would argue against its inclusion.
+
+ (d) While I have reviewed the patch and believe it to be sound, I
+ do not (unless explicitly stated elsewhere) make any
+ warranties or guarantees that it will achieve its stated
+ purpose or function properly in any given situation.
+
+A Reviewed-by tag is a statement of opinion that the patch is an
+appropriate modification of the kernel without any remaining serious
+technical issues. Any interested reviewer (who has done the work) can
+offer a Reviewed-by tag for a patch. This tag serves to give credit to
+reviewers and to inform maintainers of the degree of review which has been
+done on the patch. Reviewed-by: tags, when supplied by reviewers known to
+understand the subject area and to perform thorough reviews, will normally
+increase the likelihood of your patch getting into the kernel.
+
+A Suggested-by: tag indicates that the patch idea is suggested by the person
+named and ensures credit to the person for the idea. Please note that this
+tag should not be added without the reporter's permission, especially if the
+idea was not posted in a public forum. That said, if we diligently credit our
+idea reporters, they will, hopefully, be inspired to help us again in the
+future.
+
+A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
+is used to make it easy to determine where a bug originated, which can help
+review a bug fix. This tag also assists the stable kernel team in determining
+which stable kernel versions should receive your fix. This is the preferred
+method for indicating a bug fixed by the patch. See :ref:`describe_changes`
+for more details.
+
+
+14) The canonical patch format
+------------------------------
+
+This section describes how the patch itself should be formatted. Note
+that, if you have your patches stored in a ``git`` repository, proper patch
+formatting can be had with ``git format-patch``. The tools cannot create
+the necessary text, though, so read the instructions below anyway.
+
+The canonical patch subject line is::
+
+ Subject: [PATCH 001/123] subsystem: summary phrase
+
+The canonical patch message body contains the following (see the skeleton
+example after this list):
+
+ - A ``from`` line specifying the patch author (only needed if the person
+ sending the patch is not the author).
+
+ - An empty line.
+
+ - The body of the explanation, line wrapped at 75 columns, which will
+ be copied to the permanent changelog to describe this patch.
+
+ - The ``Signed-off-by:`` lines, described above, which will
+ also go in the changelog.
+
+ - A marker line containing simply ``---``.
+
+ - Any additional comments not suitable for the changelog.
+
+ - The actual patch (``diff`` output).
+
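+Putting those pieces together, a complete patch mail might be laid out as
+follows; every name, number, and summary below is a placeholder::
+
+  Subject: [PATCH 1/2] subsystem: make xyzzy do frotz
+
+  From: Original Author <author@example.com>
+
+  xyzzy currently fails to frotz; make it do so and document why.
+
+  Signed-off-by: Original Author <author@example.com>
+  ---
+  v2: reworded the changelog as suggested by reviewers
+
+   drivers/foo/xyzzy.c | 2 +-
+   1 file changed, 1 insertion(+), 1 deletion(-)
+
+  diff --git a/drivers/foo/xyzzy.c b/drivers/foo/xyzzy.c
+  ...
+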
+The Subject line format makes it very easy to sort the emails
+alphabetically by subject line - pretty much any email reader will
+support that - since the sequence number is zero-padded,
+the numerical and alphabetic sort is the same.
+
+The ``subsystem`` in the email's Subject should identify which
+area or subsystem of the kernel is being patched.
+
+The ``summary phrase`` in the email's Subject should concisely
+describe the patch which that email contains. The ``summary
+phrase`` should not be a filename. Do not use the same ``summary
+phrase`` for every patch in a whole patch series (where a ``patch
+series`` is an ordered sequence of multiple, related patches).
+
+Bear in mind that the ``summary phrase`` of your email becomes a
+globally-unique identifier for that patch. It propagates all the way
+into the ``git`` changelog. The ``summary phrase`` may later be used in
+developer discussions which refer to the patch. People will want to
+google for the ``summary phrase`` to read discussion regarding that
+patch. It will also be the only thing that people may quickly see
+when, two or three months later, they are going through perhaps
+thousands of patches using tools such as ``gitk`` or ``git log
+--oneline``.
+
+For these reasons, the ``summary`` must be no more than 70-75
+characters, and it must describe both what the patch changes, as well
+as why the patch might be necessary. It is challenging to be both
+succinct and descriptive, but that is what a well-written summary
+should do.
+
+The ``summary phrase`` may be prefixed by tags enclosed in square
+brackets: "Subject: [PATCH <tag>...] <summary phrase>". The tags are
+not considered part of the summary phrase, but describe how the patch
+should be treated. Common tags might include a version descriptor if
+the multiple versions of the patch have been sent out in response to
+comments (i.e., "v1, v2, v3"), or "RFC" to indicate a request for
+comments. If there are four patches in a patch series the individual
+patches may be numbered like this: 1/4, 2/4, 3/4, 4/4. This assures
+that developers understand the order in which the patches should be
+applied and that they have reviewed or applied all of the patches in
+the patch series.
+
+A couple of example Subjects::
+
+ Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching
+ Subject: [PATCH v2 01/27] x86: fix eflags tracking
+
+The ``from`` line must be the very first line in the message body,
+and has the form::
+
+ From: Original Author <author@example.com>
+
+The ``from`` line specifies who will be credited as the author of the
+patch in the permanent changelog. If the ``from`` line is missing,
+then the ``From:`` line from the email header will be used to determine
+the patch author in the changelog.
+
+The explanation body will be committed to the permanent source
+changelog, so should make sense to a competent reader who has long
+since forgotten the immediate details of the discussion that might
+have led to this patch. Including symptoms of the failure which the
+patch addresses (kernel log messages, oops messages, etc.) is
+especially useful for people who might be searching the commit logs
+looking for the applicable patch. If a patch fixes a compile failure,
+it may not be necessary to include _all_ of the compile failures; just
+enough that it is likely that someone searching for the patch can find
+it. As in the ``summary phrase``, it is important to be both succinct as
+well as descriptive.
+
+The ``---`` marker line serves the essential purpose of marking for patch
+handling tools where the changelog message ends.
+
+One good use for the additional comments after the ``---`` marker is for
+a ``diffstat``, to show what files have changed, and the number of
+inserted and deleted lines per file. A ``diffstat`` is especially useful
+on bigger patches. Other comments relevant only to the moment or the
+maintainer, not suitable for the permanent changelog, should also go
+here. A good example of such comments might be ``patch changelogs``
+which describe what has changed between the v1 and v2 version of the
+patch.
+
+If you are going to include a ``diffstat`` after the ``---`` marker, please
+use ``diffstat`` options ``-p 1 -w 70`` so that filenames are listed from
+the top of the kernel source tree and don't use too much horizontal
+space (easily fit in 80 columns, maybe with some indentation). (``git``
+generates appropriate diffstats by default.)
+
+See more details on the proper patch format in the following
+references.
+
+.. _explicit_in_reply_to:
+
+15) Explicit In-Reply-To headers
+--------------------------------
+
+It can be helpful to manually add In-Reply-To: headers to a patch
+(e.g., when using ``git send-email``) to associate the patch with
+previous relevant discussion, e.g. to link a bug fix to the email with
+the bug report. However, for a multi-patch series, it is generally
+best to avoid using In-Reply-To: to link to older versions of the
+series. This way multiple versions of the patch don't become an
+unmanageable forest of references in email clients. If a link is
+helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in
+the cover email text) to link to an earlier version of the patch series.
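+
+For instance, to attach a single follow-up patch to an existing bug-report
+thread (the Message-Id and file name are placeholders)::
+
+  git send-email --in-reply-to='<20161201120000.1234-1-reporter@example.com>' 0001-fix-the-bug.patch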
+
+
+16) Sending ``git pull`` requests
+---------------------------------
+
+If you have a series of patches, it may be most convenient to have the
+maintainer pull them directly into the subsystem repository with a
+``git pull`` operation. Note, however, that pulling patches from a developer
+requires a higher degree of trust than taking patches from a mailing list.
+As a result, many subsystem maintainers are reluctant to take pull
+requests, especially from new, unknown developers. If in doubt you can use
+the pull request as the cover letter for a normal posting of the patch
+series, giving the maintainer the option of using either.
+
+A pull request should have [GIT] or [PULL] in the subject line. The
+request itself should include the repository name and the branch of
+interest on a single line; it should look something like::
+
+ Please pull from
+
+ git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
+
+ to get these changes:
+
+A pull request should also include an overall message saying what will be
+included in the request, a ``git shortlog`` listing of the patches
+themselves, and a ``diffstat`` showing the overall effect of the patch series.
+The easiest way to get all this information together is, of course, to let
+``git`` do it for you with the ``git request-pull`` command.
+
+Some maintainers (including Linus) want to see pull requests from signed
+commits; that increases their confidence that the request actually came
+from you. Linus, in particular, will not pull from public hosting sites
+like GitHub in the absence of a signed tag.
+
+The first step toward creating such tags is to make a GNUPG key and get it
+signed by one or more core kernel developers. This step can be hard for
+new developers, but there is no way around it. Attending conferences can
+be a good way to find developers who can sign your key.
+
+Once you have prepared a patch series in ``git`` that you wish to have somebody
+pull, create a signed tag with ``git tag -s``. This will create a new tag
+identifying the last commit in the series and containing a signature
+created with your private key. You will also have the opportunity to add a
+changelog-style message to the tag; this is an ideal place to describe the
+effects of the pull request as a whole.
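+
+For example (the tag name and message are placeholders)::
+
+  git tag -s my-signed-tag -m "Summary of the changes in this pull request"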
+
+If the tree the maintainer will be pulling from is not the repository you
+are working from, don't forget to push the signed tag explicitly to the
+public tree.
+
+When generating your pull request, use the signed tag as the target. A
+command like this will do the trick::
+
+ git request-pull master git://my.public.tree/linux.git my-signed-tag
+
+
+References
+----------
+
+Andrew Morton, "The perfect patch" (tpp).
+ <http://www.ozlabs.org/~akpm/stuff/tpp.txt>
+
+Jeff Garzik, "Linux kernel patch submission format".
+ <http://linux.yyz.us/patch-format.html>
+
+Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
+ <http://www.kroah.com/log/linux/maintainer.html>
+
+ <http://www.kroah.com/log/linux/maintainer-02.html>
+
+ <http://www.kroah.com/log/linux/maintainer-03.html>
+
+ <http://www.kroah.com/log/linux/maintainer-04.html>
+
+ <http://www.kroah.com/log/linux/maintainer-05.html>
+
+ <http://www.kroah.com/log/linux/maintainer-06.html>
+
+NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
+ <https://lkml.org/lkml/2005/7/11/336>
+
+Kernel Documentation/process/coding-style.rst:
+ :ref:`Documentation/process/coding-style.rst <codingstyle>`
+
+Linus Torvalds's mail on the canonical patch format:
+ <http://lkml.org/lkml/2005/4/7/183>
+
+Andi Kleen, "On submitting kernel patches"
+ Some strategies to get difficult or controversial changes in.
+
+ http://halobates.de/on-submitting-patches.pdf
diff --git a/Documentation/volatile-considered-harmful.txt b/Documentation/process/volatile-considered-harmful.rst
index db0cb228d64a..4934e656a6f3 100644
--- a/Documentation/volatile-considered-harmful.txt
+++ b/Documentation/process/volatile-considered-harmful.rst
@@ -1,3 +1,6 @@
+
+.. _volatile_considered_harmful:
+
Why the "volatile" type class should not be used
------------------------------------------------
@@ -22,7 +25,7 @@ need to use volatile as well. If volatile is still necessary, there is
almost certainly a bug in the code somewhere. In properly-written kernel
code, volatile can only serve to slow things down.
-Consider a typical block of kernel code:
+Consider a typical block of kernel code::
spin_lock(&the_lock);
do_something_on(&shared_data);
@@ -57,7 +60,7 @@ optimization, so, once again, volatile is unnecessary.
Another situation where one might be tempted to use volatile is
when the processor is busy-waiting on the value of a variable. The right
-way to perform a busy wait is:
+way to perform a busy wait is::
while (my_variable != what_i_want)
cpu_relax();
@@ -103,17 +106,20 @@ they come with a justification which shows that the concurrency issues have
been properly thought through.
-NOTES
------
+References
+==========
[1] http://lwn.net/Articles/233481/
+
[2] http://lwn.net/Articles/233482/
-CREDITS
--------
+Credits
+=======
Original impetus and research by Randy Dunlap
+
Written by Jonathan Corbet
+
Improvements via comments from Satyam Sharma, Johannes Stezenbach, Jesper
- Juhl, Heikki Orsila, H. Peter Anvin, Philipp Hahn, and Stefan
- Richter.
+Juhl, Heikki Orsila, H. Peter Anvin, Philipp Hahn, and Stefan
+Richter.
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 1f0c27049340..8c174063b3f0 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -26,7 +26,7 @@ whether they can be changed or not:
the system software.
The rfkill subsystem has two parameters, rfkill.default_state and
-rfkill.master_switch_mode, which are documented in kernel-parameters.txt.
+rfkill.master_switch_mode, which are documented in admin-guide/kernel-parameters.rst.
2. Implementation details
diff --git a/Documentation/scheduler/completion.txt b/Documentation/scheduler/completion.txt
index 2622bc7a188b..656cf803c006 100644
--- a/Documentation/scheduler/completion.txt
+++ b/Documentation/scheduler/completion.txt
@@ -25,8 +25,7 @@ struct completion that tells the waiting threads of execution if they
can continue safely.
As completions are scheduling related, the code is found in
-kernel/sched/completion.c - for details on completion design and
-implementation see completions-design.txt
+kernel/sched/completion.c.
Usage:
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 8e66dafa41e1..8477655c0e46 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -1,7 +1,7 @@
SCSI Kernel Parameters
~~~~~~~~~~~~~~~~~~~~~~
-See Documentation/kernel-parameters.txt for general information on
+See Documentation/admin-guide/kernel-parameters.rst for general information on
specifying module parameters.
This document may not be entirely up to date and comprehensive. The command
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 255075157511..6338400eed73 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -336,7 +336,7 @@ in parallel by these functions.
Conventions
===========
First, Linus Torvalds's thoughts on C coding style can be found in the
-Documentation/CodingStyle file.
+Documentation/process/coding-style.rst file.
Next, there is a movement to "outlaw" typedefs introducing synonyms for
struct tags. Both can be still found in the SCSI subsystem, but
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.txt
index 6af8f7a7770f..d28186553fb0 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.txt
@@ -427,7 +427,7 @@ Synchronous transfers frequency (default answer: 80)
10.1 Syntax
Setup commands can be passed to the driver either at boot time or as
-parameters to modprobe, as described in Documentation/kernel-parameters.txt
+parameters to modprobe, as described in Documentation/admin-guide/kernel-parameters.rst
Example of boot setup command under lilo prompt:
diff --git a/Documentation/security/conf.py b/Documentation/security/conf.py
new file mode 100644
index 000000000000..472fc9a8eb67
--- /dev/null
+++ b/Documentation/security/conf.py
@@ -0,0 +1,8 @@
+project = "The kernel security subsystem manual"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'security.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst
new file mode 100644
index 000000000000..9bae6bb20e7f
--- /dev/null
+++ b/Documentation/security/index.rst
@@ -0,0 +1,7 @@
+======================
+Security documentation
+======================
+
+.. toctree::
+
+ tpm/index
diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 324ddf5223b3..b20a993a32af 100644
--- a/Documentation/security/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
@@ -32,8 +32,6 @@ Usage:
(40 ascii zeros)
blobauth= ascii hex auth for sealed data default 0x00...
(40 ascii zeros)
- blobauth= ascii hex auth for sealed data default 0x00...
- (40 ascii zeros)
pcrinfo= ascii hex of PCR_INFO or PCR_INFO_LONG (no default)
pcrlock= pcr number to be extended to "lock" blob
migratable= 0|1 indicating permission to reseal to new PCR values,
diff --git a/Documentation/security/tpm/index.rst b/Documentation/security/tpm/index.rst
new file mode 100644
index 000000000000..af77a7bbb070
--- /dev/null
+++ b/Documentation/security/tpm/index.rst
@@ -0,0 +1,7 @@
+=====================================
+Trusted Platform Module documentation
+=====================================
+
+.. toctree::
+
+ tpm_vtpm_proxy
diff --git a/Documentation/tpm/tpm_vtpm_proxy.txt b/Documentation/security/tpm/tpm_vtpm_proxy.rst
index 30d19022f869..ea08e76b17f5 100644
--- a/Documentation/tpm/tpm_vtpm_proxy.txt
+++ b/Documentation/security/tpm/tpm_vtpm_proxy.rst
@@ -1,71 +1,50 @@
+=============================================
Virtual TPM Proxy Driver for Linux Containers
+=============================================
-Authors: Stefan Berger (IBM)
+| Authors:
+| Stefan Berger <stefanb@linux.vnet.ibm.com>
This document describes the virtual Trusted Platform Module (vTPM)
proxy device driver for Linux containers.
-INTRODUCTION
-------------
+Introduction
+============
The goal of this work is to provide TPM functionality to each Linux
container. This allows programs to interact with a TPM in a container
the same way they interact with a TPM on the physical system. Each
container gets its own unique, emulated, software TPM.
-
-DESIGN
-------
+Design
+======
To make an emulated software TPM available to each container, the container
management stack needs to create a device pair consisting of a client TPM
-character device /dev/tpmX (with X=0,1,2...) and a 'server side' file
+character device ``/dev/tpmX`` (with X=0,1,2...) and a 'server side' file
descriptor. The former is moved into the container by creating a character
device with the appropriate major and minor numbers while the file descriptor
is passed to the TPM emulator. Software inside the container can then send
TPM commands using the character device and the emulator will receive the
commands via the file descriptor and use it for sending back responses.
-To support this, the virtual TPM proxy driver provides a device /dev/vtpmx
+To support this, the virtual TPM proxy driver provides a device ``/dev/vtpmx``
that is used to create device pairs using an ioctl. The ioctl takes as
an input flags for configuring the device. The flags for example indicate
whether TPM 1.2 or TPM 2 functionality is supported by the TPM emulator.
The result of the ioctl are the file descriptor for the 'server side'
as well as the major and minor numbers of the character device that was created.
-Besides that the number of the TPM character device is return. If for
-example /dev/tpm10 was created, the number (dev_num) 10 is returned.
-
-The following is the data structure of the TPM_PROXY_IOC_NEW_DEV ioctl:
-
-struct vtpm_proxy_new_dev {
- __u32 flags; /* input */
- __u32 tpm_num; /* output */
- __u32 fd; /* output */
- __u32 major; /* output */
- __u32 minor; /* output */
-};
-
-Note that if unsupported flags are passed to the device driver, the ioctl will
-fail and errno will be set to EOPNOTSUPP. Similarly, if an unsupported ioctl is
-called on the device driver, the ioctl will fail and errno will be set to
-ENOTTY.
-
-See /usr/include/linux/vtpm_proxy.h for definitions related to the public interface
-of this vTPM device driver.
+Besides that the number of the TPM character device is returned. If for
+example ``/dev/tpm10`` was created, the number (``dev_num``) 10 is returned.
Once the device has been created, the driver will immediately try to talk
to the TPM. All commands from the driver can be read from the file descriptor
returned by the ioctl. The commands should be responded to immediately.
-Depending on the version of TPM the following commands will be sent by the
-driver:
+UAPI
+====
-- TPM 1.2:
- - the driver will send a TPM_Startup command to the TPM emulator
- - the driver will send commands to read the command durations and
- interface timeouts from the TPM emulator
-- TPM 2:
- - the driver will send a TPM2_Startup command to the TPM emulator
+.. kernel-doc:: include/uapi/linux/vtpm_proxy.h
-The TPM device /dev/tpmX will only appear if all of the relevant commands
-were responded to properly.
+.. kernel-doc:: drivers/char/tpm/tpm_vtpm_proxy.c
+ :functions: vtpmx_ioc_new_dev
diff --git a/Documentation/tpm/xen-tpmfront.txt b/Documentation/security/tpm/xen-tpmfront.txt
index 69346de87ff3..69346de87ff3 100644
--- a/Documentation/tpm/xen-tpmfront.txt
+++ b/Documentation/security/tpm/xen-tpmfront.txt
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
new file mode 100644
index 000000000000..aed6b4fb8e46
--- /dev/null
+++ b/Documentation/sound/alsa-configuration.rst
@@ -0,0 +1,2683 @@
+==============================================================
+Advanced Linux Sound Architecture - Driver Configuration guide
+==============================================================
+
+
+Kernel Configuration
+====================
+
+To enable ALSA support you need at least to build the kernel with
+primary sound card support (``CONFIG_SOUND``). Since ALSA can emulate
+OSS, you don't have to choose any of the OSS modules.
+
+Enable "OSS API emulation" (``CONFIG_SND_OSSEMUL``) and both OSS mixer
+and PCM support if you want to run OSS applications with ALSA.
+
+If you want to support the WaveTable functionality on cards such as
+SB Live! then you need to enable "Sequencer support"
+(``CONFIG_SND_SEQUENCER``).
+
+To make ALSA debug messages more verbose, enable the "Verbose printk"
+and "Debug" options. To check for memory leaks, turn on "Debug memory"
+too. "Debug detection" will add checks for the detection of cards.
+
+Please note that all the ALSA ISA drivers support the Linux isapnp API
+(if the card supports ISA PnP). You don't need to configure the cards
+using isapnptools.
+
+
+Module parameters
+=================
+
+The user can load modules with options. If the module supports more than
+one card and you have more than one card of the same type then you can
+specify multiple values for the option separated by commas.
+
+
+Module snd
+----------
+
+The core ALSA module. It is used by all ALSA card drivers.
+It takes the following options which have global effects.
+
+major
+ major number for sound driver;
+ Default: 116
+cards_limit
+ limiting card index for auto-loading (1-8);
+ Default: 1;
+ For auto-loading more than one card, specify this option
+ together with snd-card-X aliases.
+slots
+ Reserve the slot index for the given driver;
+ This option takes multiple strings.
+ See `Module Autoloading Support`_ section for details.
+debug
+ Specifies the debug message level;
+ (0 = disable debug prints, 1 = normal debug messages,
+ 2 = verbose debug messages);
+ This option appears only when ``CONFIG_SND_DEBUG=y``.
+ This option can be dynamically changed via the sysfs file
+ /sys/module/snd/parameters/debug, as shown below.
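+
+For instance, assuming ``CONFIG_SND_DEBUG=y``, verbose debugging can be
+enabled at run time with::
+
+  echo 2 > /sys/module/snd/parameters/debug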
+
+Module snd-pcm-oss
+------------------
+
+The PCM OSS emulation module.
+This module takes options which change the mapping of devices.
+
+dsp_map
+ PCM device number maps assigned to the 1st OSS device;
+ Default: 0
+adsp_map
+ PCM device number maps assigned to the 2nd OSS device;
+ Default: 1
+nonblock_open
+ Don't block opening busy PCM devices;
+ Default: 1
+
+For example, when ``dsp_map=2``, /dev/dsp will be mapped to PCM #2 of
+the card #0. Similarly, when ``adsp_map=0``, /dev/adsp will be mapped
+to PCM #0 of the card #0.
+For changing the second or later card, specify the option with
+commas, such as ``dsp_map=0,1``.
+
+The ``nonblock_open`` option is used to change the behavior of the PCM
+regarding opening the device. When this option is non-zero,
+opening a busy OSS PCM device won't be blocked but return
+immediately with EAGAIN (just like O_NONBLOCK flag).
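+
+A hypothetical invocation combining these options::
+
+  modprobe snd-pcm-oss dsp_map=2 adsp_map=0 nonblock_open=1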
+
+Module snd-rawmidi
+------------------
+
+This module takes options which change the mapping of devices,
+similar to those of the snd-pcm-oss module.
+
+midi_map
+ MIDI device number mapped to the 1st OSS device;
+ Default: 0
+amidi_map
+ MIDI device number mapped to the 2nd OSS device;
+ Default: 1
+
+Common parameters for top sound card modules
+--------------------------------------------
+
+Each top-level sound card module takes the following options.
+
+index
+ index (slot #) of sound card;
+ Values: 0 through 31 or negative;
+ If nonnegative, assign that index number;
+ if negative, interpret as a bitmask of permissible indices;
+ the first free permitted index is assigned;
+ Default: -1
+id
+ card ID (identifier or name);
+ Can be up to 15 characters long;
+ Default: the card type;
+ A directory by this name is created under /proc/asound/
+ containing information about the card;
+ This ID can be used instead of the index number in
+ identifying the card
+enable
+ enable card;
+ Default: enabled, for PCI and ISA PnP cards
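+
+A minimal sketch of how these common options are typically passed (the
+module name and values are illustrative only):
+::
+
+  options snd-ens1371 index=0 id=AudioPCI enable=1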
+
+Module snd-adlib
+----------------
+
+Module for AdLib FM cards.
+
+port
+ port # for OPL chip
+
+This module supports multiple cards. It does not support autoprobe, so
+the port must be specified. For actual AdLib FM cards it will be 0x388.
+Note that this card has no PCM support and no mixer; it provides only
+FM synthesis.
+
+Make sure you have ``sbiload`` from the alsa-tools package available and,
+after loading the module, find out the assigned ALSA sequencer port
+number through ``sbiload -l``.
+
+Example output:
+::
+
+ Port Client name Port name
+ 64:0 OPL2 FM synth OPL2 FM Port
+
+Load the ``std.sb`` and ``drums.sb`` patches also supplied by ``sbiload``:
+::
+
+ sbiload -p 64:0 std.sb drums.sb
+
+If you use this driver to drive an OPL3, you can use ``std.o3`` and ``drums.o3``
+instead. To have the card produce sound, use ``aplaymidi`` from alsa-utils:
+::
+
+ aplaymidi -p 64:0 foo.mid
+
+Module snd-ad1816a
+------------------
+
+Module for sound cards based on Analog Devices AD1816A/AD1815 ISA chips.
+
+clockfreq
+ Clock frequency for AD1816A chip (default = 0, 33000Hz)
+
+This module supports multiple cards, autoprobe and PnP.
+
+Module snd-ad1848
+-----------------
+
+Module for sound cards based on AD1848/AD1847/CS4248 ISA chips.
+
+port
+ port # for AD1848 chip
+irq
+ IRQ # for AD1848 chip
+dma1
+ DMA # for AD1848 chip (0,1,3)
+
+This module supports multiple cards. It does not support autoprobe,
+so the main port must be specified. Other ports are optional.
+
+The power-management is supported.
+
+Module snd-ad1889
+-----------------
+
+Module for Analog Devices AD1889 chips.
+
+ac97_quirk
+ AC'97 workaround for strange hardware;
+ See the description of intel8x0 module for details.
+
+This module supports multiple cards.
+
+Module snd-ali5451
+------------------
+
+Module for ALi M5451 PCI chip.
+
+pcm_channels
+ Number of hardware channels assigned for PCM
+spdif
+ Support SPDIF I/O;
+ Default: disabled
+
+This module supports one chip and autoprobe.
+
+The power-management is supported.
+
+Module snd-als100
+-----------------
+
+Module for sound cards based on Avance Logic ALS100/ALS120 ISA chips.
+
+This module supports multiple cards, autoprobe and PnP.
+
+The power-management is supported.
+
+Module snd-als300
+-----------------
+
+Module for Avance Logic ALS300 and ALS300+
+
+This module supports multiple cards.
+
+The power-management is supported.
+
+Module snd-als4000
+------------------
+
+Module for sound cards based on Avance Logic ALS4000 PCI chip.
+
+joystick_port
+ port # for legacy joystick support;
+ 0 = disabled (default), 1 = auto-detect
+
+This module supports multiple cards, autoprobe and PnP.
+
+The power-management is supported.
+
+Module snd-asihpi
+-----------------
+
+Module for AudioScience ASI soundcards
+
+enable_hpi_hwdep
+ enable HPI hwdep for AudioScience soundcard
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-atiixp
+-----------------
+
+Module for ATI IXP 150/200/250/400 AC97 controllers.
+
+ac97_clock
+ AC'97 clock (default = 48000)
+ac97_quirk
+ AC'97 workaround for strange hardware;
+ See `AC97 Quirk Option`_ section below.
+ac97_codec
+ Workaround to specify which AC'97 codec to use instead of probing.
+ If this works for you, file a bug report with your ``lspci -vn`` output.
+ (-2 = Force probing, -1 = Default behavior, 0-2 = Use the
+ specified codec.)
+spdif_aclink
+ S/PDIF transfer over AC-link (default = 1)
+
+This module supports one card and autoprobe.
+
+ATI IXP has two different methods to control SPDIF output. One is
+over AC-link and another is over the "direct" SPDIF output. The
+implementation depends on the motherboard, and you'll need to
+choose the correct one via the ``spdif_aclink`` module option.
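+
+For example, to try the direct SPDIF output instead of the AC-link
+(an illustrative setting, not a recommendation):
+::
+
+  options snd-atiixp spdif_aclink=0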
+
+The power-management is supported.
+
+Module snd-atiixp-modem
+-----------------------
+
+Module for ATI IXP 150/200/250 AC97 modem controllers.
+
+This module supports one card and autoprobe.
+
+Note: The default index value of this module is -2, i.e. the first
+slot is excluded.
+
+The power-management is supported.
+
+Module snd-au8810, snd-au8820, snd-au8830
+-----------------------------------------
+
+Module for Aureal Vortex, Vortex2 and Advantage devices.
+
+pcifix
+ Control PCI workarounds;
+ 0 = Disable all workarounds,
+ 1 = Force the PCI latency of the Aureal card to 0xff,
+ 2 = Force the Extend PCI#2 Internal Master for Efficient
+ Handling of Dummy Requests on the VIA KT133 AGP Bridge,
+ 3 = Force both settings,
+ 255 = Autodetect what is required (default)
+
+This module supports all ADB PCM channels, ac97 mixer, SPDIF, hardware
+EQ, mpu401, gameport. A3D and wavetable support are still in development.
+Development and reverse engineering work is being coordinated at
+http://savannah.nongnu.org/projects/openvortex/
+The SPDIF output carries a copy of the AC97 codec output, unless you use
+the ``spdif`` pcm device, which allows raw data passthrough.
+The hardware EQ and SPDIF are only present on the Vortex2 and
+Advantage.
+
+Note: Some ALSA mixer applications don't handle the SPDIF sample rate
+control correctly. If you have problems regarding this, try
+another ALSA compliant mixer (alsamixer works).
+
+Module snd-azt1605
+------------------
+
+Module for Aztech Sound Galaxy soundcards based on the Aztech AZT1605
+chipset.
+
+port
+ port # for BASE (0x220,0x240,0x260,0x280)
+wss_port
+ port # for WSS (0x530,0x604,0xe80,0xf40)
+irq
+ IRQ # for WSS (7,9,10,11)
+dma1
+ DMA # for WSS playback (0,1,3)
+dma2
+ DMA # for WSS capture (0,1), -1 = disabled (default)
+mpu_port
+ port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
+mpu_irq
+ IRQ # for MPU-401 UART (3,5,7,9), -1 = disabled (default)
+fm_port
+ port # for OPL3 (0x388), -1 = disabled (default)
+
+This module supports multiple cards. It does not support autoprobe:
+``port``, ``wss_port``, ``irq`` and ``dma1`` have to be specified.
+The other values are optional.
+
+``port`` needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
+or the value stored in the card's EEPROM for cards that have an EEPROM and
+their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
+be chosen freely from the options enumerated above.
+
+If ``dma2`` is specified and different from ``dma1``, the card will operate in
+full-duplex mode. When ``dma1=3``, ``dma2=0`` is the only valid choice and the
+only way to enable capture, since only channels 0 and 1 are available for
+capture.
+
+Generic settings are ``port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
+mpu_port=0x330 mpu_irq=9 fm_port=0x388``.
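+
+Put together as a single (illustrative) module load on the command
+line, this would look like:
+::
+
+  modprobe snd-azt1605 port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0 \
+          mpu_port=0x330 mpu_irq=9 fm_port=0x388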
+
+Whatever IRQ and DMA channels you pick, be sure to reserve them for
+legacy ISA in your BIOS.
+
+Module snd-azt2316
+------------------
+
+Module for Aztech Sound Galaxy soundcards based on the Aztech AZT2316
+chipset.
+
+port
+ port # for BASE (0x220,0x240,0x260,0x280)
+wss_port
+ port # for WSS (0x530,0x604,0xe80,0xf40)
+irq
+ IRQ # for WSS (7,9,10,11)
+dma1
+ DMA # for WSS playback (0,1,3)
+dma2
+ DMA # for WSS capture (0,1), -1 = disabled (default)
+mpu_port
+ port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
+mpu_irq
+ IRQ # for MPU-401 UART (5,7,9,10), -1 = disabled (default)
+fm_port
+ port # for OPL3 (0x388), -1 = disabled (default)
+
+This module supports multiple cards. It does not support autoprobe:
+``port``, ``wss_port``, ``irq`` and ``dma1`` have to be specified.
+The other values are optional.
+
+``port`` needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
+or the value stored in the card's EEPROM for cards that have an EEPROM and
+their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
+be chosen freely from the options enumerated above.
+
+If ``dma2`` is specified and different from ``dma1``, the card will operate in
+full-duplex mode. When ``dma1=3``, ``dma2=0`` is the only valid choice and the
+only way to enable capture, since only channels 0 and 1 are available for
+capture.
+
+Generic settings are ``port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
+mpu_port=0x330 mpu_irq=9 fm_port=0x388``.
+
+Whatever IRQ and DMA channels you pick, be sure to reserve them for
+legacy ISA in your BIOS.
+
+Module snd-aw2
+--------------
+
+Module for Audiowerk2 sound card
+
+This module supports multiple cards.
+
+Module snd-azt2320
+------------------
+
+Module for sound cards based on Aztech System AZT2320 ISA chip (PnP only).
+
+This module supports multiple cards, PnP and autoprobe.
+
+The power-management is supported.
+
+Module snd-azt3328
+------------------
+
+Module for sound cards based on Aztech AZF3328 PCI chip.
+
+joystick
+ Enable joystick (default off)
+
+This module supports multiple cards.
+
+Module snd-bt87x
+----------------
+
+Module for video cards based on Bt87x chips.
+
+digital_rate
+ Override the default digital rate (Hz)
+load_all
+ Load the driver even if the card model isn't known
+
+This module supports multiple cards.
+
+Note: The default index value of this module is -2, i.e. the first
+slot is excluded.
+
+Module snd-ca0106
+-----------------
+
+Module for Creative Audigy LS and SB Live 24bit
+
+This module supports multiple cards.
+
+
+Module snd-cmi8330
+------------------
+
+Module for sound cards based on C-Media CMI8330 ISA chips.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+wssport
+ port # for CMI8330 chip (WSS)
+wssirq
+ IRQ # for CMI8330 chip (WSS)
+wssdma
+ first DMA # for CMI8330 chip (WSS)
+sbport
+ port # for CMI8330 chip (SB16)
+sbirq
+ IRQ # for CMI8330 chip (SB16)
+sbdma8
+ 8bit DMA # for CMI8330 chip (SB16)
+sbdma16
+ 16bit DMA # for CMI8330 chip (SB16)
+fmport
+ (optional) OPL3 I/O port
+mpuport
+ (optional) MPU401 I/O port
+mpuirq
+ (optional) MPU401 irq #
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-cmipci
+-----------------
+
+Module for C-Media CMI8338/8738/8768/8770 PCI sound cards.
+
+mpu_port
+ port address of MIDI interface (8338 only):
+ 0x300,0x310,0x320,0x330 = legacy port,
+ 0 = disable (default)
+fm_port
+ port address of OPL-3 FM synthesizer (8x38 only):
+ 0x388 = legacy port,
+ 1 = integrated PCI port (default on 8738),
+ 0 = disable
+soft_ac3
+ Software-conversion of raw SPDIF packets (model 033 only) (default = 1)
+joystick_port
+ Joystick port address (0 = disable, 1 = auto-detect)
+
+This module supports autoprobe and multiple cards.
+
+The power-management is supported.
+
+Module snd-cs4231
+-----------------
+
+Module for sound cards based on CS4231 ISA chips.
+
+port
+ port # for CS4231 chip
+mpu_port
+ port # for MPU-401 UART (optional), -1 = disable
+irq
+ IRQ # for CS4231 chip
+mpu_irq
+ IRQ # for MPU-401 UART
+dma1
+ first DMA # for CS4231 chip
+dma2
+ second DMA # for CS4231 chip
+
+This module supports multiple cards. It does not support autoprobe,
+so the main port must be specified. Other ports are optional.
+
+The power-management is supported.
+
+Module snd-cs4236
+-----------------
+
+Module for sound cards based on CS4232/CS4232A,
+CS4235/CS4236/CS4236B/CS4237B/CS4238B/CS4239 ISA chips.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for CS4236 chip (PnP setup - 0x534)
+cport
+ control port # for CS4236 chip (PnP setup - 0x120,0x210,0xf00)
+mpu_port
+ port # for MPU-401 UART (PnP setup - 0x300), -1 = disable
+fm_port
+ FM port # for CS4236 chip (PnP setup - 0x388), -1 = disable
+irq
+ IRQ # for CS4236 chip (5,7,9,11,12,15)
+mpu_irq
+ IRQ # for MPU-401 UART (9,11,12,15)
+dma1
+ first DMA # for CS4236 chip (0,1,3)
+dma2
+ second DMA # for CS4236 chip (0,1,3), -1 = disable
+
+This module supports multiple cards. It does not support autoprobe
+(if ISA PnP is not used), so the main port and control port must be
+specified. Other ports are optional.
+
+The power-management is supported.
+
+This module is aliased as snd-cs4232 since it provides the old
+snd-cs4232 functionality, too.
+
+Module snd-cs4281
+-----------------
+
+Module for Cirrus Logic CS4281 soundchip.
+
+dual_codec
+ Secondary codec ID (0 = disable, default)
+
+This module supports multiple cards.
+
+The power-management is supported.
+
+Module snd-cs46xx
+-----------------
+
+Module for PCI sound cards based on CS4610/CS4612/CS4614/CS4615/CS4622/
+CS4624/CS4630/CS4280 PCI chips.
+
+external_amp
+ Force to enable external amplifier.
+thinkpad
+ Force to enable Thinkpad's CLKRUN control.
+mmap_valid
+ Support OSS mmap mode (default = 0).
+
+This module supports multiple cards and autoprobe.
+Usually external amp and CLKRUN controls are detected automatically
+from PCI sub vendor/device ids. If they don't work, give the options
+above explicitly.
+
+The power-management is supported.
+
+Module snd-cs5530
+-----------------
+
+Module for Cyrix/NatSemi Geode 5530 chip.
+
+Module snd-cs5535audio
+----------------------
+
+Module for multifunction CS5535 companion PCI device
+
+The power-management is supported.
+
+Module snd-ctxfi
+----------------
+
+Module for Creative Sound Blaster X-Fi boards (20k1 / 20k2 chips)
+
+* Creative Sound Blaster X-Fi Titanium Fatal1ty Champion Series
+* Creative Sound Blaster X-Fi Titanium Fatal1ty Professional Series
+* Creative Sound Blaster X-Fi Titanium Professional Audio
+* Creative Sound Blaster X-Fi Titanium
+* Creative Sound Blaster X-Fi Elite Pro
+* Creative Sound Blaster X-Fi Platinum
+* Creative Sound Blaster X-Fi Fatal1ty
+* Creative Sound Blaster X-Fi XtremeGamer
+* Creative Sound Blaster X-Fi XtremeMusic
+
+reference_rate
+ reference sample rate, 44100 or 48000 (default)
+multiple
+ multiple to ref. sample rate, 1 or 2 (default)
+subsystem
+ override the PCI SSID for probing;
+ the value consists of SSVID << 16 | SSDID.
+ The default is zero, which means no override.
+
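+As a worked example of the ``subsystem`` encoding (the IDs below are
+made up for illustration): with SSVID 0x1102 and SSDID 0x0021, the
+override value is (0x1102 << 16) | 0x0021 = 0x11020021, i.e.:
+::
+
+  options snd-ctxfi subsystem=0x11020021
+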
+This module supports multiple cards.
+
+Module snd-darla20
+------------------
+
+Module for Echoaudio Darla20
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-darla24
+------------------
+
+Module for Echoaudio Darla24
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-dt019x
+-----------------
+
+Module for Diamond Technologies DT-019X / Avance Logic ALS-007 (PnP
+only)
+
+This module supports multiple cards. This module is enabled only with
+ISA PnP support.
+
+The power-management is supported.
+
+Module snd-dummy
+----------------
+
+Module for the dummy sound card. This "card" doesn't do any output
+or input, but you may use this module for any application which
+requires a sound card (like RealPlayer).
+
+pcm_devs
+ Number of PCM devices assigned to each card (default = 1, up to 4)
+pcm_substreams
+ Number of PCM substreams assigned to each PCM (default = 8, up to 128)
+hrtimer
+ Use hrtimer (=1, default) or system timer (=0)
+fake_buffer
+ Fake buffer allocations (default = 1)
+
+When multiple PCM devices are created, snd-dummy gives different
+behavior to each PCM device:
+
+* 0 = interleaved with mmap support
+* 1 = non-interleaved with mmap support
+* 2 = interleaved without mmap
+* 3 = non-interleaved without mmap
+
+By default, the snd-dummy driver doesn't allocate real buffers but
+either ignores read/write or maps a single dummy page to all buffer
+pages, in order to save resources. If your apps need the read/written
+buffer data to be consistent, pass the ``fake_buffer=0`` option.
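+
+For instance, to get consistent read/write buffer contents with two PCM
+devices per card (values are only an illustration):
+::
+
+  modprobe snd-dummy pcm_devs=2 fake_buffer=0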
+
+The power-management is supported.
+
+Module snd-echo3g
+-----------------
+
+Module for Echoaudio 3G cards (Gina3G/Layla3G)
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-emu10k1
+------------------
+
+Module for EMU10K1/EMU10k2 based PCI sound cards.
+
+* Sound Blaster Live!
+* Sound Blaster PCI 512
+* Emu APS (partially supported)
+* Sound Blaster Audigy
+
+extin
+ bitmap of available external inputs for FX8010 (see below)
+extout
+ bitmap of available external outputs for FX8010 (see below)
+seq_ports
+ allocated sequencer ports (4 by default)
+max_synth_voices
+ limit of voices used for wavetable (64 by default)
+max_buffer_size
+ specifies the maximum size of wavetable/pcm buffers given in MB
+ unit. Default value is 128.
+enable_ir
+ enable IR
+
+This module supports multiple cards and autoprobe.
+
+Input & Output configurations [extin/extout]
+* Creative Card wo/Digital out [0x0003/0x1f03]
+* Creative Card w/Digital out [0x0003/0x1f0f]
+* Creative Card w/Digital CD in [0x000f/0x1f0f]
+* Creative Card wo/Digital out + LiveDrive [0x3fc3/0x1fc3]
+* Creative Card w/Digital out + LiveDrive [0x3fc3/0x1fcf]
+* Creative Card w/Digital CD in + LiveDrive [0x3fcf/0x1fcf]
+* Creative Card wo/Digital out + Digital I/O 2 [0x0fc3/0x1f0f]
+* Creative Card w/Digital out + Digital I/O 2 [0x0fc3/0x1f0f]
+* Creative Card w/Digital CD in + Digital I/O 2 [0x0fcf/0x1f0f]
+* Creative Card 5.1/w Digital out + LiveDrive [0x3fc3/0x1fff]
+* Creative Card 5.1 (c) 2003 [0x3fc3/0x7cff]
+* Creative Card all ins and outs [0x3fff/0x7fff]
+
+The power-management is supported.
+
+Module snd-emu10k1x
+-------------------
+
+Module for Creative Emu10k1X (SB Live Dell OEM version)
+
+This module supports multiple cards.
+
+Module snd-ens1370
+------------------
+
+Module for Ensoniq AudioPCI ES1370 PCI sound cards.
+
+* SoundBlaster PCI 64
+* SoundBlaster PCI 128
+
+joystick
+ Enable joystick (default off)
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-ens1371
+------------------
+
+Module for Ensoniq AudioPCI ES1371 PCI sound cards.
+
+* SoundBlaster PCI 64
+* SoundBlaster PCI 128
+* SoundBlaster Vibra PCI
+
+joystick_port
+ port # for joystick (0x200,0x208,0x210,0x218), 0 = disable
+ (default), 1 = auto-detect
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-es1688
+-----------------
+
+Module for ESS AudioDrive ES-1688 and ES-688 sound cards.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+mpu_port
+ port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
+mpu_irq
+ IRQ # for MPU-401 port (5,7,9,10)
+fm_port
+ port # for OPL3 (optional; shares the same port as default)
+
+with ``isapnp=0``, the following additional options are available:
+
+port
+ port # for ES-1688 chip (0x220,0x240,0x260)
+irq
+ IRQ # for ES-1688 chip (5,7,9,10)
+dma8
+ DMA # for ES-1688 chip (0,1,3)
+
+This module supports multiple cards and autoprobe (without MPU-401 port)
+and PnP with the ES968 chip.
+
+Module snd-es18xx
+-----------------
+
+Module for ESS AudioDrive ES-18xx sound cards.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for ES-18xx chip (0x220,0x240,0x260)
+mpu_port
+ port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
+fm_port
+ port # for FM (optional, not used)
+irq
+ IRQ # for ES-18xx chip (5,7,9,10)
+dma1
+ first DMA # for ES-18xx chip (0,1,3)
+dma2
+ second DMA # for ES-18xx chip (0,1,3)
+
+This module supports multiple cards, ISA PnP and autoprobe (without MPU-401
+port if native ISA PnP routines are not used).
+When ``dma2`` is equal to ``dma1``, the driver works in half-duplex mode.
+
+The power-management is supported.
+
+Module snd-es1938
+-----------------
+
+Module for sound cards based on ESS Solo-1 (ES1938,ES1946) chips.
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-es1968
+-----------------
+
+Module for sound cards based on ESS Maestro-1/2/2E (ES1968/ES1978) chips.
+
+total_bufsize
+ total buffer size in kB (1-4096kB)
+pcm_substreams_p
+ playback channels (1-8, default=2)
+pcm_substreams_c
+ capture channels (1-8, default=0)
+clock
+ clock (0 = auto-detection)
+use_pm
+ support the power-management (0 = off, 1 = on, 2 = auto (default))
+enable_mpu
+ enable MPU401 (0 = off, 1 = on, 2 = auto (default))
+joystick
+ enable joystick (default off)
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-fm801
+----------------
+
+Module for ForteMedia FM801 based PCI sound cards.
+
+tea575x_tuner
+ Enable TEA575x tuner;
+ 1 = MediaForte 256-PCS,
+ 2 = MediaForte 256-PCPR,
+ 3 = MediaForte 64-PCR
+ High 16-bits are video (radio) device number + 1;
+ example: 0x10002 (MediaForte 256-PCPR, device 1)
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-gina20
+-----------------
+
+Module for Echoaudio Gina20
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-gina24
+-----------------
+
+Module for Echoaudio Gina24
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-gusclassic
+---------------------
+
+Module for Gravis UltraSound Classic sound card.
+
+port
+ port # for GF1 chip (0x220,0x230,0x240,0x250,0x260)
+irq
+ IRQ # for GF1 chip (3,5,9,11,12,15)
+dma1
+ DMA # for GF1 chip (1,3,5,6,7)
+dma2
+ DMA # for GF1 chip (1,3,5,6,7,-1=disable)
+joystick_dac
+ 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
+voices
+ GF1 voices limit (14-32)
+pcm_voices
+ reserved PCM voices
+
+This module supports multiple cards and autoprobe.
+
+Module snd-gusextreme
+---------------------
+
+Module for Gravis UltraSound Extreme (Synergy ViperMax) sound card.
+
+port
+ port # for ES-1688 chip (0x220,0x230,0x240,0x250,0x260)
+gf1_port
+ port # for GF1 chip (0x210,0x220,0x230,0x240,0x250,0x260,0x270)
+mpu_port
+ port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable
+irq
+ IRQ # for ES-1688 chip (5,7,9,10)
+gf1_irq
+ IRQ # for GF1 chip (3,5,9,11,12,15)
+mpu_irq
+ IRQ # for MPU-401 port (5,7,9,10)
+dma8
+ DMA # for ES-1688 chip (0,1,3)
+dma1
+ DMA # for GF1 chip (1,3,5,6,7)
+joystick_dac
+ 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
+voices
+ GF1 voices limit (14-32)
+pcm_voices
+ reserved PCM voices
+
+This module supports multiple cards and autoprobe (without MPU-401 port).
+
+Module snd-gusmax
+-----------------
+
+Module for Gravis UltraSound MAX sound card.
+
+port
+ port # for GF1 chip (0x220,0x230,0x240,0x250,0x260)
+irq
+ IRQ # for GF1 chip (3,5,9,11,12,15)
+dma1
+ DMA # for GF1 chip (1,3,5,6,7)
+dma2
+ DMA # for GF1 chip (1,3,5,6,7,-1=disable)
+joystick_dac
+ 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
+voices
+ GF1 voices limit (14-32)
+pcm_voices
+ reserved PCM voices
+
+This module supports multiple cards and autoprobe.
+
+Module snd-hda-intel
+--------------------
+
+Module for Intel HD Audio (ICH6, ICH6M, ESB2, ICH7, ICH8, ICH9, ICH10,
+PCH, SCH), ATI SB450, SB600, R600, RS600, RS690, RS780, RV610, RV620,
+RV630, RV635, RV670, RV770, VIA VT8251/VT8237A, SIS966, ULI M5461
+
+[Multiple options for each card instance]
+
+model
+ force the model name
+position_fix
+ Fix DMA pointer;
+ -1 = system default: choose appropriate one per controller hardware,
+ 0 = auto: falls back to LPIB when POSBUF doesn't work,
+ 1 = use LPIB,
+ 2 = POSBUF: use position buffer,
+ 3 = VIACOMBO: VIA-specific workaround for capture,
+ 4 = COMBO: use LPIB for playback, auto for capture stream
+probe_mask
+ Bitmask to probe codecs (default = -1, meaning all slots);
+ When bit 8 (0x100) is set, the lower 8 bits are used
+ as the "fixed" codec slots; i.e. the driver probes those
+ slots regardless of what the hardware reports back
+probe_only
+ Only probing and no codec initialization (default=off);
+ Useful to check the initial codec status for debugging
+bdl_pos_adj
+ Specifies the DMA IRQ timing delay in samples.
+ Passing -1 will make the driver choose the appropriate
+ value based on the controller chip.
+patch
+ Specifies the early "patch" files to modify the HD-audio setup
+ before initializing the codecs.
+ This option is available only when ``CONFIG_SND_HDA_PATCH_LOADER=y``
+ is set. See hd-audio/notes.rst for details.
+beep_mode
+ Selects the beep registration mode (0=off, 1=on);
+ default value is set via ``CONFIG_SND_HDA_INPUT_BEEP_MODE`` kconfig.
+
+[Single (global) options]
+
+single_cmd
+ Use single immediate commands to communicate with codecs
+ (for debugging only)
+enable_msi
+ Enable Message Signaled Interrupt (MSI) (default = off)
+power_save
+ Automatic power-saving timeout (in seconds, 0 = disable)
+power_save_controller
+ Reset HD-audio controller in power-saving mode (default = on)
+align_buffer_size
+ Force rounding of buffer/period sizes to multiples of 128 bytes.
+ This is more efficient in terms of memory access but isn't
+ required by the HDA spec and prevents users from specifying
+ exact period/buffer sizes. (default = on)
+snoop
+ Enable/disable snooping (default = on)
+
+This module supports multiple cards and autoprobe.
+
+See hd-audio/notes.rst for more details about HD-audio driver.
+
+Each codec may have a model table for different configurations.
+If your machine isn't listed there, the default (usually minimal)
+configuration is set up. You can pass ``model=<name>`` option to
+specify a certain model in such a case. There are different
+models depending on the codec chip. The list of available models
+is found in hd-audio/models.rst.
+
+The model name ``generic`` is treated as a special case. When this
+model is given, the driver uses the generic codec parser without
+"codec-patch". It's sometimes good for testing and debugging.
+
+If the default configuration doesn't work and one of the above models
+matches your device, report it together with alsa-info.sh
+output (with ``--no-upload`` option) to kernel bugzilla or alsa-devel
+ML (see the section `Links and Addresses`_).
+
+``power_save`` and ``power_save_controller`` options are for power-saving
+mode. See powersave.txt for details.
+
+Note: If you get click noises on output, try the module option
+``position_fix=1`` or ``2``. ``position_fix=1`` will use the SD_LPIB
+register value without FIFO size correction as the current
+DMA pointer. ``position_fix=2`` will make the driver use
+the position buffer instead of reading the SD_LPIB register.
+(Usually the SD_LPIB register is more accurate than the
+position buffer.)
+
+``position_fix=3`` is specific to VIA devices. The position
+of the capture stream is checked from both LPIB and POSBUF
+values. ``position_fix=4`` is a combination mode, using LPIB
+for playback and POSBUF for capture.
+
+NB: If you get many ``azx_get_response timeout`` messages at
+load time, it's likely an interrupt problem (e.g. ACPI IRQ
+routing). Try to boot with options like ``pci=noacpi``. Also, you
+can try the ``single_cmd=1`` module option. This will switch the
+communication method between the HDA controller and codecs to
+single immediate commands instead of CORB/RIRB. Basically, the
+single command mode is provided only for the BIOS, and you won't get
+unsolicited events either. But, at least, this works independently
+of the IRQ. Remember this is a last resort, and should be
+avoided as much as possible...
+
+MORE NOTES ON ``azx_get_response timeout`` PROBLEMS:
+On some hardware, you may need to add a proper probe_mask option
+to avoid the ``azx_get_response timeout`` problem above, instead.
+This occurs when access to a non-existent or non-working codec slot
+(likely a modem one) stalls the communication via the HD-audio
+bus. You can see which codec slots are probed by enabling
+``CONFIG_SND_DEBUG_VERBOSE``, or simply from the file name of the codec
+proc files. Then limit the slots to probe by probe_mask option.
+For example, ``probe_mask=1`` means to probe only the first slot, and
+``probe_mask=4`` means only the third slot.
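+
+A hedged example of such a workaround in a modprobe configuration file
+(the choice of ``probe_mask=1`` and ``position_fix=1`` is illustrative
+and depends on your hardware):
+::
+
+  options snd-hda-intel probe_mask=1 position_fix=1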
+
+The power-management is supported.
+
+Module snd-hdsp
+---------------
+
+Module for RME Hammerfall DSP audio interface(s)
+
+This module supports multiple cards.
+
+Note: The firmware data can be automatically loaded via hotplug
+when ``CONFIG_FW_LOADER`` is set. Otherwise, you need to load
+the firmware via hdsploader utility included in alsa-tools
+package.
+The firmware data is found in alsa-firmware package.
+
+Note: The snd-page-alloc module does the job that the snd-hammerfall-mem
+module did formerly. It allocates the buffers in advance
+when any HDSP cards are found. To ensure the buffer
+allocation, load the snd-page-alloc module early in the
+boot sequence. See the `Early Buffer Allocation`_
+section.
+
+Module snd-hdspm
+----------------
+
+Module for RME HDSP MADI board.
+
+precise_ptr
+ Enable precise pointer, or disable.
+line_outs_monitor
+ Send playback streams to analog outs by default.
+enable_monitor
+ Enable Analog Out on Channel 63/64 by default.
+
+See hdspm.txt for details.
+
+Module snd-ice1712
+------------------
+
+Module for Envy24 (ICE1712) based PCI sound cards.
+
+* MidiMan M Audio Delta 1010
+* MidiMan M Audio Delta 1010LT
+* MidiMan M Audio Delta DiO 2496
+* MidiMan M Audio Delta 66
+* MidiMan M Audio Delta 44
+* MidiMan M Audio Delta 410
+* MidiMan M Audio Audiophile 2496
+* TerraTec EWS 88MT
+* TerraTec EWS 88D
+* TerraTec EWX 24/96
+* TerraTec DMX 6Fire
+* TerraTec Phase 88
+* Hoontech SoundTrack DSP 24
+* Hoontech SoundTrack DSP 24 Value
+* Hoontech SoundTrack DSP 24 Media 7.1
+* Event Electronics, EZ8
+* Digigram VX442
+* Lionstracs, Mediastaton
+* Terrasoniq TS 88
+
+model
+ Use the given board model, one of the following:
+ delta1010, dio2496, delta66, delta44, audiophile, delta410,
+ delta1010lt, vx442, ewx2496, ews88mt, ews88mt_new, ews88d,
+ dmx6fire, dsp24, dsp24_value, dsp24_71, ez8,
+ phase88, mediastation
+omni
+ Omni I/O support for MidiMan M-Audio Delta44/66
+cs8427_timeout
+ reset timeout for the CS8427 chip (S/PDIF transceiver) in msec
+ resolution, default value is 500 (0.5 sec)
+
+This module supports multiple cards and autoprobe.
+Note: The consumer part is not used with all Envy24 based cards (for
+example in the MidiMan Delta series).
+
+Note: The supported board is detected by reading EEPROM or PCI
+SSID (if EEPROM isn't available). You can override the
+model by passing the ``model`` module option in case the
+driver isn't configured properly or you want to try another
+type for testing.
+
+Module snd-ice1724
+------------------
+
+Module for Envy24HT (VT/ICE1724), Envy24PT (VT1720) based PCI sound cards.
+
+* MidiMan M Audio Revolution 5.1
+* MidiMan M Audio Revolution 7.1
+* MidiMan M Audio Audiophile 192
+* AMP Ltd AUDIO2000
+* TerraTec Aureon 5.1 Sky
+* TerraTec Aureon 7.1 Space
+* TerraTec Aureon 7.1 Universe
+* TerraTec Phase 22
+* TerraTec Phase 28
+* AudioTrak Prodigy 7.1
+* AudioTrak Prodigy 7.1 LT
+* AudioTrak Prodigy 7.1 XT
+* AudioTrak Prodigy 7.1 HIFI
+* AudioTrak Prodigy 7.1 HD2
+* AudioTrak Prodigy 192
+* Pontis MS300
+* Albatron K8X800 Pro II
+* Chaintech ZNF3-150
+* Chaintech ZNF3-250
+* Chaintech 9CJS
+* Chaintech AV-710
+* Shuttle SN25P
+* Onkyo SE-90PCI
+* Onkyo SE-200PCI
+* ESI Juli@
+* ESI Maya44
+* Hercules Fortissimo IV
+* EGO-SYS WaveTerminal 192M
+
+model
+ Use the given board model, one of the following:
+ revo51, revo71, amp2000, prodigy71, prodigy71lt,
+ prodigy71xt, prodigy71hifi, prodigyhd2, prodigy192,
+ juli, aureon51, aureon71, universe, ap192, k8x800,
+ phase22, phase28, ms300, av710, se200pci, se90pci,
+ fortissimo4, sn25p, WT192M, maya44
+
+This module supports multiple cards and autoprobe.
+
+Note: The supported board is detected by reading EEPROM or PCI
+SSID (if EEPROM isn't available). You can override the
+model by passing the ``model`` module option in case the
+driver isn't configured properly or you want to try another
+type for testing.
+
+Module snd-indigo
+-----------------
+
+Module for Echoaudio Indigo
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-indigodj
+-------------------
+
+Module for Echoaudio Indigo DJ
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-indigoio
+-------------------
+
+Module for Echoaudio Indigo IO
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-intel8x0
+-------------------
+
+Module for AC'97 motherboards from Intel and compatibles.
+
+* Intel i810/810E, i815, i820, i830, i84x, MX440 ICH5, ICH6, ICH7,
+ 6300ESB, ESB2
+* SiS 7012 (SiS 735)
+* NVidia NForce, NForce2, NForce3, MCP04, CK804 CK8, CK8S, MCP501
+* AMD AMD768, AMD8111
+* ALi m5455
+
+ac97_clock
+ AC'97 codec clock base (0 = auto-detect)
+ac97_quirk
+ AC'97 workaround for strange hardware;
+ See `AC97 Quirk Option`_ section below.
+buggy_irq
+ Enable workaround for buggy interrupts on some motherboards
+ (default yes on nForce chips, otherwise off)
+buggy_semaphore
+ Enable workaround for hardware with buggy semaphores (e.g. on some
+ ASUS laptops) (default off)
+spdif_aclink
+ Use S/PDIF over AC-link instead of direct connection from the
+ controller chip (0 = off, 1 = on, -1 = default)
+
+This module supports one chip and autoprobe.
+
+Note: the latest driver supports auto-detection of the chip clock.
+If you still encounter too-fast playback, specify the clock
+explicitly via the module option ``ac97_clock=41194``.
+
+Joystick/MIDI ports are not supported by this driver. If your
+motherboard has these devices, use the ns558 or snd-mpu401
+modules, respectively.
+
+The power-management is supported.
+
+Module snd-intel8x0m
+--------------------
+
+Module for Intel ICH (i8x0) chipset MC97 modems.
+
+* Intel i810/810E, i815, i820, i830, i84x, MX440 ICH5, ICH6, ICH7
+* SiS 7013 (SiS 735)
+* NVidia NForce, NForce2, NForce2s, NForce3
+* AMD AMD8111
+* ALi m5455
+
+ac97_clock
+ AC'97 codec clock base (0 = auto-detect)
+
+This module supports one card and autoprobe.
+
+Note: The default index value of this module is -2, i.e. the first
+slot is excluded.
+
+The power-management is supported.
+
+Module snd-interwave
+--------------------
+
+Module for Gravis UltraSound PnP, Dynasonic 3-D/Pro, STB Sound Rage 32
+and other sound cards based on AMD InterWave (tm) chip.
+
+joystick_dac
+ 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
+midi
+ 1 = MIDI UART enable, 0 = MIDI UART disable (default)
+pcm_voices
+ reserved PCM voices for the synthesizer (default 2)
+effect
+ 1 = InterWave effects enable (default 0); requires 8 voices
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for InterWave chip (0x210,0x220,0x230,0x240,0x250,0x260)
+irq
+ IRQ # for InterWave chip (3,5,9,11,12,15)
+dma1
+ DMA # for InterWave chip (0,1,3,5,6,7)
+dma2
+ DMA # for InterWave chip (0,1,3,5,6,7,-1=disable)
+
+This module supports multiple cards, autoprobe and ISA PnP.
+
+Module snd-interwave-stb
+------------------------
+
+Module for UltraSound 32-Pro (sound card from STB used by Compaq)
+and other sound cards based on AMD InterWave (tm) chip with TEA6330T
+circuit for extended control of bass, treble and master volume.
+
+joystick_dac
+ 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
+midi
+ 1 = MIDI UART enable, 0 = MIDI UART disable (default)
+pcm_voices
+ reserved PCM voices for the synthesizer (default 2)
+effect
+ 1 = InterWave effects enable (default 0); requires 8 voices
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for InterWave chip (0x210,0x220,0x230,0x240,0x250,0x260)
+port_tc
+ tone control (i2c bus) port # for TEA6330T chip (0x350,0x360,0x370,0x380)
+irq
+ IRQ # for InterWave chip (3,5,9,11,12,15)
+dma1
+ DMA # for InterWave chip (0,1,3,5,6,7)
+dma2
+ DMA # for InterWave chip (0,1,3,5,6,7,-1=disable)
+
+This module supports multiple cards, autoprobe and ISA PnP.
+
+Module snd-jazz16
+-------------------
+
+Module for Media Vision Jazz16 chipset. The chipset consists of 3 chips:
+MVD1216 + MVA416 + MVA514.
+
+port
+ port # for SB DSP chip (0x210,0x220,0x230,0x240,0x250,0x260)
+irq
+ IRQ # for SB DSP chip (3,5,7,9,10,15)
+dma8
+ DMA # for SB DSP chip (1,3)
+dma16
+ DMA # for SB DSP chip (5,7)
+mpu_port
+ MPU-401 port # (0x300,0x310,0x320,0x330)
+mpu_irq
+ MPU-401 irq # (2,3,5,7)
+
+This module supports multiple cards.
+
+Module snd-korg1212
+-------------------
+
+Module for Korg 1212 IO PCI card
+
+This module supports multiple cards.
+
+Module snd-layla20
+------------------
+
+Module for Echoaudio Layla20
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-layla24
+------------------
+
+Module for Echoaudio Layla24
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-lola
+---------------
+
+Module for Digigram Lola PCI-e boards
+
+This module supports multiple cards.
+
+Module snd-lx6464es
+-------------------
+
+Module for Digigram LX6464ES boards
+
+This module supports multiple cards.
+
+Module snd-maestro3
+-------------------
+
+Module for Allegro/Maestro3 chips
+
+external_amp
+ enable external amp (enabled by default)
+amp_gpio
+ GPIO pin number for external amp (0-15) or -1 for default pin (8
+ for allegro, 1 for others)
+
+This module supports autoprobe and multiple chips.
+
+Note: the binding of the amplifier is hardware-dependent.
+If there is no sound even though all channels are unmuted, try
+specifying another GPIO connection via the amp_gpio option.
+For example, a Panasonic notebook might need the ``amp_gpio=0x0d``
+option.
+
+The power-management is supported.
+
+Module snd-mia
+---------------
+
+Module for Echoaudio Mia
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-miro
+---------------
+
+Module for Miro soundcards: miroSOUND PCM 1 pro, miroSOUND PCM 12,
+miroSOUND PCM 20 Radio.
+
+port
+ Port # (0x530,0x604,0xe80,0xf40)
+irq
+ IRQ # (5,7,9,10,11)
+dma1
+ 1st dma # (0,1,3)
+dma2
+ 2nd dma # (0,1)
+mpu_port
+ MPU-401 port # (0x300,0x310,0x320,0x330)
+mpu_irq
+ MPU-401 irq # (5,7,9,10)
+fm_port
+ FM Port # (0x388)
+wss
+ enable WSS mode
+ide
+ enable onboard ide support
+
+Module snd-mixart
+-----------------
+
+Module for Digigram miXart8 sound cards.
+
+This module supports multiple cards.
+Note: One miXart8 board will be represented as 4 alsa cards.
+See MIXART.txt for details.
+
+When the driver is compiled as a module and the hotplug firmware
+loader is supported, the firmware data is loaded via hotplug automatically.
+Install the necessary firmware files from the alsa-firmware package.
+When no hotplug firmware loader is available, you need to load the
+firmware via the mixartloader utility from the alsa-tools package.
+
+Module snd-mona
+---------------
+
+Module for Echoaudio Mona
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+
+Module snd-mpu401
+-----------------
+
+Module for MPU-401 UART devices.
+
+port
+ port number or -1 (disable)
+irq
+ IRQ number or -1 (disable)
+pnp
+ PnP detection - 0 = disable, 1 = enable (default)
+
+This module supports multiple devices and PnP.
+
+Module snd-msnd-classic
+-----------------------
+
+Module for Turtle Beach MultiSound Classic, Tahiti or Monterey
+soundcards.
+
+io
+ Port # for msnd-classic card
+irq
+ IRQ # for msnd-classic card
+mem
+ Memory address (0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000)
+write_ndelay
+ enable write ndelay (default = 1)
+calibrate_signal
+ calibrate signal (default = 0)
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+digital
+ Digital daughterboard present (default = 0)
+cfg
+ Config port (0x250, 0x260 or 0x270) default = PnP
+reset
+ Reset all devices
+mpu_io
+ MPU401 I/O port
+mpu_irq
+ MPU401 irq#
+ide_io0
+ IDE port #0
+ide_io1
+ IDE port #1
+ide_irq
+ IDE irq#
+joystick_io
+ Joystick I/O port
+
+The driver requires firmware files ``turtlebeach/msndinit.bin`` and
+``turtlebeach/msndperm.bin`` in the proper firmware directory.
+
+See Documentation/sound/oss/MultiSound for important information
+about this driver. Note that it has been discontinued, but the
+Voyetra Turtle Beach knowledge base entry for it is still available
+at
+http://www.turtlebeach.com
+
+Module snd-msnd-pinnacle
+------------------------
+
+Module for Turtle Beach MultiSound Pinnacle/Fiji soundcards.
+
+io
+ Port # for pinnacle/fiji card
+irq
+ IRQ # for pinnacle/fiji card
+mem
+ Memory address (0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000)
+write_ndelay
+ enable write ndelay (default = 1)
+calibrate_signal
+ calibrate signal (default = 0)
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+The driver requires firmware files ``turtlebeach/pndspini.bin`` and
+``turtlebeach/pndsperm.bin`` in the proper firmware directory.
+
+Module snd-mtpav
+----------------
+
+Module for MOTU MidiTimePiece AV multiport MIDI (on the parallel
+port).
+
+port
+ I/O port # for MTPAV (0x378,0x278, default=0x378)
+irq
+ IRQ # for MTPAV (7,5, default=7)
+hwports
+ number of supported hardware ports, default=8.
+
+Module supports only 1 card. This module has no enable option.
+
+Module snd-mts64
+----------------
+
+Module for Ego Systems (ESI) Miditerminal 4140
+
+This module supports multiple devices.
+Requires parport (``CONFIG_PARPORT``).
+
+Module snd-nm256
+----------------
+
+Module for NeoMagic NM256AV/ZX chips
+
+playback_bufsize
+ max playback frame size in kB (4-128kB)
+capture_bufsize
+ max capture frame size in kB (4-128kB)
+force_ac97
+ 0 or 1 (disabled by default)
+buffer_top
+ specify buffer top address
+use_cache
+ 0 or 1 (disabled by default)
+vaio_hack
+ alias buffer_top=0x25a800
+reset_workaround
+ enable AC97 RESET workaround for some laptops
+reset_workaround2
+ enable extended AC97 RESET workaround for some other laptops
+
+This module supports one chip and autoprobe.
+
+The power-management is supported.
+
+Note: on some notebooks the buffer address cannot be detected
+automatically, or detection causes a hang-up during initialization.
+In such a case, specify the buffer top address explicitly via
+the buffer_top option. For example:
+
+* Sony F250: buffer_top=0x25a800
+* Sony F270: buffer_top=0x272800
+
+The driver supports only the AC97 codec. It's possible to force
+initialization/use of AC97 even if it isn't detected. In such a
+case, use the ``force_ac97=1`` option - but there is *no* guarantee
+that it works!
+
+Note: The NM256 chip can be linked internally with non-AC97
+codecs. This driver supports only the AC97 codec, and won't work
+on machines with other (most likely CS423x or OPL3SAx) chips,
+even though the device is detected in lspci. In such a case, try
+other drivers, e.g. snd-cs4232 or snd-opl3sa2. Some of those cards
+have ISA PnP and some don't; without ISA PnP you'll need to specify
+``isapnp=0`` and the proper hardware parameters.
+
+Note: some laptops need a workaround for AC97 RESET. For the
+known hardware like Dell Latitude LS and Sony PCG-F305, this
+workaround is enabled automatically. For other laptops with a
+hard freeze, you can try ``reset_workaround=1`` option.
+
+Note: Dell Latitude CSx laptops have another problem regarding
+AC97 RESET. On these laptops, the reset_workaround2 option is
+turned on by default. This option is worth trying if the
+previous reset_workaround option doesn't help.
+
+Note: This driver is really crappy. It's a port of the
+OSS driver, which is a result of black-magic reverse engineering.
+Codec detection will fail if the driver is loaded *after* the
+X server. You might be able to force loading
+the module, but it may result in a hang-up. Hence, make sure that
+you load this module *before* X if you encounter this kind of
+problem.
+
+Module snd-opl3sa2
+------------------
+
+Module for Yamaha OPL3-SA2/SA3 sound cards.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ control port # for OPL3-SA chip (0x370)
+sb_port
+ SB port # for OPL3-SA chip (0x220,0x240)
+wss_port
+ WSS port # for OPL3-SA chip (0x530,0xe80,0xf40,0x604)
+midi_port
+ port # for MPU-401 UART (0x300,0x330), -1 = disable
+fm_port
+ FM port # for OPL3-SA chip (0x388), -1 = disable
+irq
+ IRQ # for OPL3-SA chip (5,7,9,10)
+dma1
+ first DMA # for Yamaha OPL3-SA chip (0,1,3)
+dma2
+ second DMA # for Yamaha OPL3-SA chip (0,1,3), -1 = disable
+
+This module supports multiple cards and ISA PnP. It does not support
+autoprobe (if ISA PnP is not used), so all ports must be specified.
+
+The power-management is supported.
+
+Module snd-opti92x-ad1848
+-------------------------
+
+Module for sound cards based on OPTi 82c92x and Analog Devices AD1848 chips.
+Module works with OAK Mozart cards as well.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for WSS chip (0x530,0xe80,0xf40,0x604)
+mpu_port
+ port # for MPU-401 UART (0x300,0x310,0x320,0x330)
+fm_port
+ port # for OPL3 device (0x388)
+irq
+ IRQ # for WSS chip (5,7,9,10,11)
+mpu_irq
+ IRQ # for MPU-401 UART (5,7,9,10)
+dma1
+ first DMA # for WSS chip (0,1,3)
+
+This module supports only one card, autoprobe and PnP.
+
+Module snd-opti92x-cs4231
+-------------------------
+
+Module for sound cards based on OPTi 82c92x and Crystal CS4231 chips.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for WSS chip (0x530,0xe80,0xf40,0x604)
+mpu_port
+ port # for MPU-401 UART (0x300,0x310,0x320,0x330)
+fm_port
+ port # for OPL3 device (0x388)
+irq
+ IRQ # for WSS chip (5,7,9,10,11)
+mpu_irq
+ IRQ # for MPU-401 UART (5,7,9,10)
+dma1
+ first DMA # for WSS chip (0,1,3)
+dma2
+ second DMA # for WSS chip (0,1,3)
+
+This module supports only one card, autoprobe and PnP.
+
+Module snd-opti93x
+------------------
+
+Module for sound cards based on OPTi 82c93x chips.
+
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with ``isapnp=0``, the following options are available:
+
+port
+ port # for WSS chip (0x530,0xe80,0xf40,0x604)
+mpu_port
+ port # for MPU-401 UART (0x300,0x310,0x320,0x330)
+fm_port
+ port # for OPL3 device (0x388)
+irq
+ IRQ # for WSS chip (5,7,9,10,11)
+mpu_irq
+ IRQ # for MPU-401 UART (5,7,9,10)
+dma1
+ first DMA # for WSS chip (0,1,3)
+dma2
+ second DMA # for WSS chip (0,1,3)
+
+This module supports only one card, autoprobe and PnP.
+
+Module snd-oxygen
+-----------------
+
+Module for sound cards based on the C-Media CMI8786/8787/8788 chip:
+
+* Asound A-8788
+* Asus Xonar DG/DGX
+* AuzenTech X-Meridian
+* AuzenTech X-Meridian 2G
+* Bgears b-Enspirer
+* Club3D Theatron DTS
+* HT-Omega Claro (plus)
+* HT-Omega Claro halo (XT)
+* Kuroutoshikou CMI8787-HG2PCI
+* Razer Barracuda AC-1
+* Sondigo Inferno
+* TempoTec HiFier Fantasia
+* TempoTec HiFier Serenade
+
+This module supports autoprobe and multiple cards.
+
+Module snd-pcsp
+---------------
+
+Module for internal PC-Speaker.
+
+nopcm
+ Disable PC-Speaker PCM sound. Only beeps remain.
+nforce_wa
+ enable NForce chipset workaround. Expect bad sound.
+
+This module supports system beeps, some kind of PCM playback and
+even a few mixer controls.
+
+Module snd-pcxhr
+----------------
+
+Module for Digigram PCXHR boards
+
+This module supports multiple cards.
+
+Module snd-portman2x4
+---------------------
+
+Module for Midiman Portman 2x4 parallel port MIDI interface
+
+This module supports multiple cards.
+
+Module snd-powermac (on ppc only)
+---------------------------------
+
+Module for PowerMac, iMac and iBook on-board soundchips
+
+enable_beep
+ enable beep using PCM (enabled as default)
+
+This module supports autoprobe of a chip.
+
+Note: the driver may have problems regarding endianness.
+
+The power-management is supported.
+
+Module snd-pxa2xx-ac97 (on arm only)
+------------------------------------
+
+Module for AC97 driver for the Intel PXA2xx chip
+
+For ARM architecture only.
+
+The power-management is supported.
+
+Module snd-riptide
+------------------
+
+Module for Conexant Riptide chip
+
+joystick_port
+ Joystick port # (default: 0x200)
+mpu_port
+ MPU401 port # (default: 0x330)
+opl3_port
+ OPL3 port # (default: 0x388)
+
+This module supports multiple cards.
+The driver requires firmware loader support in the kernel.
+You need to install the firmware file ``riptide.hex`` to the standard
+firmware path (e.g. /lib/firmware).
+
+Module snd-rme32
+----------------
+
+Module for RME Digi32, Digi32 Pro and Digi32/8 (Sek'd Prodif32,
+Prodif96 and Prodif Gold) sound cards.
+
+This module supports multiple cards.
+
+Module snd-rme96
+----------------
+
+Module for RME Digi96, Digi96/8 and Digi96/8 PRO/PAD/PST sound cards.
+
+This module supports multiple cards.
+
+Module snd-rme9652
+------------------
+
+Module for RME Digi9652 (Hammerfall, Hammerfall-Light) sound cards.
+
+precise_ptr
+ Enable precise pointer (doesn't work reliably). (default = 0)
+
+This module supports multiple cards.
+
+Note: The snd-page-alloc module does the job that the snd-hammerfall-mem
+module did formerly. It allocates the buffers in advance
+when any RME9652 cards are found. To ensure the buffer
+allocation, load the snd-page-alloc module early in the
+boot sequence. See the `Early Buffer Allocation`_
+section.
+
+Module snd-sa11xx-uda1341 (on arm only)
+---------------------------------------
+
+Module for Philips UDA1341TS on Compaq iPAQ H3600 sound card.
+
+Module supports only one card.
+Module has no enable and index options.
+
+The power-management is supported.
+
+Module snd-sb8
+--------------
+
+Module for 8-bit SoundBlaster cards: SoundBlaster 1.0, SoundBlaster 2.0,
+SoundBlaster Pro
+
+port
+ port # for SB DSP chip (0x220,0x240,0x260)
+irq
+ IRQ # for SB DSP chip (5,7,9,10)
+dma8
+ DMA # for SB DSP chip (1,3)
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-sb16 and snd-sbawe
+-----------------------------
+
+Module for 16-bit SoundBlaster cards: SoundBlaster 16 (PnP),
+SoundBlaster AWE 32 (PnP), SoundBlaster AWE 64 PnP
+
+mic_agc
+ Mic Auto-Gain-Control - 0 = disable, 1 = enable (default)
+csp
+ ASP/CSP chip support - 0 = disable (default), 1 = enable
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with isapnp=0, the following options are available:
+
+port
+ port # for SB DSP 4.x chip (0x220,0x240,0x260)
+mpu_port
+ port # for MPU-401 UART (0x300,0x330), -1 = disable
+awe_port
+ base port # for EMU8000 synthesizer (0x620,0x640,0x660) (snd-sbawe
+ module only)
+irq
+ IRQ # for SB DSP 4.x chip (5,7,9,10)
+dma8
+ 8-bit DMA # for SB DSP 4.x chip (0,1,3)
+dma16
+ 16-bit DMA # for SB DSP 4.x chip (5,6,7)
+
+This module supports multiple cards, autoprobe and ISA PnP.
+
+Note: To use Vibra16X cards in 16-bit half-duplex mode, you must
+disable 16-bit DMA with the ``dma16=-1`` module parameter.
+Also, all Sound Blaster 16 type cards can operate in 16-bit
+half-duplex mode through the 8-bit DMA channel by disabling their
+16-bit DMA channel.
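+
+For example (illustrative only), loading the driver with the 16-bit DMA
+disabled:
+::
+
+  modprobe snd-sb16 dma16=-1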
+
+The power-management is supported.
+
+Module snd-sc6000
+-----------------
+
+Module for Gallant SC-6000 soundcard and later models: SC-6600 and
+SC-7000.
+
+port
+ Port # (0x220 or 0x240)
+mss_port
+ MSS Port # (0x530 or 0xe80)
+irq
+ IRQ # (5,7,9,10,11)
+mpu_irq
+ MPU-401 IRQ # (5,7,9,10), 0 = no MPU-401 irq
+dma
+ DMA # (1,3,0)
+joystick
+ Enable gameport - 0 = disable (default), 1 = enable
+
+This module supports multiple cards.
+
+This card is also known as Audio Excel DSP 16 or Zoltrix AV302.
+
+Module snd-sscape
+-----------------
+
+Module for ENSONIQ SoundScape cards.
+
+port
+ Port # (PnP setup)
+wss_port
+ WSS Port # (PnP setup)
+irq
+ IRQ # (PnP setup)
+mpu_irq
+ MPU-401 IRQ # (PnP setup)
+dma
+ DMA # (PnP setup)
+dma2
+ 2nd DMA # (PnP setup, -1 to disable)
+joystick
+ Enable gameport - 0 = disable (default), 1 = enable
+
+This module supports multiple cards.
+
+The driver requires firmware loader support in the kernel.
+
+Module snd-sun-amd7930 (on sparc only)
+--------------------------------------
+
+Module for AMD7930 sound chips found on Sparcs.
+
+This module supports multiple cards.
+
+Module snd-sun-cs4231 (on sparc only)
+-------------------------------------
+
+Module for CS4231 sound chips found on Sparcs.
+
+This module supports multiple cards.
+
+Module snd-sun-dbri (on sparc only)
+-----------------------------------
+
+Module for DBRI sound chips found on Sparcs.
+
+This module supports multiple cards.
+
+Module snd-wavefront
+--------------------
+
+Module for Turtle Beach Maui, Tropez and Tropez+ sound cards.
+
+use_cs4232_midi
+ Use CS4232 MPU-401 interface
+ (inaccessibly located inside your computer)
+isapnp
+ ISA PnP detection - 0 = disable, 1 = enable (default)
+
+with isapnp=0, the following options are available:
+
+cs4232_pcm_port
+ Port # for CS4232 PCM interface.
+cs4232_pcm_irq
+ IRQ # for CS4232 PCM interface (5,7,9,11,12,15).
+cs4232_mpu_port
+ Port # for CS4232 MPU-401 interface.
+cs4232_mpu_irq
+ IRQ # for CS4232 MPU-401 interface (9,11,12,15).
+ics2115_port
+ Port # for ICS2115
+ics2115_irq
+ IRQ # for ICS2115
+fm_port
+ FM OPL-3 Port #
+dma1
+ DMA1 # for CS4232 PCM interface.
+dma2
+ DMA2 # for CS4232 PCM interface.
+
+Below are the options for the wavefront_synth features:
+
+wf_raw
+ Assume that we need to boot the OS (default:no);
+ If yes, then during driver loading, the state of the board is
+ ignored, and we reset the board and load the firmware anyway.
+fx_raw
+ Assume that the FX process needs help (default:yes);
+ If false, we'll leave the FX processor in whatever state it is
+ when the driver is loaded. The default is to download the
+ microprogram and associated coefficients to set it up for
+ "default" operation, whatever that means.
+debug_default
+ Debug parameters for card initialization
+wait_usecs
+ How long to wait without sleeping, usecs (default:150);
+ This magic number seems to give pretty optimal throughput
+ based on my limited experimentation.
+ If you want to play around with it and find a better value, be
+ my guest. Remember, the idea is to get a number that causes us
+ to just busy wait for as many WaveFront commands as possible,
+ without coming up with a number so large that we hog the whole
+ CPU.
+ Specifically, with this number, out of about 134,000 status
+ waits, only about 250 result in a sleep.
+sleep_interval
+ How long to sleep when waiting for reply (default: 100)
+sleep_tries
+ How many times to try sleeping during a wait (default: 50)
+ospath
+ Pathname to processed ICS2115 OS firmware (default:wavefront.os);
+ The path name of the ICS2115 OS firmware. In the recent
+ version, it's handled via firmware loader framework, so it
+ must be installed in the proper path, typically,
+ /lib/firmware.
+reset_time
+ How long to wait for a reset to take effect (default:2)
+ramcheck_time
+ How many seconds to wait for the RAM test (default:20)
+osrun_time
+ How many seconds to wait for the ICS2115 OS (default:10)
+
+This module supports multiple cards and ISA PnP.
+
+Note: in earlier versions the firmware file ``wavefront.os`` was
+located in /etc. Now it's loaded via the firmware loader, and
+must be in the proper firmware path, such as /lib/firmware.
+Copy (or symlink) the file appropriately if you get an error
+regarding firmware downloading after upgrading the kernel.
+
+Module snd-sonicvibes
+---------------------
+
+Module for S3 SonicVibes PCI sound cards.
+
+* PINE Schubert 32 PCI
+
+reverb
+ Reverb Enable - 1 = enable, 0 = disable (default);
+ SoundCard must have onboard SRAM for this.
+mge
+ Mic Gain Enable - 1 = enable, 0 = disable (default)
+
+This module supports multiple cards and autoprobe.
+
+Module snd-serial-u16550
+------------------------
+
+Module for UART16550A serial MIDI ports.
+
+port
+ port # for UART16550A chip
+irq
+ IRQ # for UART16550A chip, -1 = poll mode
+speed
+ speed in bauds (9600,19200,38400,57600,115200)
+ 38400 = default
+base
+ base for divisor in bauds (57600,115200,230400,460800)
+ 115200 = default
+outs
+ number of MIDI ports in a serial port (1-4)
+ 1 = default
+adaptor
+ Type of adaptor.
+ 0 = Soundcanvas, 1 = MS-124T, 2 = MS-124W S/A,
+ 3 = MS-124W M/B, 4 = Generic
+
+This module supports multiple cards. This module does not support autoprobe,
+so the main port must be specified!!! Other options are optional.
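+
+For example, to drive a Soundcanvas-type adaptor on the first serial
+port, options like the following could be used (0x3f8 and IRQ 4 are
+the usual COM1 resources; adjust them to your setup):
+::
+
+ options snd-serial-u16550 port=0x3f8 irq=4 speed=38400 outs=1 adaptor=0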
+
+Module snd-trident
+------------------
+
+Module for Trident 4DWave DX/NX sound cards.
+* Best Union Miss Melody 4DWave PCI
+* HIS 4DWave PCI
+* Warpspeed ONSpeed 4DWave PCI
+* AzTech PCI 64-Q3D
+* Addonics SV 750
+* CHIC True Sound 4Dwave
+* Shark Predator4D-PCI
+* Jaton SonicWave 4D
+* SiS SI7018 PCI Audio
+* Hoontech SoundTrack Digital 4DWave NX
+
+pcm_channels
+ max channels (voices) reserved for PCM
+wavetable_size
+ max wavetable size in kB (4-?kb)
+
+This module supports multiple cards and autoprobe.
+
+The power-management is supported.
+
+Module snd-ua101
+----------------
+
+Module for the Edirol UA-101/UA-1000 audio/MIDI interfaces.
+
+This module supports multiple devices, autoprobe and hotplugging.
+
+Module snd-usb-audio
+--------------------
+
+Module for USB audio and USB MIDI devices.
+
+vid
+ Vendor ID for the device (optional)
+pid
+ Product ID for the device (optional)
+nrpacks
+ Max. number of packets per URB (default: 8)
+device_setup
+ Device specific magic number (optional);
+ Influence depends on the device
+ Default: 0x0000
+ignore_ctl_error
+ Ignore any USB-controller errors regarding the mixer interface (default: no)
+autoclock
+ Enable auto-clock selection for UAC2 devices (default: yes)
+quirk_alias
+ Quirk alias list, pass strings like ``0123abcd:5678beef``, which
+ applies the existing quirk for the device 5678:beef to a new
+ device 0123:abcd.
+
+This module supports multiple devices, autoprobe and hotplugging.
+
+NB: the ``nrpacks`` parameter can be modified dynamically via sysfs.
+Don't set the value above 20; changes made via sysfs are not
+sanity-checked.
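+
+For instance, the value can usually be changed at runtime through the
+module parameter file under sysfs, e.g.:
+::
+
+ echo 4 > /sys/module/snd_usb_audio/parameters/nrpacks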
+
+NB: ``ignore_ctl_error=1`` may help when you get an error while accessing
+a mixer element, such as URB error -22. This happens with some
+buggy USB devices or controllers.
+
+NB: the quirk_alias option is provided only for testing / development.
+If you want proper support, contact upstream to add the matching
+quirk to the driver code statically.
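+
+For illustration, applying the quirk of an existing device 5678:beef
+to a new device 0123:abcd (both IDs are just placeholders) would look
+like:
+::
+
+ options snd-usb-audio quirk_alias=0123abcd:5678beef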
+
+Module snd-usb-caiaq
+--------------------
+
+Module for caiaq USB audio interfaces:
+
+* Native Instruments RigKontrol2
+* Native Instruments Kore Controller
+* Native Instruments Audio Kontrol 1
+* Native Instruments Audio 8 DJ
+
+This module supports multiple devices, autoprobe and hotplugging.
+
+Module snd-usb-usx2y
+--------------------
+
+Module for Tascam USB US-122, US-224 and US-428 devices.
+
+This module supports multiple devices, autoprobe and hotplugging.
+
+Note: you need to load the firmware via ``usx2yloader`` utility included
+in alsa-tools and alsa-firmware packages.
+
+Module snd-via82xx
+------------------
+
+Module for AC'97 motherboards based on VIA 82C686A/686B, 8233, 8233A,
+8233C, 8235, 8237 (south) bridge.
+
+mpu_port
+ 0x300,0x310,0x320,0x330; otherwise the value is obtained from the BIOS setup
+ [VIA686A/686B only]
+joystick
+ Enable joystick (default off) [VIA686A/686B only]
+ac97_clock
+ AC'97 codec clock base (default 48000Hz)
+dxs_support
+ support DXS channels, 0 = auto (default), 1 = enable, 2 = disable,
+ 3 = 48k only, 4 = no VRA, 5 = enable any sample rate and different
+ sample rates on different channels [VIA8233/C, 8235, 8237 only]
+ac97_quirk
+ AC'97 workaround for strange hardware;
+ See `AC97 Quirk Option`_ section below.
+
+This module supports one chip and autoprobe.
+
+Note: on some SMP motherboards like the MSI 694D the interrupts might
+not be generated properly. In such a case, please try to
+set the SMP (or MPS) version in the BIOS to 1.1 instead of the
+default value 1.4. Then the interrupt number will be
+assigned below 15. You might also upgrade your BIOS.
+
+Note: VIA8233/5/7 (not VIA8233A) can support DXS (direct sound)
+channels as the first PCM. On these channels, up to 4
+streams can be played at the same time, and the controller
+can perform sample rate conversion with separate rates for
+each channel.
+By default (``dxs_support=0``), a fixed 48k rate is chosen
+except for known devices, since on some motherboards the
+output is noisy at rates other than 48k due to a BIOS bug.
+Please try ``dxs_support=5`` once, and if it works at other
+sample rates (e.g. 44.1kHz mp3 playback), please let us
+know the PCI subsystem vendor/device ids (output of
+``lspci -nv``).
+If ``dxs_support=5`` does not work, try ``dxs_support=4``; if that
+doesn't work either, try ``dxs_support=1``. (``dxs_support=1`` is
+usually for old motherboards; a correctly implemented
+board should work with 4 or 5.) If it still doesn't
+work and the default setting is ok, ``dxs_support=3`` is the
+right choice. If the default setting doesn't work at all,
+try ``dxs_support=2`` to disable the DXS channels.
+In any case, please let us know the result and the
+subsystem vendor/device ids. See `Links and Addresses`_
+below.
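+
+For example, one way to test the full-rate mode and gather the ids to
+report is to reload the driver with the option and check lspci (stop
+any applications using the card first):
+::
+
+ modprobe -r snd-via82xx
+ modprobe snd-via82xx dxs_support=5
+ lspci -nv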
+
+Note: for the MPU401 on VIA823x, use snd-mpu401 driver
+additionally. The mpu_port option is for VIA686 chips only.
+
+The power-management is supported.
+
+Module snd-via82xx-modem
+------------------------
+
+Module for VIA82xx AC97 modem
+
+ac97_clock
+ AC'97 codec clock base (default 48000Hz)
+
+This module supports one card and autoprobe.
+
+Note: The default index value of this module is -2, i.e. the first
+slot is excluded.
+
+The power-management is supported.
+
+Module snd-virmidi
+------------------
+
+Module for virtual rawmidi devices.
+This module creates virtual rawmidi devices which communicate
+with the corresponding ALSA sequencer ports.
+
+midi_devs
+ MIDI devices # (1-4, default=4)
+
+This module supports multiple cards.
+
+Module snd-virtuoso
+-------------------
+
+Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
+Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
+
+This module supports autoprobe and multiple cards.
+
+Module snd-vx222
+----------------
+
+Module for Digigram VX-Pocket VX222, V222 v2 and Mic cards.
+
+mic
+ Enable Microphone on V222 Mic (NYI)
+ibl
+ Capture IBL size. (default = 0, minimum size)
+
+This module supports multiple cards.
+
+When the driver is compiled as a module and the hotplug firmware
+is supported, the firmware data is loaded via hotplug automatically.
+Install the necessary firmware files in alsa-firmware package.
+When no hotplug fw loader is available, you need to load the
+firmware via vxloader utility in alsa-tools package. To invoke
+vxloader automatically, add the following to /etc/modprobe.d/alsa.conf
+
+::
+
+ install snd-vx222 /sbin/modprobe --first-time -i snd-vx222\
+ && /usr/bin/vxloader
+
+(for 2.2/2.4 kernels, add ``post-install /usr/bin/vxloader`` to
+/etc/modules.conf, instead.)
+IBL size defines the interrupt period for PCM. A smaller size
+gives lower latency but leads to more CPU consumption, too.
+The size is usually aligned to 126. By default (=0), the smallest
+size is chosen. The possible IBL values can be found in the
+/proc/asound/cardX/vx-status proc file.
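+
+For example, to list them for the first card:
+::
+
+ cat /proc/asound/card0/vx-status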
+
+The power-management is supported.
+
+Module snd-vxpocket
+-------------------
+
+Module for Digigram VX-Pocket VX2 and 440 PCMCIA cards.
+
+ibl
+ Capture IBL size. (default = 0, minimum size)
+
+This module supports multiple cards. The module is compiled only when
+PCMCIA is supported by the kernel.
+
+With older 2.6.x kernels, to activate the driver via the card
+manager, you'll need to set up /etc/pcmcia/vxpocket.conf. See
+sound/pcmcia/vx/vxpocket.c. Kernels 2.6.13 and later no
+longer require a config file.
+
+When the driver is compiled as a module and the hotplug firmware
+is supported, the firmware data is loaded via hotplug automatically.
+Install the necessary firmware files in alsa-firmware package.
+When no hotplug fw loader is available, you need to load the
+firmware via vxloader utility in alsa-tools package.
+
+About capture IBL, see the description of snd-vx222 module.
+
+Note: the snd-vxp440 driver has been merged into the snd-vxpocket
+driver since ALSA 1.0.10.
+
+The power-management is supported.
+
+Module snd-ymfpci
+-----------------
+
+Module for Yamaha PCI chips (YMF72x, YMF74x & YMF75x).
+
+mpu_port
+ 0x300,0x330,0x332,0x334, 0 (disable) by default,
+ 1 (auto-detect for YMF744/754 only)
+fm_port
+ 0x388,0x398,0x3a0,0x3a8, 0 (disable) by default
+ 1 (auto-detect for YMF744/754 only)
+joystick_port
+ 0x201,0x202,0x204,0x205, 0 (disable) by default,
+ 1 (auto-detect)
+rear_switch
+ enable shared rear/line-in switch (bool)
+
+This module supports autoprobe and multiple chips.
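+
+For example, to enable the legacy ports using the auto-detection values
+described above (MPU and FM auto-detection works on YMF744/754 only):
+::
+
+ options snd-ymfpci mpu_port=1 fm_port=1 joystick_port=1 rear_switch=1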
+
+The power-management is supported.
+
+Module snd-pdaudiocf
+--------------------
+
+Module for Sound Core PDAudioCF sound card.
+
+The power-management is supported.
+
+
+AC97 Quirk Option
+=================
+
+The ac97_quirk option is used to enable/override the workaround for
+specific devices in drivers for on-board AC'97 controllers like
+snd-intel8x0. Some hardware has the output pins swapped between Master
+and Headphone, or Surround (thanks to confusion in the AC'97
+specifications from version to version :-)
+
+The driver provides the auto-detection of known problematic devices,
+but some might be unknown or wrongly detected. In such a case, pass
+the proper value with this option.
+
+The following strings are accepted:
+
+default
+ Don't override the default setting
+none
+ Disable the quirk
+hp_only
+ Bind Master and Headphone controls as a single control
+swap_hp
+ Swap headphone and master controls
+swap_surround
+ Swap master and surround controls
+ad_sharing
+ For AD1985, turn on OMS bit and use headphone
+alc_jack
+ For ALC65x, turn on the jack sense mode
+inv_eapd
+ Inverted EAPD implementation
+mute_led
+ Bind EAPD bit for turning on/off mute LED
+
+For backward compatibility, the corresponding integer values -1, 0, ...
+are accepted, too.
+
+For example, if the ``Master`` volume control has no effect on your device
+but only ``Headphone`` does, pass the ``ac97_quirk=hp_only`` module option.
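+
+With snd-intel8x0, for instance, that would be:
+::
+
+ options snd-intel8x0 ac97_quirk=hp_only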
+
+
+Configuring Non-ISAPNP Cards
+============================
+
+When the kernel is configured with ISA-PnP support, modules
+supporting ISA-PnP cards will have an ``isapnp`` module option.
+If this option is set, *only* the ISA-PnP devices will be probed.
+For probing non-ISA-PnP cards, you have to pass the ``isapnp=0`` option
+together with the proper I/O and IRQ configuration.
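+
+For example, a non-PnP CS4236-based card might be set up like the
+following in /etc/modprobe.d/alsa.conf (the resource values here are
+only placeholders based on a typical PnP setup; use the ones matching
+your hardware):
+::
+
+ options snd-cs4236 isapnp=0 port=0x534 cport=0x120 irq=5 dma1=1 dma2=3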
+
+When the kernel is configured without ISA-PnP support, the isapnp
+option is not built in.
+
+
+Module Autoloading Support
+==========================
+
+The ALSA drivers can be loaded automatically on demand by defining
+module aliases. The string ``snd-card-%i`` is requested for ALSA native
+devices, where ``%i`` is the sound card number from zero to seven.
+
+To auto-load an ALSA driver for OSS services, define the string
+``sound-slot-%i``, where ``%i`` is the OSS slot number, which
+corresponds to the ALSA card index. Usually, define this alias to
+the same card module.
+
+An example configuration for a single emu10k1 card is shown below:
+::
+
+ ----- /etc/modprobe.d/alsa.conf
+ alias snd-card-0 snd-emu10k1
+ alias sound-slot-0 snd-emu10k1
+ ----- /etc/modprobe.d/alsa.conf
+
+The available number of auto-loaded sound cards depends on the module
+option ``cards_limit`` of the snd module. By default it's set to 1.
+To enable auto-loading of multiple cards, specify the number of
+sound cards in that option.
+
+When multiple cards are available, it's better to specify the index
+number for each card via its module option, too, so that the order of
+cards is kept consistent.
+
+An example configuration for two sound cards is shown below:
+::
+
+ ----- /etc/modprobe.d/alsa.conf
+ # ALSA portion
+ options snd cards_limit=2
+ alias snd-card-0 snd-interwave
+ alias snd-card-1 snd-ens1371
+ options snd-interwave index=0
+ options snd-ens1371 index=1
+ # OSS/Free portion
+ alias sound-slot-0 snd-interwave
+ alias sound-slot-1 snd-ens1371
+ ----- /etc/modprobe.d/alsa.conf
+
+In this example, the interwave card is always loaded as the first card
+(index 0) and ens1371 as the second (index 1).
+
+An alternative (and newer) way to fix the slot assignment is to use the
+``slots`` option of the snd module. For the case above, specify the
+following:
+::
+
+ options snd slots=snd-interwave,snd-ens1371
+
+Then, the first slot (#0) is reserved for the snd-interwave driver, and
+the second (#1) for snd-ens1371. You can omit the index option for each
+driver if the slots option is used (although you can still have them at
+the same time as long as they don't conflict).
+
+The slots option is especially useful for avoiding slot conflicts
+caused by hot-plugged devices. For example, in the
+case above again, the first two slots are already reserved. If any
+other driver (e.g. snd-usb-audio) is loaded before snd-interwave or
+snd-ens1371, it will be assigned to the third or a later slot.
+
+When a module name is given with '!', the slot will be given to any
+module but the named one. For example, ``slots=!snd-pcsp`` will reserve
+the first slot for any module but snd-pcsp.
+
+
+ALSA PCM devices to OSS devices mapping
+=======================================
+::
+
+ /dev/snd/pcmC0D0[c|p] -> /dev/audio0 (/dev/audio) -> minor 4
+ /dev/snd/pcmC0D0[c|p] -> /dev/dsp0 (/dev/dsp) -> minor 3
+ /dev/snd/pcmC0D1[c|p] -> /dev/adsp0 (/dev/adsp) -> minor 12
+ /dev/snd/pcmC1D0[c|p] -> /dev/audio1 -> minor 4+16 = 20
+ /dev/snd/pcmC1D0[c|p] -> /dev/dsp1 -> minor 3+16 = 19
+ /dev/snd/pcmC1D1[c|p] -> /dev/adsp1 -> minor 12+16 = 28
+ /dev/snd/pcmC2D0[c|p] -> /dev/audio2 -> minor 4+32 = 36
+ /dev/snd/pcmC2D0[c|p] -> /dev/dsp2 -> minor 3+32 = 35
+ /dev/snd/pcmC2D1[c|p] -> /dev/adsp2 -> minor 12+32 = 44
+
+The first number in the ``/dev/snd/pcmC{X}D{Y}[c|p]`` expression is the
+sound card number and the second is the device number. The ALSA devices
+have either a ``c`` or ``p`` suffix indicating the direction, capture and
+playback, respectively.
+
+Please note that the device mapping above may be changed via the module
+options of the snd-pcm-oss module.
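+
+For example, to map /dev/dsp of the first card to PCM #1 instead of
+PCM #0, you could pass:
+::
+
+ options snd-pcm-oss dsp_map=1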
+
+
+Proc interfaces (/proc/asound)
+==============================
+
+/proc/asound/card#/pcm#[cp]/oss
+-------------------------------
+erase
+ erase all additional information about OSS applications
+
+<app_name> <fragments> <fragment_size> [<options>]
+ <app_name>
+ name of application with (higher priority) or without path
+ <fragments>
+ number of fragments or zero if auto
+ <fragment_size>
+ size of fragment in bytes or zero if auto
+ <options>
+ optional parameters
+
+ disable
+ the application tries to open a pcm device for
+ this channel but does not want to use it
+ (because of a bug or because it needs mmap).
+ It's good for Quake etc...
+ direct
+ don't use plugins
+ block
+ force block mode (rvplayer)
+ non-block
+ force non-block mode
+ whole-frag
+ write only whole fragments (optimization affecting
+ playback only)
+ no-silence
+ do not fill silence ahead to avoid clicks
+ buggy-ptr
+ Returns the whitespace blocks in GETOPTR ioctl
+ instead of filled blocks
+
+Example:
+::
+
+ echo "x11amp 128 16384" > /proc/asound/card0/pcm0p/oss
+ echo "squake 0 0 disable" > /proc/asound/card0/pcm0c/oss
+ echo "rvplayer 0 0 block" > /proc/asound/card0/pcm0p/oss
+
+
+Early Buffer Allocation
+=======================
+
+Some drivers (e.g. hdsp) require large contiguous buffers, and
+sometimes it's too late to find such space when the driver module is
+actually loaded, due to memory fragmentation. You can pre-allocate the
+PCM buffers by loading the snd-page-alloc module and writing commands
+to its proc file in advance, for example, in an early boot stage such
+as ``/etc/init.d/*.local`` scripts.
+
+Reading the proc file /proc/drivers/snd-page-alloc shows the current
+usage of page allocation. By writing to it, you can send the following
+commands to the snd-page-alloc driver:
+
+* add VENDOR DEVICE MASK SIZE BUFFERS
+
+VENDOR and DEVICE are PCI vendor and device IDs. They take
+integer numbers (a 0x prefix is needed for hex).
+MASK is the PCI DMA mask. Pass 0 if not restricted.
+SIZE is the size of each buffer to allocate. You can use the
+k and m suffixes for KB and MB. The maximum size is 16MB.
+BUFFERS is the number of buffers to allocate. It must be greater
+than 0. The maximum number is 4.
+
+* erase
+
+This will erase all pre-allocated buffers which are not in
+use.
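+
+For example, to pre-allocate two 2MB buffers for a card with the
+(placeholder) PCI ID 1234:5678 and no DMA mask restriction:
+::
+
+ echo "add 0x1234 0x5678 0 2m 2" > /proc/drivers/snd-page-alloc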
+
+
+Links and Addresses
+===================
+
+ALSA project homepage
+ http://www.alsa-project.org
+Kernel Bugzilla
+ http://bugzilla.kernel.org/
+ALSA Developers ML
+ mailto:alsa-devel@alsa-project.org
+alsa-info.sh script
+ http://www.alsa-project.org/alsa-info.sh
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
deleted file mode 100644
index fc53ccd9a629..000000000000
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ /dev/null
@@ -1,2330 +0,0 @@
-
- Advanced Linux Sound Architecture - Driver
- ==========================================
- Configuration guide
-
-
-Kernel Configuration
-====================
-
-To enable ALSA support you need at least to build the kernel with
-primary sound card support (CONFIG_SOUND). Since ALSA can emulate OSS,
-you don't have to choose any of the OSS modules.
-
-Enable "OSS API emulation" (CONFIG_SND_OSSEMUL) and both OSS mixer and
-PCM supports if you want to run OSS applications with ALSA.
-
-If you want to support the WaveTable functionality on cards such as
-SB Live! then you need to enable "Sequencer support"
-(CONFIG_SND_SEQUENCER).
-
-To make ALSA debug messages more verbose, enable the "Verbose printk"
-and "Debug" options. To check for memory leaks, turn on "Debug memory"
-too. "Debug detection" will add checks for the detection of cards.
-
-Please note that all the ALSA ISA drivers support the Linux isapnp API
-(if the card supports ISA PnP). You don't need to configure the cards
-using isapnptools.
-
-
-Creating ALSA devices
-=====================
-
-This depends on your distribution, but normally you use the /dev/MAKEDEV
-script to create the necessary device nodes. On some systems you use a
-script named 'snddevices'.
-
-
-Module parameters
-=================
-
-The user can load modules with options. If the module supports more than
-one card and you have more than one card of the same type then you can
-specify multiple values for the option separated by commas.
-
-Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
-
- Module snd
- ----------
-
- The core ALSA module. It is used by all ALSA card drivers.
- It takes the following options which have global effects.
-
- major - major number for sound driver
- - Default: 116
- cards_limit
- - limiting card index for auto-loading (1-8)
- - Default: 1
- - For auto-loading more than one card, specify this
- option together with snd-card-X aliases.
- slots - Reserve the slot index for the given driver.
- This option takes multiple strings.
- See "Module Autoloading Support" section for details.
- debug - Specifies the debug message level
- (0 = disable debug prints, 1 = normal debug messages,
- 2 = verbose debug messages)
- This option appears only when CONFIG_SND_DEBUG=y.
- This option can be dynamically changed via sysfs
- /sys/modules/snd/parameters/debug file.
-
- Module snd-pcm-oss
- ------------------
-
- The PCM OSS emulation module.
- This module takes options which change the mapping of devices.
-
- dsp_map - PCM device number maps assigned to the 1st OSS device.
- - Default: 0
- adsp_map - PCM device number maps assigned to the 2st OSS device.
- - Default: 1
- nonblock_open
- - Don't block opening busy PCM devices. Default: 1
-
- For example, when dsp_map=2, /dev/dsp will be mapped to PCM #2 of
- the card #0. Similarly, when adsp_map=0, /dev/adsp will be mapped
- to PCM #0 of the card #0.
- For changing the second or later card, specify the option with
- commas, such like "dsp_map=0,1".
-
- nonblock_open option is used to change the behavior of the PCM
- regarding opening the device. When this option is non-zero,
- opening a busy OSS PCM device won't be blocked but return
- immediately with EAGAIN (just like O_NONBLOCK flag).
-
- Module snd-rawmidi
- ------------------
-
- This module takes options which change the mapping of devices.
- similar to those of the snd-pcm-oss module.
-
- midi_map - MIDI device number maps assigned to the 1st OSS device.
- - Default: 0
- amidi_map - MIDI device number maps assigned to the 2st OSS device.
- - Default: 1
-
- Common parameters for top sound card modules
- --------------------------------------------
-
- Each of top level sound card module takes the following options.
-
- index - index (slot #) of sound card
- - Values: 0 through 31 or negative
- - If nonnegative, assign that index number
- - if negative, interpret as a bitmask of permissible
- indices; the first free permitted index is assigned
- - Default: -1
- id - card ID (identifier or name)
- - Can be up to 15 characters long
- - Default: the card type
- - A directory by this name is created under /proc/asound/
- containing information about the card
- - This ID can be used instead of the index number in
- identifying the card
- enable - enable card
- - Default: enabled, for PCI and ISA PnP cards
-
- Module snd-adlib
- ----------------
-
- Module for AdLib FM cards.
-
- port - port # for OPL chip
-
- This module supports multiple cards. It does not support autoprobe, so
- the port must be specified. For actual AdLib FM cards it will be 0x388.
- Note that this card does not have PCM support and no mixer; only FM
- synthesis.
-
- Make sure you have "sbiload" from the alsa-tools package available and,
- after loading the module, find out the assigned ALSA sequencer port
- number through "sbiload -l". Example output:
-
- Port Client name Port name
- 64:0 OPL2 FM synth OPL2 FM Port
-
- Load the std.sb and drums.sb patches also supplied by sbiload:
-
- sbiload -p 64:0 std.sb drums.sb
-
- If you use this driver to drive an OPL3, you can use std.o3 and drums.o3
- instead. To have the card produce sound, use aplaymidi from alsa-utils:
-
- aplaymidi -p 64:0 foo.mid
-
- Module snd-ad1816a
- ------------------
-
- Module for sound cards based on Analog Devices AD1816A/AD1815 ISA chips.
-
- clockfreq - Clock frequency for AD1816A chip (default = 0, 33000Hz)
-
- This module supports multiple cards, autoprobe and PnP.
-
- Module snd-ad1848
- -----------------
-
- Module for sound cards based on AD1848/AD1847/CS4248 ISA chips.
-
- port - port # for AD1848 chip
- irq - IRQ # for AD1848 chip
- dma1 - DMA # for AD1848 chip (0,1,3)
-
- This module supports multiple cards. It does not support autoprobe
- thus main port must be specified!!! Other ports are optional.
-
- The power-management is supported.
-
- Module snd-ad1889
- -----------------
-
- Module for Analog Devices AD1889 chips.
-
- ac97_quirk - AC'97 workaround for strange hardware
- See the description of intel8x0 module for details.
-
- This module supports multiple cards.
-
- Module snd-ali5451
- ------------------
-
- Module for ALi M5451 PCI chip.
-
- pcm_channels - Number of hardware channels assigned for PCM
- spdif - Support SPDIF I/O
- - Default: disabled
-
- This module supports one chip and autoprobe.
-
- The power-management is supported.
-
- Module snd-als100
- -----------------
-
- Module for sound cards based on Avance Logic ALS100/ALS120 ISA chips.
-
- This module supports multiple cards, autoprobe and PnP.
-
- The power-management is supported.
-
- Module snd-als300
- -----------------
-
- Module for Avance Logic ALS300 and ALS300+
-
- This module supports multiple cards.
-
- The power-management is supported.
-
- Module snd-als4000
- ------------------
-
- Module for sound cards based on Avance Logic ALS4000 PCI chip.
-
- joystick_port - port # for legacy joystick support.
- 0 = disabled (default), 1 = auto-detect
-
- This module supports multiple cards, autoprobe and PnP.
-
- The power-management is supported.
-
- Module snd-asihpi
- -----------------
-
- Module for AudioScience ASI soundcards
-
- enable_hpi_hwdep - enable HPI hwdep for AudioScience soundcard
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-atiixp
- -----------------
-
- Module for ATI IXP 150/200/250/400 AC97 controllers.
-
- ac97_clock - AC'97 clock (default = 48000)
- ac97_quirk - AC'97 workaround for strange hardware
- See "AC97 Quirk Option" section below.
- ac97_codec - Workaround to specify which AC'97 codec
- instead of probing. If this works for you
- file a bug with your `lspci -vn` output.
- -2 -- Force probing.
- -1 -- Default behavior.
- 0-2 -- Use the specified codec.
- spdif_aclink - S/PDIF transfer over AC-link (default = 1)
-
- This module supports one card and autoprobe.
-
- ATI IXP has two different methods to control SPDIF output. One is
- over AC-link and another is over the "direct" SPDIF output. The
- implementation depends on the motherboard, and you'll need to
- choose the correct one via spdif_aclink module option.
-
- The power-management is supported.
-
- Module snd-atiixp-modem
- -----------------------
-
- Module for ATI IXP 150/200/250 AC97 modem controllers.
-
- This module supports one card and autoprobe.
-
- Note: The default index value of this module is -2, i.e. the first
- slot is excluded.
-
- The power-management is supported.
-
- Module snd-au8810, snd-au8820, snd-au8830
- -----------------------------------------
-
- Module for Aureal Vortex, Vortex2 and Advantage device.
-
- pcifix - Control PCI workarounds
- 0 = Disable all workarounds
- 1 = Force the PCI latency of the Aureal card to 0xff
- 2 = Force the Extend PCI#2 Internal Master for Efficient
- Handling of Dummy Requests on the VIA KT133 AGP Bridge
- 3 = Force both settings
- 255 = Autodetect what is required (default)
-
- This module supports all ADB PCM channels, ac97 mixer, SPDIF, hardware
- EQ, mpu401, gameport. A3D and wavetable support are still in development.
- Development and reverse engineering work is being coordinated at
- http://savannah.nongnu.org/projects/openvortex/
- SPDIF output has a copy of the AC97 codec output, unless you use the
- "spdif" pcm device, which allows raw data passthru.
- The hardware EQ hardware and SPDIF is only present in the Vortex2 and
- Advantage.
-
- Note: Some ALSA mixer applications don't handle the SPDIF sample rate
- control correctly. If you have problems regarding this, try
- another ALSA compliant mixer (alsamixer works).
-
- Module snd-azt1605
- ------------------
-
- Module for Aztech Sound Galaxy soundcards based on the Aztech AZT1605
- chipset.
-
- port - port # for BASE (0x220,0x240,0x260,0x280)
- wss_port - port # for WSS (0x530,0x604,0xe80,0xf40)
- irq - IRQ # for WSS (7,9,10,11)
- dma1 - DMA # for WSS playback (0,1,3)
- dma2 - DMA # for WSS capture (0,1), -1 = disabled (default)
- mpu_port - port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
- mpu_irq - IRQ # for MPU-401 UART (3,5,7,9), -1 = disabled (default)
- fm_port - port # for OPL3 (0x388), -1 = disabled (default)
-
- This module supports multiple cards. It does not support autoprobe: port,
- wss_port, irq and dma1 have to be specified. The other values are
- optional.
-
- "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
- or the value stored in the card's EEPROM for cards that have an EEPROM and
- their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
- be chosen freely from the options enumerated above.
-
- If dma2 is specified and different from dma1, the card will operate in
- full-duplex mode. When dma1=3, only dma2=0 is valid and the only way to
- enable capture since only channels 0 and 1 are available for capture.
-
- Generic settings are "port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
- mpu_port=0x330 mpu_irq=9 fm_port=0x388".
-
- Whatever IRQ and DMA channels you pick, be sure to reserve them for
- legacy ISA in your BIOS.
-
- Module snd-azt2316
- ------------------
-
- Module for Aztech Sound Galaxy soundcards based on the Aztech AZT2316
- chipset.
-
- port - port # for BASE (0x220,0x240,0x260,0x280)
- wss_port - port # for WSS (0x530,0x604,0xe80,0xf40)
- irq - IRQ # for WSS (7,9,10,11)
- dma1 - DMA # for WSS playback (0,1,3)
- dma2 - DMA # for WSS capture (0,1), -1 = disabled (default)
- mpu_port - port # for MPU-401 UART (0x300,0x330), -1 = disabled (default)
- mpu_irq - IRQ # for MPU-401 UART (5,7,9,10), -1 = disabled (default)
- fm_port - port # for OPL3 (0x388), -1 = disabled (default)
-
- This module supports multiple cards. It does not support autoprobe: port,
- wss_port, irq and dma1 have to be specified. The other values are
- optional.
-
- "port" needs to match the BASE ADDRESS jumper on the card (0x220 or 0x240)
- or the value stored in the card's EEPROM for cards that have an EEPROM and
- their "CONFIG MODE" jumper set to "EEPROM SETTING". The other values can
- be chosen freely from the options enumerated above.
-
- If dma2 is specified and different from dma1, the card will operate in
- full-duplex mode. When dma1=3, only dma2=0 is valid and the only way to
- enable capture since only channels 0 and 1 are available for capture.
-
- Generic settings are "port=0x220 wss_port=0x530 irq=10 dma1=1 dma2=0
- mpu_port=0x330 mpu_irq=9 fm_port=0x388".
-
- Whatever IRQ and DMA channels you pick, be sure to reserve them for
- legacy ISA in your BIOS.
-
- Module snd-aw2
- --------------
-
- Module for Audiowerk2 sound card
-
- This module supports multiple cards.
-
- Module snd-azt2320
- ------------------
-
- Module for sound cards based on Aztech System AZT2320 ISA chip (PnP only).
-
- This module supports multiple cards, PnP and autoprobe.
-
- The power-management is supported.
-
- Module snd-azt3328
- ------------------
-
- Module for sound cards based on Aztech AZF3328 PCI chip.
-
- joystick - Enable joystick (default off)
-
- This module supports multiple cards.
-
- Module snd-bt87x
- ----------------
-
- Module for video cards based on Bt87x chips.
-
- digital_rate - Override the default digital rate (Hz)
- load_all - Load the driver even if the card model isn't known
-
- This module supports multiple cards.
-
- Note: The default index value of this module is -2, i.e. the first
- slot is excluded.
-
- Module snd-ca0106
- -----------------
-
- Module for Creative Audigy LS and SB Live 24bit
-
- This module supports multiple cards.
-
-
- Module snd-cmi8330
- ------------------
-
- Module for sound cards based on C-Media CMI8330 ISA chips.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- wssport - port # for CMI8330 chip (WSS)
- wssirq - IRQ # for CMI8330 chip (WSS)
- wssdma - first DMA # for CMI8330 chip (WSS)
- sbport - port # for CMI8330 chip (SB16)
- sbirq - IRQ # for CMI8330 chip (SB16)
- sbdma8 - 8bit DMA # for CMI8330 chip (SB16)
- sbdma16 - 16bit DMA # for CMI8330 chip (SB16)
- fmport - (optional) OPL3 I/O port
- mpuport - (optional) MPU401 I/O port
- mpuirq - (optional) MPU401 irq #
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-cmipci
- -----------------
-
- Module for C-Media CMI8338/8738/8768/8770 PCI sound cards.
-
- mpu_port - port address of MIDI interface (8338 only):
- 0x300,0x310,0x320,0x330 = legacy port,
- 0 = disable (default)
- fm_port - port address of OPL-3 FM synthesizer (8x38 only):
- 0x388 = legacy port,
- 1 = integrated PCI port (default on 8738),
- 0 = disable
- soft_ac3 - Software-conversion of raw SPDIF packets (model 033 only)
- (default = 1)
- joystick_port - Joystick port address (0 = disable, 1 = auto-detect)
-
- This module supports autoprobe and multiple cards.
-
- The power-management is supported.
-
- Module snd-cs4231
- -----------------
-
- Module for sound cards based on CS4231 ISA chips.
-
- port - port # for CS4231 chip
- mpu_port - port # for MPU-401 UART (optional), -1 = disable
- irq - IRQ # for CS4231 chip
- mpu_irq - IRQ # for MPU-401 UART
- dma1 - first DMA # for CS4231 chip
- dma2 - second DMA # for CS4231 chip
-
- This module supports multiple cards. This module does not support autoprobe
- thus main port must be specified!!! Other ports are optional.
-
- The power-management is supported.
-
- Module snd-cs4236
- -----------------
-
- Module for sound cards based on CS4232/CS4232A,
- CS4235/CS4236/CS4236B/CS4237B/
- CS4238B/CS4239 ISA chips.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for CS4236 chip (PnP setup - 0x534)
- cport - control port # for CS4236 chip (PnP setup - 0x120,0x210,0xf00)
- mpu_port - port # for MPU-401 UART (PnP setup - 0x300), -1 = disable
- fm_port - FM port # for CS4236 chip (PnP setup - 0x388), -1 = disable
- irq - IRQ # for CS4236 chip (5,7,9,11,12,15)
- mpu_irq - IRQ # for MPU-401 UART (9,11,12,15)
- dma1 - first DMA # for CS4236 chip (0,1,3)
- dma2 - second DMA # for CS4236 chip (0,1,3), -1 = disable
-
- This module supports multiple cards. This module does not support autoprobe
- (if ISA PnP is not used) thus main port and control port must be
- specified!!! Other ports are optional.
-
- The power-management is supported.
-
- This module is aliased as snd-cs4232 since it provides the old
- snd-cs4232 functionality, too.
-
- Module snd-cs4281
- -----------------
-
- Module for Cirrus Logic CS4281 soundchip.
-
- dual_codec - Secondary codec ID (0 = disable, default)
-
- This module supports multiple cards.
-
- The power-management is supported.
-
- Module snd-cs46xx
- -----------------
-
- Module for PCI sound cards based on CS4610/CS4612/CS4614/CS4615/CS4622/
- CS4624/CS4630/CS4280 PCI chips.
-
- external_amp - Force to enable external amplifier.
- thinkpad - Force to enable Thinkpad's CLKRUN control.
- mmap_valid - Support OSS mmap mode (default = 0).
-
- This module supports multiple cards and autoprobe.
- Usually external amp and CLKRUN controls are detected automatically
- from PCI sub vendor/device ids. If they don't work, give the options
- above explicitly.
-
- The power-management is supported.
-
- Module snd-cs5530
- _________________
-
- Module for Cyrix/NatSemi Geode 5530 chip.
-
- Module snd-cs5535audio
- ----------------------
-
- Module for multifunction CS5535 companion PCI device
-
- The power-management is supported.
-
- Module snd-ctxfi
- ----------------
-
- Module for Creative Sound Blaster X-Fi boards (20k1 / 20k2 chips)
- * Creative Sound Blaster X-Fi Titanium Fatal1ty Champion Series
- * Creative Sound Blaster X-Fi Titanium Fatal1ty Professional Series
- * Creative Sound Blaster X-Fi Titanium Professional Audio
- * Creative Sound Blaster X-Fi Titanium
- * Creative Sound Blaster X-Fi Elite Pro
- * Creative Sound Blaster X-Fi Platinum
- * Creative Sound Blaster X-Fi Fatal1ty
- * Creative Sound Blaster X-Fi XtremeGamer
- * Creative Sound Blaster X-Fi XtremeMusic
-
- reference_rate - reference sample rate, 44100 or 48000 (default)
- multiple - multiple to ref. sample rate, 1 or 2 (default)
- subsystem - override the PCI SSID for probing; the value
- consists of SSVID << 16 | SSDID. The default is
- zero, which means no override.
-
- This module supports multiple cards.
-
- Module snd-darla20
- ------------------
-
- Module for Echoaudio Darla20
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-darla24
- ------------------
-
- Module for Echoaudio Darla24
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-dt019x
- -----------------
-
- Module for Diamond Technologies DT-019X / Avance Logic ALS-007 (PnP
- only)
-
- This module supports multiple cards. This module is enabled only with
- ISA PnP support.
-
- The power-management is supported.
-
- Module snd-dummy
- ----------------
-
- Module for the dummy sound card. This "card" doesn't do any output
- or input, but you may use this module for any application which
- requires a sound card (like RealPlayer).
-
- pcm_devs - Number of PCM devices assigned to each card
- (default = 1, up to 4)
- pcm_substreams - Number of PCM substreams assigned to each PCM
- (default = 8, up to 128)
- hrtimer - Use hrtimer (=1, default) or system timer (=0)
- fake_buffer - Fake buffer allocations (default = 1)
-
- When multiple PCM devices are created, snd-dummy gives different
- behavior to each PCM device:
- 0 = interleaved with mmap support
- 1 = non-interleaved with mmap support
- 2 = interleaved without mmap
- 3 = non-interleaved without mmap
-
- As default, snd-dummy drivers doesn't allocate the real buffers
- but either ignores read/write or mmap a single dummy page to all
- buffer pages, in order to save the resources. If your apps need
- the read/ written buffer data to be consistent, pass fake_buffer=0
- option.
-
- The power-management is supported.
-
- Module snd-echo3g
- -----------------
-
- Module for Echoaudio 3G cards (Gina3G/Layla3G)
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-emu10k1
- ------------------
-
- Module for EMU10K1/EMU10k2 based PCI sound cards.
- * Sound Blaster Live!
- * Sound Blaster PCI 512
- * Emu APS (partially supported)
- * Sound Blaster Audigy
-
- extin - bitmap of available external inputs for FX8010 (see bellow)
- extout - bitmap of available external outputs for FX8010 (see bellow)
- seq_ports - allocated sequencer ports (4 by default)
- max_synth_voices - limit of voices used for wavetable (64 by default)
- max_buffer_size - specifies the maximum size of wavetable/pcm buffers
- given in MB unit. Default value is 128.
- enable_ir - enable IR
-
- This module supports multiple cards and autoprobe.
-
- Input & Output configurations [extin/extout]
- * Creative Card wo/Digital out [0x0003/0x1f03]
- * Creative Card w/Digital out [0x0003/0x1f0f]
- * Creative Card w/Digital CD in [0x000f/0x1f0f]
- * Creative Card wo/Digital out + LiveDrive [0x3fc3/0x1fc3]
- * Creative Card w/Digital out + LiveDrive [0x3fc3/0x1fcf]
- * Creative Card w/Digital CD in + LiveDrive [0x3fcf/0x1fcf]
- * Creative Card wo/Digital out + Digital I/O 2 [0x0fc3/0x1f0f]
- * Creative Card w/Digital out + Digital I/O 2 [0x0fc3/0x1f0f]
- * Creative Card w/Digital CD in + Digital I/O 2 [0x0fcf/0x1f0f]
- * Creative Card 5.1/w Digital out + LiveDrive [0x3fc3/0x1fff]
- * Creative Card 5.1 (c) 2003 [0x3fc3/0x7cff]
- * Creative Card all ins and outs [0x3fff/0x7fff]
-
- The power-management is supported.
-
- Module snd-emu10k1x
- -------------------
-
- Module for Creative Emu10k1X (SB Live Dell OEM version)
-
- This module supports multiple cards.
-
- Module snd-ens1370
- ------------------
-
- Module for Ensoniq AudioPCI ES1370 PCI sound cards.
- * SoundBlaster PCI 64
- * SoundBlaster PCI 128
-
- joystick - Enable joystick (default off)
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-ens1371
- ------------------
-
- Module for Ensoniq AudioPCI ES1371 PCI sound cards.
- * SoundBlaster PCI 64
- * SoundBlaster PCI 128
- * SoundBlaster Vibra PCI
-
- joystick_port - port # for joystick (0x200,0x208,0x210,0x218),
- 0 = disable (default), 1 = auto-detect
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-es1688
- -----------------
-
- Module for ESS AudioDrive ES-1688 and ES-688 sound cards.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
- mpu_port - port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
- mpu_irq - IRQ # for MPU-401 port (5,7,9,10)
- fm_port - port # for OPL3 (option; share the same port as default)
-
- with isapnp=0, the following additional options are available:
- port - port # for ES-1688 chip (0x220,0x240,0x260)
- irq - IRQ # for ES-1688 chip (5,7,9,10)
- dma8 - DMA # for ES-1688 chip (0,1,3)
-
- This module supports multiple cards and autoprobe (without MPU-401 port)
- and PnP with the ES968 chip.
-
- Module snd-es18xx
- -----------------
-
- Module for ESS AudioDrive ES-18xx sound cards.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for ES-18xx chip (0x220,0x240,0x260)
- mpu_port - port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
- fm_port - port # for FM (optional, not used)
- irq - IRQ # for ES-18xx chip (5,7,9,10)
- dma1 - first DMA # for ES-18xx chip (0,1,3)
- dma2 - first DMA # for ES-18xx chip (0,1,3)
-
- This module supports multiple cards, ISA PnP and autoprobe (without MPU-401
- port if native ISA PnP routines are not used).
- When dma2 is equal with dma1, the driver works as half-duplex.
-
- The power-management is supported.
-
- Module snd-es1938
- -----------------
-
- Module for sound cards based on ESS Solo-1 (ES1938,ES1946) chips.
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-es1968
- -----------------
-
- Module for sound cards based on ESS Maestro-1/2/2E (ES1968/ES1978) chips.
-
- total_bufsize - total buffer size in kB (1-4096kB)
- pcm_substreams_p - playback channels (1-8, default=2)
- pcm_substreams_c - capture channels (1-8, default=0)
- clock - clock (0 = auto-detection)
- use_pm - support the power-management (0 = off, 1 = on,
- 2 = auto (default))
- enable_mpu - enable MPU401 (0 = off, 1 = on, 2 = auto (default))
- joystick - enable joystick (default off)
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-fm801
- ----------------
-
- Module for ForteMedia FM801 based PCI sound cards.
-
- tea575x_tuner - Enable TEA575x tuner
- - 1 = MediaForte 256-PCS
- - 2 = MediaForte 256-PCPR
- - 3 = MediaForte 64-PCR
- - High 16-bits are video (radio) device number + 1
- - example: 0x10002 (MediaForte 256-PCPR, device 1)
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-gina20
- -----------------
-
- Module for Echoaudio Gina20
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-gina24
- -----------------
-
- Module for Echoaudio Gina24
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-gusclassic
- ---------------------
-
- Module for Gravis UltraSound Classic sound card.
-
- port - port # for GF1 chip (0x220,0x230,0x240,0x250,0x260)
- irq - IRQ # for GF1 chip (3,5,9,11,12,15)
- dma1 - DMA # for GF1 chip (1,3,5,6,7)
- dma2 - DMA # for GF1 chip (1,3,5,6,7,-1=disable)
- joystick_dac - 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
- voices - GF1 voices limit (14-32)
- pcm_voices - reserved PCM voices
-
- This module supports multiple cards and autoprobe.
-
- Module snd-gusextreme
- ---------------------
-
- Module for Gravis UltraSound Extreme (Synergy ViperMax) sound card.
-
- port - port # for ES-1688 chip (0x220,0x230,0x240,0x250,0x260)
- gf1_port - port # for GF1 chip (0x210,0x220,0x230,0x240,0x250,0x260,0x270)
- mpu_port - port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable
- irq - IRQ # for ES-1688 chip (5,7,9,10)
- gf1_irq - IRQ # for GF1 chip (3,5,9,11,12,15)
- mpu_irq - IRQ # for MPU-401 port (5,7,9,10)
- dma8 - DMA # for ES-1688 chip (0,1,3)
- dma1 - DMA # for GF1 chip (1,3,5,6,7)
- joystick_dac - 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
- voices - GF1 voices limit (14-32)
- pcm_voices - reserved PCM voices
-
- This module supports multiple cards and autoprobe (without MPU-401 port).
-
- Module snd-gusmax
- -----------------
-
- Module for Gravis UltraSound MAX sound card.
-
- port - port # for GF1 chip (0x220,0x230,0x240,0x250,0x260)
- irq - IRQ # for GF1 chip (3,5,9,11,12,15)
- dma1 - DMA # for GF1 chip (1,3,5,6,7)
- dma2 - DMA # for GF1 chip (1,3,5,6,7,-1=disable)
- joystick_dac - 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
- voices - GF1 voices limit (14-32)
- pcm_voices - reserved PCM voices
-
- This module supports multiple cards and autoprobe.
-
- Module snd-hda-intel
- --------------------
-
- Module for Intel HD Audio (ICH6, ICH6M, ESB2, ICH7, ICH8, ICH9, ICH10,
- PCH, SCH),
- ATI SB450, SB600, R600, RS600, RS690, RS780, RV610, RV620,
- RV630, RV635, RV670, RV770,
- VIA VT8251/VT8237A,
- SIS966, ULI M5461
-
- [Multiple options for each card instance]
- model - force the model name
- position_fix - Fix DMA pointer
- -1 = system default: choose appropriate one per controller
- hardware
- 0 = auto: falls back to LPIB when POSBUF doesn't work
- 1 = use LPIB
- 2 = POSBUF: use position buffer
- 3 = VIACOMBO: VIA-specific workaround for capture
- 4 = COMBO: use LPIB for playback, auto for capture stream
- probe_mask - Bitmask to probe codecs (default = -1, meaning all slots)
- When the bit 8 (0x100) is set, the lower 8 bits are used
- as the "fixed" codec slots; i.e. the driver probes the
- slots regardless what hardware reports back
- probe_only - Only probing and no codec initialization (default=off);
- Useful to check the initial codec status for debugging
- bdl_pos_adj - Specifies the DMA IRQ timing delay in samples.
- Passing -1 will make the driver to choose the appropriate
- value based on the controller chip.
- patch - Specifies the early "patch" files to modify the HD-audio
- setup before initializing the codecs. This option is
- available only when CONFIG_SND_HDA_PATCH_LOADER=y is set.
- See HD-Audio.txt for details.
- beep_mode - Selects the beep registration mode (0=off, 1=on); default
- value is set via CONFIG_SND_HDA_INPUT_BEEP_MODE kconfig.
-
- [Single (global) options]
- single_cmd - Use single immediate commands to communicate with
- codecs (for debugging only)
- enable_msi - Enable Message Signaled Interrupt (MSI) (default = off)
- power_save - Automatic power-saving timeout (in second, 0 =
- disable)
- power_save_controller - Reset HD-audio controller in power-saving mode
- (default = on)
- align_buffer_size - Force rounding of buffer/period sizes to multiples
- of 128 bytes. This is more efficient in terms of memory
- access but isn't required by the HDA spec and prevents
- users from specifying exact period/buffer sizes.
- (default = on)
- snoop - Enable/disable snooping (default = on)
-
- This module supports multiple cards and autoprobe.
-
- See Documentation/sound/alsa/HD-Audio.txt for more details about
- HD-audio driver.
-
- Each codec may have a model table for different configurations.
- If your machine isn't listed there, the default (usually minimal)
- configuration is set up. You can pass "model=<name>" option to
- specify a certain model in such a case. There are different
- models depending on the codec chip. The list of available models
- is found in HD-Audio-Models.txt
-
- The model name "generic" is treated as a special case. When this
- model is given, the driver uses the generic codec parser without
- "codec-patch". It's sometimes good for testing and debugging.
-
- If the default configuration doesn't work and one of the above
- matches with your device, report it together with alsa-info.sh
- output (with --no-upload option) to kernel bugzilla or alsa-devel
- ML (see the section "Links and Addresses").
-
- power_save and power_save_controller options are for power-saving
- mode. See powersave.txt for details.
-
- Note 2: If you get click noises on output, try the module option
- position_fix=1 or 2. position_fix=1 will use the SD_LPIB
- register value without FIFO size correction as the current
- DMA pointer. position_fix=2 will make the driver to use
- the position buffer instead of reading SD_LPIB register.
- (Usually SD_LPIB register is more accurate than the
- position buffer.)
-
- position_fix=3 is specific to VIA devices. The position
- of the capture stream is checked from both LPIB and POSBUF
- values. position_fix=4 is a combination mode, using LPIB
- for playback and POSBUF for capture.
-
- NB: If you get many "azx_get_response timeout" messages at
- loading, it's likely a problem of interrupts (e.g. ACPI irq
- routing). Try to boot with options like "pci=noacpi". Also, you
- can try "single_cmd=1" module option. This will switch the
- communication method between HDA controller and codecs to the
- single immediate commands instead of CORB/RIRB. Basically, the
- single command mode is provided only for BIOS, and you won't get
- unsolicited events, too. But, at least, this works independently
- from the irq. Remember this is a last resort, and should be
- avoided as much as possible...
-
- MORE NOTES ON "azx_get_response timeout" PROBLEMS:
- On some hardware, you may need to add a proper probe_mask option
- to avoid the "azx_get_response timeout" problem above, instead.
- This occurs when the access to non-existing or non-working codec slot
- (likely a modem one) causes a stall of the communication via HD-audio
- bus. You can see which codec slots are probed by enabling
- CONFIG_SND_DEBUG_VERBOSE, or simply from the file name of the codec
- proc files. Then limit the slots to probe by probe_mask option.
- For example, probe_mask=1 means to probe only the first slot, and
- probe_mask=4 means only the third slot.
-
- The power-management is supported.
-
- Module snd-hdsp
- ---------------
-
- Module for RME Hammerfall DSP audio interface(s)
-
- This module supports multiple cards.
-
- Note: The firmware data can be automatically loaded via hotplug
- when CONFIG_FW_LOADER is set. Otherwise, you need to load
- the firmware via hdsploader utility included in alsa-tools
- package.
- The firmware data is found in alsa-firmware package.
-
- Note: snd-page-alloc module does the job which snd-hammerfall-mem
- module did formerly. It will allocate the buffers in advance
- when any HDSP cards are found. To make the buffer
- allocation sure, load snd-page-alloc module in the early
- stage of boot sequence. See "Early Buffer Allocation"
- section.
-
- Module snd-hdspm
- ----------------
-
- Module for RME HDSP MADI board.
-
- precise_ptr - Enable precise pointer, or disable.
- line_outs_monitor - Send playback streams to analog outs by default.
- enable_monitor - Enable Analog Out on Channel 63/64 by default.
-
- See hdspm.txt for details.
-
- Module snd-ice1712
- ------------------
-
- Module for Envy24 (ICE1712) based PCI sound cards.
- * MidiMan M Audio Delta 1010
- * MidiMan M Audio Delta 1010LT
- * MidiMan M Audio Delta DiO 2496
- * MidiMan M Audio Delta 66
- * MidiMan M Audio Delta 44
- * MidiMan M Audio Delta 410
- * MidiMan M Audio Audiophile 2496
- * TerraTec EWS 88MT
- * TerraTec EWS 88D
- * TerraTec EWX 24/96
- * TerraTec DMX 6Fire
- * TerraTec Phase 88
- * Hoontech SoundTrack DSP 24
- * Hoontech SoundTrack DSP 24 Value
- * Hoontech SoundTrack DSP 24 Media 7.1
- * Event Electronics, EZ8
- * Digigram VX442
- * Lionstracs, Mediastaton
- * Terrasoniq TS 88
-
- model - Use the given board model, one of the following:
- delta1010, dio2496, delta66, delta44, audiophile, delta410,
- delta1010lt, vx442, ewx2496, ews88mt, ews88mt_new, ews88d,
- dmx6fire, dsp24, dsp24_value, dsp24_71, ez8,
- phase88, mediastation
- omni - Omni I/O support for MidiMan M-Audio Delta44/66
- cs8427_timeout - reset timeout for the CS8427 chip (S/PDIF transceiver)
- in msec resolution, default value is 500 (0.5 sec)
-
- This module supports multiple cards and autoprobe. Note: The consumer part
- is not used with all Envy24 based cards (for example in the MidiMan Delta
- serie).
-
- Note: The supported board is detected by reading EEPROM or PCI
- SSID (if EEPROM isn't available). You can override the
- model by passing "model" module option in case that the
- driver isn't configured properly or you want to try another
- type for testing.
-
- Module snd-ice1724
- ------------------
-
- Module for Envy24HT (VT/ICE1724), Envy24PT (VT1720) based PCI sound cards.
- * MidiMan M Audio Revolution 5.1
- * MidiMan M Audio Revolution 7.1
- * MidiMan M Audio Audiophile 192
- * AMP Ltd AUDIO2000
- * TerraTec Aureon 5.1 Sky
- * TerraTec Aureon 7.1 Space
- * TerraTec Aureon 7.1 Universe
- * TerraTec Phase 22
- * TerraTec Phase 28
- * AudioTrak Prodigy 7.1
- * AudioTrak Prodigy 7.1 LT
- * AudioTrak Prodigy 7.1 XT
- * AudioTrak Prodigy 7.1 HIFI
- * AudioTrak Prodigy 7.1 HD2
- * AudioTrak Prodigy 192
- * Pontis MS300
- * Albatron K8X800 Pro II
- * Chaintech ZNF3-150
- * Chaintech ZNF3-250
- * Chaintech 9CJS
- * Chaintech AV-710
- * Shuttle SN25P
- * Onkyo SE-90PCI
- * Onkyo SE-200PCI
- * ESI Juli@
- * ESI Maya44
- * Hercules Fortissimo IV
- * EGO-SYS WaveTerminal 192M
-
- model - Use the given board model, one of the following:
- revo51, revo71, amp2000, prodigy71, prodigy71lt,
- prodigy71xt, prodigy71hifi, prodigyhd2, prodigy192,
- juli, aureon51, aureon71, universe, ap192, k8x800,
- phase22, phase28, ms300, av710, se200pci, se90pci,
- fortissimo4, sn25p, WT192M, maya44
-
- This module supports multiple cards and autoprobe.
-
- Note: The supported board is detected by reading EEPROM or PCI
- SSID (if EEPROM isn't available). You can override the
- model by passing "model" module option in case that the
- driver isn't configured properly or you want to try another
- type for testing.
-
- Module snd-indigo
- -----------------
-
- Module for Echoaudio Indigo
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-indigodj
- -------------------
-
- Module for Echoaudio Indigo DJ
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-indigoio
- -------------------
-
- Module for Echoaudio Indigo IO
-
- This module supports multiple cards.
- The driver requires the firmware loader support on kernel.
-
- Module snd-intel8x0
- -------------------
-
- Module for AC'97 motherboards from Intel and compatibles.
- * Intel i810/810E, i815, i820, i830, i84x, MX440
- ICH5, ICH6, ICH7, 6300ESB, ESB2
- * SiS 7012 (SiS 735)
- * NVidia NForce, NForce2, NForce3, MCP04, CK804
- CK8, CK8S, MCP501
- * AMD AMD768, AMD8111
- * ALi m5455
-
- ac97_clock - AC'97 codec clock base (0 = auto-detect)
- ac97_quirk - AC'97 workaround for strange hardware
- See "AC97 Quirk Option" section below.
- buggy_irq - Enable workaround for buggy interrupts on some
- motherboards (default yes on nForce chips,
- otherwise off)
- buggy_semaphore - Enable workaround for hardware with buggy
- semaphores (e.g. on some ASUS laptops)
- (default off)
- spdif_aclink - Use S/PDIF over AC-link instead of direct connection
- from the controller chip
- (0 = off, 1 = on, -1 = default)
-
- This module supports one chip and autoprobe.
-
- Note: the latest driver supports auto-detection of chip clock.
- if you still encounter too fast playback, specify the clock
- explicitly via the module option "ac97_clock=41194".
-
- Joystick/MIDI ports are not supported by this driver. If your
- motherboard has these devices, use the ns558 or snd-mpu401
- modules, respectively.
-
- The power-management is supported.
-
- Module snd-intel8x0m
- --------------------
-
- Module for Intel ICH (i8x0) chipset MC97 modems.
- * Intel i810/810E, i815, i820, i830, i84x, MX440
- ICH5, ICH6, ICH7
- * SiS 7013 (SiS 735)
- * NVidia NForce, NForce2, NForce2s, NForce3
- * AMD AMD8111
- * ALi m5455
-
- ac97_clock - AC'97 codec clock base (0 = auto-detect)
-
- This module supports one card and autoprobe.
-
- Note: The default index value of this module is -2, i.e. the first
- slot is excluded.
-
- The power-management is supported.
-
- Module snd-interwave
- --------------------
-
- Module for Gravis UltraSound PnP, Dynasonic 3-D/Pro, STB Sound Rage 32
- and other sound cards based on AMD InterWave (tm) chip.
-
- joystick_dac - 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
- midi - 1 = MIDI UART enable, 0 = MIDI UART disable (default)
- pcm_voices - reserved PCM voices for the synthesizer (default 2)
- effect - 1 = InterWave effects enable (default 0);
- requires 8 voices
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for InterWave chip (0x210,0x220,0x230,0x240,0x250,0x260)
- irq - IRQ # for InterWave chip (3,5,9,11,12,15)
- dma1 - DMA # for InterWave chip (0,1,3,5,6,7)
- dma2 - DMA # for InterWave chip (0,1,3,5,6,7,-1=disable)
-
- This module supports multiple cards, autoprobe and ISA PnP.
-
- Module snd-interwave-stb
- ------------------------
-
- Module for UltraSound 32-Pro (sound card from STB used by Compaq)
- and other sound cards based on AMD InterWave (tm) chip with TEA6330T
- circuit for extended control of bass, treble and master volume.
-
- joystick_dac - 0 to 31, (0.59V-4.52V or 0.389V-2.98V)
- midi - 1 = MIDI UART enable, 0 = MIDI UART disable (default)
- pcm_voices - reserved PCM voices for the synthesizer (default 2)
- effect - 1 = InterWave effects enable (default 0);
- requires 8 voices
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for InterWave chip (0x210,0x220,0x230,0x240,0x250,0x260)
- port_tc - tone control (i2c bus) port # for TEA6330T chip (0x350,0x360,0x370,0x380)
- irq - IRQ # for InterWave chip (3,5,9,11,12,15)
- dma1 - DMA # for InterWave chip (0,1,3,5,6,7)
- dma2 - DMA # for InterWave chip (0,1,3,5,6,7,-1=disable)
-
- This module supports multiple cards, autoprobe and ISA PnP.
-
- Module snd-jazz16
- -------------------
-
- Module for Media Vision Jazz16 chipset. The chipset consists of 3 chips:
- MVD1216 + MVA416 + MVA514.
-
- port - port # for SB DSP chip (0x210,0x220,0x230,0x240,0x250,0x260)
- irq - IRQ # for SB DSP chip (3,5,7,9,10,15)
- dma8 - DMA # for SB DSP chip (1,3)
- dma16 - DMA # for SB DSP chip (5,7)
- mpu_port - MPU-401 port # (0x300,0x310,0x320,0x330)
- mpu_irq - MPU-401 irq # (2,3,5,7)
-
- This module supports multiple cards.
-
- Module snd-korg1212
- -------------------
-
- Module for Korg 1212 IO PCI card
-
- This module supports multiple cards.
-
- Module snd-layla20
- ------------------
-
- Module for Echoaudio Layla20
-
- This module supports multiple cards.
- The driver requires firmware loader support in the kernel.
-
- Module snd-layla24
- ------------------
-
- Module for Echoaudio Layla24
-
- This module supports multiple cards.
- The driver requires firmware loader support in the kernel.
-
- Module snd-lola
- ---------------
-
- Module for Digigram Lola PCI-e boards
-
- This module supports multiple cards.
-
- Module snd-lx6464es
- -------------------
-
- Module for Digigram LX6464ES boards
-
- This module supports multiple cards.
-
- Module snd-maestro3
- -------------------
-
- Module for Allegro/Maestro3 chips
-
- external_amp - enable external amp (enabled by default)
- amp_gpio - GPIO pin number for external amp (0-15) or
- -1 for default pin (8 for allegro, 1 for
- others)
-
- This module supports autoprobe and multiple chips.
-
- Note: the amplifier binding depends on the hardware.
- If there is no sound even though all channels are unmuted, try
- specifying another GPIO connection via the amp_gpio option.
- For example, a Panasonic notebook might need the "amp_gpio=0x0d"
- option.
-
- The power-management is supported.
-
- Module snd-mia
- ---------------
-
- Module for Echoaudio Mia
-
- This module supports multiple cards.
- The driver requires firmware loader support in the kernel.
-
- Module snd-miro
- ---------------
-
- Module for Miro soundcards: miroSOUND PCM 1 pro,
- miroSOUND PCM 12,
- miroSOUND PCM 20 Radio.
-
- port - Port # (0x530,0x604,0xe80,0xf40)
- irq - IRQ # (5,7,9,10,11)
- dma1 - 1st dma # (0,1,3)
- dma2 - 2nd dma # (0,1)
- mpu_port - MPU-401 port # (0x300,0x310,0x320,0x330)
- mpu_irq - MPU-401 irq # (5,7,9,10)
- fm_port - FM Port # (0x388)
- wss - enable WSS mode
- ide - enable onboard ide support
-
- Module snd-mixart
- -----------------
-
- Module for Digigram miXart8 sound cards.
-
- This module supports multiple cards.
- Note: One miXart8 board will be represented as 4 alsa cards.
- See MIXART.txt for details.
-
- When the driver is compiled as a module and hotplug firmware
- loading is supported, the firmware data is loaded via hotplug
- automatically. Install the necessary firmware files from the
- alsa-firmware package. When no hotplug firmware loader is
- available, you need to load the firmware via the mixartloader
- utility from the alsa-tools package.
-
- Module snd-mona
- ---------------
-
- Module for Echoaudio Mona
-
- This module supports multiple cards.
- The driver requires firmware loader support in the kernel.
-
- Module snd-mpu401
- -----------------
-
- Module for MPU-401 UART devices.
-
- port - port number or -1 (disable)
- irq - IRQ number or -1 (disable)
- pnp - PnP detection - 0 = disable, 1 = enable (default)
-
- This module supports multiple devices and PnP.
-
- Module snd-msnd-classic
- -----------------------
-
- Module for Turtle Beach MultiSound Classic, Tahiti or Monterey
- soundcards.
-
- io - Port # for msnd-classic card
- irq - IRQ # for msnd-classic card
- mem - Memory address (0xb0000, 0xc8000, 0xd0000, 0xd8000,
- 0xe0000 or 0xe8000)
- write_ndelay - enable write ndelay (default = 1)
- calibrate_signal - calibrate signal (default = 0)
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
- digital - Digital daughterboard present (default = 0)
- cfg - Config port (0x250, 0x260 or 0x270) default = PnP
- reset - Reset all devices
- mpu_io - MPU401 I/O port
- mpu_irq - MPU401 irq#
- ide_io0 - IDE port #0
- ide_io1 - IDE port #1
- ide_irq - IDE irq#
- joystick_io - Joystick I/O port
-
- The driver requires firmware files "turtlebeach/msndinit.bin" and
- "turtlebeach/msndperm.bin" in the proper firmware directory.
-
- See Documentation/sound/oss/MultiSound for important information
- about this driver. Note that it has been discontinued, but the
- Voyetra Turtle Beach knowledge base entry for it is still available
- at
- http://www.turtlebeach.com
-
- Module snd-msnd-pinnacle
- ------------------------
-
- Module for Turtle Beach MultiSound Pinnacle/Fiji soundcards.
-
- io - Port # for pinnacle/fiji card
- irq - IRQ # for pinnacle/fiji card
- mem - Memory address (0xb0000, 0xc8000, 0xd0000, 0xd8000,
- 0xe0000 or 0xe8000)
- write_ndelay - enable write ndelay (default = 1)
- calibrate_signal - calibrate signal (default = 0)
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- The driver requires firmware files "turtlebeach/pndspini.bin" and
- "turtlebeach/pndsperm.bin" in the proper firmware directory.
-
- Module snd-mtpav
- ----------------
-
- Module for MOTU MidiTimePiece AV multiport MIDI (on the parallel
- port).
-
- port - I/O port # for MTPAV (0x378,0x278, default=0x378)
- irq - IRQ # for MTPAV (7,5, default=7)
- hwports - number of supported hardware ports, default=8.
-
- Module supports only 1 card. This module has no enable option.
-
- Module snd-mts64
- ----------------
-
- Module for Ego Systems (ESI) Miditerminal 4140
-
- This module supports multiple devices.
- Requires parport (CONFIG_PARPORT).
-
- Module snd-nm256
- ----------------
-
- Module for NeoMagic NM256AV/ZX chips
-
- playback_bufsize - max playback frame size in kB (4-128kB)
- capture_bufsize - max capture frame size in kB (4-128kB)
- force_ac97 - 0 or 1 (disabled by default)
- buffer_top - specify buffer top address
- use_cache - 0 or 1 (disabled by default)
- vaio_hack - alias buffer_top=0x25a800
- reset_workaround - enable AC97 RESET workaround for some laptops
- reset_workaround2 - enable extended AC97 RESET workaround for some
- other laptops
-
- This module supports one chip and autoprobe.
-
- The power-management is supported.
-
- Note: on some notebooks the buffer address cannot be detected
- automatically, or detection causes a hang-up during initialization.
- In such a case, specify the buffer top address explicitly via
- the buffer_top option.
- For example,
- Sony F250: buffer_top=0x25a800
- Sony F270: buffer_top=0x272800
- The driver supports only the AC97 codec. It's possible to force
- the AC97 codec to be initialized/used even though it's not
- detected. In such a case, use the force_ac97=1 option - but there
- is *NO* guarantee that it works!
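-
- As a sketch, the buffer-address workaround above could be made
- persistent with a modprobe options line (the address is the Sony F250
- example quoted above, not a universal value):
-
-   options snd-nm256 buffer_top=0x25a800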
-
- Note: The NM256 chip can be linked internally with non-AC97
- codecs. This driver supports only the AC97 codec and won't work
- on machines with other chips (most likely CS423x or OPL3SAx),
- even though the device is detected in lspci. In such a case, try
- other drivers, e.g. snd-cs4232 or snd-opl3sa2. Some of these
- chips have ISA PnP and some don't; without ISA PnP you'll need
- to specify isapnp=0 and the proper hardware parameters.
-
- Note: some laptops need a workaround for AC97 RESET. For the
- known hardware like Dell Latitude LS and Sony PCG-F305, this
- workaround is enabled automatically. For other laptops with a
- hard freeze, you can try reset_workaround=1 option.
-
- Note: Dell Latitude CSx laptops have another problem regarding
- AC97 RESET. On these laptops, the reset_workaround2 option is
- turned on by default. This option is worth trying if the
- reset_workaround option above doesn't help.
-
- Note: This driver is really crappy. It's a port of the OSS
- driver, which is the result of black-magic reverse engineering.
- The codec detection will fail if the driver is loaded *after*
- the X server, as described above. You might be able to force-load
- the module, but it may result in a hang-up. Hence, make sure that
- you load this module *before* X if you encounter this kind of
- problem.
-
- Module snd-opl3sa2
- ------------------
-
- Module for Yamaha OPL3-SA2/SA3 sound cards.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - control port # for OPL3-SA chip (0x370)
- sb_port - SB port # for OPL3-SA chip (0x220,0x240)
- wss_port - WSS port # for OPL3-SA chip (0x530,0xe80,0xf40,0x604)
- midi_port - port # for MPU-401 UART (0x300,0x330), -1 = disable
- fm_port - FM port # for OPL3-SA chip (0x388), -1 = disable
- irq - IRQ # for OPL3-SA chip (5,7,9,10)
- dma1 - first DMA # for Yamaha OPL3-SA chip (0,1,3)
- dma2 - second DMA # for Yamaha OPL3-SA chip (0,1,3), -1 = disable
-
- This module supports multiple cards and ISA PnP. It does not support
- autoprobe (if ISA PnP is not used) thus all ports must be specified!!!
-
- The power-management is supported.
-
- Module snd-opti92x-ad1848
- -------------------------
-
- Module for sound cards based on OPTi 82c92x and Analog Devices AD1848 chips.
- Module works with OAK Mozart cards as well.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for WSS chip (0x530,0xe80,0xf40,0x604)
- mpu_port - port # for MPU-401 UART (0x300,0x310,0x320,0x330)
- fm_port - port # for OPL3 device (0x388)
- irq - IRQ # for WSS chip (5,7,9,10,11)
- mpu_irq - IRQ # for MPU-401 UART (5,7,9,10)
- dma1 - first DMA # for WSS chip (0,1,3)
-
- This module supports only one card, autoprobe and PnP.
-
- Module snd-opti92x-cs4231
- -------------------------
-
- Module for sound cards based on OPTi 82c92x and Crystal CS4231 chips.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for WSS chip (0x530,0xe80,0xf40,0x604)
- mpu_port - port # for MPU-401 UART (0x300,0x310,0x320,0x330)
- fm_port - port # for OPL3 device (0x388)
- irq - IRQ # for WSS chip (5,7,9,10,11)
- mpu_irq - IRQ # for MPU-401 UART (5,7,9,10)
- dma1 - first DMA # for WSS chip (0,1,3)
- dma2 - second DMA # for WSS chip (0,1,3)
-
- This module supports only one card, autoprobe and PnP.
-
- Module snd-opti93x
- ------------------
-
- Module for sound cards based on OPTi 82c93x chips.
-
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for WSS chip (0x530,0xe80,0xf40,0x604)
- mpu_port - port # for MPU-401 UART (0x300,0x310,0x320,0x330)
- fm_port - port # for OPL3 device (0x388)
- irq - IRQ # for WSS chip (5,7,9,10,11)
- mpu_irq - IRQ # for MPU-401 UART (5,7,9,10)
- dma1 - first DMA # for WSS chip (0,1,3)
- dma2 - second DMA # for WSS chip (0,1,3)
-
- This module supports only one card, autoprobe and PnP.
-
- Module snd-oxygen
- -----------------
-
- Module for sound cards based on the C-Media CMI8786/8787/8788 chip:
- * Asound A-8788
- * Asus Xonar DG/DGX
- * AuzenTech X-Meridian
- * AuzenTech X-Meridian 2G
- * Bgears b-Enspirer
- * Club3D Theatron DTS
- * HT-Omega Claro (plus)
- * HT-Omega Claro halo (XT)
- * Kuroutoshikou CMI8787-HG2PCI
- * Razer Barracuda AC-1
- * Sondigo Inferno
- * TempoTec HiFier Fantasia
- * TempoTec HiFier Serenade
-
- This module supports autoprobe and multiple cards.
-
- Module snd-pcsp
- -----------------
-
- Module for internal PC-Speaker.
-
- nopcm - Disable PC-Speaker PCM sound. Only beeps remain.
- nforce_wa - enable NForce chipset workaround. Expect bad sound.
-
- This module supports system beeps, some kind of PCM playback and
- even a few mixer controls.
-
- Module snd-pcxhr
- ----------------
-
- Module for Digigram PCXHR boards
-
- This module supports multiple cards.
-
- Module snd-portman2x4
- ---------------------
-
- Module for Midiman Portman 2x4 parallel port MIDI interface
-
- This module supports multiple cards.
-
- Module snd-powermac (on ppc only)
- ---------------------------------
-
- Module for PowerMac, iMac and iBook on-board soundchips
-
- enable_beep - enable beep using PCM (enabled as default)
-
- This module supports autoprobe of a single chip.
-
- Note: the driver may have problems regarding endianness.
-
- The power-management is supported.
-
- Module snd-pxa2xx-ac97 (on arm only)
- ------------------------------------
-
- Module for AC97 driver for the Intel PXA2xx chip
-
- For ARM architecture only.
-
- The power-management is supported.
-
- Module snd-riptide
- ------------------
-
- Module for Conexant Riptide chip
-
- joystick_port - Joystick port # (default: 0x200)
- mpu_port - MPU401 port # (default: 0x330)
- opl3_port - OPL3 port # (default: 0x388)
-
- This module supports multiple cards.
- The driver requires firmware loader support in the kernel.
- You need to install the firmware file "riptide.hex" to the standard
- firmware path (e.g. /lib/firmware).
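-
- For example (a rough sketch; the source location of riptide.hex is a
- placeholder for wherever you obtained the file):
-
-   cp riptide.hex /lib/firmware/riptide.hex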
-
- Module snd-rme32
- ----------------
-
- Module for RME Digi32, Digi32 Pro and Digi32/8 (Sek'd Prodif32,
- Prodif96 and Prodif Gold) sound cards.
-
- This module supports multiple cards.
-
- Module snd-rme96
- ----------------
-
- Module for RME Digi96, Digi96/8 and Digi96/8 PRO/PAD/PST sound cards.
-
- This module supports multiple cards.
-
- Module snd-rme9652
- ------------------
-
- Module for RME Digi9652 (Hammerfall, Hammerfall-Light) sound cards.
-
- precise_ptr - Enable precise pointer (doesn't work reliably).
- (default = 0)
-
- This module supports multiple cards.
-
- Note: the snd-page-alloc module does the job that the
- snd-hammerfall-mem module did formerly. It will allocate the
- buffers in advance when any RME9652 cards are found. To make
- sure the buffers are allocated, load the snd-page-alloc module
- in an early stage of the boot sequence. See the "Early Buffer
- Allocation" section.
-
- Module snd-sa11xx-uda1341 (on arm only)
- ---------------------------------------
-
- Module for Philips UDA1341TS on Compaq iPAQ H3600 sound card.
-
- Module supports only one card.
- Module has no enable and index options.
-
- The power-management is supported.
-
- Module snd-sb8
- --------------
-
- Module for 8-bit SoundBlaster cards: SoundBlaster 1.0,
- SoundBlaster 2.0,
- SoundBlaster Pro
-
- port - port # for SB DSP chip (0x220,0x240,0x260)
- irq - IRQ # for SB DSP chip (5,7,9,10)
- dma8 - DMA # for SB DSP chip (1,3)
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-sb16 and snd-sbawe
- -----------------------------
-
- Module for 16-bit SoundBlaster cards: SoundBlaster 16 (PnP),
- SoundBlaster AWE 32 (PnP),
- SoundBlaster AWE 64 PnP
-
- mic_agc - Mic Auto-Gain-Control - 0 = disable, 1 = enable (default)
- csp - ASP/CSP chip support - 0 = disable (default), 1 = enable
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- port - port # for SB DSP 4.x chip (0x220,0x240,0x260)
- mpu_port - port # for MPU-401 UART (0x300,0x330), -1 = disable
- awe_port - base port # for EMU8000 synthesizer (0x620,0x640,0x660)
- (snd-sbawe module only)
- irq - IRQ # for SB DSP 4.x chip (5,7,9,10)
- dma8 - 8-bit DMA # for SB DSP 4.x chip (0,1,3)
- dma16 - 16-bit DMA # for SB DSP 4.x chip (5,6,7)
-
- This module supports multiple cards, autoprobe and ISA PnP.
-
- Note: To use Vibra16X cards in 16-bit half duplex mode, you must
- disable 16-bit DMA with the dma16=-1 module parameter.
- Also, all Sound Blaster 16 type cards can operate in 16-bit
- half duplex mode through an 8-bit DMA channel by disabling their
- 16-bit DMA channel.
-
- The power-management is supported.
-
- Module snd-sc6000
- -----------------
-
- Module for Gallant SC-6000 soundcard and later models: SC-6600
- and SC-7000.
-
- port - Port # (0x220 or 0x240)
- mss_port - MSS Port # (0x530 or 0xe80)
- irq - IRQ # (5,7,9,10,11)
- mpu_irq - MPU-401 IRQ # (5,7,9,10), 0 = no MPU-401 irq
- dma - DMA # (1,3,0)
- joystick - Enable gameport - 0 = disable (default), 1 = enable
-
- This module supports multiple cards.
-
- This card is also known as Audio Excel DSP 16 or Zoltrix AV302.
-
- Module snd-sscape
- -----------------
-
- Module for ENSONIQ SoundScape cards.
-
- port - Port # (PnP setup)
- wss_port - WSS Port # (PnP setup)
- irq - IRQ # (PnP setup)
- mpu_irq - MPU-401 IRQ # (PnP setup)
- dma - DMA # (PnP setup)
- dma2 - 2nd DMA # (PnP setup, -1 to disable)
- joystick - Enable gameport - 0 = disable (default), 1 = enable
-
- This module supports multiple cards.
-
- The driver requires firmware loader support in the kernel.
-
- Module snd-sun-amd7930 (on sparc only)
- --------------------------------------
-
- Module for AMD7930 sound chips found on Sparcs.
-
- This module supports multiple cards.
-
- Module snd-sun-cs4231 (on sparc only)
- -------------------------------------
-
- Module for CS4231 sound chips found on Sparcs.
-
- This module supports multiple cards.
-
- Module snd-sun-dbri (on sparc only)
- -----------------------------------
-
- Module for DBRI sound chips found on Sparcs.
-
- This module supports multiple cards.
-
- Module snd-wavefront
- --------------------
-
- Module for Turtle Beach Maui, Tropez and Tropez+ sound cards.
-
- use_cs4232_midi - Use CS4232 MPU-401 interface
- (inaccessibly located inside your computer)
- isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
-
- with isapnp=0, the following options are available:
-
- cs4232_pcm_port - Port # for CS4232 PCM interface.
- cs4232_pcm_irq - IRQ # for CS4232 PCM interface (5,7,9,11,12,15).
- cs4232_mpu_port - Port # for CS4232 MPU-401 interface.
- cs4232_mpu_irq - IRQ # for CS4232 MPU-401 interface (9,11,12,15).
- ics2115_port - Port # for ICS2115
- ics2115_irq - IRQ # for ICS2115
- fm_port - FM OPL-3 Port #
- dma1 - DMA1 # for CS4232 PCM interface.
- dma2 - DMA2 # for CS4232 PCM interface.
-
- Below are the options for the wavefront_synth features:
- wf_raw - Assume that we need to boot the OS (default:no)
- If yes, then during driver loading, the state of the board is
- ignored, and we reset the board and load the firmware anyway.
- fx_raw - Assume that the FX process needs help (default:yes)
- If false, we'll leave the FX processor in whatever state it is
- when the driver is loaded. The default is to download the
- microprogram and associated coefficients to set it up for
- "default" operation, whatever that means.
- debug_default - Debug parameters for card initialization
- wait_usecs - How long to wait without sleeping, usecs
- (default:150)
- This magic number seems to give pretty optimal throughput
- based on my limited experimentation.
- If you want to play around with it and find a better value, be
- my guest. Remember, the idea is to get a number that causes us
- to just busy wait for as many WaveFront commands as possible,
- without coming up with a number so large that we hog the whole
- CPU.
- Specifically, with this number, out of about 134,000 status
- waits, only about 250 result in a sleep.
- sleep_interval - How long to sleep when waiting for reply
- (default: 100)
- sleep_tries - How many times to try sleeping during a wait
- (default: 50)
- ospath - Pathname to processed ICS2115 OS firmware
- (default:wavefront.os)
- The path name of the ICS2115 OS firmware. In recent
- versions, it's handled via the firmware loader framework, so it
- must be installed in the proper path, typically
- /lib/firmware.
- reset_time - How long to wait for a reset to take effect
- (default:2)
- ramcheck_time - How many seconds to wait for the RAM test
- (default:20)
- osrun_time - How many seconds to wait for the ICS2115 OS
- (default:10)
-
- This module supports multiple cards and ISA PnP.
-
- Note: in earlier versions, the firmware file "wavefront.os" was
- located in /etc. Now it's loaded via the firmware loader, and
- must be in the proper firmware path, such as /lib/firmware.
- Copy (or symlink) the file appropriately if you get an error
- about firmware downloading after upgrading the kernel.
-
- Module snd-sonicvibes
- ---------------------
-
- Module for S3 SonicVibes PCI sound cards.
- * PINE Schubert 32 PCI
-
- reverb - Reverb Enable - 1 = enable, 0 = disable (default)
- - SoundCard must have onboard SRAM for this.
- mge - Mic Gain Enable - 1 = enable, 0 = disable (default)
-
- This module supports multiple cards and autoprobe.
-
- Module snd-serial-u16550
- ------------------------
-
- Module for UART16550A serial MIDI ports.
-
- port - port # for UART16550A chip
- irq - IRQ # for UART16550A chip, -1 = poll mode
- speed - speed in bauds (9600,19200,38400,57600,115200)
- 38400 = default
- base - base for divisor in bauds (57600,115200,230400,460800)
- 115200 = default
- outs - number of MIDI ports in a serial port (1-4)
- 1 = default
- adaptor - Type of adaptor.
- 0 = Soundcanvas, 1 = MS-124T, 2 = MS-124W S/A,
- 3 = MS-124W M/B, 4 = Generic
-
- This module supports multiple cards. This module does not support autoprobe
- thus the main port must be specified!!! Other options are optional.
-
- Module snd-trident
- ------------------
-
- Module for Trident 4DWave DX/NX sound cards.
- * Best Union Miss Melody 4DWave PCI
- * HIS 4DWave PCI
- * Warpspeed ONSpeed 4DWave PCI
- * AzTech PCI 64-Q3D
- * Addonics SV 750
- * CHIC True Sound 4Dwave
- * Shark Predator4D-PCI
- * Jaton SonicWave 4D
- * SiS SI7018 PCI Audio
- * Hoontech SoundTrack Digital 4DWave NX
-
- pcm_channels - max channels (voices) reserved for PCM
- wavetable_size - max wavetable size in kB (4-?kb)
-
- This module supports multiple cards and autoprobe.
-
- The power-management is supported.
-
- Module snd-ua101
- ----------------
-
- Module for the Edirol UA-101/UA-1000 audio/MIDI interfaces.
-
- This module supports multiple devices, autoprobe and hotplugging.
-
- Module snd-usb-audio
- --------------------
-
- Module for USB audio and USB MIDI devices.
-
- vid - Vendor ID for the device (optional)
- pid - Product ID for the device (optional)
- nrpacks - Max. number of packets per URB (default: 8)
- device_setup - Device specific magic number (optional)
- - Influence depends on the device
- - Default: 0x0000
- ignore_ctl_error - Ignore any USB controller errors regarding the
- mixer interface (default: no)
- autoclock - Enable auto-clock selection for UAC2 devices
- (default: yes)
- quirk_alias - Quirk alias list, pass strings like
- "0123abcd:5678beef", which applies the existing
- quirk for the device 5678:beef to a new device
- 0123:abcd.
-
- This module supports multiple devices, autoprobe and hotplugging.
-
- NB: the nrpacks parameter can be modified dynamically via sysfs.
- Don't set the value above 20. Changes made via sysfs are not
- sanity-checked.
- NB: ignore_ctl_error=1 may help when you get an error while accessing
- a mixer element, such as URB error -22. This happens with some
- buggy USB devices or controllers.
- NB: the quirk_alias option is provided only for testing / development.
- If you want proper support, contact upstream about adding
- the matching quirk to the driver code statically.
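-
- As a rough illustration of the notes above (sysfs mounted at /sys is
- assumed, and the values are only examples):
-
-   # shrink nrpacks at runtime; there is no sanity check, so stay <= 20
-   echo 4 > /sys/module/snd_usb_audio/parameters/nrpacks
-
- and, for testing only, an /etc/modprobe.d line reusing an existing quirk:
-
-   options snd-usb-audio quirk_alias=0123abcd:5678beef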
-
- Module snd-usb-caiaq
- --------------------
-
- Module for caiaq USB audio interfaces,
- * Native Instruments RigKontrol2
- * Native Instruments Kore Controller
- * Native Instruments Audio Kontrol 1
- * Native Instruments Audio 8 DJ
-
- This module supports multiple devices, autoprobe and hotplugging.
-
- Module snd-usb-usx2y
- --------------------
-
- Module for Tascam USB US-122, US-224 and US-428 devices.
-
- This module supports multiple devices, autoprobe and hotplugging.
-
- Note: you need to load the firmware via usx2yloader utility included
- in alsa-tools and alsa-firmware packages.
-
- Module snd-via82xx
- ------------------
-
- Module for AC'97 motherboards based on VIA 82C686A/686B, 8233,
- 8233A, 8233C, 8235, 8237 (south) bridge.
-
- mpu_port - 0x300,0x310,0x320,0x330; otherwise the value is taken
- from the BIOS setup [VIA686A/686B only]
- joystick - Enable joystick (default off) [VIA686A/686B only]
- ac97_clock - AC'97 codec clock base (default 48000Hz)
- dxs_support - support DXS channels,
- 0 = auto (default), 1 = enable, 2 = disable,
- 3 = 48k only, 4 = no VRA, 5 = enable any sample
- rate and different sample rates on different
- channels
- [VIA8233/C, 8235, 8237 only]
- ac97_quirk - AC'97 workaround for strange hardware
- See "AC97 Quirk Option" section below.
-
- This module supports one chip and autoprobe.
-
- Note: on some SMP motherboards like the MSI 694D the interrupts might
- not be generated properly. In such a case, please try to
- set the SMP (or MPS) version in the BIOS to 1.1 instead of the
- default value 1.4. Then the interrupt number will be
- assigned below 15. You might also upgrade your BIOS.
-
- Note: VIA8233/5/7 (not VIA8233A) can support DXS (direct sound)
- channels as the first PCM. On these channels, up to 4
- streams can be played at the same time, and the controller
- can perform sample rate conversion with separate rates for
- each channel.
- By default (dxs_support = 0), a fixed 48k rate is chosen
- except for known devices, since on some motherboards the
- output is noisy at rates other than 48k due to a BIOS bug.
- Please try dxs_support=5 first, and if it works at other
- sample rates (e.g. 44.1kHz mp3 playback), please let us
- know the PCI subsystem vendor/device ids (output of
- "lspci -nv").
- If dxs_support=5 does not work, try dxs_support=4; if that
- doesn't work either, try dxs_support=1. (dxs_support=1 is
- usually for old motherboards; a correctly implemented
- board should work with 4 or 5.) If it still doesn't
- work and the default setting is ok, dxs_support=3 is the
- right choice. If the default setting doesn't work at all,
- try dxs_support=2 to disable the DXS channels.
- In any case, please let us know the result and the
- subsystem vendor/device ids. See "Links and Addresses"
- below.
-
- Note: for the MPU401 on VIA823x, use snd-mpu401 driver
- additionally. The mpu_port option is for VIA686 chips only.
-
- The power-management is supported.
-
- Module snd-via82xx-modem
- ------------------------
-
- Module for VIA82xx AC97 modem
-
- ac97_clock - AC'97 codec clock base (default 48000Hz)
-
- This module supports one card and autoprobe.
-
- Note: The default index value of this module is -2, i.e. the first
- slot is excluded.
-
- The power-management is supported.
-
- Module snd-virmidi
- ------------------
-
- Module for virtual rawmidi devices.
- This module creates virtual rawmidi devices which communicate
- with the corresponding ALSA sequencer ports.
-
- midi_devs - MIDI devices # (1-4, default=4)
-
- This module supports multiple cards.
-
- Module snd-virtuoso
- -------------------
-
- Module for sound cards based on the Asus AV66/AV100/AV200 chips,
- i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
- Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
-
- This module supports autoprobe and multiple cards.
-
- Module snd-vx222
- ----------------
-
- Module for Digigram VX-Pocket VX222, V222 v2 and Mic cards.
-
- mic - Enable Microphone on V222 Mic (NYI)
- ibl - Capture IBL size. (default = 0, minimum size)
-
- This module supports multiple cards.
-
- When the driver is compiled as a module and hotplug firmware
- loading is supported, the firmware data is loaded via hotplug
- automatically. Install the necessary firmware files from the
- alsa-firmware package. When no hotplug firmware loader is
- available, you need to load the firmware via the vxloader utility
- from the alsa-tools package. To invoke vxloader automatically,
- add the following to /etc/modprobe.d/alsa.conf:
-
- install snd-vx222 /sbin/modprobe --first-time -i snd-vx222 && /usr/bin/vxloader
-
- (for 2.2/2.4 kernels, add "post-install /usr/bin/vxloader" to
- /etc/modules.conf, instead.)
- The IBL size defines the interrupt period for PCM. A smaller size
- gives lower latency but leads to higher CPU consumption, too.
- The size is usually aligned to 126. By default (=0), the smallest
- size is chosen. The possible IBL values can be found in the
- proc file /proc/asound/cardX/vx-status.
-
- The power-management is supported.
-
- Module snd-vxpocket
- -------------------
-
- Module for Digigram VX-Pocket VX2 and 440 PCMCIA cards.
-
- ibl - Capture IBL size. (default = 0, minimum size)
-
- This module supports multiple cards. The module is compiled only when
- PCMCIA is supported in the kernel.
-
- With older 2.6.x kernels, to activate the driver via the card
- manager, you'll need to set up /etc/pcmcia/vxpocket.conf; see
- sound/pcmcia/vx/vxpocket.c. Kernels 2.6.13 and later no
- longer require a config file.
-
- When the driver is compiled as a module and hotplug firmware
- loading is supported, the firmware data is loaded via hotplug
- automatically. Install the necessary firmware files from the
- alsa-firmware package. When no hotplug firmware loader is
- available, you need to load the firmware via the vxloader utility
- from the alsa-tools package.
-
- About capture IBL, see the description of snd-vx222 module.
-
- Note: the snd-vxp440 driver has been merged into the snd-vxpocket
- driver since ALSA 1.0.10.
-
- The power-management is supported.
-
- Module snd-ymfpci
- -----------------
-
- Module for Yamaha PCI chips (YMF72x, YMF74x & YMF75x).
-
- mpu_port - 0x300,0x330,0x332,0x334, 0 (disable) by default,
- 1 (auto-detect for YMF744/754 only)
- fm_port - 0x388,0x398,0x3a0,0x3a8, 0 (disable) by default
- 1 (auto-detect for YMF744/754 only)
- joystick_port - 0x201,0x202,0x204,0x205, 0 (disable) by default,
- 1 (auto-detect)
- rear_switch - enable shared rear/line-in switch (bool)
-
- This module supports autoprobe and multiple chips.
-
- The power-management is supported.
-
- Module snd-pdaudiocf
- --------------------
-
- Module for Sound Core PDAudioCF sound card.
-
- The power-management is supported.
-
-
-AC97 Quirk Option
-=================
-
-The ac97_quirk option is used to enable/override the workaround for
-specific devices on drivers for on-board AC'97 controllers like
-snd-intel8x0. Some hardware has swapped output pins between Master
-and Headphone, or Surround (thanks to the confusion of AC'97
-specifications from version to version :-)
-
-The driver provides the auto-detection of known problematic devices,
-but some might be unknown or wrongly detected. In such a case, pass
-the proper value with this option.
-
-The following strings are accepted:
- - default Don't override the default setting
- - none Disable the quirk
- - hp_only Bind Master and Headphone controls as a single control
- - swap_hp Swap headphone and master controls
- - swap_surround Swap master and surround controls
- - ad_sharing For AD1985, turn on OMS bit and use headphone
- - alc_jack For ALC65x, turn on the jack sense mode
- - inv_eapd Inverted EAPD implementation
- - mute_led Bind EAPD bit for turning on/off mute LED
-
-For backward compatibility, the corresponding integer values -1, 0,
-... are accepted, too.
-
-For example, if the "Master" volume control has no effect on your device
-but only "Headphone" does, pass the ac97_quirk=hp_only module option.
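-
-A minimal sketch of such an override in /etc/modprobe.d (snd-intel8x0 is
-used here only as an example of an affected driver):
-
-options snd-intel8x0 ac97_quirk=hp_only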
-
-
-Configuring Non-ISAPNP Cards
-============================
-
-When the kernel is configured with ISA PnP support, the modules
-supporting ISA PnP cards will have an "isapnp" module option.
-If this option is set, *only* ISA PnP devices will be probed.
-To probe non-ISA-PnP cards, you have to pass the "isapnp=0" option
-together with the proper I/O and IRQ configuration.
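-
-For instance, a sketch for the snd-opl3sa2 driver, reusing the resource
-values listed in its section above (your card's actual settings may differ):
-
-options snd-opl3sa2 isapnp=0 port=0x370 sb_port=0x220 wss_port=0x530 midi_port=0x330 fm_port=0x388 irq=5 dma1=0 dma2=1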
-
-When the kernel is configured without ISA PnP support, the isapnp
-option will not be built in.
-
-
-Module Autoloading Support
-==========================
-
-The ALSA drivers can be loaded automatically on demand by defining
-module aliases. The string 'snd-card-%i' is requested for ALSA native
-devices, where %i is the sound card number from zero to seven.
-
-To auto-load an ALSA driver for OSS services, define the string
-'sound-slot-%i', where %i is the OSS slot number, which
-corresponds to the ALSA card index. Usually, define this alias
-to the same card module.
-
-An example configuration for a single emu10k1 card is shown below:
------ /etc/modprobe.d/alsa.conf
-alias snd-card-0 snd-emu10k1
-alias sound-slot-0 snd-emu10k1
------ /etc/modprobe.d/alsa.conf
-
-The number of sound cards that can be auto-loaded depends on the module
-option "cards_limit" of the snd module. By default it's set to 1.
-To enable auto-loading of multiple cards, specify the number of
-sound cards in that option.
-
-When multiple cards are available, it is better to also specify the
-index number for each card via its module option, so that the order of
-cards is kept consistent.
-
-An example configuration for two sound cards is shown below:
-
------ /etc/modprobe.d/alsa.conf
-# ALSA portion
-options snd cards_limit=2
-alias snd-card-0 snd-interwave
-alias snd-card-1 snd-ens1371
-options snd-interwave index=0
-options snd-ens1371 index=1
-# OSS/Free portion
-alias sound-slot-0 snd-interwave
-alias sound-slot-1 snd-ens1371
------ /etc/modprobe.d/alsa.conf
-
-In this example, the interwave card is always loaded as the first card
-(index 0) and ens1371 as the second (index 1).
-
-An alternative (and newer) way to fix the slot assignment is to use the
-"slots" option of the snd module. For the case above, specify the
-following:
-
-options snd slots=snd-interwave,snd-ens1371
-
-Then, the first slot (#0) is reserved for the snd-interwave driver, and
-the second (#1) for snd-ens1371. You can omit the index option for each
-driver if the slots option is used (although you can still have them at
-the same time as long as they don't conflict).
-
-The slots option is especially useful for avoiding slot conflicts
-caused by hot-plugging. For example, in the case above again, the
-first two slots are already reserved. If any other driver (e.g.
-snd-usb-audio) is loaded before snd-interwave or snd-ens1371, it
-will be assigned to the third or a later slot.
-
-When a module name is prefixed with '!', the slot will be given to any
-module but that one. For example, "slots=!snd-pcsp" will reserve
-the first slot for any module but snd-pcsp.
-
-
-ALSA PCM devices to OSS devices mapping
-=======================================
-
-/dev/snd/pcmC0D0[c|p] -> /dev/audio0 (/dev/audio) -> minor 4
-/dev/snd/pcmC0D0[c|p] -> /dev/dsp0 (/dev/dsp) -> minor 3
-/dev/snd/pcmC0D1[c|p] -> /dev/adsp0 (/dev/adsp) -> minor 12
-/dev/snd/pcmC1D0[c|p] -> /dev/audio1 -> minor 4+16 = 20
-/dev/snd/pcmC1D0[c|p] -> /dev/dsp1 -> minor 3+16 = 19
-/dev/snd/pcmC1D1[c|p] -> /dev/adsp1 -> minor 12+16 = 28
-/dev/snd/pcmC2D0[c|p] -> /dev/audio2 -> minor 4+32 = 36
-/dev/snd/pcmC2D0[c|p] -> /dev/dsp2 -> minor 3+32 = 35
-/dev/snd/pcmC2D1[c|p] -> /dev/adsp2 -> minor 12+32 = 44
-
-In the /dev/snd/pcmC{X}D{Y}[c|p] notation, the first number is the
-sound card number and the second is the device number. The ALSA devices
-have either a 'c' or 'p' suffix indicating the direction, capture and
-playback, respectively.
-
-Please note that the device mapping above may be changed via the module
-options of the snd-pcm-oss module.
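-
-For instance, the snd-pcm-oss module accepts dsp_map and adsp_map options
-that select which PCM device each OSS node maps to on a given card (a
-sketch only; check "modinfo snd-pcm-oss" for the parameters available in
-your kernel):
-
-options snd-pcm-oss dsp_map=1 adsp_map=2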
-
-
-Proc interfaces (/proc/asound)
-==============================
-
-/proc/asound/card#/pcm#[cp]/oss
--------------------------------
- String "erase" - erase all additional information about OSS applications
- String "<app_name> <fragments> <fragment_size> [<options>]"
-
- <app_name> - name of the application, with path (takes higher priority) or without path
- <fragments> - number of fragments or zero if auto
- <fragment_size> - size of fragment in bytes or zero if auto
- <options> - optional parameters
- - disable the application tries to open a PCM device for
- this channel but does not actually want to use it
- (because of a bug or mmap needs).
- It's good for Quake etc...
- - direct don't use plugins
- - block force block mode (rvplayer)
- - non-block force non-block mode
- - whole-frag write only whole fragments (optimization affecting
- playback only)
- - no-silence do not fill silence ahead to avoid clicks
- - buggy-ptr Returns the whitespace blocks in GETOPTR ioctl
- instead of filled blocks
-
- Example: echo "x11amp 128 16384" > /proc/asound/card0/pcm0p/oss
- echo "squake 0 0 disable" > /proc/asound/card0/pcm0c/oss
- echo "rvplayer 0 0 block" > /proc/asound/card0/pcm0p/oss
-
-
-Early Buffer Allocation
-=======================
-
-Some drivers (e.g. hdsp) require large contiguous buffers, and due to
-memory fragmentation it is sometimes too late to find such space when
-the driver module is actually loaded. You can pre-allocate the
-PCM buffers by loading the snd-page-alloc module and writing commands
-to its proc file beforehand, for example, at an early boot stage such
-as an /etc/init.d/*.local script.
-
-Reading the proc file /proc/drivers/snd-page-alloc shows the current
-usage of page allocation. By writing to it, you can send the following
-commands to the snd-page-alloc driver:
-
- - add VENDOR DEVICE MASK SIZE BUFFERS
-
- VENDOR and DEVICE are PCI vendor and device IDs. They take
- integer numbers (0x prefix is needed for the hex).
- MASK is the PCI DMA mask. Pass 0 if not restricted.
- SIZE is the size of each buffer to allocate. You can pass
- k and m suffix for KB and MB. The max number is 16MB.
- BUFFERS is the number of buffers to allocate. It must be greater
- than 0. The max number is 4.
-
- - erase
-
- This will erase all pre-allocated buffers that are not currently
- in use. (A short example of both commands follows below.)
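-
- A rough example of both commands (the vendor/device IDs below are
- placeholders; use your card's IDs, e.g. from "lspci -nv"):
-
-   echo "add 0x1234 0x5678 0 2m 4" > /proc/drivers/snd-page-alloc
-   echo "erase" > /proc/drivers/snd-page-alloc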
-
-
-Links and Addresses
-===================
-
- ALSA project homepage
- http://www.alsa-project.org
-
- Kernel Bugzilla
- http://bugzilla.kernel.org/
-
- ALSA Developers ML
- mailto:alsa-devel@alsa-project.org
-
- alsa-info.sh script
- http://www.alsa-project.org/alsa-info.sh
diff --git a/Documentation/sound/alsa/ControlNames.txt b/Documentation/sound/alsa/ControlNames.txt
deleted file mode 100644
index 3fc1cf50d28e..000000000000
--- a/Documentation/sound/alsa/ControlNames.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-This document describes standard names of mixer controls.
-
-Syntax: [LOCATION] SOURCE [CHANNEL] [DIRECTION] FUNCTION
-
-DIRECTION:
- <nothing> (both directions)
- Playback
- Capture
- Bypass Playback
- Bypass Capture
-
-FUNCTION:
- Switch (on/off switch)
- Volume
- Route (route control, hardware specific)
-
-CHANNEL:
- <nothing> (channel independent, or applies to all channels)
- Front
- Surround (rear left/right in 4.0/5.1 surround)
- CLFE
- Center
- LFE
- Side (side left/right for 7.1 surround)
-
-LOCATION: (physical location of source)
- Front
- Rear
- Dock (docking station)
- Internal
-
-SOURCE:
- Master
- Master Mono
- Hardware Master
- Speaker (internal speaker)
- Bass Speaker (internal LFE speaker)
- Headphone
- Line Out
- Beep (beep generator)
- Phone
- Phone Input
- Phone Output
- Synth
- FM
- Mic
- Headset Mic (mic part of combined headset jack - 4-pin headphone + mic)
- Headphone Mic (mic part of either/or - 3-pin headphone or mic)
- Line (input only, use "Line Out" for output)
- CD
- Video
- Zoom Video
- Aux
- PCM
- PCM Pan
- Loopback
- Analog Loopback (D/A -> A/D loopback)
- Digital Loopback (playback -> capture loopback - without analog path)
- Mono
- Mono Output
- Multi
- ADC
- Wave
- Music
- I2S
- IEC958
- HDMI
- SPDIF (output only)
- SPDIF In
- Digital In
- HDMI/DP (either HDMI or DisplayPort)
-
-Exceptions (deprecated):
- [Analogue|Digital] Capture Source
- [Analogue|Digital] Capture Switch (aka input gain switch)
- [Analogue|Digital] Capture Volume (aka input gain volume)
- [Analogue|Digital] Playback Switch (aka output gain switch)
- [Analogue|Digital] Playback Volume (aka output gain volume)
- Tone Control - Switch
- Tone Control - Bass
- Tone Control - Treble
- 3D Control - Switch
- 3D Control - Center
- 3D Control - Depth
- 3D Control - Wide
- 3D Control - Space
- 3D Control - Level
- Mic Boost [(?dB)]
-
-PCM interface:
-
- Sample Clock Source { "Word", "Internal", "AutoSync" }
- Clock Sync Status { "Lock", "Sync", "No Lock" }
- External Rate /* external capture rate */
- Capture Rate /* capture rate taken from external source */
-
-IEC958 (S/PDIF) interface:
-
- IEC958 [...] [Playback|Capture] Switch /* turn on/off the IEC958 interface */
- IEC958 [...] [Playback|Capture] Volume /* digital volume control */
- IEC958 [...] [Playback|Capture] Default /* default or global value - read/write */
- IEC958 [...] [Playback|Capture] Mask /* consumer and professional mask */
- IEC958 [...] [Playback|Capture] Con Mask /* consumer mask */
- IEC958 [...] [Playback|Capture] Pro Mask /* professional mask */
- IEC958 [...] [Playback|Capture] PCM Stream /* the settings assigned to a PCM stream */
- IEC958 Q-subcode [Playback|Capture] Default /* Q-subcode bits */
- IEC958 Preamble [Playback|Capture] Default /* burst preamble words (4*16bits) */
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
deleted file mode 100644
index ec099d4343f2..000000000000
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ /dev/null
@@ -1,324 +0,0 @@
- Model name Description
- ---------- -----------
-ALC880
-======
- 3stack 3-jack in back and a headphone out
- 3stack-digout 3-jack in back, a HP out and a SPDIF out
- 5stack 5-jack in back, 2-jack in front
- 5stack-digout 5-jack in back, 2-jack in front, a SPDIF out
- 6stack 6-jack in back, 2-jack in front
- 6stack-digout 6-jack with a SPDIF out
-
-ALC260
-======
- gpio1 Enable GPIO1
- coef Enable EAPD via COEF table
- fujitsu Quirk for FSC S7020
- fujitsu-jwse Quirk for FSC S7020 with jack modes and HP mic support
-
-ALC262
-======
- inv-dmic Inverted internal mic workaround
-
-ALC267/268
-==========
- inv-dmic Inverted internal mic workaround
- hp-eapd Disable HP EAPD on NID 0x15
-
-ALC22x/23x/25x/269/27x/28x/29x (and vendor-specific ALC3xxx models)
-======
- laptop-amic Laptops with analog-mic input
- laptop-dmic Laptops with digital-mic input
- alc269-dmic Enable ALC269(VA) digital mic workaround
- alc271-dmic Enable ALC271X digital mic workaround
- inv-dmic Inverted internal mic workaround
- headset-mic Indicates a combined headset (headphone+mic) jack
- headset-mode More comprehensive headset support for ALC269 & co
- headset-mode-no-hp-mic Headset mode support without headphone mic
- lenovo-dock Enables docking station I/O for some Lenovos
- hp-gpio-led GPIO LED support on HP laptops
- dell-headset-multi Headset jack, which can also be used as mic-in
- dell-headset-dock Headset jack (without mic-in), and also dock I/O
- alc283-dac-wcaps Fixups for Chromebook with ALC283
- alc283-sense-combo Combo jack sensing on ALC283
- tpt440-dock Pin configs for Lenovo Thinkpad Dock support
-
-ALC66x/67x/892
-==============
- mario Chromebook mario model fixup
- asus-mode1 ASUS
- asus-mode2 ASUS
- asus-mode3 ASUS
- asus-mode4 ASUS
- asus-mode5 ASUS
- asus-mode6 ASUS
- asus-mode7 ASUS
- asus-mode8 ASUS
- inv-dmic Inverted internal mic workaround
- dell-headset-multi Headset jack, which can also be used as mic-in
-
-ALC680
-======
- N/A
-
-ALC88x/898/1150
-======================
- acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G
- acer-aspire-8930g Acer Aspire 8330G/6935G
- acer-aspire Acer Aspire others
- inv-dmic Inverted internal mic workaround
- no-primary-hp VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
-
-ALC861/660
-==========
- N/A
-
-ALC861VD/660VD
-==============
- N/A
-
-CMI9880
-=======
- minimal 3-jack in back
- min_fp 3-jack in back, 2-jack in front
- full 6-jack in back, 2-jack in front
- full_dig 6-jack in back, 2-jack in front, SPDIF I/O
- allout 5-jack in back, 2-jack in front, SPDIF out
- auto auto-config reading BIOS (default)
-
-AD1882 / AD1882A
-================
- 3stack 3-stack mode
- 3stack-automute 3-stack with automute front HP (default)
- 6stack 6-stack mode
-
-AD1884A / AD1883 / AD1984A / AD1984B
-====================================
- desktop 3-stack desktop (default)
- laptop laptop with HP jack sensing
- mobile mobile devices with HP jack sensing
- thinkpad Lenovo Thinkpad X300
- touchsmart HP Touchsmart
-
-AD1884
-======
- N/A
-
-AD1981
-======
- basic 3-jack (default)
- hp HP nx6320
- thinkpad Lenovo Thinkpad T60/X60/Z60
- toshiba Toshiba U205
-
-AD1983
-======
- N/A
-
-AD1984
-======
- basic default configuration
- thinkpad Lenovo Thinkpad T61/X61
- dell_desktop Dell T3400
-
-AD1986A
-=======
- 3stack 3-stack, shared surrounds
- laptop 2-channel only (FSC V2060, Samsung M50)
- laptop-imic 2-channel with built-in mic
- eapd Turn on EAPD constantly
-
-AD1988/AD1988B/AD1989A/AD1989B
-==============================
- 6stack 6-jack
- 6stack-dig ditto with SPDIF
- 3stack 3-jack
- 3stack-dig ditto with SPDIF
- laptop 3-jack with hp-jack automute
- laptop-dig ditto with SPDIF
- auto auto-config reading BIOS (default)
-
-Conexant 5045
-=============
- laptop-hpsense Laptop with HP sense (old model laptop)
- laptop-micsense Laptop with Mic sense (old model fujitsu)
- laptop-hpmicsense Laptop with HP and Mic senses
- benq Benq R55E
- laptop-hp530 HP 530 laptop
- test for testing/debugging purpose, almost all controls
- can be adjusted. Appearing only when compiled with
- $CONFIG_SND_DEBUG=y
-
-Conexant 5047
-=============
- laptop Basic Laptop config
- laptop-hp Laptop config for some HP models (subdevice 30A5)
- laptop-eapd Laptop config with EAPD support
- test for testing/debugging purpose, almost all controls
- can be adjusted. Appearing only when compiled with
- $CONFIG_SND_DEBUG=y
-
-Conexant 5051
-=============
- laptop Basic Laptop config (default)
- hp HP Spartan laptop
- hp-dv6736 HP dv6736
- hp-f700 HP Compaq Presario F700
- ideapad Lenovo IdeaPad laptop
- toshiba Toshiba Satellite M300
-
-Conexant 5066
-=============
- laptop Basic Laptop config (default)
- hp-laptop HP laptops, e.g. G60
- asus Asus K52JU, Lenovo G560
- dell-laptop Dell laptops
- dell-vostro Dell Vostro
- olpc-xo-1_5 OLPC XO 1.5
- ideapad Lenovo IdeaPad U150
- thinkpad Lenovo Thinkpad
-
-STAC9200
-========
- ref Reference board
- oqo OQO Model 2
- dell-d21 Dell (unknown)
- dell-d22 Dell (unknown)
- dell-d23 Dell (unknown)
- dell-m21 Dell Inspiron 630m, Dell Inspiron 640m
- dell-m22 Dell Latitude D620, Dell Latitude D820
- dell-m23 Dell XPS M1710, Dell Precision M90
- dell-m24 Dell Latitude 120L
- dell-m25 Dell Inspiron E1505n
- dell-m26 Dell Inspiron 1501
- dell-m27 Dell Inspiron E1705/9400
- gateway-m4 Gateway laptops with EAPD control
- gateway-m4-2 Gateway laptops with EAPD control
- panasonic Panasonic CF-74
- auto BIOS setup (default)
-
-STAC9205/9254
-=============
- ref Reference board
- dell-m42 Dell (unknown)
- dell-m43 Dell Precision
- dell-m44 Dell Inspiron
- eapd Keep EAPD on (e.g. Gateway T1616)
- auto BIOS setup (default)
-
-STAC9220/9221
-=============
- ref Reference board
- 3stack D945 3stack
- 5stack D945 5stack + SPDIF
- intel-mac-v1 Intel Mac Type 1
- intel-mac-v2 Intel Mac Type 2
- intel-mac-v3 Intel Mac Type 3
- intel-mac-v4 Intel Mac Type 4
- intel-mac-v5 Intel Mac Type 5
- intel-mac-auto Intel Mac (detect type according to subsystem id)
- macmini Intel Mac Mini (equivalent with type 3)
- macbook Intel Mac Book (eq. type 5)
- macbook-pro-v1 Intel Mac Book Pro 1st generation (eq. type 3)
- macbook-pro Intel Mac Book Pro 2nd generation (eq. type 3)
- imac-intel Intel iMac (eq. type 2)
- imac-intel-20 Intel iMac (newer version) (eq. type 3)
- ecs202 ECS/PC chips
- dell-d81 Dell (unknown)
- dell-d82 Dell (unknown)
- dell-m81 Dell (unknown)
- dell-m82 Dell XPS M1210
- auto BIOS setup (default)
-
-STAC9202/9250/9251
-==================
- ref Reference board, base config
- m1 Some Gateway MX series laptops (NX560XL)
- m1-2 Some Gateway MX series laptops (MX6453)
- m2 Some Gateway MX series laptops (M255)
- m2-2 Some Gateway MX series laptops
- m3 Some Gateway MX series laptops
- m5 Some Gateway MX series laptops (MP6954)
- m6 Some Gateway NX series laptops
- auto BIOS setup (default)
-
-STAC9227/9228/9229/927x
-=======================
- ref Reference board
- ref-no-jd Reference board without HP/Mic jack detection
- 3stack D965 3stack
- 5stack D965 5stack + SPDIF
- 5stack-no-fp D965 5stack without front panel
- dell-3stack Dell Dimension E520
- dell-bios Fixes with Dell BIOS setup
- dell-bios-amic Fixes with Dell BIOS setup including analog mic
- volknob Fixes with volume-knob widget 0x24
- auto BIOS setup (default)
-
-STAC92HD71B*
-============
- ref Reference board
- dell-m4-1 Dell desktops
- dell-m4-2 Dell desktops
- dell-m4-3 Dell desktops
- hp-m4 HP mini 1000
- hp-dv5 HP dv series
- hp-hdx HP HDX series
- hp-dv4-1222nr HP dv4-1222nr (with LED support)
- auto BIOS setup (default)
-
-STAC92HD73*
-===========
- ref Reference board
- no-jd BIOS setup but without jack-detection
- intel Intel DG45* mobos
- dell-m6-amic Dell desktops/laptops with analog mics
- dell-m6-dmic Dell desktops/laptops with digital mics
- dell-m6 Dell desktops/laptops with both type of mics
- dell-eq Dell desktops/laptops
- alienware Alienware M17x
- auto BIOS setup (default)
-
-STAC92HD83*
-===========
- ref Reference board
- mic-ref Reference board with power management for ports
- dell-s14 Dell laptop
- dell-vostro-3500 Dell Vostro 3500 laptop
- hp-dv7-4000 HP dv-7 4000
- hp_cNB11_intquad HP CNB models with 4 speakers
- hp-zephyr HP Zephyr
- hp-led HP with broken BIOS for mute LED
- hp-inv-led HP with broken BIOS for inverted mute LED
- hp-mic-led HP with mic-mute LED
- headset-jack Dell Latitude with a 4-pin headset jack
- hp-envy-bass Pin fixup for HP Envy bass speaker (NID 0x0f)
- hp-envy-ts-bass Pin fixup for HP Envy TS bass speaker (NID 0x10)
- hp-bnb13-eq Hardware equalizer setup for HP laptops
- auto BIOS setup (default)
-
-STAC92HD95
-==========
- hp-led LED support for HP laptops
- hp-bass Bass HPF setup for HP Spectre 13
-
-STAC9872
-========
- vaio VAIO laptop without SPDIF
- auto BIOS setup (default)
-
-Cirrus Logic CS4206/4207
-========================
- mbp55 MacBook Pro 5,5
- imac27 IMac 27 Inch
- auto BIOS setup (default)
-
-Cirrus Logic CS4208
-===================
- mba6 MacBook Air 6,1 and 6,2
- gpio0 Enable GPIO 0 amp
- auto BIOS setup (default)
-
-VIA VT17xx/VT18xx/VT20xx
-========================
- auto BIOS setup (default)
diff --git a/Documentation/sound/alsa/VIA82xx-mixer.txt b/Documentation/sound/alsa/VIA82xx-mixer.txt
deleted file mode 100644
index 1b0ac06ba95d..000000000000
--- a/Documentation/sound/alsa/VIA82xx-mixer.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-
- VIA82xx mixer
- =============
-
-On many VIA82xx boards, the 'Input Source Select' mixer control does not work.
-Setting it to 'Input2' on such boards will cause recording to hang, or fail
-with EIO (input/output error) via OSS emulation. This control should be left
-at 'Input1' for such cards.
diff --git a/Documentation/sound/alsa/alsa-parameters.txt b/Documentation/sound/alsa/alsa-parameters.txt
deleted file mode 100644
index 0fa40679b080..000000000000
--- a/Documentation/sound/alsa/alsa-parameters.txt
+++ /dev/null
@@ -1,135 +0,0 @@
- ALSA Kernel Parameters
- ~~~~~~~~~~~~~~~~~~~~~~
-
-See Documentation/kernel-parameters.txt for general information on
-specifying module parameters.
-
-This document may not be entirely up to date and comprehensive. The command
-"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
-module. Loadable modules, after being loaded into the running kernel, also
-reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
-parameters may be changed at runtime by the command
-"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
-
-
- snd-ad1816a= [HW,ALSA]
-
- snd-ad1848= [HW,ALSA]
-
- snd-ali5451= [HW,ALSA]
-
- snd-als100= [HW,ALSA]
-
- snd-als4000= [HW,ALSA]
-
- snd-azt2320= [HW,ALSA]
-
- snd-cmi8330= [HW,ALSA]
-
- snd-cmipci= [HW,ALSA]
-
- snd-cs4231= [HW,ALSA]
-
- snd-cs4232= [HW,ALSA]
-
- snd-cs4236= [HW,ALSA]
-
- snd-cs4281= [HW,ALSA]
-
- snd-cs46xx= [HW,ALSA]
-
- snd-dt019x= [HW,ALSA]
-
- snd-dummy= [HW,ALSA]
-
- snd-emu10k1= [HW,ALSA]
-
- snd-ens1370= [HW,ALSA]
-
- snd-ens1371= [HW,ALSA]
-
- snd-es968= [HW,ALSA]
-
- snd-es1688= [HW,ALSA]
-
- snd-es18xx= [HW,ALSA]
-
- snd-es1938= [HW,ALSA]
-
- snd-es1968= [HW,ALSA]
-
- snd-fm801= [HW,ALSA]
-
- snd-gusclassic= [HW,ALSA]
-
- snd-gusextreme= [HW,ALSA]
-
- snd-gusmax= [HW,ALSA]
-
- snd-hdsp= [HW,ALSA]
-
- snd-ice1712= [HW,ALSA]
-
- snd-intel8x0= [HW,ALSA]
-
- snd-interwave= [HW,ALSA]
-
- snd-interwave-stb=
- [HW,ALSA]
-
- snd-korg1212= [HW,ALSA]
-
- snd-maestro3= [HW,ALSA]
-
- snd-mpu401= [HW,ALSA]
-
- snd-mtpav= [HW,ALSA]
-
- snd-nm256= [HW,ALSA]
-
- snd-opl3sa2= [HW,ALSA]
-
- snd-opti92x-ad1848=
- [HW,ALSA]
-
- snd-opti92x-cs4231=
- [HW,ALSA]
-
- snd-opti93x= [HW,ALSA]
-
- snd-pmac= [HW,ALSA]
-
- snd-rme32= [HW,ALSA]
-
- snd-rme96= [HW,ALSA]
-
- snd-rme9652= [HW,ALSA]
-
- snd-sb8= [HW,ALSA]
-
- snd-sb16= [HW,ALSA]
-
- snd-sbawe= [HW,ALSA]
-
- snd-serial= [HW,ALSA]
-
- snd-sgalaxy= [HW,ALSA]
-
- snd-sonicvibes= [HW,ALSA]
-
- snd-sun-amd7930=
- [HW,ALSA]
-
- snd-sun-cs4231= [HW,ALSA]
-
- snd-trident= [HW,ALSA]
-
- snd-usb-audio= [HW,ALSA,USB]
-
- snd-via82xx= [HW,ALSA]
-
- snd-virmidi= [HW,ALSA]
-
- snd-wavefront= [HW,ALSA]
-
- snd-ymfpci= [HW,ALSA]
diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html
deleted file mode 100644
index 9663b45f6fde..000000000000
--- a/Documentation/sound/alsa/seq_oss.html
+++ /dev/null
@@ -1,409 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
- <TITLE>OSS Sequencer Emulation on ALSA</TITLE>
-</HEAD>
-<BODY>
-
-<CENTER>
-<H1>
-
-<HR WIDTH="100%"></H1></CENTER>
-
-<CENTER>
-<H1>
-OSS Sequencer Emulation on ALSA</H1></CENTER>
-
-<HR WIDTH="100%">
-<P>Copyright (c) 1998,1999 by Takashi Iwai
-<TT><A HREF="mailto:iwai@ww.uni-erlangen.de">&lt;iwai@ww.uni-erlangen.de></A></TT>
-<P>ver.0.1.8; Nov. 16, 1999
-<H2>
-
-<HR WIDTH="100%"></H2>
-
-<H2>
-1. Description</H2>
-This directory contains the OSS sequencer emulation driver on ALSA. Note
-that this program is still under development.
-<P>What this does - it provides the emulation of the OSS sequencer, access
-via
-<TT>/dev/sequencer</TT> and <TT>/dev/music</TT> devices.
-Most applications using OSS can run if the appropriate ALSA
-sequencer is prepared.
-<P>The following features are emulated by this driver:
-<UL>
-<LI>
-Normal sequencer and MIDI events:</LI>
-
-<BR>They are converted to the ALSA sequencer events, and sent to the corresponding
-port.
-<LI>
-Timer events:</LI>
-
-<BR>The timer is not selectable by ioctl. The control rate is fixed to
-100 regardless of HZ. That is, even on an Alpha system, a tick is always
-1/100 second. The base rate and tempo can be changed in <TT>/dev/music</TT>.
-
-<LI>
-Patch loading:</LI>
-
-<BR>Whether it's supported depends purely on the synth driver, since
-patch loading is realized by a callback into the synth driver.
-<LI>
-I/O controls:</LI>
-
-<BR>Most controls are accepted. Some controls
-are dependent on the synth driver, as they are even in the original OSS.</UL>
-Furthermore, you can find the following advanced features:
-<UL>
-<LI>
-Better queue mechanism:</LI>
-
-<BR>The events are queued before processing them.
-<LI>
-Multiple applications:</LI>
-
-<BR>You can run two or more applications simultaneously (even with the OSS
-sequencer)! However, each MIDI device is exclusive - that is, if a MIDI device
-is opened once by some application, other applications can't use it. There is
-no such restriction for synth devices.
-<LI>
-Real-time event processing:</LI>
-
-<BR>The events can be processed in real time without using an out-of-band
-ioctl. To switch to real-time mode, send an ABSTIME 0 event. The following
-events will be processed in real time without being queued. To switch off
-real-time mode, send a RELTIME 0 event.
-<LI>
-<TT>/proc</TT> interface:</LI>
-
-<BR>The status of applications and devices can be shown via <TT>/proc/asound/seq/oss</TT>
-at any time. In a later version, configuration will be changeable via the <TT>/proc</TT>
-interface, too.</UL>
-
-<H2>
-2. Installation</H2>
-Run the configure script with both the sequencer support (<TT>--with-sequencer=yes</TT>)
-and OSS emulation (<TT>--with-oss=yes</TT>) options. A module <TT>snd-seq-oss.o</TT>
-will be created. If the synth module of your sound card supports OSS
-emulation (so far, only the Emu8000 driver), this module will be loaded automatically.
-Otherwise, you need to load this module manually.
-<P>At startup, this module probes all the MIDI ports which have already
-been connected to the sequencer. After that, the creation and deletion
-of ports are watched via the announcement mechanism of the ALSA sequencer.
-<P>The available synth and MIDI devices can be found in proc interface.
-Run "<TT>cat /proc/asound/seq/oss</TT>", and check the devices. For example,
-if you use an AWE64 card, you'll see like the following:
-<PRE>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; OSS sequencer emulation version 0.1.8
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; ALSA client number 63
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; ALSA receiver port 0
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Number of applications: 0
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Number of synth devices: 1
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; synth 0: [EMU8000]
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; type 0x1 : subtype 0x20 : voices 32
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; capabilties : ioctl enabled / load_patch enabled
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Number of MIDI devices: 3
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; midi 0: [Emu8000 Port-0] ALSA port 65:0
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; capability write / opened none
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; midi 1: [Emu8000 Port-1] ALSA port 65:1
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; capability write / opened none
-
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; midi 2: [0: MPU-401 (UART)] ALSA port 64:0
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; capability read/write / opened none</PRE>
-Note that the device numbers may be different from the information in
-<TT>/proc/asound/oss-devices</TT>
-or that of the original OSS driver. Use the device numbers listed in <TT>/proc/asound/seq/oss</TT>
-to play via the OSS sequencer emulation.
-<H2>
-3. Using Synthesizer Devices</H2>
-Run your favorite program. I've tested playmidi-2.4, awemidi-0.4.3, gmod-3.1
-and xmp-1.1.5. You can load samples via <TT>/dev/sequencer</TT> like sfxload,
-too.
-<P>If the lowlevel driver supports multiple access to synth devices (like
-Emu8000 driver), two or more applications are allowed to run at the same
-time.
-<H2>
-4. Using MIDI Devices</H2>
-So far, only MIDI output has been tested. MIDI input has not been checked at all,
-but hopefully it will work. Use the device number listed in <TT>/proc/asound/seq/oss</TT>.
-Be aware that these numbers are mostly different from those listed in
-<TT>/proc/asound/oss-devices</TT>.
-<H2>
-5. Module Options</H2>
-The following module options are available:
-<UL>
-<LI>
-<TT>maxqlen</TT></LI>
-
-<BR>specifies the maximum read/write queue length. This queue is private
-to the OSS sequencer, so it is independent of the queue length of the ALSA
-sequencer. The default value is 1024.
-<LI>
-<TT>seq_oss_debug</TT></LI>
-
-<BR>specifies the debug level and accepts zero (= no debug messages) or
-a positive integer. The default value is 0.</UL>
-
-<H2>
-6. Queue Mechanism</H2>
-The OSS sequencer emulation uses an ALSA priority queue. The
-events from <TT>/dev/sequencer</TT> are processed and put onto the queue
-specified by the module option.
-<P>All the events from <TT>/dev/sequencer</TT> are parsed at the beginning.
-The timing events are also parsed at this moment, so that the events may
-be processed in real-time. Sending an ABSTIME 0 event switches the operation
-mode to real-time mode, and sending a RELTIME 0 event switches it off.
-In real-time mode, all events are dispatched immediately.
-<P>The queued events are dispatched to the corresponding ALSA sequencer
-ports at the scheduled time by the ALSA sequencer dispatcher.
-<P>If the write queue is full, the application sleeps in blocking mode until
-a certain amount (by default, one half) becomes empty. Synchronization
-to the write timing is implemented, too.
-<P>The input from MIDI devices or echo-back events is stored in the read FIFO
-queue. If an application reads <TT>/dev/sequencer</TT> in blocking mode, the
-process will be awakened.
-
-<H2>
-7. Interface to Synthesizer Device</H2>
-
-<H3>
-7.1. Registration</H3>
-To register an OSS synthesizer device, use the <TT>snd_seq_oss_synth_register</TT>
-function:
-<PRE>int snd_seq_oss_synth_register(char *name, int type, int subtype, int nvoices,
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; snd_seq_oss_callback_t *oper, void *private_data)</PRE>
-The arguments <TT>name</TT>, <TT>type</TT>, <TT>subtype</TT> and
-<TT>nvoices</TT>
-are used for building the appropriate synth_info structure for ioctl. The
-return value is the index number of this device. This index must be remembered
-for unregistration. If registration fails, -errno will be returned.
-<P>To release this device, call the <TT>snd_seq_oss_synth_unregister</TT> function:
-<PRE>int snd_seq_oss_synth_unregister(int index)</PRE>
-where <TT>index</TT> is the index number returned by the register function.
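-<P>For illustration only, a minimal registration sketch might look like the
-following (the callback table <TT>my_callbacks</TT> and the callbacks
-<TT>my_open</TT>/<TT>my_close</TT> are assumptions for this example, not taken
-from a real driver; the type/subtype/voices values are the ones shown in the
-proc example above):
-<PRE>static int my_index;
-
-static snd_seq_oss_callback_t my_callbacks = {
-        .open  = my_open,       /* mandatory, see section 7.2.1 */
-        .close = my_close,      /* mandatory, see section 7.2.4 */
-};
-
-static int my_register(void)
-{
-        my_index = snd_seq_oss_synth_register("My Synth", 0x1, 0x20, 32,
-                                              &my_callbacks, NULL);
-        if (my_index < 0)
-                return my_index;        /* -errno on failure */
-        return 0;
-}
-
-static void my_unregister(void)
-{
-        snd_seq_oss_synth_unregister(my_index);
-}</PRE>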
-<H3>
-7.2. Callbacks</H3>
-OSS synthesizer devices have capabilities for sample downloading and ioctls
-like sample reset. In the OSS emulation, these special features are realized
-by using callbacks. The registration argument <TT>oper</TT> is used to specify these
-callbacks. The following callback functions must be defined:
-<PRE>snd_seq_oss_callback_t:
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int (*open)(snd_seq_oss_arg_t *p, void *closure);
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int (*close)(snd_seq_oss_arg_t *p);
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int (*ioctl)(snd_seq_oss_arg_t *p, unsigned int cmd, unsigned long arg);
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int (*load_patch)(snd_seq_oss_arg_t *p, int format, const char *buf, int offs, int count);
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int (*reset)(snd_seq_oss_arg_t *p);</PRE>
-Except for the <TT>open</TT> and <TT>close</TT> callbacks, they are allowed
-to be NULL.
-<P>Each callback function takes an argument of type snd_seq_oss_arg_t as its
-first argument.
-<PRE>struct snd_seq_oss_arg_t {
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int app_index;
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int file_mode;
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int seq_mode;
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; snd_seq_addr_t addr;
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; void *private_data;
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; int event_passing;
-};</PRE>
-The first three fields, <TT>app_index</TT>, <TT>file_mode</TT> and
-<TT>seq_mode</TT>,
-are initialized by the OSS sequencer. The <TT>app_index</TT> is the application
-index, which is unique to each application opening the OSS sequencer. The
-<TT>file_mode</TT>
-is a set of bit-flags indicating the file operation mode. See
-<TT>seq_oss.h</TT>
-for its meaning. The <TT>seq_mode</TT> is the sequencer operation mode. In
-the current version, only <TT>SND_OSSSEQ_MODE_SYNTH</TT> is used.
-<P>The next two fields, <TT>addr</TT> and <TT>private_data</TT>, must be
-filled in by the synth driver in the open callback. The <TT>addr</TT> contains
-the address of the ALSA sequencer port which is assigned to this device. If
-the driver allocates memory for <TT>private_data</TT>, it must release it
-in the close callback itself.
-<P>The last field, <TT>event_passing</TT>, indicates how to translate note-on
-/ off events. In <TT>PROCESS_EVENTS</TT> mode, note 255 is regarded
-as a velocity change, and a key pressure event is passed to the port. In <TT>PASS_EVENTS</TT>
-mode, all note on/off events are passed to the port unmodified. <TT>PROCESS_KEYPRESS</TT>
-mode checks for notes above 128 and regards them as key pressure events (mainly
-for the Emu8000 driver).
-<H4>
-7.2.1. Open Callback</H4>
-The <TT>open</TT> callback is called each time this device is opened by an application
-using the OSS sequencer. It must not be NULL. Typically, the open callback
-performs the following procedure:
-<OL>
-<LI>
-Allocate private data record.</LI>
-
-<LI>
-Create an ALSA sequencer port.</LI>
-
-<LI>
-Set the new port address on arg->addr.</LI>
-
-<LI>
-Set the private data record pointer on arg->private_data.</LI>
-</OL>
-Note that the type bit-flags in the port_info of this synth port must NOT contain
-the <TT>TYPE_MIDI_GENERIC</TT>
-bit. Instead, <TT>TYPE_SPECIFIC</TT> should be used. Also, the <TT>CAP_SUBSCRIPTION</TT>
-bit should NOT be included either. This is necessary to distinguish it from other
-normal MIDI devices. If the open procedure succeeds, return zero. Otherwise,
-return -errno.
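-<P>For illustration, a hedged sketch of an open callback following the above
-procedure is shown below. The structure <TT>struct my_synth</TT> and the
-client number <TT>my_client</TT> are assumptions for this example, and the
-port creation itself is driver specific, so it is only indicated by a comment:
-<PRE>static int my_client;                   /* the driver's ALSA client number */
-
-struct my_synth {
-        int port;                       /* ALSA sequencer port number */
-        /* ... driver specific state ... */
-};
-
-static int my_open(snd_seq_oss_arg_t *arg, void *closure)
-{
-        struct my_synth *rec;
-
-        /* 1. allocate the private data record */
-        rec = kmalloc(sizeof(*rec), GFP_KERNEL);
-        if (rec == NULL)
-                return -ENOMEM;
-
-        /* 2. create an ALSA sequencer port here (TYPE_SPECIFIC, without
-         *    TYPE_MIDI_GENERIC and without CAP_SUBSCRIPTION) and store
-         *    its number in rec->port
-         */
-
-        /* 3. set the new port address */
-        arg->addr.client = my_client;
-        arg->addr.port = rec->port;
-
-        /* 4. set the private data record pointer */
-        arg->private_data = rec;
-        return 0;
-}</PRE>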
-<H4>
-7.2.2 Ioctl Callback</H4>
-The <TT>ioctl</TT> callback is called when the sequencer receives device-specific
-ioctls. The following ioctls should be processed by this callback:
-<UL>
-<LI>
-<TT>IOCTL_SEQ_RESET_SAMPLES</TT></LI>
-
-<BR>reset all samples in memory -- return 0
-<LI>
-<TT>IOCTL_SYNTH_MEMAVL</TT></LI>
-
-<BR>return the available memory size
-<LI>
-<TT>FM_4OP_ENABLE</TT></LI>
-
-<BR>can usually be ignored</UL>
-The other ioctls are processed inside the sequencer without being passed to
-the lowlevel driver.
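-<P>For illustration, a minimal sketch of such an ioctl callback is shown
-below. The ioctl names follow the abbreviations used in this document (the
-real identifiers live in the OSS <TT>soundcard.h</TT> header), and the helpers
-<TT>my_reset_samples()</TT> and <TT>my_mem_avail()</TT> are made up for this
-example:
-<PRE>static int my_ioctl(snd_seq_oss_arg_t *arg, unsigned int cmd,
-                    unsigned long ioarg)
-{
-        switch (cmd) {
-        case IOCTL_SEQ_RESET_SAMPLES:   /* drop all loaded samples */
-                my_reset_samples(arg->private_data);
-                return 0;
-        case IOCTL_SYNTH_MEMAVL:        /* report the available memory size */
-                return my_mem_avail(arg->private_data);
-        case FM_4OP_ENABLE:             /* can usually be ignored */
-                return 0;
-        default:
-                return -EINVAL;
-        }
-}</PRE>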
-<H4>
-7.2.3 Load_Patch Callback</H4>
-The <TT>load_patch</TT> callback is used for sample-downloading. This callback
-must read the data from user-space and transfer it to the device. Return 0
-on success, and -errno on failure. The format argument is the patch key
-in the patch_info record. The buf is a user-space pointer where the patch_info record
-is stored. The offs can be ignored. The count is the total size of the
-sample data.
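-<P>A minimal sketch of a load_patch callback, assuming a driver-specific
-header <TT>struct my_patch_hdr</TT> and a download helper <TT>my_download()</TT>
-(both made up for this example), could look like this:
-<PRE>static int my_load_patch(snd_seq_oss_arg_t *arg, int format,
-                         const char *buf, int offs, int count)
-{
-        struct my_patch_hdr hdr;        /* driver specific header layout */
-
-        if (count < sizeof(hdr))
-                return -EINVAL;
-        /* read the header from user-space */
-        if (copy_from_user(&hdr, buf, sizeof(hdr)))
-                return -EFAULT;
-        /* transfer the sample data to the hardware */
-        return my_download(arg->private_data, buf, count);
-}</PRE>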
-<H4>
-7.2.4 Close Callback</H4>
-The <TT>close</TT> callback is called when this device is closed by the
-application. If any private data was allocated in the open callback, it must
-be released in the close callback. The deletion of the ALSA port should be
-done here, too. This callback must not be NULL.
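-<P>Matching the open sketch above (and with the same assumptions), a minimal
-close callback could be:
-<PRE>static int my_close(snd_seq_oss_arg_t *arg)
-{
-        struct my_synth *rec = arg->private_data;
-
-        /* delete the ALSA sequencer port created in my_open() here */
-        kfree(rec);                     /* release the private data */
-        return 0;
-}</PRE>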
-<H4>
-7.2.5 Reset Callback</H4>
-The <TT>reset</TT> callback is called when the sequencer device is reset or
-closed by applications. The callback should turn off the sounds on the
-relevant port immediately, and initialize the status of the port. If this
-callback is undefined, the OSS sequencer sends a <TT>HEARTBEAT</TT> event to the
-port.
-<H3>
-7.3 Events</H3>
-Most of the events are processed by the sequencer and translated to the appropriate
-ALSA sequencer events, so that each synth device can receive them via the input_event
-callback of its ALSA sequencer port. The following ALSA events should be implemented
-by the driver:
-<BR>&nbsp;
-<TABLE BORDER WIDTH="75%" NOSAVE >
-<TR NOSAVE>
-<TD NOSAVE><B>ALSA event</B></TD>
-
-<TD><B>Original OSS events</B></TD>
-</TR>
-
-<TR>
-<TD>NOTEON</TD>
-
-<TD>SEQ_NOTEON
-<BR>MIDI_NOTEON</TD>
-</TR>
-
-<TR>
-<TD>NOTE</TD>
-
-<TD>SEQ_NOTEOFF
-<BR>MIDI_NOTEOFF</TD>
-</TR>
-
-<TR NOSAVE>
-<TD NOSAVE>KEYPRESS</TD>
-
-<TD>MIDI_KEY_PRESSURE</TD>
-</TR>
-
-<TR NOSAVE>
-<TD>CHANPRESS</TD>
-
-<TD NOSAVE>SEQ_AFTERTOUCH
-<BR>MIDI_CHN_PRESSURE</TD>
-</TR>
-
-<TR NOSAVE>
-<TD NOSAVE>PGMCHANGE</TD>
-
-<TD NOSAVE>SEQ_PGMCHANGE
-<BR>MIDI_PGM_CHANGE</TD>
-</TR>
-
-<TR>
-<TD>PITCHBEND</TD>
-
-<TD>SEQ_CONTROLLER(CTRL_PITCH_BENDER)
-<BR>MIDI_PITCH_BEND</TD>
-</TR>
-
-<TR>
-<TD>CONTROLLER</TD>
-
-<TD>MIDI_CTL_CHANGE
-<BR>SEQ_BALANCE (with CTL_PAN)</TD>
-</TR>
-
-<TR>
-<TD>CONTROL14</TD>
-
-<TD>SEQ_CONTROLLER</TD>
-</TR>
-
-<TR>
-<TD>REGPARAM</TD>
-
-<TD>SEQ_CONTROLLER(CTRL_PITCH_BENDER_RANGE)</TD>
-</TR>
-
-<TR>
-<TD>SYSEX</TD>
-
-<TD>SEQ_SYSEX</TD>
-</TR>
-</TABLE>
-
-<P>Most of these behaviors can be realized by the MIDI emulation driver
-included in the Emu8000 lowlevel driver. In a future release, this module
-will become independent.
-<P>Some OSS events (<TT>SEQ_PRIVATE</TT> and <TT>SEQ_VOLUME</TT> events) are passed as event
-type SND_SEQ_OSS_PRIVATE. The OSS sequencer passes these events as 8-byte
-packets without any modification. The lowlevel driver should process these
-events appropriately.
-<H2>
-8. Interface to MIDI Device</H2>
-Since the OSS emulation probes the creation and deletion of ALSA MIDI sequencer
-ports automatically by receiving announcements from the ALSA sequencer, the
-MIDI devices don't need to be registered explicitly like synth devices.
-However, the MIDI port_info registered to the ALSA sequencer must include the group
-name <TT>SND_SEQ_GROUP_DEVICE</TT> and a capability bit <TT>CAP_READ</TT> or
-<TT>CAP_WRITE</TT>. Also, the subscription capabilities, <TT>CAP_SUBS_READ</TT> or <TT>CAP_SUBS_WRITE</TT>,
-must be defined, too. If these conditions are not satisfied, the port is not
-registered as an OSS sequencer MIDI device.
-<P>The events via MIDI devices are parsed in the OSS sequencer and converted
-to the corresponding ALSA sequencer events. The input from the MIDI sequencer
-is also converted to MIDI byte events by the OSS sequencer. This works just
-the reverse way of the seq_midi module.
-<H2>
-9. Known Problems / TODO's</H2>
-
-<UL>
-<LI>
-Patch loading via ALSA instrument layer is not implemented yet.</LI>
-</UL>
-
-</BODY>
-</HTML>
diff --git a/Documentation/sound/alsa/Audigy-mixer.txt b/Documentation/sound/cards/audigy-mixer.rst
index 7f10dc6ff28c..86213234435f 100644
--- a/Documentation/sound/alsa/Audigy-mixer.txt
+++ b/Documentation/sound/cards/audigy-mixer.rst
@@ -1,8 +1,8 @@
+=============================================
+Sound Blaster Audigy mixer / default DSP code
+=============================================
- Sound Blaster Audigy mixer / default DSP code
- ===========================================
-
-This is based on SB-Live-mixer.txt.
+This is based on sb-live-mixer.rst.
The EMU10K2 chips have a DSP part which can be programmed to support
various ways of sample processing, which is described here.
@@ -13,8 +13,8 @@ The ALSA driver programs this portion of chip by default code
(can be altered later) which offers the following functionality:
-1) Digital mixer controls
--------------------------
+Digital mixer controls
+======================
These controls are built using the DSP instructions. They offer extended
functionality. Only the default built-in code in the ALSA driver is described
@@ -26,320 +26,343 @@ is mentioned in multiple controls, the signal is accumulated and can be wrapped
Explanation of used abbreviations:
-DAC - digital to analog converter
-ADC - analog to digital converter
-I2S - one-way three wire serial bus for digital sound by Philips Semiconductors
- (this standard is used for connecting standalone DAC and ADC converters)
-LFE - low frequency effects (subwoofer signal)
-AC97 - a chip containing an analog mixer, DAC and ADC converters
-IEC958 - S/PDIF
-FX-bus - the EMU10K2 chip has an effect bus containing 64 accumulators.
- Each of the synthesizer voices can feed its output to these accumulators
- and the DSP microcontroller can operate with the resulting sum.
+DAC
+ digital to analog converter
+ADC
+ analog to digital converter
+I2S
+ one-way three wire serial bus for digital sound by Philips Semiconductors
+ (this standard is used for connecting standalone DAC and ADC converters)
+LFE
+ low frequency effects (subwoofer signal)
+AC97
+ a chip containing an analog mixer, DAC and ADC converters
+IEC958
+ S/PDIF
+FX-bus
+ the EMU10K2 chip has an effect bus containing 64 accumulators.
+ Each of the synthesizer voices can feed its output to these accumulators
+ and the DSP microcontroller can operate with the resulting sum.
name='PCM Front Playback Volume',index=0
-
+----------------------------------------
This control is used to attenuate samples for left and right front PCM FX-bus
accumulators. ALSA uses accumulators 8 and 9 for left and right front PCM
samples for 5.1 playback. The result samples are forwarded to the front DAC PCM
slots of the Philips DAC.
name='PCM Surround Playback Volume',index=0
-
+-------------------------------------------
This control is used to attenuate samples for left and right surround PCM FX-bus
accumulators. ALSA uses accumulators 2 and 3 for left and right surround PCM
samples for 5.1 playback. The result samples are forwarded to the surround DAC PCM
slots of the Philips DAC.
name='PCM Center Playback Volume',index=0
-
+-----------------------------------------
This control is used to attenuate samples for center PCM FX-bus accumulator.
ALSA uses accumulator 6 for center PCM sample for 5.1 playback. The result sample
is forwarded to the center DAC PCM slot of the Philips DAC.
name='PCM LFE Playback Volume',index=0
-
+--------------------------------------
This control is used to attenuate sample for LFE PCM FX-bus accumulator.
ALSA uses accumulator 7 for LFE PCM sample for 5.1 playback. The result sample
is forwarded to the LFE DAC PCM slot of the Philips DAC.
name='PCM Playback Volume',index=0
-
+----------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples for
stereo playback. The result samples are forwarded to the front DAC PCM slots
of the Philips DAC.
name='PCM Capture Volume',index=0
-
+---------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
name='Music Playback Volume',index=0
-
+------------------------------------
This control is used to attenuate samples for left and right MIDI FX-bus
accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
name='Music Capture Volume',index=0
-
+-----------------------------------
These controls are used to attenuate samples for left and right MIDI FX-bus
accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
name='Mic Playback Volume',index=0
-
+----------------------------------
This control is used to attenuate samples for the left and right Mic input.
The AC97 codec is used for the Mic input. The result samples are forwarded to
the front DAC PCM slots of the Philips DAC. Samples are also forwarded to the Mic
capture FIFO (device 1 - 16bit/8kHz mono) without volume control.
name='Mic Capture Volume',index=0
-
+---------------------------------
This control is used to attenuate samples for left and right Mic input.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
name='Audigy CD Playback Volume',index=0
-
+----------------------------------------
This control is used to attenuate samples from left and right IEC958 TTL
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the front DAC PCM slots of the Philips DAC.
name='Audigy CD Capture Volume',index=0
-
+---------------------------------------
This control is used to attenuate samples from left and right IEC958 TTL
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the ADC capture FIFO (thus to the standard capture PCM device).
name='IEC958 Optical Playback Volume',index=0
-
+---------------------------------------------
This control is used to attenuate samples from left and right IEC958 optical
digital input. The result samples are forwarded to the front DAC PCM slots
of the Philips DAC.
name='IEC958 Optical Capture Volume',index=0
-
+--------------------------------------------
This control is used to attenuate samples from left and right IEC958 optical
digital inputs. The result samples are forwarded to the ADC capture FIFO
(thus to the standard capture PCM device).
name='Line2 Playback Volume',index=0
-
+------------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the AudigyDrive). The result samples are forwarded to the front
DAC PCM slots of the Philips DAC.
name='Line2 Capture Volume',index=1
-
+-----------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the AudigyDrive). The result samples are forwarded to the ADC
capture FIFO (thus to the standard capture PCM device).
name='Analog Mix Playback Volume',index=0
-
+-----------------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs from Philips ADC. The result samples are forwarded to the front
DAC PCM slots of the Philips DAC. This contains mix from analog sources
like CD, Line In, Aux, ....
name='Analog Mix Capture Volume',index=1
-
+----------------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs from the Philips ADC. The result samples are forwarded to the ADC
capture FIFO (thus to the standard capture PCM device).
name='Aux2 Playback Volume',index=0
-
+-----------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the AudigyDrive). The result samples are forwarded to the front
DAC PCM slots of the Philips DAC.
name='Aux2 Capture Volume',index=1
-
+----------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the AudigyDrive). The result samples are forwarded to the ADC
capture FIFO (thus to the standard capture PCM device).
name='Front Playback Volume',index=0
-
+------------------------------------
All stereo signals are mixed together and mirrored to surround, center and LFE.
This control is used to attenuate samples for left and right front speakers of
this mix.
name='Surround Playback Volume',index=0
-
+---------------------------------------
All stereo signals are mixed together and mirrored to surround, center and LFE.
This control is used to attenuate samples for left and right surround speakers of
this mix.
name='Center Playback Volume',index=0
-
+-------------------------------------
All stereo signals are mixed together and mirrored to surround, center and LFE.
This control is used to attenuate sample for center speaker of this mix.
name='LFE Playback Volume',index=0
-
+----------------------------------
All stereo signals are mixed together and mirrored to surround, center and LFE.
This control is used to attenuate sample for LFE speaker of this mix.
name='Tone Control - Switch',index=0
-
+------------------------------------
This control turns the tone control on or off. The samples for front, rear
and center / LFE outputs are affected.
name='Tone Control - Bass',index=0
-
+----------------------------------
This control sets the bass intensity. There is no neutral value!!
When the tone control code is activated, the samples are always modified.
The closest value to pure signal is 20.
name='Tone Control - Treble',index=0
-
+------------------------------------
This control sets the treble intensity. There is no neutral value!!
When the tone control code is activated, the samples are always modified.
The closest value to pure signal is 20.
name='Master Playback Volume',index=0
-
+-------------------------------------
This control is used to attenuate samples for front, surround, center and
LFE outputs.
name='IEC958 Optical Raw Playback Switch',index=0
-
+-------------------------------------------------
If this switch is on, then the samples for the IEC958 (S/PDIF) digital
output are taken only from the raw FX8010 PCM, otherwise standard front
PCM samples are taken.
-2) PCM stream related controls
-------------------------------
+PCM stream related controls
+===========================
name='EMU10K1 PCM Volume',index 0-31
-
+------------------------------------
Channel volume attenuation in the range 0-0xffff. The maximum value (no
attenuation) is the default. The channel mapping for the three values is
as follows:
- 0 - mono, default 0xffff (no attenuation)
- 1 - left, default 0xffff (no attenuation)
- 2 - right, default 0xffff (no attenuation)
+* 0 - mono, default 0xffff (no attenuation)
+* 1 - left, default 0xffff (no attenuation)
+* 2 - right, default 0xffff (no attenuation)
name='EMU10K1 PCM Send Routing',index 0-31
-
+------------------------------------------
This control specifies the destination - the FX-bus accumulators. There are 24
values with this mapping:
- 0 - mono, A destination (FX-bus 0-63), default 0
- 1 - mono, B destination (FX-bus 0-63), default 1
- 2 - mono, C destination (FX-bus 0-63), default 2
- 3 - mono, D destination (FX-bus 0-63), default 3
- 4 - mono, E destination (FX-bus 0-63), default 0
- 5 - mono, F destination (FX-bus 0-63), default 0
- 6 - mono, G destination (FX-bus 0-63), default 0
- 7 - mono, H destination (FX-bus 0-63), default 0
- 8 - left, A destination (FX-bus 0-63), default 0
- 9 - left, B destination (FX-bus 0-63), default 1
- 10 - left, C destination (FX-bus 0-63), default 2
- 11 - left, D destination (FX-bus 0-63), default 3
- 12 - left, E destination (FX-bus 0-63), default 0
- 13 - left, F destination (FX-bus 0-63), default 0
- 14 - left, G destination (FX-bus 0-63), default 0
- 15 - left, H destination (FX-bus 0-63), default 0
- 16 - right, A destination (FX-bus 0-63), default 0
- 17 - right, B destination (FX-bus 0-63), default 1
- 18 - right, C destination (FX-bus 0-63), default 2
- 19 - right, D destination (FX-bus 0-63), default 3
- 20 - right, E destination (FX-bus 0-63), default 0
- 21 - right, F destination (FX-bus 0-63), default 0
- 22 - right, G destination (FX-bus 0-63), default 0
- 23 - right, H destination (FX-bus 0-63), default 0
+* 0 - mono, A destination (FX-bus 0-63), default 0
+* 1 - mono, B destination (FX-bus 0-63), default 1
+* 2 - mono, C destination (FX-bus 0-63), default 2
+* 3 - mono, D destination (FX-bus 0-63), default 3
+* 4 - mono, E destination (FX-bus 0-63), default 0
+* 5 - mono, F destination (FX-bus 0-63), default 0
+* 6 - mono, G destination (FX-bus 0-63), default 0
+* 7 - mono, H destination (FX-bus 0-63), default 0
+* 8 - left, A destination (FX-bus 0-63), default 0
+* 9 - left, B destination (FX-bus 0-63), default 1
+* 10 - left, C destination (FX-bus 0-63), default 2
+* 11 - left, D destination (FX-bus 0-63), default 3
+* 12 - left, E destination (FX-bus 0-63), default 0
+* 13 - left, F destination (FX-bus 0-63), default 0
+* 14 - left, G destination (FX-bus 0-63), default 0
+* 15 - left, H destination (FX-bus 0-63), default 0
+* 16 - right, A destination (FX-bus 0-63), default 0
+* 17 - right, B destination (FX-bus 0-63), default 1
+* 18 - right, C destination (FX-bus 0-63), default 2
+* 19 - right, D destination (FX-bus 0-63), default 3
+* 20 - right, E destination (FX-bus 0-63), default 0
+* 21 - right, F destination (FX-bus 0-63), default 0
+* 22 - right, G destination (FX-bus 0-63), default 0
+* 23 - right, H destination (FX-bus 0-63), default 0
Don't forget that it's illegal to assign a channel to the same FX-bus accumulator
more than once (it means 0=0 && 1=0 is an invalid combination).
name='EMU10K1 PCM Send Volume',index 0-31
-
+-----------------------------------------
It specifies the attenuation (amount) for a given destination in the range 0-255.
The channel mapping is as follows:
- 0 - mono, A destination attn, default 255 (no attenuation)
- 1 - mono, B destination attn, default 255 (no attenuation)
- 2 - mono, C destination attn, default 0 (mute)
- 3 - mono, D destination attn, default 0 (mute)
- 4 - mono, E destination attn, default 0 (mute)
- 5 - mono, F destination attn, default 0 (mute)
- 6 - mono, G destination attn, default 0 (mute)
- 7 - mono, H destination attn, default 0 (mute)
- 8 - left, A destination attn, default 255 (no attenuation)
- 9 - left, B destination attn, default 0 (mute)
- 10 - left, C destination attn, default 0 (mute)
- 11 - left, D destination attn, default 0 (mute)
- 12 - left, E destination attn, default 0 (mute)
- 13 - left, F destination attn, default 0 (mute)
- 14 - left, G destination attn, default 0 (mute)
- 15 - left, H destination attn, default 0 (mute)
- 16 - right, A destination attn, default 0 (mute)
- 17 - right, B destination attn, default 255 (no attenuation)
- 18 - right, C destination attn, default 0 (mute)
- 19 - right, D destination attn, default 0 (mute)
- 20 - right, E destination attn, default 0 (mute)
- 21 - right, F destination attn, default 0 (mute)
- 22 - right, G destination attn, default 0 (mute)
- 23 - right, H destination attn, default 0 (mute)
-
-
-
-4) MANUALS/PATENTS:
--------------------
+* 0 - mono, A destination attn, default 255 (no attenuation)
+* 1 - mono, B destination attn, default 255 (no attenuation)
+* 2 - mono, C destination attn, default 0 (mute)
+* 3 - mono, D destination attn, default 0 (mute)
+* 4 - mono, E destination attn, default 0 (mute)
+* 5 - mono, F destination attn, default 0 (mute)
+* 6 - mono, G destination attn, default 0 (mute)
+* 7 - mono, H destination attn, default 0 (mute)
+* 8 - left, A destination attn, default 255 (no attenuation)
+* 9 - left, B destination attn, default 0 (mute)
+* 10 - left, C destination attn, default 0 (mute)
+* 11 - left, D destination attn, default 0 (mute)
+* 12 - left, E destination attn, default 0 (mute)
+* 13 - left, F destination attn, default 0 (mute)
+* 14 - left, G destination attn, default 0 (mute)
+* 15 - left, H destination attn, default 0 (mute)
+* 16 - right, A destination attn, default 0 (mute)
+* 17 - right, B destination attn, default 255 (no attenuation)
+* 18 - right, C destination attn, default 0 (mute)
+* 19 - right, D destination attn, default 0 (mute)
+* 20 - right, E destination attn, default 0 (mute)
+* 21 - right, F destination attn, default 0 (mute)
+* 22 - right, G destination attn, default 0 (mute)
+* 23 - right, H destination attn, default 0 (mute)
+
+
+
+MANUALS/PATENTS
+===============
ftp://opensource.creative.com/pub/doc
-------------------------------------
- Files:
- LM4545.pdf AC97 Codec
+LM4545.pdf
+ AC97 Codec
- m2049.pdf The EMU10K1 Digital Audio Processor
+m2049.pdf
+ The EMU10K1 Digital Audio Processor
- hog63.ps FX8010 - A DSP Chip Architecture for Audio Effects
+hog63.ps
+ FX8010 - A DSP Chip Architecture for Audio Effects
WIPO Patents
------------
- Patent numbers:
- WO 9901813 (A1) Audio Effects Processor with multiple asynchronous (Jan. 14, 1999)
- streams
- WO 9901814 (A1) Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
+WO 9901813 (A1)
+ Audio Effects Processor with multiple asynchronous streams
+ (Jan. 14, 1999)
+
+WO 9901814 (A1)
+ Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
- WO 9901953 (A1) Audio Effects Processor having Decoupled Instruction
- Execution and Audio Data Sequencing (Jan. 14, 1999)
+WO 9901953 (A1)
+ Audio Effects Processor having Decoupled Instruction
+ Execution and Audio Data Sequencing (Jan. 14, 1999)
US Patents (http://www.uspto.gov/)
----------------------------------
- US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
-
- US 5928342 Audio Effects Processor integrated on a single chip (Jul. 27, 1999)
- with a multiport memory onto which multiple asynchronous
- digital sound samples can be concurrently loaded
-
- US 5930158 Processor with Instruction Set for Audio Effects (Jul. 27, 1999)
-
- US 6032235 Memory initialization circuit (Tram) (Feb. 29, 2000)
-
- US 6138207 Interpolation looping of audio samples in cache connected to (Oct. 24, 2000)
- system bus with prioritization and modification of bus transfers
- in accordance with loop ends and minimum block sizes
-
- US 6151670 Method for conserving memory storage using a (Nov. 21, 2000)
- pool of short term memory registers
-
- US 6195715 Interrupt control for multiple programs communicating with (Feb. 27, 2001)
- a common interrupt by associating programs to GP registers,
- defining interrupt register, polling GP registers, and invoking
- callback routine associated with defined interrupt register
+US 5925841
+ Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
+
+US 5928342
+ Audio Effects Processor integrated on a single chip
+ with a multiport memory onto which multiple asynchronous
+ digital sound samples can be concurrently loaded
+ (Jul. 27, 1999)
+
+US 5930158
+ Processor with Instruction Set for Audio Effects (Jul. 27, 1999)
+
+US 6032235
+ Memory initialization circuit (Tram) (Feb. 29, 2000)
+
+US 6138207
+ Interpolation looping of audio samples in cache connected to
+ system bus with prioritization and modification of bus transfers
+ in accordance with loop ends and minimum block sizes
+ (Oct. 24, 2000)
+
+US 6151670
+ Method for conserving memory storage using a
+ pool of short term memory registers
+ (Nov. 21, 2000)
+
+US 6195715
+ Interrupt control for multiple programs communicating with
+ a common interrupt by associating programs to GP registers,
+ defining interrupt register, polling GP registers, and invoking
+ callback routine associated with defined interrupt register
+ (Feb. 27, 2001)
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/cards/audiophile-usb.rst
index e7a5ed4dcae8..a7bb5648331f 100644
--- a/Documentation/sound/alsa/Audiophile-Usb.txt
+++ b/Documentation/sound/cards/audiophile-usb.rst
@@ -1,32 +1,41 @@
- Guide to using M-Audio Audiophile USB with ALSA and Jack v1.5
- ========================================================
+========================================================
+Guide to using M-Audio Audiophile USB with ALSA and Jack
+========================================================
- Thibault Le Meur <Thibault.LeMeur@supelec.fr>
+v1.5
+
+Thibault Le Meur <Thibault.LeMeur@supelec.fr>
This document is a guide to using the M-Audio Audiophile USB (tm) device with
ALSA and JACK.
History
=======
+
* v1.4 - Thibault Le Meur (2007-07-11)
- - Added Low Endianness nature of 16bits-modes
- found by Hakan Lennestal <Hakan.Lennestal@brfsodrahamn.se>
- - Modifying document structure
+
+ - Added Low Endianness nature of 16bits-modes
+ found by Hakan Lennestal <Hakan.Lennestal@brfsodrahamn.se>
+ - Modifying document structure
+
* v1.5 - Thibault Le Meur (2007-07-12)
- - Added AC3/DTS passthru info
+ - Added AC3/DTS passthru info
-1 - Audiophile USB Specs and correct usage
-==========================================
+Audiophile USB Specs and correct usage
+======================================
This part is a reminder of important facts about the functions and limitations
of the device.
The device has 4 audio interfaces, and 2 MIDI ports:
+
* Analog Stereo Input (Ai)
+
- This port supports 2 pairs of line-level audio inputs (1/4" TS and RCA)
- When the 1/4" TS (jack) connectors are connected, the RCA connectors
are disabled
+
* Analog Stereo Output (Ao)
* Digital Stereo Input (Di)
* Digital Stereo Output (Do)
@@ -34,56 +43,69 @@ The device has 4 audio interfaces, and 2 MIDI ports:
* Midi Out (Mo)
The internal DAC/ADC has the following characteristics:
+
* sample depth of 16 or 24 bits
* sample rate from 8kHz to 96kHz
* Two interfaces can't use different sample depths at the same time.
+
Moreover, the Audiophile USB documentation gives the following Warning:
-"Please exit any audio application running before switching between bit depths"
+ Please exit any audio application running before switching between bit depths
Due to the USB 1.1 bandwidth limitation, a limited number of interfaces can be
activated at the same time depending on the audio mode selected:
+
* 16-bit/48kHz ==> 4 channels in + 4 channels out
+
- Ai+Ao+Di+Do
+
* 24-bit/48kHz ==> 4 channels in + 2 channels out,
- or 2 channels in + 4 channels out
+ or 2 channels in + 4 channels out
+
- Ai+Ao+Do or Ai+Di+Ao or Ai+Di+Do or Di+Ao+Do
+
* 24-bit/96kHz ==> 2 channels in _or_ 2 channels out (half duplex only)
+
- Ai or Ao or Di or Do
Important facts about the Digital interface:
--------------------------------------------
+
* The Do port additionally supports surround-encoded AC-3 and DTS passthrough,
-though I haven't tested it under Linux
+ though I haven't tested it under Linux
+
- Note that in this setup only the Do interface can be enabled
+
* Apart from recording an audio digital stream, enabling the Di port is a way
-to synchronize the device to an external sample clock
+ to synchronize the device to an external sample clock
+
- As a consequence, the Di port must be enabled only if an active Digital
-source is connected
+ source is connected
- Enabling Di when no digital source is connected can result in a
-synchronization error (for instance sound played at an odd sample rate)
+ synchronization error (for instance sound played at an odd sample rate)
-2 - Audiophile USB MIDI support in ALSA
-=======================================
+Audiophile USB MIDI support in ALSA
+===================================
The Audiophile USB MIDI ports will be automatically supported once the
following modules have been loaded:
+
* snd-usb-audio
* snd-seq-midi
No additional setting is required.
-3 - Audiophile USB Audio support in ALSA
-========================================
+Audiophile USB Audio support in ALSA
+====================================
Audio functions of the Audiophile USB device are handled by the snd-usb-audio
module. This module can work in a default mode (without any device-specific
parameter), or in an "advanced" mode with the device-specific parameter called
-"device_setup".
+``device_setup``.
-3.1 - Default Alsa driver mode
-------------------------------
+Default Alsa driver mode
+------------------------
The default behavior of the snd-usb-audio driver is to list the device
capabilities at startup and activate the required mode when required
@@ -101,6 +123,7 @@ Default Alsa driver mode can lead to device misconfigurations.
Let's get back to the Default Alsa driver mode for now. In this case the
Audiophile interfaces are mapped to ALSA PCM devices in the following
way (assuming the device's index is 1):
+
* hw:1,0 is Ao in playback and Di in capture
* hw:1,1 is Do in playback and Ai in capture
* hw:1,2 is Do in AC3/DTS passthrough mode
@@ -115,20 +138,28 @@ This has been fixed in kernel 2.6.23 and above and now the hw:1,2 interface
is reported to be big endian in this default driver mode.
Examples:
- * playing a S24_3BE encoded raw file to the Ao port
+
+ * playing a S24_3BE encoded raw file to the Ao port::
+
% aplay -D hw:1,0 -c2 -t raw -r48000 -fS24_3BE test.raw
- * recording a S24_3BE encoded raw file from the Ai port
+
+ * recording a S24_3BE encoded raw file from the Ai port::
+
% arecord -D hw:1,1 -c2 -t raw -r48000 -fS24_3BE test.raw
- * playing a S16_BE encoded raw file to the Do port
+
+ * playing a S16_BE encoded raw file to the Do port::
+
% aplay -D hw:1,1 -c2 -t raw -r48000 -fS16_BE test.raw
- * playing an ac3 sample file to the Do port
+
+ * playing an ac3 sample file to the Do port::
+
% aplay -D hw:1,2 --channels=6 ac3_S16_BE_encoded_file.raw
If you're happy with the default Alsa driver mode and don't experience any
issue with this mode, then you can skip the following chapter.
-3.2 - Advanced module setup
----------------------------
+Advanced module setup
+---------------------
Due to the hardware constraints described above, the device initialization made
by the Alsa driver in default mode may result in a corrupted state of the
@@ -137,34 +168,39 @@ from the Ai interface sounds distorted (as if boosted with an excessive high
volume gain).
For people having this problem, the snd-usb-audio module has a new module
-parameter called "device_setup" (this parameter was introduced in kernel
+parameter called ``device_setup`` (this parameter was introduced in kernel
release 2.6.17)
-3.2.1 - Initializing the working mode of the Audiophile USB
+Initializing the working mode of the Audiophile USB
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As far as the Audiophile USB device is concerned, this value lets the user
specify:
+
* the sample depth
* the sample rate
* whether the Di port is used or not
-When initialized with "device_setup=0x00", the snd-usb-audio module has
+When initialized with ``device_setup=0x00``, the snd-usb-audio module has
the same behaviour as when the parameter is omitted (see paragraph "Default
Alsa driver mode" above)
Other modes are described in the following subsections.
-3.2.1.1 - 16-bit modes
+16-bit modes
+~~~~~~~~~~~~
The two supported modes are:
- * device_setup=0x01
+ * ``device_setup=0x01``
+
- 16bits 48kHz mode with Di disabled
- Ai,Ao,Do can be used at the same time
- hw:1,0 is not available in capture mode
- hw:1,2 is not available
- * device_setup=0x11
+ * ``device_setup=0x11``
+
- 16bits 48kHz mode with Di enabled
- Ai,Ao,Di,Do can be used at the same time
- hw:1,0 is available in capture mode
@@ -173,33 +209,43 @@ The two supported modes are:
In these modes the device operates only in 16-bit mode. Before kernel 2.6.23,
the devices were reported to be Big-Endian when in fact they were Little-Endian,
so that playing a file was a matter of using:
+::
+
% aplay -D hw:1,1 -c2 -t raw -r48000 -fS16_BE test_S16_LE.raw
+
where "test_S16_LE.raw" was in fact a little-endian sample file.
Thanks to Hakan Lennestal (who discovered the Little-Endianness of the device in
these modes) a fix has been committed (expected in kernel 2.6.23) and
Alsa now reports Little-Endian interfaces. Thus playing a file now is as simple as
using:
+::
+
% aplay -D hw:1,1 -c2 -t raw -r48000 -fS16_LE test_S16_LE.raw
-3.2.1.2 - 24-bit modes
+
+24-bit modes
+~~~~~~~~~~~~
The three supported modes are:
- * device_setup=0x09
+ * ``device_setup=0x09``
+
- 24bits 48kHz mode with Di disabled
- Ai,Ao,Do can be used at the same time
- hw:1,0 is not available in capture mode
- hw:1,2 is not available
- * device_setup=0x19
+ * ``device_setup=0x19``
+
- 24bits 48kHz mode with Di enabled
- 3 ports from {Ai,Ao,Di,Do} can be used at the same time
- hw:1,0 is available in capture mode and an active digital source must be
connected to Di
- hw:1,2 is not available
- * device_setup=0x0D or 0x10
+ * ``device_setup=0x0D`` or ``0x10``
+
- 24bits 96kHz mode
- Di is enabled by default for this mode but does not need to be connected
to an active source
@@ -210,29 +256,35 @@ The three supported modes are:
In these modes the device is only Big-Endian compliant (see "Default Alsa driver
mode" above for an aplay command example)
-3.2.1.3 - AC3 w/ DTS passthru mode
+AC3 w/ DTS passthru mode
+~~~~~~~~~~~~~~~~~~~~~~~~
Thanks to Hakan Lennestal, I now have a report saying that this mode works.
- * device_setup=0x03
+ * ``device_setup=0x03``
+
- 16bits 48kHz mode with only the Do port enabled
- AC3 with DTS passthru
- Caution with this setup the Do port is mapped to the pcm device hw:1,0
The command line used to playback the AC3/DTS encoded .wav-files in this mode:
+::
+
% aplay -D hw:1,0 --channels=6 ac3_S16_LE_encoded_file.raw
-3.2.2 - How to use the device_setup parameter
-----------------------------------------------
+How to use the ``device_setup`` parameter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The parameter can be given:
- * By manually probing the device (as root):
+ * By manually probing the device (as root)::
+
# modprobe -r snd-usb-audio
# modprobe snd-usb-audio index=1 device_setup=0x09
* Or while configuring the modules options in your modules configuration file
- (typically a .conf file in /etc/modprobe.d/ directory:
+ (typically a .conf file in the /etc/modprobe.d/ directory)::
+
alias snd-card-1 snd-usb-audio
options snd-usb-audio index=1 device_setup=0x09
@@ -250,26 +302,31 @@ CAUTION when initializing the device
* If you've correctly initialized the device in a valid mode and then want to switch
to another mode (possibly with another sample-depth), please use also the following
procedure:
+
- first turn off the device
- de-register the snd-usb-audio module (modprobe -r)
- change the device_setup parameter by changing the device_setup
- option in /etc/modprobe.d/*.conf
+ option in ``/etc/modprobe.d/*.conf``
- turn on the device
+
* A workaround for this last issue has been applied to kernel 2.6.23, but it may not
be enough to ensure the 'stability' of the device initialization.
-3.2.3 - Technical details for hackers
--------------------------------------
+Technical details for hackers
+-----------------------------
+
This section is for hackers wanting to understand details about the device
internals and how Alsa supports it.
-3.2.3.1 - Audiophile USB's device_setup structure
+Audiophile USB's ``device_setup`` structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to understand the device_setup magic numbers for the Audiophile
USB, you need some very basic understanding of binary computation. However,
this is not required to use the parameter and you may skip this section.
The device_setup is one byte long and its structure is the following:
+::
+---+---+---+---+---+---+---+---+
| b7| b6| b5| b4| b3| b2| b1| b0|
@@ -278,38 +335,55 @@ The device_setup is one byte long and its structure is the following:
+---+---+---+---+---+---+---+---+
Where:
- * b0 is the "SET" bit
+
+ * b0 is the ``SET`` bit
+
- it MUST be set if device_setup is initialized
- * b1 is the "DTS" bit
+
+ * b1 is the ``DTS`` bit
+
- it is set only for Digital output with DTS/AC3
- this setup is not tested
+
* b2 is the Rate selection flag
- - When set to "1" the rate range is 48.1-96kHz
+
+ - When set to ``1`` the rate range is 48.1-96kHz
- Otherwise the sample rate range is 8-48kHz
+
* b3 is the bit depth selection flag
- - When set to "1" samples are 24bits long
+
+ - When set to ``1`` samples are 24bits long
- Otherwise they are 16bits long
- Note that b2 implies b3 as the 96kHz mode is only supported for 24 bits
samples
+
* b4 is the Digital input flag
- - When set to "1" the device assumes that an active digital source is
+
+ - When set to ``1`` the device assumes that an active digital source is
connected
- You shouldn't enable Di if no source is seen on the port (this leads to
synchronization issues)
- b4 is implied by b2 (since only one port is enabled at a time no synch
error can occur)
- * b5 to b7 are reserved for future uses, and must be set to "0"
+
+ * b5 to b7 are reserved for future uses, and must be set to ``0``
+
- might become Ao, Do, Ai, for b7, b6, b4 respectively
Caution:
+
* there is no check on the value you will give to device_setup
+
- for instance choosing 0x05 (16bits 96kHz) will fall back to 0x09 since
b2 implies b3. But _there_will_be_no_warning_ in /var/log/messages
+
* Hardware constraints due to the USB bus limitation aren't checked
+
- choosing b2 will prepare all interfaces for 24bits/96kHz but you'll
only be able to use one at the same time
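+
+As an illustration only (the macro names below are made up for this sketch
+and are not part of any driver header), the device_setup values used earlier
+in this document can be reconstructed in C from the bit layout above::
+
+	#define AP_SET   0x01	/* b0: must always be set */
+	#define AP_DTS   0x02	/* b1: AC3/DTS passthru on Do */
+	#define AP_96K   0x04	/* b2: 48.1-96kHz rate range (implies 24 bits) */
+	#define AP_24B   0x08	/* b3: 24-bit samples */
+	#define AP_DI    0x10	/* b4: active digital source on Di */
+
+	/* 16-bit modes simply omit AP_24B (0x01 and 0x11) */
+	unsigned char setup_24_48    = AP_SET | AP_24B;            /* 0x09 */
+	unsigned char setup_24_48_di = AP_SET | AP_24B | AP_DI;    /* 0x19 */
+	unsigned char setup_24_96    = AP_SET | AP_96K | AP_24B;   /* 0x0D */
+	unsigned char setup_dts      = AP_SET | AP_DTS;            /* 0x03 */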
-3.2.3.2 - USB implementation details for this device
+USB implementation details for this device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You may safely skip this section if you're not interested in driver
hacking.
@@ -319,46 +393,72 @@ data I got by usb-snooping the windows and Linux drivers.
The M-Audio Audiophile USB has 7 USB Interfaces:
a "USB interface":
+
* USB Interface nb.0
* USB Interface nb.1
+
- Audio Control function
+
* USB Interface nb.2
+
- Analog Output
+
* USB Interface nb.3
+
- Digital Output
+
* USB Interface nb.4
+
- Analog Input
+
* USB Interface nb.5
+
- Digital Input
+
* USB Interface nb.6
+
- MIDI interface compliant with the MIDIMAN quirk
Each interface has 5 altsettings (AltSet 1,2,3,4,5) except:
+
* Interface 3 (Digital Out) has an extra Alset nb.6
* Interface 5 (Digital In) does not have Alset nb.3 and 5
Here is a short description of the AltSettings capabilities:
- * AltSettings 1 corresponds to
+
+* AltSettings 1 corresponds to
+
- 24-bit depth, 48.1-96kHz sample mode
- Adaptive playback (Ao and Do), Synch capture (Ai), or Asynch capture (Di)
- * AltSettings 2 corresponds to
+
+* AltSettings 2 corresponds to
+
- 24-bit depth, 8-48kHz sample mode
- Asynch capture and playback (Ao,Ai,Do,Di)
- * AltSettings 3 corresponds to
+
+* AltSettings 3 corresponds to
+
- 24-bit depth, 8-48kHz sample mode
- Synch capture (Ai) and Adaptive playback (Ao,Do)
- * AltSettings 4 corresponds to
+
+* AltSettings 4 corresponds to
+
- 16-bit depth, 8-48kHz sample mode
- Asynch capture and playback (Ao,Ai,Do,Di)
- * AltSettings 5 corresponds to
+
+* AltSettings 5 corresponds to
+
- 16-bit depth, 8-48kHz sample mode
- Synch capture (Ai) and Adaptive playback (Ao,Do)
- * AltSettings 6 corresponds to
+
+* AltSettings 6 corresponds to
+
- 16-bit depth, 8-48kHz sample mode
- Synch playback (Do), audio format type III IEC1937_AC-3
In order to ensure a correct initialization of the device, the driver
-_must_know_ how the device will be used:
+*must know* how the device will be used:
+
* if DTS is chosen, only Interface 2 with AltSet nb.6 must be
registered
* if 96KHz only AltSets nb.1 of each interface must be selected
@@ -371,20 +471,21 @@ _must_know_ how the device will be used:
When device_setup is given as a parameter to the snd-usb-audio module, the
parse_audio_endpoints function uses a quirk called
-"audiophile_skip_setting_quirk" in order to prevent AltSettings not
+``audiophile_skip_setting_quirk`` in order to prevent AltSettings not
corresponding to device_setup from being registered in the driver.
-4 - Audiophile USB and Jack support
-===================================
+Audiophile USB and Jack support
+===============================
This section deals with support of the Audiophile USB device in Jack.
There are 2 main potential issues when using Jackd with the device:
+
* support for Big-Endian devices in 24-bit modes
* support for 4-in / 4-out channels
-4.1 - Direct support in Jackd
------------------------------
+Direct support in Jackd
+-----------------------
Jack supports big endian devices only in recent versions (thanks to
Andreas Steinmetz for his first big-endian patch). I can't remember
@@ -396,29 +497,35 @@ are now Little Endians ;-) ).
You can run jackd with the following command for playback with Ao and
record with Ai:
+::
+
% jackd -R -dalsa -Phw:1,0 -r48000 -p128 -n2 -D -Chw:1,1
-4.2 - Using Alsa plughw
------------------------
+Using Alsa plughw
+-----------------
+
If you don't have a recent Jackd installed, you can downgrade to using
-the Alsa "plug" converter.
+the Alsa ``plug`` converter.
For instance here is one way to run Jack with 2 playback channels on Ao and 2
capture channels from Ai:
+::
+
% jackd -R -dalsa -dplughw:1 -r48000 -p256 -n2 -D -Cplughw:1,1
However you may see the following warning message:
-"You appear to be using the ALSA software "plug" layer, probably a result of
-using the "default" ALSA device. This is less efficient than it could be.
-Consider using a hardware device instead rather than using the plug layer."
+ You appear to be using the ALSA software "plug" layer, probably a result of
+ using the "default" ALSA device. This is less efficient than it could be.
+ Consider using a hardware device instead rather than using the plug layer.
-4.3 - Getting 2 input and/or output interfaces in Jack
-------------------------------------------------------
+Getting 2 input and/or output interfaces in Jack
+------------------------------------------------
As you can see, starting the Jack server this way will only enable 1 stereo
input (Di or Ai) and 1 stereo output (Ao or Do).
This is due to the following restrictions:
+
* Jack can only open one capture device and one playback device at a time
* The Audiophile USB is seen as 2 (or three) Alsa devices: hw:1,0, hw:1,1
(and optionally hw:1,2)
@@ -432,6 +539,7 @@ It is related to another device (ice1712) but can be adapted to suit
the Audiophile USB.
Enabling multiple Audiophile USB interfaces for Jackd will certainly require:
+
* Making sure your Jackd version has the MMAP_COMPLEX patch (see the ice1712 page)
* (maybe) patching the alsa-lib/src/pcm/pcm_multi.c file (see the ice1712 page)
* define a multi device (combination of hw:1,0 and hw:1,1) in your .asoundrc
diff --git a/Documentation/sound/alsa/Bt87x.txt b/Documentation/sound/cards/bt87x.rst
index f158cde8b065..912732d3ef9e 100644
--- a/Documentation/sound/alsa/Bt87x.txt
+++ b/Documentation/sound/cards/bt87x.rst
@@ -1,18 +1,23 @@
+=================
+ALSA BT87x Driver
+=================
+
Intro
=====
You might have noticed that the bt878 grabber cards have actually
-_two_ PCI functions:
+*two* PCI functions:
+::
-$ lspci
-[ ... ]
-00:0a.0 Multimedia video controller: Brooktree Corporation Bt878 (rev 02)
-00:0a.1 Multimedia controller: Brooktree Corporation Bt878 (rev 02)
-[ ... ]
+ $ lspci
+ [ ... ]
+ 00:0a.0 Multimedia video controller: Brooktree Corporation Bt878 (rev 02)
+ 00:0a.1 Multimedia controller: Brooktree Corporation Bt878 (rev 02)
+ [ ... ]
The first does video, it is backward compatible to the bt848. The second
does audio. snd-bt87x is a driver for the second function. It's a sound
-driver which can be used for recording sound (and _only_ recording, no
+driver which can be used for recording sound (and *only* recording, no
playback). As most TV cards come with a short cable which can be plugged
into your sound card's line-in, you probably don't need this driver if all
you want to do is just watch TV...
@@ -30,9 +35,9 @@ The driver is now stable. However, it doesn't know about many TV cards,
and it refuses to load for cards it doesn't know.
If the driver complains ("Unknown TV card found, the audio driver will
-not load"), you can specify the load_all=1 option to force the driver to
+not load"), you can specify the ``load_all=1`` option to force the driver to
try to use the audio capture function of your card. If the frequency of
-recorded data is not right, try to specify the digital_rate option with
+recorded data is not right, try to specify the ``digital_rate`` option with
other values than the default 32000 (often it's 44100 or 64000).
If you have an unknown card, please mail the ID and board name to
diff --git a/Documentation/sound/alsa/CMIPCI.txt b/Documentation/sound/cards/cmipci.rst
index 4e36e6e809ca..9ea1de6ec4ce 100644
--- a/Documentation/sound/alsa/CMIPCI.txt
+++ b/Documentation/sound/cards/cmipci.rst
@@ -1,7 +1,8 @@
- Brief Notes on C-Media 8338/8738/8768/8770 Driver
- =================================================
+=================================================
+Brief Notes on C-Media 8338/8738/8768/8770 Driver
+=================================================
- Takashi Iwai <tiwai@suse.de>
+Takashi Iwai <tiwai@suse.de>
Front/Rear Multi-channel Playback
@@ -30,19 +31,20 @@ The rear output can be heard only when "Four Channel Mode" switch is
disabled. Otherwise no signal will be routed to the rear speakers.
As default it's turned on.
-*** WARNING ***
-When "Four Channel Mode" switch is off, the output from rear speakers
-will be FULL VOLUME regardless of Master and PCM volumes.
-This might damage your audio equipment. Please disconnect speakers
-before your turn off this switch.
-*** WARNING ***
+.. WARNING::
+ When "Four Channel Mode" switch is off, the output from rear speakers
+ will be FULL VOLUME regardless of Master and PCM volumes [#]_.
+ This might damage your audio equipment. Please disconnect speakers
+ before you turn off this switch.
-[ Well.. I once got the output with correct volume (i.e. same with the
+
+.. [#]
+ Well.. I once got the output with correct volume (i.e. the same as the
front one) and was so excited. It was even with "Four Channel" bit
on and "double DAC" mode. Actually I could hear separate 4 channels
from front and rear speakers! But.. after reboot, all was gone.
It's a great pity that I didn't save the register dump at that
- time.. Maybe there is an unknown register to achieve this... ]
+ time.. Maybe there is an unknown register to achieve this...
If your card has an extra output jack for the rear output, the rear
playback should be routed there as default. If not, there is a
@@ -73,12 +75,14 @@ cannot operate with full-duplex.
The 4.0 and 5.1 modes are defined as the pcm "surround40" and "surround51"
in alsa-lib. For example, you can play a WAV file with 6 channels like
+::
% aplay -Dsurround51 sixchannels.wav
For programming the 4/6 channel playback, you need to specify the PCM
channels as you like and set the format S16LE. For example, for playback
with 4 channels,
+::
snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
// or mmap if you like
@@ -89,13 +93,15 @@ and use the interleaved 4 channel data.
There are some control switches affecting to the speaker connections:
-"Line-In Mode" - an enum control to change the behavior of line-in
+Line-In Mode
+ an enum control to change the behavior of line-in
jack. Either "Line-In", "Rear Output" or "Bass Output" can
be selected. The last item is available only with model 039
or newer.
When "Rear Output" is chosen, the surround channels 3 and 4
are output to line-in jack.
-"Mic-In Mode" - an enum control to change the behavior of mic-in
+Mic-In Mode
+ an enum control to change the behavior of mic-in
jack. Either "Mic-In" or "Center/LFE Output" can be
selected.
When "Center/LFE Output" is chosen, the center and bass
@@ -111,11 +117,14 @@ The SPDIF playback and capture are done via the third PCM device
(hw:0,2). Usually this is assigned to the PCM device "spdif".
The available rates are 44100 and 48000 Hz.
For playback with aplay, you can run like below:
+::
% aplay -Dhw:0,2 foo.wav
or
+::
+
% aplay -Dspdif foo.wav
24bit format is also supported experimentally.
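
As a slightly fuller sketch of the 4-channel programming described above (the
device name ``surround40``, the 48 kHz rate and the bare-bones error handling
are illustrative assumptions, not requirements of the driver)::

  #include <alsa/asoundlib.h>

  int main(void)
  {
      snd_pcm_t *pcm;
      snd_pcm_hw_params_t *hw;

      /* "surround40" is an assumed device name; use whatever PCM you need */
      if (snd_pcm_open(&pcm, "surround40", SND_PCM_STREAM_PLAYBACK, 0) < 0)
          return 1;

      snd_pcm_hw_params_alloca(&hw);
      snd_pcm_hw_params_any(pcm, hw);
      /* interleaved access and S16LE samples, as described above */
      snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
      snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16_LE);
      /* four channels; use 6 for the 5.1 case */
      snd_pcm_hw_params_set_channels(pcm, hw, 4);
      snd_pcm_hw_params_set_rate(pcm, hw, 48000, 0);
      if (snd_pcm_hw_params(pcm, hw) < 0)
          return 1;

      /* ... feed interleaved 4-channel frames with snd_pcm_writei() ... */

      snd_pcm_close(pcm);
      return 0;
  }
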
@@ -140,31 +149,40 @@ off. (Also don't forget to turn on "IEC958 Output Switch", too.)
Additionally there are relevant control switches:
-"IEC958 Mix Analog" - Mix analog PCM playback and FM-OPL/3 streams and
+IEC958 Mix Analog
+ Mix analog PCM playback and FM-OPL/3 streams and
output through SPDIF. This switch appears only on old chip
models (CM8738 033 and 037).
+
Note: without this control you can output PCM to SPDIF.
This is "mixing" of streams, so e.g. it's not for AC3 output
(see the next section).
-"IEC958 In Select" - Select SPDIF input, the internal CD-in (false)
+IEC958 In Select
+ Select SPDIF input, the internal CD-in (false)
and the external input (true).
-"IEC958 Loop" - SPDIF input data is loop back into SPDIF
+IEC958 Loop
+ SPDIF input data is looped back into SPDIF
output (aka bypass)
-"IEC958 Copyright" - Set the copyright bit.
+IEC958 Copyright
+ Set the copyright bit.
-"IEC958 5V" - Select 0.5V (coax) or 5V (optical) interface.
+IEC958 5V
+ Select 0.5V (coax) or 5V (optical) interface.
On some cards this doesn't work and you need to change the
configuration with hardware dip-switch.
-"IEC958 In Monitor" - SPDIF input is routed to DAC.
+IEC958 In Monitor
+ SPDIF input is routed to DAC.
-"IEC958 In Phase Inverse" - Set SPDIF input format as inverse.
+IEC958 In Phase Inverse
+ Set SPDIF input format as inverse.
[FIXME: this doesn't work on all chips..]
-"IEC958 In Valid" - Set input validity flag detection.
+IEC958 In Valid
+ Set input validity flag detection.
Note: When "PCM Playback Switch" is on, you'll hear the digital output
stream through analog line-out.
@@ -217,7 +235,7 @@ to enable MIDI support. Valid I/O ports are 0x300, 0x310, 0x320 and
With CMI8738 and newer chips, the MIDI interface is enabled by default
and the driver automatically chooses a port address.
-There is _no_ hardware wavetable function on this chip (except for
+There is *no* hardware wavetable function on this chip (except for
OPL3 synth below).
What's said as MIDI synth on Windows is a software synthesizer
emulation. On Linux use TiMidity or other softsynth program for
diff --git a/Documentation/sound/alsa/emu10k1-jack.txt b/Documentation/sound/cards/emu10k1-jack.rst
index 751d45036a05..6597f1ea83f0 100644
--- a/Documentation/sound/alsa/emu10k1-jack.txt
+++ b/Documentation/sound/cards/emu10k1-jack.rst
@@ -1,3 +1,7 @@
+=================================================================
+Low latency, multichannel audio with JACK and the emu10k1/emu10k2
+=================================================================
+
This document is a guide to using the emu10k1 based devices with JACK for low
latency, multichannel recording functionality. All of my recent work to allow
Linux users to use the full capabilities of their hardware has been inspired
@@ -7,8 +11,6 @@ power of this hardware.
http://www.kxproject.com
- Lee Revell, 2005.03.30
-Low latency, multichannel audio with JACK and the emu10k1/emu10k2
------------------------------------------------------------------
Until recently, emu10k1 users on Linux did not have access to the same low
latency, multichannel features offered by the "kX ASIO" feature of their
@@ -23,14 +25,15 @@ select the correct device for JACK to use. Actually, for qjackctl users it's
fairly self explanatory - select Duplex, then for capture and playback select
the multichannel devices, set the in and out channels to 16, and the sample
rate to 48000Hz. The command line looks like this:
+::
-/usr/local/bin/jackd -R -dalsa -r48000 -p64 -n2 -D -Chw:0,2 -Phw:0,3 -S
+ /usr/local/bin/jackd -R -dalsa -r48000 -p64 -n2 -D -Chw:0,2 -Phw:0,3 -S
This will give you 16 input ports and 16 output ports.
The 16 output ports map onto the 16 FX buses (or the first 16 of 64, for the
Audigy). The mapping from FX bus to physical output is described in
-SB-Live-mixer.txt (or Audigy-mixer.txt).
+sb-live-mixer.rst (or audigy-mixer.rst).
The 16 input ports are connected to the 16 physical inputs. Contrary to
popular belief, all emu10k1 cards are multichannel cards. Which of these
@@ -49,10 +52,11 @@ This chart, borrowed from kxfxlib/da_asio51.cpp, describes the mapping of JACK
ports to FXBUS2 (multitrack recording input) and EXTOUT (physical output)
channels.
-/*JACK (& ASIO) mappings on 10k1 5.1 SBLive cards:
---------------------------------------------
+JACK (& ASIO) mappings on 10k1 5.1 SBLive cards:
+
+============== ======== ============
JACK Epilog FXBUS2(nr)
---------------------------------------------
+============== ======== ============
capture_1 asio14 FXBUS2(0xe)
capture_2 asio15 FXBUS2(0xf)
capture_3 asio0 FXBUS2(0x0)
@@ -69,6 +73,6 @@ capture_13 asio10 FXBUS2(0xa)
capture_14 asio11 FXBUS2(0xb)
capture_15 asio12 FXBUS2(0xc)
capture_16 asio13 FXBUS2(0xd)
-*/
+============== ======== ============
TODO: describe use of ld10k1/qlo10k1 in conjunction with JACK
diff --git a/Documentation/sound/alsa/hdspm.txt b/Documentation/sound/cards/hdspm.rst
index 7ba31948dea7..5373e51ed076 100644
--- a/Documentation/sound/alsa/hdspm.txt
+++ b/Documentation/sound/cards/hdspm.rst
@@ -1,21 +1,24 @@
+=======================================
Software Interface ALSA-DSP MADI Driver
+=======================================
(translated from German, so no good English ;-),
-2004 - winfried ritsch
-
+2004 - winfried ritsch
- Full functionality has been added to the driver. Since some of
- the Controls and startup-options are ALSA-Standard and only the
- special Controls are described and discussed below.
+Full functionality has been added to the driver. Some of the
+Controls and startup-options are ALSA-Standard, so only the
+special Controls are described and discussed below.
- hardware functionality:
+Hardware functionality
+======================
- Audio transmission:
+Audio transmission
+------------------
- number of channels -- depends on transmission mode
+* number of channels -- depends on transmission mode
The number of channels chosen is from 1..Nmax. The reason to
use for a lower number of channels is only resource allocation,
@@ -23,31 +26,34 @@ Software Interface ALSA-DSP MADI Driver
allocated. So also the throughput of the PCI system can be
scaled. (Only important for low performance boards).
- Single Speed -- 1..64 channels
+* Single Speed -- 1..64 channels
+.. note::
(Note: Choosing the 56channel mode for transmission or as
receiver, only 56 are transmitted/received over the MADI, but
all 64 channels are available for the mixer, so channel count
for the driver)
- Double Speed -- 1..32 channels
+* Double Speed -- 1..32 channels
+.. note::
Note: Choosing the 56-channel mode for
transmission/receive-mode , only 28 are transmitted/received
over the MADI, but all 32 channels are available for the mixer,
so channel count for the driver
- Quad Speed -- 1..16 channels
+* Quad Speed -- 1..16 channels
- Note: Choosing the 56-channel mode for
+.. note::
+ Choosing the 56-channel mode for
transmission/receive-mode , only 14 are transmitted/received
over the MADI, but all 16 channels are available for the mixer,
so channel count for the driver
- Format -- signed 32 Bit Little Endian (SNDRV_PCM_FMTBIT_S32_LE)
+* Format -- signed 32 Bit Little Endian (SNDRV_PCM_FMTBIT_S32_LE)
- Sample Rates --
+* Sample Rates --
Single Speed -- 32000, 44100, 48000
@@ -55,14 +61,13 @@ Software Interface ALSA-DSP MADI Driver
Quad Speed -- 128000, 176400, 192000 (untested)
- access-mode -- MMAP (memory mapped), Not interleaved
- (PCM_NON-INTERLEAVED)
+* access-mode -- MMAP (memory mapped), Not interleaved (PCM_NON-INTERLEAVED)
- buffer-sizes -- 64,128,256,512,1024,2048,8192 Samples
+* buffer-sizes -- 64,128,256,512,1024,2048,8192 Samples
- fragments -- 2
+* fragments -- 2
- Hardware-pointer -- 2 Modi
+* Hardware-pointer -- 2 Modi
The Card supports the readout of the actual Buffer-pointer,
@@ -74,53 +79,54 @@ Software Interface ALSA-DSP MADI Driver
precise-pointer.
+.. hint::
(Hint: Experimenting I found that the pointer is maximum 64 to
large never to small. So if you subtract 64 you always have a
safe pointer for writing, which is used on this mode inside
ALSA. In theory now you can get now a latency as low as 16
Samples, which is a quarter of the interrupt possibilities.)
- Precise Pointer -- off
+ * Precise Pointer -- off
interrupt used for pointer-calculation
-
- Precise Pointer -- on
+
+ * Precise Pointer -- on
hardware pointer used.
- Controller:
-
+Controller
+----------
- Since DSP-MADI-Mixer has 8152 Fader, it does not make sense to
- use the standard mixer-controls, since this would break most of
- (especially graphic) ALSA-Mixer GUIs. So Mixer control has be
- provided by a 2-dimensional controller using the
- hwdep-interface.
+Since DSP-MADI-Mixer has 8152 Fader, it does not make sense to
+use the standard mixer-controls, since this would break most of
+(especially graphic) ALSA-Mixer GUIs. So Mixer control has to be
+provided by a 2-dimensional controller using the
+hwdep-interface.
- Also all 128+256 Peak and RMS-Meter can be accessed via the
- hwdep-interface. Since it could be a performance problem always
- copying and converting Peak and RMS-Levels even if you just need
- one, I decided to export the hardware structure, so that of
- needed some driver-guru can implement a memory-mapping of mixer
- or peak-meters over ioctl, or also to do only copying and no
- conversion. A test-application shows the usage of the controller.
-
- Latency Controls --- not implemented !!!
+Also all 128+256 Peak and RMS-Meter can be accessed via the
+hwdep-interface. Since it could be a performance problem always
+copying and converting Peak and RMS-Levels even if you just need
+one, I decided to export the hardware structure, so that of
+needed some driver-guru can implement a memory-mapping of mixer
+or peak-meters over ioctl, or also to do only copying and no
+conversion. A test-application shows the usage of the controller.
+* Latency Controls --- not implemented !!!
+.. note::
Note: Within the windows-driver the latency is accessible of a
control-panel, but buffer-sizes are controlled with ALSA from
hwparams-calls and should not be changed in run-state, I did not
implement it here.
- System Clock -- suspended !!!!
-
- Name -- "System Clock Mode"
+* System Clock -- suspended !!!!
- Access -- Read Write
-
- Values -- "Master" "Slave"
+ * Name -- "System Clock Mode"
+ * Access -- Read Write
+
+ * Values -- "Master" "Slave"
+.. note::
!!!! This is a hardware-function but is in conflict with the
Clock-source controller, which is a kind of ALSA-standard. I
makes sense to set the card to a special mode (master at some
@@ -128,106 +134,107 @@ Software Interface ALSA-DSP MADI Driver
a studio should have working synchronisations setup. So use
Clock-source-controller instead !!!!
- Clock Source
+* Clock Source
- Name -- "Sample Clock Source"
+ * Name -- "Sample Clock Source"
- Access -- Read Write
+ * Access -- Read Write
- Values -- "AutoSync", "Internal 32.0 kHz", "Internal 44.1 kHz",
- "Internal 48.0 kHz", "Internal 64.0 kHz", "Internal 88.2 kHz",
- "Internal 96.0 kHz"
+ * Values -- "AutoSync", "Internal 32.0 kHz", "Internal 44.1 kHz",
+ "Internal 48.0 kHz", "Internal 64.0 kHz", "Internal 88.2 kHz",
+ "Internal 96.0 kHz"
Choose between Master at a specific Frequency and so also the
Speed-mode or Slave (Autosync). Also see "Preferred Sync Ref"
-
+.. warning::
!!!! This is no pure hardware function but was implemented by
ALSA by some ALSA-drivers before, so I use it also. !!!
- Preferred Sync Ref
+* Preferred Sync Ref
- Name -- "Preferred Sync Reference"
+ * Name -- "Preferred Sync Reference"
- Access -- Read Write
+ * Access -- Read Write
- Values -- "Word" "MADI"
+ * Values -- "Word" "MADI"
Within the Auto-sync-Mode the preferred Sync Source can be
chosen. If it is not available another is used if possible.
+.. note::
Note: Since MADI has a much higher bit-rate than word-clock, the
card should synchronise better in MADI Mode. But since the
RME-PLL is very good, there are almost no problems with
word-clock too. I never found a difference.
- TX 64 channel ---
+* TX 64 channel
- Name -- "TX 64 channels mode"
+ * Name -- "TX 64 channels mode"
- Access -- Read Write
+ * Access -- Read Write
- Values -- 0 1
+ * Values -- 0 1
Using 64-channel-modus (1) or 56-channel-modus for
MADI-transmission (0).
+.. note::
Note: This control is for output only. Input-mode is detected
automatically from hardware sending MADI.
- Clear TMS ---
+* Clear TMS
- Name -- "Clear Track Marker"
+ * Name -- "Clear Track Marker"
- Access -- Read Write
+ * Access -- Read Write
- Values -- 0 1
+ * Values -- 0 1
Don't use to lower 5 Audio-bits on AES as additional Bits.
- Safe Mode oder Auto Input ---
+* Safe Mode or Auto Input
- Name -- "Safe Mode"
+ * Name -- "Safe Mode"
- Access -- Read Write
+ * Access -- Read Write
- Values -- 0 1
-
- (default on)
+ * Values -- 0 1 (default on)
If on (1), then if either the optical or coaxial connection
has a failure, there is a takeover to the working one, with no
sample failure. Its only useful if you use the second as a
backup connection.
- Input ---
+* Input
- Name -- "Input Select"
+ * Name -- "Input Select"
- Access -- Read Write
+ * Access -- Read Write
- Values -- optical coaxial
+ * Values -- optical coaxial
Choosing the Input, optical or coaxial. If Safe-mode is active,
this is the preferred Input.
--------------- Mixer ----------------------
+Mixer
+-----
- Mixer
+* Mixer
- Name -- "Mixer"
+ * Name -- "Mixer"
- Access -- Read Write
+ * Access -- Read Write
- Values - <channel-number 0-127> <Value 0-65535>
+ * Values - <channel-number 0-127> <Value 0-65535>
Here as a first value the channel-index is taken to get/set the
@@ -235,40 +242,41 @@ Software Interface ALSA-DSP MADI Driver
fader and 64-127 the playback to outputs fader. Value 0
is channel muted 0 and 32768 an amplification of 1.
- Chn 1-64
+* Chn 1-64
fast mixer for the ALSA-mixer utils. The diagonal of the
mixer-matrix is implemented from playback to output.
- Line Out
+* Line Out
- Name -- "Line Out"
+ * Name -- "Line Out"
- Access -- Read Write
+ * Access -- Read Write
- Values -- 0 1
+ * Values -- 0 1
Switching on and off the analog out, which has nothing to do
with mixing or routing. the analog outs reflects channel 63,64.
---- information (only read access):
+Information (only read access)
+------------------------------
- Sample Rate
+* Sample Rate
- Name -- "System Sample Rate"
+ * Name -- "System Sample Rate"
- Access -- Read-only
+ * Access -- Read-only
getting the sample rate.
- External Rate measured
+* External Rate measured
- Name -- "External Rate"
+ * Name -- "External Rate"
- Access -- Read only
+ * Access -- Read only
Should be "Autosync Rate", but Name used is
@@ -276,79 +284,86 @@ Software Interface ALSA-DSP MADI Driver
reported.
- MADI Sync Status
+* MADI Sync Status
- Name -- "MADI Sync Lock Status"
+ * Name -- "MADI Sync Lock Status"
- Access -- Read
+ * Access -- Read
- Values -- 0,1,2
+ * Values -- 0,1,2
MADI-Input is 0=Unlocked, 1=Locked, or 2=Synced.
- Word Clock Sync Status
+* Word Clock Sync Status
- Name -- "Word Clock Lock Status"
+ * Name -- "Word Clock Lock Status"
- Access -- Read
+ * Access -- Read
- Values -- 0,1,2
+ * Values -- 0,1,2
Word Clock Input is 0=Unlocked, 1=Locked, or 2=Synced.
- AutoSync
+* AutoSync
- Name -- "AutoSync Reference"
+ * Name -- "AutoSync Reference"
- Access -- Read
+ * Access -- Read
- Values -- "WordClock", "MADI", "None"
+ * Values -- "WordClock", "MADI", "None"
Sync-Reference is either "WordClock", "MADI" or none.
- RX 64ch --- noch nicht implementiert
+* RX 64ch --- not yet implemented
MADI-Receiver is in 64 channel mode oder 56 channel mode.
- AB_inp --- not tested
+* AB_inp --- not tested
Used input for Auto-Input.
- actual Buffer Position --- not implemented
+* actual Buffer Position --- not implemented
!!! this is a ALSA internal function, so no control is used !!!
-Calling Parameter:
+Calling Parameter
+=================
+
+* index int array (min = 1, max = 8)
- index int array (min = 1, max = 8),
- "Index value for RME HDSPM interface." card-index within ALSA
+ Index value for RME HDSPM interface; card index within ALSA.
note: ALSA-standard
- id string array (min = 1, max = 8),
- "ID string for RME HDSPM interface."
+* id string array (min = 1, max = 8)
+
+ ID string for RME HDSPM interface.
note: ALSA-standard
- enable int array (min = 1, max = 8),
- "Enable/disable specific HDSPM sound-cards."
+* enable int array (min = 1, max = 8)
+
+ Enable/disable specific HDSPM sound-cards.
note: ALSA-standard
- precise_ptr int array (min = 1, max = 8),
- "Enable precise pointer, or disable."
+* precise_ptr int array (min = 1, max = 8)
+ Enable precise pointer, or disable.
+
+.. note::
note: Use only when the application supports this (which is a special case).
- line_outs_monitor int array (min = 1, max = 8),
- "Send playback streams to analog outs by default."
+* line_outs_monitor int array (min = 1, max = 8)
+ Send playback streams to analog outs by default.
+.. note::
note: each playback channel is mixed to the same numbered output
channel (routed). This is against the ALSA-convention, where all
channels have to be muted on after loading the driver, but was
@@ -356,7 +371,9 @@ Calling Parameter:
- enable_monitor int array (min = 1, max = 8),
- "Enable Analog Out on Channel 63/64 by default."
+* enable_monitor int array (min = 1, max = 8)
+
+ Enable Analog Out on Channel 63/64 by default.
+.. note::
note: here the analog output is enabled (but not routed).
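
For reference, a minimal alsa-lib sketch of reading one of the read-only
status controls listed above, here ``System Sample Rate`` (the card name
``hw:0`` and the ``CARD`` element interface are assumptions; the driver may
register the element on a different interface)::

  #include <stdio.h>
  #include <alsa/asoundlib.h>

  int main(void)
  {
      snd_ctl_t *ctl;
      snd_ctl_elem_value_t *val;

      if (snd_ctl_open(&ctl, "hw:0", 0) < 0)   /* card 0 is an assumption */
          return 1;

      snd_ctl_elem_value_alloca(&val);
      /* identify the element by interface and name; CARD is assumed here */
      snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_CARD);
      snd_ctl_elem_value_set_name(val, "System Sample Rate");

      if (snd_ctl_elem_read(ctl, val) == 0)
          printf("System Sample Rate: %ld\n",
                 snd_ctl_elem_value_get_integer(val, 0));

      snd_ctl_close(ctl);
      return 0;
  }
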
diff --git a/Documentation/sound/alsa/img,spdif-in.txt b/Documentation/sound/cards/img-spdif-in.rst
index 8b7505785fa6..7df9f5ae2609 100644
--- a/Documentation/sound/alsa/img,spdif-in.txt
+++ b/Documentation/sound/cards/img-spdif-in.rst
@@ -1,21 +1,25 @@
+================================================
+Imagination Technologies SPDIF Input Controllers
+================================================
+
The Imagination Technologies SPDIF Input controller contains the following
controls:
-name='IEC958 Capture Mask',index=0
+* name='IEC958 Capture Mask',index=0
This control returns a mask that shows which of the IEC958 status bits
can be read using the 'IEC958 Capture Default' control.
-name='IEC958 Capture Default',index=0
+* name='IEC958 Capture Default',index=0
This control returns the status bits contained within the SPDIF stream that
is being received. The 'IEC958 Capture Mask' shows which bits can be read
from this control.
-name='SPDIF In Multi Frequency Acquire',index=0
-name='SPDIF In Multi Frequency Acquire',index=1
-name='SPDIF In Multi Frequency Acquire',index=2
-name='SPDIF In Multi Frequency Acquire',index=3
+* name='SPDIF In Multi Frequency Acquire',index=0
+* name='SPDIF In Multi Frequency Acquire',index=1
+* name='SPDIF In Multi Frequency Acquire',index=2
+* name='SPDIF In Multi Frequency Acquire',index=3
This control is used to attempt acquisition of up to four different sample
rates. The active rate can be obtained by reading the 'SPDIF In Lock Frequency'
@@ -29,21 +33,21 @@ four sample rates set here.
If less than four rates are required, the same rate can be specified more than
once
-name='SPDIF In Lock Frequency',index=0
+* name='SPDIF In Lock Frequency',index=0
This control returns the active capture rate, or 0 if a lock has not been
acquired
-name='SPDIF In Lock TRK',index=0
+* name='SPDIF In Lock TRK',index=0
This control is used to modify the locking/jitter rejection characteristics
of the block. Larger values increase the locking range, but reduce jitter
rejection.
-name='SPDIF In Lock Acquire Threshold',index=0
+* name='SPDIF In Lock Acquire Threshold',index=0
This control is used to change the threshold at which a lock is acquired.
-name='SPDIF In Lock Release Threshold',index=0
+* name='SPDIF In Lock Release Threshold',index=0
This control is used to change the threshold at which a lock is released.
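
A hedged sketch of driving the acquisition controls described above from user
space: write one candidate rate into each ``SPDIF In Multi Frequency Acquire``
index, then read ``SPDIF In Lock Frequency`` back (the card name, the element
interface and the chosen rates are all assumptions)::

  #include <stdio.h>
  #include <alsa/asoundlib.h>

  int main(void)
  {
      /* candidate rates are arbitrary examples */
      static const long rates[4] = { 32000, 44100, 48000, 96000 };
      snd_ctl_t *ctl;
      snd_ctl_elem_value_t *val;
      int i;

      if (snd_ctl_open(&ctl, "hw:0", 0) < 0)
          return 1;
      snd_ctl_elem_value_alloca(&val);

      /* program one candidate rate per 'Acquire' control index */
      for (i = 0; i < 4; i++) {
          snd_ctl_elem_value_clear(val);
          snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_PCM);
          snd_ctl_elem_value_set_name(val,
                  "SPDIF In Multi Frequency Acquire");
          snd_ctl_elem_value_set_index(val, i);
          snd_ctl_elem_value_set_integer(val, 0, rates[i]);
          snd_ctl_elem_write(ctl, val);
      }

      /* read back the active rate; 0 means no lock was acquired */
      snd_ctl_elem_value_clear(val);
      snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_PCM);
      snd_ctl_elem_value_set_name(val, "SPDIF In Lock Frequency");
      if (snd_ctl_elem_read(ctl, val) == 0)
          printf("locked at %ld Hz\n",
                 snd_ctl_elem_value_get_integer(val, 0));

      snd_ctl_close(ctl);
      return 0;
  }
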
diff --git a/Documentation/sound/cards/index.rst b/Documentation/sound/cards/index.rst
new file mode 100644
index 000000000000..c016f8c3b88b
--- /dev/null
+++ b/Documentation/sound/cards/index.rst
@@ -0,0 +1,19 @@
+Card-Specific Information
+=========================
+
+.. toctree::
+ :maxdepth: 2
+
+ joystick
+ cmipci
+ sb-live-mixer
+ audigy-mixer
+ emu10k1-jack
+ via82xx-mixer
+ audiophile-usb
+ mixart
+ bt87x
+ maya44
+ hdspm
+ serial-u16550
+ img-spdif-in
diff --git a/Documentation/sound/alsa/Joystick.txt b/Documentation/sound/cards/joystick.rst
index ccda41b10f8a..a6e468c81d02 100644
--- a/Documentation/sound/alsa/Joystick.txt
+++ b/Documentation/sound/cards/joystick.rst
@@ -1,7 +1,10 @@
+=======================================
Analog Joystick Support on ALSA Drivers
=======================================
- Oct. 14, 2003
- Takashi Iwai <tiwai@suse.de>
+
+Oct. 14, 2003
+
+Takashi Iwai <tiwai@suse.de>
General
-------
@@ -34,44 +37,46 @@ stability and the resource management.
The following PCI drivers support the joystick natively.
- Driver Module Option Available Values
- ---------------------------------------------------------------------------
- als4000 joystick_port 0 = disable (default), 1 = auto-detect,
- manual: any address (e.g. 0x200)
- au88x0 N/A N/A
- azf3328 joystick 0 = disable, 1 = enable, -1 = auto (default)
- ens1370 joystick 0 = disable (default), 1 = enable
- ens1371 joystick_port 0 = disable (default), 1 = auto-detect,
- manual: 0x200, 0x208, 0x210, 0x218
- cmipci joystick_port 0 = disable (default), 1 = auto-detect,
- manual: any address (e.g. 0x200)
- cs4281 N/A N/A
- cs46xx N/A N/A
- es1938 N/A N/A
- es1968 joystick 0 = disable (default), 1 = enable
- sonicvibes N/A N/A
- trident N/A N/A
- via82xx(*1) joystick 0 = disable (default), 1 = enable
- ymfpci joystick_port 0 = disable (default), 1 = auto-detect,
- manual: 0x201, 0x202, 0x204, 0x205(*2)
- ---------------------------------------------------------------------------
-
- *1) VIA686A/B only
- *2) With YMF744/754 chips, the port address can be chosen arbitrarily
+============== ============= ============================================
+Driver Module Option Available Values
+============== ============= ============================================
+als4000 joystick_port 0 = disable (default), 1 = auto-detect,
+ manual: any address (e.g. 0x200)
+au88x0 N/A N/A
+azf3328 joystick 0 = disable, 1 = enable, -1 = auto (default)
+ens1370 joystick 0 = disable (default), 1 = enable
+ens1371 joystick_port 0 = disable (default), 1 = auto-detect,
+ manual: 0x200, 0x208, 0x210, 0x218
+cmipci joystick_port 0 = disable (default), 1 = auto-detect,
+ manual: any address (e.g. 0x200)
+cs4281 N/A N/A
+cs46xx N/A N/A
+es1938 N/A N/A
+es1968 joystick 0 = disable (default), 1 = enable
+sonicvibes N/A N/A
+trident N/A N/A
+via82xx [#f1]_ joystick 0 = disable (default), 1 = enable
+ymfpci joystick_port 0 = disable (default), 1 = auto-detect,
+ manual: 0x201, 0x202, 0x204, 0x205 [#f2]_
+============== ============= ============================================
+
+.. [#f1] VIA686A/B only
+.. [#f2] With YMF744/754 chips, the port address can be chosen arbitrarily
The following drivers don't support gameport natively, but there are
additional modules. Load the corresponding module to add the gameport
support.
- Driver Additional Module
- -----------------------------
- emu10k1 emu10k1-gp
- fm801 fm801-gp
- -----------------------------
+======= =================
+Driver Additional Module
+======= =================
+emu10k1 emu10k1-gp
+fm801 fm801-gp
+======= =================
Note: the "pcigame" and "cs461x" modules are for the OSS drivers only.
- These ALSA drivers (cs46xx, trident and au88x0) have the
- built-in gameport support.
+These ALSA drivers (cs46xx, trident and au88x0) have the
+built-in gameport support.
As mentioned above, ALSA PCI drivers have the built-in gameport
support, so you don't have to load ns558 module. Just load "joydev"
diff --git a/Documentation/sound/alsa/README.maya44 b/Documentation/sound/cards/maya44.rst
index 67b2ea1cc31d..bf09a584b443 100644
--- a/Documentation/sound/alsa/README.maya44
+++ b/Documentation/sound/cards/maya44.rst
@@ -1,10 +1,18 @@
-NOTE: The following is the original document of Rainer's patch that the
-current maya44 code based on. Some contents might be obsoleted, but I
-keep here as reference -- tiwai
+=================================
+Notes on Maya44 USB Audio Support
+=================================
-----------------------------------------------------------------
+.. note::
+ The following is the original document of Rainer's patch that the
+ current maya44 code is based on. Some contents might be obsolete, but
+ I keep it here as a reference -- tiwai
+
+Feb 14, 2008
+
+Rainer Zimmermann <mail@lightshed.de>
-STATE OF DEVELOPMENT:
+STATE OF DEVELOPMENT
+====================
This driver is being developed on the initiative of Piotr Makowski (oponek@gmail.com) and financed by Lars Bergmann.
Development is carried out by Rainer Zimmermann (mail@lightshed.de).
@@ -44,16 +52,17 @@ Things that do not seem to work:
- Ardour 2.1 seems to work only via JACK, not using ALSA directly or via OSS. This still needs to be tracked down.
-DRIVER DETAILS:
+DRIVER DETAILS
+==============
the following files were added:
-pci/ice1724/maya44.c - Maya44 specific code
-pci/ice1724/maya44.h
-pci/ice1724/ice1724.patch
-pci/ice1724/ice1724.h.patch - PROPOSED patch to ice1724.h (see SAMPLING RATES)
-i2c/other/wm8776.c - low-level access routines for Wolfson WM8776 codecs
-include/wm8776.h
+* pci/ice1724/maya44.c - Maya44 specific code
+* pci/ice1724/maya44.h
+* pci/ice1724/ice1724.patch
+* pci/ice1724/ice1724.h.patch - PROPOSED patch to ice1724.h (see SAMPLING RATES)
+* i2c/other/wm8776.c - low-level access routines for Wolfson WM8776 codecs
+* include/wm8776.h
Note that the wm8776.c code is meant to be card-independent and does not actually register the codec with the ALSA infrastructure.
@@ -62,25 +71,26 @@ This is done in maya44.c, mainly because some of the WM8776 controls are used in
the following files were created in pci/ice1724, simply #including the corresponding file from the alsa-kernel tree:
-wtm.h
-vt1720_mobo.h
-revo.h
-prodigy192.h
-pontis.h
-phase.h
-maya44.h
-juli.h
-aureon.h
-amp.h
-envy24ht.h
-se.h
-prodigy_hifi.h
+* wtm.h
+* vt1720_mobo.h
+* revo.h
+* prodigy192.h
+* pontis.h
+* phase.h
+* maya44.h
+* juli.h
+* aureon.h
+* amp.h
+* envy24ht.h
+* se.h
+* prodigy_hifi.h
*I hope this is the correct way to do things.*
-SAMPLING RATES:
+SAMPLING RATES
+==============
The Maya44 card (or more exactly, the Wolfson WM8776 codecs) allow a maximum sampling rate of 192 kHz for playback and 92 kHz for capture.
@@ -98,66 +108,79 @@ I propose some additional code for limiting the sampling rate when setting on a
The proposed code (currently deactivated) is in ice1712.h.patch, ice1724.c and maya44.c (in pci/ice1712).
-SOUND DEVICES:
+SOUND DEVICES
+=============
PCM devices correspond to inputs/outputs as follows (assuming Maya44 is card #0):
-hw:0,0 input - stereo, analog input 1+2
-hw:0,0 output - stereo, analog output 1+2
-hw:0,1 input - stereo, analog input 3+4 OR S/PDIF input
-hw:0,1 output - stereo, analog output 3+4 (and SPDIF out)
+* hw:0,0 input - stereo, analog input 1+2
+* hw:0,0 output - stereo, analog output 1+2
+* hw:0,1 input - stereo, analog input 3+4 OR S/PDIF input
+* hw:0,1 output - stereo, analog output 3+4 (and SPDIF out)
-NAMING OF MIXER CONTROLS:
+NAMING OF MIXER CONTROLS
+========================
(for more information about the signal flow, please refer to the block diagram on p.24 of the ESI Maya44 manual, or in the ESI windows software).
-PCM: (digital) output level for channel 1+2
-PCM 1: same for channel 3+4
+PCM
+ (digital) output level for channel 1+2
+PCM 1
+ same for channel 3+4
+
+Mic Phantom+48V
+ switch for +48V phantom power for electrostatic microphones on input 1/2.
-Mic Phantom+48V: switch for +48V phantom power for electrostatic microphones on input 1/2.
Make sure this is not turned on while any other source is connected to input 1/2.
It might damage the source and/or the maya44 card.
-Mic/Line input: if switch is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo).
+Mic/Line input
+ if switch is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo).
+
+Bypass
+ analogue bypass from ADC input to output for channel 1+2. Same as "Monitor" in the windows driver.
+Bypass 1
+ same for channel 3+4.
-Bypass: analogue bypass from ADC input to output for channel 1+2. Same as "Monitor" in the windows driver.
-Bypass 1: same for channel 3+4.
+Crossmix
+ cross-mixer from channels 1+2 to channels 3+4
+Crossmix 1
+ cross-mixer from channels 3+4 to channels 1+2
-Crossmix: cross-mixer from channels 1+2 to channels 3+4
-Crossmix 1: cross-mixer from channels 3+4 to channels 1+2
+IEC958 Output
+ switch for S/PDIF output.
-IEC958 Output: switch for S/PDIF output.
This is not supported by the ESI windows driver.
S/PDIF should output the same signal as channel 3+4. [untested!]
-Digitial output selectors:
-
+Digital output selectors
These switches allow a direct digital routing from the ADCs to the DACs.
Each switch determines where the digital input data to one of the DACs comes from.
They are not supported by the ESI windows driver.
For normal operation, they should all be set to "PCM out".
-H/W: Output source channel 1
-H/W 1: Output source channel 2
-H/W 2: Output source channel 3
-H/W 3: Output source channel 4
+H/W
+ Output source channel 1
+H/W 1
+ Output source channel 2
+H/W 2
+ Output source channel 3
+H/W 3
+ Output source channel 4
+
+H/W 4 ... H/W 9
+ unknown function, left in to enable testing.
-H/W 4 ... H/W 9: unknown function, left in to enable testing.
Possibly some of these control S/PDIF output(s).
If these turn out to be unused, they will go away in later driver versions.
Selectable values for each of the digital output selectors are:
- "PCM out" -> DAC output of the corresponding channel (default setting)
- "Input 1"...
- "Input 4" -> direct routing from ADC output of the selected input channel
-
---------
-
-Feb 14, 2008
-Rainer Zimmermann
-mail@lightshed.de
+PCM out
+ DAC output of the corresponding channel (default setting)
+Input 1 ... Input 4
+ direct routing from ADC output of the selected input channel
diff --git a/Documentation/sound/alsa/MIXART.txt b/Documentation/sound/cards/mixart.rst
index 4ee35b4fbe4a..48aba98b088f 100644
--- a/Documentation/sound/alsa/MIXART.txt
+++ b/Documentation/sound/cards/mixart.rst
@@ -1,5 +1,8 @@
- Alsa driver for Digigram miXart8 and miXart8AES/EBU soundcards
- Digigram <alsa@digigram.com>
+==============================================================
+Alsa driver for Digigram miXart8 and miXart8AES/EBU soundcards
+==============================================================
+
+Digigram <alsa@digigram.com>
GENERAL
@@ -48,11 +51,15 @@ formats are supported.
Mixer
-----
-<Master> and <Master Capture> : analog volume control of playback and capture PCM.
-<PCM 0-3> and <PCM Capture> : digital volume control of each analog substream.
-<AES 0-3> and <AES Capture> : digital volume control of each AES/EBU substream.
-<Monitoring> : Loopback from 'pcm0c' to 'pcm0p' with digital volume
-and mute control.
+<Master> and <Master Capture>
+ analog volume control of playback and capture PCM.
+<PCM 0-3> and <PCM Capture>
+ digital volume control of each analog substream.
+<AES 0-3> and <AES Capture>
+ digital volume control of each AES/EBU substream.
+<Monitoring>
+ Loopback from 'pcm0c' to 'pcm0p' with digital volume
+ and mute control.
Rem : for best audio quality try to keep a 0 attenuation on the PCM
and AES volume controls which is set by 219 in the range from 0 to 255
@@ -79,11 +86,14 @@ FIRMWARE
For loading the firmware automatically after the module is loaded, use a
install command. For example, add the following entry to
/etc/modprobe.d/mixart.conf for miXart driver:
+::
install snd-mixart /sbin/modprobe --first-time -i snd-mixart && \
/usr/bin/mixartloader
+
+
(for 2.2/2.4 kernels, add "post-install snd-mixart /usr/bin/vxloader" to
- /etc/modules.conf, instead.)
+/etc/modules.conf, instead.)
The firmware binaries are installed on /usr/share/alsa/firmware
(or /usr/local/share/alsa/firmware, depending to the prefix option of
diff --git a/Documentation/sound/alsa/SB-Live-mixer.txt b/Documentation/sound/cards/sb-live-mixer.rst
index f4b5988f450c..bcb62fc99bbb 100644
--- a/Documentation/sound/alsa/SB-Live-mixer.txt
+++ b/Documentation/sound/cards/sb-live-mixer.rst
@@ -1,6 +1,6 @@
-
- Sound Blaster Live mixer / default DSP code
- ===========================================
+===========================================
+Sound Blaster Live mixer / default DSP code
+===========================================
The EMU10K1 chips have a DSP part which can be programmed to support
@@ -12,8 +12,8 @@ The ALSA driver programs this portion of chip by default code
(can be altered later) which offers the following functionality:
-1) IEC958 (S/PDIF) raw PCM
---------------------------
+IEC958 (S/PDIF) raw PCM
+=======================
This PCM device (it's the 4th PCM device (index 3!) and first subdevice
(index 0) for a given card) allows to forward 48kHz, stereo, 16-bit
@@ -27,8 +27,8 @@ at the time.
Look to tram_poke routines in lowlevel/emu10k1/emufx.c for more details.
-2) Digital mixer controls
--------------------------
+Digital mixer controls
+======================
These controls are built using the DSP instructions. They offer extended
functionality. Only the default build-in code in the ALSA driver is described
@@ -40,317 +40,334 @@ is mentioned in multiple controls, the signal is accumulated and can be wrapped
Explanation of used abbreviations:
-DAC - digital to analog converter
-ADC - analog to digital converter
-I2S - one-way three wire serial bus for digital sound by Philips Semiconductors
- (this standard is used for connecting standalone DAC and ADC converters)
-LFE - low frequency effects (subwoofer signal)
-AC97 - a chip containing an analog mixer, DAC and ADC converters
-IEC958 - S/PDIF
-FX-bus - the EMU10K1 chip has an effect bus containing 16 accumulators.
- Each of the synthesizer voices can feed its output to these accumulators
- and the DSP microcontroller can operate with the resulting sum.
-
-
-name='Wave Playback Volume',index=0
-
+DAC
+ digital to analog converter
+ADC
+ analog to digital converter
+I2S
+ one-way three wire serial bus for digital sound by Philips Semiconductors
+ (this standard is used for connecting standalone DAC and ADC converters)
+LFE
+ low frequency effects (subwoofer signal)
+AC97
+ a chip containing an analog mixer, DAC and ADC converters
+IEC958
+ S/PDIF
+FX-bus
+ the EMU10K1 chip has an effect bus containing 16 accumulators.
+ Each of the synthesizer voices can feed its output to these accumulators
+ and the DSP microcontroller can operate with the resulting sum.
+
+
+``name='Wave Playback Volume',index=0``
+---------------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples.
The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
-name='Wave Surround Playback Volume',index=0
-
+``name='Wave Surround Playback Volume',index=0``
+------------------------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples.
The result samples are forwarded to the rear I2S DACs. These DACs operates
separately (they are not inside the AC97 codec).
-name='Wave Center Playback Volume',index=0
-
+``name='Wave Center Playback Volume',index=0``
+----------------------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples.
The result is mixed to mono signal (single channel) and forwarded to
the ??rear?? right DAC PCM slot of the AC97 codec.
-name='Wave LFE Playback Volume',index=0
-
+``name='Wave LFE Playback Volume',index=0``
+-------------------------------------------
This control is used to attenuate samples for left and right PCM FX-bus
accumulators. ALSA uses accumulators 0 and 1 for left and right PCM.
The result is mixed to mono signal (single channel) and forwarded to
the ??rear?? left DAC PCM slot of the AC97 codec.
-name='Wave Capture Volume',index=0
-name='Wave Capture Switch',index=0
-
+``name='Wave Capture Volume',index=0``, ``name='Wave Capture Switch',index=0``
+------------------------------------------------------------------------------
These controls are used to attenuate samples for left and right PCM FX-bus
accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
-name='Synth Playback Volume',index=0
-
+``name='Synth Playback Volume',index=0``
+----------------------------------------
This control is used to attenuate samples for left and right MIDI FX-bus
accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
-name='Synth Capture Volume',index=0
-name='Synth Capture Switch',index=0
-
+``name='Synth Capture Volume',index=0``, ``name='Synth Capture Switch',index=0``
+--------------------------------------------------------------------------------
These controls are used to attenuate samples for left and right MIDI FX-bus
accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
-name='Surround Playback Volume',index=0
-
+``name='Surround Playback Volume',index=0``
+-------------------------------------------
This control is used to attenuate samples for left and right rear PCM FX-bus
accumulators. ALSA uses accumulators 2 and 3 for left and right rear PCM samples.
The result samples are forwarded to the rear I2S DACs. These DACs operate
separately (they are not inside the AC97 codec).
-name='Surround Capture Volume',index=0
-name='Surround Capture Switch',index=0
-
+``name='Surround Capture Volume',index=0``, ``name='Surround Capture Switch',index=0``
+--------------------------------------------------------------------------------------
These controls are used to attenuate samples for left and right rear PCM FX-bus
accumulators. ALSA uses accumulators 2 and 3 for left and right rear PCM samples.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
-name='Center Playback Volume',index=0
-
+``name='Center Playback Volume',index=0``
+-----------------------------------------
This control is used to attenuate sample for center PCM FX-bus accumulator.
ALSA uses accumulator 6 for center PCM sample. The result sample is forwarded
to the ??rear?? right DAC PCM slot of the AC97 codec.
-name='LFE Playback Volume',index=0
-
+``name='LFE Playback Volume',index=0``
+--------------------------------------
This control is used to attenuate sample for center PCM FX-bus accumulator.
ALSA uses accumulator 6 for center PCM sample. The result sample is forwarded
to the ??rear?? left DAC PCM slot of the AC97 codec.
-name='AC97 Playback Volume',index=0
-
+``name='AC97 Playback Volume',index=0``
+---------------------------------------
This control is used to attenuate samples for left and right front ADC PCM slots
of the AC97 codec. The result samples are forwarded to the front DAC PCM
slots of the AC97 codec.
-********************************************************************************
-*** Note: This control should be zero for the standard operations, otherwise ***
-*** a digital loopback is activated. ***
-********************************************************************************
-name='AC97 Capture Volume',index=0
+.. note::
+ This control should be zero for the standard operations, otherwise
+ a digital loopback is activated.
+
+``name='AC97 Capture Volume',index=0``
+--------------------------------------
This control is used to attenuate samples for left and right front ADC PCM slots
of the AC97 codec. The result is forwarded to the ADC capture FIFO (thus to
the standard capture PCM device).
-********************************************************************************
-*** Note: This control should be 100 (maximal value), otherwise no analog ***
-*** inputs of the AC97 codec can be captured (recorded). ***
-********************************************************************************
-name='IEC958 TTL Playback Volume',index=0
+.. note::
+ This control should be 100 (maximal value), otherwise no analog
+ inputs of the AC97 codec can be captured (recorded).
+``name='IEC958 TTL Playback Volume',index=0``
+---------------------------------------------
This control is used to attenuate samples from left and right IEC958 TTL
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the front DAC PCM slots of the AC97 codec.
-name='IEC958 TTL Capture Volume',index=0
-
+``name='IEC958 TTL Capture Volume',index=0``
+--------------------------------------------
This control is used to attenuate samples from left and right IEC958 TTL
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the ADC capture FIFO (thus to the standard capture PCM device).
-name='Zoom Video Playback Volume',index=0
-
+``name='Zoom Video Playback Volume',index=0``
+---------------------------------------------
This control is used to attenuate samples from left and right zoom video
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the front DAC PCM slots of the AC97 codec.
-name='Zoom Video Capture Volume',index=0
-
+``name='Zoom Video Capture Volume',index=0``
+--------------------------------------------
This control is used to attenuate samples from left and right zoom video
digital inputs (usually used by a CDROM drive). The result samples are
forwarded to the ADC capture FIFO (thus to the standard capture PCM device).
-name='IEC958 LiveDrive Playback Volume',index=0
-
+``name='IEC958 LiveDrive Playback Volume',index=0``
+---------------------------------------------------
This control is used to attenuate samples from left and right IEC958 optical
digital input. The result samples are forwarded to the front DAC PCM slots
of the AC97 codec.
-name='IEC958 LiveDrive Capture Volume',index=0
-
+``name='IEC958 LiveDrive Capture Volume',index=0``
+--------------------------------------------------
This control is used to attenuate samples from left and right IEC958 optical
digital inputs. The result samples are forwarded to the ADC capture FIFO
(thus to the standard capture PCM device).
-name='IEC958 Coaxial Playback Volume',index=0
-
+``name='IEC958 Coaxial Playback Volume',index=0``
+-------------------------------------------------
This control is used to attenuate samples from left and right IEC958 coaxial
digital inputs. The result samples are forwarded to the front DAC PCM slots
of the AC97 codec.
-name='IEC958 Coaxial Capture Volume',index=0
-
+``name='IEC958 Coaxial Capture Volume',index=0``
+------------------------------------------------
This control is used to attenuate samples from left and right IEC958 coaxial
digital inputs. The result samples are forwarded to the ADC capture FIFO
(thus to the standard capture PCM device).
-name='Line LiveDrive Playback Volume',index=0
-name='Line LiveDrive Playback Volume',index=1
-
+``name='Line LiveDrive Playback Volume',index=0``, ``name='Line LiveDrive Playback Volume',index=1``
+----------------------------------------------------------------------------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the LiveDrive). The result samples are forwarded to the front
DAC PCM slots of the AC97 codec.
-name='Line LiveDrive Capture Volume',index=1
-name='Line LiveDrive Capture Volume',index=1
-
+``name='Line LiveDrive Capture Volume',index=1``, ``name='Line LiveDrive Capture Volume',index=1``
+--------------------------------------------------------------------------------------------------
This control is used to attenuate samples from left and right I2S ADC
inputs (on the LiveDrive). The result samples are forwarded to the ADC
capture FIFO (thus to the standard capture PCM device).
-name='Tone Control - Switch',index=0
-
+``name='Tone Control - Switch',index=0``
+----------------------------------------
This control turns the tone control on or off. The samples for front, rear
and center / LFE outputs are affected.
-name='Tone Control - Bass',index=0
-
+``name='Tone Control - Bass',index=0``
+--------------------------------------
This control sets the bass intensity. There is no neutral value!!
When the tone control code is activated, the samples are always modified.
The closest value to pure signal is 20.
-name='Tone Control - Treble',index=0
-
+``name='Tone Control - Treble',index=0``
+----------------------------------------
This control sets the treble intensity. There is no neutral value!!
When the tone control code is activated, the samples are always modified.
The closest value to pure signal is 20.
-name='IEC958 Optical Raw Playback Switch',index=0
-
+``name='IEC958 Optical Raw Playback Switch',index=0``
+-----------------------------------------------------
If this switch is on, then the samples for the IEC958 (S/PDIF) digital
output are taken only from the raw FX8010 PCM, otherwise standard front
PCM samples are taken.
-name='Headphone Playback Volume',index=1
-
+``name='Headphone Playback Volume',index=1``
+--------------------------------------------
This control attenuates the samples for the headphone output.
-name='Headphone Center Playback Switch',index=1
-
+``name='Headphone Center Playback Switch',index=1``
+---------------------------------------------------
If this switch is on, then the sample for the center PCM is put to the
left headphone output (useful for SB Live cards without separate center/LFE
output).
-name='Headphone LFE Playback Switch',index=1
-
+``name='Headphone LFE Playback Switch',index=1``
+------------------------------------------------
If this switch is on, then the sample for the center PCM is put to the
right headphone output (useful for SB Live cards without separate center/LFE
output).
-3) PCM stream related controls
-------------------------------
-
-name='EMU10K1 PCM Volume',index 0-31
+PCM stream related controls
+===========================
+``name='EMU10K1 PCM Volume',index 0-31``
+----------------------------------------
Channel volume attenuation in range 0-0xffff. The maximum value (no
attenuation) is default. The channel mapping for three values is
as follows:
- 0 - mono, default 0xffff (no attenuation)
- 1 - left, default 0xffff (no attenuation)
- 2 - right, default 0xffff (no attenuation)
-
-name='EMU10K1 PCM Send Routing',index 0-31
+* 0 - mono, default 0xffff (no attenuation)
+* 1 - left, default 0xffff (no attenuation)
+* 2 - right, default 0xffff (no attenuation)
+``name='EMU10K1 PCM Send Routing',index 0-31``
+----------------------------------------------
This control specifies the destination - FX-bus accumulators. There are
twelve values with this mapping:
- 0 - mono, A destination (FX-bus 0-15), default 0
- 1 - mono, B destination (FX-bus 0-15), default 1
- 2 - mono, C destination (FX-bus 0-15), default 2
- 3 - mono, D destination (FX-bus 0-15), default 3
- 4 - left, A destination (FX-bus 0-15), default 0
- 5 - left, B destination (FX-bus 0-15), default 1
- 6 - left, C destination (FX-bus 0-15), default 2
- 7 - left, D destination (FX-bus 0-15), default 3
- 8 - right, A destination (FX-bus 0-15), default 0
- 9 - right, B destination (FX-bus 0-15), default 1
- 10 - right, C destination (FX-bus 0-15), default 2
- 11 - right, D destination (FX-bus 0-15), default 3
+* 0 - mono, A destination (FX-bus 0-15), default 0
+* 1 - mono, B destination (FX-bus 0-15), default 1
+* 2 - mono, C destination (FX-bus 0-15), default 2
+* 3 - mono, D destination (FX-bus 0-15), default 3
+* 4 - left, A destination (FX-bus 0-15), default 0
+* 5 - left, B destination (FX-bus 0-15), default 1
+* 6 - left, C destination (FX-bus 0-15), default 2
+* 7 - left, D destination (FX-bus 0-15), default 3
+* 8 - right, A destination (FX-bus 0-15), default 0
+* 9 - right, B destination (FX-bus 0-15), default 1
+* 10 - right, C destination (FX-bus 0-15), default 2
+* 11 - right, D destination (FX-bus 0-15), default 3
Don't forget that it's illegal to assign a channel to the same FX-bus accumulator
more than once (it means 0=0 && 1=0 is an invalid combination).
-name='EMU10K1 PCM Send Volume',index 0-31
-
+``name='EMU10K1 PCM Send Volume',index 0-31``
+---------------------------------------------
It specifies the attenuation (amount) for given destination in range 0-255.
The channel mapping is following:
- 0 - mono, A destination attn, default 255 (no attenuation)
- 1 - mono, B destination attn, default 255 (no attenuation)
- 2 - mono, C destination attn, default 0 (mute)
- 3 - mono, D destination attn, default 0 (mute)
- 4 - left, A destination attn, default 255 (no attenuation)
- 5 - left, B destination attn, default 0 (mute)
- 6 - left, C destination attn, default 0 (mute)
- 7 - left, D destination attn, default 0 (mute)
- 8 - right, A destination attn, default 0 (mute)
- 9 - right, B destination attn, default 255 (no attenuation)
- 10 - right, C destination attn, default 0 (mute)
- 11 - right, D destination attn, default 0 (mute)
+* 0 - mono, A destination attn, default 255 (no attenuation)
+* 1 - mono, B destination attn, default 255 (no attenuation)
+* 2 - mono, C destination attn, default 0 (mute)
+* 3 - mono, D destination attn, default 0 (mute)
+* 4 - left, A destination attn, default 255 (no attenuation)
+* 5 - left, B destination attn, default 0 (mute)
+* 6 - left, C destination attn, default 0 (mute)
+* 7 - left, D destination attn, default 0 (mute)
+* 8 - right, A destination attn, default 0 (mute)
+* 9 - right, B destination attn, default 255 (no attenuation)
+* 10 - right, C destination attn, default 0 (mute)
+* 11 - right, D destination attn, default 0 (mute)
-4) MANUALS/PATENTS:
--------------------
+MANUALS/PATENTS
+===============
ftp://opensource.creative.com/pub/doc
-------------------------------------
- Files:
- LM4545.pdf AC97 Codec
-
- m2049.pdf The EMU10K1 Digital Audio Processor
-
- hog63.ps FX8010 - A DSP Chip Architecture for Audio Effects
+LM4545.pdf
+ AC97 Codec
+m2049.pdf
+ The EMU10K1 Digital Audio Processor
+hog63.ps
+ FX8010 - A DSP Chip Architecture for Audio Effects
WIPO Patents
------------
- Patent numbers:
- WO 9901813 (A1) Audio Effects Processor with multiple asynchronous (Jan. 14, 1999)
- streams
- WO 9901814 (A1) Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
+WO 9901813 (A1)
+ Audio Effects Processor with multiple asynchronous streams
+ (Jan. 14, 1999)
+
+WO 9901814 (A1)
+ Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
- WO 9901953 (A1) Audio Effects Processor having Decoupled Instruction
- Execution and Audio Data Sequencing (Jan. 14, 1999)
+WO 9901953 (A1)
+ Audio Effects Processor having Decoupled Instruction
+ Execution and Audio Data Sequencing (Jan. 14, 1999)
US Patents (http://www.uspto.gov/)
----------------------------------
- US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
-
- US 5928342 Audio Effects Processor integrated on a single chip (Jul. 27, 1999)
- with a multiport memory onto which multiple asynchronous
- digital sound samples can be concurrently loaded
-
- US 5930158 Processor with Instruction Set for Audio Effects (Jul. 27, 1999)
-
- US 6032235 Memory initialization circuit (Tram) (Feb. 29, 2000)
-
- US 6138207 Interpolation looping of audio samples in cache connected to (Oct. 24, 2000)
- system bus with prioritization and modification of bus transfers
- in accordance with loop ends and minimum block sizes
-
- US 6151670 Method for conserving memory storage using a (Nov. 21, 2000)
- pool of short term memory registers
-
- US 6195715 Interrupt control for multiple programs communicating with (Feb. 27, 2001)
- a common interrupt by associating programs to GP registers,
- defining interrupt register, polling GP registers, and invoking
- callback routine associated with defined interrupt register
+US 5925841
+ Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
+
+US 5928342
+ Audio Effects Processor integrated on a single chip
+ with a multiport memory onto which multiple asynchronous
+ digital sound samples can be concurrently loaded
+ (Jul. 27, 1999)
+
+US 5930158
+ Processor with Instruction Set for Audio Effects (Jul. 27, 1999)
+
+US 6032235
+ Memory initialization circuit (Tram) (Feb. 29, 2000)
+
+US 6138207
+ Interpolation looping of audio samples in cache connected to
+ system bus with prioritization and modification of bus transfers
+ in accordance with loop ends and minimum block sizes
+ (Oct. 24, 2000)
+
+US 6151670
+ Method for conserving memory storage using a
+ pool of short term memory registers
+ (Nov. 21, 2000)
+
+US 6195715
+ Interrupt control for multiple programs communicating with
+ a common interrupt by associating programs to GP registers,
+ defining interrupt register, polling GP registers, and invoking
+ callback routine associated with defined interrupt register
+ (Feb. 27, 2001)
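
To make the ``EMU10K1 PCM Send Volume`` mapping listed above a little more
concrete, a minimal alsa-lib sketch writing the twelve values for substream 0
with the default routing/attenuation shown in the table (the card name and the
``MIXER`` element interface are assumptions)::

  #include <alsa/asoundlib.h>

  int main(void)
  {
      /* default mapping from the table above:
       * 255 = no attenuation, 0 = mute; order is mono/left/right x A-D */
      static const long send[12] = {
          255, 255, 0, 0,     /* mono  -> A, B, C, D */
          255,   0, 0, 0,     /* left  -> A, B, C, D */
            0, 255, 0, 0      /* right -> A, B, C, D */
      };
      snd_ctl_t *ctl;
      snd_ctl_elem_value_t *val;
      int i;

      if (snd_ctl_open(&ctl, "hw:0", 0) < 0)
          return 1;

      snd_ctl_elem_value_alloca(&val);
      snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_MIXER);
      snd_ctl_elem_value_set_name(val, "EMU10K1 PCM Send Volume");
      snd_ctl_elem_value_set_index(val, 0);     /* substream 0 */
      for (i = 0; i < 12; i++)
          snd_ctl_elem_value_set_integer(val, i, send[i]);

      if (snd_ctl_elem_write(ctl, val) < 0)
          return 1;

      snd_ctl_close(ctl);
      return 0;
  }
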
diff --git a/Documentation/sound/alsa/serial-u16550.txt b/Documentation/sound/cards/serial-u16550.rst
index c1919559d509..197aeacea3da 100644
--- a/Documentation/sound/alsa/serial-u16550.txt
+++ b/Documentation/sound/cards/serial-u16550.rst
@@ -1,14 +1,14 @@
-
- Serial UART 16450/16550 MIDI driver
- ===================================
+===================================
+Serial UART 16450/16550 MIDI driver
+===================================
The adaptor module parameter allows you to select either:
- 0 - Roland Soundcanvas support (default)
- 1 - Midiator MS-124T support (1)
- 2 - Midiator MS-124W S/A mode (2)
- 3 - MS-124W M/B mode support (3)
- 4 - Generic device with multiple input support (4)
+* 0 - Roland Soundcanvas support (default)
+* 1 - Midiator MS-124T support (1)
+* 2 - Midiator MS-124W S/A mode (2)
+* 3 - MS-124W M/B mode support (3)
+* 4 - Generic device with multiple input support (4)
For the Midiator MS-124W, you must set the physical M-S and A-B
switches on the Midiator to match the driver mode you select.
@@ -22,11 +22,13 @@ substream. The driver provides no way to send F5 00 (no selection) or to not
send the F5 NN command sequence at all; perhaps it ought to.
Usage example for simple serial converter:
+::
/sbin/setserial /dev/ttyS0 uart none
/sbin/modprobe snd-serial-u16550 port=0x3f8 irq=4 speed=115200
Usage example for Roland SoundCanvas with 4 MIDI ports:
+::
/sbin/setserial /dev/ttyS0 uart none
/sbin/modprobe snd-serial-u16550 port=0x3f8 irq=4 outs=4
@@ -37,6 +39,7 @@ all four MIDI Out connectors. Set the A-B switch and the speed module
parameter to match (A=19200, B=9600).
Usage example for MS-124T, with A-B switch in A position:
+::
/sbin/setserial /dev/ttyS0 uart none
/sbin/modprobe snd-serial-u16550 port=0x3f8 irq=4 adaptor=1 \
@@ -47,6 +50,7 @@ the outs module parameter is automatically set to 1. The driver sends
the same data to all four MIDI Out connectors at full MIDI speed.
Usage example for S/A mode:
+::
/sbin/setserial /dev/ttyS0 uart none
/sbin/modprobe snd-serial-u16550 port=0x3f8 irq=4 adaptor=2
@@ -63,6 +67,7 @@ at most one byte every 520 us, as compared with the full MIDI data rate of
one byte every 320 us per port.
Usage example for M/B mode:
+::
/sbin/setserial /dev/ttyS0 uart none
/sbin/modprobe snd-serial-u16550 port=0x3f8 irq=4 adaptor=3
diff --git a/Documentation/sound/cards/via82xx-mixer.rst b/Documentation/sound/cards/via82xx-mixer.rst
new file mode 100644
index 000000000000..6ee993d4535b
--- /dev/null
+++ b/Documentation/sound/cards/via82xx-mixer.rst
@@ -0,0 +1,8 @@
+=============
+VIA82xx mixer
+=============
+
+On many VIA82xx boards, the ``Input Source Select`` mixer control does not work.
+Setting it to ``Input2`` on such boards will cause recording to hang, or fail
+with EIO (input/output error) via OSS emulation. This control should be left
+at ``Input1`` for such cards.
diff --git a/Documentation/sound/alsa/Channel-Mapping-API.txt b/Documentation/sound/designs/channel-mapping-api.rst
index 3c43d1a4ca0e..58e6312a43c0 100644
--- a/Documentation/sound/alsa/Channel-Mapping-API.txt
+++ b/Documentation/sound/designs/channel-mapping-api.rst
@@ -1,9 +1,11 @@
+============================
ALSA PCM channel-mapping API
============================
- Takashi Iwai <tiwai@suse.de>
-GENERAL
--------
+Takashi Iwai <tiwai@suse.de>
+
+General
+=======
The channel mapping API allows user to query the possible channel maps
and the current channel map, also optionally to modify the channel map
@@ -11,9 +13,9 @@ of the current stream.
A channel map is an array of position for each PCM channel.
Typically, a stereo PCM stream has a channel map of
- { front_left, front_right }
+``{ front_left, front_right }``
while a 4.0 surround PCM stream has a channel map of
- { front left, front right, rear left, rear right }.
+``{ front left, front right, rear left, rear right }``.
The problem, so far, was that we had no standard channel map
explicitly, and applications had no way to know which channel
@@ -29,8 +31,8 @@ specification. These are the main motivations for the new channel
mapping API.
-DESIGN
-------
+Design
+======
Actually, "the channel mapping API" doesn't introduce anything new in
the kernel/user-space ABI perspective. It uses only the existing
@@ -39,10 +41,11 @@ control element features.
As a ground design, each PCM substream may contain a control element
providing the channel mapping information and configuration. This
element is specified by:
- iface = SNDRV_CTL_ELEM_IFACE_PCM
- name = "Playback Channel Map" or "Capture Channel Map"
- device = the same device number for the assigned PCM substream
- index = the same index number for the assigned PCM substream
+
+* iface = SNDRV_CTL_ELEM_IFACE_PCM
+* name = "Playback Channel Map" or "Capture Channel Map"
+* device = the same device number for the assigned PCM substream
+* index = the same index number for the assigned PCM substream
Note the name is different depending on the PCM substream direction.
@@ -50,32 +53,35 @@ Each control element provides at least the TLV read operation and the
read operation. Optionally, the write operation can be provided to
allow user to change the channel map dynamically.
-* TLV
+TLV
+---
The TLV operation gives the list of available channel
maps. A list item of a channel map is usually a TLV of
- type data-bytes ch0 ch1 ch2...
+``type data-bytes ch0 ch1 ch2...``
where type is the TLV type value, the second argument is the total
bytes (not the numbers) of channel values, and the rest are the
position value for each channel.
-As a TLV type, either SNDRV_CTL_TLVT_CHMAP_FIXED,
-SNDRV_CTL_TLV_CHMAP_VAR or SNDRV_CTL_TLVT_CHMAP_PAIRED can be used.
-The _FIXED type is for a channel map with the fixed channel position
-while the latter two are for flexible channel positions. _VAR type is
-for a channel map where all channels are freely swappable and _PAIRED
+As a TLV type, either ``SNDRV_CTL_TLVT_CHMAP_FIXED``,
+``SNDRV_CTL_TLV_CHMAP_VAR`` or ``SNDRV_CTL_TLVT_CHMAP_PAIRED`` can be used.
+The ``_FIXED`` type is for a channel map with the fixed channel position
+while the latter two are for flexible channel positions. ``_VAR`` type is
+for a channel map where all channels are freely swappable and ``_PAIRED``
type is where pair-wise channels are swappable. For example, when you
-have {FL/FR/RL/RR} channel map, _PAIRED type would allow you to swap
-only {RL/RR/FL/FR} while _VAR type would allow even swapping FL and
+have {FL/FR/RL/RR} channel map, ``_PAIRED`` type would allow you to swap
+only {RL/RR/FL/FR} while ``_VAR`` type would allow even swapping FL and
RR.
-These new TLV types are defined in sound/tlv.h.
+These new TLV types are defined in ``sound/tlv.h``.
-The available channel position values are defined in sound/asound.h,
+The available channel position values are defined in ``sound/asound.h``,
here is a cut:
-/* channel positions */
-enum {
+::
+
+ /* channel positions */
+ enum {
SNDRV_CHMAP_UNKNOWN = 0,
SNDRV_CHMAP_NA, /* N/A, silent */
SNDRV_CHMAP_MONO, /* mono stream */
@@ -107,11 +113,13 @@ enum {
SNDRV_CHMAP_TRR, /* top rear right */
SNDRV_CHMAP_TRC, /* top rear center */
SNDRV_CHMAP_LAST = SNDRV_CHMAP_TRC,
-};
+ };
When a PCM stream can provide more than one channel map, you can
provide multiple channel maps in a TLV container type. The TLV data
to be returned will contain such as:
+::
+
SNDRV_CTL_TLVT_CONTAINER 96
SNDRV_CTL_TLVT_CHMAP_FIXED 4 SNDRV_CHMAP_FC
SNDRV_CTL_TLVT_CHMAP_FIXED 8 SNDRV_CHMAP_FL SNDRV_CHMAP_FR
@@ -120,19 +128,21 @@ to be returned will contain such as:
The channel position is provided in LSB 16bits. The upper bits are
used for bit flags.
+::
-#define SNDRV_CHMAP_POSITION_MASK 0xffff
-#define SNDRV_CHMAP_PHASE_INVERSE (0x01 << 16)
-#define SNDRV_CHMAP_DRIVER_SPEC (0x02 << 16)
+ #define SNDRV_CHMAP_POSITION_MASK 0xffff
+ #define SNDRV_CHMAP_PHASE_INVERSE (0x01 << 16)
+ #define SNDRV_CHMAP_DRIVER_SPEC (0x02 << 16)
-SNDRV_CHMAP_PHASE_INVERSE indicates the channel is phase inverted,
+``SNDRV_CHMAP_PHASE_INVERSE`` indicates the channel is phase inverted,
(thus summing left and right channels would result in almost silence).
Some digital mic devices have this.
-When SNDRV_CHMAP_DRIVER_SPEC is set, all the channel position values
+When ``SNDRV_CHMAP_DRIVER_SPEC`` is set, all the channel position values
don't follow the standard definition above but driver-specific.
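+
+A user-space reader of this control can split each raw value into the
+position and the flag bits; a minimal sketch, relying only on the
+definitions quoted above from the uapi header ``sound/asound.h``::
+
+  #include <stdio.h>
+  #include <sound/asound.h>
+
+  /* Print one raw value read from a chmap control element. */
+  static void print_chmap_value(unsigned int val)
+  {
+          unsigned int pos = val & SNDRV_CHMAP_POSITION_MASK;
+
+          printf("position %u%s%s\n", pos,
+                 (val & SNDRV_CHMAP_PHASE_INVERSE) ? " (phase inverted)" : "",
+                 (val & SNDRV_CHMAP_DRIVER_SPEC) ? " (driver specific)" : "");
+  }
+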
-* READ OPERATION
+Read Operation
+--------------
The control read operation is for providing the current channel map of
the given stream. The control element returns an integer array
@@ -140,9 +150,10 @@ containing the position of each channel.
When this is performed before the number of the channel is specified
(i.e. hw_params is set), it should return all channels set to
-UNKNOWN.
+``UNKNOWN``.
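+
+In user space both the TLV list and this read operation are usually reached
+through alsa-lib rather than the raw control element. A minimal sketch
+listing the available maps, assuming alsa-lib's chmap helpers
+(``snd_pcm_query_chmaps()``, ``snd_pcm_chmap_name()``; the current map would
+be read with ``snd_pcm_get_chmap()``) and the illustrative ``default`` PCM::
+
+  #include <stdio.h>
+  #include <alsa/asoundlib.h>
+
+  int main(void)
+  {
+          snd_pcm_t *pcm;
+          snd_pcm_chmap_query_t **maps;
+          unsigned int c;
+          int i;
+
+          if (snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
+                  return 1;
+          maps = snd_pcm_query_chmaps(pcm);     /* parses the TLV list */
+          for (i = 0; maps && maps[i]; i++) {
+                  printf("map %d (type %d):", i, (int)maps[i]->type);
+                  for (c = 0; c < maps[i]->map.channels; c++)
+                          printf(" %s", snd_pcm_chmap_name(maps[i]->map.pos[c]));
+                  printf("\n");
+          }
+          snd_pcm_free_chmaps(maps);
+          snd_pcm_close(pcm);
+          return 0;
+  }
+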
-* WRITE OPERATION
+Write Operation
+---------------
The control write operation is optional, and only for devices that can
change the channel configuration on the fly, such as HDMI. User needs
diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/designs/compress-offload.rst
index 8ba556a131c3..ad4bfbdacc83 100644
--- a/Documentation/sound/alsa/compress_offload.txt
+++ b/Documentation/sound/designs/compress-offload.rst
@@ -1,10 +1,14 @@
- compress_offload.txt
- =====================
- Pierre-Louis.Bossart <pierre-louis.bossart@linux.intel.com>
- Vinod Koul <vinod.koul@linux.intel.com>
+=========================
+ALSA Compress-Offload API
+=========================
+
+Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+Vinod Koul <vinod.koul@linux.intel.com>
-Overview
+Overview
+========
Since its early days, the ALSA API was defined with PCM support or
constant bitrates payloads such as IEC61937 in mind. Arguments and
returned values in frames are the norm, making it a challenge to
@@ -27,8 +31,9 @@ Intel Moorestown SOC, with many corrections required to upstream the
API in the mainline kernel instead of the staging tree and make it
usable by others.
-Requirements
+Requirements
+============
The main requirements are:
- separation between byte counts and time. Compressed formats may have
@@ -63,7 +68,7 @@ The main requirements are:
streaming compressed data to a DSP, with the assumption that the
decoded samples are routed to a physical output or logical back-end.
- - Complexity hiding. Existing user-space multimedia frameworks all
+- Complexity hiding. Existing user-space multimedia frameworks all
have existing enums/structures for each compressed format. This new
API assumes the existence of a platform-specific compatibility layer
to expose, translate and make use of the capabilities of the audio
@@ -72,7 +77,7 @@ The main requirements are:
Design
-
+======
The new API shares a number of concepts with the PCM API for flow
control. Start, pause, resume, drain and stop commands have the same
semantics no matter what the content is.
@@ -95,43 +100,44 @@ mandatory routines and possibly make use of optional ones.
The main additions are
-- get_caps
-This routine returns the list of audio formats supported. Querying the
-codecs on a capture stream will return encoders, decoders will be
-listed for playback streams.
-
-- get_codec_caps For each codec, this routine returns a list of
-capabilities. The intent is to make sure all the capabilities
-correspond to valid settings, and to minimize the risks of
-configuration failures. For example, for a complex codec such as AAC,
-the number of channels supported may depend on a specific profile. If
-the capabilities were exposed with a single descriptor, it may happen
-that a specific combination of profiles/channels/formats may not be
-supported. Likewise, embedded DSPs have limited memory and cpu cycles,
-it is likely that some implementations make the list of capabilities
-dynamic and dependent on existing workloads. In addition to codec
-settings, this routine returns the minimum buffer size handled by the
-implementation. This information can be a function of the DMA buffer
-sizes, the number of bytes required to synchronize, etc, and can be
-used by userspace to define how much needs to be written in the ring
-buffer before playback can start.
-
-- set_params
-This routine sets the configuration chosen for a specific codec. The
-most important field in the parameters is the codec type; in most
-cases decoders will ignore other fields, while encoders will strictly
-comply to the settings
-
-- get_params
-This routines returns the actual settings used by the DSP. Changes to
-the settings should remain the exception.
-
-- get_timestamp
-The timestamp becomes a multiple field structure. It lists the number
-of bytes transferred, the number of samples processed and the number
-of samples rendered/grabbed. All these values can be used to determine
-the average bitrate, figure out if the ring buffer needs to be
-refilled or the delay due to decoding/encoding/io on the DSP.
+get_caps
+ This routine returns the list of audio formats supported. Querying the
+ codecs on a capture stream will return encoders, decoders will be
+ listed for playback streams.
+
+get_codec_caps
+ For each codec, this routine returns a list of
+ capabilities. The intent is to make sure all the capabilities
+ correspond to valid settings, and to minimize the risks of
+ configuration failures. For example, for a complex codec such as AAC,
+ the number of channels supported may depend on a specific profile. If
+ the capabilities were exposed with a single descriptor, it may happen
+ that a specific combination of profiles/channels/formats may not be
+ supported. Likewise, embedded DSPs have limited memory and cpu cycles,
+ it is likely that some implementations make the list of capabilities
+ dynamic and dependent on existing workloads. In addition to codec
+ settings, this routine returns the minimum buffer size handled by the
+ implementation. This information can be a function of the DMA buffer
+ sizes, the number of bytes required to synchronize, etc, and can be
+ used by userspace to define how much needs to be written in the ring
+ buffer before playback can start.
+
+set_params
+ This routine sets the configuration chosen for a specific codec. The
+ most important field in the parameters is the codec type; in most
+ cases decoders will ignore other fields, while encoders will strictly
+ comply with the settings.
+
+get_params
+ This routine returns the actual settings used by the DSP. Changes to
+ the settings should remain the exception.
+
+get_timestamp
+ The timestamp becomes a multiple field structure. It lists the number
+ of bytes transferred, the number of samples processed and the number
+ of samples rendered/grabbed. All these values can be used to determine
+ the average bitrate, figure out if the ring buffer needs to be
+ refilled or the delay due to decoding/encoding/io on the DSP.
Note that the list of codecs/profiles/modes was derived from the
OpenMAX AL specification instead of reinventing the wheel.
@@ -145,6 +151,7 @@ Modifications include:
- Addition of encoding options when required (derived from OpenMAX IL)
- Addition of rateControlSupported (missing in OpenMAX AL)
+
Gapless Playback
================
When playing thru an album, the decoders have the ability to skip the encoder
@@ -162,19 +169,19 @@ switch from one track to another and start using data for second track.
The main additions are:
-- set_metadata
-This routine sets the encoder delay and encoder padding. This can be used by
-decoder to strip the silence. This needs to be set before the data in the track
-is written.
+set_metadata
+ This routine sets the encoder delay and encoder padding. This can be used
+ by the decoder to strip the silence. This needs to be set before the data
+ in the track is written.
-- set_next_track
-This routine tells DSP that metadata and write operation sent after this would
-correspond to subsequent track
+set_next_track
+ This routine tells the DSP that the metadata and write operations sent
+ after this will correspond to the subsequent track.
-- partial drain
-This is called when end of file is reached. The userspace can inform DSP that
-EOF is reached and now DSP can start skipping padding delay. Also next write
-data would belong to next track
+partial drain
+ This is called when the end of file is reached. Userspace can inform the
+ DSP that EOF is reached so that the DSP can start skipping the padding
+ delay. The next write data will then belong to the next track.
Sequence flow for gapless would be:
- Open
@@ -189,10 +196,12 @@ Sequence flow for gapless would be:
- then call partial_drain to flush most of buffer in DSP
- Fill data of the next track
- DSP switches to second track
+
(note: order for partial_drain and write for next track can be reversed as well)
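+
+A rough user-space sketch of that flow, assuming the uapi names from
+``<sound/compress_offload.h>`` (``SNDRV_COMPRESS_SET_METADATA``,
+``SNDRV_COMPRESS_NEXT_TRACK``, ``SNDRV_COMPRESS_PARTIAL_DRAIN``) and an
+illustrative ``/dev/snd/comprC0D0`` node; the set_params, write and start
+steps are collapsed into comments::
+
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+  #include <sound/compress_offload.h>
+
+  int main(void)
+  {
+          int fd = open("/dev/snd/comprC0D0", O_WRONLY);
+          struct snd_compr_metadata meta = { 0 };
+
+          if (fd < 0)
+                  return 1;
+          /* ... SNDRV_COMPRESS_SET_PARAMS, initial write()s and
+           * SNDRV_COMPRESS_START for the first track ... */
+
+          /* Declare encoder delay/padding for the upcoming track
+           * (values are illustrative) before feeding its data. */
+          meta.key = SNDRV_COMPRESS_ENCODER_DELAY;
+          meta.value[0] = 1024;
+          ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &meta);
+          meta.key = SNDRV_COMPRESS_ENCODER_PADDING;
+          meta.value[0] = 576;
+          ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &meta);
+
+          ioctl(fd, SNDRV_COMPRESS_NEXT_TRACK);
+          ioctl(fd, SNDRV_COMPRESS_PARTIAL_DRAIN);
+          /* ... write() the second track's data, then a final drain ... */
+          ioctl(fd, SNDRV_COMPRESS_DRAIN);
+          close(fd);
+          return 0;
+  }
+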
-Not supported:
+Not supported
+=============
- Support for VoIP/circuit-switched calls is not the target of this
API. Support for dynamic bit-rate changes would require a tight
coupling between the DSP and the host stack, limiting power savings.
@@ -225,7 +234,9 @@ Not supported:
rendered output in time, this does not deal with underrun/overrun and
maybe dealt in user-library
-Credits:
+
+Credits
+=======
- Mark Brown and Liam Girdwood for discussions on the need for this API
- Harsha Priya for her work on intel_sst compressed API
- Rakesh Ughreja for valuable feedback
diff --git a/Documentation/sound/designs/control-names.rst b/Documentation/sound/designs/control-names.rst
new file mode 100644
index 000000000000..7fedd0f33cd9
--- /dev/null
+++ b/Documentation/sound/designs/control-names.rst
@@ -0,0 +1,142 @@
+===========================
+Standard ALSA Control Names
+===========================
+
+This document describes standard names of mixer controls.
+
+Standard Syntax
+---------------
+Syntax: [LOCATION] SOURCE [CHANNEL] [DIRECTION] FUNCTION
+
+
+DIRECTION
+~~~~~~~~~
+================ ===============
+<nothing> both directions
+Playback one direction
+Capture one direction
+Bypass Playback one direction
+Bypass Capture one direction
+================ ===============
+
+FUNCTION
+~~~~~~~~
+======== =================================
+Switch on/off switch
+Volume amplifier
+Route route control, hardware specific
+======== =================================
+
+CHANNEL
+~~~~~~~
+============ ==================================================
+<nothing> channel independent, or applies to all channels
+Front front left/right channels
+Surround rear left/right in 4.0/5.1 surround
+CLFE C/LFE channels
+Center       center channel
+LFE LFE channel
+Side side left/right for 7.1 surround
+============ ==================================================
+
+LOCATION (Physical location of source)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+============ =====================
+Front front position
+Rear rear position
+Dock on docking station
+Internal internal
+============ =====================
+
+SOURCE
+~~~~~~
+=================== =================================================
+Master
+Master Mono
+Hardware Master
+Speaker internal speaker
+Bass Speaker internal LFE speaker
+Headphone
+Line Out
+Beep beep generator
+Phone
+Phone Input
+Phone Output
+Synth
+FM
+Mic
+Headset Mic mic part of combined headset jack - 4-pin
+ headphone + mic
+Headphone Mic mic part of either/or - 3-pin headphone or mic
+Line input only, use "Line Out" for output
+CD
+Video
+Zoom Video
+Aux
+PCM
+PCM Pan
+Loopback
+Analog Loopback D/A -> A/D loopback
+Digital Loopback playback -> capture loopback -
+ without analog path
+Mono
+Mono Output
+Multi
+ADC
+Wave
+Music
+I2S
+IEC958
+HDMI
+SPDIF output only
+SPDIF In
+Digital In
+HDMI/DP either HDMI or DisplayPort
+=================== =================================================
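+
+As an illustration of how a name composed with the syntax above is reached
+from user space: alsa-lib's simple mixer API exposes the ``[LOCATION] SOURCE``
+part as the element name and the DIRECTION/FUNCTION part as per-element
+capabilities. A sketch (``default`` and ``Front Mic`` are illustrative)::
+
+  #include <stdio.h>
+  #include <alsa/asoundlib.h>
+
+  int main(void)
+  {
+          snd_mixer_t *mixer;
+          snd_mixer_selem_id_t *sid;
+          snd_mixer_elem_t *elem;
+          long min, max;
+
+          if (snd_mixer_open(&mixer, 0) < 0)
+                  return 1;
+          snd_mixer_attach(mixer, "default");
+          snd_mixer_selem_register(mixer, NULL, NULL);
+          snd_mixer_load(mixer);
+
+          snd_mixer_selem_id_alloca(&sid);
+          snd_mixer_selem_id_set_index(sid, 0);
+          snd_mixer_selem_id_set_name(sid, "Front Mic");
+          elem = snd_mixer_find_selem(mixer, sid);
+          /* "Front Mic Capture Volume" appears as the capture-volume
+           * capability of the "Front Mic" element. */
+          if (elem && snd_mixer_selem_has_capture_volume(elem)) {
+                  snd_mixer_selem_get_capture_volume_range(elem, &min, &max);
+                  printf("Front Mic Capture Volume: %ld..%ld\n", min, max);
+          }
+          snd_mixer_close(mixer);
+          return 0;
+  }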
+
+Exceptions (deprecated)
+-----------------------
+
+===================================== =======================
+[Analogue|Digital] Capture Source
+[Analogue|Digital] Capture Switch aka input gain switch
+[Analogue|Digital] Capture Volume aka input gain volume
+[Analogue|Digital] Playback Switch aka output gain switch
+[Analogue|Digital] Playback Volume aka output gain volume
+Tone Control - Switch
+Tone Control - Bass
+Tone Control - Treble
+3D Control - Switch
+3D Control - Center
+3D Control - Depth
+3D Control - Wide
+3D Control - Space
+3D Control - Level
+Mic Boost [(?dB)]
+===================================== =======================
+
+PCM interface
+-------------
+
+=================== ========================================
+Sample Clock Source { "Word", "Internal", "AutoSync" }
+Clock Sync Status { "Lock", "Sync", "No Lock" }
+External Rate external capture rate
+Capture Rate capture rate taken from external source
+=================== ========================================
+
+IEC958 (S/PDIF) interface
+-------------------------
+
+============================================ ======================================
+IEC958 [...] [Playback|Capture] Switch turn on/off the IEC958 interface
+IEC958 [...] [Playback|Capture] Volume digital volume control
+IEC958 [...] [Playback|Capture] Default default or global value - read/write
+IEC958 [...] [Playback|Capture] Mask consumer and professional mask
+IEC958 [...] [Playback|Capture] Con Mask consumer mask
+IEC958 [...] [Playback|Capture] Pro Mask professional mask
+IEC958 [...] [Playback|Capture] PCM Stream the settings assigned to a PCM stream
+IEC958 Q-subcode [Playback|Capture] Default Q-subcode bits
+
+IEC958 Preamble [Playback|Capture] Default burst preamble words (4*16bits)
+============================================ ======================================
diff --git a/Documentation/sound/designs/index.rst b/Documentation/sound/designs/index.rst
new file mode 100644
index 000000000000..04dcdae3e4f2
--- /dev/null
+++ b/Documentation/sound/designs/index.rst
@@ -0,0 +1,15 @@
+Designs and Implementations
+===========================
+
+.. toctree::
+ :maxdepth: 2
+
+ control-names
+ channel-mapping-api
+ compress-offload
+ timestamping
+ jack-controls
+ procfile
+ powersave
+ oss-emulation
+ seq-oss
diff --git a/Documentation/sound/alsa/Jack-Controls.txt b/Documentation/sound/designs/jack-controls.rst
index fe1c5e0c8555..ae25b1531bb0 100644
--- a/Documentation/sound/alsa/Jack-Controls.txt
+++ b/Documentation/sound/designs/jack-controls.rst
@@ -1,3 +1,7 @@
+==================
+ALSA Jack Controls
+==================
+
Why we need Jack kcontrols
==========================
@@ -29,11 +33,12 @@ How to use jack kcontrols
=========================
In order to keep compatibility, snd_jack_new() has been modified by
-adding two params :-
+adding two params:
- - @initial_kctl: if true, create a kcontrol and add it to the jack
- list.
- - @phantom_jack: Don't create a input device for phantom jacks.
+initial_kctl
+ if true, create a kcontrol and add it to the jack list.
+phantom_jack
+ Don't create an input device for phantom jacks.
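+
+For instance, a driver creating a phantom headphone jack together with its
+initial kcontrol might do the following (a sketch, assuming the two-boolean
+variant of snd_jack_new() described above)::
+
+  #include <sound/core.h>
+  #include <sound/jack.h>
+
+  static int create_phantom_hp_jack(struct snd_card *card)
+  {
+          struct snd_jack *jack;
+
+          /* initial_kctl=true: create the kcontrol right away;
+           * phantom_jack=true: no input device is created. */
+          return snd_jack_new(card, "Headphone Phantom Jack",
+                              SND_JACK_HEADPHONE, &jack, true, true);
+  }
+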
HDA jacks can set phantom_jack to true in order to create a phantom
jack and set initial_kctl to true to create an initial kcontrol with
diff --git a/Documentation/sound/alsa/OSS-Emulation.txt b/Documentation/sound/designs/oss-emulation.rst
index 152ca2a3f1bd..e8dcb9633e7b 100644
--- a/Documentation/sound/alsa/OSS-Emulation.txt
+++ b/Documentation/sound/designs/oss-emulation.rst
@@ -1,7 +1,8 @@
- NOTES ON KERNEL OSS-EMULATION
- =============================
+=============================
+Notes on Kernel OSS-Emulation
+=============================
- Jan. 22, 2004 Takashi Iwai <tiwai@suse.de>
+Jan. 22, 2004 Takashi Iwai <tiwai@suse.de>
Modules
@@ -14,18 +15,18 @@ When you need to access the OSS PCM, mixer or sequencer devices, the
corresponding module has to be loaded.
These modules are loaded automatically when the corresponding service
-is called. The alias is defined sound-service-x-y, where x and y are
+is called. The alias is defined as ``sound-service-x-y``, where x and y are
the card number and the minor unit number. Usually you don't have to
define these aliases by yourself.
Only necessary step for auto-loading of OSS modules is to define the
-card alias in /etc/modprobe.d/alsa.conf, such as
+card alias in ``/etc/modprobe.d/alsa.conf``, such as::
alias sound-slot-0 snd-emu10k1
-As the second card, define sound-slot-1 as well.
+As the second card, define ``sound-slot-1`` as well.
Note that you can't use the aliased name as the target name (i.e.
-"alias sound-slot-0 snd-card-0" doesn't work any more like the old
+``alias sound-slot-0 snd-card-0`` doesn't work any more like the old
modutils).
The currently available OSS configuration is shown in
@@ -42,6 +43,7 @@ Device Mapping
==============
ALSA supports the following OSS device files:
+::
PCM:
/dev/dspX
@@ -61,48 +63,55 @@ ALSA supports the following OSS device files:
where X is the card number from 0 to 7.
(NOTE: Some distributions have the device files like /dev/midi0 and
- /dev/midi1. They are NOT for OSS but for tclmidi, which is
- a totally different thing.)
+/dev/midi1. They are NOT for OSS but for tclmidi, which is
+a totally different thing.)
Unlike the real OSS, ALSA cannot use the device files more than the
assigned ones. For example, the first card cannot use /dev/dsp1 or
/dev/dsp2, but only /dev/dsp0 and /dev/adsp0.
As seen above, PCM and MIDI may have two devices. Usually, the first
-PCM device (hw:0,0 in ALSA) is mapped to /dev/dsp and the secondary
-device (hw:0,1) to /dev/adsp (if available). For MIDI, /dev/midi and
+PCM device (``hw:0,0`` in ALSA) is mapped to /dev/dsp and the secondary
+device (``hw:0,1``) to /dev/adsp (if available). For MIDI, /dev/midi and
/dev/amidi, respectively.
You can change this device mapping via the module options of
snd-pcm-oss and snd-rawmidi. In the case of PCM, the following
options are available for snd-pcm-oss:
- dsp_map PCM device number assigned to /dev/dspX
- (default = 0)
- adsp_map PCM device number assigned to /dev/adspX
- (default = 1)
+dsp_map
+ PCM device number assigned to /dev/dspX
+ (default = 0)
+adsp_map
+ PCM device number assigned to /dev/adspX
+ (default = 1)
-For example, to map the third PCM device (hw:0,2) to /dev/adsp0,
+For example, to map the third PCM device (``hw:0,2``) to /dev/adsp0,
define like this:
+::
options snd-pcm-oss adsp_map=2
The options take arrays. For configuring the second card, specify
two entries separated by comma. For example, to map the third PCM
device on the second card to /dev/adsp1, define like below:
+::
options snd-pcm-oss adsp_map=0,2
To change the mapping of MIDI devices, the following options are
available for snd-rawmidi:
- midi_map MIDI device number assigned to /dev/midi0X
- (default = 0)
- amidi_map MIDI device number assigned to /dev/amidi0X
- (default = 1)
+midi_map
+ MIDI device number assigned to /dev/midi0X
+ (default = 0)
+amidi_map
+ MIDI device number assigned to /dev/amidi0X
+ (default = 1)
For example, to assign the third MIDI device on the first card to
/dev/midi00, define as follows:
+::
options snd-rawmidi midi_map=2
@@ -118,43 +127,52 @@ wine, especially if they use the card only in the MMAP mode.
In such a case, you can change the behavior of PCM per application by
writing a command to the proc file. There is a proc file for each PCM
-stream, /proc/asound/cardX/pcmY[cp]/oss, where X is the card number
-(zero-based), Y the PCM device number (zero-based), and 'p' is for
-playback and 'c' for capture, respectively. Note that this proc file
+stream, ``/proc/asound/cardX/pcmY[cp]/oss``, where X is the card number
+(zero-based), Y the PCM device number (zero-based), and ``p`` is for
+playback and ``c`` for capture, respectively. Note that this proc file
exists only after snd-pcm-oss module is loaded.
The command sequence has the following syntax:
+::
app_name fragments fragment_size [options]
-app_name is the name of application with (higher priority) or without
+``app_name`` is the name of application with (higher priority) or without
path.
-fragments specifies the number of fragments or zero if no specific
+``fragments`` specifies the number of fragments or zero if no specific
number is given.
-fragment_size is the size of fragment in bytes or zero if not given.
-options is the optional parameters. The following options are
+``fragment_size`` is the size of fragment in bytes or zero if not given.
+``options`` contains the optional parameters. The following options are
available:
- disable the application tries to open a pcm device for
- this channel but does not want to use it.
- direct don't use plugins
- block force block open mode
- non-block force non-block open mode
- partial-frag write also partial fragments (affects playback only)
- no-silence do not fill silence ahead to avoid clicks
-
-The disable option is useful when one stream direction (playback or
+disable
+ the application tries to open a pcm device for
+ this channel but does not want to use it.
+direct
+ don't use plugins
+block
+ force block open mode
+non-block
+ force non-block open mode
+partial-frag
+ write also partial fragments (affects playback only)
+no-silence
+ do not fill silence ahead to avoid clicks
+
+The ``disable`` option is useful when one stream direction (playback or
capture) is not handled correctly by the application although the
hardware itself does support both directions.
-The direct option is used, as mentioned above, to bypass the automatic
+The ``direct`` option is used, as mentioned above, to bypass the automatic
conversion and useful for MMAP-applications.
For example, to playback the first PCM device without plugins for
quake, send a command via echo like the following:
+::
% echo "quake 0 0 direct" > /proc/asound/card0/pcm0p/oss
While quake wants only playback, you may append the second command
to notify driver that only this direction is about to be allocated:
+::
% echo "quake 0 0 disable" > /proc/asound/card0/pcm0c/oss
@@ -171,10 +189,11 @@ the file when it's busy. The -EBUSY error is returned in this case.
This blocking behavior can be changed globally via nonblock_open
module option of snd-pcm-oss. For using the blocking mode as default
for OSS devices, define like the following:
+::
options snd-pcm-oss nonblock_open=0
-The partial-frag and no-silence commands have been added recently.
+The ``partial-frag`` and ``no-silence`` commands have been added recently.
Both commands are for optimization use only. The former command
specifies to invoke the write transfer only when the whole fragment is
filled. The latter stops writing the silence data ahead
@@ -183,15 +202,18 @@ automatically. Both are disabled as default.
You can check the currently defined configuration by reading the proc
file. The read image can be sent to the proc file again, hence you
can save the current configuration
+::
% cat /proc/asound/card0/pcm0p/oss > /somewhere/oss-cfg
and restore it like
+::
% cat /somewhere/oss-cfg > /proc/asound/card0/pcm0p/oss
-Also, for clearing all the current configuration, send "erase" command
+Also, for clearing all the current configuration, send ``erase`` command
as below:
+::
% echo "erase" > /proc/asound/card0/pcm0p/oss
@@ -211,40 +233,43 @@ automatically.
As default, ALSA uses the following control for OSS volumes:
- OSS volume ALSA control Index
- -----------------------------------------------------
- SOUND_MIXER_VOLUME Master 0
- SOUND_MIXER_BASS Tone Control - Bass 0
- SOUND_MIXER_TREBLE Tone Control - Treble 0
- SOUND_MIXER_SYNTH Synth 0
- SOUND_MIXER_PCM PCM 0
- SOUND_MIXER_SPEAKER PC Speaker 0
- SOUND_MIXER_LINE Line 0
- SOUND_MIXER_MIC Mic 0
- SOUND_MIXER_CD CD 0
- SOUND_MIXER_IMIX Monitor Mix 0
- SOUND_MIXER_ALTPCM PCM 1
- SOUND_MIXER_RECLEV (not assigned)
- SOUND_MIXER_IGAIN Capture 0
- SOUND_MIXER_OGAIN Playback 0
- SOUND_MIXER_LINE1 Aux 0
- SOUND_MIXER_LINE2 Aux 1
- SOUND_MIXER_LINE3 Aux 2
- SOUND_MIXER_DIGITAL1 Digital 0
- SOUND_MIXER_DIGITAL2 Digital 1
- SOUND_MIXER_DIGITAL3 Digital 2
- SOUND_MIXER_PHONEIN Phone 0
- SOUND_MIXER_PHONEOUT Phone 1
- SOUND_MIXER_VIDEO Video 0
- SOUND_MIXER_RADIO Radio 0
- SOUND_MIXER_MONITOR Monitor 0
+==================== ===================== =====
+OSS volume ALSA control Index
+==================== ===================== =====
+SOUND_MIXER_VOLUME Master 0
+SOUND_MIXER_BASS Tone Control - Bass 0
+SOUND_MIXER_TREBLE Tone Control - Treble 0
+SOUND_MIXER_SYNTH Synth 0
+SOUND_MIXER_PCM PCM 0
+SOUND_MIXER_SPEAKER PC Speaker 0
+SOUND_MIXER_LINE Line 0
+SOUND_MIXER_MIC Mic 0
+SOUND_MIXER_CD CD 0
+SOUND_MIXER_IMIX Monitor Mix 0
+SOUND_MIXER_ALTPCM PCM 1
+SOUND_MIXER_RECLEV (not assigned)
+SOUND_MIXER_IGAIN Capture 0
+SOUND_MIXER_OGAIN Playback 0
+SOUND_MIXER_LINE1 Aux 0
+SOUND_MIXER_LINE2 Aux 1
+SOUND_MIXER_LINE3 Aux 2
+SOUND_MIXER_DIGITAL1 Digital 0
+SOUND_MIXER_DIGITAL2 Digital 1
+SOUND_MIXER_DIGITAL3 Digital 2
+SOUND_MIXER_PHONEIN Phone 0
+SOUND_MIXER_PHONEOUT Phone 1
+SOUND_MIXER_VIDEO Video 0
+SOUND_MIXER_RADIO Radio 0
+SOUND_MIXER_MONITOR Monitor 0
+==================== ===================== =====
The second column is the base-string of the corresponding ALSA
-control. In fact, the controls with "XXX [Playback|Capture]
-[Volume|Switch]" will be checked in addition.
+control. In fact, the controls with ``XXX [Playback|Capture]
+[Volume|Switch]`` will be checked in addition.
The current assignment of these mixer elements is listed in the proc
file, /proc/asound/cardX/oss_mixer, which will be like the following
+::
VOLUME "Master" 0
BASS "" 0
@@ -261,6 +286,7 @@ corresponding OSS control is not available.
For changing the assignment, you can write the configuration to this
proc file. For example, to map "Wave Playback" to the PCM volume,
send the command like the following:
+::
% echo 'VOLUME "Wave Playback" 0' > /proc/asound/card0/oss_mixer
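+
+The effect of this table can be checked from user space with the classic OSS
+mixer ioctls; a minimal sketch reading whatever control is currently mapped
+to ``SOUND_MIXER_VOLUME`` (``Master`` by default)::
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+  #include <sys/soundcard.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = open("/dev/mixer", O_RDONLY);
+          int vol;
+
+          if (fd < 0 || ioctl(fd, MIXER_READ(SOUND_MIXER_VOLUME), &vol) < 0) {
+                  perror("OSS mixer");
+                  return 1;
+          }
+          /* Left channel in the low byte, right channel in the next one. */
+          printf("volume: left=%d%% right=%d%%\n", vol & 0x7f, (vol >> 8) & 0x7f);
+          close(fd);
+          return 0;
+  }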
@@ -284,12 +310,18 @@ Duplex Streams
Note that when attempting to use a single device file for playback and
capture, the OSS API provides no way to set the format, sample rate or
number of channels different in each direction. Thus
+::
+
io_handle = open("device", O_RDWR)
+
will only function correctly if the values are the same in each direction.
To use different values in the two directions, use both
+::
+
input_handle = open("device", O_RDONLY)
output_handle = open("device", O_WRONLY)
+
and set the values for the corresponding handle.
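+
+A sketch of that pattern with the usual OSS ioctls from ``<sys/soundcard.h>``
+(the rates, format and channel counts are illustrative, and error checking
+of the ioctls is omitted)::
+
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <sys/soundcard.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int in = open("/dev/dsp", O_RDONLY);
+          int out = open("/dev/dsp", O_WRONLY);
+          int fmt = AFMT_S16_LE, mono = 1, stereo = 2;
+          int in_rate = 8000, out_rate = 48000;
+
+          if (in < 0 || out < 0)
+                  return 1;
+          /* Each handle carries its own format/channels/rate settings. */
+          ioctl(in, SNDCTL_DSP_SETFMT, &fmt);
+          ioctl(in, SNDCTL_DSP_CHANNELS, &mono);
+          ioctl(in, SNDCTL_DSP_SPEED, &in_rate);
+          ioctl(out, SNDCTL_DSP_SETFMT, &fmt);
+          ioctl(out, SNDCTL_DSP_CHANNELS, &stereo);
+          ioctl(out, SNDCTL_DSP_SPEED, &out_rate);
+          /* ... read() from "in", write() to "out" ... */
+          close(in);
+          close(out);
+          return 0;
+  }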
@@ -302,4 +334,3 @@ ICE1712 supports only the unconventional format, interleaved
10-channels 24bit (packed in 32bit) format. Therefore you cannot mmap
the buffer as the conventional (mono or 2-channels, 8 or 16bit) format
on OSS.
-
diff --git a/Documentation/sound/alsa/powersave.txt b/Documentation/sound/designs/powersave.rst
index 9657e8099228..138157452eb9 100644
--- a/Documentation/sound/alsa/powersave.txt
+++ b/Documentation/sound/designs/powersave.rst
@@ -1,9 +1,10 @@
+==========================
Notes on Power-Saving Mode
==========================
AC97 and HD-audio drivers have the automatic power-saving mode.
-This feature is enabled via Kconfig CONFIG_SND_AC97_POWER_SAVE
-and CONFIG_SND_HDA_POWER_SAVE options, respectively.
+This feature is enabled via Kconfig ``CONFIG_SND_AC97_POWER_SAVE``
+and ``CONFIG_SND_HDA_POWER_SAVE`` options, respectively.
With the automatic power-saving, the driver turns off the codec power
appropriately when no operation is required. When no applications use
@@ -11,20 +12,21 @@ the device and/or no analog loopback is set, the power disablement is
done fully or partially. It'll save a certain power consumption, thus
good for laptops (even for desktops).
-The time-out for automatic power-off can be specified via power_save
+The time-out for automatic power-off can be specified via ``power_save``
module option of snd-ac97-codec and snd-hda-intel modules. Specify
the time-out value in seconds. 0 means to disable the automatic
power-saving. The default value of timeout is given via
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT and
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT Kconfig options. Setting this to 1
+``CONFIG_SND_AC97_POWER_SAVE_DEFAULT`` and
+``CONFIG_SND_HDA_POWER_SAVE_DEFAULT`` Kconfig options. Setting this to 1
(the minimum value) isn't recommended because many applications try to
reopen the device frequently. 10 would be a good choice for normal
operations.
-The power_save option is exported as writable. This means you can
+The ``power_save`` option is exported as writable. This means you can
adjust the value via sysfs on the fly. For example, to turn on the
automatic power-save mode with 10 seconds, write to
-/sys/modules/snd_ac97_codec/parameters/power_save (usually as root):
+``/sys/modules/snd_ac97_codec/parameters/power_save`` (usually as root):
+::
# echo 10 > /sys/modules/snd_ac97_codec/parameters/power_save
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/designs/procfile.rst
index 7f8a0d325905..29a466851fd2 100644
--- a/Documentation/sound/alsa/Procfile.txt
+++ b/Documentation/sound/designs/procfile.rst
@@ -1,20 +1,22 @@
- Proc Files of ALSA Drivers
- ==========================
- Takashi Iwai <tiwai@suse.de>
+==========================
+Proc Files of ALSA Drivers
+==========================
+
+Takashi Iwai <tiwai@suse.de>
General
--------
+=======
ALSA has its own proc tree, /proc/asound. Many useful information are
found in this tree. When you encounter a problem and need debugging,
check the files listed in the following sections.
Each card has its subtree cardX, where X is from 0 to 7. The
-card-specific files are stored in the card* subdirectories.
+card-specific files are stored in the ``card*`` subdirectories.
Global Information
-------------------
+==================
cards
Shows the list of currently configured ALSA drivers,
@@ -31,15 +33,15 @@ devices
meminfo
Shows the status of allocated pages via ALSA drivers.
- Appears only when CONFIG_SND_DEBUG=y.
+ Appears only when ``CONFIG_SND_DEBUG=y``.
hwdep
Lists the currently available hwdep devices in format of
- <card>-<device>: <name>
+ ``<card>-<device>: <name>``
pcm
Lists the currently available PCM devices in format of
- <card>-<device>: <id>: <name> : <sub-streams>
+ ``<card>-<device>: <id>: <name> : <sub-streams>``
timer
Lists the currently available timer devices
@@ -54,23 +56,23 @@ oss/sndstat
Card Specific Files
--------------------
+===================
-The card-specific files are found in /proc/asound/card* directories.
+The card-specific files are found in ``/proc/asound/card*`` directories.
Some drivers (e.g. cmipci) have their own proc entries for the
-register dump, etc (e.g. /proc/asound/card*/cmipci shows the register
+register dump, etc (e.g. ``/proc/asound/card*/cmipci`` shows the register
dump). These files would be really helpful for debugging.
When PCM devices are available on this card, you can see directories
like pcm0p or pcm1c. They hold the PCM information for each PCM
-stream. The number after 'pcm' is the PCM device number from 0, and
-the last 'p' or 'c' means playback or capture direction. The files in
+stream. The number after ``pcm`` is the PCM device number from 0, and
+the last ``p`` or ``c`` means playback or capture direction. The files in
this subtree is described later.
-The status of MIDI I/O is found in midi* files. It shows the device
+The status of MIDI I/O is found in ``midi*`` files. It shows the device
name and the received/transmitted bytes through the MIDI device.
-When the card is equipped with AC97 codecs, there are codec97#*
+When the card is equipped with AC97 codecs, there are ``codec97#*``
subdirectories (described later).
When the OSS mixer emulation is enabled (and the module is loaded),
@@ -81,26 +83,27 @@ details.
PCM Proc Files
---------------
+==============
-card*/pcm*/info
+``card*/pcm*/info``
The general information of this PCM device: card #, device #,
substreams, etc.
-card*/pcm*/xrun_debug
- This file appears when CONFIG_SND_DEBUG=y and
- CONFIG_PCM_XRUN_DEBUG=y.
+``card*/pcm*/xrun_debug``
+ This file appears when ``CONFIG_SND_DEBUG=y`` and
+ ``CONFIG_PCM_XRUN_DEBUG=y``.
This shows the status of xrun (= buffer overrun/xrun) and
invalid PCM position debug/check of ALSA PCM middle layer.
It takes an integer value, can be changed by writing to this
- file, such as
+ file, such as::
# echo 5 > /proc/asound/card0/pcm0p/xrun_debug
The value consists of the following bit flags:
- bit 0 = Enable XRUN/jiffies debug messages
- bit 1 = Show stack trace at XRUN / jiffies check
- bit 2 = Enable additional jiffies check
+
+ * bit 0 = Enable XRUN/jiffies debug messages
+ * bit 1 = Show stack trace at XRUN / jiffies check
+ * bit 2 = Enable additional jiffies check
When the bit 0 is set, the driver will show the messages to
kernel log when an xrun is detected. The debug message is
@@ -117,72 +120,74 @@ card*/pcm*/xrun_debug
buggy) hardware that doesn't give smooth pointer updates.
This feature is enabled via the bit 2.
-card*/pcm*/sub*/info
+``card*/pcm*/sub*/info``
The general information of this PCM sub-stream.
-card*/pcm*/sub*/status
+``card*/pcm*/sub*/status``
The current status of this PCM sub-stream, elapsed time,
H/W position, etc.
-card*/pcm*/sub*/hw_params
+``card*/pcm*/sub*/hw_params``
The hardware parameters set for this sub-stream.
-card*/pcm*/sub*/sw_params
+``card*/pcm*/sub*/sw_params``
The soft parameters set for this sub-stream.
-card*/pcm*/sub*/prealloc
+``card*/pcm*/sub*/prealloc``
The buffer pre-allocation information.
-card*/pcm*/sub*/xrun_injection
+``card*/pcm*/sub*/xrun_injection``
Triggers an XRUN to the running stream when any value is
written to this proc file. Used for fault injection.
This entry is write-only.
AC97 Codec Information
-----------------------
+======================
-card*/codec97#*/ac97#?-?
+``card*/codec97#*/ac97#?-?``
Shows the general information of this AC97 codec chip, such as
name, capabilities, set up.
-card*/codec97#0/ac97#?-?+regs
+``card*/codec97#0/ac97#?-?+regs``
Shows the AC97 register dump. Useful for debugging.
When CONFIG_SND_DEBUG is enabled, you can write to this file for
changing an AC97 register directly. Pass two hex numbers.
For example,
+::
+
# echo 02 9f1f > /proc/asound/card0/codec97#0/ac97#0-0+regs
USB Audio Streams
------------------
+=================
-card*/stream*
+``card*/stream*``
Shows the assignment and the current status of each audio stream
of the given card. This information is very useful for debugging.
HD-Audio Codecs
----------------
+===============
-card*/codec#*
+``card*/codec#*``
Shows the general codec information and the attribute of each
widget node.
-card*/eld#*
+``card*/eld#*``
Available for HDMI or DisplayPort interfaces.
Shows ELD(EDID Like Data) info retrieved from the attached HDMI sink,
and describes its audio capabilities and configurations.
- Some ELD fields may be modified by doing `echo name hex_value > eld#*`.
+ Some ELD fields may be modified by doing ``echo name hex_value > eld#*``.
Only do this if you are sure the HDMI sink provided value is wrong.
And if that makes your HDMI audio work, please report to us so that we
can fix it in future kernel releases.
Sequencer Information
----------------------
+=====================
seq/drivers
Lists the currently available ALSA sequencer drivers.
@@ -203,7 +208,7 @@ seq/oss
Help For Debugging?
--------------------
+===================
When the problem is related with PCM, first try to turn on xrun_debug
mode. This will give you the kernel messages when and where xrun
@@ -211,24 +216,23 @@ happened.
If it's really a bug, report it with the following information:
- - the name of the driver/card, show in /proc/asound/cards
- - the register dump, if available (e.g. card*/cmipci)
+- the name of the driver/card, shown in ``/proc/asound/cards``
+- the register dump, if available (e.g. ``card*/cmipci``)
when it's a PCM problem,
- - set-up of PCM, shown in hw_parms, sw_params, and status in the PCM
- sub-stream directory
+- set-up of PCM, shown in hw_params, sw_params, and status in the PCM
+ sub-stream directory
when it's a mixer problem,
- - AC97 proc files, codec97#*/* files
+- AC97 proc files, ``codec97#*/*`` files
for USB audio/midi,
- - output of lsusb -v
- - stream* files in card directory
+- output of ``lsusb -v``
+- ``stream*`` files in card directory
The ALSA bug-tracking system is found at:
-
- https://bugtrack.alsa-project.org/alsa-bug/
+https://bugtrack.alsa-project.org/alsa-bug/
diff --git a/Documentation/sound/designs/seq-oss.rst b/Documentation/sound/designs/seq-oss.rst
new file mode 100644
index 000000000000..e82ffe0e7f43
--- /dev/null
+++ b/Documentation/sound/designs/seq-oss.rst
@@ -0,0 +1,371 @@
+===============================
+OSS Sequencer Emulation on ALSA
+===============================
+
+Copyright (c) 1998,1999 by Takashi Iwai
+
+ver.0.1.8; Nov. 16, 1999
+
+Description
+===========
+
+This directory contains the OSS sequencer emulation driver on ALSA. Note
+that this program is still under development.
+
+What this does - it provides the emulation of the OSS sequencer, access
+via ``/dev/sequencer`` and ``/dev/music`` devices.
+Most applications using OSS can run if the appropriate ALSA
+sequencer is prepared.
+
+The following features are emulated by this driver:
+
+* Normal sequencer and MIDI events:
+
+ They are converted to the ALSA sequencer events, and sent to the
+ corresponding port.
+
+* Timer events:
+
+ The timer is not selectable by ioctl. The control rate is fixed to
+ 100 regardless of HZ. That is, even on Alpha system, a tick is always
+ 1/100 second. The base rate and tempo can be changed in ``/dev/music``.
+
+* Patch loading:
+
+ It purely depends on the synth drivers whether it's supported since
+ the patch loading is realized by callback to the synth driver.
+
+* I/O controls:
+
+ Most of the controls are accepted. Some controls
+ are dependent on the synth driver, just as in the original OSS.
+
+Furthermore, you can find the following advanced features:
+
+* Better queue mechanism:
+
+ The events are queued before processing them.
+
+* Multiple applications:
+
+ You can run two or more applications simultaneously (even for OSS
+ sequencer)!
+ However, each MIDI device is exclusive - that is, if a MIDI device
+ is opened once by some application, other applications can't use
+ it. There is no such restriction for synth devices.
+
+* Real-time event processing:
+
+ The events can be processed in real time without using an out-of-band
+ ioctl. To switch to real-time mode, send an ABSTIME 0 event. The following
+ events will be processed in real time without being queued. To switch off
+ the real-time mode, send a RELTIME 0 event.
+
+* ``/proc`` interface:
+
+ The status of applications and devices can be shown via
+ ``/proc/asound/seq/oss`` at any time. In the later version,
+ configuration will be changed via ``/proc`` interface, too.
+
+
+Installation
+============
+
+Run configure script with both sequencer support (``--with-sequencer=yes``)
+and OSS emulation (``--with-oss=yes``) options. A module ``snd-seq-oss.o``
+will be created. If the synth module of your sound card supports OSS
+emulation (so far, only Emu8000 driver), this module will be loaded
+automatically.
+Otherwise, you need to load this module manually.
+
+At the beginning, this module probes all the MIDI ports that have already
+been connected to the sequencer. After that, the creation and deletion of
+ports are watched via the announcement mechanism of the ALSA sequencer.
+
+The available synth and MIDI devices can be found in proc interface.
+Run ``cat /proc/asound/seq/oss``, and check the devices. For example,
+if you use an AWE64 card, you'll see something like the following:
+::
+
+ OSS sequencer emulation version 0.1.8
+ ALSA client number 63
+ ALSA receiver port 0
+
+ Number of applications: 0
+
+ Number of synth devices: 1
+ synth 0: [EMU8000]
+ type 0x1 : subtype 0x20 : voices 32
+ capabilties : ioctl enabled / load_patch enabled
+
+ Number of MIDI devices: 3
+ midi 0: [Emu8000 Port-0] ALSA port 65:0
+ capability write / opened none
+
+ midi 1: [Emu8000 Port-1] ALSA port 65:1
+ capability write / opened none
+
+ midi 2: [0: MPU-401 (UART)] ALSA port 64:0
+ capability read/write / opened none
+
+Note that the device number may be different from the information of
+``/proc/asound/oss-devices`` or those of the original OSS driver.
+Use the device number listed in ``/proc/asound/seq/oss``
+to play via OSS sequencer emulation.
+
+Using Synthesizer Devices
+=========================
+
+Run your favorite program. I've tested playmidi-2.4, awemidi-0.4.3, gmod-3.1
+and xmp-1.1.5. You can load samples via ``/dev/sequencer`` like sfxload,
+too.
+
+If the lowlevel driver supports multiple access to synth devices (like
+Emu8000 driver), two or more applications are allowed to run at the same
+time.
+
+Using MIDI Devices
+==================
+
+So far, only MIDI output was tested. MIDI input was not checked at all,
+but hopefully it will work. Use the device number listed in
+``/proc/asound/seq/oss``.
+Be aware that these numbers are mostly different from the list in
+``/proc/asound/oss-devices``.
+
+Module Options
+==============
+
+The following module options are available:
+
+maxqlen
+ specifies the maximum read/write queue length. This queue is private
+ to the OSS sequencer, so it is independent of the queue length of the
+ ALSA sequencer. Default value is 1024.
+
+seq_oss_debug
+ specifies the debug level and accepts zero (= no debug message) or a
+ positive integer. Default value is 0.
+
+Queue Mechanism
+===============
+
+OSS sequencer emulation uses an ALSA priority queue. The
+events from ``/dev/sequencer`` are processed and put onto the queue
+specified by module option.
+
+All the events from ``/dev/sequencer`` are parsed as soon as they are written.
+The timing events are also parsed at this moment, so that the events may
+be processed in real-time. Sending an event ABSTIME 0 switches the operation
+mode to real-time mode, and sending an event RELTIME 0 switches it off.
+In the real-time mode, all events are dispatched immediately.
+
+The queued events are dispatched to the corresponding ALSA sequencer
+ports after scheduled time by ALSA sequencer dispatcher.
+
+If the write-queue is full, the application sleeps until a certain amount
+(one half by default) becomes empty in blocking mode. Synchronization
+to the write timing is implemented, too.
+
+The input from MIDI devices or echo-back events are stored on read FIFO
+queue. If the application reads ``/dev/sequencer`` in blocking mode, the
+process will be woken up.
+
+Interface to Synthesizer Device
+===============================
+
+Registration
+------------
+
+To register an OSS synthesizer device, use snd_seq_oss_synth_register()
+function:
+::
+
+ int snd_seq_oss_synth_register(char *name, int type, int subtype, int nvoices,
+ snd_seq_oss_callback_t *oper, void *private_data)
+
+The arguments ``name``, ``type``, ``subtype`` and ``nvoices``
+are used for making the appropriate synth_info structure for ioctl. The
+return value is an index number of this device. This index must be remembered
+for unregistering. If registration fails, -errno will be returned.
+
+To release this device, call snd_seq_oss_synth_unregister() function:
+::
+
+ int snd_seq_oss_synth_unregister(int index)
+
+where the ``index`` is the index number returned by register function.
+
+Callbacks
+---------
+
+OSS synthesizer devices have capability for sample downloading and ioctls
+like sample reset. In OSS emulation, these special features are realized
+by using callbacks. The registration argument oper is used to specify these
+callbacks. The following callback functions must be defined:
+::
+
+ snd_seq_oss_callback_t:
+ int (*open)(snd_seq_oss_arg_t *p, void *closure);
+ int (*close)(snd_seq_oss_arg_t *p);
+ int (*ioctl)(snd_seq_oss_arg_t *p, unsigned int cmd, unsigned long arg);
+ int (*load_patch)(snd_seq_oss_arg_t *p, int format, const char *buf, int offs, int count);
+ int (*reset)(snd_seq_oss_arg_t *p);
+
+Except for ``open`` and ``close`` callbacks, they are allowed to be NULL.
+
+Each callback function takes the argument type ``snd_seq_oss_arg_t`` as the
+first argument.
+::
+
+ struct snd_seq_oss_arg_t {
+ int app_index;
+ int file_mode;
+ int seq_mode;
+ snd_seq_addr_t addr;
+ void *private_data;
+ int event_passing;
+ };
+
+The first three fields, ``app_index``, ``file_mode`` and ``seq_mode``
+are initialized by OSS sequencer. The ``app_index`` is the application
+index which is unique to each application opening OSS sequencer. The
+``file_mode`` is bit-flags indicating the file operation mode. See
+``seq_oss.h`` for its meaning. The ``seq_mode`` is sequencer operation
+mode. In the current version, only ``SND_OSSSEQ_MODE_SYNTH`` is used.
+
+The next two fields, ``addr`` and ``private_data``, must be
+filled by the synth driver at open callback. The ``addr`` contains
+the address of ALSA sequencer port which is assigned to this device. If
+the driver allocates memory for ``private_data``, it must be released
+in close callback by itself.
+
+The last field, ``event_passing``, indicates how to translate note-on
+/ off events. In ``PROCESS_EVENTS`` mode, the note 255 is regarded
+as velocity change, and key pressure event is passed to the port. In
+``PASS_EVENTS`` mode, all note on/off events are passed to the port
+without modification. ``PROCESS_KEYPRESS`` mode checks the note above 128
+and regards it as key pressure event (mainly for Emu8000 driver).
+
+Open Callback
+-------------
+
+The ``open`` is called each time this device is opened by an application
+using OSS sequencer. This must not be NULL. Typically, the open callback
+does the following procedure:
+
+#. Allocate private data record.
+#. Create an ALSA sequencer port.
+#. Set the new port address on ``arg->addr``.
+#. Set the private data record pointer on ``arg->private_data``.
+
+Note that the type bit-flags in port_info of this synth port must NOT contain
+``TYPE_MIDI_GENERIC``
+bit. Instead, ``TYPE_SPECIFIC`` should be used. Also, ``CAP_SUBSCRIPTION``
+bit should NOT be included either. This is necessary to distinguish it from
+other normal MIDI devices. If the open procedure succeeds, return zero. Otherwise,
+return -errno.
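+
+A sketch of such an open callback following the four steps above;
+``struct my_chip``, ``struct my_port`` and ``my_create_port()`` are
+hypothetical stand-ins for the driver's own data and its port-creation
+helper::
+
+  static int my_synth_open(snd_seq_oss_arg_t *arg, void *closure)
+  {
+          struct my_chip *chip = closure;
+          struct my_port *port;
+
+          port = kzalloc(sizeof(*port), GFP_KERNEL);   /* 1. private data */
+          if (!port)
+                  return -ENOMEM;
+          port->chip = chip;
+
+          port->seq_port = my_create_port(chip);       /* 2. ALSA sequencer port */
+          if (port->seq_port < 0) {
+                  kfree(port);
+                  return -ENODEV;
+          }
+
+          arg->addr.client = chip->seq_client;         /* 3. new port address */
+          arg->addr.port = port->seq_port;
+          arg->private_data = port;                    /* 4. private data pointer */
+          return 0;
+  }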
+
+Ioctl Callback
+--------------
+
+The ``ioctl`` callback is called when the sequencer receives device-specific
+ioctls. The following two ioctls should be processed by this callback:
+
+IOCTL_SEQ_RESET_SAMPLES
+ reset all samples in memory -- return 0
+
+IOCTL_SYNTH_MEMAVL
+ return the available memory size
+
+FM_4OP_ENABLE
+ can usually be ignored
+
+The other ioctls are processed inside the sequencer without passing to
+the lowlevel driver.
+
+Load_Patch Callback
+-------------------
+
+The ``load_patch`` callback is used for sample downloading. This callback
+must read the data from user-space and transfer it to the device. Return 0
+on success, and -errno on failure. The ``format`` argument is the patch key
+in the patch_info record. The ``buf`` is a user-space pointer to where the
+patch_info record is stored. The ``offs`` can be ignored. The ``count`` is
+the total size of the sample data.
+
+Close Callback
+--------------
+
+The ``close`` callback is called when this device is closed by the
+application. If any private data was allocated in open callback, it must
+be released in the close callback. The deletion of ALSA port should be
+done here, too. This callback must not be NULL.
+
+Reset Callback
+--------------
+
+The ``reset`` callback is called when sequencer device is reset or
+closed by applications. The callback should turn off the sounds on the
+relevant port immediately, and initialize the status of the port. If this
+callback is undefined, OSS seq sends a ``HEARTBEAT`` event to the
+port.
+
+Events
+======
+
+Most of the events are processed by the sequencer and translated to the
+appropriate ALSA sequencer events, so that each synth device can receive them
+via the input_event callback of its ALSA sequencer port. The following ALSA
+events should be implemented by the driver:
+
+============= ===================
+ALSA event Original OSS events
+============= ===================
+NOTEON SEQ_NOTEON, MIDI_NOTEON
+NOTE SEQ_NOTEOFF, MIDI_NOTEOFF
+KEYPRESS MIDI_KEY_PRESSURE
+CHANPRESS SEQ_AFTERTOUCH, MIDI_CHN_PRESSURE
+PGMCHANGE SEQ_PGMCHANGE, MIDI_PGM_CHANGE
+PITCHBEND SEQ_CONTROLLER(CTRL_PITCH_BENDER),
+ MIDI_PITCH_BEND
+CONTROLLER MIDI_CTL_CHANGE,
+ SEQ_BALANCE (with CTL_PAN)
+CONTROL14 SEQ_CONTROLLER
+REGPARAM SEQ_CONTROLLER(CTRL_PITCH_BENDER_RANGE)
+SYSEX SEQ_SYSEX
+============= ===================
+
+Most of this behavior can be realized by the MIDI emulation driver
+included in the Emu8000 lowlevel driver. In a future release, this module
+will become independent.
+
+Some OSS events (``SEQ_PRIVATE`` and ``SEQ_VOLUME`` events) are passed as event
+type SND_SEQ_OSS_PRIVATE. The OSS sequencer passes these events as 8-byte
+packets without any modification. The lowlevel driver should process these
+events appropriately.
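+
+On the receiving side, a synth driver typically dispatches these in the
+input_event callback of its sequencer port. A sketch using the uapi event
+type names; ``struct my_chip`` and the ``my_synth_*()`` helpers are
+hypothetical, and the callback signature is the kernel sequencer's usual
+event_input form::
+
+  static int my_synth_input(struct snd_seq_event *ev, int direct,
+                            void *private_data, int atomic, int hop)
+  {
+          struct my_chip *chip = private_data;
+
+          switch (ev->type) {
+          case SNDRV_SEQ_EVENT_NOTEON:
+                  my_synth_note_on(chip, ev->data.note.channel,
+                                   ev->data.note.note, ev->data.note.velocity);
+                  break;
+          case SNDRV_SEQ_EVENT_NOTEOFF:
+                  my_synth_note_off(chip, ev->data.note.channel,
+                                    ev->data.note.note);
+                  break;
+          case SNDRV_SEQ_EVENT_CONTROLLER:
+                  my_synth_control(chip, ev->data.control.channel,
+                                   ev->data.control.param,
+                                   ev->data.control.value);
+                  break;
+          default:        /* KEYPRESS, CHANPRESS, PGMCHANGE, PITCHBEND, ... */
+                  break;
+          }
+          return 0;
+  }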
+
+Interface to MIDI Device
+========================
+
+Since the OSS emulation probes the creation and deletion of ALSA MIDI
+sequencer ports automatically by receiving announcements from the ALSA
+sequencer, the MIDI devices don't need to be registered explicitly
+like synth devices.
+However, the MIDI port_info registered to ALSA sequencer must include
+a group name ``SND_SEQ_GROUP_DEVICE`` and a capability-bit
+``CAP_READ`` or ``CAP_WRITE``. Also, subscription capabilities,
+``CAP_SUBS_READ`` or ``CAP_SUBS_WRITE``, must be defined, too. If
+these conditions are not satisfied, the port is not registered as OSS
+sequencer MIDI device.
+
+The events via MIDI devices are parsed in OSS sequencer and converted
+to the corresponding ALSA sequencer events. The input from MIDI sequencer
+is also converted to MIDI byte events by the OSS sequencer. This works just
+like the reverse of the seq_midi module.
+
+Known Problems / TODO's
+=======================
+
+* Patch loading via ALSA instrument layer is not implemented yet.
+
diff --git a/Documentation/sound/alsa/timestamping.txt b/Documentation/sound/designs/timestamping.rst
index 9d579aefbffd..2b0fff503415 100644
--- a/Documentation/sound/alsa/timestamping.txt
+++ b/Documentation/sound/designs/timestamping.rst
@@ -1,18 +1,22 @@
+=====================
+ALSA PCM Timestamping
+=====================
+
The ALSA API can provide two different system timestamps:
- Trigger_tstamp is the system time snapshot taken when the .trigger
-callback is invoked. This snapshot is taken by the ALSA core in the
-general case, but specific hardware may have synchronization
-capabilities or conversely may only be able to provide a correct
-estimate with a delay. In the latter two cases, the low-level driver
-is responsible for updating the trigger_tstamp at the most appropriate
-and precise moment. Applications should not rely solely on the first
-trigger_tstamp but update their internal calculations if the driver
-provides a refined estimate with a delay.
+ callback is invoked. This snapshot is taken by the ALSA core in the
+ general case, but specific hardware may have synchronization
+ capabilities or conversely may only be able to provide a correct
+ estimate with a delay. In the latter two cases, the low-level driver
+ is responsible for updating the trigger_tstamp at the most appropriate
+ and precise moment. Applications should not rely solely on the first
+ trigger_tstamp but update their internal calculations if the driver
+ provides a refined estimate with a delay.
- tstamp is the current system timestamp updated during the last
-event or application query.
-The difference (tstamp - trigger_tstamp) defines the elapsed time.
+ event or application query.
+ The difference (tstamp - trigger_tstamp) defines the elapsed time.
The ALSA API provides two basic pieces of information, avail
and delay, which combined with the trigger and current system
@@ -22,15 +26,15 @@ the ring buffer and the amount of queued samples.
The use of these different pointers and time information depends on
the application needs:
-- 'avail' reports how much can be written in the ring buffer
-- 'delay' reports the time it will take to hear a new sample after all
-queued samples have been played out.
+- ``avail`` reports how much can be written in the ring buffer
+- ``delay`` reports the time it will take to hear a new sample after all
+ queued samples have been played out.
When timestamps are enabled, the avail/delay information is reported
along with a snapshot of system time. Applications can select from
-CLOCK_REALTIME (NTP corrections including going backwards),
-CLOCK_MONOTONIC (NTP corrections but never going backwards),
-CLOCK_MONOTIC_RAW (without NTP corrections) and change the mode
+``CLOCK_REALTIME`` (NTP corrections including going backwards),
+``CLOCK_MONOTONIC`` (NTP corrections but never going backwards),
+``CLOCK_MONOTONIC_RAW`` (without NTP corrections) and change the mode
dynamically with sw_params
@@ -38,17 +42,18 @@ The ALSA API also provide an audio_tstamp which reflects the passage
of time as measured by different components of audio hardware. In
ascii-art, this could be represented as follows (for the playback
case):
+::
+ --------------------------------------------------------------> time
+ ^ ^ ^ ^ ^
+ | | | | |
+ analog link dma app FullBuffer
+ time time time time time
+ | | | | |
+ |< codec delay >|<--hw delay-->|<queued samples>|<---avail->|
+ |<----------------- delay---------------------->| |
+ |<----ring buffer length---->|
---------------------------------------------------------------> time
- ^ ^ ^ ^ ^
- | | | | |
- analog link dma app FullBuffer
- time time time time time
- | | | | |
- |< codec delay >|<--hw delay-->|<queued samples>|<---avail->|
- |<----------------- delay---------------------->| |
- |<----ring buffer length---->|
The analog time is taken at the last stage of the playback, as close
as possible to the actual transducer
@@ -113,11 +118,11 @@ audio applications...
Due to the varied nature of timestamping needs, even for a single
application, the audio_tstamp_config can be changed dynamically. In
-the STATUS ioctl, the parameters are read-only and do not allow for
+the ``STATUS`` ioctl, the parameters are read-only and do not allow for
any application selection. To work around this limitation without
-impacting legacy applications, a new STATUS_EXT ioctl is introduced
+impacting legacy applications, a new ``STATUS_EXT`` ioctl is introduced
with read/write parameters. ALSA-lib will be modified to make use of
-STATUS_EXT and effectively deprecate STATUS.
+``STATUS_EXT`` and effectively deprecate ``STATUS``.
The ALSA API only allows for a single audio timestamp to be reported
at a time. This is a conscious design decision, reading the audio
@@ -135,36 +140,42 @@ the hardware, there is a risk of misalignment with the avail and delay
information. To make sure applications are not confused, a
driver_timestamp field is added in the snd_pcm_status structure; this
timestamp shows when the information is put together by the driver
-before returning from the STATUS and STATUS_EXT ioctl. in most cases
+before returning from the ``STATUS`` and ``STATUS_EXT`` ioctls. In most cases
this driver_timestamp will be identical to the regular system tstamp.
Examples of timestamping with HDaudio:
1. DMA timestamp, no compensation for DMA+analog delay
-$ ./audio_time -p --ts_type=1
-playback: systime: 341121338 nsec, audio time 342000000 nsec, systime delta -878662
-playback: systime: 426236663 nsec, audio time 427187500 nsec, systime delta -950837
-playback: systime: 597080580 nsec, audio time 598000000 nsec, systime delta -919420
-playback: systime: 682059782 nsec, audio time 683020833 nsec, systime delta -961051
-playback: systime: 852896415 nsec, audio time 853854166 nsec, systime delta -957751
-playback: systime: 937903344 nsec, audio time 938854166 nsec, systime delta -950822
+::
+
+ $ ./audio_time -p --ts_type=1
+ playback: systime: 341121338 nsec, audio time 342000000 nsec, systime delta -878662
+ playback: systime: 426236663 nsec, audio time 427187500 nsec, systime delta -950837
+ playback: systime: 597080580 nsec, audio time 598000000 nsec, systime delta -919420
+ playback: systime: 682059782 nsec, audio time 683020833 nsec, systime delta -961051
+ playback: systime: 852896415 nsec, audio time 853854166 nsec, systime delta -957751
+ playback: systime: 937903344 nsec, audio time 938854166 nsec, systime delta -950822
2. DMA timestamp, compensation for DMA+analog delay
-$ ./audio_time -p --ts_type=1 -d
-playback: systime: 341053347 nsec, audio time 341062500 nsec, systime delta -9153
-playback: systime: 426072447 nsec, audio time 426062500 nsec, systime delta 9947
-playback: systime: 596899518 nsec, audio time 596895833 nsec, systime delta 3685
-playback: systime: 681915317 nsec, audio time 681916666 nsec, systime delta -1349
-playback: systime: 852741306 nsec, audio time 852750000 nsec, systime delta -8694
+::
+
+ $ ./audio_time -p --ts_type=1 -d
+ playback: systime: 341053347 nsec, audio time 341062500 nsec, systime delta -9153
+ playback: systime: 426072447 nsec, audio time 426062500 nsec, systime delta 9947
+ playback: systime: 596899518 nsec, audio time 596895833 nsec, systime delta 3685
+ playback: systime: 681915317 nsec, audio time 681916666 nsec, systime delta -1349
+ playback: systime: 852741306 nsec, audio time 852750000 nsec, systime delta -8694
3. link timestamp, compensation for DMA+analog delay
-$ ./audio_time -p --ts_type=2 -d
-playback: systime: 341060004 nsec, audio time 341062791 nsec, systime delta -2787
-playback: systime: 426242074 nsec, audio time 426244875 nsec, systime delta -2801
-playback: systime: 597080992 nsec, audio time 597084583 nsec, systime delta -3591
-playback: systime: 682084512 nsec, audio time 682088291 nsec, systime delta -3779
-playback: systime: 852936229 nsec, audio time 852940916 nsec, systime delta -4687
-playback: systime: 938107562 nsec, audio time 938112708 nsec, systime delta -5146
+::
+
+ $ ./audio_time -p --ts_type=2 -d
+ playback: systime: 341060004 nsec, audio time 341062791 nsec, systime delta -2787
+ playback: systime: 426242074 nsec, audio time 426244875 nsec, systime delta -2801
+ playback: systime: 597080992 nsec, audio time 597084583 nsec, systime delta -3591
+ playback: systime: 682084512 nsec, audio time 682088291 nsec, systime delta -3779
+ playback: systime: 852936229 nsec, audio time 852940916 nsec, systime delta -4687
+ playback: systime: 938107562 nsec, audio time 938112708 nsec, systime delta -5146
Example 1 shows that the timestamp at the DMA level is close to 1ms
ahead of the actual playback time (as a side note this sort of
@@ -181,20 +192,24 @@ shows how compensating for the delay exposes a 1ms accuracy (due to
the use of the frame counter by the driver)
Example 3: DMA timestamp, no compensation for delay, delta of ~5ms
-$ ./audio_time -p -Dhw:1 -t1
-playback: systime: 120174019 nsec, audio time 125000000 nsec, systime delta -4825981
-playback: systime: 245041136 nsec, audio time 250000000 nsec, systime delta -4958864
-playback: systime: 370106088 nsec, audio time 375000000 nsec, systime delta -4893912
-playback: systime: 495040065 nsec, audio time 500000000 nsec, systime delta -4959935
-playback: systime: 620038179 nsec, audio time 625000000 nsec, systime delta -4961821
-playback: systime: 745087741 nsec, audio time 750000000 nsec, systime delta -4912259
-playback: systime: 870037336 nsec, audio time 875000000 nsec, systime delta -4962664
+::
+
+ $ ./audio_time -p -Dhw:1 -t1
+ playback: systime: 120174019 nsec, audio time 125000000 nsec, systime delta -4825981
+ playback: systime: 245041136 nsec, audio time 250000000 nsec, systime delta -4958864
+ playback: systime: 370106088 nsec, audio time 375000000 nsec, systime delta -4893912
+ playback: systime: 495040065 nsec, audio time 500000000 nsec, systime delta -4959935
+ playback: systime: 620038179 nsec, audio time 625000000 nsec, systime delta -4961821
+ playback: systime: 745087741 nsec, audio time 750000000 nsec, systime delta -4912259
+ playback: systime: 870037336 nsec, audio time 875000000 nsec, systime delta -4962664
Example 4: DMA timestamp, compensation for delay, delay of ~1ms
-$ ./audio_time -p -Dhw:1 -t1 -d
-playback: systime: 120190520 nsec, audio time 120000000 nsec, systime delta 190520
-playback: systime: 245036740 nsec, audio time 244000000 nsec, systime delta 1036740
-playback: systime: 370034081 nsec, audio time 369000000 nsec, systime delta 1034081
-playback: systime: 495159907 nsec, audio time 494000000 nsec, systime delta 1159907
-playback: systime: 620098824 nsec, audio time 619000000 nsec, systime delta 1098824
-playback: systime: 745031847 nsec, audio time 744000000 nsec, systime delta 1031847
+::
+
+ $ ./audio_time -p -Dhw:1 -t1 -d
+ playback: systime: 120190520 nsec, audio time 120000000 nsec, systime delta 190520
+ playback: systime: 245036740 nsec, audio time 244000000 nsec, systime delta 1036740
+ playback: systime: 370034081 nsec, audio time 369000000 nsec, systime delta 1034081
+ playback: systime: 495159907 nsec, audio time 494000000 nsec, systime delta 1159907
+ playback: systime: 620098824 nsec, audio time 619000000 nsec, systime delta 1098824
+ playback: systime: 745031847 nsec, audio time 744000000 nsec, systime delta 1031847
diff --git a/Documentation/sound/alsa/HD-Audio-Controls.txt b/Documentation/sound/hd-audio/controls.rst
index e9621e349e17..f2ebc4f79b44 100644
--- a/Documentation/sound/alsa/HD-Audio-Controls.txt
+++ b/Documentation/sound/hd-audio/controls.rst
@@ -1,16 +1,21 @@
+======================================
+HD-Audio Codec-Specific Mixer Controls
+======================================
+
+
This file explains the codec-specific mixer controls.
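+
+These controls appear as ordinary ALSA mixer elements. As a minimal
+illustration (assuming card 0 and that the control in question is
+provided by your codec), they can be listed and changed with amixer
+::
+
+  $ amixer -c 0 scontrols
+  $ amixer -c 0 sget 'Auto-Mute Mode'
+  $ amixer -c 0 sset 'Auto-Mute Mode' Disabled
+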
Realtek codecs
--------------
-* Channel Mode
+Channel Mode
This is an enum control to change the surround-channel setup,
appears only when the surround channels are available.
It gives the number of channels to be used, "2ch", "4ch", "6ch",
and "8ch". According to the configuration, this also controls the
jack-retasking of multi-I/O jacks.
-* Auto-Mute Mode
+Auto-Mute Mode
This is an enum control to change the auto-mute behavior of the
headphone and line-out jacks. If built-in speakers and headphone
and/or line-out jacks are available on a machine, this controls
@@ -30,24 +35,24 @@ Realtek codecs
IDT/Sigmatel codecs
-------------------
-* Analog Loopback
+Analog Loopback
This control enables/disables the analog-loopback circuit. This
appears only when "loopback" is set to true in a codec hint
(see HD-Audio.txt). Note that on some codecs the analog-loopback
and the normal PCM playback are exclusive, i.e. when this is on, you
won't hear any PCM stream.
-* Swap Center/LFE
+Swap Center/LFE
Swaps the center and LFE channel order. Normally, the left
corresponds to the center and the right to the LFE. When this is
ON, the left to the LFE and the right to the center.
-* Headphone as Line Out
+Headphone as Line Out
When this control is ON, treat the headphone jacks as line-out
jacks. That is, the headphone won't auto-mute the other line-outs,
and no HP-amp is set to the pins.
-* Mic Jack Mode, Line Jack Mode, etc
+Mic Jack Mode, Line Jack Mode, etc
These enum controls set the direction and the bias of the input jack
pins. Depending on the jack type, it can be set as "Mic In" or "Line
In" for determining the input bias, or it can be set to "Line Out"
@@ -57,19 +62,19 @@ IDT/Sigmatel codecs
VIA codecs
----------
-* Smart 5.1
+Smart 5.1
An enum control to re-task the multi-I/O jacks for surround outputs.
When it's ON, the corresponding input jacks (usually a line-in and a
mic-in) are switched as the surround and the CLFE output jacks.
-* Independent HP
+Independent HP
When this enum control is enabled, the headphone output is routed
from an individual stream (the third PCM such as hw:0,2) instead of
the primary stream. In the case the headphone DAC is shared with a
side or a CLFE-channel DAC, the DAC is switched to the headphone
automatically.
-* Loopback Mixing
+Loopback Mixing
An enum control to determine whether the analog-loopback route is
enabled or not. When it's enabled, the analog-loopback is mixed to
the front-channel. Also, the same route is used for the headphone
@@ -78,7 +83,7 @@ VIA codecs
headphones and speakers because there is only one DAC connected to a
mixer widget.
-* Dynamic Power-Control
+Dynamic Power-Control
This control determines whether the dynamic power-control per jack
detection is enabled or not. When enabled, the widgets power state
(D0/D3) are changed dynamically depending on the jack plugging
@@ -86,7 +91,7 @@ VIA codecs
doesn't provide a proper jack-detection, this won't work; in such a
case, turn this control OFF.
-* Jack Detect
+Jack Detect
This control is provided only for VT1708 codec which gives no proper
unsolicited event per jack plug. When this is on, the driver polls
the jack detection so that the headphone auto-mute can work, while
@@ -96,21 +101,21 @@ VIA codecs
Conexant codecs
---------------
-* Auto-Mute Mode
+Auto-Mute Mode
See Realtek codecs.
Analog codecs
--------------
-* Channel Mode
+Channel Mode
This is an enum control to change the surround-channel setup,
appears only when the surround channels are available.
It gives the number of channels to be used, "2ch", "4ch" and "6ch".
According to the configuration, this also controls the
jack-retasking of multi-I/O jacks.
-* Independent HP
+Independent HP
When this enum control is enabled, the headphone output is routed
from an individual stream (the third PCM such as hw:0,2) instead of
the primary stream.
diff --git a/Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt b/Documentation/sound/hd-audio/dp-mst.rst
index 82744ac3513d..58b72437e6c3 100644
--- a/Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt
+++ b/Documentation/sound/hd-audio/dp-mst.rst
@@ -1,3 +1,7 @@
+=======================
+HD-Audio DP-MST Support
+=======================
+
To support DP MST audio, HD Audio hdmi codec driver introduces virtual pin
and dynamic pcm assignment.
@@ -44,10 +48,12 @@ Build Jack
----------
- dyn_pcm_assign
-Will not use hda_jack but use snd_jack in spec->pcm_rec[pcm_idx].jack directly.
+
+ Will not use hda_jack but use snd_jack in spec->pcm_rec[pcm_idx].jack directly.
- !dyn_pcm_assign
-Use hda_jack and assign spec->pcm_rec[pcm_idx].jack = jack->jack statically.
+
+ Use hda_jack and assign spec->pcm_rec[pcm_idx].jack = jack->jack statically.
Unsolicited Event Enabling
@@ -58,16 +64,20 @@ Enable unsolicited event if !acomp.
Monitor Hotplug Event Handling
------------------------------
- acomp
-pin_eld_notify() -> check_presence_and_report() -> hdmi_present_sense() ->
-sync_eld_via_acomp().
-Use directly snd_jack_report() on spec->pcm_rec[pcm_idx].jack for
-both dyn_pcm_assign and !dyn_pcm_assign
+
+ pin_eld_notify() -> check_presence_and_report() -> hdmi_present_sense() ->
+ sync_eld_via_acomp().
+
+ Use directly snd_jack_report() on spec->pcm_rec[pcm_idx].jack for
+ both dyn_pcm_assign and !dyn_pcm_assign
- !acomp
-Hdmi_unsol_event() -> hdmi_intrinsic_event() -> check_presence_and_report() ->
-hdmi_present_sense() -> hdmi_prepsent_sense_via_verbs()
-Use directly snd_jack_report() on spec->pcm_rec[pcm_idx].jack for dyn_pcm_assign.
-Use hda_jack mechanism to handle jack events.
+
+ hdmi_unsol_event() -> hdmi_intrinsic_event() -> check_presence_and_report() ->
+ hdmi_present_sense() -> hdmi_present_sense_via_verbs()
+
+ Use directly snd_jack_report() on spec->pcm_rec[pcm_idx].jack for dyn_pcm_assign.
+ Use hda_jack mechanism to handle jack events.
Others to be added later
diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
new file mode 100644
index 000000000000..f8a72ffffe66
--- /dev/null
+++ b/Documentation/sound/hd-audio/index.rst
@@ -0,0 +1,10 @@
+HD-Audio
+========
+
+.. toctree::
+ :maxdepth: 2
+
+ notes
+ models
+ controls
+ dp-mst
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
new file mode 100644
index 000000000000..5338673c88d9
--- /dev/null
+++ b/Documentation/sound/hd-audio/models.rst
@@ -0,0 +1,518 @@
+==============================
+HD-Audio Codec-Specific Models
+==============================
+
+ALC880
+======
+3stack
+ 3-jack in back and a headphone out
+3stack-digout
+ 3-jack in back, a HP out and a SPDIF out
+5stack
+ 5-jack in back, 2-jack in front
+5stack-digout
+ 5-jack in back, 2-jack in front, a SPDIF out
+6stack
+ 6-jack in back, 2-jack in front
+6stack-digout
+ 6-jack with a SPDIF out
+
+ALC260
+======
+gpio1
+ Enable GPIO1
+coef
+ Enable EAPD via COEF table
+fujitsu
+ Quirk for FSC S7020
+fujitsu-jwse
+ Quirk for FSC S7020 with jack modes and HP mic support
+
+ALC262
+======
+inv-dmic
+ Inverted internal mic workaround
+
+ALC267/268
+==========
+inv-dmic
+ Inverted internal mic workaround
+hp-eapd
+ Disable HP EAPD on NID 0x15
+
+ALC22x/23x/25x/269/27x/28x/29x (and vendor-specific ALC3xxx models)
+===================================================================
+laptop-amic
+ Laptops with analog-mic input
+laptop-dmic
+ Laptops with digital-mic input
+alc269-dmic
+ Enable ALC269(VA) digital mic workaround
+alc271-dmic
+ Enable ALC271X digital mic workaround
+inv-dmic
+ Inverted internal mic workaround
+headset-mic
+ Indicates a combined headset (headphone+mic) jack
+headset-mode
+ More comprehensive headset support for ALC269 & co
+headset-mode-no-hp-mic
+ Headset mode support without headphone mic
+lenovo-dock
+ Enables docking station I/O for some Lenovos
+hp-gpio-led
+ GPIO LED support on HP laptops
+dell-headset-multi
+ Headset jack, which can also be used as mic-in
+dell-headset-dock
+ Headset jack (without mic-in), and also dock I/O
+alc283-dac-wcaps
+ Fixups for Chromebook with ALC283
+alc283-sense-combo
+ Combo jack sensing on ALC283
+tpt440-dock
+ Pin configs for Lenovo Thinkpad Dock support
+
+ALC66x/67x/892
+==============
+mario
+ Chromebook mario model fixup
+asus-mode1
+ ASUS
+asus-mode2
+ ASUS
+asus-mode3
+ ASUS
+asus-mode4
+ ASUS
+asus-mode5
+ ASUS
+asus-mode6
+ ASUS
+asus-mode7
+ ASUS
+asus-mode8
+ ASUS
+inv-dmic
+ Inverted internal mic workaround
+dell-headset-multi
+ Headset jack, which can also be used as mic-in
+
+ALC680
+======
+N/A
+
+ALC88x/898/1150
+======================
+acer-aspire-4930g
+ Acer Aspire 4930G/5930G/6530G/6930G/7730G
+acer-aspire-8930g
+ Acer Aspire 8330G/6935G
+acer-aspire
+ Acer Aspire others
+inv-dmic
+ Inverted internal mic workaround
+no-primary-hp
+ VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
+
+ALC861/660
+==========
+N/A
+
+ALC861VD/660VD
+==============
+N/A
+
+CMI9880
+=======
+minimal
+ 3-jack in back
+min_fp
+ 3-jack in back, 2-jack in front
+full
+ 6-jack in back, 2-jack in front
+full_dig
+ 6-jack in back, 2-jack in front, SPDIF I/O
+allout
+ 5-jack in back, 2-jack in front, SPDIF out
+auto
+ auto-config reading BIOS (default)
+
+AD1882 / AD1882A
+================
+3stack
+ 3-stack mode
+3stack-automute
+ 3-stack with automute front HP (default)
+6stack
+ 6-stack mode
+
+AD1884A / AD1883 / AD1984A / AD1984B
+====================================
+desktop
+ 3-stack desktop (default)
+laptop
+ laptop with HP jack sensing
+mobile
+ mobile devices with HP jack sensing
+thinkpad
+ Lenovo Thinkpad X300
+touchsmart
+ HP Touchsmart
+
+AD1884
+======
+N/A
+
+AD1981
+======
+basic
+ 3-jack (default)
+hp
+ HP nx6320
+thinkpad
+ Lenovo Thinkpad T60/X60/Z60
+toshiba
+ Toshiba U205
+
+AD1983
+======
+N/A
+
+AD1984
+======
+basic
+ default configuration
+thinkpad
+ Lenovo Thinkpad T61/X61
+dell_desktop
+ Dell T3400
+
+AD1986A
+=======
+3stack
+ 3-stack, shared surrounds
+laptop
+ 2-channel only (FSC V2060, Samsung M50)
+laptop-imic
+ 2-channel with built-in mic
+eapd
+ Turn on EAPD constantly
+
+AD1988/AD1988B/AD1989A/AD1989B
+==============================
+6stack
+ 6-jack
+6stack-dig
+ ditto with SPDIF
+3stack
+ 3-jack
+3stack-dig
+ ditto with SPDIF
+laptop
+ 3-jack with hp-jack automute
+laptop-dig
+ ditto with SPDIF
+auto
+ auto-config reading BIOS (default)
+
+Conexant 5045
+=============
+laptop-hpsense
+ Laptop with HP sense (old model laptop)
+laptop-micsense
+ Laptop with Mic sense (old model fujitsu)
+laptop-hpmicsense
+ Laptop with HP and Mic senses
+benq
+ Benq R55E
+laptop-hp530
+ HP 530 laptop
+test
+ for testing/debugging purposes; almost all controls can be
+ adjusted. Appears only when compiled with ``CONFIG_SND_DEBUG=y``
+
+Conexant 5047
+=============
+laptop
+ Basic Laptop config
+laptop-hp
+ Laptop config for some HP models (subdevice 30A5)
+laptop-eapd
+ Laptop config with EAPD support
+test
+ for testing/debugging purposes; almost all controls can be
+ adjusted. Appears only when compiled with ``CONFIG_SND_DEBUG=y``
+
+Conexant 5051
+=============
+laptop
+ Basic Laptop config (default)
+hp
+ HP Spartan laptop
+hp-dv6736
+ HP dv6736
+hp-f700
+ HP Compaq Presario F700
+ideapad
+ Lenovo IdeaPad laptop
+toshiba
+ Toshiba Satellite M300
+
+Conexant 5066
+=============
+laptop
+ Basic Laptop config (default)
+hp-laptop
+ HP laptops, e.g. G60
+asus
+ Asus K52JU, Lenovo G560
+dell-laptop
+ Dell laptops
+dell-vostro
+ Dell Vostro
+olpc-xo-1_5
+ OLPC XO 1.5
+ideapad
+ Lenovo IdeaPad U150
+thinkpad
+ Lenovo Thinkpad
+
+STAC9200
+========
+ref
+ Reference board
+oqo
+ OQO Model 2
+dell-d21
+ Dell (unknown)
+dell-d22
+ Dell (unknown)
+dell-d23
+ Dell (unknown)
+dell-m21
+ Dell Inspiron 630m, Dell Inspiron 640m
+dell-m22
+ Dell Latitude D620, Dell Latitude D820
+dell-m23
+ Dell XPS M1710, Dell Precision M90
+dell-m24
+ Dell Latitude 120L
+dell-m25
+ Dell Inspiron E1505n
+dell-m26
+ Dell Inspiron 1501
+dell-m27
+ Dell Inspiron E1705/9400
+gateway-m4
+ Gateway laptops with EAPD control
+gateway-m4-2
+ Gateway laptops with EAPD control
+panasonic
+ Panasonic CF-74
+auto
+ BIOS setup (default)
+
+STAC9205/9254
+=============
+ref
+ Reference board
+dell-m42
+ Dell (unknown)
+dell-m43
+ Dell Precision
+dell-m44
+ Dell Inspiron
+eapd
+ Keep EAPD on (e.g. Gateway T1616)
+auto
+ BIOS setup (default)
+
+STAC9220/9221
+=============
+ref
+ Reference board
+3stack
+ D945 3stack
+5stack
+ D945 5stack + SPDIF
+intel-mac-v1
+ Intel Mac Type 1
+intel-mac-v2
+ Intel Mac Type 2
+intel-mac-v3
+ Intel Mac Type 3
+intel-mac-v4
+ Intel Mac Type 4
+intel-mac-v5
+ Intel Mac Type 5
+intel-mac-auto
+ Intel Mac (detect type according to subsystem id)
+macmini
+ Intel Mac Mini (equivalent with type 3)
+macbook
+ Intel Mac Book (eq. type 5)
+macbook-pro-v1
+ Intel Mac Book Pro 1st generation (eq. type 3)
+macbook-pro
+ Intel Mac Book Pro 2nd generation (eq. type 3)
+imac-intel
+ Intel iMac (eq. type 2)
+imac-intel-20
+ Intel iMac (newer version) (eq. type 3)
+ecs202
+ ECS/PC chips
+dell-d81
+ Dell (unknown)
+dell-d82
+ Dell (unknown)
+dell-m81
+ Dell (unknown)
+dell-m82
+ Dell XPS M1210
+auto
+ BIOS setup (default)
+
+STAC9202/9250/9251
+==================
+ref
+ Reference board, base config
+m1
+ Some Gateway MX series laptops (NX560XL)
+m1-2
+ Some Gateway MX series laptops (MX6453)
+m2
+ Some Gateway MX series laptops (M255)
+m2-2
+ Some Gateway MX series laptops
+m3
+ Some Gateway MX series laptops
+m5
+ Some Gateway MX series laptops (MP6954)
+m6
+ Some Gateway NX series laptops
+auto
+ BIOS setup (default)
+
+STAC9227/9228/9229/927x
+=======================
+ref
+ Reference board
+ref-no-jd
+ Reference board without HP/Mic jack detection
+3stack
+ D965 3stack
+5stack
+ D965 5stack + SPDIF
+5stack-no-fp
+ D965 5stack without front panel
+dell-3stack
+ Dell Dimension E520
+dell-bios
+ Fixes with Dell BIOS setup
+dell-bios-amic
+ Fixes with Dell BIOS setup including analog mic
+volknob
+ Fixes with volume-knob widget 0x24
+auto
+ BIOS setup (default)
+
+STAC92HD71B*
+============
+ref
+ Reference board
+dell-m4-1
+ Dell desktops
+dell-m4-2
+ Dell desktops
+dell-m4-3
+ Dell desktops
+hp-m4
+ HP mini 1000
+hp-dv5
+ HP dv series
+hp-hdx
+ HP HDX series
+hp-dv4-1222nr
+ HP dv4-1222nr (with LED support)
+auto
+ BIOS setup (default)
+
+STAC92HD73*
+===========
+ref
+ Reference board
+no-jd
+ BIOS setup but without jack-detection
+intel
+ Intel DG45* mobos
+dell-m6-amic
+ Dell desktops/laptops with analog mics
+dell-m6-dmic
+ Dell desktops/laptops with digital mics
+dell-m6
+ Dell desktops/laptops with both type of mics
+dell-eq
+ Dell desktops/laptops
+alienware
+ Alienware M17x
+auto
+ BIOS setup (default)
+
+STAC92HD83*
+===========
+ref
+ Reference board
+mic-ref
+ Reference board with power management for ports
+dell-s14
+ Dell laptop
+dell-vostro-3500
+ Dell Vostro 3500 laptop
+hp-dv7-4000
+ HP dv-7 4000
+hp_cNB11_intquad
+ HP CNB models with 4 speakers
+hp-zephyr
+ HP Zephyr
+hp-led
+ HP with broken BIOS for mute LED
+hp-inv-led
+ HP with broken BIOS for inverted mute LED
+hp-mic-led
+ HP with mic-mute LED
+headset-jack
+ Dell Latitude with a 4-pin headset jack
+hp-envy-bass
+ Pin fixup for HP Envy bass speaker (NID 0x0f)
+hp-envy-ts-bass
+ Pin fixup for HP Envy TS bass speaker (NID 0x10)
+hp-bnb13-eq
+ Hardware equalizer setup for HP laptops
+auto
+ BIOS setup (default)
+
+STAC92HD95
+==========
+hp-led
+ LED support for HP laptops
+hp-bass
+ Bass HPF setup for HP Spectre 13
+
+STAC9872
+========
+vaio
+ VAIO laptop without SPDIF
+auto
+ BIOS setup (default)
+
+Cirrus Logic CS4206/4207
+========================
+mbp55
+ MacBook Pro 5,5
+imac27
+ iMac 27-inch
+auto
+ BIOS setup (default)
+
+Cirrus Logic CS4208
+===================
+mba6
+ MacBook Air 6,1 and 6,2
+gpio0
+ Enable GPIO 0 amp
+auto
+ BIOS setup (default)
+
+VIA VT17xx/VT18xx/VT20xx
+========================
+auto
+ BIOS setup (default)
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/hd-audio/notes.rst
index d4510ebf2e8c..168d0cfab1ce 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/hd-audio/notes.rst
@@ -1,10 +1,12 @@
-MORE NOTES ON HD-AUDIO DRIVER
=============================
- Takashi Iwai <tiwai@suse.de>
+More Notes on HD-Audio Driver
+=============================
+Takashi Iwai <tiwai@suse.de>
-GENERAL
--------
+
+General
+=======
HD-audio is the new standard on-board audio component on modern PCs
after AC97. Although Linux has been supporting HD-audio since long
@@ -40,28 +42,28 @@ If you are interested in the deep debugging of HD-audio, read the
HD-audio specification at first. The specification is found on
Intel's web page, for example:
-- http://www.intel.com/standards/hdaudio/
+* http://www.intel.com/standards/hdaudio/
-HD-AUDIO CONTROLLER
--------------------
+HD-Audio Controller
+===================
DMA-Position Problem
-~~~~~~~~~~~~~~~~~~~~
+--------------------
The most common problem of the controller is the inaccurate DMA
pointer reporting. The DMA pointer for playback and capture can be
read in two ways, either via a LPIB register or via a position-buffer
map. As default the driver tries to read from the io-mapped
position-buffer, and falls back to LPIB if the position-buffer appears
dead. However, this detection isn't perfect on some devices. In such
-a case, you can change the default method via `position_fix` option.
+a case, you can change the default method via ``position_fix`` option.
-`position_fix=1` means to use LPIB method explicitly.
-`position_fix=2` means to use the position-buffer.
-`position_fix=3` means to use a combination of both methods, needed
+``position_fix=1`` means to use LPIB method explicitly.
+``position_fix=2`` means to use the position-buffer.
+``position_fix=3`` means to use a combination of both methods, needed
for some VIA controllers. The capture stream position is corrected
by comparing both LPIB and position-buffer values.
-`position_fix=4` is another combination available for all controllers,
+``position_fix=4`` is another combination available for all controllers,
and uses LPIB for the playback and the position-buffer for the capture
streams.
0 is the default value for all other
@@ -74,9 +76,9 @@ the wake-up timing. It wakes up a few samples before actually
processing the data on the buffer. This caused a lot of problems, for
example, with ALSA dmix or JACK. Since 2.6.27 kernel, the driver puts
an artificial delay to the wake up timing. This delay is controlled
-via `bdl_pos_adj` option.
+via ``bdl_pos_adj`` option.
-When `bdl_pos_adj` is a negative value (as default), it's assigned to
+When ``bdl_pos_adj`` is a negative value (as default), it's assigned to
an appropriate value depending on the controller chip. For Intel
chips, it'd be 1 while it'd be 32 for others. Usually this works.
Only in case it doesn't work and you get warning messages, you should
@@ -84,19 +86,19 @@ change this parameter to other values.
Codec-Probing Problem
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
A less frequent but more severe problem is the codec probing. When
BIOS reports the available codec slots wrongly, the driver gets
confused and tries to access the non-existing codec slot. This often
results in the total screw-up, and destructs the further communication
with the codec chips. The symptom appears usually as error messages
like:
-------------------------------------------------------------------------
- hda_intel: azx_get_response timeout, switching to polling mode:
- last cmd=0x12345678
- hda_intel: azx_get_response timeout, switching to single_cmd mode:
- last cmd=0x12345678
-------------------------------------------------------------------------
+::
+
+ hda_intel: azx_get_response timeout, switching to polling mode:
+ last cmd=0x12345678
+ hda_intel: azx_get_response timeout, switching to single_cmd mode:
+ last cmd=0x12345678
The first line is a warning, and this is usually relatively harmless.
It means that the codec response isn't notified via an IRQ. The
@@ -108,24 +110,24 @@ it means that something is really wrong. Most likely you are
accessing a non-existing codec slot.
Thus, if the second error message appears, try to narrow the probed
-codec slots via `probe_mask` option. It's a bitmask, and each bit
+codec slots via ``probe_mask`` option. It's a bitmask, and each bit
corresponds to the codec slot. For example, to probe only the first
-slot, pass `probe_mask=1`. For the first and the third slots, pass
-`probe_mask=5` (where 5 = 1 | 4), and so on.
+slot, pass ``probe_mask=1``. For the first and the third slots, pass
+``probe_mask=5`` (where 5 = 1 | 4), and so on.
Since 2.6.29 kernel, the driver has a more robust probing method, so
this error might happen rarely, though.
On a machine with a broken BIOS, sometimes you need to force the
driver to probe the codec slots the hardware doesn't report for use.
-In such a case, turn the bit 8 (0x100) of `probe_mask` option on.
+In such a case, turn the bit 8 (0x100) of ``probe_mask`` option on.
Then the rest 8 bits are passed as the codec slots to probe
-unconditionally. For example, `probe_mask=0x103` will force to probe
+unconditionally. For example, ``probe_mask=0x103`` will force the driver to probe
the codec slots 0 and 1 no matter what the hardware reports.
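+
+For instance, assuming the module options are given via a modprobe
+configuration file (a hypothetical /etc/modprobe.d/hda.conf), the
+setting above would be written as
+::
+
+  options snd-hda-intel probe_mask=0x103
+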
Interrupt Handling
-~~~~~~~~~~~~~~~~~~
+------------------
HD-audio driver uses MSI as default (if available) since 2.6.33
kernel as MSI works better on some machines, and in general, it's
better for performance. However, Nvidia controllers showed bad
@@ -134,17 +136,17 @@ thus we disabled MSI for them.
There seem also still other devices that don't work with MSI. If you
see a regression wrt the sound quality (stuttering, etc) or a lock-up
-in the recent kernel, try to pass `enable_msi=0` option to disable
+in the recent kernel, try to pass ``enable_msi=0`` option to disable
MSI. If it works, you can add the known bad device to the blacklist
defined in hda_intel.c. In such a case, please report and give the
patch back to the upstream developer.
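+
+For a quick test, the option can also be passed on the kernel command
+line in the usual ``module.parameter=value`` form, for example
+::
+
+  snd_hda_intel.enable_msi=0
+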
-HD-AUDIO CODEC
---------------
+HD-Audio Codec
+==============
Model Option
-~~~~~~~~~~~~
+------------
The most common problem regarding the HD-audio driver is the
unsupported codec features or the mismatched device configuration.
Most of codec-specific code has several preset models, either to
@@ -153,13 +155,15 @@ override the BIOS setup or to provide more comprehensive features.
The driver checks PCI SSID and looks through the static configuration
table until any matching entry is found. If you have a new machine,
you may see a message like below:
-------------------------------------------------------------------------
+::
+
hda_codec: ALC880: BIOS auto-probing.
-------------------------------------------------------------------------
+
Meanwhile, in the earlier versions, you would see a message like:
-------------------------------------------------------------------------
+::
+
hda_codec: Unknown model for ALC880, trying auto-probe from BIOS...
-------------------------------------------------------------------------
+
Even if you see such a message, DON'T PANIC. Take a deep breath and
keep your towel. First of all, it's an informational message, no
warning, no error. This means that the PCI SSID of your device isn't
@@ -182,32 +186,33 @@ model is found in the white-list, the driver assumes the static
configuration of that preset with the correct pin setup, etc.
Thus, if you have a newer machine with a slightly different PCI SSID
(or codec SSID) from the existing one, you may have a good chance to
-re-use the same model. You can pass the `model` option to specify the
+re-use the same model. You can pass the ``model`` option to specify the
preset model instead of PCI (and codec-) SSID look-up.
-What `model` option values are available depends on the codec chip.
+What ``model`` option values are available depends on the codec chip.
Check your codec chip from the codec proc file (see "Codec Proc-File"
section below). It will show the vendor/product name of your codec
-chip. Then, see Documentation/sound/alsa/HD-Audio-Models.txt file,
+chip. Then, see the Documentation/sound/hd-audio/models.rst file,
the section of HD-audio driver. You can find a list of codecs
-and `model` options belonging to each codec. For example, for Realtek
-ALC262 codec chip, pass `model=ultra` for devices that are compatible
+and ``model`` options belonging to each codec. For example, for Realtek
+ALC262 codec chip, pass ``model=ultra`` for devices that are compatible
with Samsung Q1 Ultra.
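+
+For a quick experiment, such a model option can simply be given when
+loading the module manually, e.g.
+::
+
+  # modprobe snd-hda-intel model=ultra
+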
Thus, the first thing you can do for any brand-new, unsupported and
non-working HD-audio hardware is to check HD-audio codec and several
-different `model` option values. If you have any luck, some of them
+different ``model`` option values. If you have any luck, some of them
might suit with your device well.
There are a few special model option values:
-- when 'nofixup' is passed, the device-specific fixups in the codec
+
+* when ``nofixup`` is passed, the device-specific fixups in the codec
parser are skipped.
-- when `generic` is passed, the codec-specific parser is skipped and
+* when ``generic`` is passed, the codec-specific parser is skipped and
only the generic parser is used.
Speaker and Headphone Output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
One of the most frequent (and obvious) bugs with HD-audio is the
silent output from either or both of a built-in speaker and a
headphone jack. In general, you should try a headphone output at
@@ -236,23 +241,23 @@ report. See the bug report section for details.
If you are masochistic enough to debug the driver problem, note the
following:
-- The speaker (and the headphone, too) output often requires the
+* The speaker (and the headphone, too) output often requires the
external amplifier. This can be set usually via EAPD verb or a
certain GPIO. If the codec pin supports EAPD, you have a better
chance via SET_EAPD_BTL verb (0x70c). On others, GPIO pin (mostly
it's either GPIO0 or GPIO1) may turn on/off EAPD.
-- Some Realtek codecs require special vendor-specific coefficients to
+* Some Realtek codecs require special vendor-specific coefficients to
turn on the amplifier. See patch_realtek.c.
-- IDT codecs may have extra power-enable/disable controls on each
+* IDT codecs may have extra power-enable/disable controls on each
analog pin. See patch_sigmatel.c.
-- Very rare but some devices don't accept the pin-detection verb until
+* Very rare but some devices don't accept the pin-detection verb until
triggered. Issuing GET_PIN_SENSE verb (0xf09) may result in the
codec-communication stall. Some examples are found in
patch_realtek.c.
Capture Problems
-~~~~~~~~~~~~~~~~
+----------------
The capture problems are often because of missing setups of mixers.
Thus, before submitting a bug report, make sure that you set up the
mixer correctly. For example, both "Capture Volume" and "Capture
@@ -284,7 +289,7 @@ submit the improvement patch to the author.
Direct Debugging
-~~~~~~~~~~~~~~~~
+----------------
If no model option gives you a better result, and you are a tough guy
to fight against evil, try debugging via hitting the raw HD-audio
codec verbs to the device. Some tools are available: hda-emu and
@@ -293,45 +298,45 @@ below. You'd need to enable hwdep for using these tools. See "Kernel
Configuration" section.
-OTHER ISSUES
-------------
+Other Issues
+============
Kernel Configuration
-~~~~~~~~~~~~~~~~~~~~
+--------------------
In general, I recommend you to enable the sound debug option,
-`CONFIG_SND_DEBUG=y`, no matter whether you are debugging or not.
+``CONFIG_SND_DEBUG=y``, no matter whether you are debugging or not.
This enables snd_printd() macro and others, and you'll get additional
kernel messages at probing.
-In addition, you can enable `CONFIG_SND_DEBUG_VERBOSE=y`. But this
+In addition, you can enable ``CONFIG_SND_DEBUG_VERBOSE=y``. But this
will give you far more messages. Thus turn this on only when you are
sure to want it.
-Don't forget to turn on the appropriate `CONFIG_SND_HDA_CODEC_*`
+Don't forget to turn on the appropriate ``CONFIG_SND_HDA_CODEC_*``
options. Note that each of them corresponds to the codec chip, not
the controller chip. Thus, even if lspci shows the Nvidia controller,
you may need to choose the option for other vendors. If you are
unsure, just select all yes.
-`CONFIG_SND_HDA_HWDEP` is a useful option for debugging the driver.
+``CONFIG_SND_HDA_HWDEP`` is a useful option for debugging the driver.
When this is enabled, the driver creates hardware-dependent devices
(one per each codec), and you have a raw access to the device via
-these device files. For example, `hwC0D2` will be created for the
+these device files. For example, ``hwC0D2`` will be created for the
codec slot #2 of the first card (#0). For debug-tools such as
hda-verb and hda-analyzer, the hwdep device has to be enabled.
Thus, it'd be better to turn this on always.
-`CONFIG_SND_HDA_RECONFIG` is a new option, and this depends on the
+``CONFIG_SND_HDA_RECONFIG`` is a new option, and this depends on the
hwdep option above. When enabled, you'll have some sysfs files under
the corresponding hwdep directory. See "HD-audio reconfiguration"
section below.
-`CONFIG_SND_HDA_POWER_SAVE` option enables the power-saving feature.
+``CONFIG_SND_HDA_POWER_SAVE`` option enables the power-saving feature.
See "Power-saving" section below.
Codec Proc-File
-~~~~~~~~~~~~~~~
+---------------
The codec proc-file is a treasure-chest for debugging HD-audio.
It shows most of useful information of each codec widget.
@@ -351,161 +356,178 @@ will appear as "Realtek ID 0262", instead of "Realtek ALC262".
HD-Audio Reconfiguration
-~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------
This is an experimental feature to allow you to re-configure the HD-audio
codec dynamically without reloading the driver. The following sysfs
files are available under each codec-hwdep device directory (e.g.
/sys/class/sound/hwC0D0):
-vendor_id::
- Shows the 32bit codec vendor-id hex number. You can change the
- vendor-id value by writing to this file.
-subsystem_id::
- Shows the 32bit codec subsystem-id hex number. You can change the
- subsystem-id value by writing to this file.
-revision_id::
- Shows the 32bit codec revision-id hex number. You can change the
- revision-id value by writing to this file.
-afg::
- Shows the AFG ID. This is read-only.
-mfg::
- Shows the MFG ID. This is read-only.
-name::
- Shows the codec name string. Can be changed by writing to this
- file.
-modelname::
- Shows the currently set `model` option. Can be changed by writing
- to this file.
-init_verbs::
- The extra verbs to execute at initialization. You can add a verb by
- writing to this file. Pass three numbers: nid, verb and parameter
- (separated with a space).
-hints::
- Shows / stores hint strings for codec parsers for any use.
- Its format is `key = value`. For example, passing `jack_detect = no`
- will disable the jack detection of the machine completely.
-init_pin_configs::
- Shows the initial pin default config values set by BIOS.
-driver_pin_configs::
- Shows the pin default values set by the codec parser explicitly.
- This doesn't show all pin values but only the changed values by
- the parser. That is, if the parser doesn't change the pin default
- config values by itself, this will contain nothing.
-user_pin_configs::
- Shows the pin default config values to override the BIOS setup.
- Writing this (with two numbers, NID and value) appends the new
- value. The given will be used instead of the initial BIOS value at
- the next reconfiguration time. Note that this config will override
- even the driver pin configs, too.
-reconfig::
- Triggers the codec re-configuration. When any value is written to
- this file, the driver re-initialize and parses the codec tree
- again. All the changes done by the sysfs entries above are taken
- into account.
-clear::
- Resets the codec, removes the mixer elements and PCM stuff of the
- specified codec, and clear all init verbs and hints.
+vendor_id
+ Shows the 32bit codec vendor-id hex number. You can change the
+ vendor-id value by writing to this file.
+subsystem_id
+ Shows the 32bit codec subsystem-id hex number. You can change the
+ subsystem-id value by writing to this file.
+revision_id
+ Shows the 32bit codec revision-id hex number. You can change the
+ revision-id value by writing to this file.
+afg
+ Shows the AFG ID. This is read-only.
+mfg
+ Shows the MFG ID. This is read-only.
+name
+ Shows the codec name string. Can be changed by writing to this
+ file.
+modelname
+ Shows the currently set ``model`` option. Can be changed by writing
+ to this file.
+init_verbs
+ The extra verbs to execute at initialization. You can add a verb by
+ writing to this file. Pass three numbers: nid, verb and parameter
+ (separated with a space).
+hints
+ Shows / stores hint strings for codec parsers for any use.
+ Its format is ``key = value``. For example, passing ``jack_detect = no``
+ will disable the jack detection of the machine completely.
+init_pin_configs
+ Shows the initial pin default config values set by BIOS.
+driver_pin_configs
+ Shows the pin default values set by the codec parser explicitly.
+ This doesn't show all pin values but only the changed values by
+ the parser. That is, if the parser doesn't change the pin default
+ config values by itself, this will contain nothing.
+user_pin_configs
+ Shows the pin default config values to override the BIOS setup.
+ Writing this (with two numbers, NID and value) appends the new
+ value. The given value will be used instead of the initial BIOS value at
+ the next reconfiguration time. Note that this config will override
+ even the driver pin configs, too.
+reconfig
+ Triggers the codec re-configuration. When any value is written to
+ this file, the driver re-initializes and parses the codec tree
+ again. All the changes done by the sysfs entries above are taken
+ into account.
+clear
+ Resets the codec, removes the mixer elements and PCM stuff of the
+ specified codec, and clears all init verbs and hints.
For example, when you want to change the pin default configuration
value of the pin widget 0x14 to 0x9993013f, and let the driver
re-configure based on that state, run like below:
-------------------------------------------------------------------------
- # echo 0x14 0x9993013f > /sys/class/sound/hwC0D0/user_pin_configs
- # echo 1 > /sys/class/sound/hwC0D0/reconfig
-------------------------------------------------------------------------
+::
+
+ # echo 0x14 0x9993013f > /sys/class/sound/hwC0D0/user_pin_configs
+ # echo 1 > /sys/class/sound/hwC0D0/reconfig
Hint Strings
-~~~~~~~~~~~~
+------------
The codec parser has several switches and adjustment knobs for
matching better with the actual codec or device behavior. Many of
them can be adjusted dynamically via "hints" strings as mentioned in
-the section above. For example, by passing `jack_detect = no` string
+the section above. For example, by passing ``jack_detect = no`` string
via sysfs or a patch file, you can disable the jack detection, thus
the codec parser will skip the features like auto-mute or mic
-auto-switch. As a boolean value, either `yes`, `no`, `true`, `false`,
-`1` or `0` can be passed.
+auto-switch. As a boolean value, either ``yes``, ``no``, ``true``, ``false``,
+``1`` or ``0`` can be passed.
The generic parser supports the following hints:
-- jack_detect (bool): specify whether the jack detection is available
- at all on this machine; default true
-- inv_jack_detect (bool): indicates that the jack detection logic is
- inverted
-- trigger_sense (bool): indicates that the jack detection needs the
- explicit call of AC_VERB_SET_PIN_SENSE verb
-- inv_eapd (bool): indicates that the EAPD is implemented in the
- inverted logic
-- pcm_format_first (bool): sets the PCM format before the stream tag
- and channel ID
-- sticky_stream (bool): keep the PCM format, stream tag and ID as long
- as possible; default true
-- spdif_status_reset (bool): reset the SPDIF status bits at each time
- the SPDIF stream is set up
-- pin_amp_workaround (bool): the output pin may have multiple amp
- values
-- single_adc_amp (bool): ADCs can have only single input amps
-- auto_mute (bool): enable/disable the headphone auto-mute feature;
- default true
-- auto_mic (bool): enable/disable the mic auto-switch feature; default
- true
-- line_in_auto_switch (bool): enable/disable the line-in auto-switch
- feature; default false
-- need_dac_fix (bool): limits the DACs depending on the channel count
-- primary_hp (bool): probe headphone jacks as the primary outputs;
- default true
-- multi_io (bool): try probing multi-I/O config (e.g. shared
- line-in/surround, mic/clfe jacks)
-- multi_cap_vol (bool): provide multiple capture volumes
-- inv_dmic_split (bool): provide split internal mic volume/switch for
- phase-inverted digital mics
-- indep_hp (bool): provide the independent headphone PCM stream and
- the corresponding mixer control, if available
-- add_stereo_mix_input (bool): add the stereo mix (analog-loopback
- mix) to the input mux if available
-- add_jack_modes (bool): add "xxx Jack Mode" enum controls to each
- I/O jack for allowing to change the headphone amp and mic bias VREF
- capabilities
-- power_save_node (bool): advanced power management for each widget,
- controlling the power sate (D0/D3) of each widget node depending on
- the actual pin and stream states
-- power_down_unused (bool): power down the unused widgets, a subset of
- power_save_node, and will be dropped in future
-- add_hp_mic (bool): add the headphone to capture source if possible
-- hp_mic_detect (bool): enable/disable the hp/mic shared input for a
- single built-in mic case; default true
-- mixer_nid (int): specifies the widget NID of the analog-loopback
- mixer
+jack_detect (bool)
+ specify whether the jack detection is available at all on this
+ machine; default true
+inv_jack_detect (bool)
+ indicates that the jack detection logic is inverted
+trigger_sense (bool)
+ indicates that the jack detection needs the explicit call of
+ AC_VERB_SET_PIN_SENSE verb
+inv_eapd (bool)
+ indicates that the EAPD is implemented in the inverted logic
+pcm_format_first (bool)
+ sets the PCM format before the stream tag and channel ID
+sticky_stream (bool)
+ keep the PCM format, stream tag and ID as long as possible;
+ default true
+spdif_status_reset (bool)
+ reset the SPDIF status bits at each time the SPDIF stream is set
+ up
+pin_amp_workaround (bool)
+ the output pin may have multiple amp values
+single_adc_amp (bool)
+ ADCs can have only single input amps
+auto_mute (bool)
+ enable/disable the headphone auto-mute feature; default true
+auto_mic (bool)
+ enable/disable the mic auto-switch feature; default true
+line_in_auto_switch (bool)
+ enable/disable the line-in auto-switch feature; default false
+need_dac_fix (bool)
+ limits the DACs depending on the channel count
+primary_hp (bool)
+ probe headphone jacks as the primary outputs; default true
+multi_io (bool)
+ try probing multi-I/O config (e.g. shared line-in/surround,
+ mic/clfe jacks)
+multi_cap_vol (bool)
+ provide multiple capture volumes
+inv_dmic_split (bool)
+ provide split internal mic volume/switch for phase-inverted
+ digital mics
+indep_hp (bool)
+ provide the independent headphone PCM stream and the corresponding
+ mixer control, if available
+add_stereo_mix_input (bool)
+ add the stereo mix (analog-loopback mix) to the input mux if
+ available
+add_jack_modes (bool)
+ add "xxx Jack Mode" enum controls to each I/O jack for allowing to
+ change the headphone amp and mic bias VREF capabilities
+power_save_node (bool)
+ advanced power management for each widget, controlling the power
+ state (D0/D3) of each widget node depending on the actual pin and
+ stream states
+power_down_unused (bool)
+ power down the unused widgets; this is a subset of power_save_node
+ and will be dropped in the future
+add_hp_mic (bool)
+ add the headphone to capture source if possible
+hp_mic_detect (bool)
+ enable/disable the hp/mic shared input for a single built-in mic
+ case; default true
+mixer_nid (int)
+ specifies the widget NID of the analog-loopback mixer
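+
+A hint is applied by writing its ``key = value`` string to the hints
+sysfs file and triggering a re-configuration, for example (assuming
+the codec hwdep device hwC0D0)
+::
+
+  # echo "jack_detect = no" > /sys/class/sound/hwC0D0/hints
+  # echo 1 > /sys/class/sound/hwC0D0/reconfig
+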
Early Patching
-~~~~~~~~~~~~~~
-When CONFIG_SND_HDA_PATCH_LOADER=y is set, you can pass a "patch" as a
-firmware file for modifying the HD-audio setup before initializing the
-codec. This can work basically like the reconfiguration via sysfs in
-the above, but it does it before the first codec configuration.
+--------------
+When ``CONFIG_SND_HDA_PATCH_LOADER=y`` is set, you can pass a "patch"
+as a firmware file for modifying the HD-audio setup before
+initializing the codec. This can work basically like the
+reconfiguration via sysfs in the above, but it does it before the
+first codec configuration.
A patch file is a plain text file which looks like below:
-------------------------------------------------------------------------
- [codec]
- 0x12345678 0xabcd1234 2
+::
+
+ [codec]
+ 0x12345678 0xabcd1234 2
- [model]
- auto
+ [model]
+ auto
- [pincfg]
- 0x12 0x411111f0
+ [pincfg]
+ 0x12 0x411111f0
- [verb]
- 0x20 0x500 0x03
- 0x20 0x400 0xff
+ [verb]
+ 0x20 0x500 0x03
+ 0x20 0x400 0xff
- [hint]
- jack_detect = no
-------------------------------------------------------------------------
+ [hint]
+ jack_detect = no
-The file needs to have a line `[codec]`. The next line should contain
+
+The file needs to have a line ``[codec]``. The next line should contain
three numbers indicating the codec vendor-id (0x12345678 in the
example), the codec subsystem-id (0xabcd1234) and the address (2) of
the codec. The rest patch entries are applied to this specified codec
@@ -514,66 +536,68 @@ the first or the second value will make the check of the corresponding
field be skipped. It'll be useful for really broken devices that don't
initialize SSID properly.
-The `[model]` line allows to change the model name of the each codec.
+The ``[model]`` line allows changing the model name of each codec.
In the example above, it will be changed to model=auto.
Note that this overrides the module option.
-After the `[pincfg]` line, the contents are parsed as the initial
-default pin-configurations just like `user_pin_configs` sysfs above.
+After the ``[pincfg]`` line, the contents are parsed as the initial
+default pin-configurations just like ``user_pin_configs`` sysfs above.
The values can be shown in user_pin_configs sysfs file, too.
-Similarly, the lines after `[verb]` are parsed as `init_verbs`
-sysfs entries, and the lines after `[hint]` are parsed as `hints`
+Similarly, the lines after ``[verb]`` are parsed as ``init_verbs``
+sysfs entries, and the lines after ``[hint]`` are parsed as ``hints``
sysfs entries, respectively.
Another example to override the codec vendor id from 0x12345678 to
0xdeadbeef is like below:
-------------------------------------------------------------------------
- [codec]
- 0x12345678 0xabcd1234 2
+::
+
+ [codec]
+ 0x12345678 0xabcd1234 2
+
+ [vendor_id]
+ 0xdeadbeef
- [vendor_id]
- 0xdeadbeef
-------------------------------------------------------------------------
In the similar way, you can override the codec subsystem_id via
-`[subsystem_id]`, the revision id via `[revision_id]` line.
-Also, the codec chip name can be rewritten via `[chip_name]` line.
-------------------------------------------------------------------------
- [codec]
- 0x12345678 0xabcd1234 2
+``[subsystem_id]``, the revision id via ``[revision_id]`` line.
+Also, the codec chip name can be rewritten via ``[chip_name]`` line.
+::
+
+ [codec]
+ 0x12345678 0xabcd1234 2
+
+ [subsystem_id]
+ 0xffff1111
- [subsystem_id]
- 0xffff1111
+ [revision_id]
+ 0x10
- [revision_id]
- 0x10
+ [chip_name]
+ My-own NEWS-0002
- [chip_name]
- My-own NEWS-0002
-------------------------------------------------------------------------
The hd-audio driver reads the file via request_firmware(). Thus,
a patch file has to be located on the appropriate firmware path,
typically, /lib/firmware. For example, when you pass the option
-`patch=hda-init.fw`, the file /lib/firmware/hda-init.fw must be
+``patch=hda-init.fw``, the file /lib/firmware/hda-init.fw must be
present.
The patch module option is specific to each card instance, and you
need to give one file name for each instance, separated by commas.
For example, if you have two cards, one for an on-board analog and one
for an HDMI video board, you may pass patch option like below:
-------------------------------------------------------------------------
+::
+
options snd-hda-intel patch=on-board-patch,hdmi-patch
-------------------------------------------------------------------------
Power-Saving
-~~~~~~~~~~~~
+------------
The power-saving is a kind of auto-suspend of the device. When the
device is inactive for a certain time, the device is automatically
turned off to save the power. The time to go down is specified via
-`power_save` module option, and this option can be changed dynamically
+``power_save`` module option, and this option can be changed dynamically
via sysfs.
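+
+For example, to let the codec fall into the power-saving state after
+10 seconds of inactivity (assuming the snd-hda-intel driver), run
+::
+
+  # echo 10 > /sys/module/snd_hda_intel/parameters/power_save
+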
The power-saving won't work when the analog loopback is enabled on
@@ -592,63 +616,65 @@ The recent kernel supports the runtime PM for the HD-audio controller
chip, too. It means that the HD-audio controller is also powered up /
down dynamically. The feature is enabled only for certain controller
chips like Intel LynxPoint. You can enable/disable this feature
-forcibly by setting `power_save_controller` option, which is also
+forcibly by setting ``power_save_controller`` option, which is also
available at /sys/module/snd_hda_intel/parameters directory.
Tracepoints
-~~~~~~~~~~~
+-----------
The hd-audio driver gives a few basic tracepoints.
-`hda:hda_send_cmd` traces each CORB write while `hda:hda_get_response`
+``hda:hda_send_cmd`` traces each CORB write while ``hda:hda_get_response``
traces the response from RIRB (only when read from the codec driver).
-`hda:hda_bus_reset` traces the bus-reset due to fatal error, etc,
-`hda:hda_unsol_event` traces the unsolicited events, and
-`hda:hda_power_down` and `hda:hda_power_up` trace the power down/up
+``hda:hda_bus_reset`` traces the bus-reset due to fatal error, etc,
+``hda:hda_unsol_event`` traces the unsolicited events, and
+``hda:hda_power_down`` and ``hda:hda_power_up`` trace the power down/up
via power-saving behavior.
Enabling all tracepoints can be done like
-------------------------------------------------------------------------
- # echo 1 > /sys/kernel/debug/tracing/events/hda/enable
-------------------------------------------------------------------------
+::
+
+ # echo 1 > /sys/kernel/debug/tracing/events/hda/enable
+
then after some commands, you can read the traces from the
/sys/kernel/debug/tracing/trace file. For example, when you want to
trace what codec command is sent, enable the tracepoint like:
-------------------------------------------------------------------------
- # cat /sys/kernel/debug/tracing/trace
- # tracer: nop
- #
- # TASK-PID CPU# TIMESTAMP FUNCTION
- # | | | | |
- <...>-7807 [002] 105147.774889: hda_send_cmd: [0:0] val=e3a019
- <...>-7807 [002] 105147.774893: hda_send_cmd: [0:0] val=e39019
- <...>-7807 [002] 105147.999542: hda_send_cmd: [0:0] val=e3a01a
- <...>-7807 [002] 105147.999543: hda_send_cmd: [0:0] val=e3901a
- <...>-26764 [001] 349222.837143: hda_send_cmd: [0:0] val=e3a019
- <...>-26764 [001] 349222.837148: hda_send_cmd: [0:0] val=e39019
- <...>-26764 [001] 349223.058539: hda_send_cmd: [0:0] val=e3a01a
- <...>-26764 [001] 349223.058541: hda_send_cmd: [0:0] val=e3901a
-------------------------------------------------------------------------
-Here `[0:0]` indicates the card number and the codec address, and
-`val` shows the value sent to the codec, respectively. The value is
+::
+
+ # cat /sys/kernel/debug/tracing/trace
+ # tracer: nop
+ #
+ # TASK-PID CPU# TIMESTAMP FUNCTION
+ # | | | | |
+ <...>-7807 [002] 105147.774889: hda_send_cmd: [0:0] val=e3a019
+ <...>-7807 [002] 105147.774893: hda_send_cmd: [0:0] val=e39019
+ <...>-7807 [002] 105147.999542: hda_send_cmd: [0:0] val=e3a01a
+ <...>-7807 [002] 105147.999543: hda_send_cmd: [0:0] val=e3901a
+ <...>-26764 [001] 349222.837143: hda_send_cmd: [0:0] val=e3a019
+ <...>-26764 [001] 349222.837148: hda_send_cmd: [0:0] val=e39019
+ <...>-26764 [001] 349223.058539: hda_send_cmd: [0:0] val=e3a01a
+ <...>-26764 [001] 349223.058541: hda_send_cmd: [0:0] val=e3901a
+
+Here ``[0:0]`` indicates the card number and the codec address, and
+``val`` shows the value sent to the codec, respectively. The value is
a packed value, and you can decode it via hda-decode-verb program
included in hda-emu package below. For example, the value e3a019 is
to set the left output-amp value to 25.
-------------------------------------------------------------------------
- % hda-decode-verb 0xe3a019
- raw value = 0x00e3a019
- cid = 0, nid = 0x0e, verb = 0x3a0, parm = 0x19
- raw value: verb = 0x3a0, parm = 0x19
- verbname = set_amp_gain_mute
- amp raw val = 0xa019
- output, left, idx=0, mute=0, val=25
-------------------------------------------------------------------------
+::
+
+ % hda-decode-verb 0xe3a019
+ raw value = 0x00e3a019
+ cid = 0, nid = 0x0e, verb = 0x3a0, parm = 0x19
+ raw value: verb = 0x3a0, parm = 0x19
+ verbname = set_amp_gain_mute
+ amp raw val = 0xa019
+ output, left, idx=0, mute=0, val=25
Development Tree
-~~~~~~~~~~~~~~~~
+----------------
The latest development codes for HD-audio are found on sound git tree:
-- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+* git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
The master branch or for-next branches can be used as the main
development branches in general while the development for the current
@@ -657,14 +683,14 @@ respectively.
Sending a Bug Report
-~~~~~~~~~~~~~~~~~~~~
+--------------------
If any model or module options don't work for your device, it's time
to send a bug report to the developers. Give the following in your
bug report:
-- Hardware vendor, product and model names
-- Kernel version (and ALSA-driver version if you built externally)
-- `alsa-info.sh` output; run with `--no-upload` option. See the
+* Hardware vendor, product and model names
+* Kernel version (and ALSA-driver version if you built externally)
+* ``alsa-info.sh`` output; run with ``--no-upload`` option. See the
section below about alsa-info
If it's a regression, at best, send alsa-info outputs of both working
@@ -673,60 +699,60 @@ compare the codec registers directly.
Send a bug report to either of the following:
-kernel-bugzilla::
- https://bugzilla.kernel.org/
-alsa-devel ML::
- alsa-devel@alsa-project.org
+kernel-bugzilla
+ https://bugzilla.kernel.org/
+alsa-devel ML
+ alsa-devel@alsa-project.org
-DEBUG TOOLS
------------
+Debug Tools
+===========
This section describes some tools available for debugging HD-audio
problems.
alsa-info
-~~~~~~~~~
-The script `alsa-info.sh` is a very useful tool to gather the audio
+---------
+The script ``alsa-info.sh`` is a very useful tool to gather the audio
device information. It's included in alsa-utils package. The latest
version can be found on git repository:
-- git://git.alsa-project.org/alsa-utils.git
+* git://git.alsa-project.org/alsa-utils.git
The script can be fetched directly from the following URL, too:
-- http://www.alsa-project.org/alsa-info.sh
+* http://www.alsa-project.org/alsa-info.sh
Run this script as root, and it will gather the important information
such as the module lists, module parameters, proc file contents
including the codec proc files, mixer outputs and the control
elements. By default, it will upload the information to a web server
on alsa-project.org. But, if you send a bug report, it'd be better to
-run with `--no-upload` option, and attach the generated file.
+run with ``--no-upload`` option, and attach the generated file.
-There are some other useful options. See `--help` option output for
+There are some other useful options. See ``--help`` option output for
details.
When a probe error occurs or when the driver obviously assigns a
mismatched model, it'd be helpful to load the driver with
-`probe_only=1` option (at best after the cold reboot) and run
+``probe_only=1`` option (preferably after a cold reboot) and run
alsa-info at this state. With this option, the driver won't configure
the mixer and PCM but just tries to probe the codec slot. After
probing, the proc file is available, so you can get the raw codec
information before it is modified by the driver. Of course, the driver
-isn't usable with `probe_only=1`. But you can continue the
+isn't usable with ``probe_only=1``. But you can continue the
configuration via hwdep sysfs file if hda-reconfig option is enabled.
-Using `probe_only` mask 2 skips the reset of HDA codecs (use
-`probe_only=3` as module option). The hwdep interface can be used
+Using ``probe_only`` mask 2 skips the reset of HDA codecs (use
+``probe_only=3`` as module option). The hwdep interface can be used
to determine the BIOS codec initialization.
hda-verb
-~~~~~~~~
+--------
hda-verb is a tiny program that allows you to access the HD-audio
codec directly. You can execute a raw HD-audio codec verb with this.
This program accesses the hwdep device, thus you need to enable the
-kernel config `CONFIG_SND_HDA_HWDEP=y` beforehand.
+kernel config ``CONFIG_SND_HDA_HWDEP=y`` beforehand.
The hda-verb program takes four arguments: the hwdep device file, the
widget NID, the verb and the parameter. When you access to the codec
@@ -739,19 +765,20 @@ parameter can be either a hex/digit number or a string corresponding
to a verb. Similarly, the last parameter is the value to write, or
can be a string for the parameter type.
-------------------------------------------------------------------------
- % hda-verb /dev/snd/hwC0D0 0x12 0x701 2
- nid = 0x12, verb = 0x701, param = 0x2
- value = 0x0
+::
+
+ % hda-verb /dev/snd/hwC0D0 0x12 0x701 2
+ nid = 0x12, verb = 0x701, param = 0x2
+ value = 0x0
- % hda-verb /dev/snd/hwC0D0 0x0 PARAMETERS VENDOR_ID
- nid = 0x0, verb = 0xf00, param = 0x0
- value = 0x10ec0262
+ % hda-verb /dev/snd/hwC0D0 0x0 PARAMETERS VENDOR_ID
+ nid = 0x0, verb = 0xf00, param = 0x0
+ value = 0x10ec0262
+
+ % hda-verb /dev/snd/hwC0D0 2 set_a 0xb080
+ nid = 0x2, verb = 0x300, param = 0xb080
+ value = 0x0
- % hda-verb /dev/snd/hwC0D0 2 set_a 0xb080
- nid = 0x2, verb = 0x300, param = 0xb080
- value = 0x0
-------------------------------------------------------------------------
Although you can issue any verbs with this program, the driver state
won't always be updated. For example, the volume values are usually
@@ -760,22 +787,22 @@ via hda-verb won't change the mixer value.
The hda-verb program is included now in alsa-tools:
-- git://git.alsa-project.org/alsa-tools.git
+* git://git.alsa-project.org/alsa-tools.git
Also, the old stand-alone package is found in the ftp directory:
-- ftp://ftp.suse.com/pub/people/tiwai/misc/
+* ftp://ftp.suse.com/pub/people/tiwai/misc/
Also a git repository is available:
-- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/hda-verb.git
+* git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/hda-verb.git
See README file in the tarball for more details about hda-verb
program.
hda-analyzer
-~~~~~~~~~~~~
+------------
hda-analyzer provides a graphical interface to access the raw HD-audio
control, based on pyGTK2 binding. It's a more powerful version of
hda-verb. The program gives you an easy-to-use GUI stuff for showing
@@ -784,14 +811,14 @@ proc-compatible output.
The hda-analyzer:
-- http://git.alsa-project.org/?p=alsa.git;a=tree;f=hda-analyzer
+* http://git.alsa-project.org/?p=alsa.git;a=tree;f=hda-analyzer
is a part of alsa.git repository in alsa-project.org:
-- git://git.alsa-project.org/alsa.git
+* git://git.alsa-project.org/alsa.git
Codecgraph
-~~~~~~~~~~
+----------
Codecgraph is a utility program that generates a graph visualizing the
codec-node connections of a codec chip. It's especially useful when
you analyze or debug a codec without a proper datasheet. The program
@@ -800,11 +827,11 @@ program.
The tarball and GIT trees are found in the web page at:
-- http://helllabs.org/codecgraph/
+* http://helllabs.org/codecgraph/
hda-emu
-~~~~~~~
+-------
hda-emu is an HD-audio emulator. The main purpose of this program is
to debug an HD-audio codec without the real hardware. Thus, it
doesn't emulate the behavior with the real audio I/O, but it just
@@ -817,13 +844,14 @@ codec proc collections in the tarball. Then, run the program with the
proc file, and the hda-emu program will start parsing the codec file
and simulates the HD-audio driver:
-------------------------------------------------------------------------
- % hda-emu codecs/stac9200-dell-d820-laptop
- # Parsing..
- hda_codec: Unknown model for STAC9200, using BIOS defaults
- hda_codec: pin nid 08 bios pin config 40c003fa
- ....
-------------------------------------------------------------------------
+::
+
+ % hda-emu codecs/stac9200-dell-d820-laptop
+ # Parsing..
+ hda_codec: Unknown model for STAC9200, using BIOS defaults
+ hda_codec: pin nid 08 bios pin config 40c003fa
+ ....
+
The program gives you only a very dumb command-line interface. You
can get a proc-file dump at the current state, get a list of control
@@ -832,14 +860,14 @@ operation, the jack plugging simulation, etc.
The program is found in the git repository below:
-- git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/hda-emu.git
+* git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/hda-emu.git
See README file in the repository for more details about hda-emu
program.
hda-jack-retask
-~~~~~~~~~~~~~~~
+---------------
hda-jack-retask is a user-friendly GUI program to manipulate the
HD-audio pin control for jack retasking. If you have a problem about
the jack assignment, try this program and check whether you can get
@@ -849,5 +877,4 @@ firmware patch file (see "Early Patching" section).
The program is included in alsa-tools now:
-- git://git.alsa-project.org/alsa-tools.git
-
+* git://git.alsa-project.org/alsa-tools.git
diff --git a/Documentation/sound/index.rst b/Documentation/sound/index.rst
new file mode 100644
index 000000000000..47b89f014e69
--- /dev/null
+++ b/Documentation/sound/index.rst
@@ -0,0 +1,20 @@
+===================================
+Linux Sound Subsystem Documentation
+===================================
+
+.. toctree::
+ :maxdepth: 2
+
+ kernel-api/index
+ designs/index
+ soc/index
+ alsa-configuration
+ hd-audio/index
+ cards/index
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/sound/kernel-api/alsa-driver-api.rst b/Documentation/sound/kernel-api/alsa-driver-api.rst
new file mode 100644
index 000000000000..14cd138989e3
--- /dev/null
+++ b/Documentation/sound/kernel-api/alsa-driver-api.rst
@@ -0,0 +1,134 @@
+===================
+The ALSA Driver API
+===================
+
+Management of Cards and Devices
+===============================
+
+Card Management
+---------------
+.. kernel-doc:: sound/core/init.c
+
+Device Components
+-----------------
+.. kernel-doc:: sound/core/device.c
+
+Module requests and Device File Entries
+---------------------------------------
+.. kernel-doc:: sound/core/sound.c
+
+Memory Management Helpers
+-------------------------
+.. kernel-doc:: sound/core/memory.c
+.. kernel-doc:: sound/core/memalloc.c
+
+
+PCM API
+=======
+
+PCM Core
+--------
+.. kernel-doc:: sound/core/pcm.c
+.. kernel-doc:: sound/core/pcm_lib.c
+.. kernel-doc:: sound/core/pcm_native.c
+.. kernel-doc:: include/sound/pcm.h
+
+PCM Format Helpers
+------------------
+.. kernel-doc:: sound/core/pcm_misc.c
+
+PCM Memory Management
+---------------------
+.. kernel-doc:: sound/core/pcm_memory.c
+
+PCM DMA Engine API
+------------------
+.. kernel-doc:: sound/core/pcm_dmaengine.c
+.. kernel-doc:: include/sound/dmaengine_pcm.h
+
+Control/Mixer API
+=================
+
+General Control Interface
+-------------------------
+.. kernel-doc:: sound/core/control.c
+
+AC97 Codec API
+--------------
+.. kernel-doc:: sound/pci/ac97/ac97_codec.c
+.. kernel-doc:: sound/pci/ac97/ac97_pcm.c
+
+Virtual Master Control API
+--------------------------
+.. kernel-doc:: sound/core/vmaster.c
+.. kernel-doc:: include/sound/control.h
+
+MIDI API
+========
+
+Raw MIDI API
+------------
+.. kernel-doc:: sound/core/rawmidi.c
+
+MPU401-UART API
+---------------
+.. kernel-doc:: sound/drivers/mpu401/mpu401_uart.c
+
+Proc Info API
+=============
+
+Proc Info Interface
+-------------------
+.. kernel-doc:: sound/core/info.c
+
+Compress Offload
+================
+
+Compress Offload API
+--------------------
+.. kernel-doc:: sound/core/compress_offload.c
+.. kernel-doc:: include/uapi/sound/compress_offload.h
+.. kernel-doc:: include/uapi/sound/compress_params.h
+.. kernel-doc:: include/sound/compress_driver.h
+
+ASoC
+====
+
+ASoC Core API
+-------------
+.. kernel-doc:: include/sound/soc.h
+.. kernel-doc:: sound/soc/soc-core.c
+.. kernel-doc:: sound/soc/soc-devres.c
+.. kernel-doc:: sound/soc/soc-io.c
+.. kernel-doc:: sound/soc/soc-pcm.c
+.. kernel-doc:: sound/soc/soc-ops.c
+.. kernel-doc:: sound/soc/soc-compress.c
+
+ASoC DAPM API
+-------------
+.. kernel-doc:: sound/soc/soc-dapm.c
+
+ASoC DMA Engine API
+-------------------
+.. kernel-doc:: sound/soc/soc-generic-dmaengine-pcm.c
+
+Miscellaneous Functions
+=======================
+
+Hardware-Dependent Devices API
+------------------------------
+.. kernel-doc:: sound/core/hwdep.c
+
+Jack Abstraction Layer API
+--------------------------
+.. kernel-doc:: include/sound/jack.h
+.. kernel-doc:: sound/core/jack.c
+.. kernel-doc:: sound/soc/soc-jack.c
+
+ISA DMA Helpers
+---------------
+.. kernel-doc:: sound/core/isadma.c
+
+Other Helper Macros
+-------------------
+.. kernel-doc:: include/sound/core.h
diff --git a/Documentation/sound/kernel-api/index.rst b/Documentation/sound/kernel-api/index.rst
new file mode 100644
index 000000000000..d0e6df35b4b4
--- /dev/null
+++ b/Documentation/sound/kernel-api/index.rst
@@ -0,0 +1,8 @@
+ALSA Kernel API Documentation
+=============================
+
+.. toctree::
+ :maxdepth: 2
+
+ alsa-driver-api
+ writing-an-alsa-driver
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
new file mode 100644
index 000000000000..95c5443eff38
--- /dev/null
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -0,0 +1,4219 @@
+======================
+Writing an ALSA Driver
+======================
+
+:Author: Takashi Iwai <tiwai@suse.de>
+:Date: Oct 15, 2007
+:Edition: 0.3.7
+
+Preface
+=======
+
+This document describes how to write an `ALSA (Advanced Linux Sound
+Architecture) <http://www.alsa-project.org/>`__ driver. The document
+focuses mainly on PCI soundcards. In the case of other device types, the
+API might be different, too. However, at least the ALSA kernel API is
+consistent, and therefore it would still be of some help for writing them.
+
+This document targets people who already have enough C language skills
+and have basic linux kernel programming knowledge. This document doesn't
+explain the general topic of linux kernel coding and doesn't cover
+low-level driver implementation details. It only describes the standard
+way to write a PCI sound driver on ALSA.
+
+If you are already familiar with the older ALSA ver.0.5.x API, you can
+check the drivers such as ``sound/pci/es1938.c`` or
+``sound/pci/maestro3.c`` which also have almost the same code-base in
+the ALSA 0.5.x tree, so you can compare the differences.
+
+This document is still a draft version. Any feedback and corrections,
+please!!
+
+File Tree Structure
+===================
+
+General
+-------
+
+The ALSA drivers are provided in two ways.
+
+One is the trees provided as a tarball or via cvs from the ALSA's ftp
+site, and another is the 2.6 (or later) Linux kernel tree. To
+synchronize both, the ALSA driver tree is split into two different
+trees: alsa-kernel and alsa-driver. The former contains purely the
+source code for the Linux 2.6 (or later) tree. This tree is designed
+only for compilation in a 2.6 or later environment. The latter,
+alsa-driver, contains many supplementary files for compiling ALSA drivers
+outside of the Linux kernel tree, wrapper functions for older 2.2 and
+2.4 kernels to adapt the latest kernel API, and additional drivers
+which are still in development or in tests. The drivers in alsa-driver
+tree will be moved to alsa-kernel (and eventually to the 2.6 kernel
+tree) when they are finished and confirmed to work fine.
+
+The file tree structure of ALSA driver is depicted below. Both
+alsa-kernel and alsa-driver have almost the same file structure, except
+for “core” directory. It's named as “acore” in alsa-driver tree.
+
+::
+
+ sound
+ /core
+ /oss
+ /seq
+ /oss
+ /instr
+ /ioctl32
+ /include
+ /drivers
+ /mpu401
+ /opl3
+ /i2c
+ /l3
+ /synth
+ /emux
+ /pci
+ /(cards)
+ /isa
+ /(cards)
+ /arm
+ /ppc
+ /sparc
+ /usb
+ /pcmcia /(cards)
+ /oss
+
+
+core directory
+--------------
+
+This directory contains the middle layer which is the heart of ALSA
+drivers. In this directory, the native ALSA modules are stored. The
+sub-directories contain different modules and are dependent upon the
+kernel config.
+
+core/oss
+~~~~~~~~
+
+The codes for PCM and mixer OSS emulation modules are stored in this
+directory. The rawmidi OSS emulation is included in the ALSA rawmidi
+code since it's quite small. The sequencer code is stored in
+``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
+
+core/ioctl32
+~~~~~~~~~~~~
+
+This directory contains the 32bit-ioctl wrappers for 64bit architectures
+such as x86-64, ppc64 and sparc64. For 32bit and alpha architectures,
+these are not compiled.
+
+core/seq
+~~~~~~~~
+
+This directory and its sub-directories are for the ALSA sequencer. This
+directory contains the sequencer core and primary sequencer modules such
+as snd-seq-midi, snd-seq-virmidi, etc. They are compiled only when
+``CONFIG_SND_SEQUENCER`` is set in the kernel config.
+
+core/seq/oss
+~~~~~~~~~~~~
+
+This contains the OSS sequencer emulation codes.
+
+core/seq/instr
+~~~~~~~~~~~~~~
+
+This directory contains the modules for the sequencer instrument layer.
+
+include directory
+-----------------
+
+This is the place for the public header files of ALSA drivers, which are
+to be exported to user-space, or included by several files at different
+directories. Basically, the private header files should not be placed in
+this directory, but you may still find files there, due to historical
+reasons :)
+
+drivers directory
+-----------------
+
+This directory contains code shared among different drivers on different
+architectures. They are hence supposed not to be architecture-specific.
+For example, the dummy pcm driver and the serial MIDI driver are found
+in this directory. In the sub-directories, there is code for components
+which are independent from bus and cpu architectures.
+
+drivers/mpu401
+~~~~~~~~~~~~~~
+
+The MPU401 and MPU401-UART modules are stored here.
+
+drivers/opl3 and opl4
+~~~~~~~~~~~~~~~~~~~~~
+
+The OPL3 and OPL4 FM-synth stuff is found here.
+
+i2c directory
+-------------
+
+This contains the ALSA i2c components.
+
+Although there is a standard i2c layer on Linux, ALSA has its own i2c
+code for some cards, because the soundcard needs only a simple operation
+and the standard i2c API is too complicated for such a purpose.
+
+i2c/l3
+~~~~~~
+
+This is a sub-directory for ARM L3 i2c.
+
+synth directory
+---------------
+
+This contains the synth middle-level modules.
+
+So far, there is only Emu8000/Emu10k1 synth driver under the
+``synth/emux`` sub-directory.
+
+pci directory
+-------------
+
+This directory and its sub-directories hold the top-level card modules
+for PCI soundcards and the code specific to the PCI BUS.
+
+The drivers compiled from a single file are stored directly in the pci
+directory, while the drivers with several source files are stored on
+their own sub-directory (e.g. emu10k1, ice1712).
+
+isa directory
+-------------
+
+This directory and its sub-directories hold the top-level card modules
+for ISA soundcards.
+
+arm, ppc, and sparc directories
+-------------------------------
+
+They are used for top-level card modules which are specific to one of
+these architectures.
+
+usb directory
+-------------
+
+This directory contains the USB-audio driver. In the latest version, the
+USB MIDI driver is integrated in the usb-audio driver.
+
+pcmcia directory
+----------------
+
+The PCMCIA drivers, especially PCCard drivers, will go here. CardBus drivers will
+be in the pci directory, because their API is identical to that of
+standard PCI cards.
+
+oss directory
+-------------
+
+The OSS/Lite source files are stored here in Linux 2.6 (or later) tree.
+In the ALSA driver tarball, this directory is empty, of course :)
+
+Basic Flow for PCI Drivers
+==========================
+
+Outline
+-------
+
+The minimum flow for PCI soundcards is as follows:
+
+- define the PCI ID table (see the section `PCI Entries`_).
+
+- create ``probe`` callback.
+
+- create ``remove`` callback.
+
+- create a :c:type:`struct pci_driver <pci_driver>` structure
+ containing the three pointers above.
+
+- create an ``init`` function just calling the
+ :c:func:`pci_register_driver()` to register the pci_driver
+ table defined above.
+
+- create an ``exit`` function to call the
+ :c:func:`pci_unregister_driver()` function.
+
+Full Code Example
+-----------------
+
+The code example is shown below. Some parts are kept unimplemented at
+this moment but will be filled in the next sections. The numbers in the
+comment lines of the :c:func:`snd_mychip_probe()` function refer
+to details explained in the following section.
+
+::
+
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <sound/core.h>
+ #include <sound/initval.h>
+
+ /* module parameters (see "Module Parameters") */
+ /* SNDRV_CARDS: maximum number of cards supported by this module */
+ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+ /* definition of the chip-specific record */
+ struct mychip {
+ struct snd_card *card;
+ /* the rest of the implementation will be in section
+ * "PCI Resource Management"
+ */
+ };
+
+ /* chip-specific destructor
+ * (see "PCI Resource Management")
+ */
+ static int snd_mychip_free(struct mychip *chip)
+ {
+ .... /* will be implemented later... */
+ }
+
+ /* component-destructor
+ * (see "Management of Cards and Components")
+ */
+ static int snd_mychip_dev_free(struct snd_device *device)
+ {
+ return snd_mychip_free(device->device_data);
+ }
+
+ /* chip-specific constructor
+ * (see "Management of Cards and Components")
+ */
+ static int snd_mychip_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct mychip **rchip)
+ {
+ struct mychip *chip;
+ int err;
+ static struct snd_device_ops ops = {
+ .dev_free = snd_mychip_dev_free,
+ };
+
+ *rchip = NULL;
+
+ /* check PCI availability here
+ * (see "PCI Resource Management")
+ */
+ ....
+
+ /* allocate a chip-specific data with zero filled */
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->card = card;
+
+ /* rest of initialization here; will be implemented
+ * later, see "PCI Resource Management"
+ */
+ ....
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_mychip_free(chip);
+ return err;
+ }
+
+ *rchip = chip;
+ return 0;
+ }
+
+ /* constructor -- see "Driver Constructor" sub-section */
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+ {
+ static int dev;
+ struct snd_card *card;
+ struct mychip *chip;
+ int err;
+
+ /* (1) */
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ /* (2) */
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 0, &card);
+ if (err < 0)
+ return err;
+
+ /* (3) */
+ err = snd_mychip_create(card, pci, &chip);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+ /* (4) */
+ strcpy(card->driver, "My Chip");
+ strcpy(card->shortname, "My Own Chip 123");
+ sprintf(card->longname, "%s at 0x%lx irq %i",
+ card->shortname, chip->ioport, chip->irq);
+
+ /* (5) */
+ .... /* implemented later */
+
+ /* (6) */
+ err = snd_card_register(card);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+ /* (7) */
+ pci_set_drvdata(pci, card);
+ dev++;
+ return 0;
+ }
+
+ /* destructor -- see the "Destructor" sub-section */
+ static void snd_mychip_remove(struct pci_dev *pci)
+ {
+ snd_card_free(pci_get_drvdata(pci));
+ pci_set_drvdata(pci, NULL);
+ }
+
+
+
+Driver Constructor
+------------------
+
+The real constructor of PCI drivers is the ``probe`` callback. The
+``probe`` callback and other component-constructors which are called
+from the ``probe`` callback cannot be used with the ``__init`` prefix
+because any PCI device could be a hotplug device.
+
+In the ``probe`` callback, the following scheme is often used.
+
+1) Check and increment the device index.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int dev;
+ ....
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+
+where ``enable[dev]`` is the module option.
+
+Each time the ``probe`` callback is called, check the availability of
+the device. If not available, simply increment the device index and
+return. ``dev`` will also be incremented later (`step 7
+<#set-the-pci-driver-data-and-return-zero>`__).
+
+2) Create a card instance
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ struct snd_card *card;
+ int err;
+ ....
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 0, &card);
+
+
+The details will be explained in the section `Management of Cards and
+Components`_.
+
+3) Create a main component
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this part, the PCI resources are allocated.
+
+::
+
+ struct mychip *chip;
+ ....
+ err = snd_mychip_create(card, pci, &chip);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+The details will be explained in the section `PCI Resource
+Management`_.
+
+4) Set the driver ID and name strings.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ strcpy(card->driver, "My Chip");
+ strcpy(card->shortname, "My Own Chip 123");
+ sprintf(card->longname, "%s at 0x%lx irq %i",
+ card->shortname, chip->ioport, chip->irq);
+
+The driver field holds the minimal ID string of the chip. This is used
+by alsa-lib's configurator, so keep it simple but unique. Even the
+same driver can have different driver IDs to distinguish the
+functionality of each chip type.
+
+The shortname field is a string shown as a more verbose name. The longname
+field contains the information shown in ``/proc/asound/cards``.
+
+5) Create other components, such as mixer, MIDI, etc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here you define the basic components such as `PCM <#PCM-Interface>`__,
+mixer (e.g. `AC97 <#API-for-AC97-Codec>`__), MIDI (e.g.
+`MPU-401 <#MIDI-MPU401-UART-Interface>`__), and other interfaces.
+Also, if you want a `proc file <#Proc-Interface>`__, define it here,
+too.
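+
+A minimal sketch of this step, assuming the chip-specific constructors
+(such as ``snd_mychip_new_pcm()`` shown in the PCM Interface section
+later) return a negative error code on failure:
+
+::
+
+    /* (5) create other components, e.g. the PCM device */
+    err = snd_mychip_new_pcm(chip);
+    if (err < 0) {
+            snd_card_free(card);
+            return err;
+    }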
+
+6) Register the card instance.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ err = snd_card_register(card);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
+
+This will be explained in the section `Management of Cards and
+Components`_, too.
+
+7) Set the PCI driver data and return zero.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ pci_set_drvdata(pci, card);
+ dev++;
+ return 0;
+
+In the above, the card record is stored. This pointer is used in the
+remove callback and power-management callbacks, too.
+
+Destructor
+----------
+
+The destructor, remove callback, simply releases the card instance. Then
+the ALSA middle layer will release all the attached components
+automatically.
+
+It would be typically like the following:
+
+::
+
+ static void snd_mychip_remove(struct pci_dev *pci)
+ {
+ snd_card_free(pci_get_drvdata(pci));
+ pci_set_drvdata(pci, NULL);
+ }
+
+
+The above code assumes that the card pointer is set to the PCI driver
+data.
+
+Header Files
+------------
+
+For the above example, at least the following include files are
+necessary.
+
+::
+
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <sound/core.h>
+ #include <sound/initval.h>
+
+where the last one is necessary only when module options are defined
+in the source file. If the code is split into several files, the files
+without module options don't need them.
+
+In addition to these headers, you'll need ``<linux/interrupt.h>`` for
+interrupt handling, and ``<asm/io.h>`` for I/O access. If you use the
+:c:func:`mdelay()` or :c:func:`udelay()` functions, you'll need
+to include ``<linux/delay.h>`` too.
+
+The ALSA interfaces like the PCM and control APIs are defined in other
+``<sound/xxx.h>`` header files. They have to be included after
+``<sound/core.h>``.
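+
+For example, a driver providing a PCM device and mixer controls would
+typically include (a minimal sketch):
+
+::
+
+    #include <sound/core.h>
+    #include <sound/pcm.h>      /* PCM API */
+    #include <sound/control.h>  /* control/mixer API */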
+
+Management of Cards and Components
+==================================
+
+Card Instance
+-------------
+
+For each soundcard, a “card” record must be allocated.
+
+A card record is the headquarters of the soundcard. It manages the whole
+list of devices (components) on the soundcard, such as PCM, mixers,
+MIDI, synthesizer, and so on. Also, the card record holds the ID and the
+name strings of the card, manages the root of proc files, and controls
+the power-management states and hotplug disconnections. The component
+list on the card record is used to manage the correct release of
+resources at destruction.
+
+As mentioned above, to create a card instance, call
+:c:func:`snd_card_new()`.
+
+::
+
+ struct snd_card *card;
+ int err;
+ err = snd_card_new(&pci->dev, index, id, module, extra_size, &card);
+
+
+The function takes six arguments: the parent device pointer, the
+card-index number, the id string, the module pointer (usually
+``THIS_MODULE``), the size of extra-data space, and the pointer to
+return the card instance. The extra_size argument is used to allocate
+card->private_data for the chip-specific data. Note that these data are
+allocated by :c:func:`snd_card_new()`.
+
+The first argument, the pointer of struct :c:type:`struct device
+<device>`, specifies the parent device. For PCI devices, typically
+``&pci->dev`` is passed there.
+
+Components
+----------
+
+After the card is created, you can attach the components (devices) to
+the card instance. In an ALSA driver, a component is represented as a
+:c:type:`struct snd_device <snd_device>` object. A component
+can be a PCM instance, a control interface, a raw MIDI interface, etc.
+Each such instance has one component entry.
+
+A component can be created via :c:func:`snd_device_new()`
+function.
+
+::
+
+ snd_device_new(card, SNDRV_DEV_XXX, chip, &ops);
+
+This takes the card pointer, the device-level (``SNDRV_DEV_XXX``), the
+data pointer, and the callback pointers (``&ops``). The device-level
+defines the type of components and the order of registration and
+de-registration. For most components, the device-level is already
+defined. For a user-defined component, you can use
+``SNDRV_DEV_LOWLEVEL``.
+
+This function itself doesn't allocate the data space. The data must be
+allocated manually beforehand, and its pointer is passed as the
+argument. This pointer (``chip`` in the above example) is used as the
+identifier for the instance.
+
+Each pre-defined ALSA component such as ac97 and pcm calls
+:c:func:`snd_device_new()` inside its constructor. The destructor
+for each component is defined in the callback pointers. Hence, you don't
+need to take care of calling a destructor for such a component.
+
+If you wish to create your own component, you need to set the destructor
+function to the dev_free callback in the ``ops``, so that it can be
+released automatically via :c:func:`snd_card_free()`. The next
+example will show an implementation of chip-specific data.
+
+Chip-Specific Data
+------------------
+
+Chip-specific information, e.g. the I/O port address, its resource
+pointer, or the irq number, is stored in the chip-specific record.
+
+::
+
+ struct mychip {
+ ....
+ };
+
+
+In general, there are two ways of allocating the chip record.
+
+1. Allocating via :c:func:`snd_card_new()`.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, you can pass the extra-data-length to the 5th
+argument of :c:func:`snd_card_new()`, i.e.
+
+::
+
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ sizeof(struct mychip), &card);
+
+:c:type:`struct mychip <mychip>` is the type of the chip record.
+
+In return, the allocated record can be accessed as
+
+::
+
+ struct mychip *chip = card->private_data;
+
+With this method, you don't have to allocate twice. The record is
+released together with the card instance.
+
+2. Allocating an extra device.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After allocating a card instance via :c:func:`snd_card_new()`
+(with ``0`` on the 4th arg), call :c:func:`kzalloc()`.
+
+::
+
+ struct snd_card *card;
+ struct mychip *chip;
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 0, &card);
+ .....
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+
+The chip record should at least have a field to hold the card pointer,
+
+::
+
+ struct mychip {
+ struct snd_card *card;
+ ....
+ };
+
+
+Then, set the card pointer in the returned chip instance.
+
+::
+
+ chip->card = card;
+
+Next, initialize the fields, and register this chip record as a
+low-level device with a specified ``ops``,
+
+::
+
+ static struct snd_device_ops ops = {
+ .dev_free = snd_mychip_dev_free,
+ };
+ ....
+ snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+
+:c:func:`snd_mychip_dev_free()` is the device-destructor
+function, which will call the real destructor.
+
+::
+
+ static int snd_mychip_dev_free(struct snd_device *device)
+ {
+ return snd_mychip_free(device->device_data);
+ }
+
+where :c:func:`snd_mychip_free()` is the real destructor.
+
+Registration and Release
+------------------------
+
+After all components are assigned, register the card instance by calling
+:c:func:`snd_card_register()`. Access to the device files is
+enabled at this point. That is, before
+:c:func:`snd_card_register()` is called, the components are safely
+inaccessible from outside. If this call fails, exit the probe
+function after releasing the card via :c:func:`snd_card_free()`.
+
+For releasing the card instance, you can simply call
+:c:func:`snd_card_free()`. As mentioned earlier, all components
+are released automatically by this call.
+
+For a device which allows hotplugging, you can use
+:c:func:`snd_card_free_when_closed()`. This one will postpone
+the destruction until all devices are closed.
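+
+For instance, a hot-unplug path could release the card like below (a
+minimal sketch; the plain :c:func:`snd_card_free()` call in the
+``remove`` callback shown earlier is the usual case):
+
+::
+
+    /* defer the actual free until all device files are closed */
+    snd_card_free_when_closed(card);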
+
+PCI Resource Management
+=======================
+
+Full Code Example
+-----------------
+
+In this section, we'll complete the chip-specific constructor,
+destructor and PCI entries. Example code is shown first, below.
+
+::
+
+ struct mychip {
+ struct snd_card *card;
+ struct pci_dev *pci;
+
+ unsigned long port;
+ int irq;
+ };
+
+ static int snd_mychip_free(struct mychip *chip)
+ {
+ /* disable hardware here if any */
+ .... /* (not implemented in this document) */
+
+ /* release the irq */
+ if (chip->irq >= 0)
+ free_irq(chip->irq, chip);
+ /* release the I/O ports & memory */
+ pci_release_regions(chip->pci);
+ /* disable the PCI entry */
+ pci_disable_device(chip->pci);
+ /* release the data */
+ kfree(chip);
+ return 0;
+ }
+
+ /* chip-specific constructor */
+ static int snd_mychip_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct mychip **rchip)
+ {
+ struct mychip *chip;
+ int err;
+ static struct snd_device_ops ops = {
+ .dev_free = snd_mychip_dev_free,
+ };
+
+ *rchip = NULL;
+
+ /* initialize the PCI entry */
+ err = pci_enable_device(pci);
+ if (err < 0)
+ return err;
+ /* check PCI availability (28bit DMA) */
+ if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 ||
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) {
+ printk(KERN_ERR "error to set 28bit mask DMA\n");
+ pci_disable_device(pci);
+ return -ENXIO;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ pci_disable_device(pci);
+ return -ENOMEM;
+ }
+
+ /* initialize the stuff */
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+
+ /* (1) PCI resource allocation */
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
+ kfree(chip);
+ pci_disable_device(pci);
+ return err;
+ }
+ chip->port = pci_resource_start(pci, 0);
+ if (request_irq(pci->irq, snd_mychip_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip)) {
+ printk(KERN_ERR "cannot grab irq %d\n", pci->irq);
+ snd_mychip_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+
+ /* (2) initialization of the chip hardware */
+ .... /* (not implemented in this document) */
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_mychip_free(chip);
+ return err;
+ }
+
+ *rchip = chip;
+ return 0;
+ }
+
+ /* PCI IDs */
+ static struct pci_device_id snd_mychip_ids[] = {
+ { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ ....
+ { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, snd_mychip_ids);
+
+ /* pci_driver definition */
+ static struct pci_driver driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_mychip_ids,
+ .probe = snd_mychip_probe,
+ .remove = snd_mychip_remove,
+ };
+
+ /* module initialization */
+ static int __init alsa_card_mychip_init(void)
+ {
+ return pci_register_driver(&driver);
+ }
+
+ /* module clean up */
+ static void __exit alsa_card_mychip_exit(void)
+ {
+ pci_unregister_driver(&driver);
+ }
+
+ module_init(alsa_card_mychip_init)
+ module_exit(alsa_card_mychip_exit)
+
+ EXPORT_NO_SYMBOLS; /* for old kernels only */
+
+Some Hafta's
+------------
+
+The allocation of PCI resources is done in the ``probe`` function, and
+usually an extra :c:func:`xxx_create()` function is written for this
+purpose.
+
+In the case of PCI devices, you first have to call the
+:c:func:`pci_enable_device()` function before allocating
+resources. Also, you need to set the proper PCI DMA mask to limit the
+accessed I/O range. In some cases, you might need to call
+:c:func:`pci_set_master()` function, too.
+
+Supposing a 28bit DMA mask, the code to be added would look like this:
+
+::
+
+ err = pci_enable_device(pci);
+ if (err < 0)
+ return err;
+ if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 ||
+ pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) {
+ printk(KERN_ERR "error to set 28bit mask DMA\n");
+ pci_disable_device(pci);
+ return -ENXIO;
+ }
+
+
+Resource Allocation
+-------------------
+
+The allocation of I/O ports and irqs is done via standard kernel
+functions. Unlike ALSA ver.0.5.x., there are no helpers for that. And
+these resources must be released in the destructor function (see below).
+Also, on ALSA 0.9.x, you don't need to allocate (pseudo-)DMA for PCI
+like in ALSA 0.5.x.
+
+Now assume that the PCI device has an I/O port with 8 bytes and an
+interrupt. Then :c:type:`struct mychip <mychip>` will have the
+following fields:
+
+::
+
+ struct mychip {
+ struct snd_card *card;
+
+ unsigned long port;
+ int irq;
+ };
+
+
+For an I/O port (and also a memory region), you need to have the
+resource pointer for the standard resource management. For an irq, you
+have to keep only the irq number (integer). But you need to initialize
+this number as -1 before actual allocation, since irq 0 is valid. The
+port address and its resource pointer can be initialized as null by
+:c:func:`kzalloc()` automatically, so you don't have to take care of
+resetting them.
+
+The allocation of an I/O port is done like this:
+
+::
+
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
+ kfree(chip);
+ pci_disable_device(pci);
+ return err;
+ }
+ chip->port = pci_resource_start(pci, 0);
+
+It will reserve the I/O port region of 8 bytes for the given PCI device.
+When you instead reserve the region manually, the value returned from
+:c:func:`request_region()` (e.g. ``chip->res_port``) is allocated via
+:c:func:`kmalloc()` by :c:func:`request_region()`. The pointer
+must be released via :c:func:`kfree()`, but there is a problem with
+this. This issue will be explained later.
+
+The allocation of an interrupt source is done like this:
+
+::
+
+ if (request_irq(pci->irq, snd_mychip_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip)) {
+ printk(KERN_ERR "cannot grab irq %d\n", pci->irq);
+ snd_mychip_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+
+where :c:func:`snd_mychip_interrupt()` is the interrupt handler
+defined `later <#pcm-interface-interrupt-handler>`__. Note that
+``chip->irq`` should be defined only when :c:func:`request_irq()`
+succeeded.
+
+On the PCI bus, interrupts can be shared. Thus, ``IRQF_SHARED`` is used
+as the interrupt flag of :c:func:`request_irq()`.
+
+The last argument of :c:func:`request_irq()` is the data pointer
+passed to the interrupt handler. Usually, the chip-specific record is
+used for that, but you can use what you like, too.
+
+I won't give details about the interrupt handler at this point, but at
+least its appearance can be explained now. The interrupt handler usually
+looks like the following:
+
+::
+
+ static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
+ {
+ struct mychip *chip = dev_id;
+ ....
+ return IRQ_HANDLED;
+ }
+
+
+Now let's write the corresponding destructor for the resources above.
+The role of the destructor is simple: disable the hardware (if already
+activated) and release the resources. So far, we have no hardware part,
+so the disabling code is not written here.
+
+To release the resources, the “check-and-release” method is a safer way.
+For the interrupt, do like this:
+
+::
+
+ if (chip->irq >= 0)
+ free_irq(chip->irq, chip);
+
+Since the irq number can start from 0, you should initialize
+``chip->irq`` with a negative value (e.g. -1), so that you can check
+the validity of the irq number as above.
+
+When you requested I/O ports or memory regions via
+:c:func:`pci_request_region()` or
+:c:func:`pci_request_regions()` like in this example, release the
+resource(s) using the corresponding function,
+:c:func:`pci_release_region()` or
+:c:func:`pci_release_regions()`.
+
+::
+
+ pci_release_regions(chip->pci);
+
+When you requested manually via :c:func:`request_region()` or
+:c:func:`request_mem_region()`, you can release it via
+:c:func:`release_resource()`. Supposing that you keep the resource
+pointer returned from :c:func:`request_region()` in
+``chip->res_port``, the release procedure looks like:
+
+::
+
+ release_and_free_resource(chip->res_port);
+
+Don't forget to call :c:func:`pci_disable_device()` before the
+end.
+
+And finally, release the chip-specific record.
+
+::
+
+ kfree(chip);
+
+We didn't implement the hardware disabling part in the above. If you
+need to do this, please note that the destructor may be called even
+before the initialization of the chip is completed. It would be better
+to have a flag to skip hardware disabling if the hardware was not
+initialized yet.
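+
+A minimal sketch of such a guard, using a hypothetical
+``chip->initialized`` flag that the constructor would set once the
+hardware setup has completed:
+
+::
+
+    static int snd_mychip_free(struct mychip *chip)
+    {
+            if (chip->initialized)
+                    snd_mychip_hw_disable(chip); /* hypothetical helper */
+            /* then release the irq, regions, etc. as shown above */
+            ....
+    }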
+
+When the chip-data is assigned to the card using
+:c:func:`snd_device_new()` with ``SNDRV_DEV_LOWLEVEL``, its
+destructor is called last. That is, it is assured that all other
+components like PCMs and controls have already been released. You don't
+have to stop PCMs, etc. explicitly, but just do the low-level hardware
+stopping.
+
+The management of a memory-mapped region is almost the same as the
+management of an I/O port. You'll need three fields like the
+following:
+
+::
+
+ struct mychip {
+ ....
+ unsigned long iobase_phys;
+ void __iomem *iobase_virt;
+ };
+
+and the allocation would be like below:
+
+::
+
+ if ((err = pci_request_regions(pci, "My Chip")) < 0) {
+ kfree(chip);
+ return err;
+ }
+ chip->iobase_phys = pci_resource_start(pci, 0);
+ chip->iobase_virt = ioremap_nocache(chip->iobase_phys,
+ pci_resource_len(pci, 0));
+
+and the corresponding destructor would be:
+
+::
+
+ static int snd_mychip_free(struct mychip *chip)
+ {
+ ....
+ if (chip->iobase_virt)
+ iounmap(chip->iobase_virt);
+ ....
+ pci_release_regions(chip->pci);
+ ....
+ }
+
+PCI Entries
+-----------
+
+So far, so good. Let's finish the missing PCI stuff. At first, we need a
+:c:type:`struct pci_device_id <pci_device_id>` table for
+this chipset. It's a table of PCI vendor/device ID number, and some
+masks.
+
+For example,
+
+::
+
+ static struct pci_device_id snd_mychip_ids[] = {
+ { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ ....
+ { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, snd_mychip_ids);
+
+The first and second fields of the :c:type:`struct pci_device_id
+<pci_device_id>` structure are the vendor and device IDs. If you
+have no reason to filter the matching devices, you can leave the
+remaining fields as above. The last field of the :c:type:`struct
+pci_device_id <pci_device_id>` struct contains private data
+for this entry. You can specify any value here, for example, to define
+specific operations for supported device IDs. Such an example is found
+in the intel8x0 driver.
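+
+A minimal sketch of how that private field can be used, with a
+hypothetical ``MYCHIP_TYPE_FOO`` constant (the intel8x0 driver mentioned
+above does this for real):
+
+::
+
+    static struct pci_device_id snd_mychip_ids[] = {
+        { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
+          PCI_ANY_ID, PCI_ANY_ID, 0, 0, MYCHIP_TYPE_FOO },
+        { 0, }
+    };
+
+    /* later, in the probe callback */
+    unsigned long chip_type = pci_id->driver_data;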
+
+The last entry of this list is the terminator. You must specify this
+all-zero entry.
+
+Then, prepare the :c:type:`struct pci_driver <pci_driver>`
+record:
+
+::
+
+ static struct pci_driver driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_mychip_ids,
+ .probe = snd_mychip_probe,
+ .remove = snd_mychip_remove,
+ };
+
+The ``probe`` and ``remove`` functions have already been defined in
+the previous sections. The ``name`` field is the name string of this
+device. Note that you must not use a slash “/” in this string.
+
+And at last, the module entries:
+
+::
+
+ static int __init alsa_card_mychip_init(void)
+ {
+ return pci_register_driver(&driver);
+ }
+
+ static void __exit alsa_card_mychip_exit(void)
+ {
+ pci_unregister_driver(&driver);
+ }
+
+ module_init(alsa_card_mychip_init)
+ module_exit(alsa_card_mychip_exit)
+
+Note that these module entries are tagged with ``__init`` and ``__exit``
+prefixes.
+
+Oh, one thing was forgotten. If you have no exported symbols, you need
+to declare it in 2.2 or 2.4 kernels (it's not necessary in 2.6 kernels).
+
+::
+
+ EXPORT_NO_SYMBOLS;
+
+That's all!
+
+PCM Interface
+=============
+
+General
+-------
+
+The PCM middle layer of ALSA is quite powerful and it is only necessary
+for each driver to implement the low-level functions to access its
+hardware.
+
+To access the PCM layer, you need to include ``<sound/pcm.h>``
+first. In addition, ``<sound/pcm_params.h>`` might be needed if you
+access some functions related to hw_params.
+
+Each card device can have up to four pcm instances. A pcm instance
+corresponds to a pcm device file. The limitation of number of instances
+comes only from the available bit size of Linux's device numbers.
+Once 64bit device numbers are used, we'll have more pcm instances
+available.
+
+A pcm instance consists of pcm playback and capture streams, and each
+pcm stream consists of one or more pcm substreams. Some soundcards
+support multiple playback functions. For example, emu10k1 has a PCM
+playback of 32 stereo substreams. In this case, at each open, a free
+substream is (usually) automatically chosen and opened. Meanwhile, when
+only one substream exists and it was already opened, the successful open
+will either block or error with ``EAGAIN`` according to the file open
+mode. But you don't have to care about such details in your driver. The
+PCM middle layer will take care of such work.
+
+Full Code Example
+-----------------
+
+The example code below does not include any hardware access routines but
+shows only the skeleton of how to build up the PCM interfaces.
+
+::
+
+ #include <sound/pcm.h>
+ ....
+
+ /* hardware definition */
+ static struct snd_pcm_hardware snd_mychip_playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 32768,
+ .period_bytes_min = 4096,
+ .period_bytes_max = 32768,
+ .periods_min = 1,
+ .periods_max = 1024,
+ };
+
+ /* hardware definition */
+ static struct snd_pcm_hardware snd_mychip_capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 32768,
+ .period_bytes_min = 4096,
+ .period_bytes_max = 32768,
+ .periods_min = 1,
+ .periods_max = 1024,
+ };
+
+ /* open callback */
+ static int snd_mychip_playback_open(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw = snd_mychip_playback_hw;
+ /* more hardware-initialization will be done here */
+ ....
+ return 0;
+ }
+
+ /* close callback */
+ static int snd_mychip_playback_close(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
+
+ }
+
+ /* open callback */
+ static int snd_mychip_capture_open(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw = snd_mychip_capture_hw;
+ /* more hardware-initialization will be done here */
+ ....
+ return 0;
+ }
+
+ /* close callback */
+ static int snd_mychip_capture_close(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
+
+ }
+
+ /* hw_params callback */
+ static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+ {
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ }
+
+ /* hw_free callback */
+ static int snd_mychip_pcm_hw_free(struct snd_pcm_substream *substream)
+ {
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+ /* prepare callback */
+ static int snd_mychip_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /* set up the hardware with the current configuration
+ * for example...
+ */
+ mychip_set_sample_format(chip, runtime->format);
+ mychip_set_sample_rate(chip, runtime->rate);
+ mychip_set_channels(chip, runtime->channels);
+ mychip_set_dma_setup(chip, runtime->dma_addr,
+ chip->buffer_size,
+ chip->period_size);
+ return 0;
+ }
+
+ /* trigger callback */
+ static int snd_mychip_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+ {
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ /* do something to start the PCM engine */
+ ....
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* do something to stop the PCM engine */
+ ....
+ break;
+ default:
+ return -EINVAL;
+            }
+            return 0;
+    }
+
+ /* pointer callback */
+ static snd_pcm_uframes_t
+ snd_mychip_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ unsigned int current_ptr;
+
+ /* get the current hardware pointer */
+ current_ptr = mychip_get_hw_pointer(chip);
+ return current_ptr;
+ }
+
+ /* operators */
+ static struct snd_pcm_ops snd_mychip_playback_ops = {
+ .open = snd_mychip_playback_open,
+ .close = snd_mychip_playback_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_mychip_pcm_hw_params,
+ .hw_free = snd_mychip_pcm_hw_free,
+ .prepare = snd_mychip_pcm_prepare,
+ .trigger = snd_mychip_pcm_trigger,
+ .pointer = snd_mychip_pcm_pointer,
+ };
+
+ /* operators */
+ static struct snd_pcm_ops snd_mychip_capture_ops = {
+ .open = snd_mychip_capture_open,
+ .close = snd_mychip_capture_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_mychip_pcm_hw_params,
+ .hw_free = snd_mychip_pcm_hw_free,
+ .prepare = snd_mychip_pcm_prepare,
+ .trigger = snd_mychip_pcm_trigger,
+ .pointer = snd_mychip_pcm_pointer,
+ };
+
+ /*
+ * definitions of capture are omitted here...
+ */
+
+ /* create a pcm device */
+ static int snd_mychip_new_pcm(struct mychip *chip)
+ {
+ struct snd_pcm *pcm;
+ int err;
+
+ err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, "My Chip");
+ chip->pcm = pcm;
+ /* set operators */
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_mychip_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_mychip_capture_ops);
+ /* pre-allocation of buffers */
+ /* NOTE: this may fail */
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64*1024, 64*1024);
+ return 0;
+ }
+
+
+PCM Constructor
+---------------
+
+A pcm instance is allocated by the :c:func:`snd_pcm_new()`
+function. It would be better to create a constructor for pcm, namely,
+
+::
+
+ static int snd_mychip_new_pcm(struct mychip *chip)
+ {
+ struct snd_pcm *pcm;
+ int err;
+
+ err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, "My Chip");
+ chip->pcm = pcm;
+ ....
+ return 0;
+ }
+
+The :c:func:`snd_pcm_new()` function takes six arguments. The
+first argument is the card pointer to which this pcm is assigned, and
+the second is the ID string; the last argument is the pointer to store
+the resultant pcm instance.
+
+The third argument (``index``, 0 in the above) is the index of this new
+pcm. It begins from zero. If you create more than one pcm instance,
+specify different numbers in this argument. For example, ``index =
+1`` for the second PCM device.
+
+The fourth and fifth arguments are the number of substreams for playback
+and capture, respectively. Here 1 is used for both arguments. When no
+playback or capture substreams are available, pass 0 to the
+corresponding argument.
+
+If a chip supports multiple playbacks or captures, you can specify more
+numbers, but they must be handled properly in open/close, etc.
+callbacks. When you need to know which substream you are referring to,
+then it can be obtained from :c:type:`struct snd_pcm_substream
+<snd_pcm_substream>` data passed to each callback as follows:
+
+::
+
+ struct snd_pcm_substream *substream;
+ int index = substream->number;
+
+
+After the pcm is created, you need to set operators for each pcm stream.
+
+::
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_mychip_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_mychip_capture_ops);
+
+The operators are defined typically like this:
+
+::
+
+ static struct snd_pcm_ops snd_mychip_playback_ops = {
+ .open = snd_mychip_pcm_open,
+ .close = snd_mychip_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_mychip_pcm_hw_params,
+ .hw_free = snd_mychip_pcm_hw_free,
+ .prepare = snd_mychip_pcm_prepare,
+ .trigger = snd_mychip_pcm_trigger,
+ .pointer = snd_mychip_pcm_pointer,
+ };
+
+All the callbacks are described in the Operators_ subsection.
+
+After setting the operators, you will probably want to pre-allocate the
+buffer. For the pre-allocation, simply call the following:
+
+::
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64*1024, 64*1024);
+
+It will allocate a buffer of up to 64kB by default. Buffer management
+details will be described in the later section `Buffer and Memory
+Management`_.
+
+Additionally, you can set some extra information for this pcm in
+``pcm->info_flags``. The available values are defined as
+``SNDRV_PCM_INFO_XXX`` in ``<sound/asound.h>``, which is used for the
+hardware definition (described later). When your soundchip supports only
+half-duplex, specify it like this:
+
+::
+
+ pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
+
+
+... And the Destructor?
+-----------------------
+
+The destructor for a pcm instance is not always necessary. Since the pcm
+device will be released by the middle layer code automatically, you
+don't have to call the destructor explicitly.
+
+The destructor would be necessary if you created special records
+internally and needed to release them. In such a case, set the
+destructor function to ``pcm->private_free``:
+
+::
+
+ static void mychip_pcm_free(struct snd_pcm *pcm)
+ {
+ struct mychip *chip = snd_pcm_chip(pcm);
+ /* free your own data */
+ kfree(chip->my_private_pcm_data);
+ /* do what you like else */
+ ....
+ }
+
+ static int snd_mychip_new_pcm(struct mychip *chip)
+ {
+ struct snd_pcm *pcm;
+ ....
+ /* allocate your own data */
+ chip->my_private_pcm_data = kmalloc(...);
+ /* set the destructor */
+ pcm->private_data = chip;
+ pcm->private_free = mychip_pcm_free;
+ ....
+ }
+
+
+
+Runtime Pointer - The Chest of PCM Information
+----------------------------------------------
+
+When the PCM substream is opened, a PCM runtime instance is allocated
+and assigned to the substream. This pointer is accessible via
+``substream->runtime``. This runtime pointer holds most information you
+need to control the PCM: the copy of hw_params and sw_params
+configurations, the buffer pointers, mmap records, spinlocks, etc.
+
+The definition of runtime instance is found in ``<sound/pcm.h>``. Here
+are the contents of this file:
+
+::
+
+ struct _snd_pcm_runtime {
+ /* -- Status -- */
+ struct snd_pcm_substream *trigger_master;
+ snd_timestamp_t trigger_tstamp; /* trigger timestamp */
+ int overrange;
+ snd_pcm_uframes_t avail_max;
+ snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
+ snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time*/
+
+ /* -- HW params -- */
+ snd_pcm_access_t access; /* access mode */
+ snd_pcm_format_t format; /* SNDRV_PCM_FORMAT_* */
+ snd_pcm_subformat_t subformat; /* subformat */
+ unsigned int rate; /* rate in Hz */
+ unsigned int channels; /* channels */
+ snd_pcm_uframes_t period_size; /* period size */
+ unsigned int periods; /* periods */
+ snd_pcm_uframes_t buffer_size; /* buffer size */
+ unsigned int tick_time; /* tick time */
+ snd_pcm_uframes_t min_align; /* Min alignment for the format */
+ size_t byte_align;
+ unsigned int frame_bits;
+ unsigned int sample_bits;
+ unsigned int info;
+ unsigned int rate_num;
+ unsigned int rate_den;
+
+ /* -- SW params -- */
+ struct timespec tstamp_mode; /* mmap timestamp is updated */
+ unsigned int period_step;
+ unsigned int sleep_min; /* min ticks to sleep */
+ snd_pcm_uframes_t start_threshold;
+ snd_pcm_uframes_t stop_threshold;
+ snd_pcm_uframes_t silence_threshold; /* Silence filling happens when
+ noise is nearest than this */
+ snd_pcm_uframes_t silence_size; /* Silence filling size */
+ snd_pcm_uframes_t boundary; /* pointers wrap point */
+
+ snd_pcm_uframes_t silenced_start;
+ snd_pcm_uframes_t silenced_size;
+
+ snd_pcm_sync_id_t sync; /* hardware synchronization ID */
+
+ /* -- mmap -- */
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ atomic_t mmap_count;
+
+ /* -- locking / scheduling -- */
+ spinlock_t lock;
+ wait_queue_head_t sleep;
+ struct timer_list tick_timer;
+ struct fasync_struct *fasync;
+
+ /* -- private section -- */
+ void *private_data;
+ void (*private_free)(struct snd_pcm_runtime *runtime);
+
+ /* -- hardware description -- */
+ struct snd_pcm_hardware hw;
+ struct snd_pcm_hw_constraints hw_constraints;
+
+ /* -- timer -- */
+ unsigned int timer_resolution; /* timer resolution */
+
+ /* -- DMA -- */
+ unsigned char *dma_area; /* DMA area */
+ dma_addr_t dma_addr; /* physical bus address (not accessible from main CPU) */
+ size_t dma_bytes; /* size of DMA area */
+
+ struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+
+ #if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+ /* -- OSS things -- */
+ struct snd_pcm_oss_runtime oss;
+ #endif
+ };
+
+
+For the operators (callbacks) of each sound driver, most of these
+records are supposed to be read-only. Only the PCM middle layer changes
+or updates them. The exceptions are the hardware description (hw), the
+DMA buffer information and the private data. Besides, if you use the
+standard buffer allocation method via
+:c:func:`snd_pcm_lib_malloc_pages()`, you don't need to set the
+DMA buffer information by yourself.
+
+In the sections below, important records are explained.
+
+Hardware Description
+~~~~~~~~~~~~~~~~~~~~
+
+The hardware descriptor (:c:type:`struct snd_pcm_hardware
+<snd_pcm_hardware>`) contains the definitions of the fundamental
+hardware configuration. Above all, you'll need to define this in the
+`PCM open callback`_. Note that the runtime instance holds the copy of
+the descriptor, not the pointer to the existing descriptor. That is,
+in the open callback, you can modify the copied descriptor
+(``runtime->hw``) as you need. For example, if the maximum number of
+channels is 1 only on some chip models, you can still use the same
+hardware descriptor and change the channels_max later:
+
+::
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ ...
+ runtime->hw = snd_mychip_playback_hw; /* common definition */
+ if (chip->model == VERY_OLD_ONE)
+ runtime->hw.channels_max = 1;
+
+Typically, you'll have a hardware descriptor as below:
+
+::
+
+ static struct snd_pcm_hardware snd_mychip_playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 32768,
+ .period_bytes_min = 4096,
+ .period_bytes_max = 32768,
+ .periods_min = 1,
+ .periods_max = 1024,
+ };
+
+- The ``info`` field contains the type and capabilities of this
+ pcm. The bit flags are defined in ``<sound/asound.h>`` as
+ ``SNDRV_PCM_INFO_XXX``. Here, at least, you have to specify whether
+ the mmap is supported and which interleaved format is
+ supported. When the hardware supports mmap, add the
+ ``SNDRV_PCM_INFO_MMAP`` flag here. When the hardware supports the
+ interleaved or the non-interleaved formats,
+ ``SNDRV_PCM_INFO_INTERLEAVED`` or ``SNDRV_PCM_INFO_NONINTERLEAVED``
+ flag must be set, respectively. If both are supported, you can set
+ both, too.
+
+ In the above example, ``MMAP_VALID`` and ``BLOCK_TRANSFER`` are
+ specified for the OSS mmap mode. Usually both are set. Of course,
+ ``MMAP_VALID`` is set only if the mmap is really supported.
+
+ The other possible flags are ``SNDRV_PCM_INFO_PAUSE`` and
+ ``SNDRV_PCM_INFO_RESUME``. The ``PAUSE`` bit means that the pcm
+ supports the “pause” operation, while the ``RESUME`` bit means that
+ the pcm supports the full “suspend/resume” operation. If the
+ ``PAUSE`` flag is set, the ``trigger`` callback below must handle
+ the corresponding (pause push/release) commands. The suspend/resume
+ trigger commands can be defined even without the ``RESUME``
+ flag. See `Power Management`_ section for details.
+
+ When the PCM substreams can be synchronized (typically,
+ synchronized start/stop of a playback and a capture streams), you
+ can give ``SNDRV_PCM_INFO_SYNC_START``, too. In this case, you'll
+ need to check the linked-list of PCM substreams in the trigger
+ callback. This will be described in the later section.
+
+- ``formats`` field contains the bit-flags of supported formats
+ (``SNDRV_PCM_FMTBIT_XXX``). If the hardware supports more than one
+ format, give all or'ed bits. In the example above, the signed 16bit
+ little-endian format is specified.
+
+- ``rates`` field contains the bit-flags of supported rates
+ (``SNDRV_PCM_RATE_XXX``). When the chip supports continuous rates,
+ pass ``CONTINUOUS`` bit additionally. The pre-defined rate bits are
+ provided only for typical rates. If your chip supports
+ unconventional rates, you need to add the ``KNOT`` bit and set up
+ the hardware constraint manually (explained later).
+
+- ``rate_min`` and ``rate_max`` define the minimum and maximum sample
+ rate. These should correspond to the ``rates`` bits.
+
+- ``channels_min`` and ``channels_max`` define, as you might have
+ already expected, the minimum and maximum number of channels.
+
+- ``buffer_bytes_max`` defines the maximum buffer size in
+ bytes. There is no ``buffer_bytes_min`` field, since it can be
+ calculated from the minimum period size and the minimum number of
+ periods. Meanwhile, ``period_bytes_min`` and ``period_bytes_max`` define
+ the minimum and maximum size of the period in bytes. ``periods_max`` and
+ ``periods_min`` define the maximum and minimum number of periods in
+ the buffer.
+
+ The “period” is a term that corresponds to a fragment in the OSS
+ world. The period defines the size at which a PCM interrupt is
+ generated. This size strongly depends on the hardware. Generally,
+ the smaller period size will give you more interrupts, that is,
+ more controls. In the case of capture, this size defines the input
+ latency. On the other hand, the whole buffer size defines the
+ output latency for the playback direction.
+
+- There is also a field ``fifo_size``. This specifies the size of the
+ hardware FIFO, but currently it is neither used in the driver nor
+ in the alsa-lib. So, you can ignore this field.
+
+PCM Configurations
+~~~~~~~~~~~~~~~~~~
+
+Ok, let's go back again to the PCM runtime records. The most
+frequently referred records in the runtime instance are the PCM
+configurations. The PCM configurations are stored in the runtime
+instance after the application sends ``hw_params`` data via
+alsa-lib. There are many fields copied from hw_params and sw_params
+structs. For example, ``format`` holds the format type chosen by the
+application. This field contains the enum value
+``SNDRV_PCM_FORMAT_XXX``.
+
+One thing to be noted is that the configured buffer and period sizes
+are stored in “frames” in the runtime. In the ALSA world, ``1 frame =
+channels * samples-size``. For conversion between frames and bytes,
+you can use the :c:func:`frames_to_bytes()` and
+:c:func:`bytes_to_frames()` helper functions.
+
+::
+
+ period_bytes = frames_to_bytes(runtime, runtime->period_size);
+
+Many software parameters (sw_params) are stored in frames, too.
+Please check the type of the field: ``snd_pcm_uframes_t`` holds the
+number of frames as an unsigned integer, while ``snd_pcm_sframes_t``
+holds it as a signed integer.
+
+DMA Buffer Information
+~~~~~~~~~~~~~~~~~~~~~~
+
+The DMA buffer is defined by the following four fields, ``dma_area``,
+``dma_addr``, ``dma_bytes`` and ``dma_private``. The ``dma_area``
+holds the buffer pointer (the logical address). You can call
+:c:func:`memcpy()` from/to this pointer. Meanwhile, ``dma_addr`` holds
+the physical address of the buffer. This field is specified only when
+the buffer is a linear buffer. ``dma_bytes`` holds the size of buffer
+in bytes. ``dma_private`` is used for the ALSA DMA allocator.
+
+If you use a standard ALSA function,
+:c:func:`snd_pcm_lib_malloc_pages()`, for allocating the buffer,
+these fields are set by the ALSA middle layer, and you should *not*
+change them by yourself. You can read them but not write them. On the
+other hand, if you want to allocate the buffer by yourself, you'll
+need to manage it in hw_params callback. At least, ``dma_bytes`` is
+mandatory. ``dma_area`` is necessary when the buffer is mmapped. If
+your driver doesn't support mmap, this field is not
+necessary. ``dma_addr`` is also optional. You can use dma_private as
+you like, too.
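+
+As an illustration, a minimal sketch of a ``hw_params`` callback that
+manages the buffer by itself (instead of using the standard allocator)
+might look like the following. ``mychip_alloc_buffer()`` is a
+hypothetical helper that returns the kernel-virtual address of a
+chip-specific buffer, or NULL on failure:
+
+::
+
+ static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
+                                     struct snd_pcm_hw_params *hw_params)
+ {
+         struct snd_pcm_runtime *runtime = substream->runtime;
+
+         /* only dma_bytes is mandatory; dma_area is needed when the
+          * buffer is mmapped, dma_addr is optional */
+         runtime->dma_bytes = params_buffer_bytes(hw_params);
+         runtime->dma_area = mychip_alloc_buffer(substream,
+                                                 runtime->dma_bytes);
+         if (!runtime->dma_area)
+                 return -ENOMEM;
+         return 0;
+ }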
+
+Running Status
+~~~~~~~~~~~~~~
+
+The running status can be referred to via ``runtime->status``. This is
+the pointer to the :c:type:`struct snd_pcm_mmap_status
+<snd_pcm_mmap_status>` record. For example, you can get the current
+DMA hardware pointer via ``runtime->status->hw_ptr``.
+
+The DMA application pointer can be referred to via ``runtime->control``,
+which points to the :c:type:`struct snd_pcm_mmap_control
+<snd_pcm_mmap_control>` record. However, accessing this value
+directly is not recommended.
+
+Private Data
+~~~~~~~~~~~~
+
+You can allocate a record for the substream and store it in
+``runtime->private_data``. Usually, this is done in the `PCM open
+callback`_. Don't mix this with ``pcm->private_data``. The
+``pcm->private_data`` usually points to the chip instance assigned
+statically at the creation of PCM, while the ``runtime->private_data``
+points to a dynamic data structure created at the PCM open
+callback.
+
+::
+
+ static int snd_xxx_open(struct snd_pcm_substream *substream)
+ {
+ struct my_pcm_data *data;
+ ....
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ substream->runtime->private_data = data;
+ ....
+ }
+
+
+The allocated object must be released in the `close callback`_.
+
+Operators
+---------
+
+OK, now let me give details about each pcm callback (``ops``). In
+general, every callback must return 0 if successful, or a negative
+error number such as ``-EINVAL``. To choose an appropriate error
+number, it is advised to check what value other parts of the kernel
+return when the same kind of request fails.
+
+Each callback function takes at least a pointer to :c:type:`struct
+snd_pcm_substream <snd_pcm_substream>` as an argument. To retrieve the chip
+record from the given substream instance, you can use the following
+macro.
+
+::
+
+ int xxx() {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ ....
+ }
+
+The macro reads ``substream->private_data``, which is a copy of
+``pcm->private_data``. You can override the former if you need to
+assign different data records per PCM substream. For example, the
+cmi8330 driver assigns different ``private_data`` for playback and
+capture directions, because it uses two different codecs (SB- and
+AD-compatible) for different directions.
+
+PCM open callback
+~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_open(struct snd_pcm_substream *substream);
+
+This is called when a pcm substream is opened.
+
+At least, here you have to initialize the ``runtime->hw``
+record. Typically, this is done like this:
+
+::
+
+ static int snd_xxx_open(struct snd_pcm_substream *substream)
+ {
+ struct mychip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw = snd_mychip_playback_hw;
+ return 0;
+ }
+
+where ``snd_mychip_playback_hw`` is the pre-defined hardware
+description.
+
+You can allocate private data in this callback, as described in the
+`Private Data`_ section.
+
+If the hardware configuration needs more constraints, set the hardware
+constraints here, too. See Constraints_ for more details.
+
+close callback
+~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_close(struct snd_pcm_substream *substream);
+
+
+Obviously, this is called when a pcm substream is closed.
+
+Any private instance for a pcm substream allocated in the ``open``
+callback will be released here.
+
+::
+
+ static int snd_xxx_close(struct snd_pcm_substream *substream)
+ {
+ ....
+ kfree(substream->runtime->private_data);
+ ....
+ }
+
+ioctl callback
+~~~~~~~~~~~~~~
+
+This is used for any special call to pcm ioctls. But usually you can
+pass a generic ioctl callback, :c:func:`snd_pcm_lib_ioctl()`.
+
+hw_params callback
+~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params);
+
+This is called when the hardware parameter (``hw_params``) is set up
+by the application, that is, once when the buffer size, the period
+size, the format, etc. are defined for the pcm substream.
+
+Much of the hardware setup should be done in this callback, including
+the allocation of buffers.
+
+Parameters to be initialized are retrieved by the
+:c:func:`params_xxx()` macros. To allocate a buffer, you can call the
+helper function,
+
+::
+
+ snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+
+:c:func:`snd_pcm_lib_malloc_pages()` is available only when the
+DMA buffers have been pre-allocated. See the section `Buffer Types`_
+for more details.
+
+Note that this and ``prepare`` callbacks may be called multiple times
+per initialization. For example, the OSS emulation may call these
+callbacks at each change via its ioctl.
+
+Thus, you need to be careful not to allocate the same buffers many
+times, which will lead to memory leaks! Calling the helper function
+above many times is OK. It will release the previous buffer
+automatically if it was already allocated.
+
+Another note is that this callback is non-atomic (schedulable) by
+default, i.e. when no ``nonatomic`` flag is set. This is important,
+because the ``trigger`` callback is atomic (non-schedulable). That is,
+mutexes or any schedule-related functions are not available in
+``trigger`` callback. Please see the subsection Atomicity_ for
+details.
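+
+Putting it together, when the buffer is pre-allocated as shown earlier,
+a minimal ``hw_params`` callback can be sketched as below:
+
+::
+
+ static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
+                                     struct snd_pcm_hw_params *hw_params)
+ {
+         /* safe to call repeatedly; the helper reuses or reallocates
+          * the pre-allocated buffer as needed */
+         return snd_pcm_lib_malloc_pages(substream,
+                                         params_buffer_bytes(hw_params));
+ }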
+
+hw_free callback
+~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_hw_free(struct snd_pcm_substream *substream);
+
+This is called to release the resources allocated via
+``hw_params``. For example, releasing the buffer via
+:c:func:`snd_pcm_lib_malloc_pages()` is done by calling the
+following:
+
+::
+
+ snd_pcm_lib_free_pages(substream);
+
+This function is always called before the close callback is called.
+Also, the callback may be called multiple times. Keep track of
+whether the resource was already released.
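+
+For the common case where the buffer was allocated with
+:c:func:`snd_pcm_lib_malloc_pages()`, the whole callback can be a
+one-liner sketch like:
+
+::
+
+ static int snd_mychip_pcm_hw_free(struct snd_pcm_substream *substream)
+ {
+         /* releasing an already-released buffer is handled gracefully */
+         return snd_pcm_lib_free_pages(substream);
+ }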
+
+prepare callback
+~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_prepare(struct snd_pcm_substream *substream);
+
+This callback is called when the pcm is “prepared”. You can set the
+format type, sample rate, etc. here. The difference from ``hw_params``
+is that the ``prepare`` callback will be called each time
+:c:func:`snd_pcm_prepare()` is called, i.e. when recovering after
+underruns, etc.
+
+Note that this callback is now non-atomic. You can use
+schedule-related functions safely in this callback.
+
+In this and the following callbacks, you can refer to the values via
+the runtime record, ``substream->runtime``. For example, to get the
+current rate, format or channels, access ``runtime->rate``,
+``runtime->format`` or ``runtime->channels``, respectively. The
+logical address of the allocated buffer is set to
+``runtime->dma_area``. The buffer and period sizes are in
+``runtime->buffer_size`` and ``runtime->period_size``, respectively.
+
+Be careful that this callback will be called many times at each setup,
+too.
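+
+As a rough sketch, a ``prepare`` callback might look like the
+following; all ``mychip_set_*()`` helpers are hypothetical
+register-programming routines of the imaginary chip:
+
+::
+
+ static int snd_mychip_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+         struct mychip *chip = snd_pcm_substream_chip(substream);
+         struct snd_pcm_runtime *runtime = substream->runtime;
+
+         /* program the hardware from the chosen configuration;
+          * the helpers below are hypothetical */
+         mychip_set_sample_format(chip, runtime->format);
+         mychip_set_sample_rate(chip, runtime->rate);
+         mychip_set_channels(chip, runtime->channels);
+         mychip_set_dma(chip, runtime->dma_addr,
+                        runtime->buffer_size, runtime->period_size);
+         return 0;
+ }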
+
+trigger callback
+~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_trigger(struct snd_pcm_substream *substream, int cmd);
+
+This is called when the pcm is started, stopped or paused.
+
+The action is specified in the second argument as
+``SNDRV_PCM_TRIGGER_XXX``, defined in ``<sound/pcm.h>``. At least, the ``START``
+and ``STOP`` commands must be handled in this callback.
+
+::
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ /* do something to start the PCM engine */
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* do something to stop the PCM engine */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+When the pcm supports the pause operation (given in the info field of
+the hardware table), the ``PAUSE_PUSH`` and ``PAUSE_RELEASE`` commands
+must be handled here, too. The former is the command to pause the pcm,
+and the latter to restart the pcm again.
+
+When the pcm supports the suspend/resume operation, regardless of full
+or partial suspend/resume support, the ``SUSPEND`` and ``RESUME``
+commands must be handled, too. These commands are issued when the
+power-management status is changed. Obviously, the ``SUSPEND`` and
+``RESUME`` commands suspend and resume the pcm substream, and usually,
+they are identical to the ``STOP`` and ``START`` commands, respectively.
+See the `Power Management`_ section for details.
+
+As mentioned, this callback is atomic by default unless the ``nonatomic``
+flag is set, and you cannot call functions which may sleep. The
+``trigger`` callback should be as minimal as possible, just really
+triggering the DMA. The other stuff should be initialized properly in the
+``hw_params`` and ``prepare`` callbacks beforehand.
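+
+A slightly fuller sketch that also handles the pause and
+suspend/resume commands might look like this, assuming hypothetical
+``mychip_start_dma()`` and ``mychip_stop_dma()`` helpers:
+
+::
+
+ static int snd_mychip_pcm_trigger(struct snd_pcm_substream *substream,
+                                   int cmd)
+ {
+         struct mychip *chip = snd_pcm_substream_chip(substream);
+
+         switch (cmd) {
+         case SNDRV_PCM_TRIGGER_START:
+         case SNDRV_PCM_TRIGGER_RESUME:
+         case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+                 mychip_start_dma(chip);         /* hypothetical helper */
+                 break;
+         case SNDRV_PCM_TRIGGER_STOP:
+         case SNDRV_PCM_TRIGGER_SUSPEND:
+         case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+                 mychip_stop_dma(chip);          /* hypothetical helper */
+                 break;
+         default:
+                 return -EINVAL;
+         }
+         return 0;
+ }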
+
+pointer callback
+~~~~~~~~~~~~~~~~
+
+::
+
+ static snd_pcm_uframes_t snd_xxx_pointer(struct snd_pcm_substream *substream)
+
+This callback is called when the PCM middle layer inquires the current
+hardware position on the buffer. The position must be returned in
+frames, ranging from 0 to ``buffer_size - 1``.
+
+This is called usually from the buffer-update routine in the pcm
+middle layer, which is invoked when :c:func:`snd_pcm_period_elapsed()`
+is called in the interrupt routine. Then the pcm middle layer updates
+the position and calculates the available space, and wakes up the
+sleeping poll threads, etc.
+
+This callback is also atomic by default.
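+
+A minimal sketch of a ``pointer`` callback, assuming a hypothetical
+``mychip_get_dma_position()`` helper that reads the current position
+within the buffer (in bytes) from a chip register:
+
+::
+
+ static snd_pcm_uframes_t
+ snd_mychip_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+         struct mychip *chip = snd_pcm_substream_chip(substream);
+         unsigned int bytes;
+
+         /* hypothetical register read returning the position in bytes */
+         bytes = mychip_get_dma_position(chip);
+         return bytes_to_frames(substream->runtime, bytes);
+ }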
+
+copy and silence callbacks
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These callbacks are not mandatory, and can be omitted in most cases.
+These callbacks are used when the hardware buffer cannot be in the
+normal memory space. Some chips have their own buffer on the hardware
+which is not mappable. In such a case, you have to transfer the data
+manually from the memory buffer to the hardware buffer. Or, if the
+buffer is non-contiguous on both physical and virtual memory spaces,
+these callbacks must be defined, too.
+
+If these two callbacks are defined, copy and set-silence operations
+are done by them. The details will be described in the later section
+`Buffer and Memory Management`_.
+
+ack callback
+~~~~~~~~~~~~
+
+This callback is also not mandatory. This callback is called when the
+``appl_ptr`` is updated in read or write operations. Some drivers like
+emu10k1-fx and cs46xx need to track the current ``appl_ptr`` for the
+internal buffer, and this callback is useful only for such a purpose.
+
+This callback is atomic as default.
+
+page callback
+~~~~~~~~~~~~~
+
+This callback is optional too. This callback is used mainly for
+non-contiguous buffers. The mmap calls this callback to get the page
+address. Some examples will be explained in the later section `Buffer
+and Memory Management`_, too.
+
+PCM Interrupt Handler
+---------------------
+
+The rest of pcm stuff is the PCM interrupt handler. The role of PCM
+interrupt handler in the sound driver is to update the buffer position
+and to tell the PCM middle layer when the buffer position goes across
+the prescribed period size. To inform this, call the
+:c:func:`snd_pcm_period_elapsed()` function.
+
+There are several types of sound chips to generate the interrupts.
+
+Interrupts at the period (fragment) boundary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is the most frequently found type: the hardware generates an
+interrupt at each period boundary. In this case, you can call
+:c:func:`snd_pcm_period_elapsed()` at each interrupt.
+
+:c:func:`snd_pcm_period_elapsed()` takes the substream pointer as
+its argument. Thus, you need to keep the substream pointer accessible
+from the chip instance. For example, define a ``substream`` field in the
+chip record to hold the current running substream pointer, and set the
+pointer value in the ``open`` callback (and reset it in the ``close`` callback).
+
+If you acquire a spinlock in the interrupt handler, and the lock is used
+in other pcm callbacks, too, then you have to release the lock before
+calling :c:func:`snd_pcm_period_elapsed()`, because
+:c:func:`snd_pcm_period_elapsed()` calls other pcm callbacks
+inside.
+
+Typical code would be like:
+
+::
+
+
+ static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
+ {
+ struct mychip *chip = dev_id;
+ spin_lock(&chip->lock);
+ ....
+ if (pcm_irq_invoked(chip)) {
+ /* call updater, unlock before it */
+ spin_unlock(&chip->lock);
+ snd_pcm_period_elapsed(chip->substream);
+ spin_lock(&chip->lock);
+ /* acknowledge the interrupt if necessary */
+ }
+ ....
+ spin_unlock(&chip->lock);
+ return IRQ_HANDLED;
+ }
+
+
+
+High frequency timer interrupts
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This happens when the hardware doesn't generate interrupts at the period
+boundary but issues timer interrupts at a fixed timer rate (e.g. es1968
+or ymfpci drivers). In this case, you need to check the current hardware
+position and accumulate the processed sample length at each interrupt.
+When the accumulated size exceeds the period size, call
+:c:func:`snd_pcm_period_elapsed()` and reset the accumulator.
+
+Typical code would be like the following.
+
+::
+
+
+ static irqreturn_t snd_mychip_interrupt(int irq, void *dev_id)
+ {
+ struct mychip *chip = dev_id;
+ spin_lock(&chip->lock);
+ ....
+ if (pcm_irq_invoked(chip)) {
+ struct snd_pcm_runtime *runtime = chip->substream->runtime;
+ unsigned int last_ptr, size;
+ /* get the current hardware pointer (in frames) */
+ last_ptr = get_hw_ptr(chip);
+ /* calculate the processed frames since the
+ * last update
+ */
+ if (last_ptr < chip->last_ptr)
+ size = runtime->buffer_size + last_ptr
+ - chip->last_ptr;
+ else
+ size = last_ptr - chip->last_ptr;
+ /* remember the last updated point */
+ chip->last_ptr = last_ptr;
+ /* accumulate the size */
+ chip->size += size;
+ /* over the period boundary? */
+ if (chip->size >= runtime->period_size) {
+ /* reset the accumulator */
+ chip->size %= runtime->period_size;
+ /* call updater */
+ spin_unlock(&chip->lock);
+ snd_pcm_period_elapsed(chip->substream);
+ spin_lock(&chip->lock);
+ }
+ /* acknowledge the interrupt if necessary */
+ }
+ ....
+ spin_unlock(&chip->lock);
+ return IRQ_HANDLED;
+ }
+
+
+
+On calling :c:func:`snd_pcm_period_elapsed()`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In both cases, even if more than one period has elapsed, you don't have
+to call :c:func:`snd_pcm_period_elapsed()` many times. Call it only
+once, and the pcm layer will check the current hardware pointer and
+update to the latest status.
+
+Atomicity
+---------
+
+One of the most important (and thus difficult to debug) problems in
+kernel programming is race conditions. In the Linux kernel, they are
+usually avoided via spin-locks, mutexes or semaphores. In general, if a
+race condition can happen in an interrupt handler, it has to be managed
+atomically, and you have to use a spinlock to protect the critical
+section. If the critical section is not in interrupt handler code and if
+taking a relatively long time to execute is acceptable, you should use
+mutexes or semaphores instead.
+
+As already seen, some pcm callbacks are atomic and some are not. For
+example, the ``hw_params`` callback is non-atomic, while the ``trigger``
+callback is atomic. This means that the latter is called with a
+spinlock already held by the PCM middle layer. Please take this atomicity into
+account when you choose a locking scheme in the callbacks.
+
+In the atomic callbacks, you cannot use functions which may call
+:c:func:`schedule()` or go to sleep. Semaphores and
+mutexes can sleep, and hence they cannot be used inside the atomic
+callbacks (e.g. ``trigger`` callback). To implement some delay in such a
+callback, please use :c:func:`udelay()` or :c:func:`mdelay()`.
+
+All three atomic callbacks (trigger, pointer, and ack) are called with
+local interrupts disabled.
+
+The recent changes in PCM core code, however, allow all PCM operations
+to be non-atomic. This assumes that all the caller sides are in
+non-atomic contexts. For example, the function
+:c:func:`snd_pcm_period_elapsed()` is called typically from the
+interrupt handler. But, if you set up the driver to use a threaded
+interrupt handler, this call can be in non-atomic context, too. In such
+a case, you can set the ``nonatomic`` field of the :c:type:`struct snd_pcm
+<snd_pcm>` object after creating it. When this flag is set, mutexes
+and rwsems are used internally in the PCM core instead of spinlocks and
+rwlocks, so that you can call all PCM functions safely in a non-atomic
+context.
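+
+For example, a driver using a threaded interrupt handler could set the
+flag right after creating the PCM instance; this is only a sketch of
+the usual PCM constructor call with the extra assignment added:
+
+::
+
+ struct snd_pcm *pcm;
+
+ err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
+ if (err < 0)
+         return err;
+ pcm->nonatomic = true;  /* use non-atomic PCM ops for this device */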
+
+Constraints
+-----------
+
+If your chip supports unconventional sample rates, or only a limited
+set of sample rates, you need to set a constraint for the condition.
+
+For example, in order to restrict the sample rates to some supported
+values, use :c:func:`snd_pcm_hw_constraint_list()`. You need to
+call this function in the open callback.
+
+::
+
+ static unsigned int rates[] =
+ {4000, 10000, 22050, 44100};
+ static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+
+ static int snd_mychip_pcm_open(struct snd_pcm_substream *substream)
+ {
+ int err;
+ ....
+ err = snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (err < 0)
+ return err;
+ ....
+ }
+
+
+
+There are many different constraints. Look at ``<sound/pcm.h>`` for a
+complete list. You can even define your own constraint rules. For
+example, let's suppose my_chip can manage a substream of 1 channel if
+and only if the format is ``S16_LE``, otherwise it supports any format
+specified in the :c:type:`struct snd_pcm_hardware
+<snd_pcm_hardware>` structure (or in any other
+constraint_list). You can build a rule like this:
+
+::
+
+ static int hw_rule_channels_by_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+ {
+ struct snd_interval *c = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_interval ch;
+
+ snd_interval_any(&ch);
+ if (f->bits[0] == SNDRV_PCM_FMTBIT_S16_LE) {
+ ch.min = ch.max = 1;
+ ch.integer = 1;
+ return snd_interval_refine(c, &ch);
+ }
+ return 0;
+ }
+
+
+Then you need to call this function to add your rule:
+
+::
+
+ snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_channels_by_format, NULL,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+
+The rule function is called when an application sets the PCM format, and
+it refines the number of channels accordingly. But an application may
+set the number of channels before setting the format. Thus you also need
+to define the inverse rule:
+
+::
+
+ static int hw_rule_format_by_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+ {
+ struct snd_interval *c = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_mask fmt;
+
+ snd_mask_any(&fmt); /* Init the struct */
+ if (c->min < 2) {
+ fmt.bits[0] &= SNDRV_PCM_FMTBIT_S16_LE;
+ return snd_mask_refine(f, &fmt);
+ }
+ return 0;
+ }
+
+
+... and in the open callback:
+
+::
+
+ snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_format_by_channels, NULL,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+
+I won't give more details here, rather I would like to say, “Luke, use
+the source.”
+
+Control Interface
+=================
+
+General
+-------
+
+The control interface is used widely for many switches, sliders, etc.
+which are accessed from user-space. Its most important use is the mixer
+interface. In other words, since ALSA 0.9.x, all the mixer stuff is
+implemented on top of the control kernel API.
+
+ALSA has a well-defined AC97 control module. If your chip supports only
+the AC97 and nothing else, you can skip this section.
+
+The control API is defined in ``<sound/control.h>``. Include this file
+if you want to add your own controls.
+
+Definition of Controls
+----------------------
+
+To create a new control, you need to define the following three
+callbacks: ``info``, ``get`` and ``put``. Then, define a
+:c:type:`struct snd_kcontrol_new <snd_kcontrol_new>` record, such as:
+
+::
+
+
+ static struct snd_kcontrol_new my_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xffff,
+ .info = my_control_info,
+ .get = my_control_get,
+ .put = my_control_put
+ };
+
+
+The ``iface`` field specifies the control type,
+``SNDRV_CTL_ELEM_IFACE_XXX``, which is usually ``MIXER``. Use ``CARD``
+for global controls that are not logically part of the mixer. If the
+control is closely associated with some specific device on the sound
+card, use ``HWDEP``, ``PCM``, ``RAWMIDI``, ``TIMER``, or ``SEQUENCER``,
+and specify the device number with the ``device`` and ``subdevice``
+fields.
+
+The ``name`` is the name identifier string. Since ALSA 0.9.x, the
+control name is very important, because the control's role is determined
+from its name. There are pre-defined standard control names. The details
+are described in the `Control Names`_ subsection.
+
+The ``index`` field holds the index number of this control. If there
+are several different controls with the same name, they can be
+distinguished by the index number. This is the case when several
+codecs exist on the card. If the index is zero, you can omit the
+definition above.
+
+The ``access`` field contains the access type of this control. Give
+the combination of bit masks, ``SNDRV_CTL_ELEM_ACCESS_XXX``,
+there. The details will be explained in the `Access Flags`_
+subsection.
+
+The ``private_value`` field contains an arbitrary long integer value
+for this record. When using the generic ``info``, ``get`` and ``put``
+callbacks, you can pass a value through this field. If several small
+numbers are necessary, you can combine them bitwise. Or, it's
+possible to store a pointer (cast to unsigned long) to some record in
+this field, too.
+
+The ``tlv`` field can be used to provide metadata about the control;
+see the `Metadata`_ subsection.
+
+The other three are `Control Callbacks`_.
+
+Control Names
+-------------
+
+There are some standards to define the control names. A control is
+usually defined from the three parts as “SOURCE DIRECTION FUNCTION”.
+
+The first, ``SOURCE``, specifies the source of the control, and is a
+string such as “Master”, “PCM”, “CD” and “Line”. There are many
+pre-defined sources.
+
+The second, ``DIRECTION``, is one of the following strings according to
+the direction of the control: “Playback”, “Capture”, “Bypass Playback”
+and “Bypass Capture”. Or, it can be omitted, meaning both playback and
+capture directions.
+
+The third, ``FUNCTION``, is one of the following strings according to
+the function of the control: “Switch”, “Volume” and “Route”.
+
+Examples of control names are, thus, “Master Capture Switch” or “PCM
+Playback Volume”.
+
+There are some exceptions:
+
+Global capture and playback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+“Capture Source”, “Capture Switch” and “Capture Volume” are used for the
+global capture (input) source, switch and volume. Similarly, “Playback
+Switch” and “Playback Volume” are used for the global output gain switch
+and volume.
+
+Tone-controls
+~~~~~~~~~~~~~
+
+Tone-control switches and volumes are specified like “Tone Control - XXX”,
+e.g. “Tone Control - Switch”, “Tone Control - Bass”, “Tone Control -
+Center”.
+
+3D controls
+~~~~~~~~~~~
+
+3D-control switches and volumes are specified like “3D Control - XXX”,
+e.g. “3D Control - Switch”, “3D Control - Center”, “3D Control - Space”.
+
+Mic boost
+~~~~~~~~~
+
+The mic-boost switch is set as “Mic Boost” or “Mic Boost (6dB)”.
+
+More precise information can be found in
+``Documentation/sound/alsa/ControlNames.txt``.
+
+Access Flags
+------------
+
+The access flag is the bitmask which specifies the access type of the
+given control. The default access type is
+``SNDRV_CTL_ELEM_ACCESS_READWRITE``, which means both read and write are
+allowed to this control. When the access flag is omitted (i.e. = 0), it
+is considered as ``READWRITE`` access by default.
+
+When the control is read-only, pass ``SNDRV_CTL_ELEM_ACCESS_READ``
+instead. In this case, you don't have to define the ``put`` callback.
+Similarly, when the control is write-only (although it's a rare case),
+you can use the ``WRITE`` flag instead, and you don't need the ``get``
+callback.
+
+If the control value changes frequently (e.g. the VU meter),
+``VOLATILE`` flag should be given. This means that the control may be
+changed without `Change notification`_. Applications should poll such
+a control constantly.
+
+When the control is inactive, set the ``INACTIVE`` flag, too. There are
+``LOCK`` and ``OWNER`` flags to change the write permissions.
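+
+For instance, a read-only, volatile control (such as a level meter)
+might be declared like the hypothetical sketch below; the name and
+callbacks are made up for illustration:
+
+::
+
+ static struct snd_kcontrol_new my_meter_control = {
+         .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+         .name = "My Capture Meter",
+         .access = SNDRV_CTL_ELEM_ACCESS_READ |
+                   SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+         .info = my_meter_info,
+         .get = my_meter_get,    /* no put callback for a READ control */
+ };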
+
+Control Callbacks
+-----------------
+
+info callback
+~~~~~~~~~~~~~
+
+The ``info`` callback is used to get detailed information on this
+control. This must store the values of the given :c:type:`struct
+snd_ctl_elem_info <snd_ctl_elem_info>` object. For example,
+for a boolean control with a single element:
+
+::
+
+
+ static int snd_myctl_mono_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+ {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+ }
+
+
+
+The ``type`` field specifies the type of the control. There are
+``BOOLEAN``, ``INTEGER``, ``ENUMERATED``, ``BYTES``, ``IEC958`` and
+``INTEGER64``. The ``count`` field specifies the number of elements in
+this control. For example, a stereo volume would have count = 2. The
+``value`` field is a union, and the values stored depend on the
+type. The boolean and integer types are identical.
+
+The enumerated type is a bit different from others. You'll need to set
+the string for the currently given item index.
+
+::
+
+ static int snd_myctl_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+ {
+ static char *texts[4] = {
+ "First", "Second", "Third", "Fourth"
+ };
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 4;
+ if (uinfo->value.enumerated.item > 3)
+ uinfo->value.enumerated.item = 3;
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+ return 0;
+ }
+
+The above callback can be simplified with a helper function,
+:c:func:`snd_ctl_enum_info()`. The final code looks like below.
+(You can pass ``ARRAY_SIZE(texts)`` instead of 4 in the third argument;
+it's a matter of taste.)
+
+::
+
+ static int snd_myctl_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+ {
+ static char *texts[4] = {
+ "First", "Second", "Third", "Fourth"
+ };
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
+ }
+
+
+Some common info callbacks are available for your convenience:
+:c:func:`snd_ctl_boolean_mono_info()` and
+:c:func:`snd_ctl_boolean_stereo_info()`. Obviously, the former
+is an info callback for a mono channel boolean item, just like
+:c:func:`snd_myctl_mono_info()` above, and the latter is for a
+stereo channel boolean item.
+
+get callback
+~~~~~~~~~~~~
+
+This callback is used to read the current value of the control and to
+return it to user-space.
+
+For example,
+
+::
+
+
+ static int snd_myctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct mychip *chip = snd_kcontrol_chip(kcontrol);
+ ucontrol->value.integer.value[0] = get_some_value(chip);
+ return 0;
+ }
+
+
+
+The ``value`` field depends on the type of control as well as on the
+info callback. For example, the sb driver uses the ``private_value``
+field to store the register offset, the bit-shift and the bit-mask.
+The ``private_value`` field is set as follows:
+
+::
+
+ .private_value = reg | (shift << 16) | (mask << 24)
+
+and is retrieved in callbacks like
+
+::
+
+ static int snd_sbmixer_get_single(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ int reg = kcontrol->private_value & 0xff;
+ int shift = (kcontrol->private_value >> 16) & 0xff;
+ int mask = (kcontrol->private_value >> 24) & 0xff;
+ ....
+ }
+
+In the ``get`` callback, you have to fill all the elements if the
+control has more than one element, i.e. ``count > 1``. In the example
+above, we filled only one element (``value.integer.value[0]``) since
+``count = 1`` is assumed.
+
+put callback
+~~~~~~~~~~~~
+
+This callback is used to write a value from user-space.
+
+For example,
+
+::
+
+
+ static int snd_myctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct mychip *chip = snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ if (chip->current_value !=
+ ucontrol->value.integer.value[0]) {
+ change_current_value(chip,
+ ucontrol->value.integer.value[0]);
+ changed = 1;
+ }
+ return changed;
+ }
+
+
+
+As seen above, you have to return 1 if the value is changed. If the
+value is not changed, return 0 instead. If any fatal error happens,
+return a negative error code as usual.
+
+As in the ``get`` callback, when the control has more than one
+element, all elements must be evaluated in this callback, too.
+
+Callbacks are not atomic
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All these three callbacks are basically not atomic.
+
+Control Constructor
+-------------------
+
+When everything is ready, finally we can create a new control. To create
+a control, there are two functions to be called,
+:c:func:`snd_ctl_new1()` and :c:func:`snd_ctl_add()`.
+
+In the simplest way, you can do like this:
+
+::
+
+ err = snd_ctl_add(card, snd_ctl_new1(&my_control, chip));
+ if (err < 0)
+ return err;
+
+where ``my_control`` is the :c:type:`struct snd_kcontrol_new
+<snd_kcontrol_new>` object defined above, and chip is the object
+pointer to be passed to kcontrol->private_data which can be referred
+to in callbacks.
+
+:c:func:`snd_ctl_new1()` allocates a new :c:type:`struct
+snd_kcontrol <snd_kcontrol>` instance, and
+:c:func:`snd_ctl_add()` assigns the given control component to the
+card.
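+
+When a driver defines several controls, a common pattern is to put the
+:c:type:`struct snd_kcontrol_new <snd_kcontrol_new>` entries into an
+array and add them in a loop; a sketch:
+
+::
+
+ static struct snd_kcontrol_new my_controls[] = {
+         /* ... entries like my_control above ... */
+ };
+
+ static int snd_mychip_new_mixer(struct mychip *chip)
+ {
+         int i, err;
+
+         for (i = 0; i < ARRAY_SIZE(my_controls); i++) {
+                 err = snd_ctl_add(chip->card,
+                                   snd_ctl_new1(&my_controls[i], chip));
+                 if (err < 0)
+                         return err;
+         }
+         return 0;
+ }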
+
+Change Notification
+-------------------
+
+If you need to change and update a control in the interrupt routine, you
+can call :c:func:`snd_ctl_notify()`. For example,
+
+::
+
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, id_pointer);
+
+This function takes the card pointer, the event-mask, and the control id
+pointer for the notification. The event-mask specifies the types of
+notification, for example, in the above example, the change of control
+values is notified. The id pointer is the pointer of :c:type:`struct
+snd_ctl_elem_id <snd_ctl_elem_id>` to be notified. You can
+find some examples in ``es1938.c`` or ``es1968.c`` for hardware volume
+interrupts.
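+
+For example, if the :c:type:`struct snd_kcontrol <snd_kcontrol>`
+pointer returned by :c:func:`snd_ctl_new1()` was saved at creation
+time (here in a hypothetical ``chip->master_volume_kctl`` field), the
+interrupt handler can notify a value change like this sketch:
+
+::
+
+ /* chip->master_volume_kctl is a hypothetical field holding the
+  * snd_kcontrol pointer saved when the control was created */
+ snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+                &chip->master_volume_kctl->id);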
+
+Metadata
+--------
+
+To provide information about the dB values of a mixer control, use one of
+the ``DECLARE_TLV_xxx`` macros from ``<sound/tlv.h>`` to define a
+variable containing this information, set the ``tlv.p`` field to point to
+this variable, and include the ``SNDRV_CTL_ELEM_ACCESS_TLV_READ`` flag
+in the ``access`` field; like this:
+
+::
+
+ static DECLARE_TLV_DB_SCALE(db_scale_my_control, -4050, 150, 0);
+
+ static struct snd_kcontrol_new my_control = {
+ ...
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ ...
+ .tlv.p = db_scale_my_control,
+ };
+
+
+The :c:func:`DECLARE_TLV_DB_SCALE()` macro defines information
+about a mixer control where each step in the control's value changes the
+dB value by a constant dB amount. The first parameter is the name of the
+variable to be defined. The second parameter is the minimum value, in
+units of 0.01 dB. The third parameter is the step size, in units of 0.01
+dB. Set the fourth parameter to 1 if the minimum value actually mutes
+the control.
+
+The :c:func:`DECLARE_TLV_DB_LINEAR()` macro defines information
+about a mixer control where the control's value affects the output
+linearly. The first parameter is the name of the variable to be defined.
+The second parameter is the minimum value, in units of 0.01 dB. The
+third parameter is the maximum value, in units of 0.01 dB. If the
+minimum value mutes the control, set the second parameter to
+``TLV_DB_GAIN_MUTE``.
+
+API for AC97 Codec
+==================
+
+General
+-------
+
+The ALSA AC97 codec layer is a well-defined one, and you don't have to
+write much code to control it. Only low-level control routines are
+necessary. The AC97 codec API is defined in ``<sound/ac97_codec.h>``.
+
+Full Code Example
+-----------------
+
+::
+
+ struct mychip {
+ ....
+ struct snd_ac97 *ac97;
+ ....
+ };
+
+ static unsigned short snd_mychip_ac97_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+ {
+ struct mychip *chip = ac97->private_data;
+ ....
+ /* read a register value here from the codec */
+ return the_register_value;
+ }
+
+ static void snd_mychip_ac97_write(struct snd_ac97 *ac97,
+ unsigned short reg, unsigned short val)
+ {
+ struct mychip *chip = ac97->private_data;
+ ....
+ /* write the given register value to the codec */
+ }
+
+ static int snd_mychip_ac97(struct mychip *chip)
+ {
+ struct snd_ac97_bus *bus;
+ struct snd_ac97_template ac97;
+ int err;
+ static struct snd_ac97_bus_ops ops = {
+ .write = snd_mychip_ac97_write,
+ .read = snd_mychip_ac97_read,
+ };
+
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
+ if (err < 0)
+ return err;
+ memset(&ac97, 0, sizeof(ac97));
+ ac97.private_data = chip;
+ return snd_ac97_mixer(bus, &ac97, &chip->ac97);
+ }
+
+
+AC97 Constructor
+----------------
+
+To create an ac97 instance, first call :c:func:`snd_ac97_bus()`
+with an ``ac97_bus_ops_t`` record with callback functions.
+
+::
+
+ struct snd_ac97_bus *bus;
+ static struct snd_ac97_bus_ops ops = {
+ .write = snd_mychip_ac97_write,
+ .read = snd_mychip_ac97_read,
+ };
+
+ snd_ac97_bus(card, 0, &ops, NULL, &bus);
+
+The bus record is shared among all the ac97 instances belonging to it.
+
+And then call :c:func:`snd_ac97_mixer()` with an :c:type:`struct
+snd_ac97_template <snd_ac97_template>` record together with
+the bus pointer created above.
+
+::
+
+ struct snd_ac97_template ac97;
+ int err;
+
+ memset(&ac97, 0, sizeof(ac97));
+ ac97.private_data = chip;
+ snd_ac97_mixer(bus, &ac97, &chip->ac97);
+
+where chip->ac97 is a pointer to a newly created ``ac97_t``
+instance. In this case, the chip pointer is set as the private data,
+so that the read/write callback functions can refer to this chip
+instance. This instance is not necessarily stored in the chip
+record. If you need to change the register values from the driver, or
+need the suspend/resume of ac97 codecs, keep this pointer to pass to
+the corresponding functions.
+
+AC97 Callbacks
+--------------
+
+The standard callbacks are ``read`` and ``write``. Obviously they
+correspond to the functions for read and write accesses to the
+low-level hardware code.
+
+The ``read`` callback returns the register value specified in the
+argument.
+
+::
+
+ static unsigned short snd_mychip_ac97_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+ {
+ struct mychip *chip = ac97->private_data;
+ ....
+ return the_register_value;
+ }
+
+Here, the chip can be cast from ``ac97->private_data``.
+
+Meanwhile, the ``write`` callback is used to set the register
+value
+
+::
+
+ static void snd_mychip_ac97_write(struct snd_ac97 *ac97,
+ unsigned short reg, unsigned short val)
+
+
+These callbacks are non-atomic like the control API callbacks.
+
+There are also other callbacks: ``reset``, ``wait`` and ``init``.
+
+The ``reset`` callback is used to reset the codec. If the chip
+requires a special kind of reset, you can define this callback.
+
+The ``wait`` callback is used to add some waiting time in the standard
+initialization of the codec. If the chip requires the extra waiting
+time, define this callback.
+
+The ``init`` callback is used for additional initialization of the
+codec.
+
+Updating Registers in The Driver
+--------------------------------
+
+If you need to access the codec from the driver, you can call the
+following functions: :c:func:`snd_ac97_write()`,
+:c:func:`snd_ac97_read()`, :c:func:`snd_ac97_update()` and
+:c:func:`snd_ac97_update_bits()`.
+
+Both :c:func:`snd_ac97_write()` and
+:c:func:`snd_ac97_update()` functions are used to set a value to
+the given register (``AC97_XXX``). The difference between them is that
+:c:func:`snd_ac97_update()` doesn't write a value if the given
+value has been already set, while :c:func:`snd_ac97_write()`
+always rewrites the value.
+
+::
+
+ snd_ac97_write(ac97, AC97_MASTER, 0x8080);
+ snd_ac97_update(ac97, AC97_MASTER, 0x8080);
+
+:c:func:`snd_ac97_read()` is used to read the value of the given
+register. For example,
+
+::
+
+ value = snd_ac97_read(ac97, AC97_MASTER);
+
+:c:func:`snd_ac97_update_bits()` is used to update some bits in
+the given register.
+
+::
+
+ snd_ac97_update_bits(ac97, reg, mask, value);
+
+Also, there is a function to change the sample rate (of a given register
+such as ``AC97_PCM_FRONT_DAC_RATE``) when VRA or DRA is supported by the
+codec: :c:func:`snd_ac97_set_rate()`.
+
+::
+
+ snd_ac97_set_rate(ac97, AC97_PCM_FRONT_DAC_RATE, 44100);
+
+
+The following registers are available to set the rate:
+``AC97_PCM_MIC_ADC_RATE``, ``AC97_PCM_FRONT_DAC_RATE``,
+``AC97_PCM_LR_ADC_RATE``, ``AC97_SPDIF``. When ``AC97_SPDIF`` is
+specified, the register is not really changed but the corresponding
+IEC958 status bits will be updated.
+
+Clock Adjustment
+----------------
+
+In some chips, the clock of the codec isn't 48000 Hz but is derived from a PCI clock
+(to save a quartz!). In this case, change the field ``bus->clock`` to
+the corresponding value. For example, the intel8x0 and es1968 drivers have
+their own function to read from the clock.
+
+Proc Files
+----------
+
+The ALSA AC97 interface will create a proc file such as
+``/proc/asound/card0/codec97#0/ac97#0-0`` and ``ac97#0-0+regs``. You
+can refer to these files to see the current status and registers of
+the codec.
+
+Multiple Codecs
+---------------
+
+When there are several codecs on the same card, you need to call
+:c:func:`snd_ac97_mixer()` multiple times with ``ac97.num=1`` or
+greater. The ``num`` field specifies the codec number.
+
+If you set up multiple codecs, you either need to write different
+callbacks for each codec or check ``ac97->num`` in the callback
+routines.
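+
+A sketch of creating a secondary codec instance might look like this;
+``chip->ac97_sec`` is a hypothetical field for the second codec:
+
+::
+
+ struct snd_ac97_template ac97;
+
+ memset(&ac97, 0, sizeof(ac97));
+ ac97.num = 1;                   /* secondary codec on the AC97 bus */
+ ac97.private_data = chip;
+ err = snd_ac97_mixer(bus, &ac97, &chip->ac97_sec);
+ if (err < 0)
+         return err;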
+
+MIDI (MPU401-UART) Interface
+============================
+
+General
+-------
+
+Many soundcards have built-in MIDI (MPU401-UART) interfaces. When the
+soundcard supports the standard MPU401-UART interface, most likely you
+can use the ALSA MPU401-UART API. The MPU401-UART API is defined in
+``<sound/mpu401.h>``.
+
+Some soundchips have a similar but slightly different implementation of
+mpu401 stuff. For example, emu10k1 has its own mpu401 routines.
+
+MIDI Constructor
+----------------
+
+To create a rawmidi object, call :c:func:`snd_mpu401_uart_new()`.
+
+::
+
+ struct snd_rawmidi *rmidi;
+ snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, port, info_flags,
+ irq, &rmidi);
+
+
+The first argument is the card pointer, and the second is the index of
+this component. You can create up to 8 rawmidi devices.
+
+The third argument is the type of the hardware, ``MPU401_HW_XXX``. If
+it's not a special one, you can use ``MPU401_HW_MPU401``.
+
+The 4th argument is the I/O port address. Many backward-compatible
+MPU401 have an I/O port such as 0x330. Or, it might be a part of its own
+PCI I/O region. It depends on the chip design.
+
+The 5th argument is a bitflag for additional information. When the I/O
+port address above is part of the PCI I/O region, the MPU401 I/O port
+might have been already allocated (reserved) by the driver itself. In
+such a case, pass a bit flag ``MPU401_INFO_INTEGRATED``, and the
+mpu401-uart layer will allocate the I/O ports by itself.
+
+When the controller supports only the input or output MIDI stream, pass
+the ``MPU401_INFO_INPUT`` or ``MPU401_INFO_OUTPUT`` bitflag,
+respectively. Then the rawmidi instance is created as a single stream.
+
+``MPU401_INFO_MMIO`` bitflag is used to change the access method to MMIO
+ (via readb and writeb) instead of inb and outb. In this case, you have
+to pass the iomapped address to :c:func:`snd_mpu401_uart_new()`.
+
+When ``MPU401_INFO_TX_IRQ`` is set, the output stream isn't checked in
+the default interrupt handler. The driver needs to call
+:c:func:`snd_mpu401_uart_interrupt_tx()` by itself to start
+processing the output stream in the irq handler.
+
+If the MPU-401 interface shares its interrupt with the other logical
+devices on the card, set ``MPU401_INFO_IRQ_HOOK`` (see
+`below <#MIDI-Interrupt-Handler>`__).
+
+Usually, the port address corresponds to the command port and port + 1
+corresponds to the data port. If not, you may change the ``cport``
+field of :c:type:`struct snd_mpu401 <snd_mpu401>` manually afterward.
+However, :c:type:`struct snd_mpu401 <snd_mpu401>` pointer is
+not returned explicitly by :c:func:`snd_mpu401_uart_new()`. You
+need to cast ``rmidi->private_data`` to :c:type:`struct snd_mpu401
+<snd_mpu401>` explicitly,
+
+::
+
+ struct snd_mpu401 *mpu;
+ mpu = rmidi->private_data;
+
+and reset the ``cport`` as you like:
+
+::
+
+ mpu->cport = my_own_control_port;
+
+The 6th argument specifies the ISA irq number that will be allocated. If
+no interrupt is to be allocated (because your code is already allocating
+a shared interrupt, or because the device does not use interrupts), pass
+-1 instead. For a MPU-401 device without an interrupt, a polling timer
+will be used instead.
+
+MIDI Interrupt Handler
+----------------------
+
+When the interrupt is allocated in
+:c:func:`snd_mpu401_uart_new()`, an exclusive ISA interrupt
+handler is automatically used, hence you don't have anything else to do
+than creating the mpu401 stuff. Otherwise, you have to set
+``MPU401_INFO_IRQ_HOOK``, and call
+:c:func:`snd_mpu401_uart_interrupt()` explicitly from your own
+interrupt handler when it has determined that a UART interrupt has
+occurred.
+
+In this case, you need to pass the private_data of the returned rawmidi
+object from :c:func:`snd_mpu401_uart_new()` as the second
+argument of :c:func:`snd_mpu401_uart_interrupt()`.
+
+::
+
+ snd_mpu401_uart_interrupt(irq, rmidi->private_data, regs);
+
+
+RawMIDI Interface
+=================
+
+Overview
+--------
+
+The raw MIDI interface is used for hardware MIDI ports that can be
+accessed as a byte stream. It is not used for synthesizer chips that do
+not directly understand MIDI.
+
+ALSA handles file and buffer management. All you have to do is to write
+some code to move data between the buffer and the hardware.
+
+The rawmidi API is defined in ``<sound/rawmidi.h>``.
+
+RawMIDI Constructor
+-------------------
+
+To create a rawmidi device, call the :c:func:`snd_rawmidi_new()`
+function:
+
+::
+
+ struct snd_rawmidi *rmidi;
+ err = snd_rawmidi_new(chip->card, "MyMIDI", 0, outs, ins, &rmidi);
+ if (err < 0)
+ return err;
+ rmidi->private_data = chip;
+ strcpy(rmidi->name, "My MIDI");
+ rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
+ SNDRV_RAWMIDI_INFO_INPUT |
+ SNDRV_RAWMIDI_INFO_DUPLEX;
+
+The first argument is the card pointer, the second argument is the ID
+string.
+
+The third argument is the index of this component. You can create up to
+8 rawmidi devices.
+
+The fourth and fifth arguments are the number of output and input
+substreams, respectively, of this device (a substream is the equivalent
+of a MIDI port).
+
+Set the ``info_flags`` field to specify the capabilities of the
+device. Set ``SNDRV_RAWMIDI_INFO_OUTPUT`` if there is at least one
+output port, ``SNDRV_RAWMIDI_INFO_INPUT`` if there is at least one
+input port, and ``SNDRV_RAWMIDI_INFO_DUPLEX`` if the device can handle
+output and input at the same time.
+
+After the rawmidi device is created, you need to set the operators
+(callbacks) for each substream. There are helper functions to set the
+operators for all the substreams of a device:
+
+::
+
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_mymidi_output_ops);
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_mymidi_input_ops);
+
+The operators are usually defined like this:
+
+::
+
+ static struct snd_rawmidi_ops snd_mymidi_output_ops = {
+ .open = snd_mymidi_output_open,
+ .close = snd_mymidi_output_close,
+ .trigger = snd_mymidi_output_trigger,
+ };
+
+These callbacks are explained in the `RawMIDI Callbacks`_ section.
+
+If there is more than one substream, you should give a unique name to
+each of them:
+
+::
+
+ struct snd_rawmidi_substream *substream;
+ list_for_each_entry(substream,
+ &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams,
+ list) {
+ sprintf(substream->name, "My MIDI Port %d", substream->number + 1);
+ }
+ /* same for SNDRV_RAWMIDI_STREAM_INPUT */
+
+RawMIDI Callbacks
+-----------------
+
+In all the callbacks, the private data that you've set for the rawmidi
+device can be accessed as ``substream->rmidi->private_data``.
+
+If there is more than one port, your callbacks can determine the port
+index from the struct snd_rawmidi_substream data passed to each
+callback:
+
+::
+
+ struct snd_rawmidi_substream *substream;
+ int index = substream->number;
+
+RawMIDI open callback
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_open(struct snd_rawmidi_substream *substream);
+
+
+This is called when a substream is opened. You can initialize the
+hardware here, but you shouldn't start transmitting/receiving data yet.
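+
+A sketch of an output open callback that remembers the substream for
+the interrupt handler; the ``midi_output`` field and the
+``mychip_uart_enable_output()`` helper are hypothetical:
+
+::
+
+ static int snd_mymidi_output_open(struct snd_rawmidi_substream *substream)
+ {
+         struct mychip *chip = substream->rmidi->private_data;
+
+         /* remember the substream for later use in the interrupt
+          * handler; the field and the helper are hypothetical */
+         chip->midi_output = substream;
+         mychip_uart_enable_output(chip);
+         return 0;
+ }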
+
+RawMIDI close callback
+~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_close(struct snd_rawmidi_substream *substream);
+
+Guess what.
+
+The ``open`` and ``close`` callbacks of a rawmidi device are
+serialized with a mutex, and can sleep.
+
+Rawmidi trigger callback for output substreams
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static void snd_xxx_output_trigger(struct snd_rawmidi_substream *substream, int up);
+
+
+This is called with a nonzero ``up`` parameter when there is some data
+in the substream buffer that must be transmitted.
+
+To read data from the buffer, call
+:c:func:`snd_rawmidi_transmit_peek()`. It will return the number
+of bytes that have been read; this will be less than the number of bytes
+requested when there are no more data in the buffer. After the data have
+been transmitted successfully, call
+:c:func:`snd_rawmidi_transmit_ack()` to remove the data from the
+substream buffer:
+
+::
+
+ unsigned char data;
+ while (snd_rawmidi_transmit_peek(substream, &data, 1) == 1) {
+ if (snd_mychip_try_to_transmit(data))
+ snd_rawmidi_transmit_ack(substream, 1);
+ else
+ break; /* hardware FIFO full */
+ }
+
+If you know beforehand that the hardware will accept data, you can use
+the :c:func:`snd_rawmidi_transmit()` function which reads some
+data and removes them from the buffer at once:
+
+::
+
+ while (snd_mychip_transmit_possible()) {
+ unsigned char data;
+ if (snd_rawmidi_transmit(substream, &data, 1) != 1)
+ break; /* no more data */
+ snd_mychip_transmit(data);
+ }
+
+If you know beforehand how many bytes the hardware can accept, you can
+use a buffer size greater than one with the
+:c:func:`snd_rawmidi_transmit\*()` functions.
+
+The ``trigger`` callback must not sleep. If the hardware FIFO is full
+before the substream buffer has been emptied, you have to continue
+transmitting data later, either in an interrupt handler, or with a
+timer if the hardware doesn't have a MIDI transmit interrupt.
+
+The ``trigger`` callback is called with a zero ``up`` parameter when
+the transmission of data should be aborted.
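+
+Putting the pieces together, a hedged sketch of an output ``trigger``
+callback could look like the following (``snd_mychip_try_to_transmit()``
+and ``mychip_enable_tx_irq()`` are hypothetical helpers):
+
+::
+
+  static void snd_mychip_midi_output_trigger(struct snd_rawmidi_substream *substream,
+                                             int up)
+  {
+          struct mychip *chip = substream->rmidi->private_data;
+
+          if (up) {
+                  /* push as much data as the hardware FIFO takes right now */
+                  unsigned char data;
+                  while (snd_rawmidi_transmit_peek(substream, &data, 1) == 1) {
+                          if (!snd_mychip_try_to_transmit(data))
+                                  break; /* FIFO full, continue in the IRQ handler */
+                          snd_rawmidi_transmit_ack(substream, 1);
+                  }
+                  mychip_enable_tx_irq(chip, 1);
+          } else {
+                  mychip_enable_tx_irq(chip, 0);
+          }
+  }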
+
+RawMIDI trigger callback for input substreams
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ static void snd_xxx_input_trigger(struct snd_rawmidi_substream *substream, int up);
+
+
+This is called with a nonzero ``up`` parameter to enable receiving data,
+or with a zero ``up`` parameter to disable receiving data.
+
+The ``trigger`` callback must not sleep; the actual reading of data
+from the device is usually done in an interrupt handler.
+
+When data reception is enabled, your interrupt handler should call
+:c:func:`snd_rawmidi_receive()` for all received data:
+
+::
+
+ void snd_mychip_midi_interrupt(...)
+ {
+ while (mychip_midi_available()) {
+ unsigned char data;
+ data = mychip_midi_read();
+ snd_rawmidi_receive(substream, &data, 1);
+ }
+ }
+
+
+drain callback
+~~~~~~~~~~~~~~
+
+::
+
+ static void snd_xxx_drain(struct snd_rawmidi_substream *substream);
+
+
+This is only used with output substreams. This function should wait
+until all data read from the substream buffer have been transmitted.
+This ensures that the device can be closed and the driver unloaded
+without losing data.
+
+This callback is optional. If you do not set ``drain`` in the struct
+snd_rawmidi_ops structure, ALSA will simply wait for 50 milliseconds
+instead.
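+
+A hedged sketch of a drain callback that simply polls a hypothetical
+``mychip_tx_fifo_empty()`` status helper with a timeout:
+
+::
+
+  static void snd_mychip_midi_drain(struct snd_rawmidi_substream *substream)
+  {
+          struct mychip *chip = substream->rmidi->private_data;
+          unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+          /* drain may sleep; wait until the hardware FIFO is empty */
+          while (!mychip_tx_fifo_empty(chip) && time_before(jiffies, timeout))
+                  msleep(1);
+  }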
+
+Miscellaneous Devices
+=====================
+
+FM OPL3
+-------
+
+The FM OPL3 is still used in many chips (mainly for backward
+compatibility). ALSA has a nice OPL3 FM control layer, too. The OPL3 API
+is defined in ``<sound/opl3.h>``.
+
+FM registers can be directly accessed through the direct-FM API, defined
+in ``<sound/asound_fm.h>``. In ALSA native mode, FM registers are
+accessed through the Hardware-Dependent Device direct-FM extension API,
+whereas in OSS compatible mode, FM registers can be accessed with the
+OSS direct-FM compatible API in ``/dev/dmfmX`` device.
+
+To create the OPL3 component, you have two functions to call. The first
+one is a constructor for the ``opl3_t`` instance.
+
+::
+
+ struct snd_opl3 *opl3;
+ snd_opl3_create(card, lport, rport, OPL3_HW_OPL3_XXX,
+ integrated, &opl3);
+
+The first argument is the card pointer, the second one is the left port
+address, and the third is the right port address. In most cases, the
+right port is placed at the left port + 2.
+
+The fourth argument is the hardware type.
+
+When the left and right ports have been already allocated by the card
+driver, pass non-zero to the fifth argument (``integrated``). Otherwise,
+the opl3 module will allocate the specified ports by itself.
+
+When accessing the hardware requires a special method instead of the
+standard I/O access, you can create the opl3 instance separately with
+:c:func:`snd_opl3_new()`.
+
+::
+
+ struct snd_opl3 *opl3;
+ snd_opl3_new(card, OPL3_HW_OPL3_XXX, &opl3);
+
+Then set ``command``, ``private_data`` and ``private_free`` to the
+private access function, the private data and the destructor,
+respectively. The ``l_port`` and ``r_port`` fields need not be set;
+only the ``command`` callback must be set properly. You can retrieve
+your data later from the ``opl3->private_data`` field.
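+
+For example, assuming hypothetical ``snd_mychip_opl3_command()`` and
+``snd_mychip_opl3_free()`` routines in your driver:
+
+::
+
+  opl3->command = snd_mychip_opl3_command;   /* your register access routine */
+  opl3->private_data = chip;
+  opl3->private_free = snd_mychip_opl3_free; /* optional destructor */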
+
+After creating the opl3 instance via :c:func:`snd_opl3_new()`,
+call :c:func:`snd_opl3_init()` to initialize the chip to the
+proper state. Note that :c:func:`snd_opl3_create()` always calls
+it internally.
+
+If the opl3 instance is created successfully, then create a hwdep device
+for this opl3.
+
+::
+
+ struct snd_hwdep *opl3hwdep;
+ snd_opl3_hwdep_new(opl3, 0, 1, &opl3hwdep);
+
+The first argument is the ``opl3_t`` instance you created, and the
+second is the index number, usually 0.
+
+The third argument is the index-offset for the sequencer client assigned
+to the OPL3 port. When there is an MPU401-UART, pass 1 here (the UART
+always takes 0).
+
+Hardware-Dependent Devices
+--------------------------
+
+Some chips need user-space access for special controls or for loading
+the microcode. In such a case, you can create a hwdep
+(hardware-dependent) device. The hwdep API is defined in
+``<sound/hwdep.h>``. You can find examples in the opl3 driver or in
+``isa/sb/sb16_csp.c``.
+
+The creation of the ``hwdep`` instance is done via
+:c:func:`snd_hwdep_new()`.
+
+::
+
+ struct snd_hwdep *hw;
+ snd_hwdep_new(card, "My HWDEP", 0, &hw);
+
+where the third argument is the index number.
+
+You can then store any pointer value in ``private_data``. If you assign
+private data, you should define the destructor, too; the destructor
+function is set in the ``private_free`` field.
+
+::
+
+ struct mydata *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ hw->private_data = p;
+ hw->private_free = mydata_free;
+
+and the implementation of the destructor would be:
+
+::
+
+ static void mydata_free(struct snd_hwdep *hw)
+ {
+ struct mydata *p = hw->private_data;
+ kfree(p);
+ }
+
+Arbitrary file operations can be defined for this instance; the file
+operators are set in the ``ops`` table. For example, assume that this
+chip needs an ioctl:
+
+::
+
+ hw->ops.open = mydata_open;
+ hw->ops.ioctl = mydata_ioctl;
+ hw->ops.release = mydata_release;
+
+And implement the callback functions as you like.
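+
+A hedged sketch of such an ioctl handler (the ``MYCHIP_IOCTL_LOAD_FW``
+command number and the ``mychip_load_firmware()`` helper are hypothetical):
+
+::
+
+  static int mydata_ioctl(struct snd_hwdep *hw, struct file *file,
+                          unsigned int cmd, unsigned long arg)
+  {
+          struct mydata *p = hw->private_data;
+
+          switch (cmd) {
+          case MYCHIP_IOCTL_LOAD_FW:
+                  /* copy the firmware image from user space and upload it */
+                  return mychip_load_firmware(p, (const void __user *)arg);
+          default:
+                  return -ENOTTY;
+          }
+  }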
+
+IEC958 (S/PDIF)
+---------------
+
+Usually the controls for IEC958 devices are implemented via the control
+interface. There is a macro to compose a name string for IEC958
+controls, :c:func:`SNDRV_CTL_NAME_IEC958()` defined in
+``<include/asound.h>``.
+
+There are some standard controls for IEC958 status bits. These controls
+use the type ``SNDRV_CTL_ELEM_TYPE_IEC958``, and the size of the element
+is fixed as a 4-byte array (``value.iec958.status[x]``). For the ``info``
+callback, you don't specify the value field for this type (the ``count``
+field must be set, though).
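+
+A typical ``info`` callback for such a control is therefore as simple as
+the following sketch:
+
+::
+
+  static int snd_mychip_spdif_info(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_info *uinfo)
+  {
+          uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+          uinfo->count = 1;
+          return 0;
+  }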
+
+“IEC958 Playback Con Mask” is used to return the bit-mask for the IEC958
+status bits of consumer mode. Similarly, “IEC958 Playback Pro Mask”
+returns the bitmask for professional mode. They are read-only controls,
+and are defined as MIXER controls (iface =
+``SNDRV_CTL_ELEM_IFACE_MIXER``).
+
+Meanwhile, “IEC958 Playback Default” control is defined for getting and
+setting the current default IEC958 bits. Note that this one is usually
+defined as a PCM control (iface = ``SNDRV_CTL_ELEM_IFACE_PCM``),
+although in some places it's defined as a MIXER control.
+
+In addition, you can define the control switches to enable/disable or to
+set the raw bit mode. The implementation will depend on the chip, but
+the control should be named as “IEC958 xxx”, preferably using the
+:c:func:`SNDRV_CTL_NAME_IEC958()` macro.
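+
+For instance, an “IEC958 Playback Default” control might be declared like
+the sketch below (the get/put callback names are hypothetical):
+
+::
+
+  static struct snd_kcontrol_new snd_mychip_spdif_default = {
+          .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+          .name =  SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+          .info =  snd_mychip_spdif_info,
+          .get =   snd_mychip_spdif_default_get,
+          .put =   snd_mychip_spdif_default_put,
+  };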
+
+You can find several cases, for example, ``pci/emu10k1``,
+``pci/ice1712``, or ``pci/cmipci.c``.
+
+Buffer and Memory Management
+============================
+
+Buffer Types
+------------
+
+ALSA provides several different buffer allocation functions depending on
+the bus and the architecture. All these have a consistent API. The
+allocation of physically-contiguous pages is done via
+:c:func:`snd_malloc_xxx_pages()` function, where xxx is the bus
+type.
+
+The allocation of pages with fallback is
+:c:func:`snd_malloc_xxx_pages_fallback()`. This function tries
+to allocate the specified pages but if the pages are not available, it
+tries to reduce the page sizes until enough space is found.
+
+To release the pages, call the :c:func:`snd_free_xxx_pages()`
+function.
+
+Usually, ALSA drivers try to allocate and reserve a large contiguous
+physical space at the time the module is loaded for later use. This
+is called “pre-allocation”. As already written, you can call the
+following function at pcm instance construction time (in the case of
+the PCI bus).
+
+::
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(pci), size, max);
+
+where ``size`` is the byte size to be pre-allocated and the ``max`` is
+the maximum size to be changed via the ``prealloc`` proc file. The
+allocator will try to get an area as large as possible within the
+given size.
+
+The second argument (type) and the third argument (device pointer) are
+dependent on the bus. In the case of the ISA bus, pass
+:c:func:`snd_dma_isa_data()` as the third argument with
+``SNDRV_DMA_TYPE_DEV`` type. A continuous buffer unrelated to the bus
+can be pre-allocated with the ``SNDRV_DMA_TYPE_CONTINUOUS`` type and the
+``snd_dma_continuous_data(GFP_KERNEL)`` device pointer, where
+``GFP_KERNEL`` is the kernel allocation flag to use. For the PCI
+scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with
+``snd_dma_pci_data(pci)`` (see the `Non-Contiguous Buffers`_
+section).
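+
+For example, pre-allocating a 64kB continuous buffer not tied to any bus
+might look like this (the sizes here are arbitrary):
+
+::
+
+  snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+                                        snd_dma_continuous_data(GFP_KERNEL),
+                                        64 * 1024, 64 * 1024);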
+
+Once the buffer is pre-allocated, you can use the allocator in the
+``hw_params`` callback:
+
+::
+
+ snd_pcm_lib_malloc_pages(substream, size);
+
+Note that you have to pre-allocate to use this function.
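+
+A typical pair of ``hw_params`` and ``hw_free`` callbacks built on top of
+the pre-allocator would be (a common pattern, shown here as a sketch):
+
+::
+
+  static int snd_mychip_hw_params(struct snd_pcm_substream *substream,
+                                  struct snd_pcm_hw_params *hw_params)
+  {
+          /* take the needed amount from the pre-allocated buffer */
+          return snd_pcm_lib_malloc_pages(substream,
+                                          params_buffer_bytes(hw_params));
+  }
+
+  static int snd_mychip_hw_free(struct snd_pcm_substream *substream)
+  {
+          return snd_pcm_lib_free_pages(substream);
+  }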
+
+External Hardware Buffers
+-------------------------
+
+Some chips have their own hardware buffers and the DMA transfer from the
+host memory is not available. In such a case, you need to either 1)
+copy/set the audio data directly to the external hardware buffer, or 2)
+make an intermediate buffer and copy/set the data from it to the
+external hardware buffer in interrupts (or in tasklets, preferably).
+
+The first case works fine if the external hardware buffer is large
+enough. This method doesn't need any extra buffers and thus is more
+efficient. You need to define the ``copy`` and ``silence`` callbacks
+for the data transfer. However, there is a drawback: it cannot be
+mmapped. The examples are GUS's GF1 PCM or emu8000's wavetable PCM.
+
+The second case allows for mmap on the buffer, although you have to
+handle an interrupt or a tasklet to transfer the data from the
+intermediate buffer to the hardware buffer. You can find an example in
+the vxpocket driver.
+
+Another case is when the chip uses a PCI memory-map region for the
+buffer instead of the host memory. In this case, mmap is available only
+on certain architectures like the Intel one. In non-mmap mode, the data
+cannot be transferred as in the normal way. Thus you need to define the
+``copy`` and ``silence`` callbacks as well, as in the cases above. The
+examples are found in ``rme32.c`` and ``rme96.c``.
+
+The implementation of the ``copy`` and ``silence`` callbacks depends
+upon whether the hardware supports interleaved or non-interleaved
+samples. The ``copy`` callback is defined like below, a bit
+differently depending on whether the direction is playback or capture:
+
+::
+
+ static int playback_copy(struct snd_pcm_substream *substream, int channel,
+ snd_pcm_uframes_t pos, void *src, snd_pcm_uframes_t count);
+ static int capture_copy(struct snd_pcm_substream *substream, int channel,
+ snd_pcm_uframes_t pos, void *dst, snd_pcm_uframes_t count);
+
+In the case of interleaved samples, the second argument (``channel``) is
+not used. The third argument (``pos``) specifies the current position
+offset in frames.
+
+The meaning of the fourth argument is different between playback and
+capture. For playback, it holds the source data pointer, and for
+capture, it's the destination data pointer.
+
+The last argument is the number of frames to be copied.
+
+What you have to do in this callback is again different between the
+playback and capture directions. In the playback case, you copy the
+given amount of data (``count``) from the specified pointer (``src``)
+to the specified offset (``pos``) in the hardware buffer. Coded in a
+memcpy-like way, the copy would look like:
+
+::
+
+ my_memcpy(my_buffer + frames_to_bytes(runtime, pos), src,
+ frames_to_bytes(runtime, count));
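+
+Wrapped into a complete (hedged) interleaved playback ``copy`` callback,
+with ``chip->hw_buffer`` standing in for a hypothetical memory-mapped
+hardware buffer:
+
+::
+
+  static int snd_mychip_playback_copy(struct snd_pcm_substream *substream,
+                                      int channel, snd_pcm_uframes_t pos,
+                                      void *src, snd_pcm_uframes_t count)
+  {
+          struct snd_pcm_runtime *runtime = substream->runtime;
+          struct mychip *chip = snd_pcm_substream_chip(substream);
+
+          /* interleaved data: 'channel' is ignored */
+          memcpy_toio(chip->hw_buffer + frames_to_bytes(runtime, pos),
+                      src, frames_to_bytes(runtime, count));
+          return 0;
+  }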
+
+For the capture direction, you copy the given amount of data
+(``count``) from the specified offset (``pos``) in the hardware buffer
+to the specified pointer (``dst``):
+
+::
+
+ my_memcpy(dst, my_buffer + frames_to_bytes(runtime, pos),
+ frames_to_bytes(runtime, count));
+
+Note that both the position and the amount of data are given in frames.
+
+In the case of non-interleaved samples, the implementation will be a bit
+more complicated.
+
+You need to check the channel argument, and if it's -1, copy all
+channels. Otherwise, copy only the specified channel. Please check
+``isa/gus/gus_pcm.c`` as an example.
+
+The ``silence`` callback is also implemented in a similar way:
+
+::
+
+ static int silence(struct snd_pcm_substream *substream, int channel,
+ snd_pcm_uframes_t pos, snd_pcm_uframes_t count);
+
+The meanings of the arguments are the same as in the ``copy`` callback,
+although there is no ``src/dst`` argument. In the case of interleaved
+samples, the channel argument has no meaning, just as for the ``copy``
+callback.
+
+The role of the ``silence`` callback is to set the given amount
+(``count``) of silence data at the specified offset (``pos``) in the
+hardware buffer. Suppose that the data format is signed (that is, the
+silence value is 0); an implementation using a memset-like function
+would then look like:
+
+::
+
+ my_memset(my_buffer + frames_to_bytes(runtime, pos), 0,
+ frames_to_bytes(runtime, count));
+
+In the case of non-interleaved samples, again, the implementation
+becomes a bit more complicated. See, for example, ``isa/gus/gus_pcm.c``.
+
+Non-Contiguous Buffers
+----------------------
+
+If your hardware supports the page table as in emu10k1 or the buffer
+descriptors as in via82xx, you can use the scatter-gather (SG) DMA. ALSA
+provides an interface for handling SG-buffers. The API is provided in
+``<sound/pcm.h>``.
+
+For creating the SG-buffer handler, call
+:c:func:`snd_pcm_lib_preallocate_pages()` or
+:c:func:`snd_pcm_lib_preallocate_pages_for_all()` with
+``SNDRV_DMA_TYPE_DEV_SG`` in the PCM constructor like other PCI
+pre-allocator. You need to pass ``snd_dma_pci_data(pci)``, where pci is
+the :c:type:`struct pci_dev <pci_dev>` pointer of the chip as
+well. The ``struct snd_sg_buf`` instance is created as
+``substream->dma_private``. You can cast the pointer like:
+
+::
+
+ struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
+
+Then call :c:func:`snd_pcm_lib_malloc_pages()` in the ``hw_params``
+callback as well as in the case of normal PCI buffer. The SG-buffer
+handler will allocate the non-contiguous kernel pages of the given size
+and map them onto the virtually contiguous memory. The virtual pointer
+is addressed in ``runtime->dma_area``. The physical address
+(``runtime->dma_addr``) is set to zero, because the buffer is
+physically non-contiguous. The physical address table is set up in
+``sgbuf->table``. You can get the physical address at a certain offset
+via :c:func:`snd_pcm_sgbuf_get_addr()`.
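+
+For example, when programming the hardware's page table you might look up
+the DMA address of each page like this (a one-line sketch; ``byte_offset``
+is a byte offset into the buffer):
+
+::
+
+  dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, byte_offset);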
+
+When an SG-handler is used, you need to set
+:c:func:`snd_pcm_sgbuf_ops_page()` as the ``page`` callback. (See
+`page callback`_ section.)
+
+To release the data, call :c:func:`snd_pcm_lib_free_pages()` in
+the ``hw_free`` callback as usual.
+
+Vmalloc'ed Buffers
+------------------
+
+It's possible to use a buffer allocated via :c:func:`vmalloc()`, for
+example, for an intermediate buffer. Since the allocated pages are not
+contiguous, you need to set the ``page`` callback to obtain the physical
+address at every offset.
+
+The implementation of ``page`` callback would be like this:
+
+::
+
+ #include <linux/vmalloc.h>
+
+ /* get the physical page pointer on the given offset */
+ static struct page *mychip_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+ {
+ void *pageptr = substream->runtime->dma_area + offset;
+ return vmalloc_to_page(pageptr);
+ }
+
+Proc Interface
+==============
+
+ALSA provides an easy interface for procfs. The proc files are very
+useful for debugging. I recommend you set up proc files if you write a
+driver and want to get a running status or register dumps. The API is
+found in ``<sound/info.h>``.
+
+To create a proc file, call :c:func:`snd_card_proc_new()`.
+
+::
+
+ struct snd_info_entry *entry;
+ int err = snd_card_proc_new(card, "my-file", &entry);
+
+where the second argument specifies the name of the proc file to be
+created. The above example will create a file ``my-file`` under the
+card directory, e.g. ``/proc/asound/card0/my-file``.
+
+Like other components, the proc entry created via
+:c:func:`snd_card_proc_new()` will be registered and released
+automatically in the card registration and release functions.
+
+When the creation is successful, the function stores a new instance in
+the pointer given in the third argument. It is initialized as a text
+proc file for read only. To use this proc file as a read-only text file
+as it is, set the read callback with private data via
+:c:func:`snd_info_set_text_ops()`.
+
+::
+
+ snd_info_set_text_ops(entry, chip, my_proc_read);
+
+where the second argument (``chip``) is the private data to be used in
+the callback, and the third (``my_proc_read``) is the read callback
+function, which is defined like
+
+::
+
+ static void my_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer);
+
+In the read callback, use :c:func:`snd_iprintf()` for output
+strings, which works just like normal :c:func:`printf()`. For
+example,
+
+::
+
+ static void my_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+ {
+ struct my_chip *chip = entry->private_data;
+
+ snd_iprintf(buffer, "This is my chip!\n");
+ snd_iprintf(buffer, "Port = %ld\n", chip->port);
+ }
+
+The file permissions can be changed afterwards. By default, they are
+set to read-only for all users. If you want to add write permission
+for the user (root by default), do as follows:
+
+::
+
+ entry->mode = S_IFREG | S_IRUGO | S_IWUSR;
+
+and set the write callback
+
+::
+
+ entry->c.text.write = my_proc_write;
+
+For the write callback, you can use :c:func:`snd_info_get_line()`
+to get a text line, and :c:func:`snd_info_get_str()` to retrieve
+a string from the line. Some examples are found in
+``core/oss/mixer_oss.c`` and ``core/oss/pcm_oss.c``.
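+
+A hedged sketch of such a write callback, parsing lines of the form
+``port 0x220`` (the accepted keyword and syntax are made up for this
+example):
+
+::
+
+  static void my_proc_write(struct snd_info_entry *entry,
+                            struct snd_info_buffer *buffer)
+  {
+          struct my_chip *chip = entry->private_data;
+          char line[64], item[32], valstr[32];
+          const char *ptr;
+          long val;
+
+          while (!snd_info_get_line(buffer, line, sizeof(line))) {
+                  /* first token is the keyword, second is the value */
+                  ptr = snd_info_get_str(item, line, sizeof(item));
+                  if (strcmp(item, "port"))
+                          continue;
+                  snd_info_get_str(valstr, ptr, sizeof(valstr));
+                  if (!kstrtol(valstr, 0, &val))
+                          chip->port = val;
+          }
+  }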
+
+For a raw-data proc-file, set the attributes as follows:
+
+::
+
+ static struct snd_info_entry_ops my_file_io_ops = {
+ .read = my_file_io_read,
+ };
+
+ entry->content = SNDRV_INFO_CONTENT_DATA;
+ entry->private_data = chip;
+ entry->c.ops = &my_file_io_ops;
+ entry->size = 4096;
+ entry->mode = S_IFREG | S_IRUGO;
+
+For raw data, the ``size`` field must be set properly. This specifies
+the maximum size of the proc file access.
+
+The read/write callbacks of raw mode are more direct than the text
+mode. You need to use low-level I/O functions such as
+:c:func:`copy_from_user()`/:c:func:`copy_to_user()` to transfer the data.
+
+::
+
+ static ssize_t my_file_io_read(struct snd_info_entry *entry,
+ void *file_private_data,
+ struct file *file,
+ char *buf,
+ size_t count,
+ loff_t pos)
+ {
+ if (copy_to_user(buf, local_data + pos, count))
+ return -EFAULT;
+ return count;
+ }
+
+If the size of the info entry has been set up properly, ``count`` and
+``pos`` are guaranteed to fit within 0 and the given size. You don't
+have to check the range in the callbacks unless any other condition is
+required.
+
+Power Management
+================
+
+If the chip is supposed to work with suspend/resume functions, you need
+to add power-management code to the driver. The additional code for
+power-management should be ifdef-ed with ``CONFIG_PM``.
+
+If the driver *fully* supports suspend/resume, that is, the device can
+be properly resumed to the state at the moment suspend was called, you
+can set the ``SNDRV_PCM_INFO_RESUME`` flag in the pcm info field.
+Usually, this is possible when the registers of the chip can be safely
+saved and restored to RAM. If this is set, the trigger callback is
+called with ``SNDRV_PCM_TRIGGER_RESUME`` after the resume callback
+completes.
+
+Even if the driver doesn't support PM fully but partial suspend/resume
+is still possible, it's still worth implementing suspend/resume
+callbacks. In such a case, applications would reset the status by
+calling :c:func:`snd_pcm_prepare()` and restart the stream
+appropriately. Hence, you can define the suspend/resume callbacks as
+below but not set the ``SNDRV_PCM_INFO_RESUME`` info flag for the PCM.
+
+Note that the trigger with SUSPEND can always be called when
+:c:func:`snd_pcm_suspend_all()` is called, regardless of the
+``SNDRV_PCM_INFO_RESUME`` flag. The ``RESUME`` flag affects only the
+behavior of :c:func:`snd_pcm_resume()`. (Thus, in theory,
+``SNDRV_PCM_TRIGGER_RESUME`` doesn't need to be handled in the trigger
+callback when no ``SNDRV_PCM_INFO_RESUME`` flag is set. But, it's better
+to keep it for compatibility reasons.)
+
+In the earlier version of ALSA drivers, a common power-management layer
+was provided, but it has been removed. The driver needs to define the
+suspend/resume hooks according to the bus the device is connected to. In
+the case of PCI drivers, the callbacks look like below:
+
+::
+
+ #ifdef CONFIG_PM
+ static int snd_my_suspend(struct pci_dev *pci, pm_message_t state)
+ {
+ .... /* do things for suspend */
+ return 0;
+ }
+ static int snd_my_resume(struct pci_dev *pci)
+ {
+ .... /* do things for resume */
+ return 0;
+ }
+ #endif
+
+The scheme of the real suspend job is as follows.
+
+1. Retrieve the card and the chip data.
+
+2. Call :c:func:`snd_power_change_state()` with
+ ``SNDRV_CTL_POWER_D3hot`` to change the power status.
+
+3. Call :c:func:`snd_pcm_suspend_all()` to suspend the running
+ PCM streams.
+
+4. If AC97 codecs are used, call :c:func:`snd_ac97_suspend()` for
+ each codec.
+
+5. Save the register values if necessary.
+
+6. Stop the hardware if necessary.
+
+7. Disable the PCI device by calling
+ :c:func:`pci_disable_device()`. Then, call
+ :c:func:`pci_save_state()` at last.
+
+A typical code would be like:
+
+::
+
+ static int mychip_suspend(struct pci_dev *pci, pm_message_t state)
+ {
+ /* (1) */
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct mychip *chip = card->private_data;
+ /* (2) */
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ /* (3) */
+ snd_pcm_suspend_all(chip->pcm);
+ /* (4) */
+ snd_ac97_suspend(chip->ac97);
+ /* (5) */
+ snd_mychip_save_registers(chip);
+ /* (6) */
+ snd_mychip_stop_hardware(chip);
+ /* (7) */
+ pci_disable_device(pci);
+ pci_save_state(pci);
+ return 0;
+ }
+
+
+The scheme of the real resume job is as follows.
+
+1. Retrieve the card and the chip data.
+
+2. Set up PCI. First, call :c:func:`pci_restore_state()`. Then
+ enable the pci device again by calling
+ :c:func:`pci_enable_device()`. Call
+ :c:func:`pci_set_master()` if necessary, too.
+
+3. Re-initialize the chip.
+
+4. Restore the saved registers if necessary.
+
+5. Resume the mixer, e.g. calling :c:func:`snd_ac97_resume()`.
+
+6. Restart the hardware (if any).
+
+7. Call :c:func:`snd_power_change_state()` with
+ ``SNDRV_CTL_POWER_D0`` to notify the processes.
+
+A typical code would be like:
+
+::
+
+ static int mychip_resume(struct pci_dev *pci)
+ {
+ /* (1) */
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct mychip *chip = card->private_data;
+ /* (2) */
+ pci_restore_state(pci);
+ pci_enable_device(pci);
+ pci_set_master(pci);
+ /* (3) */
+ snd_mychip_reinit_chip(chip);
+ /* (4) */
+ snd_mychip_restore_registers(chip);
+ /* (5) */
+ snd_ac97_resume(chip->ac97);
+ /* (6) */
+ snd_mychip_restart_chip(chip);
+ /* (7) */
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+ }
+
+As shown above, it's better to save registers after suspending
+the PCM operations via :c:func:`snd_pcm_suspend_all()` or
+:c:func:`snd_pcm_suspend()`. It means that the PCM streams are
+already stopped when the register snapshot is taken. But, remember that
+you don't have to restart the PCM stream in the resume callback. It'll
+be restarted via trigger call with ``SNDRV_PCM_TRIGGER_RESUME`` when
+necessary.
+
+OK, we have all callbacks now. Let's set them up. In the initialization
+of the card, make sure that you can get the chip data from the card
+instance, typically via the ``private_data`` field, in case you created the
+chip data individually.
+
+::
+
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+ {
+ ....
+ struct snd_card *card;
+ struct mychip *chip;
+ int err;
+ ....
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 0, &card);
+ ....
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ ....
+ card->private_data = chip;
+ ....
+ }
+
+When you create the chip data together with :c:func:`snd_card_new()`
+(by passing its size as the fifth argument, as below), it's accessible
+via the ``private_data`` field in any case.
+
+::
+
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+ {
+ ....
+ struct snd_card *card;
+ struct mychip *chip;
+ int err;
+ ....
+ err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ sizeof(struct mychip), &card);
+ ....
+ chip = card->private_data;
+ ....
+ }
+
+If you need space to save the registers, allocate the buffer for it
+here, too, since it would be fatal if you cannot allocate memory in
+the suspend phase. The allocated buffer should be released in the
+corresponding destructor.
+
+Next, set the suspend/resume callbacks in the pci_driver.
+
+::
+
+ static struct pci_driver driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_my_ids,
+ .probe = snd_my_probe,
+ .remove = snd_my_remove,
+ #ifdef CONFIG_PM
+ .suspend = snd_my_suspend,
+ .resume = snd_my_resume,
+ #endif
+ };
+
+Module Parameters
+=================
+
+There are standard module options for ALSA. At least, each module should
+have the ``index``, ``id`` and ``enable`` options.
+
+If the module supports multiple cards (usually up to 8 = ``SNDRV_CARDS``
+cards), they should be arrays. The default initial values are defined
+already as constants for easier programming:
+
+::
+
+ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+If the module supports only a single card, they could be single
+variables, instead. The ``enable`` option is not always necessary in
+this case, but it would be better to have a dummy option for
+compatibility.
+
+The module parameters must be declared with the standard
+``module_param()``, ``module_param_array()`` and
+:c:func:`MODULE_PARM_DESC()` macros.
+
+The typical coding would be like below:
+
+::
+
+ #define CARD_NAME "My Chip"
+
+ module_param_array(index, int, NULL, 0444);
+ MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard.");
+ module_param_array(id, charp, NULL, 0444);
+ MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard.");
+ module_param_array(enable, bool, NULL, 0444);
+ MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
+
+Also, don't forget to define the module description, classes, license
+and devices. Especially, recent modprobe requires the module license
+to be defined as GPL, etc.; otherwise the system is shown as “tainted”.
+
+::
+
+ MODULE_DESCRIPTION("My Chip");
+ MODULE_LICENSE("GPL");
+ MODULE_SUPPORTED_DEVICE("{{Vendor,My Chip Name}}");
+
+
+How To Put Your Driver Into ALSA Tree
+=====================================
+
+General
+-------
+
+So far, you've learned how to write the driver code. And you might have
+a question now: how to put my own driver into the ALSA driver tree? Here
+(finally :) the standard procedure is described briefly.
+
+Suppose that you create a new PCI driver for the card “xyz”. The card
+module name would be snd-xyz. The new driver is usually put into the
+alsa-driver tree, ``alsa-driver/pci`` directory in the case of PCI
+cards. Then the driver is evaluated, audited and tested by developers
+and users. After a certain time, the driver will go to the alsa-kernel
+tree (to the corresponding directory, such as ``alsa-kernel/pci``) and
+eventually will be integrated into the Linux 2.6 tree (the directory
+would be ``linux/sound/pci``).
+
+In the following sections, the driver code is supposed to be put into
+alsa-driver tree. The two cases are covered: a driver consisting of a
+single source file and one consisting of several source files.
+
+Driver with A Single Source File
+--------------------------------
+
+1. Modify alsa-driver/pci/Makefile
+
+ Suppose you have a file xyz.c. Add the following two lines
+
+::
+
+ snd-xyz-objs := xyz.o
+ obj-$(CONFIG_SND_XYZ) += snd-xyz.o
+
+2. Create the Kconfig entry
+
+ Add the new entry of Kconfig for your xyz driver::
+
+   config SND_XYZ
+     tristate "Foobar XYZ"
+     depends on SND
+     select SND_PCM
+     help
+       Say Y here to include support for Foobar XYZ soundcard.
+       To compile this driver as a module, choose M here:
+       the module will be called snd-xyz.
+
+ The line, select SND_PCM, specifies that the driver xyz supports PCM. In
+ addition to SND_PCM, the following components are supported for the
+ select command: SND_RAWMIDI, SND_TIMER, SND_HWDEP,
+ SND_MPU401_UART, SND_OPL3_LIB, SND_OPL4_LIB, SND_VX_LIB,
+ SND_AC97_CODEC. Add the select command for each supported
+ component.
+
+ Note that some selections imply the lowlevel selections. For example,
+ PCM includes TIMER, MPU401_UART includes RAWMIDI, AC97_CODEC
+ includes PCM, and OPL3_LIB includes HWDEP. You don't need to give
+ the lowlevel selections again.
+
+ For the details of Kconfig script, refer to the kbuild documentation.
+
+3. Run cvscompile script to re-generate the configure script and build
+ the whole stuff again.
+
+Drivers with Several Source Files
+---------------------------------
+
+Suppose that the driver snd-xyz has several source files. They are
+located in a new subdirectory, ``pci/xyz``.
+
+1. Add a new directory (``xyz``) in ``alsa-driver/pci/Makefile`` as
+ below
+
+::
+
+ obj-$(CONFIG_SND) += xyz/
+
+
+2. Under the directory ``xyz``, create a Makefile
+
+::
+
+ ifndef SND_TOPDIR
+ SND_TOPDIR=../..
+ endif
+
+ include $(SND_TOPDIR)/toplevel.config
+ include $(SND_TOPDIR)/Makefile.conf
+
+ snd-xyz-objs := xyz.o abc.o def.o
+
+ obj-$(CONFIG_SND_XYZ) += snd-xyz.o
+
+ include $(SND_TOPDIR)/Rules.make
+
+3. Create the Kconfig entry
+
+ This procedure is the same as in the last section.
+
+4. Run cvscompile script to re-generate the configure script and build
+ the whole stuff again.
+
+Useful Functions
+================
+
+:c:func:`snd_printk()` and friends
+---------------------------------------
+
+ALSA provides a verbose version of the :c:func:`printk()` function.
+If a kernel config ``CONFIG_SND_VERBOSE_PRINTK`` is set, this function
+prints the given message together with the file name and the line of the
+caller. The ``KERN_XXX`` prefix is processed just as with the original
+:c:func:`printk()`, so it's recommended to add this prefix, e.g.
+
+::
+
+  snd_printk(KERN_ERR "Oh my, sorry, it's extremely bad!\n");
+
+There are also :c:func:`printk()`'s for debugging.
+:c:func:`snd_printd()` can be used for general debugging purposes.
+If ``CONFIG_SND_DEBUG`` is set, this function is compiled, and works
+just like :c:func:`snd_printk()`. If the ALSA is compiled without
+the debugging flag, it's ignored.
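+
+For example, a debug message that disappears entirely in non-debug
+builds (the message text here is arbitrary):
+
+::
+
+  snd_printd("Period elapsed, hw pointer = %ld\n", (long)pos);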
+
+:c:func:`snd_printdd()` is compiled in only when
+``CONFIG_SND_DEBUG_VERBOSE`` is set. Please note that
+``CONFIG_SND_DEBUG_VERBOSE`` is not set by default even if you configure
+the alsa-driver with the ``--with-debug=full`` option. You need to give
+the ``--with-debug=detect`` option explicitly instead.
+
+:c:func:`snd_BUG()`
+------------------------
+
+It shows a ``BUG?`` message and a stack trace, like
+:c:func:`snd_BUG_ON()`, at the point where it is called. It's useful
+to show that a fatal error happened there.
+
+When no debug flag is set, this macro is ignored.
+
+:c:func:`snd_BUG_ON()`
+----------------------------
+
+The :c:func:`snd_BUG_ON()` macro is similar to the
+:c:func:`WARN_ON()` macro. For example,
+
+::
+
+  snd_BUG_ON(!pointer);
+
+or it can be used as a condition,
+
+::
+
+  if (snd_BUG_ON(non_zero_is_bug))
+          return -EINVAL;
+
+The macro takes a conditional expression to evaluate. When
+``CONFIG_SND_DEBUG`` is set and the expression is non-zero, it shows
+a warning message such as ``BUG? (xxx)``, normally followed by a stack
+trace. In both cases it returns the evaluated value.
+
+Acknowledgments
+===============
+
+I would like to thank Phil Kerr for his help for improvement and
+corrections of this document.
+
+Kevin Conder reformatted the original plain-text to the DocBook format.
+
+Giuliano Pochini corrected typos and contributed the example codes in
+the hardware constraints section.
diff --git a/Documentation/sound/oss/oss-parameters.txt b/Documentation/sound/oss/oss-parameters.txt
index 3ab391e7c295..cc675f25eee4 100644
--- a/Documentation/sound/oss/oss-parameters.txt
+++ b/Documentation/sound/oss/oss-parameters.txt
@@ -1,7 +1,7 @@
OSS Kernel Parameters
~~~~~~~~~~~~~~~~~~~~~
-See Documentation/kernel-parameters.txt for general information on
+See Documentation/admin-guide/kernel-parameters.rst for general information on
specifying module parameters.
This document may not be entirely up to date and comprehensive. The command
diff --git a/Documentation/sound/alsa/soc/clocking.txt b/Documentation/sound/soc/clocking.rst
index b1300162e01c..32122d6877a3 100644
--- a/Documentation/sound/alsa/soc/clocking.txt
+++ b/Documentation/sound/soc/clocking.rst
@@ -1,3 +1,4 @@
+==============
Audio Clocking
==============
@@ -30,15 +31,9 @@ runs at exactly the sample rate (LRC = Rate).
Bit Clock can be generated as follows:-
-BCLK = MCLK / x
-
- or
-
-BCLK = LRC * x
-
- or
-
-BCLK = LRC * Channels * Word Size
+- BCLK = MCLK / x, or
+- BCLK = LRC * x, or
+- BCLK = LRC * Channels * Word Size
This relationship depends on the codec or SoC CPU in particular. In general
it is best to configure BCLK to the lowest possible speed (depending on your
diff --git a/Documentation/sound/soc/codec-to-codec.rst b/Documentation/sound/soc/codec-to-codec.rst
new file mode 100644
index 000000000000..810109d7500d
--- /dev/null
+++ b/Documentation/sound/soc/codec-to-codec.rst
@@ -0,0 +1,108 @@
+==============================================
+Creating codec to codec dai link for ALSA dapm
+==============================================
+
+Most of the time the flow of audio is from the CPU to the codec, so
+your system will look as below:
+::
+
+ --------- ---------
+ | | dai | |
+ CPU -------> codec
+ | | | |
+ --------- ---------
+
+In case your system looks as below:
+::
+
+ ---------
+ | |
+ codec-2
+ | |
+ ---------
+ |
+ dai-2
+ |
+ ---------- ---------
+ | | dai-1 | |
+ CPU -------> codec-1
+ | | | |
+ ---------- ---------
+ |
+ dai-3
+ |
+ ---------
+ | |
+ codec-3
+ | |
+ ---------
+
+Suppose codec-2 is a Bluetooth chip and codec-3 is connected to a
+speaker, and you have the following scenario:
+codec-2 will receive the audio data and the user wants to play that
+audio through codec-3 without involving the CPU. This is the ideal
+case for using a codec to codec connection.
+
+Your dai_link should appear as below in your machine
+file:
+::
+
+ /*
+ * this pcm stream only supports 24 bit, 2 channel and
+ * 48k sampling rate.
+ */
+ static const struct snd_soc_pcm_stream dsp_codec_params = {
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ };
+
+ {
+ .name = "CPU-DSP",
+ .stream_name = "CPU-DSP",
+ .cpu_dai_name = "samsung-i2s.0",
+ .codec_name = "codec-2",
+ .codec_dai_name = "codec-2-dai_name",
+ .platform_name = "samsung-i2s.0",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ignore_suspend = 1,
+ .params = &dsp_codec_params,
+ },
+ {
+ .name = "DSP-CODEC",
+ .stream_name = "DSP-CODEC",
+ .cpu_dai_name = "wm0010-sdi2",
+ .codec_name = "codec-3",
+ .codec_dai_name = "codec-3-dai_name",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ignore_suspend = 1,
+ .params = &dsp_codec_params,
+ },
+
+The above code snippet is adapted from sound/soc/samsung/speyside.c.
+
+Note the ``params`` field, which lets dapm know that this
+dai_link is a codec to codec connection.
+
+In the dapm core a route is created between the cpu_dai playback
+widget and the codec_dai capture widget for the playback path, and
+vice versa for the capture path. In order for this route to get
+triggered, DAPM needs to find a valid endpoint, which could be either
+a sink or a source widget corresponding to the playback and capture
+path respectively.
+
+In order to trigger this dai_link widget, a thin codec driver for the
+speaker amp can be created, as demonstrated in the wm8727.c file; it
+sets appropriate constraints for the device even if it needs no control.
+
+Make sure to name your corresponding cpu and codec playback and capture
+dais so that their names end with "Playback" and "Capture" respectively,
+as the dapm core will link and power those dais based on the name.
+
+Note that in the current device tree there is no way to mark a dai_link
+as codec to codec. However, this may change in the future.
diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/soc/codec.rst
index db5f9c9ae149..f87612b94812 100644
--- a/Documentation/sound/alsa/soc/codec.txt
+++ b/Documentation/sound/soc/codec.rst
@@ -1,3 +1,4 @@
+=======================
ASoC Codec Class Driver
=======================
@@ -9,16 +10,16 @@ machine drivers respectively.
Each codec class driver *must* provide the following features:-
- 1) Codec DAI and PCM configuration
- 2) Codec control IO - using RegMap API
- 3) Mixers and audio controls
- 4) Codec audio operations
- 5) DAPM description.
- 6) DAPM event handler.
+1. Codec DAI and PCM configuration
+2. Codec control IO - using RegMap API
+3. Mixers and audio controls
+4. Codec audio operations
+5. DAPM description.
+6. DAPM event handler.
Optionally, codec drivers can also provide:-
- 7) DAC Digital mute control.
+7. DAC Digital mute control.
Its probably best to use this guide in conjunction with the existing codec
driver code in sound/soc/codecs/
@@ -26,24 +27,25 @@ driver code in sound/soc/codecs/
ASoC Codec driver breakdown
===========================
-1 - Codec DAI and PCM configuration
------------------------------------
+Codec DAI and PCM configuration
+-------------------------------
Each codec driver must have a struct snd_soc_dai_driver to define its DAI and
PCM capabilities and operations. This struct is exported so that it can be
registered with the core by your machine driver.
e.g.
+::
-static struct snd_soc_dai_ops wm8731_dai_ops = {
+ static struct snd_soc_dai_ops wm8731_dai_ops = {
.prepare = wm8731_pcm_prepare,
.hw_params = wm8731_hw_params,
.shutdown = wm8731_shutdown,
.digital_mute = wm8731_mute,
.set_sysclk = wm8731_set_dai_sysclk,
.set_fmt = wm8731_set_dai_fmt,
-};
-
-struct snd_soc_dai_driver wm8731_dai = {
+ };
+
+ struct snd_soc_dai_driver wm8731_dai = {
.name = "wm8731-hifi",
.playback = {
.stream_name = "Playback",
@@ -59,25 +61,27 @@ struct snd_soc_dai_driver wm8731_dai = {
.formats = WM8731_FORMATS,},
.ops = &wm8731_dai_ops,
.symmetric_rates = 1,
-};
+ };
-2 - Codec control IO
---------------------
+Codec control IO
+----------------
The codec can usually be controlled via an I2C or SPI style interface
(AC97 combines control with data in the DAI). The codec driver should use the
Regmap API for all codec IO. Please see include/linux/regmap.h and existing
codec drivers for example regmap usage.
-3 - Mixers and audio controls
------------------------------
+Mixers and audio controls
+-------------------------
All the codec mixers and audio controls can be defined using the convenience
macros defined in soc.h.
+::
#define SOC_SINGLE(xname, reg, shift, mask, invert)
Defines a single control as follows:-
+::
xname = Control name e.g. "Playback Volume"
reg = codec register
@@ -86,18 +90,22 @@ Defines a single control as follows:-
invert = the control is inverted
Other macros include:-
+::
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, mask, invert)
A stereo control
+::
#define SOC_DOUBLE_R(xname, reg_left, reg_right, shift, mask, invert)
A stereo control spanning 2 registers
+::
#define SOC_ENUM_SINGLE(xreg, xshift, xmask, xtexts)
Defines an single enumerated control as follows:-
+::
xreg = register
xshift = control bit(s) offset in register
@@ -109,25 +117,26 @@ Defines an single enumerated control as follows:-
Defines a stereo enumerated control
-4 - Codec Audio Operations
---------------------------
+Codec Audio Operations
+----------------------
The codec driver also supports the following ALSA PCM operations:-
+::
-/* SoC audio ops */
-struct snd_soc_ops {
+ /* SoC audio ops */
+ struct snd_soc_ops {
int (*startup)(struct snd_pcm_substream *);
void (*shutdown)(struct snd_pcm_substream *);
int (*hw_params)(struct snd_pcm_substream *, struct snd_pcm_hw_params *);
int (*hw_free)(struct snd_pcm_substream *);
int (*prepare)(struct snd_pcm_substream *);
-};
+ };
Please refer to the ALSA driver PCM documentation for details.
http://www.alsa-project.org/~iwai/writing-an-alsa-driver/
-5 - DAPM description.
----------------------
+DAPM description
+----------------
The Dynamic Audio Power Management description describes the codec power
components and their relationships and registers to the ASoC core.
Please read dapm.txt for details of building the description.
@@ -135,13 +144,14 @@ Please read dapm.txt for details of building the description.
Please also see the examples in other codec drivers.
-6 - DAPM event handler
-----------------------
+DAPM event handler
+------------------
This function is a callback that handles codec domain PM calls and system
domain PM calls (e.g. suspend and resume). It is used to put the codec
to sleep when not in use.
Power states:-
+::
SNDRV_CTL_POWER_D0: /* full On */
/* vref/mid, clk and osc on, active */
@@ -155,8 +165,8 @@ Power states:-
SNDRV_CTL_POWER_D3cold: /* Everything Off, without power */
-7 - Codec DAC digital mute control
-----------------------------------
+Codec DAC digital mute control
+------------------------------
Most codecs have a digital mute before the DACs that can be used to
minimise any system noise. The mute stops any digital data from
entering the DAC.
@@ -165,9 +175,10 @@ A callback can be created that is called by the core for each codec DAI
when the mute is applied or freed.
i.e.
+::
-static int wm8974_mute(struct snd_soc_dai *dai, int mute)
-{
+ static int wm8974_mute(struct snd_soc_dai *dai, int mute)
+ {
struct snd_soc_codec *codec = dai->codec;
u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
@@ -176,4 +187,4 @@ static int wm8974_mute(struct snd_soc_dai *dai, int mute)
else
snd_soc_write(codec, WM8974_DAC, mute_reg);
return 0;
-}
+ }
diff --git a/Documentation/sound/alsa/soc/DAI.txt b/Documentation/sound/soc/dai.rst
index c9679264c559..55820e51708f 100644
--- a/Documentation/sound/alsa/soc/DAI.txt
+++ b/Documentation/sound/soc/dai.rst
@@ -1,3 +1,7 @@
+==================================
+ASoC Digital Audio Interface (DAI)
+==================================
+
ASoC currently supports the three main Digital Audio Interfaces (DAI) found on
SoC controllers and portable audio CODECs today, namely AC97, I2S and PCM.
@@ -5,21 +9,21 @@ SoC controllers and portable audio CODECs today, namely AC97, I2S and PCM.
AC97
====
- AC97 is a five wire interface commonly found on many PC sound cards. It is
+AC97 is a five wire interface commonly found on many PC sound cards. It is
now also popular in many portable devices. This DAI has a reset line and time
multiplexes its data on its SDATA_OUT (playback) and SDATA_IN (capture) lines.
The bit clock (BCLK) is always driven by the CODEC (usually 12.288MHz) and the
frame (FRAME) (usually 48kHz) is always driven by the controller. Each AC97
frame is 21uS long and is divided into 13 time slots.
-The AC97 specification can be found at :-
+The AC97 specification can be found at :
http://www.intel.com/p/en_US/business/design
I2S
===
- I2S is a common 4 wire DAI used in HiFi, STB and portable devices. The Tx and
+I2S is a common 4 wire DAI used in HiFi, STB and portable devices. The Tx and
Rx lines are used for audio transmission, whilst the bit clock (BCLK) and
left/right clock (LRC) synchronise the link. I2S is flexible in that either the
controller or CODEC can drive (master) the BCLK and LRC clock lines. Bit clock
@@ -30,13 +34,15 @@ different sample rates.
I2S has several different operating modes:-
- o I2S - MSB is transmitted on the falling edge of the first BCLK after LRC
- transition.
+I2S
+ MSB is transmitted on the falling edge of the first BCLK after LRC
+ transition.
- o Left Justified - MSB is transmitted on transition of LRC.
+Left Justified
+ MSB is transmitted on transition of LRC.
- o Right Justified - MSB is transmitted sample size BCLKs before LRC
- transition.
+Right Justified
+ MSB is transmitted sample size BCLKs before LRC transition.
PCM
===
@@ -51,6 +57,8 @@ is sometimes referred to as network mode).
Common PCM operating modes:-
- o Mode A - MSB is transmitted on falling edge of first BCLK after FRAME/SYNC.
+Mode A
+ MSB is transmitted on falling edge of first BCLK after FRAME/SYNC.
- o Mode B - MSB is transmitted on rising edge of FRAME/SYNC.
+Mode B
+ MSB is transmitted on rising edge of FRAME/SYNC.
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/soc/dapm.rst
index c45bd79f291e..a27f42befa4d 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/soc/dapm.rst
@@ -1,8 +1,9 @@
+===================================================
Dynamic Audio Power Management for Portable Devices
===================================================
-1. Description
-==============
+Description
+===========
Dynamic Audio Power Management (DAPM) is designed to allow portable
Linux devices to use the minimum amount of power within the audio
@@ -21,20 +22,28 @@ level power systems.
There are 4 power domains within DAPM
- 1. Codec bias domain - VREF, VMID (core codec and audio power)
+Codec bias domain
+ VREF, VMID (core codec and audio power)
+
Usually controlled at codec probe/remove and suspend/resume, although
can be set at stream time if power is not needed for sidetone, etc.
- 2. Platform/Machine domain - physically connected inputs and outputs
+Platform/Machine domain
+ physically connected inputs and outputs
+
Is platform/machine and user action specific, is configured by the
machine driver and responds to asynchronous events e.g when HP
are inserted
- 3. Path domain - audio subsystem signal paths
+Path domain
+ audio subsystem signal paths
+
Automatically set when mixer and mux settings are changed by the user.
e.g. alsamixer, amixer.
- 4. Stream domain - DACs and ADCs.
+Stream domain
+ DACs and ADCs.
+
Enabled and disabled when stream playback/capture is started and
stopped respectively. e.g. aplay, arecord.
@@ -45,34 +54,57 @@ internal codec components). All audio components that effect power are called
widgets hereafter.
-2. DAPM Widgets
-===============
+DAPM Widgets
+============
Audio DAPM widgets fall into a number of types:-
- o Mixer - Mixes several analog signals into a single analog signal.
- o Mux - An analog switch that outputs only one of many inputs.
- o PGA - A programmable gain amplifier or attenuation widget.
- o ADC - Analog to Digital Converter
- o DAC - Digital to Analog Converter
- o Switch - An analog switch
- o Input - A codec input pin
- o Output - A codec output pin
- o Headphone - Headphone (and optional Jack)
- o Mic - Mic (and optional Jack)
- o Line - Line Input/Output (and optional Jack)
- o Speaker - Speaker
- o Supply - Power or clock supply widget used by other widgets.
- o Regulator - External regulator that supplies power to audio components.
- o Clock - External clock that supplies clock to audio components.
- o AIF IN - Audio Interface Input (with TDM slot mask).
- o AIF OUT - Audio Interface Output (with TDM slot mask).
- o Siggen - Signal Generator.
- o DAI IN - Digital Audio Interface Input.
- o DAI OUT - Digital Audio Interface Output.
- o DAI Link - DAI Link between two DAI structures */
- o Pre - Special PRE widget (exec before all others)
- o Post - Special POST widget (exec after all others)
+Mixer
+ Mixes several analog signals into a single analog signal.
+Mux
+ An analog switch that outputs only one of many inputs.
+PGA
+ A programmable gain amplifier or attenuation widget.
+ADC
+ Analog to Digital Converter
+DAC
+ Digital to Analog Converter
+Switch
+ An analog switch
+Input
+ A codec input pin
+Output
+ A codec output pin
+Headphone
+ Headphone (and optional Jack)
+Mic
+ Mic (and optional Jack)
+Line
+ Line Input/Output (and optional Jack)
+Speaker
+ Speaker
+Supply
+ Power or clock supply widget used by other widgets.
+Regulator
+ External regulator that supplies power to audio components.
+Clock
+ External clock that supplies clock to audio components.
+AIF IN
+ Audio Interface Input (with TDM slot mask).
+AIF OUT
+ Audio Interface Output (with TDM slot mask).
+Siggen
+ Signal Generator.
+DAI IN
+ Digital Audio Interface Input.
+DAI OUT
+ Digital Audio Interface Output.
+DAI Link
+ DAI Link between two DAI structures
+Pre
+ Special PRE widget (exec before all others)
+Post
+ Special POST widget (exec after all others)
(Widgets are defined in include/sound/soc-dapm.h)
@@ -84,52 +116,57 @@ Most widgets have a name, register, shift and invert. Some widgets have extra
parameters for stream name and kcontrols.
-2.1 Stream Domain Widgets
--------------------------
+Stream Domain Widgets
+---------------------
Stream Widgets relate to the stream power domain and only consist of ADCs
(analog to digital converters), DACs (digital to analog converters),
AIF IN and AIF OUT.
Stream widgets have the following format:-
+::
-SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert),
-SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert)
+ SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert),
+ SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert)
NOTE: the stream name must match the corresponding stream name in your codec
snd_soc_codec_dai.
e.g. stream widgets for HiFi playback and capture
+::
-SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1),
-SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1),
+ SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1),
+ SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1),
e.g. stream widgets for AIF
+::
-SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
-2.2 Path Domain Widgets
------------------------
+Path Domain Widgets
+-------------------
Path domain widgets have a ability to control or affect the audio signal or
audio paths within the audio subsystem. They have the following form:-
+::
-SND_SOC_DAPM_PGA(name, reg, shift, invert, controls, num_controls)
+ SND_SOC_DAPM_PGA(name, reg, shift, invert, controls, num_controls)
Any widget kcontrols can be set using the controls and num_controls members.
e.g. Mixer widget (the kcontrols are declared first)
+::
-/* Output Mixer */
-static const snd_kcontrol_new_t wm8731_output_mixer_controls[] = {
-SOC_DAPM_SINGLE("Line Bypass Switch", WM8731_APANA, 3, 1, 0),
-SOC_DAPM_SINGLE("Mic Sidetone Switch", WM8731_APANA, 5, 1, 0),
-SOC_DAPM_SINGLE("HiFi Playback Switch", WM8731_APANA, 4, 1, 0),
-};
+ /* Output Mixer */
+ static const snd_kcontrol_new_t wm8731_output_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Line Bypass Switch", WM8731_APANA, 3, 1, 0),
+ SOC_DAPM_SINGLE("Mic Sidetone Switch", WM8731_APANA, 5, 1, 0),
+ SOC_DAPM_SINGLE("HiFi Playback Switch", WM8731_APANA, 4, 1, 0),
+ };
-SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1, wm8731_output_mixer_controls,
+ SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1, wm8731_output_mixer_controls,
ARRAY_SIZE(wm8731_output_mixer_controls)),
If you don't want the mixer elements prefixed with the name of the mixer widget,
@@ -137,48 +174,49 @@ you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same
as for SND_SOC_DAPM_MIXER.
-2.3 Machine domain Widgets
---------------------------
+Machine domain Widgets
+----------------------
Machine widgets are different from codec widgets in that they don't have a
codec register bit associated with them. A machine widget is assigned to each
machine audio component (non codec or DSP) that can be independently
powered. e.g.
- o Speaker Amp
- o Microphone Bias
- o Jack connectors
+* Speaker Amp
+* Microphone Bias
+* Jack connectors
A machine widget can have an optional call back.
e.g. Jack connector widget for an external Mic that enables Mic Bias
-when the Mic is inserted:-
+when the Mic is inserted:-::
-static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
-{
+ static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
+ {
gpio_set_value(SPITZ_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event));
return 0;
-}
+ }
-SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias),
+ SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias),
-2.4 Codec (BIAS) Domain
------------------------
+Codec (BIAS) Domain
+-------------------
The codec bias power domain has no widgets and is handled by the codecs DAPM
event handler. This handler is called when the codec powerstate is changed wrt
to any stream event or by kernel PM events.
-2.5 Virtual Widgets
--------------------
+Virtual Widgets
+---------------
Sometimes widgets exist in the codec or machine audio map that don't have any
corresponding soft power control. In this case it is necessary to create
a virtual widget - a widget with no control bits e.g.
+::
-SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_DAPM_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_DAPM_NOPM, 0, 0, NULL, 0),
This can be used to merge to signal paths together in software.
@@ -186,8 +224,8 @@ After all the widgets have been defined, they can then be added to the DAPM
subsystem individually with a call to snd_soc_dapm_new_control().
-3. Codec/DSP Widget Interconnections
-====================================
+Codec/DSP Widget Interconnections
+=================================
Widgets are connected to each other within the codec, platform and machine by
audio paths (called interconnections). Each interconnection must be defined in
@@ -201,13 +239,14 @@ e.g., from the WM8731 output mixer (wm8731.c)
The WM8731 output mixer has 3 inputs (sources)
- 1. Line Bypass Input
- 2. DAC (HiFi playback)
- 3. Mic Sidetone Input
+1. Line Bypass Input
+2. DAC (HiFi playback)
+3. Mic Sidetone Input
Each input in this example has a kcontrol associated with it (defined in example
above) and is connected to the output mixer via its kcontrol name. We can now
connect the destination widget (wrt audio signal) with its source widgets.
+::
/* output mixer */
{"Output Mixer", "Line Bypass Switch", "Line Input"},
@@ -216,22 +255,17 @@ connect the destination widget (wrt audio signal) with its source widgets.
So we have :-
- Destination Widget <=== Path Name <=== Source Widget
-
-Or:-
-
- Sink, Path, Source
-
-Or :-
-
- "Output Mixer" is connected to the "DAC" via the "HiFi Playback Switch".
+* Destination Widget <=== Path Name <=== Source Widget, or
+* Sink, Path, Source, or
+* ``Output Mixer`` is connected to the ``DAC`` via the ``HiFi Playback Switch``.
When there is no path name connecting widgets (e.g. a direct connection) we
pass NULL for the path name.
Interconnections are created with a call to:-
+::
-snd_soc_dapm_connect_input(codec, sink, path, source);
+ snd_soc_dapm_connect_input(codec, sink, path, source);
Finally, snd_soc_dapm_new_widgets(codec) must be called after all widgets and
interconnections have been registered with the core. This causes the core to
@@ -239,12 +273,13 @@ scan the codec and machine so that the internal DAPM state matches the
physical state of the machine.
-3.1 Machine Widget Interconnections
------------------------------------
+Machine Widget Interconnections
+-------------------------------
Machine widget interconnections are created in the same way as codec ones and
directly connect the codec pins to machine level widgets.
e.g. connects the speaker out codec pins to the internal speaker.
+::
/* ext speaker connected to codec pins LOUT2, ROUT2 */
{"Ext Spk", NULL , "ROUT2"},
@@ -254,52 +289,54 @@ This allows the DAPM to power on and off pins that are connected (and in use)
and pins that are NC respectively.
-4 Endpoint Widgets
-===================
+Endpoint Widgets
+================
An endpoint is a start or end point (widget) of an audio signal within the
machine and includes the codec. e.g.
- o Headphone Jack
- o Internal Speaker
- o Internal Mic
- o Mic Jack
- o Codec Pins
+* Headphone Jack
+* Internal Speaker
+* Internal Mic
+* Mic Jack
+* Codec Pins
Endpoints are added to the DAPM graph so that their usage can be determined in
order to save power. e.g. NC codec pins will be switched OFF and unconnected
jacks can also be switched OFF.
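For example, machine-level endpoint widgets can be declared with the standard
helper macros; a brief sketch (the jack and speaker names below are
illustrative only) would be:
::

    SND_SOC_DAPM_HP("Headphone Jack", NULL),
    SND_SOC_DAPM_MIC("Mic Jack", NULL),
    SND_SOC_DAPM_SPK("Ext Spk", NULL),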
-5 DAPM Widget Events
-====================
+DAPM Widget Events
+==================
Some widgets can register their interest in PM events with the DAPM core.
e.g. A Speaker with an amplifier registers a widget so the amplifier can be
powered only when the speaker is in use.
+::
-/* turn speaker amplifier on/off depending on use */
-static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
-{
+ /* turn speaker amplifier on/off depending on use */
+ static int corgi_amp_event(struct snd_soc_dapm_widget *w, int event)
+ {
gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event));
return 0;
-}
+ }
-/* corgi machine dapm widgets */
-static const struct snd_soc_dapm_widget wm8731_dapm_widgets =
+ /* corgi machine dapm widgets */
+ static const struct snd_soc_dapm_widget wm8731_dapm_widgets =
SND_SOC_DAPM_SPK("Ext Spk", corgi_amp_event);
Please see soc-dapm.h for all other widgets that support events.
-5.1 Event types
----------------
+Event types
+-----------
The following event types are supported by event widgets.
-
-/* dapm event types */
-#define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
-#define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
-#define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
-#define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
-#define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
-#define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
+::
+
+ /* dapm event types */
+ #define SND_SOC_DAPM_PRE_PMU 0x1 /* before widget power up */
+ #define SND_SOC_DAPM_POST_PMU 0x2 /* after widget power up */
+ #define SND_SOC_DAPM_PRE_PMD 0x4 /* before widget power down */
+ #define SND_SOC_DAPM_POST_PMD 0x8 /* after widget power down */
+ #define SND_SOC_DAPM_PRE_REG 0x10 /* before audio path setup */
+ #define SND_SOC_DAPM_POST_REG 0x20 /* after audio path setup */
diff --git a/Documentation/sound/alsa/soc/DPCM.txt b/Documentation/sound/soc/dpcm.rst
index 0110180b7ac6..395e5a516282 100644
--- a/Documentation/sound/alsa/soc/DPCM.txt
+++ b/Documentation/sound/soc/dpcm.rst
@@ -1,8 +1,9 @@
+===========
Dynamic PCM
===========
-1. Description
-==============
+Description
+===========
Dynamic PCM allows an ALSA PCM device to digitally route its PCM audio to
various digital endpoints during the PCM stream runtime. e.g. PCM0 can route
@@ -23,22 +24,23 @@ Phone Audio System with SoC based DSP
Consider the following phone audio subsystem. This will be used in this
document for all examples :-
-
-| Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
-
- *************
-PCM0 <------------> * * <----DAI0-----> Codec Headset
- * *
-PCM1 <------------> * * <----DAI1-----> Codec Speakers
- * DSP *
-PCM2 <------------> * * <----DAI2-----> MODEM
- * *
-PCM3 <------------> * * <----DAI3-----> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+::
+
+ | Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
+
+ *************
+ PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+ PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+ PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+ PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
This diagram shows a simple smart phone audio subsystem. It supports Bluetooth,
FM digital radio, Speakers, Headset Jack, digital microphones and cellular
@@ -55,50 +57,52 @@ Audio is being played to the Headset. After a while the user removes the headset
and audio continues playing on the speakers.
Playback on PCM0 to Headset would look like :-
-
- *************
-PCM0 <============> * * <====DAI0=====> Codec Headset
- * *
-PCM1 <------------> * * <----DAI1-----> Codec Speakers
- * DSP *
-PCM2 <------------> * * <----DAI2-----> MODEM
- * *
-PCM3 <------------> * * <----DAI3-----> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+::
+
+ *************
+ PCM0 <============> * * <====DAI0=====> Codec Headset
+ * *
+ PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+ PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+ PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
The headset is removed from the jack by the user, so the speakers must now be used :-
-
- *************
-PCM0 <============> * * <----DAI0-----> Codec Headset
- * *
-PCM1 <------------> * * <====DAI1=====> Codec Speakers
- * DSP *
-PCM2 <------------> * * <----DAI2-----> MODEM
- * *
-PCM3 <------------> * * <----DAI3-----> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+::
+
+ *************
+ PCM0 <============> * * <----DAI0-----> Codec Headset
+ * *
+ PCM1 <------------> * * <====DAI1=====> Codec Speakers
+ * DSP *
+ PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+ PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
The audio driver processes this as follows :-
- 1) Machine driver receives Jack removal event.
+1. Machine driver receives Jack removal event.
- 2) Machine driver OR audio HAL disables the Headset path.
+2. Machine driver OR audio HAL disables the Headset path.
- 3) DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0
- for headset since the path is now disabled.
+3. DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0
+ for headset since the path is now disabled.
- 4) Machine driver or audio HAL enables the speaker path.
+4. Machine driver or audio HAL enables the speaker path.
- 5) DPCM runs the PCM ops for startup(), hw_params(), prepapre() and
- trigger(start) for DAI1 Speakers since the path is enabled.
+5. DPCM runs the PCM ops for startup(), hw_params(), prepare() and
+   trigger(start) for DAI1 Speakers since the path is enabled.
In this example, the machine driver or userspace audio HAL can alter the routing
and then DPCM will take care of managing the DAI PCM operations to either bring
@@ -112,36 +116,38 @@ DPCM machine driver
The DPCM enabled ASoC machine driver is similar to normal machine drivers
except that we also have to :-
- 1) Define the FE and BE DAI links.
+1. Define the FE and BE DAI links.
- 2) Define any FE/BE PCM operations.
+2. Define any FE/BE PCM operations.
- 3) Define widget graph connections.
+3. Define widget graph connections.
-1 FE and BE DAI links
----------------------
+FE and BE DAI links
+-------------------
+::
-| Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
-
- *************
-PCM0 <------------> * * <----DAI0-----> Codec Headset
- * *
-PCM1 <------------> * * <----DAI1-----> Codec Speakers
- * DSP *
-PCM2 <------------> * * <----DAI2-----> MODEM
- * *
-PCM3 <------------> * * <----DAI3-----> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+ | Front End PCMs | SoC DSP | Back End DAIs | Audio devices |
+
+ *************
+ PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+ PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+ PCM2 <------------> * * <----DAI2-----> MODEM
+ * *
+ PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
For the example above we have to define 4 FE DAI links and 6 BE DAI links. The
FE DAI links are defined as follows :-
+::
-static struct snd_soc_dai_link machine_dais[] = {
+ static struct snd_soc_dai_link machine_dais[] = {
{
.name = "PCM0 System",
.stream_name = "System Playback",
@@ -154,11 +160,11 @@ static struct snd_soc_dai_link machine_dais[] = {
.dpcm_playback = 1,
},
.....< other FE and BE DAI links here >
-};
+ };
This FE DAI link is pretty similar to a regular DAI link except that we also
-set the DAI link to a DPCM FE with the "dynamic = 1". The supported FE stream
-directions should also be set with the "dpcm_playback" and "dpcm_capture"
+set the DAI link to a DPCM FE with the ``dynamic = 1``. The supported FE stream
+directions should also be set with the ``dpcm_playback`` and ``dpcm_capture``
flags. There is also an option to specify the ordering of the trigger call for
each FE. This allows the ASoC core to trigger the DSP before or after the other
components (as some DSPs have strong requirements for the ordering of DAI/DSP
@@ -168,8 +174,9 @@ The FE DAI above sets the codec and code DAIs to dummy devices since the BE is
dynamic and will change depending on runtime config.
The BE DAIs are configured as follows :-
+::
-static struct snd_soc_dai_link machine_dais[] = {
+ static struct snd_soc_dai_link machine_dais[] = {
.....< FE DAI links here >
{
.name = "Codec Headset",
@@ -186,29 +193,30 @@ static struct snd_soc_dai_link machine_dais[] = {
.dpcm_capture = 1,
},
.....< other BE DAI links here >
-};
+ };
This BE DAI link connects DAI0 to the codec (in this case RT5460 AIF1). It sets
-the "no_pcm" flag to mark it has a BE and sets flags for supported stream
-directions using "dpcm_playback" and "dpcm_capture" above.
+the ``no_pcm`` flag to mark it as a BE and sets flags for supported stream
+directions using ``dpcm_playback`` and ``dpcm_capture`` above.
The BE also has flags set for ignoring suspend and PM down time. This allows
the BE to work in a hostless mode where the host CPU is not transferring data,
such as during a BT phone call :-
-
- *************
-PCM0 <------------> * * <----DAI0-----> Codec Headset
- * *
-PCM1 <------------> * * <----DAI1-----> Codec Speakers
- * DSP *
-PCM2 <------------> * * <====DAI2=====> MODEM
- * *
-PCM3 <------------> * * <====DAI3=====> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+::
+
+ *************
+ PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+ PCM1 <------------> * * <----DAI1-----> Codec Speakers
+ * DSP *
+ PCM2 <------------> * * <====DAI2=====> MODEM
+ * *
+ PCM3 <------------> * * <====DAI3=====> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
This allows the host CPU to sleep whilst the DSP, MODEM DAI and the BT DAI are
still in operation.
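These hostless capabilities come from flag fields on the BE DAI link; a sketch
of the relevant fields (values illustrative only) is:
::

    .no_pcm = 1,
    .ignore_suspend = 1,
    .ignore_pmdown_time = 1,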
@@ -220,10 +228,10 @@ Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the
DSP firmware.
-2 FE/BE PCM operations
-----------------------
+FE/BE PCM operations
+--------------------
-The BE above also exports some PCM operations and a "fixup" callback. The fixup
+The BE above also exports some PCM operations and a ``fixup`` callback. The fixup
callback is used by the machine driver to (re)configure the DAI based upon the
FE hw params. i.e. the DSP may perform SRC or ASRC from the FE to BE.
@@ -231,10 +239,11 @@ e.g. DSP converts all FE hw params to run at fixed rate of 48k, 16bit, stereo fo
DAI0. This means all FE hw_params have to be fixed in the machine driver for
DAI0 so that the DAI is running at desired configuration regardless of the FE
configuration.
+::
-static int dai0_fixup(struct snd_soc_pcm_runtime *rtd,
+ static int dai0_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
-{
+ {
struct snd_interval *rate = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
@@ -249,21 +258,22 @@ static int dai0_fixup(struct snd_soc_pcm_runtime *rtd,
SNDRV_PCM_HW_PARAM_FIRST_MASK],
SNDRV_PCM_FORMAT_S16_LE);
return 0;
-}
+ }
The other PCM operations are the same as for regular DAI links. Use as necessary.
-3 Widget graph connections
---------------------------
+Widget graph connections
+------------------------
The BE DAI links will normally be connected to the graph at initialisation time
by the ASoC DAPM core. However, if the BE codec or BE DAI is a dummy then this
has to be set explicitly in the driver :-
+::
-/* BE for codec Headset - DAI0 is dummy and managed by DSP FW */
-{"DAI0 CODEC IN", NULL, "AIF1 Capture"},
-{"AIF1 Playback", NULL, "DAI0 CODEC OUT"},
+ /* BE for codec Headset - DAI0 is dummy and managed by DSP FW */
+ {"DAI0 CODEC IN", NULL, "AIF1 Capture"},
+ {"AIF1 Playback", NULL, "DAI0 CODEC OUT"},
Writing a DPCM DSP driver
@@ -273,24 +283,25 @@ The DPCM DSP driver looks much like a standard platform class ASoC driver
combined with elements from a codec class driver. A DSP platform driver must
implement :-
- 1) Front End PCM DAIs - i.e. struct snd_soc_dai_driver.
+1. Front End PCM DAIs - i.e. struct snd_soc_dai_driver.
- 2) DAPM graph showing DSP audio routing from FE DAIs to BEs.
+2. DAPM graph showing DSP audio routing from FE DAIs to BEs.
- 3) DAPM widgets from DSP graph.
+3. DAPM widgets from DSP graph.
- 4) Mixers for gains, routing, etc.
+4. Mixers for gains, routing, etc.
- 5) DMA configuration.
+5. DMA configuration.
- 6) BE AIF widgets.
+6. BE AIF widgets.
Item 6 is important for routing the audio outside of the DSP. AIFs need to be
defined for each BE and each stream direction. e.g. for BE DAI0 above we would
have :-
+::
-SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0),
-SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0),
The BE AIFs are used to connect the DSP graph to the graphs of the other
component drivers (e.g. the codec graph).
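For item 1 in the list above, the front end PCM DAIs are described with
struct snd_soc_dai_driver; a brief sketch (names and capabilities are
illustrative only) could be:
::

    static struct snd_soc_dai_driver dsp_fe_dais[] = {
        {
            .name = "PCM0 Pin",
            .playback = {
                .stream_name = "System Playback",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_48000,
                .formats = SNDRV_PCM_FMTBIT_S16_LE,
            },
        },
        .....< other FE DAIs here >
    };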
@@ -301,33 +312,33 @@ Hostless PCM streams
A hostless PCM stream is a stream that is not routed through the host CPU. An
example of this would be a phone call from handset to modem.
-
-
- *************
-PCM0 <------------> * * <----DAI0-----> Codec Headset
- * *
-PCM1 <------------> * * <====DAI1=====> Codec Speakers/Mic
- * DSP *
-PCM2 <------------> * * <====DAI2=====> MODEM
- * *
-PCM3 <------------> * * <----DAI3-----> BT
- * *
- * * <----DAI4-----> DMIC
- * *
- * * <----DAI5-----> FM
- *************
+::
+
+ *************
+ PCM0 <------------> * * <----DAI0-----> Codec Headset
+ * *
+ PCM1 <------------> * * <====DAI1=====> Codec Speakers/Mic
+ * DSP *
+ PCM2 <------------> * * <====DAI2=====> MODEM
+ * *
+ PCM3 <------------> * * <----DAI3-----> BT
+ * *
+ * * <----DAI4-----> DMIC
+ * *
+ * * <----DAI5-----> FM
+ *************
In this case the PCM data is routed via the DSP. The host CPU in this use case
is only used for control and can sleep during the runtime of the stream.
The host can control the hostless link either by :-
- 1) Configuring the link as a CODEC <-> CODEC style link. In this case the link
+ 1. Configuring the link as a CODEC <-> CODEC style link. In this case the link
is enabled or disabled by the state of the DAPM graph. This usually means
there is a mixer control that can be used to connect or disconnect the path
between both DAIs.
- 2) Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM
+ 2. Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM
graph. Control is then carried out by the FE as regular PCM operations.
This method gives more control over the DAI links, but requires much more
userspace code to control the link. It's recommended to use CODEC<->CODEC
@@ -339,16 +350,17 @@ CODEC <-> CODEC link
This DAI link is enabled when DAPM detects a valid path within the DAPM graph.
The machine driver sets some additional parameters to the DAI link i.e.
+::
-static const struct snd_soc_pcm_stream dai_params = {
+ static const struct snd_soc_pcm_stream dai_params = {
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rate_min = 8000,
.rate_max = 8000,
.channels_min = 2,
.channels_max = 2,
-};
+ };
-static struct snd_soc_dai_link dais[] = {
+ static struct snd_soc_dai_link dais[] = {
< ... more DAI links above ... >
{
.name = "MODEM",
diff --git a/Documentation/sound/soc/index.rst b/Documentation/sound/soc/index.rst
new file mode 100644
index 000000000000..e57df2dab2fd
--- /dev/null
+++ b/Documentation/sound/soc/index.rst
@@ -0,0 +1,20 @@
+==============
+ALSA SoC Layer
+==============
+
+The documentation is split into the following sections:-
+
+.. toctree::
+ :maxdepth: 2
+
+ overview
+ codec
+ dai
+ dapm
+ platform
+ machine
+ pops-clicks
+ clocking
+ jack
+ dpcm
+ codec-to-codec
diff --git a/Documentation/sound/alsa/soc/jack.txt b/Documentation/sound/soc/jack.rst
index fcf82a417293..644b99ecba35 100644
--- a/Documentation/sound/alsa/soc/jack.txt
+++ b/Documentation/sound/soc/jack.rst
@@ -1,3 +1,4 @@
+===================
ASoC jack detection
===================
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/soc/machine.rst
index 6bf2d2063b52..515c9444deaf 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/soc/machine.rst
@@ -1,3 +1,4 @@
+===================
ASoC Machine Driver
===================
@@ -9,9 +10,10 @@ interrupts, clocking, jacks and voltage regulators.
The machine driver can contain codec and platform specific code. It registers
the audio subsystem with the kernel as a platform device and is represented by
the following struct:-
+::
-/* SoC machine */
-struct snd_soc_card {
+ /* SoC machine */
+ struct snd_soc_card {
char *name;
...
@@ -33,7 +35,7 @@ struct snd_soc_card {
int num_links;
...
-};
+ };
probe()/remove()
----------------
@@ -55,9 +57,10 @@ initialisation e.g. the machine audio map can be connected to the codec audio
map, unconnected codec pins can be set as such.
struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
+::
-/* corgi digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link corgi_dai = {
+ /* corgi digital audio interface glue - connects codec <--> CPU */
+ static struct snd_soc_dai_link corgi_dai = {
.name = "WM8731",
.stream_name = "WM8731",
.cpu_dai_name = "pxa-is2-dai",
@@ -66,16 +69,17 @@ static struct snd_soc_dai_link corgi_dai = {
.codec_name = "wm8713-codec.0-001a",
.init = corgi_wm8731_init,
.ops = &corgi_ops,
-};
+ };
struct snd_soc_card then sets up the machine with its DAIs. e.g.
+::
-/* corgi audio machine driver */
-static struct snd_soc_card snd_soc_corgi = {
+ /* corgi audio machine driver */
+ static struct snd_soc_card snd_soc_corgi = {
.name = "Corgi",
.dai_link = &corgi_dai,
.num_links = 1,
-};
+ };
Machine Power Map
diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/soc/overview.rst
index f3f28b7ae242..dc8370bbfff6 100644
--- a/Documentation/sound/alsa/soc/overview.txt
+++ b/Documentation/sound/soc/overview.rst
@@ -1,5 +1,6 @@
-ALSA SoC Layer
-==============
+=======================
+ALSA SoC Layer Overview
+=======================
The overall project goal of the ALSA System on Chip (ASoC) layer is to
provide better ALSA support for embedded system-on-chip processors (e.g.
@@ -66,30 +67,3 @@ multiple re-usable component drivers :-
describes and binds the other component drivers together to form an ALSA
"sound card device". It handles any machine specific controls and
machine level audio events (e.g. turning on an amp at start of playback).
-
-
-Documentation
-=============
-
-The documentation is spilt into the following sections:-
-
-overview.txt: This file.
-
-codec.txt: Codec driver internals.
-
-DAI.txt: Description of Digital Audio Interface standards and how to configure
-a DAI within your codec and CPU DAI drivers.
-
-dapm.txt: Dynamic Audio Power Management
-
-platform.txt: Platform audio DMA and DAI.
-
-machine.txt: Machine driver internals.
-
-pop_clicks.txt: How to minimise audio artifacts.
-
-clocking.txt: ASoC clocking for best power performance.
-
-jack.txt: ASoC jack detection.
-
-DPCM.txt: Dynamic PCM - Describes DPCM with DSP examples.
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/soc/platform.rst
index 3a08a2c9150c..d5574904d981 100644
--- a/Documentation/sound/alsa/soc/platform.txt
+++ b/Documentation/sound/soc/platform.rst
@@ -1,3 +1,4 @@
+====================
ASoC Platform Driver
====================
@@ -9,21 +10,23 @@ Audio DMA
=========
The platform DMA driver optionally supports the following ALSA operations:-
+::
-/* SoC audio ops */
-struct snd_soc_ops {
+ /* SoC audio ops */
+ struct snd_soc_ops {
int (*startup)(struct snd_pcm_substream *);
void (*shutdown)(struct snd_pcm_substream *);
int (*hw_params)(struct snd_pcm_substream *, struct snd_pcm_hw_params *);
int (*hw_free)(struct snd_pcm_substream *);
int (*prepare)(struct snd_pcm_substream *);
int (*trigger)(struct snd_pcm_substream *, int);
-};
+ };
The platform driver exports its DMA functionality via struct
snd_soc_platform_driver:-
+::
-struct snd_soc_platform_driver {
+ struct snd_soc_platform_driver {
char *name;
int (*probe)(struct platform_device *pdev);
@@ -44,7 +47,7 @@ struct snd_soc_platform_driver {
/* platform stream ops */
struct snd_pcm_ops *pcm_ops;
-};
+ };
Please refer to the ALSA driver documentation for details of audio DMA.
http://www.alsa-project.org/~iwai/writing-an-alsa-driver/
@@ -57,11 +60,11 @@ SoC DAI Drivers
Each SoC DAI driver must provide the following features:-
- 1) Digital audio interface (DAI) description
- 2) Digital audio interface configuration
- 3) PCM's description
- 4) SYSCLK configuration
- 5) Suspend and resume (optional)
+1. Digital audio interface (DAI) description
+2. Digital audio interface configuration
+3. PCM's description
+4. SYSCLK configuration
+5. Suspend and resume (optional)
Please see codec.txt for a description of items 1 - 4.
@@ -71,9 +74,9 @@ SoC DSP Drivers
Each SoC DSP driver usually supplies the following features :-
- 1) DAPM graph
- 2) Mixer controls
- 3) DMA IO to/from DSP buffers (if applicable)
- 4) Definition of DSP front end (FE) PCM devices.
+1. DAPM graph
+2. Mixer controls
+3. DMA IO to/from DSP buffers (if applicable)
+4. Definition of DSP front end (FE) PCM devices.
Please see DPCM.txt for a description of item 4.
diff --git a/Documentation/sound/alsa/soc/pops_clicks.txt b/Documentation/sound/soc/pops-clicks.rst
index e1e74daa4497..de7eb2a6604a 100644
--- a/Documentation/sound/alsa/soc/pops_clicks.txt
+++ b/Documentation/sound/soc/pops-clicks.rst
@@ -1,3 +1,4 @@
+=====================
Audio Pops and Clicks
=====================
@@ -20,10 +21,11 @@ currently, however future audio codec hardware will have better pop and click
suppression. Pops can be reduced within playback by powering the audio
components in a specific order. This order is different for startup and
shutdown and follows some basic rules:-
+::
- Startup Order :- DAC --> Mixers --> Output PGA --> Digital Unmute
-
- Shutdown Order :- Digital Mute --> Output PGA --> Mixers --> DAC
+ Startup Order :- DAC --> Mixers --> Output PGA --> Digital Unmute
+
+ Shutdown Order :- Digital Mute --> Output PGA --> Mixers --> DAC
This assumes that the codec PCM output path from the DAC is via a mixer and then
a PGA (programmable gain amplifier) before being output to the speakers.
@@ -36,10 +38,11 @@ Capture artifacts are somewhat easier to get rid as we can delay activating the
ADC until all the pops have occurred. This follows similar power rules to
playback in that components are powered in a sequence depending upon stream
startup or shutdown.
+::
- Startup Order - Input PGA --> Mixers --> ADC
-
- Shutdown Order - ADC --> Mixers --> Input PGA
+ Startup Order - Input PGA --> Mixers --> ADC
+
+ Shutdown Order - ADC --> Mixers --> Input PGA
Zipper Noise
diff --git a/Documentation/sphinx/kernel-doc.py b/Documentation/sphinx/kerneldoc.py
index d15e07f36881..d15e07f36881 100644
--- a/Documentation/sphinx/kernel-doc.py
+++ b/Documentation/sphinx/kerneldoc.py
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index db0186a7618f..a958d8b5e99d 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -1,22 +1,22 @@
#!/usr/bin/perl
use strict;
use Text::Tabs;
+use Getopt::Long;
+use Pod::Usage;
-my $debug = 0;
+my $debug;
+my $help;
+my $man;
-while ($ARGV[0] =~ m/^-(.*)/) {
- my $cmd = shift @ARGV;
- if ($cmd eq "--debug") {
- require Data::Dumper;
- $debug = 1;
- next;
- }
- die "argument $cmd unknown";
-}
+GetOptions(
+ "debug" => \$debug,
+ 'usage|?' => \$help,
+ 'help' => \$man
+) or pod2usage(2);
-if (scalar @ARGV < 2 || scalar @ARGV > 3) {
- die "Usage:\n\t$0 <file in> <file out> [<exceptions file>]\n";
-}
+pod2usage(1) if $help;
+pod2usage(-exitstatus => 0, -verbose => 2) if $man;
+pod2usage(2) if (scalar @ARGV < 2 || scalar @ARGV > 3);
my ($file_in, $file_out, $file_exceptions) = @ARGV;
@@ -28,6 +28,8 @@ my %enums;
my %enum_symbols;
my %structs;
+require Data::Dumper if ($debug);
+
#
# read the file and get identifiers
#
@@ -330,3 +332,70 @@ print OUT "=" x length($title);
print OUT "\n\n.. parsed-literal::\n\n";
print OUT $data;
close OUT;
+
+__END__
+
+=head1 NAME
+
+parse_headers.pl - parse a C file, in order to identify functions, structs,
+enums and defines and create cross-references to a Sphinx book.
+
+=head1 SYNOPSIS
+
+B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
+
+Where <options> can be: --debug, --help or --man.
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--debug>
+
+Put the script in verbose mode, useful for debugging.
+
+=item B<--usage>
+
+Prints a brief help message and exits.
+
+=item B<--help>
+
+Prints a more detailed help message and exits.
+
+=back
+
+=head1 DESCRIPTION
+
+Convert a C header or source file (C_FILE) into a reStructuredText
+fragment, included via a ..parsed-literal block, with cross-references
+for the documentation files that describe the API. It accepts an
+optional EXCEPTIONS_FILE which describes what elements will be either
+ignored or pointed to a non-default reference.
+
+The output is written to OUT_FILE.
+
+It is capable of identifying defines, functions, structs, typedefs,
+enums and enum symbols, and of creating cross-references for all of them.
+It is also capable of distinguishing a #define used for specifying a Linux
+ioctl.
+
+The EXCEPTIONS_FILE contains two kinds of rules, to allow either ignoring
+a symbol or replacing the default reference with a custom one.
+
+Please read Documentation/doc-guide/parse-headers.rst at the Kernel's
+tree for more details.
+
+=head1 BUGS
+
+Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+
+=head1 COPYRIGHT
+
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+
+License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+=cut
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a68531afd..269681a6faec 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@
This document serves as a guide for device driver writers on what the
sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences (struct dma_fence) that are needed to synchronize between drivers or
across process boundaries.
The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@ in-fences and out-fences
Sync files can go either to or from userspace. When a sync_file is sent from
the driver to userspace we call the fences it contains 'out-fences'. They are
related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
On the other hand if the driver receives fence(s) through a sync_file from
userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@@ -47,7 +47,7 @@ Creating Sync Files
When a driver needs to send an out-fence to userspace it creates a sync_file.
Interface:
- struct sync_file *sync_file_create(struct fence *fence);
+ struct sync_file *sync_file_create(struct dma_fence *fence);
The caller passes the out-fence and gets back the sync_file. That is just the
first step; next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
from it.
Interface:
- struct fence *sync_file_get_fence(int fd);
+ struct dma_fence *sync_file_get_fence(int fd);
The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
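For illustration, a minimal sketch of the consumer side (error handling kept
to a minimum, and assuming a valid sync_file fd) could be:

	struct dma_fence *fence;

	fence = sync_file_get_fence(fd);
	if (!fence)
		return -EINVAL;

	/* ... wait on or otherwise use the fence ... */

	dma_fence_put(fence);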
References:
[1] struct sync_file in include/linux/sync_file.h
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index ffab8b5caa60..a32b4b748644 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -40,7 +40,6 @@ show up in /proc/sys/kernel:
- hung_task_warnings
- kexec_load_disabled
- kptr_restrict
-- kstack_depth_to_print [ X86 only ]
- l2cr [ PPC only ]
- modprobe ==> Documentation/debugging-modules.txt
- modules_disabled
@@ -71,7 +70,7 @@ show up in /proc/sys/kernel:
- printk_ratelimit_burst
- pty ==> Documentation/filesystems/devpts.txt
- randomize_va_space
-- real-root-dev ==> Documentation/initrd.txt
+- real-root-dev ==> Documentation/admin-guide/initrd.rst
- reboot-cmd [ SPARC only ]
- rtsig-max
- rtsig-nr
@@ -395,13 +394,6 @@ When kptr_restrict is set to (2), kernel pointers printed using
==============================================================
-kstack_depth_to_print: (X86 only)
-
-Controls the number of words to print when dumping the raw
-kernel stack.
-
-==============================================================
-
l2cr: (PPC only)
This flag controls the L2 cache of G3 processor boards. If
@@ -453,7 +445,7 @@ in a KVM virtual machine. This default can be overridden by adding
nmi_watchdog=1
-to the guest kernel command line (see Documentation/kernel-parameters.txt).
+to the guest kernel command line (see Documentation/admin-guide/kernel-parameters.rst).
==============================================================
diff --git a/Documentation/sysfs-rules.txt b/Documentation/sysfs-rules.txt
deleted file mode 100644
index ce60ffa94d2d..000000000000
--- a/Documentation/sysfs-rules.txt
+++ /dev/null
@@ -1,184 +0,0 @@
-Rules on how to access information in the Linux kernel sysfs
-
-The kernel-exported sysfs exports internal kernel implementation details
-and depends on internal kernel structures and layout. It is agreed upon
-by the kernel developers that the Linux kernel does not provide a stable
-internal API. Therefore, there are aspects of the sysfs interface that
-may not be stable across kernel releases.
-
-To minimize the risk of breaking users of sysfs, which are in most cases
-low-level userspace applications, with a new kernel release, the users
-of sysfs must follow some rules to use an as-abstract-as-possible way to
-access this filesystem. The current udev and HAL programs already
-implement this and users are encouraged to plug, if possible, into the
-abstractions these programs provide instead of accessing sysfs directly.
-
-But if you really do want or need to access sysfs directly, please follow
-the following rules and then your programs should work with future
-versions of the sysfs interface.
-
-- Do not use libsysfs
- It makes assumptions about sysfs which are not true. Its API does not
- offer any abstraction, it exposes all the kernel driver-core
- implementation details in its own API. Therefore it is not better than
- reading directories and opening the files yourself.
- Also, it is not actively maintained, in the sense of reflecting the
- current kernel development. The goal of providing a stable interface
- to sysfs has failed; it causes more problems than it solves. It
- violates many of the rules in this document.
-
-- sysfs is always at /sys
- Parsing /proc/mounts is a waste of time. Other mount points are a
- system configuration bug you should not try to solve. For test cases,
- possibly support a SYSFS_PATH environment variable to overwrite the
- application's behavior, but never try to search for sysfs. Never try
- to mount it, if you are not an early boot script.
-
-- devices are only "devices"
- There is no such thing like class-, bus-, physical devices,
- interfaces, and such that you can rely on in userspace. Everything is
- just simply a "device". Class-, bus-, physical, ... types are just
- kernel implementation details which should not be expected by
- applications that look for devices in sysfs.
-
- The properties of a device are:
- o devpath (/devices/pci0000:00/0000:00:1d.1/usb2/2-2/2-2:1.0)
- - identical to the DEVPATH value in the event sent from the kernel
- at device creation and removal
- - the unique key to the device at that point in time
- - the kernel's path to the device directory without the leading
- /sys, and always starting with a slash
- - all elements of a devpath must be real directories. Symlinks
- pointing to /sys/devices must always be resolved to their real
- target and the target path must be used to access the device.
- That way the devpath to the device matches the devpath of the
- kernel used at event time.
- - using or exposing symlink values as elements in a devpath string
- is a bug in the application
-
- o kernel name (sda, tty, 0000:00:1f.2, ...)
- - a directory name, identical to the last element of the devpath
- - applications need to handle spaces and characters like '!' in
- the name
-
- o subsystem (block, tty, pci, ...)
- - simple string, never a path or a link
- - retrieved by reading the "subsystem"-link and using only the
- last element of the target path
-
- o driver (tg3, ata_piix, uhci_hcd)
- - a simple string, which may contain spaces, never a path or a
- link
- - it is retrieved by reading the "driver"-link and using only the
- last element of the target path
- - devices which do not have "driver"-link just do not have a
- driver; copying the driver value in a child device context is a
- bug in the application
-
- o attributes
- - the files in the device directory or files below subdirectories
- of the same device directory
- - accessing attributes reached by a symlink pointing to another device,
- like the "device"-link, is a bug in the application
-
- Everything else is just a kernel driver-core implementation detail
- that should not be assumed to be stable across kernel releases.
-
-- Properties of parent devices never belong into a child device.
- Always look at the parent devices themselves for determining device
- context properties. If the device 'eth0' or 'sda' does not have a
- "driver"-link, then this device does not have a driver. Its value is empty.
- Never copy any property of the parent-device into a child-device. Parent
- device properties may change dynamically without any notice to the
- child device.
-
-- Hierarchy in a single device tree
- There is only one valid place in sysfs where hierarchy can be examined
- and this is below: /sys/devices.
- It is planned that all device directories will end up in the tree
- below this directory.
-
-- Classification by subsystem
- There are currently three places for classification of devices:
- /sys/block, /sys/class and /sys/bus. It is planned that these will
- not contain any device directories themselves, but only flat lists of
- symlinks pointing to the unified /sys/devices tree.
- All three places have completely different rules on how to access
- device information. It is planned to merge all three
- classification directories into one place at /sys/subsystem,
- following the layout of the bus directories. All buses and
- classes, including the converted block subsystem, will show up
- there.
- The devices belonging to a subsystem will create a symlink in the
- "devices" directory at /sys/subsystem/<name>/devices.
-
- If /sys/subsystem exists, /sys/bus, /sys/class and /sys/block can be
- ignored. If it does not exist, you always have to scan all three
- places, as the kernel is free to move a subsystem from one place to
- the other, as long as the devices are still reachable by the same
- subsystem name.
-
- Assuming /sys/class/<subsystem> and /sys/bus/<subsystem>, or
- /sys/block and /sys/class/block are not interchangeable is a bug in
- the application.
-
-- Block
- The converted block subsystem at /sys/class/block or
- /sys/subsystem/block will contain the links for disks and partitions
- at the same level, never in a hierarchy. Assuming the block subsystem to
- contain only disks and not partition devices in the same flat list is
- a bug in the application.
-
-- "device"-link and <subsystem>:<kernel name>-links
- Never depend on the "device"-link. The "device"-link is a workaround
- for the old layout, where class devices are not created in
- /sys/devices/ like the bus devices. If the link-resolving of a
- device directory does not end in /sys/devices/, you can use the
- "device"-link to find the parent devices in /sys/devices/. That is the
- single valid use of the "device"-link; it must never appear in any
- path as an element. Assuming the existence of the "device"-link for
- a device in /sys/devices/ is a bug in the application.
- Accessing /sys/class/net/eth0/device is a bug in the application.
-
- Never depend on the class-specific links back to the /sys/class
- directory. These links are also a workaround for the design mistake
- that class devices are not created in /sys/devices. If a device
- directory does not contain directories for child devices, these links
- may be used to find the child devices in /sys/class. That is the single
- valid use of these links; they must never appear in any path as an
- element. Assuming the existence of these links for devices which are
- real child device directories in the /sys/devices tree is a bug in
- the application.
-
- It is planned to remove all these links when all class device
- directories live in /sys/devices.
-
-- Position of devices along device chain can change.
- Never depend on a specific parent device position in the devpath,
- or the chain of parent devices. The kernel is free to insert devices into
- the chain. You must always request the parent device you are looking for
- by its subsystem value. You need to walk up the chain until you find
- the device that matches the expected subsystem. Depending on a specific
- position of a parent device or exposing relative paths using "../" to
- access the chain of parents is a bug in the application.
-
-- When reading and writing sysfs device attribute files, avoid dependency
- on specific error codes wherever possible. This minimizes coupling to
- the error handling implementation within the kernel.
-
- In general, failures to read or write sysfs device attributes shall
- propagate errors wherever possible. Common errors include, but are not
- limited to:
-
- -EIO: The read or store operation is not supported, typically returned by
- the sysfs system itself if the read or store pointer is NULL.
-
- -ENXIO: The read or store operation failed
-
- Error codes will not be changed without good reason, and should a change
- to error codes result in user-space breakage, it will be fixed, or the
- the offending change will be reverted.
-
- Userspace applications can, however, expect the format and contents of
- the attribute files to remain consistent in the absence of a version
- attribute change in the context of a given attribute.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
deleted file mode 100644
index 3a3b30ac2a75..000000000000
--- a/Documentation/sysrq.txt
+++ /dev/null
@@ -1,257 +0,0 @@
-Linux Magic System Request Key Hacks
-Documentation for sysrq.c
-
-* What is the magic SysRq key?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It is a 'magical' key combo you can hit which the kernel will respond to
-regardless of whatever else it is doing, unless it is completely locked up.
-
-* How do I enable the magic SysRq key?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You need to say "yes" to 'Magic SysRq key (CONFIG_MAGIC_SYSRQ)' when
-configuring the kernel. When running a kernel with SysRq compiled in,
-/proc/sys/kernel/sysrq controls the functions allowed to be invoked via
-the SysRq key. The default value in this file is set by the
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE config symbol, which itself defaults
-to 1. Here is the list of possible values in /proc/sys/kernel/sysrq:
- 0 - disable sysrq completely
- 1 - enable all functions of sysrq
- >1 - bitmask of allowed sysrq functions (see below for detailed function
- description):
- 2 = 0x2 - enable control of console logging level
- 4 = 0x4 - enable control of keyboard (SAK, unraw)
- 8 = 0x8 - enable debugging dumps of processes etc.
- 16 = 0x10 - enable sync command
- 32 = 0x20 - enable remount read-only
- 64 = 0x40 - enable signalling of processes (term, kill, oom-kill)
- 128 = 0x80 - allow reboot/poweroff
- 256 = 0x100 - allow nicing of all RT tasks
-
-You can set the value in the file by the following command:
- echo "number" >/proc/sys/kernel/sysrq
-
-The number may be written here either as decimal or as hexadecimal
-with the 0x prefix. CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE must always be
-written in hexadecimal.
-
-Note that the value of /proc/sys/kernel/sysrq influences only the invocation
-via a keyboard. Invocation of any operation via /proc/sysrq-trigger is always
-allowed (by a user with admin privileges).
-
-* How do I use the magic SysRq key?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-On x86 - You press the key combo 'ALT-SysRq-<command key>'. Note - Some
- keyboards may not have a key labeled 'SysRq'. The 'SysRq' key is
- also known as the 'Print Screen' key. Also some keyboards cannot
- handle so many keys being pressed at the same time, so you might
- have better luck with "press Alt", "press SysRq", "release SysRq",
- "press <command key>", release everything.
-
-On SPARC - You press 'ALT-STOP-<command key>', I believe.
-
-On the serial console (PC style standard serial ports only) -
- You send a BREAK, then within 5 seconds a command key. Sending
- BREAK twice is interpreted as a normal BREAK.
-
-On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
- Print Screen (or F13) - <command key> may suffice.
-
-On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
-
-On all - write a character to /proc/sysrq-trigger. e.g.:
-
- echo t > /proc/sysrq-trigger
-
-* What are the 'command' keys?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-'b' - Will immediately reboot the system without syncing or unmounting
- your disks.
-
-'c' - Will perform a system crash by a NULL pointer dereference.
- A crashdump will be taken if configured.
-
-'d' - Shows all locks that are held.
-
-'e' - Send a SIGTERM to all processes, except for init.
-
-'f' - Will call the oom killer to kill a memory hog process, but do not
- panic if nothing can be killed.
-
-'g' - Used by kgdb (kernel debugger)
-
-'h' - Will display help (actually any other key than those listed
- here will display help. but 'h' is easy to remember :-)
-
-'i' - Send a SIGKILL to all processes, except for init.
-
-'j' - Forcibly "Just thaw it" - filesystems frozen by the FIFREEZE ioctl.
-
-'k' - Secure Access Key (SAK) Kills all programs on the current virtual
- console. NOTE: See important comments below in SAK section.
-
-'l' - Shows a stack backtrace for all active CPUs.
-
-'m' - Will dump current memory info to your console.
-
-'n' - Used to make RT tasks nice-able
-
-'o' - Will shut your system off (if configured and supported).
-
-'p' - Will dump the current registers and flags to your console.
-
-'q' - Will dump per CPU lists of all armed hrtimers (but NOT regular
- timer_list timers) and detailed information about all
- clockevent devices.
-
-'r' - Turns off keyboard raw mode and sets it to XLATE.
-
-'s' - Will attempt to sync all mounted filesystems.
-
-'t' - Will dump a list of current tasks and their information to your
- console.
-
-'u' - Will attempt to remount all mounted filesystems read-only.
-
-'v' - Forcefully restores framebuffer console
-'v' - Causes ETM buffer dump [ARM-specific]
-
-'w' - Dumps tasks that are in uninterruptable (blocked) state.
-
-'x' - Used by xmon interface on ppc/powerpc platforms.
- Show global PMU Registers on sparc64.
- Dump all TLB entries on MIPS.
-
-'y' - Show global CPU Registers [SPARC-64 specific]
-
-'z' - Dump the ftrace buffer
-
-'0'-'9' - Sets the console log level, controlling which kernel messages
- will be printed to your console. ('0', for example would make
- it so that only emergency messages like PANICs or OOPSes would
- make it to your console.)
-
-* Okay, so what can I use them for?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Well, unraw(r) is very handy when your X server or a svgalib program crashes.
-
-sak(k) (Secure Access Key) is useful when you want to be sure there is no
-trojan program running at console which could grab your password
-when you would try to login. It will kill all programs on given console,
-thus letting you make sure that the login prompt you see is actually
-the one from init, not some trojan program.
-IMPORTANT: In its true form it is not a true SAK like the one in a :IMPORTANT
-IMPORTANT: c2 compliant system, and it should not be mistaken as :IMPORTANT
-IMPORTANT: such. :IMPORTANT
- It seems others find it useful as (System Attention Key) which is
-useful when you want to exit a program that will not let you switch consoles.
-(For example, X or a svgalib program.)
-
-reboot(b) is good when you're unable to shut down. But you should also
-sync(s) and umount(u) first.
-
-crash(c) can be used to manually trigger a crashdump when the system is hung.
-Note that this just triggers a crash if there is no dump mechanism available.
-
-sync(s) is great when your system is locked up, it allows you to sync your
-disks and will certainly lessen the chance of data loss and fscking. Note
-that the sync hasn't taken place until you see the "OK" and "Done" appear
-on the screen. (If the kernel is really in strife, you may not ever get the
-OK or Done message...)
-
-umount(u) is basically useful in the same ways as sync(s). I generally sync(s),
-umount(u), then reboot(b) when my system locks. It's saved me many a fsck.
-Again, the unmount (remount read-only) hasn't taken place until you see the
-"OK" and "Done" message appear on the screen.
-
-The loglevels '0'-'9' are useful when your console is being flooded with
-kernel messages you do not want to see. Selecting '0' will prevent all but
-the most urgent kernel messages from reaching your console. (They will
-still be logged if syslogd/klogd are alive, though.)
-
-term(e) and kill(i) are useful if you have some sort of runaway process you
-are unable to kill any other way, especially if it's spawning other
-processes.
-
-"just thaw it(j)" is useful if your system becomes unresponsive due to a frozen
-(probably root) filesystem via the FIFREEZE ioctl.
-
-* Sometimes SysRq seems to get 'stuck' after using it, what can I do?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-That happens to me, also. I've found that tapping shift, alt, and control
-on both sides of the keyboard, and hitting an invalid sysrq sequence again
-will fix the problem. (i.e., something like alt-sysrq-z). Switching to another
-virtual console (ALT+Fn) and then back again should also help.
-
-* I hit SysRq, but nothing seems to happen, what's wrong?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There are some keyboards that produce a different keycode for SysRq than the
-pre-defined value of 99 (see KEY_SYSRQ in include/linux/input.h), or which
-don't have a SysRq key at all. In these cases, run 'showkey -s' to find an
-appropriate scancode sequence, and use 'setkeycodes <sequence> 99' to map
-this sequence to the usual SysRq code (e.g., 'setkeycodes e05b 99'). It's
-probably best to put this command in a boot script. Oh, and by the way, you
-exit 'showkey' by not typing anything for ten seconds.
-
-* I want to add SysRQ key events to a module, how does it work?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In order to register a basic function with the table, you must first include
-the header 'include/linux/sysrq.h', this will define everything else you need.
-Next, you must create a sysrq_key_op struct, and populate it with A) the key
-handler function you will use, B) a help_msg string, that will print when SysRQ
-prints help, and C) an action_msg string, that will print right before your
-handler is called. Your handler must conform to the prototype in 'sysrq.h'.
-
-After the sysrq_key_op is created, you can call the kernel function
-register_sysrq_key(int key, struct sysrq_key_op *op_p); this will
-register the operation pointed to by 'op_p' at table key 'key',
-if that slot in the table is blank. At module unload time, you must call
-the function unregister_sysrq_key(int key, struct sysrq_key_op *op_p), which
-will remove the key op pointed to by 'op_p' from the key 'key', if and only if
-it is currently registered in that slot. This is in case the slot has been
-overwritten since you registered it.
-
-The Magic SysRQ system works by registering key operations against a key op
-lookup table, which is defined in 'drivers/tty/sysrq.c'. This key table has
-a number of operations registered into it at compile time, but is mutable,
-and 2 functions are exported for interface to it:
- register_sysrq_key and unregister_sysrq_key.
-Of course, never ever leave an invalid pointer in the table. I.e., when
-your module that called register_sysrq_key() exits, it must call
-unregister_sysrq_key() to clean up the sysrq key table entry that it used.
-Null pointers in the table are always safe. :)
-
-If for some reason you feel the need to call the handle_sysrq function from
-within a function called by handle_sysrq, you must be aware that you are in
-a lock (you are also in an interrupt handler, which means don't sleep!), so
-you must call __handle_sysrq_nolock instead.
-
-* When I hit a SysRq key combination only the header appears on the console?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Sysrq output is subject to the same console loglevel control as all
-other console output. This means that if the kernel was booted 'quiet'
-as is common on distro kernels the output may not appear on the actual
-console, even though it will appear in the dmesg buffer, and be accessible
-via the dmesg command and to the consumers of /proc/kmsg. As a specific
-exception the header line from the sysrq command is passed to all console
-consumers as if the current loglevel was maximum. If only the header
-is emitted it is almost certain that the kernel loglevel is too low.
-Should you require the output on the console channel then you will need
-to temporarily up the console loglevel using alt-sysrq-8 or:
-
- echo 8 > /proc/sysrq-trigger
-
-Remember to return the loglevel to normal after triggering the sysrq
-command you are interested in.
-
-* I have more questions, who can I ask?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Just ask them on the linux-kernel mailing list:
- linux-kernel@vger.kernel.org
-
-* Credits
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Written by Mydraal <vulpyne@vulpyne.net>
-Updated by Adam Sulmicki <adam@cfar.umd.edu>
-Updated by Jeremy M. Dolan <jmd@turbogeek.org> 2001/01/28 10:15:59
-Added to by Crutcher Dunnavant <crutcher+kernel@datastacks.com>
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 185c39fea2a0..5596e2d71d6d 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -362,6 +362,26 @@ of ftrace. Here is a list of some of the key files:
to correlate events across hypervisor/guest if
tb_offset is known.
+ mono: This uses the fast monotonic clock (CLOCK_MONOTONIC)
+ which is monotonic and is subject to NTP rate adjustments.
+
+ mono_raw:
+ This is the raw monotonic clock (CLOCK_MONOTONIC_RAW)
+ which is monotonic but is not subject to any rate adjustments
+ and ticks at the same rate as the hardware clocksource.
+
+ boot: This is the boot clock (CLOCK_BOOTTIME) and is based on the
+ fast monotonic clock, but also accounts for time spent in
+ suspend. Since the clock access is designed for use in
+ tracing in the suspend path, some side effects are possible
+ if clock is accessed after the suspend time is accounted before
+ the fast mono clock is updated. In this case, the clock update
+ appears to happen slightly sooner than it normally would have.
+ Also on 32-bit systems, it's possible that the 64-bit boot offset
+ sees a partial update. These effects are rare and post
+ processing should be able to handle them. See comments in the
+ ktime_get_boot_fast_ns() function for more information.
+
To set a clock, simply echo the clock name into this file.
echo global > trace_clock
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
index f7fc5ba5df8d..f92070e7dde0 100644
--- a/Documentation/trace/intel_th.txt
+++ b/Documentation/trace/intel_th.txt
@@ -97,3 +97,25 @@ $ echo 0 > /sys/bus/intel_th/devices/0-msc0/active
# and now you can collect the trace from the device node:
$ cat /dev/intel_th0/msc0 > my_stp_trace
+
+Host Debugger Mode
+==================
+
+It is possible to configure the Trace Hub and control its trace
+capture from a remote debug host, which should be connected via one of
+the hardware debugging interfaces; that interface is then used both to
+control the Intel Trace Hub and to transfer its trace data to the debug
+host.
+
+The driver needs to be told that such an arrangement is taking place
+so that it does not touch any capture/port configuration and avoids
+conflicting with the debug host's configuration accesses. The only
+activity that the driver will perform in this mode is collecting
+software traces to the Software Trace Hub (an stm class device). The
+user is still responsible for setting up adequate master/channel
+mappings that the decoder on the receiving end would recognize.
+
+In order to enable the host mode, set the 'host_mode' parameter of the
+'intel_th' kernel module to 'y'. None of the virtual output devices
+will show up on the intel_th bus. Also, trace configuration and
+capture controlling attribute groups of the 'gth' device will not be
+exposed. The 'sth' device will operate as usual.
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
index ea035f9dbfd7..11cff47eecce 100644
--- a/Documentation/trace/stm.txt
+++ b/Documentation/trace/stm.txt
@@ -69,12 +69,43 @@ stm device's channel mmio region is 64 bytes and hardware page size is
width==64, you should be able to mmap() one page on this file
descriptor and obtain direct access to an mmio region for 64 channels.
+Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
+[2].
+
+stm_source
+==========
+
For kernel-based trace sources, there is "stm_source" device
class. Devices of this class can be connected and disconnected to/from
-stm devices at runtime via a sysfs attribute.
+stm devices at runtime via a sysfs attribute called "stm_source_link"
+by writing the name of the desired stm device there, for example:
-Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
-[2].
+$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
+
+For examples on how to use stm_source interface in the kernel, refer
+to stm_console or stm_heartbeat drivers.
+
+Each stm_source device will need to assume a master and a range of
+channels, depending on how many channels it requires. These are
+allocated for the device according to the policy configuration. If
+there's a node in the root of the policy directory that matches the
+stm_source device's name (for example, "console"), this node will be
+used to allocate master and channel numbers. If there's no such policy
+node, the stm core will pick the first contiguous chunk of channels
+within the first available master. Note that the node must exist
+before the stm_source device is connected to its stm device.
+
+stm_console
+===========
+
+One implementation of this interface, also used in the example above,
+is the "stm_console" driver, which provides a one-way console for
+kernel messages over an stm device.
+
+To configure the master/channel pair that will be assigned to this
+console in the STP stream, create a "console" policy entry (see the
+beginning of this text on how to do that). When initialized, it will
+consume one channel.
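+
+As a rough sketch (the configfs mount point and the policy directory
+name below are hypothetical; the exact layout and naming rules are
+described at the beginning of this text), setting this up could look
+like:
+
+$ mkdir /config/stp-policy/dummy_stm.0.my-policy
+$ mkdir /config/stp-policy/dummy_stm.0.my-policy/console
+$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
+
+Once the link is established, kernel console output is emitted over
+the stm device on the master/channel allocated for the "console"
+policy node.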
[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
[2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt
index 94b6b4581763..fa7b680ee8a0 100644
--- a/Documentation/trace/uprobetracer.txt
+++ b/Documentation/trace/uprobetracer.txt
@@ -76,15 +76,15 @@ Usage examples
* Add a probe as a new uprobe event, write a new definition to uprobe_events
as below: (sets a uprobe at an offset of 0x4245c0 in the executable /bin/bash)
- echo 'p: /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
+ echo 'p /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
* Add a probe as a new uretprobe event:
- echo 'r: /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
+ echo 'r /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
* Unset registered event:
- echo '-:bash_0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
+ echo '-:p_bash_0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
* Print out the events that are registered:
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/translations/ja_JP/HOWTO
index 581c14bdd7be..b03fc8047f03 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/translations/ja_JP/HOWTO
@@ -127,15 +127,15 @@ linux-api@ver.kernel.org ã«é€ã‚‹ã“ã¨ã‚’勧ã‚ã¾ã™ã€‚
å°é™ã®ãƒ¬ãƒ™ãƒ«ã§å¿…è¦ãªæ•°ã€…ã®ã‚½ãƒ•ãƒˆã‚¦ã‚§ã‚¢ãƒ‘ッケージã®ä¸€è¦§ã‚’示ã—ã¦ã„
ã¾ã™ã€‚
- Documentation/CodingStyle
+ Documentation/process/coding-style.rst
ã“れ㯠Linux カーãƒãƒ«ã®ã‚³ãƒ¼ãƒ‡ã‚£ãƒ³ã‚°ã‚¹ã‚¿ã‚¤ãƒ«ã¨èƒŒæ™¯ã«ã‚ã‚‹ç†ç”±ã‚’記述
ã—ã¦ã„ã¾ã™ã€‚å…¨ã¦ã®æ–°ã—ã„コードã¯ã“ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã«ã‚るガイドライン
ã«å¾“ã£ã¦ã„ã‚‹ã“ã¨ã‚’期待ã•ã‚Œã¦ã„ã¾ã™ã€‚大部分ã®ãƒ¡ãƒ³ãƒ†ãƒŠã¯ã“れらã®ãƒ«ãƒ¼
ルã«å¾“ã£ã¦ã„ã‚‹ã‚‚ã®ã ã‘ã‚’å—ã‘付ã‘ã€å¤šãã®äººã¯æ­£ã—ã„スタイルã®ã‚³ãƒ¼ãƒ‰
ã ã‘をレビューã—ã¾ã™ã€‚
- Documentation/SubmittingPatches
- Documentation/SubmittingDrivers
+ Documentation/process/submitting-patches.rst
+ Documentation/process/submitting-drivers.rst
ã“れらã®ãƒ•ã‚¡ã‚¤ãƒ«ã«ã¯ã€ã©ã†ã‚„ã£ã¦ã†ã¾ãパッãƒã‚’作ã£ã¦æŠ•ç¨¿ã™ã‚‹ã‹ã«
ã¤ã„ã¦éžå¸¸ã«è©³ã—ã書ã‹ã‚Œã¦ãŠã‚Šã€ä»¥ä¸‹ã‚’å«ã¿ã¾ã™(ã“ã‚Œã ã‘ã«é™ã‚‰ãªã„
ã‘ã‚Œã©ã‚‚)
@@ -153,7 +153,7 @@ linux-api@ver.kernel.org ã«é€ã‚‹ã“ã¨ã‚’勧ã‚ã¾ã™ã€‚
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
- Documentation/stable_api_nonsense.txt
+ Documentation/process/stable-api-nonsense.rst
ã“ã®ãƒ•ã‚¡ã‚¤ãƒ«ã¯ã‚«ãƒ¼ãƒãƒ«ã®ä¸­ã«ä¸å¤‰ã®APIã‚’æŒãŸãªã„ã“ã¨ã«ã—ãŸæ„識的ãª
決断ã®èƒŒæ™¯ã«ã‚ã‚‹ç†ç”±ã«ã¤ã„ã¦æ›¸ã‹ã‚Œã¦ã„ã¾ã™ã€‚以下ã®ã‚ˆã†ãªã“ã¨ã‚’å«
ã‚“ã§ã„ã¾ã™-
@@ -164,29 +164,29 @@ linux-api@ver.kernel.org ã«é€ã‚‹ã“ã¨ã‚’勧ã‚ã¾ã™ã€‚
ã“ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã¯ Linux 開発ã®æ€æƒ³ã‚’ç†è§£ã™ã‚‹ã®ã«éžå¸¸ã«é‡è¦ã§ã™ã€‚
ãã—ã¦ã€ä»–ã®OSã§ã®é–‹ç™ºè€…㌠Linux ã«ç§»ã‚‹æ™‚ã«ã¨ã¦ã‚‚é‡è¦ã§ã™ã€‚
- Documentation/SecurityBugs
+ Documentation/admin-guide/security-bugs.rst
ã‚‚ã— Linux カーãƒãƒ«ã§ã‚»ã‚­ãƒ¥ãƒªãƒ†ã‚£å•é¡Œã‚’発見ã—ãŸã‚ˆã†ã«æ€ã£ãŸã‚‰ã€ã“
ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã®ã‚¹ãƒ†ãƒƒãƒ—ã«å¾“ã£ã¦ã‚«ãƒ¼ãƒãƒ«é–‹ç™ºè€…ã«é€£çµ¡ã—ã€å•é¡Œè§£æ±ºã‚’
支æ´ã—ã¦ãã ã•ã„。
- Documentation/ManagementStyle
+ Documentation/process/management-style.rst
ã“ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã¯ Linux カーãƒãƒ«ã®ãƒ¡ãƒ³ãƒ†ãƒŠé”ãŒã©ã†è¡Œå‹•ã™ã‚‹ã‹ã€
彼らã®æ‰‹æ³•ã®èƒŒæ™¯ã«ã‚る共有ã•ã‚Œã¦ã„る精神ã«ã¤ã„ã¦è¨˜è¿°ã—ã¦ã„ã¾ã™ã€‚ã“
ã‚Œã¯ã‚«ãƒ¼ãƒãƒ«é–‹ç™ºã®åˆå¿ƒè€…ãªã‚‰ï¼ˆã‚‚ã—ãã¯ã€å˜ã«èˆˆå‘³ãŒã‚ã‚‹ã ã‘ã®äººã§ã‚‚)
é‡è¦ã§ã™ã€‚ãªãœãªã‚‰ã“ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã¯ã€ã‚«ãƒ¼ãƒãƒ«ãƒ¡ãƒ³ãƒ†ãƒŠé”ã®ç‹¬ç‰¹ãª
行動ã«ã¤ã„ã¦ã®å¤šãã®èª¤è§£ã‚„混乱を解消ã™ã‚‹ã‹ã‚‰ã§ã™ã€‚
- Documentation/stable_kernel_rules.txt
+ Documentation/process/stable-kernel-rules.rst
ã“ã®ãƒ•ã‚¡ã‚¤ãƒ«ã¯ã©ã®ã‚ˆã†ã« stable カーãƒãƒ«ã®ãƒªãƒªãƒ¼ã‚¹ãŒè¡Œã‚れるã‹ã®ãƒ«ãƒ¼
ルãŒè¨˜è¿°ã•ã‚Œã¦ã„ã¾ã™ã€‚ãã—ã¦ã“れらã®ãƒªãƒªãƒ¼ã‚¹ã®ä¸­ã®ã©ã“ã‹ã§å¤‰æ›´ã‚’å–
り入れã¦ã‚‚らã„ãŸã„å ´åˆã«ä½•ã‚’ã™ã‚Œã°è‰¯ã„ã‹ãŒç¤ºã•ã‚Œã¦ã„ã¾ã™ã€‚
- Documentation/kernel-docs.txt
+ Documentation/process/kernel-docs.rst
  カーãƒãƒ«é–‹ç™ºã«ä»˜éšã™ã‚‹å¤–部ドキュメントã®ãƒªã‚¹ãƒˆã§ã™ã€‚ã‚‚ã—ã‚ãªãŸãŒ
探ã—ã¦ã„ã‚‹ã‚‚ã®ãŒã‚«ãƒ¼ãƒãƒ«å†…ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã§ã¿ã¤ã‹ã‚‰ãªã‹ã£ãŸå ´åˆã€
ã“ã®ãƒªã‚¹ãƒˆã‚’ã‚ãŸã£ã¦ã¿ã¦ãã ã•ã„。
- Documentation/applying-patches.txt
+ Documentation/process/applying-patches.rst
パッãƒã¨ã¯ãªã«ã‹ã€ãƒ‘ッãƒã‚’ã©ã†ã‚„ã£ã¦æ§˜ã€…ãªã‚«ãƒ¼ãƒãƒ«ã®é–‹ç™ºãƒ–ランãƒã«
é©ç”¨ã™ã‚‹ã®ã‹ã«ã¤ã„ã¦æ­£ç¢ºã«è¨˜è¿°ã—ãŸè‰¯ã„入門書ã§ã™ã€‚
@@ -314,7 +314,7 @@ Andrew Morton ㌠Linux-kernel メーリングリストã«ã‚«ãƒ¼ãƒãƒ«ãƒªãƒªãƒ¼ã
ãŸå•é¡ŒãŒãªã‘ã‚Œã°ã‚‚ã†å°‘ã—é•·ããªã‚‹ã“ã¨ã‚‚ã‚ã‚Šã¾ã™ã€‚セキュリティ関連ã®å•é¡Œ
ã®å ´åˆã¯ã“ã‚Œã«å¯¾ã—ã¦ã ã„ãŸã„ã®å ´åˆã€ã™ãã«ãƒªãƒªãƒ¼ã‚¹ãŒã•ã‚Œã¾ã™ã€‚
-カーãƒãƒ«ãƒ„リーã«å…¥ã£ã¦ã„ã‚‹ã€Documentation/stable_kernel_rules.txt ファ
+カーãƒãƒ«ãƒ„リーã«å…¥ã£ã¦ã„ã‚‹ã€Documentation/process/stable-kernel-rules.rst ファ
イルã«ã¯ã©ã®ã‚ˆã†ãªç¨®é¡žã®å¤‰æ›´ãŒ -stable ツリーã«å—ã‘入れå¯èƒ½ã‹ã€ã¾ãŸãƒª
リースプロセスãŒã©ã†å‹•ãã‹ãŒè¨˜è¿°ã•ã‚Œã¦ã„ã¾ã™ã€‚
@@ -372,7 +372,7 @@ bugzilla.kernel.org 㯠Linux カーãƒãƒ«é–‹ç™ºè€…ãŒã‚«ãƒ¼ãƒãƒ«ã®ãƒã‚°ã‚’è¿
場所ã§ã™ã€‚ユーザã¯è¦‹ã¤ã‘ãŸãƒã‚°ã®å…¨ã¦ã‚’ã“ã®ãƒ„ールã§å ±å‘Šã™ã¹ãã§ã™ã€‚
ã©ã† kernel bugzilla を使ã†ã‹ã®è©³ç´°ã¯ã€ä»¥ä¸‹ã‚’å‚ç…§ã—ã¦ãã ã•ã„-
http://bugzilla.kernel.org/page.cgi?id=faq.html
-メインカーãƒãƒ«ã‚½ãƒ¼ã‚¹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ã‚るファイル REPORTING-BUGS ã¯ã‚«ãƒ¼ãƒ
+メインカーãƒãƒ«ã‚½ãƒ¼ã‚¹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ã‚るファイル admin-guide/reporting-bugs.rst ã¯ã‚«ãƒ¼ãƒ
ルãƒã‚°ã‚‰ã—ã„ã‚‚ã®ã«ã¤ã„ã¦ã©ã†ãƒ¬ãƒãƒ¼ãƒˆã™ã‚‹ã‹ã®è‰¯ã„テンプレートã§ã‚ã‚Šã€å•
é¡Œã®è¿½è·¡ã‚’助ã‘ã‚‹ãŸã‚ã«ã‚«ãƒ¼ãƒãƒ«é–‹ç™ºè€…ã«ã¨ã£ã¦ã©ã‚“ãªæƒ…å ±ãŒå¿…è¦ãªã®ã‹ã®è©³
ç´°ãŒæ›¸ã‹ã‚Œã¦ã„ã¾ã™ã€‚
@@ -438,7 +438,7 @@ MAINTAINERS ファイルã«ãƒªã‚¹ãƒˆãŒã‚ã‚Šã¾ã™ã®ã§å‚ç…§ã—ã¦ãã ã•ã
メールã®å…ˆé ­ã§ãªãã€å„引用行ã®é–“ã«ã‚ãªãŸã®è¨€ã„ãŸã„ã“ã¨ã‚’追加ã™ã‚‹ã¹ãã§
ã™ã€‚
-ã‚‚ã—パッãƒã‚’メールã«ä»˜ã‘ã‚‹å ´åˆã¯ã€Documentation/SubmittingPatches ã«æ
+ã‚‚ã—パッãƒã‚’メールã«ä»˜ã‘ã‚‹å ´åˆã¯ã€Documentation/process/submitting-patches.rst ã«æ
示ã•ã‚Œã¦ã„るよã†ã«ã€ãれ㯠プレーンãªå¯èª­ãƒ†ã‚­ã‚¹ãƒˆã«ã™ã‚‹ã“ã¨ã‚’忘れãªã„
よã†ã«ã—ã¾ã—ょã†ã€‚カーãƒãƒ«é–‹ç™ºè€…㯠添付や圧縮ã—ãŸãƒ‘ッãƒã‚’扱ã„ãŸãŒã‚Šã¾
ã›ã‚“-
diff --git a/Documentation/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
index cb5507b1ac81..60c7c35ac517 100644
--- a/Documentation/ja_JP/SubmitChecklist
+++ b/Documentation/translations/ja_JP/SubmitChecklist
@@ -1,5 +1,5 @@
NOTE:
-This is a version of Documentation/SubmitChecklist into Japanese.
+This is a version of Documentation/process/submit-checklist.rst into Japanese.
This document is maintained by Takenori Nagano <t-nagano@ah.jp.nec.com>
and the JF Project team <http://www.linux.or.jp/JF/>.
If you find any difference between this document and the original file
@@ -14,7 +14,7 @@ to update the original English file first.
Last Updated: 2008/07/14
==================================
ã“ã‚Œã¯ã€
-linux-2.6.26/Documentation/SubmitChecklist ã®å’Œè¨³ã§ã™ã€‚
+linux-2.6.26/Documentation/process/submit-checklist.rst ã®å’Œè¨³ã§ã™ã€‚
翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
翻訳日: 2008/07/14
@@ -27,7 +27,7 @@ Linux カーãƒãƒ«ãƒ‘ッãƒæŠ•ç¨¿è€…å‘ã‘ãƒã‚§ãƒƒã‚¯ãƒªã‚¹ãƒˆ
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
本書ã§ã¯ã€ãƒ‘ッãƒã‚’より素早ãå–り込んã§ã‚‚らã„ãŸã„開発者ãŒå®Ÿè·µã™ã¹ã基本的ãªäº‹æŸ„
-ã‚’ã„ãã¤ã‹ç´¹ä»‹ã—ã¾ã™ã€‚ã“ã“ã«ã‚ã‚‹å…¨ã¦ã®äº‹æŸ„ã¯ã€Documentation/SubmittingPatches
+ã‚’ã„ãã¤ã‹ç´¹ä»‹ã—ã¾ã™ã€‚ã“ã“ã«ã‚ã‚‹å…¨ã¦ã®äº‹æŸ„ã¯ã€Documentation/process/submitting-patches.rst
ãªã©ã®Linuxカーãƒãƒ«ãƒ‘ッãƒæŠ•ç¨¿ã«éš›ã—ã¦ã®å¿ƒå¾—を補足ã™ã‚‹ã‚‚ã®ã§ã™ã€‚
1: 妥当ãªCONFIGオプションや変更ã•ã‚ŒãŸCONFIGオプションã€ã¤ã¾ã‚Š =y, =m, =n
@@ -84,7 +84,7 @@ Linux カーãƒãƒ«ãƒ‘ッãƒæŠ•ç¨¿è€…å‘ã‘ãƒã‚§ãƒƒã‚¯ãƒªã‚¹ãƒˆ
å¿…ãšãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã‚’追加ã—ã¦ãã ã•ã„。
17: æ–°ã—ã„ブートパラメータを追加ã—ãŸå ´åˆã«ã¯ã€
- å¿…ãšDocumentation/kernel-parameters.txt ã«èª¬æ˜Žã‚’追加ã—ã¦ãã ã•ã„。
+ å¿…ãšDocumentation/admin-guide/kernel-parameters.rst ã«èª¬æ˜Žã‚’追加ã—ã¦ãã ã•ã„。
18: æ–°ã—ãmoduleã«ãƒ‘ラメータを追加ã—ãŸå ´åˆã«ã¯ã€MODULE_PARM_DESC()ã‚’
利用ã—ã¦å¿…ãšãã®èª¬æ˜Žã‚’記述ã—ã¦ãã ã•ã„。
diff --git a/Documentation/ja_JP/SubmittingPatches b/Documentation/translations/ja_JP/SubmittingPatches
index 5d6ae639bfa0..02139656463e 100644
--- a/Documentation/ja_JP/SubmittingPatches
+++ b/Documentation/translations/ja_JP/SubmittingPatches
@@ -1,5 +1,5 @@
NOTE:
-This is a version of Documentation/SubmittingPatches into Japanese.
+This is a version of Documentation/process/submitting-patches.rst into Japanese.
This document is maintained by Keiichi KII <k-keiichi@bx.jp.nec.com>
and the JF Project team <http://www.linux.or.jp/JF/>.
If you find any difference between this document and the original file
@@ -15,7 +15,7 @@ Last Updated: 2011/06/09
==================================
ã“ã‚Œã¯ã€
-linux-2.6.39/Documentation/SubmittingPatches ã®å’Œè¨³
+linux-2.6.39/Documentation/process/submitting-patches.rst ã®å’Œè¨³
ã§ã™ã€‚
翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
翻訳日: 2011/06/09
@@ -34,9 +34,9 @@ Linux カーãƒãƒ«ã«å¤‰æ›´ã‚’加ãˆãŸã„ã¨æ€ã£ã¦ã„る個人åˆã¯ä¼šç¤¾ã
ãŠã˜ã‘ã¥ã‹ã›ã‚‹ã“ã¨ã‚‚ã‚ã‚Šã¾ã™ã€‚ã“ã®æ–‡ç« ã¯ã‚ãªãŸã®å¤‰æ›´ã‚’大ã„ã«å—ã‘入れ
ã¦ã‚‚らãˆã‚„ã™ãã™ã‚‹æ案を集ã‚ãŸã‚‚ã®ã§ã™ã€‚
-コードを投稿ã™ã‚‹å‰ã«ã€Documentation/SubmitChecklist ã®é …目リストã«ç›®
+コードを投稿ã™ã‚‹å‰ã«ã€Documentation/process/submit-checklist.rst ã®é …目リストã«ç›®
を通ã—ã¦ãƒã‚§ãƒƒã‚¯ã—ã¦ãã ã•ã„。もã—ã‚ãªãŸãŒãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã‚’投稿ã—よã†ã¨ã—
-ã¦ã„ã‚‹ãªã‚‰ã€Documentation/SubmittingDrivers ã«ã‚‚目を通ã—ã¦ãã ã•ã„。
+ã¦ã„ã‚‹ãªã‚‰ã€Documentation/process/submitting-drivers.rst ã«ã‚‚目を通ã—ã¦ãã ã•ã„。
--------------------------------------------
セクション1 パッãƒã®ä½œã‚Šæ–¹ã¨é€ã‚Šæ–¹
@@ -148,7 +148,7 @@ http://savannah.nongnu.org/projects/quilt
4) パッãƒã®ã‚¹ã‚¿ã‚¤ãƒ«ãƒã‚§ãƒƒã‚¯
ã‚ãªãŸã®ãƒ‘ッãƒãŒåŸºæœ¬çš„ãª( Linux カーãƒãƒ«ã®)コーディングスタイルã«é•åã—
-ã¦ã„ãªã„ã‹ã‚’ãƒã‚§ãƒƒã‚¯ã—ã¦ä¸‹ã•ã„。ãã®è©³ç´°ã‚’ Documentation/CodingStyle ã§
+ã¦ã„ãªã„ã‹ã‚’ãƒã‚§ãƒƒã‚¯ã—ã¦ä¸‹ã•ã„。ãã®è©³ç´°ã‚’ Documentation/process/coding-style.rst ã§
見ã¤ã‘ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚コーディングスタイルã®é•åã¯ãƒ¬ãƒ“ューã™ã‚‹äººã®
時間を無駄ã«ã™ã‚‹ã ã‘ãªã®ã§ã€æらãã‚ãªãŸã®ãƒ‘ッãƒã¯èª­ã¾ã‚Œã‚‹ã“ã¨ã™ã‚‰ãªã
æ‹’å¦ã•ã‚Œã‚‹ã§ã—ょã†ã€‚
@@ -246,7 +246,7 @@ MIME å½¢å¼ã®æ·»ä»˜ãƒ•ã‚¡ã‚¤ãƒ«ã¯ Linus ã«æ‰‹é–“ã‚’å–らã›ã‚‹ã“ã¨ã«ãªã‚
ã‚ã‚Œã°ã€èª°ã‹ãŒ MIME å½¢å¼ã®ãƒ‘ッãƒã‚’å†é€ã™ã‚‹ã‚ˆã†æ±‚ã‚ã‚‹ã‹ã‚‚ã—ã‚Œã¾ã›ã‚“。
余計ãªå¤‰æ›´ã‚’加ãˆãšã«ã‚ãªãŸã®ãƒ‘ッãƒã‚’é€ä¿¡ã™ã‚‹ãŸã‚ã®é›»å­ãƒ¡ãƒ¼ãƒ«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆã®è¨­å®š
-ã®ãƒ’ントã«ã¤ã„ã¦ã¯ Documentation/email-clients.txt ã‚’å‚ç…§ã—ã¦ãã ã•ã„。
+ã®ãƒ’ントã«ã¤ã„ã¦ã¯ Documentation/process/email-clients.rst ã‚’å‚ç…§ã—ã¦ãã ã•ã„。
8) é›»å­ãƒ¡ãƒ¼ãƒ«ã®ã‚µã‚¤ã‚º
@@ -609,7 +609,7 @@ diffstat ã®çµæžœã‚’生æˆã™ã‚‹ãŸã‚ã«ã€Œ git diff -M --stat --summary ã€ã‚
ã—例外をé©ç”¨ã™ã‚‹ã«ã¯ã€æœ¬å½“ã«å¦¥å½“ãªç†ç”±ãŒä¸å¯æ¬ ã§ã™ã€‚ã‚ãªãŸã¯æらãã“ã®
セクションを Linus ã®ã‚³ãƒ³ãƒ”ュータ・サイエンス101ã¨å‘¼ã¶ã§ã—ょã†ã€‚
-1) Documentation/CodingStyleã‚’å‚ç…§
+1) Documentation/process/coding-style.rstã‚’å‚ç…§
言ã†ã¾ã§ã‚‚ãªãã€ã‚ãªãŸã®ã‚³ãƒ¼ãƒ‰ãŒã“ã®ã‚³ãƒ¼ãƒ‡ã‚£ãƒ³ã‚°ã‚¹ã‚¿ã‚¤ãƒ«ã‹ã‚‰ã‚ã¾ã‚Šã«
も逸脱ã—ã¦ã„ã‚‹ã¨ã€ãƒ¬ãƒ“ューやコメントãªã—ã«å—ã‘å–ã£ã¦ã‚‚らãˆãªã„ã‹ã‚‚ã—
@@ -704,8 +704,8 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<https://lkml.org/lkml/2005/7/11/336>
-Kernel Documentation/CodingStyle:
- <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle>
+Kernel Documentation/process/coding-style.rst:
+ <http://users.sosdg.org/~qiyong/lxr/source/Documentation/process/coding-style.rst>
Linus Torvalds's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
diff --git a/Documentation/ja_JP/stable_api_nonsense.txt b/Documentation/translations/ja_JP/stable_api_nonsense.txt
index 7653b5cbfed2..a3b40a4bdcfd 100644
--- a/Documentation/ja_JP/stable_api_nonsense.txt
+++ b/Documentation/translations/ja_JP/stable_api_nonsense.txt
@@ -1,5 +1,5 @@
NOTE:
-This is a version of Documentation/stable_api_nonsense.txt into Japanese.
+This is a version of Documentation/process/stable-api-nonsense.rst into Japanese.
This document is maintained by IKEDA, Munehiro <m-ikeda@ds.jp.nec.com>
and the JF Project team <http://www.linux.or.jp/JF/>.
If you find any difference between this document and the original file
@@ -14,7 +14,7 @@ to update the original English file first.
Last Updated: 2007/07/18
==================================
ã“ã‚Œã¯ã€
-linux-2.6.22-rc4/Documentation/stable_api_nonsense.txt ã®å’Œè¨³
+linux-2.6.22-rc4/Documentation/process/stable-api-nonsense.rst ã®å’Œè¨³
ã§ã™ã€‚
翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
翻訳日 : 2007/06/11
diff --git a/Documentation/ja_JP/stable_kernel_rules.txt b/Documentation/translations/ja_JP/stable_kernel_rules.txt
index 9dbda9b5d21e..f9249aecba64 100644
--- a/Documentation/ja_JP/stable_kernel_rules.txt
+++ b/Documentation/translations/ja_JP/stable_kernel_rules.txt
@@ -1,5 +1,5 @@
NOTE:
-This is Japanese translated version of "Documentation/stable_kernel_rules.txt".
+This is Japanese translated version of "Documentation/process/stable-kernel-rules.rst".
This one is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
and JF Project team <www.linux.or.jp/JF>.
If you find difference with original file or problem in translation,
@@ -12,7 +12,7 @@ file at first.
==================================
ã“ã‚Œã¯ã€
-linux-2.6.29/Documentation/stable_kernel_rules.txt
+linux-2.6.29/Documentation/process/stable-kernel-rules.rst
ã®å’Œè¨³ã§ã™ã€‚
翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
@@ -43,7 +43,7 @@ linux-2.6.29/Documentation/stable_kernel_rules.txt
"ç†è«–çš„ã«ã¯ç«¶åˆçŠ¶æ…‹ã«ãªã‚‹"よã†ãªã‚‚ã®ã¯ä¸å¯ã€‚
- ã„ã‹ãªã‚‹äº›ç´°ãªä¿®æ­£ã‚‚å«ã‚ã‚‹ã“ã¨ã¯ã§ããªã„。(スペルã®ä¿®æ­£ã€ç©ºç™½ã®ã‚¯ãƒªãƒ¼
ンアップãªã©)
- - Documentation/SubmittingPatches ã®è¦å‰‡ã«å¾“ã£ãŸã‚‚ã®ã§ãªã‘ã‚Œã°ãªã‚‰ãªã„。
+ - Documentation/process/submitting-patches.rst ã®è¦å‰‡ã«å¾“ã£ãŸã‚‚ã®ã§ãªã‘ã‚Œã°ãªã‚‰ãªã„。
- パッãƒè‡ªä½“ã‹åŒç­‰ã®ä¿®æ­£ãŒ Linus ã®ãƒ„リーã«æ—¢ã«å­˜åœ¨ã—ãªã‘ã‚Œã°ãªã‚‰ãªã„。
  Linus ã®ãƒ„リーã§ã®ã‚³ãƒŸãƒƒãƒˆID ã‚’ -stable ã¸ã®ãƒ‘ッãƒæŠ•ç¨¿ã®éš›ã«å¼•ç”¨ã™
ã‚‹ã“ã¨ã€‚
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/translations/ko_KR/howto.rst
index 9a3e65924d54..3b0c15b277e0 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -1,5 +1,5 @@
NOTE:
-This is a version of Documentation/HOWTO translated into korean
+This is a version of Documentation/process/howto.rst translated into korean
This document is maintained by Minchan Kim <minchan@kernel.org>
If you find any difference between this document and the original file or
a problem with the translation, please contact the maintainer of this file.
@@ -9,17 +9,20 @@ read for non English (read: korean) speakers and is not intended as
a fork. So if you have any comments or updates for this file please
try to update the original English file first.
-==================================
+----------------------------------
+
ì´ ë¬¸ì„œëŠ”
-Documentation/HOWTO
+Documentation/process/howto.rst
ì˜ í•œê¸€ 번역입니다.
ì—­ìžï¼š 김민찬 <minchan@kernel.org>
ê°ìˆ˜ï¼š ì´ì œì´ë¯¸ <jamee.lee@samsung.com>
-==================================
+
+----------------------------------
+
어떻게 리눅스 ì»¤ë„ ê°œë°œì„ í•˜ëŠ”ê°€
----------------------------------
+================================
ì´ ë¬¸ì„œëŠ” ì»¤ë„ ê°œë°œì— ìžˆì–´ 가장 중요한 문서ì´ë‹¤. ì´ ë¬¸ì„œëŠ”
리눅스 ì»¤ë„ ê°œë°œìžê°€ ë˜ëŠ” 법과 리눅스 ì»¤ë„ ê°œë°œ 커뮤니티와 ì¼í•˜ëŠ”
@@ -46,6 +49,7 @@ Documentation/HOWTO
어셈블리(특정 아키í…ì³)는 잘 알아야 í•  필요는 없다.
다ìŒì˜ 참고서ì ë“¤ì€ ê¸°ë³¸ì— ì¶©ì‹¤í•œ C êµìœ¡ì´ë‚˜ ìˆ˜ë…„ê°„ì˜ ê²½í—˜ì— ê²¬ì£¼ì§€ëŠ”
못하지만 ì ì–´ë„ 참고 ìš©ë„로는 ì¢‹ì„ ê²ƒì´ë‹¤
+
- "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
- "Practical C Programming" by Steve Oualline [O'Reilly]
- "C: A Reference Manual" by Harbison and Steele [Prentice Hall]
@@ -79,7 +83,8 @@ Documentation/HOWTO
ê·¸ë“¤ì˜ ë§ì— ì˜ì§€í•´ì„œëŠ” 안ëœë‹¤.
GPLì— ê´€í•œ ìž¦ì€ ì§ˆë¬¸ë“¤ê³¼ ë‹µë³€ë“¤ì€ ë‹¤ìŒì„ 참조하ë¼.
- http://www.gnu.org/licenses/gpl-faq.html
+
+ https://www.gnu.org/licenses/gpl-faq.html
문서
@@ -93,55 +98,61 @@ GPLì— ê´€í•œ ìž¦ì€ ì§ˆë¬¸ë“¤ê³¼ ë‹µë³€ë“¤ì€ ë‹¤ìŒì„ 참조하ë¼.
mtk.manpages@gmail.comì˜ ë©”ì¸í…Œì´ë„ˆì—게 보낼 ê²ƒì„ ê¶Œìž¥í•œë‹¤.
다ìŒì€ ì»¤ë„ ì†ŒìŠ¤ íŠ¸ë¦¬ì— ìžˆëŠ” ì½ì–´ì•¼ í•  파ì¼ë“¤ì˜ 리스트ì´ë‹¤.
+
README
ì´ íŒŒì¼ì€ 리눅스 커ë„ì— ê´€í•˜ì—¬ 간단한 ë°°ê²½ 설명과 커ë„ì„ ì„¤ì •í•˜ê³ 
빌드하기 위해 필요한 ê²ƒì„ ì„¤ëª…í•œë‹¤. 커ë„ì— ìž…ë¬¸í•˜ëŠ” ì‚¬ëžŒë“¤ì€ ì—¬ê¸°ì„œ
시작해야 한다.
- Documentation/Changes
+ :ref:`Documentation/process/changes.rst <changes>`
ì´ íŒŒì¼ì€ 커ë„ì„ ì„±ê³µì ìœ¼ë¡œ 빌드하고 실행시키기 위해 필요한 다양한
소프트웨어 íŒ¨í‚¤ì§€ë“¤ì˜ ìµœì†Œ ë²„ì ¼ì„ ë‚˜ì—´í•œë‹¤.
- Documentation/CodingStyle
+ :ref:`Documentation/process/coding-style.rst <codingstyle>`
ì´ ë¬¸ì„œëŠ” 리눅스 ì»¤ë„ ì½”ë”© 스타ì¼ê³¼ 그렇게 í•œ 몇몇 ì´ìœ ë¥¼ 설명한다.
모든 새로운 코드는 ì´ ë¬¸ì„œì— ê°€ì´ë“œë¼ì¸ë“¤ì„ ë”°ë¼ì•¼ 한다. 대부분ì˜
ë©”ì¸í…Œì´ë„ˆë“¤ì€ ì´ ê·œì¹™ì„ ë”°ë¥´ëŠ” íŒ¨ì¹˜ë“¤ë§Œì„ ë°›ì•„ë“¤ì¼ ê²ƒì´ê³  ë§Žì€ ì‚¬ëžŒë“¤ì´
ê·¸ 패치가 올바른 스타ì¼ì¼ 경우만 코드를 검토할 것ì´ë‹¤.
- Documentation/SubmittingPatches
- Documentation/SubmittingDrivers
+ :ref:`Documentation/process/submitting-patches.rst <submittingpatches>` 와 :ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
ì´ íŒŒì¼ë“¤ì€ 성공ì ìœ¼ë¡œ 패치를 만들고 보내는 ë²•ì„ ë‹¤ìŒì˜ 내용들로
굉장히 ìƒì„¸ížˆ 설명하고 있다(그러나 다ìŒìœ¼ë¡œ 한정ë˜ì§„ 않는다).
+
- Email 내용들
- Email ì–‘ì‹
- ê·¸ê²ƒì„ ëˆ„êµ¬ì—게 보낼지
+
ì´ëŸ¬í•œ ê·œì¹™ë“¤ì„ ë”°ë¥´ëŠ” ê²ƒì´ ì„±ê³µ(ì—­ìžì£¼: 패치가 받아들여 지는 것)ì„
보장하진 않는다(왜ëƒí•˜ë©´ 모든 íŒ¨ì¹˜ë“¤ì€ ë‚´ìš©ê³¼ 스타ì¼ì— 관하여
면밀히 검토ë˜ê¸° 때문ì´ë‹¤). 그러나 ê·œì¹™ì„ ë”°ë¥´ì§€ 않는다면 ê±°ì˜
ì„±ê³µí•˜ì§€ë„ ëª»í•  것ì´ë‹¤.
올바른 íŒ¨ì¹˜ë“¤ì„ ë§Œë“œëŠ” ë²•ì— ê´€í•œ 훌륭한 다른 ë¬¸ì„œë“¤ì´ ìžˆë‹¤.
+
"The Perfect Patch"
- http://www.ozlabs.org/~akpm/stuff/tpp.txt
+ https://www.ozlabs.org/~akpm/stuff/tpp.txt
+
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
- Documentation/stable_api_nonsense.txt
+ :ref:`Documentation/process/stable-api-nonsense.rst <stable_api_nonsense>`
ì´ ë¬¸ì„œëŠ” ì˜ë„ì ìœ¼ë¡œ 커ë„ì´ ë¶ˆë³€í•˜ëŠ” API를 갖지 ì•Šë„ë¡ ê²°ì •í•œ
ì´ìœ ë¥¼ 설명하며 다ìŒê³¼ ê°™ì€ ê²ƒë“¤ì„ í¬í•¨í•œë‹¤.
+
- 서브시스템 shim-layer(í˜¸í™˜ì„±ì„ ìœ„í•´?)
- ìš´ì˜ì²´ì œë“¤ê°„ì˜ ë“œë¼ì´ë²„ ì´ì‹ì„±
- ì»¤ë„ ì†ŒìŠ¤ íŠ¸ë¦¬ë‚´ì— ë¹ ë¥¸ 변화를 늦추는 것(ë˜ëŠ” 빠른 변화를 막는 것)
+
ì´ ë¬¸ì„œëŠ” 리눅스 개발 ì² í•™ì„ ì´í•´í•˜ëŠ”ë° í•„ìˆ˜ì ì´ë©° 다른 ìš´ì˜ì²´ì œì—ì„œ
리눅스로 전향하는 사람들ì—게는 매우 중요하다.
- Documentation/SecurityBugs
+ :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
ì—¬ëŸ¬ë¶„ë“¤ì´ ë¦¬ëˆ…ìŠ¤ 커ë„ì˜ ë³´ì•ˆ 문제를 발견했다고 ìƒê°í•œë‹¤ë©´ ì´ ë¬¸ì„œì—
나온 ë‹¨ê³„ì— ë”°ë¼ì„œ ì»¤ë„ ê°œë°œìžë“¤ì—게 알리고 ê·¸ 문제를 í•´ê²°í•  수 있ë„ë¡
ë„와 달ë¼.
- Documentation/ManagementStyle
+ :ref:`Documentation/process/management-style.rst <managementstyle>`
ì´ ë¬¸ì„œëŠ” 리눅스 ì»¤ë„ ë©”ì¸í…Œì´ë„ˆë“¤ì´ ê·¸ë“¤ì˜ ë°©ë²•ë¡ ì— ë…¹ì•„ 있는
ì •ì‹ ì„ ì–´ë–»ê²Œ 공유하고 ìš´ì˜í•˜ëŠ”지를 설명한다. ì´ê²ƒì€ ì»¤ë„ ê°œë°œì— ìž…ë¬¸í•˜ëŠ”
모든 사람들(ë˜ëŠ” ì»¤ë„ ê°œë°œì— ìž‘ì€ í˜¸ê¸°ì‹¬ì´ë¼ë„ 있는 사람들)ì´
@@ -149,38 +160,52 @@ mtk.manpages@gmail.comì˜ ë©”ì¸í…Œì´ë„ˆì—게 보낼 ê²ƒì„ ê¶Œìž¥í•œë‹¤.
ë…특한 í–‰ë™ì— 관하여 í”히 있는 오해들과 í˜¼ëž€ë“¤ì„ í•´ì†Œí•˜ê³  있기
때문ì´ë‹¤.
- Documentation/stable_kernel_rules.txt
+ :ref:`Documentation/process/stable_kernel_rules.rst <stable_kernel_rules>`
ì´ ë¬¸ì„œëŠ” 안정ì ì¸ ì»¤ë„ ë°°í¬ê°€ ì´ë£¨ì–´ì§€ëŠ” ê·œì¹™ì„ ì„¤ëª…í•˜ê³  있으며
ì—¬ëŸ¬ë¶„ë“¤ì´ ì´ëŸ¬í•œ ë°°í¬ë“¤ 중 í•˜ë‚˜ì— ë³€ê²½ì„ í•˜ê¸¸ ì›í•œë‹¤ë©´
ë¬´ì—‡ì„ í•´ì•¼ 하는지를 설명한다.
- Documentation/kernel-docs.txt
+ :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
ì»¤ë„ ê°œë°œì— ê´€ê³„ëœ ì™¸ë¶€ ë¬¸ì„œì˜ ë¦¬ìŠ¤íŠ¸ì´ë‹¤. ì»¤ë„ ë‚´ì˜ í¬í•¨ëœ 문서들
ì¤‘ì— ì—¬ëŸ¬ë¶„ì´ ì°¾ê³  ì‹¶ì€ ë¬¸ì„œë¥¼ 발견하지 못할 경우 ì´ ë¦¬ìŠ¤íŠ¸ë¥¼
살펴보ë¼.
- Documentation/applying-patches.txt
+ :ref:`Documentation/process/applying-patches.rst <applying_patches>`
패치가 무엇ì´ë©° ê·¸ê²ƒì„ ì»¤ë„ì˜ ë‹¤ë¥¸ 개발 ë¸Œëžœì¹˜ë“¤ì— ì–´ë–»ê²Œ
ì ìš©í•˜ëŠ”ì§€ì— ê´€í•˜ì—¬ ìžì„¸ížˆ 설명하고 있는 ì¢‹ì€ ìž…ë¬¸ì„œì´ë‹¤.
-커ë„ì€ ì†ŒìŠ¤ 코드 ê·¸ ìžì²´ì—ì„œ ìžë™ì ìœ¼ë¡œ 만들어질 수 있는 ë§Žì€ ë¬¸ì„œë“¤ì„
-가지고 있다. ì´ê²ƒì€ ì»¤ë„ ë‚´ì˜ APIì— ëŒ€í•œ 모든 설명, 그리고 ë½í‚¹ì„
-올바르게 처리하는 ë²•ì— ê´€í•œ ê·œì¹™ì„ í¬í•¨í•˜ê³  있다. ì´ ë¬¸ì„œëŠ”
-Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, HTML,
-그리고 man 페ì´ì§€ë“¤ë¡œ 다ìŒê³¼ ê°™ì´ ì‹¤í–‰í•˜ì—¬ 만들어 진다.
+커ë„ì€ ì†ŒìŠ¤ 코드 ê·¸ ìžì²´ì—ì„œ ë˜ëŠ” ì´ê²ƒê³¼ ê°™ì€ ReStructuredText 마í¬ì—… (ReST) ì„
+통해 ìžë™ì ìœ¼ë¡œ 만들어질 수 있는 ë§Žì€ ë¬¸ì„œë“¤ì„ ê°€ì§€ê³  있다. ì´ê²ƒì€ ì»¤ë„ ë‚´ì˜
+APIì— ëŒ€í•œ 모든 설명, 그리고 ë½í‚¹ì„ 올바르게 처리하는 ë²•ì— ê´€í•œ ê·œì¹™ì„ í¬í•¨í•˜ê³ 
+있다.
+
+모든 그런 ë¬¸ì„œë“¤ì€ ì»¤ë„ ì†ŒìŠ¤ 디렉토리ì—ì„œ ë‹¤ìŒ ì»¤ë§¨ë“œë¥¼ 실행하는 ê²ƒì„ í†µí•´ PDF
+나 HTML ì˜ í˜•íƒœë¡œ 만들어질 수 있다::
+
make pdfdocs
- make psdocs
make htmldocs
- make mandocs
-ê°ê°ì˜ ëª…ë ¹ì„ ë©”ì¸ ì»¤ë„ ì†ŒìŠ¤ 디렉토리로부터 실행한다.
+ReST 마í¬ì—…ì„ ì‚¬ìš©í•˜ëŠ” ë¬¸ì„œë“¤ì€ Documentation/output ì— ìƒì„±ëœë‹¤. 해당
+ë¬¸ì„œë“¤ì€ ë‹¤ìŒì˜ 커맨드를 사용하면 LaTeX ì´ë‚˜ ePub ë¡œë„ ë§Œë“¤ì–´ì§ˆ 수 있다::
+
+ make latexdocs
+ make epubdocs
+
+현재, ReST ë¡œì˜ ë³€í™˜ì´ ì§„í–‰ì¤‘ì¸, DocBook 으로 ì“°ì¸ ë¬¸ì„œë“¤ì´ ì¡´ìž¬í•œë‹¤. 그런
+ë¬¸ì„œë“¤ì€ Documentation/DocBook/ 디렉토리 ì•ˆì— ìƒì„±ë  것ì´ê³  ë‹¤ìŒ ì»¤ë§¨ë“œë¥¼ 통해
+Postscript 나 man page ë¡œë„ ë§Œë“¤ì–´ì§ˆ 수 있다::
+
+ make psdocs
+ make mandocs
ì»¤ë„ ê°œë°œìžê°€ ë˜ëŠ” 것
---------------------
ì—¬ëŸ¬ë¶„ì´ ë¦¬ëˆ…ìŠ¤ ì»¤ë„ ê°œë°œì— ê´€í•˜ì—¬ ì•„ë¬´ê²ƒë„ ëª¨ë¥¸ë‹¤ë©´ Linux KernelNewbies
프로ì íŠ¸ë¥¼ ë´ì•¼ 한다.
- http://kernelnewbies.org
+
+ https://kernelnewbies.org
+
ê·¸ê³³ì€ ê±°ì˜ ëª¨ë“  ì¢…ë¥˜ì˜ ê¸°ë³¸ì ì¸ ì»¤ë„ ê°œë°œ 질문들(질문하기 ì „ì— ë¨¼ì €
ì•„ì¹´ì´ë¸Œë¥¼ 찾아ë´ë¼. ê³¼ê±°ì— ì´ë¯¸ 답변ë˜ì—ˆì„ ìˆ˜ë„ ìžˆë‹¤)ì„ í•  수 있는 ë„움ì´
ë ë§Œí•œ ë©”ì¼ë§ 리스트가 있다. ë˜í•œ 실시간으로 질문 í•  수 있는 IRC 채ë„ë„
@@ -192,7 +217,9 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
ì—¬ëŸ¬ë¶„ì´ ì–´ë””ì„œ 시작해야 할진 모르지만 ì»¤ë„ ê°œë°œ ì»¤ë®¤ë‹ˆí‹°ì— ì°¸ì—¬í•  수
있는 ì¼ë“¤ì„ 찾길 ì›í•œë‹¤ë©´ 리눅스 ì»¤ë„ Janitor 프로ì íŠ¸ë¥¼ 살펴ë´ë¼.
- http://kernelnewbies.org/KernelJanitors
+
+ https://kernelnewbies.org/KernelJanitors
+
ê·¸ê³³ì€ ì‹œìž‘í•˜ê¸°ì— í›Œë¥­í•œ 장소ì´ë‹¤. ê·¸ê³³ì€ ë¦¬ëˆ…ìŠ¤ ì»¤ë„ ì†ŒìŠ¤ 트리내ì—
간단히 정리ë˜ê³  ìˆ˜ì •ë  ìˆ˜ 있는 ë¬¸ì œë“¤ì— ê´€í•˜ì—¬ 설명한다. ì—¬ëŸ¬ë¶„ì€ ì´
프로ì íŠ¸ë¥¼ 대표하는 개발ìžë“¤ê³¼ ì¼í•˜ë©´ì„œ ìžì‹ ì˜ 패치를 리눅스 ì»¤ë„ íŠ¸ë¦¬ì—
@@ -204,7 +231,8 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
올바른 í¬ë§·ìœ¼ë¡œ í¬ìž¥í•˜ëŠ”ë° ë„ì›€ì´ í•„ìš”í•˜ë‹¤ë©´ 그러한 문제를 ë•ê¸° 위해
만들어진 kernel-mentors 프로ì íŠ¸ê°€ 있다. ê·¸ê³³ì€ ë©”ì¼ë§ 리스트ì´ë©°
다ìŒì—ì„œ 참조할 수 있다.
- http://selenic.com/mailman/listinfo/kernel-mentors
+
+ https://selenic.com/mailman/listinfo/kernel-mentors
리눅스 ì»¤ë„ ì½”ë“œì— ì‹¤ì œ ë³€ê²½ì„ í•˜ê¸° ì „ì— ë°˜ë“œì‹œ ê·¸ 코드가 어떻게
ë™ìž‘하는지 ì´í•´í•˜ê³  있어야 한다. 코드를 분ì„하기 위하여 특정한 툴ì˜
@@ -213,6 +241,7 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
ê²ƒì€ Linux Cross-Reference projectì´ë©° ê·¸ê²ƒì€ ìžê¸° 참조 ë°©ì‹ì´ë©°
소스코드를 ì¸ë±ìŠ¤ëœ 웹 페ì´ì§€ë“¤ì˜ 형태로 보여준다. ìµœì‹ ì˜ ë©‹ì§„ 커ë„
코드 저장소는 다ìŒì„ 통하여 참조할 수 있다.
+
http://lxr.free-electrons.com/
@@ -222,6 +251,7 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
리눅스 ì»¤ë„ ê°œë°œ 프로세스는 현재 몇몇 다른 ë©”ì¸ ì»¤ë„ "브랜치들"ê³¼
ì„œë¸Œì‹œìŠ¤í…œì— íŠ¹í™”ëœ ì»¤ë„ ë¸Œëžœì¹˜ë“¤ë¡œ 구성ëœë‹¤. 몇몇 다른 ë©”ì¸
ë¸Œëžœì¹˜ë“¤ì€ ë‹¤ìŒê³¼ 같다.
+
- main 4.x ì»¤ë„ íŠ¸ë¦¬
- 4.x.y - ì•ˆì •ëœ ì»¤ë„ íŠ¸ë¦¬
- 4.x -git ì»¤ë„ íŒ¨ì¹˜ë“¤
@@ -229,15 +259,16 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
- 4.x - 통합 테스트를 위한 next ì»¤ë„ íŠ¸ë¦¬
4.x ì»¤ë„ íŠ¸ë¦¬
----------------
+~~~~~~~~~~~~~
+
+4.x 커ë„ë“¤ì€ Linus Torvaldsê°€ 관리하며 https://kernel.org ì˜
+pub/linux/kernel/v4.x/ 디렉토리ì—ì„œ ì°¸ì¡°ë  ìˆ˜ 있다.개발 프로세스는 다ìŒê³¼ 같다.
-4.x 커ë„ë“¤ì€ Linux Torvaldsê°€ 관리하며 kernel.orgì˜ pub/linux/kernel/v4.x/
-디렉토리ì—ì„œ ì°¸ì¡°ë  ìˆ˜ 있다.개발 프로세스는 다ìŒê³¼ 같다.
- 새로운 커ë„ì´ ë°°í¬ë˜ìžë§ˆìž 2ì£¼ì˜ ì‹œê°„ì´ ì£¼ì–´ì§„ë‹¤. ì´ ê¸°ê°„ë™ì€
ë©”ì¸í…Œì´ë„ˆë“¤ì€ í° diffë“¤ì„ Linusì—게 제출할 수 있다. 대개 ì´ íŒ¨ì¹˜ë“¤ì€
몇 주 ë™ì•ˆ -next 커ë„ë‚´ì— ì´ë¯¸ ìžˆì—ˆë˜ ê²ƒë“¤ì´ë‹¤. í° ë³€ê²½ë“¤ì„ ì œì¶œí•˜ëŠ” ë°
선호ë˜ëŠ” ë°©ë²•ì€ git(커ë„ì˜ ì†ŒìŠ¤ 관리 툴, ë” ë§Žì€ ì •ë³´ë“¤ì€
- http://git-scm.com/ ì—ì„œ 참조할 수 있다)를 사용하는 것ì´ì§€ë§Œ 순수한
+ https://git-scm.com/ ì—ì„œ 참조할 수 있다)를 사용하는 것ì´ì§€ë§Œ 순수한
패치파ì¼ì˜ 형ì‹ìœ¼ë¡œ 보내는 ê²ƒë„ ë¬´ê´€í•˜ë‹¤.
- 2주 í›„ì— -rc1 커ë„ì´ ë°°í¬ë˜ë©° 지금부터는 ì „ì²´ 커ë„ì˜ ì•ˆì •ì„±ì— ì˜í–¥ì„
미칠수 있는 새로운 ê¸°ëŠ¥ë“¤ì„ í¬í•¨í•˜ì§€ 않는 íŒ¨ì¹˜ë“¤ë§Œì´ ì¶”ê°€ë  ìˆ˜ 있다.
@@ -256,12 +287,13 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
ì»¤ë„ ë°°í¬ì— 있어서 언급할만한 가치가 있는 리눅스 ì»¤ë„ ë©”ì¼ë§ 리스트ì˜
Andrew Mortonì˜ ê¸€ì´ ìžˆë‹¤.
- "커ë„ì´ ì–¸ì œ ë°°í¬ë ì§€ëŠ” ì•„ë¬´ë„ ëª¨ë¥¸ë‹¤. 왜ëƒí•˜ë©´ ë°°í¬ëŠ” 알려진
+
+ *"커ë„ì´ ì–¸ì œ ë°°í¬ë ì§€ëŠ” ì•„ë¬´ë„ ëª¨ë¥¸ë‹¤. 왜ëƒí•˜ë©´ ë°°í¬ëŠ” 알려진
ë²„ê·¸ì˜ ìƒí™©ì— ë”°ë¼ ë°°í¬ë˜ëŠ” 것ì´ì§€ 미리정해 ë†“ì€ ì‹œê°„ì— ë”°ë¼
- ë°°í¬ë˜ëŠ” ê²ƒì€ ì•„ë‹ˆê¸° 때문ì´ë‹¤."
+ ë°°í¬ë˜ëŠ” ê²ƒì€ ì•„ë‹ˆê¸° 때문ì´ë‹¤."*
4.x.y - 안정 ì»¤ë„ íŠ¸ë¦¬
-------------------------
+~~~~~~~~~~~~~~~~~~~~~~
3 ìžë¦¬ 숫ìžë¡œ ì´ë£¨ì–´ì§„ ë²„ì ¼ì˜ ì»¤ë„ë“¤ì€ -stable 커ë„들ì´ë‹¤. ê·¸ê²ƒë“¤ì€ 4.x
커ë„ì—ì„œ ë°œê²¬ëœ í° íšŒê·€ë“¤ì´ë‚˜ 보안 문제들 중 비êµì  ìž‘ê³  중요한 수정들ì„
@@ -276,20 +308,21 @@ Andrew Mortonì˜ ê¸€ì´ ìžˆë‹¤.
4.x.y는 "stable" 팀<stable@vger.kernel.org>ì— ì˜í•´ 관리ë˜ë©° ê±°ì˜ ë§¤ë²ˆ 격주로
ë°°í¬ëœë‹¤.
-ì»¤ë„ íŠ¸ë¦¬ 문서들 ë‚´ì— Documentation/stable_kernel_rules.txt 파ì¼ì€ ì–´ë–¤
+ì»¤ë„ íŠ¸ë¦¬ 문서들 ë‚´ì— Documentation/process/stable-kernel-rules.rst 파ì¼ì€ ì–´ë–¤
ì¢…ë¥˜ì˜ ë³€ê²½ë“¤ì´ -stable 트리로 들어왔는지와 ë°°í¬ í”„ë¡œì„¸ìŠ¤ê°€ 어떻게
진행ë˜ëŠ”지를 설명한다.
-
4.x -git 패치들
-------------------
+~~~~~~~~~~~~~~~
+
git 저장소(그러므로 -gitì´ë¼ëŠ” ì´ë¦„ì´ ë¶™ìŒ)ì—는 날마다 관리ë˜ëŠ” Linusì˜
ì»¤ë„ íŠ¸ë¦¬ì˜ snapshot ë“¤ì´ ìžˆë‹¤. ì´ íŒ¨ì¹˜ë“¤ì€ ì¼ë°˜ì ìœ¼ë¡œ 날마다 ë°°í¬ë˜ë©°
Linusì˜ íŠ¸ë¦¬ì˜ í˜„ìž¬ ìƒíƒœë¥¼ 나타낸다. ì´ íŒ¨ì¹˜ë“¤ì€ ì •ìƒì ì¸ì§€ 조금ë„
살펴보지 ì•Šê³  ìžë™ì ìœ¼ë¡œ ìƒì„±ëœ 것ì´ë¯€ë¡œ -rc 커ë„들 ë³´ë‹¤ë„ ë” ì‹¤í—˜ì ì´ë‹¤.
서브시스템 ì»¤ë„ íŠ¸ë¦¬ë“¤ê³¼ 패치들
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
다양한 ì»¤ë„ ì„œë¸Œì‹œìŠ¤í…œì˜ ë©”ì¸í…Œì´ë„ˆë“¤ --- 그리고 ë§Žì€ ì»¤ë„ ì„œë¸Œì‹œìŠ¤í…œ 개발ìžë“¤
--- ì€ ê·¸ë“¤ì˜ í˜„ìž¬ 개발 ìƒíƒœë¥¼ 소스 저장소로 노출한다. ì´ë¥¼ 통해 다른 사람들ë„
커ë„ì˜ ë‹¤ë¥¸ ì˜ì—­ì— ì–´ë–¤ 변화가 ì´ë£¨ì–´ì§€ê³  있는지 ì•Œ 수 있다. 급ì†ížˆ 개발ì´
@@ -300,35 +333,39 @@ Linusì˜ íŠ¸ë¦¬ì˜ í˜„ìž¬ ìƒíƒœë¥¼ 나타낸다. ì´ íŒ¨ì¹˜ë“¤ì€ ì •ìƒì ì¸ì
ëŒ€ë¶€ë¶„ì˜ ì´ëŸ¬í•œ 저장소는 git 트리지만, gitì´ ì•„ë‹Œ SCM으로 관리ë˜ê±°ë‚˜, quilt
시리즈로 제공ë˜ëŠ” íŒ¨ì¹˜ë“¤ë„ ì¡´ìž¬í•œë‹¤. ì´ëŸ¬í•œ 서브시스템 ì €ìž¥ì†Œë“¤ì€ MAINTAINERS
-파ì¼ì— 나열ë˜ì–´ 있다. ëŒ€ë¶€ë¶„ì€ http://git.kernel.org ì—ì„œ ë³¼ 수 있다.
+파ì¼ì— 나열ë˜ì–´ 있다. ëŒ€ë¶€ë¶„ì€ https://git.kernel.org ì—ì„œ ë³¼ 수 있다.
ì œì•ˆëœ íŒ¨ì¹˜ëŠ” 서브시스템 íŠ¸ë¦¬ì— ì»¤ë°‹ë˜ê¸° ì „ì— ë©”ì¼ë§ 리스트를 통해
리뷰ëœë‹¤(ì•„ëž˜ì˜ ê´€ë ¨ ì„¹ì…˜ì„ ì°¸ê³ í•˜ê¸° 바란다). ì¼ë¶€ ì»¤ë„ ì„œë¸Œì‹œìŠ¤í…œì˜ ê²½ìš°, ì´
리뷰 프로세스는 patchworkë¼ëŠ” ë„구를 통해 추ì ëœë‹¤. patchworkì€ ë“±ë¡ëœ 패치와
íŒ¨ì¹˜ì— ëŒ€í•œ 코멘트, íŒ¨ì¹˜ì˜ ë²„ì „ì„ ë³¼ 수 있는 웹 ì¸í„°íŽ˜ì´ìŠ¤ë¥¼ 제공하고,
ë©”ì¸í…Œì´ë„ˆëŠ” 패치를 리뷰 중, 리뷰 통과, ë˜ëŠ” 반려ë¨ìœ¼ë¡œ 표시할 수 있다.
-ëŒ€ë¶€ë¶„ì˜ ì´ëŸ¬í•œ patchwork 사ì´íŠ¸ëŠ” http://patchwork.kernel.org/ ë˜ëŠ”
+ëŒ€ë¶€ë¶„ì˜ ì´ëŸ¬í•œ patchwork 사ì´íŠ¸ëŠ” https://patchwork.kernel.org/ ë˜ëŠ”
http://patchwork.ozlabs.org/ ì— ë‚˜ì—´ë˜ì–´ 있다.
4.x - 통합 테스트를 위한 next ì»¤ë„ íŠ¸ë¦¬
------------------------------------------
+---------------------------------------
서브시스템 íŠ¸ë¦¬ë“¤ì˜ ë³€ê²½ì‚¬í•­ë“¤ì€ mainline 4.x 트리로 들어오기 ì „ì— í†µí•©
테스트를 ê±°ì³ì•¼ 한다. ì´ëŸ° 목ì ìœ¼ë¡œ, 모든 서브시스템 íŠ¸ë¦¬ì˜ ë³€ê²½ì‚¬í•­ì„ ê±°ì˜
ë§¤ì¼ ë°›ì•„ê°€ëŠ” 특수한 테스트 저장소가 존재한다:
- http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+
+ https://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
ì´ëŸ° ì‹ìœ¼ë¡œ, -next 커ë„ì„ í†µí•´ ë‹¤ìŒ ë¨¸ì§€ ê¸°ê°„ì— ë©”ì¸ë¼ì¸ 커ë„ì— ì–´ë–¤ 변경ì´
가해질 것ì¸ì§€ 간략히 ì•Œ 수 있다. 모험심 ê°•í•œ 테스터ë¼ë©´ -next 커ë„ì—ì„œ 테스트를
수행하는 ê²ƒë„ ì¢‹ì„ ê²ƒì´ë‹¤.
+
버그 보고
---------
-bugzilla.kernel.org는 리눅스 ì»¤ë„ ê°œë°œìžë“¤ì´ 커ë„ì˜ ë²„ê·¸ë¥¼ 추ì í•˜ëŠ” ê³³ì´ë‹¤.
-사용ìžë“¤ì€ 발견한 모든 ë²„ê·¸ë“¤ì„ ë³´ê³ í•˜ê¸° 위하여 ì´ íˆ´ì„ ì‚¬ìš©í•  ê²ƒì„ ê¶Œìž¥í•œë‹¤.
-kernel bugzilla를 사용하는 ìžì„¸í•œ ë°©ë²•ì€ ë‹¤ìŒì„ 참조하ë¼.
- http://test.kernel.org/bugzilla/faq.html
-ë©”ì¸ ì»¤ë„ ì†ŒìŠ¤ ë””ë ‰í† ë¦¬ì— ìžˆëŠ” REPORTING-BUGS 파ì¼ì€ ì»¤ë„ ë²„ê·¸ë¼ê³  ìƒê°ë˜ëŠ”
+https://bugzilla.kernel.org는 리눅스 ì»¤ë„ ê°œë°œìžë“¤ì´ 커ë„ì˜ ë²„ê·¸ë¥¼ 추ì í•˜ëŠ”
+ê³³ì´ë‹¤. 사용ìžë“¤ì€ 발견한 모든 ë²„ê·¸ë“¤ì„ ë³´ê³ í•˜ê¸° 위하여 ì´ íˆ´ì„ ì‚¬ìš©í•  것ì„
+권장한다. kernel bugzilla를 사용하는 ìžì„¸í•œ ë°©ë²•ì€ ë‹¤ìŒì„ 참조하ë¼.
+
+ https://bugzilla.kernel.org/page.cgi?id=faq.html
+
+ë©”ì¸ ì»¤ë„ ì†ŒìŠ¤ ë””ë ‰í† ë¦¬ì— ìžˆëŠ” admin-guide/reporting-bugs.rst 파ì¼ì€ ì»¤ë„ ë²„ê·¸ë¼ê³  ìƒê°ë˜ëŠ”
ê²ƒì„ ë³´ê³ í•˜ëŠ” ë°©ë²•ì— ê´€í•œ ì¢‹ì€ í…œí”Œë¦¿ì´ë©° 문제를 추ì í•˜ê¸° 위해서 커ë„
개발ìžë“¤ì´ 필요로 하는 ì •ë³´ê°€ 무엇들ì¸ì§€ë¥¼ ìƒì„¸ížˆ 설명하고 있다.
@@ -344,13 +381,14 @@ kernel bugzilla를 사용하는 ìžì„¸í•œ ë°©ë²•ì€ ë‹¤ìŒì„ 참조하ë¼.
ì ìˆ˜ë¥¼ ì–»ì„ ìˆ˜ 있는 가장 ì¢‹ì€ ë°©ë²•ì¤‘ì˜ í•˜ë‚˜ì´ë‹¤. 왜ëƒí•˜ë©´ ë§Žì€ ì‚¬ëžŒë“¤ì€
다른 ì‚¬ëžŒë“¤ì˜ ë²„ê·¸ë“¤ì„ ìˆ˜ì •í•˜ê¸° 위하여 ì‹œê°„ì„ ë‚­ë¹„í•˜ì§€ 않기 때문ì´ë‹¤.
-ì´ë¯¸ ë³´ê³ ëœ ë²„ê·¸ 리í¬íŠ¸ë“¤ì„ 가지고 작업하기 위해서 http://bugzilla.kernel.org를
-참조하ë¼. ì—¬ëŸ¬ë¶„ì´ ì•žìœ¼ë¡œ ìƒê²¨ë‚  버그 리í¬íŠ¸ë“¤ì˜ ì¡°ì–¸ìžê°€ ë˜ê¸¸ ì›í•œë‹¤ë©´
+ì´ë¯¸ ë³´ê³ ëœ ë²„ê·¸ 리í¬íŠ¸ë“¤ì„ 가지고 작업하기 위해서 https://bugzilla.kernel.org
+를 참조하ë¼. ì—¬ëŸ¬ë¶„ì´ ì•žìœ¼ë¡œ ìƒê²¨ë‚  버그 리í¬íŠ¸ë“¤ì˜ ì¡°ì–¸ìžê°€ ë˜ê¸¸ ì›í•œë‹¤ë©´
bugme-new ë©”ì¼ë§ 리스트나(새로운 버그 리í¬íŠ¸ë“¤ë§Œì´ ì´ê³³ì—ì„œ ë©”ì¼ë¡œ 전해진다)
bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì¼ë¡œ 전해진다)
ì— ë“±ë¡í•˜ë©´ ëœë‹¤.
https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+
https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
@@ -361,10 +399,14 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
ìœ„ì˜ ëª‡ëª‡ ë¬¸ì„œë“¤ì´ ì„¤ëª…í•˜ì˜€ì§€ë§Œ 핵심 ì»¤ë„ ê°œë°œìžë“¤ì˜ 대다수는
리눅스 ì»¤ë„ ë©”ì¼ë§ ë¦¬ìŠ¤íŠ¸ì— ì°¸ì—¬í•˜ê³  있다. ë¦¬ìŠ¤íŠ¸ì— ë“±ë¡í•˜ê³  해지하는
ë°©ë²•ì— ê´€í•œ ìžì„¸í•œ ì‚¬í•­ì€ ë‹¤ìŒì—ì„œ 참조할 수 있다.
+
http://vger.kernel.org/vger-lists.html#linux-kernel
+
웹ìƒì˜ ë§Žì€ ë‹¤ë¥¸ ê³³ì—ë„ ë©”ì¼ë§ ë¦¬ìŠ¤íŠ¸ì˜ ì•„ì¹´ì´ë¸Œë“¤ì´ 있다.
ì´ëŸ¬í•œ ì•„ì¹´ì´ë¸Œë“¤ì„ 찾으려면 검색 ì—”ì§„ì„ ì‚¬ìš©í•˜ë¼. 예를 들어:
+
http://dir.gmane.org/gmane.linux.kernel
+
ì—¬ëŸ¬ë¶„ì´ ìƒˆë¡œìš´ ë¬¸ì œì— ê´€í•´ ë¦¬ìŠ¤íŠ¸ì— ì˜¬ë¦¬ê¸° ì „ì— ë§í•˜ê³  ì‹¶ì€ ì£¼ì œì— ê´€í•œ
ê²ƒì„ ì•„ì¹´ì´ë¸Œì—ì„œ 먼저 찾아보기를 강력히 권장한다. ì´ë¯¸ ìƒì„¸í•˜ê²Œ í† ë¡ ëœ ë§Žì€
ê²ƒë“¤ì´ ë©”ì¼ë§ ë¦¬ìŠ¤íŠ¸ì˜ ì•„ì¹´ì´ë¸Œì— 기ë¡ë˜ì–´ 있다.
@@ -374,11 +416,13 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
있는지는 MAINTAINERS 파ì¼ì„ 참조하ë¼.
ë§Žì€ ë¦¬ìŠ¤íŠ¸ë“¤ì€ kernel.orgì—ì„œ 호스트ë˜ê³  있다. ê·¸ ì •ë³´ë“¤ì€ ë‹¤ìŒì—ì„œ ì°¸ì¡°ë  ìˆ˜ 있다.
+
http://vger.kernel.org/vger-lists.html
ë¦¬ìŠ¤íŠ¸ë“¤ì„ ì‚¬ìš©í•  때는 올바른 ì˜ˆì ˆì„ ë”°ë¥¼ ê²ƒì„ ìœ ë…í•´ë¼.
대단하진 않지만 ë‹¤ìŒ URLì€ ë¦¬ìŠ¤íŠ¸(í˜¹ì€ ëª¨ë“  리스트)와 대화하는 몇몇 간단한
ê°€ì´ë“œë¼ì¸ì„ 가지고 있다.
+
http://www.albion.com/netiquette/
여러 ì‚¬ëžŒë“¤ì´ ì—¬ëŸ¬ë¶„ì˜ ë©”ì¼ì— ì‘답한다면 CC: 즉 수신 리스트는 꽤 커지게
@@ -391,7 +435,7 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
"John 커ë„해커는 작성했다...."를 유지하며 ì—¬ëŸ¬ë¶„ë“¤ì˜ ì˜ê²¬ì„ ê·¸ ë©”ì¼ì˜ 윗부분ì—
작성하지 ë§ê³  ê° ì¸ìš©í•œ 단ë½ë“¤ 사ì´ì— 넣어ë¼.
-ì—¬ëŸ¬ë¶„ë“¤ì´ íŒ¨ì¹˜ë“¤ì„ ë©”ì¼ì— 넣는다면 ê·¸ê²ƒë“¤ì€ Documentation/SubmittingPatchesì—
+ì—¬ëŸ¬ë¶„ë“¤ì´ íŒ¨ì¹˜ë“¤ì„ ë©”ì¼ì— 넣는다면 ê·¸ê²ƒë“¤ì€ Documentation/process/submitting-patches.rstì—
나와있는ë°ë¡œ 명백히(plain) ì½ì„ 수 있는 í…스트여야 한다. ì»¤ë„ ê°œë°œìžë“¤ì€
첨부파ì¼ì´ë‚˜ ì••ì¶•ëœ íŒ¨ì¹˜ë“¤ì„ ì›í•˜ì§€ 않는다. ê·¸ë“¤ì€ ì—¬ëŸ¬ë¶„ë“¤ì˜ íŒ¨ì¹˜ì˜
ê° ë¼ì¸ 단위로 코멘트를 하길 ì›í•˜ë©° 압축하거나 첨부하지 ì•Šê³  보내는 것ì´
@@ -405,11 +449,12 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
커뮤니티와 협력하는 법
---------------------
+----------------------
ì»¤ë„ ì»¤ë®¤ë‹ˆí‹°ì˜ ëª©ì ì€ 가능한한 가장 ì¢‹ì€ ì»¤ë„ì„ ì œê³µí•˜ëŠ” 것ì´ë‹¤. 여러분ì´
받아들여질 패치를 제출하게 ë˜ë©´ ê·¸ íŒ¨ì¹˜ì˜ ê¸°ìˆ ì ì¸ ì´ì ìœ¼ë¡œ ê²€í† ë  ê²ƒì´ë‹¤.
그럼 ì—¬ëŸ¬ë¶„ë“¤ì€ ë¬´ì—‡ì„ ê¸°ëŒ€í•˜ê³  있어야 하는가?
+
- 비íŒ
- ì˜ê²¬
- ë³€ê²½ì„ ìœ„í•œ 요구
@@ -423,6 +468,7 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
기다려보고 다시 ì‹œë„í•´ë¼. 때론 너무 ë§Žì€ ë©”ì¼ë“¤ ì†ì— ë¬»í˜€ë²„ë¦¬ê¸°ë„ í•œë‹¤.
ì—¬ëŸ¬ë¶„ì€ ë¬´ì—‡ì„ í•´ì„œëŠ” 안ë˜ëŠ”ê°€?
+
- ì—¬ëŸ¬ë¶„ì˜ íŒ¨ì¹˜ê°€ 아무 질문 ì—†ì´ ë°›ì•„ë“¤ì—¬ì§€ê¸°ë¥¼ 기대하는 것
- ë°©ì–´ì ì´ ë˜ëŠ” 것
- ì˜ê²¬ì„ 무시하는 것
@@ -443,10 +489,12 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
ì»¤ë„ ì»¤ë®¤ë‹ˆí‹°ì™€ 기업 ì¡°ì§ê°„ì˜ ì°¨ì´ì 
------------------------------------------------------------------
+------------------------------------
ì»¤ë„ ì»¤ë®¤ë‹ˆí‹°ëŠ” 가장 전통ì ì¸ íšŒì‚¬ì˜ ê°œë°œ 환경과는 다르다. ì—¬ê¸°ì— ì—¬ëŸ¬ë¶„ë“¤ì˜
문제를 피하기 위한 목ë¡ì´ 있다.
+
ì—¬ëŸ¬ë¶„ë“¤ì´ ì œì•ˆí•œ ë³€ê²½ë“¤ì— ê´€í•˜ì—¬ ë§í•  ë•Œ ì¢‹ì€ ê²ƒë“¤ :
+
- "ì´ê²ƒì€ 여러 ë¬¸ì œë“¤ì„ í•´ê²°í•©ë‹ˆë‹¤."
- "ì´ê²ƒì€ 2000 ë¼ì¸ì˜ 코드를 줄입니다."
- "ì´ê²ƒì€ ë‚´ê°€ ë§í•˜ë ¤ëŠ” ê²ƒì— ê´€í•´ 설명하는 패치입니다."
@@ -455,6 +503,7 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
- "ì´ê²ƒì€ ì¼ë°˜ì ì¸ 머신ì—ì„œ ì„±ëŠ¥ì„ í–¥ìƒí•¨ìœ¼ë¡œ..."
ì—¬ëŸ¬ë¶„ë“¤ì´ ë§í•  ë•Œ 피해야 í•  좋지 ì•Šì€ ê²ƒë“¤ :
+
- "우리는 ê·¸ê²ƒì„ AIX/ptx/Solarisì—ì„œ ì´ëŸ¬í•œ 방법으로 했다. 그러므로 ê·¸ê²ƒì€ ì¢‹ì€ ê²ƒìž„ì— í‹€ë¦¼ì—†ë‹¤..."
- "나는 20ë…„ë™ì•ˆ ì´ê²ƒì„ 해왔다. 그러므로..."
- "ì´ê²ƒì€ ëˆì„ 벌기위해 ë‚˜ì˜ íšŒì‚¬ê°€ 필요로 하는 것ì´ë‹¤."
@@ -514,14 +563,15 @@ Patì´ë¼ëŠ” ì´ë¦„ì„ ê°€ì§„ ì—¬ìžê°€ ìžˆì„ ìˆ˜ë„ ìžˆëŠ” 것ì´ë‹¤. 리눅ìŠ
간단하게(í˜¹ì€ ê°„ë‹¨í•œê²Œ 재배치하여) 하는 ê²ƒë„ ì¤‘ìš”í•˜ë‹¤.
ì—¬ê¸°ì— ì»¤ë„ ê°œë°œìž Al Viroì˜ ì´ì•¼ê¸°ê°€ 있다.
- "í•™ìƒì˜ 수학 숙제를 채ì í•˜ëŠ” ì„ ìƒë‹˜ì„ ìƒê°í•´ë³´ë¼. ì„ ìƒë‹˜ì€ í•™ìƒë“¤ì´
+
+ *"í•™ìƒì˜ 수학 숙제를 채ì í•˜ëŠ” ì„ ìƒë‹˜ì„ ìƒê°í•´ë³´ë¼. ì„ ìƒë‹˜ì€ í•™ìƒë“¤ì´
ë‹µì„ ì–»ì„때까지 ê²ªì€ ì‹œí–‰ì°©ì˜¤ë¥¼ 보길 ì›í•˜ì§€ 않는다. ì„ ìƒë‹˜ë“¤ì€
간결하고 가장 ë›°ì–´ë‚œ ë‹µì„ ë³´ê¸¸ ì›í•œë‹¤. 훌륭한 í•™ìƒì€ ì´ê²ƒì„ 알고
- 마지막으로 ë‹µì„ ì–»ê¸° ì „ 중간 ê³¼ì •ë“¤ì„ ì œì¶œí•˜ì§„ 않는다.
+ 마지막으로 ë‹µì„ ì–»ê¸° ì „ 중간 ê³¼ì •ë“¤ì„ ì œì¶œí•˜ì§„ 않는다.*
- ì»¤ë„ ê°œë°œë„ ë§ˆì°¬ê°€ì§€ì´ë‹¤. ë©”ì¸í…Œì´ë„ˆë“¤ê³¼ 검토하는 ì‚¬ëžŒë“¤ì€ ë¬¸ì œë¥¼
+ *ì»¤ë„ ê°œë°œë„ ë§ˆì°¬ê°€ì§€ì´ë‹¤. ë©”ì¸í…Œì´ë„ˆë“¤ê³¼ 검토하는 ì‚¬ëžŒë“¤ì€ ë¬¸ì œë¥¼
풀어나가는 과정ì†ì— 숨겨진 ê³¼ì •ì„ ë³´ê¸¸ ì›í•˜ì§„ 않는다. 그들ì€
- 간결하고 멋진 ë‹µì„ ë³´ê¸¸ ì›í•œë‹¤."
+ 간결하고 멋진 ë‹µì„ ë³´ê¸¸ ì›í•œë‹¤."*
커뮤니티와 협력하며 ë›°ì–´ë‚œ ë‹µì„ ì°¾ëŠ” 것과 ì—¬ëŸ¬ë¶„ë“¤ì˜ ë마치지 못한 작업들
사ì´ì— ê· í˜•ì„ ìœ ì§€í•´ì•¼ 하는 ê²ƒì€ ì–´ë ¤ìš¸ì§€ë„ ëª¨ë¥¸ë‹¤. 그러므로 프로세스ì˜
@@ -549,16 +599,17 @@ Patì´ë¼ëŠ” ì´ë¦„ì„ ê°€ì§„ ì—¬ìžê°€ ìžˆì„ ìˆ˜ë„ ìžˆëŠ” 것ì´ë‹¤. 리눅ìŠ
ìƒê°í•˜ì—¬ ì´ë©”ì¼ì„ 작성해야 한다. ì´ ì •ë³´ëŠ” 패치를 위한 ChangeLogê°€ ë 
것ì´ë‹¤. 그리고 í•­ìƒ ê·¸ ë‚´ìš©ì„ ë³´ê¸¸ ì›í•˜ëŠ” 모든 ì‚¬ëžŒë“¤ì„ ìœ„í•´ ë³´ì¡´ë 
것ì´ë‹¤. 패치는 완벽하게 다ìŒê³¼ ê°™ì€ ë‚´ìš©ë“¤ì„ í¬í•¨í•˜ì—¬ 설명해야 한다.
+
- ë³€ê²½ì´ ì™œ 필요한지
- íŒ¨ì¹˜ì— ê´€í•œ ì „ì²´ 설계 ì ‘ê·¼(approach)
- 구현 ìƒì„¸ë“¤
- 테스트 결과들
ì´ê²ƒì´ 무엇ì¸ì§€ ë” ìžì„¸í•œ ê²ƒì„ ì•Œê³  싶다면 ë‹¤ìŒ ë¬¸ì„œì˜ ChageLog í•­ì„ ë´ë¼.
- "The Perfect Patch"
- http://www.ozlabs.org/~akpm/stuff/tpp.txt
+ "The Perfect Patch"
+ http://www.ozlabs.org/~akpm/stuff/tpp.txt
ì´ ëª¨ë“  ê²ƒì„ í•˜ëŠ” ê²ƒì€ ë§¤ìš° 어려운 ì¼ì´ë‹¤. 완벽히 소화하는 ë°ëŠ” ì ì–´ë„ 몇년ì´
@@ -570,7 +621,8 @@ Patì´ë¼ëŠ” ì´ë¦„ì„ ê°€ì§„ ì—¬ìžê°€ ìžˆì„ ìˆ˜ë„ ìžˆëŠ” 것ì´ë‹¤. 리눅ìŠ
----------
-"개발 프로세스"(http://lwn.net/Articles/94386/) 섹션ì„
+
+"개발 프로세스"(https://lwn.net/Articles/94386/) 섹션ì„
ìž‘ì„±í•˜ëŠ”ë° ìžˆì–´ 참고할 문서를 사용하ë„ë¡ í—ˆë½í•´ì¤€ Paolo Ciarrocchiì—게
ê°ì‚¬í•œë‹¤. ì—¬ëŸ¬ë¶„ë“¤ì´ ë§í•´ì•¼ í•  것과 ë§í•´ì„œëŠ” 안ë˜ëŠ” ê²ƒì˜ ëª©ë¡ ì¤‘ ì¼ë¶€ë¥¼ 제공해준
Randy Dunlapê³¼ Gerrit Huizengaì—게 ê°ì‚¬í•œë‹¤. ë˜í•œ 검토와 ì˜ê²¬ 그리고
diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst
new file mode 100644
index 000000000000..0b695345abc7
--- /dev/null
+++ b/Documentation/translations/ko_KR/index.rst
@@ -0,0 +1,12 @@
+.. raw:: latex
+
+ \renewcommand\thesection*
+ \renewcommand\thesubsection*
+
+Korean translations
+===================
+
+.. toctree::
+ :maxdepth: 1
+
+ howto
diff --git a/Documentation/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 34d3d380893d..a3228a676cc1 100644
--- a/Documentation/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -823,6 +823,38 @@ CPU 는 b ë¡œë¶€í„°ì˜ ë¡œë“œ 오í¼ë ˆì´ì…˜ì´ a ë¡œë¶€í„°ì˜ ë¡œë“œ 오í¼ë ˆ
오í¼ë ˆì´ì…˜ì„ 위한 코드를 ì •ë§ë¡œ 만들ë„ë¡ í•˜ì§€ë§Œ, 컴파ì¼ëŸ¬ê°€ 그렇게 만들어진
ì½”ë“œì˜ ìˆ˜í–‰ 결과를 사용하ë„ë¡ ê°•ì œí•˜ì§€ëŠ” 않습니다.
+ë˜í•œ, 컨트롤 ì˜ì¡´ì„±ì€ if ë¬¸ì˜ then 절과 else ì ˆì— ëŒ€í•´ì„œë§Œ ì ìš©ë©ë‹ˆë‹¤. ìƒì„¸ížˆ
+ë§í•´ì„œ, 컨트롤 ì˜ì¡´ì„±ì€ if ë¬¸ì„ ë’¤ë”°ë¥´ëŠ” 코드ì—는 ì ìš©ë˜ì§€ 않습니다:
+
+ q = READ_ONCE(a);
+ if (q) {
+ WRITE_ONCE(b, p);
+ } else {
+ WRITE_ONCE(b, r);
+ }
+ WRITE_ONCE(c, 1); /* BUG: No ordering against the read from "a". */
+
+컴파ì¼ëŸ¬ëŠ” volatile íƒ€ìž…ì— ëŒ€í•œ 액세스를 재배치 í•  수 없고 ì´ ì¡°ê±´ í•˜ì˜ "b"
+ë¡œì˜ ì“°ê¸°ë¥¼ 재배치 í•  수 없기 ë•Œë¬¸ì— ì—¬ê¸°ì— ìˆœì„œ ê·œì¹™ì´ ì¡´ìž¬í•œë‹¤ê³  주장하고
+ì‹¶ì„ ê²ë‹ˆë‹¤. ë¶ˆí–‰ížˆë„ ì´ ê²½ìš°ì—, 컴파ì¼ëŸ¬ëŠ” 다ìŒì˜ ê°€ìƒì˜ pseudo-assembly 언어
+코드처럼 "b" ë¡œì˜ ë‘ê°œì˜ ì“°ê¸° 오í¼ë ˆì´ì…˜ì„ conditional-move ì¸ìŠ¤íŠ¸ëŸ­ì…˜ìœ¼ë¡œ
+번역할 수 있습니다:
+
+ ld r1,a
+ ld r2,p
+ ld r3,r
+ cmp r1,$0
+ cmov,ne r4,r2
+ cmov,eq r4,r3
+ st r4,b
+ st $1,c
+
+ì™„í™”ëœ ìˆœì„œ ê·œì¹™ì˜ CPU 는 "a" ë¡œë¶€í„°ì˜ ë¡œë“œì™€ "c" ë¡œì˜ ìŠ¤í† ì–´ 사ì´ì— ì–´ë–¤
+ì¢…ë¥˜ì˜ ì˜ì¡´ì„±ë„ 갖지 ì•Šì„ ê²ë‹ˆë‹¤. ì´ ì»¨íŠ¸ë¡¤ ì˜ì¡´ì„±ì€ ë‘ê°œì˜ cmov ì¸ìŠ¤íŠ¸ëŸ­ì…˜ê³¼
+ê±°ê¸°ì— ì˜ì¡´í•˜ëŠ” 스토어 ì—게만 ì ìš©ë  ê²ë‹ˆë‹¤. 짧게 ë§í•˜ìžë©´, 컨트롤 ì˜ì¡´ì„±ì€
+주어진 if ë¬¸ì˜ then 절과 else ì ˆì—게만 (그리고 ì´ ë‘ ì ˆ ë‚´ì—ì„œ 호출ë˜ëŠ”
+함수들ì—게까지) ì ìš©ë˜ì§€, ì´ if ë¬¸ì„ ë’¤ë”°ë¥´ëŠ” 코드ì—는 ì ìš©ë˜ì§€ 않습니다.
+
마지막으로, 컨트롤 ì˜ì¡´ì„±ì€ ì´í–‰ì„± (transitivity) ì„ ì œê³µí•˜ì§€ -않습니다-. ì´ê±´
x 와 y ê°€ 둘 다 0 ì´ë¼ëŠ” ì´ˆê¸°ê°’ì„ ê°€ì¡Œë‹¤ëŠ” 가정 í•˜ì˜ ë‘ê°œì˜ ì˜ˆì œë¡œ
ë³´ì´ê² ìŠµë‹ˆë‹¤:
@@ -883,6 +915,10 @@ http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와
ì˜ì¡´ì„±ì´ 사ë¼ì§€ì§€ 않게 í•˜ëŠ”ë° ë„ì›€ì„ ì¤„ 수 있습니다. ë” ë§Žì€ ì •ë³´ë¥¼
위해선 "컴파ì¼ëŸ¬ 배리어" ì„¹ì…˜ì„ ì°¸ê³ í•˜ì‹œê¸° ë°”ëžë‹ˆë‹¤.
+ (*) 컨트롤 ì˜ì¡´ì„±ì€ 컨트롤 ì˜ì¡´ì„±ì„ 갖는 if ë¬¸ì˜ then 절과 else 절과 ì´ ë‘ ì ˆ
+ ë‚´ì—ì„œ 호출ë˜ëŠ” 함수들ì—만 ì ìš©ë©ë‹ˆë‹¤. 컨트롤 ì˜ì¡´ì„±ì€ 컨트롤 ì˜ì¡´ì„±ì„
+ 갖는 if ë¬¸ì„ ë’¤ë”°ë¥´ëŠ” 코드ì—는 ì ìš©ë˜ì§€ -않습니다-.
+
(*) 컨트롤 ì˜ì¡´ì„±ì€ 보통 다른 íƒ€ìž…ì˜ ë°°ë¦¬ì–´ë“¤ê³¼ ì§ì„ 맞춰 사용ë©ë‹ˆë‹¤.
(*) 컨트롤 ì˜ì¡´ì„±ì€ ì´í–‰ì„±ì„ 제공하지 -않습니다-. ì´í–‰ì„±ì´ 필요하다면,
diff --git a/Documentation/ko_KR/stable_api_nonsense.txt b/Documentation/translations/ko_KR/stable_api_nonsense.txt
index 3ba10b11d556..4d93af1efd61 100644
--- a/Documentation/ko_KR/stable_api_nonsense.txt
+++ b/Documentation/translations/ko_KR/stable_api_nonsense.txt
@@ -1,5 +1,5 @@
NOTE:
-This is a version of Documentation/stable_api_nonsense.txt translated
+This is a version of Documentation/process/stable-api-nonsense.rst translated
into korean
This document is maintained by Minchan Kim <minchan@kernel.org>
If you find any difference between this document and the original file or
@@ -12,7 +12,7 @@ try to update the original English file first.
==================================
ì´ ë¬¸ì„œëŠ”
-Documentation/stable_api_nonsense.txt
+Documentation/process/stable-api-nonsense.rst
ì˜ í•œê¸€ 번역입니다.
ì—­ìžï¼š 김민찬 <minchan@kernel.org>
diff --git a/Documentation/zh_CN/CodingStyle b/Documentation/translations/zh_CN/CodingStyle
index 12717791baac..dc101f48e713 100644
--- a/Documentation/zh_CN/CodingStyle
+++ b/Documentation/translations/zh_CN/CodingStyle
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/CodingStyle
+Chinese translated version of Documentation/process/coding-style.rst
If you have any comment or update to the content, please post to LKML directly.
However, if you have problem communicating in English you can also ask the
@@ -7,7 +7,7 @@ translation is outdated or there is problem with translation.
Chinese maintainer: Zhang Le <r0bertz@gentoo.org>
---------------------------------------------------------------------
-Documentation/CodingStyle的中文翻译
+Documentation/process/coding-style.rst的中文翻译
如果想评论或更新本文的内容,请直接å‘信到LKML。如果你使用英文交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯
以å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻译存在问题,请è”系中文版维护者。
@@ -399,7 +399,7 @@ C是一个简朴的语言,你的命å也应该这样。和 Modula-2 å’Œ Pasca
些事情的原因。
当注释内核API函数时,请使用 kernel-doc æ ¼å¼ã€‚请看
-Documentation/kernel-documentation.rstå’Œscripts/kernel-doc 以获得详细信æ¯ã€‚
+Documentation/doc-guide/å’Œscripts/kernel-doc 以获得详细信æ¯ã€‚
Linux的注释风格是 C89 “/* ... */†风格。ä¸è¦ä½¿ç”¨ C99 风格 “// ...†注释。
@@ -809,5 +809,5 @@ GNU 手册 - éµå¾ª K&R 标准和此文本 - cpp, gcc, gcc internals and indent,
WG14是C语言的国际标准化工作组,URL: http://www.open-std.org/JTC1/SC22/WG14/
-Kernel CodingStyle,作者 greg@kroah.com å‘表于OLS 2002:
+Kernel process/coding-style.rst,作者 greg@kroah.com å‘表于OLS 2002:
http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/translations/zh_CN/HOWTO
index f0613b92e0be..11be075ba5fa 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/translations/zh_CN/HOWTO
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/HOWTO
+Chinese translated version of Documentation/process/howto.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -9,7 +9,7 @@ or if there is a problem with the translation.
Maintainer: Greg Kroah-Hartman <greg@kroah.com>
Chinese maintainer: Li Yang <leoli@freescale.com>
---------------------------------------------------------------------
-Documentation/HOWTO 的中文翻译
+Documentation/process/howto.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
@@ -93,16 +93,16 @@ Linux内核代ç ä¸­åŒ…å«æœ‰å¤§é‡çš„文档。这些文档对于学习如何与
文件简è¦ä»‹ç»äº†Linux内核的背景,并且æ述了如何é…置和编译内核。内核的
新用户应该从这里开始。
- Documentation/Changes
+ Documentation/process/changes.rst
文件给出了用æ¥ç¼–译和使用内核所需è¦çš„最å°è½¯ä»¶åŒ…列表。
- Documentation/CodingStyle
+ Documentation/process/coding-style.rst
æè¿°Linux内核的代ç é£Žæ ¼å’Œç†ç”±ã€‚所有新代ç éœ€è¦éµå®ˆè¿™ç¯‡æ–‡æ¡£ä¸­å®šä¹‰çš„规
范。大多数维护者åªä¼šæŽ¥æ”¶ç¬¦åˆè§„定的补ä¸ï¼Œå¾ˆå¤šäººä¹Ÿåªä¼šå¸®å¿™æ£€æŸ¥ç¬¦åˆé£Žæ ¼
的代ç ã€‚
- Documentation/SubmittingPatches
- Documentation/SubmittingDrivers
+ Documentation/process/submitting-patches.rst
+ Documentation/process/submitting-drivers.rst
这两份文档明确æ述如何创建和å‘é€è¡¥ä¸ï¼Œå…¶ä¸­åŒ…括(但ä¸ä»…é™äºŽ):
- 邮件内容
- 邮件格å¼
@@ -116,7 +116,7 @@ Linux内核代ç ä¸­åŒ…å«æœ‰å¤§é‡çš„文档。这些文档对于学习如何与
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
- Documentation/stable_api_nonsense.txt
+ Documentation/process/stable-api-nonsense.rst
论è¯å†…核为什么特æ„ä¸åŒ…括稳定的内核内部API,也就是说ä¸åŒ…括åƒè¿™æ ·çš„特
性:
- å­ç³»ç»Ÿä¸­é—´å±‚(为了兼容性?)
@@ -125,23 +125,23 @@ Linux内核代ç ä¸­åŒ…å«æœ‰å¤§é‡çš„文档。这些文档对于学习如何与
这篇文档对于ç†è§£Linuxçš„å¼€å‘哲学至关é‡è¦ã€‚对于将开å‘å¹³å°ä»Žå…¶ä»–æ“作系
统转移到Linux的人æ¥è¯´ä¹Ÿå¾ˆé‡è¦ã€‚
- Documentation/SecurityBugs
+ Documentation/admin-guide/security-bugs.rst
如果你认为自己å‘现了Linux内核的安全性问题,请根æ®è¿™ç¯‡æ–‡æ¡£ä¸­çš„步骤æ¥
æ醒其他内核开å‘者并帮助解决这个问题。
- Documentation/ManagementStyle
+ Documentation/process/management-style.rst
æ述内核维护者的工作方法åŠå…¶å…±æœ‰ç‰¹ç‚¹ã€‚这对于刚刚接触内核开å‘(或者对
它感到好奇)的人æ¥è¯´å¾ˆé‡è¦ï¼Œå› ä¸ºå®ƒè§£é‡Šäº†å¾ˆå¤šå¯¹äºŽå†…核维护者独特行为的
æ™®é误解与迷惑。
- Documentation/stable_kernel_rules.txt
+ Documentation/process/stable-kernel-rules.rst
解释了稳定版内核å‘布的规则,以åŠå¦‚何将改动放入这些版本的步骤。
- Documentation/kernel-docs.txt
+ Documentation/process/kernel-docs.rst
有助于内核开å‘的外部文档列表。如果你在内核自带的文档中没有找到你想找
的内容,å¯ä»¥æŸ¥çœ‹è¿™äº›æ–‡æ¡£ã€‚
- Documentation/applying-patches.txt
+ Documentation/process/applying-patches.rst
关于补ä¸æ˜¯ä»€ä¹ˆä»¥åŠå¦‚何将它打在ä¸åŒå†…核开å‘分支上的好介ç»
内核还拥有大é‡ä»Žä»£ç è‡ªåŠ¨ç”Ÿæˆçš„文档。它包å«å†…核内部APIçš„å…¨é¢ä»‹ç»ä»¥åŠå¦‚何
@@ -238,7 +238,7 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开å‘éµå¾ª
2.6.x.y版本由“稳定版â€å°ç»„(邮件地å€<stable@vger.kernel.org>)维护,一般隔周å‘
布新版本。
-内核æºç ä¸­çš„Documentation/stable_kernel_rules.txt文件具体æ述了å¯è¢«ç¨³å®š
+内核æºç ä¸­çš„Documentation/process/stable-kernel-rules.rst文件具体æ述了å¯è¢«ç¨³å®š
版内核接å—的修改类型以åŠå‘布的æµç¨‹ã€‚
@@ -329,7 +329,7 @@ bugzilla.kernel.org是Linux内核开å‘者们用æ¥è·Ÿè¸ªå†…æ ¸Bug的网站。æˆ
户在这个工具中报告找到的所有bug。如何使用内核bugzilla的细节请访问:
http://test.kernel.org/bugzilla/faq.html
-内核æºç ä¸»ç›®å½•ä¸­çš„REPORTING-BUGS文件里有一个很好的模æ¿ã€‚它指导用户如何报
+内核æºç ä¸»ç›®å½•ä¸­çš„admin-guide/reporting-bugs.rst文件里有一个很好的模æ¿ã€‚它指导用户如何报
å‘Šå¯èƒ½çš„内核bug以åŠéœ€è¦æ供哪些信æ¯æ¥å¸®åŠ©å†…核开å‘者们找到问题的根æºã€‚
@@ -380,7 +380,7 @@ MAINTAINERS文件中å¯ä»¥æ‰¾åˆ°ä¸åŒè¯é¢˜å¯¹åº”的邮件列表。
这几行。将你的评论加在被引用的段è½ä¹‹é—´è€Œä¸è¦æ”¾åœ¨é‚®ä»¶çš„顶部。
如果你在邮件中附带补ä¸ï¼Œè¯·ç¡®è®¤å®ƒä»¬æ˜¯å¯ä»¥ç›´æŽ¥é˜…读的纯文本(如
-Documentation/SubmittingPatches文档中所述)。内核开å‘者们ä¸å¸Œæœ›é‡åˆ°é™„件
+Documentation/process/submitting-patches.rst文档中所述)。内核开å‘者们ä¸å¸Œæœ›é‡åˆ°é™„件
或者被压缩了的补ä¸ã€‚åªæœ‰è¿™æ ·æ‰èƒ½ä¿è¯ä»–们å¯ä»¥ç›´æŽ¥è¯„论你的æ¯è¡Œä»£ç ã€‚请确ä¿
你使用的邮件å‘é€ç¨‹åºä¸ä¼šä¿®æ”¹ç©ºæ ¼å’Œåˆ¶è¡¨ç¬¦ã€‚一个防范性的测试方法是先将邮件
å‘é€ç»™è‡ªå·±ï¼Œç„¶åŽè‡ªå·±å°è¯•æ˜¯å¦å¯ä»¥é¡ºåˆ©åœ°æ‰“上收到的补ä¸ã€‚如果测试ä¸æˆåŠŸï¼Œè¯·
diff --git a/Documentation/zh_CN/IRQ.txt b/Documentation/translations/zh_CN/IRQ.txt
index 956026d5cf82..956026d5cf82 100644
--- a/Documentation/zh_CN/IRQ.txt
+++ b/Documentation/translations/zh_CN/IRQ.txt
diff --git a/Documentation/zh_CN/SecurityBugs b/Documentation/translations/zh_CN/SecurityBugs
index d21eb07fe943..2d0fffd122ce 100644
--- a/Documentation/zh_CN/SecurityBugs
+++ b/Documentation/translations/zh_CN/SecurityBugs
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/SecurityBugs
+Chinese translated version of Documentation/admin-guide/security-bugs.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
---------------------------------------------------------------------
-Documentation/SecurityBugs 的中文翻译
+Documentation/admin-guide/security-bugs.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
@@ -31,7 +31,7 @@ linux内核安全团队å¯ä»¥é€šè¿‡email<security@kernel.org>æ¥è”系。这是
一组独立的安全工作人员,å¯ä»¥å¸®åŠ©æ”¹å–„æ¼æ´žæŠ¥å‘Šå¹¶ä¸”公布和å–消一个修å¤ã€‚安
全团队有å¯èƒ½ä¼šä»Žéƒ¨åˆ†çš„维护者那里引进é¢å¤–的帮助æ¥äº†è§£å¹¶ä¸”ä¿®å¤å®‰å…¨æ¼æ´žã€‚
当é‡åˆ°ä»»ä½•æ¼æ´žï¼Œæ‰€èƒ½æ供的信æ¯è¶Šå¤šå°±è¶Šèƒ½è¯Šæ–­å’Œä¿®å¤ã€‚如果你ä¸æ¸…楚什么
-是有帮助的信æ¯ï¼Œé‚£å°±è¯·é‡æ¸©ä¸€ä¸‹REPORTING-BUGS文件中的概述过程。任
+是有帮助的信æ¯ï¼Œé‚£å°±è¯·é‡æ¸©ä¸€ä¸‹admin-guide/reporting-bugs.rst文件中的概述过程。任
何攻击性的代ç éƒ½æ˜¯éžå¸¸æœ‰ç”¨çš„,未ç»æŠ¥å‘Šè€…çš„åŒæ„ä¸ä¼šè¢«å–消,除éžå®ƒå·²ç»
被公布于众。
diff --git a/Documentation/zh_CN/SubmittingDrivers b/Documentation/translations/zh_CN/SubmittingDrivers
index d313f5d8448d..929385e4b194 100644
--- a/Documentation/zh_CN/SubmittingDrivers
+++ b/Documentation/translations/zh_CN/SubmittingDrivers
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/SubmittingDrivers
+Chinese translated version of Documentation/process/submitting-drivers.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Li Yang <leo@zh-kernel.org>
---------------------------------------------------------------------
-Documentation/SubmittingDrivers 的中文翻译
+Documentation/process/submitting-drivers.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
@@ -30,7 +30,7 @@ Documentation/SubmittingDrivers 的中文翻译
兴趣的是显å¡é©±åŠ¨ç¨‹åºï¼Œä½ ä¹Ÿè®¸åº”该访问 XFree86 项目(http://www.xfree86.org/)
å’Œï¼æˆ– X.org 项目 (http://x.org)。
-å¦è¯·å‚阅 Documentation/SubmittingPatches 文档。
+å¦è¯·å‚阅 Documentation/process/submitting-patches.rst 文档。
分é…设备å·
@@ -39,7 +39,7 @@ Documentation/SubmittingDrivers 的中文翻译
å—设备和字符设备的主设备å·ä¸Žä»Žè®¾å¤‡å·æ˜¯ç”± Linux 命åç¼–å·åˆ†é…æƒå¨ LANANA(
现在是 Torben Mathiasen)负责分é…。申请的网å€æ˜¯ http://www.lanana.org/。
å³ä½¿ä¸å‡†å¤‡æ交到主æµå†…核的设备驱动也需è¦åœ¨è¿™é‡Œåˆ†é…设备å·ã€‚有关详细信æ¯ï¼Œ
-请å‚阅 Documentation/devices.txt。
+请å‚阅 Documentation/admin-guide/devices.rst。
如果你使用的ä¸æ˜¯å·²ç»åˆ†é…的设备å·ï¼Œé‚£ä¹ˆå½“ä½ æ交设备驱动的时候,它将会被强
制分é…一个新的设备å·ï¼Œå³ä¾¿è¿™ä¸ªè®¾å¤‡å·å’Œä½ ä¹‹å‰å‘给客户的截然ä¸åŒã€‚
@@ -81,7 +81,7 @@ Linux 2.6:
如果你需è¦ä¸€ä¸ª Linux å’Œ NT 的通用驱动接å£ï¼Œé‚£ä¹ˆè¯·åœ¨ç”¨
户空间实现它。
-代ç ï¼š 请使用 Documentation/CodingStyle 中所æè¿°çš„ Linux 代ç é£Ž
+代ç ï¼š 请使用 Documentation/process/coding-style.rst 中所æè¿°çš„ Linux 代ç é£Ž
格。如果你的æŸäº›ä»£ç æ®µï¼ˆä¾‹å¦‚那些与 Windows 驱动程åºåŒ…å…±
享的代ç æ®µï¼‰éœ€è¦ä½¿ç”¨å…¶ä»–æ ¼å¼ï¼Œè€Œä½ å´åªå¸Œæœ›ç»´æŠ¤ä¸€ä»½ä»£ç ï¼Œ
那么请将它们很好地区分出æ¥ï¼Œå¹¶ä¸”注明原因。
@@ -107,7 +107,7 @@ Linux 2.6:
程åºæµ‹è¯•çš„指导,请å‚阅
Documentation/power/drivers-testing.txt。有关驱动程åºç”µ
æºç®¡ç†é—®é¢˜ç›¸å¯¹å…¨é¢çš„概述,请å‚阅
- Documentation/power/devices.txt。
+ Documentation/power/admin-guide/devices.rst。
管ç†ï¼š 如果一个驱动程åºçš„作者还在进行有效的维护,那么通常除了那
些明显正确且ä¸éœ€è¦ä»»ä½•æ£€æŸ¥çš„è¡¥ä¸ä»¥å¤–,其他所有的补ä¸éƒ½ä¼š
diff --git a/Documentation/zh_CN/SubmittingPatches b/Documentation/translations/zh_CN/SubmittingPatches
index 1d3a10f8746b..e9098da8f1a4 100644
--- a/Documentation/zh_CN/SubmittingPatches
+++ b/Documentation/translations/zh_CN/SubmittingPatches
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/SubmittingPatches
+Chinese translated version of Documentation/process/submitting-patches.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: TripleX Chung <triplex@zh-kernel.org>
---------------------------------------------------------------------
-Documentation/SubmittingPatches 的中文翻译
+Documentation/process/submitting-patches.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
@@ -30,9 +30,9 @@ Documentation/SubmittingPatches 的中文翻译
对于想è¦å°†æ”¹åŠ¨æ交到 Linux 内核的个人或者公å¸æ¥è¯´ï¼Œå¦‚æžœä¸ç†Ÿæ‚‰â€œè§„矩â€ï¼Œ
æ交的æµç¨‹ä¼šè®©äººç•æƒ§ã€‚本文档收集了一系列建议,这些建议å¯ä»¥å¤§å¤§çš„æ高你
的改动被接å—的机会。
-阅读 Documentation/SubmitChecklist æ¥èŽ·å¾—在æ交代ç å‰éœ€è¦æ£€æŸ¥çš„项目的列
+阅读 Documentation/process/submit-checklist.rst æ¥èŽ·å¾—在æ交代ç å‰éœ€è¦æ£€æŸ¥çš„项目的列
表。如果你在æ交一个驱动程åºï¼Œé‚£ä¹ˆåŒæ—¶é˜…读一下
-Documentation/SubmittingDrivers 。
+Documentation/process/submitting-drivers.rst 。
--------------------------
@@ -338,7 +338,7 @@ e-mail 标题中的“一å¥è¯æ¦‚è¿°â€æ‰¼è¦çš„æè¿° e-mail 中的补ä¸ã€‚â€
本节包å«å¾ˆå¤šå’Œæ交到内核的代ç æœ‰å…³çš„通常的"规则"。事情永远有例外...但是
你必须真的有好的ç†ç”±è¿™æ ·åšã€‚ä½ å¯ä»¥æŠŠæœ¬èŠ‚å«åšLinus的计算机科学入门课。
-1) 读 Document/CodingStyle
+1) 读 Document/process/coding-style.rst
Nuff 说过,如果你的代ç å’Œè¿™ä¸ªå离太多,那么它有å¯èƒ½ä¼šè¢«æ‹’ç»ï¼Œæ²¡æœ‰æ›´å¤šçš„
审查,没有更多的评价。
@@ -404,8 +404,8 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<https://lkml.org/lkml/2005/7/11/336>
-Kernel Documentation/CodingStyle:
- <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle>
+Kernel Documentation/process/coding-style.rst:
+ <http://sosdg.org/~coywolf/lxr/source/Documentation/process/coding-style.rst>
Linus Torvalds's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
diff --git a/Documentation/zh_CN/arm/Booting b/Documentation/translations/zh_CN/arm/Booting
index 6158a64df80c..1fe866f8218f 100644
--- a/Documentation/zh_CN/arm/Booting
+++ b/Documentation/translations/zh_CN/arm/Booting
@@ -68,7 +68,7 @@ RAM,或å¯èƒ½ä½¿ç”¨å¯¹è¿™ä¸ªè®¾å¤‡å·²çŸ¥çš„ RAM ä¿¡æ¯ï¼Œè¿˜å¯èƒ½ä½¿ç”¨ä»»ä½•
作为替代方案,引导加载程åºä¹Ÿå¯ä»¥é€šè¿‡æ ‡ç­¾åˆ—表传递相关的'console='
选项给内核以指定æŸä¸ªä¸²å£ï¼Œè€Œä¸²å£æ•°æ®æ ¼å¼çš„选项在以下文档中æ述:
- Documentation/kernel-parameters.txt。
+ Documentation/admin-guide/kernel-parameters.rst。
3ã€æ£€æµ‹æœºå™¨ç±»åž‹
diff --git a/Documentation/zh_CN/arm/kernel_user_helpers.txt b/Documentation/translations/zh_CN/arm/kernel_user_helpers.txt
index cd7fc8f34cf9..cd7fc8f34cf9 100644
--- a/Documentation/zh_CN/arm/kernel_user_helpers.txt
+++ b/Documentation/translations/zh_CN/arm/kernel_user_helpers.txt
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/translations/zh_CN/arm64/booting.txt
index c1dd968c5ee9..c1dd968c5ee9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/translations/zh_CN/arm64/booting.txt
diff --git a/Documentation/zh_CN/arm64/legacy_instructions.txt b/Documentation/translations/zh_CN/arm64/legacy_instructions.txt
index 68362a1ab717..68362a1ab717 100644
--- a/Documentation/zh_CN/arm64/legacy_instructions.txt
+++ b/Documentation/translations/zh_CN/arm64/legacy_instructions.txt
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/translations/zh_CN/arm64/memory.txt
index 19b3a52d5d94..19b3a52d5d94 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/translations/zh_CN/arm64/memory.txt
diff --git a/Documentation/zh_CN/arm64/silicon-errata.txt b/Documentation/translations/zh_CN/arm64/silicon-errata.txt
index 39477c75c4a4..39477c75c4a4 100644
--- a/Documentation/zh_CN/arm64/silicon-errata.txt
+++ b/Documentation/translations/zh_CN/arm64/silicon-errata.txt
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/translations/zh_CN/arm64/tagged-pointers.txt
index 2664d1bd5a1c..2664d1bd5a1c 100644
--- a/Documentation/zh_CN/arm64/tagged-pointers.txt
+++ b/Documentation/translations/zh_CN/arm64/tagged-pointers.txt
diff --git a/Documentation/zh_CN/basic_profiling.txt b/Documentation/translations/zh_CN/basic_profiling.txt
index 1e6bf0bdf8f5..1e6bf0bdf8f5 100644
--- a/Documentation/zh_CN/basic_profiling.txt
+++ b/Documentation/translations/zh_CN/basic_profiling.txt
diff --git a/Documentation/zh_CN/email-clients.txt b/Documentation/translations/zh_CN/email-clients.txt
index b9a1a3e6c78d..ec31d97e8d0e 100644
--- a/Documentation/zh_CN/email-clients.txt
+++ b/Documentation/translations/zh_CN/email-clients.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/email-clients.txt
+Chinese translated version of Documentation/process/email-clients.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
---------------------------------------------------------------------
-Documentation/email-clients.txt 的中文翻译
+Documentation/process/email-clients.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
diff --git a/Documentation/zh_CN/filesystems/sysfs.txt b/Documentation/translations/zh_CN/filesystems/sysfs.txt
index 7d3b05edb8ce..7d3b05edb8ce 100644
--- a/Documentation/zh_CN/filesystems/sysfs.txt
+++ b/Documentation/translations/zh_CN/filesystems/sysfs.txt
diff --git a/Documentation/zh_CN/gpio.txt b/Documentation/translations/zh_CN/gpio.txt
index bce972521065..bce972521065 100644
--- a/Documentation/zh_CN/gpio.txt
+++ b/Documentation/translations/zh_CN/gpio.txt
diff --git a/Documentation/zh_CN/io_ordering.txt b/Documentation/translations/zh_CN/io_ordering.txt
index e592daf4e014..e592daf4e014 100644
--- a/Documentation/zh_CN/io_ordering.txt
+++ b/Documentation/translations/zh_CN/io_ordering.txt
diff --git a/Documentation/zh_CN/magic-number.txt b/Documentation/translations/zh_CN/magic-number.txt
index e9db693c0a23..e9db693c0a23 100644
--- a/Documentation/zh_CN/magic-number.txt
+++ b/Documentation/translations/zh_CN/magic-number.txt
diff --git a/Documentation/zh_CN/oops-tracing.txt b/Documentation/translations/zh_CN/oops-tracing.txt
index 9312608ffb8d..41ab53cc0e83 100644
--- a/Documentation/zh_CN/oops-tracing.txt
+++ b/Documentation/translations/zh_CN/oops-tracing.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/oops-tracing.txt
+Chinese translated version of Documentation/admin-guide/oops-tracing.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Dave Young <hidave.darkstar@gmail.com>
---------------------------------------------------------------------
-Documentation/oops-tracing.txt 的中文翻译
+Documentation/admin-guide/oops-tracing.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
@@ -50,7 +50,7 @@ cat /proc/kmsg > file, 然而你必须介入中止传输, kmsg是一个“æ°
æ¯æ»šåŠ¨åˆ°äº†ç»ˆç«¯çš„上é¢ï¼Œä½ ä¼šå‘现以高分辩率å¯åŠ¨ï¼ˆæ¯”如,vga=791)会让你读到更多的文
本。(注æ„:这需è¦vesafb,所以对‘早期’的oops没有帮助)
-(2)用串å£ç»ˆç«¯å¯åŠ¨ï¼ˆè¯·å‚看Documentation/serial-console.txt),è¿è¡Œä¸€ä¸ªnull
+(2)用串å£ç»ˆç«¯å¯åŠ¨ï¼ˆè¯·å‚看Documentation/admin-guide/serial-console.rst),è¿è¡Œä¸€ä¸ªnull
modem到å¦ä¸€å°æœºå™¨å¹¶ç”¨ä½ å–œæ¬¢çš„通讯工具获å–输出。Minicom工作地很好。
(3)使用Kdump(请å‚看Documentation/kdump/kdump.txt),
diff --git a/Documentation/zh_CN/sparse.txt b/Documentation/translations/zh_CN/sparse.txt
index cc144e581515..cc144e581515 100644
--- a/Documentation/zh_CN/sparse.txt
+++ b/Documentation/translations/zh_CN/sparse.txt
diff --git a/Documentation/zh_CN/stable_api_nonsense.txt b/Documentation/translations/zh_CN/stable_api_nonsense.txt
index c26a27d1ee7d..a2b27fab382c 100644
--- a/Documentation/zh_CN/stable_api_nonsense.txt
+++ b/Documentation/translations/zh_CN/stable_api_nonsense.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/stable_api_nonsense.txt
+Chinese translated version of Documentation/process/stable-api-nonsense.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have problem
@@ -9,7 +9,7 @@ is problem with translation.
Maintainer: Greg Kroah-Hartman <greg@kroah.com>
Chinese maintainer: TripleX Chung <zhongyu@18mail.cn>
---------------------------------------------------------------------
-Documentation/stable_api_nonsense.txt 的中文翻译
+Documentation/process/stable-api-nonsense.rst 的中文翻译
如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
diff --git a/Documentation/zh_CN/stable_kernel_rules.txt b/Documentation/translations/zh_CN/stable_kernel_rules.txt
index 26ea5ed7cd9c..db4ba5a0c39a 100644
--- a/Documentation/zh_CN/stable_kernel_rules.txt
+++ b/Documentation/translations/zh_CN/stable_kernel_rules.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/stable_kernel_rules.txt
+Chinese translated version of Documentation/process/stable-kernel-rules.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: TripleX Chung <triplex@zh-kernel.org>
---------------------------------------------------------------------
-Documentation/stable_kernel_rules.txt 的中文翻译
+Documentation/process/stable-kernel-rules.rst 的中文翻译
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
@@ -38,7 +38,7 @@ Documentation/stable_kernel_rules.txt 的中文翻译
 - 没有“理论上的竞争条件”,除非能给出竞争条件如何被利用的解释。
 - 不能存在任何的“琐碎的”修正(拼写修正,去掉多余空格之类的)。
 - 必须被相关子系统的维护者接受。
 - 必须遵循Documentation/SubmittingPatches里的规则。
 + - 必须遵循Documentation/process/submitting-patches.rst里的规则。
å‘稳定版代ç æ ‘æ交补ä¸çš„过程:
diff --git a/Documentation/zh_CN/video4linux/omap3isp.txt b/Documentation/translations/zh_CN/video4linux/omap3isp.txt
index 67ffbf352ae0..67ffbf352ae0 100644
--- a/Documentation/zh_CN/video4linux/omap3isp.txt
+++ b/Documentation/translations/zh_CN/video4linux/omap3isp.txt
diff --git a/Documentation/zh_CN/video4linux/v4l2-framework.txt b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
index 698660b7f21f..698660b7f21f 100644
--- a/Documentation/zh_CN/video4linux/v4l2-framework.txt
+++ b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
diff --git a/Documentation/zh_CN/volatile-considered-harmful.txt b/Documentation/translations/zh_CN/volatile-considered-harmful.txt
index ba8149d2233a..475125967197 100644
--- a/Documentation/zh_CN/volatile-considered-harmful.txt
+++ b/Documentation/translations/zh_CN/volatile-considered-harmful.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/volatile-considered-harmful.txt
+Chinese translated version of Documentation/process/volatile-considered-harmful.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -9,7 +9,7 @@ or if there is a problem with the translation.
Maintainer: Jonathan Corbet <corbet@lwn.net>
Chinese maintainer: Bryan Wu <bryan.wu@analog.com>
---------------------------------------------------------------------
-Documentation/volatile-considered-harmful.txt 的中文翻译
+Documentation/process/volatile-considered-harmful.rst 的中文翻译
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
new file mode 100644
index 000000000000..b38afec35edc
--- /dev/null
+++ b/Documentation/vfio-mediated-device.txt
@@ -0,0 +1,398 @@
+/*
+ * VFIO Mediated devices
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+Virtual Function I/O (VFIO) Mediated devices[1]
+===============================================
+
+The number of use cases for virtualizing DMA devices that do not have built-in
+SR-IOV capability is increasing. Previously, to virtualize such devices,
+developers had to create their own management interfaces and APIs, and then
+integrate them with user space software. To simplify integration with user space
+software, we have identified common requirements and a unified management
+interface for such devices.
+
+The VFIO driver framework provides unified APIs for direct device access. It is
+an IOMMU/device-agnostic framework for exposing direct device access to user
+space in a secure, IOMMU-protected environment. This framework is used for
+multiple devices, such as GPUs, network adapters, and compute accelerators. With
+direct device access, virtual machines or user space applications have direct
+access to the physical device. This framework is reused for mediated devices.
+
+The mediated core driver provides a common interface for mediated device
+management that can be used by drivers of different devices. This module
+provides a generic interface to perform these operations:
+
+* Create and destroy a mediated device
+* Add a mediated device to and remove it from a mediated bus driver
+* Add a mediated device to and remove it from an IOMMU group
+
+The mediated core driver also provides an interface to register a bus driver.
+For example, the mediated VFIO mdev driver is designed for mediated devices and
+supports VFIO APIs. The mediated bus driver adds a mediated device to and
+removes it from a VFIO group.
+
+The following high-level block diagram shows the main components and interfaces
+in the VFIO mediated driver framework. The diagram shows NVIDIA, Intel, and IBM
+devices as examples, because these are the first devices to use this module.
+
+ +---------------+
+ | |
+ | +-----------+ | mdev_register_driver() +--------------+
+ | | | +<------------------------+ |
+ | | mdev | | | |
+ | | bus | +------------------------>+ vfio_mdev.ko |<-> VFIO user
+ | | driver | | probe()/remove() | | APIs
+ | | | | +--------------+
+ | +-----------+ |
+ | |
+ | MDEV CORE |
+ | MODULE |
+ | mdev.ko |
+ | +-----------+ | mdev_register_device() +--------------+
+ | | | +<------------------------+ |
+ | | | | | nvidia.ko |<-> physical
+ | | | +------------------------>+ | device
+ | | | | callbacks +--------------+
+ | | Physical | |
+ | | device | | mdev_register_device() +--------------+
+ | | interface | |<------------------------+ |
+ | | | | | i915.ko |<-> physical
+ | | | +------------------------>+ | device
+ | | | | callbacks +--------------+
+ | | | |
+ | | | | mdev_register_device() +--------------+
+ | | | +<------------------------+ |
+ | | | | | ccw_device.ko|<-> physical
+ | | | +------------------------>+ | device
+ | | | | callbacks +--------------+
+ | +-----------+ |
+ +---------------+
+
+
+Registration Interfaces
+=======================
+
+The mediated core driver provides the following types of registration
+interfaces:
+
+* Registration interface for a mediated bus driver
+* Physical device driver interface
+
+Registration Interface for a Mediated Bus Driver
+------------------------------------------------
+
+The registration interface for a mediated bus driver provides the following
+structure to represent a mediated device's driver:
+
+ /*
+ * struct mdev_driver [2] - Mediated device's driver
+ * @name: driver name
+ * @probe: called when new device created
+ * @remove: called when device removed
+ * @driver: device driver structure
+ */
+ struct mdev_driver {
+ const char *name;
+ int (*probe) (struct device *dev);
+ void (*remove) (struct device *dev);
+ struct device_driver driver;
+ };
+
+A mediated bus driver for mdev should use this structure in the function calls
+to register and unregister itself with the core driver:
+
+* Register:
+
+ extern int mdev_register_driver(struct mdev_driver *drv,
+ struct module *owner);
+
+* Unregister:
+
+ extern void mdev_unregister_driver(struct mdev_driver *drv);
+
+The mediated bus driver is responsible for adding mediated devices to the VFIO
+group when devices are bound to the driver, and for removing mediated devices
+from the VFIO group when devices are unbound from the driver.
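+
+As an illustration only (not part of the documented API surface), a minimal
+mediated bus driver might register itself roughly as follows; the function
+and variable names used here are hypothetical:
+
+	static int my_mdev_probe(struct device *dev)
+	{
+		/* allocate per-device state and add the device to a VFIO group */
+		return 0;
+	}
+
+	static void my_mdev_remove(struct device *dev)
+	{
+		/* undo whatever probe() set up */
+	}
+
+	static struct mdev_driver my_mdev_driver = {
+		.name   = "my_mdev",
+		.probe  = my_mdev_probe,
+		.remove = my_mdev_remove,
+	};
+
+	static int __init my_mdev_init(void)
+	{
+		return mdev_register_driver(&my_mdev_driver, THIS_MODULE);
+	}
+	module_init(my_mdev_init);
+
+	static void __exit my_mdev_exit(void)
+	{
+		mdev_unregister_driver(&my_mdev_driver);
+	}
+	module_exit(my_mdev_exit);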
+
+
+Physical Device Driver Interface
+--------------------------------
+
+The physical device driver interface provides the parent_ops[3] structure to
+define the APIs to manage work in the mediated core driver that is related to
+the physical device.
+
+The structures in the parent_ops structure are as follows:
+
+* dev_attr_groups: attributes of the parent device
+* mdev_attr_groups: attributes of the mediated device
+* supported_config: attributes to define supported configurations
+
+The functions in the parent_ops structure are as follows:
+
+* create: allocate basic resources in a driver for a mediated device
+* remove: free resources in a driver when a mediated device is destroyed
+
+The callbacks in the parent_ops structure are as follows:
+
+* open: open callback of mediated device
+* close: close callback of mediated device
+* ioctl: ioctl callback of mediated device
+* read : read emulation callback
+* write: write emulation callback
+* mmap: mmap emulation callback
+
+A driver should use the parent_ops structure in the function call to register
+itself with the mdev core driver:
+
+extern int mdev_register_device(struct device *dev,
+ const struct parent_ops *ops);
+
+However, the parent_ops structure is not required in the function call that a
+driver should use to unregister itself from the mdev core driver:
+
+extern void mdev_unregister_device(struct device *dev);
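+
+For illustration, a parent device driver might wire these calls into its own
+probe/remove paths roughly as follows. The PCI driver and the names used here
+are hypothetical, and the parent_ops callbacks are left as comments because
+their exact prototypes are declared in include/linux/mdev.h:
+
+	static const struct parent_ops my_parent_ops = {
+		.dev_attr_groups  = NULL,	/* optional parent attributes */
+		.mdev_attr_groups = NULL,	/* optional mdev attributes */
+		/* .supported_config, .create, .remove, .open, .close,
+		 * .ioctl, .read, .write and .mmap would be filled in with
+		 * functions matching the prototypes in include/linux/mdev.h */
+	};
+
+	static int my_pci_probe(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+	{
+		/* set up the physical device, then expose it to mdev */
+		return mdev_register_device(&pdev->dev, &my_parent_ops);
+	}
+
+	static void my_pci_remove(struct pci_dev *pdev)
+	{
+		mdev_unregister_device(&pdev->dev);
+	}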
+
+
+Mediated Device Management Interface Through sysfs
+==================================================
+
+The management interface through sysfs enables user space software, such as
+libvirt, to query and configure mediated devices in a hardware-agnostic fashion.
+This management interface provides flexibility to the underlying physical
+device's driver to support features such as:
+
+* Mediated device hot plug
+* Multiple mediated devices in a single virtual machine
+* Multiple mediated devices from different physical devices
+
+Links in the mdev_bus Class Directory
+-------------------------------------
+The /sys/class/mdev_bus/ directory contains links to devices that are registered
+with the mdev core driver.
+
+Directories and Files Under the sysfs for Each Physical Device
+--------------------------------------------------------------
+
+|- [parent physical device]
+|--- Vendor-specific-attributes [optional]
+|--- [mdev_supported_types]
+| |--- [<type-id>]
+| | |--- create
+| | |--- name
+| | |--- available_instances
+| | |--- device_api
+| | |--- description
+| | |--- [devices]
+| |--- [<type-id>]
+| | |--- create
+| | |--- name
+| | |--- available_instances
+| | |--- device_api
+| | |--- description
+| | |--- [devices]
+| |--- [<type-id>]
+| |--- create
+| |--- name
+| |--- available_instances
+| |--- device_api
+| |--- description
+| |--- [devices]
+
+* [mdev_supported_types]
+
+ The list of currently supported mediated device types and their details.
+
+ [<type-id>], device_api, and available_instances are mandatory attributes
+  that should be provided by the vendor driver.
+
+* [<type-id>]
+
+  The [<type-id>] name is created by adding the device driver string as a
+  prefix to the string provided by the vendor driver. The format of this name
+ is as follows:
+
+ sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
+
+* device_api
+
+ This attribute should show which device API is being created, for example,
+ "vfio-pci" for a PCI device.
+
+* available_instances
+
+ This attribute should show the number of devices of type <type-id> that can be
+ created.
+
+* [devices]
+
+  This directory contains links to the devices of type <type-id> that have
+  been created.
+
+* name
+
+  This attribute should show a human-readable name. This attribute is optional.
+
+* description
+
+  This attribute should show a brief description of the type's features. This
+  attribute is optional.
+
+Directories and Files Under the sysfs for Each mdev Device
+----------------------------------------------------------
+
+|- [parent phy device]
+|--- [$MDEV_UUID]
+ |--- remove
+ |--- mdev_type {link to its type}
+ |--- vendor-specific-attributes [optional]
+
+* remove (write only)
+Writing '1' to the 'remove' file destroys the mdev device. The vendor driver can
+fail the remove() callback if that device is active and the vendor driver
+doesn't support hot unplug.
+
+Example:
+ # echo 1 > /sys/bus/mdev/devices/$mdev_UUID/remove
+
+Mediated Device Hot Plug
+------------------------
+
+Mediated devices can be created and assigned at runtime. The procedure to hot
+plug a mediated device is the same as the procedure to hot plug a PCI device.
+
+Translation APIs for Mediated Devices
+=====================================
+
+The following APIs are provided for translating user pfn to host pfn in a VFIO
+driver:
+
+extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage, int prot, unsigned long *phys_pfn);
+
+extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+ int npage);
+
+These functions call back into the back-end IOMMU module by using the pin_pages
+and unpin_pages callbacks of the struct vfio_iommu_driver_ops[4]. Currently
+these callbacks are supported in the TYPE1 IOMMU module. To enable them for
+other IOMMU back-end modules, such as the PPC64 sPAPR module, those modules
+need to provide these two callback functions.
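+
+As a sketch only (the helper name is hypothetical, and it is assumed here that
+vfio_pin_pages() returns the number of pages pinned on success and that the
+IOMMU_READ/IOMMU_WRITE protection flags from include/linux/iommu.h apply), a
+vendor driver could pin a single user page before programming DMA like this:
+
+	static int my_pin_one_page(struct device *mdev_dev,
+				   unsigned long user_pfn,
+				   unsigned long *host_pfn)
+	{
+		int ret = vfio_pin_pages(mdev_dev, &user_pfn, 1,
+					 IOMMU_READ | IOMMU_WRITE, host_pfn);
+
+		if (ret < 0)
+			return ret;
+		return ret == 1 ? 0 : -EFAULT;	/* expect exactly one page */
+	}
+
+The pin would later be dropped with the matching call:
+
+	vfio_unpin_pages(mdev_dev, &user_pfn, 1);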
+
+Using the Sample Code
+=====================
+
+mtty.c in the samples/vfio-mdev/ directory is a sample driver program that
+demonstrates how to use the mediated device framework.
+
+The sample driver creates an mdev device that simulates a serial port over a PCI
+card.
+
+1. Build and load the mtty.ko module.
+
+ This step creates a dummy device, /sys/devices/virtual/mtty/mtty/
+
+ Files in this device directory in sysfs are similar to the following:
+
+ # tree /sys/devices/virtual/mtty/mtty/
+ /sys/devices/virtual/mtty/mtty/
+ |-- mdev_supported_types
+ | |-- mtty-1
+ | | |-- available_instances
+ | | |-- create
+ | | |-- device_api
+ | | |-- devices
+ | | `-- name
+ | `-- mtty-2
+ | |-- available_instances
+ | |-- create
+ | |-- device_api
+ | |-- devices
+ | `-- name
+ |-- mtty_dev
+ | `-- sample_mtty_dev
+ |-- power
+ | |-- autosuspend_delay_ms
+ | |-- control
+ | |-- runtime_active_time
+ | |-- runtime_status
+ | `-- runtime_suspended_time
+ |-- subsystem -> ../../../../class/mtty
+ `-- uevent
+
+2. Create a mediated device by using the dummy device that you created in the
+ previous step.
+
+ # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
+ /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
+
+3. Add parameters to qemu-kvm.
+
+ -device vfio-pci,\
+ sysfsdev=/sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001
+
+4. Boot the VM.
+
+   In the Linux guest VM, even though no real serial hardware exists on the
+   host, the device appears as follows:
+
+ # lspci -s 00:05.0 -xxvv
+ 00:05.0 Serial controller: Device 4348:3253 (rev 10) (prog-if 02 [16550])
+ Subsystem: Device 4348:3253
+ Physical Slot: 5
+ Control: I/O+ Mem- BusMaster- SpecCycle- MemWINV- VGASnoop- ParErr-
+ Stepping- SERR- FastB2B- DisINTx-
+ Status: Cap- 66MHz- UDF- FastB2B- ParErr- DEVSEL=medium >TAbort-
+ <TAbort- <MAbort- >SERR- <PERR- INTx-
+ Interrupt: pin A routed to IRQ 10
+ Region 0: I/O ports at c150 [size=8]
+ Region 1: I/O ports at c158 [size=8]
+ Kernel driver in use: serial
+ 00: 48 43 53 32 01 00 00 02 10 02 00 07 00 00 00 00
+ 10: 51 c1 00 00 59 c1 00 00 00 00 00 00 00 00 00 00
+ 20: 00 00 00 00 00 00 00 00 00 00 00 00 48 43 53 32
+ 30: 00 00 00 00 00 00 00 00 00 00 00 00 0a 01 00 00
+
+ In the Linux guest VM, dmesg output for the device is as follows:
+
+ serial 0000:00:05.0: PCI INT A -> Link[LNKA] -> GSI 10 (level, high) -> IRQ
+10
+ 0000:00:05.0: ttyS1 at I/O 0xc150 (irq = 10) is a 16550A
+ 0000:00:05.0: ttyS2 at I/O 0xc158 (irq = 10) is a 16550A
+
+
+5. In the Linux guest VM, check the serial ports.
+
+ # setserial -g /dev/ttyS*
+ /dev/ttyS0, UART: 16550A, Port: 0x03f8, IRQ: 4
+ /dev/ttyS1, UART: 16550A, Port: 0xc150, IRQ: 10
+ /dev/ttyS2, UART: 16550A, Port: 0xc158, IRQ: 10
+
+6. Using minicom or any other terminal emulation program, open port /dev/ttyS1
+   or /dev/ttyS2 with hardware flow control disabled.
+
+7. Type data on the minicom terminal or send data to the terminal emulation
+ program and read the data.
+
+   Data is looped back by the host's mtty driver.
+
+8. Destroy the mediated device that you created.
+
+ # echo 1 > /sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001/remove
+
+References
+==========
+
+[1] See Documentation/vfio.txt for more information on VFIO.
+[2] struct mdev_driver in include/linux/mdev.h
+[3] struct parent_ops in include/linux/mdev.h
+[4] struct vfio_iommu_driver_ops in include/linux/vfio.h
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index fee9f2bf9c64..69fe1a8b7ad1 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -6,6 +6,8 @@ cpuid.txt
- KVM-specific cpuid leaves (x86).
devices/
- KVM_CAP_DEVICE_CTRL userspace API.
+halt-polling.txt
+ - notes on halt-polling
hypercalls.txt
- KVM hypercalls.
locking.txt
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9ab16b2..03145b7cafaa 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flags member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called. If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
struct kvm_clock_data {
__u64 clock; /* kvmclock current value */
__u32 flags;
@@ -2023,6 +2034,8 @@ registers, find a list below:
PPC | KVM_REG_PPC_WORT | 64
PPC | KVM_REG_PPC_SPRG9 | 64
PPC | KVM_REG_PPC_DBSR | 32
+ PPC | KVM_REG_PPC_TIDR | 64
+ PPC | KVM_REG_PPC_PSSCR | 64
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
@@ -2039,6 +2052,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TM_VSCR | 32
PPC | KVM_REG_PPC_TM_DSCR | 64
PPC | KVM_REG_PPC_TM_TAR | 64
+ PPC | KVM_REG_PPC_TM_XER | 64
| |
MIPS | KVM_REG_MIPS_R0 | 64
...
@@ -2198,7 +2212,7 @@ after pausing the vcpu, but before it is resumed.
4.71 KVM_SIGNAL_MSI
Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86 arm64
+Architectures: x86 arm arm64
Type: vm ioctl
Parameters: struct kvm_msi (in)
Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virtual/kvm/halt-polling.txt
new file mode 100644
index 000000000000..4a8418318769
--- /dev/null
+++ b/Documentation/virtual/kvm/halt-polling.txt
@@ -0,0 +1,127 @@
+The KVM halt polling system
+===========================
+
+The KVM halt polling system provides a feature within KVM whereby the latency
+of a guest can, under some circumstances, be reduced by polling in the host
+for some time period after the guest has elected to no longer run by ceding.
+That is, when a guest vcpu has ceded, or in the case of powerpc when all of the
+vcpus of a single vcore have ceded, the host kernel polls for wakeup conditions
+before giving up the cpu to the scheduler in order to let something else run.
+
+Polling provides a latency advantage in cases where the guest can be run again
+very quickly by at least saving us a trip through the scheduler, normally on
+the order of a few micro-seconds, although performance benefits are workload
+dependant. In the event that no wakeup source arrives during the polling
+interval or some other task on the runqueue is runnable the scheduler is
+invoked. Thus halt polling is especially useful on workloads with very short
+wakeup periods where the time spent halt polling is minimised and the time
+savings of not invoking the scheduler are distinguishable.
+
+The generic halt polling code is implemented in:
+
+ virt/kvm/kvm_main.c: kvm_vcpu_block()
+
+The powerpc kvm-hv specific case is implemented in:
+
+ arch/powerpc/kvm/book3s_hv.c: kvmppc_vcore_blocked()
+
+Halt Polling Interval
+=====================
+
+The maximum time for which to poll before invoking the scheduler, referred to
+as the halt polling interval, is increased and decreased based on the perceived
+effectiveness of the polling in an attempt to limit pointless polling.
+This value is stored in either the vcpu struct:
+
+ kvm_vcpu->halt_poll_ns
+
+or in the case of powerpc kvm-hv, in the vcore struct:
+
+ kvmppc_vcore->halt_poll_ns
+
+Thus this is a per vcpu (or vcore) value.
+
+During polling if a wakeup source is received within the halt polling interval,
+the interval is left unchanged. In the event that a wakeup source isn't
+received during the polling interval (and thus schedule is invoked) there are
+two possibilities: either the polling interval and total block time[0] were less than
+the global max polling interval (see module params below), or the total block
+time was greater than the global max polling interval.
+
+In the event that both the polling interval and total block time were less than
+the global max polling interval then the polling interval can be increased in
+the hope that next time during the longer polling interval the wakeup source
+will be received while the host is polling and the latency benefits will be
+received. The polling interval is grown in the function grow_halt_poll_ns() and
+is multiplied by the module parameter halt_poll_ns_grow.
+
+In the event that the total block time was greater than the global max polling
+interval then the host will never poll for long enough (limited by the global
+max) to wakeup during the polling interval so it may as well be shrunk in order
+to avoid pointless polling. The polling interval is shrunk in the function
+shrink_halt_poll_ns() and is divided by the module parameter
+halt_poll_ns_shrink, or set to 0 iff halt_poll_ns_shrink == 0.
+
+It is worth noting that this adjustment process attempts to home in on some
+steady state polling interval but will only really do a good job for wakeups
+which come at an approximately constant rate, otherwise there will be constant
+adjustment of the polling interval.
+
+[0] total block time: the time between when the halt polling function is
+ invoked and a wakeup source received (irrespective of
+ whether the scheduler is invoked within that function).
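+
+For illustration only, the grow/shrink decision described above boils down to
+roughly the following sketch. This is not the actual kernel code (for example,
+the real grow_halt_poll_ns() also grows from a base value when the interval is
+currently zero and caps the result at the global max):
+
+	/* Called when no wakeup arrived during the poll and schedule() ran. */
+	static void adjust_halt_poll_ns(struct kvm_vcpu *vcpu,
+					u64 poll_ns, u64 block_ns)
+	{
+		if (poll_ns < halt_poll_ns && block_ns < halt_poll_ns) {
+			/* polling a little longer might have caught the wakeup */
+			vcpu->halt_poll_ns *= halt_poll_ns_grow;
+		} else if (block_ns > halt_poll_ns) {
+			/* even the global max would not have been long enough */
+			if (halt_poll_ns_shrink == 0)
+				vcpu->halt_poll_ns = 0;
+			else
+				vcpu->halt_poll_ns /= halt_poll_ns_shrink;
+		}
+	}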
+
+Module Parameters
+=================
+
+The kvm module has 3 tuneable module parameters to adjust the global max
+polling interval as well as the rate at which the polling interval is grown and
+shrunk. These variables are defined in include/linux/kvm_host.h and as module
+parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
+powerpc kvm-hv case.
+
+Module Parameter | Description | Default Value
+--------------------------------------------------------------------------------
+halt_poll_ns | The global max polling interval | KVM_HALT_POLL_NS_DEFAULT
+ | which defines the ceiling value |
+ | of the polling interval for | (per arch value)
+ | each vcpu. |
+--------------------------------------------------------------------------------
+halt_poll_ns_grow | The value by which the halt | 2
+ | polling interval is multiplied |
+ | in the grow_halt_poll_ns() |
+ | function. |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink | The value by which the halt | 0
+ | polling interval is divided in |
+ | the shrink_halt_poll_ns() |
+ | function. |
+--------------------------------------------------------------------------------
+
+These module parameters can be set from the sysfs files in:
+
+ /sys/module/kvm/parameters/
+
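+For example, the global cap could be changed at runtime with something like
+the following (the value shown is purely illustrative):
+
+	# echo 250000 > /sys/module/kvm/parameters/halt_poll_ns
+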
+Note: these module parameters are system wide values and cannot be tuned on a
+      per-VM basis.
+
+Further Notes
+=============
+
+- Care should be taken when setting the halt_poll_ns module parameter as a
+large value has the potential to drive the cpu usage to 100% on a machine which
+would be almost entirely idle otherwise. This is because even if a guest has
+wakeups during which very little work is done and which are quite far apart, if
+the period is shorter than the global max polling interval (halt_poll_ns) then
+the host will always poll for the entire block time and thus cpu utilisation
+will go to 100%.
+
+- Halt polling essentially presents a trade-off between power usage and latency,
+and the module parameters should be used to tune this trade-off. Idle cpu time
+is essentially converted to host kernel time with the aim of decreasing latency
+when entering the guest.
+
+- Halt polling will only be conducted by the host when no other tasks are
+runnable on that cpu, otherwise the polling will cease immediately and
+schedule will be invoked to allow that other task to run. Thus halt polling
+does not allow a guest to mount a denial of service attack against the cpu.
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index f2491a8c68b4..e5dd9f4d6100 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
1. Acquisition Orders
---------------------
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+ them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
+else is a leaf: no other lock is taken inside the critical sections.
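+
+For example (illustration only, not taken from the kernel source), a path that
+needs both of the first two locks above must take them in this order:
+
+	mutex_lock(&kvm->lock);
+	mutex_lock(&vcpu->mutex);
+	/* ... work that needs both locks ... */
+	mutex_unlock(&vcpu->mutex);
+	mutex_unlock(&kvm->lock);
+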
2: Exception
------------
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 2a71c8f29f68..0a9ea515512a 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -208,7 +208,9 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
__u64 steal;
__u32 version;
__u32 flags;
- __u32 pad[12];
+ __u8 preempted;
+ __u8 u8_pad[3];
+ __u32 pad[11];
}
whose data will be filled in by the hypervisor periodically. Only one
@@ -232,6 +234,11 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
nanoseconds. Time during which the vcpu is idle, will not be
reported as steal time.
+	preempted: indicates whether the vCPU that owns this struct is
+	running or not. Non-zero values mean the vCPU has been preempted.
+	Zero means the vCPU is not preempted. NOTE, it is always zero if
+	the hypervisor doesn't support this field.
+
MSR_KVM_EOI_EN: 0x4b564d04
data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
when disabled. Bit 1 is reserved and must be zero. When PV end of
diff --git a/Documentation/virtual/kvm/review-checklist.txt b/Documentation/virtual/kvm/review-checklist.txt
index a850986ed684..a83b27635fdd 100644
--- a/Documentation/virtual/kvm/review-checklist.txt
+++ b/Documentation/virtual/kvm/review-checklist.txt
@@ -1,8 +1,8 @@
Review checklist for kvm patches
================================
-1. The patch must follow Documentation/CodingStyle and
- Documentation/SubmittingPatches.
+1. The patch must follow Documentation/process/coding-style.rst and
+ Documentation/process/submitting-patches.rst.
2. Patches should be against kvm.git master branch.
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index e0b58c0e6b49..a08f71647714 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -82,7 +82,7 @@ such as DMA or DMA32, represent relatively scarce resources. Linux chooses
a default zonelist order based on the sizes of the various zone types relative
to the total memory of the node and the total memory of the system. The
default zonelist order may be overridden using the numa_zonelist_order kernel
-boot parameter or sysctl. [see Documentation/kernel-parameters.txt and
+boot parameter or sysctl. [see Documentation/admin-guide/kernel-parameters.rst and
Documentation/sysctl/vm.txt]
By default, Linux will attempt to satisfy memory allocation requests from the
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 2ec6adb5a4ce..c4171e4519c2 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -136,6 +136,11 @@ or enable it back by writing 1:
echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page
echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page
+Some userspace (such as a test program, or an optimized memory allocation
+library) may want to know the size (in bytes) of a transparent hugepage:
+
+cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
+
khugepaged will be automatically started when
transparent_hugepage/enabled is set to "always" or "madvise", and it'll
be automatically shutdown if it's set to "never".
diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.txt b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
index 271b8850dde7..9fffb2958d13 100644
--- a/Documentation/watchdog/convert_drivers_to_kernel_api.txt
+++ b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
@@ -213,6 +213,6 @@ The entry for the driver now needs to select WATCHDOG_CORE:
Create a patch and send it to upstream
--------------------------------------
-Make sure you understood Documentation/SubmittingPatches and send your patch to
+Make sure you understood Documentation/process/submitting-patches.rst and send your patch to
linux-watchdog@vger.kernel.org. We are looking forward to it :)
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index a8d364227a77..e21850e270a0 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -4,7 +4,7 @@ be listed here unless the driver has its own driver-specific information
file.
-See Documentation/kernel-parameters.txt for information on
+See Documentation/admin-guide/kernel-parameters.rst for information on
providing kernel parameters for builtin drivers versus loadable
modules.
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 9da6f3512249..5e9b826b5f62 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -921,7 +921,7 @@ They should normally not be deleted from the kernel command line even
though not all of them are actually meaningful to the kernel. Boot
loader authors who need additional command line options for the boot
loader itself should get them registered in
-Documentation/kernel-parameters.txt to make sure they will not
+Documentation/admin-guide/kernel-parameters.rst to make sure they will not
conflict with actual kernel options now or in the future.
vga=<mode>
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 0965a71f9942..61b611e9eeaf 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -277,10 +277,6 @@ IOMMU (input/output memory management unit)
space might stop working. Use this option if you have devices that
are accessed from userspace directly on some PCI host bridge.
-Debugging
-
- kstack=N Print N words from the kernel stack in oops dumps.
-
Miscellaneous
nogbpages
diff --git a/MAINTAINERS b/MAINTAINERS
index 2c7a7b6ba7bf..9c481470e277 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -35,13 +35,13 @@ trivial patch so apply some common sense.
PLEASE check your patch with the automated style checker
(scripts/checkpatch.pl) to catch trivial style violations.
- See Documentation/CodingStyle for guidance here.
+ See Documentation/process/coding-style.rst for guidance here.
PLEASE CC: the maintainers and mailing lists that are generated
by scripts/get_maintainer.pl. The results returned by the
script will be best if you have git installed and are making
your changes in a branch derived from Linus' latest git tree.
- See Documentation/SubmittingPatches for details.
+ See Documentation/process/submitting-patches.rst for details.
PLEASE try to include any credit lines you want added with the
patch. It avoids people being missed off by mistake and makes
@@ -54,7 +54,7 @@ trivial patch so apply some common sense.
of the Linux Foundation certificate of contribution and should
include a Signed-off-by: line. The current version of this
"Developer's Certificate of Origin" (DCO) is listed in the file
- Documentation/SubmittingPatches.
+ Documentation/process/submitting-patches.rst.
6. Make sure you have the right to send any changes you make. If you
do changes at work you may find your employer owns the patch
@@ -74,9 +74,14 @@ Descriptions of section entries:
These reviewers should be CCed on patches.
L: Mailing list that is relevant to this area
W: Web-page with status/info
+ B: URI for where to file bugs. A web-page with detailed bug
+ filing info, a direct bug tracker link, or a mailto: URI.
+ C: URI for chat protocol, server and channel where developers
+ usually hang out, for example irc://server/channel.
Q: Patchwork web based patch tracking system site
T: SCM tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
+ B: Bug tracking system location.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -255,6 +260,12 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-104-idio-16.c
+ACCES 104-QUAD-8 IIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/counter/104-quad-8.c
+
ACENIC DRIVER
M: Jes Sorensen <jes@trained-monkey.org>
L: linux-acenic@sunsite.dk
@@ -281,6 +292,7 @@ L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
Q: https://patchwork.kernel.org/project/linux-acpi/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
@@ -304,6 +316,8 @@ W: https://acpica.org/
W: https://github.com/acpica/acpica/
Q: https://patchwork.kernel.org/project/linux-acpi/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
+B: https://bugs.acpica.org
S: Supported
F: drivers/acpi/acpica/
F: include/acpi/
@@ -313,6 +327,7 @@ ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/fan.c
@@ -328,6 +343,7 @@ ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/*thermal*
@@ -335,6 +351,7 @@ ACPI VIDEO DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/acpi/acpi_video.c
@@ -523,6 +540,7 @@ S: Supported
F: fs/afs/
F: include/net/af_rxrpc.h
F: net/rxrpc/af_rxrpc.c
+W: https://www.infradead.org/~dhowells/kafs/
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
@@ -570,6 +588,11 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/airspy/
+ALACRITECH GIGABIT ETHERNET DRIVER
+M: Lino Sanfilippo <LinoSanfilippo@gmx.de>
+S: Maintained
+F: drivers/net/ethernet/alacritech/*
+
ALCATEL SPEEDTOUCH USB DRIVER
M: Duncan Sands <duncan.sands@free.fr>
L: linux-usb@vger.kernel.org
@@ -787,7 +810,7 @@ S: Supported
F: drivers/iio/*/ad*
X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
-F: staging/iio/trigger/iio-trig-bfin-timer.c
+F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
ANALOG DEVICES INC DMA DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
@@ -1036,6 +1059,7 @@ F: arch/arm/mach-meson/
F: arch/arm/boot/dts/meson*
F: arch/arm64/boot/dts/amlogic/
F: drivers/pinctrl/meson/
+F: drivers/mmc/host/meson*
N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
@@ -1775,6 +1799,7 @@ F: drivers/char/hw_random/st-rng.c
F: drivers/clocksource/arm_global_timer.c
F: drivers/clocksource/clksrc_st_lpc.c
F: drivers/cpufreq/sti-cpufreq.c
+F: drivers/dma/st_fdma*
F: drivers/i2c/busses/i2c-st.c
F: drivers/media/rc/st_rc.c
F: drivers/media/platform/sti/c8sectpfe/
@@ -1785,6 +1810,7 @@ F: drivers/phy/phy-stih407-usb.c
F: drivers/phy/phy-stih41x-usb.c
F: drivers/pinctrl/pinctrl-st.c
F: drivers/remoteproc/st_remoteproc.c
+F: drivers/remoteproc/st_slim_rproc.c
F: drivers/reset/sti/
F: drivers/rtc/rtc-st-lpc.c
F: drivers/tty/serial/st-asc.c
@@ -1793,6 +1819,7 @@ F: drivers/usb/host/ehci-st.c
F: drivers/usb/host/ohci-st.c
F: drivers/watchdog/st_lpc_wdt.c
F: drivers/ata/ahci_st.c
+F: include/linux/remoteproc/st_slim_rproc.h
ARM/STM32 ARCHITECTURE
M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
@@ -2318,6 +2345,13 @@ F: include/uapi/linux/ax25.h
F: include/net/ax25.h
F: net/ax25/
+AXENTIA ASOC DRIVERS
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/axentia,*
+F: sound/soc/atmel/tse850-pcm5142.c
+
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -2530,6 +2564,8 @@ L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Supported
F: kernel/bpf/
+F: tools/testing/selftests/bpf/
+F: lib/test_bpf.c
BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
@@ -2552,15 +2588,18 @@ S: Supported
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2587,6 +2626,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
S: Maintained
N: bcm2835
+F: drivers/staging/vc04_services
BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
@@ -2739,6 +2779,14 @@ L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mtd/nand/brcmnand/
+BROADCOM STB AVS CPUFREQ DRIVER
+M: Markus Mayer <mmayer@broadcom.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
+F: drivers/cpufreq/brcmstb*
+
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
M: Rafał Miłecki <zajec5@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -2767,7 +2815,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@@ -2925,7 +2975,7 @@ CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVER
M: Kevin Tsai <ktsai@capellamicro.com>
S: Maintained
F: drivers/iio/light/cm*
-F: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+F: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
CAVIUM I2C DRIVER
M: Jan Glauber <jglauber@cavium.com>
@@ -3025,6 +3075,12 @@ F: drivers/usb/host/whci/
F: drivers/usb/wusbcore/
F: include/linux/usb/wusb*
+HT16K33 LED CONTROLLER DRIVER
+M: Robin van der Gracht <robin@protonic.nl>
+S: Maintained
+F: drivers/auxdisplay/ht16k33.c
+F: Documentation/devicetree/bindings/display/ht16k33.txt
+
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
W: http://miguelojeda.es/auxdisplay.htm
@@ -3073,7 +3129,7 @@ M: Harry Wei <harryxiyou@gmail.com>
L: xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/zh_CN/
+F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@nxp.com>
@@ -3144,15 +3200,15 @@ S: Supported
F: drivers/clocksource
CISCO FCOE HBA DRIVER
-M: Hiral Patel <hiralpat@cisco.com>
-M: Suma Ramars <sramars@cisco.com>
-M: Brian Uchino <buchino@cisco.com>
+M: Satish Kharat <satishkh@cisco.com>
+M: Sesidhar Baddela <sebaddel@cisco.com>
+M: Karan Tilak Kumar <kartilak@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
-M: Narsimhulu Musini <nmusini@cisco.com>
+M: Karan Tilak Kumar <kartilak@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -3329,6 +3385,7 @@ L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+B: https://bugzilla.kernel.org
F: Documentation/cpu-freq/
F: drivers/cpufreq/
F: include/linux/cpufreq.h
@@ -3368,6 +3425,7 @@ M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+B: https://bugzilla.kernel.org
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -3413,6 +3471,7 @@ F: arch/*/crypto/
F: crypto/
F: drivers/crypto/
F: include/crypto/
+F: include/linux/crypto*
CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
M: Neil Horman <nhorman@tuxdriver.com>
@@ -3906,7 +3965,7 @@ F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
F: Documentation/dma-buf-sharing.txt
-T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
SYNC FILE FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
@@ -3914,10 +3973,12 @@ R: Gustavo Padovan <gustavo@padovan.org>
S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
-F: drivers/dma-buf/sync_file.c
+F: drivers/dma-buf/sync_*
+F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
+F: include/uapi/linux/sync_file.h
F: Documentation/sync_file.txt
-T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
@@ -4005,6 +4066,8 @@ DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~airlied/linux
+B: https://bugs.freedesktop.org/
+C: irc://chat.freenode.net/dri-devel
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
@@ -4015,11 +4078,30 @@ F: Documentation/gpu/
F: include/drm/
F: include/uapi/drm/
+DRM DRIVERS AND MISC GPU PATCHES
+M: Daniel Vetter <daniel.vetter@intel.com>
+M: Jani Nikula <jani.nikula@linux.intel.com>
+M: Sean Paul <seanpaul@chromium.org>
+W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/gpu/
+F: drivers/gpu/vga/
+F: drivers/gpu/drm/*
+F: include/drm/drm*
+F: include/uapi/drm/drm*
+
DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
+DRM DRIVERS FOR BRIDGE CHIPS
+M: Archit Taneja <architt@codeaurora.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/bridge/
+
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
@@ -4055,8 +4137,9 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
-L: dri-devel@lists.freedesktop.org
W: https://01.org/linuxgraphics/
+B: https://01.org/linuxgraphics/documentation/how-report-bugs
+C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel
S: Supported
@@ -4065,6 +4148,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
+INTEL GVT-g DRIVERS (Intel GPU Virtualization)
+M: Zhenyu Wang <zhenyuw@linux.intel.com>
+M: Zhi Wang <zhi.a.wang@intel.com>
+L: igvt-g-dev@lists.01.org
+L: intel-gfx@lists.freedesktop.org
+W: https://01.org/igvt-g
+T: git https://github.com/01org/gvt-linux.git
+S: Supported
+F: drivers/gpu/drm/i915/gvt/
+
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
@@ -4079,6 +4172,15 @@ S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+DRM DRIVERS FOR AMLOGIC SOCS
+M: Neil Armstrong <narmstrong@baylibre.com>
+L: dri-devel@lists.freedesktop.org
+L: linux-amlogic@lists.infradead.org
+W: http://linux-meson.com/
+S: Supported
+F: drivers/gpu/drm/meson/
+F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
M: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -4118,6 +4220,7 @@ F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <z.liuxinliang@hisilicon.com>
+M: Rongrong Zou <zourongrong@gmail.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@@ -4242,6 +4345,7 @@ DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@armlinux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
+L: etnaviv@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv/
@@ -4282,6 +4386,13 @@ S: Maintained
F: drivers/gpu/drm/tilcdc/
F: Documentation/devicetree/bindings/display/tilcdc/
+DRM DRIVERS FOR ZTE ZX
+M: Shawn Guo <shawnguo@kernel.org>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/zte/
+F: Documentation/devicetree/bindings/display/zte,vou.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -4626,12 +4737,14 @@ L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
F: Documentation/efi-stub.txt
-F: arch/ia64/kernel/efi.c
+F: arch/*/kernel/efi.c
F: arch/x86/boot/compressed/eboot.[ch]
-F: arch/x86/include/asm/efi.h
+F: arch/*/include/asm/efi.h
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
+F: arch/arm/boot/compressed/efi-header.S
+F: arch/arm64/kernel/efi-entry.S
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
@@ -4683,11 +4796,11 @@ M: David Woodhouse <dwmw2@infradead.org>
L: linux-embedded@vger.kernel.org
S: Maintained
-EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
-M: James Smart <james.smart@avagotech.com>
-M: Dick Kennedy <dick.kennedy@avagotech.com>
+EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
+M: James Smart <james.smart@broadcom.com>
+M: Dick Kennedy <dick.kennedy@broadcom.com>
L: linux-scsi@vger.kernel.org
-W: http://www.avagotech.com
+W: http://www.broadcom.com
S: Supported
F: drivers/scsi/lpfc/
@@ -4945,7 +5058,9 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@opensource.altera.com>
R: Moritz Fischer <moritz.fischer@ettus.com>
+L: linux-fpga@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
F: drivers/fpga/
F: include/linux/fpga/fpga-mgr.h
W: http://www.rocketboards.org
@@ -4963,10 +5078,9 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-fbdev@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
-S: Maintained
+S: Orphan
F: Documentation/fb/
F: drivers/video/
F: include/video/
@@ -4974,6 +5088,14 @@ F: include/linux/fb.h
F: include/uapi/video/
F: include/uapi/linux/fb.h
+FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
+M: Horia Geantă <horia.geanta@nxp.com>
+M: Dan Douglass <dan.douglass@nxp.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/caam/
+F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+
FREESCALE DIU FRAMEBUFFER DRIVER
M: Timur Tabi <timur@tabi.org>
L: linux-fbdev@vger.kernel.org
@@ -5039,9 +5161,18 @@ S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+FREESCALE SOC DRIVERS
+M: Scott Wood <oss@buserror.net>
+L: linuxppc-dev@lists.ozlabs.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: drivers/soc/fsl/
+F: include/linux/fsl/
+
FREESCALE QUICC ENGINE LIBRARY
+M: Qiang Zhao <qiang.zhao@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
-S: Orphan
+S: Maintained
F: drivers/soc/fsl/qe/
F: include/soc/fsl/*qe*.h
F: include/soc/fsl/*ucc*.h
@@ -5093,13 +5224,6 @@ F: sound/soc/fsl/fsl*
F: sound/soc/fsl/imx*
F: sound/soc/fsl/mpc8610_hpcd.c
-FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
-M: "J. German Rivera" <German.Rivera@freescale.com>
-M: Stuart Yoder <stuart.yoder@nxp.com>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/staging/fsl-mc/
-
FREEVXFS FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@ -5133,6 +5257,7 @@ F: include/linux/fscache*.h
FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
+L: linux-fsdevel@vger.kernel.org
S: Supported
F: fs/crypto/
F: include/linux/fscrypto.h
@@ -5197,6 +5322,7 @@ L: kernel-hardening@lists.openwall.com
S: Maintained
F: scripts/gcc-plugins/
F: scripts/gcc-plugin.sh
+F: scripts/Makefile.gcc-plugins
F: Documentation/gcc-plugins.txt
GCOV BASED KERNEL PROFILING
@@ -5608,7 +5734,6 @@ F: drivers/watchdog/hpwdt.c
HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
M: Don Brace <don.brace@microsemi.com>
-L: iss_storagedev@hp.com
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
@@ -5619,7 +5744,6 @@ F: include/uapi/linux/cciss*.h
HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
M: Don Brace <don.brace@microsemi.com>
-L: iss_storagedev@hp.com
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
@@ -5658,6 +5782,7 @@ HIBERNATION (aka Software Suspend, aka swsusp)
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
+B: https://bugzilla.kernel.org
S: Supported
F: arch/x86/power/
F: drivers/base/power/
@@ -5839,6 +5964,7 @@ F: drivers/input/serio/hyperv-keyboard.c
F: drivers/pci/host/pci-hyperv.c
F: drivers/net/hyperv/
F: drivers/scsi/storvsc_drv.c
+F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
F: include/linux/hyperv.h
F: tools/hv/
@@ -6082,14 +6208,9 @@ S: Maintained
F: Documentation/cdrom/ide-cd
F: drivers/ide/ide-cd*
-IDLE-I7300
-M: Andy Henroid <andrew.d.henroid@intel.com>
-L: linux-pm@vger.kernel.org
-S: Supported
-F: drivers/idle/i7300_idle.c
-
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <aar@pengutronix.de>
+M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
@@ -6119,6 +6240,22 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/iguanair.c
+IIO DIGITAL POTENTIOMETER DAC
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
+F: Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
+F: drivers/iio/dac/dpot-dac.c
+
+IIO ENVELOPE DETECTOR
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
+F: Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
+F: drivers/iio/adc/envelope-detector.c
+
IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@kernel.org>
R: Hartmut Knaack <knaack.h@gmx.de>
@@ -6276,9 +6413,11 @@ S: Maintained
F: drivers/platform/x86/intel-vbtn.c
INTEL IDLE DRIVER
+M: Jacob Pan <jacob.jun.pan@linux.intel.com>
M: Len Brown <lenb@kernel.org>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/idle/intel_idle.c
@@ -6495,6 +6634,13 @@ S: Maintained
F: arch/x86/include/asm/pmc_core.h
F: drivers/platform/x86/intel_pmc_core*
+INVENSENSE MPU-3050 GYROSCOPE DRIVER
+M: Linus Walleij <linus.walleij@linaro.org>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/gyro/mpu3050*
+F: Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
+
IOC3 ETHERNET DRIVER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@linux-mips.org
@@ -7076,6 +7222,7 @@ F: drivers/scsi/53c700*
LED SUBSYSTEM
M: Richard Purdie <rpurdie@rpsys.net>
M: Jacek Anaszewski <j.anaszewski@samsung.com>
+M: Pavel Machek <pavel@ucw.cz>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
S: Maintained
@@ -7548,8 +7695,10 @@ S: Maintained
MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mv88e6xxx/
+F: Documentation/devicetree/bindings/net/dsa/marvell.txt
MARVELL ARMADA DRM SUPPORT
M: Russell King <rmk+kernel@armlinux.org.uk>
@@ -7699,6 +7848,7 @@ MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVER
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
F: drivers/iio/potentiometer/mcp4531.c
MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@ -7830,12 +7980,12 @@ S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
MEGARAID SCSI/SAS DRIVERS
-M: Kashyap Desai <kashyap.desai@avagotech.com>
-M: Sumit Saxena <sumit.saxena@avagotech.com>
-M: Uday Lingala <uday.lingala@avagotech.com>
-L: megaraidlinux.pdl@avagotech.com
+M: Kashyap Desai <kashyap.desai@broadcom.com>
+M: Sumit Saxena <sumit.saxena@broadcom.com>
+M: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+L: megaraidlinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
-W: http://www.lsi.com
+W: http://www.avagotech.com/support/
S: Maintained
F: Documentation/scsi/megaraid.txt
F: drivers/scsi/megaraid.*
@@ -7917,6 +8067,10 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+M: Richard Weinberger <richard@nod.at>
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8045,6 +8199,7 @@ F: drivers/infiniband/hw/mlx4/
F: include/linux/mlx4/
MELLANOX MLX5 core VPI driver
+M: Saeed Mahameed <saeedm@mellanox.com>
M: Matan Barak <matanb@mellanox.com>
M: Leon Romanovsky <leonro@mellanox.com>
L: netdev@vger.kernel.org
@@ -8258,6 +8413,12 @@ T: git git://linuxtv.org/mkrufky/tuners.git
S: Maintained
F: drivers/media/tuners/mxl5007t.*
+MXSFB DRM DRIVER
+M: Marek Vasut <marex@denx.de>
+S: Supported
+F: drivers/gpu/drm/mxsfb/
+F: Documentation/devicetree/bindings/display/mxsfb-drm.txt
+
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
@@ -8304,7 +8465,6 @@ F: drivers/scsi/arm/oak.c
F: drivers/scsi/atari_scsi.*
F: drivers/scsi/dmx3191d.c
F: drivers/scsi/g_NCR5380.*
-F: drivers/scsi/g_NCR5380_mmio.c
F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
@@ -8435,7 +8595,6 @@ F: include/uapi/linux/net_namespace.h
F: tools/net/
F: tools/testing/selftests/net/
F: lib/random32.c
-F: lib/test_bpf.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@@ -8514,11 +8673,10 @@ F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
-W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@@ -8665,6 +8823,16 @@ L: linux-nvme@lists.infradead.org
S: Supported
F: drivers/nvme/target/
+NVM EXPRESS FC TRANSPORT DRIVERS
+M: James Smart <james.smart@broadcom.com>
+L: linux-nvme@lists.infradead.org
+S: Supported
+F: include/linux/nvme-fc.h
+F: include/linux/nvme-fc-driver.h
+F: drivers/nvme/host/fc.c
+F: drivers/nvme/target/fc.c
+F: drivers/nvme/target/fcloop.c
+
NVMEM FRAMEWORK
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Maxime Ripard <maxime.ripard@free-electrons.com>
@@ -8727,6 +8895,7 @@ F: drivers/regulator/tps65217-regulator.c
F: drivers/regulator/tps65218-regulator.c
F: drivers/regulator/tps65910-regulator.c
F: drivers/regulator/twl-regulator.c
+F: drivers/regulator/twl6030-regulator.c
F: include/linux/i2c-omap.h
OMAP DEVICE TREE SUPPORT
@@ -8947,9 +9116,11 @@ F: drivers/of/resolver.c
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
-W: http://openrisc.net
+M: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+M: Stafford Horne <shorne@gmail.com>
+L: openrisc@lists.librecores.org
+W: http://openrisc.io
S: Maintained
-T: git git://openrisc.net/~jonas/linux
F: arch/openrisc/
OPENVSWITCH
@@ -9081,7 +9252,7 @@ F: drivers/misc/panel.c
PARALLEL PORT SUBSYSTEM
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-parport@lists.infradead.org (subscribers-only)
S: Maintained
F: drivers/parport/
@@ -9236,11 +9407,12 @@ S: Maintained
F: drivers/pci/host/*layerscape*
PCI DRIVER FOR IMX6
-M: Richard Zhu <Richard.Zhu@freescale.com>
+M: Richard Zhu <hongxing.zhu@nxp.com>
M: Lucas Stach <l.stach@pengutronix.de>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
F: drivers/pci/host/*imx6*
PCI DRIVER FOR TI KEYSTONE
@@ -9299,17 +9471,11 @@ F: drivers/pci/host/pci-exynos.c
PCI DRIVER FOR SYNOPSIS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Pratyush Anand <pratyush.anand@gmail.com>
-L: linux-pci@vger.kernel.org
-S: Maintained
-F: drivers/pci/host/*designware*
-
-PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M: Jose Abreu <Jose.Abreu@synopsys.com>
+M: Joao Pinto <Joao.Pinto@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
-F: drivers/pci/host/pcie-designware-plat.c
+F: drivers/pci/host/*designware*
PCI DRIVER FOR GENERIC OF HOSTS
M: Will Deacon <will.deacon@arm.com>
@@ -9324,7 +9490,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Keith Busch <keith.busch@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: arch/x86/pci/vmd.c
+F: drivers/pci/host/vmd.c
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
@@ -9557,8 +9723,8 @@ F: arch/mips/boot/dts/pistachio/
F: arch/mips/configs/pistachio*_defconfig
PKTCDVD DRIVER
-M: Jiri Kosina <jikos@kernel.org>
-S: Maintained
+S: Orphan
+M: linux-block@vger.kernel.org
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
F: include/uapi/linux/pktcdvd.h
@@ -9611,6 +9777,7 @@ POWER MANAGEMENT CORE
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+B: https://bugzilla.kernel.org
S: Supported
F: drivers/base/power/
F: include/linux/pm.h
@@ -9792,7 +9959,7 @@ F: drivers/media/usb/pwc/*
PWM FAN DRIVER
M: Kamil Debski <kamil@wypas.org>
-M: Lukasz Majewski <l.majewski@samsung.com>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@ -9894,33 +10061,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@@ -9935,6 +10101,12 @@ F: fs/qnx4/
F: include/uapi/linux/qnx4_fs.h
F: include/uapi/linux/qnxtypes.h
+QORIQ DPAA2 FSL-MC BUS DRIVER
+M: Stuart Yoder <stuart.yoder@nxp.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/staging/fsl-mc/
+
QT1010 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -10397,7 +10569,7 @@ F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
S390 ZCRYPT DRIVER
-M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
+M: Harald Freudenberger <freude@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -10564,7 +10736,7 @@ L: netdev@vger.kernel.org
F: drivers/net/ethernet/samsung/sxgbe/
SAMSUNG THERMAL DRIVER
-M: Lukasz Majewski <l.majewski@samsung.com>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Supported
@@ -10690,6 +10862,11 @@ W: http://www.sunplus.com
S: Supported
F: arch/score/
+SCR24X CHIP CARD INTERFACE DRIVER
+M: Lubomir Rintel <lkundrak@v3.sk>
+S: Supported
+F: drivers/char/pcmcia/scr24x_cs.c
+
SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-arm-kernel@lists.infradead.org
@@ -11092,7 +11269,7 @@ F: include/media/i2c/ov2659.h
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/sm712*
@@ -11397,6 +11574,17 @@ W: http://www.st.com/spear
S: Maintained
F: drivers/clk/spear/
+SPI NOR SUBSYSTEM
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-mtd@lists.infradead.org
+W: http://www.linux-mtd.infradead.org/
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://github.com/spi-nor/linux.git
+S: Maintained
+F: drivers/mtd/spi-nor/
+F: include/linux/mtd/spi-nor.h
+
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
L: linux-spi@vger.kernel.org
@@ -11443,7 +11631,7 @@ STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: stable@vger.kernel.org
S: Supported
-F: Documentation/stable_kernel_rules.txt
+F: Documentation/process/stable-kernel-rules.rst
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -11509,17 +11697,11 @@ F: drivers/staging/rtl8712/
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
-M: Sudip Mukherjee <sudip@vectorindia.org>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/staging/sm750fb/
-STAGING - SLICOSS
-M: Lior Dotan <liodot@gmail.com>
-M: Christopher Harrer <charrer@alacritech.com>
-S: Odd Fixes
-F: drivers/staging/slicoss/
-
STAGING - SPEAKUP CONSOLE SPEECH DRIVER
M: William Hubbs <w.d.hubbs@gmail.com>
M: Chris Brannon <chris@the-brannons.com>
@@ -11589,6 +11771,7 @@ M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
+B: https://bugzilla.kernel.org
S: Supported
F: Documentation/power/
F: arch/x86/kernel/acpi/
@@ -12318,6 +12501,12 @@ S: Maintained
F: Documentation/filesystems/udf.txt
F: fs/udf/
+UDRAW TABLET
+M: Bastien Nocera <hadess@hadess.net>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-udraw.c
+
UFS FILESYSTEM
M: Evgeniy Dushistov <dushistov@mail.ru>
S: Maintained
@@ -12374,7 +12563,8 @@ F: Documentation/scsi/ufs.txt
F: drivers/scsi/ufs/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
-M: Joao Pinto <Joao.Pinto@synopsys.com>
+M: Manjunath M Bettegowda <manjumb@synopsys.com>
+M: Prabu Thangamuthu <prabut@synopsys.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ufs/*dwc*
@@ -12732,6 +12922,15 @@ F: drivers/vfio/
F: include/linux/vfio.h
F: include/uapi/linux/vfio.h
+VFIO MEDIATED DEVICE DRIVERS
+M: Kirti Wankhede <kwankhede@nvidia.com>
+L: kvm@vger.kernel.org
+S: Maintained
+F: Documentation/vfio-mediated-device.txt
+F: drivers/vfio/mdev/
+F: include/linux/mdev.h
+F: samples/vfio-mdev/
+
VFIO PLATFORM DRIVER
M: Baptiste Reynal <b.reynal@virtualopensystems.com>
L: kvm@vger.kernel.org
@@ -12776,6 +12975,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: Documentation/devicetree/bindings/virtio/
@@ -12806,6 +13006,7 @@ F: include/uapi/linux/virtio_gpu.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
@@ -12882,7 +13083,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: devel@driverdev.osuosl.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
-F: Documentation/vme_api.txt
+F: Documentation/driver-api/vme.rst
F: drivers/staging/vme/
F: drivers/vme/
F: include/linux/vme*
@@ -13113,7 +13314,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git
S: Maintained
F: include/linux/workqueue.h
F: kernel/workqueue.c
-F: Documentation/workqueue.txt
+F: Documentation/core-api/workqueue.rst
X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
M: Chen-Yu Tsai <wens@csie.org>
@@ -13178,7 +13379,6 @@ F: drivers/media/tuners/tuner-xc2028.*
XEN HYPERVISOR INTERFACE
M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-M: David Vrabel <david.vrabel@citrix.com>
M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --git a/Makefile b/Makefile
index a2650f9c6a25..b1037774e8e8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Psychotic Stoned Sheep
+EXTRAVERSION =
+NAME = Roaring Lionus
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -370,7 +370,7 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -std=gnu89
+ -std=gnu89 $(call cc-option,-fno-PIE)
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS := -D__ASSEMBLY__
+KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
@@ -606,6 +607,13 @@ else
include/config/auto.conf: ;
endif # $(dot-config)
+# For the kernel to actually contain only the needed exported symbols,
+# we have to build modules as well to determine what those symbols are.
+# (this can be evaluated only once include/config/auto.conf has been included)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+ KBUILD_MODULES := 1
+endif
+
# The all: target is the default when no target is given on the
# command line.
# This allow a user to issue only 'make' to build a kernel including modules
@@ -620,7 +628,6 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +636,18 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os
+KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
else
KBUILD_CFLAGS += -O2
endif
endif
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+ $(call cc-disable-warning,maybe-uninitialized,))
+
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -941,7 +951,7 @@ ifdef CONFIG_GDB_SCRIPTS
endif
ifdef CONFIG_TRIM_UNUSED_KSYMS
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
- "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+ "$(MAKE) -f $(srctree)/Makefile vmlinux"
endif
# standalone target for easier testing
@@ -1016,8 +1026,6 @@ prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic
prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
include/config/auto.conf
$(cmd_crmodverdir)
- $(Q)test -e include/generated/autoksyms.h || \
- touch include/generated/autoksyms.h
archprepare: archheaders archscripts prepare1 scripts_basic
diff --git a/README b/README
index 09f34f78f2bb..b2ba4aaa3a71 100644
--- a/README
+++ b/README
@@ -1,400 +1,18 @@
- Linux kernel release 4.x <http://kernel.org/>
+Linux kernel
+============
-These are the release notes for Linux version 4. Read them carefully,
-as they tell you what this is all about, explain how to install the
-kernel, and what to do if something goes wrong.
+This file was moved to Documentation/admin-guide/README.rst
-WHAT IS LINUX?
+Please note that there are several guides for kernel developers and users.
+These guides can be rendered in a number of formats, such as HTML and PDF.
- Linux is a clone of the operating system Unix, written from scratch by
- Linus Torvalds with assistance from a loosely-knit team of hackers across
- the Net. It aims towards POSIX and Single UNIX Specification compliance.
+In order to build the documentation, use ``make htmldocs`` or
+``make pdfdocs``.
- It has all the features you would expect in a modern fully-fledged Unix,
- including true multitasking, virtual memory, shared libraries, demand
- loading, shared copy-on-write executables, proper memory management,
- and multistack networking including IPv4 and IPv6.
-
- It is distributed under the GNU General Public License - see the
- accompanying COPYING file for more details.
-
-ON WHAT HARDWARE DOES IT RUN?
-
- Although originally developed first for 32-bit x86-based PCs (386 or higher),
- today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
- UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
- IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
- Xtensa, Tilera TILE, AVR32, ARC and Renesas M32R architectures.
-
- Linux is easily portable to most general-purpose 32- or 64-bit architectures
- as long as they have a paged memory management unit (PMMU) and a port of the
- GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has
- also been ported to a number of architectures without a PMMU, although
- functionality is then obviously somewhat limited.
- Linux has also been ported to itself. You can now run the kernel as a
- userspace application - this is called UserMode Linux (UML).
-
-DOCUMENTATION:
-
- - There is a lot of documentation available both in electronic form on
- the Internet and in books, both Linux-specific and pertaining to
- general UNIX questions. I'd recommend looking into the documentation
- subdirectories on any Linux FTP site for the LDP (Linux Documentation
- Project) books. This README is not meant to be documentation on the
- system: there are much better sources available.
-
- - There are various README files in the Documentation/ subdirectory:
- these typically contain kernel-specific installation notes for some
- drivers for example. See Documentation/00-INDEX for a list of what
- is contained in each file. Please read the Changes file, as it
- contains information about the problems, which may result by upgrading
- your kernel.
-
- - The Documentation/DocBook/ subdirectory contains several guides for
- kernel developers and users. These guides can be rendered in a
- number of formats: PostScript (.ps), PDF, HTML, & man-pages, among others.
- After installation, "make psdocs", "make pdfdocs", "make htmldocs",
- or "make mandocs" will render the documentation in the requested format.
-
-INSTALLING the kernel source:
-
- - If you install the full sources, put the kernel tarball in a
- directory where you have permissions (e.g. your home directory) and
- unpack it:
-
- xz -cd linux-4.X.tar.xz | tar xvf -
-
- Replace "X" with the version number of the latest kernel.
-
- Do NOT use the /usr/src/linux area! This area has a (usually
- incomplete) set of kernel headers that are used by the library header
- files. They should match the library, and not get messed up by
- whatever the kernel-du-jour happens to be.
-
- - You can also upgrade between 4.x releases by patching. Patches are
- distributed in the xz format. To install by patching, get all the
- newer patch files, enter the top level directory of the kernel source
- (linux-4.X) and execute:
-
- xz -cd ../patch-4.x.xz | patch -p1
-
- Replace "x" for all versions bigger than the version "X" of your current
- source tree, _in_order_, and you should be ok. You may want to remove
- the backup files (some-file-name~ or some-file-name.orig), and make sure
- that there are no failed patches (some-file-name# or some-file-name.rej).
- If there are, either you or I have made a mistake.
-
- Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
- (also known as the -stable kernels) are not incremental but instead apply
- directly to the base 4.x kernel. For example, if your base kernel is 4.0
- and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
- and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
- want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
- patch -R) _before_ applying the 4.0.3 patch. You can read more on this in
- Documentation/applying-patches.txt
-
- Alternatively, the script patch-kernel can be used to automate this
- process. It determines the current kernel version and applies any
- patches found.
-
- linux/scripts/patch-kernel linux
-
- The first argument in the command above is the location of the
- kernel source. Patches are applied from the current directory, but
- an alternative directory can be specified as the second argument.
-
- - Make sure you have no stale .o files and dependencies lying around:
-
- cd linux
- make mrproper
-
- You should now have the sources correctly installed.
-
-SOFTWARE REQUIREMENTS
-
- Compiling and running the 4.x kernels requires up-to-date
- versions of various software packages. Consult
- Documentation/Changes for the minimum version numbers required
- and how to get updates for these packages. Beware that using
- excessively old versions of these packages can cause indirect
- errors that are very difficult to track down, so don't assume that
- you can just update packages when obvious problems arise during
- build or operation.
-
-BUILD directory for the kernel:
-
- When compiling the kernel, all output files will per default be
- stored together with the kernel source code.
- Using the option "make O=output/dir" allows you to specify an alternate
- place for the output files (including .config).
- Example:
-
- kernel source code: /usr/src/linux-4.X
- build directory: /home/name/build/kernel
-
- To configure and build the kernel, use:
-
- cd /usr/src/linux-4.X
- make O=/home/name/build/kernel menuconfig
- make O=/home/name/build/kernel
- sudo make O=/home/name/build/kernel modules_install install
-
- Please note: If the 'O=output/dir' option is used, then it must be
- used for all invocations of make.
-
-CONFIGURING the kernel:
-
- Do not skip this step even if you are only upgrading one minor
- version. New configuration options are added in each release, and
- odd problems will turn up if the configuration files are not set up
- as expected. If you want to carry your existing configuration to a
- new version with minimal work, use "make oldconfig", which will
- only ask you for the answers to new questions.
-
- - Alternative configuration commands are:
-
- "make config" Plain text interface.
-
- "make menuconfig" Text based color menus, radiolists & dialogs.
-
- "make nconfig" Enhanced text based color menus.
-
- "make xconfig" Qt based configuration tool.
-
- "make gconfig" GTK+ based configuration tool.
-
- "make oldconfig" Default all questions based on the contents of
- your existing ./.config file and asking about
- new config symbols.
-
- "make silentoldconfig"
- Like above, but avoids cluttering the screen
- with questions already answered.
- Additionally updates the dependencies.
-
- "make olddefconfig"
- Like above, but sets new symbols to their default
- values without prompting.
-
- "make defconfig" Create a ./.config file by using the default
- symbol values from either arch/$ARCH/defconfig
- or arch/$ARCH/configs/${PLATFORM}_defconfig,
- depending on the architecture.
-
- "make ${PLATFORM}_defconfig"
- Create a ./.config file by using the default
- symbol values from
- arch/$ARCH/configs/${PLATFORM}_defconfig.
- Use "make help" to get a list of all available
- platforms of your architecture.
-
- "make allyesconfig"
- Create a ./.config file by setting symbol
- values to 'y' as much as possible.
-
- "make allmodconfig"
- Create a ./.config file by setting symbol
- values to 'm' as much as possible.
-
- "make allnoconfig" Create a ./.config file by setting symbol
- values to 'n' as much as possible.
-
- "make randconfig" Create a ./.config file by setting symbol
- values to random values.
-
- "make localmodconfig" Create a config based on current config and
- loaded modules (lsmod). Disables any module
- option that is not needed for the loaded modules.
-
- To create a localmodconfig for another machine,
- store the lsmod of that machine into a file
- and pass it in as a LSMOD parameter.
-
- target$ lsmod > /tmp/mylsmod
- target$ scp /tmp/mylsmod host:/tmp
-
- host$ make LSMOD=/tmp/mylsmod localmodconfig
-
- The above also works when cross compiling.
-
- "make localyesconfig" Similar to localmodconfig, except it will convert
- all module options to built in (=y) options.
-
- You can find more information on using the Linux kernel config tools
- in Documentation/kbuild/kconfig.txt.
-
- - NOTES on "make config":
-
- - Having unnecessary drivers will make the kernel bigger, and can
- under some circumstances lead to problems: probing for a
- nonexistent controller card may confuse your other controllers
-
- - A kernel with math-emulation compiled in will still use the
- coprocessor if one is present: the math emulation will just
- never get used in that case. The kernel will be slightly larger,
- but will work on different machines regardless of whether they
- have a math coprocessor or not.
-
- - The "kernel hacking" configuration details usually result in a
- bigger or slower kernel (or both), and can even make the kernel
- less stable by configuring some routines to actively try to
- break bad code to find kernel problems (kmalloc()). Thus you
- should probably answer 'n' to the questions for "development",
- "experimental", or "debugging" features.
-
-COMPILING the kernel:
-
- - Make sure you have at least gcc 3.2 available.
- For more information, refer to Documentation/Changes.
-
- Please note that you can still run a.out user programs with this kernel.
-
- - Do a "make" to create a compressed kernel image. It is also
- possible to do "make install" if you have lilo installed to suit the
- kernel makefiles, but you may want to check your particular lilo setup first.
-
- To do the actual install, you have to be root, but none of the normal
- build should require that. Don't take the name of root in vain.
-
- - If you configured any of the parts of the kernel as `modules', you
- will also have to do "make modules_install".
-
- - Verbose kernel compile/build output:
-
- Normally, the kernel build system runs in a fairly quiet mode (but not
- totally silent). However, sometimes you or other kernel developers need
- to see compile, link, or other commands exactly as they are executed.
- For this, use "verbose" build mode. This is done by passing
- "V=1" to the "make" command, e.g.
-
- make V=1 all
-
- To have the build system also tell the reason for the rebuild of each
- target, use "V=2". The default is "V=0".
-
- - Keep a backup kernel handy in case something goes wrong. This is
- especially true for the development releases, since each new release
- contains new code which has not been debugged. Make sure you keep a
- backup of the modules corresponding to that kernel, as well. If you
- are installing a new kernel with the same version number as your
- working kernel, make a backup of your modules directory before you
- do a "make modules_install".
-
- Alternatively, before compiling, use the kernel config option
- "LOCALVERSION" to append a unique suffix to the regular kernel version.
- LOCALVERSION can be set in the "General Setup" menu.
-
- - In order to boot your new kernel, you'll need to copy the kernel
- image (e.g. .../linux/arch/x86/boot/bzImage after compilation)
- to the place where your regular bootable kernel is found.
-
- - Booting a kernel directly from a floppy without the assistance of a
- bootloader such as LILO, is no longer supported.
-
- If you boot Linux from the hard drive, chances are you use LILO, which
- uses the kernel image as specified in the file /etc/lilo.conf. The
- kernel image file is usually /vmlinuz, /boot/vmlinuz, /bzImage or
- /boot/bzImage. To use the new kernel, save a copy of the old image
- and copy the new image over the old one. Then, you MUST RERUN LILO
- to update the loading map! If you don't, you won't be able to boot
- the new kernel image.
-
- Reinstalling LILO is usually a matter of running /sbin/lilo.
- You may wish to edit /etc/lilo.conf to specify an entry for your
- old kernel image (say, /vmlinux.old) in case the new one does not
- work. See the LILO docs for more information.
-
- After reinstalling LILO, you should be all set. Shutdown the system,
- reboot, and enjoy!
-
- If you ever need to change the default root device, video mode,
- ramdisk size, etc. in the kernel image, use the 'rdev' program (or
- alternatively the LILO boot options when appropriate). No need to
- recompile the kernel to change these parameters.
-
- - Reboot with the new kernel and enjoy.
-
-IF SOMETHING GOES WRONG:
-
- - If you have problems that seem to be due to kernel bugs, please check
- the file MAINTAINERS to see if there is a particular person associated
- with the part of the kernel that you are having trouble with. If there
- isn't anyone listed there, then the second best thing is to mail
- them to me (torvalds@linux-foundation.org), and possibly to any other
- relevant mailing-list or to the newsgroup.
-
- - In all bug-reports, *please* tell what kernel you are talking about,
- how to duplicate the problem, and what your setup is (use your common
- sense). If the problem is new, tell me so, and if the problem is
- old, please try to tell me when you first noticed it.
-
- - If the bug results in a message like
-
- unable to handle kernel paging request at address C0000010
- Oops: 0002
- EIP: 0010:XXXXXXXX
- eax: xxxxxxxx ebx: xxxxxxxx ecx: xxxxxxxx edx: xxxxxxxx
- esi: xxxxxxxx edi: xxxxxxxx ebp: xxxxxxxx
- ds: xxxx es: xxxx fs: xxxx gs: xxxx
- Pid: xx, process nr: xx
- xx xx xx xx xx xx xx xx xx xx
-
- or similar kernel debugging information on your screen or in your
- system log, please duplicate it *exactly*. The dump may look
- incomprehensible to you, but it does contain information that may
- help debugging the problem. The text above the dump is also
- important: it tells something about why the kernel dumped code (in
- the above example, it's due to a bad kernel pointer). More information
- on making sense of the dump is in Documentation/oops-tracing.txt
-
- - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
- as is, otherwise you will have to use the "ksymoops" program to make
- sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
- This utility can be downloaded from
- ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
- Alternatively, you can do the dump lookup by hand:
-
- - In debugging dumps like the above, it helps enormously if you can
- look up what the EIP value means. The hex value as such doesn't help
- me or anybody else very much: it will depend on your particular
- kernel setup. What you should do is take the hex value from the EIP
- line (ignore the "0010:"), and look it up in the kernel namelist to
- see which kernel function contains the offending address.
-
- To find out the kernel function name, you'll need to find the system
- binary associated with the kernel that exhibited the symptom. This is
- the file 'linux/vmlinux'. To extract the namelist and match it against
- the EIP from the kernel crash, do:
-
- nm vmlinux | sort | less
-
- This will give you a list of kernel addresses sorted in ascending
- order, from which it is simple to find the function that contains the
- offending address. Note that the address given by the kernel
- debugging messages will not necessarily match exactly with the
- function addresses (in fact, that is very unlikely), so you can't
- just 'grep' the list: the list will, however, give you the starting
- point of each kernel function, so by looking for the function that
- has a starting address lower than the one you are searching for but
- is followed by a function with a higher address you will find the one
- you want. In fact, it may be a good idea to include a bit of
- "context" in your problem report, giving a few lines around the
- interesting one.
-
- If you for some reason cannot do the above (you have a pre-compiled
- kernel image or similar), telling me as much about your setup as
- possible will help. Please read the REPORTING-BUGS document for details.
-
- - Alternatively, you can use gdb on a running kernel. (read-only; i.e. you
- cannot change values or set break points.) To do this, first compile the
- kernel with -g; edit arch/x86/Makefile appropriately, then do a "make
- clean". You'll also need to enable CONFIG_PROC_FS (via "make config").
-
- After you've rebooted with the new kernel, do "gdb vmlinux /proc/kcore".
- You can now use all the usual gdb commands. The command to look up the
- point where your system crashed is "l *0xXXXXXXXX". (Replace the XXXes
- with the EIP value.)
-
- gdb'ing a non-running kernel currently fails because gdb (wrongly)
- disregards the starting offset for which the kernel is compiled.
+There are various text files in the Documentation/ subdirectory,
+several of them using the Restructured Text markup notation.
+See Documentation/00-INDEX for a list of what is contained in each file.
+Please read the Documentation/process/changes.rst file, as it contains the
+requirements for building and running the kernel, and information about
+problems that may arise when upgrading your kernel.
diff --git a/arch/Kconfig b/arch/Kconfig
index 659bdd079277..19483aea4bbc 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -96,6 +96,7 @@ config KPROBES_ON_FTRACE
config UPROBES
def_bool n
+ depends on ARCH_SUPPORTS_UPROBES
help
Uprobes is the user-space counterpart to kprobes: they
enable instrumentation applications (such as 'perf probe')
@@ -363,8 +364,9 @@ menuconfig GCC_PLUGINS
See Documentation/gcc-plugins.txt for details.
config GCC_PLUGIN_CYC_COMPLEXITY
- bool "Compute the cyclomatic complexity of a function"
+ bool "Compute the cyclomatic complexity of a function" if EXPERT
depends on GCC_PLUGINS
+ depends on !COMPILE_TEST
help
The complexity M of a function's control flow graph is defined as:
M = E - N + 2P
@@ -374,6 +376,10 @@ config GCC_PLUGIN_CYC_COMPLEXITY
N = the number of nodes
P = the number of connected components (exit nodes).
+ Enabling this plugin reports the complexity to stderr during the
+ build. It mainly serves as a simple example of how to create a
+ gcc plugin for the kernel.
+
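As a quick worked instance of the formula quoted above (purely illustrative): a function whose body is a single if/else has a control flow graph with E = 4 edges, N = 4 nodes and P = 1 connected component, so M = 4 - 4 + 2*1 = 2. Straight-line code scores M = 1, and each additional decision point adds one.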
config GCC_PLUGIN_SANCOV
bool
depends on GCC_PLUGINS
@@ -512,6 +518,9 @@ config HAVE_CONTEXT_TRACKING
config HAVE_VIRT_CPU_ACCOUNTING
bool
+config ARCH_HAS_SCALED_CPUTIME
+ bool
+
config HAVE_VIRT_CPU_ACCOUNTING_GEN
bool
default y if 64BIT
diff --git a/arch/alpha/include/asm/mutex.h b/arch/alpha/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/alpha/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 43a7559c448b..2fec2dee3020 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -58,7 +58,6 @@ unsigned long get_wchan(struct task_struct *p);
((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 9e46d6e656d9..afc901b7a6f6 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ffb93f499c83..56e427c7aa3c 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1029,11 +1029,16 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
+asmlinkage long sys_ni_posix_timers(void);
+
SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
{
struct itimerval kit;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
error = do_getitimer(which, &kit);
if (!error && put_it32(it, &kit))
error = -EFAULT;
@@ -1047,6 +1052,9 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
struct itimerval kin, kout;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
if (in) {
if (get_it32(&kin, in))
return -EFAULT;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 940dfb406591..04abdec7f496 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -283,7 +283,7 @@ long arch_ptrace(struct task_struct *child, long request,
/* When I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+ copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp),
FOLL_FORCE);
ret = -EIO;
if (copied != sizeof(tmp))
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 864adad52280..19cce226d1a8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,6 +50,9 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
# Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warnings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index de53f5c3251c..3121536b25a3 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -129,6 +129,7 @@
data-width = <4>;
clocks = <&ahb_clk>;
clock-names = "hclk";
+ multi-block = <1 1 1 1 1 1>;
};
i2c0: i2c@FF120000 {
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 6ae2c476ad82..53ce226f77a5 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -71,7 +71,7 @@
reg-io-width = <4>;
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index ce0ccd20b5bf..5ee96b067c08 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -69,7 +69,7 @@
};
};
- arcpmu0: pmu {
+ arcpct0: pct {
compatible = "snps,arc700-pct";
};
};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index bcf603142a33..3c391ba565ed 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -83,5 +83,9 @@
reg = <0xf0003000 0x44>;
interrupts = <7>;
};
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
};
};
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 7314f538847b..b0066a749d4c 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 65ab9fbf83f2..ebe9ebb92933 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 3b3990cddbe1..4bde43278be6 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 98cf20933bbb..f6fb3d26557e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ddf8b96d494e..b9f0fe00044b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ceb90745326e..6da71ba253a9 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7f3f9f63708c..1bd24ec3e350 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -43,12 +43,14 @@
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 08e7e2a16ac1..a36e8601114d 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -22,10 +22,11 @@
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
- " lp 1f \n"
- " nop \n"
- "1: \n"
- : "+l"(loops));
+ " mov lp_count, %0 \n"
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
+ : : "r"(loops));
}
extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
deleted file mode 100644
index a2f88ff9f506..000000000000
--- a/arch/arc/include/asm/mutex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
- * atomic dec based which can "count" any number of lock contenders.
- * This ideally needs to be fixed in core, but for now switching to dec ver.
- */
-#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
-#include <asm-generic/mutex-dec.h>
-#else
-#include <asm-generic/mutex-xchg.h>
-#endif
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 89eeb3720051..e94ca72b974e 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
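A hedged sketch of the truncation the comment above warns about, with a made-up pfn and assuming a PAE40 build where phys_addr_t is 64-bit while unsigned long stays 32-bit:

	static phys_addr_t pae40_shift_demo(void)	/* illustrative only, not kernel code */
	{
		unsigned long pfn  = 0x120000;		/* page at phys 0x1_2000_0000, PAGE_SHIFT == 12 */
		unsigned long lost = pfn << PAGE_SHIFT;	/* 32-bit arithmetic truncates to 0x20000000 */

		(void)lost;
		return __pfn_to_phys(pfn);		/* widened to phys_addr_t first: 0x120000000 */
	}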
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 16b630fbeb6a..6e1242da0159 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -60,15 +60,12 @@ struct task_struct;
#ifndef CONFIG_EZNPS_MTM_EXT
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#else
#define cpu_relax() \
__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
-#define cpu_relax_lowlatency() barrier()
-
#endif
#define copy_segments(tsk, mm) do { } while (0)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 89fdd1b0a76e..0861007d9ef3 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
- * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
*/
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index f1e07c2344f8..3b67f538f142 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */
+ else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+ arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c424d5abc318..f39142acc89e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
{
unsigned long flags;
cpumask_t online;
+ unsigned int destination_bits;
+ unsigned int distribution_mode;
/* errout if no online cpu per @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
- idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+ destination_bits = cpumask_bits(&online)[0];
+ idu_set_dest(data->hwirq, destination_bits);
+
+ if (ffs(destination_bits) == fls(destination_bits))
+ distribution_mode = IDU_M_DISTRI_DEST;
+ else
+ distribution_mode = IDU_M_DISTRI_RR;
+
+ idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
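A brief, hedged note on the new single-CPU test: ffs() and fls() return the 1-based index of the lowest and highest set bit, so they are equal exactly when one bit (one target CPU) is set, in which case fixed-destination delivery can replace round-robin. For example:

	unsigned int one_cpu  = 0x4;	/* CPU 2 only:   ffs() == fls() == 3 -> IDU_M_DISTRI_DEST */
	unsigned int two_cpus = 0x6;	/* CPUs 1 and 2: ffs() == 2, fls() == 3 -> IDU_M_DISTRI_RR */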
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
};
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
- struct irq_domain *domain = irq_desc_get_handler_data(desc);
- unsigned int core_irq = irq_desc_get_irq(desc);
- unsigned int idu_irq;
+ struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+ irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
- idu_irq = core_irq - idu_first_irq;
- generic_handle_irq(irq_find_mapping(domain, idu_irq));
+ generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
struct irq_domain *domain;
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
- int i, irq;
+ int i, virq;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
* however we need it to get the parent virq and set IDU handler
* as first level isr
*/
- irq = irq_of_parse_and_map(intc, i);
+ virq = irq_of_parse_and_map(intc, i);
if (!i)
- idu_first_irq = irq;
+ idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
- irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+ irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 59aa43cb146e..a41a79a4f4fe 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
- int uval;
- int ret;
+ struct pt_regs *regs = current_pt_regs();
+ int uval = -EFAULT;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+ /* Z indicates to userspace if operation succeeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
- ret = __get_user(uval, uaddr);
- if (ret)
+ if (__get_user(uval, uaddr))
goto done;
- if (uval != expected)
- ret = -EAGAIN;
- else
- ret = __put_user(new, uaddr);
+ if (uval == expected) {
+ if (!__put_user(new, uaddr))
+ regs->status32 |= STATUS_Z_MASK;
+ }
done:
preempt_enable();
- return ret;
+ return uval;
}
void arch_cpu_idle(void)
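A hedged model of the contract this hunk establishes (illustrative C, not the kernel code): the syscall now always returns the value it read from *uaddr, and success is signalled separately by setting the Z flag in the caller's STATUS32, which userspace is expected to test instead of the return value.

	/* single-threaded model of the emulated compare-and-exchange */
	static int usr_cmpxchg_model(int *uaddr, int expected, int new)
	{
		int old = *uaddr;		/* __get_user() in the real code */

		if (old == expected)
			*uaddr = new;		/* __put_user(); Z flag set on success */

		return old;			/* old value is what the caller gets back */
	}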
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f183cc648851..88674d972c9d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i;
/*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
+ * if platform didn't set the present map already, do it now
+ * boot cpu is set to present already by init/main.c
*/
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
+ if (num_present_cpus() <= 1) {
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
*/
static DEFINE_PER_CPU(int, ipi_dev);
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
+ unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+ if (!virq)
+ panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
- rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+ rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
if (rc)
- panic("Percpu IRQ request failed for %d\n", irq);
+ panic("Percpu IRQ request failed for %u\n", virq);
}
- enable_percpu_irq(irq, 0);
+ enable_percpu_irq(virq, 0);
return 0;
}
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f927b8dc6edd..c10390d1ddb6 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
cycle_t full;
} stamp;
-
- __asm__ __volatile(
- "1: \n"
- " lr %0, [AUX_RTC_LOW] \n"
- " lr %1, [AUX_RTC_HIGH] \n"
- " lr %2, [AUX_RTC_CTRL] \n"
- " bbit0.nt %2, 31, 1b \n"
- : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ stamp.low = read_aux_reg(AUX_RTC_LOW);
+ stamp.high = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
return stamp.full;
}
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 2b96cfc3be75..50d71695cd4e 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 1;
+int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 60aab5a7522b..08450a1a5b5f 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long off = vma->vm_pgoff;
+ int ret = -ENXIO;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
/*
* streaming DMA Mapping API...
* CPU accesses page via normal paddr, thus needs to explicitly made
@@ -133,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
- _dma_cache_sync(paddr, size, dir);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ _dma_cache_sync(paddr, size, dir);
+
return plat_phys_to_dma(dev, paddr);
}
@@ -193,6 +221,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
struct dma_map_ops arc_dma_ops = {
.alloc = arc_dma_alloc,
.free = arc_dma_free,
+ .mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
.map_sg = arc_dma_map_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
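For context on the .mmap hook added above: drivers normally reach arc_dma_mmap() through the generic dma_mmap_coherent() helper from their file_operations mmap handler. A minimal, hedged sketch follows; the mydev structure and its fields are hypothetical, and the buffer is assumed to come from an earlier dma_alloc_coherent() call.

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	struct mydev {				/* hypothetical per-device state */
		struct device *dev;
		void *cpu_addr;
		dma_addr_t dma_addr;
		size_t size;
	};

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydev *md = file->private_data;

		/* routes to arc_dma_mmap() via the device's dma_map_ops */
		return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
					 md->dma_addr, md->size);
	}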
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
index 5e901f86e4bd..56a4c8522f11 100644
--- a/arch/arc/plat-eznps/smp.c
+++ b/arch/arc/plat-eznps/smp.c
@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
mtm_enable_core(cpu);
}
-static void eznps_ipi_clear(int irq)
-{
- write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = eznps_init_cpumasks,
.cpu_kick = eznps_smp_wakeup_cpu,
.ipi_send = eznps_ipi_send,
.init_per_cpu = eznps_init_per_cpu,
- .ipi_clear = eznps_ipi_clear,
};
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b5d529fdffab..caef68429b08 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -703,6 +703,7 @@ config ARCH_VIRT
select ARM_GIC
select ARM_GIC_V2M if PCI
select ARM_GIC_V3
+ select ARM_GIC_V3_ITS if PCI
select ARM_PSCI
select HAVE_ARM_ARCH_TIMER
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index befcd2619902..c558ba75cbcc 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -745,7 +745,6 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-pcduino2.dtb \
sun4i-a10-pov-protab2-ips9.dtb
dtb-$(CONFIG_MACH_SUN5I) += \
- ntc-gr8-evb.dtb \
sun5i-a10s-auxtek-t003.dtb \
sun5i-a10s-auxtek-t004.dtb \
sun5i-a10s-mk802.dtb \
@@ -761,6 +760,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a13-olinuxino-micro.dtb \
sun5i-a13-q8-tablet.dtb \
sun5i-a13-utoo-p66.dtb \
+ sun5i-gr8-evb.dtb \
sun5i-r8-chip.dtb
dtb-$(CONFIG_MACH_SUN6I) += \
sun6i-a31-app4-evb1.dtb \
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index 064b322a7a04..3b23b32e1b30 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -59,15 +59,17 @@
&davinci_mdio {
dp83867_0: ethernet-phy@2 {
reg = <2>;
- ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_NS>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+ ti,min-output-impedance;
};
dp83867_1: ethernet-phy@3 {
reg = <3>;
- ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_NS>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+ ti,min-output-impedance;
};
};
diff --git a/arch/arm/boot/dts/hisi-x5hd2.dtsi b/arch/arm/boot/dts/hisi-x5hd2.dtsi
index fdcc23d203e5..0da76c5ff6d7 100644
--- a/arch/arm/boot/dts/hisi-x5hd2.dtsi
+++ b/arch/arm/boot/dts/hisi-x5hd2.dtsi
@@ -436,18 +436,20 @@
};
gmac0: ethernet@1840000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
reg = <0x1840000 0x1000>,<0x184300c 0x4>;
interrupts = <0 71 4>;
clocks = <&clock HIX5HD2_MAC0_CLK>;
+ clock-names = "mac_core";
status = "disabled";
};
gmac1: ethernet@1841000 {
- compatible = "hisilicon,hix5hd2-gmac";
+ compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
reg = <0x1841000 0x1000>,<0x1843010 0x4>;
interrupts = <0 72 4>;
clocks = <&clock HIX5HD2_MAC1_CLK>;
+ clock-names = "mac_core";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index dec4b073ceb1..379939699164 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -64,8 +64,8 @@
};
ldo3_reg: ldo3 {
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
@@ -76,8 +76,8 @@
};
ldo5_reg: ldo5 {
- regulator-min-microvolt = <1725000>;
- regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
regulator-always-on;
};
@@ -100,14 +100,14 @@
};
ldo9_reg: ldo9 {
- regulator-min-microvolt = <1200000>;
+ regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3600000>;
regulator-always-on;
};
ldo10_reg: ldo10 {
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <3650000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
regulator-always-on;
};
};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 0d7d5ac6257b..2b6cb05bc01a 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -643,9 +643,8 @@
reg = <0x30730000 0x10000>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
- <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CLK_DUMMY>;
- clock-names = "pix", "axi", "disp_axi";
+ <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
+ clock-names = "pix", "axi";
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 0ff1c2de95bf..26cce4d18405 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -13,6 +13,11 @@
};
};
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0>;
+ };
+
wl12xx_vmmc: wl12xx_vmmc {
compatible = "regulator-fixed";
regulator-name = "vwl1271";
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 731ec37aed5b..8f9a69ca818c 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -13,9 +13,9 @@
};
};
- memory@0 {
+ memory@80000000 {
device_type = "memory";
- reg = <0 0>;
+ reg = <0x80000000 0>;
};
leds {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 6365635fea5c..4caadb253249 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -124,6 +124,7 @@
compatible = "ti,abe-twl6040";
ti,model = "omap5-uevm";
+ ti,jack-detection;
ti,mclk-freq = <19200000>;
ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
ti,backup-battery-charge-high-current;
};
- gpadc {
+ gpadc: gpadc {
compatible = "ti,palmas-gpadc";
interrupts = <18 0
16 0
@@ -475,8 +476,8 @@
smps6_reg: smps6 {
/* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
index 1cf644bfd7ea..51dc734cd5b9 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
@@ -82,6 +82,10 @@
gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
};
+&sata {
+ nr-ports = <2>;
+};
+
&ehci1 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index e571d66ea0fe..3d0a18abd408 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -626,4 +626,9 @@
"sru-src6", "sru-src7", "sru-src8";
};
};
+
+ rst: reset-controller@ffcc0000 {
+ compatible = "renesas,r8a7778-reset-wdt";
+ reg = <0xffcc0000 0x40>;
+ };
};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index b9bbcce69dfb..8cf16008a09b 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -590,6 +590,11 @@
};
};
+ rst: reset-controller@ffcc0000 {
+ compatible = "renesas,r8a7779-reset-wdt";
+ reg = <0xffcc0000 0x48>;
+ };
+
sysc: system-controller@ffd85000 {
compatible = "renesas,r8a7779-sysc";
reg = <0xffd85000 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 351fcc2f87df..3f10b0bf1b08 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1471,6 +1471,11 @@
};
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7790-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7790-sysc";
reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 162b55c665a3..c465c79bcca6 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1482,6 +1482,11 @@
};
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7791-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7791-sysc";
reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 713141d38b3e..6e1f61f65d29 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -118,6 +118,11 @@
IRQ_TYPE_LEVEL_LOW)>;
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7792-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7792-sysc";
reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 8d02aacf2892..e4b385eccf74 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -1279,6 +1279,11 @@
};
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7793-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7793-sysc";
reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 9365580a194f..69e4f4fad89b 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -1375,6 +1375,11 @@
};
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7794-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7794-sysc";
reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index a935523a1eb8..7c2dc19925a1 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -204,7 +204,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 17ec2e2d7a60..74a749c566ee 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -596,7 +596,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
phys = <&usbphy0>;
phy-names = "usb2-phy";
status = "disabled";
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index e15beb3c671e..8fbd3c806fa0 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -181,7 +181,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
phys = <&usbphy0>;
phy-names = "usb2-phy";
status = "disabled";
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 449acf0d8272..17ea0abcdbd7 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -118,6 +118,7 @@
block_size = <0xfff>;
dma-masters = <2>;
data-width = <8 8>;
+ multi-block = <1 1 1 1 1 1 1 1>;
};
dma@eb000000 {
@@ -134,6 +135,7 @@
chan_priority = <1>;
block_size = <0xfff>;
data-width = <8 8>;
+ multi-block = <1 1 1 1 1 1 1 1>;
};
fsmc: flash@b0000000 {
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 91096a49efa9..8f79b4147bba 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -283,6 +283,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -296,6 +298,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -309,6 +313,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -322,6 +328,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -335,6 +343,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -348,6 +358,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c5_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -363,6 +375,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c10_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -376,6 +390,8 @@
clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c11_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index ef2ff2f518f6..7fb507fcba7e 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -74,7 +74,7 @@
/* Low speed expansion connector */
spi0: spi@9844000 {
label = "LS-SPI0";
- cs-gpio = <&pio30 3 0>;
+ cs-gpios = <&pio30 3 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 4b622f3b5220..714381fd64d7 100644
--- a/arch/arm/boot/dts/ntc-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -44,7 +44,7 @@
*/
/dts-v1/;
-#include "ntc-gr8.dtsi"
+#include "sun5i-gr8.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
index ca54e03ef366..ca54e03ef366 100644
--- a/arch/arm/boot/dts/ntc-gr8.dtsi
+++ b/arch/arm/boot/dts/sun5i-gr8.dtsi
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 48fc24f36fcb..300a1bd5a6ec 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -282,11 +282,15 @@
uart1_pins_a: uart1@0 {
allwinner,pins = "PG6", "PG7";
allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
uart1_pins_cts_rts_a: uart1-cts-rts@0 {
allwinner,pins = "PG8", "PG9";
allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
mmc0_pins_a: mmc0@0 {
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 75a865406d3e..f4ba088b225e 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -410,7 +410,7 @@
};
uart3_pins: uart3 {
- allwinner,pins = "PG13", "PG14";
+ allwinner,pins = "PA13", "PA14";
allwinner,function = "uart3";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 5c1fcab4a6f7..1552db00cc59 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -88,10 +88,16 @@
switch0: switch0@0 {
compatible = "marvell,mv88e6085";
+ pinctrl-0 = <&pinctrl_gpio_switch0>;
+ pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
dsa,member = <0 0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
ports {
#address-cells = <1>;
@@ -99,16 +105,19 @@
port@0 {
reg = <0>;
label = "lan0";
+ phy-handle = <&switch0phy0>;
};
port@1 {
reg = <1>;
label = "lan1";
+ phy-handle = <&switch0phy1>;
};
port@2 {
reg = <2>;
label = "lan2";
+ phy-handle = <&switch0phy2>;
};
switch0port5: port@5 {
@@ -133,6 +142,24 @@
};
};
};
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch0phy0: switch0phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ switch0phy1: switch1phy0@1 {
+ reg = <1>;
+ interrupt-parent = <&switch0>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ switch0phy2: switch1phy0@2 {
+ reg = <2>;
+ interrupt-parent = <&switch0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
};
};
@@ -143,10 +170,16 @@
switch1: switch1@0 {
compatible = "marvell,mv88e6085";
+ pinctrl-0 = <&pinctrl_gpio_switch1>;
+ pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
dsa,member = <0 1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
ports {
#address-cells = <1>;
@@ -196,12 +229,18 @@
#size-cells = <0>;
switch1phy0: switch1phy0@0 {
reg = <0>;
+ interrupt-parent = <&switch1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
};
switch1phy1: switch1phy0@1 {
reg = <1>;
+ interrupt-parent = <&switch1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
};
switch1phy2: switch1phy0@2 {
reg = <2>;
+ interrupt-parent = <&switch1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
};
};
};
@@ -636,6 +675,18 @@
>;
};
+ pinctrl_gpio_switch0: pinctrl-gpio-switch0 {
+ fsl,pins = <
+ VF610_PAD_PTB5__GPIO_27 0x219d
+ >;
+ };
+
+ pinctrl_gpio_switch1: pinctrl-gpio-switch1 {
+ fsl,pins = <
+ VF610_PAD_PTB4__GPIO_26 0x219d
+ >;
+ };
+
pinctrl_i2c_mux_reset: pinctrl-i2c-mux-reset {
fsl,pins = <
VF610_PAD_PTE14__GPIO_119 0x31c2
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 37dc0fe1093f..46730017b3c5 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -757,19 +757,18 @@ EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
* while the switcher is active.
* We're just not ready to deal with that given the trickery involved.
*/
-static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bL_switcher_cpu_pre(unsigned int cpu)
{
- if (bL_switcher_active) {
- int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
- switch (action & 0xf) {
- case CPU_UP_PREPARE:
- case CPU_DOWN_PREPARE:
- if (pairing == -1)
- return NOTIFY_BAD;
- }
- }
- return NOTIFY_DONE;
+ int pairing;
+
+ if (!bL_switcher_active)
+ return 0;
+
+ pairing = bL_switcher_cpu_pairing[cpu];
+
+ if (pairing == -1)
+ return -EINVAL;
+ return 0;
}
static bool no_bL_switcher;
@@ -782,8 +781,15 @@ static int __init bL_switcher_init(void)
if (!mcpm_is_available())
return -ENODEV;
- cpu_notifier(bL_switcher_hotplug_callback, 0);
-
+ cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
+ bL_switcher_cpu_pre, NULL);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
+ NULL, bL_switcher_cpu_pre);
+ if (ret < 0) {
+ cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
+ pr_err("bL_switcher: Failed to allocate a hotplug state\n");
+ return ret;
+ }
if (!no_bL_switcher) {
ret = bL_switcher_enable();
if (ret)
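The bL_switcher hunks replace the old cpu_notifier() veto with the cpuhp state machine: a PREPARE-stage state plus a dynamically allocated online state whose teardown callback can refuse the transition. A minimal sketch of the same pattern, with hypothetical names (my_pre_down, "my/driver:predown"):

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Returning an error from the teardown callback aborts the CPU offline. */
static int my_pre_down(unsigned int cpu)
{
        return cpu == 0 ? -EBUSY : 0;   /* e.g. never allow CPU0 to go down */
}

static int __init my_hotplug_init(void)
{
        int ret;

        /* NULL startup: nothing to do when a CPU comes online. */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "my/driver:predown",
                                        NULL, my_pre_down);
        return ret < 0 ? ret : 0;
}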
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 301281645d08..75055df1cda3 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
}
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
struct safe_buffer *buf;
@@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
@@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
}
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);
@@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
DO_STATS(dev->archdata.dmabounce->bounce_count++);
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
void *ptr = buf->ptr;
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
@@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
}
- return map_single(dev, page_address(page) + offset, size, dir);
+ return map_single(dev, page_address(page) + offset, size, dir, attrs);
}
/*
@@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
return;
}
- unmap_single(dev, buf, size, dir);
+ unmap_single(dev, buf, size, dir, attrs);
}
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 11f37ed1dbff..30f39acd61bd 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -489,7 +489,7 @@ CONFIG_MFD_MAX8907=y
CONFIG_MFD_MAX8997=y
CONFIG_MFD_MAX8998=y
CONFIG_MFD_RK808=y
-CONFIG_MFD_PM8921_CORE=y
+CONFIG_MFD_PM8XXX=y
CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_SEC_CORE=y
@@ -649,6 +649,9 @@ CONFIG_SND_SOC_AK4642=m
CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SOC_STI=m
+CONFIG_SND_SOC_STI_SAS=m
+CONFIG_SND_SIMPLE_CARD=m
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
@@ -790,6 +793,7 @@ CONFIG_DMA_OMAP=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_XILINX_DMA=y
CONFIG_DMA_SUN6I=y
+CONFIG_ST_FDMA=m
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=y
CONFIG_SENSORS_ISL29028=y
@@ -823,6 +827,8 @@ CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ROCKCHIP_IOMMU=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_REMOTEPROC=m
+CONFIG_ST_REMOTEPROC=m
CONFIG_PM_DEVFREQ=y
CONFIG_ARM_TEGRA_DEVFREQ=m
CONFIG_MEMORY=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index a016ecc0084b..e4314b1227a3 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -411,7 +411,6 @@ CONFIG_MFD_MAX77693=y
CONFIG_MFD_MAX8907=m
CONFIG_EZX_PCAP=y
CONFIG_UCB1400_CORE=m
-CONFIG_MFD_PM8921_CORE=m
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65090=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index c2dff4fd5fc4..74e9cd759b99 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -119,7 +119,6 @@ CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_MSM=y
CONFIG_THERMAL=y
CONFIG_MFD_PM8XXX=y
-CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 27ed1b1cd1d7..13f1b4c289d4 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -88,9 +88,9 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Bit sliced AES using NEON instructions"
depends on KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
select CRYPTO_AES_ARM
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_SIMD
help
Use a faster and more secure NEON based implementation of AES in CBC,
CTR and XTS modes
@@ -104,8 +104,8 @@ config CRYPTO_AES_ARM_BS
config CRYPTO_AES_ARM_CE
tristate "Accelerated AES using ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_SIMD
help
Use an implementation of AES in CBC, CTR and XTS modes that uses
ARMv8 Crypto Extensions
@@ -120,4 +120,14 @@ config CRYPTO_GHASH_ARM_CE
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
that is part of the ARMv8 Crypto Extensions
+config CRYPTO_CRCT10DIF_ARM_CE
+ tristate "CRCT10DIF digest algorithm using PMULL instructions"
+ depends on KERNEL_MODE_NEON && CRC_T10DIF
+ select CRYPTO_HASH
+
+config CRYPTO_CRC32_ARM_CE
+ tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
+ depends on KERNEL_MODE_NEON && CRC32
+ select CRYPTO_HASH
+
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index fc5150702b64..b578a1820ab1 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -13,6 +13,8 @@ ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
ifneq ($(ce-obj-y)$(ce-obj-m),)
ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
@@ -36,6 +38,8 @@ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
+crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
+crc32-arm-ce-y := crc32-ce-core.o crc32-ce-glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index aef022a87c53..8857531915bf 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -12,8 +12,8 @@
#include <asm/neon.h>
#include <asm/hwcap.h>
#include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <crypto/xts.h>
@@ -88,8 +88,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
+#ifndef CONFIG_CPU_BIG_ENDIAN
rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+#else
+ rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
+ rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
+#endif
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
@@ -128,17 +133,17 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
return 0;
}
-static int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
ret = ce_aes_expandkey(ctx, in_key, key_len);
if (!ret)
return 0;
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -147,13 +152,13 @@ struct crypto_aes_xts_ctx {
struct crypto_aes_ctx __aligned(8) key2;
};
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- ret = xts_check_key(tfm, in_key, key_len);
+ ret = xts_verify_key(tfm, in_key, key_len);
if (ret)
return ret;
@@ -164,130 +169,113 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (!ret)
return 0;
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int blocks;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, num_rounds(ctx), blocks);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int blocks;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, num_rounds(ctx), blocks);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int blocks;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, num_rounds(ctx), blocks,
walk.iv);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int blocks;
int err;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, num_rounds(ctx), blocks,
walk.iv);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
int err, blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, num_rounds(ctx), blocks,
walk.iv);
- nbytes -= blocks * AES_BLOCK_SIZE;
- if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
- break;
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- if (walk.nbytes % AES_BLOCK_SIZE) {
- u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
- u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
+ unsigned int nbytes = walk.nbytes;
+ u8 *tdst = walk.dst.virt.addr;
+ u8 *tsrc = walk.src.virt.addr;
/*
* Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -298,231 +286,172 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
num_rounds(ctx), blocks, walk.iv);
memcpy(tdst, tail, nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
kernel_neon_end();
return err;
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = num_rounds(&ctx->key1);
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
walk.iv, (u8 *)ctx->key2.key_enc, first);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = num_rounds(&ctx->key1);
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
walk.iv, (u8 *)ctx->key2.key_enc, first);
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static struct crypto_alg aes_algs[] = { {
- .cra_name = "__ecb-aes-ce",
- .cra_driver_name = "__driver-ecb-aes-ce",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- .setkey = ce_aes_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+ .base = {
+ .cra_name = "__ecb(aes)",
+ .cra_driver_name = "__ecb-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
}, {
- .cra_name = "__cbc-aes-ce",
- .cra_driver_name = "__driver-cbc-aes-ce",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ce_aes_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
+ .base = {
+ .cra_name = "__cbc(aes)",
+ .cra_driver_name = "__cbc-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
}, {
- .cra_name = "__ctr-aes-ce",
- .cra_driver_name = "__driver-ctr-aes-ce",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ce_aes_setkey,
- .encrypt = ctr_encrypt,
- .decrypt = ctr_encrypt,
+ .base = {
+ .cra_name = "__ctr(aes)",
+ .cra_driver_name = "__ctr-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .setkey = ce_aes_setkey,
+ .encrypt = ctr_encrypt,
+ .decrypt = ctr_encrypt,
}, {
- .cra_name = "__xts-aes-ce",
- .cra_driver_name = "__driver-xts-aes-ce",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_set_key,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
+ .base = {
+ .cra_name = "__xts(aes)",
+ .cra_driver_name = "__xts-aes-ce",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
-}, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_set_key,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
} };
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+ simd_skcipher_free(aes_simd_algs[i]);
+
+ crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
static int __init aes_init(void)
{
+ struct simd_skcipher_alg *simd;
+ const char *basename;
+ const char *algname;
+ const char *drvname;
+ int err;
+ int i;
+
if (!(elf_hwcap2 & HWCAP2_AES))
return -ENODEV;
- return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
-}
-static void __exit aes_exit(void)
-{
- crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+ err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ algname = aes_algs[i].base.cra_name + 2;
+ drvname = aes_algs[i].base.cra_driver_name + 2;
+ basename = aes_algs[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+
+ aes_simd_algs[i] = simd;
+ }
+
+ return 0;
+
+unregister_simds:
+ aes_exit();
+ return err;
}
module_init(aes_init);
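The aes-ce-glue.c conversion above moves from the legacy blkcipher/ablk_helper interfaces to the skcipher walk API, with simd_skcipher_create_compat() supplying the async front-ends. Callers reach the result through the generic skcipher API; a minimal synchronous-style sketch (async -EINPROGRESS/-EBUSY handling omitted, buf assumed to be a kmalloc'd 16-byte block, not stack memory):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int encrypt_one_block(u8 *buf, const u8 *key, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, 16);
        if (ret)
                goto out;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        sg_init_one(&sg, buf, 16);
        skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
        ret = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out:
        crypto_free_skcipher(tfm);
        return ret;
}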
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 0511a6cafe24..d8e06de72ef3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -10,8 +10,9 @@
#include <asm/neon.h>
#include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <crypto/xts.h>
@@ -55,14 +56,14 @@ struct aesbs_xts_ctx {
struct AES_KEY twkey;
};
-static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_cbc_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
int bits = key_len * 8;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->dec.rk = ctx->enc;
@@ -71,33 +72,33 @@ static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
-static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_ctr_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
int bits = key_len * 8;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->enc.converted = 0;
return 0;
}
-static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int bits = key_len * 4;
int err;
- err = xts_check_key(tfm, in_key, key_len);
+ err = xts_verify_key(tfm, in_key, key_len);
if (err)
return err;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->dec.rk = ctx->enc.rk;
@@ -107,88 +108,52 @@ static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
-static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static inline void aesbs_encrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
{
- struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err;
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ AES_encrypt(src, dst, &ctx->enc);
+}
- while (walk.nbytes) {
- u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
- u8 *src = walk.src.virt.addr;
+static int aesbs_cbc_encrypt(struct skcipher_request *req)
+{
+ return crypto_cbc_encrypt_walk(req, aesbs_encrypt_one);
+}
- if (walk.dst.virt.addr == walk.src.virt.addr) {
- u8 *iv = walk.iv;
-
- do {
- crypto_xor(src, iv, AES_BLOCK_SIZE);
- AES_encrypt(src, src, &ctx->enc);
- iv = src;
- src += AES_BLOCK_SIZE;
- } while (--blocks);
- memcpy(walk.iv, iv, AES_BLOCK_SIZE);
- } else {
- u8 *dst = walk.dst.virt.addr;
-
- do {
- crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
- AES_encrypt(walk.iv, dst, &ctx->enc);
- memcpy(walk.iv, dst, AES_BLOCK_SIZE);
- src += AES_BLOCK_SIZE;
- dst += AES_BLOCK_SIZE;
- } while (--blocks);
- }
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
- }
- return err;
+static inline void aesbs_decrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ AES_decrypt(src, dst, &ctx->dec.rk);
}
-static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int aesbs_cbc_decrypt(struct skcipher_request *req)
{
- struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
-
- while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
- kernel_neon_begin();
- bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
- walk.nbytes, &ctx->dec, walk.iv);
- kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
- }
- while (walk.nbytes) {
- u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ for (err = skcipher_walk_virt(&walk, req, false);
+ (nbytes = walk.nbytes); err = skcipher_walk_done(&walk, nbytes)) {
+ u32 blocks = nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
- u8 bk[2][AES_BLOCK_SIZE];
u8 *iv = walk.iv;
- do {
- if (walk.dst.virt.addr == walk.src.virt.addr)
- memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
-
- AES_decrypt(src, dst, &ctx->dec.rk);
- crypto_xor(dst, iv, AES_BLOCK_SIZE);
-
- if (walk.dst.virt.addr == walk.src.virt.addr)
- iv = bk[blocks & 1];
- else
- iv = src;
+ if (blocks >= 8) {
+ kernel_neon_begin();
+ bsaes_cbc_encrypt(src, dst, nbytes, &ctx->dec, iv);
+ kernel_neon_end();
+ nbytes %= AES_BLOCK_SIZE;
+ continue;
+ }
- dst += AES_BLOCK_SIZE;
- src += AES_BLOCK_SIZE;
- } while (--blocks);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ nbytes = crypto_cbc_decrypt_blocks(&walk, tfm,
+ aesbs_decrypt_one);
}
return err;
}
@@ -206,17 +171,15 @@ static void inc_be128_ctr(__be32 ctr[], u32 addend)
}
}
-static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int aesbs_ctr_encrypt(struct skcipher_request *req)
{
- struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
u32 blocks;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -235,11 +198,7 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
kernel_neon_end();
inc_be128_ctr(ctr, blocks);
- nbytes -= blocks * AES_BLOCK_SIZE;
- if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
- break;
-
- err = blkcipher_walk_done(desc, &walk, tail);
+ err = skcipher_walk_done(&walk, tail);
}
if (walk.nbytes) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -248,23 +207,21 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
AES_encrypt(walk.iv, ks, &ctx->enc.rk);
if (tdst != tsrc)
- memcpy(tdst, tsrc, nbytes);
- crypto_xor(tdst, ks, nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ memcpy(tdst, tsrc, walk.nbytes);
+ crypto_xor(tdst, ks, walk.nbytes);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_encrypt(struct skcipher_request *req)
{
- struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -274,21 +231,19 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->enc, walk.iv);
kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
-static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_decrypt(struct skcipher_request *req)
{
- struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -298,141 +253,110 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
-static struct crypto_alg aesbs_algs[] = { {
- .cra_name = "__cbc-aes-neonbs",
- .cra_driver_name = "__driver-cbc-aes-neonbs",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_cbc_set_key,
- .encrypt = aesbs_cbc_encrypt,
- .decrypt = aesbs_cbc_decrypt,
+static struct skcipher_alg aesbs_algs[] = { {
+ .base = {
+ .cra_name = "__cbc(aes)",
+ .cra_driver_name = "__cbc-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_cbc_set_key,
+ .encrypt = aesbs_cbc_encrypt,
+ .decrypt = aesbs_cbc_decrypt,
}, {
- .cra_name = "__ctr-aes-neonbs",
- .cra_driver_name = "__driver-ctr-aes-neonbs",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_ctr_set_key,
- .encrypt = aesbs_ctr_encrypt,
- .decrypt = aesbs_ctr_encrypt,
+ .base = {
+ .cra_name = "__ctr(aes)",
+ .cra_driver_name = "__ctr-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .setkey = aesbs_ctr_set_key,
+ .encrypt = aesbs_ctr_encrypt,
+ .decrypt = aesbs_ctr_encrypt,
}, {
- .cra_name = "__xts-aes-neonbs",
- .cra_driver_name = "__driver-xts-aes-neonbs",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aesbs_xts_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_xts_set_key,
- .encrypt = aesbs_xts_encrypt,
- .decrypt = aesbs_xts_decrypt,
+ .base = {
+ .cra_name = "__xts(aes)",
+ .cra_driver_name = "__xts-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-neonbs",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-neonbs",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-neonbs",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_xts_set_key,
+ .encrypt = aesbs_xts_encrypt,
+ .decrypt = aesbs_xts_decrypt,
} };
+struct simd_skcipher_alg *aesbs_simd_algs[ARRAY_SIZE(aesbs_algs)];
+
+static void aesbs_mod_exit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aesbs_simd_algs) && aesbs_simd_algs[i]; i++)
+ simd_skcipher_free(aesbs_simd_algs[i]);
+
+ crypto_unregister_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
static int __init aesbs_mod_init(void)
{
+ struct simd_skcipher_alg *simd;
+ const char *basename;
+ const char *algname;
+ const char *drvname;
+ int err;
+ int i;
+
if (!cpu_has_neon())
return -ENODEV;
- return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
-}
+ err = crypto_register_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+ if (err)
+ return err;
-static void __exit aesbs_mod_exit(void)
-{
- crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+ for (i = 0; i < ARRAY_SIZE(aesbs_algs); i++) {
+ algname = aesbs_algs[i].base.cra_name + 2;
+ drvname = aesbs_algs[i].base.cra_driver_name + 2;
+ basename = aesbs_algs[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+
+ aesbs_simd_algs[i] = simd;
+ }
+
+ return 0;
+
+unregister_simds:
+ aesbs_mod_exit();
+ return err;
}
module_init(aesbs_mod_init);
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S
new file mode 100644
index 000000000000..e63d400dc5c1
--- /dev/null
+++ b/arch/arm/crypto/crc32-ce-core.S
@@ -0,0 +1,306 @@
+/*
+ * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
+ * calculation.
+ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
+ * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
+ * at:
+ * http://www.intel.com/products/processor/manuals/
+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 2B: Instruction Set Reference, N-Z
+ *
+ * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
+ * Alexander Boyko <Alexander_Boyko@xyratex.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .align 6
+ .arch armv8-a
+ .arch_extension crc
+ .fpu crypto-neon-fp-armv8
+
+.Lcrc32_constants:
+ /*
+ * [(x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
+ * #define CONSTANT_R1 0x154442bd4LL
+ *
+ * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
+ * #define CONSTANT_R2 0x1c6e41596LL
+ */
+ .quad 0x0000000154442bd4
+ .quad 0x00000001c6e41596
+
+ /*
+ * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
+ * #define CONSTANT_R3 0x1751997d0LL
+ *
+ * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
+ * #define CONSTANT_R4 0x0ccaa009eLL
+ */
+ .quad 0x00000001751997d0
+ .quad 0x00000000ccaa009e
+
+ /*
+ * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
+ * #define CONSTANT_R5 0x163cd6124LL
+ */
+ .quad 0x0000000163cd6124
+ .quad 0x00000000FFFFFFFF
+
+ /*
+ * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
+ *
+ * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
+ * = 0x1F7011641LL
+ * #define CONSTANT_RU 0x1F7011641LL
+ */
+ .quad 0x00000001DB710641
+ .quad 0x00000001F7011641
+
+.Lcrc32c_constants:
+ .quad 0x00000000740eef02
+ .quad 0x000000009e4addf8
+ .quad 0x00000000f20c0dfe
+ .quad 0x000000014cd00bd6
+ .quad 0x00000000dd45aab8
+ .quad 0x00000000FFFFFFFF
+ .quad 0x0000000105ec76f0
+ .quad 0x00000000dea713f1
+
+ dCONSTANTl .req d0
+ dCONSTANTh .req d1
+ qCONSTANT .req q0
+
+ BUF .req r0
+ LEN .req r1
+ CRC .req r2
+
+ qzr .req q9
+
+ /**
+ * Calculate crc32
+ * BUF - buffer
+ * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
+ * CRC - initial crc32
+ * return crc32 in r0
+ * uint crc32_pmull_le(unsigned char const *buffer,
+ * size_t len, uint crc32)
+ */
+ENTRY(crc32_pmull_le)
+ adr r3, .Lcrc32_constants
+ b 0f
+
+ENTRY(crc32c_pmull_le)
+ adr r3, .Lcrc32c_constants
+
+0: bic LEN, LEN, #15
+ vld1.8 {q1-q2}, [BUF, :128]!
+ vld1.8 {q3-q4}, [BUF, :128]!
+ vmov.i8 qzr, #0
+ vmov.i8 qCONSTANT, #0
+ vmov dCONSTANTl[0], CRC
+ veor.8 d2, d2, dCONSTANTl
+ sub LEN, LEN, #0x40
+ cmp LEN, #0x40
+ blt less_64
+
+ vld1.64 {qCONSTANT}, [r3]
+
+loop_64: /* 64 bytes Full cache line folding */
+ sub LEN, LEN, #0x40
+
+ vmull.p64 q5, d3, dCONSTANTh
+ vmull.p64 q6, d5, dCONSTANTh
+ vmull.p64 q7, d7, dCONSTANTh
+ vmull.p64 q8, d9, dCONSTANTh
+
+ vmull.p64 q1, d2, dCONSTANTl
+ vmull.p64 q2, d4, dCONSTANTl
+ vmull.p64 q3, d6, dCONSTANTl
+ vmull.p64 q4, d8, dCONSTANTl
+
+ veor.8 q1, q1, q5
+ vld1.8 {q5}, [BUF, :128]!
+ veor.8 q2, q2, q6
+ vld1.8 {q6}, [BUF, :128]!
+ veor.8 q3, q3, q7
+ vld1.8 {q7}, [BUF, :128]!
+ veor.8 q4, q4, q8
+ vld1.8 {q8}, [BUF, :128]!
+
+ veor.8 q1, q1, q5
+ veor.8 q2, q2, q6
+ veor.8 q3, q3, q7
+ veor.8 q4, q4, q8
+
+ cmp LEN, #0x40
+ bge loop_64
+
+less_64: /* Folding cache line into 128bit */
+ vldr dCONSTANTl, [r3, #16]
+ vldr dCONSTANTh, [r3, #24]
+
+ vmull.p64 q5, d3, dCONSTANTh
+ vmull.p64 q1, d2, dCONSTANTl
+ veor.8 q1, q1, q5
+ veor.8 q1, q1, q2
+
+ vmull.p64 q5, d3, dCONSTANTh
+ vmull.p64 q1, d2, dCONSTANTl
+ veor.8 q1, q1, q5
+ veor.8 q1, q1, q3
+
+ vmull.p64 q5, d3, dCONSTANTh
+ vmull.p64 q1, d2, dCONSTANTl
+ veor.8 q1, q1, q5
+ veor.8 q1, q1, q4
+
+ teq LEN, #0
+ beq fold_64
+
+loop_16: /* Folding rest buffer into 128bit */
+ subs LEN, LEN, #0x10
+
+ vld1.8 {q2}, [BUF, :128]!
+ vmull.p64 q5, d3, dCONSTANTh
+ vmull.p64 q1, d2, dCONSTANTl
+ veor.8 q1, q1, q5
+ veor.8 q1, q1, q2
+
+ bne loop_16
+
+fold_64:
+ /* perform the last 64 bit fold, also adds 32 zeroes
+ * to the input stream */
+ vmull.p64 q2, d2, dCONSTANTh
+ vext.8 q1, q1, qzr, #8
+ veor.8 q1, q1, q2
+
+ /* final 32-bit fold */
+ vldr dCONSTANTl, [r3, #32]
+ vldr d6, [r3, #40]
+ vmov.i8 d7, #0
+
+ vext.8 q2, q1, qzr, #4
+ vand.8 d2, d2, d6
+ vmull.p64 q1, d2, dCONSTANTl
+ veor.8 q1, q1, q2
+
+ /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
+ vldr dCONSTANTl, [r3, #48]
+ vldr dCONSTANTh, [r3, #56]
+
+ vand.8 q2, q1, q3
+ vext.8 q2, qzr, q2, #8
+ vmull.p64 q2, d5, dCONSTANTh
+ vand.8 q2, q2, q3
+ vmull.p64 q2, d4, dCONSTANTl
+ veor.8 q1, q1, q2
+ vmov r0, s5
+
+ bx lr
+ENDPROC(crc32_pmull_le)
+ENDPROC(crc32c_pmull_le)
+
+ .macro __crc32, c
+ subs ip, r2, #8
+ bmi .Ltail\c
+
+ tst r1, #3
+ bne .Lunaligned\c
+
+ teq ip, #0
+.Laligned8\c:
+ ldrd r2, r3, [r1], #8
+ARM_BE8(rev r2, r2 )
+ARM_BE8(rev r3, r3 )
+ crc32\c\()w r0, r0, r2
+ crc32\c\()w r0, r0, r3
+ bxeq lr
+ subs ip, ip, #8
+ bpl .Laligned8\c
+
+.Ltail\c:
+ tst ip, #4
+ beq 2f
+ ldr r3, [r1], #4
+ARM_BE8(rev r3, r3 )
+ crc32\c\()w r0, r0, r3
+
+2: tst ip, #2
+ beq 1f
+ ldrh r3, [r1], #2
+ARM_BE8(rev16 r3, r3 )
+ crc32\c\()h r0, r0, r3
+
+1: tst ip, #1
+ bxeq lr
+ ldrb r3, [r1]
+ crc32\c\()b r0, r0, r3
+ bx lr
+
+.Lunaligned\c:
+ tst r1, #1
+ beq 2f
+ ldrb r3, [r1], #1
+ subs r2, r2, #1
+ crc32\c\()b r0, r0, r3
+
+ tst r1, #2
+ beq 0f
+2: ldrh r3, [r1], #2
+ subs r2, r2, #2
+ARM_BE8(rev16 r3, r3 )
+ crc32\c\()h r0, r0, r3
+
+0: subs ip, r2, #8
+ bpl .Laligned8\c
+ b .Ltail\c
+ .endm
+
+ .align 5
+ENTRY(crc32_armv8_le)
+ __crc32
+ENDPROC(crc32_armv8_le)
+
+ .align 5
+ENTRY(crc32c_armv8_le)
+ __crc32 c
+ENDPROC(crc32c_armv8_le)
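The constant blocks above implement the standard PMULL/PCLMULQDQ folding scheme for the reflected CRC-32 polynomial 0xEDB88320 (and, in .Lcrc32c_constants, for CRC-32C). As a cross-check for the assembly, a minimal bit-at-a-time reference in C is sketched below; it follows the same convention as the glue code in the next file: no inversion inside the primitive, the caller supplies the initial value (crc32c_cra_init seeds ~0 and crc32c_final inverts). The CRC-32C polynomial constant is taken from the standard, not from this patch.

#include <stdint.h>
#include <stddef.h>

#define CRC32_POLY_LE	0xEDB88320u	/* from the comment block above    */
#define CRC32C_POLY_LE	0x82F63B78u	/* Castagnoli polynomial (assumed) */

/* Bit-at-a-time reflected CRC; no pre/post inversion inside. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p, size_t len,
			     uint32_t poly)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (poly & -(crc & 1));
	}
	return crc;
}

For aligned buffers of 64 bytes or more, crc32_pmull_le() above is expected to match this loop, which is exactly the window the glue code steers into the NEON path.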
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
new file mode 100644
index 000000000000..e1566bec1016
--- /dev/null
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -0,0 +1,242 @@
+/*
+ * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+
+#define PMULL_MIN_LEN 64L /* minimum size of buffer
+ * for crc32_pmull_le() */
+#define SCALE_F 16L /* size of NEON register */
+
+asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len);
+static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len);
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 0;
+ return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = ~0;
+ return 0;
+}
+
+static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *crc = shash_desc_ctx(desc);
+
+ *crc = *mctx;
+ return 0;
+}
+
+static int crc32_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ *crc = crc32_armv8_le(*crc, data, length);
+ return 0;
+}
+
+static int crc32c_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ *crc = crc32c_armv8_le(*crc, data, length);
+ return 0;
+}
+
+static int crc32_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ put_unaligned_le32(*crc, out);
+ return 0;
+}
+
+static int crc32c_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ put_unaligned_le32(~*crc, out);
+ return 0;
+}
+
+static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if (may_use_simd()) {
+ if ((u32)data % SCALE_F) {
+ l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
+
+ *crc = fallback_crc32(*crc, data, l);
+
+ data += l;
+ length -= l;
+ }
+
+ if (length >= PMULL_MIN_LEN) {
+ l = round_down(length, SCALE_F);
+
+ kernel_neon_begin();
+ *crc = crc32_pmull_le(data, l, *crc);
+ kernel_neon_end();
+
+ data += l;
+ length -= l;
+ }
+ }
+
+ if (length > 0)
+ *crc = fallback_crc32(*crc, data, length);
+
+ return 0;
+}
+
+static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if (may_use_simd()) {
+ if ((u32)data % SCALE_F) {
+ l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
+
+ *crc = fallback_crc32c(*crc, data, l);
+
+ data += l;
+ length -= l;
+ }
+
+ if (length >= PMULL_MIN_LEN) {
+ l = round_down(length, SCALE_F);
+
+ kernel_neon_begin();
+ *crc = crc32c_pmull_le(data, l, *crc);
+ kernel_neon_end();
+
+ data += l;
+ length -= l;
+ }
+ }
+
+ if (length > 0)
+ *crc = fallback_crc32c(*crc, data, length);
+
+ return 0;
+}
+
+static struct shash_alg crc32_pmull_algs[] = { {
+ .setkey = crc32_setkey,
+ .init = crc32_init,
+ .update = crc32_update,
+ .final = crc32_final,
+ .descsize = sizeof(u32),
+ .digestsize = sizeof(u32),
+
+ .base.cra_ctxsize = sizeof(u32),
+ .base.cra_init = crc32_cra_init,
+ .base.cra_name = "crc32",
+ .base.cra_driver_name = "crc32-arm-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .setkey = crc32_setkey,
+ .init = crc32_init,
+ .update = crc32c_update,
+ .final = crc32c_final,
+ .descsize = sizeof(u32),
+ .digestsize = sizeof(u32),
+
+ .base.cra_ctxsize = sizeof(u32),
+ .base.cra_init = crc32c_cra_init,
+ .base.cra_name = "crc32c",
+ .base.cra_driver_name = "crc32c-arm-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init crc32_pmull_mod_init(void)
+{
+ if (elf_hwcap2 & HWCAP2_PMULL) {
+ crc32_pmull_algs[0].update = crc32_pmull_update;
+ crc32_pmull_algs[1].update = crc32c_pmull_update;
+
+ if (elf_hwcap2 & HWCAP2_CRC32) {
+ fallback_crc32 = crc32_armv8_le;
+ fallback_crc32c = crc32c_armv8_le;
+ } else {
+ fallback_crc32 = crc32_le;
+ fallback_crc32c = __crc32c_le;
+ }
+ } else if (!(elf_hwcap2 & HWCAP2_CRC32)) {
+ return -ENODEV;
+ }
+
+ return crypto_register_shashes(crc32_pmull_algs,
+ ARRAY_SIZE(crc32_pmull_algs));
+}
+
+static void __exit crc32_pmull_mod_exit(void)
+{
+ crypto_unregister_shashes(crc32_pmull_algs,
+ ARRAY_SIZE(crc32_pmull_algs));
+}
+
+module_init(crc32_pmull_mod_init);
+module_exit(crc32_pmull_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32c");
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
new file mode 100644
index 000000000000..ce45ba0c0687
--- /dev/null
+++ b/arch/arm/crypto/crct10dif-ce-core.S
@@ -0,0 +1,427 @@
+//
+// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
+//
+// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+
+//
+// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
+//
+// Copyright (c) 2013, Intel Corporation
+//
+// Authors:
+// Erdinc Ozturk <erdinc.ozturk@intel.com>
+// Vinodh Gopal <vinodh.gopal@intel.com>
+// James Guilford <james.guilford@intel.com>
+// Tim Chen <tim.c.chen@linux.intel.com>
+//
+// This software is available to you under a choice of one of two
+// licenses. You may choose to be licensed under the terms of the GNU
+// General Public License (GPL) Version 2, available from the file
+// COPYING in the main directory of this source tree, or the
+// OpenIB.org BSD license below:
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// * Neither the name of the Intel Corporation nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+//
+// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Function API:
+// UINT16 crc_t10dif_pcl(
+// UINT16 init_crc, //initial CRC value, 16 bits
+// const unsigned char *buf, //buffer pointer to calculate CRC on
+// UINT64 len //buffer length in bytes (64-bit data)
+// );
+//
+// Reference paper titled "Fast CRC Computation for Generic
+// Polynomials Using PCLMULQDQ Instruction"
+// URL: http://www.intel.com/content/dam/www/public/us/en/documents
+// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+//
+//
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+ .text
+ .fpu crypto-neon-fp-armv8
+
+ arg1_low32 .req r0
+ arg2 .req r1
+ arg3 .req r2
+
+ qzr .req q13
+
+ q0l .req d0
+ q0h .req d1
+ q1l .req d2
+ q1h .req d3
+ q2l .req d4
+ q2h .req d5
+ q3l .req d6
+ q3h .req d7
+ q4l .req d8
+ q4h .req d9
+ q5l .req d10
+ q5h .req d11
+ q6l .req d12
+ q6h .req d13
+ q7l .req d14
+ q7h .req d15
+
+ENTRY(crc_t10dif_pmull)
+ vmov.i8 qzr, #0 // init zero register
+
+ // adjust the 16-bit initial_crc value, scale it to 32 bits
+ lsl arg1_low32, arg1_low32, #16
+
+ // check if smaller than 256
+ cmp arg3, #256
+
+ // for sizes less than 128, we can't fold 64B at a time...
+ blt _less_than_128
+
+ // load the initial crc value
+ // the crc value does not need to be byte-reflected, but it needs
+ // to be moved to the high part of the register, because the data
+ // will be byte-reflected and will then line up with the initial
+ // crc in the correct place.
+ vmov s0, arg1_low32 // initial crc
+ vext.8 q10, qzr, q0, #4
+
+ // receive the initial 64B data, xor the initial crc value
+ vld1.64 {q0-q1}, [arg2, :128]!
+ vld1.64 {q2-q3}, [arg2, :128]!
+ vld1.64 {q4-q5}, [arg2, :128]!
+ vld1.64 {q6-q7}, [arg2, :128]!
+CPU_LE( vrev64.8 q0, q0 )
+CPU_LE( vrev64.8 q1, q1 )
+CPU_LE( vrev64.8 q2, q2 )
+CPU_LE( vrev64.8 q3, q3 )
+CPU_LE( vrev64.8 q4, q4 )
+CPU_LE( vrev64.8 q5, q5 )
+CPU_LE( vrev64.8 q6, q6 )
+CPU_LE( vrev64.8 q7, q7 )
+
+ vswp d0, d1
+ vswp d2, d3
+ vswp d4, d5
+ vswp d6, d7
+ vswp d8, d9
+ vswp d10, d11
+ vswp d12, d13
+ vswp d14, d15
+
+ // XOR the initial_crc value
+ veor.8 q0, q0, q10
+
+ adr ip, rk3
+ vld1.64 {q10}, [ip, :128] // q10 has rk3 and rk4
+
+ //
+ // we subtract 256 instead of 128 to save one instruction from the loop
+ //
+ sub arg3, arg3, #256
+
+ // at this section of the code, there is 64*x+y (0<=y<64) bytes of
+ // buffer. The _fold_64_B_loop will fold 64B at a time
+ // until we have 64+y Bytes of buffer
+
+
+ // fold 64B at a time. This section of the code folds 4 vector
+ // registers in parallel
+_fold_64_B_loop:
+
+ .macro fold64, reg1, reg2
+ vld1.64 {q11-q12}, [arg2, :128]!
+
+ vmull.p64 q8, \reg1\()h, d21
+ vmull.p64 \reg1, \reg1\()l, d20
+ vmull.p64 q9, \reg2\()h, d21
+ vmull.p64 \reg2, \reg2\()l, d20
+
+CPU_LE( vrev64.8 q11, q11 )
+CPU_LE( vrev64.8 q12, q12 )
+ vswp d22, d23
+ vswp d24, d25
+
+ veor.8 \reg1, \reg1, q8
+ veor.8 \reg2, \reg2, q9
+ veor.8 \reg1, \reg1, q11
+ veor.8 \reg2, \reg2, q12
+ .endm
+
+ fold64 q0, q1
+ fold64 q2, q3
+ fold64 q4, q5
+ fold64 q6, q7
+
+ subs arg3, arg3, #128
+
+ // check if there is another 64B in the buffer to be able to fold
+ bge _fold_64_B_loop
+
+ // at this point, the buffer pointer is pointing at the last y bytes
+ // of the buffer, and the 128B of folded data is held in 8 of the
+ // vector registers: q0-q7
+
+ // fold the 8 vector registers to 1 vector register with different
+ // constants
+
+ adr ip, rk9
+ vld1.64 {q10}, [ip, :128]!
+
+ .macro fold16, reg, rk
+ vmull.p64 q8, \reg\()l, d20
+ vmull.p64 \reg, \reg\()h, d21
+ .ifnb \rk
+ vld1.64 {q10}, [ip, :128]!
+ .endif
+ veor.8 q7, q7, q8
+ veor.8 q7, q7, \reg
+ .endm
+
+ fold16 q0, rk11
+ fold16 q1, rk13
+ fold16 q2, rk15
+ fold16 q3, rk17
+ fold16 q4, rk19
+ fold16 q5, rk1
+ fold16 q6
+
+ // instead of 128, we add 112 (128-16) to the loop counter to save one
+ // instruction from the loop; instead of a cmp instruction, we use the
+ // negative flag with the blt instruction
+ adds arg3, arg3, #(128-16)
+ blt _final_reduction_for_128
+
+ // now we have 16+y bytes left to reduce. 16 Bytes is in register v7
+ // and the rest is in memory. We can fold 16 bytes at a time if y>=16
+ // continue folding 16B at a time
+
+_16B_reduction_loop:
+ vmull.p64 q8, d14, d20
+ vmull.p64 q7, d15, d21
+ veor.8 q7, q7, q8
+
+ vld1.64 {q0}, [arg2, :128]!
+CPU_LE( vrev64.8 q0, q0 )
+ vswp d0, d1
+ veor.8 q7, q7, q0
+ subs arg3, arg3, #16
+
+ // instead of a cmp instruction, we utilize the flags with the
+ // bge instruction equivalent of: cmp arg3, 16-16
+ // check if there is any more 16B in the buffer to be able to fold
+ bge _16B_reduction_loop
+
+ // now we have 16+z bytes left to reduce, where 0<= z < 16.
+ // first, we reduce the data in the q7 register
+
+_final_reduction_for_128:
+ // check if any more data to fold. If not, compute the CRC of
+ // the final 128 bits
+ adds arg3, arg3, #16
+ beq _128_done
+
+ // here we are getting data that is less than 16 bytes.
+ // since we know that there was data before the pointer, we can
+ // offset the input pointer before the actual point, to receive
+ // exactly 16 bytes. after that the registers need to be adjusted.
+_get_last_two_regs:
+ add arg2, arg2, arg3
+ sub arg2, arg2, #16
+ vld1.64 {q1}, [arg2]
+CPU_LE( vrev64.8 q1, q1 )
+ vswp d2, d3
+
+ // get rid of the extra data that was loaded before
+ // load the shift constant
+ adr ip, tbl_shf_table + 16
+ sub ip, ip, arg3
+ vld1.8 {q0}, [ip]
+
+ // shift v2 to the left by arg3 bytes
+ vtbl.8 d4, {d14-d15}, d0
+ vtbl.8 d5, {d14-d15}, d1
+
+ // shift v7 to the right by 16-arg3 bytes
+ vmov.i8 q9, #0x80
+ veor.8 q0, q0, q9
+ vtbl.8 d18, {d14-d15}, d0
+ vtbl.8 d19, {d14-d15}, d1
+
+ // blend
+ vshr.s8 q0, q0, #7 // convert to 8-bit mask
+ vbsl.8 q0, q2, q1
+
+ // fold 16 Bytes
+ vmull.p64 q8, d18, d20
+ vmull.p64 q7, d19, d21
+ veor.8 q7, q7, q8
+ veor.8 q7, q7, q0
+
+_128_done:
+ // compute crc of a 128-bit value
+ vldr d20, rk5
+ vldr d21, rk6 // rk5 and rk6 in q10
+
+ // 64b fold
+ vext.8 q0, qzr, q7, #8
+ vmull.p64 q7, d15, d20
+ veor.8 q7, q7, q0
+
+ // 32b fold
+ vext.8 q0, q7, qzr, #12
+ vmov s31, s3
+ vmull.p64 q0, d0, d21
+ veor.8 q7, q0, q7
+
+ // barrett reduction
+_barrett:
+ vldr d20, rk7
+ vldr d21, rk8
+
+ vmull.p64 q0, d15, d20
+ vext.8 q0, qzr, q0, #12
+ vmull.p64 q0, d1, d21
+ vext.8 q0, qzr, q0, #12
+ veor.8 q7, q7, q0
+ vmov r0, s29
+
+_cleanup:
+ // scale the result back to 16 bits
+ lsr r0, r0, #16
+ bx lr
+
+_less_than_128:
+ teq arg3, #0
+ beq _cleanup
+
+ vmov.i8 q0, #0
+ vmov s3, arg1_low32 // get the initial crc value
+
+ vld1.64 {q7}, [arg2, :128]!
+CPU_LE( vrev64.8 q7, q7 )
+ vswp d14, d15
+ veor.8 q7, q7, q0
+
+ cmp arg3, #16
+ beq _128_done // exactly 16 left
+ blt _less_than_16_left
+
+ // now if there is, load the constants
+ vldr d20, rk1
+ vldr d21, rk2 // rk1 and rk2 in q10
+
+ // check if there is enough buffer to be able to fold 16B at a time
+ subs arg3, arg3, #32
+ addlt arg3, arg3, #16
+ blt _get_last_two_regs
+ b _16B_reduction_loop
+
+_less_than_16_left:
+ // shl r9, 4
+ adr ip, tbl_shf_table + 16
+ sub ip, ip, arg3
+ vld1.8 {q0}, [ip]
+ vmov.i8 q9, #0x80
+ veor.8 q0, q0, q9
+ vtbl.8 d18, {d14-d15}, d0
+ vtbl.8 d15, {d14-d15}, d1
+ vmov d14, d18
+ b _128_done
+ENDPROC(crc_t10dif_pmull)
+
+// precomputed constants
+// these constants are precomputed from the poly:
+// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+ .align 4
+// Q = 0x18BB70000
+// rk1 = 2^(32*3) mod Q << 32
+// rk2 = 2^(32*5) mod Q << 32
+// rk3 = 2^(32*15) mod Q << 32
+// rk4 = 2^(32*17) mod Q << 32
+// rk5 = 2^(32*3) mod Q << 32
+// rk6 = 2^(32*2) mod Q << 32
+// rk7 = floor(2^64/Q)
+// rk8 = Q
+
+rk3: .quad 0x9d9d000000000000
+rk4: .quad 0x7cf5000000000000
+rk5: .quad 0x2d56000000000000
+rk6: .quad 0x1368000000000000
+rk7: .quad 0x00000001f65a57f8
+rk8: .quad 0x000000018bb70000
+rk9: .quad 0xceae000000000000
+rk10: .quad 0xbfd6000000000000
+rk11: .quad 0x1e16000000000000
+rk12: .quad 0x713c000000000000
+rk13: .quad 0xf7f9000000000000
+rk14: .quad 0x80a6000000000000
+rk15: .quad 0x044c000000000000
+rk16: .quad 0xe658000000000000
+rk17: .quad 0xad18000000000000
+rk18: .quad 0xa497000000000000
+rk19: .quad 0x6ee3000000000000
+rk20: .quad 0xe7b5000000000000
+rk1: .quad 0x2d56000000000000
+rk2: .quad 0x06df000000000000
+
+tbl_shf_table:
+// use these values for shift constants for the tbl/tbx instruction
+// different alignments result in values as shown:
+// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
+// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
+// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
+// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
+// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
+// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
+// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
+// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
+// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
+// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
+// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
+
+ .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
+ .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
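Per the header comment, the rk* constants are remainders x^(32k) mod Q with Q the 0x8bb7 polynomial scaled to 32 bits (0x18bb70000), stored in the top half of each .quad; rk7 is the Barrett quotient floor(x^64 / Q) and rk8 is Q itself. A small GF(2) helper along these lines could re-derive the remainder-type constants; it is an illustration of the derivation, not part of the patch, and the expected rk1 value is simply read off the table above.

#include <stdint.h>
#include <stdio.h>

/* x^n mod q over GF(2), for a q of degree 32 (bit 32 set). */
static uint64_t gf2_xn_mod_q(unsigned int n, uint64_t q)
{
	uint64_t r = 1;			/* the polynomial "1" */

	while (n--) {
		r <<= 1;		/* multiply by x */
		if (r & (1ULL << 32))
			r ^= q;		/* reduce once degree reaches deg(q) */
	}
	return r;
}

int main(void)
{
	uint64_t q = 0x18bb70000ULL;	/* 0x8bb7 scaled to 32 bits */

	/* rk1 = (x^(32*3) mod Q) << 32 per the comments; compare with the
	 * rk1 .quad in the table above (0x2d56000000000000). */
	printf("rk1: 0x%016llx\n",
	       (unsigned long long)(gf2_xn_mod_q(32 * 3, q) << 32));
	return 0;
}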
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
new file mode 100644
index 000000000000..d428355cf38d
--- /dev/null
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -0,0 +1,101 @@
+/*
+ * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
+
+asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len);
+
+static int crct10dif_init(struct shash_desc *desc)
+{
+ u16 *crc = shash_desc_ctx(desc);
+
+ *crc = 0;
+ return 0;
+}
+
+static int crct10dif_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u16 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if (!may_use_simd()) {
+ *crc = crc_t10dif_generic(*crc, data, length);
+ } else {
+ if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+ l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+ ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+
+ *crc = crc_t10dif_generic(*crc, data, l);
+
+ length -= l;
+ data += l;
+ }
+ if (length > 0) {
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull(*crc, data, length);
+ kernel_neon_end();
+ }
+ }
+ return 0;
+}
+
+static int crct10dif_final(struct shash_desc *desc, u8 *out)
+{
+ u16 *crc = shash_desc_ctx(desc);
+
+ *(u16 *)out = *crc;
+ return 0;
+}
+
+static struct shash_alg crc_t10dif_alg = {
+ .digestsize = CRC_T10DIF_DIGEST_SIZE,
+ .init = crct10dif_init,
+ .update = crct10dif_update,
+ .final = crct10dif_final,
+ .descsize = CRC_T10DIF_DIGEST_SIZE,
+
+ .base.cra_name = "crct10dif",
+ .base.cra_driver_name = "crct10dif-arm-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init crc_t10dif_mod_init(void)
+{
+ if (!(elf_hwcap2 & HWCAP2_PMULL))
+ return -ENODEV;
+
+ return crypto_register_shash(&crc_t10dif_alg);
+}
+
+static void __exit crc_t10dif_mod_exit(void)
+{
+ crypto_unregister_shash(&crc_t10dif_alg);
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 0745538b26d3..55e0e3ea9cb6 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -8,7 +8,6 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += export.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a8088290b778..27475904e096 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <asm/barrier.h>
+#include <asm/cacheflush.h>
#include <asm/cp15.h>
#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
@@ -230,19 +231,14 @@ static inline void gic_write_bpr1(u32 val)
* AArch32, since the syndrome register doesn't provide any information for
* them.
* Consequently, the following IO helpers use 32bit accesses.
- *
- * There are only two registers that need 64bit accesses in this driver:
- * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
- * The upper-word (aff3) will always be 0, so there is no need for a lock.
- * - GICR_TYPER is an ID register and doesn't need atomicity.
*/
-static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
{
writel_relaxed((u32)val, addr);
writel_relaxed((u32)(val >> 32), addr + 4);
}
-static inline u64 gic_read_typer(const volatile void __iomem *addr)
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
{
u64 val;
@@ -251,5 +247,49 @@ static inline u64 gic_read_typer(const volatile void __iomem *addr)
return val;
}
+#define gic_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn, contain the affinity values associated to each interrupt.
+ * The upper-word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c) __gic_readq_nonatomic(c)
+#define gits_write_baser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASE are changed with LPIs disabled, so they
+ * won't be being used during any updates and can be changed non-atomically
+ */
+#define gicr_read_propbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c) __gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
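To make the non-atomicity of the helpers above concrete: every 64-bit ITS or redistributor register access is split into two 32-bit MMIO accesses, low word first. The GITS_CBASER offset below is quoted from the GIC architecture for illustration only.

/* gits_write_cbaser(val, its_base + 0x0080) expands, through
 * __gic_writeq_nonatomic(), into:
 *
 *	writel_relaxed((u32)val,         its_base + 0x0080);
 *	writel_relaxed((u32)(val >> 32), its_base + 0x0080 + 4);
 *
 * which is only safe because CBASER (like BASERn and CWRITER) tolerates
 * its two halves being written independently, as the comments above note.
 */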
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 766bf9b78160..0b06f5341b45 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -57,6 +57,9 @@ void efi_virtmap_unload(void);
#define __efi_call_early(f, ...) f(__VA_ARGS__)
#define efi_is_64bit() (false)
+#define efi_call_proto(protocol, f, instance, ...) \
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 021692c64de3..42871fb8340e 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
/* VTTBR value associated with below pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* Timer */
struct arch_timer_kvm timer;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
deleted file mode 100644
index 87c044910fe0..000000000000
--- a/arch/arm/include/asm/mutex.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/include/asm/mutex.h
- *
- * ARM optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-/*
- * On pre-ARMv6 hardware this results in a swp-based implementation,
- * which is the most efficient. For ARMv6+, we have exclusive memory
- * accessors and use atomic_dec to avoid the extra xchg operations
- * on the locking slowpaths.
- */
-#if __LINUX_ARM_ARCH__ < 6
-#include <asm-generic/mutex-xchg.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
-#endif /* _ASM_MUTEX_H */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 8a1e8e995dae..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
-#define cpu_relax_lowlatency() cpu_relax()
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 1e25cd80589e..3f2eb76243e3 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -186,6 +186,8 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
tlb_add_flush(tlb, addr);
}
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -211,18 +213,17 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
+ tlb->pages[tlb->nr++] = page;
+ VM_WARN_ON(tlb->nr > tlb->max);
if (tlb->nr == tlb->max)
return true;
- tlb->pages[tlb->nr++] = page;
return false;
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (__tlb_remove_page(tlb, page)) {
+ if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
- __tlb_remove_page(tlb, page);
- }
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -231,12 +232,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -284,5 +279,11 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
+
#endif /* CONFIG_MMU */
#endif
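The reordering in __tlb_remove_page() above changes its contract: the page is now always queued first, and the return value only reports that the batch has just become full, so tlb_remove_page() no longer needs to re-add the page after flushing. A stand-alone rendering of that pattern with generic names, not kernel code:

#include <stdbool.h>
#include <stddef.h>

struct page_batch {
	void	*pages[8];
	size_t	nr;
	size_t	max;		/* 8 in this sketch */
};

/* Always queue the page; return true when the batch has just filled up. */
static bool batch_add(struct page_batch *b, void *page)
{
	b->pages[b->nr++] = page;
	return b->nr == b->max;
}

static void batch_flush(struct page_batch *b)
{
	/* release all queued pages here, then reset the count */
	b->nr = 0;
}

static void batch_add_and_flush_if_full(struct page_batch *b, void *page)
{
	if (batch_add(b, page))
		batch_flush(b);	/* no re-add needed: the page is already queued */
}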
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (396)
+#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db13c0e..3522cbaed316 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <linux/bug.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/sched.h>
-#include <xen/interface/platform.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_platform_op_raw(void *arg);
-static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
-{
- op->interface_version = XENPF_INTERFACE_VERSION;
- return HYPERVISOR_platform_op_raw(op);
-}
-int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
-
-static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
-{
- struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
- /* start_info_mfn is unused on ARM */
- return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-}
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
- unsigned int new_val, unsigned long flags)
-{
- BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
- int count, int *success_count, domid_t domid)
-{
- BUG();
-}
-
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 95251512e2c4..d6e7709d0688 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-#include <linux/init.h>
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
-extern struct dma_map_ops *xen_dma_ops;
-
-#ifdef CONFIG_XEN
-void __init xen_early_init(void);
-#else
-static inline void xen_early_init(void) { return; }
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void xen_arch_register_cpu(int num)
-{
-}
-
-static inline void xen_arch_unregister_cpu(int num)
-{
-}
-#endif
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d596862892..88c0d75da190 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
- typedef struct { union { type *p; uint64_aligned_t q; }; } \
- __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
- __DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name) __guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val) \
- do { \
- if (sizeof(hnd) == 8) \
- *(uint64_t *)&(hnd) = 0; \
- (hnd).p = val; \
- } while (0)
-
-#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-typedef int64_t xen_long_t;
-#define PRI_xen_long "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
- u32 version;
- u32 pad0;
- u64 tsc_timestamp;
- u64 system_time;
- u32 tsc_to_system_mul;
- s8 tsc_shift;
- u8 flags;
- u8 pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
- u32 version;
- u32 sec;
- u32 nsec;
- u32 sec_hi;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac3a971..b3ef061d8b74 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long page_pfn = page_to_xen_pfn(page);
- unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
- unsigned long compound_pages =
- (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
- bool local = (page_pfn <= dev_pfn) &&
- (dev_pfn - page_pfn < compound_pages);
-
- /*
- * Dom0 is mapped 1:1, while the Linux page can span across
- * multiple Xen pages, it's not possible for it to contain a
- * mix of local and foreign Xen pages. So if the first xen_pfn
- * == mfn the page is local otherwise it's a foreign page
- * grant-mapped in dom0. If the page is local we can safely
- * call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (local)
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
- else
- __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long pfn = PFN_DOWN(handle);
- /*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
- * foreign mfn will always return false. If the page is local we can
- * safely call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
- } else
- __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6e43fd..31bbc803cecb 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <xen/xen.h>
-#include <xen/interface/grant_table.h>
-
-#define phys_to_machine_mapping_valid(pfn) (1)
-
-/* Xen machine address */
-typedef struct xmaddr {
- phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
- phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY (~0UL)
-
-/*
- * The pseudo-physical frame (pfn) used in all the helpers is always based
- * on Xen page granularity (i.e 4KB).
- *
- * A Linux page may be split across multiple non-contiguous Xen page so we
- * have to keep track with frame based on 4KB page granularity.
- *
- * PV drivers should never make a direct usage of those helpers (particularly
- * pfn_to_gfn and gfn_to_pfn).
- */
-
-unsigned long __pfn_to_mfn(unsigned long pfn);
-extern struct rb_root phys_to_mach;
-
-/* Pseudo-physical <-> Guest conversion */
-static inline unsigned long pfn_to_gfn(unsigned long pfn)
-{
- return pfn;
-}
-
-static inline unsigned long gfn_to_pfn(unsigned long gfn)
-{
- return gfn;
-}
-
-/* Pseudo-physical <-> BUS conversion */
-static inline unsigned long pfn_to_bfn(unsigned long pfn)
-{
- unsigned long mfn;
-
- if (phys_to_mach.rb_node != NULL) {
- mfn = __pfn_to_mfn(pfn);
- if (mfn != INVALID_P2M_ENTRY)
- return mfn;
- }
-
- return pfn;
-}
-
-static inline unsigned long bfn_to_pfn(unsigned long bfn)
-{
- return bfn;
-}
-
-#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
-
-/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
-#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
-
-/* Only used in PV code. But ARM guests are always HVM. */
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
- BUG();
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
- BUG();
- return NULL;
-}
-
-extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count);
-
-extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_unmap_grant_ref *kunmap_ops,
- struct page **pages, unsigned int count);
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
- unsigned long nr_pages);
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
-bool xen_arch_need_swiotlb(struct device *dev,
- phys_addr_t phys,
- dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
+#include <xen/arm/page.h>
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c73579..af05f8e0903e 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
/* Supported VGICv3 address types */
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
+#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
+#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
+#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 68c2c097cffe..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -33,7 +33,7 @@ endif
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
-obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 000000000000..7e45f69a0ddc
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/arm/kernel/armksyms.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/arm-smccc.h>
+
+#include <asm/checksum.h>
+#include <asm/ftrace.h>
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __do_div64(void);
+extern void __bswapsi2(void);
+extern void __bswapdi2(void);
+
+extern void __aeabi_idiv(void);
+extern void __aeabi_idivmod(void);
+extern void __aeabi_lasr(void);
+extern void __aeabi_llsl(void);
+extern void __aeabi_llsr(void);
+extern void __aeabi_lmul(void);
+extern void __aeabi_uidiv(void);
+extern void __aeabi_uidivmod(void);
+extern void __aeabi_ulcmp(void);
+
+extern void fpundefinstr(void);
+
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
+ /* platform dependent support */
+EXPORT_SYMBOL(arm_delay_ops);
+
+ /* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+ /* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(__memzero);
+
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+#endif
+
+ /* gcc lib functions */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__do_div64);
+EXPORT_SYMBOL(__bswapsi2);
+EXPORT_SYMBOL(__bswapdi2);
+
+#ifdef CONFIG_AEABI
+EXPORT_SYMBOL(__aeabi_idiv);
+EXPORT_SYMBOL(__aeabi_idivmod);
+EXPORT_SYMBOL(__aeabi_lasr);
+EXPORT_SYMBOL(__aeabi_llsl);
+EXPORT_SYMBOL(__aeabi_llsr);
+EXPORT_SYMBOL(__aeabi_lmul);
+EXPORT_SYMBOL(__aeabi_uidiv);
+EXPORT_SYMBOL(__aeabi_uidivmod);
+EXPORT_SYMBOL(__aeabi_ulcmp);
+#endif
+
+ /* bitops */
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+EXPORT_SYMBOL(_find_first_bit_le);
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(_find_first_zero_bit_be);
+EXPORT_SYMBOL(_find_next_zero_bit_be);
+EXPORT_SYMBOL(_find_first_bit_be);
+EXPORT_SYMBOL(_find_next_bit_be);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_OLD_MCOUNT
+EXPORT_SYMBOL(mcount);
+#endif
+EXPORT_SYMBOL(__gnu_mcount_nc);
+#endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_pfn_offset);
+EXPORT_SYMBOL(__pv_offset);
+#endif
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
+#endif
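
The new armksyms.c above gathers the EXPORT_SYMBOL() entries for routines that are implemented in assembly, replacing the per-file EXPORT_SYMBOL() directives that the rest of this series removes from the .S sources. A minimal sketch of the pattern, with an illustrative symbol name rather than one from this patch:

/* the C file only needs a declaration carrying the symbol name; */
/* the definition lives in a .S file linked into the same image   */
#include <linux/export.h>

extern void my_asm_helper(void);

EXPORT_SYMBOL(my_asm_helper);
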
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
+ CALL(sys_pkey_mprotect)
+/* 395 */ CALL(sys_pkey_alloc)
+ CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index b629d3f11c3d..c73c4030ca5d 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -7,7 +7,6 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#include "entry-header.S"
@@ -154,7 +153,6 @@ ENTRY(mcount)
__mcount _old
#endif
ENDPROC(mcount)
-EXPORT_SYMBOL(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
@@ -207,7 +205,6 @@ UNWIND(.fnstart)
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
-EXPORT_SYMBOL(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f41cee4c5746..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -22,7 +22,6 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
-#include <asm/export.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
@@ -728,8 +727,6 @@ __pv_phys_pfn_offset:
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
-EXPORT_SYMBOL(__pv_phys_pfn_offset)
-EXPORT_SYMBOL(__pv_offset)
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b8df45883cf7..188180b5523d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -925,9 +925,9 @@ static bool core_has_os_save_restore(void)
}
}
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(unsigned int cpu)
{
- int i, raw_num_brps, err = 0, cpu = smp_processor_id();
+ int i, raw_num_brps, err = 0;
u32 val;
/*
@@ -1020,25 +1020,20 @@ out_mdbgen:
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}
-static int dbg_reset_notify(struct notifier_block *self,
- unsigned long action, void *cpu)
+static int dbg_reset_online(unsigned int cpu)
{
- if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
- smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
-
- return NOTIFY_OK;
+ local_irq_disable();
+ reset_ctrl_regs(cpu);
+ local_irq_enable();
+ return 0;
}
-static struct notifier_block dbg_reset_nb = {
- .notifier_call = dbg_reset_notify,
-};
-
#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
void *v)
{
if (action == CPU_PM_EXIT)
- reset_ctrl_regs(NULL);
+ reset_ctrl_regs(smp_processor_id());
return NOTIFY_OK;
}
@@ -1059,6 +1054,8 @@ static inline void pm_init(void)
static int __init arch_hw_breakpoint_init(void)
{
+ int ret;
+
debug_arch = get_debug_arch();
if (!debug_arch_supported()) {
@@ -1072,25 +1069,28 @@ static int __init arch_hw_breakpoint_init(void)
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
- cpu_notifier_register_begin();
-
/*
* We need to tread carefully here because DBGSWENABLE may be
* driven low on this core and there isn't an architected way to
* determine that.
*/
+ get_online_cpus();
register_undef_hook(&debug_reg_hook);
/*
- * Reset the breakpoint resources. We assume that a halting
- * debugger will leave the world in a nice state for us.
+ * Register CPU notifier which resets the breakpoint resources. We
+ * assume that a halting debugger will leave the world in a nice state
+ * for us.
*/
- on_each_cpu(reset_ctrl_regs, NULL, 1);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
+ dbg_reset_online, NULL);
unregister_undef_hook(&debug_reg_hook);
- if (!cpumask_empty(&debug_err_mask)) {
+ if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
core_num_brps = 0;
core_num_wrps = 0;
- cpu_notifier_register_done();
+ if (ret > 0)
+ cpuhp_remove_state_nocalls(ret);
+ put_online_cpus();
return 0;
}
@@ -1108,12 +1108,9 @@ static int __init arch_hw_breakpoint_init(void)
TRAP_HWBKPT, "watchpoint debug exception");
hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "breakpoint debug exception");
+ put_online_cpus();
- /* Register hotplug and PM notifiers. */
- __register_cpu_notifier(&dbg_reset_nb);
-
- cpu_notifier_register_done();
-
+ /* Register PM notifiers. */
pm_init();
return 0;
}
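
The hw_breakpoint.c hunk above replaces the open-coded CPU notifier with the hotplug state machine: cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) runs the callback on every CPU that is already online and re-runs it for CPUs that come online later, which is why the on_each_cpu() call and the notifier block can both go away. A minimal, self-contained sketch of that API as used here; the driver name string and callback are illustrative, not from this patch:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int mydrv_cpu_online(unsigned int cpu)
{
	/* per-CPU setup; invoked on the CPU that is coming up */
	return 0;
}

static int __init mydrv_init(void)
{
	int state;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state; returns > 0 on success */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				  mydrv_cpu_online, NULL);
	if (state < 0)
		return state;

	/* on a later error path the state can be dropped without callbacks, */
	/* as the patch does: cpuhp_remove_state_nocalls(state);             */
	return 0;
}
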
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 37669e7e13af..2e48b674aab1 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -16,7 +16,6 @@
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
@@ -52,7 +51,6 @@ UNWIND( .fnend)
ENTRY(arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(arm_smccc_smc)
-EXPORT_SYMBOL(arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc)
ENTRY(arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(arm_smccc_hvc)
-EXPORT_SYMBOL(arm_smccc_hvc)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc698383e822..9688ec0c6ef4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+ char str[80], *p;
+ unsigned int x;
+ int reg;
+
+ for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+ if (instruction & BIT(reg)) {
+ p += sprintf(p, " r%d:%08x", reg, *stack--);
+ if (++x == 6) {
+ x = 0;
+ p = str;
+ printk("%s\n", str);
+ }
+ }
+ }
+ if (p != str)
+ printk("%s\n", str);
+}
+
#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernels view of
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 7fa487ef7e2f..37b2a11af345 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -3,6 +3,9 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ *(.data..ro_after_init)
+
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
READ_MOSTLY_DATA(L1_CACHE_BYTES)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 3e1cd0452d67..90d0176fb30d 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -34,6 +34,7 @@ config KVM
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_MSI
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f19842ea5418..d571243ab4d1 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -32,5 +32,6 @@ obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+obj-y += $(KVM)/arm/vgic/vgic-its.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..8f92efa8460e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int ret = 0;
+ int ret, cpu;
if (type)
return -EINVAL;
+ kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+ if (!kvm->arch.last_vcpu_ran)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
return ret;
}
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -209,6 +221,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MSI_DEVID:
+ if (!kvm)
+ r = -EINVAL;
+ else
+ r = kvm->arch.vgic.msis_require_devid;
+ break;
default:
r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
break;
@@ -312,6 +330,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int *last_ran;
+
+ last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+ /*
+ * We might get preempted before the vCPU actually runs, but
+ * over-invalidation doesn't affect correctness.
+ */
+ if (*last_ran != vcpu->vcpu_id) {
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+ *last_ran = vcpu->vcpu_id;
+ }
+
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
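
The arm.c changes above record, per physical CPU, which vCPU ran there last so that a local TLB flush can be issued when a different vCPU is scheduled onto that CPU. The bookkeeping uses the standard percpu allocator; a minimal sketch of that allocation pattern, with illustrative names rather than the kvm_arch fields from this patch:

#include <linux/percpu.h>

static int *last_id;	/* one int per possible CPU */

static int tracker_init(void)
{
	int cpu;

	last_id = alloc_percpu(int);
	if (!last_id)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(last_id, cpu) = -1;	/* "never ran" marker */
	return 0;
}

static void tracker_note(int id)
{
	int *slot = this_cpu_ptr(last_id);	/* current CPU's slot */

	if (*slot != id)
		*slot = id;	/* the real code also flushes the stale TLB here */
}

static void tracker_exit(void)
{
	free_percpu(last_id);
}
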
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
__kvm_tlb_flush_vmid(kvm);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ isb();
+
+ write_sysreg(0, TLBIALL);
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, VTTBR);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index a7e7de89bd75..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl)
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
-EXPORT_SYMBOL(__ashldi3)
-EXPORT_SYMBOL(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 490336e42518..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr)
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
-EXPORT_SYMBOL(__ashrdi3)
-EXPORT_SYMBOL(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index fab5a50503ae..7d7952e5a3b1 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -10,6 +10,7 @@
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
+#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
- bleq .Ldumpstm @ dump saved registers
+ bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
- bleq .Ldumpstm @ dump saved registers
+ bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
-#define instr r4
-#define reg r5
-#define stack r6
-
-.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
- mov stack, r0
- mov instr, r1
- mov reg, #10
- mov r7, #0
-1: mov r3, #1
- ARM( tst instr, r3, lsl reg )
- THUMB( lsl r3, reg )
- THUMB( tst instr, r3 )
- beq 2f
- add r7, r7, #1
- teq r7, #6
- moveq r7, #0
- adr r3, .Lcr
- addne r3, r3, #1 @ skip newline
- ldr r2, [stack], #-4
- mov r1, reg
- adr r0, .Lfp
- bl printk
-2: subs reg, reg, #1
- bpl 1b
- teq r7, #0
- adrne r0, .Lcr
- blne printk
- ldmfd sp!, {instr, reg, stack, r7, pc}
-
-.Lfp: .asciz " r%d:%08x%s"
-.Lcr: .asciz "\n"
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index df06638b327c..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,6 +1,5 @@
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, name, instr
@@ -26,7 +25,6 @@ UNWIND( .fnstart )
bx lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
.macro testop, name, instr, store
@@ -57,7 +55,6 @@ UNWIND( .fnstart )
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
#else
.macro bitop, name, instr
@@ -77,7 +74,6 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
/**
@@ -106,6 +102,5 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
-EXPORT_SYMBOL(\name )
.endm
#endif
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index f05f78247304..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,6 +1,5 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
ENTRY(__bswapsi2)
@@ -36,5 +35,3 @@ ENTRY(__bswapdi2)
ret lr
ENDPROC(__bswapdi2)
#endif
-EXPORT_SYMBOL(__bswapsi2)
-EXPORT_SYMBOL(__bswapdi2)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index b566154f5cf4..e936352ccb00 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
@@ -51,9 +50,6 @@ USER( strnebt r2, [r0])
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
-#ifndef CONFIG_UACCESS_WITH_MEMCPY
-EXPORT_SYMBOL(arm_clear_user)
-#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 63e4c1ed0225..7a4b06049001 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Prototype:
@@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
-EXPORT_SYMBOL(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index d97851d4af7a..6ee2f6706f86 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -13,7 +13,6 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
-#include <asm/export.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
@@ -46,4 +45,3 @@ ENTRY(copy_page)
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
-EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 592c179112d1..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
/*
* Prototype:
@@ -100,9 +99,6 @@ WEAK(arm_copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
-#ifndef CONFIG_UACCESS_WITH_MEMCPY
-EXPORT_SYMBOL(arm_copy_to_user)
-#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 68603b5ee537..3ac6ef01bc43 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
@@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, #0
ldmfd sp!, {pc}
ENDPROC(__csum_ipv6_magic)
-EXPORT_SYMBOL(__csum_ipv6_magic)
+
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 830b20e81c37..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
@@ -141,4 +140,3 @@ ENTRY(csum_partial)
bne 4b
b .Lless4
ENDPROC(csum_partial)
-EXPORT_SYMBOL(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 9c3383fed129..d03fc71fc88c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -49,6 +49,5 @@
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
-#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 8b94d20e51d1..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
-#include <asm/export.h>
/*
* unsigned int
@@ -332,4 +331,3 @@ FN_ENTRY
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
-FN_EXPORT
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 5d495edf3d83..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -73,7 +73,6 @@
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
-#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 69aad80a3af4..2cef11884857 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/export.h>
#include <linux/timex.h>
/*
@@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
-EXPORT_SYMBOL(arm_delay_ops);
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index 0c9e1c18fc9e..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -15,7 +15,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -211,4 +210,3 @@ Ldiv0_64:
UNWIND(.fnend)
ENDPROC(__do_div64)
-EXPORT_SYMBOL(__do_div64)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 26302b8cd38f..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -15,7 +15,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
/*
@@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
-EXPORT_SYMBOL(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
@@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
-EXPORT_SYMBOL(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
@@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
-EXPORT_SYMBOL(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
@@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
-EXPORT_SYMBOL(_find_next_bit_le)
#ifdef __ARMEB__
@@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
-EXPORT_SYMBOL(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
-EXPORT_SYMBOL(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
@@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
-EXPORT_SYMBOL(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
-EXPORT_SYMBOL(_find_next_bit_be)
#endif
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9d09a38e73af..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
-#include <asm/export.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -39,7 +38,6 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
-EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -60,7 +58,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
-EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -68,7 +65,6 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
-EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -82,7 +78,6 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
-EXPORT_SYMBOL(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
@@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
-EXPORT_SYMBOL(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
-EXPORT_SYMBOL(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -121,7 +114,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
-EXPORT_SYMBOL(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
-EXPORT_SYMBOL(__get_user_64t_4)
#endif
__get_user_bad8:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 3dff7a3a2aef..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
@@ -122,4 +121,3 @@ ENTRY(__raw_readsb)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
-EXPORT_SYMBOL(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index bfd39682325b..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
@@ -78,4 +77,3 @@ ENTRY(__raw_readsl)
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
-EXPORT_SYMBOL(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index b3af3db6caac..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
@@ -104,4 +103,4 @@ ENTRY(__raw_readsw)
ldmfd sp!, {r4, r5, r6, pc}
-EXPORT_SYMBOL(__raw_readsw)
+
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 3c7a7a40b33e..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
@@ -130,4 +129,3 @@ ENTRY(__raw_readsw)
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
-EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index fa3633594415..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -93,4 +92,3 @@ ENTRY(__raw_writesb)
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
-EXPORT_SYMBOL(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 98ed6aec0b47..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
@@ -66,4 +65,3 @@ ENTRY(__raw_writesl)
bne 6b
ret lr
ENDPROC(__raw_writesl)
-EXPORT_SYMBOL(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 577184c082bb..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
@@ -125,4 +124,3 @@ ENTRY(__raw_writesw)
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
-EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index e335f489d1fc..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -99,4 +98,3 @@ ENTRY(__raw_writesw)
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
-EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index f541bc013bff..9397b2e532af 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
@@ -239,8 +238,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
-EXPORT_SYMBOL(__udivsi3)
-EXPORT_SYMBOL(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
@@ -259,7 +256,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
-EXPORT_SYMBOL(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
@@ -307,8 +303,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
-EXPORT_SYMBOL(__divsi3)
-EXPORT_SYMBOL(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
@@ -333,7 +327,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__modsi3)
-EXPORT_SYMBOL(__modsi3)
#ifdef CONFIG_AEABI
@@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
-EXPORT_SYMBOL(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
@@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
-EXPORT_SYMBOL(__aeabi_idivmod)
#endif
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index e40833981417..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr)
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
-EXPORT_SYMBOL(__lshrdi3)
-EXPORT_SYMBOL(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 44182bf686a5..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -25,4 +24,3 @@ ENTRY(memchr)
2: movne r0, #0
ret lr
ENDPROC(memchr)
-EXPORT_SYMBOL(memchr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 1be5b6ddf37c..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@@ -69,5 +68,3 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
-EXPORT_SYMBOL(memcpy)
-EXPORT_SYMBOL(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 71dcc5400d02..69a9d47fc5ab 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
@@ -226,4 +225,3 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
-EXPORT_SYMBOL(memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 7b72044cba62..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
.align 5
@@ -136,5 +135,3 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
-EXPORT_SYMBOL(memset)
-EXPORT_SYMBOL(mmioset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 6dec26ed5bcc..0eded952e089 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
-#include <asm/export.h>
.text
.align 5
@@ -136,4 +135,3 @@ UNWIND( .fnstart )
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)
-EXPORT_SYMBOL(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index b8f12388ccac..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul)
ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
-EXPORT_SYMBOL(__muldi3)
-EXPORT_SYMBOL(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 11de126e2ed6..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
-#include <asm/export.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -39,7 +38,6 @@ ENTRY(__put_user_1)
mov r0, #0
ret lr
ENDPROC(__put_user_1)
-EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -64,7 +62,6 @@ ENTRY(__put_user_2)
mov r0, #0
ret lr
ENDPROC(__put_user_2)
-EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -72,7 +69,6 @@ ENTRY(__put_user_4)
mov r0, #0
ret lr
ENDPROC(__put_user_4)
-EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -86,7 +82,6 @@ ENTRY(__put_user_8)
mov r0, #0
ret lr
ENDPROC(__put_user_8)
-EXPORT_SYMBOL(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 7301f6e6046c..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -26,4 +25,3 @@ ENTRY(strchr)
subeq r0, r0, #1
ret lr
ENDPROC(strchr)
-EXPORT_SYMBOL(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index aaf9fd98b754..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
.text
.align 5
@@ -25,4 +24,3 @@ ENTRY(strrchr)
mov r0, r3
ret lr
ENDPROC(strrchr)
-EXPORT_SYMBOL(strrchr)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 1626e3a551a1..6bd1089b07e0 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
-#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
-EXPORT_SYMBOL(arm_copy_to_user);
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
@@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
}
return n;
}
-EXPORT_SYMBOL(arm_clear_user);
#if 0
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index 127a91af46f3..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -36,7 +35,6 @@ ENTRY(__ucmpdi2)
ret lr
ENDPROC(__ucmpdi2)
-EXPORT_SYMBOL(__ucmpdi2)
#ifdef CONFIG_AEABI
@@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp)
ret lr
ENDPROC(__aeabi_ulcmp)
-EXPORT_SYMBOL(__aeabi_ulcmp)
#endif
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 737450fe790c..cab128913e72 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -32,6 +32,7 @@ endif
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
+obj-y += ssi-fiq-ksym.o
endif
# i.MX21 based machines
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index b54db47f6f32..1dc2a34b9dbd 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -380,13 +380,6 @@ static struct pu_domain imx6q_pu_domain = {
.name = "PU",
.power_off = imx6q_pm_pu_power_off,
.power_on = imx6q_pm_pu_power_on,
- .states = {
- [0] = {
- .power_off_latency_ns = 25000,
- .power_on_latency_ns = 2000000,
- },
- },
- .state_count = 1,
},
};
@@ -430,6 +423,16 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0;
+ imx6q_pu_domain.base.states = devm_kzalloc(dev,
+ sizeof(*imx6q_pu_domain.base.states),
+ GFP_KERNEL);
+ if (!imx6q_pu_domain.base.states)
+ return -ENOMEM;
+
+ imx6q_pu_domain.base.states[0].power_off_latency_ns = 25000;
+ imx6q_pu_domain.base.states[0].power_on_latency_ns = 2000000;
+ imx6q_pu_domain.base.state_count = 1;
+
for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
pm_genpd_init(imx_gpc_domains[i], NULL, false);
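
The gpc.c hunk above moves the PU domain's idle-state table from a static initialiser into a device-managed allocation, so the genpd core owns a states array that is set up at probe time. A minimal sketch of the devm_kzalloc() pattern it uses; the struct name is illustrative, the latency values are the ones from the patch:

#include <linux/device.h>
#include <linux/slab.h>

struct my_state {
	s64 off_ns;
	s64 on_ns;
};

static int my_probe_states(struct device *dev, struct my_state **out)
{
	struct my_state *states;

	/* freed automatically when the device is unbound */
	states = devm_kzalloc(dev, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	states[0].off_ns = 25000;
	states[0].on_ns  = 2000000;
	*out = states;
	return 0;
}
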
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c
new file mode 100644
index 000000000000..792090f9a032
--- /dev/null
+++ b/arch/arm/mach-imx/ssi-fiq-ksym.c
@@ -0,0 +1,20 @@
+/*
+ * Exported ksyms for the SSI FIQ handler
+ *
+ * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/platform_data/asoc-imx-ssi.h>
+
+EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
+EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
+EXPORT_SYMBOL(imx_ssi_fiq_start);
+EXPORT_SYMBOL(imx_ssi_fiq_end);
+EXPORT_SYMBOL(imx_ssi_fiq_base);
+
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S
index fd7917f1c204..a8b93c5f29b5 100644
--- a/arch/arm/mach-imx/ssi-fiq.S
+++ b/arch/arm/mach-imx/ssi-fiq.S
@@ -8,7 +8,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/export.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
-EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
-EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
-EXPORT_SYMBOL(imx_ssi_fiq_start)
-EXPORT_SYMBOL(imx_ssi_fiq_end)
-EXPORT_SYMBOL(imx_ssi_fiq_base)
+
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index ed9a01484030..a109f6482413 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -21,7 +21,6 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/amba/mmci.h>
-#include <linux/amba/pl061.h>
#include <linux/io.h>
#include <linux/platform_data/clk-integrator.h>
#include <linux/slab.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a9afeebd59f2..0465338183c7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -71,6 +71,7 @@ config SOC_AM43XX
select HAVE_ARM_TWD
select ARM_ERRATA_754322
select ARM_ERRATA_775420
+ select OMAP_INTERCONNECT
config SOC_DRA7XX
bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2abd53ae3e7a..cc6d9fa60924 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
#define OMAP3_SHOW_FEATURE(feat) \
if (omap3_has_ ##feat()) \
- printk(#feat" ");
+ n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
static void __init omap3_cpuinfo(void)
{
const char *cpu_name;
+ char buf[64];
+ int n = 0;
+
+ memset(buf, 0, sizeof(buf));
/*
* OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
cpu_name = "OMAP3503";
}
- sprintf(soc_name, "%s", cpu_name);
+ scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
/* Print verbose information */
- pr_info("%s %s (", soc_name, soc_rev);
+ n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
OMAP3_SHOW_FEATURE(l2cache);
OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
OMAP3_SHOW_FEATURE(neon);
OMAP3_SHOW_FEATURE(isp);
OMAP3_SHOW_FEATURE(192mhz_clk);
-
- printk(")\n");
+ if (*(buf + n - 1) == ' ')
+ n--;
+ n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+ pr_info("%s", buf);
}
#define OMAP3_CHECK_FEATURE(status,feat) \
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 62680aad2126..718981bb80cd 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
if (has_uart4) {
en_uart4_mask = OMAP3630_EN_UART4_MASK;
grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+ } else {
+ en_uart4_mask = 0;
+ grpsel_uart4_mask = 0;
}
/* Enable wakeups in PER */
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index cba8cada8c81..cd15dbd62671 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
return -ENODATA;
}
+ if (!voltdm->volt_data) {
+ pr_err("%s: No voltage data defined for vdd_%s\n",
+ __func__, voltdm->name);
+ return -ENODATA;
+ }
+
/* Adjust voltage to the exact voltage from the OPP table */
for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
if (voltdm->volt_data[i].volt_nominal >= target_volt) {
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 66070acaa888..d1db32b1a2c6 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -85,6 +85,7 @@ static struct resource smc91x_resources[] = {
static struct smc91x_platdata smc91x_platdata = {
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_USE_DMA | SMC91X_NOWAIT,
+ .pxa_u16_align4 = true,
};
static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 40964069a17c..a2d851a3a546 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -140,6 +140,7 @@ static struct resource smc91x_resources[] = {
static struct smc91x_platdata mainstone_smc91x_info = {
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_NOWAIT | SMC91X_USE_DMA,
+ .pxa_u16_align4 = true,
};
static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 702f4f14b708..7b6610e9dae4 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -673,6 +673,7 @@ static struct resource smc91x_resources[] = {
static struct smc91x_platdata stargate2_smc91x_info = {
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT
| SMC91X_NOWAIT | SMC91X_USE_DMA,
+ .pxa_u16_align4 = true,
};
static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c
index 89c5a62830a7..261820a855ec 100644
--- a/arch/arm/mach-s3c64xx/pl080.c
+++ b/arch/arm/mach-s3c64xx/pl080.c
@@ -117,6 +117,25 @@ static struct pl08x_channel_data s3c64xx_dma0_info[] = {
}
};
+static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
+ { "s3c6400-uart.0", "tx", &s3c64xx_dma0_info[0] },
+ { "s3c6400-uart.0", "rx", &s3c64xx_dma0_info[1] },
+ { "s3c6400-uart.1", "tx", &s3c64xx_dma0_info[2] },
+ { "s3c6400-uart.1", "rx", &s3c64xx_dma0_info[3] },
+ { "s3c6400-uart.2", "tx", &s3c64xx_dma0_info[4] },
+ { "s3c6400-uart.2", "rx", &s3c64xx_dma0_info[5] },
+ { "s3c6400-uart.3", "tx", &s3c64xx_dma0_info[6] },
+ { "s3c6400-uart.3", "rx", &s3c64xx_dma0_info[7] },
+ { "samsung-pcm.0", "tx", &s3c64xx_dma0_info[8] },
+ { "samsung-pcm.0", "rx", &s3c64xx_dma0_info[9] },
+ { "samsung-i2s.0", "tx", &s3c64xx_dma0_info[10] },
+ { "samsung-i2s.0", "rx", &s3c64xx_dma0_info[11] },
+ { "s3c6410-spi.0", "tx", &s3c64xx_dma0_info[12] },
+ { "s3c6410-spi.0", "rx", &s3c64xx_dma0_info[13] },
+ { "samsung-i2s.2", "tx", &s3c64xx_dma0_info[14] },
+ { "samsung-i2s.2", "rx", &s3c64xx_dma0_info[15] },
+};
+
struct pl08x_platform_data s3c64xx_dma0_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
@@ -134,6 +153,8 @@ struct pl08x_platform_data s3c64xx_dma0_plat_data = {
.put_xfer_signal = pl08x_put_xfer_signal,
.slave_channels = s3c64xx_dma0_info,
.num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info),
+ .slave_map = s3c64xx_dma0_slave_map,
+ .slave_map_len = ARRAY_SIZE(s3c64xx_dma0_slave_map),
};
static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0,
@@ -207,6 +228,15 @@ static struct pl08x_channel_data s3c64xx_dma1_info[] = {
},
};
+static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
+ { "samsung-pcm.1", "tx", &s3c64xx_dma1_info[0] },
+ { "samsung-pcm.1", "rx", &s3c64xx_dma1_info[1] },
+ { "samsung-i2s.1", "tx", &s3c64xx_dma1_info[2] },
+ { "samsung-i2s.1", "rx", &s3c64xx_dma1_info[3] },
+ { "s3c6410-spi.1", "tx", &s3c64xx_dma1_info[4] },
+ { "s3c6410-spi.1", "rx", &s3c64xx_dma1_info[5] },
+};
+
struct pl08x_platform_data s3c64xx_dma1_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
@@ -224,6 +254,8 @@ struct pl08x_platform_data s3c64xx_dma1_plat_data = {
.put_xfer_signal = pl08x_put_xfer_signal,
.slave_channels = s3c64xx_dma1_info,
.num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info),
+ .slave_map = s3c64xx_dma1_slave_map,
+ .slave_map_len = ARRAY_SIZE(s3c64xx_dma1_slave_map),
};
static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0,
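
The slave maps added above let DMA consumers resolve their channel by device name and direction instead of relying on a platform-specific filter function. On the consumer side this pairs with dma_request_chan(); a minimal sketch, where the device pointer is whatever the consumer driver already has:

#include <linux/dmaengine.h>
#include <linux/err.h>

static struct dma_chan *get_tx_chan(struct device *dev)
{
	struct dma_chan *chan;

	/* resolved through the dma_slave_map ("tx" entry) on non-DT platforms */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return NULL;	/* caller falls back or fails its probe */
	return chan;
}
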
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index cf236db686a9..7fa4a0b5f654 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -15,7 +15,6 @@
* GNU General Public License for more details.
*/
-#include <linux/clk/renesas.h>
#include <linux/io.h>
#include <linux/irqchip.h>
@@ -23,19 +22,6 @@
#include "common.h"
-#define MODEMR 0xffcc0020
-
-static void __init r8a7778_timer_init(void)
-{
- u32 mode;
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
- r8a7778_clocks_init(mode);
-}
-
#define INT2SMSKCR0 0x82288 /* 0xfe782288 */
#define INT2SMSKCR1 0x8228c /* 0xfe78228c */
@@ -70,6 +56,5 @@ DT_MACHINE_START(R8A7778_DT, "Generic R8A7778 (Flattened Device Tree)")
.init_early = shmobile_init_delay,
.init_irq = r8a7778_init_irq_dt,
.init_late = shmobile_init_late,
- .init_time = r8a7778_timer_init,
.dt_compat = r8a7778_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 0007ff51d180..0686112f2435 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -14,8 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/clk/renesas.h>
-#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
@@ -76,30 +74,6 @@ static void __init r8a7779_init_irq_dt(void)
__raw_writel(0x003fee3f, INT2SMSKCR4);
}
-#define MODEMR 0xffcc0020
-
-static u32 __init r8a7779_read_mode_pins(void)
-{
- static u32 mode;
- static bool mode_valid;
-
- if (!mode_valid) {
- void __iomem *modemr = ioremap_nocache(MODEMR, PAGE_SIZE);
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
- mode_valid = true;
- }
-
- return mode;
-}
-
-static void __init r8a7779_init_time(void)
-{
- r8a7779_clocks_init(r8a7779_read_mode_pins());
- clocksource_probe();
-}
-
static const char *const r8a7779_compat_dt[] __initconst = {
"renesas,r8a7779",
NULL,
@@ -109,7 +83,6 @@ DT_MACHINE_START(R8A7779_DT, "Generic R8A7779 (Flattened Device Tree)")
.smp = smp_ops(r8a7779_smp_ops),
.map_io = r8a7779_map_io,
.init_early = shmobile_init_delay,
- .init_time = r8a7779_init_time,
.init_irq = r8a7779_init_irq_dt,
.init_late = shmobile_init_late,
.dt_compat = r8a7779_compat_dt,
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index afb9fdcd3d90..b527258e0a62 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/clk/renesas.h>
+#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
@@ -71,7 +71,6 @@ static unsigned int __init get_extal_freq(void)
void __init rcar_gen2_timer_init(void)
{
- u32 mode = rcar_gen2_read_mode_pins();
#ifdef CONFIG_ARM_ARCH_TIMER
void __iomem *base;
u32 freq;
@@ -130,7 +129,7 @@ void __init rcar_gen2_timer_init(void)
iounmap(base);
#endif /* CONFIG_ARM_ARCH_TIMER */
- rcar_gen2_clocks_init(mode);
+ of_clk_init(NULL);
clocksource_probe();
}
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
-/* f */
+/* f */ b .data_unknown
+
+.data_unknown_r9:
+ ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
+ str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
+ str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
+ str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
- b .data_unknown @ 2: MUL?
+ b .data_unknown_r9 @ 2: MUL?
nop
- b .data_unknown @ 3: MUL?
+ b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
+ b .data_unknown_r9 @ 6: MUL?
nop
- b .data_unknown @ 7: MUL?
+ b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
+ b .data_unknown_r9 @ A: MUL?
nop
- b .data_unknown @ B: MUL?
+ b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
+ b .data_unknown_r9 @ E: MUL?
nop
- b .data_unknown @ F: MUL?
+ b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
+ ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
+ ldr r9, [sp], #4
b do_DataAbort
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab4f74536057..ab7710002ba6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index f6d333f09bfe..8dea61640cc1 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
ret lr
ENDPROC(cpu_cm7_proc_fin)
- .section ".text.init", #alloc, #execinstr
+ .section ".init.text", #alloc, #execinstr
__v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index e93aa6734147..cf7b95fddbb3 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -1124,15 +1124,6 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
- pd.dma_tx = (void *)DMACH_SPI0_TX;
- pd.dma_rx = (void *)DMACH_SPI0_RX;
-#if defined(CONFIG_PL330_DMA)
- pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
- pd.filter = pl08x_filter_id;
-#elif defined(CONFIG_S3C24XX_DMAC)
- pd.filter = s3c24xx_dma_filter;
-#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
}
@@ -1169,14 +1160,6 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
- pd.dma_tx = (void *)DMACH_SPI1_TX;
- pd.dma_rx = (void *)DMACH_SPI1_RX;
-#if defined(CONFIG_PL330_DMA)
- pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
- pd.filter = pl08x_filter_id;
-#endif
-
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
}
@@ -1213,13 +1196,6 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
- pd.dma_tx = (void *)DMACH_SPI2_TX;
- pd.dma_rx = (void *)DMACH_SPI2_RX;
-#if defined(CONFIG_PL330_DMA)
- pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
- pd.filter = pl08x_filter_id;
-#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
}
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f193414d0f6f..4986dc0c1dff 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
- sizeof(struct vcpu_info));
+ xen_vcpu_info = alloc_percpu(struct vcpu_info);
if (xen_vcpu_info == NULL)
return -ENOMEM;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index d062f08f5020..bd62d94f8ac5 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = {
- .mapping_error = xen_swiotlb_dma_mapping_error,
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 969ef880d234..111742126897 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -52,6 +52,7 @@ config ARM64
select GENERIC_TIME_VSYSCALL
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
+ select HAVE_ACPI_APEI if (ACPI && EFI)
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
@@ -109,6 +110,7 @@ config ARM64
select POWER_SUPPLY
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
help
ARM 64-bit (AArch64) Linux support.
@@ -238,6 +240,9 @@ config PGTABLE_LEVELS
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+config ARCH_SUPPORTS_UPROBES
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -790,6 +795,14 @@ config SETEND_EMULATION
If unsure, say Y
endif
+config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+ zeroed area and reserved ASID. The user access routines
+ restore the valid TTBR0_EL1 temporarily.
+
menu "ARMv8.1 architectural features"
config ARM64_HW_AFDBM
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe742615..d1ebd46872fd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,9 +2,13 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config ARM64_PTDUMP
+config ARM64_PTDUMP_CORE
+ def_bool n
+
+config ARM64_PTDUMP_DEBUGFS
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
+ select ARM64_PTDUMP_CORE
select DEBUG_FS
help
Say Y here if you want to show the kernel pagetable layout in a
@@ -38,6 +42,35 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
+config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select ARM64_PTDUMP_CORE
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+ This is useful for discovering cases where the kernel is leaving
+ W+X mappings after applying NX, as such mappings are a security risk.
+ This check also includes UXN, which should be set on all kernel
+ mappings.
+
+ Look for a message in dmesg output like this:
+
+ arm64/mm: Checked W+X mappings: passed, no W+X pages found.
+
+ or like this, if the check failed:
+
+ arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+ Note that even if the check fails, your kernel is possibly
+ still fine, as W+X mappings are not a security hole in
+ themselves, what they do is that they make the exploitation
+ of other unfixed kernel bugs easier.
+
+ There is no runtime or memory usage effect of this option
+ once the kernel has booted up - it's a one time check.
+
+ If in doubt, say "Y".
+
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3635b8662724..b9a4a934ca05 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,10 +37,16 @@ $(warning LSE atomics not supported by binutils)
endif
endif
-KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
+brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
+
+ifneq ($(brokengasinst),)
+$(warning Detected assembler with broken .inst; disassembly will be unreliable)
+endif
+
+KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_AFLAGS += $(lseinstr)
+KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 334271a25f70..7d3a2acc6a55 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -393,7 +393,7 @@
#address-cells = <3>;
#size-cells = <2>;
dma-coherent;
- ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>,
+ ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>,
<0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 123a58b29cbd..f0b857d6d73c 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 007be826efce..26aaa6a7670f 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index a7270eff6939..6e154d948a80 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -76,7 +76,7 @@
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x1010000>;
local-timer-stop;
- entry-latency-us = <300>;
+ entry-latency-us = <400>;
exit-latency-us = <1200>;
min-residency-us = <2500>;
};
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index b09f3bc5c6c1..c4d544244b19 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -56,6 +56,10 @@
};
};
+&enet {
+ status = "ok";
+};
+
&pci_phy0 {
status = "ok";
};
@@ -174,6 +178,7 @@
&mdio_mux_iproc {
mdio@10 {
gphy0: eth-phy@10 {
+ enet-phy-lane-swap;
reg = <0x10>;
};
};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index d95dc408629a..773ed593da4d 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -191,6 +191,18 @@
#include "ns2-clock.dtsi"
+ enet: ethernet@61000000 {
+ compatible = "brcm,ns2-amac";
+ reg = <0x61000000 0x1000>,
+ <0x61090000 0x1000>,
+ <0x61030000 0x100>;
+ reg-names = "amac_base", "idm_base", "nicpm_base";
+ interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ phy-handle = <&gphy0>;
+ phy-mode = "rgmii";
+ status = "disabled";
+ };
+
dma0: dma@61360000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x61360000 0x1000>;
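
The new ethernet@61000000 node above exposes three named register windows plus a phy-handle. A hedged sketch of how a platform driver typically resolves properties of that shape; example_amac_probe() and its body are illustrative only, not the real amac driver code:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_amac_probe(struct platform_device *pdev)
	{
		struct device_node *np = pdev->dev.of_node;
		struct device_node *phy_np;
		struct resource *res;
		void __iomem *base;

		/* "amac_base" matches one entry of the reg-names property above */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		phy_np = of_parse_phandle(np, "phy-handle", 0);	/* -> &gphy0 */
		if (!phy_np)
			return -ENODEV;

		/* ... attach to the PHY, read "phy-mode", start the MAC ... */
		of_node_put(phy_np);
		return 0;
	}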
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 7f0dc13b4087..d058e56db72d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -216,6 +216,12 @@
clocks = <&sysclk>;
};
+ dcfg: dcfg@1e00000 {
+ compatible = "fsl,ls2080a-dcfg", "syscon";
+ reg = <0x0 0x1e00000 0x0 0x10000>;
+ little-endian;
+ };
+
serial0: serial@21c0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21c0500 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db585d5..e0ea60382087 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -747,7 +747,6 @@
clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
clock-names = "otg";
dr_mode = "otg";
- g-use-dma;
g-rx-fifo-size = <512>;
g-np-tx-fifo-size = <128>;
g-tx-fifo-size = <128 128 128 128 128 128>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index 1372e9a6aaa4..a59d36cd6caf 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -81,3 +81,26 @@
&pcie0 {
status = "okay";
};
+
+&mdio {
+ status = "okay";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
+&eth0 {
+ phy-mode = "rgmii-id";
+ phy = <&phy0>;
+ status = "okay";
+};
+
+&eth1 {
+ phy-mode = "sgmii";
+ phy = <&phy1>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index c4762538ec01..3b8eb45bdc76 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,7 +105,7 @@
status = "disabled";
};
- nb_perih_clk: nb-periph-clk@13000{
+ nb_periph_clk: nb-periph-clk@13000 {
compatible = "marvell,armada-3700-periph-clock-nb";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
#clock-cells = <1>;
};
- sb_perih_clk: sb-periph-clk@18000{
+ sb_periph_clk: sb-periph-clk@18000 {
compatible = "marvell,armada-3700-periph-clock-sb";
reg = <0x18000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -140,6 +140,29 @@
};
};
+ eth0: ethernet@30000 {
+ compatible = "marvell,armada-3700-neta";
+ reg = <0x30000 0x4000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sb_periph_clk 8>;
+ status = "disabled";
+ };
+
+ mdio: mdio@32004 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0x32004 0x4>;
+ };
+
+ eth1: ethernet@40000 {
+ compatible = "marvell,armada-3700-neta";
+ reg = <0x40000 0x4000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sb_periph_clk 7>;
+ status = "disabled";
+ };
+
usb3: usb@58000 {
compatible = "marvell,armada3700-xhci",
"generic-xhci";
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 602e2c2e9a4d..93ec8fef82a1 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -164,6 +164,14 @@
clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
+
+ cpm_trng: trng@760000 {
+ compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+ reg = <0x760000 0x7d>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpm_syscon0 1 25>;
+ status = "okay";
+ };
};
cpm_pcie0: pcie@f2600000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 842fb333285c..ee8db0556791 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -130,8 +130,8 @@
reg = <0x700600 0x50>;
#address-cells = <0x1>;
#size-cells = <0x0>;
- cell-index = <1>;
- clocks = <&cps_syscon0 0 3>;
+ cell-index = <3>;
+ clocks = <&cps_syscon0 1 21>;
status = "disabled";
};
@@ -140,7 +140,7 @@
reg = <0x700680 0x50>;
#address-cells = <1>;
#size-cells = <0>;
- cell-index = <2>;
+ cell-index = <4>;
clocks = <&cps_syscon0 1 21>;
status = "disabled";
};
@@ -164,6 +164,14 @@
clocks = <&cps_syscon0 1 21>;
status = "disabled";
};
+
+ cps_trng: trng@760000 {
+ compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+ reg = <0x760000 0x7d>;
+ interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 25>;
+ status = "okay";
+ };
};
cps_pcie0: pcie@f4600000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 2a7f731c7759..0ecaad4333a7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -34,15 +34,6 @@
chosen { };
- usb_p1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "usb_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -54,6 +45,29 @@
};
};
};
+
+ extcon_usb: extcon_iddig {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb_p1_vbus: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p0_vbus: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
&cec {
@@ -243,6 +257,20 @@
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
};
};
+
+ usb_id_pins_float: usb_iddig_pull_up {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-up;
+ };
+ };
+
+ usb_id_pins_ground: usb_iddig_pull_down {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-down;
+ };
+ };
};
&pwm0 {
@@ -469,12 +497,25 @@
status = "okay";
};
+&ssusb {
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ mediatek,enable-wakeup;
+ pinctrl-names = "default", "id_float", "id_ground";
+ pinctrl-0 = <&usb_id_pins_float>;
+ pinctrl-1 = <&usb_id_pins_float>;
+ pinctrl-2 = <&usb_id_pins_ground>;
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
-&usb30 {
+&usb_host {
vusb33-supply = <&mt6397_vusb_reg>;
vbus-supply = <&usb_p1_vbus>;
- mediatek,wakeup-src = <1>;
+ status = "okay";
};
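
The board file above hands the dual-role controller an extcon-usb-gpio device for ID detection and sets dr_mode = "otg". A hedged sketch of how a controller driver commonly consumes these properties; example_dr_probe() is hypothetical and not the MediaTek mtu3 code:

	#include <linux/err.h>
	#include <linux/extcon.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_dr_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct extcon_dev *edev;
		const char *dr_mode;

		/* "otg" selects dual-role operation */
		if (of_property_read_string(dev->of_node, "dr_mode", &dr_mode))
			dr_mode = "host";

		if (of_property_read_bool(dev->of_node, "extcon")) {
			/* index 0 -> the &extcon_usb phandle set on &ssusb above */
			edev = extcon_get_edev_by_phandle(dev, 0);
			if (IS_ERR(edev))
				return PTR_ERR(edev);	/* often -EPROBE_DEFER */
		}

		/* ... register an extcon notifier and switch host/device roles ... */
		return 0;
	}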
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 1c71e256601d..c2d588ca59b7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -707,11 +707,14 @@
status = "disabled";
};
- usb30: usb@11270000 {
- compatible = "mediatek,mt8173-xhci";
- reg = <0 0x11270000 0 0x1000>,
+ ssusb: usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3";
+ reg = <0 0x11271000 0 0x3000>,
<0 0x11280700 0 0x0100>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&phy_port0 PHY_TYPE_USB3>,
+ <&phy_port1 PHY_TYPE_USB2>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
clocks = <&topckgen CLK_TOP_USB30_SEL>,
<&pericfg CLK_PERI_USB0>,
@@ -719,10 +722,22 @@
clock-names = "sys_ck",
"wakeup_deb_p0",
"wakeup_deb_p1";
- phys = <&phy_port0 PHY_TYPE_USB3>,
- <&phy_port1 PHY_TYPE_USB2>;
mediatek,syscon-wakeup = <&pericfg>;
- status = "okay";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host: xhci@11270000 {
+ compatible = "mediatek,mt8173-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ status = "disabled";
+ };
};
u3phy: usb-phy@11290000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index 8c15040f2540..625dda713548 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -321,6 +321,11 @@
#power-domain-cells = <0>;
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7795-rst";
+ reg = <0 0xe6160000 0 0x0200>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7795-sysc";
reg = <0 0xe6180000 0 0x0400>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index 9217da983525..75c8c55a8248 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -233,6 +233,11 @@
#power-domain-cells = <0>;
};
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7796-rst";
+ reg = <0 0xe6160000 0 0x0200>;
+ };
+
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7796-sysc";
reg = <0 0xe6180000 0 0x0400>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 0fcb2147c9f9..df231c4df5a5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -537,7 +537,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <275>;
g-tx-fifo-size = <256 128 128 64 64 32>;
- g-use-dma;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index b65c193dc64e..1e24e455700b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -174,6 +174,7 @@
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
+ arm,no-tick-in-suspend;
};
xin24m: xin24m {
@@ -300,8 +301,11 @@
ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
status = "disabled";
pcie0_intc: interrupt-controller {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dab2cb0c1f1c..c3caaddde6cc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,6 +82,7 @@ CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
+CONFIG_HIBERNATION=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPUFREQ_DT=y
@@ -257,6 +258,7 @@ CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_MAX77620=y
diff --git a/arch/arm64/crypto/.gitignore b/arch/arm64/crypto/.gitignore
new file mode 100644
index 000000000000..879df8781ed5
--- /dev/null
+++ b/arch/arm64/crypto/.gitignore
@@ -0,0 +1,2 @@
+sha256-core.S
+sha512-core.S
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 2cf32e9887e1..450a85df041a 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -8,6 +8,14 @@ menuconfig ARM64_CRYPTO
if ARM64_CRYPTO
+config CRYPTO_SHA256_ARM64
+ tristate "SHA-224/SHA-256 digest algorithm for arm64"
+ select CRYPTO_HASH
+
+config CRYPTO_SHA512_ARM64
+ tristate "SHA-384/SHA-512 digest algorithm for arm64"
+ select CRYPTO_HASH
+
config CRYPTO_SHA1_ARM64_CE
tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
depends on ARM64 && KERNEL_MODE_NEON
@@ -23,6 +31,16 @@ config CRYPTO_GHASH_ARM64_CE
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_HASH
+config CRYPTO_CRCT10DIF_ARM64_CE
+ tristate "CRCT10DIF digest algorithm using PMULL instructions"
+ depends on KERNEL_MODE_NEON && CRC_T10DIF
+ select CRYPTO_HASH
+
+config CRYPTO_CRC32_ARM64_CE
+ tristate "CRC32 and CRC32C digest algorithms using PMULL instructions"
+ depends on KERNEL_MODE_NEON && CRC32
+ select CRYPTO_HASH
+
config CRYPTO_AES_ARM64_CE
tristate "AES core cipher using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
@@ -40,17 +58,18 @@ config CRYPTO_AES_ARM64_CE_BLK
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_AES_ARM64_CE
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_SIMD
config CRYPTO_AES_ARM64_NEON_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_AES
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_SIMD
config CRYPTO_CRC32_ARM64
tristate "CRC32 and CRC32C using optional ARMv8 instructions"
depends on ARM64
select CRYPTO_HASH
+
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index abb79b3cfcfe..aa8888d7b744 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -17,6 +17,12 @@ sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
+crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
+
+obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
+crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
+
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
@@ -29,6 +35,12 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
+obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
+sha256-arm64-y := sha256-glue.o sha256-core.o
+
+obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o
+sha512-arm64-y := sha512-glue.o sha512-core.o
+
AFLAGS_aes-ce.o := -DINTERLEAVE=4
AFLAGS_aes-neon.o := -DINTERLEAVE=4
@@ -40,3 +52,14 @@ CFLAGS_crc32-arm64.o := -mcpu=generic+crc
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $(<) void $(@)
+
+$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+ $(call cmd,perlasm)
+
+$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
+ $(call cmd,perlasm)
+
+.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index a2a7fbcacc14..3363560c79b7 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
.text
.arch armv8-a+crypto
@@ -19,7 +20,7 @@
*/
ENTRY(ce_aes_ccm_auth_data)
ldr w8, [x3] /* leftover from prev round? */
- ld1 {v0.2d}, [x0] /* load mac */
+ ld1 {v0.16b}, [x0] /* load mac */
cbz w8, 1f
sub w8, w8, #16
eor v1.16b, v1.16b, v1.16b
@@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data)
beq 8f /* out of input? */
cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.2d}, [x4] /* load first round key */
+1: ld1 {v3.16b}, [x4] /* load first round key */
prfm pldl1strm, [x1]
cmp w5, #12 /* which key size? */
add x6, x4, #16
@@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data)
mov v5.16b, v3.16b
b 4f
2: mov v4.16b, v3.16b
- ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
+ ld1 {v5.16b}, [x6], #16 /* load 2nd round key */
3: aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
-4: ld1 {v3.2d}, [x6], #16 /* load next round key */
+4: ld1 {v3.16b}, [x6], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
-5: ld1 {v4.2d}, [x6], #16 /* load next round key */
+5: ld1 {v4.16b}, [x6], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
- ld1 {v5.2d}, [x6], #16 /* load next round key */
+ ld1 {v5.16b}, [x6], #16 /* load next round key */
bpl 3b
aese v0.16b, v4.16b
subs w2, w2, #16 /* last data? */
@@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data)
ld1 {v1.16b}, [x1], #16 /* load next input block */
eor v0.16b, v0.16b, v1.16b /* xor with mac */
bne 1b
-6: st1 {v0.2d}, [x0] /* store mac */
+6: st1 {v0.16b}, [x0] /* store mac */
beq 10f
adds w2, w2, #16
beq 10f
@@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data)
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
- st1 {v0.2d}, [x0]
+ st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
ENDPROC(ce_aes_ccm_auth_data)
@@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data)
* u32 rounds);
*/
ENTRY(ce_aes_ccm_final)
- ld1 {v3.2d}, [x2], #16 /* load first round key */
- ld1 {v0.2d}, [x0] /* load mac */
+ ld1 {v3.16b}, [x2], #16 /* load first round key */
+ ld1 {v0.16b}, [x0] /* load mac */
cmp w3, #12 /* which key size? */
sub w3, w3, #2 /* modified # of rounds */
- ld1 {v1.2d}, [x1] /* load 1st ctriv */
+ ld1 {v1.16b}, [x1] /* load 1st ctriv */
bmi 0f
bne 3f
mov v5.16b, v3.16b
b 2f
0: mov v4.16b, v3.16b
-1: ld1 {v5.2d}, [x2], #16 /* load next round key */
+1: ld1 {v5.16b}, [x2], #16 /* load next round key */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
-2: ld1 {v3.2d}, [x2], #16 /* load next round key */
+2: ld1 {v3.16b}, [x2], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
-3: ld1 {v4.2d}, [x2], #16 /* load next round key */
+3: ld1 {v4.16b}, [x2], #16 /* load next round key */
subs w3, w3, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
@@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final)
aese v1.16b, v4.16b
/* final round key cancels out */
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
- st1 {v0.2d}, [x0] /* store result */
+ st1 {v0.16b}, [x0] /* store result */
ret
ENDPROC(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc
ldr x8, [x6, #8] /* load lower ctr */
- ld1 {v0.2d}, [x5] /* load mac */
- rev x8, x8 /* keep swabbed ctr in reg */
+ ld1 {v0.16b}, [x5] /* load mac */
+CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
0: /* outer loop */
- ld1 {v1.1d}, [x6] /* load upper ctr */
+ ld1 {v1.8b}, [x6] /* load upper ctr */
prfm pldl1strm, [x1]
add x8, x8, #1
rev x9, x8
cmp w4, #12 /* which key size? */
sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
- ld1 {v3.2d}, [x3] /* load first round key */
+ ld1 {v3.16b}, [x3] /* load first round key */
add x10, x3, #16
bmi 1f
bne 4f
mov v5.16b, v3.16b
b 3f
1: mov v4.16b, v3.16b
- ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
+ ld1 {v5.16b}, [x10], #16 /* load 2nd round key */
2: /* inner loop: 3 rounds, 2x interleaved */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
-3: ld1 {v3.2d}, [x10], #16 /* load next round key */
+3: ld1 {v3.16b}, [x10], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
-4: ld1 {v4.2d}, [x10], #16 /* load next round key */
+4: ld1 {v4.16b}, [x10], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
aese v1.16b, v3.16b
aesmc v1.16b, v1.16b
- ld1 {v5.2d}, [x10], #16 /* load next round key */
+ ld1 {v5.16b}, [x10], #16 /* load next round key */
bpl 2b
aese v0.16b, v4.16b
aese v1.16b, v4.16b
@@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final)
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
st1 {v1.16b}, [x0], #16 /* write output block */
bne 0b
- rev x8, x8
- st1 {v0.2d}, [x5] /* store mac */
+CPU_LE( rev x8, x8 )
+ st1 {v0.16b}, [x5] /* store mac */
str x8, [x6, #8] /* store lsb end of ctr (BE) */
5: ret
6: eor v0.16b, v0.16b, v5.16b /* final round mac */
eor v1.16b, v1.16b, v5.16b /* final round enc */
- st1 {v0.2d}, [x5] /* store mac */
+ st1 {v0.16b}, [x5] /* store mac */
add w2, w2, #16 /* process partial tail block */
7: ldrb w9, [x1], #1 /* get 1 byte of input */
umov w6, v1.b[0] /* get top crypted ctr byte */
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index f4bf2f2a014c..cc5515dac74a 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -11,9 +11,9 @@
#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include "aes-ce-setkey.h"
@@ -149,12 +149,7 @@ static int ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
- struct blkcipher_desc desc = { .info = req->iv };
- struct blkcipher_walk walk;
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
+ struct skcipher_walk walk;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen;
@@ -172,27 +167,19 @@ static int ccm_encrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
- dst = src;
- if (req->src != req->dst)
- dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
-
- blkcipher_walk_init(&walk, dst, src, len);
- err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
- AES_BLOCK_SIZE);
+ err = skcipher_walk_aead_encrypt(&walk, req, true);
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- if (walk.nbytes == len)
+ if (walk.nbytes == walk.total)
tail = 0;
ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes - tail, ctx->key_enc,
num_rounds(ctx), mac, walk.iv);
- len -= walk.nbytes - tail;
- err = blkcipher_walk_done(&desc, &walk, tail);
+ err = skcipher_walk_done(&walk, tail);
}
if (!err)
ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
@@ -203,7 +190,7 @@ static int ccm_encrypt(struct aead_request *req)
return err;
/* copy authtag to end of dst */
- scatterwalk_map_and_copy(mac, dst, req->cryptlen,
+ scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
@@ -214,12 +201,7 @@ static int ccm_decrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
- struct blkcipher_desc desc = { .info = req->iv };
- struct blkcipher_walk walk;
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
+ struct skcipher_walk walk;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen - authsize;
@@ -237,27 +219,19 @@ static int ccm_decrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
- dst = src;
- if (req->src != req->dst)
- dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
-
- blkcipher_walk_init(&walk, dst, src, len);
- err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
- AES_BLOCK_SIZE);
+ err = skcipher_walk_aead_decrypt(&walk, req, true);
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- if (walk.nbytes == len)
+ if (walk.nbytes == walk.total)
tail = 0;
ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes - tail, ctx->key_enc,
num_rounds(ctx), mac, walk.iv);
- len -= walk.nbytes - tail;
- err = blkcipher_walk_done(&desc, &walk, tail);
+ err = skcipher_walk_done(&walk, tail);
}
if (!err)
ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
@@ -268,7 +242,8 @@ static int ccm_decrypt(struct aead_request *req)
return err;
/* compare calculated auth tag with the stored one */
- scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
+ scatterwalk_map_and_copy(buf, req->src,
+ req->assoclen + req->cryptlen - authsize,
authsize, 0);
if (crypto_memneq(mac, buf, authsize))
@@ -287,6 +262,7 @@ static struct aead_alg ccm_aes_alg = {
.cra_module = THIS_MODULE,
},
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = ccm_setkey,
.setauthsize = ccm_setauthsize,
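
The conversion above drops the blkcipher walk plus manual scatterwalk_ffwd() in favour of the skcipher_walk AEAD helpers, which skip the associated data internally and track the remaining length in walk.total. The shape of the resulting loop, shown standalone and slightly simplified; example_ccm_walk() is hypothetical, and the real block processing and kernel_neon_begin/end calls are as in the hunk:

	#include <crypto/aes.h>
	#include <crypto/internal/aead.h>
	#include <crypto/internal/skcipher.h>

	static int example_ccm_walk(struct aead_request *req, bool enc)
	{
		struct skcipher_walk walk;
		int err;

		err = enc ? skcipher_walk_aead_encrypt(&walk, req, true)
			  : skcipher_walk_aead_decrypt(&walk, req, true);

		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			/* only the final chunk may carry a partial block */
			if (walk.nbytes == walk.total)
				tail = 0;

			/* process walk.nbytes - tail bytes from walk.src to walk.dst */

			err = skcipher_walk_done(&walk, tail);
		}
		return err;
	}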
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index f7bd9bf0bbb3..50d9fe11d0c8 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
kernel_neon_begin_partial(4);
__asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.2d}, [%[key]], #16 ;"
+ " ld1 {v1.16b}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
- " ld1 {v3.2d}, [%[key]], #16 ;"
+ " ld1 {v3.16b}, [%[key]], #16 ;"
"1: aese v0.16b, v2.16b ;"
" aesmc v0.16b, v0.16b ;"
- "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ "2: ld1 {v1.16b}, [%[key]], #16 ;"
" aese v0.16b, v3.16b ;"
" aesmc v0.16b, v0.16b ;"
- "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ "3: ld1 {v2.16b}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aese v0.16b, v1.16b ;"
" aesmc v0.16b, v0.16b ;"
- " ld1 {v3.2d}, [%[key]], #16 ;"
+ " ld1 {v3.16b}, [%[key]], #16 ;"
" bpl 1b ;"
" aese v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
kernel_neon_begin_partial(4);
__asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.2d}, [%[key]], #16 ;"
+ " ld1 {v1.16b}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
- " ld1 {v3.2d}, [%[key]], #16 ;"
+ " ld1 {v3.16b}, [%[key]], #16 ;"
"1: aesd v0.16b, v2.16b ;"
" aesimc v0.16b, v0.16b ;"
- "2: ld1 {v1.2d}, [%[key]], #16 ;"
+ "2: ld1 {v1.16b}, [%[key]], #16 ;"
" aesd v0.16b, v3.16b ;"
" aesimc v0.16b, v0.16b ;"
- "3: ld1 {v2.2d}, [%[key]], #16 ;"
+ "3: ld1 {v2.16b}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aesd v0.16b, v1.16b ;"
" aesimc v0.16b, v0.16b ;"
- " ld1 {v3.2d}, [%[key]], #16 ;"
+ " ld1 {v3.16b}, [%[key]], #16 ;"
" bpl 1b ;"
" aesd v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
+#ifndef CONFIG_CPU_BIG_ENDIAN
rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+#else
+ rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
+ rki[0];
+#endif
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
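
The #ifdef added above adjusts RotWord and the rcon placement for big-endian kernels: a 32-bit round-key word loaded from memory carries its bytes in the opposite order, so the rotate direction flips and the round constant must be XORed into the top byte. A minimal sketch of that reasoning; example_rotword() is illustrative, not code from the patch:

	#include <linux/bitops.h>	/* ror32(), rol32() */
	#include <linux/types.h>

	/*
	 * For key bytes b0 b1 b2 b3 in memory order, the loaded word is
	 *   little-endian:  w = b0 | b1 << 8 | b2 << 16 | b3 << 24
	 *   big-endian:     w = b0 << 24 | b1 << 16 | b2 << 8 | b3
	 * RotWord must produce the memory byte order b1 b2 b3 b0, hence:
	 */
	static u32 example_rotword(u32 w)
	{
	#ifndef CONFIG_CPU_BIG_ENDIAN
		return ror32(w, 8);	/* rcon then lands in the low byte: ^ rcon[i] */
	#else
		return rol32(w, 8);	/* rcon then lands in the top byte: ^ (rcon[i] << 24) */
	#endif
	}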
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 78f3cfe92c08..b46093d567e5 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#define AES_ENTRY(func) ENTRY(ce_ ## func)
#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 6b2aa0fd6cd0..4e3f8adb1793 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -11,8 +11,8 @@
#include <asm/neon.h>
#include <asm/hwcap.h>
#include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>
@@ -80,13 +80,19 @@ struct crypto_aes_xts_ctx {
struct crypto_aes_ctx __aligned(8) key2;
};
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- ret = xts_check_key(tfm, in_key, key_len);
+ ret = xts_verify_key(tfm, in_key, key_len);
if (ret)
return ret;
@@ -97,111 +103,101 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (!ret)
return 0;
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, true);
first = 1;
kernel_neon_begin();
@@ -209,17 +205,14 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
first);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
first = 0;
- nbytes -= blocks * AES_BLOCK_SIZE;
- if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
- break;
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % AES_BLOCK_SIZE);
}
- if (walk.nbytes % AES_BLOCK_SIZE) {
- u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
- u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
+ unsigned int nbytes = walk.nbytes;
+ u8 *tdst = walk.dst.virt.addr;
+ u8 *tsrc = walk.src.virt.addr;
/*
* Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -230,227 +223,169 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
blocks, walk.iv, first);
memcpy(tdst, tail, nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
kernel_neon_end();
return err;
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key1.key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, first, rounds = 6 + ctx->key1.key_length / 4;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
unsigned int blocks;
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, true);
kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
}
-static struct crypto_alg aes_algs[] = { {
- .cra_name = "__ecb-aes-" MODE,
- .cra_driver_name = "__driver-ecb-aes-" MODE,
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- .setkey = aes_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+ .base = {
+ .cra_name = "__ecb(aes)",
+ .cra_driver_name = "__ecb-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = skcipher_aes_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
}, {
- .cra_name = "__cbc-aes-" MODE,
- .cra_driver_name = "__driver-cbc-aes-" MODE,
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
+ .base = {
+ .cra_name = "__cbc(aes)",
+ .cra_driver_name = "__cbc-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
}, {
- .cra_name = "__ctr-aes-" MODE,
- .cra_driver_name = "__driver-ctr-aes-" MODE,
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_setkey,
- .encrypt = ctr_encrypt,
- .decrypt = ctr_encrypt,
+ .base = {
+ .cra_name = "__ctr(aes)",
+ .cra_driver_name = "__ctr-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ .encrypt = ctr_encrypt,
+ .decrypt = ctr_encrypt,
}, {
- .cra_name = "__xts-aes-" MODE,
- .cra_driver_name = "__driver-xts-aes-" MODE,
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_set_key,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
+ .base = {
+ .cra_name = "__xts(aes)",
+ .cra_driver_name = "__xts-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
},
-}, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-" MODE,
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-" MODE,
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-" MODE,
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
-}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-" MODE,
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- }
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_set_key,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
} };
-static int __init aes_init(void)
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
{
- return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+ simd_skcipher_free(aes_simd_algs[i]);
+
+ crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
-static void __exit aes_exit(void)
+static int __init aes_init(void)
{
- crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+ struct simd_skcipher_alg *simd;
+ const char *basename;
+ const char *algname;
+ const char *drvname;
+ int err;
+ int i;
+
+ err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ algname = aes_algs[i].base.cra_name + 2;
+ drvname = aes_algs[i].base.cra_driver_name + 2;
+ basename = aes_algs[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+
+ aes_simd_algs[i] = simd;
+ }
+
+ return 0;
+
+unregister_simds:
+ aes_exit();
+ return err;
}
#ifdef USE_V8_CRYPTO_EXTENSIONS
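
The rewritten module init above registers the internal "__ecb(aes)"-style skciphers and then wraps each one with simd_skcipher_create_compat(), which provides the asynchronous, SIMD-safe front end that previously came from ablk_helper; the "+ 2" arithmetic simply strips the "__" prefix to obtain the public algorithm and driver names. A standalone sketch of that naming step; example_register_simd() is hypothetical:

	#include <crypto/internal/simd.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/err.h>

	static int example_register_simd(struct skcipher_alg *alg,
					 struct simd_skcipher_alg **out)
	{
		const char *algname  = alg->base.cra_name + 2;		/* "__ctr(aes)" -> "ctr(aes)" */
		const char *drvname  = alg->base.cra_driver_name + 2;	/* "__ctr-aes-ce" -> "ctr-aes-ce" */
		const char *basename = alg->base.cra_driver_name;	/* internal algorithm to wrap */
		struct simd_skcipher_alg *simd;

		simd = simd_skcipher_create_compat(algname, drvname, basename);
		if (IS_ERR(simd))
			return PTR_ERR(simd);

		*out = simd;
		return 0;
	}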
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index f6e372c528eb..c53dbeae79f2 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -386,7 +386,8 @@ AES_ENDPROC(aes_ctr_encrypt)
.endm
.Lxts_mul_x:
- .word 1, 0, 0x87, 0
+CPU_LE( .quad 1, 0x87 )
+CPU_BE( .quad 0x87, 1 )
AES_ENTRY(aes_xts_encrypt)
FRAME_PUSH
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index b93170e1cc93..85f07ead7c5c 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#define AES_ENTRY(func) ENTRY(neon_ ## func)
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
@@ -83,13 +84,13 @@
.endm
.macro do_block, enc, in, rounds, rk, rkp, i
- ld1 {v15.16b}, [\rk]
+ ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
sub_bytes \in
- ld1 {v15.16b}, [\rkp], #16
+ ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -229,7 +230,7 @@
.endm
.macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i
- ld1 {v15.16b}, [\rk]
+ ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
@@ -237,7 +238,7 @@
sub_bytes_2x \in0, \in1
tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
- ld1 {v15.16b}, [\rkp], #16
+ ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -254,7 +255,7 @@
.endm
.macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
- ld1 {v15.16b}, [\rk]
+ ld1 {v15.4s}, [\rk]
add \rkp, \rk, #16
mov \i, \rounds
1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
@@ -266,7 +267,7 @@
tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
- ld1 {v15.16b}, [\rkp], #16
+ ld1 {v15.4s}, [\rkp], #16
subs \i, \i, #1
beq 2222f
.if \enc == 1
@@ -306,12 +307,16 @@
.text
.align 4
.LForward_ShiftRows:
- .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
- .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+CPU_LE( .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 )
+CPU_LE( .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb )
+CPU_BE( .byte 0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8 )
+CPU_BE( .byte 0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0 )
.LReverse_ShiftRows:
- .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
- .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+CPU_LE( .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb )
+CPU_LE( .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 )
+CPU_BE( .byte 0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8 )
+CPU_BE( .byte 0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0 )
.LForward_Sbox:
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
new file mode 100644
index 000000000000..18f5a8442276
--- /dev/null
+++ b/arch/arm64/crypto/crc32-ce-core.S
@@ -0,0 +1,266 @@
+/*
+ * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
+ * calculation.
+ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
+ * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
+ * at:
+ * http://www.intel.com/products/processor/manuals/
+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 2B: Instruction Set Reference, N-Z
+ *
+ * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
+ * Alexander Boyko <Alexander_Boyko@xyratex.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .align 6
+ .cpu generic+crypto+crc
+
+.Lcrc32_constants:
+ /*
+ * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
+ * #define CONSTANT_R1 0x154442bd4LL
+ *
+ * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
+ * #define CONSTANT_R2 0x1c6e41596LL
+ */
+ .octa 0x00000001c6e415960000000154442bd4
+
+ /*
+ * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
+ * #define CONSTANT_R3 0x1751997d0LL
+ *
+ * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
+ * #define CONSTANT_R4 0x0ccaa009eLL
+ */
+ .octa 0x00000000ccaa009e00000001751997d0
+
+ /*
+ * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
+ * #define CONSTANT_R5 0x163cd6124LL
+ */
+ .quad 0x0000000163cd6124
+ .quad 0x00000000FFFFFFFF
+
+ /*
+ * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
+ *
+ * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
+ * = 0x1F7011641LL
+ * #define CONSTANT_RU 0x1F7011641LL
+ */
+ .octa 0x00000001F701164100000001DB710641
+
+.Lcrc32c_constants:
+ .octa 0x000000009e4addf800000000740eef02
+ .octa 0x000000014cd00bd600000000f20c0dfe
+ .quad 0x00000000dd45aab8
+ .quad 0x00000000FFFFFFFF
+ .octa 0x00000000dea713f10000000105ec76f0
+
+ vCONSTANT .req v0
+ dCONSTANT .req d0
+ qCONSTANT .req q0
+
+ BUF .req x0
+ LEN .req x1
+ CRC .req x2
+
+ vzr .req v9
+
+ /**
+ * Calculate crc32
+ * BUF - buffer
+ * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
+ * CRC - initial crc32
+ * return %eax crc32
+ * uint crc32_pmull_le(unsigned char const *buffer,
+ * size_t len, uint crc32)
+ */
+ENTRY(crc32_pmull_le)
+ adr x3, .Lcrc32_constants
+ b 0f
+
+ENTRY(crc32c_pmull_le)
+ adr x3, .Lcrc32c_constants
+
+0: bic LEN, LEN, #15
+ ld1 {v1.16b-v4.16b}, [BUF], #0x40
+ movi vzr.16b, #0
+ fmov dCONSTANT, CRC
+ eor v1.16b, v1.16b, vCONSTANT.16b
+ sub LEN, LEN, #0x40
+ cmp LEN, #0x40
+ b.lt less_64
+
+ ldr qCONSTANT, [x3]
+
+loop_64: /* 64 bytes Full cache line folding */
+ sub LEN, LEN, #0x40
+
+ pmull2 v5.1q, v1.2d, vCONSTANT.2d
+ pmull2 v6.1q, v2.2d, vCONSTANT.2d
+ pmull2 v7.1q, v3.2d, vCONSTANT.2d
+ pmull2 v8.1q, v4.2d, vCONSTANT.2d
+
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ pmull v2.1q, v2.1d, vCONSTANT.1d
+ pmull v3.1q, v3.1d, vCONSTANT.1d
+ pmull v4.1q, v4.1d, vCONSTANT.1d
+
+ eor v1.16b, v1.16b, v5.16b
+ ld1 {v5.16b}, [BUF], #0x10
+ eor v2.16b, v2.16b, v6.16b
+ ld1 {v6.16b}, [BUF], #0x10
+ eor v3.16b, v3.16b, v7.16b
+ ld1 {v7.16b}, [BUF], #0x10
+ eor v4.16b, v4.16b, v8.16b
+ ld1 {v8.16b}, [BUF], #0x10
+
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ eor v3.16b, v3.16b, v7.16b
+ eor v4.16b, v4.16b, v8.16b
+
+ cmp LEN, #0x40
+ b.ge loop_64
+
+less_64: /* Folding cache line into 128bit */
+ ldr qCONSTANT, [x3, #16]
+
+ pmull2 v5.1q, v1.2d, vCONSTANT.2d
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v5.16b
+ eor v1.16b, v1.16b, v2.16b
+
+ pmull2 v5.1q, v1.2d, vCONSTANT.2d
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v5.16b
+ eor v1.16b, v1.16b, v3.16b
+
+ pmull2 v5.1q, v1.2d, vCONSTANT.2d
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v5.16b
+ eor v1.16b, v1.16b, v4.16b
+
+ cbz LEN, fold_64
+
+loop_16: /* Folding rest buffer into 128bit */
+ subs LEN, LEN, #0x10
+
+ ld1 {v2.16b}, [BUF], #0x10
+ pmull2 v5.1q, v1.2d, vCONSTANT.2d
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v5.16b
+ eor v1.16b, v1.16b, v2.16b
+
+ b.ne loop_16
+
+fold_64:
+ /* perform the last 64 bit fold, also adds 32 zeroes
+ * to the input stream */
+ ext v2.16b, v1.16b, v1.16b, #8
+ pmull2 v2.1q, v2.2d, vCONSTANT.2d
+ ext v1.16b, v1.16b, vzr.16b, #8
+ eor v1.16b, v1.16b, v2.16b
+
+ /* final 32-bit fold */
+ ldr dCONSTANT, [x3, #32]
+ ldr d3, [x3, #40]
+
+ ext v2.16b, v1.16b, vzr.16b, #4
+ and v1.16b, v1.16b, v3.16b
+ pmull v1.1q, v1.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v2.16b
+
+ /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
+ ldr qCONSTANT, [x3, #48]
+
+ and v2.16b, v1.16b, v3.16b
+ ext v2.16b, vzr.16b, v2.16b, #8
+ pmull2 v2.1q, v2.2d, vCONSTANT.2d
+ and v2.16b, v2.16b, v3.16b
+ pmull v2.1q, v2.1d, vCONSTANT.1d
+ eor v1.16b, v1.16b, v2.16b
+ mov w0, v1.s[1]
+
+ ret
+ENDPROC(crc32_pmull_le)
+ENDPROC(crc32c_pmull_le)
+
+ .macro __crc32, c
+0: subs x2, x2, #16
+ b.mi 8f
+ ldp x3, x4, [x1], #16
+CPU_BE( rev x3, x3 )
+CPU_BE( rev x4, x4 )
+ crc32\c\()x w0, w0, x3
+ crc32\c\()x w0, w0, x4
+ b.ne 0b
+ ret
+
+8: tbz x2, #3, 4f
+ ldr x3, [x1], #8
+CPU_BE( rev x3, x3 )
+ crc32\c\()x w0, w0, x3
+4: tbz x2, #2, 2f
+ ldr w3, [x1], #4
+CPU_BE( rev w3, w3 )
+ crc32\c\()w w0, w0, w3
+2: tbz x2, #1, 1f
+ ldrh w3, [x1], #2
+CPU_BE( rev16 w3, w3 )
+ crc32\c\()h w0, w0, w3
+1: tbz x2, #0, 0f
+ ldrb w3, [x1]
+ crc32\c\()b w0, w0, w3
+0: ret
+ .endm
+
+ .align 5
+ENTRY(crc32_armv8_le)
+ __crc32
+ENDPROC(crc32_armv8_le)
+
+ .align 5
+ENTRY(crc32c_armv8_le)
+ __crc32 c
+ENDPROC(crc32c_armv8_le)
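For reference, both the PMULL path and the ARMv8-CRC fallback above compute the standard little-endian CRC-32 over the reflected polynomial 0xEDB88320 (0x82F63B78 for CRC-32C). A minimal bitwise sketch of that computation, useful only as an independent cross-check of the accelerated code and not part of the patch itself:

#include <stddef.h>
#include <stdint.h>

/* Plain bitwise CRC-32 over the reflected polynomial 0xEDB88320;
 * matches crc32_le semantics (no pre/post inversion, caller supplies
 * the initial value). Swap in 0x82F63B78 for CRC-32C. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
}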
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
new file mode 100644
index 000000000000..8594127d5e01
--- /dev/null
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -0,0 +1,212 @@
+/*
+ * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+
+#define PMULL_MIN_LEN 64L /* minimum size of buffer
+ * for crc32_pmull_le */
+#define SCALE_F 16L /* size of NEON register */
+
+asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc);
+asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len);
+
+asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc);
+asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len);
+
+static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len);
+static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len);
+
+static int crc32_pmull_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 0;
+ return 0;
+}
+
+static int crc32c_pmull_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = ~0;
+ return 0;
+}
+
+static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32_pmull_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *crc = shash_desc_ctx(desc);
+
+ *crc = *mctx;
+ return 0;
+}
+
+static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if ((u64)data % SCALE_F) {
+ l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
+
+ *crc = fallback_crc32(*crc, data, l);
+
+ data += l;
+ length -= l;
+ }
+
+ if (length >= PMULL_MIN_LEN) {
+ l = round_down(length, SCALE_F);
+
+ kernel_neon_begin_partial(10);
+ *crc = crc32_pmull_le(data, l, *crc);
+ kernel_neon_end();
+
+ data += l;
+ length -= l;
+ }
+
+ if (length > 0)
+ *crc = fallback_crc32(*crc, data, length);
+
+ return 0;
+}
+
+static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u32 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if ((u64)data % SCALE_F) {
+ l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
+
+ *crc = fallback_crc32c(*crc, data, l);
+
+ data += l;
+ length -= l;
+ }
+
+ if (length >= PMULL_MIN_LEN) {
+ l = round_down(length, SCALE_F);
+
+ kernel_neon_begin_partial(10);
+ *crc = crc32c_pmull_le(data, l, *crc);
+ kernel_neon_end();
+
+ data += l;
+ length -= l;
+ }
+
+ if (length > 0) {
+ *crc = fallback_crc32c(*crc, data, length);
+ }
+
+ return 0;
+}
+
+static int crc32_pmull_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ put_unaligned_le32(*crc, out);
+ return 0;
+}
+
+static int crc32c_pmull_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *crc = shash_desc_ctx(desc);
+
+ put_unaligned_le32(~*crc, out);
+ return 0;
+}
+
+static struct shash_alg crc32_pmull_algs[] = { {
+ .setkey = crc32_pmull_setkey,
+ .init = crc32_pmull_init,
+ .update = crc32_pmull_update,
+ .final = crc32_pmull_final,
+ .descsize = sizeof(u32),
+ .digestsize = sizeof(u32),
+
+ .base.cra_ctxsize = sizeof(u32),
+ .base.cra_init = crc32_pmull_cra_init,
+ .base.cra_name = "crc32",
+ .base.cra_driver_name = "crc32-arm64-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .setkey = crc32_pmull_setkey,
+ .init = crc32_pmull_init,
+ .update = crc32c_pmull_update,
+ .final = crc32c_pmull_final,
+ .descsize = sizeof(u32),
+ .digestsize = sizeof(u32),
+
+ .base.cra_ctxsize = sizeof(u32),
+ .base.cra_init = crc32c_pmull_cra_init,
+ .base.cra_name = "crc32c",
+ .base.cra_driver_name = "crc32c-arm64-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init crc32_pmull_mod_init(void)
+{
+ if (elf_hwcap & HWCAP_CRC32) {
+ fallback_crc32 = crc32_armv8_le;
+ fallback_crc32c = crc32c_armv8_le;
+ } else {
+ fallback_crc32 = crc32_le;
+ fallback_crc32c = __crc32c_le;
+ }
+
+ return crypto_register_shashes(crc32_pmull_algs,
+ ARRAY_SIZE(crc32_pmull_algs));
+}
+
+static void __exit crc32_pmull_mod_exit(void)
+{
+ crypto_unregister_shashes(crc32_pmull_algs,
+ ARRAY_SIZE(crc32_pmull_algs));
+}
+
+module_cpu_feature_match(PMULL, crc32_pmull_mod_init);
+module_exit(crc32_pmull_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
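The update handlers above follow a head/bulk/tail pattern: bytes up to the next 16-byte boundary and any short tail go to the scalar fallback, and only aligned chunks of at least PMULL_MIN_LEN bytes are handed to the NEON routine inside a kernel_neon_begin/kernel_neon_end section. A standalone sketch of that split, with placeholder function pointers standing in for the kernel helpers (an illustration of the structure, not kernel code):

#include <stddef.h>
#include <stdint.h>

#define SCALE_F         16      /* NEON register width in bytes */
#define PMULL_MIN_LEN   64      /* smallest chunk worth the NEON setup */

/* scalar_crc32() and pmull_crc32() are hypothetical stand-ins; their
 * argument orders mirror crc32_armv8_le() and crc32_pmull_le() above. */
uint32_t crc32_update_split(uint32_t crc, const uint8_t *data, size_t len,
                            uint32_t (*scalar_crc32)(uint32_t, const uint8_t *, size_t),
                            uint32_t (*pmull_crc32)(const uint8_t *, size_t, uint32_t))
{
        size_t head = (uintptr_t)data % SCALE_F;

        if (head) {                     /* align to a NEON register boundary */
                size_t l = SCALE_F - head;

                if (l > len)
                        l = len;
                crc = scalar_crc32(crc, data, l);
                data += l;
                len -= l;
        }

        if (len >= PMULL_MIN_LEN) {     /* bulk: whole 16-byte blocks only */
                size_t l = len & ~(size_t)(SCALE_F - 1);

                crc = pmull_crc32(data, l, crc);
                data += l;
                len -= l;
        }

        if (len)                        /* leftover tail */
                crc = scalar_crc32(crc, data, len);

        return crc;
}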
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
new file mode 100644
index 000000000000..d5b5a8c038c8
--- /dev/null
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -0,0 +1,392 @@
+//
+// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
+//
+// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+
+//
+// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
+//
+// Copyright (c) 2013, Intel Corporation
+//
+// Authors:
+// Erdinc Ozturk <erdinc.ozturk@intel.com>
+// Vinodh Gopal <vinodh.gopal@intel.com>
+// James Guilford <james.guilford@intel.com>
+// Tim Chen <tim.c.chen@linux.intel.com>
+//
+// This software is available to you under a choice of one of two
+// licenses. You may choose to be licensed under the terms of the GNU
+// General Public License (GPL) Version 2, available from the file
+// COPYING in the main directory of this source tree, or the
+// OpenIB.org BSD license below:
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// * Neither the name of the Intel Corporation nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+//
+// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Function API:
+// UINT16 crc_t10dif_pcl(
+// UINT16 init_crc, //initial CRC value, 16 bits
+// const unsigned char *buf, //buffer pointer to calculate CRC on
+// UINT64 len //buffer length in bytes (64-bit data)
+// );
+//
+// Reference paper titled "Fast CRC Computation for Generic
+// Polynomials Using PCLMULQDQ Instruction"
+// URL: http://www.intel.com/content/dam/www/public/us/en/documents
+// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+//
+//
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+ .cpu generic+crypto
+
+ arg1_low32 .req w0
+ arg2 .req x1
+ arg3 .req x2
+
+ vzr .req v13
+
+ENTRY(crc_t10dif_pmull)
+ movi vzr.16b, #0 // init zero register
+
+ // adjust the 16-bit initial_crc value, scale it to 32 bits
+ lsl arg1_low32, arg1_low32, #16
+
+ // check if smaller than 256
+ cmp arg3, #256
+
+ // for sizes less than 256, we can't fold 128B at a time...
+ b.lt _less_than_128
+
+ // load the initial crc value
+ // crc value does not need to be byte-reflected, but it needs
+ // to be moved to the high part of the register.
+ // because data will be byte-reflected and will align with
+ // initial crc at correct place.
+ movi v10.16b, #0
+ mov v10.s[3], arg1_low32 // initial crc
+
+ // receive the initial 64B data, xor the initial crc value
+ ldp q0, q1, [arg2]
+ ldp q2, q3, [arg2, #0x20]
+ ldp q4, q5, [arg2, #0x40]
+ ldp q6, q7, [arg2, #0x60]
+ add arg2, arg2, #0x80
+
+CPU_LE( rev64 v0.16b, v0.16b )
+CPU_LE( rev64 v1.16b, v1.16b )
+CPU_LE( rev64 v2.16b, v2.16b )
+CPU_LE( rev64 v3.16b, v3.16b )
+CPU_LE( rev64 v4.16b, v4.16b )
+CPU_LE( rev64 v5.16b, v5.16b )
+CPU_LE( rev64 v6.16b, v6.16b )
+CPU_LE( rev64 v7.16b, v7.16b )
+
+CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
+CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
+CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
+CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
+CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
+CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
+CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
+CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
+
+ // XOR the initial_crc value
+ eor v0.16b, v0.16b, v10.16b
+
+ ldr q10, rk3 // q10 has rk3 and rk4
+ // type of pmull instruction
+ // will determine which constant to use
+
+ //
+ // we subtract 256 instead of 128 to save one instruction from the loop
+ //
+ sub arg3, arg3, #256
+
+ // at this section of the code, there is 128*x+y (0<=y<128) bytes of
+ // buffer. The _fold_64_B_loop will fold 128B at a time
+ // until we have 128+y Bytes of buffer
+
+
+ // fold 128B at a time. This section of the code folds 8 vector
+ // registers in parallel
+_fold_64_B_loop:
+
+ .macro fold64, reg1, reg2
+ ldp q11, q12, [arg2], #0x20
+
+ pmull2 v8.1q, \reg1\().2d, v10.2d
+ pmull \reg1\().1q, \reg1\().1d, v10.1d
+
+CPU_LE( rev64 v11.16b, v11.16b )
+CPU_LE( rev64 v12.16b, v12.16b )
+
+ pmull2 v9.1q, \reg2\().2d, v10.2d
+ pmull \reg2\().1q, \reg2\().1d, v10.1d
+
+CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
+CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
+
+ eor \reg1\().16b, \reg1\().16b, v8.16b
+ eor \reg2\().16b, \reg2\().16b, v9.16b
+ eor \reg1\().16b, \reg1\().16b, v11.16b
+ eor \reg2\().16b, \reg2\().16b, v12.16b
+ .endm
+
+ fold64 v0, v1
+ fold64 v2, v3
+ fold64 v4, v5
+ fold64 v6, v7
+
+ subs arg3, arg3, #128
+
+ // check if there is another 128B in the buffer to be able to fold
+ b.ge _fold_64_B_loop
+
+ // at this point, the buffer pointer is pointing at the last y Bytes
+ // of the buffer; the 128B of folded data is in 8 of the vector
+ // registers: v0-v7
+
+ // fold the 8 vector registers to 1 vector register with different
+ // constants
+
+ ldr q10, rk9
+
+ .macro fold16, reg, rk
+ pmull v8.1q, \reg\().1d, v10.1d
+ pmull2 \reg\().1q, \reg\().2d, v10.2d
+ .ifnb \rk
+ ldr q10, \rk
+ .endif
+ eor v7.16b, v7.16b, v8.16b
+ eor v7.16b, v7.16b, \reg\().16b
+ .endm
+
+ fold16 v0, rk11
+ fold16 v1, rk13
+ fold16 v2, rk15
+ fold16 v3, rk17
+ fold16 v4, rk19
+ fold16 v5, rk1
+ fold16 v6
+
+ // instead of adding 128 back, we add 128-16 to the loop counter to
+ // save one instruction: rather than a separate cmp, the b.lt below
+ // uses the flags set by the adds
+ adds arg3, arg3, #(128-16)
+ b.lt _final_reduction_for_128
+
+ // now we have 16+y bytes left to reduce. 16 Bytes is in register v7
+ // and the rest is in memory. We can fold 16 bytes at a time if y>=16
+ // continue folding 16B at a time
+
+_16B_reduction_loop:
+ pmull v8.1q, v7.1d, v10.1d
+ pmull2 v7.1q, v7.2d, v10.2d
+ eor v7.16b, v7.16b, v8.16b
+
+ ldr q0, [arg2], #16
+CPU_LE( rev64 v0.16b, v0.16b )
+CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
+ eor v7.16b, v7.16b, v0.16b
+ subs arg3, arg3, #16
+
+ // instead of a cmp instruction, we utilize the flags from the subs
+ // with the b.ge instruction, equivalent of: cmp arg3, 16-16
+ // check if there is any more 16B in the buffer to be able to fold
+ b.ge _16B_reduction_loop
+
+ // now we have 16+z bytes left to reduce, where 0<= z < 16.
+ // first, we reduce the data in the v7 register
+
+_final_reduction_for_128:
+ // check if any more data to fold. If not, compute the CRC of
+ // the final 128 bits
+ adds arg3, arg3, #16
+ b.eq _128_done
+
+ // here we are getting data that is less than 16 bytes.
+ // since we know that there was data before the pointer, we can
+ // offset the input pointer before the actual point, to receive
+ // exactly 16 bytes. after that the registers need to be adjusted.
+_get_last_two_regs:
+ add arg2, arg2, arg3
+ ldr q1, [arg2, #-16]
+CPU_LE( rev64 v1.16b, v1.16b )
+CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
+
+ // get rid of the extra data that was loaded before
+ // load the shift constant
+ adr x4, tbl_shf_table + 16
+ sub x4, x4, arg3
+ ld1 {v0.16b}, [x4]
+
+ // shift v2 to the left by arg3 bytes
+ tbl v2.16b, {v7.16b}, v0.16b
+
+ // shift v7 to the right by 16-arg3 bytes
+ movi v9.16b, #0x80
+ eor v0.16b, v0.16b, v9.16b
+ tbl v7.16b, {v7.16b}, v0.16b
+
+ // blend
+ sshr v0.16b, v0.16b, #7 // convert to 8-bit mask
+ bsl v0.16b, v2.16b, v1.16b
+
+ // fold 16 Bytes
+ pmull v8.1q, v7.1d, v10.1d
+ pmull2 v7.1q, v7.2d, v10.2d
+ eor v7.16b, v7.16b, v8.16b
+ eor v7.16b, v7.16b, v0.16b
+
+_128_done:
+ // compute crc of a 128-bit value
+ ldr q10, rk5 // rk5 and rk6 in q10
+
+ // 64b fold
+ ext v0.16b, vzr.16b, v7.16b, #8
+ mov v7.d[0], v7.d[1]
+ pmull v7.1q, v7.1d, v10.1d
+ eor v7.16b, v7.16b, v0.16b
+
+ // 32b fold
+ ext v0.16b, v7.16b, vzr.16b, #4
+ mov v7.s[3], vzr.s[0]
+ pmull2 v0.1q, v0.2d, v10.2d
+ eor v7.16b, v7.16b, v0.16b
+
+ // barrett reduction
+_barrett:
+ ldr q10, rk7
+ mov v0.d[0], v7.d[1]
+
+ pmull v0.1q, v0.1d, v10.1d
+ ext v0.16b, vzr.16b, v0.16b, #12
+ pmull2 v0.1q, v0.2d, v10.2d
+ ext v0.16b, vzr.16b, v0.16b, #12
+ eor v7.16b, v7.16b, v0.16b
+ mov w0, v7.s[1]
+
+_cleanup:
+ // scale the result back to 16 bits
+ lsr x0, x0, #16
+ ret
+
+_less_than_128:
+ cbz arg3, _cleanup
+
+ movi v0.16b, #0
+ mov v0.s[3], arg1_low32 // get the initial crc value
+
+ ldr q7, [arg2], #0x10
+CPU_LE( rev64 v7.16b, v7.16b )
+CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
+ eor v7.16b, v7.16b, v0.16b // xor the initial crc value
+
+ cmp arg3, #16
+ b.eq _128_done // exactly 16 left
+ b.lt _less_than_16_left
+
+ ldr q10, rk1 // rk1 and rk2 in q10
+
+ // update the counter. subtract 32 instead of 16 to save one
+ // instruction from the loop
+ subs arg3, arg3, #32
+ b.ge _16B_reduction_loop
+
+ add arg3, arg3, #16
+ b _get_last_two_regs
+
+_less_than_16_left:
+ // shl r9, 4
+ adr x0, tbl_shf_table + 16
+ sub x0, x0, arg3
+ ld1 {v0.16b}, [x0]
+ movi v9.16b, #0x80
+ eor v0.16b, v0.16b, v9.16b
+ tbl v7.16b, {v7.16b}, v0.16b
+ b _128_done
+ENDPROC(crc_t10dif_pmull)
+
+// precomputed constants
+// these constants are precomputed from the poly:
+// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+ .align 4
+// Q = 0x18BB70000
+// rk1 = 2^(32*3) mod Q << 32
+// rk2 = 2^(32*5) mod Q << 32
+// rk3 = 2^(32*15) mod Q << 32
+// rk4 = 2^(32*17) mod Q << 32
+// rk5 = 2^(32*3) mod Q << 32
+// rk6 = 2^(32*2) mod Q << 32
+// rk7 = floor(2^64/Q)
+// rk8 = Q
+
+rk1: .octa 0x06df0000000000002d56000000000000
+rk3: .octa 0x7cf50000000000009d9d000000000000
+rk5: .octa 0x13680000000000002d56000000000000
+rk7: .octa 0x000000018bb7000000000001f65a57f8
+rk9: .octa 0xbfd6000000000000ceae000000000000
+rk11: .octa 0x713c0000000000001e16000000000000
+rk13: .octa 0x80a6000000000000f7f9000000000000
+rk15: .octa 0xe658000000000000044c000000000000
+rk17: .octa 0xa497000000000000ad18000000000000
+rk19: .octa 0xe7b50000000000006ee3000000000000
+
+tbl_shf_table:
+// use these values for shift constants for the tbl/tbx instruction
+// different alignments result in values as shown:
+// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
+// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
+// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
+// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
+// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
+// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
+// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
+// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
+// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
+// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
+// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
+
+ .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
+ .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0x0
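The folding constants rk1..rk19 above are all derived from the CRC-T10DIF polynomial 0x8BB7 (Q = 0x18BB70000 once scaled to 32 bits), so the PMULL routine must agree with the plain bitwise definition of the checksum. A small reference sketch of that definition, included here only as a cross-check and not taken from the patch:

#include <stddef.h>
#include <stdint.h>

/* Plain bitwise CRC-T10DIF: 16-bit, polynomial 0x8BB7, init 0,
 * no bit reflection, no final XOR. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= (uint16_t)*buf++ << 8;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;
}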
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
new file mode 100644
index 000000000000..60cb590c2590
--- /dev/null
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -0,0 +1,95 @@
+/*
+ * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/neon.h>
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
+
+asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len);
+
+static int crct10dif_init(struct shash_desc *desc)
+{
+ u16 *crc = shash_desc_ctx(desc);
+
+ *crc = 0;
+ return 0;
+}
+
+static int crct10dif_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u16 *crc = shash_desc_ctx(desc);
+ unsigned int l;
+
+ if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+ l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+ ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+
+ *crc = crc_t10dif_generic(*crc, data, l);
+
+ length -= l;
+ data += l;
+ }
+
+ if (length > 0) {
+ kernel_neon_begin_partial(14);
+ *crc = crc_t10dif_pmull(*crc, data, length);
+ kernel_neon_end();
+ }
+
+ return 0;
+}
+
+static int crct10dif_final(struct shash_desc *desc, u8 *out)
+{
+ u16 *crc = shash_desc_ctx(desc);
+
+ *(u16 *)out = *crc;
+ return 0;
+}
+
+static struct shash_alg crc_t10dif_alg = {
+ .digestsize = CRC_T10DIF_DIGEST_SIZE,
+ .init = crct10dif_init,
+ .update = crct10dif_update,
+ .final = crct10dif_final,
+ .descsize = CRC_T10DIF_DIGEST_SIZE,
+
+ .base.cra_name = "crct10dif",
+ .base.cra_driver_name = "crct10dif-arm64-ce",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init crc_t10dif_mod_init(void)
+{
+ return crypto_register_shash(&crc_t10dif_alg);
+}
+
+static void __exit crc_t10dif_mod_exit(void)
+{
+ crypto_unregister_shash(&crc_t10dif_alg);
+}
+
+module_cpu_feature_match(PMULL, crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
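Once registered, the transform is reached through the generic shash interface by algorithm name; the cra_priority of 200 lets the crypto core prefer crct10dif-arm64-ce over lower-priority implementations whenever the PMULL feature is present. A hedged kernel-context sketch of such a caller (error handling trimmed; the flags field shown was still part of shash_desc on kernels of this generation):

#include <crypto/hash.h>
#include <linux/err.h>

static int crct10dif_example(const u8 *data, unsigned int len, u8 out[2])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("crct10dif", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* field present on kernels of this era */
                err = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return err;
}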
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index dc457015884e..f0bb9f0b524f 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -29,8 +29,8 @@
* struct ghash_key const *k, const char *head)
*/
ENTRY(pmull_ghash_update)
- ld1 {SHASH.16b}, [x3]
- ld1 {XL.16b}, [x1]
+ ld1 {SHASH.2d}, [x3]
+ ld1 {XL.2d}, [x1]
movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
shl MASK.2d, MASK.2d, #57
@@ -74,6 +74,6 @@ CPU_LE( rev64 T1.16b, T1.16b )
cbnz w0, 0b
- st1 {XL.16b}, [x1]
+ st1 {XL.2d}, [x1]
ret
ENDPROC(pmull_ghash_update)
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 033aae6d732a..c98e7e849f06 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform)
ld1r {k3.4s}, [x6]
/* load state */
- ldr dga, [x0]
+ ld1 {dgav.4s}, [x0]
ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
@@ -144,7 +144,7 @@ CPU_LE( rev32 v11.16b, v11.16b )
b 1b
/* store new state */
-3: str dga, [x0]
+3: st1 {dgav.4s}, [x0]
str dgb, [x0, #16]
ret
ENDPROC(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 5df9d9d470ad..01cfee066837 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform)
ld1 {v12.4s-v15.4s}, [x8]
/* load state */
- ldp dga, dgb, [x0]
+ ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
@@ -148,6 +148,6 @@ CPU_LE( rev32 v19.16b, v19.16b )
b 1b
/* store new state */
-3: stp dga, dgb, [x0]
+3: st1 {dgav.4s, dgbv.4s}, [x0]
ret
ENDPROC(sha2_ce_transform)
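The shipped OpenSSL-derived file that follows expands the SHA-256 compression function, and its per-round comments ("Ch(e,f,g)", "Maj(a,b,c)", "Sigma0/Sigma1", "sigma0/sigma1(X[i])") refer to the standard FIPS 180-4 helper functions. As a readability aid, a plain C restatement of those helpers (a reference sketch, not code from the patch):

#include <stdint.h>

/* 32-bit rotate right; r is never 0 for the SHA-256 constants below. */
static inline uint32_t ror32(uint32_t x, unsigned int r)
{
        return (x >> r) | (x << (32 - r));
}

static inline uint32_t Ch(uint32_t e, uint32_t f, uint32_t g)
{
        return (e & f) ^ (~e & g);
}

static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c)
{
        return (a & b) ^ (a & c) ^ (b & c);
}

/* Big-sigma functions used on the working variables a and e. */
static inline uint32_t Sigma0(uint32_t a)
{
        return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
}

static inline uint32_t Sigma1(uint32_t e)
{
        return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
}

/* Small-sigma functions used in the message schedule expansion. */
static inline uint32_t sigma0(uint32_t x)
{
        return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
}

static inline uint32_t sigma1(uint32_t x)
{
        return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}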
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped
new file mode 100644
index 000000000000..3ce82cc860bc
--- /dev/null
+++ b/arch/arm64/crypto/sha256-core.S_shipped
@@ -0,0 +1,2061 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+// SHA256-hw SHA256(*) SHA512
+// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+//
+// (*) Software SHA256 results are of lesser relevance, presented
+// mostly for informational purposes.
+// (**) The result is a trade-off: it's possible to improve it by
+// 10% (or by 1 cycle per round), but at the cost of 20% loss
+// on Cortex-A53 (or by 4 cycles per round).
+// (***) Super-impressive coefficients over gcc-generated code are
+// indication of some compiler "pathology", most notably code
+// generated with -mgeneral-regs-only is significantly faster
+// and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As
+// result 64-bit NEON version of SHA256 was added to provide best
+// all-round performance. For example it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern OPENSSL_armcap_P
+.globl sha256_block_data_order
+.type sha256_block_data_order,%function
+.align 6
+sha256_block_data_order:
+#ifndef __KERNEL__
+# ifdef __ILP32__
+ ldrsw x16,.LOPENSSL_armcap_P
+# else
+ ldr x16,.LOPENSSL_armcap_P
+# endif
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+ tst w16,#ARMV7_NEON
+ b.ne .Lneon_entry
+#endif
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*4
+
+ ldp w20,w21,[x0] // load context
+ ldp w22,w23,[x0,#2*4]
+ ldp w24,w25,[x0,#4*4]
+ add x2,x1,x2,lsl#6 // end of input
+ ldp w26,w27,[x0,#6*4]
+ adr x30,.LK256
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp w3,w4,[x1],#2*4
+ ldr w19,[x30],#4 // *K++
+ eor w28,w21,w22 // magic seed
+ str x1,[x29,#112]
+#ifndef __AARCH64EB__
+ rev w3,w3 // 0
+#endif
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w6,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w3 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w4,w4 // 1
+#endif
+ ldp w5,w6,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w7,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w4 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w5,w5 // 2
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w8,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w5 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w6,w6 // 3
+#endif
+ ldp w7,w8,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w9,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w6 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w7,w7 // 4
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w10,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w7 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w10,ror#11 // Sigma1(e)
+ ror w10,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w10,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w8,w8 // 5
+#endif
+ ldp w9,w10,[x1],#2*4
+ add w23,w23,w17 // h+=Sigma0(a)
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w11,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w8 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w11,ror#11 // Sigma1(e)
+ ror w11,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w11,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w9,w9 // 6
+#endif
+ add w22,w22,w17 // h+=Sigma0(a)
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w12,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w9 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w12,ror#11 // Sigma1(e)
+ ror w12,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w12,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w10,w10 // 7
+#endif
+ ldp w11,w12,[x1],#2*4
+ add w21,w21,w17 // h+=Sigma0(a)
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ eor w13,w25,w25,ror#14
+ and w17,w26,w25
+ bic w28,w27,w25
+ add w20,w20,w10 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w13,ror#11 // Sigma1(e)
+ ror w13,w21,#2
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ eor w17,w21,w21,ror#9
+ add w20,w20,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w24,w24,w20 // d+=h
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w13,w17,ror#13 // Sigma0(a)
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w20,w20,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w11,w11 // 8
+#endif
+ add w20,w20,w17 // h+=Sigma0(a)
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w14,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w11 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w14,ror#11 // Sigma1(e)
+ ror w14,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w14,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w12,w12 // 9
+#endif
+ ldp w13,w14,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w15,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w12 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w15,ror#11 // Sigma1(e)
+ ror w15,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w15,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w13,w13 // 10
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w0,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w13 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w0,ror#11 // Sigma1(e)
+ ror w0,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w0,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w14,w14 // 11
+#endif
+ ldp w15,w0,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w6,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w14 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w15,w15 // 12
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w7,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w15 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w0,w0 // 13
+#endif
+ ldp w1,w2,[x1]
+ add w23,w23,w17 // h+=Sigma0(a)
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w8,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w0 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w1,w1 // 14
+#endif
+ ldr w6,[sp,#12]
+ add w22,w22,w17 // h+=Sigma0(a)
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w9,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w1 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev w2,w2 // 15
+#endif
+ ldr w7,[sp,#0]
+ add w21,w21,w17 // h+=Sigma0(a)
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+.Loop_16_xx:
+ ldr w8,[sp,#4]
+ str w11,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w10,w5,#7
+ and w17,w25,w24
+ ror w9,w2,#17
+ bic w19,w26,w24
+ ror w11,w20,#2
+ add w27,w27,w3 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w10,w10,w5,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w11,w11,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w9,w9,w2,ror#19
+ eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w11,w20,ror#22 // Sigma0(a)
+ eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
+ add w4,w4,w13
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w4,w4,w10
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w4,w4,w9
+ ldr w9,[sp,#8]
+ str w12,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w11,w6,#7
+ and w17,w24,w23
+ ror w10,w3,#17
+ bic w28,w25,w23
+ ror w12,w27,#2
+ add w26,w26,w4 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w11,w11,w6,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w12,w12,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w10,w10,w3,ror#19
+ eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w12,w27,ror#22 // Sigma0(a)
+ eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
+ add w5,w5,w14
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w5,w5,w11
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w5,w5,w10
+ ldr w10,[sp,#12]
+ str w13,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w12,w7,#7
+ and w17,w23,w22
+ ror w11,w4,#17
+ bic w19,w24,w22
+ ror w13,w26,#2
+ add w25,w25,w5 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w12,w12,w7,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w13,w13,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w11,w11,w4,ror#19
+ eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w13,w26,ror#22 // Sigma0(a)
+ eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
+ add w6,w6,w15
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w6,w6,w12
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w6,w6,w11
+ ldr w11,[sp,#0]
+ str w14,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w13,w8,#7
+ and w17,w22,w21
+ ror w12,w5,#17
+ bic w28,w23,w21
+ ror w14,w25,#2
+ add w24,w24,w6 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w13,w13,w8,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w14,w14,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w12,w12,w5,ror#19
+ eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w14,w25,ror#22 // Sigma0(a)
+ eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
+ add w7,w7,w0
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w7,w7,w13
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w7,w7,w12
+ ldr w12,[sp,#4]
+ str w15,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w14,w9,#7
+ and w17,w21,w20
+ ror w13,w6,#17
+ bic w19,w22,w20
+ ror w15,w24,#2
+ add w23,w23,w7 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w14,w14,w9,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w15,w15,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w13,w13,w6,ror#19
+ eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w15,w24,ror#22 // Sigma0(a)
+ eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
+ add w8,w8,w1
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w8,w8,w14
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w8,w8,w13
+ ldr w13,[sp,#8]
+ str w0,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w15,w10,#7
+ and w17,w20,w27
+ ror w14,w7,#17
+ bic w28,w21,w27
+ ror w0,w23,#2
+ add w22,w22,w8 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w15,w15,w10,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w0,w0,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w14,w14,w7,ror#19
+ eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w0,w23,ror#22 // Sigma0(a)
+ eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
+ add w9,w9,w2
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w9,w9,w15
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w9,w9,w14
+ ldr w14,[sp,#12]
+ str w1,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w0,w11,#7
+ and w17,w27,w26
+ ror w15,w8,#17
+ bic w19,w20,w26
+ ror w1,w22,#2
+ add w21,w21,w9 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w0,w0,w11,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w1,w1,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w15,w15,w8,ror#19
+ eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w1,w22,ror#22 // Sigma0(a)
+ eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
+ add w10,w10,w3
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w10,w10,w0
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w10,w10,w15
+ ldr w15,[sp,#0]
+ str w2,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w1,w12,#7
+ and w17,w26,w25
+ ror w0,w9,#17
+ bic w28,w27,w25
+ ror w2,w21,#2
+ add w20,w20,w10 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w1,w1,w12,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w2,w2,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w0,w0,w9,ror#19
+ eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w2,w21,ror#22 // Sigma0(a)
+ eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
+ add w11,w11,w4
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w11,w11,w1
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w11,w11,w0
+ ldr w0,[sp,#4]
+ str w3,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w2,w13,#7
+ and w17,w25,w24
+ ror w1,w10,#17
+ bic w19,w26,w24
+ ror w3,w20,#2
+ add w27,w27,w11 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w2,w2,w13,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w3,w3,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w1,w1,w10,ror#19
+ eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w3,w20,ror#22 // Sigma0(a)
+ eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
+ add w12,w12,w5
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w12,w12,w2
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w12,w12,w1
+ ldr w1,[sp,#8]
+ str w4,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w3,w14,#7
+ and w17,w24,w23
+ ror w2,w11,#17
+ bic w28,w25,w23
+ ror w4,w27,#2
+ add w26,w26,w12 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w3,w3,w14,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w4,w4,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w2,w2,w11,ror#19
+ eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w4,w27,ror#22 // Sigma0(a)
+ eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
+ add w13,w13,w6
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w13,w13,w3
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w13,w13,w2
+ ldr w2,[sp,#12]
+ str w5,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w4,w15,#7
+ and w17,w23,w22
+ ror w3,w12,#17
+ bic w19,w24,w22
+ ror w5,w26,#2
+ add w25,w25,w13 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w4,w4,w15,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w5,w5,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w3,w3,w12,ror#19
+ eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w5,w26,ror#22 // Sigma0(a)
+ eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
+ add w14,w14,w7
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w14,w14,w4
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w14,w14,w3
+ ldr w3,[sp,#0]
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w5,w0,#7
+ and w17,w22,w21
+ ror w4,w13,#17
+ bic w28,w23,w21
+ ror w6,w25,#2
+ add w24,w24,w14 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w5,w5,w0,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w6,w6,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w4,w4,w13,ror#19
+ eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w25,ror#22 // Sigma0(a)
+ eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
+ add w15,w15,w8
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w15,w15,w5
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w15,w15,w4
+ ldr w4,[sp,#4]
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w6,w1,#7
+ and w17,w21,w20
+ ror w5,w14,#17
+ bic w19,w22,w20
+ ror w7,w24,#2
+ add w23,w23,w15 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w6,w6,w1,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w7,w7,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w5,w5,w14,ror#19
+ eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w24,ror#22 // Sigma0(a)
+ eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
+ add w0,w0,w9
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w0,w0,w6
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w0,w0,w5
+ ldr w5,[sp,#8]
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w7,w2,#7
+ and w17,w20,w27
+ ror w6,w15,#17
+ bic w28,w21,w27
+ ror w8,w23,#2
+ add w22,w22,w0 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w7,w7,w2,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w8,w8,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w6,w6,w15,ror#19
+ eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w23,ror#22 // Sigma0(a)
+ eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
+ add w1,w1,w10
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w1,w1,w7
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w1,w1,w6
+ ldr w6,[sp,#12]
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w8,w3,#7
+ and w17,w27,w26
+ ror w7,w0,#17
+ bic w19,w20,w26
+ ror w9,w22,#2
+ add w21,w21,w1 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w8,w8,w3,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w9,w9,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w7,w7,w0,ror#19
+ eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w22,ror#22 // Sigma0(a)
+ eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
+ add w2,w2,w11
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w2,w2,w8
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w2,w2,w7
+ ldr w7,[sp,#0]
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+ cbnz w19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#260 // rewind
+
+ ldp w3,w4,[x0]
+ ldp w5,w6,[x0,#2*4]
+ add x1,x1,#14*4 // advance input pointer
+ ldp w7,w8,[x0,#4*4]
+ add w20,w20,w3
+ ldp w9,w10,[x0,#6*4]
+ add w21,w21,w4
+ add w22,w22,w5
+ add w23,w23,w6
+ stp w20,w21,[x0]
+ add w24,w24,w7
+ add w25,w25,w8
+ stp w22,w23,[x0,#2*4]
+ add w26,w26,w9
+ add w27,w27,w10
+ cmp x1,x2
+ stp w24,w25,[x0,#4*4]
+ stp w26,w27,[x0,#6*4]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*4
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha256_block_data_order,.-sha256_block_data_order
+
+.align 6
+.type .LK256,%object
+.LK256:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .long 0 //terminator
+.size .LK256,.-.LK256
+#ifndef __KERNEL__
+.align 3
+.LOPENSSL_armcap_P:
+# ifdef __ILP32__
+ .long OPENSSL_armcap_P-.
+# else
+ .quad OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#ifndef __KERNEL__
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1 {v0.4s,v1.4s},[x0]
+ adr x3,.LK256
+
+.Loop_hw:
+ ld1 {v4.16b-v7.16b},[x1],#64
+ sub x2,x2,#1
+ ld1 {v16.4s},[x3],#16
+ rev32 v4.16b,v4.16b
+ rev32 v5.16b,v5.16b
+ rev32 v6.16b,v6.16b
+ rev32 v7.16b,v7.16b
+ orr v18.16b,v0.16b,v0.16b // offload
+ orr v19.16b,v1.16b,v1.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ ld1 {v17.4s},[x3]
+ add v16.4s,v16.4s,v6.4s
+ sub x3,x3,#64*4-16 // rewind
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ add v17.4s,v17.4s,v7.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ add v0.4s,v0.4s,v18.4s
+ add v1.4s,v1.4s,v19.4s
+
+ cbnz x2,.Loop_hw
+
+ st1 {v0.4s,v1.4s},[x0]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+#endif
+#ifdef __KERNEL__
+.globl sha256_block_neon
+#endif
+.type sha256_block_neon,%function
+.align 4
+sha256_block_neon:
+.Lneon_entry:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ sub sp,sp,#16*4
+
+ adr x16,.LK256
+ add x2,x1,x2,lsl#6 // len to point at the end of inp
+
+ ld1 {v0.16b},[x1], #16
+ ld1 {v1.16b},[x1], #16
+ ld1 {v2.16b},[x1], #16
+ ld1 {v3.16b},[x1], #16
+ ld1 {v4.4s},[x16], #16
+ ld1 {v5.4s},[x16], #16
+ ld1 {v6.4s},[x16], #16
+ ld1 {v7.4s},[x16], #16
+ rev32 v0.16b,v0.16b // yes, even on
+ rev32 v1.16b,v1.16b // big-endian
+ rev32 v2.16b,v2.16b
+ rev32 v3.16b,v3.16b
+ mov x17,sp
+ add v4.4s,v4.4s,v0.4s
+ add v5.4s,v5.4s,v1.4s
+ add v6.4s,v6.4s,v2.4s
+ st1 {v4.4s-v5.4s},[x17], #32
+ add v7.4s,v7.4s,v3.4s
+ st1 {v6.4s-v7.4s},[x17]
+ sub x17,x17,#32
+
+ ldp w3,w4,[x0]
+ ldp w5,w6,[x0,#8]
+ ldp w7,w8,[x0,#16]
+ ldp w9,w10,[x0,#24]
+ ldr w12,[sp,#0]
+ mov w13,wzr
+ eor w14,w4,w5
+ mov w15,wzr
+ b .L_00_48
+
+.align 4
+.L_00_48:
+ ext v4.16b,v0.16b,v1.16b,#4
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ bic w15,w9,w7
+ ext v7.16b,v2.16b,v3.16b,#4
+ eor w11,w7,w7,ror#5
+ add w3,w3,w13
+ mov d19,v3.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w3,w3,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w10,w10,w12
+ add v0.4s,v0.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w10,w10,w11
+ ldr w12,[sp,#4]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w6,w6,w10
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w4
+ ushr v16.4s,v19.4s,#17
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ sli v16.4s,v19.4s,#15
+ add w10,w10,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w9,w9,w12
+ ror w11,w11,#6
+ add v0.4s,v0.4s,v5.4s
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ sli v7.4s,v19.4s,#13
+ add w9,w9,w11
+ ldr w12,[sp,#8]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ eor v17.16b,v17.16b,v7.16b
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ add v0.4s,v0.4s,v17.4s
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ ushr v18.4s,v0.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v0.4s,#10
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ sli v18.4s,v0.4s,#15
+ add w8,w8,w12
+ ushr v17.4s,v0.4s,#19
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ sli v17.4s,v0.4s,#13
+ ldr w12,[sp,#12]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w4,w4,w8
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w10
+ eor v17.16b,v17.16b,v17.16b
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ mov v17.d[1],v19.d[0]
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ add v0.4s,v0.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add v4.4s,v4.4s,v0.4s
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v1.16b,v2.16b,#4
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ bic w15,w5,w3
+ ext v7.16b,v3.16b,v0.16b,#4
+ eor w11,w3,w3,ror#5
+ add w7,w7,w13
+ mov d19,v0.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w7,w7,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w6,w6,w12
+ add v1.4s,v1.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w6,w6,w11
+ ldr w12,[sp,#20]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w10,w10,w6
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w8
+ ushr v16.4s,v19.4s,#17
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ sli v16.4s,v19.4s,#15
+ add w6,w6,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w5,w5,w12
+ ror w11,w11,#6
+ add v1.4s,v1.4s,v5.4s
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ sli v7.4s,v19.4s,#13
+ add w5,w5,w11
+ ldr w12,[sp,#24]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ eor v17.16b,v17.16b,v7.16b
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ add v1.4s,v1.4s,v17.4s
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ ushr v18.4s,v1.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v1.4s,#10
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ sli v18.4s,v1.4s,#15
+ add w4,w4,w12
+ ushr v17.4s,v1.4s,#19
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ sli v17.4s,v1.4s,#13
+ ldr w12,[sp,#28]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w8,w8,w4
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w6
+ eor v17.16b,v17.16b,v17.16b
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ mov v17.d[1],v19.d[0]
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ add v1.4s,v1.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add v4.4s,v4.4s,v1.4s
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[sp,#32]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v2.16b,v3.16b,#4
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ bic w15,w9,w7
+ ext v7.16b,v0.16b,v1.16b,#4
+ eor w11,w7,w7,ror#5
+ add w3,w3,w13
+ mov d19,v1.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w3,w3,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w10,w10,w12
+ add v2.4s,v2.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w10,w10,w11
+ ldr w12,[sp,#36]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w6,w6,w10
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w4
+ ushr v16.4s,v19.4s,#17
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ sli v16.4s,v19.4s,#15
+ add w10,w10,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w9,w9,w12
+ ror w11,w11,#6
+ add v2.4s,v2.4s,v5.4s
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ sli v7.4s,v19.4s,#13
+ add w9,w9,w11
+ ldr w12,[sp,#40]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ eor v17.16b,v17.16b,v7.16b
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ add v2.4s,v2.4s,v17.4s
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ ushr v18.4s,v2.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v2.4s,#10
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ sli v18.4s,v2.4s,#15
+ add w8,w8,w12
+ ushr v17.4s,v2.4s,#19
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ sli v17.4s,v2.4s,#13
+ ldr w12,[sp,#44]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w4,w4,w8
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w10
+ eor v17.16b,v17.16b,v17.16b
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ mov v17.d[1],v19.d[0]
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ add v2.4s,v2.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add v4.4s,v4.4s,v2.4s
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#48]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ ext v4.16b,v3.16b,v0.16b,#4
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ bic w15,w5,w3
+ ext v7.16b,v1.16b,v2.16b,#4
+ eor w11,w3,w3,ror#5
+ add w7,w7,w13
+ mov d19,v2.d[1]
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ ushr v6.4s,v4.4s,#7
+ eor w15,w7,w7,ror#11
+ ushr v5.4s,v4.4s,#3
+ add w6,w6,w12
+ add v3.4s,v3.4s,v7.4s
+ ror w11,w11,#6
+ sli v6.4s,v4.4s,#25
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ ushr v7.4s,v4.4s,#18
+ add w6,w6,w11
+ ldr w12,[sp,#52]
+ and w14,w14,w13
+ eor v5.16b,v5.16b,v6.16b
+ ror w15,w15,#2
+ add w10,w10,w6
+ sli v7.4s,v4.4s,#14
+ eor w14,w14,w8
+ ushr v16.4s,v19.4s,#17
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ eor v5.16b,v5.16b,v7.16b
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ sli v16.4s,v19.4s,#15
+ add w6,w6,w14
+ orr w12,w12,w15
+ ushr v17.4s,v19.4s,#10
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ ushr v7.4s,v19.4s,#19
+ add w5,w5,w12
+ ror w11,w11,#6
+ add v3.4s,v3.4s,v5.4s
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ sli v7.4s,v19.4s,#13
+ add w5,w5,w11
+ ldr w12,[sp,#56]
+ and w13,w13,w14
+ eor v17.16b,v17.16b,v16.16b
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ eor v17.16b,v17.16b,v7.16b
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ add v3.4s,v3.4s,v17.4s
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ ushr v18.4s,v3.4s,#17
+ orr w12,w12,w15
+ ushr v19.4s,v3.4s,#10
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ sli v18.4s,v3.4s,#15
+ add w4,w4,w12
+ ushr v17.4s,v3.4s,#19
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor v19.16b,v19.16b,v18.16b
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ sli v17.4s,v3.4s,#13
+ ldr w12,[sp,#60]
+ and w14,w14,w13
+ ror w15,w15,#2
+ ld1 {v4.4s},[x16], #16
+ add w8,w8,w4
+ eor v19.16b,v19.16b,v17.16b
+ eor w14,w14,w6
+ eor v17.16b,v17.16b,v17.16b
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ mov v17.d[1],v19.d[0]
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ add v3.4s,v3.4s,v17.4s
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add v4.4s,v4.4s,v3.4s
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[x16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ cmp w12,#0 // check for K256 terminator
+ ldr w12,[sp,#0]
+ sub x17,x17,#64
+ bne .L_00_48
+
+ sub x16,x16,#256 // rewind x16
+ cmp x1,x2
+ mov x17, #64
+ csel x17, x17, xzr, eq
+ sub x1,x1,x17 // avoid SEGV
+ mov x17,sp
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ ld1 {v0.16b},[x1],#16
+ bic w15,w9,w7
+ eor w11,w7,w7,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w3,w3,w13
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ eor w15,w3,w3,ror#11
+ rev32 v0.16b,v0.16b
+ add w10,w10,w12
+ ror w11,w11,#6
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ add v4.4s,v4.4s,v0.4s
+ add w10,w10,w11
+ ldr w12,[sp,#4]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w6,w6,w10
+ eor w14,w14,w4
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ add w10,w10,w14
+ orr w12,w12,w15
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ add w9,w9,w12
+ ror w11,w11,#6
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ add w9,w9,w11
+ ldr w12,[sp,#8]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ orr w12,w12,w15
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ add w8,w8,w12
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ ldr w12,[sp,#12]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w4,w4,w8
+ eor w14,w14,w10
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#16]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ ld1 {v1.16b},[x1],#16
+ bic w15,w5,w3
+ eor w11,w3,w3,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w7,w7,w13
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ eor w15,w7,w7,ror#11
+ rev32 v1.16b,v1.16b
+ add w6,w6,w12
+ ror w11,w11,#6
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ add v4.4s,v4.4s,v1.4s
+ add w6,w6,w11
+ ldr w12,[sp,#20]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w10,w10,w6
+ eor w14,w14,w8
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ add w6,w6,w14
+ orr w12,w12,w15
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ add w5,w5,w12
+ ror w11,w11,#6
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ add w5,w5,w11
+ ldr w12,[sp,#24]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ orr w12,w12,w15
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ add w4,w4,w12
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ ldr w12,[sp,#28]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w8,w8,w4
+ eor w14,w14,w6
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ ldr w12,[sp,#32]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ add w10,w10,w12
+ add w3,w3,w15
+ and w12,w8,w7
+ ld1 {v2.16b},[x1],#16
+ bic w15,w9,w7
+ eor w11,w7,w7,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w3,w3,w13
+ orr w12,w12,w15
+ eor w11,w11,w7,ror#19
+ eor w15,w3,w3,ror#11
+ rev32 v2.16b,v2.16b
+ add w10,w10,w12
+ ror w11,w11,#6
+ eor w13,w3,w4
+ eor w15,w15,w3,ror#20
+ add v4.4s,v4.4s,v2.4s
+ add w10,w10,w11
+ ldr w12,[sp,#36]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w6,w6,w10
+ eor w14,w14,w4
+ add w9,w9,w12
+ add w10,w10,w15
+ and w12,w7,w6
+ bic w15,w8,w6
+ eor w11,w6,w6,ror#5
+ add w10,w10,w14
+ orr w12,w12,w15
+ eor w11,w11,w6,ror#19
+ eor w15,w10,w10,ror#11
+ add w9,w9,w12
+ ror w11,w11,#6
+ eor w14,w10,w3
+ eor w15,w15,w10,ror#20
+ add w9,w9,w11
+ ldr w12,[sp,#40]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w5,w5,w9
+ eor w13,w13,w3
+ add w8,w8,w12
+ add w9,w9,w15
+ and w12,w6,w5
+ bic w15,w7,w5
+ eor w11,w5,w5,ror#5
+ add w9,w9,w13
+ orr w12,w12,w15
+ eor w11,w11,w5,ror#19
+ eor w15,w9,w9,ror#11
+ add w8,w8,w12
+ ror w11,w11,#6
+ eor w13,w9,w10
+ eor w15,w15,w9,ror#20
+ add w8,w8,w11
+ ldr w12,[sp,#44]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w4,w4,w8
+ eor w14,w14,w10
+ add w7,w7,w12
+ add w8,w8,w15
+ and w12,w5,w4
+ bic w15,w6,w4
+ eor w11,w4,w4,ror#5
+ add w8,w8,w14
+ orr w12,w12,w15
+ eor w11,w11,w4,ror#19
+ eor w15,w8,w8,ror#11
+ add w7,w7,w12
+ ror w11,w11,#6
+ eor w14,w8,w9
+ eor w15,w15,w8,ror#20
+ add w7,w7,w11
+ ldr w12,[sp,#48]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w3,w3,w7
+ eor w13,w13,w9
+ st1 {v4.4s},[x17], #16
+ add w6,w6,w12
+ add w7,w7,w15
+ and w12,w4,w3
+ ld1 {v3.16b},[x1],#16
+ bic w15,w5,w3
+ eor w11,w3,w3,ror#5
+ ld1 {v4.4s},[x16],#16
+ add w7,w7,w13
+ orr w12,w12,w15
+ eor w11,w11,w3,ror#19
+ eor w15,w7,w7,ror#11
+ rev32 v3.16b,v3.16b
+ add w6,w6,w12
+ ror w11,w11,#6
+ eor w13,w7,w8
+ eor w15,w15,w7,ror#20
+ add v4.4s,v4.4s,v3.4s
+ add w6,w6,w11
+ ldr w12,[sp,#52]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w10,w10,w6
+ eor w14,w14,w8
+ add w5,w5,w12
+ add w6,w6,w15
+ and w12,w3,w10
+ bic w15,w4,w10
+ eor w11,w10,w10,ror#5
+ add w6,w6,w14
+ orr w12,w12,w15
+ eor w11,w11,w10,ror#19
+ eor w15,w6,w6,ror#11
+ add w5,w5,w12
+ ror w11,w11,#6
+ eor w14,w6,w7
+ eor w15,w15,w6,ror#20
+ add w5,w5,w11
+ ldr w12,[sp,#56]
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w9,w9,w5
+ eor w13,w13,w7
+ add w4,w4,w12
+ add w5,w5,w15
+ and w12,w10,w9
+ bic w15,w3,w9
+ eor w11,w9,w9,ror#5
+ add w5,w5,w13
+ orr w12,w12,w15
+ eor w11,w11,w9,ror#19
+ eor w15,w5,w5,ror#11
+ add w4,w4,w12
+ ror w11,w11,#6
+ eor w13,w5,w6
+ eor w15,w15,w5,ror#20
+ add w4,w4,w11
+ ldr w12,[sp,#60]
+ and w14,w14,w13
+ ror w15,w15,#2
+ add w8,w8,w4
+ eor w14,w14,w6
+ add w3,w3,w12
+ add w4,w4,w15
+ and w12,w9,w8
+ bic w15,w10,w8
+ eor w11,w8,w8,ror#5
+ add w4,w4,w14
+ orr w12,w12,w15
+ eor w11,w11,w8,ror#19
+ eor w15,w4,w4,ror#11
+ add w3,w3,w12
+ ror w11,w11,#6
+ eor w14,w4,w5
+ eor w15,w15,w4,ror#20
+ add w3,w3,w11
+ and w13,w13,w14
+ ror w15,w15,#2
+ add w7,w7,w3
+ eor w13,w13,w5
+ st1 {v4.4s},[x17], #16
+ add w3,w3,w15 // h+=Sigma0(a) from the past
+ ldp w11,w12,[x0,#0]
+ add w3,w3,w13 // h+=Maj(a,b,c) from the past
+ ldp w13,w14,[x0,#8]
+ add w3,w3,w11 // accumulate
+ add w4,w4,w12
+ ldp w11,w12,[x0,#16]
+ add w5,w5,w13
+ add w6,w6,w14
+ ldp w13,w14,[x0,#24]
+ add w7,w7,w11
+ add w8,w8,w12
+ ldr w12,[sp,#0]
+ stp w3,w4,[x0,#0]
+ add w9,w9,w13
+ mov w13,wzr
+ stp w5,w6,[x0,#8]
+ add w10,w10,w14
+ stp w7,w8,[x0,#16]
+ eor w14,w4,w5
+ stp w9,w10,[x0,#24]
+ mov w15,wzr
+ mov x17,sp
+ b.ne .L_00_48
+
+ ldr x29,[x29]
+ add sp,sp,#16*4+16
+ ret
+.size sha256_block_neon,.-sha256_block_neon
+#ifndef __KERNEL__
+.comm OPENSSL_armcap_P,4,4
+#endif
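
The unrolled rounds above follow the standard SHA-256 compression function; the per-instruction comments name its pieces (Ch, Maj, Sigma0/Sigma1 for the scalar rounds, sigma0/sigma1 for the NEON message schedule). As a point of reference, one scalar round can be restated in plain C roughly as below. This is a minimal sketch assuming the usual FIPS 180-4 definitions; the helper name and layout are illustrative and not part of this patch.

/* One SHA-256 round, matching the per-round comments in the assembly
 * above (Ch, Maj, Sigma0, Sigma1). Illustrative helper only.
 */
#include <stdint.h>

#define ROR32(x, n)  (((x) >> (n)) | ((x) << (32 - (n))))

static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

	uint32_t Sigma1 = ROR32(e, 6) ^ ROR32(e, 11) ^ ROR32(e, 25);
	uint32_t Ch     = (e & f) ^ (~e & g);          /* and/bic/orr sequence above   */
	uint32_t Sigma0 = ROR32(a, 2) ^ ROR32(a, 13) ^ ROR32(a, 22);
	uint32_t Maj    = (a & b) ^ (a & c) ^ (b & c); /* done incrementally above via */
	                                               /* the (b^c)&(a^b) trick        */
	uint32_t t1 = h + Sigma1 + Ch + k + w;         /* h += K[i] + X[i] + ...       */
	uint32_t t2 = Sigma0 + Maj;

	s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;   /* d += h                       */
	s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;  /* h += Sigma0(a) + Maj(a,b,c)  */
}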
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
new file mode 100644
index 000000000000..a2226f841960
--- /dev/null
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -0,0 +1,185 @@
+/*
+ * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
+ *
+ * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
+MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+
+asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+ unsigned int num_blks);
+
+asmlinkage void sha256_block_neon(u32 *digest, const void *data,
+ unsigned int num_blks);
+
+static int sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order);
+
+ return sha256_base_finish(desc, out);
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+ return sha256_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_base_init,
+ .update = sha256_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-arm64",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_base_init,
+ .update = sha256_update,
+ .final = sha256_final,
+ .finup = sha256_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-arm64",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ /*
+ * Stacking and unstacking a substantial slice of the NEON register
+ * file may significantly affect performance for small updates when
+ * executing in interrupt context, so fall back to the scalar code
+ * in that case.
+ */
+ if (!may_use_simd())
+ return sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+
+ kernel_neon_begin();
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_neon);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd()) {
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order);
+ } else {
+ kernel_neon_begin();
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_neon);
+ kernel_neon_end();
+ }
+ return sha256_base_finish(desc, out);
+}
+
+static int sha256_final_neon(struct shash_desc *desc, u8 *out)
+{
+ return sha256_finup_neon(desc, NULL, 0, out);
+}
+
+static struct shash_alg neon_algs[] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_base_init,
+ .update = sha256_update_neon,
+ .final = sha256_final_neon,
+ .finup = sha256_finup_neon,
+ .descsize = sizeof(struct sha256_state),
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-arm64-neon",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_base_init,
+ .update = sha256_update_neon,
+ .final = sha256_final_neon,
+ .finup = sha256_finup_neon,
+ .descsize = sizeof(struct sha256_state),
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-arm64-neon",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init sha256_mod_init(void)
+{
+ int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret)
+ return ret;
+
+ if (elf_hwcap & HWCAP_ASIMD) {
+ ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+ if (ret)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ }
+ return ret;
+}
+
+static void __exit sha256_mod_fini(void)
+{
+ if (elf_hwcap & HWCAP_ASIMD)
+ crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha256_mod_init);
+module_exit(sha256_mod_fini);
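
The scalar and NEON variants above are registered under the same algorithm names ("sha256"/"sha224") but with different cra_priority values (100 vs. 150), so a by-name lookup resolves to the NEON driver whenever it was registered, i.e. whenever HWCAP_ASIMD is set. A minimal, hypothetical caller-side sketch of such a lookup through the synchronous shash API follows; the function name and error handling are illustrative and not part of this patch.

/* Illustrative only: how an in-kernel user ends up on the
 * highest-priority "sha256" implementation registered above.
 */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int sha256_digest_example(const u8 *data, unsigned int len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);  /* highest cra_priority wins */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;  /* older kernels also want desc->flags set;
		                   * omitted in this sketch */
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}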
diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl
new file mode 100644
index 000000000000..c55efb308544
--- /dev/null
+++ b/arch/arm64/crypto/sha512-armv8.pl
@@ -0,0 +1,778 @@
+#! /usr/bin/env perl
+# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPLv2 terms is granted.
+# ====================================================================
+#
+# SHA256/512 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+# SHA256-hw SHA256(*) SHA512
+# Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+# Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+# Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+# Denver 2.01 10.5 (+26%) 6.70 (+8%)
+# X-Gene 20.0 (+100%) 12.8 (+300%(***))
+# Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+#
+# (*) Software SHA256 results are of lesser relevance, presented
+# mostly for informational purposes.
+# (**) The result is a trade-off: it's possible to improve it by
+# 10% (or by 1 cycle per round), but at the cost of 20% loss
+# on Cortex-A53 (or by 4 cycles per round).
+# (***) Super-impressive coefficients over gcc-generated code are
+# an indication of some compiler "pathology"; most notably, code
+# generated with -mgeneral-regs-only is significantly faster
+# and the gap is only 40-90%.
+#
+# October 2016.
+#
+# Originally it was reckoned that it makes no sense to implement NEON
+# version of SHA256 for 64-bit processors. This is because performance
+# improvement on most wide-spread Cortex-A5x processors was observed
+# to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+# observed that 32-bit NEON SHA256 performs significantly better than
+# 64-bit scalar version on *some* of the more recent processors. As a
+# result, a 64-bit NEON version of SHA256 was added to provide the best
+# all-round performance. For example it executes ~30% faster on X-Gene
+# and Mongoose. [For reference, NEON version of SHA512 is bound to
+# deliver much less improvement, likely *negative* on Cortex-A5x.
+# Which is why NEON support is limited to SHA256.]
+
+$output=pop;
+$flavour=pop;
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open OUT,"| \"$^X\" $xlate $flavour $output";
+ *STDOUT=*OUT;
+} else {
+ open STDOUT,">$output";
+}
+
+if ($output =~ /512/) {
+ $BITS=512;
+ $SZ=8;
+ @Sigma0=(28,34,39);
+ @Sigma1=(14,18,41);
+ @sigma0=(1, 8, 7);
+ @sigma1=(19,61, 6);
+ $rounds=80;
+ $reg_t="x";
+} else {
+ $BITS=256;
+ $SZ=4;
+ @Sigma0=( 2,13,22);
+ @Sigma1=( 6,11,25);
+ @sigma0=( 7,18, 3);
+ @sigma1=(17,19,10);
+ $rounds=64;
+ $reg_t="w";
+}
+
+$func="sha${BITS}_block_data_order";
+
+($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
+
+@X=map("$reg_t$_",(3..15,0..2));
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
+($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
+
+sub BODY_00_xx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)&15;
+my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
+ $T0=@X[$i+3] if ($i<11);
+
+$code.=<<___ if ($i<16);
+#ifndef __AARCH64EB__
+ rev @X[$i],@X[$i] // $i
+#endif
+___
+$code.=<<___ if ($i<13 && ($i&1));
+ ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ
+___
+$code.=<<___ if ($i==13);
+ ldp @X[14],@X[15],[$inp]
+___
+$code.=<<___ if ($i>=14);
+ ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
+___
+$code.=<<___ if ($i>0 && $i<16);
+ add $a,$a,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=11);
+ str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
+___
+# While ARMv8 specifies merged rotate-n-logical operation such as
+# 'eor x,y,z,ror#n', it was found to negatively affect performance
+# on Apple A7. The reason seems to be that it requires even 'y' to
+# be available earlier. This means that such a merged instruction is
+# not necessarily the best choice on the critical path... On the other hand
+# Cortex-A5x handles merged instructions much better than disjoint
+# rotate and logical... See (**) footnote above.
+$code.=<<___ if ($i<15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
+ and $t1,$f,$e
+ bic $t2,$g,$e
+ add $h,$h,@X[$i&15] // h+=X[i]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e)
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
+ add $h,$h,$t0 // h+=Sigma1(e)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ add $d,$d,$h // d+=h
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a)
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ //add $h,$h,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ ror $T1,@X[($j+1)&15],#$sigma0[0]
+ and $t1,$f,$e
+ ror $T2,@X[($j+14)&15],#$sigma1[0]
+ bic $t2,$g,$e
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,@X[$i&15] // h+=X[i]
+ eor $t0,$t0,$e,ror#$Sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e)
+ eor $T0,$T0,$a,ror#$Sigma0[1]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1])
+ add $h,$h,$t0 // h+=Sigma1(e)
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a)
+ eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14])
+ add @X[$j],@X[$j],@X[($j+9)&15]
+ add $d,$d,$h // d+=h
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ add @X[$j],@X[$j],$T1
+ add $h,$h,$t1 // h+=Sigma0(a)
+ add @X[$j],@X[$j],$T2
+___
+ ($t2,$t3)=($t3,$t2);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern OPENSSL_armcap_P
+.globl $func
+.type $func,%function
+.align 6
+$func:
+___
+$code.=<<___ if ($SZ==4);
+#ifndef __KERNEL__
+# ifdef __ILP32__
+ ldrsw x16,.LOPENSSL_armcap_P
+# else
+ ldr x16,.LOPENSSL_armcap_P
+# endif
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+ tst w16,#ARMV7_NEON
+ b.ne .Lneon_entry
+#endif
+___
+$code.=<<___;
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*$SZ
+
+ ldp $A,$B,[$ctx] // load context
+ ldp $C,$D,[$ctx,#2*$SZ]
+ ldp $E,$F,[$ctx,#4*$SZ]
+ add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input
+ ldp $G,$H,[$ctx,#6*$SZ]
+ adr $Ktbl,.LK$BITS
+ stp $ctx,$num,[x29,#96]
+
+.Loop:
+ ldp @X[0],@X[1],[$inp],#2*$SZ
+ ldr $t2,[$Ktbl],#$SZ // *K++
+ eor $t3,$B,$C // magic seed
+ str $inp,[x29,#112]
+___
+for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=".Loop_16_xx:\n";
+for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ cbnz $t2,.Loop_16_xx
+
+ ldp $ctx,$num,[x29,#96]
+ ldr $inp,[x29,#112]
+ sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind
+
+ ldp @X[0],@X[1],[$ctx]
+ ldp @X[2],@X[3],[$ctx,#2*$SZ]
+ add $inp,$inp,#14*$SZ // advance input pointer
+ ldp @X[4],@X[5],[$ctx,#4*$SZ]
+ add $A,$A,@X[0]
+ ldp @X[6],@X[7],[$ctx,#6*$SZ]
+ add $B,$B,@X[1]
+ add $C,$C,@X[2]
+ add $D,$D,@X[3]
+ stp $A,$B,[$ctx]
+ add $E,$E,@X[4]
+ add $F,$F,@X[5]
+ stp $C,$D,[$ctx,#2*$SZ]
+ add $G,$G,@X[6]
+ add $H,$H,@X[7]
+ cmp $inp,$num
+ stp $E,$F,[$ctx,#4*$SZ]
+ stp $G,$H,[$ctx,#6*$SZ]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*$SZ
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size $func,.-$func
+
+.align 6
+.type .LK$BITS,%object
+.LK$BITS:
+___
+$code.=<<___ if ($SZ==8);
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+___
+$code.=<<___ if ($SZ==4);
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .long 0 //terminator
+___
+$code.=<<___;
+.size .LK$BITS,.-.LK$BITS
+#ifndef __KERNEL__
+.align 3
+.LOPENSSL_armcap_P:
+# ifdef __ILP32__
+ .long OPENSSL_armcap_P-.
+# else
+ .quad OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+if ($SZ==4) {
+my $Ktbl="x3";
+
+my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
+my @MSG=map("v$_.16b",(4..7));
+my ($W0,$W1)=("v16.4s","v17.4s");
+my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
+
+$code.=<<___;
+#ifndef __KERNEL__
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1.32 {$ABCD,$EFGH},[$ctx]
+ adr $Ktbl,.LK256
+
+.Loop_hw:
+ ld1 {@MSG[0]-@MSG[3]},[$inp],#64
+ sub $num,$num,#1
+ ld1.32 {$W0},[$Ktbl],#16
+ rev32 @MSG[0],@MSG[0]
+ rev32 @MSG[1],@MSG[1]
+ rev32 @MSG[2],@MSG[2]
+ rev32 @MSG[3],@MSG[3]
+ orr $ABCD_SAVE,$ABCD,$ABCD // offload
+ orr $EFGH_SAVE,$EFGH,$EFGH
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ ld1.32 {$W0},[$Ktbl],#16
+ add.i32 $W1,$W1,@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ ld1.32 {$W1},[$Ktbl]
+ add.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ add.i32 $W1,$W1,@MSG[3]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ add.i32 $ABCD,$ABCD,$ABCD_SAVE
+ add.i32 $EFGH,$EFGH,$EFGH_SAVE
+
+ cbnz $num,.Loop_hw
+
+ st1.32 {$ABCD,$EFGH},[$ctx]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+#endif
+___
+}
+
+if ($SZ==4) { ######################################### NEON stuff #
+# You'll surely note a lot of similarities with the sha256-armv4 module,
+# and of course it's not a coincidence. sha256-armv4 was used as the
+# initial template, but was adapted for the ARMv8 instruction set and
+# extensively re-tuned for all-round performance.
+
+my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10));
+my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15));
+my $Ktbl="x16";
+my $Xfer="x17";
+my @X = map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
+my $j=0;
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; }
+sub Dlo { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; }
+sub Dhi { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; }
+
+sub Xupdate()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ &ext_8 ($T0,@X[0],@X[1],4); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ext_8 ($T3,@X[2],@X[3],4); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &mov (&Dscalar($T7),&Dhi(@X[3])); # X[14..15]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T2,$T0,$sigma0[0]);
+ eval(shift(@insns));
+ &ushr_32 ($T1,$T0,$sigma0[2]);
+ eval(shift(@insns));
+ &add_32 (@X[0],@X[0],$T3); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ &sli_32 ($T2,$T0,32-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T3,$T0,$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T1,$T1,$T2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &sli_32 ($T3,$T0,32-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T4,$T7,$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T1,$T1,$T3); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &sli_32 ($T4,$T7,32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T5,$T7,$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T3,$T7,$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &add_32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &sli_u32 ($T3,$T7,32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T5,$T5,$T3); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &add_32 (@X[0],@X[0],$T5); # X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ushr_32 ($T6,@X[0],$sigma1[0]);
+ eval(shift(@insns));
+ &ushr_32 ($T7,@X[0],$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &sli_32 ($T6,@X[0],32-$sigma1[0]);
+ eval(shift(@insns));
+ &ushr_32 ($T5,@X[0],$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T7,$T7,$T6);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &sli_32 ($T5,@X[0],32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ld1_32 ("{$T0}","[$Ktbl], #16");
+ eval(shift(@insns));
+ &eor_8 ($T7,$T7,$T5); # sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &eor_8 ($T5,$T5,$T5);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &mov (&Dhi($T5), &Dlo($T7));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &add_32 (@X[0],@X[0],$T5); # X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &add_32 ($T0,$T0,@X[0]);
+ while($#insns>=1) { eval(shift(@insns)); }
+ &st1_32 ("{$T0}","[$Xfer], #16");
+ eval(shift(@insns));
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ld1_8 ("{@X[0]}","[$inp],#16");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &ld1_32 ("{$T0}","[$Ktbl],#16");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &rev32 (@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &add_32 ($T0,$T0,@X[0]);
+ foreach (@insns) { eval; } # remaining instructions
+ &st1_32 ("{$T0}","[$Xfer], #16");
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub body_00_15 () {
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+ '&add ($h,$h,$t1)', # h+=X[i]+K[i]
+ '&add ($a,$a,$t4);'. # h+=Sigma0(a) from the past
+ '&and ($t1,$f,$e)',
+ '&bic ($t4,$g,$e)',
+ '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+ '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past
+ '&orr ($t1,$t1,$t4)', # Ch(e,f,g)
+ '&eor ($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e)
+ '&eor ($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+ '&add ($h,$h,$t1)', # h+=Ch(e,f,g)
+ '&ror ($t0,$t0,"#$Sigma1[0]")',
+ '&eor ($t2,$a,$b)', # a^b, b^c in next round
+ '&eor ($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a)
+ '&add ($h,$h,$t0)', # h+=Sigma1(e)
+ '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'.
+ '&ldr ($t1,"[$Ktbl]") if ($j==15);'.
+ '&and ($t3,$t3,$t2)', # (b^c)&=(a^b)
+ '&ror ($t4,$t4,"#$Sigma0[0]")',
+ '&add ($d,$d,$h)', # d+=h
+ '&eor ($t3,$t3,$b)', # Maj(a,b,c)
+ '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+ )
+}
+
+$code.=<<___;
+#ifdef __KERNEL__
+.globl sha256_block_neon
+#endif
+.type sha256_block_neon,%function
+.align 4
+sha256_block_neon:
+.Lneon_entry:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ sub sp,sp,#16*4
+
+ adr $Ktbl,.LK256
+ add $num,$inp,$num,lsl#6 // len to point at the end of inp
+
+ ld1.8 {@X[0]},[$inp], #16
+ ld1.8 {@X[1]},[$inp], #16
+ ld1.8 {@X[2]},[$inp], #16
+ ld1.8 {@X[3]},[$inp], #16
+ ld1.32 {$T0},[$Ktbl], #16
+ ld1.32 {$T1},[$Ktbl], #16
+ ld1.32 {$T2},[$Ktbl], #16
+ ld1.32 {$T3},[$Ktbl], #16
+ rev32 @X[0],@X[0] // yes, even on
+ rev32 @X[1],@X[1] // big-endian
+ rev32 @X[2],@X[2]
+ rev32 @X[3],@X[3]
+ mov $Xfer,sp
+ add.32 $T0,$T0,@X[0]
+ add.32 $T1,$T1,@X[1]
+ add.32 $T2,$T2,@X[2]
+ st1.32 {$T0-$T1},[$Xfer], #32
+ add.32 $T3,$T3,@X[3]
+ st1.32 {$T2-$T3},[$Xfer]
+ sub $Xfer,$Xfer,#32
+
+ ldp $A,$B,[$ctx]
+ ldp $C,$D,[$ctx,#8]
+ ldp $E,$F,[$ctx,#16]
+ ldp $G,$H,[$ctx,#24]
+ ldr $t1,[sp,#0]
+ mov $t2,wzr
+ eor $t3,$B,$C
+ mov $t4,wzr
+ b .L_00_48
+
+.align 4
+.L_00_48:
+___
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+$code.=<<___;
+ cmp $t1,#0 // check for K256 terminator
+ ldr $t1,[sp,#0]
+ sub $Xfer,$Xfer,#64
+ bne .L_00_48
+
+ sub $Ktbl,$Ktbl,#256 // rewind $Ktbl
+ cmp $inp,$num
+ mov $Xfer, #64
+ csel $Xfer, $Xfer, xzr, eq
+ sub $inp,$inp,$Xfer // avoid SEGV
+ mov $Xfer,sp
+___
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+$code.=<<___;
+ add $A,$A,$t4 // h+=Sigma0(a) from the past
+ ldp $t0,$t1,[$ctx,#0]
+ add $A,$A,$t2 // h+=Maj(a,b,c) from the past
+ ldp $t2,$t3,[$ctx,#8]
+ add $A,$A,$t0 // accumulate
+ add $B,$B,$t1
+ ldp $t0,$t1,[$ctx,#16]
+ add $C,$C,$t2
+ add $D,$D,$t3
+ ldp $t2,$t3,[$ctx,#24]
+ add $E,$E,$t0
+ add $F,$F,$t1
+ ldr $t1,[sp,#0]
+ stp $A,$B,[$ctx,#0]
+ add $G,$G,$t2
+ mov $t2,wzr
+ stp $C,$D,[$ctx,#8]
+ add $H,$H,$t3
+ stp $E,$F,[$ctx,#16]
+ eor $t3,$B,$C
+ stp $G,$H,[$ctx,#24]
+ mov $t4,wzr
+ mov $Xfer,sp
+ b.ne .L_00_48
+
+ ldr x29,[x29]
+ add sp,sp,#16*4+16
+ ret
+.size sha256_block_neon,.-sha256_block_neon
+___
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+{ my %opcode = (
+ "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000,
+ "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+ &&
+ sprintf ".inst\t0x%08x\t//%s %s",
+ $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+ $mnemonic,$arg;
+ }
+}
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/\/\// and !/^$/);
+ print;
+}
+close SELF;
+
+foreach(split("\n",$code)) {
+
+ s/\`([^\`]*)\`/eval($1)/ge;
+
+ s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;
+
+ s/\bq([0-9]+)\b/v$1.16b/g; # old->new registers
+
+ s/\.[ui]?8(\s)/$1/;
+ s/\.\w?32\b// and s/\.16b/\.4s/g;
+ m/(ld|st)1[^\[]+\[0\]/ and s/\.4s/\.s/g;
+
+ print $_,"\n";
+}
+
+close STDOUT;
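
The %opcode table and unsha256() above turn the sha256h/sha256h2/sha256su0/sha256su1 mnemonics into raw .inst words, so the generated file assembles even with toolchains that lack the crypto-extension mnemonics; the register numbers are packed as Rd | (Rn << 5) | (Rm << 16) on top of the base opcode. A small stand-alone C check (illustrative only, not part of this patch) reproduces one of the words found in the shipped sha256-core.S:

/* Quick check of the .inst encoding produced by unsha256() above:
 * opcode | Rd | (Rn << 5) | (Rm << 16). Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t enc(uint32_t opcode, unsigned rd, unsigned rn, unsigned rm)
{
	return opcode | rd | (rn << 5) | (rm << 16);
}

int main(void)
{
	/* sha256h v0.16b,v1.16b,v16.4s -> 0x5e104020, as emitted in the
	 * shipped sha256-core.S file.
	 */
	printf(".inst 0x%08x\n", enc(0x5e004000, 0, 1, 16));
	return 0;
}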
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped
new file mode 100644
index 000000000000..bd0f59f06c9d
--- /dev/null
+++ b/arch/arm64/crypto/sha512-core.S_shipped
@@ -0,0 +1,1085 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+// SHA256-hw SHA256(*) SHA512
+// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+//
+// (*) Software SHA256 results are of lesser relevance, presented
+// mostly for informational purposes.
+// (**) The result is a trade-off: it's possible to improve it by
+// 10% (or by 1 cycle per round), but at the cost of 20% loss
+// on Cortex-A53 (or by 4 cycles per round).
+// (***) Super-impressive coefficients over gcc-generated code are
+// an indication of some compiler "pathology"; most notably, code
+// generated with -mgeneral-regs-only is significantly faster
+// and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As a
+// result, a 64-bit NEON version of SHA256 was added to provide the best
+// all-round performance. For example it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern OPENSSL_armcap_P
+.globl sha512_block_data_order
+.type sha512_block_data_order,%function
+.align 6
+sha512_block_data_order:
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*8
+
+ ldp x20,x21,[x0] // load context
+ ldp x22,x23,[x0,#2*8]
+ ldp x24,x25,[x0,#4*8]
+ add x2,x1,x2,lsl#7 // end of input
+ ldp x26,x27,[x0,#6*8]
+ adr x30,.LK512
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp x3,x4,[x1],#2*8
+ ldr x19,[x30],#8 // *K++
+ eor x28,x21,x22 // magic seed
+ str x1,[x29,#112]
+#ifndef __AARCH64EB__
+ rev x3,x3 // 0
+#endif
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x6,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x3 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x4,x4 // 1
+#endif
+ ldp x5,x6,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x7,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x4 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x5,x5 // 2
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x8,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x5 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x6,x6 // 3
+#endif
+ ldp x7,x8,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x9,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x6 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x7,x7 // 4
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x10,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x7 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x10,ror#18 // Sigma1(e)
+ ror x10,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x10,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x8,x8 // 5
+#endif
+ ldp x9,x10,[x1],#2*8
+ add x23,x23,x17 // h+=Sigma0(a)
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x11,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x8 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x11,ror#18 // Sigma1(e)
+ ror x11,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x11,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x9,x9 // 6
+#endif
+ add x22,x22,x17 // h+=Sigma0(a)
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x12,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x9 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x12,ror#18 // Sigma1(e)
+ ror x12,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x12,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x10,x10 // 7
+#endif
+ ldp x11,x12,[x1],#2*8
+ add x21,x21,x17 // h+=Sigma0(a)
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ eor x13,x25,x25,ror#23
+ and x17,x26,x25
+ bic x28,x27,x25
+ add x20,x20,x10 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x13,ror#18 // Sigma1(e)
+ ror x13,x21,#28
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ eor x17,x21,x21,ror#5
+ add x20,x20,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x24,x24,x20 // d+=h
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x13,x17,ror#34 // Sigma0(a)
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x20,x20,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x11,x11 // 8
+#endif
+ add x20,x20,x17 // h+=Sigma0(a)
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x14,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x11 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x14,ror#18 // Sigma1(e)
+ ror x14,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x14,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x12,x12 // 9
+#endif
+ ldp x13,x14,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x15,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x12 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x15,ror#18 // Sigma1(e)
+ ror x15,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x15,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x13,x13 // 10
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x0,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x13 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x0,ror#18 // Sigma1(e)
+ ror x0,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x0,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x14,x14 // 11
+#endif
+ ldp x15,x0,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x6,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x14 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x15,x15 // 12
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x7,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x15 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x0,x0 // 13
+#endif
+ ldp x1,x2,[x1]
+ add x23,x23,x17 // h+=Sigma0(a)
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x8,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x0 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x1,x1 // 14
+#endif
+ ldr x6,[sp,#24]
+ add x22,x22,x17 // h+=Sigma0(a)
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x9,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x1 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __AARCH64EB__
+ rev x2,x2 // 15
+#endif
+ ldr x7,[sp,#0]
+ add x21,x21,x17 // h+=Sigma0(a)
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+.Loop_16_xx:
+ ldr x8,[sp,#8]
+ str x11,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x10,x5,#1
+ and x17,x25,x24
+ ror x9,x2,#19
+ bic x19,x26,x24
+ ror x11,x20,#28
+ add x27,x27,x3 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x10,x10,x5,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x11,x11,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x9,x9,x2,ror#61
+ eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x11,x20,ror#39 // Sigma0(a)
+ eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
+ add x4,x4,x13
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x4,x4,x10
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x4,x4,x9
+ ldr x9,[sp,#16]
+ str x12,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x11,x6,#1
+ and x17,x24,x23
+ ror x10,x3,#19
+ bic x28,x25,x23
+ ror x12,x27,#28
+ add x26,x26,x4 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x11,x11,x6,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x12,x12,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x10,x10,x3,ror#61
+ eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x12,x27,ror#39 // Sigma0(a)
+ eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
+ add x5,x5,x14
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x5,x5,x11
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x5,x5,x10
+ ldr x10,[sp,#24]
+ str x13,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x12,x7,#1
+ and x17,x23,x22
+ ror x11,x4,#19
+ bic x19,x24,x22
+ ror x13,x26,#28
+ add x25,x25,x5 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x12,x12,x7,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x13,x13,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x11,x11,x4,ror#61
+ eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x13,x26,ror#39 // Sigma0(a)
+ eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
+ add x6,x6,x15
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x6,x6,x12
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x6,x6,x11
+ ldr x11,[sp,#0]
+ str x14,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x13,x8,#1
+ and x17,x22,x21
+ ror x12,x5,#19
+ bic x28,x23,x21
+ ror x14,x25,#28
+ add x24,x24,x6 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x13,x13,x8,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x14,x14,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x12,x12,x5,ror#61
+ eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x14,x25,ror#39 // Sigma0(a)
+ eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
+ add x7,x7,x0
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x7,x7,x13
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x7,x7,x12
+ ldr x12,[sp,#8]
+ str x15,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x14,x9,#1
+ and x17,x21,x20
+ ror x13,x6,#19
+ bic x19,x22,x20
+ ror x15,x24,#28
+ add x23,x23,x7 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x14,x14,x9,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x15,x15,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x13,x13,x6,ror#61
+ eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x15,x24,ror#39 // Sigma0(a)
+ eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
+ add x8,x8,x1
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x8,x8,x14
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x8,x8,x13
+ ldr x13,[sp,#16]
+ str x0,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x15,x10,#1
+ and x17,x20,x27
+ ror x14,x7,#19
+ bic x28,x21,x27
+ ror x0,x23,#28
+ add x22,x22,x8 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x15,x15,x10,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x0,x0,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x14,x14,x7,ror#61
+ eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x0,x23,ror#39 // Sigma0(a)
+ eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
+ add x9,x9,x2
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x9,x9,x15
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x9,x9,x14
+ ldr x14,[sp,#24]
+ str x1,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x0,x11,#1
+ and x17,x27,x26
+ ror x15,x8,#19
+ bic x19,x20,x26
+ ror x1,x22,#28
+ add x21,x21,x9 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x0,x0,x11,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x1,x1,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x15,x15,x8,ror#61
+ eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x1,x22,ror#39 // Sigma0(a)
+ eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
+ add x10,x10,x3
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x10,x10,x0
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x10,x10,x15
+ ldr x15,[sp,#0]
+ str x2,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x1,x12,#1
+ and x17,x26,x25
+ ror x0,x9,#19
+ bic x28,x27,x25
+ ror x2,x21,#28
+ add x20,x20,x10 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x1,x1,x12,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x2,x2,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x0,x0,x9,ror#61
+ eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x2,x21,ror#39 // Sigma0(a)
+ eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
+ add x11,x11,x4
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x11,x11,x1
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x11,x11,x0
+ ldr x0,[sp,#8]
+ str x3,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x2,x13,#1
+ and x17,x25,x24
+ ror x1,x10,#19
+ bic x19,x26,x24
+ ror x3,x20,#28
+ add x27,x27,x11 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x2,x2,x13,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x3,x3,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x1,x1,x10,ror#61
+ eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x3,x20,ror#39 // Sigma0(a)
+ eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
+ add x12,x12,x5
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x12,x12,x2
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x12,x12,x1
+ ldr x1,[sp,#16]
+ str x4,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x3,x14,#1
+ and x17,x24,x23
+ ror x2,x11,#19
+ bic x28,x25,x23
+ ror x4,x27,#28
+ add x26,x26,x12 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x3,x3,x14,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x4,x4,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x2,x2,x11,ror#61
+ eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x4,x27,ror#39 // Sigma0(a)
+ eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
+ add x13,x13,x6
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x13,x13,x3
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x13,x13,x2
+ ldr x2,[sp,#24]
+ str x5,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x4,x15,#1
+ and x17,x23,x22
+ ror x3,x12,#19
+ bic x19,x24,x22
+ ror x5,x26,#28
+ add x25,x25,x13 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x4,x4,x15,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x5,x5,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x3,x3,x12,ror#61
+ eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x5,x26,ror#39 // Sigma0(a)
+ eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
+ add x14,x14,x7
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x14,x14,x4
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x14,x14,x3
+ ldr x3,[sp,#0]
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x5,x0,#1
+ and x17,x22,x21
+ ror x4,x13,#19
+ bic x28,x23,x21
+ ror x6,x25,#28
+ add x24,x24,x14 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x5,x5,x0,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x6,x6,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x4,x4,x13,ror#61
+ eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x25,ror#39 // Sigma0(a)
+ eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
+ add x15,x15,x8
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x15,x15,x5
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x15,x15,x4
+ ldr x4,[sp,#8]
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x6,x1,#1
+ and x17,x21,x20
+ ror x5,x14,#19
+ bic x19,x22,x20
+ ror x7,x24,#28
+ add x23,x23,x15 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x6,x6,x1,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x7,x7,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x5,x5,x14,ror#61
+ eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x24,ror#39 // Sigma0(a)
+ eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
+ add x0,x0,x9
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x0,x0,x6
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x0,x0,x5
+ ldr x5,[sp,#16]
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x7,x2,#1
+ and x17,x20,x27
+ ror x6,x15,#19
+ bic x28,x21,x27
+ ror x8,x23,#28
+ add x22,x22,x0 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x7,x7,x2,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x8,x8,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x6,x6,x15,ror#61
+ eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x23,ror#39 // Sigma0(a)
+ eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
+ add x1,x1,x10
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x1,x1,x7
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x1,x1,x6
+ ldr x6,[sp,#24]
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x8,x3,#1
+ and x17,x27,x26
+ ror x7,x0,#19
+ bic x19,x20,x26
+ ror x9,x22,#28
+ add x21,x21,x1 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x8,x8,x3,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x9,x9,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x7,x7,x0,ror#61
+ eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x22,ror#39 // Sigma0(a)
+ eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
+ add x2,x2,x11
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x2,x2,x8
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x2,x2,x7
+ ldr x7,[sp,#0]
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+ cbnz x19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#648 // rewind
+
+ ldp x3,x4,[x0]
+ ldp x5,x6,[x0,#2*8]
+ add x1,x1,#14*8 // advance input pointer
+ ldp x7,x8,[x0,#4*8]
+ add x20,x20,x3
+ ldp x9,x10,[x0,#6*8]
+ add x21,x21,x4
+ add x22,x22,x5
+ add x23,x23,x6
+ stp x20,x21,[x0]
+ add x24,x24,x7
+ add x25,x25,x8
+ stp x22,x23,[x0,#2*8]
+ add x26,x26,x9
+ add x27,x27,x10
+ cmp x1,x2
+ stp x24,x25,[x0,#4*8]
+ stp x26,x27,[x0,#6*8]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*8
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha512_block_data_order,.-sha512_block_data_order
+
+.align 6
+.type .LK512,%object
+.LK512:
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+.size .LK512,.-.LK512
+#ifndef __KERNEL__
+.align 3
+.LOPENSSL_armcap_P:
+# ifdef __ILP32__
+ .long OPENSSL_armcap_P-.
+# else
+ .quad OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#ifndef __KERNEL__
+.comm OPENSSL_armcap_P,4,4
+#endif
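
The generated code above is the CRYPTOGAMS SHA-512 block transform; every round applies the standard FIPS 180-4 primitives that the inline comments name (Ch, Maj, Sigma0/Sigma1, sigma0/sigma1). A plain-C sketch of those primitives, as a reading aid for the assembly (illustration only, not part of the patch):

#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* Choice function; the assembly uses the equivalent and/bic/orr form. */
static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)
{
	return (e & f) ^ (~e & g);
}

/* Majority; the assembly folds this into ((b ^ c) & (a ^ b)) ^ b across rounds. */
static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c)
{
	return (a & b) ^ (a & c) ^ (b & c);
}

/* Rotation amounts match the ror#NN operands in the code above. */
static inline uint64_t Sigma0(uint64_t a) { return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39); }
static inline uint64_t Sigma1(uint64_t e) { return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41); }
static inline uint64_t sigma0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
static inline uint64_t sigma1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

Each round accumulates T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i] into the register annotated "h+=...", adds it into d ("d+=h"), and completes the new a with Maj and Sigma0; the .LK512 table above holds the 80 round constants K[i].
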
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
new file mode 100644
index 000000000000..aff35c9992a4
--- /dev/null
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -0,0 +1,94 @@
+/*
+ * Linux/arm64 port of the OpenSSL SHA512 implementation for AArch64
+ *
+ * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <asm/neon.h>
+
+MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash for arm64");
+MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+
+asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+ unsigned int num_blks);
+
+static int sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+}
+
+static int sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+
+ return sha512_base_finish(desc, out);
+}
+
+static int sha512_final(struct shash_desc *desc, u8 *out)
+{
+ return sha512_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = sha512_base_init,
+ .update = sha512_update,
+ .final = sha512_final,
+ .finup = sha512_finup,
+ .descsize = sizeof(struct sha512_state),
+ .base.cra_name = "sha512",
+ .base.cra_driver_name = "sha512-arm64",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .init = sha384_base_init,
+ .update = sha512_update,
+ .final = sha512_final,
+ .finup = sha512_finup,
+ .descsize = sizeof(struct sha512_state),
+ .base.cra_name = "sha384",
+ .base.cra_driver_name = "sha384-arm64",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init sha512_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha512_mod_init);
+module_exit(sha512_mod_fini);
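
Callers never invoke this driver directly; they go through the crypto API, which selects "sha512"/"sha384" by name and cra_priority, so the priority-150 arm64 implementation outranks the generic C one. A hypothetical one-shot digest helper (sketch only; the helper name and error handling are illustrative):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

/* Hypothetical helper: one-shot SHA-512 of a kernel buffer. */
static int example_sha512_digest(const u8 *data, unsigned int len,
				 u8 out[SHA512_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* shash_desc still carries a flags field in this tree */
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}
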
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 44e1d7f10add..8365a84c2640 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
@@ -24,7 +23,6 @@ generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += msi.h
-generic-y += mutex.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index e517088d635f..d0de0e032bc2 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -17,6 +17,7 @@
#include <asm/cputype.h>
#include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
/* Macros for consistency checks of the GICC subtable of MADT */
#define ACPI_MADT_GICC_LENGTH \
@@ -114,8 +115,28 @@ static inline const char *acpi_get_enable_method(int cpu)
}
#ifdef CONFIG_ACPI_APEI
+/*
+ * acpi_disable_cmcff is used in drivers/acpi/apei/hest.c for disabling
+ * IA-32 Architecture Corrected Machine Check (CMC) Firmware-First mode
+ * with a kernel command line parameter "acpi=nocmcff". But we don't
+ * have this IA-32 specific feature on ARM64, this definition is only
+ * for compatibility.
+ */
+#define acpi_disable_cmcff 1
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
-#endif
+
+/*
+ * Despite its name, this function must still broadcast the TLB
+ * invalidation in order to ensure other CPUs don't end up with junk
+ * entries as a result of speculation. Unusually, it's also called in
+ * IRQ context (ghes_iounmap_irq) so if we ever need to use IPIs for
+ * TLB broadcasting, then we're in trouble here.
+ */
+static inline void arch_apei_flush_tlb_one(unsigned long addr)
+{
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
+#endif /* CONFIG_ACPI_APEI */
#ifdef CONFIG_ACPI_NUMA
int arm64_acpi_numa_init(void);
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index f8ae6d6e4767..f37e3a21f6e7 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -79,19 +79,10 @@
#include <linux/stringify.h>
#include <asm/barrier.h>
+#include <asm/cacheflush.h>
-#define read_gicreg(r) \
- ({ \
- u64 reg; \
- asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
- reg; \
- })
-
-#define write_gicreg(v,r) \
- do { \
- u64 __val = (v); \
- asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
- } while (0)
+#define read_gicreg read_sysreg_s
+#define write_gicreg write_sysreg_s
/*
* Low-level accessors
@@ -102,13 +93,13 @@
static inline void gic_write_eoir(u32 irq)
{
- asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
+ write_sysreg_s(irq, ICC_EOIR1_EL1);
isb();
}
static inline void gic_write_dir(u32 irq)
{
- asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
+ write_sysreg_s(irq, ICC_DIR_EL1);
isb();
}
@@ -116,7 +107,7 @@ static inline u64 gic_read_iar_common(void)
{
u64 irqstat;
- asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+ irqstat = read_sysreg_s(ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
@@ -132,12 +123,9 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
{
u64 irqstat;
- asm volatile(
- "nop;nop;nop;nop\n\t"
- "nop;nop;nop;nop\n\t"
- "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
- "nop;nop;nop;nop"
- : "=r" (irqstat));
+ nops(8);
+ irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ nops(4);
mb();
return irqstat;
@@ -145,37 +133,34 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_PMR_EL1);
}
static inline void gic_write_ctlr(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_CTLR_EL1);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_GRPEN1_EL1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+ write_sysreg_s(val, ICC_SGI1R_EL1);
}
static inline u32 gic_read_sre(void)
{
- u64 val;
-
- asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
- return val;
+ return read_sysreg_s(ICC_SRE_EL1);
}
static inline void gic_write_sre(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_SRE_EL1);
isb();
}
@@ -187,5 +172,21 @@ static inline void gic_write_bpr1(u32 val)
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+
+#define gits_read_baser(c) readq_relaxed(c)
+#define gits_write_baser(v, c) writeq_relaxed(v, c)
+
+#define gits_read_cbaser(c) readq_relaxed(c)
+#define gits_write_cbaser(v, c) writeq_relaxed(v, c)
+
+#define gits_write_cwriter(v, c) writeq_relaxed(v, c)
+
+#define gicr_read_propbaser(c) readq_relaxed(c)
+#define gicr_write_propbaser(v, c) writeq_relaxed(v, c)
+
+#define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
+#define gicr_read_pendbaser(c) readq_relaxed(c)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
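
The rewritten accessors lean on read_sysreg_s()/write_sysreg_s(), which take the sys_reg() encoding of registers the assembler may not know by name. A hypothetical caller in the same style (sketch; assumes the ICC_* definitions from this header and linux/irqchip/arm-gic-v3.h are in scope):

#include <linux/irqchip/arm-gic-v3.h>

/* Hypothetical: enable Group 1 interrupts when the system-register
 * interface is active, mirroring the accessors above. */
static inline void example_enable_grp1(void)
{
	u64 sre = read_sysreg_s(ICC_SRE_EL1);

	if (sre & ICC_SRE_EL1_SRE) {
		write_sysreg_s(1, ICC_GRPEN1_EL1);
		isb();
	}
}
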
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 28bfe6132eb6..446f6c46d4b1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
msr daifclr, #2
.endm
+ .macro save_and_disable_irq, flags
+ mrs \flags, daif
+ msr daifset, #2
+ .endm
+
+ .macro restore_irq, flags
+ msr daif, \flags
+ .endm
+
/*
* Enable and disable debug exceptions.
*/
@@ -202,14 +211,25 @@ lr .req x30 // link register
.endm
/*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
* @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
* @tmp: scratch register
*/
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
+ add \dst, \dst, \tmp
+ .endm
+
+ /*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ ldr \dst, [\dst, \tmp]
.endm
/*
@@ -395,4 +415,24 @@ alternative_endif
movk \reg, :abs_g0_nc:\val
.endm
+/*
+ * Return the current thread_info.
+ */
+ .macro get_thread_info, rd
+ mrs \rd, sp_el0
+ .endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+ .macro post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
+ ic iallu
+ dsb nsh
+ isb
+alternative_else_nop_endif
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 2e5fb976a572..5a2a6ee65f65 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -65,12 +65,12 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(void *kaddr, unsigned long len);
static inline void flush_cache_mm(struct mm_struct *mm)
{
@@ -81,6 +81,11 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
{
}
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
/*
* Cache maintenance functions used by the DMA API. No to be used directly.
*/
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..4174f09678c4
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_HAS_VIRT_HOST_EXTN 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+#define ARM64_HAS_NO_FPSIMD 16
+
+#define ARM64_NCAPS 17
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a27c3245ba21..b4989df48670 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -9,8 +9,7 @@
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
-#include <linux/jump_label.h>
-
+#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -24,27 +23,10 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
-
-#define ARM64_NCAPS 16
-
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
/* CPU feature register tracking */
@@ -122,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
return elf_hwcap & (1UL << num);
}
+/* System capability check for constant caps */
+static inline bool cpus_have_const_cap(int num)
+{
+ if (num >= ARM64_NCAPS)
+ return false;
+ return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
static inline bool cpus_have_cap(unsigned int num)
{
if (num >= ARM64_NCAPS)
return false;
- if (__builtin_constant_p(num))
- return static_branch_unlikely(&cpu_hwcap_keys[num]);
- else
- return test_bit(num, cpu_hwcaps);
+ return test_bit(num, cpu_hwcaps);
}
static inline void cpus_set_cap(unsigned int num)
@@ -218,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+ return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}
static inline bool system_supports_mixed_endian_el0(void)
@@ -226,6 +213,17 @@ static inline bool system_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}
+static inline bool system_supports_fpsimd(void)
+{
+ return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+}
+
+static inline bool system_uses_ttbr0_pan(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+ !cpus_have_cap(ARM64_HAS_PAN);
+}
+
#endif /* __ASSEMBLY__ */
#endif
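
The split leaves two check flavours: cpus_have_const_cap() for capability numbers known at compile time (a static-key branch patched at boot) and cpus_have_cap() for run-time indices (a bitmap test). Schematic, hypothetical callers:

#include <asm/cpufeature.h>

/* Hot path, constant index: compiles down to a patched static branch. */
static inline bool example_pan_fast_path(void)
{
	return cpus_have_const_cap(ARM64_HAS_PAN);
}

/* Index only known at run time: falls back to the cpu_hwcaps bitmap. */
static bool example_has_cap(unsigned int cap)
{
	return cpus_have_cap(cap);
}
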
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 000000000000..f2bcbe2d9889
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+ return (struct task_struct *)read_sysreg(sp_el0);
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
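
With this header, current is derived from SP_EL0, which in this series holds the task_struct pointer rather than a thread_info pointer. Existing users need no change; for instance (sketch):

#include <linux/sched.h>
#include <linux/printk.h>

/* Hypothetical: get_current() is a single MRS of SP_EL0 plus a cast. */
static void example_whoami(void)
{
	pr_info("running as %s (pid %d)\n",
		current->comm, task_pid_nr(current));
}
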
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index b71420a12f26..a44cf5225429 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -68,6 +68,9 @@
#define BRK64_ESR_MASK 0xFFFF
#define BRK64_ESR_KPROBES 0x0004
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+/* uprobes BRK opcodes with ESR encoding */
+#define BRK64_ESR_UPROBES 0x0005
+#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a9e54aad15ef..0b6b1633017f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,6 +1,7 @@
#ifndef _ASM_EFI_H
#define _ASM_EFI_H
+#include <asm/cpufeature.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
@@ -51,6 +52,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define __efi_call_early(f, ...) f(__VA_ARGS__)
#define efi_is_64bit() (true)
+#define efi_call_proto(protocol, f, instance, ...) \
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
#define alloc_screen_info(x...) &screen_info
#define free_screen_info(x...)
@@ -75,7 +79,30 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
static inline void efi_set_pgd(struct mm_struct *mm)
{
- switch_mm(NULL, mm, NULL);
+ __switch_mm(mm);
+
+ if (system_uses_ttbr0_pan()) {
+ if (mm != current->active_mm) {
+ /*
+ * Update the current thread's saved ttbr0 since it is
+ * restored as part of a return from exception. Set
+ * the hardware TTBR0_EL1 using cpu_switch_mm()
+ * directly to enable potential errata workarounds.
+ */
+ update_saved_ttbr0(current, mm);
+ cpu_switch_mm(mm->pgd, mm);
+ } else {
+ /*
+ * Defer the switch to the current thread's TTBR0_EL1
+ * until uaccess_enable(). Restore the current
+ * thread's saved ttbr0 corresponding to its active_mm
+ * (if different from init_mm).
+ */
+ cpu_set_reserved_ttbr0();
+ if (current->active_mm != &init_mm)
+ update_saved_ttbr0(current, current->active_mm);
+ }
+ }
}
void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f4a5d7..5d1700425efe 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -138,7 +138,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->regs[0] = 0
-#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+#define SET_PERSONALITY(ex) \
+({ \
+ clear_bit(TIF_32BIT, &current->mm->context.flags); \
+ clear_thread_flag(TIF_32BIT); \
+})
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
@@ -183,7 +187,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
((x)->e_flags & EF_ARM_EABI_MASK))
#define compat_start_thread compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex) \
+({ \
+ set_bit(TIF_32BIT, &current->mm->context.flags); \
+ set_thread_flag(TIF_32BIT); \
+ })
#define COMPAT_ARCH_DLINFO
extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
int uses_interp);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cdd32c2..85c4a8981d47 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,15 +21,12 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#include <asm/errno.h>
-#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
+do { \
+ uaccess_enable(); \
asm volatile( \
- ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
@@ -44,11 +41,11 @@
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
- ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "memory")
+ : "memory"); \
+ uaccess_disable(); \
+} while (0)
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +115,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
+ uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
@@ -134,10 +131,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
" .popsection\n"
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
: "memory");
+ uaccess_disable();
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9510ace570e2..b6b167ac082b 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -77,7 +77,11 @@ static inline void decode_ctrl_reg(u32 reg,
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
#define ARM_BREAKPOINT_LEN_2 0x3
+#define ARM_BREAKPOINT_LEN_3 0x7
#define ARM_BREAKPOINT_LEN_4 0xf
+#define ARM_BREAKPOINT_LEN_5 0x1f
+#define ARM_BREAKPOINT_LEN_6 0x3f
+#define ARM_BREAKPOINT_LEN_7 0x7f
#define ARM_BREAKPOINT_LEN_8 0xff
/* Kernel stepping */
@@ -119,7 +123,7 @@ struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type);
+ int *gen_len, int *gen_type, int *offset);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
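
The added ARM_BREAKPOINT_LEN_{3,5,6,7} values are byte-address-select masks, and arch_bp_generic_fields() now also reports the offset of the watched bytes within the doubleword. The mapping to the generic perf (offset, length) view is roughly as follows (hedged sketch, not the in-tree implementation):

#include <linux/bitops.h>

/* Hypothetical decode of a byte-address-select mask such as 0x7f (LEN_7):
 * offset = first selected byte, length = number of selected bytes. */
static void example_decode_bas(unsigned long bas, int *offset, int *len)
{
	if (!bas) {
		*offset = 0;
		*len = 0;
		return;
	}
	*offset = __ffs(bas);
	*len = hweight_long(bas >> *offset);
}
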
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0bba427bb4c2..0c00c87bb9dd 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,7 +22,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b57c0c..7803343e5881 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H
+#include <asm/pgtable.h>
#include <asm/sparsemem.h>
/*
@@ -54,6 +55,12 @@
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE (PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE (0)
+#endif
+
/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The last vcpu id that ran on each physical CPU */
+ int __percpu *last_vcpu_ran;
+
/* The maximum number of vCPUs depends on the used GIC model */
int max_vcpus;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
return v;
}
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
* We currently only support a 40bit IPA.
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce037b2f..47619411f0ff 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -19,6 +19,7 @@
typedef struct {
atomic64_t id;
void *vdso;
+ unsigned long flags;
} mm_context_t;
/*
@@ -34,7 +35,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot, bool allow_block_mappings);
+ pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a50185375f09..0363fe80455c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
@@ -103,7 +104,7 @@ static inline void cpu_uninstall_idmap(void)
local_flush_tlb_all();
cpu_set_default_tcr_t0sz();
- if (mm != &init_mm)
+ if (mm != &init_mm && !system_uses_ttbr0_pan())
cpu_switch_mm(mm->pgd, mm);
}
@@ -163,20 +164,26 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned. No registers are touched. We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+ struct mm_struct *mm)
{
- unsigned int cpu = smp_processor_id();
+ if (system_uses_ttbr0_pan()) {
+ BUG_ON(mm->pgd == swapper_pg_dir);
+ task_thread_info(tsk)->ttbr0 =
+ virt_to_phys(mm->pgd) | ASID(mm) << 48;
+ }
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+}
+#endif
- if (prev == next)
- return;
+static inline void __switch_mm(struct mm_struct *next)
+{
+ unsigned int cpu = smp_processor_id();
/*
* init_mm.pgd does not contain any user mappings and it is always
@@ -190,8 +197,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
check_and_switch_context(next, cpu);
}
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (prev != next)
+ __switch_mm(next);
+
+ /*
+ * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+ * value may have not been initialised yet (activate_mm caller) or the
+ * ASID has changed since the last run (following the context switch
+ * of another thread of the same process). Avoid setting the reserved
+ * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+ */
+ if (next != &init_mm)
+ update_saved_ttbr0(tsk, next);
+}
+
#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+#define activate_mm(prev,next) switch_mm(prev, next, current)
void verify_cpu_asid_bits(void);
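
For the software PAN scheme, the value stashed in thread_info->ttbr0 is the physical address of the mm's pgd with the ASID in bits 63:48, exactly as update_saved_ttbr0() composes it above; in isolation (schematic sketch, assuming the same includes as this header):

/* Schematic only: mirrors the composition in update_saved_ttbr0(). */
static inline u64 example_saved_ttbr0(struct mm_struct *mm)
{
	/* BADDR in the low bits, ASID in TTBR0_EL1[63:48]. */
	return virt_to_phys(mm->pgd) | ((u64)ASID(mm) << 48);
}
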
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index 13ce4cc18e26..ad4cdc966c0f 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -9,8 +9,9 @@
*/
#include <linux/types.h>
+#include <asm/fpsimd.h>
-#define cpu_has_neon() (1)
+#define cpu_has_neon() system_supports_fpsimd()
#define kernel_neon_begin() kernel_neon_begin_partial(32)
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
deleted file mode 100644
index 123f45d92cd1..000000000000
--- a/arch/arm64/include/asm/opcodes.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
-#endif
-
-#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5394c8405e66..3bd498e4de4c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/stack_pointer.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -101,16 +103,16 @@ static inline unsigned long __percpu_read(void *ptr, int size)
switch (size) {
case 1:
- ret = ACCESS_ONCE(*(u8 *)ptr);
+ ret = READ_ONCE(*(u8 *)ptr);
break;
case 2:
- ret = ACCESS_ONCE(*(u16 *)ptr);
+ ret = READ_ONCE(*(u16 *)ptr);
break;
case 4:
- ret = ACCESS_ONCE(*(u32 *)ptr);
+ ret = READ_ONCE(*(u32 *)ptr);
break;
case 8:
- ret = ACCESS_ONCE(*(u64 *)ptr);
+ ret = READ_ONCE(*(u64 *)ptr);
break;
default:
BUILD_BUG();
@@ -123,16 +125,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+ WRITE_ONCE(*(u8 *)ptr, (u8)val);
break;
case 2:
- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+ WRITE_ONCE(*(u16 *)ptr, (u16)val);
break;
case 4:
- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+ WRITE_ONCE(*(u32 *)ptr, (u32)val);
break;
case 8:
- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+ WRITE_ONCE(*(u64 *)ptr, (u64)val);
break;
default:
BUILD_BUG();
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2065f46fa740..8d5cbec17d80 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+#include <asm/stack_pointer.h>
+
#define ARMV8_PMU_MAX_COUNTERS 32
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -46,7 +48,15 @@
#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
/*
* Event filters for PMUv3
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 5af574d632fa..6a5b28904c33 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -15,21 +15,22 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
-#include <asm/opcodes.h>
-
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
/* architecture specific copy of original instruction */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
+struct arch_probe_insn {
+ probe_opcode_t *insn;
pstate_check_t *pstate_cc;
- kprobes_handler_t *handler;
+ probes_handler_t *handler;
/* restore address after step xol */
unsigned long restore;
};
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
#endif
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 60e34824e18c..747c65a616ed 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -149,8 +149,6 @@ static inline void cpu_relax(void)
asm volatile("yield" ::: "memory");
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 07b8ed037dee..6afd8476c60c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -16,9 +16,10 @@
#ifndef __ASM_PTDUMP_H
#define __ASM_PTDUMP_H
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_CORE
#include <linux/mm_types.h>
+#include <linux/seq_file.h>
struct addr_marker {
unsigned long start_address;
@@ -29,16 +30,25 @@ struct ptdump_info {
struct mm_struct *mm;
const struct addr_marker *markers;
unsigned long base_addr;
- unsigned long max_addr;
};
-int ptdump_register(struct ptdump_info *info, const char *name);
-
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
-static inline int ptdump_register(struct ptdump_info *info, const char *name)
+static inline int ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name)
{
return 0;
}
-#endif /* CONFIG_ARM64_PTDUMP */
+#endif
+void ptdump_check_wx(void);
+#endif /* CONFIG_ARM64_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx() ptdump_check_wx()
+#else
+#define debug_checkwx() do { } while (0)
+#endif
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5b036d..513daf050e84 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -217,6 +217,14 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
#include <asm-generic/ptrace.h>
+#define procedure_link_pointer(regs) ((regs)->regs[30])
+
+static inline void procedure_link_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ procedure_link_pointer(regs) = val;
+}
+
#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 022644704a93..d050d720a1b4 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -29,11 +29,22 @@
#ifndef __ASSEMBLY__
+#include <asm/percpu.h>
+
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;
@@ -73,6 +84,7 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
void *stack;
+ struct task_struct *task;
long status;
};
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 000000000000..ffcdf742cddf
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
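
current_stack_pointer gets its own header so files such as percpu.h and perf_event.h can use it without dragging in thread_info.h. A hypothetical consumer (sketch):

#include <asm/stack_pointer.h>
#include <linux/types.h>

/* Hypothetical: does the current SP fall inside a given stack region? */
static inline bool example_on_this_stack(unsigned long base, unsigned long size)
{
	unsigned long sp = current_stack_pointer;

	return sp >= base && sp < base + size;
}
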
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index b8a313fd7a09..de5600f40adf 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
#define NR_CALLEE_SAVED_REGS 12
/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6c80b3699cb8..98ae03f8eedd 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -22,8 +22,6 @@
#include <linux/stringify.h>
-#include <asm/opcodes.h>
-
/*
* ARMv8 ARM reserves the following encoding for system registers:
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
@@ -37,6 +35,33 @@
#define sys_reg(op0, op1, crn, crm, op2) \
((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+#ifndef CONFIG_BROKEN_GAS_INST
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .inst (x)
+#else
+#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
+#endif
+
+#else /* CONFIG_BROKEN_GAS_INST */
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define __INSTR_BSWAP(x) (x)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
+ (((x) << 8) & 0x00ff0000) | \
+ (((x) >> 8) & 0x0000ff00) | \
+ (((x) >> 24) & 0x000000ff))
+#endif /* CONFIG_CPU_BIG_ENDIAN */
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .long __INSTR_BSWAP(x)
+#else /* __ASSEMBLY__ */
+#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_BROKEN_GAS_INST */
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -81,10 +106,10 @@
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
-#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
- (!!x)<<8 | 0x1f)
+#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
@@ -228,11 +253,11 @@
.equ .L__reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5200000|(\sreg)|(.L__reg_num_\rt)
+ __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
.endm
.macro msr_s, sreg, rt
- .inst 0xd5000000|(\sreg)|(.L__reg_num_\rt)
+ __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
.endm
#else
@@ -246,11 +271,11 @@ asm(
" .equ .L__reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
" .endm\n"
);
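
The __emit_inst() fallback matters because ".long" emits data in the assembler's
target endianness while AArch64 instruction fetch is always little-endian, so on
big-endian builds the value has to be byte-swapped first. A small user-space
sketch (not part of the patch) applying the swap to the SET_PSTATE_PAN(1)
encoding 0xd500419f:

#include <stdio.h>
#include <stdint.h>

/* Mirror of __INSTR_BSWAP() above, for illustration only. */
#define INSTR_BSWAP(x)	((((uint32_t)(x) << 24) & 0xff000000) | \
			 (((uint32_t)(x) <<  8) & 0x00ff0000) | \
			 (((uint32_t)(x) >>  8) & 0x0000ff00) | \
			 (((uint32_t)(x) >> 24) & 0x000000ff))

int main(void)
{
	/* "msr PAN, #1" is 0xd500419f; a big-endian .long must store it as
	 * 0x9f4100d5 so the bytes in memory are 9f 41 00 d5, the layout the
	 * CPU fetches as a little-endian instruction. */
	printf("0x%08x\n", INSTR_BSWAP(0xd500419fu));	/* prints 0x9f4100d5 */
	return 0;
}
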
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e9ea5a6bd449..46c3b93cf865 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,58 +36,31 @@
struct task_struct;
+#include <asm/stack_pointer.h>
#include <asm/types.h>
typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
*/
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ u64 ttbr0; /* saved TTBR0_EL1 */
+#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
};
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
- .flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-/*
- * struct thread_info can be accessed directly via sp_el0.
- *
- * We don't use read_sysreg() as we want the compiler to cache the value where
- * possible.
- */
-static inline struct thread_info *current_thread_info(void)
-{
- unsigned long sp_el0;
-
- asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
- return (struct thread_info *)sp_el0;
-}
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
@@ -112,6 +85,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -132,10 +106,12 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_UPROBE)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 55d0adbf6509..d26750ca6e06 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,12 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
/*
* User space memory access functions
*/
@@ -26,10 +32,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
-#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -120,6 +124,99 @@ static inline void set_fs(mm_segment_t fs)
" .popsection\n"
/*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+ unsigned long ttbr;
+
+	/* reserved_ttbr0 is placed at the end of swapper_pg_dir */
+ ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+ write_sysreg(ttbr, ttbr0_el1);
+ isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+ unsigned long flags;
+
+ /*
+ * Disable interrupts to avoid preemption between reading the 'ttbr0'
+ * variable and the MSR. A context switch could trigger an ASID
+ * roll-over and an update of 'ttbr0'.
+ */
+ local_irq_save(flags);
+ write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+ isb();
+ local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_disable();
+ return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_enable();
+ return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+ return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ return false;
+}
+#endif
+
+#define __uaccess_disable(alt) \
+do { \
+ if (!uaccess_ttbr0_disable()) \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
+ CONFIG_ARM64_PAN)); \
+} while (0)
+
+#define __uaccess_enable(alt) \
+do { \
+ if (!uaccess_ttbr0_enable()) \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
+ CONFIG_ARM64_PAN)); \
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+ __uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+ __uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+ __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+ __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -146,8 +243,7 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
@@ -168,9 +264,8 @@ do { \
default: \
BUILD_BUG(); \
} \
+ uaccess_disable_not_uao(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
} while (0)
#define __get_user(x, ptr) \
@@ -215,8 +310,7 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
@@ -237,8 +331,7 @@ do { \
default: \
BUILD_BUG(); \
} \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
- CONFIG_ARM64_PAN)); \
+ uaccess_disable_not_uao(); \
} while (0)
#define __put_user(x, ptr) \
@@ -331,4 +424,66 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
+#else /* __ASSEMBLY__ */
+
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ .endm
+
+ .macro __uaccess_ttbr0_enable, tmp1
+ get_thread_info \tmp1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
+ isb
+ .endm
+
+ .macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+ __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1
+ restore_irq \tmp2
+alternative_else_nop_endif
+ .endm
+#else
+ .macro uaccess_ttbr0_disable, tmp1
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+ .macro uaccess_disable_not_uao, tmp1
+ uaccess_ttbr0_disable \tmp1
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_enable_not_uao, tmp1, tmp2
+ uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
#endif /* __ASM_UACCESS_H */
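
Note that the enable/disable pairing stays internal to the uaccess primitives,
so ordinary callers are unchanged. A minimal sketch of such a caller (the helper
below is illustrative, not from the patch):

#include <linux/types.h>
#include <linux/uaccess.h>

/* get_user() now opens the PAN/TTBR0 window via uaccess_enable_not_uao()
 * and closes it again with uaccess_disable_not_uao() around the
 * unprivileged load, so no explicit toggling is needed here. */
static long read_user_word(u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}
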
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
new file mode 100644
index 000000000000..8d004073d0e8
--- /dev/null
+++ b/arch/arm64/include/asm/uprobes.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
+
+#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
+#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ bool simulate;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 74b0c423ff5b..3522cbaed316 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypercall.h>
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
index f263da8e8769..d6e7709d0688 100644
--- a/arch/arm64/include/asm/xen/hypervisor.h
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypervisor.h>
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
index 44457aebeed4..88c0d75da190 100644
--- a/arch/arm64/include/asm/xen/interface.h
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/interface.h>
+#include <xen/arm/interface.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2052102b4e02..b3ef061d8b74 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page-coherent.h>
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index bed87ec36780..31bbc803cecb 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page.h>
+#include <xen/arm/page.h>
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index b0988bb1bf64..04de188a36c9 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,10 +14,8 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
-#include <asm/opcodes.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
@@ -285,10 +283,10 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
#define __SWP_LL_SC_LOOPS 4
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
+do { \
+ uaccess_enable(); \
__asm__ __volatile__( \
" mov %w3, %w7\n" \
- ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
"0: ldxr"B" %w2, [%4]\n" \
"1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
@@ -306,12 +304,12 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
- ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
- CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
- : "memory")
+ : "memory"); \
+ uaccess_disable(); \
+} while (0)
#define __user_swp_asm(data, addr, res, temp, temp2) \
__user_swpX_asm(data, addr, res, temp, temp2, "")
@@ -352,6 +350,10 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
return res;
}
+#define ARM_OPCODE_CONDTEST_FAIL 0
+#define ARM_OPCODE_CONDTEST_PASS 1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
#define ARM_OPCODE_CONDITION_UNCOND 0xf
static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 4a2f0f0fef32..bc049afc73a7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,11 +36,13 @@ int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
@@ -123,6 +125,7 @@ int main(void)
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c02504ea304b..fdf8f045929f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
#endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -746,6 +747,14 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}
+static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+ return cpuid_feature_extract_signed_field(pfr0,
+ ID_AA64PFR0_FP_SHIFT) < 0;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -829,6 +838,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+ {
+ /* FP/SIMD is not implemented */
+ .capability = ARM64_HAS_NO_FPSIMD,
+ .def_scope = SCOPE_SYSTEM,
+ .min_field_value = 0,
+ .matches = has_no_fpsimd,
+ },
{},
};
@@ -1102,5 +1118,5 @@ void __init setup_cpu_features(void)
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+ return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index b3d5b3e8fbcb..7b7be71e87bf 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -227,7 +227,7 @@ static struct attribute_group cpuregs_attr_group = {
.name = "identification"
};
-static int cpuid_add_regs(int cpu)
+static int cpuid_cpu_online(unsigned int cpu)
{
int rc;
struct device *dev;
@@ -248,7 +248,7 @@ out:
return rc;
}
-static int cpuid_remove_regs(int cpu)
+static int cpuid_cpu_offline(unsigned int cpu)
{
struct device *dev;
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
@@ -264,40 +264,22 @@ static int cpuid_remove_regs(int cpu)
return 0;
}
-static int cpuid_callback(struct notifier_block *nb,
- unsigned long action, void *hcpu)
-{
- int rc = 0;
- unsigned long cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = cpuid_add_regs(cpu);
- break;
- case CPU_DEAD:
- rc = cpuid_remove_regs(cpu);
- break;
- }
-
- return notifier_from_errno(rc);
-}
-
static int __init cpuinfo_regs_init(void)
{
- int cpu;
-
- cpu_notifier_register_begin();
+ int cpu, ret;
for_each_possible_cpu(cpu) {
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
kobject_init(&info->kobj, &cpuregs_kobj_type);
- if (cpu_online(cpu))
- cpuid_add_regs(cpu);
}
- __hotcpu_notifier(cpuid_callback, 0);
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
+ cpuid_cpu_online, cpuid_cpu_offline);
+ if (ret < 0) {
+ pr_err("cpuinfo: failed to register hotplug callbacks.\n");
+ return ret;
+ }
return 0;
}
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
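
The cpuinfo conversion above follows the generic dynamic hotplug-state pattern.
A standalone sketch of that pattern (all names below are illustrative, not from
the patch):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u online\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: cpu%u offline\n", cpu);
	return 0;
}

static int __init demo_hotplug_init(void)
{
	int ret;

	/* A dynamic state: the online callback is also invoked for CPUs that
	 * are already up, which replaces the old explicit loop under
	 * cpu_notifier_register_begin()/done(). */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}
device_initcall(demo_hotplug_init);
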
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 73ae90ef434c..605df76f0a06 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,6 +226,8 @@ static void send_user_sigtrap(int si_code)
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+ bool handler_found = false;
+
/*
* If we are stepping a pending breakpoint, call the hw_breakpoint
* handler first.
@@ -233,7 +235,14 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
if (!reinstall_suspended_bps(regs))
return 0;
- if (user_mode(regs)) {
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+#endif
+ if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+
+ if (!handler_found && user_mode(regs)) {
send_user_sigtrap(TRAP_TRACE);
/*
@@ -243,15 +252,8 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* to the active-not-pending state).
*/
user_rewind_single_step(current);
- } else {
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
- return 0;
-#endif
- if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
- return 0;
-
- pr_warning("Unexpected kernel single-step exception at EL1\n");
+ } else if (!handler_found) {
+ pr_warn("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
* returning to regs.
@@ -304,16 +306,20 @@ NOKPROBE_SYMBOL(call_break_hook);
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- if (user_mode(regs)) {
- send_user_sigtrap(TRAP_BRKPT);
- }
+ bool handler_found = false;
+
#ifdef CONFIG_KPROBES
- else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
- if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
- return -EFAULT;
+ if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+ if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
}
#endif
- else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+ if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ handler_found = true;
+
+ if (!handler_found && user_mode(regs)) {
+ send_user_sigtrap(TRAP_BRKPT);
+ } else if (!handler_found) {
pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index ba9bee389fd5..5d17f377d905 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -62,8 +62,8 @@ struct screen_info screen_info __section(.data);
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
pteval_t prot_val = create_mapping_protection(md);
- bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
- md->type != EFI_RUNTIME_SERVICES_DATA);
+ bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_RUNTIME_SERVICES_DATA);
if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
@@ -76,12 +76,12 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
* from the MMU routines. So avoid block mappings altogether in
* that case.
*/
- allow_block_mappings = false;
+ page_mappings_only = true;
}
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
- __pgprot(prot_val | PTE_NG), allow_block_mappings);
+ __pgprot(prot_val | PTE_NG), page_mappings_only);
return 0;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 223d54a4d66b..4f0d76339414 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,7 +29,9 @@
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
+#include <asm/ptrace.h>
#include <asm/thread_info.h>
+#include <asm/uaccess.h>
#include <asm/unistd.h>
/*
@@ -90,9 +92,8 @@
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
mov x29, xzr // fp pointed to user-space
@@ -100,15 +101,41 @@
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
- ldr x20, [tsk, #TI_ADDR_LIMIT]
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #TASK_SIZE_64
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+ * EL0, there is no need to check the state of TTBR0_EL1 since
+ * accesses are always enabled.
+ * Note that the meaning of this bit differs from the ARMv8.1 PAN
+ * feature as all TTBR0_EL1 accesses are disabled, not just those to
+ * user mappings.
+ */
+alternative_if ARM64_HAS_PAN
+ b 1f // skip TTBR0 PAN
+alternative_else_nop_endif
+
+ .if \el != 0
+ mrs x21, ttbr0_el1
+ tst x21, #0xffff << 48 // Check for the reserved ASID
+ orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
+ b.eq 1f // TTBR0 access already disabled
+ and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
+ .endif
+
+ __uaccess_ttbr0_disable x21
+1:
+#endif
+
stp x22, x23, [sp, #S_PC]
/*
@@ -139,7 +166,7 @@
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
@@ -147,6 +174,40 @@
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
+ .endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+ * PAN bit checking.
+ */
+alternative_if ARM64_HAS_PAN
+ b 2f // skip TTBR0 PAN
+alternative_else_nop_endif
+
+ .if \el != 0
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+ .endif
+
+ __uaccess_ttbr0_enable x0
+
+ .if \el == 0
+ /*
+ * Enable errata workarounds only if returning to user. The only
+	 * workaround currently required for TTBR0_EL1 changes is for the
+ * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+ * corruption).
+ */
+ post_ttbr0_update_workaround
+ .endif
+1:
+ .if \el != 0
+ and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
+ .endif
+2:
+#endif
+
+ .if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
@@ -162,6 +223,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
.endif
+
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
ldp x0, x1, [sp, #16 * 0]
@@ -184,23 +246,20 @@ alternative_else_nop_endif
eret // return to kernel
.endm
- .macro get_thread_info, rd
- mrs \rd, sp_el0
- .endm
-
.macro irq_stack_entry
mov x19, sp // preserve the original sp
/*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
+ * Compare sp with the base of the task stack.
+ * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+ * and should switch to the irq stack.
*/
- and x25, x19, #~(THREAD_SIZE - 1)
- cmp x25, tsk
- b.ne 9998f
+ ldr x25, [tsk, TSK_STACK]
+ eor x25, x25, x19
+ and x25, x25, #~(THREAD_SIZE - 1)
+ cbnz x25, 9998f
- this_cpu_ptr irq_stack, x25, x26
+ adr_this_cpu x25, irq_stack, x26
mov x26, #IRQ_STACK_START_SP
add x26, x25, x26
@@ -427,9 +486,9 @@ el1_irq:
irq_handler
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
1:
@@ -444,7 +503,7 @@ ENDPROC(el1_irq)
el1_preempt:
mov x24, lr
1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
#endif
@@ -674,8 +733,7 @@ ENTRY(cpu_switch_to)
ldp x29, x9, [x8], #16
ldr lr, [x8]
mov sp, x9
- and x9, x9, #~(THREAD_SIZE - 1)
- msr sp_el0, x9
+ msr sp_el0, x1
ret
ENDPROC(cpu_switch_to)
@@ -686,7 +744,7 @@ ENDPROC(cpu_switch_to)
ret_fast_syscall:
disable_irq // disable interrupts
str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
@@ -706,14 +764,14 @@ work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on // enabled while in userspace
#endif
- ldr x1, [tsk, #TI_FLAGS] // re-check for single-step
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
/*
* "slow" syscall return path.
*/
ret_to_user:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
@@ -746,7 +804,7 @@ el0_svc_naked: // compat entry point
enable_dbg_and_irq
ct_user_exit 1
- ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 394c61db5566..b883f1f75216 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -127,6 +127,8 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
void fpsimd_thread_switch(struct task_struct *next)
{
+ if (!system_supports_fpsimd())
+ return;
/*
* Save the current FPSIMD state to memory, but only if whatever is in
* the registers is in fact the most recent userland FPSIMD state of
@@ -157,6 +159,8 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ if (!system_supports_fpsimd())
+ return;
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -168,6 +172,8 @@ void fpsimd_flush_thread(void)
*/
void fpsimd_preserve_current_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
@@ -181,6 +187,8 @@ void fpsimd_preserve_current_state(void)
*/
void fpsimd_restore_current_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
@@ -199,6 +207,8 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct fpsimd_state *state)
{
+ if (!system_supports_fpsimd())
+ return;
preempt_disable();
fpsimd_load_state(state);
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -228,6 +238,8 @@ static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
*/
void kernel_neon_begin_partial(u32 num_regs)
{
+ if (WARN_ON(!system_supports_fpsimd()))
+ return;
if (in_interrupt()) {
struct fpsimd_partial_state *s = this_cpu_ptr(
in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
@@ -252,6 +264,8 @@ EXPORT_SYMBOL(kernel_neon_begin_partial);
void kernel_neon_end(void)
{
+ if (!system_supports_fpsimd())
+ return;
if (in_interrupt()) {
struct fpsimd_partial_state *s = this_cpu_ptr(
in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 332e33193ccf..4b1abac3485a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -326,14 +326,14 @@ __create_page_tables:
* dirty cache lines being evicted.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
*/
adrp x0, idmap_pg_dir
- adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+ adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
dmb sy
bl __inval_cache_range
@@ -428,7 +428,8 @@ ENDPROC(__create_page_tables)
__primary_switched:
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
- msr sp_el0, x4 // Save thread_info
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
@@ -524,10 +525,21 @@ set_hcr:
msr hcr_el2, x0
isb
- /* Generic timers. */
+ /*
+	 * Allow Non-secure EL1 and EL0 to access the physical timer and counter.
+	 * This is not necessary for VHE, since the host kernel runs in EL2,
+	 * and EL0 accesses are configured at a later stage of the boot process.
+	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
+	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
+	 * to access CNTHCTL_EL2. This allows a kernel designed to run at EL1
+ * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
+ * EL2.
+ */
+ cbnz x2, 1f
mrs x0, cnthctl_el2
orr x0, x0, #3 // Enable EL1 physical timers
msr cnthctl_el2, x0
+1:
msr cntvoff_el2, xzr // Clear virtual offset
#ifdef CONFIG_ARM_GIC_V3
@@ -699,10 +711,10 @@ __secondary_switched:
isb
adr_l x0, secondary_data
- ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- mov sp, x0
- and x0, x0, #~(THREAD_SIZE - 1)
- msr sp_el0, x0 // save thread_info
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ msr sp_el0, x2
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index d55a7b09959b..fe301cbcb442 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -136,7 +136,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
/* Save the mpidr of the cpu we called cpu_suspend() on... */
if (sleep_cpu < 0) {
- pr_err("Failing to hibernate on an unkown CPU.\n");
+ pr_err("Failing to hibernate on an unknown CPU.\n");
return -ENODEV;
}
hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
@@ -547,7 +547,7 @@ out:
int hibernate_resume_nonboot_cpu_disable(void)
{
if (sleep_cpu < 0) {
- pr_err("Failing to resume from hibernate on an unkown CPU.\n");
+ pr_err("Failing to resume from hibernate on an unknown CPU.\n");
return -ENODEV;
}
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 948b73148d56..1b3c747fedda 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -317,9 +317,21 @@ static int get_hbp_len(u8 hbp_len)
case ARM_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ len_in_bytes = 3;
+ break;
case ARM_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ len_in_bytes = 5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ len_in_bytes = 6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ len_in_bytes = 7;
+ break;
case ARM_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
@@ -349,7 +361,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type)
+ int *gen_len, int *gen_type, int *offset)
{
/* Type */
switch (ctrl.type) {
@@ -369,17 +381,33 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
+ if (!ctrl.len)
+ return -EINVAL;
+ *offset = __ffs(ctrl.len);
+
/* Len */
- switch (ctrl.len) {
+ switch (ctrl.len >> *offset) {
case ARM_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case ARM_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ *gen_len = HW_BREAKPOINT_LEN_3;
+ break;
case ARM_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ *gen_len = HW_BREAKPOINT_LEN_5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ *gen_len = HW_BREAKPOINT_LEN_6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ *gen_len = HW_BREAKPOINT_LEN_7;
+ break;
case ARM_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
@@ -423,9 +451,21 @@ static int arch_build_bp_info(struct perf_event *bp)
case HW_BREAKPOINT_LEN_2:
info->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
+ case HW_BREAKPOINT_LEN_3:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ break;
case HW_BREAKPOINT_LEN_4:
info->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
+ case HW_BREAKPOINT_LEN_5:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ break;
+ case HW_BREAKPOINT_LEN_6:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ break;
+ case HW_BREAKPOINT_LEN_7:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ break;
case HW_BREAKPOINT_LEN_8:
info->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
@@ -517,18 +557,17 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
default:
return -EINVAL;
}
-
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
} else {
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- if (info->address & alignment_mask)
- return -EINVAL;
+ offset = info->address & alignment_mask;
}
+ info->address &= ~alignment_mask;
+ info->ctrl.len <<= offset;
+
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
@@ -661,12 +700,47 @@ unlock:
}
NOKPROBE_SYMBOL(breakpoint_handler);
+/*
+ * Arm64 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
+ * exception" of ARMv8 Architecture Reference Manual for details.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u64 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
+
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- int i, step = 0, *kernel_step, access;
+ int i, step = 0, *kernel_step, access, closest_match = 0;
+ u64 min_dist = -1, dist;
u32 ctrl_reg;
- u64 val, alignment_mask;
+ u64 val;
struct perf_event *wp, **slots;
struct debug_info *debug_info;
struct arch_hw_breakpoint *info;
@@ -675,35 +749,15 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
slots = this_cpu_ptr(wp_on_reg);
debug_info = &current->thread.debug;
+ /*
+ * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
-
- info = counter_arch_bp(wp);
- /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
- if (is_compat_task()) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
- } else {
- alignment_mask = 0x7;
- }
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select to confirm. */
- ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
+ continue;
/*
* Check that the access type matches.
@@ -712,18 +766,41 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
+ /* Check if the watchpoint value and byte select match. */
+ val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+ ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
+ info = counter_arch_bp(wp);
info->trigger = addr;
perf_bp_event(wp, regs);
/* Do we need to handle the stepping? */
if (is_default_overflow_handler(wp))
step = 1;
+ }
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ perf_bp_event(wp, regs);
-unlock:
- rcu_read_unlock();
+ /* Do we need to handle the stepping? */
+ if (is_default_overflow_handler(wp))
+ step = 1;
}
+ rcu_read_unlock();
if (!step)
return 0;
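
A quick worked example of the distance heuristic (user-space sketch, not kernel
code): a watchpoint value of 0x1000 with byte-select bits 0x3c watches
0x1002-0x1005, so a reported fault address of 0x1000 is distance 2 away while
0x1003 is an exact match.

#include <stdio.h>
#include <stdint.h>

static uint64_t distance(uint64_t addr, uint64_t val, uint32_t len)
{
	uint64_t wp_low  = val + __builtin_ctz(len);		/* __ffs() */
	uint64_t wp_high = val + 31 - __builtin_clz(len);	/* __fls() */

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)distance(0x1000, 0x1000, 0x3c)); /* 2 */
	printf("%llu\n", (unsigned long long)distance(0x1003, 0x1000, 0x3c)); /* 0 */
	return 0;
}
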
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 6f2ac4fc66ca..94b62c1fa4df 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -30,7 +30,6 @@
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
-#include <asm/opcodes.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index e017a9493b92..d217c9e95b06 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -247,6 +247,9 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
+ if (!kgdb_single_step)
+ return DBG_HOOK_ERROR;
+
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return 0;
}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index a9310a69fffd..57ae9d9ed9bb 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -31,17 +31,9 @@
/*
* ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
*/
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
-
/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index ce06312e3d34..89b6df613dde 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
kprobes_trampoline.o \
simulate-insn.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
+ simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index d1731bf977ef..6bf6657a5a52 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -17,7 +17,6 @@
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
-#include <asm/kprobes.h>
#include <asm/insn.h>
#include <asm/sections.h>
@@ -78,8 +77,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
* INSN_GOOD If instruction is supported and uses instruction slot,
* INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
*/
-static enum kprobe_insn __kprobes
-arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
{
/*
* Instructions reading or modifying the PC won't work from the XOL
@@ -89,26 +88,26 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return INSN_GOOD;
if (aarch64_insn_is_bcond(insn)) {
- asi->handler = simulate_b_cond;
+ api->handler = simulate_b_cond;
} else if (aarch64_insn_is_cbz(insn) ||
aarch64_insn_is_cbnz(insn)) {
- asi->handler = simulate_cbz_cbnz;
+ api->handler = simulate_cbz_cbnz;
} else if (aarch64_insn_is_tbz(insn) ||
aarch64_insn_is_tbnz(insn)) {
- asi->handler = simulate_tbz_tbnz;
+ api->handler = simulate_tbz_tbnz;
} else if (aarch64_insn_is_adr_adrp(insn)) {
- asi->handler = simulate_adr_adrp;
+ api->handler = simulate_adr_adrp;
} else if (aarch64_insn_is_b(insn) ||
aarch64_insn_is_bl(insn)) {
- asi->handler = simulate_b_bl;
+ api->handler = simulate_b_bl;
} else if (aarch64_insn_is_br(insn) ||
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
- asi->handler = simulate_br_blr_ret;
+ api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
- asi->handler = simulate_ldr_literal;
+ api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
- asi->handler = simulate_ldrsw_literal;
+ api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@@ -120,6 +119,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return INSN_GOOD_NO_SLOT;
}
+#ifdef CONFIG_KPROBES
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
@@ -138,12 +138,12 @@ is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
return false;
}
-enum kprobe_insn __kprobes
+enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
- enum kprobe_insn decoded;
- kprobe_opcode_t insn = le32_to_cpu(*addr);
- kprobe_opcode_t *scan_end = NULL;
+ enum probe_insn decoded;
+ probe_opcode_t insn = le32_to_cpu(*addr);
+ probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
/*
@@ -162,7 +162,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
- decoded = arm_probe_decode_insn(insn, asi);
+ decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))
@@ -170,3 +170,4 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
return decoded;
}
+#endif
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index d438289646a6..76d3f315407f 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -23,13 +23,17 @@
*/
#define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
-enum kprobe_insn {
+enum probe_insn {
INSN_REJECTED,
INSN_GOOD_NO_SLOT,
INSN_GOOD,
};
-enum kprobe_insn __kprobes
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f5077ea7af6d..1decd2b2c730 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -44,31 +44,31 @@ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
- p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+ p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
- flush_icache_range((uintptr_t) (p->ainsn.insn),
- (uintptr_t) (p->ainsn.insn) +
+ flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+ (uintptr_t) (p->ainsn.api.insn) +
MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
/*
* Needs restoring of return address after stepping xol.
*/
- p->ainsn.restore = (unsigned long) p->addr +
+ p->ainsn.api.restore = (unsigned long) p->addr +
sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
 	/* This instruction is not executed out of line. No need to adjust the PC */
- p->ainsn.restore = 0;
+ p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (p->ainsn.handler)
- p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
/* single step simulated, now go for post processing */
post_kprobe_handler(kcb, regs);
@@ -98,18 +98,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
- p->ainsn.insn = NULL;
+ p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
return -ENOMEM;
break;
};
/* prepare the instruction */
- if (p->ainsn.insn)
+ if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
@@ -142,9 +142,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- if (p->ainsn.insn) {
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
}
}
@@ -244,9 +244,9 @@ static void __kprobes setup_singlestep(struct kprobe *p,
}
- if (p->ainsn.insn) {
+ if (p->ainsn.api.insn) {
/* prepare for single stepping */
- slot = (unsigned long)p->ainsn.insn;
+ slot = (unsigned long)p->ainsn.api.insn;
set_ss_context(kcb, slot); /* mark pending ss */
@@ -295,8 +295,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
return;
/* return addr restore if non-branching insn */
- if (cur->ainsn.restore != 0)
- instruction_pointer_set(regs, cur->ainsn.restore);
+ if (cur->ainsn.api.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.api.restore);
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 8977ce9d009d..357d3efe1366 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -13,28 +13,26 @@
* General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "simulate-insn.h"
-#define sign_extend(x, signbit) \
- ((x) | (0 - ((x) & (1 << (signbit)))))
-
#define bbl_displacement(insn) \
- sign_extend(((insn) & 0x3ffffff) << 2, 27)
+ sign_extend32(((insn) & 0x3ffffff) << 2, 27)
#define bcond_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
#define cbz_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
#define tbz_displacement(insn) \
- sign_extend(((insn >> 5) & 0x3fff) << 2, 15)
+ sign_extend32(((insn >> 5) & 0x3fff) << 2, 15)
#define ldr_displacement(insn) \
- sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+ sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
{
@@ -106,7 +104,7 @@ simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
xn = opcode & 0x1f;
imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
- imm = sign_extend(imm, 20);
+ imm = sign_extend64(imm, 20);
if (opcode & 0x80000000)
val = (imm<<12) + (addr & 0xfffffffffffff000);
else
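
The open-coded sign_extend() macro is replaced by the generic helpers. A small
user-space sketch (not kernel code) of the same displacement decode for a B
instruction whose imm26 field is all ones, i.e. an offset of -4 bytes:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's sign_extend32() from <linux/bitops.h>. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

#define bbl_displacement(insn) \
	sign_extend32(((insn) & 0x3ffffff) << 2, 27)

int main(void)
{
	/* 0x17ffffff encodes "b" with imm26 == -1: a branch to the
	 * immediately preceding instruction. */
	printf("%d\n", bbl_displacement(0x17ffffffu));	/* prints -4 */
	return 0;
}
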
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
new file mode 100644
index 000000000000..26c998534dca
--- /dev/null
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_INV_FAULT_CODE UINT_MAX
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *xol_page_kaddr = kmap_atomic(page);
+ void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+ /* Initialize the slot */
+ memcpy(dst, src, len);
+
+ /* flush caches (dcache/icache) */
+ sync_icache_aliases(dst, len);
+
+ kunmap_atomic(xol_page_kaddr);
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ /* TODO: Currently we do not support AARCH32 instruction probing */
+ if (test_bit(TIF_32BIT, &mm->context.flags))
+ return -ENOTSUPP;
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /* Initialize with an invalid fault code to detect if ol insn trapped */
+ current->thread.fault_code = UPROBE_INV_FAULT_CODE;
+
+	/* Point the instruction pointer at the out-of-line (XOL) slot */
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
+
+	/* Continue at the instruction following the probed address */
+ instruction_pointer_set(regs, utask->vaddr + 4);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ /*
+	 * Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if the XOL
+	 * insn itself traps, detect that case via the invalid fault code
+	 * set in arch_uprobe_pre_xol().
+ */
+ if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+	 * The task has received a fatal signal, so reset back to the probed
+ * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ /*
+ * If a simple branch instruction (B) was called for retprobed
+	 * If a simple branch instruction (B) was used for the retprobed
+	 * assembly label, return true even when regs->sp and ret->stack
+	 * are the same. This ensures that cleanup and reporting of return
+	 * instances corresponding to the callee label are done when
+	 * handle_trampoline for the called function is executed.
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->sp <= ret->stack;
+ else
+ return regs->sp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = procedure_link_pointer(regs);
+ /* Replace the return addr with trampoline addr */
+ procedure_link_pointer_set(regs, trampoline_vaddr);
+
+ return orig_ret_vaddr;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static int uprobe_breakpoint_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+
+ return DBG_HOOK_ERROR;
+}
+
+static int uprobe_single_step_handler(struct pt_regs *regs,
+ unsigned int esr)
+{
+ struct uprobe_task *utask = current->utask;
+
+ if (user_mode(regs)) {
+ WARN_ON(utask &&
+ (instruction_pointer(regs) != utask->xol_vaddr + 4));
+
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
+ }
+
+ return DBG_HOOK_ERROR;
+}
+
+/* uprobe breakpoint handler hook */
+static struct break_hook uprobes_break_hook = {
+ .esr_mask = BRK64_ESR_MASK,
+ .esr_val = BRK64_ESR_UPROBES,
+ .fn = uprobe_breakpoint_handler,
+};
+
+/* uprobe single step handler hook */
+static struct step_hook uprobes_step_hook = {
+ .fn = uprobe_single_step_handler,
+};
+
+static int __init arch_init_uprobes(void)
+{
+ register_break_hook(&uprobes_break_hook);
+ register_step_hook(&uprobes_step_hook);
+
+ return 0;
+}
+
+device_initcall(arch_init_uprobes);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 01753cd7d3f0..a3a2816ba73a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,7 @@
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
+#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -282,7 +283,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_cap(ARM64_HAS_UAO))
+ cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
@@ -322,6 +323,20 @@ void uao_thread_switch(struct task_struct *next)
}
/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+
+/*
* Thread switching.
*/
struct task_struct *__switch_to(struct task_struct *prev,
@@ -333,6 +348,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+ entry_task_switch(next);
uao_thread_switch(next);
/*
@@ -350,27 +366,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
- unsigned long stack_page;
+ unsigned long stack_page, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
- stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
unwind_frame(p, &frame))
- return 0;
- if (!in_sched_functions(frame.pc))
- return frame.pc;
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
} while (count ++ < 16);
- return 0;
+
+out:
+ put_task_stack(p);
+ return ret;
}
unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index e0c81da60f76..fc35e06ccaac 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -327,13 +327,13 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, disabled = !ctrl.enabled;
+ int err, len, type, offset, disabled = !ctrl.enabled;
attr->disabled = disabled;
if (disabled)
return 0;
- err = arch_bp_generic_fields(ctrl, &len, &type);
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
if (err)
return err;
@@ -352,6 +352,7 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
attr->bp_len = len;
attr->bp_type = type;
+ attr->bp_addr += offset;
return 0;
}
@@ -404,7 +405,7 @@ static int ptrace_hbp_get_addr(unsigned int note_type,
if (IS_ERR(bp))
return PTR_ERR(bp);
- *addr = bp ? bp->attr.bp_addr : 0;
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
return 0;
}
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706fde83..12a87f2600f2 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f534f492a268..a53f52ac81c6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -291,6 +291,15 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Make sure init_thread_info.ttbr0 always generates translation
+ * faults in case uaccess_enable() is inadvertently called by the init
+ * thread.
+ */
+ init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
+
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67080b9..c7b6de62f9d3 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -414,6 +414,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
} else {
local_irq_enable();
+ if (thread_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
if (thread_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 1bec41b5fda3..df67652e46f0 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -125,9 +125,6 @@ ENTRY(_cpu_resume)
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context address pointer
*/
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8507703dabe4..cb87234cfcf2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -58,6 +58,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -146,6 +149,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* We need to tell the secondary core where to find its stack and the
* page tables.
*/
+ secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -170,6 +174,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
+ secondary_data.task = NULL;
secondary_data.stack = NULL;
status = READ_ONCE(secondary_data.status);
if (ret && status) {
@@ -208,7 +213,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
@@ -217,8 +225,6 @@ asmlinkage void secondary_start_kernel(void)
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -718,6 +724,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
for_each_possible_cpu(cpu) {
+ per_cpu(cpu_number, cpu) = cpu;
+
if (cpu == smp_processor_id())
continue;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c2efddfca18c..8a552a33c6ef 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -22,6 +22,7 @@
#include <linux/stacktrace.h>
#include <asm/irq.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
@@ -128,7 +129,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
-EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
struct stack_trace_data data;
struct stackframe frame;
+ if (!try_get_task_stack(tsk))
+ return;
+
data.trace = trace;
data.skip = trace->skip;
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+ put_task_stack(tsk);
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index bb0cd787a9d3..1e3be9064cfa 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -47,12 +47,6 @@ void notrace __cpu_suspend_exit(void)
cpu_uninstall_idmap();
/*
- * Restore per-cpu offset before any kernel
- * subsystem relying on it has a chance to run.
- */
- set_my_cpu_offset(per_cpu_offset(cpu));
-
- /*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 694f6deedbab..23e9e13bd2aa 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,10 +19,226 @@
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/cpufreq.h>
+#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_MUTEX(cpu_scale_mutex);
+
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+#ifdef CONFIG_PROC_SYSCTL
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sprintf(buf, "%lu\n",
+ arch_scale_cpu_capacity(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int this_cpu = cpu->dev.id, i;
+ unsigned long new_capacity;
+ ssize_t ret;
+
+ if (count) {
+ ret = kstrtoul(buf, 0, &new_capacity);
+ if (ret)
+ return ret;
+ if (new_capacity > SCHED_CAPACITY_SCALE)
+ return -EINVAL;
+
+ mutex_lock(&cpu_scale_mutex);
+ for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+ set_capacity_scale(i, new_capacity);
+ mutex_unlock(&cpu_scale_mutex);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+ int i;
+ struct device *cpu;
+
+ for_each_possible_cpu(i) {
+ cpu = get_cpu_device(i);
+ if (!cpu) {
+ pr_err("%s: too early to get CPU%d device!\n",
+ __func__, i);
+ continue;
+ }
+ device_create_file(cpu, &dev_attr_cpu_capacity);
+ }
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+#endif
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+ int ret;
+ u32 cpu_capacity;
+
+ if (cap_parsing_failed)
+ return;
+
+ ret = of_property_read_u32(cpu_node,
+ "capacity-dmips-mhz",
+ &cpu_capacity);
+ if (!ret) {
+ if (!raw_capacity) {
+ raw_capacity = kcalloc(num_possible_cpus(),
+ sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity) {
+ pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+ cap_parsing_failed = true;
+ return;
+ }
+ }
+ capacity_scale = max(cpu_capacity, capacity_scale);
+ raw_capacity[cpu] = cpu_capacity;
+ pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+ cpu_node->full_name, raw_capacity[cpu]);
+ } else {
+ if (raw_capacity) {
+ pr_err("cpu_capacity: missing %s raw capacity\n",
+ cpu_node->full_name);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ }
+ cap_parsing_failed = true;
+ kfree(raw_capacity);
+ }
+}
+
+static void normalize_cpu_capacity(void)
+{
+ u64 capacity;
+ int cpu;
+
+ if (!raw_capacity || cap_parsing_failed)
+ return;
+
+ pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+ mutex_lock(&cpu_scale_mutex);
+ for_each_possible_cpu(cpu) {
+ pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+ cpu, raw_capacity[cpu]);
+ capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+ / capacity_scale;
+ set_capacity_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
+ }
+ mutex_unlock(&cpu_scale_mutex);
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int cpu;
+
+ if (cap_parsing_failed || cap_parsing_done)
+ return 0;
+
+ switch (val) {
+ case CPUFREQ_NOTIFY:
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+ cpumask_andnot(cpus_to_visit,
+ cpus_to_visit,
+ policy->related_cpus);
+ for_each_cpu(cpu, policy->related_cpus) {
+ raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+ policy->cpuinfo.max_freq / 1000UL;
+ capacity_scale = max(raw_capacity[cpu], capacity_scale);
+ }
+ if (cpumask_empty(cpus_to_visit)) {
+ normalize_cpu_capacity();
+ kfree(raw_capacity);
+ pr_debug("cpu_capacity: parsing done\n");
+ cap_parsing_done = true;
+ schedule_work(&parsing_done_work);
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+ .notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ if (cap_parsing_failed)
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+ pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+ return -ENOMEM;
+ }
+ cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+ return cpufreq_register_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+ cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+}
+
+#else
+static int __init free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+
+ return 0;
+}
+core_initcall(free_raw_capacity);
+#endif
+
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -34,6 +250,7 @@ static int __init get_cpu_for_node(struct device_node *node)
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ parse_cpu_capacity(cpu_node, cpu);
of_node_put(cpu_node);
return cpu;
}
@@ -178,13 +395,17 @@ static int __init parse_dt_topology(void)
* cluster with restricted subnodes.
*/
map = of_get_child_by_name(cn, "cpu-map");
- if (!map)
+ if (!map) {
+ cap_parsing_failed = true;
goto out;
+ }
ret = parse_cluster(map, 0);
if (ret != 0)
goto out_map;
+ normalize_cpu_capacity();
+
/*
* Check that all cores are in the topology; the SMP code will
* only mark cores described in the DT as possible.
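
The capacity values handled above are combined in two steps: the raw capacity-dmips-mhz property is rescaled by each CPU's maximum frequency in the cpufreq notifier, and normalize_cpu_capacity() then shifts everything so the fastest CPU lands on SCHED_CAPACITY_SCALE (1024). Below is a minimal user-space sketch of that arithmetic; the DT capacities and frequencies are made-up example values, and the two normalization passes are folded into one (the result matches up to rounding).

/*
 * Sketch of the cpu_capacity normalization above (example values only).
 * Two "big" CPUs (dmips-mhz 1024 @ 1.8 GHz) and two "little" CPUs
 * (dmips-mhz 436 @ 1.4 GHz) end up at capacities 1024 and 339.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	unsigned long dmips_mhz[] = { 1024, 1024, 436, 436 };	/* hypothetical DT values */
	unsigned long max_freq[]  = { 1800000, 1800000, 1400000, 1400000 };	/* kHz */
	unsigned long raw[4], scale = 0, cap;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		/* rescale by the maximum frequency in MHz, track the largest value */
		raw[cpu] = dmips_mhz[cpu] * (max_freq[cpu] / 1000UL);
		if (raw[cpu] > scale)
			scale = raw[cpu];
	}

	for (cpu = 0; cpu < 4; cpu++) {
		/* as in normalize_cpu_capacity(): shift, then divide by the maximum */
		cap = (raw[cpu] << SCHED_CAPACITY_SHIFT) / scale;
		printf("CPU%d cpu_capacity=%lu\n", cpu, cap);
	}
	return 0;
}
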
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c9986b3e0a96..5b830be79c01 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
@@ -147,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
if (!tsk)
tsk = current;
+ if (!try_get_task_stack(tsk))
+ return;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
@@ -212,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
stack + sizeof(struct pt_regs));
}
}
+
+ put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -227,10 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#endif
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct thread_info *thread,
- struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -245,7 +250,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
if (!user_mode(regs)) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
@@ -264,7 +270,6 @@ static DEFINE_RAW_SPINLOCK(die_lock);
*/
void die(const char *str, struct pt_regs *regs, int err)
{
- struct thread_info *thread = current_thread_info();
int ret;
oops_enter();
@@ -272,9 +277,9 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
- ret = __die(str, err, thread, regs);
+ ret = __die(str, err, regs);
- if (regs && kexec_should_crash(thread->task))
+ if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
@@ -435,9 +440,10 @@ int cpu_enable_cache_maint_trap(void *__unused)
}
#define __user_cache_maint(insn, address, res) \
- if (untagged_addr(address) >= user_addr_max()) \
+ if (untagged_addr(address) >= user_addr_max()) { \
res = -EFAULT; \
- else \
+ } else { \
+ uaccess_ttbr0_enable(); \
asm volatile ( \
"1: " insn ", %1\n" \
" mov %w0, #0\n" \
@@ -449,7 +455,9 @@ int cpu_enable_cache_maint_trap(void *__unused)
" .popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ : "r" (address), "i" (-EFAULT)); \
+ uaccess_ttbr0_disable(); \
+ }
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1105aab1e6d6..b8deffa9e1bf 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -216,6 +216,11 @@ SECTIONS
swapper_pg_dir = .;
. += SWAPPER_DIR_SIZE;
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ reserved_ttbr0 = .;
+ . += RESERVED_TTBR0_SIZE;
+#endif
+
_end = .;
STABS_DEBUG
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 6eaf12c1d627..52cb7ad9b2fd 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -16,9 +16,6 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
-config KVM_ARM_VGIC_V3_ITS
- bool
-
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
@@ -34,7 +31,6 @@ config KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
- select KVM_ARM_VGIC_V3_ITS
select KVM_ARM_PMU if HW_PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a204adf29f0a..1bfe30dfbfe7 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -57,6 +57,16 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/*
+ * Guest access to FP/ASIMD registers is routed to this handler only
+ * when the system doesn't support FP/ASIMD.
+ */
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
/**
* kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
* instruction executed by a guest
@@ -144,6 +154,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
+ [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399f7105..5e9052f087f2 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -106,9 +106,16 @@ el1_trap:
* x0: ESR_EC
*/
- /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
+ * If FP/SIMD is not implemented, handle the trap and inject an
+ * undefined instruction exception to the guest.
+ */
+alternative_if_not ARM64_HAS_NO_FPSIMD
cmp x0, #ESR_ELx_EC_FP_ASIMD
b.eq __fpsimd_guest_restore
+alternative_else_nop_endif
mrs x1, tpidr_el2
mov x0, #ARM_EXCEPTION_TRAP
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 83037cd62d01..75e83dd40d43 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -21,6 +21,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/fpsimd.h>
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
@@ -76,16 +77,24 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* traps are only taken to EL2 if the operation would not otherwise
* trap to EL1. Therefore, always make sure that for 32-bit guests,
* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+ * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
+ * it will cause an exception.
*/
val = vcpu->arch.hcr_el2;
- if (!(val & HCR_RW)) {
+ if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
write_sysreg(val, hcr_el2);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
- /* Make sure we trap PMU access from EL0 to EL2 */
+ /*
+ * Make sure we trap PMU access from EL0 to EL2. Also sanitize
+ * PMSELR_EL0 to make sure it never contains the cycle
+ * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
+ * EL1 instead of being trapped to EL2.
+ */
+ write_sysreg(0, pmselr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
__activate_traps_arch()();
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
}
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5bc460884639..e95d4f68bf54 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -86,12 +86,6 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_VCPU_ATTRIBUTES:
r = 1;
break;
- case KVM_CAP_MSI_DEVID:
- if (!kvm)
- r = -EINVAL;
- else
- r = kvm->arch.vgic.msis_require_devid;
- break;
default:
r = 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f302fdb3a030..87e7e6608cd8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
idx = ARMV8_PMU_CYCLE_IDX;
} else {
- BUG();
+ return false;
}
+ } else if (r->CRn == 0 && r->CRm == 9) {
+ /* PMCCNTR */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
+ idx = ARMV8_PMU_CYCLE_IDX;
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
/* PMEVCNTRn_EL0 */
if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
} else {
- BUG();
+ return false;
}
if (!pmu_counter_idx_valid(vcpu, idx))
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3ce6d6..d7150e30438a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,10 +17,7 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
.text
@@ -33,8 +30,7 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x2, x3
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -54,8 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x2
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4fd67ea03bb0..cfe13396085b 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,11 +16,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -67,12 +64,10 @@
end .req x5
ENTRY(__arch_copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd08c84..718b1c4e2f85 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,11 +18,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy from user space to user space (alignment handled by the hardware)
@@ -68,12 +65,10 @@
end .req x5
ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0
ret
ENDPROC(__copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7a7efe255034..e99e31c9acac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,11 +16,8 @@
#include <linux/linkage.h>
-#include <asm/alternative.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -66,12 +63,10 @@
end .req x5
ENTRY(__arch_copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
- CONFIG_ARM64_PAN)
+ uaccess_disable_not_uao x3
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 54bb209cae8e..e703fb9defad 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,7 +3,8 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP) += dump.o
+obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 58b5a906ff78..da9576932322 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/uaccess.h>
/*
* flush_icache_range(start,end)
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
+ uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
dsb ish
isb
mov x0, #0
+1:
+ uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
- ret
+ b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index efcf1f7ef1e4..4c63cb154859 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -221,7 +221,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
- cpu_switch_mm(mm->pgd, mm);
+ /*
+ * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+ * emulating PAN.
+ */
+ if (!system_uses_ttbr0_pan())
+ cpu_switch_mm(mm->pgd, mm);
}
static int asids_init(void)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3f74d0d98de6..aa6c8f834d9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -938,11 +938,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_teardown_dma_ops(struct device *dev)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
- if (WARN_ON(domain))
- iommu_detach_device(domain, dev);
-
dev->archdata.dma_ops = NULL;
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 9c3e75df2180..ca74a2aace42 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -50,6 +50,18 @@ static const struct addr_marker address_markers[] = {
{ -1, NULL },
};
+#define pt_dump_seq_printf(m, fmt, args...) \
+({ \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_seq_puts(m, fmt) \
+({ \
+ if (m) \
+ seq_printf(m, fmt); \
+})
+
/*
* The page dumper groups page table entries of the same type into a single
* description. It uses pg_state to track the range information while
@@ -62,6 +74,9 @@ struct pg_state {
unsigned long start_address;
unsigned level;
u64 current_prot;
+ bool check_wx;
+ unsigned long wx_pages;
+ unsigned long uxn_pages;
};
struct prot_bits {
@@ -186,10 +201,39 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
s = bits->clear;
if (s)
- seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
}
+static void note_prot_uxn(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+
+ if ((st->current_prot & PTE_UXN) == PTE_UXN)
+ return;
+
+ WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+ if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
+ return;
+ if ((st->current_prot & PTE_PXN) == PTE_PXN)
+ return;
+
+ WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
u64 val)
{
@@ -200,14 +244,16 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
st->level = level;
st->current_prot = prot;
st->start_address = addr;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
if (st->current_prot) {
- seq_printf(st->seq, "0x%016lx-0x%016lx ",
+ note_prot_uxn(st, addr);
+ note_prot_wx(st, addr);
+ pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
st->start_address, addr);
delta = (addr - st->start_address) >> 10;
@@ -215,17 +261,17 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
pg_level[st->level].name);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits,
pg_level[st->level].num);
- seq_puts(st->seq, "\n");
+ pt_dump_seq_puts(st->seq, "\n");
}
if (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
@@ -235,7 +281,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
if (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
}
@@ -304,9 +350,8 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
}
}
-static int ptdump_show(struct seq_file *m, void *v)
+void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
- struct ptdump_info *info = m->private;
struct pg_state st = {
.seq = m,
.marker = info->markers,
@@ -315,33 +360,16 @@ static int ptdump_show(struct seq_file *m, void *v)
walk_pgd(&st, info->mm, info->base_addr);
note_page(&st, 0, 0, 0);
- return 0;
}
-static int ptdump_open(struct inode *inode, struct file *file)
+static void ptdump_initialize(void)
{
- return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int ptdump_register(struct ptdump_info *info, const char *name)
-{
- struct dentry *pe;
unsigned i, j;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
if (pg_level[i].bits)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
-
- pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
- return pe ? 0 : -ENOMEM;
}
static struct ptdump_info kernel_ptdump_info = {
@@ -350,8 +378,30 @@ static struct ptdump_info kernel_ptdump_info = {
.base_addr = VA_START,
};
+void ptdump_check_wx(void)
+{
+ struct pg_state st = {
+ .seq = NULL,
+ .marker = (struct addr_marker[]) {
+ { 0, NULL},
+ { -1, NULL},
+ },
+ .check_wx = true,
+ };
+
+ walk_pgd(&st, &init_mm, 0);
+ note_page(&st, 0, 0, 0);
+ if (st.wx_pages || st.uxn_pages)
+ pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
+ st.wx_pages, st.uxn_pages);
+ else
+ pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
static int ptdump_init(void)
{
- return ptdump_register(&kernel_ptdump_info, "kernel_page_tables");
+ ptdump_initialize();
+ return ptdump_debugfs_register(&kernel_ptdump_info,
+ "kernel_page_tables");
}
device_initcall(ptdump_init);
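
The checker added above flags two kinds of kernel mappings: ranges that are both writable and kernel-executable (neither PTE_RDONLY nor PTE_PXN set), and ranges that userspace could execute (PTE_UXN clear). A small stand-alone sketch of those predicates follows; the bit positions are illustrative placeholders, not copied from the kernel headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_RDONLY	(1ULL << 7)	/* placeholder bit positions */
#define PTE_PXN		(1ULL << 53)
#define PTE_UXN		(1ULL << 54)

/* mirrors note_prot_wx(): writable and executable by the kernel */
static bool is_wx(uint64_t prot)
{
	return !(prot & PTE_RDONLY) && !(prot & PTE_PXN);
}

/* mirrors note_prot_uxn(): executable by userspace (UXN clear) */
static bool is_non_uxn(uint64_t prot)
{
	return (prot & PTE_UXN) != PTE_UXN;
}

int main(void)
{
	uint64_t text = PTE_RDONLY | PTE_UXN;	/* read-only, kernel-exec */
	uint64_t data = PTE_PXN | PTE_UXN;	/* writable, never executable */
	uint64_t bad  = PTE_UXN;		/* writable and kernel-exec: flagged */

	printf("text W+X=%d non-UXN=%d\n", is_wx(text), is_non_uxn(text));	/* 0 0 */
	printf("data W+X=%d non-UXN=%d\n", is_wx(data), is_non_uxn(data));	/* 0 0 */
	printf("bad  W+X=%d non-UXN=%d\n", is_wx(bad), is_non_uxn(bad));	/* 1 0 */
	return 0;
}
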
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0f8788374815..a78a5c401806 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -269,13 +269,19 @@ out:
return fault;
}
-static inline bool is_permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
- (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+ if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ return false;
+
+ if (system_uses_ttbr0_pan())
+ return fsc_type == ESR_ELx_FSC_FAULT &&
+ (regs->pstate & PSR_PAN_BIT);
+ else
+ return fsc_type == ESR_ELx_FSC_PERM;
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -315,7 +321,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (is_permission_fault(esr) && (addr < USER_DS)) {
+ if (addr < USER_DS && is_permission_fault(esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -507,10 +513,10 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 8377329d8c97..554a2558c12e 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -25,14 +25,7 @@
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- if (vma->vm_flags & VM_EXEC)
- __flush_icache_all();
-}
-
-static void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(void *kaddr, unsigned long len)
{
unsigned long addr = (unsigned long)kaddr;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2e49bd252fe7..964b7549af5c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
*pgsize = PAGE_SIZE;
if (!pte_cont(pte))
return 1;
- if (!pgd_present(*pgd)) {
- VM_BUG_ON(!pgd_present(*pgd));
- return 1;
- }
pud = pud_offset(pgd, addr);
- if (!pud_present(*pud)) {
- VM_BUG_ON(!pud_present(*pud));
- return 1;
- }
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- VM_BUG_ON(!pmd_present(*pmd));
- return 1;
- }
if ((pte_t *)pmd == ptep) {
*pgsize = PMD_SIZE;
return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
/* save the 1st pte to return */
pte = ptep_get_and_clear(mm, addr, cpte);
- for (i = 1; i < ncontig; ++i) {
+ for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
/*
* If HW_AFDBM is enabled, then the HW could
* turn on the dirty bit for any of the page
@@ -250,7 +238,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pfn = pte_pfn(*cpte);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte) {
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
changed = ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
cpte = huge_pte_offset(mm, addr);
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_set_wrprotect(mm, addr, cpte);
} else {
ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
cpte = huge_pte_offset(vma->vm_mm, addr);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_clear_flush(vma, addr, cpte);
} else {
ptep_clear_flush(vma, addr, ptep);
@@ -323,7 +311,7 @@ __setup("hugepagesz=", setup_hugepagesz);
static __init int add_default_hugepagesz(void)
{
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PMD_SHIFT);
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
return 0;
}
arch_initcall(add_default_hugepagesz);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3fdc6f..17243e43184e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,8 +28,6 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -42,6 +40,7 @@
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
+#include <asm/ptdump.h>
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
@@ -95,11 +94,24 @@ static phys_addr_t __init early_pgtable_alloc(void)
return phys;
}
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+ /*
+ * The following mapping attributes may be updated in live
+ * kernel mappings without the need for break-before-make.
+ */
+ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+ return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+}
+
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+ phys_addr_t (*pgtable_alloc)(void),
+ bool page_mappings_only)
{
+ pgprot_t __prot = prot;
pte_t *pte;
BUG_ON(pmd_sect(*pmd));
@@ -115,8 +127,28 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_set_fixmap_offset(pmd, addr);
do {
- set_pte(pte, pfn_pte(pfn, prot));
+ pte_t old_pte = *pte;
+
+ /*
+ * Set the contiguous bit for the subsequent group of PTEs if
+ * its size and alignment are appropriate.
+ */
+ if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
+ if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
+ __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+ else
+ __prot = prot;
+ }
+
+ set_pte(pte, pfn_pte(pfn, __prot));
pfn++;
+
+ /*
+ * After the PTE entry has been populated once, we
+ * only allow updates to the permission attributes.
+ */
+ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_clear_fixmap();
@@ -125,8 +157,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
+ pgprot_t __prot = prot;
pmd_t *pmd;
unsigned long next;
@@ -146,27 +179,39 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd = pmd_set_fixmap_offset(pud, addr);
do {
+ pmd_t old_pmd = *pmd;
+
next = pmd_addr_end(addr, end);
+
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- allow_block_mappings) {
- pmd_t old_pmd =*pmd;
- pmd_set_huge(pmd, phys, prot);
+ !page_mappings_only) {
/*
- * Check for previous table entries created during
- * boot (__create_page_tables) and flush them.
+ * Set the contiguous bit for the subsequent group of
+ * PMDs if its size and alignment are appropriate.
*/
- if (!pmd_none(old_pmd)) {
- flush_tlb_all();
- if (pmd_table(old_pmd)) {
- phys_addr_t table = pmd_page_paddr(old_pmd);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
+ if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
+ if (end - addr >= CONT_PMD_SIZE)
+ __prot = __pgprot(pgprot_val(prot) |
+ PTE_CONT);
+ else
+ __prot = prot;
}
+ pmd_set_huge(pmd, phys, __prot);
+
+ /*
+ * After the PMD entry has been populated once, we
+ * only allow updates to the permission attributes.
+ */
+ BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+ pmd_val(*pmd)));
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc);
+ prot, pgtable_alloc,
+ page_mappings_only);
+
+ BUG_ON(pmd_val(old_pmd) != 0 &&
+ pmd_val(old_pmd) != pmd_val(*pmd));
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -189,7 +234,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
pud_t *pud;
unsigned long next;
@@ -204,33 +249,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = pud_set_fixmap_offset(pgd, addr);
do {
+ pud_t old_pud = *pud;
+
next = pud_addr_end(addr, end);
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) && allow_block_mappings) {
- pud_t old_pud = *pud;
+ if (use_1G_block(addr, next, phys) && !page_mappings_only) {
pud_set_huge(pud, phys, prot);
/*
- * If we have an old value for a pud, it will
- * be pointing to a pmd table that we no longer
- * need (from swapper_pg_dir).
- *
- * Look up the old pmd table and free it.
+ * After the PUD entry has been populated once, we
+ * only allow updates to the permission attributes.
*/
- if (!pud_none(old_pud)) {
- flush_tlb_all();
- if (pud_table(old_pud)) {
- phys_addr_t table = pud_page_paddr(old_pud);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
- }
+ BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+ pud_val(*pud)));
} else {
alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc, allow_block_mappings);
+ pgtable_alloc, page_mappings_only);
+
+ BUG_ON(pud_val(old_pud) != 0 &&
+ pud_val(old_pud) != pud_val(*pud));
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -242,7 +282,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool allow_block_mappings)
+ bool page_mappings_only)
{
unsigned long addr, length, end, next;
pgd_t *pgd = pgd_offset_raw(pgdir, virt);
@@ -262,7 +302,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
- allow_block_mappings);
+ page_mappings_only);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
@@ -291,17 +331,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
- pgprot_t prot, bool allow_block_mappings)
+ pgprot_t prot, bool page_mappings_only)
{
BUG_ON(mm == &init_mm);
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- pgd_pgtable_alloc, allow_block_mappings);
+ pgd_pgtable_alloc, page_mappings_only);
}
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -314,7 +354,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
}
__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL, !debug_pagealloc_enabled());
+ NULL, debug_pagealloc_enabled());
}
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
@@ -332,7 +372,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
__create_pgd_mapping(pgd, start, __phys_to_virt(start),
end - start, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
return;
}
@@ -345,13 +385,13 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
__phys_to_virt(start),
kernel_start - start, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
if (kernel_end < end)
__create_pgd_mapping(pgd, kernel_end,
__phys_to_virt(kernel_end),
end - kernel_end, PAGE_KERNEL,
early_pgtable_alloc,
- !debug_pagealloc_enabled());
+ debug_pagealloc_enabled());
/*
* Map the linear alias of the [_text, __init_begin) interval as
@@ -361,7 +401,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
*/
__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc, !debug_pagealloc_enabled());
+ early_pgtable_alloc, debug_pagealloc_enabled());
}
static void __init map_mem(pgd_t *pgd)
@@ -396,6 +436,11 @@ void mark_rodata_ro(void)
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
+
+ /* flush the TLBs after updating live kernel mappings */
+ flush_tlb_all();
+
+ debug_checkwx();
}
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
@@ -408,7 +453,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(size));
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, !debug_pagealloc_enabled());
+ early_pgtable_alloc, debug_pagealloc_enabled());
vma->addr = va_start;
vma->phys_addr = pa_start;
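
The contiguous-hint logic added to alloc_init_pte() above only sets PTE_CONT when the virtual and physical addresses are both aligned to the contiguous range and the remaining size covers it (and page_mappings_only is not requested). Below is a minimal user-space sketch of that eligibility check, assuming a 4K granule where CONT_PTES is 16 (so CONT_PTE_SIZE is 64K); the example addresses are made up.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define CONT_PTES	16UL			/* assumed 4K granule */
#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)	/* 64K */
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

/* mirrors the check in alloc_init_pte(): alignment of both addresses plus size */
static bool can_use_cont_pte(unsigned long virt, unsigned long phys,
			     unsigned long size)
{
	return ((virt | phys) & ~CONT_PTE_MASK) == 0 && size >= CONT_PTE_SIZE;
}

int main(void)
{
	/* 64K-aligned 2M range: eligible */
	printf("%d\n", can_use_cont_pte(0xffff000008200000UL, 0x80200000UL, 0x200000UL));
	/* shifted by one page: not eligible */
	printf("%d\n", can_use_cont_pte(0xffff000008201000UL, 0x80201000UL, 0x200000UL));
	return 0;
}
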
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 352c73b6a59e..32682be978e0 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,11 +70,14 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -90,6 +93,7 @@ ENTRY(cpu_do_resume)
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -112,6 +116,8 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
@@ -136,11 +142,7 @@ ENTRY(cpu_do_switch_mm)
bfi x0, x1, #48, #16 // set the ASID
msr ttbr0_el1, x0 // set TTBR0
isb
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
- ic iallu
- dsb nsh
- isb
-alternative_else_nop_endif
+ post_ttbr0_update_workaround
ret
ENDPROC(cpu_do_switch_mm)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
new file mode 100644
index 000000000000..eee4d864350c
--- /dev/null
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -0,0 +1,31 @@
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptdump.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct ptdump_info *info = m->private;
+ ptdump_walk_pgd(m, info);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, inode->i_private);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+{
+ struct dentry *pe;
+ pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
+ return pe ? 0 : -ENOMEM;
+
+}
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 329c8027b0a9..b41aff25426d 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,6 +49,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/uaccess.h>
#include <xen/interface/xen.h>
@@ -91,6 +92,20 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
+ /*
+ * Privcmd calls are issued by userspace. The kernel needs to
+ * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+ * translations to user memory via AT instructions. Since AT
+ * instructions are not affected by the PAN bit (ARMv8.1), we only
+ * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+ * is enabled (it implies that hardware UAO and PAN are disabled).
+ */
+ uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
+
+ /*
+ * Disable userspace access from kernel once the hyp call completed.
+ */
+ uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);
diff --git a/arch/avr32/include/asm/mutex.h b/arch/avr32/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/avr32/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 941593c7d9f3..972adcc1e8f4 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -92,7 +92,6 @@ extern struct avr32_cpuinfo boot_cpu_data;
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
struct cpu_context {
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 1fd147f09a38..5a650426f357 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/uapi/asm/unistd.h b/arch/avr32/include/uapi/asm/unistd.h
index 2c8a0d2b6c30..236505d889d0 100644
--- a/arch/avr32/include/uapi/asm/unistd.h
+++ b/arch/avr32/include/uapi/asm/unistd.h
@@ -340,5 +340,8 @@
#define __NR_copy_file_range 325
#define __NR_preadv2 326
#define __NR_pwritev2 327
+#define __NR_pkey_mprotect 328
+#define __NR_pkey_alloc 329
+#define __NR_pkey_free 330
#endif /* _UAPI__ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 7b348ba70e41..774ce57f4948 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -341,4 +341,7 @@ sys_call_table:
.long __sys_copy_file_range
.long __sys_preadv2
.long __sys_pwritev2
+ .long sys_pkey_mprotect
+ .long sys_pkey_alloc
+ .long sys_pkey_free /* 330 */
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 52c179bec0cc..fdf1caecb7b9 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -242,7 +242,7 @@ dump_clock(struct clk *parent, struct clkinf *r)
clk_get_rate(parent));
if (parent->dev)
seq_printf(r->s, ", for %s", dev_name(parent->dev));
- seq_printf(r->s, "\n");
+ seq_putc(r->s, '\n');
/* cost of this scan is small, but not linear... */
r->nest = nest + NEST_DELTA;
@@ -261,23 +261,32 @@ static int clk_show(struct seq_file *s, void *unused)
struct clk *clk;
/* show all the power manager registers */
- seq_printf(s, "MCCTRL = %8x\n", pm_readl(MCCTRL));
- seq_printf(s, "CKSEL = %8x\n", pm_readl(CKSEL));
- seq_printf(s, "CPUMASK = %8x\n", pm_readl(CPU_MASK));
- seq_printf(s, "HSBMASK = %8x\n", pm_readl(HSB_MASK));
- seq_printf(s, "PBAMASK = %8x\n", pm_readl(PBA_MASK));
- seq_printf(s, "PBBMASK = %8x\n", pm_readl(PBB_MASK));
- seq_printf(s, "PLL0 = %8x\n", pm_readl(PLL0));
- seq_printf(s, "PLL1 = %8x\n", pm_readl(PLL1));
- seq_printf(s, "IMR = %8x\n", pm_readl(IMR));
+ seq_printf(s,
+ "MCCTRL = %8x\n"
+ "CKSEL = %8x\n"
+ "CPUMASK = %8x\n"
+ "HSBMASK = %8x\n"
+ "PBAMASK = %8x\n"
+ "PBBMASK = %8x\n"
+ "PLL0 = %8x\n"
+ "PLL1 = %8x\n"
+ "IMR = %8x\n",
+ pm_readl(MCCTRL),
+ pm_readl(CKSEL),
+ pm_readl(CPU_MASK),
+ pm_readl(HSB_MASK),
+ pm_readl(PBA_MASK),
+ pm_readl(PBB_MASK),
+ pm_readl(PLL0),
+ pm_readl(PLL1),
+ pm_readl(IMR));
for (i = 0; i < 8; i++) {
if (i == 5)
continue;
seq_printf(s, "GCCTRL%d = %8x\n", i, pm_readl(GCCTRL(i)));
}
- seq_printf(s, "\n");
-
+ seq_putc(s, '\n');
r.s = s;
r.nest = 0;
/* protected from changes on the list while dumping */
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 13d3fc4270b7..7fae6ec7e8ec 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -367,13 +367,13 @@ static void pio_bank_show(struct seq_file *s, struct gpio_chip *chip)
(mask & pdsr) ? "hi" : "lo",
(mask & pusr) ? " " : "up");
if (ifsr & mask)
- seq_printf(s, " deglitch");
+ seq_puts(s, " deglitch");
if ((osr & mdsr) & mask)
- seq_printf(s, " open-drain");
+ seq_puts(s, " open-drain");
if (imr & mask)
seq_printf(s, " irq-%d edge-both",
gpio_to_irq(chip->base + i));
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
}
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 58610d0df7ed..54534e5d0781 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -146,7 +146,8 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
{
void *cpu_addr = page_address(page) + offset;
- dma_cache_sync(dev, cpu_addr, size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_cache_sync(dev, cpu_addr, size, direction);
return virt_to_bus(cpu_addr);
}
@@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
virt = sg_virt(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
dma_cache_sync(dev, virt, sg->length, direction);
}
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 91d49c0a3118..2fb67b59d188 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
generic-y += pgalloc.h
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 0c265aba94ad..85d4af97c986 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -92,7 +92,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() smp_mb()
-#define cpu_relax_lowlatency() cpu_relax()
/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 53fbbb61aa86..a27a74a18fb0 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
for_each_sg(sg_list, sg, nents, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
@@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
- _dma_sync(handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ _dma_sync(handle, size, dir);
+
return handle;
}
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8d79286ee4e8..360d99645163 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -270,7 +270,7 @@ long arch_ptrace(struct task_struct *child, long request,
switch (bfin_mem_access_type(addr, to_copy)) {
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
- copied = access_process_vm(child, addr, &tmp,
+ copied = ptrace_access_vm(child, addr, &tmp,
to_copy, FOLL_FORCE);
if (copied)
break;
@@ -323,7 +323,7 @@ long arch_ptrace(struct task_struct *child, long request,
switch (bfin_mem_access_type(addr, to_copy)) {
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
- copied = access_process_vm(child, addr, &data,
+ copied = ptrace_access_vm(child, addr, &data,
to_copy,
FOLL_FORCE | FOLL_WRITE);
break;
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 8a2543c654b3..cf27554e76bf 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -1,5 +1,7 @@
/* Load firmware into Core B on a BF561
*
+ * Author: Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
+ *
* Copyright 2004-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
@@ -14,9 +16,9 @@
#include <linux/device.h>
#include <linux/fs.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#define CMD_COREB_START _IO('b', 0)
#define CMD_COREB_STOP _IO('b', 1)
@@ -59,8 +61,4 @@ static struct miscdevice coreb_dev = {
.name = "coreb",
.fops = &coreb_fops,
};
-module_misc_device(coreb_dev);
-
-MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
-MODULE_DESCRIPTION("BF561 Core B Support");
-MODULE_LICENSE("GPL");
+builtin_misc_device(coreb_dev);
diff --git a/arch/c6x/include/asm/mutex.h b/arch/c6x/include/asm/mutex.h
deleted file mode 100644
index 7a7248e0462d..000000000000
--- a/arch/c6x/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_C6X_MUTEX_H
-#define _ASM_C6X_MUTEX_H
-
-#include <asm-generic/mutex-null.h>
-
-#endif /* _ASM_C6X_MUTEX_H */
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index f2ef31be2f8b..b9eb3da7f278 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -121,7 +121,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
-#define cpu_relax_lowlatency() cpu_relax()
extern const struct seq_operations cpuinfo_op;
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index db4a6a301f5e..6752df32ef06 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = virt_to_phys(page_address(page) + offset);
- c6x_dma_sync(handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ c6x_dma_sync(handle, size, dir);
+
return handle;
}
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- c6x_dma_sync(handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ c6x_dma_sync(handle, size, dir);
}
static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
- c6x_dma_sync(sg->dma_address, sg->length, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ c6x_dma_sync(sg->dma_address, sg->length, dir);
}
return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
for_each_sg(sglist, sg, nents, i)
c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
}
static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f0df654ac6fc..fe1f9cf7b391 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
/* The trampoline page is globally mapped, no page table to traverse.*/
tmp = *(unsigned long*)addr;
} else {
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
+ copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
diff --git a/arch/cris/boot/compressed/Makefile b/arch/cris/boot/compressed/Makefile
index 8fe9338c1775..e4ba0be0e782 100644
--- a/arch/cris/boot/compressed/Makefile
+++ b/arch/cris/boot/compressed/Makefile
@@ -2,9 +2,6 @@
# arch/cris/boot/compressed/Makefile
#
-asflags-y += $(LINUXINCLUDE)
-ccflags-y += -O2 $(LINUXINCLUDE)
-
# asflags-$(CONFIG_ETRAX_ARCH_V32) += -I$(srctree)/include/asm/mach \
# -I$(srctree)/include/asm/arch
# ccflags-$(CONFIG_ETRAX_ARCH_V32) += -O2 -I$(srctree)/include/asm/mach
diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile
index 52bd0bd1dd22..a82025940006 100644
--- a/arch/cris/boot/rescue/Makefile
+++ b/arch/cris/boot/rescue/Makefile
@@ -8,8 +8,8 @@
# asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch
# LD = gcc-cris -mlinux -march=v32 -nostdlib
-asflags-y += $(LINUXINCLUDE)
-ccflags-y += -O2 $(LINUXINCLUDE)
+ifdef CONFIG_ETRAX_AXISFLASHMAP
+
arch-$(CONFIG_ETRAX_ARCH_V10) = v10
arch-$(CONFIG_ETRAX_ARCH_V32) = v32
@@ -28,6 +28,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
$(call if_changed,objcopy)
cp -p $(obj)/rescue.bin $(objtree)
+else
+$(obj)/rescue.bin:
+
+endif
+
$(obj)/testrescue.bin: $(obj)/testrescue.o
$(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
# Pad it to 784 bytes
diff --git a/arch/cris/include/asm/mutex.h b/arch/cris/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/cris/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 862126b58116..15b815df29c1 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -63,7 +63,6 @@ static inline void release_thread(struct task_struct *dead_task)
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
diff --git a/arch/frv/include/asm/mutex.h b/arch/frv/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/frv/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 73f0a79ad8e6..ddaeb9cc9143 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -107,7 +107,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/* data cache prefetch */
#define ARCH_HAS_PREFETCH
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index afbc98f02d27..81e03530ed39 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -90,5 +90,7 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 90f2e4cb33d6..187688128c65 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -109,16 +109,19 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
- int i;
struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return nents;
for_each_sg(sglist, sg, nents, i) {
frv_cache_wback_inv(sg_dma_address(sg),
sg_dma_address(sg) + sg_dma_len(sg));
}
- BUG_ON(direction == DMA_NONE);
-
return nents;
}
@@ -127,7 +130,10 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction, unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
- flush_dcache_page(page);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ flush_dcache_page(page);
+
return (dma_addr_t) page_to_phys(page) + offset;
}
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index f585745b1abc..dba7df918144 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -40,13 +40,16 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
+ struct scatterlist *sg;
unsigned long dampr2;
void *vaddr;
int i;
- struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return nents;
+
dampr2 = __get_DAMPR(2);
for_each_sg(sglist, sg, nents, i) {
@@ -70,7 +73,9 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
- flush_dcache_page(page);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ flush_dcache_page(page);
+
return (dma_addr_t) page_to_phys(page) + offset;
}
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/h8300/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 111df7397ac7..65132d7ae9e5 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -127,7 +127,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define HARD_RESET_NOW() ({ \
local_irq_disable(); \
diff --git a/arch/hexagon/include/asm/mutex.h b/arch/hexagon/include/asm/mutex.h
deleted file mode 100644
index 58b52de1bc22..000000000000
--- a/arch/hexagon/include/asm/mutex.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#include <asm-generic/mutex-xchg.h>
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index d8501137c8d0..45a825402f63 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -56,7 +56,6 @@ struct thread_struct {
}
#define cpu_relax() __vmyield()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Decides where the kernel will search for a free chunk of vm space during
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b9017785fb71..dbc4f1003da4 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -119,6 +119,9 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
s->dma_length = s->length;
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
flush_dcache_range(dma_addr_to_virt(s->dma_address),
dma_addr_to_virt(s->dma_address + s->length));
}
@@ -180,7 +183,8 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
if (!check_addr("map_single", dev, bus, size))
return bad_dma_address;
- dma_sync(dma_addr_to_virt(bus), size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_sync(dma_addr_to_virt(bus), size, dir);
return bus;
}
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
deleted file mode 100644
index 28cb819e0ff9..000000000000
--- a/arch/ia64/include/asm/mutex.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * ia64 implementation of the mutex fastpath.
- *
- * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
- *
- */
-
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int ret = ia64_fetchadd4_rel(count, 1);
- if (unlikely(ret < 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
- return 1;
- return 0;
-}
-
-#endif
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index ce53c50d0ba4..03911a336406 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -547,7 +547,6 @@ ia64_eoi (void)
}
#define cpu_relax() ia64_hint(ia64_hint_pause)
-#define cpu_relax_lowlatency() cpu_relax()
static inline int
ia64_get_irr(unsigned int vector)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 77e541cf0e5d..fced197b9626 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -207,15 +207,15 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
*/
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (tlb->nr == tlb->max)
- return true;
-
tlb->need_flush = 1;
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
tlb->pages[tlb->nr++] = page;
+ VM_WARN_ON(tlb->nr > tlb->max);
+ if (tlb->nr == tlb->max)
+ return true;
return false;
}
@@ -236,10 +236,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (__tlb_remove_page(tlb, page)) {
+ if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
- __tlb_remove_page(tlb, page);
- }
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -248,12 +246,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -283,6 +275,15 @@ do { \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
+
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 0018fad9039f..57feb0c1f7d7 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -99,4 +99,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 5ed0ea92c5bf..85bba43e7d5d 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -224,85 +224,45 @@ static struct attribute_group err_inject_attr_group = {
.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
-static int err_inject_add_dev(struct device *sys_dev)
+static int err_inject_add_dev(unsigned int cpu)
{
+ struct device *sys_dev = get_cpu_device(cpu);
+
return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}
-static int err_inject_remove_dev(struct device *sys_dev)
+static int err_inject_remove_dev(unsigned int cpu)
{
+ struct device *sys_dev = get_cpu_device(cpu);
+
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
return 0;
}
-static int err_inject_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *sys_dev;
-
- sys_dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- err_inject_add_dev(sys_dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- err_inject_remove_dev(sys_dev);
- break;
- }
-
- return NOTIFY_OK;
-}
-static struct notifier_block err_inject_cpu_notifier =
-{
- .notifier_call = err_inject_cpu_callback,
-};
+static enum cpuhp_state hp_online;
-static int __init
-err_inject_init(void)
+static int __init err_inject_init(void)
{
- int i;
-
+ int ret;
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Enter error injection driver.\n");
#endif
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
- (void *)(long)i);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/err_inj:online",
+ err_inject_add_dev, err_inject_remove_dev);
+ if (ret >= 0) {
+ hp_online = ret;
+ ret = 0;
}
-
- __register_hotcpu_notifier(&err_inject_cpu_notifier);
-
- cpu_notifier_register_done();
-
- return 0;
+ return ret;
}
-static void __exit
-err_inject_exit(void)
+static void __exit err_inject_exit(void)
{
- int i;
- struct device *sys_dev;
-
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Exit error injection driver.\n");
#endif
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- sys_dev = get_cpu_device(i);
- sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
- }
-
- __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
-
- cpu_notifier_register_done();
+ cpuhp_remove_state(hp_online);
}
module_init(err_inject_init);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index c39c3cd3ac34..b6e597860888 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,8 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
.release = single_release,
};
-static void
-create_palinfo_proc_entries(unsigned int cpu)
+static int palinfo_add_proc(unsigned int cpu)
{
pal_func_cpu_u_t f;
struct proc_dir_entry *cpu_dir;
@@ -943,7 +942,7 @@ create_palinfo_proc_entries(unsigned int cpu)
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
if (!cpu_dir)
- return;
+ return -EINVAL;
f.req_cpu = cpu;
@@ -952,42 +951,21 @@ create_palinfo_proc_entries(unsigned int cpu)
proc_create_data(palinfo_entries[j].name, 0, cpu_dir,
&proc_palinfo_fops, (void *)f.value);
}
+ return 0;
}
-static void
-remove_palinfo_proc_entries(unsigned int hcpu)
+static int palinfo_del_proc(unsigned int hcpu)
{
char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+
sprintf(cpustr, "cpu%d", hcpu);
remove_proc_subtree(cpustr, palinfo_dir);
+ return 0;
}
-static int palinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int hotcpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- create_palinfo_proc_entries(hotcpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- remove_palinfo_proc_entries(hotcpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __refdata palinfo_cpu_notifier =
-{
- .notifier_call = palinfo_cpu_callback,
- .priority = 0,
-};
+static enum cpuhp_state hp_online;
-static int __init
-palinfo_init(void)
+static int __init palinfo_init(void)
{
int i = 0;
@@ -996,25 +974,19 @@ palinfo_init(void)
if (!palinfo_dir)
return -ENOMEM;
- cpu_notifier_register_begin();
-
- /* Create palinfo dirs in /proc for all online cpus */
- for_each_online_cpu(i) {
- create_palinfo_proc_entries(i);
+ i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
+ palinfo_add_proc, palinfo_del_proc);
+ if (i < 0) {
+ remove_proc_subtree("pal", NULL);
+ return i;
}
-
- /* Register for future delivery via notify registration */
- __register_hotcpu_notifier(&palinfo_cpu_notifier);
-
- cpu_notifier_register_done();
-
+ hp_online = i;
return 0;
}
-static void __exit
-palinfo_exit(void)
+static void __exit palinfo_exit(void)
{
- unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+ cpuhp_remove_state(hp_online);
remove_proc_subtree("pal", NULL);
}
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 31aa8c0f68e1..36f660da8124 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1159,7 +1159,7 @@ arch_ptrace (struct task_struct *child, long request,
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
/* read word at location addr */
- if (access_process_vm(child, addr, &data, sizeof(data),
+ if (ptrace_access_vm(child, addr, &data, sizeof(data),
FOLL_FORCE)
!= sizeof(data))
return -EIO;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 5313007d5423..aaf74f36cfa1 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -550,52 +550,40 @@ static const struct file_operations salinfo_data_fops = {
.llseek = default_llseek,
};
-static int
-salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+static int salinfo_cpu_online(unsigned int cpu)
{
- unsigned int i, cpu = (unsigned long)hcpu;
- unsigned long flags;
+ unsigned int i, end = ARRAY_SIZE(salinfo_data);
struct salinfo_data *data;
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- spin_lock_irqsave(&data_saved_lock, flags);
- for (i = 0, data = salinfo_data;
- i < ARRAY_SIZE(salinfo_data);
- ++i, ++data) {
- cpumask_set_cpu(cpu, &data->cpu_event);
- wake_up_interruptible(&data->read_wait);
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- spin_lock_irqsave(&data_saved_lock, flags);
- for (i = 0, data = salinfo_data;
- i < ARRAY_SIZE(salinfo_data);
- ++i, ++data) {
- struct salinfo_data_saved *data_saved;
- int j;
- for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
- j >= 0;
- --j, --data_saved) {
- if (data_saved->buffer && data_saved->cpu == cpu) {
- shift1_data_saved(data, j);
- }
- }
- cpumask_clear_cpu(cpu, &data->cpu_event);
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
- break;
+
+ spin_lock_irq(&data_saved_lock);
+ for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
+ cpumask_set_cpu(cpu, &data->cpu_event);
+ wake_up_interruptible(&data->read_wait);
}
- return NOTIFY_OK;
+ spin_unlock_irq(&data_saved_lock);
+ return 0;
}
-static struct notifier_block salinfo_cpu_notifier =
+static int salinfo_cpu_pre_down(unsigned int cpu)
{
- .notifier_call = salinfo_cpu_callback,
- .priority = 0,
-};
+ unsigned int i, end = ARRAY_SIZE(salinfo_data);
+ struct salinfo_data *data;
+
+ spin_lock_irq(&data_saved_lock);
+ for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
+ struct salinfo_data_saved *data_saved;
+ int j = ARRAY_SIZE(data->data_saved) - 1;
+
+ for (data_saved = data->data_saved + j; j >= 0;
+ --j, --data_saved) {
+ if (data_saved->buffer && data_saved->cpu == cpu)
+ shift1_data_saved(data, j);
+ }
+ cpumask_clear_cpu(cpu, &data->cpu_event);
+ }
+ spin_unlock_irq(&data_saved_lock);
+ return 0;
+}
static int __init
salinfo_init(void)
@@ -604,7 +592,7 @@ salinfo_init(void)
struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
struct proc_dir_entry *dir, *entry;
struct salinfo_data *data;
- int i, j;
+ int i;
salinfo_dir = proc_mkdir("sal", NULL);
if (!salinfo_dir)
@@ -617,8 +605,6 @@ salinfo_init(void)
(void *)salinfo_entries[i].feature);
}
- cpu_notifier_register_begin();
-
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
@@ -639,10 +625,6 @@ salinfo_init(void)
continue;
*sdir++ = entry;
- /* we missed any events before now */
- for_each_online_cpu(j)
- cpumask_set_cpu(j, &data->cpu_event);
-
*sdir++ = dir;
}
@@ -653,10 +635,9 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
- __register_hotcpu_notifier(&salinfo_cpu_notifier);
-
- cpu_notifier_register_done();
-
+ i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online",
+ salinfo_cpu_online, salinfo_cpu_pre_down);
+ WARN_ON(i < 0);
return 0;
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 6f892b94e906..021f44ab4bfb 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -68,7 +68,7 @@ void vtime_account_user(struct task_struct *tsk)
if (ti->ac_utime) {
delta_utime = cycle_to_cputime(ti->ac_utime);
- account_user_time(tsk, delta_utime, delta_utime);
+ account_user_time(tsk, delta_utime);
ti->ac_utime = 0;
}
}
@@ -112,7 +112,7 @@ void vtime_account_system(struct task_struct *tsk)
{
cputime_t delta = vtime_delta(tsk);
- account_system_time(tsk, 0, delta, delta);
+ account_system_time(tsk, 0, delta);
}
EXPORT_SYMBOL_GPL(vtime_account_system);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c01fe8991244..1a68f012a6dc 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -349,9 +349,9 @@ static int cpu_cache_sysfs_init(unsigned int cpu)
}
/* Add cache interface for CPU device */
-static int cache_add_dev(struct device *sys_dev)
+static int cache_add_dev(unsigned int cpu)
{
- unsigned int cpu = sys_dev->id;
+ struct device *sys_dev = get_cpu_device(cpu);
unsigned long i, j;
struct cache_info *this_object;
int retval = 0;
@@ -399,9 +399,8 @@ static int cache_add_dev(struct device *sys_dev)
}
/* Remove cache interface for CPU device */
-static int cache_remove_dev(struct device *sys_dev)
+static int cache_remove_dev(unsigned int cpu)
{
- unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
@@ -419,52 +418,13 @@ static int cache_remove_dev(struct device *sys_dev)
return 0;
}
-/*
- * When a cpu is hot-plugged, do a check and initiate
- * cache kobject if necessary
- */
-static int cache_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *sys_dev;
-
- sys_dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cache_add_dev(sys_dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cache_remove_dev(sys_dev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cache_cpu_notifier =
-{
- .notifier_call = cache_cpu_callback
-};
-
static int __init cache_sysfs_init(void)
{
- int i;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- struct device *sys_dev = get_cpu_device((unsigned int)i);
- cache_add_dev(sys_dev);
- }
-
- __register_hotcpu_notifier(&cache_cpu_notifier);
-
- cpu_notifier_register_done();
+ int ret;
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
+ cache_add_dev, cache_remove_dev);
+ WARN_ON(ret < 0);
return 0;
}
-
device_initcall(cache_sysfs_init);
-
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 3cc8498fe0fe..d227a6988d6b 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -34,7 +34,7 @@ config NO_IOPORT_MAP
def_bool y
config NO_DMA
- def_bool y
+ def_bool n
config HZ
int
diff --git a/arch/m32r/include/asm/device.h b/arch/m32r/include/asm/device.h
index d8f9872b0e2d..4a9f35e0973f 100644
--- a/arch/m32r/include/asm/device.h
+++ b/arch/m32r/include/asm/device.h
@@ -3,5 +3,9 @@
*
* This file is released under the GPLv2
*/
-#include <asm-generic/device.h>
+struct dev_archdata {
+ struct dma_map_ops *dma_ops;
+};
+struct pdev_archdata {
+};
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..2c43a77fe942
--- /dev/null
+++ b/arch/m32r/include/asm/dma-mapping.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_M32R_DMA_MAPPING_H
+#define _ASM_M32R_DMA_MAPPING_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/io.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ return &dma_noop_ops;
+}
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+#endif /* _ASM_M32R_DMA_MAPPING_H */
diff --git a/arch/m32r/include/asm/mutex.h b/arch/m32r/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m32r/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 9f8fd9bef70f..5767367550c6 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -133,6 +133,5 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5fe42fc7b6c5..5853f8e92c20 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 9a4ba8a8589d..349eb341752c 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -201,6 +201,7 @@ static struct irq_chip m32700ut_lanpld_irq_type =
#define lcdpldirq2port(x) (unsigned long)((int)M32700UT_LCD_ICUCR1 + \
(((x) - 1) * sizeof(unsigned short)))
+#ifdef CONFIG_USB
static pld_icu_data_t lcdpld_icu_data[M32700UT_NUM_LCD_PLD_IRQ];
static void disable_m32700ut_lcdpld_irq(unsigned int irq)
@@ -253,6 +254,7 @@ static struct irq_chip m32700ut_lcdpld_irq_type =
.irq_mask = mask_m32700ut_lcdpld,
.irq_unmask = unmask_m32700ut_lcdpld,
};
+#endif
void __init init_IRQ(void)
{
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 2a5c7abb2896..9225b4ad9aeb 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -259,6 +259,12 @@ config M5407C3
help
Support for the Motorola M5407C3 board.
+config AMCORE
+ bool "Sysam AMCORE board support"
+ depends on M5307
+ help
+ Support for the Sysam AMCORE open-hardware generic board.
+
config FIREBEE
bool "FireBee board support"
depends on M547x
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index ddb8192a3661..65f63a457130 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -786,8 +786,7 @@ static void amiga_get_hardware_list(struct seq_file *m)
if (AMIGAHW_PRESENT(name)) \
seq_printf (m, "\t%s\n", str)
- seq_printf (m, "Detected hardware:\n");
-
+ seq_puts(m, "Detected hardware:\n");
AMIGAHW_ANNOUNCE(AMI_VIDEO, "Amiga Video");
AMIGAHW_ANNOUNCE(AMI_BLITTER, "Blitter");
AMIGAHW_ANNOUNCE(AMBER_FF, "Amber Flicker Fixer");
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 97a3c38cd1f5..e328eaf816e3 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -629,7 +629,7 @@ static void atari_get_hardware_list(struct seq_file *m)
if (ATARIHW_PRESENT(name)) \
seq_printf(m, "\t%s\n", str)
- seq_printf(m, "Detected hardware:\n");
+ seq_puts(m, "Detected hardware:\n");
ATARIHW_ANNOUNCE(STND_SHIFTER, "ST Shifter");
ATARIHW_ANNOUNCE(EXTD_SHIFTER, "STe Shifter");
ATARIHW_ANNOUNCE(TT_SHIFTER, "TT Shifter");
diff --git a/arch/m68k/coldfire/Makefile b/arch/m68k/coldfire/Makefile
index 68f0fac60099..4aa2c57afc35 100644
--- a/arch/m68k/coldfire/Makefile
+++ b/arch/m68k/coldfire/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_NETtel) += nettel.o
obj-$(CONFIG_CLEOPATRA) += nettel.o
obj-$(CONFIG_FIREBEE) += firebee.o
obj-$(CONFIG_MCF8390) += mcf8390.o
+obj-$(CONFIG_AMCORE) += amcore.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/m68k/coldfire/amcore.c b/arch/m68k/coldfire/amcore.c
new file mode 100644
index 000000000000..c6cb1a5cc1a5
--- /dev/null
+++ b/arch/m68k/coldfire/amcore.c
@@ -0,0 +1,156 @@
+/*
+ * amcore.c -- Support for Sysam AMCORE open board
+ *
+ * (C) Copyright 2016, Angelo Dureghello <angelo@sysam.it>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dm9000.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/io.h>
+
+#if IS_ENABLED(CONFIG_DM9000)
+
+#define DM9000_IRQ 25
+#define DM9000_ADDR 0x30000000
+
+/*
+ * DEVICES and related device RESOURCES
+ */
+static struct resource dm9000_resources[] = {
+ /* physical address of the address register (CMD [A2] to 0)*/
+ [0] = {
+ .start = DM9000_ADDR,
+ .end = DM9000_ADDR,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * physical address of the data register (CMD [A2] to 1),
+ * driver wants a range >=4 to assume a 32bit data bus
+ */
+ [1] = {
+ .start = DM9000_ADDR + 4,
+ .end = DM9000_ADDR + 7,
+ .flags = IORESOURCE_MEM,
+ },
+ /* IRQ line the device's interrupt pin is connected to */
+ [2] = {
+ .start = DM9000_IRQ,
+ .end = DM9000_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct dm9000_plat_data dm9000_platdata = {
+ .flags = DM9000_PLATF_32BITONLY,
+};
+
+static struct platform_device dm9000_device = {
+ .name = "dm9000",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm9000_resources),
+ .resource = dm9000_resources,
+ .dev = {
+ .platform_data = &dm9000_platdata,
+ }
+};
+#endif
+
+static void __init dm9000_pre_init(void)
+{
+ /* Set the dm9000 interrupt to be auto-vectored */
+ mcf_autovector(DM9000_IRQ);
+}
+
+/*
+ * Partitioning of parallel NOR flash (39VF3201B)
+ */
+static struct mtd_partition amcore_partitions[] = {
+ {
+ .name = "U-Boot (128K)",
+ .size = 0x20000,
+ .offset = 0x0
+ },
+ {
+ .name = "Kernel+ROMfs (2994K)",
+ .size = 0x2E0000,
+ .offset = MTDPART_OFS_APPEND
+ },
+ {
+ .name = "Flash Free Space (1024K)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND
+ }
+};
+
+static struct physmap_flash_data flash_data = {
+ .parts = amcore_partitions,
+ .nr_parts = ARRAY_SIZE(amcore_partitions),
+ .width = 2,
+};
+
+static struct resource flash_resource = {
+ .start = 0xffc00000,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device flash_device = {
+ .name = "physmap-flash",
+ .id = -1,
+ .resource = &flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = &flash_data,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "rtc-ds1307",
+ .id = -1,
+};
+
+static struct i2c_board_info amcore_i2c_info[] __initdata = {
+ {
+ I2C_BOARD_INFO("ds1338", 0x68),
+ },
+};
+
+static struct platform_device *amcore_devices[] __initdata = {
+#if IS_ENABLED(CONFIG_DM9000)
+ &dm9000_device,
+#endif
+ &flash_device,
+ &rtc_device,
+};
+
+static int __init init_amcore(void)
+{
+#if IS_ENABLED(CONFIG_DM9000)
+ dm9000_pre_init();
+#endif
+
+ /* Add i2c RTC Dallas chip support */
+ i2c_register_board_info(0, amcore_i2c_info,
+ ARRAY_SIZE(amcore_i2c_info));
+
+ platform_add_devices(amcore_devices, ARRAY_SIZE(amcore_devices));
+
+ return 0;
+}
+
+arch_initcall(init_amcore);
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index a0fc0c192427..84938fdbbada 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -327,6 +327,147 @@ static struct platform_device mcf_qspi = {
};
#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
+#if IS_ENABLED(CONFIG_I2C_IMX)
+static struct resource mcf_i2c0_resources[] = {
+ {
+ .start = MCFI2C_BASE0,
+ .end = MCFI2C_BASE0 + MCFI2C_SIZE0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C0,
+ .end = MCF_IRQ_I2C0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c0 = {
+ .name = "imx1-i2c",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mcf_i2c0_resources),
+ .resource = mcf_i2c0_resources,
+};
+#ifdef MCFI2C_BASE1
+
+static struct resource mcf_i2c1_resources[] = {
+ {
+ .start = MCFI2C_BASE1,
+ .end = MCFI2C_BASE1 + MCFI2C_SIZE1 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C1,
+ .end = MCF_IRQ_I2C1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c1 = {
+ .name = "imx1-i2c",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(mcf_i2c1_resources),
+ .resource = mcf_i2c1_resources,
+};
+
+#endif /* MCFI2C_BASE1 */
+
+#ifdef MCFI2C_BASE2
+
+static struct resource mcf_i2c2_resources[] = {
+ {
+ .start = MCFI2C_BASE2,
+ .end = MCFI2C_BASE2 + MCFI2C_SIZE2 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C2,
+ .end = MCF_IRQ_I2C2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c2 = {
+ .name = "imx1-i2c",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(mcf_i2c2_resources),
+ .resource = mcf_i2c2_resources,
+};
+
+#endif /* MCFI2C_BASE2 */
+
+#ifdef MCFI2C_BASE3
+
+static struct resource mcf_i2c3_resources[] = {
+ {
+ .start = MCFI2C_BASE3,
+ .end = MCFI2C_BASE3 + MCFI2C_SIZE3 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C3,
+ .end = MCF_IRQ_I2C3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c3 = {
+ .name = "imx1-i2c",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(mcf_i2c3_resources),
+ .resource = mcf_i2c3_resources,
+};
+
+#endif /* MCFI2C_BASE3 */
+
+#ifdef MCFI2C_BASE4
+
+static struct resource mcf_i2c4_resources[] = {
+ {
+ .start = MCFI2C_BASE4,
+ .end = MCFI2C_BASE4 + MCFI2C_SIZE4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C4,
+ .end = MCF_IRQ_I2C4,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c4 = {
+ .name = "imx1-i2c",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(mcf_i2c4_resources),
+ .resource = mcf_i2c4_resources,
+};
+
+#endif /* MCFI2C_BASE4 */
+
+#ifdef MCFI2C_BASE5
+
+static struct resource mcf_i2c5_resources[] = {
+ {
+ .start = MCFI2C_BASE5,
+ .end = MCFI2C_BASE5 + MCFI2C_SIZE5 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C5,
+ .end = MCF_IRQ_I2C5,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mcf_i2c5 = {
+ .name = "imx1-i2c",
+ .id = 5,
+ .num_resources = ARRAY_SIZE(mcf_i2c5_resources),
+ .resource = mcf_i2c5_resources,
+};
+
+#endif /* MCFI2C_BASE5 */
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+
static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart,
#if IS_ENABLED(CONFIG_FEC)
@@ -338,6 +479,24 @@ static struct platform_device *mcf_devices[] __initdata = {
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
&mcf_qspi,
#endif
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ &mcf_i2c0,
+#ifdef MCFI2C_BASE1
+ &mcf_i2c1,
+#endif
+#ifdef MCFI2C_BASE2
+ &mcf_i2c2,
+#endif
+#ifdef MCFI2C_BASE3
+ &mcf_i2c3,
+#endif
+#ifdef MCFI2C_BASE4
+ &mcf_i2c4,
+#endif
+#ifdef MCFI2C_BASE5
+ &mcf_i2c5,
+#endif
+#endif
};
/*
diff --git a/arch/m68k/coldfire/m5206.c b/arch/m68k/coldfire/m5206.c
index 8945f5e7b39c..a3bcf0883f98 100644
--- a/arch/m68k/coldfire/m5206.c
+++ b/arch/m68k/coldfire/m5206.c
@@ -26,6 +26,7 @@ DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -34,11 +35,21 @@ struct clk *mcf_clks[] = {
&clk_mcftmr1,
&clk_mcfuart0,
&clk_mcfuart1,
+ &clk_mcfi2c0,
NULL
};
/***************************************************************************/
+static void __init m5206_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel)
@@ -53,6 +64,7 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(25, MCFINTC_EINT1);
mcf_mapirq2imr(28, MCFINTC_EINT4);
mcf_mapirq2imr(31, MCFINTC_EINT7);
+ m5206_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m520x.c b/arch/m68k/coldfire/m520x.c
index 173834f251eb..5ba69217ce6c 100644
--- a/arch/m68k/coldfire/m520x.c
+++ b/arch/m68k/coldfire/m520x.c
@@ -28,7 +28,7 @@ DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
DEFINE_CLK(0, "edma", 17, MCF_CLK);
DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
-DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "imx1-i2c.0", 22, MCF_CLK);
DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
@@ -53,7 +53,7 @@ struct clk *mcf_clks[] = {
&__clk_0_17, /* edma */
&__clk_0_18, /* intc.0 */
&__clk_0_21, /* iack.0 */
- &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_22, /* imx1-i2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_24, /* mcfuart.0 */
&__clk_0_25, /* mcfuart.1 */
@@ -71,7 +71,7 @@ struct clk *mcf_clks[] = {
&__clk_0_40, /* sys.0 */
&__clk_0_41, /* gpio.0 */
&__clk_0_42, /* sdram.0 */
-NULL,
+ NULL,
};
static struct clk * const enable_clks[] __initconst = {
@@ -94,7 +94,7 @@ static struct clk * const enable_clks[] __initconst = {
static struct clk * const disable_clks[] __initconst = {
&__clk_0_12, /* fec.0 */
&__clk_0_17, /* edma */
- &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_22, /* imx1-i2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_28, /* mcftmr.0 */
&__clk_0_29, /* mcftmr.1 */
@@ -133,6 +133,21 @@ static void __init m520x_qspi_init(void)
/***************************************************************************/
+static void __init m520x_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ u8 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readb(MCF_GPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCF_GPIO_PAR_FECI2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void __init m520x_uarts_init(void)
{
u16 par;
@@ -175,6 +190,7 @@ void __init config_BSP(char *commandp, int size)
m520x_uarts_init();
m520x_fec_init();
m520x_qspi_init();
+ m520x_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m523x.c b/arch/m68k/coldfire/m523x.c
index a191a467eff2..f7a0fcc5618c 100644
--- a/arch/m68k/coldfire/m523x.c
+++ b/arch/m68k/coldfire/m523x.c
@@ -34,6 +34,7 @@ DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -47,6 +48,7 @@ struct clk *mcf_clks[] = {
&clk_mcfuart2,
&clk_mcfqspi0,
&clk_fec0,
+ &clk_mcfi2c0,
NULL
};
@@ -68,6 +70,21 @@ static void __init m523x_qspi_init(void)
/***************************************************************************/
+static void __init m523x_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ u8 par;
+
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ par = readb(MCFGPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCFGPIO_PAR_FECI2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void __init m523x_fec_init(void)
{
/* Set multi-function pins to ethernet use */
@@ -81,6 +98,7 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init;
m523x_fec_init();
m523x_qspi_init();
+ m523x_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5249.c b/arch/m68k/coldfire/m5249.c
index e48f55adc447..b16cf9b4580c 100644
--- a/arch/m68k/coldfire/m5249.c
+++ b/arch/m68k/coldfire/m5249.c
@@ -27,6 +27,8 @@ DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -36,6 +38,8 @@ struct clk *mcf_clks[] = {
&clk_mcfuart0,
&clk_mcfuart1,
&clk_mcfqspi0,
+ &clk_mcfi2c0,
+ &clk_mcfi2c1,
NULL
};
@@ -85,6 +89,26 @@ static void __init m5249_qspi_init(void)
/***************************************************************************/
+static void __init m5249_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ u32 r;
+
+ /* first I2C controller uses regular irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
+
+ /* second I2C controller is completely different */
+ r = readl(MCFINTC2_INTPRI_REG(MCF_IRQ_I2C1));
+ r &= ~MCFINTC2_INTPRI_BITS(0xf, MCF_IRQ_I2C1);
+ r |= MCFINTC2_INTPRI_BITS(0x5, MCF_IRQ_I2C1);
+ writel(r, MCFINTC2_INTPRI_REG(MCF_IRQ_I2C1));
+#endif /* CONFIG_I2C_IMX */
+}
+
+/***************************************************************************/
+
#ifdef CONFIG_M5249C3
static void __init m5249_smc91x_init(void)
@@ -111,6 +135,7 @@ void __init config_BSP(char *commandp, int size)
m5249_smc91x_init();
#endif
m5249_qspi_init();
+ m5249_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c
index 3d8583e2187c..110e2cd34e62 100644
--- a/arch/m68k/coldfire/m525x.c
+++ b/arch/m68k/coldfire/m525x.c
@@ -27,6 +27,8 @@ DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -36,6 +38,8 @@ struct clk *mcf_clks[] = {
&clk_mcfuart0,
&clk_mcfuart1,
&clk_mcfqspi0,
+ &clk_mcfi2c0,
+ &clk_mcfi2c1,
NULL
};
@@ -59,12 +63,12 @@ static void __init m525x_qspi_init(void)
static void __init m525x_i2c_init(void)
{
-#if IS_ENABLED(CONFIG_I2C_COLDFIRE)
+#if IS_ENABLED(CONFIG_I2C_IMX)
u32 r;
/* first I2C controller uses regular irq setup */
writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
- MCFSIM_I2CICR);
+ MCFSIM_I2CICR);
mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
/* second I2C controller is completely different */
@@ -72,7 +76,7 @@ static void __init m525x_i2c_init(void)
r &= ~MCFINTC2_INTPRI_BITS(0xf, MCF_IRQ_I2C1);
r |= MCFINTC2_INTPRI_BITS(0x5, MCF_IRQ_I2C1);
writel(r, MCFINTC2_INTPRI_REG(MCF_IRQ_I2C1));
-#endif /* IS_ENABLED(CONFIG_I2C_COLDFIRE) */
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m527x.c b/arch/m68k/coldfire/m527x.c
index c0b3e28f91df..b10b436b5a31 100644
--- a/arch/m68k/coldfire/m527x.c
+++ b/arch/m68k/coldfire/m527x.c
@@ -36,6 +36,7 @@ DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -50,6 +51,7 @@ struct clk *mcf_clks[] = {
&clk_mcfqspi0,
&clk_fec0,
&clk_fec1,
+ &clk_mcfi2c0,
NULL
};
@@ -76,6 +78,31 @@ static void __init m527x_qspi_init(void)
/***************************************************************************/
+static void __init m527x_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+#if defined(CONFIG_M5271)
+ u8 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readb(MCFGPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCFGPIO_PAR_FECI2C);
+#elif defined(CONFIG_M5275)
+ u16 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readw(MCFGPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writew(par, MCFGPIO_PAR_FECI2C);
+#endif
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void __init m527x_uarts_init(void)
{
u16 sepmask;
@@ -122,6 +149,7 @@ void __init config_BSP(char *commandp, int size)
m527x_uarts_init();
m527x_fec_init();
m527x_qspi_init();
+ m527x_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m528x.c b/arch/m68k/coldfire/m528x.c
index 12f9e370d8dd..ea76998d5ab9 100644
--- a/arch/m68k/coldfire/m528x.c
+++ b/arch/m68k/coldfire/m528x.c
@@ -36,6 +36,7 @@ DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -49,6 +50,7 @@ struct clk *mcf_clks[] = {
&clk_mcfuart2,
&clk_mcfqspi0,
&clk_fec0,
+ &clk_mcfi2c0,
NULL
};
@@ -64,6 +66,21 @@ static void __init m528x_qspi_init(void)
/***************************************************************************/
+static void __init m528x_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ u16 paspar;
+
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ paspar = readw(MCFGPIO_PASPAR);
+ paspar |= 0xF;
+ writew(paspar, MCFGPIO_PASPAR);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void __init m528x_uarts_init(void)
{
u8 port;
@@ -127,6 +144,7 @@ void __init config_BSP(char *commandp, int size)
m528x_uarts_init();
m528x_fec_init();
m528x_qspi_init();
+ m528x_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5307.c b/arch/m68k/coldfire/m5307.c
index 2da1d146e344..cc5e8a50a423 100644
--- a/arch/m68k/coldfire/m5307.c
+++ b/arch/m68k/coldfire/m5307.c
@@ -35,6 +35,7 @@ DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -43,11 +44,23 @@ struct clk *mcf_clks[] = {
&clk_mcftmr1,
&clk_mcfuart0,
&clk_mcfuart1,
+ &clk_mcfi2c0,
NULL
};
/***************************************************************************/
+static void __init m5307_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel) || \
@@ -73,6 +86,7 @@ void __init config_BSP(char *commandp, int size)
*/
wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
#endif
+ m5307_i2c_init();
}
/***************************************************************************/
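
Note: on the MCF5307 the I2C interrupt goes through the older SIM scheme, so the init routine above programs an autovectored level-5 interrupt in the I2C ICR and maps that line into the interrupt mask register. The vector arithmetic, as a hedged illustration (the EXAMPLE_ macros are not from the kernel):

/* m68k autovectored interrupts arrive on exception vector 24 + level,
 * so a level-5 autovector lands on vector 29, the value m5307sim.h
 * later assigns to MCF_IRQ_I2C0. */
#define EXAMPLE_AUTOVEC_BASE	24
#define EXAMPLE_I2C_IRQ_LEVEL	5

static inline unsigned int example_i2c_vector(void)
{
	return EXAMPLE_AUTOVEC_BASE + EXAMPLE_I2C_IRQ_LEVEL;	/* == 29 */
}
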
diff --git a/arch/m68k/coldfire/m53xx.c b/arch/m68k/coldfire/m53xx.c
index 2502f63960bc..cf1917934b8a 100644
--- a/arch/m68k/coldfire/m53xx.c
+++ b/arch/m68k/coldfire/m53xx.c
@@ -38,7 +38,7 @@ DEFINE_CLK(0, "edma", 17, MCF_CLK);
DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
-DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "imx1-i2c.0", 22, MCF_CLK);
DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
@@ -77,7 +77,7 @@ struct clk *mcf_clks[] = {
&__clk_0_18, /* intc.0 */
&__clk_0_19, /* intc.1 */
&__clk_0_21, /* iack.0 */
- &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_22, /* imx1-i2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_24, /* mcfuart.0 */
&__clk_0_25, /* mcfuart.1 */
@@ -133,7 +133,7 @@ static struct clk * const disable_clks[] __initconst = {
&__clk_0_8, /* mcfcan.0 */
&__clk_0_12, /* fec.0 */
&__clk_0_17, /* edma */
- &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_22, /* imx1-i2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_30, /* mcftmr.2 */
&__clk_0_31, /* mcftmr.3 */
@@ -176,6 +176,19 @@ static void __init m53xx_qspi_init(void)
/***************************************************************************/
+static void __init m53xx_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ u8 r = readb(MCFGPIO_PAR_FECI2C);
+ r |= 0x0f;
+ writeb(r, MCFGPIO_PAR_FECI2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void __init m53xx_uarts_init(void)
{
/* UART GPIO initialization */
@@ -218,6 +231,7 @@ void __init config_BSP(char *commandp, int size)
m53xx_uarts_init();
m53xx_fec_init();
m53xx_qspi_init();
+ m53xx_i2c_init();
#ifdef CONFIG_BDM_DISABLE
/*
diff --git a/arch/m68k/coldfire/m5407.c b/arch/m68k/coldfire/m5407.c
index 738eba6be40e..38863ddbeab0 100644
--- a/arch/m68k/coldfire/m5407.c
+++ b/arch/m68k/coldfire/m5407.c
@@ -26,6 +26,7 @@ DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -34,11 +35,23 @@ struct clk *mcf_clks[] = {
&clk_mcftmr1,
&clk_mcfuart0,
&clk_mcfuart1,
+ &clk_mcfi2c0,
NULL
};
/***************************************************************************/
+static void __init m5407_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
void __init config_BSP(char *commandp, int size)
{
mach_sched_init = hw_timer_init;
@@ -48,6 +61,7 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(27, MCFINTC_EINT3);
mcf_mapirq2imr(29, MCFINTC_EINT5);
mcf_mapirq2imr(31, MCFINTC_EINT7);
+ m5407_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 98a13cce93d8..dc589b039b62 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -19,13 +19,13 @@
DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
DEFINE_CLK(0, "mcfcan.1", 9, MCF_CLK);
-DEFINE_CLK(0, "mcfi2c.1", 14, MCF_CLK);
+DEFINE_CLK(0, "imx1-i2c.1", 14, MCF_CLK);
DEFINE_CLK(0, "mcfdspi.1", 15, MCF_CLK);
DEFINE_CLK(0, "edma", 17, MCF_CLK);
DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
DEFINE_CLK(0, "intc.2", 20, MCF_CLK);
-DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "imx1-i2c.0", 22, MCF_CLK);
DEFINE_CLK(0, "mcfdspi.0", 23, MCF_CLK);
DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
@@ -59,10 +59,10 @@ DEFINE_CLK(0, "switch.1", 56, MCF_CLK);
DEFINE_CLK(0, "nand.0", 63, MCF_CLK);
DEFINE_CLK(1, "mcfow.0", 2, MCF_CLK);
-DEFINE_CLK(1, "mcfi2c.2", 4, MCF_CLK);
-DEFINE_CLK(1, "mcfi2c.3", 5, MCF_CLK);
-DEFINE_CLK(1, "mcfi2c.4", 6, MCF_CLK);
-DEFINE_CLK(1, "mcfi2c.5", 7, MCF_CLK);
+DEFINE_CLK(1, "imx1-i2c.2", 4, MCF_CLK);
+DEFINE_CLK(1, "imx1-i2c.3", 5, MCF_CLK);
+DEFINE_CLK(1, "imx1-i2c.4", 6, MCF_CLK);
+DEFINE_CLK(1, "imx1-i2c.5", 7, MCF_CLK);
DEFINE_CLK(1, "mcfuart.4", 24, MCF_BUSCLK);
DEFINE_CLK(1, "mcfuart.5", 25, MCF_BUSCLK);
DEFINE_CLK(1, "mcfuart.6", 26, MCF_BUSCLK);
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index 386df3b68cdf..c552851ec617 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -37,6 +37,7 @@ DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
+DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
struct clk *mcf_clks[] = {
&clk_pll,
@@ -47,6 +48,7 @@ struct clk *mcf_clks[] = {
&clk_mcfuart1,
&clk_mcfuart2,
&clk_mcfuart3,
+ &clk_mcfi2c0,
NULL
};
@@ -65,6 +67,20 @@ static void __init m54xx_uarts_init(void)
/***************************************************************************/
+static void __init m54xx_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_IMX)
+ u32 r;
+
+ /* set the fec/i2c/irq pin assignment register for i2c */
+ r = readl(MCF_PAR_FECI2CIRQ);
+ r |= MCF_PAR_FECI2CIRQ_SDA | MCF_PAR_FECI2CIRQ_SCL;
+ writel(r, MCF_PAR_FECI2CIRQ);
+#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+}
+
+/***************************************************************************/
+
static void mcf54xx_reset(void)
{
/* disable interrupts and enable the watchdog */
@@ -86,6 +102,7 @@ void __init config_BSP(char *commandp, int size)
mach_reset = mcf54xx_reset;
mach_sched_init = hw_timer_init;
m54xx_uarts_init();
+ m54xx_i2c_init();
}
/***************************************************************************/
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
new file mode 100644
index 000000000000..f108dd121e9a
--- /dev/null
+++ b/arch/m68k/configs/amcore_defconfig
@@ -0,0 +1,118 @@
+CONFIG_LOCALVERSION="amcore-001"
+CONFIG_DEFAULT_HOSTNAME="amcore"
+CONFIG_SYSVIPC=y
+# CONFIG_FHANDLE is not set
+# CONFIG_USELIB is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_MMU is not set
+CONFIG_M5307=y
+CONFIG_AMCORE=y
+CONFIG_UBOOT=y
+CONFIG_RAMSIZE=0x1000000
+CONFIG_KERNELBASE=0x20000
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_LE_BYTE_SWAP=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_DM9000=y
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MCF=y
+CONFIG_SERIAL_MCF_BAUDRATE=115200
+CONFIG_SERIAL_MCF_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_IMX=y
+CONFIG_PPS=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_FSCACHE=y
+# CONFIG_PROC_SYSCTL is not set
+CONFIG_JFFS2_FS=y
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC16=y
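
Note: the new amcore defconfig enables the i.MX I2C driver (CONFIG_I2C_IMX) together with the DS1307 RTC driver, which implies the board code declares the RTC as an I2C client. A hedged sketch of what such a registration typically looks like; the table name, init hook and bus number are illustrative, only the conventional DS1307 slave address 0x68 is assumed:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct i2c_board_info example_amcore_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("ds1307", 0x68) },	/* RTC on the on-chip bus */
};

static int __init example_amcore_i2c_init(void)
{
	/* bus 0 is the first (and only) imx1-i2c controller */
	return i2c_register_board_info(0, example_amcore_i2c_devs,
				       ARRAY_SIZE(example_amcore_i2c_devs));
}
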
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 55be7e3ff109..b98acd15ca22 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -95,9 +95,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -105,8 +106,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -366,6 +369,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 365dda66b0e6..f80dc57e6374 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -347,6 +350,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index ce3cbfd16fcd..4e16b1821fbb 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -356,6 +359,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8db496a9797d..2767bbf5ad61 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -346,6 +349,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 8314156f7149..d13ba309265e 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -347,6 +350,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 6600270b9622..78b5101c1aa6 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -92,9 +92,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -102,8 +103,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -363,6 +366,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 90abfe9eabba..38e5bcbd0d62 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -102,9 +102,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -112,8 +113,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -397,6 +400,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
CONFIG_ATARILANCE=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 0d502c2f73d5..28687192b68e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -90,9 +90,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -100,8 +101,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -345,6 +348,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 5930e91fc710..5a5f109ab3cd 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -346,6 +349,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 74e3ad82eca9..e557c9de3fbc 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -353,6 +356,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 4ba8606a4e69..c6a748a36daf 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -88,9 +88,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -98,8 +99,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -343,6 +346,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index c6f49726a6c9..10d60857b9a6 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -88,9 +88,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
+CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
+CONFIG_NFT_SET_RBTREE=m
+CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -98,8 +99,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@@ -343,6 +346,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index a0985fd088d1..fc4be028c418 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -184,7 +184,6 @@ static const struct net_device_ops nfeth_netdev_ops = {
.ndo_start_xmit = nfeth_xmit,
.ndo_tx_timeout = nfeth_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eb85bd9c6180..1f2e5d31cb24 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
-generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d28fa8fe26fe..c598d847d56b 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
*/
#define HZSCALE (268435456 / (1000000 / HZ))
-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
+#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
#endif /* defined(_M68K_DELAY_H) */
diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h
index 4cf864f5ea7a..0ddf3efbcae9 100644
--- a/arch/m68k/include/asm/m5206sim.h
+++ b/arch/m68k/include/asm/m5206sim.h
@@ -110,6 +110,7 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C0 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
#define MCF_IRQ_UART0 73 /* UART0 */
@@ -138,6 +139,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR8 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR9 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR10 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR11 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR12 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR13 /* UART 2 ICR */
#ifdef CONFIG_M5206e
@@ -145,5 +147,11 @@
#define MCFSIM_DMA2ICR MCFSIM_ICR15 /* DMA 2 ICR */
#endif
+/*
+ * I2C Controller
+*/
+#define MCFI2C_BASE0 (MCF_MBAR + 0x1e0)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m5206sim_h */
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index db3f8ee4a6c6..6d50cefa76c3 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -50,6 +50,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_I2C0 30 /* Interrupt number for I2C */
#define MCFINT_QSPI 31 /* Interrupt number for QSPI */
#define MCFINT_FECRX0 36 /* Interrupt number for FEC RX */
#define MCFINT_FECTX0 40 /* Interrupt number for FEC RX */
@@ -67,6 +68,7 @@
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + MCFINT_I2C0)
/*
* SDRAM configuration registers.
*/
@@ -200,5 +202,11 @@
#define MCFPM_PPMLR0 0xfc040034
#define MCFPM_LPCR 0xfc0a0007
+/*
+ * I2C module.
+ */
+#define MCFI2C_BASE0 0xFC058000
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m520xsim_h */
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index 5e06b4eb57f3..d43f6ab1edc9 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -37,7 +37,8 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
-#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
+#define MCFINT_I2C0 17 /* Interrupt number for I2C */
+#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_FECRX0 23 /* Interrupt number for FEC */
#define MCFINT_FECTX0 27 /* Interrupt number for FEC */
#define MCFINT_FECENTC0 29 /* Interrupt number for FEC */
@@ -53,6 +54,7 @@
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + MCFINT_I2C0)
/*
* SDRAM configuration registers.
@@ -208,5 +210,11 @@
#define MCFDMA_BASE2 (MCF_IPSBAR + 0x180)
#define MCFDMA_BASE3 (MCF_IPSBAR + 0x1C0)
+/*
+ * I2C module.
+ */
+#define MCFI2C_BASE0 (MCF_IPSBAR + 0x300)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m523xsim_h */
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 2c648a043f24..35f6fbc89b92 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -37,6 +37,7 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
+#define MCFINT_I2C0 17 /* Interrupt number for I2C */
#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_FECRX0 23 /* Interrupt number for FEC0 */
#define MCFINT_FECTX0 27 /* Interrupt number for FEC0 */
@@ -61,6 +62,7 @@
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + MCFINT_I2C0)
/*
* SDRAM configuration registers.
@@ -353,5 +355,11 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+/*
+ * I2C module.
+ */
+#define MCFI2C_BASE0 (MCF_IPSBAR + 0x300)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m527xsim_h */
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index cf68ca0ac3a5..67f6182d10a4 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -37,6 +37,7 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
+#define MCFINT_I2C0 17 /* Interrupt number for I2C */
#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_FECRX0 23 /* Interrupt number for FEC */
#define MCFINT_FECTX0 27 /* Interrupt number for FEC */
@@ -53,6 +54,8 @@
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + MCFINT_I2C0)
+
/*
* SDRAM configuration registers.
*/
@@ -242,5 +245,11 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+/*
+ * I2C module
+ */
+#define MCFI2C_BASE0 (MCF_IPSBAR + 0x300)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m528xsim_h */
diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h
index 5d0bb7ec31f8..d2595e04eb1d 100644
--- a/arch/m68k/include/asm/m5307sim.h
+++ b/arch/m68k/include/asm/m5307sim.h
@@ -148,6 +148,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
@@ -155,7 +156,6 @@
#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-
/*
* Some symbol defines for the Parallel Port Pin Assignment Register
*/
@@ -174,10 +174,17 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C0 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
#define MCF_IRQ_UART0 73 /* UART0 */
#define MCF_IRQ_UART1 74 /* UART1 */
+/*
+ * I2C module
+ */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m5307sim_h */
diff --git a/arch/m68k/include/asm/m53xxsim.h b/arch/m68k/include/asm/m53xxsim.h
index faa1a2133bfd..53329ae4d3e3 100644
--- a/arch/m68k/include/asm/m53xxsim.h
+++ b/arch/m68k/include/asm/m53xxsim.h
@@ -19,6 +19,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_I2C0 30 /* Interrupt number for I2C */
#define MCFINT_QSPI 31 /* Interrupt number for QSPI */
#define MCFINT_FECRX0 36 /* Interrupt number for FEC */
#define MCFINT_FECTX0 40 /* Interrupt number for FEC */
@@ -32,6 +33,7 @@
#define MCF_IRQ_FECTX0 (MCFINT_VECBASE + MCFINT_FECTX0)
#define MCF_IRQ_FECENTC0 (MCFINT_VECBASE + MCFINT_FECENTC0)
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + MCFINT_I2C0)
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
#define MCF_WTM_WCR 0xFC098000
@@ -1237,5 +1239,11 @@
#define MCFEPORT_EPPDR (0xFC094005)
#define MCFEPORT_EPFR (0xFC094006)
+/*
+ * I2C Module
+ */
+#define MCFI2C_BASE0 (0xFc058000)
+#define MCFI2C_SIZE0 0x40
+
/********************************************************************/
#endif /* m53xxsim_h */
diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h
index a7550bc5cd1e..ab40c16ba989 100644
--- a/arch/m68k/include/asm/m5407sim.h
+++ b/arch/m68k/include/asm/m5407sim.h
@@ -112,6 +112,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
@@ -137,10 +138,17 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C0 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
#define MCF_IRQ_UART0 73 /* UART0 */
#define MCF_IRQ_UART1 74 /* UART1 */
+/*
+ * I2C module
+ */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280)
+#define MCFI2C_SIZE0 0x40
+
/****************************************************************************/
#endif /* m5407sim_h */
diff --git a/arch/m68k/include/asm/m54xxsim.h b/arch/m68k/include/asm/m54xxsim.h
index 73d937ff36eb..7758d0a1a84d 100644
--- a/arch/m68k/include/asm/m54xxsim.h
+++ b/arch/m68k/include/asm/m54xxsim.h
@@ -45,6 +45,7 @@
*/
#define MCF_IRQ_TIMER (MCFINT_VECBASE + 54) /* Slice Timer 0 */
#define MCF_IRQ_PROFILER (MCFINT_VECBASE + 53) /* Slice Timer 1 */
+#define MCF_IRQ_I2C0 (MCFINT_VECBASE + 40)
#define MCF_IRQ_UART0 (MCFINT_VECBASE + 35)
#define MCF_IRQ_UART1 (MCFINT_VECBASE + 34)
#define MCF_IRQ_UART2 (MCFINT_VECBASE + 33)
@@ -107,4 +108,14 @@
#define MCF_PAR_PSC_RTS_RTS (0x30)
#define MCF_PAR_PSC_CANRX (0x40)
+#define MCF_PAR_FECI2CIRQ (MCF_MBAR + 0x00000a44) /* FEC/I2C/IRQ */
+#define MCF_PAR_FECI2CIRQ_SDA (1 << 3)
+#define MCF_PAR_FECI2CIRQ_SCL (1 << 2)
+
+/*
+ * I2C module.
+ */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x8f00)
+#define MCFI2C_SIZE0 0x40
+
#endif /* m54xxsim_h */
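
Note: the MCFI2C_BASE0/MCFI2C_SIZE0 and MCF_IRQ_I2C0 definitions added across these headers exist so the common ColdFire platform-device glue can hand the i2c-imx driver a memory window and an interrupt. A rough sketch of the assumed shape of that glue, which is not part of this diff (identifiers prefixed example_ are made up):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource example_mcf_i2c0_resources[] = {
	{
		.start	= MCFI2C_BASE0,
		.end	= MCFI2C_BASE0 + MCFI2C_SIZE0 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= MCF_IRQ_I2C0,
		.end	= MCF_IRQ_I2C0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_mcf_i2c0 = {
	.name		= "imx1-i2c",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_mcf_i2c0_resources),
	.resource	= example_mcf_i2c0_resources,
};
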
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index c84a2183b3f0..f5f790c31bf8 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -156,6 +156,5 @@ unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 8cf97cbadc91..07070065a425 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = page_to_phys(page) + offset;
- dma_sync_single_for_device(dev, handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_sync_single_for_device(dev, handle, size, dir);
+
return handle;
}
@@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
dma_sync_single_for_device(dev, sg->dma_address, sg->length,
dir);
}
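
Note: the two hunks above make the m68k map_page and map_sg paths honour DMA_ATTR_SKIP_CPU_SYNC, which callers pass when they want to handle cache maintenance themselves. An illustrative caller, not from this patch (names are made up):

#include <linux/dma-mapping.h>

static dma_addr_t example_map_deferred(struct device *dev,
				       void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map without the automatic sync ... */
	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return 0;	/* error signalled as 0 for brevity */

	/* ... and flush explicitly once the buffer contents are final. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
	return handle;
}
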
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 29acb89daaaa..167150c701d1 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -27,7 +27,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index a0333ebcac35..ec6a49076980 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -152,7 +152,6 @@ unsigned long get_wchan(struct task_struct *p);
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
extern void setup_priv(void);
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 0db31e24c541..91968d92652b 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -484,8 +484,9 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
- dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
- direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_sync_for_device((void *)(page_to_phys(page) + offset),
+ size, direction);
return page_to_phys(page) + offset;
}
@@ -493,7 +494,8 @@ static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -507,6 +509,10 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(!sg_page(sg));
sg->dma_address = sg_phys(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
@@ -525,6 +531,10 @@ static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(!sg_page(sg));
sg->dma_address = sg_phys(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 86f65721e629..85885a501dce 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -27,6 +27,7 @@ config MICROBLAZE
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select IRQ_DOMAIN
+ select XILINX_INTC
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index bab3b1393ad4..d785defeeed5 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -16,6 +16,6 @@ struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* should be defined in each interrupt controller driver */
-extern unsigned int get_irq(void);
+extern unsigned int xintc_get_irq(void);
#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/include/asm/mutex.h b/arch/microblaze/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/microblaze/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index c38d0dd91134..37ef196e4519 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,7 +22,6 @@
extern const struct seq_operations cpuinfo_op;
# define cpu_relax() barrier()
-# define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index f08bacaf8a95..e098381af928 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,7 +15,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
- hw_exception_handler.o intc.o irq.o \
+ hw_exception_handler.o irq.o \
platform.o process.o prom.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index ec04dc1e2527..818daf230eb4 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,6 +61,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
sg->length, direction);
}
@@ -80,7 +84,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
enum dma_data_direction direction,
unsigned long attrs)
{
- __dma_sync(page_to_phys(page) + offset, size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync(page_to_phys(page) + offset, size, direction);
return page_to_phys(page) + offset;
}
@@ -95,7 +100,8 @@ static inline void dma_direct_unmap_page(struct device *dev,
* phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
* dma_address is physical address
*/
- __dma_sync(dma_address, size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync(dma_address, size, direction);
}
static inline void
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
deleted file mode 100644
index 90bec7d71f85..000000000000
--- a/arch/microblaze/kernel/intc.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2012-2013 Xilinx, Inc.
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-
-static void __iomem *intc_baseaddr;
-
-/* No one else should require these constants, so define them locally here. */
-#define ISR 0x00 /* Interrupt Status Register */
-#define IPR 0x04 /* Interrupt Pending Register */
-#define IER 0x08 /* Interrupt Enable Register */
-#define IAR 0x0c /* Interrupt Acknowledge Register */
-#define SIE 0x10 /* Set Interrupt Enable bits */
-#define CIE 0x14 /* Clear Interrupt Enable bits */
-#define IVR 0x18 /* Interrupt Vector Register */
-#define MER 0x1c /* Master Enable Register */
-
-#define MER_ME (1<<0)
-#define MER_HIE (1<<1)
-
-static unsigned int (*read_fn)(void __iomem *);
-static void (*write_fn)(u32, void __iomem *);
-
-static void intc_write32(u32 val, void __iomem *addr)
-{
- iowrite32(val, addr);
-}
-
-static unsigned int intc_read32(void __iomem *addr)
-{
- return ioread32(addr);
-}
-
-static void intc_write32_be(u32 val, void __iomem *addr)
-{
- iowrite32be(val, addr);
-}
-
-static unsigned int intc_read32_be(void __iomem *addr)
-{
- return ioread32be(addr);
-}
-
-static void intc_enable_or_unmask(struct irq_data *d)
-{
- unsigned long mask = 1 << d->hwirq;
-
- pr_debug("enable_or_unmask: %ld\n", d->hwirq);
-
- /* ack level irqs because they can't be acked during
- * ack function since the handle_level_irq function
- * acks the irq before calling the interrupt handler
- */
- if (irqd_is_level_type(d))
- write_fn(mask, intc_baseaddr + IAR);
-
- write_fn(mask, intc_baseaddr + SIE);
-}
-
-static void intc_disable_or_mask(struct irq_data *d)
-{
- pr_debug("disable: %ld\n", d->hwirq);
- write_fn(1 << d->hwirq, intc_baseaddr + CIE);
-}
-
-static void intc_ack(struct irq_data *d)
-{
- pr_debug("ack: %ld\n", d->hwirq);
- write_fn(1 << d->hwirq, intc_baseaddr + IAR);
-}
-
-static void intc_mask_ack(struct irq_data *d)
-{
- unsigned long mask = 1 << d->hwirq;
-
- pr_debug("disable_and_ack: %ld\n", d->hwirq);
- write_fn(mask, intc_baseaddr + CIE);
- write_fn(mask, intc_baseaddr + IAR);
-}
-
-static struct irq_chip intc_dev = {
- .name = "Xilinx INTC",
- .irq_unmask = intc_enable_or_unmask,
- .irq_mask = intc_disable_or_mask,
- .irq_ack = intc_ack,
- .irq_mask_ack = intc_mask_ack,
-};
-
-static struct irq_domain *root_domain;
-
-unsigned int get_irq(void)
-{
- unsigned int hwirq, irq = -1;
-
- hwirq = read_fn(intc_baseaddr + IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(root_domain, hwirq);
-
- pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
-static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
-{
- u32 intr_mask = (u32)d->host_data;
-
- if (intr_mask & (1 << hw)) {
- irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_edge_irq, "edge");
- irq_clear_status_flags(irq, IRQ_LEVEL);
- } else {
- irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_level_irq, "level");
- irq_set_status_flags(irq, IRQ_LEVEL);
- }
- return 0;
-}
-
-static const struct irq_domain_ops xintc_irq_domain_ops = {
- .xlate = irq_domain_xlate_onetwocell,
- .map = xintc_map,
-};
-
-static int __init xilinx_intc_of_init(struct device_node *intc,
- struct device_node *parent)
-{
- u32 nr_irq, intr_mask;
- int ret;
-
- intc_baseaddr = of_iomap(intc, 0);
- BUG_ON(!intc_baseaddr);
-
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
- if (ret < 0) {
- pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
- return ret;
- }
-
- ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
- if (ret < 0) {
- pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
- return ret;
- }
-
- if (intr_mask >> nr_irq)
- pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
-
- pr_info("%s: num_irq=%d, edge=0x%x\n",
- intc->full_name, nr_irq, intr_mask);
-
- write_fn = intc_write32;
- read_fn = intc_read32;
-
- /*
- * Disable all external interrupts until they are
- * explicity requested.
- */
- write_fn(0, intc_baseaddr + IER);
-
- /* Acknowledge any pending interrupts just in case. */
- write_fn(0xffffffff, intc_baseaddr + IAR);
-
- /* Turn on the Master Enable. */
- write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
- if (!(read_fn(intc_baseaddr + MER) & (MER_HIE | MER_ME))) {
- write_fn = intc_write32_be;
- read_fn = intc_read32_be;
- write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
- }
-
- /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
- * lazy and Michal can clean it up to something nicer when he tests
- * and commits this patch. ~~gcl */
- root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
- (void *)intr_mask);
-
- irq_set_default_host(root_domain);
-
- return 0;
-}
-
-IRQCHIP_DECLARE(xilinx_intc, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 11e24de91aa4..903dad822fad 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -29,12 +29,12 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
trace_hardirqs_off();
irq_enter();
- irq = get_irq();
+ irq = xintc_get_irq();
next_irq:
BUG_ON(!irq);
generic_handle_irq(irq);
- irq = get_irq();
+ irq = xintc_get_irq();
if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
- PLATFORM=$(platform-y)
+ PLATFORM="$(platform-y)"
ifdef CONFIG_32BIT
bootvars-y += ADDR_BITS=32
endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
fpga_regs: system-controller@1f000000 {
compatible = "mti,malta-fpga", "syscon", "simple-mfd";
reg = <0x1f000000 0x1000>;
+ native-endian;
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
offset = <0x500>;
- mask = <0x4d>;
+ mask = <0x42>;
};
};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
void __init prom_init(void)
{
+ plat_get_fdt();
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
const struct mips_machine *check_mach;
const struct of_device_id *match;
+ if (fdt)
+ /* Already set up */
+ return (void *)fdt;
+
if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
/*
* We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
-
- BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
return (void *)fdt;
}
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9740066cc631..3269b742a75e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
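
Note: mask_fcr31_x() keeps a Cause bit only if its matching Enable bit is set, plus Unimplemented, which has no Enable bit; the shift by ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) lines the Enable field up under the Cause field. A small worked example, assuming the usual FCSR bit macros from <asm/mipsregs.h>:

#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>

/* With Overflow enabled and both the Overflow and Inexact Cause bits
 * raised, only the Overflow Cause survives the mask. */
static inline int example_mask_fcr31_x(void)
{
	unsigned long fcr31 = FPU_CSR_OVF_E | FPU_CSR_OVF_X | FPU_CSR_INE_X;

	return mask_fcr31_x(fcr31) == FPU_CSR_OVF_X;	/* evaluates true */
}
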
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 07f58cfc1ab9..bebec370324f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
/* Host KSEG0 address of the EI/DI offset */
void *kseg0_commpage;
- u32 io_gpr; /* GPR used as IO source/target */
+ /* Resume PC after MMIO completion */
+ unsigned long io_pc;
+ /* GPR used as IO source/target */
+ u32 io_gpr;
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- u32 pending_load_cause;
-
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
unsigned long preempt_entryhi;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7dd2dd47909a..df78b2ca70eb 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -215,6 +215,12 @@
#endif
/*
+ * Wired register bits
+ */
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
+
+/*
* Values used for computation of new tlb entries
*/
#define PL_4K 12
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 0d36c87acbe2..95b8c471f572 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -389,7 +389,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Return_address is a replacement for __builtin_return_address(count)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 4a2349302b55..dd179fd8acda 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -1,6 +1,9 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+
/*
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
@@ -22,6 +25,16 @@
((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+static inline unsigned int num_wired_entries(void)
+{
+ unsigned int wired = read_c0_wired();
+
+ if (cpu_has_mips_r6)
+ wired &= MIPSR6_WIRED_WIRED;
+
+ return wired;
+}
+
#include <asm-generic/tlb.h>
#endif /* __ASM_TLB_H */
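
Note: num_wired_entries() exists because MIPS R6 added a Limit field to the upper half of the Wired register, so reading c0_wired raw would over-count the wired TLB entries. Illustrative use only; the walker below is hypothetical:

#include <asm/cpu-info.h>
#include <asm/tlb.h>

static void example_walk_random_tlb(void (*visit)(unsigned int idx))
{
	unsigned int idx;

	/* Skip the wired entries; on R6 the Limit bits are masked off. */
	for (idx = num_wired_entries(); idx < current_cpu_data.tlbsize; idx++)
		visit(idx);
}
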
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2027240aafbb..566ecdcb5b4b 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -108,4 +108,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index fae2f9447792..6080582a26d1 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -341,7 +341,7 @@ void output_pm_defines(void)
void output_kvm_defines(void)
{
- COMMENT(" KVM/MIPS Specfic offsets. ");
+ COMMENT(" KVM/MIPS Specific offsets. ");
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ return 0;
+}
+
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
- /* Otherwise, give it the default address & enable it */
+ /* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
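
Note: with the weak mips_cpc_default_phys_base() returning 0, a platform that has no preferred CPC address simply leaves the Cluster Power Controller unmapped instead of enabling it at address 0. A hypothetical platform override (the address is made up):

#include <linux/types.h>

phys_addr_t mips_cpc_default_phys_base(void)
{
	return 0x1bde0000;	/* example-only CPC base for this board */
}
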
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1172,13 +1172,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
+ init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 7e71a4e0281b..5fcbdcd7abd0 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -69,7 +69,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
- copied = access_process_vm(child, (u64)addrOthers, &tmp,
+ copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
@@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
ret = 0;
- if (access_process_vm(child, (u64)addrOthers, &data,
+ if (ptrace_access_vm(child, (u64)addrOthers, &data,
sizeof(data),
FOLL_FORCE | FOLL_WRITE) == sizeof(data))
break;
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
+ PTR 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
PTR 9b,bad_stack; \
+ PTR 9b+4,bad_stack; \
.previous
.set noreorder
.set mips1
- /* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
- EX(sw t1,(SC_FPC_CSR)(a0))
- cfc1 t0,$0 # implementation/version
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
jr ra
+ EX(sw t1, (a1))
.set pop
- .set nomacro
- EX(sw t0,(SC_FPC_EIR)(a0))
- .set macro
END(_save_fp_context)
-/*
- * Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
- EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
jr ra
- ctc1 t0,fcr31
+ ctc1 t0, fcr31
.set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
.set push
SET_HARDFLOAT
- /* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
@@ -30,59 +37,59 @@
cfc1 t1,fcr31
/* Store the 16 double precision registers */
- sdc1 $f0,(SC_FPREGS+0)(a0)
- sdc1 $f2,(SC_FPREGS+16)(a0)
- sdc1 $f4,(SC_FPREGS+32)(a0)
- sdc1 $f6,(SC_FPREGS+48)(a0)
- sdc1 $f8,(SC_FPREGS+64)(a0)
- sdc1 $f10,(SC_FPREGS+80)(a0)
- sdc1 $f12,(SC_FPREGS+96)(a0)
- sdc1 $f14,(SC_FPREGS+112)(a0)
- sdc1 $f16,(SC_FPREGS+128)(a0)
- sdc1 $f18,(SC_FPREGS+144)(a0)
- sdc1 $f20,(SC_FPREGS+160)(a0)
- sdc1 $f22,(SC_FPREGS+176)(a0)
- sdc1 $f24,(SC_FPREGS+192)(a0)
- sdc1 $f26,(SC_FPREGS+208)(a0)
- sdc1 $f28,(SC_FPREGS+224)(a0)
- sdc1 $f30,(SC_FPREGS+240)(a0)
+ sdc1 $f0,0(a0)
+ sdc1 $f2,16(a0)
+ sdc1 $f4,32(a0)
+ sdc1 $f6,48(a0)
+ sdc1 $f8,64(a0)
+ sdc1 $f10,80(a0)
+ sdc1 $f12,96(a0)
+ sdc1 $f14,112(a0)
+ sdc1 $f16,128(a0)
+ sdc1 $f18,144(a0)
+ sdc1 $f20,160(a0)
+ sdc1 $f22,176(a0)
+ sdc1 $f24,192(a0)
+ sdc1 $f26,208(a0)
+ sdc1 $f28,224(a0)
+ sdc1 $f30,240(a0)
jr ra
- sw t0,SC_FPC_CSR(a0)
+ sw t0,(a1)
1: jr ra
nop
END(_save_fp_context)
-/* Restore FPU state:
- * - fp gp registers
- * - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
*
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
- lw t0,SC_FPC_CSR(a0)
+ lw t0,(a1)
/* Restore the 16 double precision registers */
- ldc1 $f0,(SC_FPREGS+0)(a0)
- ldc1 $f2,(SC_FPREGS+16)(a0)
- ldc1 $f4,(SC_FPREGS+32)(a0)
- ldc1 $f6,(SC_FPREGS+48)(a0)
- ldc1 $f8,(SC_FPREGS+64)(a0)
- ldc1 $f10,(SC_FPREGS+80)(a0)
- ldc1 $f12,(SC_FPREGS+96)(a0)
- ldc1 $f14,(SC_FPREGS+112)(a0)
- ldc1 $f16,(SC_FPREGS+128)(a0)
- ldc1 $f18,(SC_FPREGS+144)(a0)
- ldc1 $f20,(SC_FPREGS+160)(a0)
- ldc1 $f22,(SC_FPREGS+176)(a0)
- ldc1 $f24,(SC_FPREGS+192)(a0)
- ldc1 $f26,(SC_FPREGS+208)(a0)
- ldc1 $f28,(SC_FPREGS+224)(a0)
- ldc1 $f30,(SC_FPREGS+240)(a0)
+ ldc1 $f0,0(a0)
+ ldc1 $f2,16(a0)
+ ldc1 $f4,32(a0)
+ ldc1 $f6,48(a0)
+ ldc1 $f8,64(a0)
+ ldc1 $f10,80(a0)
+ ldc1 $f12,96(a0)
+ ldc1 $f14,112(a0)
+ ldc1 $f16,128(a0)
+ ldc1 $f18,144(a0)
+ ldc1 $f20,160(a0)
+ ldc1 $f22,176(a0)
+ ldc1 $f24,192(a0)
+ ldc1 $f26,208(a0)
+ ldc1 $f28,224(a0)
+ ldc1 $f30,240(a0)
jr ra
ctc1 t0,fcr31
1: jr ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
- {
+ if (initial_boot_params) {
int node, len;
u64 *prop;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
+#ifndef CONFIG_HIGHMEM
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+ * max_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+#endif
+
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
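
The #ifndef CONFIG_HIGHMEM block above keeps max_low_pfn accurate by skipping regions that live entirely above HIGHMEM_START and clipping regions that straddle it. A userspace model of that clamping; the boundary and the region values are invented for illustration:

    #include <stdio.h>

    /* Pretend the lowmem/highmem boundary sits at PFN 0x20000
     * (512 MiB with 4 KiB pages). */
    #define TOY_HIGHMEM_START_PFN 0x20000UL

    /* Returns the clamped end PFN, or 0 if the region is all highmem. */
    static unsigned long toy_clamp_end(unsigned long start, unsigned long end)
    {
            if (start >= TOY_HIGHMEM_START_PFN)
                    return 0;                       /* skip: wholly above lowmem */
            if (end > TOY_HIGHMEM_START_PFN)
                    end = TOY_HIGHMEM_START_PFN;    /* clip the highmem tail */
            return end;
    }

    int main(void)
    {
            printf("%#lx\n", toy_clamp_end(0x00000UL, 0x30000UL)); /* clipped */
            printf("%#lx\n", toy_clamp_end(0x28000UL, 0x30000UL)); /* skipped */
            return 0;
    }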
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 8d0170969e22..a7f81261c781 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(rtc_lock);
int __weak rtc_mips_set_time(unsigned long sec)
{
- return 0;
+ return -ENODEV;
}
int __weak rtc_mips_set_mmss(unsigned long nowtime)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1f5fdee1dfc3..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
- printk("\n");
+ pr_cont("\n");
}
/*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
- if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk(" ");
+ }
if (i > 39) {
- printk(" ...");
+ pr_cont(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
- printk(" (Bad stack address)");
+ pr_cont(" (Bad stack address)");
break;
}
- printk(" %0*lx", field, stackdata);
+ pr_cont(" %0*lx", field, stackdata);
i++;
}
- printk("\n");
+ pr_cont("\n");
show_backtrace(task, regs);
}
@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
long i;
unsigned short __user *pc16 = NULL;
- printk("\nCode:");
+ printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
- printk(" (Bad address in epc)\n");
+ pr_cont(" (Bad address in epc)\n");
break;
}
- printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+ pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
+ pr_cont("\n");
}
static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
- printk(" %0*lx", field, 0UL);
+ pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
- printk(" %*s", field, "");
+ pr_cont(" %*s", field, "");
else
- printk(" %0*lx", field, regs->regs[i]);
+ pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
- printk("KUo ");
+ pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
- printk("IEo ");
+ pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
- printk("KUp ");
+ pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
- printk("IEp ");
+ pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
- printk("KUc ");
+ pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
- printk("IEc ");
+ pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
- printk("KX ");
+ pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
- printk("SX ");
+ pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
- printk("UX ");
+ pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
- printk("USER ");
+ pr_cont("USER ");
break;
case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
+ pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
- printk("KERNEL ");
+ pr_cont("KERNEL ");
break;
default:
- printk("BAD_MODE ");
+ pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
- printk("ERL ");
+ pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
- printk("EXL ");
+ pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
- printk("IE ");
+ pr_cont("IE ");
}
- printk("\n");
+ pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
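
force_fcr31_sig(), added above, assumes its caller has already masked the Cause bits against the Enable bits and then chooses the SIGFPE si_code by priority, testing Inexact last so it cannot shadow Overflow or Underflow. A standalone model of that selection; the FCSR bit values follow the MIPS32 layout and main() is illustrative only:

    #define _POSIX_C_SOURCE 200809L
    #include <signal.h>
    #include <stdio.h>

    /* MIPS FCSR Cause bits (bits 16..12). */
    #define FPU_CSR_INV_X   0x00010000u
    #define FPU_CSR_DIV_X   0x00008000u
    #define FPU_CSR_OVF_X   0x00004000u
    #define FPU_CSR_UDF_X   0x00002000u
    #define FPU_CSR_INE_X   0x00001000u

    /* Pick a SIGFPE si_code from already-masked Cause bits; Inexact is
     * tested last so it never shadows Overflow or Underflow. */
    static int toy_fcr31_to_si_code(unsigned int fcr31)
    {
            if (fcr31 & FPU_CSR_INV_X)
                    return FPE_FLTINV;
            if (fcr31 & FPU_CSR_DIV_X)
                    return FPE_FLTDIV;
            if (fcr31 & FPU_CSR_OVF_X)
                    return FPE_FLTOVF;
            if (fcr31 & FPU_CSR_UDF_X)
                    return FPE_FLTUND;
            if (fcr31 & FPU_CSR_INE_X)
                    return FPE_FLTRES;
            return 0;       /* the kernel falls back to __SI_FAULT here */
    }

    int main(void)
    {
            printf("si_code = %d\n",
                   toy_fcr31_to_si_code(FPU_CSR_OVF_X | FPU_CSR_INE_X));
            return 0;
    }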
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8770f32c9e0b..aa0937423e28 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
- kvm_clear_c0_guest_status(cop0, ST0_ERL);
- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else {
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
+ unsigned long curr_pc;
u32 op, rt;
u32 bytes;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
- vcpu->arch.pending_load_cause = cause;
+ /*
+ * Find the resume PC now while we have safe and easy access to the
+ * prior branch instruction, and save it for
+ * kvm_mips_complete_mmio_load() to restore later.
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+ vcpu->arch.io_pc = vcpu->arch.pc;
+ vcpu->arch.pc = curr_pc;
+
vcpu->arch.io_gpr = rt;
switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
- if (er == EMULATE_FAIL)
- return er;
+ /* Restore saved resume PC */
+ vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
break;
}
- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
- kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
- vcpu->mmio_needed);
-
done:
return er;
}
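
The emulate.c change above computes the post-load resume PC while the branch and delay-slot context is still known, parks it in the new io_pc field, and only installs it once the MMIO load actually completes. A toy model of that save/restore ordering; the structure and the fixed +4 advance are simplifications, not the real KVM code:

    #include <stdio.h>

    /* Toy stand-ins: only the fields relevant to the resume-PC handling. */
    struct toy_vcpu {
            unsigned long pc;       /* guest PC of the faulting load */
            unsigned long io_pc;    /* where to resume after the MMIO reply */
    };

    /* At emulation time: compute the resume PC while the (possible) branch
     * context is still known, stash it, and keep the trapping PC for now.
     * The fixed +4 stands in for update_pc()/branch resolution. */
    static void toy_emulate_load(struct toy_vcpu *vcpu)
    {
            unsigned long curr_pc = vcpu->pc;

            vcpu->pc = curr_pc + 4;
            vcpu->io_pc = vcpu->pc;
            vcpu->pc = curr_pc;
    }

    /* When the MMIO load completes: just install the saved resume PC. */
    static void toy_complete_mmio_load(struct toy_vcpu *vcpu)
    {
            vcpu->pc = vcpu->io_pc;
    }

    int main(void)
    {
            struct toy_vcpu vcpu = { .pc = 0x80001000UL };

            toy_emulate_load(&vcpu);
            toy_complete_mmio_load(&vcpu);
            printf("resume pc: %#lx\n", vcpu.pc);
            return 0;
    }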
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 622037d851a3..06a60b19acfb 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- int cpu = smp_processor_id();
+ int i, cpu = smp_processor_id();
unsigned int gasid;
/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ vcpu->arch.guest_user_asid[i] = 0;
vcpu->arch.last_user_gasid = gasid;
}
}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 03883ba806e2..3b677c851be0 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
asid_version_mask(cpu)) {
- u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
- KVM_ENTRYHI_ASID;
-
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
- vcpu->arch.last_user_gasid = gasid;
newasid++;
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 2a1b3021589c..82bbd0e2e298 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -24,7 +24,7 @@
/* GPE frequency selection */
#define GPPC_OFFSET 24
-#define GPEFREQ_MASK 0x00000C0
+#define GPEFREQ_MASK 0x0000C00
#define GPEFREQ_OFFSET 10
/* Clock status register */
#define SYSCTL_CLKS 0x0000
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 0f80b936e75e..6eb50a7137db 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- printk("va=%0*lx asid=%0*lx",
- vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL),
+ asidwidth, entryhi & asidmask);
if (cpu_has_guestid)
- printk(" gid=%02lx",
- (guestctl1 & MIPS_GCTL1_RID)
+ pr_cont(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
- printk("\n\t[");
+ pr_cont("\n\t[");
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
- pwidth, pa, c0,
- (entrylo0 & ENTRYLO_D) ? 1 : 0,
- (entrylo0 & ENTRYLO_V) ? 1 : 0,
- (entrylo0 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
- printk("ri=%d xi=%d ",
- (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
- (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
- printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
- pwidth, pa, c1,
- (entrylo1 & ENTRYLO_D) ? 1 : 0,
- (entrylo1 & ENTRYLO_V) ? 1 : 0,
- (entrylo1 & ENTRYLO_G) ? 1 : 0);
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 744f4a7bc49d..85b4086e553e 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
*/
printk("Index: %2d ", i);
- printk("va=%08lx asid=%08lx"
- " [pa=%06lx n=%d d=%d v=%d g=%d]",
- entryhi & PAGE_MASK,
- entryhi & asid_mask,
- entrylo0 & PAGE_MASK,
- (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
- (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+ pr_cont("va=%08lx asid=%08lx"
+ " [pa=%06lx n=%d d=%d v=%d g=%d]",
+ entryhi & PAGE_MASK,
+ entryhi & asid_mask,
+ entrylo0 & PAGE_MASK,
+ (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 1a80b6f73ab2..aab4fd681e1f 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
mb();
return r;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 46d5696c4f27..a39c36af97ad 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page,
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, unsigned long attrs)
{
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
plat_post_dma_flush(dev);
@@ -307,7 +307,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(page, offset, size, direction);
return plat_map_dma_mem_page(dev, page) + offset;
@@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nhwentries, i) {
if (!plat_device_is_coherent(dev) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
direction != DMA_TO_DEVICE)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d56a855828c2..3bef306cdfdb 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -209,17 +209,18 @@ bad_area_nosemaphore:
if (show_unhandled_signals &&
unhandled_signal(tsk, SIGSEGV) &&
__ratelimit(&ratelimit_state)) {
- pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+ pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address);
pr_info("epc = %0*lx in", field,
(unsigned long) regs->cp0_epc);
- print_vma_addr(" ", regs->cp0_epc);
+ print_vma_addr(KERN_CONT " ", regs->cp0_epc);
+ pr_cont("\n");
pr_info("ra = %0*lx in", field,
(unsigned long) regs->regs[31]);
- print_vma_addr(" ", regs->regs[31]);
- pr_info("\n");
+ print_vma_addr(KERN_CONT " ", regs->regs[31]);
+ pr_cont("\n");
}
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
info.si_signo = SIGSEGV;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3a6edecc3f38..e86ebcf5c071 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
writex_c0_entrylo1(entrylo);
}
#endif
- tlbidx = read_c0_wired();
+ tlbidx = num_wired_entries();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
@@ -147,7 +147,7 @@ void kunmap_coherent(void)
local_irq_save(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = num_wired_entries() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bba9c1484b41..0596505770db 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void)
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- entry = read_c0_wired();
+ entry = num_wired_entries();
/*
* Blast 'em all away.
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = num_wired_entries();
if (--temp_tlb_entry < wired) {
printk(KERN_WARNING
"No TLB space left for add_temporary_entry\n");
diff --git a/arch/mn10300/include/asm/mutex.h b/arch/mn10300/include/asm/mutex.h
deleted file mode 100644
index 84f5490c6fb4..000000000000
--- a/arch/mn10300/include/asm/mutex.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* MN10300 Mutex fastpath
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- *
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#include <asm-generic/mutex-null.h>
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index b10ba121c849..18e17abf7664 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -69,7 +69,6 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
extern void dodgy_tsc(void);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* User space process size: 1.75GB (default).
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5129f23a9ee1..0e12527c4b0e 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/mn10300/unit-asb2303/include/unit/smc91111.h b/arch/mn10300/unit-asb2303/include/unit/smc91111.h
index dd456e9c513f..dd4e2946438e 100644
--- a/arch/mn10300/unit-asb2303/include/unit/smc91111.h
+++ b/arch/mn10300/unit-asb2303/include/unit/smc91111.h
@@ -30,7 +30,7 @@
#if SMC_CAN_USE_16BIT
#define SMC_inw(a, r) inw((unsigned long) ((a) + (r)))
-#define SMC_outw(v, a, r) outw(v, (unsigned long) ((a) + (r)))
+#define SMC_outw(lp, v, a, r) outw(v, (unsigned long) ((a) + (r)))
#define SMC_insw(a, r, p, l) insw((unsigned long) ((a) + (r)), (p), (l))
#define SMC_outsw(a, r, p, l) outsw((unsigned long) ((a) + (r)), (p), (l))
#endif
diff --git a/arch/nios2/include/asm/mutex.h b/arch/nios2/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/nios2/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 1c953f0cadbf..3bbbc3d798e5 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -88,7 +88,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index d9563ddb337e..746bf5caaffc 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
ret = nios2_clocksource_init(timer);
break;
default:
+ ret = 0;
break;
}
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index d800fad87896..f6a5dcf9d682 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -98,13 +98,17 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, sg, nents, i) {
- void *addr;
+ void *addr = sg_virt(sg);
- addr = sg_virt(sg);
- if (addr) {
- __dma_sync_for_device(addr, sg->length, direction);
- sg->dma_address = sg_phys(sg);
- }
+ if (!addr)
+ continue;
+
+ sg->dma_address = sg_phys(sg);
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
+ __dma_sync_for_device(addr, sg->length, direction);
}
return nents;
@@ -117,7 +121,9 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
{
void *addr = page_address(page) + offset;
- __dma_sync_for_device(addr, size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync_for_device(addr, size, direction);
+
return page_to_phys(page) + offset;
}
@@ -125,7 +131,8 @@ static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -138,6 +145,9 @@ static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
if (direction == DMA_TO_DEVICE)
return;
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
for_each_sg(sg, sg, nhwentries, i) {
addr = sg_virt(sg);
if (addr)
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 489e7f909286..8d22015fde3e 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -26,6 +26,7 @@ config OPENRISC
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
+ select NO_BOOTMEM
config MMU
def_bool y
@@ -98,6 +99,9 @@ config OPENRISC_HAVE_INST_DIV
Select this if your implementation has a hardware divide instruction
endmenu
+config NR_CPUS
+ int
+ default "1"
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
diff --git a/arch/openrisc/README.openrisc b/arch/openrisc/README.openrisc
index c9f7edf2b9a2..072069ab5100 100644
--- a/arch/openrisc/README.openrisc
+++ b/arch/openrisc/README.openrisc
@@ -6,7 +6,7 @@ target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
For information about OpenRISC processors and ongoing development:
- website http://openrisc.net
+ website http://openrisc.io
For more information about Linux on OpenRISC, please contact South Pole AB.
@@ -24,17 +24,17 @@ In order to build and run Linux for OpenRISC, you'll need at least a basic
toolchain and, perhaps, the architectural simulator. Steps to get these bits
in place are outlined here.
-1) The toolchain can be obtained from openrisc.net. Instructions for building
+1) The toolchain can be obtained from openrisc.io. Instructions for building
a toolchain can be found at:
-http://openrisc.net/toolchain-build.html
+https://github.com/openrisc/tutorials
2) or1ksim (optional)
or1ksim is the architectural simulator which will allow you to actually run
your OpenRISC Linux kernel if you don't have an OpenRISC processor at hand.
- git clone git://openrisc.net/jonas/or1ksim-svn
+ git clone https://github.com/openrisc/or1ksim.git
cd or1ksim
./configure --prefix=$OPENRISC_PREFIX
diff --git a/arch/openrisc/TODO.openrisc b/arch/openrisc/TODO.openrisc
index acfeef9c58e3..0eb04c8240f9 100644
--- a/arch/openrisc/TODO.openrisc
+++ b/arch/openrisc/TODO.openrisc
@@ -5,9 +5,6 @@ that are due for investigation shortly, i.e. our TODO list:
-- Implement the rest of the DMA API... dma_map_sg, etc.
--- Consolidate usage of memblock and bootmem... move everything over to
- memblock.
-
-- Finish the renaming cleanup... there are references to or32 in the code
which was an older name for the architecture. The name we've settled on is
or1k and this change is slowly trickling through the stack. For the time
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
index 4ce7a01a252d..5f55da9cbfd5 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
@@ -23,6 +23,8 @@
* they shouldn't be hard-coded!
*/
+#define __ro_after_init __read_mostly
+
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
deleted file mode 100644
index b85a0cfa9fc9..000000000000
--- a/arch/openrisc/include/asm/mutex.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 87eebd185089..3e1a46615120 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -23,7 +23,6 @@
#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
extern int mem_init_done;
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 69c7df0e1420..3567aa7be555 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -69,7 +69,7 @@ extern void paging_init(void);
*/
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
+#define PTRS_PER_PGD (1UL << (32-PGDIR_SHIFT))
/* calculate how many PGD entries a user-level program can use
* the first mappable virtual address is 0
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 70334c9f7d24..a908e6c30a00 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -92,7 +92,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 140c99140649..906998bac957 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -141,6 +141,9 @@ or1k_map_page(struct device *dev, struct page *page,
unsigned long cl;
dma_addr_t addr = page_to_phys(page) + offset;
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return addr;
+
switch (dir) {
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index fec8bf97d806..aac0bde3330c 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -264,7 +264,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
l.srli r6,r6,26 // check opcode for write access
#endif
- l.sfgeui r6,0x34 // check opcode for write access
+ l.sfgeui r6,0x33 // check opcode for write access
l.bnf 1f
l.sfleui r6,0x37
l.bnf 1f
@@ -1101,8 +1101,16 @@ ENTRY(__sys_fork)
l.addi r3,r1,0
ENTRY(sys_rt_sigreturn)
- l.j _sys_rt_sigreturn
+ l.jal _sys_rt_sigreturn
l.addi r3,r1,0
+ l.sfne r30,r0
+ l.bnf _no_syscall_trace
+ l.nop
+ l.jal do_syscall_trace_leave
+ l.addi r3,r1,0
+_no_syscall_trace:
+ l.j _resume_userspace
+ l.nop
/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
* The functions takes a variable number of parameters depending on which
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 7095dfe7666b..277123bb4bf8 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -173,6 +173,19 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
if (usp)
userregs->sp = usp;
+
+ /*
+ * For CLONE_SETTLS set "tp" (r10) to the TLS pointer passed to sys_clone.
+ *
+ * The kernel entry is:
+ * int clone (long flags, void *child_stack, int *parent_tid,
+ * int *child_tid, void *tls)
+ *
+ * This makes the source r7 in the kernel registers.
+ */
+ if (clone_flags & CLONE_SETTLS)
+ userregs->gpr[10] = userregs->gpr[7];
+
userregs->gpr[11] = 0; /* Result from fork() */
kregs->gpr[20] = 0; /* Userspace thread */
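
The copy_thread() hunk above implements CLONE_SETTLS by copying the clone() tls argument, which arrives in r7, into r10, the OpenRISC thread pointer. A toy model of that register plumbing; the struct and flag name below are invented stand-ins (the flag value matches CLONE_SETTLS):

    #include <stdio.h>

    #define TOY_CLONE_SETTLS 0x00080000UL   /* same value as CLONE_SETTLS */

    /* Only the registers the hunk above touches: r7 carries clone()'s tls
     * argument, r10 is the thread pointer, r11 the syscall return value. */
    struct toy_regs {
            unsigned long gpr[32];
    };

    static void toy_copy_thread(unsigned long clone_flags, struct toy_regs *regs)
    {
            if (clone_flags & TOY_CLONE_SETTLS)
                    regs->gpr[10] = regs->gpr[7];

            regs->gpr[11] = 0;      /* the child sees 0 from clone()/fork() */
    }

    int main(void)
    {
            struct toy_regs regs = { .gpr = { [7] = 0x30001000UL } };

            toy_copy_thread(TOY_CLONE_SETTLS, &regs);
            printf("tp (r10) = %#lx\n", regs.gpr[10]);
            return 0;
    }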
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index b4ed8b36e078..cb797a3beb47 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -38,7 +38,6 @@
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/device.h>
-#include <linux/of_platform.h>
#include <asm/sections.h>
#include <asm/segment.h>
@@ -51,18 +50,16 @@
#include "vmlinux.h"
-static unsigned long __init setup_memory(void)
+static void __init setup_memory(void)
{
- unsigned long bootmap_size;
unsigned long ram_start_pfn;
- unsigned long free_ram_start_pfn;
unsigned long ram_end_pfn;
phys_addr_t memory_start, memory_end;
struct memblock_region *region;
memory_end = memory_start = 0;
- /* Find main memory where is the kernel */
+ /* Find main memory where the kernel is; we assume it's the only one */
for_each_memblock(memory, region) {
memory_start = region->base;
memory_end = region->base + region->size;
@@ -75,10 +72,11 @@ static unsigned long __init setup_memory(void)
}
ram_start_pfn = PFN_UP(memory_start);
- /* free_ram_start_pfn is first page after kernel */
- free_ram_start_pfn = PFN_UP(__pa(_end));
ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ /* setup bootmem globals (we use no_bootmem, but mm still depends on this) */
+ min_low_pfn = ram_start_pfn;
+ max_low_pfn = ram_end_pfn;
max_pfn = ram_end_pfn;
/*
@@ -86,22 +84,13 @@ static unsigned long __init setup_memory(void)
*
* This makes the memory from the end of the kernel to the end of
* RAM usable.
- * init_bootmem sets the global values min_low_pfn, max_low_pfn.
*/
- bootmap_size = init_bootmem(free_ram_start_pfn,
- ram_end_pfn - ram_start_pfn);
- free_bootmem(PFN_PHYS(free_ram_start_pfn),
- (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
- reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
- BOOTMEM_DEFAULT);
-
- for_each_memblock(reserved, region) {
- printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n",
- (u32) region->base, (u32) region->size);
- reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
- }
+ memblock_reserve(__pa(_stext), _end - _stext);
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
- return ram_end_pfn;
+ memblock_dump_all();
}
struct cpuinfo cpuinfo;
@@ -219,15 +208,6 @@ void __init or32_early_setup(void *fdt)
early_init_devtree(fdt);
}
-static int __init openrisc_device_probe(void)
-{
- of_platform_populate(NULL, NULL, NULL, NULL);
-
- return 0;
-}
-
-device_initcall(openrisc_device_probe);
-
static inline unsigned long extract_value_bits(unsigned long reg,
short bit_nr, short width)
{
@@ -282,8 +262,6 @@ void calibrate_delay(void)
void __init setup_arch(char **cmdline_p)
{
- unsigned long max_low_pfn;
-
unflatten_and_copy_device_tree();
setup_cpuinfo();
@@ -304,8 +282,8 @@ void __init setup_arch(char **cmdline_p)
initrd_below_start_ok = 1;
#endif
- /* setup bootmem allocator */
- max_low_pfn = setup_memory();
+ /* setup memblock allocator */
+ setup_memory();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
@@ -317,7 +295,7 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
- printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
+ printk(KERN_INFO "OpenRISC Linux -- http://openrisc.io\n");
}
static int show_cpuinfo(struct seq_file *m, void *v)
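
setup_memory() above now derives min_low_pfn, max_low_pfn and max_pfn directly from the single memblock region and reserves the kernel image, instead of round-tripping through bootmem. A userspace model of that PFN bookkeeping; the page size and addresses are illustrative:

    #include <stdio.h>

    #define TOY_PAGE_SHIFT  13UL    /* OpenRISC uses 8 KiB pages; illustrative */
    #define TOY_PFN_UP(x)   (((x) + (1UL << TOY_PAGE_SHIFT) - 1) >> TOY_PAGE_SHIFT)
    #define TOY_PFN_DOWN(x) ((x) >> TOY_PAGE_SHIFT)

    int main(void)
    {
            /* One made-up memblock region covering all of RAM. */
            unsigned long memory_start = 0x00000000UL;
            unsigned long memory_end   = 0x02000000UL;      /* 32 MiB */

            unsigned long min_low_pfn = TOY_PFN_UP(memory_start);
            unsigned long max_low_pfn = TOY_PFN_DOWN(memory_end);
            unsigned long max_pfn = max_low_pfn;

            printf("pfns %lu..%lu, max_pfn %lu\n",
                   min_low_pfn, max_low_pfn, max_pfn);
            return 0;
    }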
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index d68b9ede8423..ef31fc24344e 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -30,7 +30,13 @@
#include <asm/cache.h>
#include <asm-generic/vmlinux.lds.h>
-OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32")
+#ifdef __OR1K__
+#define __OUTPUT_FORMAT "elf32-or1k"
+#else
+#define __OUTPUT_FORMAT "elf32-or32"
+#endif
+
+OUTPUT_FORMAT(__OUTPUT_FORMAT, __OUTPUT_FORMAT, __OUTPUT_FORMAT)
jiffies = jiffies_64 + 4;
SECTIONS
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 7f94652311d7..f67d82b9d22f 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -106,11 +106,11 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
- for (j = 0; p < e && j < PTRS_PER_PGD;
+ for (j = 0; p < e && j < PTRS_PER_PTE;
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
if (v >= (u32) _e_kernel_ro ||
v < (u32) _s_kernel_ro)
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index fa60b81aee3e..8705a46218f9 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -124,11 +124,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
if (likely(mem_init_done)) {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-#if 0
- /* FIXME: use memblock... */
pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
-#endif
}
if (pte)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 71c4a3aa3752..a14b86587013 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -34,7 +34,9 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+ select GENERIC_SCHED_CLOCK
+ select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+ select GENERIC_CLOCKEVENTS
select ARCH_NO_COHERENT_DMA_MMAP
select CPU_NO_EFFICIENT_FFS
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f9b3a81aefcd..91f53c07f410 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
generic-y += poll.h
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c2c43f714684..3a4ed9f91d57 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
old_pte = *ptep; \
- set_pte(ptep, pteval); \
if (pte_inserted(old_pte)) \
purge_tlb_entries(mm, addr); \
+ set_pte(ptep, pteval); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
- set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
+ set_pte(ptep, pte_mkold(pte));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
- set_pte(ptep, __pte(0));
if (pte_inserted(old_pte))
purge_tlb_entries(mm, addr);
+ set_pte(ptep, __pte(0));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
- set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
+ set_pte(ptep, pte_wrprotect(*ptep));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2e674e13e005..ca40741378be 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -309,7 +309,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* parisc_requires_coherency() is used to identify the combined VIPT/PIPT
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 9c935d717df9..7a109b73ddf7 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -89,4 +89,6 @@
#define SO_CNX_ADVICE 0x402E
+#define SCM_TIMESTAMPING_OPT_STATS 0x402F
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index a9b9407f38f7..6b0741e7a7ed 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -368,7 +368,9 @@
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define LINUX_GATEWAY_ADDR 0x100
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 629eb464d5ba..977f0a4f5ecf 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size, start;
+ unsigned long threshold;
alltime = mfctl(16);
flush_data_cache();
@@ -382,26 +383,30 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
- /* Racy, but if we see an intermediate value, it's ok too... */
- parisc_cache_flush_threshold = size * alltime / rangetime;
-
- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
- if (!parisc_cache_flush_threshold)
- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
-
- if (parisc_cache_flush_threshold > cache_info.dc_size)
- parisc_cache_flush_threshold = cache_info.dc_size;
-
- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+ if (threshold > cache_info.dc_size)
+ threshold = cache_info.dc_size;
+ if (threshold)
+ parisc_cache_flush_threshold = threshold;
+ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);
/* calculate TLB flush threshold */
+ /* On SMP machines, skip the TLB measure of kernel text which
+ * has been mapped as huge pages. */
+ if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
+ threshold = max(cache_info.it_size, cache_info.dt_size);
+ threshold *= PAGE_SIZE;
+ threshold /= num_online_cpus();
+ goto set_tlb_threshold;
+ }
+
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
- size = PAGE_SIZE;
+ size = 0;
start = (unsigned long) _text;
rangetime = mfctl(16);
while (start < (unsigned long) _end) {
@@ -414,13 +419,12 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
- parisc_tlb_flush_threshold = size * alltime / rangetime;
- parisc_tlb_flush_threshold *= num_online_cpus();
- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
- if (!parisc_tlb_flush_threshold)
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+set_tlb_threshold:
+ if (threshold)
+ parisc_tlb_flush_threshold = threshold;
+ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
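
The reworked parisc_setup_cache_timing() above sizes the TLB flush threshold from the I/D TLB entry counts on SMP machines that do not require coherency (since kernel text is mapped with huge pages there and cannot be timed meaningfully); otherwise it keeps the measured break-even point, scaled by the number of online CPUs because flush_tlb_all() costs every CPU a full flush. A rough model of the timing-based computation; all numbers below are made up:

    #include <stdio.h>

    #define TOY_PAGE_SIZE   4096ULL
    #define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1))

    int main(void)
    {
            /* Made-up measurements: cycles for a full TLB flush, and cycles
             * spent range-flushing `size` bytes of kernel text. */
            unsigned long long alltime = 12000, rangetime = 48000, size = 4 << 20;
            unsigned long long cpus = 4;

            /* Range flushing pays off only below roughly this many bytes. */
            unsigned long long threshold =
                    TOY_PAGE_ALIGN(cpus * size * alltime / rangetime);

            printf("TLB flush threshold: %llu KiB\n", threshold / 1024);
            return 0;
    }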
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index f8150669b8c6..700e2d2da096 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
- printk(", additional addresses: ");
+ pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
- printk("0x%lx ", dev->addr[k]);
+ pr_cont("0x%lx ", dev->addr[k]);
}
- printk("\n");
+ pr_cont("\n");
}
/**
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 545f9d2fe711..c05d1876d27c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -58,7 +58,7 @@ void __init setup_pdc(void)
status = pdc_system_map_find_mods(&module_result, &module_path, 0);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_SYSTEM_MAP;
- printk("System Map.\n");
+ pr_cont("System Map.\n");
return;
}
@@ -77,7 +77,7 @@ void __init setup_pdc(void)
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_PAT;
- printk("64 bit PAT.\n");
+ pr_cont("64 bit PAT.\n");
return;
}
#endif
@@ -97,12 +97,12 @@ void __init setup_pdc(void)
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
- printk("Snake.\n");
+ pr_cont("Snake.\n");
return;
default: /* Everything else */
- printk("Unsupported.\n");
+ pr_cont("Unsupported.\n");
panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
}
}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 985e06da37f5..adf7187f8951 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
fitmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
- pitlbe 0(%sr1, %r28)
+ pitlbe %r0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
fdtmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
- pdtlbe 0(%sr1, %r28)
+ pdtlbe %r0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm)
/* Purge any old translations */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
- pdtlb,l 0(%r29)
+ pdtlb,l %r0(%r28)
+ pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
- pdtlb 0(%r29)
+ pdtlb %r0(%r28)
+ pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
#endif
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm)
/* Purge any old translation */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm)
/* Purge any old translation */
#ifdef CONFIG_PA20
- pdtlb,l 0(%r28)
+ pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb 0(%r28)
+ pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -892,19 +892,10 @@ ENTRY_CFI(flush_dcache_page_asm)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
- cmpb,COND(<<) %r28, %r25,1b
+ cmpb,COND(<<) %r28, %r25,1b
fdc,m r31(%r28)
sync
-
-#ifdef CONFIG_PA20
- pdtlb,l 0(%r25)
-#else
- tlb_lock %r20,%r21,%r22
- pdtlb 0(%r25)
- tlb_unlock %r20,%r21,%r22
-#endif
-
bv %r0(%r2)
nop
.exit
@@ -931,13 +922,18 @@ ENTRY_CFI(flush_icache_page_asm)
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
- /* Purge any old translation */
+ /* Purge any old translation. Note that the FIC instruction
+ * may use either the instruction or data TLB. Given that we
+ * have a flat address space, it's not clear which TLB will be
+ * used. So, we purge both entries. */
#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r28)
#else
tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r28)
+ pdtlb %r0(%r28)
+ pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
#endif
@@ -974,15 +970,6 @@ ENTRY_CFI(flush_icache_page_asm)
fic,m %r31(%sr4,%r28)
sync
-
-#ifdef CONFIG_PA20
- pitlb,l %r0(%sr4,%r25)
-#else
- tlb_lock %r20,%r21,%r22
- pitlb (%sr4,%r25)
- tlb_unlock %r20,%r21,%r22
-#endif
-
bv %r0(%r2)
nop
.exit
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 02d9ed0f3949..b6298a85e8ae 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start(flags);
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
pdtlb_kernel(orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
@@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
- flush_kernel_dcache_range((unsigned long) addr, size);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ flush_kernel_dcache_range((unsigned long) addr, size);
+
return virt_to_phys(addr);
}
@@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
{
BUG_ON(direction == DMA_NONE);
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
if (direction == DMA_TO_DEVICE)
- return;
+ return;
/*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the
@@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
*/
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
- return;
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sg) = sg->length;
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
flush_kernel_dcache_range(vaddr, sg->length);
}
return nents;
@@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(direction == DMA_NONE);
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
if (direction == DMA_TO_DEVICE)
- return;
+ return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_vmap_range(sg_virt(sg), sg->length);
- return;
}
static void pa11_dma_sync_single_for_cpu(struct device *dev,
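
Every pa11_dma_* hunk above applies the same rule: when the caller passes DMA_ATTR_SKIP_CPU_SYNC it takes responsibility for cache maintenance itself, so the kernel dcache flush is skipped. A minimal sketch of that rule in a hypothetical map_page hook (the function name is illustrative, not part of the patch):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <asm/cacheflush.h>
	#include <asm/io.h>

	/* Illustrative only: flush unless the caller asked us not to. */
	static dma_addr_t example_dma_map_page(struct device *dev, struct page *page,
					       unsigned long offset, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
	{
		void *addr = page_address(page) + offset;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			flush_kernel_dcache_range((unsigned long)addr, size);

		return virt_to_phys(addr);
	}
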
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 81d6f6391944..2e66a887788e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -334,6 +334,10 @@ static int __init parisc_init(void)
/* tell PDC we're Linux. Nevermind failure. */
pdc_stable_write(0x40, &osid, sizeof(osid));
+ /* start with known state */
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
processor_init();
#ifdef CONFIG_SMP
pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f188..23de307c3052 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -100,14 +100,12 @@ set_thread_pointer:
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
- .align 256
+ .align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
- /* WARNING: Trashing sr2 and sr3 */
- mfsp %sr7,%r1 /* get userspace into sr3 */
- mtsp %r1,%sr3
- mtsp %r0,%sr2 /* get kernel space into sr2 */
-
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%sr3,%r26), %r28
+1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
-4: ldb 0(%sr3,%r25), %r25
+4: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%sr3,%r24), %r24
+5: ldb 0(%r24), %r24
nop
nop
nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
-6: ldh 0(%sr3,%r25), %r25
+6: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%sr3,%r24), %r24
+7: ldh 0(%r24), %r24
nop
nop
nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
-8: ldw 0(%sr3,%r25), %r25
+8: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%sr3,%r24), %r24
+9: ldw 0(%r24), %r24
nop
nop
nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%sr3,%r25), %r25
-11: ldd 0(%sr3,%r24), %r24
+10: ldd 0(%r25), %r25
+11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
-10: ldw 0(%sr3,%r25), %r22
-11: ldw 4(%sr3,%r25), %r23
+10: ldw 0(%r25), %r22
+11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%sr3,%r24), %fr4
+12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%sr3,%r26), %r29
+13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%sr3,%r26)
+14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%sr3,%r26), %r29
+15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%sr3,%r26)
+16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%sr3,%r26), %r29
+17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%sr3,%r26)
+18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -825,22 +827,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%sr3,%r26), %r29
+19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%sr3,%r26)
+20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%sr3,%r26), %r29
+19: ldw,ma 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%sr3,%r26), %r29
+20: ldw,ma 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
-21: fstdx %fr4, 0(%sr3,%r26)
+21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9b63b876a13a..325f30d82b64 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
@@ -39,18 +40,6 @@
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function. This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
*/
mtctl(next_tick, 16);
-#if !defined(CONFIG_64BIT)
- /* check for overflow on a 32bit kernel (every ~4 seconds). */
- if (unlikely(next_tick < now))
- this_cpu_inc(cr16_high_32_bits);
-#endif
-
/* Skip one clocktick on purpose if we missed next_tick.
* The new CR16 must be "later" than current CR16 otherwise
* itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
/* clock source code */
-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
{
return get_cycles();
}
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
}
-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
{
- u64 now;
-
- /* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
- now = mfctl(16);
-#else
- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
- /* return the value in ns (cycles_2_ns) */
- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+ return get_cycles();
}
@@ -316,17 +282,16 @@ u64 sched_clock(void)
void __init time_init(void)
{
- unsigned long current_cr16_khz;
+ unsigned long cr16_hz;
- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
- /* calculate mult/shift values for cr16 */
- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
- NSEC_PER_MSEC, 0);
-
start_cpu_itimer(); /* get CPU 0 started */
+ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+
/* register at clocksource framework */
- clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+ clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+ /* register as sched_clock source */
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
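
With the per-cpu high-word emulation gone, the 32-bit wrap handling is delegated to the generic sched_clock framework: sched_clock_register() takes a read callback, the usable counter width and the counter rate, and does the cycles-to-nanoseconds scaling and wrap extension itself. A minimal sketch of that registration, with an illustrative rate (EXAMPLE_HZ is not from the patch):

	#include <linux/sched_clock.h>
	#include <linux/timex.h>	/* get_cycles() */

	#define EXAMPLE_HZ	250000000UL	/* illustrative counter rate */

	static u64 notrace example_read_counter(void)
	{
		return get_cycles();	/* must stay trace-free, hence notrace */
	}

	static void __init example_sched_clock_init(void)
	{
		/* the framework handles scaling and wrap-around from here on */
		sched_clock_register(example_read_counter, BITS_PER_LONG, EXAMPLE_HZ);
	}
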
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 65fba4c34cd7..c7f120aaa98f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index eae2dc8bc218..9d47f2efa830 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -100,7 +100,8 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
- uartlite.c mpc52xx-psc.c opal.c opal-calls.S
+ uartlite.c mpc52xx-psc.c opal.c
+src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 57d42d129033..78aaf4ffd7ab 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -232,8 +232,12 @@ void start(void)
console_ops.close();
kentry = (kernel_entry_t) vmlinux.addr;
- if (ft_addr)
- kentry(ft_addr, 0, NULL);
+ if (ft_addr) {
+ if (platform_ops.kentry)
+ platform_ops.kentry(ft_addr, vmlinux.addr);
+ else
+ kentry(ft_addr, 0, NULL);
+ }
else
kentry((unsigned long)initrd.addr, initrd.size,
loader_info.promptr);
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
index ff2f1b97bc53..2a99fc9a3ccf 100644
--- a/arch/powerpc/boot/opal-calls.S
+++ b/arch/powerpc/boot/opal-calls.S
@@ -12,6 +12,19 @@
.text
+ .globl opal_kentry
+opal_kentry:
+ /* r3 is the fdt ptr */
+ mtctr r4
+ li r4, 0
+ li r5, 0
+ li r6, 0
+ li r7, 0
+ ld r11,opal@got(r2)
+ ld r8,0(r11)
+ ld r9,8(r11)
+ bctr
+
#define OPAL_CALL(name, token) \
.globl name; \
name: \
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index 1f37e1c1d6d8..0272570d02de 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -13,7 +13,7 @@
#include <libfdt.h>
#include "../include/asm/opal-api.h"
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64_BOOT_WRAPPER
/* Global OPAL struct used by opal-call.S */
struct opal {
@@ -23,14 +23,25 @@ struct opal {
static u32 opal_con_id;
+/* see opal-calls.S */
int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
int64_t opal_console_flush(uint64_t term_number);
int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
+
static int opal_con_open(void)
{
+ /*
+ * When OPAL loads the boot kernel it stashes the OPAL base and entry
+ * address in r8 and r9 so the kernel can use the OPAL console
+ * before unflattening the devicetree. While executing the wrapper will
+ * probably trash r8 and r9 so this kentry hook restores them before
+ * entering the decompressed kernel.
+ */
+ platform_ops.kentry = opal_kentry;
return 0;
}
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 309d1b127e96..fad1862f4b2d 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -30,6 +30,7 @@ struct platform_ops {
void * (*realloc)(void *ptr, unsigned long size);
void (*exit)(void);
void * (*vmlinux_alloc)(unsigned long size);
+ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
};
extern struct platform_ops platform_ops;
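
Together with the main.c hunk earlier, the new kentry member lets a boot-wrapper platform interpose on the final jump into vmlinux; the OPAL code uses it to reload the r8/r9 values the firmware handed over. A minimal sketch of how a platform might install the hook (everything except platform_ops.kentry is illustrative, and the register fix-up itself would be an assembly stub):

	#include "ops.h"

	void example_fixup_and_enter(unsigned long fdt_addr, void *vmlinux_addr);	/* hypothetical asm stub */

	/* Illustrative only: give the wrapper a platform-specific kernel entry. */
	static void example_kentry(unsigned long fdt_addr, void *vmlinux_addr)
	{
		example_fixup_and_enter(fdt_addr, vmlinux_addr);
	}

	static void example_platform_init(void)
	{
		platform_ops.kentry = example_kentry;	/* picked up by start() in main.c */
	}
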
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
index efa99c048543..2fe76f5e938a 100644
--- a/arch/powerpc/configs/dpaa.config
+++ b/arch/powerpc/configs/dpaa.config
@@ -1 +1,4 @@
CONFIG_FSL_DPAA=y
+CONFIG_FSL_PAMU=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 7998c177f0a2..87f40454bad3 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
-obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o
+obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d1492736d852..e0baba1535e6 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -14,6 +14,10 @@
#include <linux/threads.h>
#include <linux/kprobes.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/uaccess.h>
+#include <asm/epapr_hcalls.h>
#include <uapi/asm/ucontext.h>
@@ -109,4 +113,12 @@ void early_setup_secondary(void);
/* time */
void accumulate_stolen_time(void);
+/* misc runtime */
+extern u64 __bswapdi2(u64);
+extern s64 __lshrdi3(s64, int);
+extern s64 __ashldi3(s64, int);
+extern s64 __ashrdi3(s64, int);
+extern int __cmpdi2(s64, s64);
+extern int __ucmpdi2(u64, u64);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index e407af2b7333..2e6a823fa502 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -70,7 +70,9 @@
#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
+#define HPTE_V_COMMON_BITS ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0 ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
@@ -80,14 +82,16 @@
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
/*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
*/
#define HPTE_R_3_0_SSIZE_SHIFT 58
+#define HPTE_R_3_0_SSIZE_MASK (3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
@@ -316,12 +320,43 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
*/
v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+ /* trim AVPN, drop B */
+ return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+ /* move B field from 1st to 2nd dword, trim ARPN */
+ return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+ (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+ /* insert B field */
+ return (v & HPTE_V_COMMON_BITS) |
+ ((r & HPTE_R_3_0_SSIZE_MASK) <<
+ (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+ /* clear out B field */
+ return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
+/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
*/
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
* aligned for the requested page size
*/
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
- int actual_psize, int ssize)
+ int actual_psize)
{
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
/* A 4K page needs no special encoding */
if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
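
The four helpers above give the hash-MMU and HV KVM code one place to translate between the pre-ISA-3.0 and ISA-3.0 HPTE layouts: B moves from the first to the second doubleword and the AVPN/ARPN fields shrink. A minimal sketch of how they compose around a hardware HPTE on a POWER9 host (the functions are illustrative; the cpu_has_feature() guard and eieio() ordering follow the existing native hash code):

	/* Illustrative only: write an old-format (v, r) pair to the HPT. */
	static void example_write_hpte(__be64 *hptep, unsigned long v, unsigned long r)
	{
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			r = hpte_old_to_new_r(v, r);	/* B into dword 1, trim ARPN */
			v = hpte_old_to_new_v(v);	/* trim AVPN, drop B */
		}
		hptep[1] = cpu_to_be64(r);
		eieio();				/* dword 1 visible before valid bit */
		hptep[0] = cpu_to_be64(v);
	}

	/* Illustrative only: read it back in the old format. */
	static unsigned long example_read_hpte_v(__be64 *hptep)
	{
		unsigned long v = be64_to_cpu(hptep[0]);
		unsigned long r = be64_to_cpu(hptep[1]);

		if (cpu_has_feature(CPU_FTR_ARCH_300))
			v = hpte_new_to_old_v(v, r);	/* reinsert B from dword 1 */
		return v;
	}
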
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 9fd77f8794a0..0ebfbc8f0449 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1009,7 +1009,8 @@ static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
- struct spinlock *old_pmd_ptl)
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma)
{
if (radix_enabled())
return false;
@@ -1020,6 +1021,16 @@ static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
*/
return true;
}
+
+
+#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
+static inline bool arch_needs_pgtable_deposit(void)
+{
+ if (radix_enabled())
+ return false;
+ return true;
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 4f60db074725..aa2e6a34b872 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -46,26 +46,12 @@ extern cputime_t cputime_one_jiffy;
* Convert cputime <-> jiffies
*/
extern u64 __cputime_jiffies_factor;
-DECLARE_PER_CPU(unsigned long, cputime_last_delta);
-DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
-/* Estimate the scaled cputime by scaling the real cputime based on
- * the last scaled to real ratio */
-static inline cputime_t cputime_to_scaled(const cputime_t ct)
-{
- if (cpu_has_feature(CPU_FTR_SPURR) &&
- __this_cpu_read(cputime_last_delta))
- return (__force u64) ct *
- __this_cpu_read(cputime_scaled_last_delta) /
- __this_cpu_read(cputime_last_delta);
- return ct;
-}
-
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
u64 ct;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 84d49b197c32..9a3eee661297 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -91,7 +91,7 @@
*/
#define LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); /* get high part of &label */ \
- ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+ ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
#define __LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); \
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943) \
std ra,offset(r13); \
END_FTR_SECTION_NESTED(ftr,ftr,943)
-#define EXCEPTION_PROLOG_0(area) \
- GET_PACA(r13); \
+#define EXCEPTION_PROLOG_0_PACA(area) \
std r9,area+EX_R9(r13); /* save r9 */ \
OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
HMT_MEDIUM; \
std r10,area+EX_R10(r13); /* save r10 - r12 */ \
OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+#define EXCEPTION_PROLOG_0(area) \
+ GET_PACA(r13); \
+ EXCEPTION_PROLOG_0_PACA(area)
+
#define __EXCEPTION_PROLOG_1(area, extra, vec) \
OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0_PACA(area); \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, h);
+
#define __KVMTEST(h, n) \
lbz r10,HSTATE_IN_GUEST(r13); \
cmpwi r10,0; \
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 05cabed3d1bd..09a802bb702f 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -99,6 +99,7 @@
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_HMI 0xe60
#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
+#define BOOK3S_INTERRUPT_H_VIRT 0xea0
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28350a294b1e..e59b172666cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 10000 /* 10 us */
/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
@@ -244,8 +244,10 @@ struct kvm_arch_memory_slot {
struct kvm_arch {
unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned int tlb_sets;
unsigned long hpt_virt;
struct revmap_entry *revmap;
+ atomic64_t mmio_update;
unsigned int host_lpid;
unsigned long host_lpcr;
unsigned long sdr1;
@@ -408,6 +410,24 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2
+#define MMIO_HPTE_CACHE_SIZE 4
+
+struct mmio_hpte_cache_entry {
+ unsigned long hpte_v;
+ unsigned long hpte_r;
+ unsigned long rpte;
+ unsigned long pte_index;
+ unsigned long eaddr;
+ unsigned long slb_v;
+ long mmio_update;
+ unsigned int slb_base_pshift;
+};
+
+struct mmio_hpte_cache {
+ struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
+ unsigned int index;
+};
+
struct openpic;
struct kvm_vcpu_arch {
@@ -498,6 +518,8 @@ struct kvm_vcpu_arch {
ulong tcscr;
ulong acop;
ulong wort;
+ ulong tid;
+ ulong psscr;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
@@ -546,6 +568,7 @@ struct kvm_vcpu_arch {
u64 tfiar;
u32 cr_tm;
+ u64 xer_tm;
u64 lr_tm;
u64 ctr_tm;
u64 amr_tm;
@@ -655,9 +678,11 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct kvm_vcpu_arch_shared shregs;
+ struct mmio_hpte_cache mmio_cache;
unsigned long pgfault_addr;
long pgfault_index;
unsigned long pgfault_hpte[2];
+ struct mmio_hpte_cache_entry *pgfault_cache;
struct task_struct *run_task;
struct kvm_run *kvm_run;
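
The new per-vCPU mmio_cache keeps the last few HPTEs that turned out to describe emulated MMIO, so repeated faults on the same mapping can skip the hashed-page-table walk; the kvm-wide mmio_update counter is a generation number that invalidates the cache whenever HPTEs change. A minimal sketch of the lookup such a cache implies (the function and the exact match criteria are illustrative, not the KVM fault path itself):

	/* Illustrative only: find a still-valid cached entry for this access. */
	static struct mmio_hpte_cache_entry *
	example_mmio_cache_find(struct kvm_vcpu *vcpu, unsigned long eaddr,
				unsigned long slb_v, long cur_update)
	{
		struct mmio_hpte_cache *cache = &vcpu->arch.mmio_cache;
		unsigned int i;

		for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
			struct mmio_hpte_cache_entry *entry = &cache->entry[i];

			/* stale generations are ignored rather than flushed */
			if (entry->mmio_update == cur_update &&
			    entry->eaddr == eaddr && entry->slb_v == slb_v)
				return entry;
		}
		return NULL;
	}
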
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f6e49640dbe1..2da67bf1f2ec 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -483,9 +483,10 @@ extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
unsigned long host_irq);
-extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, u32 xirr,
- struct kvmppc_irq_map *irq_map,
- struct kvmppc_passthru_irqmap *pimap);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
+ struct kvmppc_irq_map *irq_map,
+ struct kvmppc_passthru_irqmap *pimap,
+ bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -510,6 +511,48 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
#endif
/*
+ * Prototypes for functions called only from assembler code.
+ * Having prototypes reduces sparse errors.
+ */
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages);
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages);
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+ unsigned int yield_count);
+long kvmppc_h_random(struct kvm_vcpu *vcpu);
+void kvmhv_commence_exit(int trap);
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_subcore_enter_guest(void);
+void kvmppc_subcore_exit_guest(void);
+long kvmppc_realmode_hmi_handler(void);
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va);
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index);
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long slb_v, unsigned int status, bool data);
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+/*
* Host-side operations we want to set up while running in real
* mode in the guest operating on the xics.
* Currently only VCPU wakeup is supported.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e88368354e49..8d1499334257 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,12 @@
*/
/*
+ * Kernel read only support.
+ * We added the ppp value 0b110 in ISA 2.04.
+ */
+#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
+
+/*
* We need to clear top 16bits of va (from the remaining 64 bits )in
* tlbie* instructions
*/
@@ -103,10 +109,10 @@
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -208,6 +214,11 @@ extern u64 ppc64_rma_size;
/* Cleanup function used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ unsigned long dw1);
#endif /* CONFIG_PPC64 */
struct mm_struct;
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
deleted file mode 100644
index 078155fa1189..000000000000
--- a/arch/powerpc/include/asm/mutex.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
- */
-#ifndef _ASM_POWERPC_MUTEX_H
-#define _ASM_POWERPC_MUTEX_H
-
-static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
-{
- int t;
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%1 # mutex trylock\n\
- cmpw 0,%0,%2\n\
- bne- 2f\n"
- PPC405_ERR77(0,%1)
-" stwcx. %3,0,%1\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- "\n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (old), "r" (new)
- : "cc", "memory");
-
- return t;
-}
-
-static inline int __mutex_dec_return_lock(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # mutex lock\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-static inline int __mutex_inc_return_unlock(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1 # mutex unlock\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1 \n\
- bne- 1b"
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(__mutex_dec_return_lock(count) < 0))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(__mutex_dec_return_lock(count) < 0))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(__mutex_inc_return_unlock(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to 0, and return 1 (success), or if the count
- * was not 1, then return 0 (failure).
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
- return 1;
- return 0;
-}
-
-#endif
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e958b7096f19..5c7db0f1a708 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -220,9 +220,12 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
int64_t opal_pci_poll2(uint64_t id, uint64_t data);
int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_rm_int_get_xirr(__be32 *out_xirr, bool just_poll);
int64_t opal_int_set_cppr(uint8_t cppr);
int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_rm_int_eoi(uint32_t xirr);
int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_rm_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
uint32_t pe_num, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 0132831b3081..c56ea8c84abb 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -460,5 +460,6 @@
#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index c07c31b0e89e..dac83fcb9445 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -404,8 +404,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define cpu_relax() barrier()
#endif
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9cd4e8cbc78c..04aa1ee8cdb6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -153,6 +153,8 @@
#define PSSCR_EC 0x00100000 /* Exit Criterion */
#define PSSCR_ESL 0x00200000 /* Enable State Loss */
#define PSSCR_SD 0x00400000 /* Status Disable */
+#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_GUEST_VIS 0xf0000000000003ff /* Guest-visible PSSCR fields */
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
@@ -236,6 +238,7 @@
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+#define SPRN_TIDR 144 /* Thread ID register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -294,6 +297,7 @@
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
+#define SPRN_ASDR 0x330 /* Access segment descriptor register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
@@ -305,6 +309,7 @@
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
+#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -320,6 +325,7 @@
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_LM __MASK(FSCR_LM_LG)
+#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
@@ -355,8 +361,10 @@
#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
#define LPCR_MER_SH 11
+#define LPCR_GTSE ASM_CONST(0x0000000000000400) /* Guest Translation Shootdown Enable */
#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
@@ -377,6 +385,12 @@
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify the
+ * compatibility level which we want to emulate and to determine the
+ * compatibility level which the host is capable of emulating.
+ */
+#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -1218,6 +1232,7 @@
#define PVR_ARCH_206 0x0f000003
#define PVR_ARCH_206p 0x0f100003
#define PVR_ARCH_207 0x0f000004
+#define PVR_ARCH_300 0x0f000005
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index fa37fe93bc02..8c1b913de6d7 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -52,6 +52,14 @@
#define SYNC_IO
#endif
+#ifdef CONFIG_PPC_PSERIES
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+}
+#endif
+
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
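
On shared-processor pseries the hypervisor increments the lppaca yield_count each time it preempts or dispatches a vCPU, so an odd value means the vCPU is currently preempted; the helper above exposes that as the generic vcpu_is_preempted() hint used by spin-wait loops. A minimal sketch of the kind of loop that benefits (illustrative code, not the generic locking implementation):

	#include <linux/sched.h>	/* cpu_relax() via asm/processor.h */

	/* Illustrative only: stop spinning on an owner that cannot run. */
	static bool example_spin_on_owner(int owner_cpu, int *locked)
	{
		while (READ_ONCE(*locked)) {
			if (vcpu_is_preempted(owner_cpu))
				return false;	/* owner is preempted, go sleep instead */
			cpu_relax();
		}
		return true;
	}
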
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 99e1397b71da..609557569f65 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -28,6 +28,7 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
extern void tlb_flush(struct mmu_gather *tlb);
@@ -46,6 +47,21 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
#endif
}
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+ if (!tlb->page_size)
+ tlb->page_size = page_size;
+ else if (tlb->page_size != page_size) {
+ tlb_flush_mmu(tlb);
+ /*
+ * update the page size after flush for the new
+ * mmu_gather.
+ */
+ tlb->page_size = page_size;
+ }
+}
+
#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
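
The new tlb_remove_check_page_size_change() hook lets a generic unmap path that mixes page sizes keep the mmu_gather consistent: if the size of the pages being queued changes, the gather is flushed first so the final tlb_flush() only ever covers one page size. A minimal sketch of a caller, assuming a hypothetical teardown step (the function name is illustrative):

	#include <asm/tlb.h>

	/* Illustrative only: declare the page size before queueing pages. */
	static void example_zap_one(struct mmu_gather *tlb, struct page *page,
				    unsigned int page_size)
	{
		/* flushes the pending batch first if page_size differs from it */
		tlb_remove_check_page_size_change(tlb, page_size);
		tlb_remove_page(tlb, page);
	}
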
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
index 343612f8fece..3192d7f0a05b 100644
--- a/arch/powerpc/include/asm/xilinx_intc.h
+++ b/arch/powerpc/include/asm/xilinx_intc.h
@@ -14,7 +14,7 @@
#ifdef __KERNEL__
extern void __init xilinx_intc_init_tree(void);
-extern unsigned int xilinx_intc_get_irq(void);
+extern unsigned int xintc_get_irq(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35ce379..3603b6f51b11 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -573,6 +573,10 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
#define KVM_REG_PPC_DBSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
+/* POWER9 registers */
+#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
@@ -596,6 +600,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 1672e3398270..44583a52f882 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index caec7bf3b99a..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -487,6 +487,7 @@ int main(void)
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -548,6 +549,8 @@ int main(void)
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
+ DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid));
+ DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
@@ -569,6 +572,7 @@ int main(void)
DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+ DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 52ff3f025437..f3e1f5d29dce 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- ori r3, r3, LPCR_HVICE
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
@@ -174,7 +174,7 @@ __init_FSCR:
__init_HFSCR:
mfspr r3,SPRN_HFSCR
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
- HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+ HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
mtspr SPRN_HFSCR,r3
blr
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index e64a6016fba7..6877e3fa95bb 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -203,6 +203,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
sg->dma_length = sg->length;
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
@@ -235,7 +239,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
unsigned long attrs)
{
BUG_ON(dir == DMA_NONE);
- __dma_sync_page(page, offset, size, dir);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync_page(page, offset, size, dir);
+
return page_to_phys(page) + offset + get_dma_offset(dev);
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a62be72da274..5c31369435f2 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -671,8 +671,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
- if (rc)
+ if (rc) {
+ pci_unlock_rescan_remove();
return rc;
+ }
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08ba447a4b3d..1ba82ea90230 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ GET_PACA(r13)
+ clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+ EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
IDLETEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+ GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
SET_SCRATCH0(r13) /* save r13 */
/*
* Running native on arch 2.06 or later, we may wakeup from winkle
- * inside machine check. If yes, then last bit of HSPGR0 would be set
+ * inside machine check. If yes, then last bit of HSPRG0 would be set
* to 1. Hence clear it unconditionally.
*/
GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
/*
* Go back to winkle. Please note that this thread was woken up in
* machine check from winkle and have not restored the per-subcore
- * state. Hence before going back to winkle, set last bit of HSPGR0
+ * state. Hence before going back to winkle, set last bit of HSPRG0
* to 1. This will make sure that if this thread gets woken up
* again at reset vector 0x100 then it will get chance to restore
* the subcore state.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce6dc61b15b2..49a680d5ae37 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
int instr;
if (!(i % 8))
- printk("\n");
+ pr_cont("\n");
#if !defined(CONFIG_BOOKE)
/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
if (!__kernel_text_address(pc) ||
probe_kernel_address((unsigned int __user *)pc, instr)) {
- printk(KERN_CONT "XXXXXXXX ");
+ pr_cont("XXXXXXXX ");
} else {
if (regs->nip == pc)
- printk(KERN_CONT "<%08x> ", instr);
+ pr_cont("<%08x> ", instr);
else
- printk(KERN_CONT "%08x ", instr);
+ pr_cont("%08x ", instr);
}
pc += sizeof(int);
}
- printk("\n");
+ pr_cont("\n");
}
struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
for (; bits->bit; ++bits)
if (val & bits->bit) {
- printk("%s%s", s, bits->name);
+ pr_cont("%s%s", s, bits->name);
s = sep;
}
}
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
* T: Transactional (bit 34)
*/
if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
- printk(",TM[");
+ pr_cont(",TM[");
print_bits(val, msr_tm_bits, "");
- printk("]");
+ pr_cont("]");
}
}
#else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
static void print_msr_bits(unsigned long val)
{
- printk("<");
+ pr_cont("<");
print_bits(val, msr_bits, ",");
print_tm_bits(val);
- printk(">");
+ pr_cont(">");
}
#ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
- printk("CFAR: "REG" ", regs->orig_gpr3);
+ pr_cont("CFAR: "REG" ", regs->orig_gpr3);
if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+ pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
- printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+ pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
- printk("SOFTE: %ld ", regs->softe);
+ pr_cont("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr))
- printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+ pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\nGPR%02d: ", i);
- printk(REG " ", regs->gpr[i]);
+ pr_cont("\nGPR%02d: ", i);
+ pr_cont(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
}
- printk("\n");
+ pr_cont("\n");
#ifdef CONFIG_KALLSYMS
/*
* Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((ip == rth) && curr_frame >= 0) {
- printk(" (%pS)",
+ pr_cont(" (%pS)",
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
#endif
if (firstframe)
- printk(" (unreliable)");
- printk("\n");
+ pr_cont(" (unreliable)");
+ pr_cont("\n");
}
firstframe = 0;
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 010b7b310237..1e887f3a61a6 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -73,7 +73,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
- copied = access_process_vm(child, (u64)addrOthers, &tmp,
+ copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
@@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
ret = 0;
- if (access_process_vm(child, (u64)addrOthers, &tmp,
+ if (ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp),
FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
break;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7ac8e6eaab5b..8d586cff8a41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
if (firmware_has_feature(FW_FEATURE_OPAL))
opal_configure_cores();
- /* Enable AIL if supported, and we are in hypervisor mode */
- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
- unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
- }
+ /* AIL on native is done in cpu_ready_for_interrupts() */
}
}
static void cpu_ready_for_interrupts(void)
{
+ /*
+ * Enable AIL if supported, and we are in hypervisor mode. This
+ * is called once for every processor.
+ *
+ * If we are not in hypervisor mode the job is done once for
+ * the whole partition in configure_exceptions().
+ */
+ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c4f1d1f7bae0..c1fb255a60d6 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -703,7 +703,7 @@ static struct device_attribute pa6t_attrs[] = {
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
-static void register_cpu_online(unsigned int cpu)
+static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@@ -782,11 +782,12 @@ static void register_cpu_online(unsigned int cpu)
}
#endif
cacheinfo_cpu_online(cpu);
+ return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_cpu_online(unsigned int cpu)
+static int unregister_cpu_online(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
struct device_attribute *attrs, *pmc_attrs;
@@ -863,6 +864,8 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif
cacheinfo_cpu_offline(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+ return 0;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
@@ -883,32 +886,6 @@ ssize_t arch_cpu_release(const char *buf, size_t count)
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int sysfs_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned int)(long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- register_cpu_online(cpu);
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- unregister_cpu_online(cpu);
- break;
-#endif
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
-};
-
static DEFINE_MUTEX(cpu_mutex);
int cpu_add_dev_attr(struct device_attribute *attr)
@@ -1023,12 +1000,10 @@ static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
- int cpu;
+ int cpu, r;
register_nodes();
- cpu_notifier_register_begin();
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -1047,15 +1022,10 @@ static int __init topology_init(void)
device_create_file(&c->dev, &dev_attr_physical_id);
}
-
- if (cpu_online(cpu))
- register_cpu_online(cpu);
}
-
- __register_cpu_notifier(&sysfs_cpu_nb);
-
- cpu_notifier_register_done();
-
+ r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
+ register_cpu_online, unregister_cpu_online);
+ WARN_ON(r < 0);
#ifdef CONFIG_PPC64
sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */
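The sysfs.c hunks above are part of the tree-wide conversion from CPU notifier chains to the CPU hotplug state machine: register_cpu_online()/unregister_cpu_online() now return int so they can be used directly as cpuhp callbacks, and a single cpuhp_setup_state() call replaces the notifier plus the manual loop over already-online CPUs (the state machine invokes the online callback for those itself). A minimal sketch of that registration pattern follows; the state name and the example_* identifiers are illustrative, not taken from this patch.

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* set up per-CPU state; called for each CPU coming online and,
	 * at registration time, once for every CPU already online */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* undo example_cpu_online() for this CPU */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN requests a dynamically allocated state;
	 * a negative return value indicates failure */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
				example_cpu_online, example_cpu_offline);
	return ret < 0 ? ret : 0;
}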
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc3f7d0d7b79..be9751f1cb2a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
-DEFINE_PER_CPU(unsigned long, cputime_last_delta);
-DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
@@ -360,7 +358,8 @@ void vtime_account_system(struct task_struct *tsk)
unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
- account_system_time(tsk, 0, delta, sys_scaled);
+ account_system_time(tsk, 0, delta);
+ tsk->stimescaled += sys_scaled;
if (stolen)
account_steal_time(stolen);
}
@@ -393,7 +392,8 @@ void vtime_account_user(struct task_struct *tsk)
acct->user_time = 0;
acct->user_time_scaled = 0;
acct->utime_sspurr = 0;
- account_user_time(tsk, utime, utimescaled);
+ account_user_time(tsk, utime);
+ tsk->utimescaled += utimescaled;
}
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8295f51c1a5f..7394b770ae1f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -94,8 +94,17 @@ SECTIONS
* detected, and will result in a crash at boot due to offsets being
* wrong.
*/
+#ifdef CONFIG_PPC64
+ /*
+ * BLOCK(0) overrides the default output section alignment because
+ * this needs to start right after .head.text in order for fixed
+ * section placement to work.
+ */
+ .text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
+#else
.text : AT(ADDR(.text) - LOAD_OFFSET) {
ALIGN_FUNCTION();
+#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text .fixup __ftr_alt_* .ref.text)
SCHED_TEXT
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae82587..b795dd1ac2ef 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -88,6 +88,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
/* 128 (2**7) bytes in each HPTEG */
kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+ atomic64_set(&kvm->arch.mmio_update, 0);
+
/* Allocate reverse map array */
rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
if (!rev) {
@@ -255,7 +257,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, msr);
}
-long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
{
@@ -312,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_slb *slbe;
unsigned long slb_v;
unsigned long pp, key;
- unsigned long v, gr;
+ unsigned long v, orig_v, gr;
__be64 *hptep;
int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
@@ -337,10 +339,12 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return -ENOENT;
}
hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
- v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
gr = kvm->arch.revmap[index].guest_rpte;
- unlock_hpte(hptep, v);
+ unlock_hpte(hptep, orig_v);
preempt_enable();
gpte->eaddr = eaddr;
@@ -438,6 +442,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
struct kvm *kvm = vcpu->kvm;
unsigned long hpte[3], r;
+ unsigned long hnow_v, hnow_r;
__be64 *hptep;
unsigned long mmu_seq, psize, pte_size;
unsigned long gpa_base, gfn_base;
@@ -451,6 +456,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int writing, write_ok;
struct vm_area_struct *vma;
unsigned long rcbits;
+ long mmio_update;
/*
* Real-mode code has already searched the HPT and found the
@@ -460,6 +466,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (ea != vcpu->arch.pgfault_addr)
return RESUME_GUEST;
+
+ if (vcpu->arch.pgfault_cache) {
+ mmio_update = atomic64_read(&kvm->arch.mmio_update);
+ if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
+ r = vcpu->arch.pgfault_cache->rpte;
+ psize = hpte_page_size(vcpu->arch.pgfault_hpte[0], r);
+ gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+ gfn_base = gpa_base >> PAGE_SHIFT;
+ gpa = gpa_base | (ea & (psize - 1));
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ dsisr & DSISR_ISSTORE);
+ }
+ }
index = vcpu->arch.pgfault_index;
hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
rev = &kvm->arch.revmap[index];
@@ -472,6 +491,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unlock_hpte(hptep, hpte[0]);
preempt_enable();
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
+ hpte[1] = hpte_new_to_old_r(hpte[1]);
+ }
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
hpte[1] != vcpu->arch.pgfault_hpte[1])
return RESUME_GUEST;
@@ -575,16 +598,22 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (psize < PAGE_SIZE)
psize = PAGE_SIZE;
- r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
+ r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
+ ((pfn << PAGE_SHIFT) & ~(psize - 1));
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
- if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
- be64_to_cpu(hptep[1]) != hpte[1] ||
- rev->guest_rpte != hpte[2])
+ hnow_v = be64_to_cpu(hptep[0]);
+ hnow_r = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
+ hnow_r = hpte_new_to_old_r(hnow_r);
+ }
+ if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
+ rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -615,6 +644,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
}
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ r = hpte_old_to_new_r(hpte[0], r);
+ hpte[0] = hpte_old_to_new_v(hpte[0]);
+ }
hptep[1] = cpu_to_be64(r);
eieio();
__unlock_hpte(hptep, hpte[0]);
@@ -758,6 +791,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
hpte_rpn(ptel, psize) == gfn) {
hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
+ hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
@@ -1165,7 +1199,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
unsigned long *hpte, struct revmap_entry *revp,
int want_valid, int first_pass)
{
- unsigned long v, r;
+ unsigned long v, r, hr;
unsigned long rcbits_unset;
int ok = 1;
int valid, dirty;
@@ -1192,6 +1226,11 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
cpu_relax();
v = be64_to_cpu(hptp[0]);
+ hr = be64_to_cpu(hptp[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, hr);
+ hr = hpte_new_to_old_r(hr);
+ }
/* re-evaluate valid and dirty from synchronized HPTE value */
valid = !!(v & HPTE_V_VALID);
@@ -1199,8 +1238,8 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
/* Harvest R and C into guest view if necessary */
rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
- if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
- revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+ if (valid && (rcbits_unset & hr)) {
+ revp->guest_rpte |= (hr &
(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
dirty = 1;
}
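A recurring pattern in this and the following hunks: POWER9 (ISA v3.00, CPU_FTR_ARCH_300) rearranges the HPTE dword layout, so whenever an HPTE is read it is converted to the pre-POWER9 layout with hpte_new_to_old_v()/hpte_new_to_old_r() before the common processing, and converted back with the old_to_new helpers just before being written out. A condensed sketch of the read side; the wrapper itself is not in the patch, only the helpers and the feature test are.

/* Illustrative helper: read an HPTE and normalise it to the old layout. */
static void read_hpte_old_format(__be64 *hptep, unsigned long *v, unsigned long *r)
{
	*v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	*r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		*v = hpte_new_to_old_v(*v, *r);
		*r = hpte_new_to_old_r(*r);
	}
}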
@@ -1608,7 +1647,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
return ret;
}
-ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
+static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
return -EACCES;
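The kvm->arch.mmio_update counter used above is invalidation by generation number: anything that could make a cached emulated-MMIO translation stale (a memslot update, removal or modification of an MMIO HPTE) increments the counter, and a cached entry such as vcpu->arch.pgfault_cache is only trusted while the generation it recorded still matches. A generic sketch of the idiom, with illustrative names that are not the kernel's:

#include <linux/atomic.h>
#include <linux/types.h>

struct cached_xlate {
	u64 gpa;		/* whatever was expensive to look up */
	long generation;	/* counter value at the time of caching */
};

static atomic64_t xlate_generation = ATOMIC64_INIT(0);

static void invalidate_cached_xlates(void)
{
	atomic64_inc(&xlate_generation);	/* all cached entries become stale */
}

static bool cached_xlate_valid(const struct cached_xlate *c)
{
	return c->generation == atomic64_read(&xlate_generation);
}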
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index d461c440889a..e4c4ea973e57 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -39,7 +39,6 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
-#include <asm/iommu.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3686471be32b..8dcbe37a4dac 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -54,6 +54,9 @@
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
+#include <asm/mmu.h>
+#include <asm/opal.h>
+#include <asm/xics.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -62,6 +65,7 @@
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
+#include <linux/of.h>
#include "book3s.h"
@@ -104,23 +108,6 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
-/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
-static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
-module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
-
-/* Factor by which the vcore halt poll interval is grown, default is to double
- */
-static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
-
-/* Factor by which the vcore halt poll interval is shrunk, default is to reset
- */
-static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -146,12 +133,21 @@ static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
static bool kvmppc_ipi_thread(int cpu)
{
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+
+ /* On POWER9 we can use msgsnd to IPI any cpu */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ msg |= get_hard_smp_processor_id(cpu);
+ smp_mb();
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return true;
+ }
+
/* On POWER8 for IPIs to threads in the same core, use msgsnd */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
preempt_disable();
if (cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(smp_processor_id())) {
- unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
msg |= cpu_thread_in_core(cpu);
smp_mb();
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
@@ -162,8 +158,12 @@ static bool kvmppc_ipi_thread(int cpu)
}
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
- if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
- xics_wake_cpu(cpu);
+ if (cpu >= 0 && cpu < nr_cpu_ids) {
+ if (paca[cpu].kvm_hstate.xics_phys) {
+ xics_wake_cpu(cpu);
+ return true;
+ }
+ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
return true;
}
#endif
@@ -299,41 +299,54 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
vcpu->arch.pvr = pvr;
}
+/* Dummy value used in computing PCR value below */
+#define PCR_ARCH_300 (PCR_ARCH_207 << 1)
+
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long pcr = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ /* We can (emulate) our own architecture version and anything older */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ host_pcr_bit = PCR_ARCH_300;
+ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ host_pcr_bit = PCR_ARCH_207;
+ else if (cpu_has_feature(CPU_FTR_ARCH_206))
+ host_pcr_bit = PCR_ARCH_206;
+ else
+ host_pcr_bit = PCR_ARCH_205;
+
+ /* Determine lowest PCR bit needed to run guest in given PVR level */
+ guest_pcr_bit = host_pcr_bit;
if (arch_compat) {
switch (arch_compat) {
case PVR_ARCH_205:
- /*
- * If an arch bit is set in PCR, all the defined
- * higher-order arch bits also have to be set.
- */
- pcr = PCR_ARCH_206 | PCR_ARCH_205;
+ guest_pcr_bit = PCR_ARCH_205;
break;
case PVR_ARCH_206:
case PVR_ARCH_206p:
- pcr = PCR_ARCH_206;
+ guest_pcr_bit = PCR_ARCH_206;
break;
case PVR_ARCH_207:
+ guest_pcr_bit = PCR_ARCH_207;
+ break;
+ case PVR_ARCH_300:
+ guest_pcr_bit = PCR_ARCH_300;
break;
default:
return -EINVAL;
}
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
- /* POWER7 can't emulate POWER8 */
- if (!(pcr & PCR_ARCH_206))
- return -EINVAL;
- pcr &= ~PCR_ARCH_206;
- }
}
+ /* Check requested PCR bits don't exceed our capabilities */
+ if (guest_pcr_bit > host_pcr_bit)
+ return -EINVAL;
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
- vc->pcr = pcr;
+ /* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
+ vc->pcr = host_pcr_bit - guest_pcr_bit;
spin_unlock(&vc->lock);
return 0;
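The new vc->pcr computation relies on the PCR architecture bits being adjacent powers of two with newer architecture levels in higher bits, which is what the dummy PCR_ARCH_300 define above encodes; subtracting guest_pcr_bit from host_pcr_bit therefore sets every bit from the guest's level up to, but not including, the host's level. A worked example with assumed bit values (the EX_ constants are illustrative, not the kernel's definitions):

#define EX_PCR_ARCH_205	0x2
#define EX_PCR_ARCH_206	0x4
#define EX_PCR_ARCH_207	0x8
#define EX_PCR_ARCH_300	(EX_PCR_ARCH_207 << 1)	/* 0x10, mirroring the patch */

/*
 * POWER9 host (host_pcr_bit = EX_PCR_ARCH_300) running a guest in
 * ISA v2.06 compatibility mode (guest_pcr_bit = EX_PCR_ARCH_206):
 *
 *	0x10 - 0x04 = 0x0c = EX_PCR_ARCH_207 | EX_PCR_ARCH_206
 *
 * i.e. the 2.07 and 2.06 compatibility bits are both set, restricting the
 * guest-visible architecture to v2.06.
 */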
@@ -945,6 +958,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_EXTERNAL:
case BOOK3S_INTERRUPT_H_DOORBELL:
+ case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -1229,6 +1243,12 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_WORT:
*val = get_reg_val(id, vcpu->arch.wort);
break;
+ case KVM_REG_PPC_TIDR:
+ *val = get_reg_val(id, vcpu->arch.tid);
+ break;
+ case KVM_REG_PPC_PSSCR:
+ *val = get_reg_val(id, vcpu->arch.psscr);
+ break;
case KVM_REG_PPC_VPA_ADDR:
spin_lock(&vcpu->arch.vpa_update_lock);
*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -1288,6 +1308,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
*val = get_reg_val(id, vcpu->arch.cr_tm);
break;
+ case KVM_REG_PPC_TM_XER:
+ *val = get_reg_val(id, vcpu->arch.xer_tm);
+ break;
case KVM_REG_PPC_TM_LR:
*val = get_reg_val(id, vcpu->arch.lr_tm);
break;
@@ -1427,6 +1450,12 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_WORT:
vcpu->arch.wort = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_TIDR:
+ vcpu->arch.tid = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PSSCR:
+ vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
+ break;
case KVM_REG_PPC_VPA_ADDR:
addr = set_reg_val(id, *val);
r = -EINVAL;
@@ -1498,6 +1527,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TM_CR:
vcpu->arch.cr_tm = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_TM_XER:
+ vcpu->arch.xer_tm = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_TM_LR:
vcpu->arch.lr_tm = set_reg_val(id, *val);
break;
@@ -1540,6 +1572,20 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
return r;
}
+/*
+ * On POWER9, threads are independent and can be in different partitions.
+ * Therefore we consider each thread to be a subcore.
+ * There is a restriction that all threads have to be in the same
+ * MMU mode (radix or HPT), unfortunately, but since we only support
+ * HPT guests on a HPT host so far, that isn't an impediment yet.
+ */
+static int threads_per_vcore(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return 1;
+ return threads_per_subcore;
+}
+
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
struct kvmppc_vcore *vcore;
@@ -1554,7 +1600,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
init_swait_queue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * threads_per_subcore;
+ vcore->first_vcpuid = core * threads_per_vcore();
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
@@ -1717,7 +1763,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
int core;
struct kvmppc_vcore *vcore;
- core = id / threads_per_subcore;
+ core = id / threads_per_vcore();
if (core >= KVM_MAX_VCORES)
goto out;
@@ -1935,7 +1981,10 @@ static void kvmppc_wait_for_nap(void)
{
int cpu = smp_processor_id();
int i, loops;
+ int n_threads = threads_per_vcore();
+ if (n_threads <= 1)
+ return;
for (loops = 0; loops < 1000000; ++loops) {
/*
* Check if all threads are finished.
@@ -1943,17 +1992,17 @@ static void kvmppc_wait_for_nap(void)
* and the thread clears it when finished, so we look
* for any threads that still have a non-NULL vcore ptr.
*/
- for (i = 1; i < threads_per_subcore; ++i)
+ for (i = 1; i < n_threads; ++i)
if (paca[cpu + i].kvm_hstate.kvm_vcore)
break;
- if (i == threads_per_subcore) {
+ if (i == n_threads) {
HMT_medium();
return;
}
HMT_low();
}
HMT_medium();
- for (i = 1; i < threads_per_subcore; ++i)
+ for (i = 1; i < n_threads; ++i)
if (paca[cpu + i].kvm_hstate.kvm_vcore)
pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}
@@ -2019,7 +2068,7 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
- if (vc->num_threads < threads_per_subcore) {
+ if (vc->num_threads < threads_per_vcore()) {
spin_lock(&lp->lock);
list_add_tail(&vc->preempt_list, &lp->list);
spin_unlock(&lp->lock);
@@ -2123,8 +2172,7 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
cip->subcore_threads[sub] = vc->num_threads;
cip->subcore_vm[sub] = vc->kvm;
init_master_vcore(vc);
- list_del(&vc->preempt_list);
- list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
+ list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
return true;
}
@@ -2254,12 +2302,12 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
* enter the guest. Only do this if it is the primary thread of the
* core (not if a subcore) that is entering the guest.
*/
-static inline void kvmppc_clear_host_core(int cpu)
+static inline int kvmppc_clear_host_core(unsigned int cpu)
{
int core;
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
- return;
+ return 0;
/*
* Memory barrier can be omitted here as we will do a smp_wmb()
* later in kvmppc_start_thread and we need ensure that state is
@@ -2267,6 +2315,7 @@ static inline void kvmppc_clear_host_core(int cpu)
*/
core = cpu >> threads_shift;
kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
+ return 0;
}
/*
@@ -2274,12 +2323,12 @@ static inline void kvmppc_clear_host_core(int cpu)
* Only need to do this if it is the primary thread of the core that is
* exiting.
*/
-static inline void kvmppc_set_host_core(int cpu)
+static inline int kvmppc_set_host_core(unsigned int cpu)
{
int core;
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
- return;
+ return 0;
/*
* Memory barrier can be omitted here because we do a spin_unlock
@@ -2287,6 +2336,7 @@ static inline void kvmppc_set_host_core(int cpu)
*/
core = cpu >> threads_shift;
kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
+ return 0;
}
/*
@@ -2307,6 +2357,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
unsigned long cmd_bit, stat_bit;
int pcpu, thr;
int target_threads;
+ int controlled_threads;
/*
* Remove from the list any threads that have a signal pending
@@ -2325,11 +2376,18 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->preempt_tb = TB_NIL;
/*
+ * Number of threads that we will be controlling: the same as
+ * the number of threads per subcore, except on POWER9,
+ * where it's 1 because the threads are (mostly) independent.
+ */
+ controlled_threads = threads_per_vcore();
+
+ /*
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
*/
- if ((threads_per_core > 1) &&
+ if ((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
@@ -2345,7 +2403,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
init_core_info(&core_info, vc);
pcpu = smp_processor_id();
- target_threads = threads_per_subcore;
+ target_threads = controlled_threads;
if (target_smt_mode && target_smt_mode < target_threads)
target_threads = target_smt_mode;
if (vc->num_threads < target_threads)
@@ -2381,7 +2439,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
smp_wmb();
}
pcpu = smp_processor_id();
- for (thr = 0; thr < threads_per_subcore; ++thr)
+ for (thr = 0; thr < controlled_threads; ++thr)
paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
/* Initiate micro-threading (split-core) if required */
@@ -2491,7 +2549,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
/* Let secondaries go back to the offline loop */
- for (i = 0; i < threads_per_subcore; ++i) {
+ for (i = 0; i < controlled_threads; ++i) {
kvmppc_release_hwthread(pcpu + i);
if (sip && sip->napped[i])
kvmppc_ipi_thread(pcpu + i);
@@ -2543,9 +2601,6 @@ static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
vc->halt_poll_ns = 10000;
else
vc->halt_poll_ns *= halt_poll_ns_grow;
-
- if (vc->halt_poll_ns > halt_poll_max_ns)
- vc->halt_poll_ns = halt_poll_max_ns;
}
static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
@@ -2556,7 +2611,8 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
vc->halt_poll_ns /= halt_poll_ns_shrink;
}
-/* Check to see if any of the runnable vcpus on the vcore have pending
+/*
+ * Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
*/
static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
@@ -2655,16 +2711,18 @@ out:
}
/* Adjust poll time */
- if (halt_poll_max_ns) {
+ if (halt_poll_ns) {
if (block_ns <= vc->halt_poll_ns)
;
/* We slept and blocked for longer than the max halt time */
- else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+ else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
shrink_halt_poll_ns(vc);
/* We slept and our poll time is too small */
- else if (vc->halt_poll_ns < halt_poll_max_ns &&
- block_ns < halt_poll_max_ns)
+ else if (vc->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
grow_halt_poll_ns(vc);
+ if (vc->halt_poll_ns > halt_poll_ns)
+ vc->halt_poll_ns = halt_poll_ns;
} else
vc->halt_poll_ns = 0;
@@ -2971,6 +3029,15 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ /*
+ * If we are making a new memslot, it might make
+ * some address that was previously cached as emulated
+ * MMIO be no longer emulated MMIO, so invalidate
+ * all the caches of emulated MMIO translations.
+ */
+ if (npages)
+ atomic64_inc(&kvm->arch.mmio_update);
+
if (npages && old->npages) {
/*
* If modifying a memslot, reset all the rmap dirty bits.
@@ -3015,6 +3082,22 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
return;
}
+static void kvmppc_setup_partition_table(struct kvm *kvm)
+{
+ unsigned long dw0, dw1;
+
+ /* PS field - page size for VRMA */
+ dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
+ ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
+ /* HTABSIZE and HTABORG fields */
+ dw0 |= kvm->arch.sdr1;
+
+ /* Second dword has GR=0; other fields are unused since UPRT=0 */
+ dw1 = 0;
+
+ mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
+}
+
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
@@ -3066,17 +3149,20 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
psize == 0x1000000))
goto out_srcu;
- /* Update VRMASD field in the LPCR */
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
-
/* Create HPTEs in the hash page table for the VRMA */
kvmppc_map_vrma(vcpu, memslot, porder);
- kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ /* Update VRMASD field in the LPCR */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ } else {
+ kvmppc_setup_partition_table(kvm);
+ }
/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
smp_wmb();
@@ -3094,36 +3180,6 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_KVM_XICS
-static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- unsigned long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- kvmppc_set_host_core(cpu);
- break;
-
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- kvmppc_clear_host_core(cpu);
- break;
-#endif
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block kvmppc_cpu_notifier = {
- .notifier_call = kvmppc_cpu_notify,
-};
-
/*
* Allocate a per-core structure for managing state about which cores are
* running in the host versus the guest and for exchanging data between
@@ -3185,15 +3241,17 @@ void kvmppc_alloc_host_rm_ops(void)
return;
}
- register_cpu_notifier(&kvmppc_cpu_notifier);
-
+ cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
+ "ppc/kvm_book3s:prepare",
+ kvmppc_set_host_core,
+ kvmppc_clear_host_core);
put_online_cpus();
}
void kvmppc_free_host_rm_ops(void)
{
if (kvmppc_host_rm_ops_hv) {
- unregister_cpu_notifier(&kvmppc_cpu_notifier);
+ cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
kfree(kvmppc_host_rm_ops_hv->rm_core);
kfree(kvmppc_host_rm_ops_hv);
kvmppc_host_rm_ops_hv = NULL;
@@ -3219,14 +3277,18 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
* make sure we flush on each core before running the new VM.
+ * On POWER9, the tlbie in mmu_partition_table_set_entry()
+ * does this flush for us.
*/
- cpumask_setall(&kvm->arch.need_tlb_flush);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ cpumask_setall(&kvm->arch.need_tlb_flush);
/* Start out with the default set of hcalls enabled */
memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
sizeof(kvm->arch.enabled_hcalls));
- kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
/* Init LPCR for virtual RMA mode */
kvm->arch.host_lpid = mfspr(SPRN_LPID);
@@ -3239,9 +3301,29 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* On POWER8 turn on online bit to enable PURR/SPURR */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
lpcr |= LPCR_ONL;
+ /*
+ * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
+ * Set HVICE bit to enable hypervisor virtualization interrupts.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ lpcr &= ~LPCR_VPM0;
+ lpcr |= LPCR_HVICE;
+ }
+
kvm->arch.lpcr = lpcr;
/*
+ * Work out how many sets the TLB has, for the use of
+ * the TLB invalidation loop in book3s_hv_rmhandlers.S.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
+ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
+ else
+ kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
+
+ /*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
*/
@@ -3305,9 +3387,9 @@ static int kvmppc_core_check_processor_compat_hv(void)
!cpu_has_feature(CPU_FTR_ARCH_206))
return -EIO;
/*
- * Disable KVM for Power9, untill the required bits merged.
+ * Disable KVM for Power9 in radix mode.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300))
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
return -EIO;
return 0;
@@ -3661,6 +3743,23 @@ static int kvmppc_book3s_init_hv(void)
if (r)
return r;
+ /*
+ * We need a way of accessing the XICS interrupt controller,
+ * either directly, via paca[cpu].kvm_hstate.xics_phys, or
+ * indirectly, via OPAL.
+ */
+#ifdef CONFIG_SMP
+ if (!get_paca()->kvm_hstate.xics_phys) {
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
+ if (!np) {
+ pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
kvm_ops_hv.owner = THIS_MODULE;
kvmppc_hv_ops = &kvm_ops_hv;
@@ -3683,3 +3782,4 @@ module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
+
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0c84d6bc8356..5bb24be0b346 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -26,6 +26,8 @@
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
#define KVM_CMA_CHUNK_ORDER 18
@@ -205,12 +207,18 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
void kvmhv_rm_send_ipi(int cpu)
{
unsigned long xics_phys;
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* On POWER8 for IPIs to threads in the same core, use msgsnd */
+ /* On POWER9 we can use msgsnd for any destination cpu. */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ msg |= get_hard_smp_processor_id(cpu);
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return;
+ }
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
cpu_first_thread_sibling(cpu) ==
cpu_first_thread_sibling(raw_smp_processor_id())) {
- unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
msg |= cpu_thread_in_core(cpu);
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return;
@@ -218,7 +226,11 @@ void kvmhv_rm_send_ipi(int cpu)
/* Else poke the target with an IPI */
xics_phys = paca[cpu].kvm_hstate.xics_phys;
- rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ if (xics_phys)
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ else
+ opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
+ IPI_PRIORITY);
}
/*
@@ -329,7 +341,7 @@ static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
* saved a copy of the XIRR in the PACA, it will be picked up by
* the host ICP driver.
*/
-static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
struct kvmppc_passthru_irqmap *pimap;
struct kvmppc_irq_map *irq_map;
@@ -348,11 +360,11 @@ static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
/* We're handling this interrupt, generic code doesn't need to */
local_paca->kvm_hstate.saved_xirr = 0;
- return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
+ return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}
#else
-static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
return 1;
}
@@ -367,14 +379,31 @@ static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
* -1 if there was a guest wakeup IPI (which has now been cleared)
* -2 if there is PCI passthrough external interrupt that was handled
*/
+static long kvmppc_read_one_intr(bool *again);
long kvmppc_read_intr(void)
{
+ long ret = 0;
+ long rc;
+ bool again;
+
+ do {
+ again = false;
+ rc = kvmppc_read_one_intr(&again);
+ if (rc && (ret == 0 || rc > ret))
+ ret = rc;
+ } while (again);
+ return ret;
+}
+
+static long kvmppc_read_one_intr(bool *again)
+{
unsigned long xics_phys;
u32 h_xirr;
__be32 xirr;
u32 xisr;
u8 host_ipi;
+ int64_t rc;
/* see if a host IPI is pending */
host_ipi = local_paca->kvm_hstate.host_ipi;
@@ -383,8 +412,14 @@ long kvmppc_read_intr(void)
/* Now read the interrupt from the ICP */
xics_phys = local_paca->kvm_hstate.xics_phys;
- if (unlikely(!xics_phys))
- return 1;
+ if (!xics_phys) {
+ /* Use OPAL to read the XIRR */
+ rc = opal_rm_int_get_xirr(&xirr, false);
+ if (rc < 0)
+ return 1;
+ } else {
+ xirr = _lwzcix(xics_phys + XICS_XIRR);
+ }
/*
* Save XIRR for later. Since we get control in reverse endian
@@ -392,7 +427,6 @@ long kvmppc_read_intr(void)
* host endian. Note that xirr is the value read from the
* XIRR register, while h_xirr is the host endian version.
*/
- xirr = _lwzcix(xics_phys + XICS_XIRR);
h_xirr = be32_to_cpu(xirr);
local_paca->kvm_hstate.saved_xirr = h_xirr;
xisr = h_xirr & 0xffffff;
@@ -411,8 +445,16 @@ long kvmppc_read_intr(void)
* If it is an IPI, clear the MFRR and EOI it.
*/
if (xisr == XICS_IPI) {
- _stbcix(xics_phys + XICS_MFRR, 0xff);
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ if (xics_phys) {
+ _stbcix(xics_phys + XICS_MFRR, 0xff);
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+ } else {
+ opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
+ rc = opal_rm_int_eoi(h_xirr);
+ /* If rc > 0, there is another interrupt pending */
+ *again = rc > 0;
+ }
+
/*
* Need to ensure side effects of above stores
* complete before proceeding.
@@ -429,7 +471,11 @@ long kvmppc_read_intr(void)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ if (xics_phys)
+ _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ else
+ opal_rm_int_set_mfrr(hard_smp_processor_id(),
+ IPI_PRIORITY);
/* Let side effects complete */
smp_mb();
return 1;
@@ -440,5 +486,5 @@ long kvmppc_read_intr(void)
return -1;
}
- return kvmppc_check_passthru(xisr, xirr);
+ return kvmppc_check_passthru(xisr, xirr, again);
}
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 0fa70a9618d7..7ef0993214f3 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -16,6 +16,7 @@
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/hmi.h>
+#include <asm/kvm_ppc.h>
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 99b4e9d5dd23..9ef3c4be952f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -264,8 +264,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
if (pa)
pteh |= HPTE_V_VALID;
- else
+ else {
pteh |= HPTE_V_ABSENT;
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+ }
/*If we had host pte mapping then Check WIMG */
if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
@@ -351,6 +353,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
unlock_rmap(rmap);
} else {
kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
@@ -361,6 +364,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
}
+ /* Convert to new format on P9 */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ ptel = hpte_old_to_new_r(pteh, ptel);
+ pteh = hpte_old_to_new_v(pteh);
+ }
hpte[1] = cpu_to_be64(ptel);
/* Write the first HPTE dword, unlocking the HPTE and making it valid */
@@ -386,6 +394,13 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
#endif
+static inline int is_mmio_hpte(unsigned long v, unsigned long r)
+{
+ return ((v & HPTE_V_ABSENT) &&
+ (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+ (HPTE_R_KEY_HI | HPTE_R_KEY_LO));
+}
+
static inline int try_lock_tlbie(unsigned int *lock)
{
unsigned int tmp, old;
@@ -409,13 +424,18 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
{
long i;
+ /*
+ * We use the POWER9 5-operand versions of tlbie and tlbiel here.
+ * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
+ * the RS field, this is backwards-compatible with P7 and P8.
+ */
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
if (need_sync)
asm volatile("ptesync" : : : "memory");
for (i = 0; i < npages; ++i)
- asm volatile(PPC_TLBIE(%1,%0) : :
+ asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
@@ -423,7 +443,8 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
if (need_sync)
asm volatile("ptesync" : : : "memory");
for (i = 0; i < npages; ++i)
- asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+ asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
+ "r" (rbvalues[i]), "r" (0));
asm volatile("ptesync" : : : "memory");
}
}
@@ -435,18 +456,23 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
__be64 *hpte;
unsigned long v, r, rb;
struct revmap_entry *rev;
- u64 pte;
+ u64 pte, orig_pte, pte_r;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(hpte[0]);
+ pte = orig_pte = be64_to_cpu(hpte[0]);
+ pte_r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ pte = hpte_new_to_old_v(pte, pte_r);
+ pte_r = hpte_new_to_old_r(pte_r);
+ }
if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
((flags & H_ANDCOND) && (pte & avpn) != 0)) {
- __unlock_hpte(hpte, pte);
+ __unlock_hpte(hpte, orig_pte);
return H_NOT_FOUND;
}
@@ -454,7 +480,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
+ rb = compute_tlbie_rb(v, pte_r, pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
* The reference (R) and change (C) bits in a HPT
@@ -472,6 +498,9 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
note_hpte_modification(kvm, rev);
unlock_hpte(hpte, 0);
+ if (is_mmio_hpte(v, pte_r))
+ atomic64_inc(&kvm->arch.mmio_update);
+
if (v & HPTE_V_ABSENT)
v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
hpret[0] = v;
@@ -498,7 +527,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
int global;
long int ret = H_SUCCESS;
struct revmap_entry *rev, *revs[4];
- u64 hp0;
+ u64 hp0, hp1;
global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -531,6 +560,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
}
found = 0;
hp0 = be64_to_cpu(hp[0]);
+ hp1 = be64_to_cpu(hp[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
switch (flags & 3) {
case 0: /* absolute */
@@ -561,13 +595,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
hp[0] = 0;
+ if (is_mmio_hpte(hp0, hp1))
+ atomic64_inc(&kvm->arch.mmio_update);
continue;
}
/* leave it locked */
hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
- tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
- be64_to_cpu(hp[1]), pte_index);
+ tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
indexes[n] = j;
hptes[n] = hp;
revs[n] = rev;
@@ -605,7 +640,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
__be64 *hpte;
struct revmap_entry *rev;
unsigned long v, r, rb, mask, bits;
- u64 pte;
+ u64 pte_v, pte_r;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
@@ -613,14 +648,16 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- pte = be64_to_cpu(hpte[0]);
- if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
- ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
- __unlock_hpte(hpte, pte);
+ v = pte_v = be64_to_cpu(hpte[0]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
+ if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+ ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
+ __unlock_hpte(hpte, pte_v);
return H_NOT_FOUND;
}
- v = pte;
+ pte_r = be64_to_cpu(hpte[1]);
bits = (flags << 55) & HPTE_R_PP0;
bits |= (flags << 48) & HPTE_R_KEY_HI;
bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -642,22 +679,26 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
* readonly to writable. If it should be writable, we'll
* take a trap and let the page fault code sort it out.
*/
- pte = be64_to_cpu(hpte[1]);
- r = (pte & ~mask) | bits;
- if (hpte_is_writable(r) && !hpte_is_writable(pte))
+ r = (pte_r & ~mask) | bits;
+ if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
r = hpte_make_readonly(r);
/* If the PTE is changing, invalidate it first */
- if (r != pte) {
+ if (r != pte_r) {
rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+ hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
HPTE_V_ABSENT);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
true);
+ /* Don't lose R/C bit updates done by hardware */
+ r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
hpte[1] = cpu_to_be64(r);
}
}
- unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+ unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
asm volatile("ptesync" : : : "memory");
+ if (is_mmio_hpte(v, pte_r))
+ atomic64_inc(&kvm->arch.mmio_update);
+
return H_SUCCESS;
}
@@ -681,6 +722,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
if (v & HPTE_V_ABSENT) {
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
@@ -798,10 +843,16 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
unsigned long rb;
+ u64 hp0, hp1;
hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
- rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
- pte_index);
+ hp0 = be64_to_cpu(hptep[0]);
+ hp1 = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
+ rb = compute_tlbie_rb(hp0, hp1, pte_index);
do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
@@ -811,9 +862,15 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
{
unsigned long rb;
unsigned char rbyte;
+ u64 hp0, hp1;
- rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
- pte_index);
+ hp0 = be64_to_cpu(hptep[0]);
+ hp1 = be64_to_cpu(hptep[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hp0 = hpte_new_to_old_v(hp0, hp1);
+ hp1 = hpte_new_to_old_r(hp1);
+ }
+ rb = compute_tlbie_rb(hp0, hp1, pte_index);
rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
/* modify only the second-last byte, which contains the ref bit */
*((char *)hptep + 14) = rbyte;
@@ -828,6 +885,37 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
+ unsigned long eaddr, unsigned long slb_v, long mmio_update)
+{
+ struct mmio_hpte_cache_entry *entry = NULL;
+ unsigned int pshift;
+ unsigned int i;
+
+ for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
+ entry = &vcpu->arch.mmio_cache.entry[i];
+ if (entry->mmio_update == mmio_update) {
+ pshift = entry->slb_base_pshift;
+ if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
+ entry->slb_v == slb_v)
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+static struct mmio_hpte_cache_entry *
+ next_mmio_cache_entry(struct kvm_vcpu *vcpu)
+{
+ unsigned int index = vcpu->arch.mmio_cache.index;
+
+ vcpu->arch.mmio_cache.index++;
+ if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
+ vcpu->arch.mmio_cache.index = 0;
+
+ return &vcpu->arch.mmio_cache.entry[index];
+}
+
/* When called from virtmode, this func should be protected by
* preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
* can trigger deadlock issue.
@@ -842,7 +930,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long avpn;
__be64 *hpte;
unsigned long mask, val;
- unsigned long v, r;
+ unsigned long v, r, orig_v;
/* Get page shift, work out hash and AVPN etc. */
mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
@@ -877,6 +965,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
for (i = 0; i < 16; i += 2) {
/* Read the PTE racily */
v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));
/* Check valid/absent, hash, segment size and AVPN */
if (!(v & valid) || (v & mask) != val)
@@ -885,8 +975,12 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Lock the PTE and read it under the lock */
while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
cpu_relax();
- v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
r = be64_to_cpu(hpte[i+1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
/*
* Check the HPTE again, including base page size
@@ -896,7 +990,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);
- __unlock_hpte(&hpte[i], v);
+ __unlock_hpte(&hpte[i], orig_v);
}
if (val & HPTE_V_SECONDARY)
@@ -924,30 +1018,45 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
{
struct kvm *kvm = vcpu->kvm;
long int index;
- unsigned long v, r, gr;
+ unsigned long v, r, gr, orig_v;
__be64 *hpte;
unsigned long valid;
struct revmap_entry *rev;
unsigned long pp, key;
+ struct mmio_hpte_cache_entry *cache_entry = NULL;
+ long mmio_update = 0;
/* For protection fault, expect to find a valid HPTE */
valid = HPTE_V_VALID;
- if (status & DSISR_NOHPTE)
+ if (status & DSISR_NOHPTE) {
valid |= HPTE_V_ABSENT;
-
- index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
- if (index < 0) {
- if (status & DSISR_NOHPTE)
- return status; /* there really was no HPTE */
- return 0; /* for prot fault, HPTE disappeared */
+ mmio_update = atomic64_read(&kvm->arch.mmio_update);
+ cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
}
- hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
- v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
- r = be64_to_cpu(hpte[1]);
- rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
- gr = rev->guest_rpte;
+ if (cache_entry) {
+ index = cache_entry->pte_index;
+ v = cache_entry->hpte_v;
+ r = cache_entry->hpte_r;
+ gr = cache_entry->rpte;
+ } else {
+ index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
+ if (index < 0) {
+ if (status & DSISR_NOHPTE)
+ return status; /* there really was no HPTE */
+ return 0; /* for prot fault, HPTE disappeared */
+ }
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+ v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+ r = be64_to_cpu(hpte[1]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ v = hpte_new_to_old_v(v, r);
+ r = hpte_new_to_old_r(r);
+ }
+ rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+ gr = rev->guest_rpte;
- unlock_hpte(hpte, v);
+ unlock_hpte(hpte, orig_v);
+ }
/* For not found, if the HPTE is valid by now, retry the instruction */
if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
@@ -985,12 +1094,32 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu->arch.pgfault_index = index;
vcpu->arch.pgfault_hpte[0] = v;
vcpu->arch.pgfault_hpte[1] = r;
+ vcpu->arch.pgfault_cache = cache_entry;
/* Check the storage key to see if it is possibly emulated MMIO */
- if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
- (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
- (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
- return -2; /* MMIO emulation - load instr word */
+ if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+ (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
+ if (!cache_entry) {
+ unsigned int pshift = 12;
+ unsigned int pshift_index;
+
+ if (slb_v & SLB_VSID_L) {
+ pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
+ pshift = slb_base_page_shift[pshift_index];
+ }
+ cache_entry = next_mmio_cache_entry(vcpu);
+ cache_entry->eaddr = addr;
+ cache_entry->slb_base_pshift = pshift;
+ cache_entry->pte_index = index;
+ cache_entry->hpte_v = v;
+ cache_entry->hpte_r = r;
+ cache_entry->rpte = gr;
+ cache_entry->slb_v = slb_v;
+ cache_entry->mmio_update = mmio_update;
+ }
+ if (data && (vcpu->arch.shregs.msr & MSR_IR))
+ return -2; /* MMIO emulation - load instr word */
+ }
return -1; /* send fault up to host kernel mode */
}
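The per-vCPU MMIO cache introduced above is a small ring: mmio_cache_search() only returns a hit when the entry's recorded mmio_update generation is still current, and next_mmio_cache_entry() hands out slots round-robin so the oldest entry is overwritten once all MMIO_HPTE_CACHE_SIZE slots have been used. A stripped-down sketch of the rotation; the names and the size here are illustrative.

#define EX_MMIO_CACHE_SIZE 4	/* assumed size, for illustration only */

struct ex_cache_entry {
	unsigned long eaddr;
	long mmio_update;	/* generation recorded when the entry was filled */
};

struct ex_mmio_cache {
	unsigned int index;
	struct ex_cache_entry entry[EX_MMIO_CACHE_SIZE];
};

/* Hand out the next slot, wrapping around and reusing the oldest entry. */
static struct ex_cache_entry *ex_next_entry(struct ex_mmio_cache *c)
{
	struct ex_cache_entry *e = &c->entry[c->index];

	if (++c->index == EX_MMIO_CACHE_SIZE)
		c->index = 0;
	return e;
}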
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index a0ea63ac2b52..06edc4366639 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -70,7 +70,11 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
hcpu = hcore << threads_shift;
kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
- icp_native_cause_ipi_rm(hcpu);
+ if (paca[hcpu].kvm_hstate.xics_phys)
+ icp_native_cause_ipi_rm(hcpu);
+ else
+ opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
+ IPI_PRIORITY);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
@@ -737,7 +741,7 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
unsigned long eoi_rc;
-static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
+static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
unsigned long xics_phys;
int64_t rc;
@@ -751,7 +755,12 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
/* EOI it */
xics_phys = local_paca->kvm_hstate.xics_phys;
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ if (xics_phys) {
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+ } else {
+ rc = opal_rm_int_eoi(be32_to_cpu(xirr));
+ *again = rc > 0;
+ }
}
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
@@ -809,9 +818,10 @@ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
}
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
- u32 xirr,
+ __be32 xirr,
struct kvmppc_irq_map *irq_map,
- struct kvmppc_passthru_irqmap *pimap)
+ struct kvmppc_passthru_irqmap *pimap,
+ bool *again)
{
struct kvmppc_xics *xics;
struct kvmppc_icp *icp;
@@ -825,7 +835,8 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
icp_rm_deliver_irq(xics, icp, irq);
/* EOI the interrupt */
- icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);
+ icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
+ again);
if (check_too_hard(xics, icp) == H_TOO_HARD)
return 2;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c3c1d1bcfc67..9338a818e05c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -501,17 +501,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi r0, 0
beq 57f
li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
- mfspr r4, SPRN_LPCR
- rlwimi r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
- mtspr SPRN_LPCR, r4
- isync
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
- nap
- b .
+ mfspr r5, SPRN_LPCR
+ rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+ b kvm_nap_sequence
57: li r0, 0
stbx r0, r3, r4
@@ -523,6 +515,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* *
*****************************************************************************/
+/* Stack frame offsets */
+#define STACK_SLOT_TID (112-16)
+#define STACK_SLOT_PSSCR (112-24)
+
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -581,12 +577,14 @@ kvmppc_hv_entry:
ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
cmpwi r6,0
bne 10f
- ld r6,KVM_SDR1(r9)
lwz r7,KVM_LPID(r9)
+BEGIN_FTR_SECTION
+ ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -607,12 +605,8 @@ kvmppc_hv_entry:
stdcx. r7,0,r6
bne 23b
/* Flush the TLB of any entries for this LPID */
- /* use arch 2.07S as a proxy for POWER8 */
-BEGIN_FTR_SECTION
- li r6,512 /* POWER8 has 512 sets */
-FTR_SECTION_ELSE
- li r6,128 /* POWER7 has 128 sets */
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+ lwz r6,KVM_TLB_SETS(r9)
+ li r0,0 /* RS for P9 version of tlbiel */
mtctr r6
li r7,0x800 /* IS field = 0b10 */
ptesync
@@ -698,6 +692,14 @@ kvmppc_got_guest:
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
+ /* Save host values of some registers */
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_TIDR
+ mfspr r6, SPRN_PSSCR
+ std r5, STACK_SLOT_TID(r1)
+ std r6, STACK_SLOT_PSSCR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
BEGIN_FTR_SECTION
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -750,14 +752,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
BEGIN_FTR_SECTION
ld r5, VCPU_MMCR + 24(r4)
ld r6, VCPU_SIER(r4)
+ mtspr SPRN_MMCR2, r5
+ mtspr SPRN_SIER, r6
+BEGIN_FTR_SECTION_NESTED(96)
lwz r7, VCPU_PMC + 24(r4)
lwz r8, VCPU_PMC + 28(r4)
ld r9, VCPU_MMCR + 32(r4)
- mtspr SPRN_MMCR2, r5
- mtspr SPRN_SIER, r6
mtspr SPRN_SPMC1, r7
mtspr SPRN_SPMC2, r8
mtspr SPRN_MMCRS, r9
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_MMCR0, r3
isync
@@ -813,20 +817,30 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_EBBHR, r8
ld r5, VCPU_EBBRR(r4)
ld r6, VCPU_BESCR(r4)
- ld r7, VCPU_CSIGR(r4)
- ld r8, VCPU_TACR(r4)
+ lwz r7, VCPU_GUEST_PID(r4)
+ ld r8, VCPU_WORT(r4)
mtspr SPRN_EBBRR, r5
mtspr SPRN_BESCR, r6
- mtspr SPRN_CSIGR, r7
- mtspr SPRN_TACR, r8
+ mtspr SPRN_PID, r7
+ mtspr SPRN_WORT, r8
+BEGIN_FTR_SECTION
+ /* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
- lwz r7, VCPU_GUEST_PID(r4)
- ld r8, VCPU_WORT(r4)
+ ld r7, VCPU_CSIGR(r4)
+ ld r8, VCPU_TACR(r4)
mtspr SPRN_TCSCR, r5
mtspr SPRN_ACOP, r6
- mtspr SPRN_PID, r7
- mtspr SPRN_WORT, r8
+ mtspr SPRN_CSIGR, r7
+ mtspr SPRN_TACR, r8
+FTR_SECTION_ELSE
+ /* POWER9-only registers */
+ ld r5, VCPU_TID(r4)
+ ld r6, VCPU_PSSCR(r4)
+ oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
+ mtspr SPRN_TIDR, r5
+ mtspr SPRN_PSSCR, r6
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
/*
@@ -1341,20 +1355,29 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r8, VCPU_EBBHR(r9)
mfspr r5, SPRN_EBBRR
mfspr r6, SPRN_BESCR
- mfspr r7, SPRN_CSIGR
- mfspr r8, SPRN_TACR
+ mfspr r7, SPRN_PID
+ mfspr r8, SPRN_WORT
std r5, VCPU_EBBRR(r9)
std r6, VCPU_BESCR(r9)
- std r7, VCPU_CSIGR(r9)
- std r8, VCPU_TACR(r9)
+ stw r7, VCPU_GUEST_PID(r9)
+ std r8, VCPU_WORT(r9)
+BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
- mfspr r7, SPRN_PID
- mfspr r8, SPRN_WORT
+ mfspr r7, SPRN_CSIGR
+ mfspr r8, SPRN_TACR
std r5, VCPU_TCSCR(r9)
std r6, VCPU_ACOP(r9)
- stw r7, VCPU_GUEST_PID(r9)
- std r8, VCPU_WORT(r9)
+ std r7, VCPU_CSIGR(r9)
+ std r8, VCPU_TACR(r9)
+FTR_SECTION_ELSE
+ mfspr r5, SPRN_TIDR
+ mfspr r6, SPRN_PSSCR
+ std r5, VCPU_TID(r9)
+ rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
+ rotldi r6, r6, 60
+ std r6, VCPU_PSSCR(r9)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1363,12 +1386,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_IAMR, r0
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
- mtspr SPRN_TCSCR, r0
mtspr SPRN_WORT, r0
+BEGIN_FTR_SECTION
+ mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
@@ -1502,15 +1527,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
+ std r5, VCPU_SIER(r9)
+BEGIN_FTR_SECTION_NESTED(96)
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
mfspr r8, SPRN_MMCRS
- std r5, VCPU_SIER(r9)
stw r6, VCPU_PMC + 24(r9)
stw r7, VCPU_PMC + 28(r9)
std r8, VCPU_MMCR + 32(r9)
lis r4, 0x8000
mtspr SPRN_MMCRS, r4
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
/* Clear out SLB */
@@ -1519,6 +1546,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbia
ptesync
+ /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_TID(r1)
+ ld r6, STACK_SLOT_PSSCR(r1)
+ mtspr SPRN_TIDR, r5
+ mtspr SPRN_PSSCR, r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -1552,12 +1587,14 @@ kvmhv_switch_to_host:
beq 19f
/* Primary thread switches back to host partition */
- ld r6,KVM_HOST_SDR1(r4)
lwz r7,KVM_HOST_LPID(r4)
+BEGIN_FTR_SECTION
+ ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
- mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_SDR1,r6 /* switch to host page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -2211,6 +2248,21 @@ BEGIN_FTR_SECTION
ori r5, r5, LPCR_PECEDH
rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+kvm_nap_sequence: /* desired LPCR value in r5 */
+BEGIN_FTR_SECTION
+ /*
+ * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
+ * enable state loss = 1 (allow SMT mode switch)
+ * requested level = 0 (just stop dispatching)
+ */
+ lis r3, (PSSCR_EC | PSSCR_ESL)@h
+ mtspr SPRN_PSSCR, r3
+ /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
+ li r4, LPCR_PECE_HVEE@higher
+ sldi r4, r4, 32
+ or r5, r5, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
li r0, 0
@@ -2219,7 +2271,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ld r0, HSTATE_SCRATCH0(r13)
1: cmpd r0, r0
bne 1b
+BEGIN_FTR_SECTION
nap
+FTR_SECTION_ELSE
+ PPC_STOP
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
b .
33: mr r4, r3
@@ -2600,11 +2656,13 @@ kvmppc_save_tm:
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
+ mfxer r11
std r5, VCPU_LR_TM(r9)
stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
+ std r11, VCPU_XER_TM(r9)
/* Restore r12 as trap number. */
lwz r12, VCPU_TRAP(r9)
@@ -2697,11 +2755,13 @@ kvmppc_restore_tm:
ld r7, VCPU_CTR_TM(r4)
ld r8, VCPU_AMR_TM(r4)
ld r9, VCPU_TAR_TM(r4)
+ ld r10, VCPU_XER_TM(r4)
mtlr r5
mtcr r6
mtctr r7
mtspr SPRN_AMR, r8
mtspr SPRN_TAR, r9
+ mtxer r10
/*
* Load up PPR and DSCR values but don't put them in the actual SPRs
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c845e96..efd1183a6b16 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -536,7 +536,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
- case KVM_CAP_PPC_ALLOC_HTAB:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
@@ -545,13 +544,20 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
+
+ case KVM_CAP_PPC_ALLOC_HTAB:
+ r = hv_enabled;
+ break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
- if (hv_enabled)
- r = threads_per_subcore;
- else
- r = 0;
+ r = 0;
+ if (hv_enabled) {
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ r = 1;
+ else
+ r = threads_per_subcore;
+ }
break;
case KVM_CAP_PPC_RMA:
r = 0;
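
The capability changes above mean KVM_CAP_PPC_ALLOC_HTAB is only advertised when HV KVM is usable, and KVM_CAP_PPC_SMT reports 1 on ISA v3.00 (POWER9) hosts instead of threads_per_subcore. A minimal userspace probe for both caps (a sketch only; it assumes nothing beyond an accessible /dev/kvm) might look like:

	/* Hypothetical probe for the two capabilities reworked above. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0)
			return 1;
		/* 0 means unsupported; on POWER9 HV hosts SMT reports 1,
		 * on earlier HV hosts it reports threads_per_subcore. */
		printf("PPC_SMT=%d ALLOC_HTAB=%d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT),
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_ALLOC_HTAB));
		return 0;
	}
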
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index fb21990c0fb4..ebc6dd449556 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -449,7 +449,7 @@ TRACE_EVENT(kvmppc_vcore_wakeup,
__entry->tgid = current->tgid;
),
- TP_printk("%s time %lld ns, tgid=%d",
+ TP_printk("%s time %llu ns, tgid=%d",
__entry->waited ? "wait" : "poll",
__entry->ns, __entry->tgid)
);
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 42c702b3be1f..6fa450c12d6d 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
*/
rflags = htab_convert_pte_flags(new_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 3bbbea07378c..1a68cb19b0e3 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -87,7 +87,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
subpg_pte = new_pte & ~subpg_prot;
rflags = htab_convert_pte_flags(subpg_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
/*
@@ -258,7 +258,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
rflags = htab_convert_pte_flags(new_pte);
- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 83ddc0e171b0..ad9fd5245be2 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -221,13 +221,18 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
return -1;
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
i, hpte_v, hpte_r);
}
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
+ hpte_v = hpte_old_to_new_v(hpte_v);
+ }
+
hptep->r = cpu_to_be64(hpte_r);
/* Guarantee the second dword is visible before the valid bit */
eieio();
@@ -295,6 +300,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
vpn, want_v & HPTE_V_AVPN, slot, newpp);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -309,6 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
/* recheck with locks held */
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
@@ -350,6 +359,8 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
@@ -409,6 +420,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -467,6 +480,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
/* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -504,6 +519,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
/* Look at the 8 bit LP value */
unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
+ hpte_r = hpte_new_to_old_r(hpte_r);
+ }
if (!(hpte_v & HPTE_V_LARGE)) {
size = MMU_PAGE_4K;
a_size = MMU_PAGE_4K;
@@ -512,11 +531,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
a_size = hpte_page_sizes[lp] >> 4;
}
/* This works for all page sizes, and for 256M and 1T segments */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
- else
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
-
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
@@ -639,6 +654,9 @@ static void native_flush_hash_range(unsigned long number, int local)
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v,
+ be64_to_cpu(hptep->r));
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
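
The same guard recurs at every native HPTE read above: on ISA v3.00 the hardware PTE layout changed, so the value read from the hash table is converted back to the pre-3.0 layout that the rest of the hash code still expects. The repeated pattern could be captured in a small wrapper (hypothetical; the patch open-codes it at each site):

	static inline unsigned long native_hpte_v_old(struct hash_pte *hptep)
	{
		unsigned long hpte_v = be64_to_cpu(hptep->v);

		/* POWER9 stores the HPTE in the new ISA v3.00 format */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
		return hpte_v;
	}
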
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 44d3c3a38e3e..8410b4bb36ed 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -193,8 +193,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
/*
* Kernel read only mapped with ppp bits 0b110
*/
- if (!(pteflags & _PAGE_WRITE))
- rflags |= (HPTE_R_PP0 | 0x2);
+ if (!(pteflags & _PAGE_WRITE)) {
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ rflags |= (HPTE_R_PP0 | 0x2);
+ else
+ rflags |= 0x3;
+ }
} else {
if (pteflags & _PAGE_RWX)
rflags |= 0x2;
@@ -792,37 +796,17 @@ static void update_hid_for_hash(void)
static void __init hash_init_partition_table(phys_addr_t hash_table,
unsigned long htab_size)
{
- unsigned long ps_field;
- unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+ mmu_partition_table_init();
/*
- * slb llp encoding for the page size used in VPM real mode.
- * We can ignore that for lpid 0
+ * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
+ * For now, UPRT is 0 and we have no segment table.
*/
- ps_field = 0;
htab_size = __ilog2(htab_size) - 18;
-
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
- partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
- MEMBLOCK_ALLOC_ANYWHERE));
-
- /* Initialize the Partition Table with no entries */
- memset((void *)partition_tb, 0, patb_size);
- partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
- /*
- * FIXME!! This should be done via update_partition table
- * For now UPRT is 0 for us.
- */
- partition_tb->patb1 = 0;
+ mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
pr_info("Partition table %p\n", partition_tb);
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
update_hid_for_hash();
- /*
- * update partition table control register,
- * 64 K size.
- */
- mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
-
}
static void __init htab_initialize(void)
@@ -1029,6 +1013,10 @@ void hash__early_init_mmu_secondary(void)
{
/* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_hash();
+
if (!cpu_has_feature(CPU_FTR_ARCH_300))
mtspr(SPRN_SDR1, _SDR1);
else
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index a51c188b81f3..0cb6bd8bfccf 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1085,7 +1085,7 @@ static int hot_add_node_scn_to_nid(unsigned long scn_addr)
int hot_add_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory = NULL;
- int nid, found = 0;
+ int nid;
if (!numa_enabled || (min_common_depth < 0))
return first_online_node;
@@ -1101,17 +1101,6 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
if (nid < 0 || !node_online(nid))
nid = first_online_node;
- if (NODE_DATA(nid)->node_spanned_pages)
- return nid;
-
- for_each_online_node(nid) {
- if (NODE_DATA(nid)->node_spanned_pages) {
- found = 1;
- break;
- }
- }
-
- BUG_ON(!found);
return nid;
}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ed7bddc456b7..8d941c692eb3 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -177,23 +177,15 @@ redo:
static void __init radix_init_partition_table(void)
{
- unsigned long rts_field;
+ unsigned long rts_field, dw0;
+ mmu_partition_table_init();
rts_field = radix__get_tree_size();
+ dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
+ mmu_partition_table_set_entry(0, dw0, 0);
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
- partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
- partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
- RADIX_PGD_INDEX_SIZE | PATB_HR);
pr_info("Initializing Radix MMU\n");
pr_info("Partition table %p\n", partition_tb);
-
- memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
- /*
- * update partition table control register,
- * 64 K size.
- */
- mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
void __init radix_init_native(void)
@@ -378,6 +370,8 @@ void __init radix__early_init_mmu(void)
radix_init_partition_table();
}
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+
radix_init_pgtable();
}
@@ -388,6 +382,10 @@ void radix__early_init_mmu_secondary(void)
* update partition table control register and UPRT
*/
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_radix();
+
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index f5e8d4edb808..8bca7f58afc4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -431,3 +431,37 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
}
}
#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void __init mmu_partition_table_init(void)
+{
+ unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+
+ BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
+ partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
+ MEMBLOCK_ALLOC_ANYWHERE));
+
+ /* Initialize the Partition Table with no entries */
+ memset((void *)partition_tb, 0, patb_size);
+
+ /*
+ * update partition table control register,
+ * 64 K size.
+ */
+ mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+}
+
+void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+ unsigned long dw1)
+{
+ partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+
+ /* Global flush of TLBs and partition table caches for this lpid */
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#endif /* CONFIG_PPC_BOOK3S_64 */
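
With these helpers in place, both MMU flavours follow the same two-step call pattern used by the hash and radix hunks above; a condensed sketch of the radix side (the wrapper name is illustrative only):

	static void __init example_radix_partition_setup(void)
	{
		unsigned long dw0;

		mmu_partition_table_init();	/* allocate, zero and point PTCR at the table */
		dw0 = radix__get_tree_size() | __pa(init_mm.pgd) |
		      RADIX_PGD_INDEX_SIZE | PATB_HR;
		mmu_partition_table_set_entry(0, dw0, 0);	/* LPID 0, no process table yet */
	}
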
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index bda8c43be78a..3493cf4e0452 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set, ric);
}
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
return;
}
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0fe98a567125..73a5cf18fd84 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -766,7 +766,7 @@ emit_clear:
func = (u8 *) __bpf_call_base + imm;
/* Save skb pointer if we need to re-cache skb data */
- if (bpf_helper_changes_skb_data(func))
+ if (bpf_helper_changes_pkt_data(func))
PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -775,7 +775,7 @@ emit_clear:
PPC_MR(b2p[BPF_REG_0], 3);
/* refresh skb cache */
- if (bpf_helper_changes_skb_data(func)) {
+ if (bpf_helper_changes_pkt_data(func)) {
/* reload skb pointer to r3 */
PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_skb_loads(image, ctx);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index e3257f24a8a1..1d7c1b142bf4 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -64,6 +64,7 @@ config XILINX_VIRTEX_GENERIC_BOARD
default n
select XILINX_VIRTEX_II_PRO
select XILINX_VIRTEX_4_FX
+ select XILINX_INTC
help
This option enables generic support for Xilinx Virtex based boards.
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index 91a08ea758a8..e3d5e095846b 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -48,7 +48,7 @@ define_machine(virtex) {
.probe = virtex_probe,
.setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
- .get_irq = xilinx_intc_get_irq,
+ .get_irq = xintc_get_irq,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 48fc18041ff6..25b8d641ff9f 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -241,6 +241,7 @@ config XILINX_VIRTEX440_GENERIC_BOARD
depends on 44x
default n
select XILINX_VIRTEX_5_FXT
+ select XILINX_INTC
help
This option enables generic support for Xilinx Virtex based boards
that use a 440 based processor in the Virtex 5 FXT FPGA architecture.
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index a7e08026097a..3eb13ed926ee 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -54,7 +54,7 @@ define_machine(virtex) {
.probe = virtex_probe,
.setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
- .get_irq = xilinx_intc_get_irq,
+ .get_irq = xintc_get_irq,
.calibrate_decr = generic_calibrate_decr,
.restart = ppc4xx_reset_system,
};
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 06254467e4dd..3a147122bc98 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -236,7 +236,6 @@ static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct spu_context *ctx = vma->vm_file->private_data;
- unsigned long address = (unsigned long)vmf->virtual_address;
unsigned long pfn, offset;
offset = vmf->pgoff << PAGE_SHIFT;
@@ -244,7 +243,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
- address, offset);
+ vmf->address, offset);
if (spu_acquire(ctx))
return VM_FAULT_NOPAGE;
@@ -256,7 +255,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
}
- vm_insert_pfn(vma, address, pfn);
+ vm_insert_pfn(vma, vmf->address, pfn);
spu_release(ctx);
@@ -355,8 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
- vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
- (area + offset) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 44d2d842cee7..3aa40f1b20f5 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -304,8 +304,11 @@ OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE);
OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE);
OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE);
OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR);
+OPAL_CALL_REAL(opal_rm_int_get_xirr, OPAL_INT_GET_XIRR);
OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR);
OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
+OPAL_CALL_REAL(opal_rm_int_eoi, OPAL_INT_EOI);
OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
+OPAL_CALL_REAL(opal_rm_int_set_mfrr, OPAL_INT_SET_MFRR);
OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
OPAL_CALL_REAL(opal_rm_pci_tce_kill, OPAL_PCI_TCE_KILL);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 6c9a65b52e63..b3b8930ac52f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -896,3 +896,5 @@ EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
/* Export this symbol for PowerNV Operator Panel class driver */
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
+/* Export this for KVM */
+EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index cb3c50328de8..cc2b281a3766 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -63,7 +63,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
vflags &= ~HPTE_V_SECONDARY;
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index aa35245d8d6d..f2c98f6c1c9c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -145,7 +145,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
hpte_group, vpn, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 0f52d7955796..4a86dcff3fcd 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -29,194 +29,7 @@
#include <asm/processor.h>
#include <asm/i8259.h>
#include <asm/irq.h>
-
-/*
- * INTC Registers
- */
-#define XINTC_ISR 0 /* Interrupt Status */
-#define XINTC_IPR 4 /* Interrupt Pending */
-#define XINTC_IER 8 /* Interrupt Enable */
-#define XINTC_IAR 12 /* Interrupt Acknowledge */
-#define XINTC_SIE 16 /* Set Interrupt Enable bits */
-#define XINTC_CIE 20 /* Clear Interrupt Enable bits */
-#define XINTC_IVR 24 /* Interrupt Vector */
-#define XINTC_MER 28 /* Master Enable */
-
-static struct irq_domain *master_irqhost;
-
-#define XILINX_INTC_MAXIRQS (32)
-
-/* The following table allows the interrupt type, edge or level,
- * to be cached after being read from the device tree until the interrupt
- * is mapped
- */
-static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS];
-
-/* Map the interrupt type from the device tree to the interrupt types
- * used by the interrupt subsystem
- */
-static unsigned char xilinx_intc_map_senses[] = {
- IRQ_TYPE_EDGE_RISING,
- IRQ_TYPE_EDGE_FALLING,
- IRQ_TYPE_LEVEL_HIGH,
- IRQ_TYPE_LEVEL_LOW,
-};
-
-/*
- * The interrupt controller is setup such that it doesn't work well with
- * the level interrupt handler in the kernel because the handler acks the
- * interrupt before calling the application interrupt handler. To deal with
- * that, we use 2 different irq chips so that different functions can be
- * used for level and edge type interrupts.
- *
- * IRQ Chip common (across level and edge) operations
- */
-static void xilinx_intc_mask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("mask: %d\n", irq);
- out_be32(regs + XINTC_CIE, 1 << irq);
-}
-
-static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
-{
- return 0;
-}
-
-/*
- * IRQ Chip level operations
- */
-static void xilinx_intc_level_unmask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("unmask: %d\n", irq);
- out_be32(regs + XINTC_SIE, 1 << irq);
-
- /* ack level irqs because they can't be acked during
- * ack function since the handle_level_irq function
- * acks the irq before calling the interrupt handler
- */
- out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_level_irqchip = {
- .name = "Xilinx Level INTC",
- .irq_mask = xilinx_intc_mask,
- .irq_mask_ack = xilinx_intc_mask,
- .irq_unmask = xilinx_intc_level_unmask,
- .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Chip edge operations
- */
-static void xilinx_intc_edge_unmask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void *regs = irq_data_get_irq_chip_data(d);
- pr_debug("unmask: %d\n", irq);
- out_be32(regs + XINTC_SIE, 1 << irq);
-}
-
-static void xilinx_intc_edge_ack(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("ack: %d\n", irq);
- out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_edge_irqchip = {
- .name = "Xilinx Edge INTC",
- .irq_mask = xilinx_intc_mask,
- .irq_unmask = xilinx_intc_edge_unmask,
- .irq_ack = xilinx_intc_edge_ack,
- .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Host operations
- */
-
-/**
- * xilinx_intc_xlate - translate virq# from device tree interrupts property
- */
-static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-{
- if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS))
- return -EINVAL;
-
- /* keep a copy of the interrupt type until the interrupt is mapped
- */
- xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]];
-
- /* Xilinx uses 2 interrupt entries, the 1st being the h/w
- * interrupt number, the 2nd being the interrupt type, edge or level
- */
- *out_hwirq = intspec[0];
- *out_flags = xilinx_intc_map_senses[intspec[1]];
-
- return 0;
-}
-static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t irq)
-{
- irq_set_chip_data(virq, h->host_data);
-
- if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
- xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
- irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
- handle_level_irq);
- } else {
- irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
- handle_edge_irq);
- }
- return 0;
-}
-
-static const struct irq_domain_ops xilinx_intc_ops = {
- .map = xilinx_intc_map,
- .xlate = xilinx_intc_xlate,
-};
-
-struct irq_domain * __init
-xilinx_intc_init(struct device_node *np)
-{
- struct irq_domain * irq;
- void * regs;
-
- /* Find and map the intc registers */
- regs = of_iomap(np, 0);
- if (!regs) {
- pr_err("xilinx_intc: could not map registers\n");
- return NULL;
- }
-
- /* Setup interrupt controller */
- out_be32(regs + XINTC_IER, 0); /* disable all irqs */
- out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
- out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
-
- /* Allocate and initialize an irq_domain structure. */
- irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
- regs);
- if (!irq)
- panic(__FILE__ ": Cannot allocate IRQ host\n");
-
- return irq;
-}
-
-int xilinx_intc_get_irq(void)
-{
- void * regs = master_irqhost->host_data;
- pr_debug("get_irq:\n");
- return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
-}
+#include <linux/irqchip.h>
#if defined(CONFIG_PPC_I8259)
/*
@@ -265,31 +78,11 @@ static void __init xilinx_i8259_setup_cascade(void)
static inline void xilinx_i8259_setup_cascade(void) { return; }
#endif /* defined(CONFIG_PPC_I8259) */
-static const struct of_device_id xilinx_intc_match[] __initconst = {
- { .compatible = "xlnx,opb-intc-1.00.c", },
- { .compatible = "xlnx,xps-intc-1.00.a", },
- {}
-};
-
/*
* Initialize master Xilinx interrupt controller
*/
void __init xilinx_intc_init_tree(void)
{
- struct device_node *np;
-
- /* find top level interrupt controller */
- for_each_matching_node(np, xilinx_intc_match) {
- if (!of_get_property(np, "interrupts", NULL))
- break;
- }
- BUG_ON(!np);
-
- master_irqhost = xilinx_intc_init(np);
- BUG_ON(!master_irqhost);
-
- irq_set_default_host(master_irqhost);
- of_node_put(np);
-
+ irqchip_init();
xilinx_i8259_setup_cascade();
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 426481d4cc86..c6722112527d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,6 +136,7 @@ config S390
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,8 +170,10 @@ config S390
select OLD_SIGSUSPEND3
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_SCALED_CPUTIME
select VIRT_TO_BUS
select HAVE_NMI
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 0daa070d6c9d..6bd2c9022be3 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
-KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 28c4f96a2d9c..11f6254c561e 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -46,7 +46,7 @@ mover_end:
.align 8
.Lstack:
- .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
.quad 0x11000
.Lmvsize:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 45968686f918..e659daffe368 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -66,6 +66,8 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZPOOL=m
CONFIG_ZBUD=m
@@ -366,6 +368,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -438,7 +442,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -693,3 +696,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1dd05e345c4d..95ceac50bc65 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -633,3 +634,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 29d1178666f0..bc7b176f5795 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -632,3 +633,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9cc050f9536c..1113389d0a39 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
- if (copy_to_user(ubuf, prng_data->buf, chunk))
- return -EFAULT;
+ if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+ ret = -EFAULT;
+ break;
+ }
nbytes -= chunk;
ret += chunk;
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 28f03ca60100..794bebb43d23 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -363,11 +363,11 @@ out:
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
- diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
static void diag224_delete_name_table(void)
{
- kfree(diag224_cpu_names);
+ free_page((unsigned long) diag224_cpu_names);
}
static int diag224_idx2name(int index, char *name)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb224d03..cf8a2d92467f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -3,6 +3,7 @@
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * License: GPL
*/
#define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
#include <linux/time.h>
#include <linux/parser.h>
#include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
-MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
@@ -497,21 +498,4 @@ fail_dbfs_exit:
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
-
-static void __exit hypfs_exit(void)
-{
- unregister_filesystem(&hypfs_type);
- sysfs_remove_mount_point(hypervisor_kobj, "s390");
- hypfs_diag0c_exit();
- hypfs_sprp_exit();
- hypfs_vm_exit();
- hypfs_diag_exit();
- hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 20f196b82a6e..8aea32fe8bd2 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,6 @@
-
-
+generic-y += asm-offsets.h
generic-y += clkdev.h
+generic-y += dma-contiguous.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/s390/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d28cc2f5b7b2..f7f69dfd2db2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,13 +1,8 @@
/*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
- * Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ * Arnd Bergmann,
*/
#ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
-#define __ATOMIC_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR "lao"
-#define __ATOMIC_AND "lan"
-#define __ATOMIC_ADD "laa"
-#define __ATOMIC_XOR "lax"
-#define __ATOMIC_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR "or"
-#define __ATOMIC_AND "nr"
-#define __ATOMIC_ADD "ar"
-#define __ATOMIC_XOR "xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- int old_val, new_val; \
- \
- typecheck(atomic_t *, ptr); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
static inline int atomic_read(const atomic_t *v)
{
int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+ return __atomic_add_barrier(i, &v->counter) + i;
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+ return __atomic_add_barrier(i, &v->counter);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "asi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+ __atomic_add(i, &v->counter);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-#define ATOMIC_OPS(op, OP) \
+#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+ __atomic_##op(i, &v->counter); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
- return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
+ return __atomic_##op##_barrier(i, &v->counter); \
}
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
#undef ATOMIC_OPS
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
- asm volatile(
- " cs %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic_cmpxchg(&v->counter, old, new);
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-#undef __ATOMIC_LOOP
-
#define ATOMIC64_INIT(i) { (i) }
-#define __ATOMIC64_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR "laog"
-#define __ATOMIC64_AND "lang"
-#define __ATOMIC64_ADD "laag"
-#define __ATOMIC64_XOR "laxg"
-#define __ATOMIC64_BARRIER "bcr 14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (old_val), "+Q" ((ptr)->counter) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR "ogr"
-#define __ATOMIC64_AND "ngr"
-#define __ATOMIC64_ADD "agr"
-#define __ATOMIC64_XOR "xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
-({ \
- long long old_val, new_val; \
- \
- typecheck(atomic64_t *, ptr); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
{
- long long c;
+ long c;
asm volatile(
" lg %0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
return c;
}
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+ return __atomic64_add_barrier(i, &v->counter) + i;
}
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+ return __atomic64_add_barrier(i, &v->counter);
}
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "agsi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
+ __atomic64_add_const(i, &v->counter);
return;
}
#endif
- __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+ __atomic64_add(i, &v->counter);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
- asm volatile(
- " csg %0,%2,%1"
- : "+d" (old), "+Q" (v->counter)
- : "d" (new)
- : "cc", "memory");
- return old;
+ return __atomic64_cmpxchg(&v->counter, old, new);
}
-#define ATOMIC64_OPS(op, OP) \
+#define ATOMIC64_OPS(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
- __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+ __atomic64_##op(i, &v->counter); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+ return __atomic64_##op##_barrier(i, &v->counter); \
}
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
- long long c, old;
+ long c, old;
c = atomic64_read(v);
for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
return c != u;
}
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
{
- long long c, old, dec;
+ long c, old, dec;
c = atomic64_read(v);
for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
new file mode 100644
index 000000000000..ac9e2b939d04
--- /dev/null
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -0,0 +1,130 @@
+/*
+ * Low level functions for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
+static inline op_type op_name(op_type val, op_type *ptr) \
+{ \
+ op_type old; \
+ \
+ asm volatile( \
+ op_string " %[old],%[val],%[ptr]\n" \
+ op_barrier \
+ : [old] "=d" (old), [ptr] "+Q" (*ptr) \
+ : [val] "d" (val) : "cc", "memory"); \
+ return old; \
+} \
+
+#define __ATOMIC_OPS(op_name, op_type, op_string) \
+ __ATOMIC_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or, int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or, long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+ asm volatile(
+ " asi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+ asm volatile(
+ " agsi %[ptr],%[val]\n"
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string) \
+static inline int op_name(int val, int *ptr) \
+{ \
+ int old, new; \
+ \
+ asm volatile( \
+ "0: lr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " cs %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC_OPS(op_name, op_string) \
+ __ATOMIC_OP(op_name, op_string) \
+ __ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or, "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string) \
+static inline long op_name(long val, long *ptr) \
+{ \
+ long old, new; \
+ \
+ asm volatile( \
+ "0: lgr %[new],%[old]\n" \
+ op_string " %[new],%[val]\n" \
+ " csg %[old],%[new],%[ptr]\n" \
+ " jl 0b" \
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
+ return old; \
+}
+
+#define __ATOMIC64_OPS(op_name, op_string) \
+ __ATOMIC64_OP(op_name, op_string) \
+ __ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or, "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+ asm volatile(
+ " cs %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+ asm volatile(
+ " csg %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new) : "cc", "memory");
+ return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__ */
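
These primitives are what the reworked atomic.h and bitops.h, and the new preempt.h, now build on: the plain variants are unordered, while the _barrier variants add the serialising bcr 14,0 on z196 and newer. A minimal usage sketch (kernel context assumed, names illustrative):

	static int example_counter;

	static inline int example_add_return(int i)
	{
		/* __atomic_add_barrier() returns the old value, so add i back */
		return __atomic_add_barrier(i, &example_counter) + i;
	}

	static inline int example_try_claim(void)
	{
		/* compare-and-swap 0 -> 1; non-zero return means we won the race */
		return __atomic_cmpxchg(&example_counter, 0, 1) == 0;
	}
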
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10da6b5..d92047da5ccb 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
-#define __BITOPS_NO_BARRIER "\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR "laog"
-#define __BITOPS_AND "lang"
-#define __BITOPS_XOR "laxg"
-#define __BITOPS_BARRIER "bcr 14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- __op_string " %0,%2,%1\n" \
- __barrier \
- : "=d" (__old), "+Q" (*(__addr)) \
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR "ogr"
-#define __BITOPS_AND "ngr"
-#define __BITOPS_XOR "xgr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " lg %0,%2\n" \
- "0: lgr %1,%0\n" \
- __op_string " %1,%3\n" \
- " csg %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+ __atomic64_or(mask, addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+ __atomic64_and(mask, addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+ __atomic64_xor(mask, addr);
}
static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+ old = __atomic64_or_barrier(mask, addr);
return (old & mask) != 0;
}
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+ old = __atomic64_and_barrier(mask, addr);
return (old & ~mask) != 0;
}
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+ old = __atomic64_xor_barrier(mask, addr);
return (old & mask) != 0;
}
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 03516476127b..b69d8bc231a5 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -104,7 +104,8 @@ struct hws_basic_entry {
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
- unsigned int:16;
+ unsigned int CL:2; /* 32-33 Configuration Level */
+ unsigned int:14;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..f4381e1fb19e 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -193,7 +193,7 @@ extern char elf_platform[];
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do { \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
- current_thread_info()->sys_call_table = \
+ current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
deleted file mode 100644
index 3b758f66e48b..000000000000
--- a/arch/s390/include/asm/facilities_src.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
- char *name;
- int *bits;
-};
-
-static struct facility_def facility_defs[] = {
- {
- /*
- * FACILITIES_ALS contains the list of facilities that are
- * required to run a kernel that is compiled e.g. with
- * -march=<machine>.
- */
- .name = "FACILITIES_ALS",
- .bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
- 18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- 7, /* stfle */
- 17, /* message security assist */
- 21, /* extended-immediate facility */
- 25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
- 27, /* mvcos */
- 32, /* compare and swap and store */
- 33, /* compare and swap and store 2 */
- 34, /* general extension facility */
- 35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- 45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- 49, /* misc-instruction-extensions */
- 52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
- 53, /* load-and-zero-rightmost-byte, etc. */
-#endif
- -1 /* END */
- }
- },
- {
- .name = "FACILITIES_KVM",
- .bits = (int[]){
- 0, /* N3 instructions */
- 1, /* z/Arch mode installed */
- 2, /* z/Arch mode active */
- 3, /* DAT-enhancement */
- 4, /* idte segment table */
- 5, /* idte region table */
- 6, /* ASN-and-LX reuse */
- 7, /* stfle */
- 8, /* enhanced-DAT 1 */
- 9, /* sense-running-status */
- 10, /* conditional sske */
- 13, /* ipte-range */
- 14, /* nonquiescing key-setting */
- 73, /* transactional execution */
- 75, /* access-exception-fetch/store indication */
- 76, /* msa extension 3 */
- 77, /* msa extension 4 */
- 78, /* enhanced-DAT 2 */
- -1 /* END */
- }
- },
-};
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 4da22b2f0521..edb5161df7e2 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..9bfad2ad6312 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -95,7 +95,7 @@ struct lowcore {
/* Current process. */
__u64 current_task; /* 0x0310 */
- __u64 thread_info; /* 0x0318 */
+ __u8 pad_0x318[0x320-0x318]; /* 0x0318 */
__u64 kernel_stack; /* 0x0320 */
/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
- __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
+ __u32 preempt_count; /* 0x0390 */
+ __u8 pad_0x0394[0x0398-0x0394]; /* 0x0394 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u32 fpu_flags; /* 0x03a4 */
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/s390/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index e75c64cbcf08..c232ef9711f5 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
+extern bool zpci_unique_uid;
+
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
u64 resume_token;
u32 reserved2;
u16 max_fn;
- u8 reserved3;
+ u8 : 7;
+ u8 uid_checking : 1;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f4eb9843eed4..166f703dad7c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
- typedef struct { char _[n]; } addrtype;
-
- *s = val;
- n = (n / 256) - 1;
- asm volatile(
- " mvc 8(248,%0),0(%0)\n"
- "0: mvc 256(256,%0),0(%0)\n"
- " la %0,256(%0)\n"
- " brct %1,0b\n"
- : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
- : "m" (*(addrtype *) s));
+ struct addrtype { char _[256]; };
+ int i;
+
+ for (i = 0; i < n; i += 256) {
+ *s = val;
+ asm volatile(
+ "mvc 8(248,%[s]),0(%[s])\n"
+ : "+m" (*(struct addrtype *) s)
+ : [s] "a" (s));
+ s += 256 / sizeof(long);
+ }
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
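[Illustration, not part of the patch] The clear_table() rewrite above replaces one long chained mvc loop with independent 256-byte chunks; semantically it still fills every unsigned long of the table with val. A minimal plain-C sketch of that effect, with an assumed helper name:

        #include <stddef.h>

        /* Fill n bytes starting at s with the 8-byte pattern val -- the result
         * the chunked mvc propagation above produces 256 bytes at a time. */
        static void clear_table_model(unsigned long *s, unsigned long val, size_t n)
        {
                size_t i;

                for (i = 0; i < n / sizeof(unsigned long); i++)
                        s[i] = val;
        }
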
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 000000000000..b0776b2c8dcf
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+ int old, new;
+
+ do {
+ old = READ_ONCE(S390_lowcore.preempt_count);
+ new = (old & PREEMPT_NEED_RESCHED) |
+ (pc & ~PREEMPT_NEED_RESCHED);
+ } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+ old, new) != old);
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+ __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+ __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+ __atomic_add_const(val, &S390_lowcore.preempt_count);
+ else
+ __atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ __preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+ preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED (0)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+ S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p) do { } while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+ S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
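[Illustration, not part of the patch] On z196 and newer machines the new asm/preempt.h keeps PREEMPT_NEED_RESCHED in the same lowcore word as the preemption count and stores it inverted (bit clear means a reschedule is pending), so PREEMPT_ENABLED is the flag itself and __preempt_count_dec_and_test() only has to compare the old value against 1. A minimal user-space model of that encoding, assuming C11 atomics; the names below are stand-ins, not kernel symbols:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define NEED_RESCHED 0x80000000u        /* stand-in for PREEMPT_NEED_RESCHED */

        static _Atomic unsigned int pc = NEED_RESCHED; /* "enabled, nothing pending" */

        static void set_need_resched(void)   { atomic_fetch_and(&pc, ~NEED_RESCHED); }
        static void clear_need_resched(void) { atomic_fetch_or(&pc, NEED_RESCHED); }
        static void preempt_disable_model(void) { atomic_fetch_add(&pc, 1); }

        static bool preempt_enable_model(void)
        {
                /* old value 1 <=> new value 0 <=> count 0 and resched bit clear */
                return atomic_fetch_sub(&pc, 1) == 1;
        }

        int main(void)
        {
                preempt_disable_model();
                set_need_resched();
                printf("resched after enable: %d\n", preempt_enable_model()); /* 1 */
                clear_need_resched();
                preempt_disable_model();
                printf("resched after enable: %d\n", preempt_enable_model()); /* 0 */
                return 0;
        }
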
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 602af692efdc..6bca916a5ba0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -110,14 +110,20 @@ typedef struct {
struct thread_struct {
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
+ unsigned long user_timer; /* task cputime in user space */
+ unsigned long system_timer; /* task cputime in kernel space */
+ unsigned long sys_call_table; /* system call table address */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ /* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
+ unsigned int system_call; /* system call number in signal */
+ unsigned long last_break; /* last breaking-event-address. */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
@@ -234,9 +240,10 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-void cpu_relax(void);
+#define cpu_relax_yield cpu_relax_yield
+void cpu_relax_yield(void);
-#define cpu_relax_lowlatency() barrier()
+#define cpu_relax() barrier()
#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2ad9c204b1a2..8db92a5b3bf1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
-int sclp_get_core_info(struct sclp_core_info *info);
+int _sclp_get_core_info_early(struct sclp_core_info *info);
+int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+ if (early)
+ return _sclp_get_core_info_early(info);
+ return _sclp_get_core_info(info);
+}
+
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4af99cdaddf5..17a7904f001a 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -96,7 +96,8 @@ struct tm_scsw {
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
- u32 schxs:8;
+ u32 ifob:1;
+ u32 sesq:7;
} __attribute__ ((packed));
/**
@@ -177,6 +178,9 @@ union scsw {
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
+#define SCSW_SESQ_DEV_NOFCX 3
+#define SCSW_SESQ_PATH_NOFCX 4
+
/*
* architectured values for first sense byte
*/
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0cc383b9be7f..3deb134587b7 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
#else /* CONFIG_SMP */
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
#endif /* CONFIG_SMP */
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
}
}
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+ return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
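[Illustration, not part of the patch] smp_get_base_cpu() above maps any hardware thread number back to thread 0 of its core via cpu - (cpu % (smp_cpu_mtid + 1)); with smp_cpu_mtid == 1 (two threads per core), CPUs 4 and 5 both yield base CPU 4. A couple of worked values:

        #include <assert.h>

        static int base_cpu(int cpu, int mtid)
        {
                return cpu - (cpu % (mtid + 1));
        }

        int main(void)
        {
                assert(base_cpu(4, 1) == 4);    /* two threads per core */
                assert(base_cpu(5, 1) == 4);
                assert(base_cpu(7, 3) == 4);    /* four threads per core */
                return 0;
        }
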
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7e9e09f600fa..7ecd8902a5c3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
}
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8662f5c8e17f..15a3c005c274 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -14,6 +14,7 @@
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
@@ -32,6 +33,7 @@
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
-#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2728114d5484..229326c942c7 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
+ char reserved_4[3];
+ unsigned char vsne;
+ uuid_be uuid;
+ char reserved_5[160];
+ char ext_name[256];
};
#define LPAR_CHAR_DEDICATED (1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
unsigned int caf;
char cpi[16];
char reserved_1[3];
- char ext_name_encoding;
+ unsigned char evmne;
unsigned int reserved_2;
uuid_be uuid;
} vm[8];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..a5b54a445eb8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -12,10 +12,10 @@
/*
* Size of kernel stack for each process
*/
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
struct thread_info {
- struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
- unsigned long sys_call_table; /* System call table address */
- unsigned int cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- unsigned int system_call;
- __u64 user_timer;
- __u64 system_timer;
- unsigned long last_break; /* last breaking-event-address. */
};
/*
@@ -46,26 +38,14 @@ struct thread_info {
*/
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
.flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *) S390_lowcore.thread_info;
-}
-
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
#endif
/*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f341c09..de8298800722 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
-void __init ptff_init(void);
+void __init time_early_init(void);
extern unsigned char ptff_function_mask[16];
-extern unsigned long lpar_offset;
-extern unsigned long initial_leap_seconds;
/* Function codes for the ptff instruction. */
#define PTFF_QAF 0x00 /* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
unsigned int pad_0x5c[41];
} __packed;
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
- typedef struct { char _[len]; } addrtype;
- register unsigned int reg0 asm("0") = func;
- register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
- int rc;
-
- asm volatile(
- " .word 0x0104\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+m" (*(addrtype *) ptff_block)
- : "d" (reg0), "d" (reg1) : "cc");
- return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block: Pointer to ptff parameter block
+ * @len: Length of parameter block
+ * @func: Function code
+ * Returns: Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func) \
+({ \
+ struct addrtype { char _[len]; }; \
+ register unsigned int reg0 asm("0") = func; \
+ register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+ int rc; \
+ \
+ asm volatile( \
+ " .word 0x0104\n" \
+ " ipm %0\n" \
+ " srl %0,28\n" \
+ : "=d" (rc), "+m" (*(struct addrtype *) reg1) \
+ : "d" (reg0), "d" (reg1) : "cc"); \
+ rc; \
+})
static inline unsigned long long local_tick_disable(void)
{
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 15711de10403..853b2a3d8dee 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -104,12 +104,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -162,5 +156,13 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
#endif /* _S390_TLB_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
cpumask_t drawer_mask;
};
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
- (&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
#define mc_capable() 1
+void topology_init_early(void);
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#else /* CONFIG_SCHED_TOPOLOGY */
+static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
- return per_cpu(cpu_topology, cpu).node_id;
+ return cpu_topology[cpu].node_id;
}
/* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c8709279..f82b04e85a21 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -37,14 +37,14 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
-#define set_fs(x) \
-({ \
+#define set_fs(x) \
+{ \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
__ctl_load(__pto, 7, 7); \
-})
+}
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index d0a2dbf2433d..88bdc477a843 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -33,6 +33,8 @@ struct vdso_data {
__u32 ectg_available; /* ECTG instruction present 0x58 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
+ __u32 ts_dir; /* TOD steering direction 0x64 */
+ __u64 ts_end; /* TOD steering end 0x68 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index cc44b09c25fc..bf736e764cb4 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += dasd.h
header-y += debug.h
header-y += errno.h
header-y += fcntl.h
+header-y += hypfs.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
@@ -29,16 +30,16 @@ header-y += ptrace.h
header-y += qeth.h
header-y += resource.h
header-y += schid.h
+header-y += sclp_ctl.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
+header-y += sie.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
header-y += stat.h
header-y += statfs.h
header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 41b51c2f4f1b..b24a64cbfeb1 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -96,4 +96,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98f6db9..36b5101c8606 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,20 +2,47 @@
# Makefile for the linux kernel.
#
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+
+endif
+
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+GCOV_PROFILE_sclp.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
endif
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
-CFLAGS_smp.o := -Wno-nonnull
+CFLAGS_smp.o := -Wno-nonnull
#
# Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0a5dec..c4b3570ded5b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -25,12 +25,14 @@
int main(void)
{
/* task struct offsets */
- OFFSET(__TASK_thread_info, task_struct, stack);
+ OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
+ OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+ OFFSET(__THREAD_last_break, thread_struct, last_break);
OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
/* thread info offsets */
- OFFSET(__TI_task, thread_info, task);
- OFFSET(__TI_flags, thread_info, flags);
- OFFSET(__TI_sysc_table, thread_info, sys_call_table);
- OFFSET(__TI_cpu, thread_info, cpu);
- OFFSET(__TI_precount, thread_info, preempt_count);
- OFFSET(__TI_user_timer, thread_info, user_timer);
- OFFSET(__TI_system_timer, thread_info, system_timer);
- OFFSET(__TI_last_break, thread_info, last_break);
+ OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+ OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+ OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
- OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+ OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4af60374eba0..6f2a6ab13cb5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b46bbc..d038c8cea6cb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
+ S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
@@ -391,7 +392,49 @@ static int __init cad_init(void)
}
early_initcall(cad_init);
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+ unsigned long addr;
+ long incr;
+ psw_t old;
+
+ if (!n)
+ return;
+ incr = 1;
+ if (dst > src) {
+ incr = -incr;
+ dst += n - 1;
+ src += n - 1;
+ }
+ old = S390_lowcore.program_new_psw;
+ S390_lowcore.program_new_psw.mask = __extract_psw();
+ asm volatile(
+ " larl %[addr],1f\n"
+ " stg %[addr],%[psw_pgm_addr]\n"
+ "0: mvc 0(1,%[dst]),0(%[src])\n"
+ " agr %[dst],%[incr]\n"
+ " agr %[src],%[incr]\n"
+ " brctg %[n],0b\n"
+ "1:\n"
+ : [addr] "=&d" (addr),
+ [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
+ [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
+ : [incr] "d" (incr)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+ void *src, *dst;
+
+ src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+ dst = (void *) IPL_PARMBLOCK_ORIGIN;
+ memmove_early(dst, src, PAGE_SIZE);
+ S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
return;
if (INITRD_START >= min_initrd_addr)
return;
- memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+ memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
#endif
}
@@ -467,7 +510,8 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
- ptff_init();
+ ipl_verify_parameters();
+ time_early_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();
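[Illustration, not part of the patch] memmove_early() above picks the copy direction so that overlapping IPL parameter block and initrd ranges are handled correctly, and temporarily points the program-check new PSW past the loop so a fault simply aborts the copy instead of stopping the boot. A plain-C sketch of just the direction handling, with an assumed helper name:

        #include <stddef.h>

        static void memmove_model(unsigned char *dst, const unsigned char *src, size_t n)
        {
                size_t i;

                if (!n)
                        return;
                if (dst > src) {                /* copy backwards, end towards start */
                        while (n--)
                                dst[n] = src[n];
                } else {                        /* copy forwards */
                        for (i = 0; i < n; i++)
                                dst[i] = src[i];
                }
        }
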
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a30737adde..97298c58b2be 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
@@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro LAST_BREAK scratch
srag \scratch,%r10,23
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
jz .+10
- stg %r10,__TI_last_break(%r12)
+ stg %r10,__TASK_thread+__THREAD_last_break(%r12)
+#else
+ jz .+14
+ lghi \scratch,__TASK_thread
+ stg %r10,__THREAD_last_break(\scratch,%r12)
+#endif
.endm
.macro REENABLE_IRQS
@@ -186,14 +192,13 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r5,__TASK_thread_info(%r3) # get thread_info of next
+ lg %r5,__TASK_stack(%r3) # start of kernel stack of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -274,7 +279,7 @@ ENTRY(system_call)
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
@@ -288,7 +293,13 @@ ENTRY(system_call)
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
+ # load address of system call table
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+ lg %r10,__TASK_thread+__THREAD_sysc_table(%r12)
+#else
+ lghi %r13,__TASK_thread
+ lg %r10,__THREAD_sysc_table(%r13,%r12)
+#endif
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
@@ -389,7 +400,6 @@ ENTRY(system_call)
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
@@ -457,7 +467,7 @@ ENTRY(system_call)
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
@@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
+ lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
@@ -567,7 +577,7 @@ ENTRY(io_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +636,7 @@ ENTRY(io_int_handler)
jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
+ icm %r0,15,__LC_PREEMPT_COUNT
jnz .Lio_restore # preemption is disabled
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
@@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
@@ -851,9 +858,7 @@ load_fpu_regs:
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp # -> no VX, load FP regs
-.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
@@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
+ lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)
.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
#
@@ -1085,7 +1090,7 @@ cleanup_critical:
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
+ stg %r12,32(%r11) # r12 task struct pointer
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
@@ -1106,7 +1111,9 @@ cleanup_critical:
lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
+ lgr %r9,%r12
+ aghi %r9,__TASK_thread
+ mvc __THREAD_last_break(8,%r9),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4431905f8cfa..0b5ebf8a3d30 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
jg startup_continue
.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+ .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 03c2b469c472..482d3526e32b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -32,11 +32,10 @@ ENTRY(startup_continue)
#
# Setup stack
#
- larl %r15,init_thread_union
- stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
- lg %r14,__TI_task(%r15) # cache current in lowcore
+ larl %r14,init_task
stg %r14,__LC_CURRENT
- aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+ larl %r15,init_thread_union
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7124bc..ff3364a067ff 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
diag308_set_works = 1;
}
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
{
struct cio_iplinfo iplinfo;
- void *src, *dst;
if (cio_get_iplinfo(&iplinfo))
return;
@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
- src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
- dst = (void *)IPL_PARMBLOCK_ORIGIN;
- memmove(dst, src, PAGE_SIZE);
- S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
static LIST_HEAD(rcall);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d6561076d..ef60f4177331 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+ if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6ea6d69339b5..ae7dff110054 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
lgr_timer_set();
return 0;
}
-module_init(lgr_init);
+device_initcall(lgr_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c1479a..763dec18edcd 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
- regs.psw.addr = sfr->basic.ia;
- if (sfr->basic.T)
- regs.psw.mask |= PSW_MASK_DAT;
- if (sfr->basic.W)
- regs.psw.mask |= PSW_MASK_WAIT;
- if (sfr->basic.P)
- regs.psw.mask |= PSW_MASK_PSTATE;
- switch (sfr->basic.AS) {
- case 0x0:
- regs.psw.mask |= PSW_ASC_PRIMARY;
- break;
- case 0x1:
- regs.psw.mask |= PSW_ASC_ACCREG;
- break;
- case 0x2:
- regs.psw.mask |= PSW_ASC_SECONDARY;
- break;
- case 0x3:
- regs.psw.mask |= PSW_ASC_HOME;
- break;
- }
+ psw_bits(regs.psw).ia = sfr->basic.ia;
+ psw_bits(regs.psw).t = sfr->basic.T;
+ psw_bits(regs.psw).w = sfr->basic.W;
+ psw_bits(regs.psw).p = sfr->basic.P;
+ psw_bits(regs.psw).as = sfr->basic.AS;
/*
- * A non-zero guest program parameter indicates a guest
- * sample.
- * Note that some early samples or samples from guests without
+ * Use the hardware provided configuration level to decide if the
+ * sample belongs to a guest or host. If that is not available,
+ * fall back to the following heuristics:
+ * A non-zero guest program parameter always indicates a guest
+ * sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
- * value as a heuristic to detect most of these guest samples.
- * If the value differs from the host hpp value, we assume
- * it to be a KVM guest.
+ * value as an addon heuristic to detect most of these guest samples.
+ * If the value differs from the host hpp value, we assume to be a
+ * KVM guest.
*/
- if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+ switch (sfr->basic.CL) {
+ case 1: /* logical partition */
+ sde_regs->in_guest = 0;
+ break;
+ case 2: /* virtual machine */
sde_regs->in_guest = 1;
+ break;
+ default: /* old machine, use heuristics */
+ if (sfr->basic.gpp ||
+ sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+ sde_regs->in_guest = 1;
+ break;
+ }
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
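[Illustration, not part of the patch] The perf_cpum_sf.c hunk above switches guest/host classification to the hardware-provided configuration level (CL) of the sample and only falls back to the gpp/asn heuristic on machines that do not report it. A stand-alone sketch of that decision; the struct below mirrors the sample fields used in the patch but is an assumed stand-in, not the kernel definition:

        #include <stdbool.h>
        #include <stdint.h>

        struct sample_basic {
                uint8_t  CL;            /* configuration level: 1 = LPAR, 2 = VM */
                uint64_t gpp;           /* guest program parameter */
                uint16_t prim_asn;      /* primary ASN of the sample */
                uint64_t hpp;           /* host program parameter */
        };

        static bool sample_in_guest(const struct sample_basic *b)
        {
                switch (b->CL) {
                case 1:                 /* logical partition -> host sample */
                        return false;
                case 2:                 /* virtual machine -> guest sample */
                        return true;
                default:                /* old machine: fall back to heuristics */
                        return b->gpp || b->prim_asn != (uint16_t)b->hpp;
                }
        }
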
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa74b321..400d14f0b9f5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
- struct thread_info *ti;
struct fake_frame
{
struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
- ti = task_thread_info(p);
- ti->user_timer = 0;
- ti->system_timer = 0;
+ p->thread.user_timer = 0;
+ p->thread.system_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808085e6..9e60ef144d03 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
on_each_cpu(update_cpu_mhz, NULL, 0);
}
-void notrace cpu_relax(void)
+void notrace cpu_relax_yield(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
diag_stat_inc(DIAG_STAT_X044);
@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
}
barrier();
}
-EXPORT_SYMBOL(cpu_relax);
+EXPORT_SYMBOL(cpu_relax_yield);
/*
* cpu_init - initializes state that is per-CPU.
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9336e824e2db..b81ab8882e2e 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(task_thread_info(child)->last_break,
+ put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
}
@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
- *k = task_thread_info(target)->last_break;
+ *k = target->thread.last_break;
} else {
unsigned long __user *u = ubuf;
- if (__put_user(task_thread_info(target)->last_break, u))
+ if (__put_user(target->thread.last_break, u))
return -EFAULT;
}
}
@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- unsigned int *data = &task_thread_info(target)->system_call;
+ unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
compat_ulong_t last_break;
if (count > 0) {
- last_break = task_thread_info(target)->last_break;
+ last_break = target->thread.last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f23f13..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -35,6 +35,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
- lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+ lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->current_task = (unsigned long) init_thread_union.thread_info.task;
- lc->thread_info = (unsigned long) &init_thread_union;
+ lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
+ lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
- restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+ restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;
/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) &__bss_stop - 1;
for_each_memblock(memory, reg) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_virt_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = alloc_bootmem_low(sizeof(*sub_res));
+ sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
* part of the System RAM resource.
*/
if (crashk_res.end) {
- memblock_add(crashk_res.start, resource_size(&crashk_res));
+ memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
setup_memory_end();
setup_memory();
+ dma_contiguous_reserve(memory_end);
check_initrd();
reserve_crashkernel();
@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
+ smp_detect_cpus();
+ topology_init_early();
/*
* Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d82562cf0a0e..9f241d1efeda 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ regs->gprs[6] = current->thread.last_break;
}
return 0;
}
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
- regs->gprs[5] = task_thread_info(current)->last_break;
+ regs->gprs[5] = current->thread.last_break;
return 0;
}
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
* the debugger may change all our registers, including the system
* call information.
*/
- current_thread_info()->system_call =
+ current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- if (current_thread_info()->system_call) {
- regs->int_code = current_thread_info()->system_call;
+ if (current->thread.system_call) {
+ regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe1c5ea..e49f61aadaf9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
+#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
- struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
- lc->user_timer = ti->user_timer;
- lc->system_timer = ti->system_timer;
+ lc->user_timer = tsk->thread.user_timer;
+ lc->system_timer = tsk->thread.system_timer;
lc->steal_timer = 0;
}
@@ -368,10 +367,15 @@ int smp_find_processor_id(u16 address)
return -1;
}
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
{
- return pcpu_running(pcpu_devices + cpu);
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+ if (pcpu_running(pcpu_devices + cpu))
+ return false;
+ return true;
}
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
void smp_yield_cpu(int cpu)
{
@@ -657,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}
-static struct sclp_core_info *smp_get_core_info(void)
+static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
- struct sclp_core_info *info;
int address;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info && (use_sigp_detection || sclp_get_core_info(info))) {
+ if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -678,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
}
info->combined = info->configured;
}
- return info;
}
static int smp_add_present_cpu(int cpu);
@@ -719,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
return nr;
}
-static void __init smp_detect_cpus(void)
+void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;
/* Get CPU information */
- info = smp_get_core_info();
- if (!info)
- panic("smp_detect_cpus failed to allocate memory\n");
-
+ info = memblock_virt_alloc(sizeof(*info), 8);
+ smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
@@ -765,7 +764,7 @@ static void __init smp_detect_cpus(void)
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
- kfree(info);
+ memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
@@ -802,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- base = cpu - (cpu % (smp_cpu_mtid + 1));
+ base = smp_get_base_cpu(cpu);
for (i = 0; i <= smp_cpu_mtid; i++) {
if (base + i < nr_cpu_ids)
if (cpu_online(base + i))
@@ -902,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
@@ -968,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- cpu -= cpu % (smp_cpu_mtid + 1);
+ cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1047,22 +1045,18 @@ static struct attribute_group cpu_online_attr_group = {
.attrs = cpu_online_attrs,
};
-static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int smp_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned int)(long)hcpu;
struct device *s = &per_cpu(cpu_device, cpu)->dev;
- int err = 0;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- break;
- case CPU_DEAD:
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
- break;
- }
- return notifier_from_errno(err);
+ return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+}
+static int smp_cpu_pre_down(unsigned int cpu)
+{
+ struct device *s = &per_cpu(cpu_device, cpu)->dev;
+
+ sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+ return 0;
}
static int smp_add_present_cpu(int cpu)
@@ -1083,20 +1077,12 @@ static int smp_add_present_cpu(int cpu)
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
- if (cpu_online(cpu)) {
- rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- if (rc)
- goto out_online;
- }
rc = topology_cpu_init(c);
if (rc)
goto out_topology;
return 0;
out_topology:
- if (cpu_online(cpu))
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
-out_online:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
@@ -1113,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
struct sclp_core_info *info;
int nr;
- info = smp_get_core_info();
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);
@@ -1149,17 +1136,15 @@ static int __init s390_smp_init(void)
if (rc)
return rc;
#endif
- cpu_notifier_register_begin();
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
goto out;
}
- __hotcpu_notifier(smp_cpu_notify, 0);
-
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
+ smp_cpu_online, smp_cpu_pre_down);
out:
- cpu_notifier_register_done();
return rc;
}
subsys_initcall(s390_smp_init);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..1ff21f05d7dd 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -194,7 +194,7 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,_sclp_print_early
lghi %r1,0
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index bfda6aa40280..24021c1e3ecb 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
+{
+ switch (encoding) {
+ case 1: /* EBCDIC */
+ EBCASC(name, len);
+ break;
+ case 2: /* UTF-8 */
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
+ if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
+ seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
+ seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
+ }
}
static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
- if (info->vm[lvl].ext_name_encoding == 0)
- return;
- if (info->ext_names[lvl][0] == 0)
- return;
- switch (info->vm[lvl].ext_name_encoding) {
- case 1: /* EBCDIC */
- EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
- break;
- case 2: /* UTF-8 */
- break;
- default:
+ size_t len = sizeof(info->ext_names[lvl]);
+
+ if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
- }
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0bfcc492987e..867d0a057046 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
-unsigned long lpar_offset;
-unsigned long initial_leap_seconds;
+
+static unsigned long long lpar_offset;
+static unsigned long long initial_leap_seconds;
+static unsigned long long tod_steering_end;
+static long long tod_steering_delta;
/*
* Get time offsets with PTFF
*/
-void __init ptff_init(void)
+void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ /* Initialize TOD steering parameters */
+ tod_steering_end = sched_clock_base_cc;
+ vdso_data->ts_end = tod_steering_end;
+
if (!test_facility(28))
return;
+
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
- initial_leap_seconds = (unsigned long)
+ initial_leap_seconds = (unsigned long long)
((long) qui.old_leap * 4096000000L);
}
@@ -123,18 +131,6 @@ void clock_comparator_work(void)
cd->event_handler(cd);
}
-/*
- * Fixup the clock comparator.
- */
-static void fixup_clock_comparator(unsigned long long delta)
-{
- /* If nobody is waiting there's nothing to fix. */
- if (S390_lowcore.clock_comparator == -1ULL)
- return;
- S390_lowcore.clock_comparator += delta;
- set_clock_comparator(S390_lowcore.clock_comparator);
-}
-
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
static cycle_t read_tod_clock(struct clocksource *cs)
{
- return get_tod_clock();
+ unsigned long long now, adj;
+
+ preempt_disable(); /* protect from changes to steering parameters */
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /*
+ * manually steer by 1 cycle every 2^16 cycles. This
+ * corresponds to shifting the tod delta by 15. 1s is
+ * therefore steered in ~9h. The adjust will decrease
+ * over time, until it finally reaches 0.
+ */
+ now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+ preempt_enable();
+ return now;
}
static struct clocksource clocksource_tod = {
@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
return rc;
}
+/*
+ * Apply clock delta to the global data structures.
+ * This is called once on the CPU that performed the clock sync.
+ */
+static void clock_sync_global(unsigned long long delta)
+{
+ unsigned long now, adj;
+ struct ptff_qto qto;
+
+ /* Fixup the monotonic sched clock. */
+ sched_clock_base_cc += delta;
+ /* Adjust TOD steering parameters. */
+ vdso_data->tb_update_count++;
+ now = get_tod_clock();
+ adj = tod_steering_end - now;
+ if (unlikely((s64) adj >= 0))
+ /* Calculate how much of the old adjustment is left. */
+ tod_steering_delta = (tod_steering_delta < 0) ?
+ -(adj >> 15) : (adj >> 15);
+ tod_steering_delta += delta;
+ if ((abs(tod_steering_delta) >> 48) != 0)
+ panic("TOD clock sync offset %lli is too large to drift\n",
+ tod_steering_delta);
+ tod_steering_end = now + (abs(tod_steering_delta) << 15);
+ vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
+ vdso_data->ts_end = tod_steering_end;
+ vdso_data->tb_update_count++;
+ /* Update LPAR offset. */
+ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+ lpar_offset = qto.tod_epoch_difference;
+ /* Call the TOD clock change notifier. */
+ atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
+}
+
+/*
+ * Apply clock delta to the per-CPU data structures of this CPU.
+ * This is called for each online CPU after the call to clock_sync_global.
+ */
+static void clock_sync_local(unsigned long long delta)
+{
+ /* Add the delta to the clock comparator. */
+ if (S390_lowcore.clock_comparator != -1ULL) {
+ S390_lowcore.clock_comparator += delta;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ }
+ /* Adjust the last_update_clock time-stamp. */
+ S390_lowcore.last_update_clock += delta;
+}
+
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long long fixup_cc;
+ unsigned long long clock_delta;
};
-static void clock_sync_cpu(struct clock_sync_data *sync)
-{
- atomic_dec(&sync->cpus);
- enable_sync_clock();
- while (sync->in_sync == 0) {
- __udelay(1);
- /*
- * A different cpu changes *in_sync. Therefore use
- * barrier() to force memory access.
- */
- barrier();
- }
- if (sync->in_sync != 1)
- /* Didn't work. Clear per-cpu in sync bit again. */
- disable_sync_clock(NULL);
- /*
- * This round of TOD syncing is done. Set the clock comparator
- * to the next tick and let the processor continue.
- */
- fixup_clock_comparator(sync->fixup_cc);
-}
-
/*
* Server Time Protocol (STP) code.
*/
@@ -523,54 +560,46 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
- static int first;
+ struct clock_sync_data *sync = data;
unsigned long long clock_delta;
- struct clock_sync_data *stp_sync;
- struct ptff_qto qto;
+ static int first;
int rc;
- stp_sync = data;
-
- if (xchg(&first, 1) == 1) {
- /* Slave */
- clock_sync_cpu(stp_sync);
- return 0;
- }
-
- /* Wait until all other cpus entered the sync function. */
- while (atomic_read(&stp_sync->cpus) != 0)
- cpu_relax();
-
enable_sync_clock();
-
- rc = 0;
- if (stp_info.todoff[0] || stp_info.todoff[1] ||
- stp_info.todoff[2] || stp_info.todoff[3] ||
- stp_info.tmd != 2) {
- rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
- if (rc == 0) {
- /* fixup the monotonic sched clock */
- sched_clock_base_cc += clock_delta;
- if (ptff_query(PTFF_QTO) &&
- ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
- /* Update LPAR offset */
- lpar_offset = qto.tod_epoch_difference;
- atomic_notifier_call_chain(&s390_epoch_delta_notifier,
- 0, &clock_delta);
- stp_sync->fixup_cc = clock_delta;
- fixup_clock_comparator(clock_delta);
- rc = chsc_sstpi(stp_page, &stp_info,
- sizeof(struct stp_sstpi));
- if (rc == 0 && stp_info.tmd != 2)
- rc = -EAGAIN;
+ if (xchg(&first, 1) == 0) {
+ /* Wait until all other cpus entered the sync function. */
+ while (atomic_read(&sync->cpus) != 0)
+ cpu_relax();
+ rc = 0;
+ if (stp_info.todoff[0] || stp_info.todoff[1] ||
+ stp_info.todoff[2] || stp_info.todoff[3] ||
+ stp_info.tmd != 2) {
+ rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
+ &clock_delta);
+ if (rc == 0) {
+ sync->clock_delta = clock_delta;
+ clock_sync_global(clock_delta);
+ rc = chsc_sstpi(stp_page, &stp_info,
+ sizeof(struct stp_sstpi));
+ if (rc == 0 && stp_info.tmd != 2)
+ rc = -EAGAIN;
+ }
}
+ sync->in_sync = rc ? -EAGAIN : 1;
+ xchg(&first, 0);
+ } else {
+ /* Slave */
+ atomic_dec(&sync->cpus);
+ /* Wait for in_sync to be set. */
+ while (READ_ONCE(sync->in_sync) == 0)
+ __udelay(1);
}
- if (rc) {
+ if (sync->in_sync != 1)
+ /* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
- stp_sync->in_sync = -EAGAIN;
- } else
- stp_sync->in_sync = 1;
- xchg(&first, 0);
+ /* Apply clock delta to per-CPU fields of this CPU. */
+ clock_sync_local(sync->clock_delta);
+
return 0;
}
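The steering code above avoids applying a sync delta as a jump: clock_sync_global() stretches |tod_steering_delta| over a window that ends at now + (|delta| << 15), and read_tod_clock() keeps adding or subtracting (tod_steering_end - now) >> 15 until that window has passed. A small stand-alone model of the arithmetic (plain C, no kernel types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t tod_steering_end;
static int64_t tod_steering_delta;

/* Mirrors the adjustment done in read_tod_clock() above. */
static uint64_t steered_clock(uint64_t now)
{
	int64_t adj = tod_steering_end - now;

	if (adj >= 0)		/* still inside the steering window */
		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	return now;
}

int main(void)
{
	uint64_t now = 1000000;
	int64_t delta = -4096;	/* the sync set the clock back by 4096 units */

	tod_steering_delta = delta;
	tod_steering_end = now + ((uint64_t)llabs(delta) << 15);

	/* Right after the sync the full |delta| is still compensated ... */
	printf("offset at sync: %+lld\n", (long long)(steered_clock(now) - now));
	/* ... and it has decayed to nothing once the window is over. */
	now = tod_steering_end;
	printf("offset at end:  %+lld\n", (long long)(steered_clock(now) - now));
	return 0;
}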
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e959c02e0cac..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
+#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -41,15 +42,17 @@ static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+cpumask_t cpus_with_topology;
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
- topo = &per_cpu(cpu_topology, lcpu + i);
+ topo = &cpu_topology[lcpu + i];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
+ cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
int cpu;
for_each_possible_cpu(cpu) {
- topo = &per_cpu(cpu_topology, cpu);
+ topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
topo->socket_id = cpu;
topo->book_id = cpu;
topo->drawer_id = cpu;
+ if (cpu_present(cpu))
+ cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, min(topology_max_mnest, 4));
}
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- struct device *dev;
- int cpu, rc = 0;
+ int rc = 0;
+ cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+ struct device *dev;
+ int cpu, rc;
+
+ rc = __arch_update_cpu_topology();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
static const struct cpumask *cpu_thread_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).thread_mask;
+ return &cpu_topology[cpu].thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).core_mask;
+ return &cpu_topology[cpu].core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).book_mask;
+ return &cpu_topology[cpu].book_mask;
}
static const struct cpumask *cpu_drawer_mask(int cpu)
{
- return &per_cpu(cpu_topology, cpu).drawer_mask;
+ return &cpu_topology[cpu].drawer_mask;
}
static int __init early_parse_topology(char *p)
@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+ mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
int i;
+ set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
- return 0;
- tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+ goto out;
+ tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
- set_sched_topology(s390_topology);
- return 0;
+out:
+ __arch_update_cpu_topology();
}
-early_initcall(s390_topology_init);
static int __init topology_init(void)
{
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5eec9afbb5b5..a5769b83d90e 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -99,8 +99,27 @@ __kernel_clock_gettime:
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15) /* now - ts_steering_end */
+ sl %r1,5(%r15)
+ brc 3,22f
+ ahi %r0,-1
+22: ltr %r0,%r0 /* past end of steering? */
+ jm 24f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 23f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 23f
+ ahi %r0,-1
+23: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,25f
+ ahi %r0,1
+ j 25f
+24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 719de6186b20..63b86dceb0bf 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -31,8 +31,27 @@ __kernel_gettimeofday:
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
+ s %r0,1(%r15)
+ sl %r1,5(%r15)
+ brc 3,14f
+ ahi %r0,-1
+14: ltr %r0,%r0 /* past end of steering? */
+ jm 16f
+ srdl %r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 15f
+ lcr %r0,%r0 /* negative TOD offset */
+ lcr %r1,%r1
+ je 15f
+ ahi %r0,-1
+15: a %r0,1(%r15) /* add TOD timestamp */
+ al %r1,5(%r15)
+ brc 12,17f
+ ahi %r0,1
+ j 17f
+16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
+17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 61541fb93dc6..9c3b12626dba 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -83,8 +83,17 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
- lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,1(%r15)
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 17f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 18f
+ lcgr %r0,%r0 /* negative TOD offset */
+18: algr %r1,%r0 /* add steering offset */
+17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6ce46707663c..b02e62f3bc12 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,16 @@ __kernel_gettimeofday:
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
- sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
+ slgr %r0,%r1 /* now - ts_steering_end */
+ ltgr %r0,%r0 /* past end of steering ? */
+ jm 6f
+ srlg %r0,%r0,15 /* 1 per 2^16 */
+ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
+ jz 7f
+ lcgr %r0,%r0 /* negative TOD offset */
+7: algr %r1,%r0 /* add steering offset */
+6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 000e6e91f6a0..3667d20e997f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -62,9 +62,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__start_ro_after_init = .;
+ __start_data_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
}
+ __end_data_ro_after_init = .;
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 856e30d8463f..6b246aadf311 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
*/
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
- struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
- user = S390_lowcore.user_timer - ti->user_timer;
+ user = S390_lowcore.user_timer - tsk->thread.user_timer;
S390_lowcore.steal_timer -= user;
- ti->user_timer = S390_lowcore.user_timer;
+ tsk->thread.user_timer = S390_lowcore.user_timer;
- system = S390_lowcore.system_timer - ti->system_timer;
+ system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
+ tsk->thread.system_timer = S390_lowcore.system_timer;
user_scaled = user;
system_scaled = system;
@@ -137,8 +136,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
user_scaled = (user_scaled * mult) / div;
system_scaled = (system_scaled * mult) / div;
}
- account_user_time(tsk, user, user_scaled);
- account_system_time(tsk, hardirq_offset, system, system_scaled);
+ account_user_time(tsk, user);
+ tsk->utimescaled += user_scaled;
+ account_system_time(tsk, hardirq_offset, system);
+ tsk->stimescaled += system_scaled;
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -151,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
void vtime_task_switch(struct task_struct *prev)
{
- struct thread_info *ti;
-
do_account_vtime(prev, 0);
- ti = task_thread_info(prev);
- ti->user_timer = S390_lowcore.user_timer;
- ti->system_timer = S390_lowcore.system_timer;
- ti = task_thread_info(current);
- S390_lowcore.user_timer = ti->user_timer;
- S390_lowcore.system_timer = ti->system_timer;
+ prev->thread.user_timer = S390_lowcore.user_timer;
+ prev->thread.system_timer = S390_lowcore.system_timer;
+ S390_lowcore.user_timer = current->thread.user_timer;
+ S390_lowcore.system_timer = current->thread.system_timer;
}
/*
@@ -179,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
*/
void vtime_account_irq_enter(struct task_struct *tsk)
{
- struct thread_info *ti = task_thread_info(tsk);
u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
@@ -191,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
- system = S390_lowcore.system_timer - ti->system_timer;
+ system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
- ti->system_timer = S390_lowcore.system_timer;
+ tsk->thread.system_timer = S390_lowcore.system_timer;
system_scaled = system;
/* Do MT utilization scaling */
if (smp_cpu_mtid) {
@@ -202,7 +198,8 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system_scaled = (system_scaled * mult) / div;
}
- account_system_time(tsk, 0, system, system_scaled);
+ account_system_time(tsk, 0, system);
+ tsk->stimescaled += system_scaled;
virt_timer_forward(system);
}
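do_account_vtime() and vtime_account_irq_enter() work on deltas: the lowcore timers run continuously, the per-task copies (now kept in thread_struct instead of thread_info) record the value at the last accounting point, and only the difference is charged; a scaled copy additionally applies the multithreading factor mult/div. A toy model of that delta pattern (scaling folded into one counter for brevity; the names are illustrative stand-ins, e.g. for mt_scaling_mult/mt_scaling_div):

#include <stdint.h>
#include <stdio.h>

/* Free-running counter as kept in the lowcore (illustrative). */
static uint64_t lowcore_user_timer;

struct task {
	uint64_t last_user_timer;	/* snapshot taken at the last accounting */
	uint64_t utime;			/* time charged to the task so far */
};

/* Hypothetical MT scaling factors. */
static const uint64_t mt_mult = 3, mt_div = 4;

static void account_user(struct task *tsk)
{
	uint64_t user = lowcore_user_timer - tsk->last_user_timer;

	tsk->last_user_timer = lowcore_user_timer;
	tsk->utime += user * mt_mult / mt_div;
}

int main(void)
{
	struct task t = { 0, 0 };

	lowcore_user_timer = 1000;
	account_user(&t);
	lowcore_user_timer = 1800;	/* another 800 units ran in user mode */
	account_user(&t);
	printf("charged %llu units\n", (unsigned long long)t.utime);
	return 0;
}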
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07f70d3..af13f1a135b6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -415,7 +415,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
int rc;
mci.val = mchk->mcic;
- /* take care of lazy register loading via vcpu load/put */
+ /* take care of lazy register loading */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9c7a1ecfe6bd..bec71e902be3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1812,22 +1812,7 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- /* Save host register state */
- save_fpu_regs();
- vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
- vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
-
- if (MACHINE_HAS_VX)
- current->thread.fpu.regs = vcpu->run->s.regs.vrs;
- else
- current->thread.fpu.regs = vcpu->run->s.regs.fprs;
- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
- if (test_fp_ctl(current->thread.fpu.fpc))
- /* User space provided an invalid FPC, let's clear it */
- current->thread.fpu.fpc = 0;
- save_access_regs(vcpu->arch.host_acrs);
- restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.enabled_gmap);
atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
@@ -1844,16 +1829,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.enabled_gmap = gmap_get_enabled();
gmap_disable(vcpu->arch.enabled_gmap);
- /* Save guest register state */
- save_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-
- /* Restore host register state */
- current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
- current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
-
- save_access_regs(vcpu->run->s.regs.acrs);
- restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
@@ -2243,7 +2218,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
- restore_access_regs(vcpu->run->s.regs.acrs);
return 0;
}
@@ -2257,11 +2231,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- /* make sure the new values will be lazily loaded */
- save_fpu_regs();
if (test_fp_ctl(fpu->fpc))
return -EINVAL;
- current->thread.fpu.fpc = fpu->fpc;
+ vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
@@ -2279,7 +2251,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
(__vector128 *) vcpu->run->s.regs.vrs);
else
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
- fpu->fpc = current->thread.fpu.fpc;
+ fpu->fpc = vcpu->run->s.regs.fpc;
return 0;
}
@@ -2740,6 +2712,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
+ save_access_regs(vcpu->arch.host_acrs);
+ restore_access_regs(vcpu->run->s.regs.acrs);
+ /* save host (userspace) fprs/vrs */
+ save_fpu_regs();
+ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ if (MACHINE_HAS_VX)
+ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+ else
+ current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
kvm_run->kvm_dirty_regs = 0;
}
@@ -2758,6 +2744,15 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+ /* Save guest register state */
+ save_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ /* Restore will be done lazily at return */
+ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -2874,7 +2869,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
/*
* The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
- * copying in vcpu load/put. Lets update our copies before we save
+ * switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
save_fpu_regs();
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index bd98b7d25200..05c98bb853cf 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
if (r < 0)
goto out;
- diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_buf || diag224(diag224_buf))
goto out;
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
sctns->par.infpval1 |= PAR_WGHT_VLD;
out:
- kfree(diag224_buf);
+ free_page((unsigned long)diag224_buf);
vfree(diag204_buf);
}
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65bfac4..7422a706f310 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -8,6 +8,45 @@
#include <asm/export.h>
/*
+ * void *memmove(void *dest, const void *src, size_t n)
+ */
+ENTRY(memmove)
+ ltgr %r4,%r4
+ lgr %r1,%r2
+ bzr %r14
+ clgr %r2,%r3
+ jnh .Lmemmove_forward
+ la %r5,0(%r4,%r3)
+ clgr %r2,%r5
+ jl .Lmemmove_reverse
+.Lmemmove_forward:
+ aghi %r4,-1
+ srlg %r0,%r4,8
+ ltgr %r0,%r0
+ jz .Lmemmove_rest
+.Lmemmove_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brctg %r0,.Lmemmove_loop
+.Lmemmove_rest:
+ larl %r5,.Lmemmove_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemmove_reverse:
+ aghi %r4,-1
+.Lmemmove_reverse_loop:
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ brctg %r4,.Lmemmove_reverse_loop
+ ic %r0,0(%r4,%r3)
+ stc %r0,0(%r4,%r1)
+ br %r14
+.Lmemmove_mvc:
+ mvc 0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memmove)
+
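The assembler memmove above picks its copy direction from the overlap: forward mvc chunks when the destination starts at or below the source, or past the end of it, and a byte-wise backward loop otherwise. For reference, the same decision in plain C (a sketch of the semantics, not the kernel code):

#include <stddef.h>
#include <stdio.h>

/* Reference semantics for the assembler memmove above. */
static void *memmove_ref(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d <= s || d >= s + n) {
		/* No harmful overlap: copy ascending (the mvc loop above). */
		for (size_t i = 0; i < n; i++)
			d[i] = s[i];
	} else {
		/* dest overlaps the tail of src: copy descending. */
		for (size_t i = n; i-- > 0; )
			d[i] = s[i];
	}
	return dest;
}

int main(void)
{
	char buf[] = "abcdef";

	memmove_ref(buf + 2, buf, 4);	/* overlapping shift to the right */
	printf("%s\n", buf);		/* prints "ababcd" */
	return 0;
}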
+/*
* memset implementation
*
* This code corresponds to the C construct below. We do distinguish
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7d2f4e..e48a48ec24bc 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
-static inline int cpu_is_preempted(int cpu)
-{
- if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
- return 0;
- if (smp_vcpu_scheduled(cpu))
- return 0;
- return 1;
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
return;
smp_yield_cpu(~cpu);
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe63c43..d1faae5cdd12 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -733,6 +733,7 @@ block:
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
+ set_preempt_need_resched();
}
}
out:
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 3ba622702ce4..ec1f0dedb948 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1015,7 +1015,7 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
if (slot) {
rmap->next = radix_tree_deref_slot_protected(slot,
&sg->guest_table_lock);
- radix_tree_replace_slot(slot, rmap);
+ radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
} else {
rmap->next = NULL;
radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1848292766ef..45becc8a44ec 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_align(size, size);
+ return (void *) memblock_alloc(size, size);
}
static inline pud_t *vmem_pud_alloc(void)
@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
pte_t __ref *vmem_pte_alloc(void)
{
+ unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
- PTRS_PER_PTE * sizeof(pte_t));
+ pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID,
- PTRS_PER_PTE * sizeof(pte_t));
+ clear_table((unsigned long *) pte, _PAGE_INVALID, size);
return pte;
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bee281f3163d..167b31b186c1 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -981,7 +981,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT2(0x0d00, REG_14, REG_W1);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
- if (bpf_helper_changes_skb_data((void *)func)) {
+ if (bpf_helper_changes_pkt_data((void *)func)) {
jit->seen |= SEEN_SKB_CHANGE;
/* lg %b1,ST_OFF_SKBP(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 37e0bb835516..cfd08384f0ab 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
/*
* Allocate and initialize core to node mapping
*/
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
- if (emu_cores == NULL)
- panic("Could not allocate cores to node memory");
+ emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
phys = toptree_new(TOPTREE_ID_PHYS, 1);
- for_each_online_cpu(cpu) {
- top = &per_cpu(cpu_topology, cpu);
+ for_each_cpu(cpu, &cpus_with_topology) {
+ top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
- core = toptree_get_child(mc, top->core_id);
+ core = toptree_get_child(mc, smp_get_base_cpu(cpu));
if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
int cpu;
for_each_cpu(cpu, &core->mask) {
- top = &per_cpu(cpu_topology, cpu);
+ top = &cpu_topology[cpu];
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
}
}
+static void pin_all_possible_cpus(void)
+{
+ int core_id, node_id, cpu;
+ static int initialized;
+
+ if (initialized)
+ return;
+ print_node_to_core_map();
+ node_id = 0;
+ for_each_possible_cpu(cpu) {
+ core_id = smp_get_base_cpu(cpu);
+ if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
+ continue;
+ pin_core_to_node(core_id, node_id);
+ cpu_topology[cpu].node_id = node_id;
+ node_id = (node_id + 1) % emu_nodes;
+ }
+ print_node_to_core_map();
+ initialized = 1;
+}
+
/*
* Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
toptree_free(phys);
toptree_to_topology(numa);
toptree_free(numa);
- print_node_to_core_map();
+ pin_all_possible_cpus();
}
/*
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
{
- struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+ struct toptree *res;
+ if (slab_is_available())
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ else
+ res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
- kfree(cand);
+ if (slab_is_available())
+ kfree(cand);
+ else
+ memblock_free_early((unsigned long)cand, sizeof(*cand));
}
/**
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19c8c0c..64e1734bebb7 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid) {
+ zdev->domain = (u16) zdev->uid;
+ return 0;
+ }
+
spin_lock(&zpci_domain_lock);
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
static void zpci_free_domain(struct zpci_dev *zdev)
{
+ if (zpci_unique_uid)
+ return;
+
spin_lock(&zpci_domain_lock);
clear_bit(zdev->domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
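With firmware-guaranteed unique UIDs (zpci_unique_uid), the UID itself becomes the PCI domain number, so a device keeps a stable domain across hotplug; without that guarantee the old bitmap allocator hands out the first free slot as before. A compact model of that decision (NR_DEVICES and the flag handling below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_DEVICES 64			/* stand-in for ZPCI_NR_DEVICES */

static uint64_t domain_bitmap;		/* one bit per dynamically handed-out domain */
static int unique_uid;			/* set when firmware guarantees unique UIDs */

static int alloc_domain(uint16_t uid)
{
	if (unique_uid)
		return uid;		/* the UID doubles as a stable domain number */
	for (int d = 0; d < NR_DEVICES; d++) {
		if (!(domain_bitmap & (1ULL << d))) {
			domain_bitmap |= 1ULL << d;
			return d;
		}
	}
	return -1;			/* all dynamic domains in use */
}

int main(void)
{
	unique_uid = 1;
	printf("with unique UIDs: domain %d\n", alloc_domain(0x2a));
	unique_uid = 0;
	printf("bitmap fallback:  domain %d\n", alloc_domain(0x2a));
	return 0;
}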
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1a4512c8544a..e3ef63b36b5a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
+bool zpci_unique_uid;
+
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
@@ -315,6 +317,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
goto out;
}
+ zpci_unique_uid = rrb->response.uid_checking;
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 38993b156924..c2f786f0ea06 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
- seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
atomic64_read(counter));
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..1d7a9c71944a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
+ * validated. With lazy unmap, rpcit is skipped for previously valid
* entries, but a global rpcit is then required before any address can
* be re-used, i.e. after each iommu bitmap wrap-around.
*/
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- return 0;
+ if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+ if (!zdev->tlb_refresh)
+ return 0;
+ } else {
+ if (!s390_iommu_strict)
+ return 0;
+ }
return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
/* global flush before DMA addresses are reused */
if (zpci_refresh_global(zdev))
goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
if (!zdev->iommu_bitmap)
goto out;
- if (zdev->tlb_refresh || s390_iommu_strict)
+ if (s390_iommu_strict)
bitmap_clear(zdev->iommu_bitmap, offset, size);
else
bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
pa = page_to_phys(page);
- memset((void *) pa, 0, size);
-
map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
@@ -419,15 +420,15 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- unsigned long pa;
+ unsigned long pa = 0;
int ret;
- size = PAGE_ALIGN(size);
- dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ dma_addr_base = dma_alloc_address(dev, nr_pages);
if (dma_addr_base == DMA_ERROR_CODE)
return -ENOMEM;
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
- pa = page_to_phys(sg_page(s)) + s->offset;
- ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ pa = page_to_phys(sg_page(s));
+ ret = __dma_update_trans(zdev, pa, dma_addr,
+ s->offset + s->length, flags);
if (ret)
goto unmap;
- dma_addr += s->length;
+ dma_addr += s->offset + s->length;
}
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret)
goto unmap;
*handle = dma_addr_base;
- atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+ atomic64_add(nr_pages, &zdev->mapped_pages);
return ret;
unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID);
- dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
- if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ if (!s390_iommu_strict) {
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
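The __s390_dma_map_sg() change above maps each element from the page boundary and covers s->offset + s->length bytes, so the in-page offset of a scatterlist element is preserved in the DMA address and the tail of the buffer stays inside the mapped window. A quick check of the page-count arithmetic (the 4 KiB page size and the sample offsets are assumptions for the illustration):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical scatterlist element: 7800 bytes starting 512 bytes into a page. */
	unsigned long offset = 512, length = 7800;

	/*
	 * Mapping only "length" bytes from the page boundary would leave the
	 * tail of the element uncovered; the IOMMU window has to span
	 * offset + length bytes, rounded up to whole pages.
	 */
	printf("pages needed: %lu (not %lu)\n",
	       PAGE_ALIGN(offset + length) >> PAGE_SHIFT,
	       PAGE_ALIGN(length) >> PAGE_SHIFT);
	return 0;
}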
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 6d9814c9df2b..4b5e1e499527 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -9,7 +9,5 @@ define filechk_facilities.h
$(obj)/gen_facilities
endef
-$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
-
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fe4e6c910dd7..8cc53b1e6d03 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -7,13 +7,83 @@
*
*/
-#define S390_GEN_FACILITIES_C
-
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
-#include <asm/facilities_src.h>
+
+struct facility_def {
+ char *name;
+ int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+ {
+ /*
+ * FACILITIES_ALS contains the list of facilities that are
+ * required to run a kernel that is compiled e.g. with
+ * -march=<machine>.
+ */
+ .name = "FACILITIES_ALS",
+ .bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+ 18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ 7, /* stfle */
+ 17, /* message security assist */
+ 21, /* extended-immediate facility */
+ 25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ 27, /* mvcos */
+ 32, /* compare and swap and store */
+ 33, /* compare and swap and store 2 */
+ 34, /* general extension facility */
+ 35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ 45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ 49, /* misc-instruction-extensions */
+ 52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+ 53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+ -1 /* END */
+ }
+ },
+ {
+ .name = "FACILITIES_KVM",
+ .bits = (int[]){
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+ 2, /* z/Arch mode active */
+ 3, /* DAT-enhancement */
+ 4, /* idte segment table */
+ 5, /* idte region table */
+ 6, /* ASN-and-LX reuse */
+ 7, /* stfle */
+ 8, /* enhanced-DAT 1 */
+ 9, /* sense-running-status */
+ 10, /* conditional sske */
+ 13, /* ipte-range */
+ 14, /* nonquiescing key-setting */
+ 73, /* transactional execution */
+ 75, /* access-exception-fetch/store indication */
+ 76, /* msa extension 3 */
+ 77, /* msa extension 4 */
+ 78, /* enhanced-DAT 2 */
+ -1 /* END */
+ }
+ },
+};
static void print_facility_list(struct facility_def *def)
{
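The facility_defs table, now defined directly in gen_facilities.c, lists facility bit numbers; the generator turns each -1 terminated list into doubleword masks in STFLE layout, where facility bit 0 is the most significant bit of the first doubleword. A stand-alone sketch of that packing (the output format of the real print_facility_list(), which is truncated here, differs in detail):

#include <stdint.h>
#include <stdio.h>

/*
 * Pack a -1 terminated list of facility bit numbers into doublewords.
 * Facility bit 0 is the most significant bit of the first doubleword,
 * which is the layout the STFLE instruction stores.
 */
static void print_mask(const char *name, const int *bits)
{
	uint64_t words[4] = { 0 };
	int i, w, max_word = 0;

	for (i = 0; bits[i] != -1; i++) {
		w = bits[i] / 64;
		words[w] |= 1ULL << (63 - (bits[i] & 63));
		if (w > max_word)
			max_word = w;
	}
	printf("#define %s", name);
	for (w = 0; w <= max_word; w++)
		printf("%s0x%016llxULL", w ? ", " : " ",
		       (unsigned long long)words[w]);
	printf("\n");
}

int main(void)
{
	static const int als[] = { 0, 1, 7, 17, 18, 21, 25, -1 };

	print_mask("FACILITIES_ALS", als);
	return 0;
}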
diff --git a/arch/score/include/asm/mutex.h b/arch/score/include/asm/mutex.h
deleted file mode 100644
index 10d48fe4db97..000000000000
--- a/arch/score/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_MUTEX_H
-#define _ASM_SCORE_MUTEX_H
-
-#include <asm-generic/mutex-dec.h>
-
-#endif /* _ASM_SCORE_MUTEX_H */
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index 851f441991d2..d9a922d8711b 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -24,7 +24,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define release_thread(thread) do {} while (0)
/*
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
deleted file mode 100644
index dad29b687bd3..000000000000
--- a/arch/sh/include/asm/mutex-llsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * arch/sh/include/asm/mutex-llsc.h
- *
- * SH-4A optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef __ASM_SH_MUTEX_LLSC_H
-#define __ASM_SH_MUTEX_LLSC_H
-
-/*
- * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
- * with a bastardized atomic decrement (it is not a reliable atomic decrement
- * but it satisfies the defined semantics for our purpose, while being
- * smaller and faster than a real atomic decrement or atomic swap.
- * The idea is to attempt decrementing the lock value only once. If once
- * decremented it isn't zero, or if its store-back fails due to a dispute
- * on the exclusive store, we simply bail out immediately through the slow
- * path where the lock will be reattempted until it succeeds.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n"
- "add #-1, %0 \n"
- "movco.l %0, @%2 \n"
- "movt %1 \n"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n"
- "add #-1, %0 \n"
- "movco.l %0, @%2 \n"
- "movt %1 \n"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res != 0))
- __res = -1;
-
- return __res;
-}
-
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n\t"
- "add #1, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "movt %1 \n\t"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res <= 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we do an atomic decrement and check the
- * result and put it in the __res variable.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __res, __orig;
-
- __asm__ __volatile__ (
- "1: movli.l @%2, %0 \n\t"
- "dt %0 \n\t"
- "movco.l %0,@%2 \n\t"
- "bf 1b \n\t"
- "cmp/eq #0,%0 \n\t"
- "bt 2f \n\t"
- "mov #0, %1 \n\t"
- "bf 3f \n\t"
- "2: mov #1, %1 \n\t"
- "3: "
- : "=&z" (__orig), "=&r" (__res)
- : "r" (&count->counter)
- : "t");
-
- return __res;
-}
-#endif /* __ASM_SH_MUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h
deleted file mode 100644
index d8e37716a4a0..000000000000
--- a/arch/sh/include/asm/mutex.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#if defined(CONFIG_CPU_SH4A)
-#include <asm/mutex-llsc.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index f9a09942a32d..5addd69f70ef 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
void stop_this_cpu(void *);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 025cdb1032f6..46e0d635e36f 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -65,6 +65,9 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
tlb->end = address + PAGE_SIZE;
}
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -115,18 +118,18 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return tlb_remove_page(tlb, page);
}
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
+
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index accc7ca722e1..252e9fee687f 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the Linux/SuperH CPU-specifc backends.
+# Makefile for the Linux/SuperH CPU-specific backends.
#
obj-$(CONFIG_CPU_SH2) = sh2/
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index f0c7025a67d1..3f8e79402d7d 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
+# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index eadb669a7329..47fee3b6e29c 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -18,7 +18,9 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t addr = page_to_phys(page) + offset;
WARN_ON(size == 0);
- dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_cache_sync(dev, page_address(page) + offset, size, dir);
return addr;
}
@@ -35,7 +37,8 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nents, i) {
BUG_ON(!sg_page(s));
- dma_cache_sync(dev, sg_virt(s), s->length, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
s->dma_address = sg_phys(s);
s->dma_length = s->length;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b23c76b42d6e..cf4034c66362 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
+ select PROVE_LOCKING_SMALL if PROVE_LOCKING
config SPARC32
def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
def_bool y
+config ARCH_ATU
+ bool
+ default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+ bool
+ default y if ARCH_ATU
+
config IOMMU_HELPER
bool
default y if SPARC64
@@ -146,6 +155,9 @@ config PGTABLE_LEVELS
default 4 if 64BIT
default 3
+config ARCH_SUPPORTS_UPROBES
+ def_bool y if SPARC64
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -304,6 +316,20 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y if SPARC64
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "13"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 13 means that the largest free memory block is 2^12 pages.
+
source "mm/Kconfig"
if SPARC64
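The FORCE_MAX_ZONEORDER help text's "maximum order plus one" wording is easy to misread: with the default of 13 the buddy allocator's largest order is 12, i.e. 2^12 contiguous base pages. A two-line check of the arithmetic (8 KiB sparc64 base pages assumed):

#include <stdio.h>

int main(void)
{
	unsigned int max_zoneorder = 13;	/* the Kconfig value */
	unsigned long page_size = 8192;		/* sparc64 base page size */
	unsigned long largest = (1UL << (max_zoneorder - 1)) * page_size;

	printf("largest buddy block: 2^%u pages = %lu MiB\n",
	       max_zoneorder - 1, largest >> 20);
	return 0;
}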
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 3583d676a916..b2e650d1764f 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -213,6 +213,7 @@ CONFIG_SCHEDSTATS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_UPROBE_EVENTS=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index cfc918067f80..0569bfac4afb 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
-generic-y += mutex.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a6cfdabb6054..5b0ed48e5b0c 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -24,9 +24,10 @@ typedef struct {
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
- unsigned short sock_id;
+ unsigned short sock_id; /* physical package */
unsigned short core_id;
- int proc_id;
+ unsigned short max_cache_id; /* groupings of highest shared cache */
+ unsigned short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 666d5ba230d2..73cb8978df58 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
*/
#define HV_FAST_PCI_MSG_SETVALID 0xd3
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO
+ * definitions and services.
+ *
+ * CTE Clump Table Entry. First level table entry in the ATU.
+ *
+ * pci_device_list
+ * A 32-bit aligned list of pci_devices.
+ *
+ * pci_device_listp
+ * real address of a pci_device_list. 32-bit aligned.
+ *
+ * iotte IOMMU translation table entry.
+ *
+ * iotte_attributes
+ * IO Attributes for IOMMU v2 mappings. In addition to
+ * read, write IOMMU v2 supports relax ordering
+ *
+ * io_page_list A 64-bit aligned list of real addresses. Each real
+ * address in an io_page_list must be properly aligned
+ * to the pagesize of the given IOTSB.
+ *
+ * io_page_list_p Real address of an io_page_list, 64-bit aligned.
+ *
+ * IOTSB IO Translation Storage Buffer. An aligned table of
+ * IOTTEs. Each IOTSB has a pagesize, table size, and
+ * virtual address associated with it that must match
+ * a pagesize and table size supported by the underlying
+ * hardware implementation. The alignment requirements
+ * for an IOTSB depend on the pagesize used for that IOTSB.
+ * Each IOTTE in an IOTSB maps one pagesize-sized page.
+ * The size of the IOTSB dictates how large of a virtual
+ * address space the IOTSB is capable of mapping.
+ *
+ * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus
+ * iotsb_handle represents a binding of an IOTSB to a
+ * PCI root complex.
+ *
+ * iotsb_index Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 hold #iottes and bits 47:0 hold iotsb_index.
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+ (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
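As a minimal standalone sketch of this packing (illustration only; the helper name is not part of the patch), the macro simply places #iottes in the upper 16 bits and the IOTSB index in the lower 48:

#include <stdint.h>

/* Mirrors HV_PCI_IOTSB_INDEX_COUNT: #iottes in bits 63:48, index in bits 47:0. */
static inline uint64_t iotsb_index_count(uint64_t iottes, uint64_t iotsb_index)
{
	return (iottes << 48) | iotsb_index;
}

/* Example: 8 IOTTEs starting at IOTSB index 0x20 packs to 0x0008000000000020. */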
+
+/* pci_iotsb_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_CONF
+ * ARG0: devhandle
+ * ARG1: r_addr
+ * ARG2: size
+ * ARG3: pagesize
+ * ARG4: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize
+ * EBADALIGN r_addr is not properly aligned
+ * ENORADDR r_addr is not a valid real address
+ * ETOOMANY No further IOTSBs may be configured
+ * EBUSY Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * iova is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF 0x190
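A hedged sketch of how a guest might drive this service through the pci_sun4v_iotsb_conf() wrapper declared later in this patch; devhandle, table_size and dvma_base are placeholders supplied by the caller, and the real logic lives in pci_sun4v_atu_alloc_iotsb() further down.

	/* Sketch only: allocate a zeroed, aligned IOTSB and register it with HV. */
	u64 iotsb_num;
	unsigned long err;
	unsigned long order = get_order(table_size);
	void *table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!table)
		return -ENOMEM;

	err = pci_sun4v_iotsb_conf(devhandle, __pa(table), table_size,
				   IO_PAGE_SIZE, dvma_base, &iotsb_num);
	if (err) {			/* non-zero status from the hypervisor */
		free_pages((unsigned long)table, order);
		return err;
	}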
+
+/* pci_iotsb_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_INFO
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * RET1: r_addr
+ * RET2: size
+ * RET3: pagesize
+ * RET4: iova
+ * RET5: #bound
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO 0x191
+
+/* pci_iotsb_unconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ * EBUSY The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail.
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF 0x192
+
+/* pci_iotsb_bind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_BIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * EBUSY A PCI function is already bound to an IOTSB at the same
+ * address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND 0x193
+
+/* pci_iotsb_unbind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * ENOMAP The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP.
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND 0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, pci_device, or iova
+ * ENOMAP The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195
+
+/* pci_iotsb_map()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: index_count
+ * ARG3: iotte_attributes
+ * ARG4: io_page_list_p
+ * RET0: status
+ * RET1: #mapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes,
+ * iotsb_index or iotte_attributes
+ * EBADALIGN Improperly aligned io_page_list_p or I/O page
+ * address in the I/O page list.
+ * ENORADDR Invalid io_page_list_p or I/O page address in
+ * the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iottes
+ * and bits 47:0 contain iotsb_index.
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP 0x196
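A minimal sketch of the partial-completion contract described above, using the pci_sun4v_iotsb_map() wrapper declared later in this patch (iommu_batch_flush() below follows the same pattern); devhandle, iotsb_num, entry, prot, npages and pglist are assumed to be set up by the caller.

	/* Sketch only: retry until every page in pglist has an IOTTE. */
	while (npages != 0) {
		long mapped;
		unsigned long ret;

		ret = pci_sun4v_iotsb_map(devhandle, iotsb_num,
					  HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
					  prot, __pa(pglist), &mapped);
		if (ret != HV_EOK)
			return -1;

		entry  += mapped;	/* #mapped may be < npages */
		npages -= mapped;
		pglist += mapped;
	}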
+
+/* pci_iotsb_map_one()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: iotte_attributes
+ * ARG4: r_addr
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index
+ * or iotte_attributes
+ * EBADALIGN Improperly aligned r_addr
+ * ENORADDR Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197
+
+/* pci_iotsb_demap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #unmapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP 0x198
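The same retry pattern applies to demap; a sketch using the pci_sun4v_iotsb_demap() wrapper from this patch (dma_4v_iommu_demap() below is the real implementation), with devhandle, iotsb_num, entry and npages assumed.

	/* Sketch only: keep demapping until all requested IOTTEs are gone. */
	do {
		unsigned long unmapped;
		unsigned long ret;

		ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
					    entry, npages, &unmapped);
		if (ret != HV_EOK)
			break;

		entry  += unmapped;	/* #unmapped may be < npages */
		npages -= unmapped;
	} while (npages != 0);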
+
+/* pci_iotsb_getmap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * RET0: status
+ * RET1: r_addr
+ * RET2: iotte_attributes
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index
+ * ENOMAP No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP 0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #synced
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * Upon a successful return, if #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a
+
/* Logical Domain Channel services. */
#define LDC_CHANNEL_DOWN 0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_ATU 0x0111
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index cd0d69fa7592..f24f356f2503 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -24,8 +24,36 @@ struct iommu_arena {
unsigned int limit;
};
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+ void *table; /* IOTSB table base virtual addr */
+ u64 ra; /* IOTSB table real addr */
+ u64 dvma_size; /* ranges[3].size or OS selected 32G size */
+ u64 dvma_base; /* ranges[3].base */
+ u64 table_size; /* IOTSB table size */
+ u64 page_size; /* IO PAGE size for IOTSB */
+ u32 iotsb_num; /* tsbnum is the same as iotsb_handle */
+};
+
+struct atu_ranges {
+ u64 base;
+ u64 size;
+};
+
+struct atu {
+ struct atu_ranges *ranges;
+ struct atu_iotsb *iotsb;
+ struct iommu_map_table tbl;
+ u64 base;
+ u64 size;
+ u64 dma_addr_mask;
+};
+
struct iommu {
struct iommu_map_table tbl;
+ struct atu *atu;
spinlock_t lock;
u32 dma_addr_mask;
iopte_t *page_table;
diff --git a/arch/sparc/include/asm/kdebug_64.h b/arch/sparc/include/asm/kdebug_64.h
index 04465de8f3b5..867286bf7b1a 100644
--- a/arch/sparc/include/asm/kdebug_64.h
+++ b/arch/sparc/include/asm/kdebug_64.h
@@ -10,6 +10,8 @@ enum die_val {
DIE_OOPS = 1,
DIE_DEBUG, /* ta 0x70 */
DIE_DEBUG_2, /* ta 0x71 */
+ DIE_BPT, /* ta 0x73 */
+ DIE_SSTEP, /* ta 0x74 */
DIE_DIE,
DIE_TRAP,
DIE_TRAP_TL1,
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1fb317fbc0b3..314b66851348 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -826,7 +826,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
#define pgd_page_vaddr(pgd) \
((unsigned long) __va(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
static inline unsigned long pud_large(pud_t pud)
{
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 812fd08f3e62..365d4cb267b4 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -119,7 +119,6 @@ extern struct task_struct *last_task_used_math;
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
extern void (*sparc_idle)(void);
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index ce2595c89471..6448cfc8292f 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -216,7 +216,6 @@ unsigned long get_wchan(struct task_struct *task);
"nop\n\t" \
".previous" \
::: "memory")
-#define cpu_relax_lowlatency() cpu_relax()
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index bac6a946ee00..ca57f08bd3db 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -61,7 +61,10 @@ extern union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
#define force_successful_syscall_return() set_thread_noerror(1)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
-#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
+#define instruction_pointer_set(regs, val) do { \
+ (regs)->tpc = (val); \
+ (regs)->tnpc = (val)+4; \
+ } while (0)
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
static inline int is_syscall_success(struct pt_regs *regs)
{
@@ -77,6 +80,36 @@ unsigned long profile_pc(struct pt_regs *);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif
+
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, magic))
+
+extern int regs_query_register_offset(const char *name);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset number of the register.
+ *
+ * regs_get_register returns the value of the register at @offset
+ * from @regs. The @offset is the offset of the register within
+ * struct pt_regs. If @offset is greater than or equal to
+ * MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned long offset)
+{
+ if (unlikely(offset >= MAX_REG_OFFSET))
+ return 0;
+ if (offset == PT_V9_Y)
+ return *(unsigned int *)((unsigned long)regs + offset);
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->u_regs[UREG_I6];
+}
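A hedged usage sketch for the two helpers above; the register name "i0" passed to regs_query_register_offset() is an assumption, since the name-to-offset table lives outside this hunk.

/* Sketch only: read a named register from a trap handler's pt_regs. */
static unsigned long read_reg_by_name(struct pt_regs *regs, const char *name)
{
	int off = regs_query_register_offset(name);	/* e.g. "i0" (assumed) */

	if (off < 0)
		return 0;
	return regs_get_register(regs, off);
}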
#else /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */
#else /* (defined(__sparc__) && defined(__arch64__)) */
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index d9c5876c6121..8011e79f59c9 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" st %%g0, [%0]"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 87990b7c6b0d..07c9f2e9bf57 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 3d7b925f6516..38a24f257b85 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -180,7 +180,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
/* flag bit 4 is available */
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
-/* flag bit 6 is available */
+#define TIF_UPROBE 6 /* breakpointed or singlestepped */
#define TIF_32BIT 7 /* 32-bit binary */
#define TIF_NOHZ 8 /* in adaptive nohz mode */
#define TIF_SECCOMP 9 /* secure computing */
@@ -199,6 +199,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -209,7 +210,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
_TIF_DO_NOTIFY_RESUME_MASK | \
_TIF_NEED_RESCHED)
-#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
+ _TIF_SIGPENDING | _TIF_UPROBE)
#define is_32bit_task() (test_thread_flag(TIF_32BIT))
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index bec481aaca16..225543000122 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_NUMA
#include <asm/mmzone.h>
+#include <asm/cpudata.h>
static inline int cpu_to_node(int cpu)
{
@@ -44,14 +45,20 @@ int __node_distance(int, int);
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
+#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */
extern cpumask_t cpu_core_map[NR_CPUS];
extern cpumask_t cpu_core_sib_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
+
+/**
+ * Return cores that shares the last level cache.
+ */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_core_sib_cache_map[cpu];
}
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 781b9f1dbdc2..82e7df296abc 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -186,6 +186,12 @@
#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
#endif
+#ifdef CONFIG_UPROBES
+#define UPROBES_TRAP(lvl) TRAP_ARG(uprobe_trap, lvl)
+#else
+#define UPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
+#endif
+
#ifdef CONFIG_KGDB
#define KGDB_TRAP(lvl) TRAP_IRQ(kgdb_trap, lvl)
#else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b68acc563235..5373136c412b 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
return 1;
}
-void __ret_efault(void);
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
@@ -189,55 +188,34 @@ int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(to, size, false);
- ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
- return ret;
+ return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user
unsigned long __must_check ___copy_to_user(void __user *to,
const void *from,
unsigned long size);
-unsigned long copy_to_user_fixup(void __user *to, const void *from,
- unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret;
-
check_object_size(from, size, true);
- ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
+ return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
unsigned long __must_check ___copy_in_user(void __user *to,
const void __user *from,
unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
- unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_in_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_in_user_fixup(to, from, size);
- return ret;
+ return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user
diff --git a/arch/sparc/include/asm/uprobes.h b/arch/sparc/include/asm/uprobes.h
new file mode 100644
index 000000000000..f87aae5a908e
--- /dev/null
+++ b/arch/sparc/include/asm/uprobes.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for sparc
+ *
+ * Copyright (C) 2013 Oracle, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors:
+ * Jose E. Marchesi <jose.marchesi@oracle.com>
+ * Eric Saint Etienne <eric.saint.etienne@oracle.com>
+ */
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES * 2)
+
+#define UPROBE_SWBP_INSN_SIZE 4
+#define UPROBE_SWBP_INSN 0x91d02073 /* ta 0x73 */
+#define UPROBE_STP_INSN 0x91d02074 /* ta 0x74 */
+
+#define ANNUL_BIT (1 << 29)
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u32 ixol;
+ };
+};
+
+struct arch_uprobe_task {
+ u32 saved_tpc;
+ u32 saved_tnpc;
+};
+
+struct task_struct;
+struct notifier_block;
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 31aede3af088..a25dc32f5d6a 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -86,6 +86,8 @@
#define SO_CNX_ADVICE 0x0037
+#define SCM_TIMESTAMPING_OPT_STATS 0x0038
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index fa3c02d41138..aac609889ee4 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -116,4 +116,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
+obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index beba6c11554c..6aa3da152c20 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault, __ret_one, __retl_one
-ENTRY(__ret_efault)
- ret
- restore %g0, -EFAULT, %o0
-ENDPROC(__ret_efault)
-EXPORT_SYMBOL(__ret_efault)
-
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
-ENTRY(__retl_one)
- retl
- mov 1, %o0
-ENDPROC(__retl_one)
-
-ENTRY(__retl_one_fp)
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_fp)
-
-ENTRY(__ret_one_asi)
- wr %g0, ASI_AIUS, %asi
- ret
- restore %g0, 1, %o0
-ENDPROC(__ret_one_asi)
-
-ENTRY(__retl_one_asi)
- wr %g0, ASI_AIUS, %asi
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi)
-
-ENTRY(__retl_one_asi_fp)
- wr %g0, ASI_AIUS, %asi
- VISExitHalf
- retl
- mov 1, %o0
-ENDPROC(__retl_one_asi_fp)
-
ENTRY(__retl_o1)
retl
mov %o1, %o0
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 662500fa555f..267731234ce8 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_SDIO, },
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
+ { .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5c615abff030..9df997995f6b 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -415,7 +415,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
- if (strbuf->strbuf_enabled)
+ if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
@@ -640,7 +640,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
- if (strbuf->strbuf_enabled)
+ if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask >= (1UL << 32UL))
- return 0;
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index b40cec252905..828493329f68 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -13,7 +13,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
#include <asm/iommu.h>
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2344103414d1..6ffaec44931a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -527,7 +527,7 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- if (dir != PCI_DMA_TODEVICE)
+ if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_make_coherent(ba, PAGE_ALIGN(size));
}
@@ -572,7 +572,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
struct scatterlist *sg;
int n;
- if (dir != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
for_each_sg(sgl, sg, nents, n) {
dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 59bbeff55024..07933b9e9ce0 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -13,19 +13,30 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 val;
u32 *insn = (u32 *) (unsigned long) entry->code;
+ u32 val;
if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
+ bool use_v9_branch = false;
+
+ BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
+ if (off <= 0xfffff && off >= -0x100000)
+ use_v9_branch = true;
#endif
+ if (use_v9_branch) {
+ /* WDISP19 - target is . + immed << 2 */
+ /* ba,pt %xcc, . + off */
+ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
+ } else {
+ /* WDISP22 - target is . + immed << 2 */
+ BUG_ON(off > 0x7fffff);
+ BUG_ON(off < -0x800000);
+ /* ba . + off */
+ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
+ }
} else {
val = 0x01000000;
}
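As a worked example of the encodings above: for a forward branch of 8 bytes, off >> 2 == 2, so the v9 form is 0x10680000 | 2 == 0x10680002 (ba,pt %xcc, . + 8) and the v8 form is 0x10800000 | 2 == 0x10800002 (ba . + 8); the disabled state always writes the nop 0x01000000.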
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 33cd171d933e..afcdd5e4f43f 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -349,37 +349,37 @@ void __init leon_init_timers(void)
/* Find GPTIMER Timer Registers base address otherwise bail out. */
nnp = rootnp;
- do {
- np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
- if (!np) {
- np = of_find_node_by_name(nnp, "01_011");
- if (!np)
- goto bad;
- }
- ampopts = 0;
- pp = of_find_property(np, "ampopts", &len);
- if (pp) {
- ampopts = *(int *)pp->value;
- if (ampopts == 0) {
- /* Skip this instance, resource already
- * allocated by other OS */
- nnp = np;
- continue;
- }
+retry:
+ np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
+ if (!np) {
+ np = of_find_node_by_name(nnp, "01_011");
+ if (!np)
+ goto bad;
+ }
+
+ ampopts = 0;
+ pp = of_find_property(np, "ampopts", &len);
+ if (pp) {
+ ampopts = *(int *)pp->value;
+ if (ampopts == 0) {
+ /* Skip this instance, resource already
+ * allocated by other OS */
+ nnp = np;
+ goto retry;
}
+ }
+
+ /* Select Timer-Instance on Timer Core. Default is zero */
+ leon3_gptimer_idx = ampopts & 0x7;
- /* Select Timer-Instance on Timer Core. Default is zero */
- leon3_gptimer_idx = ampopts & 0x7;
-
- pp = of_find_property(np, "reg", &len);
- if (pp)
- leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
- pp->value;
- pp = of_find_property(np, "interrupts", &len);
- if (pp)
- leon3_gptimer_irq = *(unsigned int *)pp->value;
- } while (0);
+ pp = of_find_property(np, "reg", &len);
+ if (pp)
+ leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
+ pp->value;
+ pp = of_find_property(np, "interrupts", &len);
+ if (pp)
+ leon3_gptimer_irq = *(unsigned int *)pp->value;
if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
goto bad;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 11228861d9b4..8a6982dfd733 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
cpu_data(*id).core_id = core_id;
}
-static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
- int sock_id)
+static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
+ int max_cache_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus())
- cpu_data(*id).sock_id = sock_id;
+ if (*id < num_possible_cpus()) {
+ cpu_data(*id).max_cache_id = max_cache_id;
+
+ /**
+ * On systems without explicit socket descriptions, the
+ * socket id is the max_cache_id.
+ */
+ cpu_data(*id).sock_id = max_cache_id;
+ }
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
@@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}
-static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
- int sock_id)
+static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
+ int max_cache_id)
{
- find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+ find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
+ max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
@@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp)
}
}
-static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
u64 mp;
int idx = 1;
int fnd = 0;
- /* Identify unique sockets by looking for cpus backpointed to by
- * shared level n caches.
+ /**
+ * Identify the unique highest level of shared cache by looking for cpus
+ * backpointed to by shared level N caches.
*/
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *cur_lvl;
@@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
if (*cur_lvl != level)
continue;
-
- mark_sock_ids(hp, mp, idx);
+ mark_max_cache_ids(hp, mp, idx);
idx++;
fnd = 1;
}
@@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp)
{
u64 mp;
- /* If machine description exposes sockets data use it.
- * Otherwise fallback to use shared L3 or L2 caches.
+ /**
+ * Find the highest level of shared cache, which on pre-T7
+ * systems is also the socket.
*/
+ if (!set_max_cache_ids_by_cache(hp, 3))
+ set_max_cache_ids_by_cache(hp, 2);
+
+ /* If the machine description exposes socket data, use it. */
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
if (mp != MDESC_NODE_NULL)
- return set_sock_ids_by_socket(hp, mp);
-
- if (!set_sock_ids_by_cache(hp, 3))
- set_sock_ids_by_cache(hp, 2);
+ set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index a9973bb4a1b2..95e73c63c99d 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -42,7 +42,7 @@ static int panic_on_timeout;
*/
atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
-
+static int nmi_init_done;
static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
@@ -153,6 +153,8 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
void stop_nmi_watchdog(void *unused)
{
+ if (!__this_cpu_read(wd_enabled))
+ return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
__this_cpu_write(wd_enabled, 0);
atomic_dec(&nmi_active);
@@ -207,6 +209,9 @@ error:
void start_nmi_watchdog(void *unused)
{
+ if (__this_cpu_read(wd_enabled))
+ return;
+
__this_cpu_write(wd_enabled, 1);
atomic_inc(&nmi_active);
@@ -259,6 +264,8 @@ int __init nmi_init(void)
}
}
+ nmi_init_done = 1;
+
return err;
}
@@ -270,3 +277,38 @@ static int __init setup_nmi_watchdog(char *str)
return 0;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
+
+/*
+ * sparc specific NMI watchdog enable function.
+ * Enables watchdog if it is not enabled already.
+ */
+int watchdog_nmi_enable(unsigned int cpu)
+{
+ if (atomic_read(&nmi_active) == -1) {
+ pr_warn("NMI watchdog cannot be enabled or disabled\n");
+ return -1;
+ }
+
+ /*
+ * The watchdog thread could start even before nmi_init is called.
+ * Just return in that case and let nmi_init finish the init
+ * process first.
+ */
+ if (!nmi_init_done)
+ return 0;
+
+ smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
+
+ return 0;
+}
+/*
+ * sparc specific NMI watchdog disable function.
+ * Disables watchdog if it is not disabled already.
+ */
+void watchdog_nmi_disable(unsigned int cpu)
+{
+ if (atomic_read(&nmi_active) == -1)
+ pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
+ else
+ smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
+}
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index db57d8acdc01..f4daccd12bf5 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
{ .major = 1, .minor = 1 },
};
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
}
/* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+ u64 *pglist = p->pglist;
+ u64 index_count;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
- u64 *pglist = p->pglist;
unsigned long npages = p->npages;
+ unsigned long iotsb_num;
+ unsigned long ret;
+ long num;
/* VPCI maj=1, min=[0,1] only supports read and write */
if (vpci_major < 2)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
- long num;
-
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist));
- if (unlikely(num < 0)) {
- if (printk_ratelimit())
- printk("iommu_batch_flush: IOMMU map of "
- "[%08lx:%08llx:%lx:%lx:%lx] failed with "
- "status %ld\n",
- devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist), num);
- return -1;
+ if (mask <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_map(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages,
+ prot,
+ __pa(pglist));
+ if (unlikely(num < 0)) {
+ pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist),
+ num);
+ return -1;
+ }
+ } else {
+ index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
+ iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+ ret = pci_sun4v_iotsb_map(devhandle,
+ iotsb_num,
+ index_count,
+ prot,
+ __pa(pglist),
+ &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle, iotsb_num,
+ index_count, prot,
+ __pa(pglist), ret);
+ return -1;
+ }
}
-
entry += num;
npages -= num;
pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
return 0;
}
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
- iommu_batch_flush(p);
+ iommu_batch_flush(p, mask);
p->entry = entry;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
return 0;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
+ u64 mask;
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
struct page *page;
void *ret;
long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ mask = dev->coherent_dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
entry);
for (n = 0; n < npages; n++) {
- long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -206,25 +242,72 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+ local_irq_restore(flags);
+ iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
- unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ struct pci_bus *bus_dev)
+{
+ struct pci_dev *pdev;
+ unsigned long err;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int fun;
+
+ list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+ if (pdev->subordinate) {
+ /* No need to bind pci bridge */
+ dma_4v_iotsb_bind(devhandle, iotsb_num,
+ pdev->subordinate);
+ } else {
+ bus = bus_dev->number;
+ device = PCI_SLOT(pdev->devfn);
+ fun = PCI_FUNC(pdev->devfn);
+ err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+ HV_PCI_DEVICE_BUILD(bus,
+ device,
+ fun));
+
+ /* If bind fails for one device it is going to fail
+ * for the rest of the devices because we are sharing
+ * the IOTSB. So in case of failure simply return with
+ * the error.
+ */
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+ dma_addr_t dvma, unsigned long iotsb_num,
+ unsigned long entry, unsigned long npages)
{
- u32 devhandle = *(u32 *)demap_arg;
unsigned long num, flags;
+ unsigned long ret;
local_irq_save(flags);
do {
- num = pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, entry),
- npages);
-
+ if (dvma <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+ } else {
+ ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+ entry, npages, &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+ ret);
+ }
+ }
entry += num;
npages -= num;
} while (npages != 0);
@@ -236,16 +319,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long order, npages, entry;
+ unsigned long iotsb_num;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+ if (dvma <= DMA_BIT_MASK(32)) {
+ tbl = &iommu->tbl;
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ } else {
+ tbl = &atu->tbl;
+ iotsb_num = atu->iotsb->iotsb_num;
+ }
+ entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -257,13 +352,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
- u32 bus_addr, ret;
unsigned long prot;
+ dma_addr_t bus_addr, ret;
long entry;
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
@@ -272,13 +371,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
- bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +398,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = iommu_batch_add(base_paddr);
+ long err = iommu_batch_add(base_paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -310,7 +415,8 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+ local_irq_restore(flags);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_ERROR_CODE;
}
@@ -320,7 +426,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long npages;
+ unsigned long iotsb_num;
long entry;
u32 devhandle;
@@ -332,14 +441,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
- entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+ if (bus_addr <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
+ entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,6 +471,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long base_shift;
long err;
@@ -361,7 +482,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu = dev->archdata.iommu;
if (nelems == 0 || !iommu)
return 0;
-
+ atu = iommu->atu;
+
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
@@ -384,7 +506,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -397,27 +527,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ entry = iommu_tbl_range_alloc(dev, tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
- if (printk_ratelimit())
- printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
- " npages %lx\n", iommu, paddr, npages);
+ pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+ tbl, paddr, npages);
goto iommu_map_failed;
}
- iommu_batch_new_entry(entry);
+ iommu_batch_new_entry(entry, mask);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+ dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
- err = iommu_batch_add(paddr);
+ err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
@@ -452,7 +581,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_next = dma_addr + slen;
}
- err = iommu_batch_end();
+ err = iommu_batch_end(mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
@@ -475,7 +604,7 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +625,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
+ struct atu *atu;
unsigned long flags, entry;
+ unsigned long iotsb_num;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
local_irq_save(flags);
@@ -512,15 +644,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
- struct iommu_map_table *tbl = &iommu->tbl;
+ struct iommu_map_table *tbl;
unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+ if (dma_handle <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* we don't care for legacy iommu */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
entry = ((dma_handle - tbl->table_map_base) >> shift);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+ entry, npages);
+ iommu_tbl_range_free(tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
@@ -581,6 +722,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
return cnt;
}
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ struct atu_iotsb *iotsb;
+ void *table;
+ u64 table_size;
+ u64 iotsb_num;
+ unsigned long order;
+ unsigned long err;
+
+ iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+ if (!iotsb) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ atu->iotsb = iotsb;
+
+ /* calculate size of IOTSB */
+ table_size = (atu->size / IO_PAGE_SIZE) * 8;
+ order = get_order(table_size);
+ table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_failed;
+ }
+ iotsb->table = table;
+ iotsb->ra = __pa(table);
+ iotsb->dvma_size = atu->size;
+ iotsb->dvma_base = atu->base;
+ iotsb->table_size = table_size;
+ iotsb->page_size = IO_PAGE_SIZE;
+
+ /* configure and register IOTSB with HV */
+ err = pci_sun4v_iotsb_conf(pbm->devhandle,
+ iotsb->ra,
+ iotsb->table_size,
+ iotsb->page_size,
+ iotsb->dvma_base,
+ &iotsb_num);
+ if (err) {
+ pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+ iotsb->iotsb_num = iotsb_num;
+
+ err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+ if (err) {
+ pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+
+ return 0;
+
+iotsb_conf_failed:
+ free_pages((unsigned long)table, order);
+table_failed:
+ kfree(iotsb);
+out_err:
+ return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ unsigned long err;
+ const u64 *ranges;
+ u64 map_size, num_iotte;
+ u64 dma_mask;
+ const u32 *page_size;
+ int len;
+
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+ &len);
+ if (!ranges) {
+ pr_err(PFX "No iommu-address-ranges\n");
+ return -EINVAL;
+ }
+
+ page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+ NULL);
+ if (!page_size) {
+ pr_err(PFX "No iommu-pagesizes\n");
+ return -EINVAL;
+ }
+
+ /* There are 4 iommu-address-ranges supported. Each range is a pair of
+ * {base, size}. ranges[0] and ranges[1] are 32bit address space
+ * while ranges[2] and ranges[3] are 64bit space. We want to use the
+ * 64bit address ranges to support 64bit addressing. Because the 'size'
+ * of ranges[2] and ranges[3] is the same, we can select either of them
+ * for mapping. However, because that 'size' is too large for the OS to
+ * allocate an IOTSB, we use a fixed 32G size (ATU_64_SPACE_SIZE),
+ * which is more than enough for all PCIe devices to share.
+ */
+ atu->ranges = (struct atu_ranges *)ranges;
+ atu->base = atu->ranges[3].base;
+ atu->size = ATU_64_SPACE_SIZE;
+
+ /* Create IOTSB */
+ err = pci_sun4v_atu_alloc_iotsb(pbm);
+ if (err) {
+ pr_err(PFX "Error creating ATU IOTSB\n");
+ return err;
+ }
+
+ /* Create ATU iommu map.
+ * One bit represents one iotte in IOTSB table.
+ */
+ dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+ num_iotte = atu->size / IO_PAGE_SIZE;
+ map_size = num_iotte / 8;
+ atu->tbl.table_map_base = atu->base;
+ atu->dma_addr_mask = dma_mask;
+ atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+ if (!atu->tbl.map)
+ return -ENOMEM;
+
+ iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+
+ return 0;
+}
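
pci_sun4v_atu_init() then sizes the allocator bitmap at one bit per IOTTE and derives the DMA mask from the (power-of-two) ATU size. Under the same assumptions as the sketch above, that is a 512 KiB bitmap and a 0x7ffffffff mask; a short stand-alone version of the calculation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t atu_size  = 32ULL << 30;       /* ATU_64_SPACE_SIZE, 32G */
            uint64_t io_page   = 8192;              /* assumed IO_PAGE_SIZE */
            uint64_t num_iotte = atu_size / io_page;
            uint64_t map_size  = num_iotte / 8;     /* one bit per IOTTE */
            uint64_t dma_mask  = atu_size - 1;      /* 32G is already a power of two */

            printf("num_iotte = %llu\n", (unsigned long long)num_iotte);             /* 4194304 */
            printf("map_size  = %llu KiB\n", (unsigned long long)(map_size >> 10));  /* 512 */
            printf("dma_mask  = %#llx\n", (unsigned long long)dma_mask);             /* 0x7ffffffff */
            return 0;
    }
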
+
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1185,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
pci_sun4v_scan_bus(pbm, &op->dev);
+	/* If atu_init fails, it is not a complete failure;
+	 * we can still continue using the legacy iommu.
+	 */
+ if (pbm->iommu->atu) {
+ err = pci_sun4v_atu_init(pbm);
+ if (err) {
+ kfree(pbm->iommu->atu);
+ pbm->iommu->atu = NULL;
+ pr_err(PFX "ATU init failed, err=%d\n", err);
+ }
+ }
+
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
@@ -931,8 +1210,10 @@ static int pci_sun4v_probe(struct platform_device *op)
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
+ struct atu *atu;
u32 devhandle;
int i, err = -ENODEV;
+ static bool hv_atu = true;
dp = op->dev.of_node;
@@ -954,6 +1235,19 @@ static int pci_sun4v_probe(struct platform_device *op)
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
+ err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+ if (err) {
+ /* don't return an error if we fail to register the
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+ pr_err(PFX "Could not register hvapi ATU err=%d\n",
+ err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+ }
+
dma_ops = &sun4v_dma_ops;
}
@@ -991,6 +1285,14 @@ static int pci_sun4v_probe(struct platform_device *op)
}
pbm->iommu = iommu;
+ iommu->atu = NULL;
+ if (hv_atu) {
+ atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+ if (!atu)
+ pr_err(PFX "Could not allocate atu\n");
+ else
+ iommu->atu = atu;
+ }
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
@@ -1001,6 +1303,7 @@ static int pci_sun4v_probe(struct platform_device *op)
return 0;
out_free_iommu:
+ kfree(iommu->atu);
kfree(pbm->iommu);
out_free_controller:
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 5642212390b2..22603a4e48bf 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long valid);
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+ unsigned long ra,
+ unsigned long table_size,
+ unsigned long page_size,
+ unsigned long dvma_base,
+ u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index_iottes,
+ unsigned long io_attributes,
+ unsigned long io_page_list_pa,
+ long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index,
+ unsigned long iottes,
+ unsigned long *demapped);
#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..578f09657916 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
+ /*
+ * %o0: devhandle
+ * %o1: r_addr
+ * %o2: size
+ * %o3: pagesize
+ * %o4: virt
+ * %o5: &iotsb_num/&iotsb_handle
+ *
+ * returns %o0: status
+ * %o1: iotsb_num/iotsb_handle
+ */
+ENTRY(pci_sun4v_iotsb_conf)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_CONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: pci_device
+ *
+ * returns %o0: status
+ */
+ENTRY(pci_sun4v_iotsb_bind)
+ mov HV_FAST_PCI_IOTSB_BIND, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: index_count
+ * %o3: iotte_attributes
+ * %o4: io_page_list_p
+ * %o5: &mapped
+ *
+ * returns %o0: status
+ * %o1: #mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_MAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: iotsb_index
+ * %o3: #iottes
+ * %o4: &demapped
+ *
+ * returns %o0: status
+ * %o1: #demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+ mov HV_FAST_PCI_IOTSB_DEMAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 1836cb965ff8..4b60f385c98f 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -67,9 +67,4 @@ static struct platform_driver power_driver = {
},
};
-static int __init power_init(void)
-{
- return platform_driver_register(&power_driver);
-}
-
-device_initcall(power_init);
+builtin_platform_driver(power_driver);
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index ac082dd8c67d..96494b2ef41f 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -46,6 +46,43 @@
/* #define ALLOW_INIT_TRACING */
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) \
+ {.name = n, .offset = (PT_V9_##r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME("g0", G0),
+ REG_OFFSET_NAME("g1", G1),
+ REG_OFFSET_NAME("g2", G2),
+ REG_OFFSET_NAME("g3", G3),
+ REG_OFFSET_NAME("g4", G4),
+ REG_OFFSET_NAME("g5", G5),
+ REG_OFFSET_NAME("g6", G6),
+ REG_OFFSET_NAME("g7", G7),
+
+ REG_OFFSET_NAME("i0", I0),
+ REG_OFFSET_NAME("i1", I1),
+ REG_OFFSET_NAME("i2", I2),
+ REG_OFFSET_NAME("i3", I3),
+ REG_OFFSET_NAME("i4", I4),
+ REG_OFFSET_NAME("i5", I5),
+ REG_OFFSET_NAME("i6", I6),
+ REG_OFFSET_NAME("i7", I7),
+
+ REG_OFFSET_NAME("tstate", TSTATE),
+ REG_OFFSET_NAME("pc", TPC),
+ REG_OFFSET_NAME("npc", TNPC),
+ REG_OFFSET_NAME("y", Y),
+ REG_OFFSET_NAME("lr", I7),
+
+ REG_OFFSET_END,
+};
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -1107,3 +1144,20 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
if (test_thread_flag(TIF_NOHZ))
user_enter();
}
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
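
regs_query_register_offset() is a plain linear scan of the name/offset table, so callers (typically kprobes/uprobes argument fetching) resolve a register name to a pt_regs byte offset once at setup time. A stand-alone model of the same lookup pattern, with made-up placeholder offsets instead of the real PT_V9_* constants:

    #include <stdio.h>
    #include <string.h>

    struct reg_off { const char *name; int offset; };

    /* Placeholder offsets; the kernel uses the PT_V9_* constants instead. */
    static const struct reg_off table[] = {
            { "g1", 8 }, { "i0", 64 }, { "pc", 136 }, { NULL, 0 },
    };

    static int query_register_offset(const char *name)
    {
            const struct reg_off *r;

            for (r = table; r->name != NULL; r++)
                    if (!strcmp(r->name, name))
                            return r->offset;
            return -1;      /* the kernel returns -EINVAL */
    }

    int main(void)
    {
            printf("pc  -> %d\n", query_register_offset("pc"));
            printf("foo -> %d\n", query_register_offset("foo"));
            return 0;
    }
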
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 5ee930c48f4c..c782c9b716db 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -545,6 +545,8 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
user_exit();
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs, orig_i0);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3035ba6cd31..8182f7caf5b1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
+ [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
+
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
+EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
@@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void)
unsigned int j;
for_each_present_cpu(j) {
+ if (cpu_data(i).max_cache_id ==
+ cpu_data(j).max_cache_id)
+ cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
+
if (cpu_data(i).sock_id == cpu_data(j).sock_id)
cpumask_set_cpu(j, &cpu_core_sib_map[i]);
}
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index fa8e21abb5e0..4808b6d23455 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -221,7 +221,7 @@ static struct device_attribute cpu_core_attrs[] = {
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-static void register_cpu_online(unsigned int cpu)
+static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@@ -231,11 +231,12 @@ static void register_cpu_online(unsigned int cpu)
device_create_file(s, &cpu_core_attrs[i]);
register_mmu_stats(s);
+ return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_cpu_online(unsigned int cpu)
+static int unregister_cpu_online(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
int i;
@@ -243,33 +244,10 @@ static void unregister_cpu_online(unsigned int cpu)
unregister_mmu_stats(s);
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
device_remove_file(s, &cpu_core_attrs[i]);
-}
-#endif
-
-static int sysfs_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned int)(long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- register_cpu_online(cpu);
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- unregister_cpu_online(cpu);
- break;
#endif
- }
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
-};
-
static void __init check_mmu_stats(void)
{
unsigned long dummy1, err;
@@ -294,26 +272,21 @@ static void register_nodes(void)
static int __init topology_init(void)
{
- int cpu;
+ int cpu, ret;
register_nodes();
check_mmu_stats();
- cpu_notifier_register_begin();
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
- if (cpu_online(cpu))
- register_cpu_online(cpu);
}
- __register_cpu_notifier(&sysfs_cpu_nb);
-
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online",
+ register_cpu_online, unregister_cpu_online);
+ WARN_ON(ret < 0);
return 0;
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4094a51b1970..496fa926e1e0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
void bad_trap(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
siginfo_t info;
if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa29e20..7bd8f6556352 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -165,7 +165,7 @@ tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172)
-tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
+tl0_resv173: UPROBES_TRAP(0x173) UPROBES_TRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
diff --git a/arch/sparc/kernel/uprobes.c b/arch/sparc/kernel/uprobes.c
new file mode 100644
index 000000000000..b68314050602
--- /dev/null
+++ b/arch/sparc/kernel/uprobes.c
@@ -0,0 +1,331 @@
+/*
+ * User-space Probes (UProbes) for sparc
+ *
+ * Copyright (C) 2013 Oracle Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors:
+ * Jose E. Marchesi <jose.marchesi@oracle.com>
+ * Eric Saint Etienne <eric.saint.etienne@oracle.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/uprobes.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h> /* For struct task_struct */
+#include <linux/kdebug.h>
+
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+/* Compute the address of the breakpoint instruction and return it.
+ *
+ * Note that uprobe_get_swbp_addr is defined as a weak symbol in
+ * kernel/events/uprobe.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+static void copy_to_page(struct page *page, unsigned long vaddr,
+ const void *src, int len)
+{
+ void *kaddr = kmap_atomic(page);
+
+ memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+ kunmap_atomic(kaddr);
+}
+
+/* Fill in the xol area with the probed instruction followed by the
+ * single-step trap. Some fixups in the copied instruction are
+ * performed at this point.
+ *
+ * Note that uprobe_xol_copy is defined as a weak symbol in
+ * kernel/events/uprobe.c.
+ */
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ const u32 stp_insn = UPROBE_STP_INSN;
+ u32 insn = *(u32 *) src;
+
+	/* Branches annulling their delay slot must be fixed to not do
+	 * so. By clearing the annul bit on these instructions we can be
+	 * sure the single-step breakpoint in the XOL slot will be
+	 * executed.
+	 */
+
+ u32 op = (insn >> 30) & 0x3;
+ u32 op2 = (insn >> 22) & 0x7;
+
+ if (op == 0 &&
+ (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) &&
+ (insn & ANNUL_BIT) == ANNUL_BIT)
+ insn &= ~ANNUL_BIT;
+
+ copy_to_page(page, vaddr, &insn, len);
+ copy_to_page(page, vaddr+len, &stp_insn, 4);
+}
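
arch_uprobe_copy_ixol() leaves the XOL slot holding the (possibly de-annulled) original instruction immediately followed by the single-step trap, so the task executes exactly one instruction out of line before trapping back. A stand-alone sketch of the layout and the annul-bit fixup; the ANNUL_BIT position and the UPROBE_STP_INSN value below are assumptions standing in for the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define ANNUL_BIT       (1u << 29)      /* 'a' bit of SPARC format-2 branches (assumption) */
    #define UPROBE_STP_INSN 0x91d02073u     /* placeholder single-step trap encoding */

    /* Mirror of the fixup: clear the annul bit on conditional branches. */
    static uint32_t fixup_insn(uint32_t insn)
    {
            uint32_t op  = (insn >> 30) & 0x3;
            uint32_t op2 = (insn >> 22) & 0x7;

            if (op == 0 &&
                (op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) &&
                (insn & ANNUL_BIT))
                    insn &= ~ANNUL_BIT;
            return insn;
    }

    int main(void)
    {
            uint32_t slot[2];
            uint32_t branch_a = 0x32800005u;   /* example: annulled conditional branch */

            slot[0] = fixup_insn(branch_a);    /* original insn, annul bit cleared */
            slot[1] = UPROBE_STP_INSN;         /* followed by the single-step trap */

            printf("slot: %08x %08x\n", (unsigned)slot[0], (unsigned)slot[1]);
            return 0;
    }
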
+
+
+/* Instruction analysis/validity.
+ *
+ * This function returns 0 on success or a negative number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
+ struct mm_struct *mm, unsigned long addr)
+{
+ /* Any unsupported instruction? Then return -EINVAL */
+ return 0;
+}
+
+/* If INSN is a relative control transfer instruction, return the
+ * corrected branch destination value.
+ *
+ * Note that regs->tpc and regs->tnpc still hold the values of the
+ * program counters at the time of the single-step trap due to the
+ * execution of the UPROBE_STP_INSN at utask->xol_vaddr + 4.
+ *
+ */
+static unsigned long relbranch_fixup(u32 insn, struct uprobe_task *utask,
+ struct pt_regs *regs)
+{
+ /* Branch not taken, no mods necessary. */
+ if (regs->tnpc == regs->tpc + 0x4UL)
+ return utask->autask.saved_tnpc + 0x4UL;
+
+ /* The three cases are call, branch w/prediction,
+ * and traditional branch.
+ */
+ if ((insn & 0xc0000000) == 0x40000000 ||
+ (insn & 0xc1c00000) == 0x00400000 ||
+ (insn & 0xc1c00000) == 0x00800000) {
+ unsigned long real_pc = (unsigned long) utask->vaddr;
+ unsigned long ixol_addr = utask->xol_vaddr;
+
+ /* The instruction did all the work for us
+ * already, just apply the offset to the correct
+ * instruction location.
+ */
+ return (real_pc + (regs->tnpc - ixol_addr));
+ }
+
+ /* It is jmpl or some other absolute PC modification instruction,
+ * leave NPC as-is.
+ */
+ return regs->tnpc;
+}
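
The three opcode masks in relbranch_fixup() pick out the PC-relative transfers: call (op=01) and the two conditional-branch formats (op=00 with op2=001 or 010); everything else, such as jmpl, already left %tnpc absolute. A compact stand-alone classifier using the same masks, with illustrative example encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Classify the control-transfer forms relbranch_fixup() treats as PC-relative. */
    static const char *classify(uint32_t insn)
    {
            if ((insn & 0xc0000000) == 0x40000000)
                    return "call (op=01, PC-relative)";
            if ((insn & 0xc1c00000) == 0x00400000)
                    return "branch with prediction (op=00, op2=001)";
            if ((insn & 0xc1c00000) == 0x00800000)
                    return "traditional branch (op=00, op2=010)";
            return "absolute transfer (jmpl etc.) - tnpc left as-is";
    }

    int main(void)
    {
            printf("%s\n", classify(0x40000123u));   /* call */
            printf("%s\n", classify(0x10680004u));   /* branch with prediction */
            printf("%s\n", classify(0x81c3e008u));   /* jmpl %o7+8 */
            return 0;
    }
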
+
+/* If INSN is an instruction which writes its PC location
+ * into a destination register, fix that up.
+ */
+static int retpc_fixup(struct pt_regs *regs, u32 insn,
+ unsigned long real_pc)
+{
+ unsigned long *slot = NULL;
+ int rc = 0;
+
+ /* Simplest case is 'call', which always uses %o7 */
+ if ((insn & 0xc0000000) == 0x40000000)
+ slot = &regs->u_regs[UREG_I7];
+
+ /* 'jmpl' encodes the register inside of the opcode */
+ if ((insn & 0xc1f80000) == 0x81c00000) {
+ unsigned long rd = ((insn >> 25) & 0x1f);
+
+ if (rd <= 15) {
+ slot = &regs->u_regs[rd];
+ } else {
+ unsigned long fp = regs->u_regs[UREG_FP];
+ /* Hard case, it goes onto the stack. */
+ flushw_all();
+
+ rd -= 16;
+ if (test_thread_64bit_stack(fp)) {
+ unsigned long __user *uslot =
+ (unsigned long __user *) (fp + STACK_BIAS) + rd;
+ rc = __put_user(real_pc, uslot);
+ } else {
+ unsigned int __user *uslot = (unsigned int
+ __user *) fp + rd;
+ rc = __put_user((u32) real_pc, uslot);
+ }
+ }
+ }
+ if (slot != NULL)
+ *slot = real_pc;
+ return rc;
+}
+
+/* Single-stepping can be avoided for certain instructions: NOPs and
+ * instructions that can be emulated. This function determines
+ * whether the instruction where the uprobe is installed falls in one
+ * of these cases and emulates it.
+ *
+ * This function returns true if the single-stepping can be skipped,
+ * false otherwise.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ /* We currently only emulate NOP instructions.
+ */
+
+ if (auprobe->ixol == (1 << 24)) {
+ regs->tnpc += 4;
+ regs->tpc += 4;
+ return true;
+ }
+
+ return false;
+}
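
The only instruction emulated without single-stepping is NOP, whose SPARC encoding is sethi 0, %g0, i.e. 0x01000000 == 1 << 24; emulation then amounts to advancing both program counters by one instruction. A tiny stand-alone illustration of that check:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_regs { uint64_t tpc, tnpc; };

    /* Skip single-stepping when the probed instruction is a NOP (sethi 0, %g0). */
    static int skip_sstep(uint32_t insn, struct fake_regs *regs)
    {
            if (insn == (1u << 24)) {       /* 0x01000000 */
                    regs->tpc  += 4;
                    regs->tnpc += 4;
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct fake_regs r = { .tpc = 0x100000, .tnpc = 0x100004 };

            if (skip_sstep(0x01000000u, &r))
                    printf("nop emulated: tpc=%#llx tnpc=%#llx\n",
                           (unsigned long long)r.tpc, (unsigned long long)r.tnpc);
            return 0;
    }
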
+
+/* Prepare to execute out of line. At this point
+ * current->utask->xol_vaddr points to an allocated XOL slot properly
+ * initialized with the original instruction and the single-stepping
+ * trap instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ struct arch_uprobe_task *autask = &current->utask->autask;
+
+ /* Save the current program counters so they can be restored
+ * later.
+ */
+ autask->saved_tpc = regs->tpc;
+ autask->saved_tnpc = regs->tnpc;
+
+ /* Adjust PC and NPC so the first instruction in the XOL slot
+ * will be executed by the user task.
+ */
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ return 0;
+}
+
+/* Prepare to resume execution after the single-step. Called after
+ * single-stepping. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.
+ *
+ * This function returns 0 on success, any other number on error.
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+ struct arch_uprobe_task *autask = &utask->autask;
+ u32 insn = auprobe->ixol;
+ int rc = 0;
+
+ if (utask->state == UTASK_SSTEP_ACK) {
+ regs->tnpc = relbranch_fixup(insn, utask, regs);
+ regs->tpc = autask->saved_tnpc;
+ rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
+ } else {
+ regs->tnpc = utask->vaddr+4;
+ regs->tpc = autask->saved_tnpc+4;
+ }
+ return rc;
+}
+
+/* Handler for uprobe traps. This is called from the traps table and
+ * triggers the proper die notification.
+ */
+asmlinkage void uprobe_trap(struct pt_regs *regs,
+ unsigned long trap_level)
+{
+ BUG_ON(trap_level != 0x173 && trap_level != 0x174);
+
+ /* We are only interested in user-mode code. Uprobe traps
+ * shall not be present in kernel code.
+ */
+ if (!user_mode(regs)) {
+ local_irq_enable();
+ bad_trap(regs, trap_level);
+ return;
+ }
+
+ /* trap_level == 0x173 --> ta 0x73
+ * trap_level == 0x174 --> ta 0x74
+ */
+ if (notify_die((trap_level == 0x173) ? DIE_BPT : DIE_SSTEP,
+ (trap_level == 0x173) ? "bpt" : "sstep",
+ regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
+ bad_trap(regs, trap_level);
+}
+
+/* Callback routine for handling die notifications.
+ */
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ int ret = NOTIFY_DONE;
+ struct die_args *args = (struct die_args *)data;
+
+ /* We are only interested in userspace traps */
+ if (args->regs && !user_mode(args->regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BPT:
+ if (uprobe_pre_sstep_notifier(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_SSTEP:
+ if (uprobe_post_sstep_notifier(args->regs))
+ ret = NOTIFY_STOP;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* This function gets called when a XOL instruction either gets
+ * trapped or the thread has a fatal signal, so reset the instruction
+ * pointer to its probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+/* If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a single-stepped
+ * instruction jumps back to its own address.
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ return false;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr = regs->u_regs[UREG_I7];
+
+ regs->u_regs[UREG_I7] = trampoline_vaddr-8;
+
+ return orig_ret_vaddr + 8;
+}
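
On SPARC a call leaves its own address in %o7 and the callee returns to %o7 + 8, so hijacking the return address means storing trampoline_vaddr - 8 into %o7 and reporting the original target as saved %o7 + 8. A minimal model of that bookkeeping, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the %o7 hijack: returns the original return target. */
    static uint64_t hijack_return_addr(uint64_t trampoline, uint64_t *o7)
    {
            uint64_t orig = *o7;

            *o7 = trampoline - 8;   /* callee will return to (trampoline - 8) + 8 */
            return orig + 8;
    }

    int main(void)
    {
            uint64_t o7 = 0x400100;                 /* address of the original call */
            uint64_t orig_ret = hijack_return_addr(0x7f0000, &o7);

            printf("callee now returns to %#llx, original target was %#llx\n",
                   (unsigned long long)(o7 + 8), (unsigned long long)orig_ret);
            return 0;
    }
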
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6b1406..69a439fa2fc1 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
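
The extra macro argument names a per-site fixup routine that, when the marked load or store faults, computes how many bytes were left uncopied and returns that count, which is exactly the contract callers of copy_from_user()/copy_to_user() rely on. A user-space-flavoured sketch of that contract; memcpy_faultable() is a hypothetical stand-in for the assembler routines:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    /* Hypothetical stand-in: copies up to 'fault_at' bytes, then "faults"
     * and returns the number of bytes left, like the EX_LD/EX_ST fixups do. */
    static size_t memcpy_faultable(void *dst, const void *src, size_t len,
                                   size_t fault_at)
    {
            size_t done = len < fault_at ? len : fault_at;

            memcpy(dst, src, done);
            return len - done;      /* bytes NOT copied */
    }

    int main(void)
    {
            char src[64] = "source data", dst[64];
            size_t left = memcpy_faultable(dst, src, sizeof(src), 16);

            if (left)
                    printf("short copy: %zu bytes not copied\n", left);
            return 0;
    }
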
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e1afc7..9947427ce354 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee94851..059ea24ad73d 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -4,21 +4,18 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -45,6 +42,29 @@
.register %g3,#scratch
.text
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(GEN_retl_o4_1)
+ add %o4, %o2, %o4
+ retl
+ add %o4, 1, %o0
+ENDPROC(GEN_retl_o4_1)
+ENTRY(GEN_retl_g1_8)
+ add %g1, %o2, %g1
+ retl
+ add %g1, 8, %o0
+ENDPROC(GEN_retl_g1_8)
+ENTRY(GEN_retl_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(GEN_retl_o2_4)
+ENTRY(GEN_retl_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(GEN_retl_o2_1)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
+ EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
- EX_LD(LOAD(ldx, %o1, %g2))
- EX_ST(STORE(stx, %g2, %o0))
+ EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
+ EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 885f00e81d1a..69912d2f8b54 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
-lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o
lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index d5242b8c4f94..b79a6998d87c 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4e962d993b10..dcec55f254ab 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index d5f585df2f3f..c629dbd121b6 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -32,21 +33,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -140,45 +137,110 @@
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
- EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
- EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
- EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
- EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
- EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
- EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
- EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
+ EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
+ EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+ENTRY(NG2_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG2_retl_o2)
+ENTRY(NG2_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG2_retl_o2_plus_1)
+ENTRY(NG2_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG2_retl_o2_plus_4)
+ENTRY(NG2_retl_o2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %o2, 8, %o0
+ENDPROC(NG2_retl_o2_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_1)
+ add %o4, 1, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_1)
+ENTRY(NG2_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_8)
+ENTRY(NG2_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_plus_o4_plus_16)
+ENTRY(NG2_retl_o2_plus_g1_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
+ add %g1, 64, %g1
+ ba,pt %xcc, __restore_fp
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
+ENTRY(NG2_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG2_retl_o2_plus_g1_plus_1)
+ENTRY(NG2_retl_o2_and_7_plus_o4)
+ and %o2, 7, %o2
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4)
+ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
+ and %o2, 7, %o2
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %o4, %o4 ! bytes to align dst
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
- EX_LD_FP(LOAD_BLK(%g2, %f0))
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+ EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
- EX_LD_FP(LOAD_BLK(%o4, %f16))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
+ EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
190:
-1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
+1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
subcc %g1, 64, %g1
- EX_LD_FP(LOAD_BLK(%o4, %f0))
- EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
+ EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
+ EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
@@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
+ EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE))
+ EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, GLOBAL_SPARE
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
+ EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
subcc %o4, 0x8, %o4
srlx %g3, GLOBAL_SPARE, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE))
+ EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 2e8ee7ad07a9..16a286c1a528 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index be0bf4590df8..6b0276ffc858 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_asi_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 8e13ee1f4454..75bb93b1437f 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -46,22 +47,19 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
+#define EX_ST_FP(x,y) x
#endif
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
-#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
@@ -94,6 +92,158 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi_fp:
+ VISExitHalf
+__restore_asi:
+ retl
+ wr %g0, ASI_AIUS, %asi
+
+ENTRY(NG4_retl_o2)
+ ba,pt %xcc, __restore_asi
+ mov %o2, %o0
+ENDPROC(NG4_retl_o2)
+ENTRY(NG4_retl_o2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %o2, 1, %o0
+ENDPROC(NG4_retl_o2_plus_1)
+ENTRY(NG4_retl_o2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %o2, 4, %o0
+ENDPROC(NG4_retl_o2_plus_4)
+ENTRY(NG4_retl_o2_plus_o5)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5)
+ENTRY(NG4_retl_o2_plus_o5_plus_4)
+ add %o5, 4, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_4)
+ENTRY(NG4_retl_o2_plus_o5_plus_8)
+ add %o5, 8, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_8)
+ENTRY(NG4_retl_o2_plus_o5_plus_16)
+ add %o5, 16, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_16)
+ENTRY(NG4_retl_o2_plus_o5_plus_24)
+ add %o5, 24, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_24)
+ENTRY(NG4_retl_o2_plus_o5_plus_32)
+ add %o5, 32, %o5
+ ba,pt %xcc, __restore_asi
+ add %o2, %o5, %o0
+ENDPROC(NG4_retl_o2_plus_o5_plus_32)
+ENTRY(NG4_retl_o2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1)
+ENTRY(NG4_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_1)
+ENTRY(NG4_retl_o2_plus_g1_plus_8)
+ add %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %o2, %g1, %o0
+ENDPROC(NG4_retl_o2_plus_g1_plus_8)
+ENTRY(NG4_retl_o2_plus_o4)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4)
+ENTRY(NG4_retl_o2_plus_o4_plus_8)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8)
+ENTRY(NG4_retl_o2_plus_o4_plus_16)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16)
+ENTRY(NG4_retl_o2_plus_o4_plus_24)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24)
+ENTRY(NG4_retl_o2_plus_o4_plus_32)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32)
+ENTRY(NG4_retl_o2_plus_o4_plus_40)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40)
+ENTRY(NG4_retl_o2_plus_o4_plus_48)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48)
+ENTRY(NG4_retl_o2_plus_o4_plus_56)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56)
+ENTRY(NG4_retl_o2_plus_o4_plus_64)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64)
+ENTRY(NG4_retl_o2_plus_o4_fp)
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
+ add %o4, 8, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
+ add %o4, 16, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
+ add %o4, 24, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
+ add %o4, 32, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
+ add %o4, 40, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
+ add %o4, 48, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
+ add %o4, 56, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
+ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
+ add %o4, 64, %o4
+ ba,pt %xcc, __restore_asi_fp
+ add %o2, %o4, %o0
+ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
+#endif
.align 64
.globl FUNC_NAME
@@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 51f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g2, %o0 - 0x08))
+ EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
add %o1, 0x40, %o1
- EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
+ EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
- EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
- EX_ST(STORE_INIT(%g1, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
- EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
- EX_ST(STORE_INIT(%o5, %o0))
+ EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g2, %o0))
+ EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(%g3, %o0))
+ EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
- EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+ EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
- EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
-1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
+1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
- EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
- EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
- EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
- EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
- EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
- EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
+ EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
+ EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
- EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
- EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
- EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
- EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
- EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
- EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
- EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
+ EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
+ EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
+ EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
+ EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
+ EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
+ EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
+ EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
+ EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
- EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
- EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
- EX_ST(STORE(stx, %g1, %o0 + 0x00))
- EX_ST(STORE(stx, %g2, %o0 + 0x08))
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
- EX_ST(STORE(stx, %o4, %o0 + 0x18))
+ EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
+ EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
- EX_ST(STORE(stx, %g1, %o0 - 0x08))
+ EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
@@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 2f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01))
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
@@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
@@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
- EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+ EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x00))
- EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x01))
- EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
+ EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
ba,pt %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x02))
+ EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
@@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
- EX_ST(STORE(stw, %g1, %o0 - 0x04))
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
- EX_ST(STORE(stb, %g1, %o0 - 0x01))
+ EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
.size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 5d1e4d1ac21e..9cd42fcbc781 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index ff630dcb273c..5c358afd464e 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -3,11 +3,11 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __ret_one_asi;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index 96a14caf6966..d88c4ed50a00 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
@@ -27,15 +28,11 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST(x,y) x
#endif
#ifndef LOAD
@@ -79,6 +76,92 @@
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_asi:
+ ret
+ wr %g0, ASI_AIUS, %asi
+ restore
+ENTRY(NG_ret_i2_plus_i4_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i5, %i0
+ENDPROC(NG_ret_i2_plus_i4_plus_1)
+ENTRY(NG_ret_i2_plus_g1)
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1)
+ENTRY(NG_ret_i2_plus_g1_minus_8)
+ sub %g1, 8, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_8)
+ENTRY(NG_ret_i2_plus_g1_minus_16)
+ sub %g1, 16, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_16)
+ENTRY(NG_ret_i2_plus_g1_minus_24)
+ sub %g1, 24, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_24)
+ENTRY(NG_ret_i2_plus_g1_minus_32)
+ sub %g1, 32, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_32)
+ENTRY(NG_ret_i2_plus_g1_minus_40)
+ sub %g1, 40, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_40)
+ENTRY(NG_ret_i2_plus_g1_minus_48)
+ sub %g1, 48, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_48)
+ENTRY(NG_ret_i2_plus_g1_minus_56)
+ sub %g1, 56, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_minus_56)
+ENTRY(NG_ret_i2_plus_i4)
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_minus_8)
+ sub %i4, 8, %i4
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENTRY(NG_ret_i2_plus_8)
+ ba,pt %xcc, __restore_asi
+ add %i2, 8, %i0
+ENDPROC(NG_ret_i2_plus_8)
+ENTRY(NG_ret_i2_plus_4)
+ ba,pt %xcc, __restore_asi
+ add %i2, 4, %i0
+ENDPROC(NG_ret_i2_plus_4)
+ENTRY(NG_ret_i2_plus_1)
+ ba,pt %xcc, __restore_asi
+ add %i2, 1, %i0
+ENDPROC(NG_ret_i2_plus_1)
+ENTRY(NG_ret_i2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ ba,pt %xcc, __restore_asi
+ add %i2, %g1, %i0
+ENDPROC(NG_ret_i2_plus_g1_plus_1)
+ENTRY(NG_ret_i2)
+ ba,pt %xcc, __restore_asi
+ mov %i2, %i0
+ENDPROC(NG_ret_i2)
+ENTRY(NG_ret_i2_and_7_plus_i4)
+ and %i2, 7, %i2
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %g0, %i4, %i4 ! bytes to align dst
sub %i2, %i4, %i2
1: subcc %i4, 1, %i4
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %o0))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
+ EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
and %i4, 0x7, GLOBAL_SPARE
sll GLOBAL_SPARE, 3, GLOBAL_SPARE
mov 64, %i5
- EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
sub %i5, GLOBAL_SPARE, %i5
mov 16, %o4
mov 32, %o5
@@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
srlx WORD3, PRE_SHIFT, TMP; \
or WORD2, TMP, WORD2;
-8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g2, %o0 + 0x00))
- EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g2, %o0 + 0x20))
- EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 8b
@@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
ba,pt %XCC, 60f
add %i1, %i4, %i1
-9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
- EX_ST(STORE_INIT(%g3, %o0 + 0x00))
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
- EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%g3, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
- EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 9b
@@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o3, %o0 + 0x10))
- EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
- EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%o2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o3, %o0 + 0x30))
- EX_ST(STORE_INIT(%o4, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
mov 32, %g2
mov 48, %g3
mov 64, %o1
-1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
- EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
- EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
- EX_ST(STORE_INIT(%o2, %o0 + 0x10))
- EX_ST(STORE_INIT(%o3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
+ EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
add %i1, 64, %i1
- EX_ST(STORE_INIT(%o4, %o0 + 0x20))
- EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%o2, %o0 + 0x30))
- EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
+ EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
- EX_LD(LOAD(ldx, %i1, %o4))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
add %i1, 0x08, %i1
- EX_LD(LOAD(ldx, %i1, %g1))
+ EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
sub %i1, 0x08, %i1
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
add %i1, 0x8, %i1
- EX_ST(STORE(stx, %g1, %i1 + %i3))
+ EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x8, %i2
- EX_LD(LOAD(ldx, %i1, %o4))
- EX_ST(STORE(stx, %o4, %i1 + %i3))
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
add %i1, 0x8, %i1
1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x4, %i2
- EX_LD(LOAD(lduw, %i1, %i5))
- EX_ST(STORE(stw, %i5, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
add %i1, 0x4, %i1
1: cmp %i2, 0
be,pt %XCC, 85f
@@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %i1, %i5))
- EX_ST(STORE(stb, %i5, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %i1, 1, %i1
@@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
8: mov 64, %i3
andn %i1, 0x7, %i1
- EX_LD(LOAD(ldx, %i1, %g2))
+ EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
sub %i3, %g1, %i3
andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
1: add %i1, 0x8, %i1
- EX_LD(LOAD(ldx, %i1, %g3))
+ EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
- EX_ST(STORE(stx, %i5, %o0))
+ EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
1:
subcc %i2, 4, %i2
- EX_LD(LOAD(lduw, %i1, %g1))
- EX_ST(STORE(stw, %g1, %i1 + %i3))
+ EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
+ EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
bgu,pt %XCC, 1b
add %i1, 4, %i1
@@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
.align 32
90:
subcc %i2, 1, %i2
- EX_LD(LOAD(ldub, %i1, %g1))
- EX_ST(STORE(stb, %g1, %i1 + %i3))
+ EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
+ EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
bgu,pt %XCC, 90b
add %i1, 1, %i1
ret
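
In the NGmemcpy.S hunks above, the stubs NG_ret_i2_plus_g1 through NG_ret_i2_plus_g1_minus_56 differ only in how much they subtract from %g1 before adding the tail count in %i2: %g1 still includes the whole current 64-byte block, and each 8-byte store that has already landed reduces what is still owed. The exact pairing can be read off the hunks; the helper below is only a hedged model of the bookkeeping, not kernel code.

/* Hedged sketch of the bookkeeping behind NG_ret_i2_plus_g1_minus_N.
 * remaining_after_fault() is a made-up helper: it shows why the fixup for
 * a store that follows k completed 8-byte stores of a 64-byte block
 * subtracts 8*k from the outstanding loop count before adding the tail. */
#include <stdio.h>

static unsigned long remaining_after_fault(unsigned long i2 /* tail bytes */,
					   unsigned long g1 /* loop bytes left,
							       incl. this block */,
					   unsigned int k   /* stores completed
							       in this block */)
{
	return i2 + (g1 - 8UL * k);
}

int main(void)
{
	/* 256 bytes still owed by the loop, 5 tail bytes, fault on the store
	 * at offset 0x18 (three 8-byte stores already landed), which the
	 * diff pairs with NG_ret_i2_plus_g1_minus_24. */
	printf("%lu\n", remaining_after_fault(5, 256, 3));   /* 237 */
	return 0;
}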
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index ecc5692fa2b4..bb6ff73229e3 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index 9eea392e44d4..ed92ce739558 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 97e1b211090c..4f0d50b33a72 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -5,6 +5,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
@@ -24,21 +25,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -79,53 +76,169 @@
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
-#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
- EX_LD_FP(LOAD_BLK(%src, %fdest)); \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
- add %src, 0x40, %src; \
- subcc %len, 0x40, %len; \
- be,pn %xcc, jmptgt; \
- add %dest, 0x40, %dest; \
-
-#define LOOP_CHUNK1(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
-#define LOOP_CHUNK2(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
-#define LOOP_CHUNK3(src, dest, len, branch_dest) \
- MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
+ EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
+ add %src, 0x40, %src; \
+ subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
+ be,pn %xcc, jmptgt; \
+ add %dest, 0x40, %dest; \
+
+#define LOOP_CHUNK1(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
+#define LOOP_CHUNK2(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
+#define LOOP_CHUNK3(src, dest, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
- EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
+ EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
-#define FINISH_VISCHUNK(dest, f0, f1, left) \
- subcc %left, 8, %left;\
- bl,pn %xcc, 95f; \
- faligndata %f0, %f1, %f48; \
- EX_ST_FP(STORE(std, %f48, %dest)); \
+#define FINISH_VISCHUNK(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
+ faligndata %f0, %f1, %f48; \
+ EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
add %dest, 8, %dest;
-#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
- subcc %left, 8, %left; \
- bl,pn %xcc, 95f; \
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
+ subcc %g3, 8, %g3; \
+ bl,pn %xcc, 95f; \
fsrc2 %f0, %f1;
-#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
- UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
+#define UNEVEN_VISCHUNK(dest, f0, f1) \
+ UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+ENTRY(U1_g1_1_fp)
+ VISExitHalf
+ add %g1, 1, %g1
+ add %g1, %g2, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1_fp)
+ENTRY(U1_g2_0_fp)
+ VISExitHalf
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_0_fp)
+ENTRY(U1_g2_8_fp)
+ VISExitHalf
+ add %g2, 8, %g2
+ retl
+ add %g2, %o2, %o0
+ENDPROC(U1_g2_8_fp)
+ENTRY(U1_gs_0_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_0_fp)
+ENTRY(U1_gs_80_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_80_fp)
+ENTRY(U1_gs_40_fp)
+ VISExitHalf
+ add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
+ add %GLOBAL_SPARE, %g3, %o0
+ retl
+ add %o0, %o2, %o0
+ENDPROC(U1_gs_40_fp)
+ENTRY(U1_g3_0_fp)
+ VISExitHalf
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_0_fp)
+ENTRY(U1_g3_8_fp)
+ VISExitHalf
+ add %g3, 8, %g3
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_8_fp)
+ENTRY(U1_o2_0_fp)
+ VISExitHalf
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0_fp)
+ENTRY(U1_o2_1_fp)
+ VISExitHalf
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1_fp)
+ENTRY(U1_gs_0)
+ VISExitHalf
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0)
+ENTRY(U1_gs_8)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x8, %o0
+ENDPROC(U1_gs_8)
+ENTRY(U1_gs_10)
+ VISExitHalf
+ add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, 0x10, %o0
+ENDPROC(U1_gs_10)
+ENTRY(U1_o2_0)
+ retl
+ mov %o2, %o0
+ENDPROC(U1_o2_0)
+ENTRY(U1_o2_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U1_o2_8)
+ENTRY(U1_o2_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U1_o2_4)
+ENTRY(U1_o2_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U1_o2_1)
+ENTRY(U1_g1_0)
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_0)
+ENTRY(U1_g1_1)
+ add %g1, 1, %g1
+ retl
+ add %g1, %o2, %o0
+ENDPROC(U1_g1_1)
+ENTRY(U1_gs_0_o2_adj)
+ and %o2, 7, %o2
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_0_o2_adj)
+ENTRY(U1_gs_8_o2_adj)
+ and %o2, 7, %o2
+ add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
+ retl
+ add %GLOBAL_SPARE, %o2, %o0
+ENDPROC(U1_gs_8_o2_adj)
+#endif
+
.align 64
.globl FUNC_NAME
@@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
+ EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
- EX_LD_FP(LOAD_BLK(%o1, %f0))
+ EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
add %o1, 0x40, %o1
add %g1, %g3, %g1
- EX_LD_FP(LOAD_BLK(%o1, %f16))
+ EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
- EX_LD_FP(LOAD_BLK(%o1, %f32))
+ EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
@@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
@@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 56f)
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
@@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 57f)
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
@@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 58f)
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
@@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 59f)
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
@@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 60f)
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
@@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 61f)
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
@@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
STORE_JUMP(o0, f48, 62f)
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
- LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+ LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
- LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+ LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
- LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+ LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
@@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f)
-40: FINISH_VISCHUNK(o0, f0, f2, g3)
-41: FINISH_VISCHUNK(o0, f2, f4, g3)
-42: FINISH_VISCHUNK(o0, f4, f6, g3)
-43: FINISH_VISCHUNK(o0, f6, f8, g3)
-44: FINISH_VISCHUNK(o0, f8, f10, g3)
-45: FINISH_VISCHUNK(o0, f10, f12, g3)
-46: FINISH_VISCHUNK(o0, f12, f14, g3)
-47: UNEVEN_VISCHUNK(o0, f14, f0, g3)
-48: FINISH_VISCHUNK(o0, f16, f18, g3)
-49: FINISH_VISCHUNK(o0, f18, f20, g3)
-50: FINISH_VISCHUNK(o0, f20, f22, g3)
-51: FINISH_VISCHUNK(o0, f22, f24, g3)
-52: FINISH_VISCHUNK(o0, f24, f26, g3)
-53: FINISH_VISCHUNK(o0, f26, f28, g3)
-54: FINISH_VISCHUNK(o0, f28, f30, g3)
-55: UNEVEN_VISCHUNK(o0, f30, f0, g3)
-56: FINISH_VISCHUNK(o0, f32, f34, g3)
-57: FINISH_VISCHUNK(o0, f34, f36, g3)
-58: FINISH_VISCHUNK(o0, f36, f38, g3)
-59: FINISH_VISCHUNK(o0, f38, f40, g3)
-60: FINISH_VISCHUNK(o0, f40, f42, g3)
-61: FINISH_VISCHUNK(o0, f42, f44, g3)
-62: FINISH_VISCHUNK(o0, f44, f46, g3)
-63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
-
-93: EX_LD_FP(LOAD(ldd, %o1, %f2))
+40: FINISH_VISCHUNK(o0, f0, f2)
+41: FINISH_VISCHUNK(o0, f2, f4)
+42: FINISH_VISCHUNK(o0, f4, f6)
+43: FINISH_VISCHUNK(o0, f6, f8)
+44: FINISH_VISCHUNK(o0, f8, f10)
+45: FINISH_VISCHUNK(o0, f10, f12)
+46: FINISH_VISCHUNK(o0, f12, f14)
+47: UNEVEN_VISCHUNK(o0, f14, f0)
+48: FINISH_VISCHUNK(o0, f16, f18)
+49: FINISH_VISCHUNK(o0, f18, f20)
+50: FINISH_VISCHUNK(o0, f20, f22)
+51: FINISH_VISCHUNK(o0, f22, f24)
+52: FINISH_VISCHUNK(o0, f24, f26)
+53: FINISH_VISCHUNK(o0, f26, f28)
+54: FINISH_VISCHUNK(o0, f28, f30)
+55: UNEVEN_VISCHUNK(o0, f30, f0)
+56: FINISH_VISCHUNK(o0, f32, f34)
+57: FINISH_VISCHUNK(o0, f34, f36)
+58: FINISH_VISCHUNK(o0, f36, f38)
+59: FINISH_VISCHUNK(o0, f38, f40)
+60: FINISH_VISCHUNK(o0, f40, f42)
+61: FINISH_VISCHUNK(o0, f42, f44)
+62: FINISH_VISCHUNK(o0, f44, f46)
+63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
+
+93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
- EX_LD_FP(LOAD(ldd, %o1, %f0))
+ EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
-1: EX_LD_FP(LOAD(ldub, %o1, %o3))
+1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
add %o1, 1, %o1
subcc %o2, 1, %o2
- EX_ST_FP(STORE(stb, %o3, %o0))
+ EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
bne,pt %xcc, 1b
add %o0, 1, %o0
@@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
72: andn %o2, 0xf, %GLOBAL_SPARE
and %o2, 0xf, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
+ EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
sub %o2, 0x8, %o2
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
+ EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
sub %o2, 0x4, %o2
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g0, %g1, %g1
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1, %o5))
+1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
subcc %g1, 1, %g1
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
sub %o3, %g1, %o3
andn %o2, 0x7, %GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pn %XCC, 90f
sub %o0, %o1, %o3
-1: EX_LD(LOAD(lduw, %o1, %g1))
+1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
subcc %o2, 4, %o2
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.align 32
-90: EX_LD(LOAD(ldub, %o1, %g1))
+90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
subcc %o2, 1, %o2
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
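
The U1_*_fp stubs above differ from their integer counterparts in one respect: the faulting path had VIS/FPU state live, so the stub runs VISExitHalf before producing the residual count. A hedged user-space model of that ordering follows; vis_exit_half() and u1_o2_1_fp_model() are stand-ins, not the real routines.

/* Hedged model of the "_fp" fixup stubs above: before reporting the residual
 * count they first undo the VIS/FPU state the copy path had set up. */
#include <stdio.h>

static int fpu_state_live;

static void vis_exit_half(void) { fpu_state_live = 0; }	/* stand-in for VISExitHalf */

static unsigned long u1_o2_1_fp_model(unsigned long o2)
{
	vis_exit_half();	/* restore CPU state first ... */
	return o2 + 1;		/* ... then return bytes not copied */
}

int main(void)
{
	fpu_state_live = 1;
	printf("%lu (fpu live: %d)\n", u1_o2_1_fp_model(6), fpu_state_live);  /* 7 (0) */
	return 0;
}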
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index 88ad73d86fe4..db73010a1af8 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_LD(x) \
+#define EX_LD(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_LD_FP(x) \
+#define EX_LD_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 845139d75537..c4ee858e352a 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -3,19 +3,19 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#define EX_ST(x) \
+#define EX_ST(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, y; \
.text; \
.align 4;
-#define EX_ST_FP(x) \
+#define EX_ST_FP(x,y) \
98: x; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one_fp;\
+ .word 98b, y##_fp; \
.text; \
.align 4;
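
In the two U3 user-copy wrappers above, EX_LD_FP/EX_ST_FP paste "_fp" onto the label supplied by U3memcpy.S via the preprocessor's ## operator, so a single fixup name at the call site selects the VIS-restoring variant on the FP path. A tiny stand-alone C illustration of that token pasting; the fixup_len names are invented for the example.

/* Tiny illustration of the "##" pasting used in EX_LD_FP/EX_ST_FP above. */
#include <stdio.h>

static long fixup_len(void)    { return 10; }	/* integer-path fixup */
static long fixup_len_fp(void) { return 20; }	/* FP-path fixup (would also
						   restore VIS state) */

#define CALL_FP_VARIANT(y)  y##_fp()		/* pastes "_fp" onto the name */

int main(void)
{
	printf("%ld %ld\n", fixup_len(), CALL_FP_VARIANT(fixup_len));  /* 10 20 */
	return 0;
}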
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 491ee69e4995..54f98706b03b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -4,6 +4,7 @@
*/
#ifdef __KERNEL__
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
@@ -22,21 +23,17 @@
#endif
#ifndef EX_LD
-#define EX_LD(x) x
+#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
-#define EX_LD_FP(x) x
+#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
-#define EX_ST(x) x
+#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
-#define EX_ST_FP(x) x
-#endif
-
-#ifndef EX_RETVAL
-#define EX_RETVAL(x) x
+#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
@@ -77,6 +74,87 @@
*/
.text
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+__restore_fp:
+ VISExitHalf
+ retl
+ nop
+ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ add %g1, 1, %g1
+ add %g2, %g1, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
+ENTRY(U3_retl_o2_plus_g2_fp)
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_fp)
+ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
+ add %g2, 8, %g2
+ ba,pt %xcc, __restore_fp
+ add %o2, %g2, %o0
+ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
+ENTRY(U3_retl_o2)
+ retl
+ mov %o2, %o0
+ENDPROC(U3_retl_o2)
+ENTRY(U3_retl_o2_plus_1)
+ retl
+ add %o2, 1, %o0
+ENDPROC(U3_retl_o2_plus_1)
+ENTRY(U3_retl_o2_plus_4)
+ retl
+ add %o2, 4, %o0
+ENDPROC(U3_retl_o2_plus_4)
+ENTRY(U3_retl_o2_plus_8)
+ retl
+ add %o2, 8, %o0
+ENDPROC(U3_retl_o2_plus_8)
+ENTRY(U3_retl_o2_plus_g1_plus_1)
+ add %g1, 1, %g1
+ retl
+ add %o2, %g1, %o0
+ENDPROC(U3_retl_o2_plus_g1_plus_1)
+ENTRY(U3_retl_o2_fp)
+ ba,pt %xcc, __restore_fp
+ mov %o2, %o0
+ENDPROC(U3_retl_o2_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x80, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
+ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ sll %o3, 6, %o3
+ add %o3, 0x40, %o3
+ ba,pt %xcc, __restore_fp
+ add %o2, %o3, %o0
+ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
+ENTRY(U3_retl_o2_plus_GS_plus_0x10)
+ add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
+ENTRY(U3_retl_o2_plus_GS_plus_0x08)
+ add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
+ retl
+ add %o2, GLOBAL_SPARE, %o0
+ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ENTRY(U3_retl_o2_and_7_plus_GS)
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS)
+ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+ add GLOBAL_SPARE, 8, GLOBAL_SPARE
+ and %o2, 7, %o2
+ retl
+ add %o2, GLOBAL_SPARE, %o2
+ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+#endif
+
.align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart,
@@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
- EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
- EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+ EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
+ EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
- EX_LD_FP(LOAD(ldd, %o1, %f4))
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
+ EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
- EX_ST_FP(STORE(std, %f0, %o0))
+ EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %icc, 3f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
- EX_ST_FP(STORE(std, %f2, %o0))
+ EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pt %icc, 1b
add %o0, 0x8, %o0
@@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
LOAD(prefetch, %o1 + 0x140, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
LOAD(prefetch, %o1 + 0x180, #one_read)
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
faligndata %f10, %f12, %f26
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f8, %f10, %f24
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* Finally we copy the last full 64-byte block. */
2:
- EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
- EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
- EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
+ EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
- EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
- EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
- EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f6, %f8, %f22
- EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
- EX_ST_FP(STORE_BLK(%f16, %o0))
+ EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
- EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
-1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
+1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
- EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
+ EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0))
+ EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
@@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x8, %o1
+ sub %o2, 8, %o2
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x4, %o1
+ sub %o2, 4, %o2
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
- EX_LD(LOAD(lduh, %o1, %o5))
- EX_ST(STORE(sth, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
+ EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x2, %o1
+ sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
be,pt %icc, 85f
nop
- EX_LD(LOAD(ldub, %o1, %o5))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
ba,pt %xcc, 85f
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
70: /* 16 < len <= 64 */
@@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0xf, GLOBAL_SPARE
and %o2, 0xf, %o2
1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
- EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
- EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
+ EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
@@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
@@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
8: mov 64, %o3
andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
+ EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
sub %o3, %g1, %o3
andn %o2, 0x7, GLOBAL_SPARE
sllx %g2, %g1, %g2
-1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+ EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
@@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
1:
subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
+ EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
@@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 482de093bdae..0252b218de45 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -9,18 +9,33 @@
#define XCC xcc
-#define EX(x,y) \
+#define EX(x,y,z) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_one; \
+ .word 98b, z; \
.text; \
.align 4;
+#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
+#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
+#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
+
.register %g2,#scratch
.register %g3,#scratch
.text
+__retl_o4_plus_8:
+ add %o4, %o2, %o4
+ retl
+ add %o4, 8, %o0
+__retl_o2_plus_4:
+ retl
+ add %o2, 4, %o0
+__retl_o2_plus_1:
+ retl
+ add %o2, 1, %o0
+
.align 32
/* Don't try to get too fancy here, just nice and
@@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
- EX(ldxa [%o1] %asi, %o5)
- EX(stxa %o5, [%o0] %asi)
+ EX_O4(ldxa [%o1] %asi, %o5)
+ EX_O4(stxa %o5, [%o0] %asi)
add %o1, 0x8, %o1
bgu,pt %XCC, 1b
add %o0, 0x8, %o0
@@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
- EX(lduwa [%o1] %asi, %o5)
- EX(stwa %o5, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %o5)
+ EX_O2_4(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
add %o0, 0x4, %o0
1: cmp %o2, 0
@@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
82:
subcc %o2, 4, %o2
- EX(lduwa [%o1] %asi, %g1)
- EX(stwa %g1, [%o0] %asi)
+ EX_O2_4(lduwa [%o1] %asi, %g1)
+ EX_O2_4(stwa %g1, [%o0] %asi)
add %o1, 4, %o1
bgu,pt %XCC, 82b
add %o0, 4, %o0
@@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
.align 32
90:
subcc %o2, 1, %o2
- EX(lduba [%o1] %asi, %g1)
- EX(stba %g1, [%o0] %asi)
+ EX_O2_1(lduba [%o1] %asi, %g1)
+ EX_O2_1(stba %g1, [%o0] %asi)
add %o1, 1, %o1
bgu,pt %XCC, 90b
add %o0, 1, %o0
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
deleted file mode 100644
index ac96ae236709..000000000000
--- a/arch/sparc/lib/user_fixup.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* user_fixup.c: Fix up user copy faults.
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-
-/* Calculating the exact fault address when using
- * block loads and stores can be very complicated.
- *
- * Instead of trying to be clever and handling all
- * of the cases, just fix things up simply here.
- */
-
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long end = start + size;
-
- if (fault_addr < start || fault_addr >= end) {
- *offset = 0;
- } else {
- *offset = fault_addr - start;
- size = end - fault_addr;
- }
- return size;
-}
-
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
-{
- unsigned long offset;
-
- size = compute_size((unsigned long) from, size, &offset);
- if (likely(size))
- memset(to + offset, 0, size);
-
- return size;
-}
-EXPORT_SYMBOL(copy_from_user_fixup);
-
-unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
-{
- unsigned long offset;
-
- return compute_size((unsigned long) to, size, &offset);
-}
-EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
- unsigned long fault_addr = current_thread_info()->fault_address;
- unsigned long start = (unsigned long) to;
- unsigned long end = start + size;
-
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- start = (unsigned long) from;
- end = start + size;
- if (fault_addr >= start && fault_addr < end)
- return end - fault_addr;
-
- return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);
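
user_fixup.c could only approximate the unfinished byte count by comparing the recorded fault address against the buffer, as the deleted compute_size() above shows; once every copy site names a fixup that returns the exact residue, the whole file can go. Below is a hedged C comparison of the two approaches: old_style() mirrors the deleted compute_size(), new_style() mirrors the __retl_o4_plus_8 stub added to copy_in_user.S earlier in this patch. Both are illustrative sketches, not kernel code.

/* Side-by-side sketch (illustrative names only) of the two ways the
 * remaining-byte count can be produced.  Old style: derive it after the
 * fact from the faulting address.  New style: the per-site fixup already
 * knows the exact count from the registers it was written against. */
#include <stdio.h>

static unsigned long old_style(unsigned long start, unsigned long size,
			       unsigned long fault_addr)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end)
		return size;		/* fault outside the buffer: report all */
	return end - fault_addr;	/* bytes at and beyond the fault */
}

static unsigned long new_style(unsigned long o2_tail, unsigned long o4_loop)
{
	/* Like __retl_o4_plus_8: tail bytes + loop bytes + the word that
	 * faulted. */
	return o2_tail + o4_loop + 8;
}

int main(void)
{
	printf("old: %lu\n", old_style(0x1000, 64, 0x1028));  /* 24 */
	printf("new: %lu\n", new_style(3, 16));               /* 27 */
	return 0;
}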
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 439784b7b7ac..37aa537b3ad8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -802,8 +802,10 @@ struct mdesc_mblock {
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask);
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
{
int i;
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
return addr;
}
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
{
+ static bool search_mdesc = true;
+ static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+ static int last_index;
int i;
addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
if ((addr & p->mask) == p->val)
return i;
}
- /* The following condition has been observed on LDOM guests.*/
- WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
- " rule. Some physical memory will be owned by node 0.");
- return 0;
+ /* The following condition has been observed on LDOM guests because
+ * node_masks only contains the best latency mask and value.
+ * LDOM guest's mdesc can contain a single latency group to
+ * cover multiple address ranges. Print a warning message only if the
+ * address cannot be found in node_masks or in the mdesc.
+ */
+ if ((search_mdesc) &&
+ ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+ /* find the available node in the mdesc */
+ last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+ numadbg("find_node: latency group for address 0x%lx is %d\n",
+ addr, last_index);
+ if ((last_index < 0) || (last_index >= num_node_masks)) {
+ /* WARN_ONCE() and use default group 0 */
+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+ search_mdesc = false;
+ last_index = 0;
+ }
+ }
+
+ return last_index;
}
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
return numa_latency[from][to];
}
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask)
+{
+ struct mdesc_handle *md = mdesc_grab();
+ u64 node, arc;
+ int i = 0;
+
+ node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+ if (node == MDESC_NODE_NULL)
+ goto out;
+
+ mdesc_for_each_node_by_name(md, node, "group") {
+ mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+ u64 target = mdesc_arc_target(md, arc);
+ struct mdesc_mlgroup *m = find_mlgroup(target);
+
+ if (!m)
+ continue;
+ if ((pa & m->mask) == m->match) {
+ if (pnode_mask) {
+ pnode_mask->mask = m->mask;
+ pnode_mask->val = m->match;
+ }
+ mdesc_release(md);
+ return i;
+ }
+ }
+ i++;
+ }
+
+out:
+ mdesc_release(md);
+ return -1;
+}
+
static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
int i;
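
find_node() above caches the last latency-group (mask, value) pair returned by find_numa_node_for_addr(), so later addresses that match the cached pair skip the mdesc walk entirely. A worked example of the (addr & mask) == val test follows, with made-up mask and value.

/* Worked example (made-up mask/value pair) of the latency-group match that
 * find_node() caches in last_mem_mask: an address belongs to the group when
 * its masked bits equal the group's value. */
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xf0000000UL;  /* hypothetical group mask  */
	unsigned long val  = 0x20000000UL;  /* hypothetical group value */
	unsigned long a    = 0x23456000UL;  /* falls in the group       */
	unsigned long b    = 0x53456000UL;  /* does not                 */

	printf("%d %d\n", (a & mask) == val, (b & mask) == val);  /* 1 0 */
	return 0;
}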
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f2b77112e9d8..e20fbbafb0b0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
return (tag == (vaddr >> 22));
}
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+ struct tsb *ent = &swapper_tsb[idx];
+ unsigned long match = idx << 13;
+
+ match |= (ent->tag << 22);
+ if (match >= start && match < end)
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
+}
+
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long v;
+ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+ return flush_tsb_kernel_range_scan(start, end);
+
for (v = start; v < end; v += PAGE_SIZE) {
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
KERNEL_TSB_NENTRIES);
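
The new guard in flush_tsb_kernel_range() switches to flush_tsb_kernel_range_scan() once the range spans at least 2 * KERNEL_TSB_NENTRIES pages, the point where hashing every page would touch the TSB more often than simply walking it once. A hedged sketch of that crossover test follows; PAGE_SHIFT_DEMO and NENTRIES_DEMO are illustrative stand-ins, not the kernel's constants.

/* Hedged sketch of the crossover test added to flush_tsb_kernel_range(). */
#include <stdio.h>

#define PAGE_SHIFT_DEMO 13	/* 8K pages, as on sparc64     */
#define NENTRIES_DEMO   4096	/* pretend kernel TSB size     */

static int use_full_scan(unsigned long start, unsigned long end)
{
	/* Scan every TSB entry once instead of hashing each page when the
	 * range covers at least 2 * NENTRIES_DEMO pages. */
	return ((end - start) >> PAGE_SHIFT_DEMO) >= 2 * NENTRIES_DEMO;
}

int main(void)
{
	unsigned long one_mb = 1UL << 20, big = 1UL << 30;

	printf("%d %d\n", use_full_scan(0, one_mb), use_full_scan(0, big));  /* 0 1 */
	return 0;
}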
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b4f4733abc6e..5d2fd6cd3189 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -30,7 +30,7 @@
.text
.align 32
.globl __flush_tlb_mm
-__flush_tlb_mm: /* 18 insns */
+__flush_tlb_mm: /* 19 insns */
/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
.align 32
.globl __flush_tlb_pending
-__flush_tlb_pending: /* 26 insns */
+__flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
.align 32
.globl __flush_tlb_kernel_range
-__flush_tlb_kernel_range: /* 16 insns */
+__flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
sethi %hi(PAGE_SIZE), %o4
- sub %o1, %o0, %o3
sub %o3, %o4, %o3
or %o0, 0x20, %o0 ! Nucleus
1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
retl
nop
nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+__spitfire_flush_tlb_kernel_range_slow:
+ mov 63 * 8, %o4
+1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_IMMU
+ stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
+ andcc %o3, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %o3
+ stxa %g0, [%o3] ASI_DMMU
+ stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %o4, 8, %o4
+ brgez,pt %o4, 1b
+ nop
+ retl
+ nop
__spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1
@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_kernel_range: /* 31 insns */
+ /* %o0=start, %o1=end */
+ cmp %o0, %o1
+ be,pn %xcc, 2f
+ sub %o1, %o0, %o3
+ srlx %o3, 18, %o4
+ brnz,pn %o4, 3f
+ sethi %hi(PAGE_SIZE), %o4
+ sub %o3, %o4, %o3
+ or %o0, 0x20, %o0 ! Nucleus
+1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o3, 1b
+ sub %o3, %o4, %o3
+2: sethi %hi(KERNBASE), %o3
+ flush %o3
+ retl
+ nop
+3: mov 0x80, %o4
+ stxa %g0, [%o4] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%o4] ASI_IMMU_DEMAP
+ membar #Sync
+ retl
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
sethi %hi(PAGE_OFFSET), %g1
@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
ret
restore
-__hypervisor_flush_tlb_mm: /* 10 insns */
+__hypervisor_flush_tlb_mm: /* 19 insns */
mov %o0, %o2 /* ARG2: mmu context */
mov 0, %o0 /* ARG0: CPU lists unimplemented */
mov 0, %o1 /* ARG1: CPU lists unimplemented */
mov HV_MMU_ALL, %o3 /* ARG3: flags */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_FAST_MMU_DEMAP_CTX, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
+ jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_page: /* 11 insns */
+__hypervisor_flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
mov %o0, %g2
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_pending: /* 16 insns */
+__hypervisor_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
mov %o2, %g2
@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 1f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g1, 1b
nop
retl
nop
+1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
+__hypervisor_flush_tlb_kernel_range: /* 31 insns */
/* %o0=start, %o1=end */
cmp %o0, %o1
be,pn %xcc, 2f
- sethi %hi(PAGE_SIZE), %g3
- mov %o0, %g1
- sub %o1, %g1, %g2
+ sub %o1, %o0, %g2
+ srlx %g2, 18, %g3
+ brnz,pn %g3, 4f
+ mov %o0, %g1
+ sethi %hi(PAGE_SIZE), %g3
sub %g2, %g3, %g2
1: add %g1, %g2, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
+ brnz,pn %o0, 3f
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
brnz,pt %g2, 1b
sub %g2, %g3, %g2
2: retl
nop
+3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
+ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+ nop
+4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ brnz,pn %o0, 3b
+ mov HV_FAST_MMU_DEMAP_CTX, %o1
+ retl
+ nop
#ifdef DCACHE_ALIASING_POSSIBLE
/* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -394,43 +511,6 @@ tlb_patch_one:
retl
nop
- .globl cheetah_patch_cachetlbops
-cheetah_patch_cachetlbops:
- save %sp, -128, %sp
-
- sethi %hi(__flush_tlb_mm), %o0
- or %o0, %lo(__flush_tlb_mm), %o0
- sethi %hi(__cheetah_flush_tlb_mm), %o1
- or %o1, %lo(__cheetah_flush_tlb_mm), %o1
- call tlb_patch_one
- mov 19, %o2
-
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__cheetah_flush_tlb_page), %o1
- or %o1, %lo(__cheetah_flush_tlb_page), %o1
- call tlb_patch_one
- mov 22, %o2
-
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
- call tlb_patch_one
- mov 27, %o2
-
-#ifdef DCACHE_ALIASING_POSSIBLE
- sethi %hi(__flush_dcache_page), %o0
- or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(__cheetah_flush_dcache_page), %o1
- or %o1, %lo(__cheetah_flush_dcache_page), %o1
- call tlb_patch_one
- mov 11, %o2
-#endif /* DCACHE_ALIASING_POSSIBLE */
-
- ret
- restore
-
#ifdef CONFIG_SMP
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.
@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
*/
.align 32
.globl xcall_flush_tlb_mm
-xcall_flush_tlb_mm: /* 21 insns */
+xcall_flush_tlb_mm: /* 24 insns */
mov PRIMARY_CONTEXT, %g2
ldxa [%g2] ASI_DMMU, %g3
srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_page
-xcall_flush_tlb_page: /* 17 insns */
+xcall_flush_tlb_page: /* 20 insns */
/* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
retry
nop
nop
+ nop
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
-xcall_flush_tlb_kernel_range: /* 25 insns */
+xcall_flush_tlb_kernel_range: /* 44 insns */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
- add %g2, 1, %g2
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
brnz,pt %g3, 1b
sub %g3, %g2, %g3
retry
- nop
- nop
+2: mov 63 * 8, %g1
+1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_IMMU
+ stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
+ andcc %g2, 0x40, %g0
+ bne,pn %xcc, 2f
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g0, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: sub %g1, 8, %g1
+ brgez,pt %g1, 1b
+ nop
+ retry
nop
nop
nop
@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
retry
+__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ srlx %g3, 18, %g2
+ brnz,pn %g2, 2f
+ add %g2, 1, %g2
+ sub %g3, %g2, %g3
+ or %g1, 0x20, %g1 ! Nucleus
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ retry
+2: mov 0x80, %g2
+ stxa %g0, [%g2] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g0, [%g2] ASI_IMMU_DEMAP
+ membar #Sync
+ retry
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
#ifdef DCACHE_ALIASING_POSSIBLE
.align 32
.globl xcall_flush_dcache_page_cheetah
@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
ba,a,pt %xcc, rtrap
.globl __hypervisor_xcall_flush_tlb_mm
-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
mov %o0, %g2
mov %o1, %g3
@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov HV_FAST_MMU_DEMAP_CTX, %o5
ta HV_FAST_TRAP
mov HV_FAST_MMU_DEMAP_CTX, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
mov %g7, %o5
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_page
-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+__hypervisor_xcall_flush_tlb_page: /* 20 insns */
/* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,a,pn %o0, 1f
mov %o0, %g5
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
.globl __hypervisor_xcall_flush_tlb_kernel_range
-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
+__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
sethi %hi(PAGE_SIZE - 1), %g2
or %g2, %lo(PAGE_SIZE - 1), %g2
andn %g1, %g2, %g1
andn %g7, %g2, %g7
sub %g7, %g1, %g3
+ srlx %g3, 18, %g7
add %g2, 1, %g2
sub %g3, %g2, %g3
mov %o0, %g2
mov %o1, %g4
- mov %o2, %g7
+ brnz,pn %g7, 2f
+ mov %o2, %g7
1: add %g1, %g3, %o0 /* ARG0: virtual address */
mov 0, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
ta HV_MMU_UNMAP_ADDR_TRAP
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
- brnz,pn %o0, __hypervisor_tlb_xcall_error
+ brnz,pn %o0, 1f
mov %o0, %g5
sethi %hi(PAGE_SIZE), %o2
brnz,pt %g3, 1b
sub %g3, %o2, %g3
- mov %g2, %o0
+5: mov %g2, %o0
mov %g4, %o1
mov %g7, %o2
membar #Sync
retry
+1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
+ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+ nop
+2: mov %o3, %g1
+ mov %o5, %g3
+ mov 0, %o0 /* ARG0: CPU lists unimplemented */
+ mov 0, %o1 /* ARG1: CPU lists unimplemented */
+ mov 0, %o2 /* ARG2: mmu context == nucleus */
+ mov HV_MMU_ALL, %o3 /* ARG3: flags */
+ mov HV_FAST_MMU_DEMAP_CTX, %o5
+ ta HV_FAST_TRAP
+ mov %g1, %o3
+ brz,pt %o0, 5b
+ mov %g3, %o5
+ mov HV_FAST_MMU_DEMAP_CTX, %g6
+ ba,pt %xcc, 1b
+ clr %g5
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
@@ -809,6 +985,58 @@ xcall_kgdb_capture:
#endif /* CONFIG_SMP */
+ .globl cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+ save %sp, -128, %sp
+
+ sethi %hi(__flush_tlb_mm), %o0
+ or %o0, %lo(__flush_tlb_mm), %o0
+ sethi %hi(__cheetah_flush_tlb_mm), %o1
+ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
+ call tlb_patch_one
+ mov 19, %o2
+
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ call tlb_patch_one
+ mov 27, %o2
+
+ sethi %hi(__flush_tlb_kernel_range), %o0
+ or %o0, %lo(__flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ sethi %hi(__flush_dcache_page), %o0
+ or %o0, %lo(__flush_dcache_page), %o0
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+ sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
+ call tlb_patch_one
+ mov 44, %o2
+#endif /* CONFIG_SMP */
+
+ ret
+ restore
.globl hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
call tlb_patch_one
- mov 10, %o2
+ mov 19, %o2
sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__hypervisor_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
call tlb_patch_one
- mov 11, %o2
+ mov 22, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 27, %o2
sethi %hi(__flush_tlb_kernel_range), %o0
or %o0, %lo(__flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 16, %o2
+ mov 31, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 24, %o2
sethi %hi(xcall_flush_tlb_page), %o0
or %o0, %lo(xcall_flush_tlb_page), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 17, %o2
+ mov 20, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
call tlb_patch_one
- mov 25, %o2
+ mov 44, %o2
#endif /* CONFIG_SMP */
ret
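
Every __flush_tlb_kernel_range variant touched above (Spitfire, Cheetah, hypervisor, and their xcall twins) now applies the same guard: if the span is 256 KB or more — that is, (end - start) >> 18 is non-zero, the "srlx ..., 18" test — the per-page demap loop is abandoned in favour of one whole-TLB operation (a demap-context hypercall, a full nucleus demap, or a walk over all 64 entries that skips locked ones). A compilable C sketch of that policy, with made-up helper names and sizes, purely for orientation:

    #include <stdio.h>

    #define PAGE_SIZE        8192UL              /* sparc64 base page size */
    #define FLUSH_SPAN_LIMIT (1UL << 18)         /* 256 KB: the "srlx ..., 18" test */

    /* Stubs standing in for the real demap operations. */
    static void flush_whole_kernel_tlb(void) { puts("full flush"); }
    static void demap_kernel_page(unsigned long va) { printf("demap %#lx\n", va); }

    static void flush_tlb_kernel_range_sketch(unsigned long start, unsigned long end)
    {
        if (start == end)
            return;

        if (end - start >= FLUSH_SPAN_LIMIT) {
            /* Large span: one whole-TLB operation instead of
             * thousands of per-page demaps. */
            flush_whole_kernel_tlb();
            return;
        }

        for (unsigned long va = start; va < end; va += PAGE_SIZE)
            demap_kernel_page(va);
    }

    int main(void)
    {
        flush_tlb_kernel_range_sketch(0x400000UL, 0x400000UL + 4 * PAGE_SIZE);
        flush_tlb_kernel_range_sketch(0x400000UL, 0x400000UL + (1UL << 20));
        return 0;
    }
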
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index ba35c41c71ff..2d1f5638974c 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761d5f61..4810e48dbbbf 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
*/
#define __write_once __read_mostly
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
#endif /* _ASM_TILE_CACHE_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0684e88aacd8..0bc9968b97a1 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -264,8 +264,6 @@ static inline void cpu_relax(void)
barrier();
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09bb774b39cd..24e0f8c21f2f 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
- __dma_prep_pa_range(sg->dma_address, sg->length, direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+ __dma_prep_pa_range(sg->dma_address, sg->length, direction);
}
return nents;
@@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(!valid_dma_direction(direction));
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
__dma_complete_pa_range(sg->dma_address, sg->length,
direction);
}
@@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
BUG_ON(!valid_dma_direction(direction));
BUG_ON(offset + size > PAGE_SIZE);
- __dma_prep_page(page, offset, size, direction);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_prep_page(page, offset, size, direction);
return page_to_pa(page) + offset;
}
@@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
{
BUG_ON(!valid_dma_direction(direction));
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ return;
+
__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
dma_address & (PAGE_SIZE - 1), size, direction);
}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
*/
unsigned long long sched_clock(void)
{
- return clocksource_cyc2ns(get_cycles(),
- sched_clock_mult, SCHED_CLOCK_SHIFT);
+ return mult_frac(get_cycles(),
+ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
}
int setup_profiling_timer(unsigned int multiplier)
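
The sched_clock() hunk above replaces clocksource_cyc2ns(), which forms the full product cycles * sched_clock_mult before shifting and so can wrap a 64-bit value once the cycle counter grows, with mult_frac(), which splits the operand into quotient and remainder: the remainder term is bounded by denom * numer, and the quotient term overflows only if the final result itself would. A small standalone sketch of that split (the constants below are illustrative, not the tile values):

    #include <stdint.h>
    #include <stdio.h>

    /* Same split as mult_frac(): the full x * numer product is never formed. */
    static uint64_t mult_frac_sketch(uint64_t x, uint64_t numer, uint64_t denom)
    {
        uint64_t quot = x / denom;
        uint64_t rem  = x % denom;

        /* rem < denom, so rem * numer stays small; quot * numer only
         * overflows if the result itself does. */
        return quot * numer + (rem * numer) / denom;
    }

    int main(void)
    {
        uint64_t cycles = 1ULL << 62;   /* a large cycle count          */
        uint64_t mult   = 1000;         /* illustrative clock multiplier */
        uint64_t shift  = 10;           /* illustrative clock shift      */

        /* (cycles * mult) >> shift would overflow here; the split does not. */
        printf("%llu\n",
               (unsigned long long)mult_frac_sketch(cycles, mult, 1ULL << shift));
        return 0;
    }
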
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 2cd5b6874c7b..1669240c7a25 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -256,13 +256,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- dev->mtu = new_mtu;
-
- return 0;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
@@ -374,7 +367,6 @@ static const struct net_device_ops uml_netdev_ops = {
.ndo_set_rx_mode = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = uml_net_change_mtu,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = uml_net_poll_controller,
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 904f3ebf4220..052f7f6d0551 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 821ff0acfe17..600a2e9bfee2 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -116,12 +116,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
- struct page *page)
-{
- return __tlb_remove_page(tlb, page);
-}
-
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -141,6 +135,15 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ tlb_remove_tlb_entry(tlb, ptep, address)
+
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
diff --git a/arch/unicore32/include/asm/mutex.h b/arch/unicore32/include/asm/mutex.h
deleted file mode 100644
index fab7d0e8adf6..000000000000
--- a/arch/unicore32/include/asm/mutex.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * linux/arch/unicore32/include/asm/mutex.h
- *
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Copyright (C) 2001-2010 GUAN Xue-tao
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * UniCore optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef __UNICORE_MUTEX_H__
-#define __UNICORE_MUTEX_H__
-
-# include <asm-generic/mutex-xchg.h>
-#endif
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index 8d21b7adf26b..4eaa42167667 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -71,7 +71,6 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bada636d1065..dd47e60aabf5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -9,28 +9,50 @@ config 64BIT
config X86_32
def_bool y
depends on !64BIT
+ # Options that are inherently 32-bit kernel only:
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select CLKSRC_I8253
+ select CLONE_BACKWARDS
+ select HAVE_AOUT
+ select HAVE_GENERIC_DMA_COHERENT
+ select MODULES_USE_ELF_REL
+ select OLD_SIGACTION
config X86_64
def_bool y
depends on 64BIT
+ # Options that are inherently 64-bit kernel only:
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
+ select X86_DEV_DMA_OPS
-### Arch settings
+#
+# Arch settings
+#
+# ( Note that options that are marked 'if X86_64' could in principle be
+# ported to 32-bit as well. )
+#
config X86
def_bool y
+ #
+ # Note: keep this list sorted alphabetically
+ #
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ANON_INODES
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
- select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE if X86_64
select ARCH_HAS_KCOV if X86_64
- select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_MMIO_FLUSH
+ select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -39,23 +61,17 @@ config X86
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_USE_BUILTIN_BSWAP
- select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
- select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select BUILDTIME_EXTABLE_SORT
select CLKEVT_I8253
- select CLKSRC_I8253 if X86_32
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
select CLOCKSOURCE_WATCHDOG
- select CLONE_BACKWARDS if X86_32
- select COMPAT_OLD_SIGACTION if IA32_EMULATION
select DCACHE_WORD_ACCESS
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -77,7 +93,6 @@ config X86
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
- select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
@@ -88,12 +103,10 @@ config X86
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_SOFT_DIRTY if X86_64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_ARCH_WITHIN_STACK_FRAMES
- select HAVE_EBPF_JIT if X86_64
select HAVE_ARCH_VMAP_STACK if X86_64
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
@@ -106,6 +119,7 @@ config X86
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_EBPF_JIT if X86_64
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EXIT_THREAD
select HAVE_FENTRY if X86_64
@@ -113,7 +127,6 @@ config X86
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
@@ -142,15 +155,11 @@ config X86
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_STACK_VALIDATION if X86_64
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UID16 if X86_32 || IA32_EMULATION
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select IRQ_FORCED_THREADING
- select MODULES_USE_ELF_RELA if X86_64
- select MODULES_USE_ELF_REL if X86_32
- select OLD_SIGACTION if X86_32
- select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
select PERF_EVENTS
select RTC_LIB
select RTC_MC146818_LIB
@@ -160,11 +169,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
- select X86_DEV_DMA_OPS if X86_64
select X86_FEATURE_NAMES if PROC_FS
- select HAVE_STACK_VALIDATION if X86_64
- select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS
- select ARCH_HAS_PKEYS if X86_INTEL_MEMORY_PROTECTION_KEYS
config INSTRUCTION_DECODER
def_bool y
@@ -939,6 +944,27 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
+config SCHED_MC_PRIO
+ bool "CPU core priorities scheduler support"
+ depends on SCHED_MC && CPU_SUP_INTEL
+ select X86_INTEL_PSTATE
+ select CPU_FREQ
+ default y
+ ---help---
+ Intel Turbo Boost Max Technology 3.0 enabled CPUs have a
+ core ordering determined at manufacturing time, which allows
+ certain cores to reach higher turbo frequencies (when running
+ single threaded workloads) than others.
+
+ Enabling this kernel feature teaches the scheduler about
+ the TBM3 (aka ITMT) priority order of the CPU cores and adjusts the
+ scheduler's CPU selection logic accordingly, so that higher
+ overall system performance can be achieved.
+
+	  This option has no effect on CPUs that do not support Intel
+	  Turbo Boost Max Technology 3.0.
+
+ If unsure say Y here.
+
source "kernel/Kconfig.preempt"
config UP_LATE_INIT
@@ -1025,7 +1051,7 @@ config X86_MCE_INTEL
config X86_MCE_AMD
def_bool y
prompt "AMD MCE features"
- depends on X86_MCE && X86_LOCAL_APIC
+ depends on X86_MCE && X86_LOCAL_APIC && AMD_NB
---help---
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
@@ -1525,7 +1551,7 @@ config X86_CHECK_BIOS_CORRUPTION
line. By default it scans the low 64k of memory every 60
seconds; see the memory_corruption_check_size and
memory_corruption_check_period parameters in
- Documentation/kernel-parameters.txt to adjust this.
+ Documentation/admin-guide/kernel-parameters.rst to adjust this.
When enabled with the default parameters, this option has
almost no overhead, as it reserves a relatively small amount
@@ -1737,6 +1763,8 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
def_bool y
# Note: only available in 64-bit mode
depends on CPU_SUP_INTEL && X86_64
+ select ARCH_USES_HIGH_VMA_FLAGS
+ select ARCH_HAS_PKEYS
---help---
Memory Protection Keys provides a mechanism for enforcing
page-based protections, but without requiring modification of the
@@ -2092,7 +2120,7 @@ config DEBUG_HOTPLUG_CPU0
config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
- depends on X86_32 || IA32_EMULATION
+ depends on COMPAT_32
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
@@ -2694,9 +2722,10 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION
bool "IA32 Emulation"
depends on X86_64
+ select ARCH_WANT_OLD_COMPAT_IPC
select BINFMT_ELF
select COMPAT_BINFMT_ELF
- select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
---help---
Include code to run legacy 32-bit programs under a
64-bit kernel. You should likely turn this on, unless you're
@@ -2721,6 +2750,12 @@ config X86_X32
elf32_x86_64 support enabled to compile a kernel with this
option set.
+config COMPAT_32
+ def_bool y
+ depends on IA32_EMULATION || X86_32
+ select HAVE_UID16
+ select OLD_SIGSUSPEND3
+
config COMPAT
def_bool y
depends on IA32_EMULATION || X86_X32
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 12ea8f8384f4..0d810fb15eac 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -65,7 +65,7 @@ clean-files += cpustr.h
# ---------------------------------------------------------------------------
-KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
+KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
UBSAN_SANITIZE := n
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 536ccfcc01c6..44163e8c3868 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -25,7 +25,7 @@ KCOV_INSTRUMENT := n
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
-KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ -O2
KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
UBSAN_SANITIZE :=n
LDFLAGS := -m elf_$(UTS_MACHINE)
-ifeq ($(CONFIG_RELOCATABLE),y)
-# If kernel is relocatable, build compressed kernel as PIE.
+# Compressed kernel should be built as PIE since it may be loaded at any
+# address by the bootloader.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
@@ -51,7 +51,6 @@ else
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
-endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cc69e37548db..ff01c8fc76f7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -537,6 +537,69 @@ free_handle:
efi_call_early(free_pool, pci_handle);
}
+static void retrieve_apple_device_properties(struct boot_params *boot_params)
+{
+ efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID;
+ struct setup_data *data, *new;
+ efi_status_t status;
+ u32 size = 0;
+ void *p;
+
+ status = efi_call_early(locate_protocol, &guid, NULL, &p);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (efi_table_attr(apple_properties_protocol, version, p) != 0x10000) {
+ efi_printk(sys_table, "Unsupported properties proto version\n");
+ return;
+ }
+
+ efi_call_proto(apple_properties_protocol, get_all, p, NULL, &size);
+ if (!size)
+ return;
+
+ do {
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size + sizeof(struct setup_data), &new);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table,
+ "Failed to alloc mem for properties\n");
+ return;
+ }
+
+ status = efi_call_proto(apple_properties_protocol, get_all, p,
+ new->data, &size);
+
+ if (status == EFI_BUFFER_TOO_SMALL)
+ efi_call_early(free_pool, new);
+ } while (status == EFI_BUFFER_TOO_SMALL);
+
+ new->type = SETUP_APPLE_PROPERTIES;
+ new->len = size;
+ new->next = 0;
+
+ data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+ if (!data)
+ boot_params->hdr.setup_data = (unsigned long)new;
+ else {
+ while (data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+ data->next = (unsigned long)new;
+ }
+}
+
+static void setup_quirks(struct boot_params *boot_params)
+{
+ efi_char16_t const apple[] = { 'A', 'p', 'p', 'l', 'e', 0 };
+ efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+ efi_table_attr(efi_system_table, fw_vendor, sys_table);
+
+ if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+ }
+}
+
static efi_status_t
setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
{
@@ -1098,6 +1161,8 @@ struct boot_params *efi_main(struct efi_config *c,
setup_efi_pci(boot_params);
+ setup_quirks(boot_params);
+
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
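
retrieve_apple_device_properties() above hands its payload to the kernel proper through the boot protocol's setup_data chain: a singly linked list of nodes addressed by 64-bit next fields, terminated by 0 and appended at the tail. A compilable sketch of that append (the struct mirrors the layout of struct setup_data; the helper name and simplified types are mine):

    #include <stdint.h>

    /* Mirrors the layout of struct setup_data in the x86 boot protocol. */
    struct setup_data_sketch {
        uint64_t next;      /* address of the next node, 0 terminates */
        uint32_t type;
        uint32_t len;
        uint8_t  data[];
    };

    /* Append @new_node to the chain whose head address lives in *@head_addr,
     * walking to the tail exactly as the hunk above walks
     * boot_params->hdr.setup_data. */
    static void append_setup_data(uint64_t *head_addr,
                                  struct setup_data_sketch *new_node)
    {
        struct setup_data_sketch *cur;

        new_node->next = 0;

        if (!*head_addr) {
            *head_addr = (uint64_t)(uintptr_t)new_node;
            return;
        }

        cur = (struct setup_data_sketch *)(uintptr_t)*head_addr;
        while (cur->next)
            cur = (struct setup_data_sketch *)(uintptr_t)cur->next;
        cur->next = (uint64_t)(uintptr_t)new_node;
    }
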
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index efdfba21a5b2..4d85e600db78 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -119,8 +119,7 @@ ENTRY(startup_32)
*/
/* Load new GDT with the 64bit segments using 32bit descriptor */
- leal gdt(%ebp), %eax
- movl %eax, gdt+2(%ebp)
+ addl %ebp, gdt+2(%ebp)
lgdt gdt(%ebp)
/* Enable PAE mode */
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 26240dde081e..4224ede43b4e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,6 +87,12 @@ int validate_cpu(void)
return -1;
}
+ if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
+ !has_eflag(X86_EFLAGS_ID)) {
+ printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
+ return -1;
+ }
+
if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 0ab5ee1c26af..31c34ee131f3 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -21,7 +21,6 @@
#include <linux/hardirq.h>
#include <linux/types.h>
-#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
@@ -29,14 +28,14 @@
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
-#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
@@ -45,28 +44,26 @@
#define AESNI_ALIGN 16
+#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
+#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
+#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
*/
struct aesni_rfc4106_gcm_ctx {
- u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
- struct crypto_aes_ctx aes_key_expanded
- __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 hash_subkey[16] AESNI_ALIGN_ATTR;
+ struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
u8 nonce[4];
};
-struct aesni_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
-};
-
struct aesni_xts_ctx {
- u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
- u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
+ u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+ u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -360,96 +357,95 @@ static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
aesni_dec(ctx, dst, src);
}
-static int ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ return aes_set_key_common(crypto_skcipher_tfm(tfm),
+ crypto_skcipher_ctx(tfm), key, len);
+}
+
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
kernel_fpu_end();
return err;
}
-static int ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
kernel_fpu_end();
return err;
}
-static int cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
kernel_fpu_end();
return err;
}
-static int cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
kernel_fpu_end();
@@ -458,7 +454,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
- struct blkcipher_walk *walk)
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[AES_BLOCK_SIZE];
@@ -491,157 +487,53 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
}
#endif
-static int ctr_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, true);
kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
ctr_crypt_final(ctx, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
kernel_fpu_end();
return err;
}
-#endif
-
-static int ablk_ecb_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
-}
-
-static int ablk_cbc_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
-}
-
-#ifdef CONFIG_X86_64
-static int ablk_ctr_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
-}
-
-#endif
-
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
-static int ablk_pcbc_init(struct crypto_tfm *tfm)
-{
- return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
-}
-#endif
-
-static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
-{
- aesni_ecb_enc(ctx, blks, blks, nbytes);
-}
-static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
-{
- aesni_ecb_dec(ctx, blks, blks, nbytes);
-}
-
-static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
- keylen - AES_BLOCK_SIZE);
+ err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
- return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
-}
-
-static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
-{
- struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[8];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
- .crypt_fn = lrw_xts_encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- kernel_fpu_begin();
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- kernel_fpu_end();
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[8];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
- .crypt_fn = lrw_xts_decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- kernel_fpu_begin();
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- kernel_fpu_end();
-
- return ret;
-}
-
-static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
+ keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
+ err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+ key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
- keylen / 2);
+ return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+ key + keylen, keylen);
}
@@ -650,8 +542,6 @@ static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
aesni_enc(ctx, out, in);
}
-#ifdef CONFIG_X86_64
-
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
@@ -698,83 +588,28 @@ static const struct common_glue_ctx aesni_dec_xts = {
} }
};
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(aesni_xts_tweak),
- aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx));
+ return glue_xts_req_128bit(&aesni_enc_xts, req,
+ XTS_TWEAK_CAST(aesni_xts_tweak),
+ aes_ctx(ctx->raw_tweak_ctx),
+ aes_ctx(ctx->raw_crypt_ctx));
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(aesni_xts_tweak),
- aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx));
-}
-
-#else
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[8];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
- .tweak_fn = aesni_xts_tweak,
- .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
- .crypt_fn = lrw_xts_encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- kernel_fpu_begin();
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- kernel_fpu_end();
-
- return ret;
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[8];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
- .tweak_fn = aesni_xts_tweak,
- .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
- .crypt_fn = lrw_xts_decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- kernel_fpu_begin();
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- kernel_fpu_end();
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return ret;
+ return glue_xts_req_128bit(&aesni_dec_xts, req,
+ XTS_TWEAK_CAST(aesni_xts_tweak),
+ aes_ctx(ctx->raw_tweak_ctx),
+ aes_ctx(ctx->raw_crypt_ctx));
}
-#endif
-
-#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
struct cryptd_aead *cryptd_tfm;
@@ -888,7 +723,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +803,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -1077,9 +912,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
- AESNI_ALIGN - 1,
- .cra_alignmask = 0,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -1091,14 +924,12 @@ static struct crypto_alg aesni_algs[] = { {
}
}
}, {
- .cra_name = "__aes-aesni",
- .cra_driver_name = "__driver-aes-aesni",
- .cra_priority = 0,
+ .cra_name = "__aes",
+ .cra_driver_name = "__aes-aesni",
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
- AESNI_ALIGN - 1,
- .cra_alignmask = 0,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -1109,250 +940,94 @@ static struct crypto_alg aesni_algs[] = { {
.cia_decrypt = __aes_decrypt
}
}
-}, {
- .cra_name = "__ecb-aes-aesni",
- .cra_driver_name = "__driver-ecb-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
- AESNI_ALIGN - 1,
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-aes-aesni",
- .cra_driver_name = "__driver-cbc-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
- AESNI_ALIGN - 1,
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_ecb_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
+} };
+
+static struct skcipher_alg aesni_skciphers[] = {
+ {
+ .base = {
+ .cra_name = "__ecb(aes)",
+ .cra_driver_name = "__ecb-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
},
- },
-}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_cbc_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aesni_skcipher_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base = {
+ .cra_name = "__cbc(aes)",
+ .cra_driver_name = "__cbc-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
},
- },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesni_skcipher_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
#ifdef CONFIG_X86_64
-}, {
- .cra_name = "__ctr-aes-aesni",
- .cra_driver_name = "__driver-ctr-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
- AESNI_ALIGN - 1,
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
+ }, {
+ .base = {
+ .cra_name = "__ctr(aes)",
+ .cra_driver_name = "__ctr-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
},
- },
-}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_ctr_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .setkey = aesni_skcipher_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base = {
+ .cra_name = "__xts(aes)",
+ .cra_driver_name = "__xts-aes-aesni",
+ .cra_priority = 401,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = XTS_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
},
- },
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_aesni_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
#endif
+ }
+};
+
+struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
+
+struct {
+ const char *algname;
+ const char *drvname;
+ const char *basename;
+ struct simd_skcipher_alg *simd;
+} aesni_simd_skciphers2[] = {
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
-}, {
- .cra_name = "pcbc(aes)",
- .cra_driver_name = "pcbc-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_pcbc_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+ {
+ .algname = "pcbc(aes)",
+ .drvname = "pcbc-aes-aesni",
+ .basename = "fpu(pcbc(__aes-aesni))",
},
#endif
-}, {
- .cra_name = "__lrw-aes-aesni",
- .cra_driver_name = "__driver-lrw-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_aesni_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = lrw_aesni_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-aes-aesni",
- .cra_driver_name = "__driver-xts-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aesni_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_aesni_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "lrw(aes)",
- .cra_driver_name = "lrw-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-} };
+};
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
@@ -1401,9 +1076,27 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
+static void aesni_free_simds(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
+ aesni_simd_skciphers[i]; i++)
+ simd_skcipher_free(aesni_simd_skciphers[i]);
+
+ for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
+ aesni_simd_skciphers2[i].simd; i++)
+ simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+}
+
static int __init aesni_init(void)
{
+ struct simd_skcipher_alg *simd;
+ const char *basename;
+ const char *algname;
+ const char *drvname;
int err;
+ int i;
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
@@ -1445,13 +1138,48 @@ static int __init aesni_init(void)
if (err)
goto fpu_exit;
+ err = crypto_register_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
+ if (err)
+ goto unregister_algs;
+
err = crypto_register_aeads(aesni_aead_algs,
ARRAY_SIZE(aesni_aead_algs));
if (err)
- goto unregister_algs;
+ goto unregister_skciphers;
+
+ for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
+ algname = aesni_skciphers[i].base.cra_name + 2;
+ drvname = aesni_skciphers[i].base.cra_driver_name + 2;
+ basename = aesni_skciphers[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+
+ aesni_simd_skciphers[i] = simd;
+ }
- return err;
+ for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
+ algname = aesni_simd_skciphers2[i].algname;
+ drvname = aesni_simd_skciphers2[i].drvname;
+ basename = aesni_simd_skciphers2[i].basename;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+ aesni_simd_skciphers2[i].simd = simd;
+ }
+
+ return 0;
+
+unregister_simds:
+ aesni_free_simds();
+ crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+unregister_skciphers:
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
@@ -1461,7 +1189,10 @@ fpu_exit:
static void __exit aesni_exit(void)
{
+ aesni_free_simds();
crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
crypto_fpu_exit();
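
The registration loop in aesni_init() derives the user-visible simd algorithm names by skipping the first two characters of the internal cra_name/cra_driver_name. As a reading aid, a small stand-alone sketch of that mapping follows; the concrete names and the two-character internal prefix ("__") are assumptions for illustration, not taken from this patch.

#include <stdio.h>

int main(void)
{
        /* Hypothetical internal skcipher names; internal-only algorithms
         * are assumed here to carry a two-character "__" prefix. */
        const char *cra_name        = "__xts(aes)";
        const char *cra_driver_name = "__xts-aes-aesni";

        const char *algname  = cra_name + 2;        /* exposed algorithm name */
        const char *drvname  = cra_driver_name + 2; /* exposed driver name    */
        const char *basename = cra_driver_name;     /* wrapped internal alg   */

        printf("simd wrapper: %s (%s), wrapping %s\n",
               algname, drvname, basename);
        return 0;
}

With these assumed names the wrapper would expose "xts(aes)"/"xts-aes-aesni" on top of the internal "__xts-aes-aesni".
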
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 0857b1a1de3b..c194d5717ae5 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,26 +48,13 @@
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#if defined(X86_FEATURE_EAGER_FPU)
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
-#else
-#define set_pcl_breakeven_point() \
- (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
-#endif
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -190,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -202,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
@@ -261,7 +248,6 @@ static int __init crc32c_intel_mod_init(void)
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
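
The breakeven comment above boils down to a simple length-threshold dispatch: only pay the FPU save/restore cost when the buffer is at least CRC32C_PCL_BREAKEVEN bytes. A minimal user-space sketch of that policy, with hypothetical stand-ins for the kernel's crc_pcl(), irq_fpu_usable(), kernel_fpu_begin() and kernel_fpu_end():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CRC32C_PCL_BREAKEVEN 512

/* Plain bitwise CRC-32C (Castagnoli), standing in for the byte-at-a-time path. */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return crc;
}

/* In the kernel this would be crc_pcl() under kernel_fpu_begin/end;
 * in this sketch it is simply the same scalar code. */
static uint32_t crc32c_pcl(uint32_t crc, const uint8_t *p, size_t len)
{
        return crc32c_sw(crc, p, len);
}

static int fpu_usable(void) { return 1; }   /* stand-in for irq_fpu_usable()   */
static void fpu_begin(void) { }             /* stand-in for kernel_fpu_begin() */
static void fpu_end(void)   { }             /* stand-in for kernel_fpu_end()   */

static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t len)
{
        if (len >= CRC32C_PCL_BREAKEVEN && fpu_usable()) {
                fpu_begin();
                crc = crc32c_pcl(crc, p, len);
                fpu_end();
        } else {
                crc = crc32c_sw(crc, p, len);
        }
        return crc;
}

int main(void)
{
        uint8_t buf[1024] = { 0 };

        printf("%08x\n", ~crc32c_update(~0u, buf, sizeof(buf)));
        return 0;
}
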
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index e7d679e2a018..406680476c52 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -11,143 +11,186 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/crypto.h>
#include <asm/fpu/api.h>
struct crypto_fpu_ctx {
- struct crypto_blkcipher *child;
+ struct crypto_skcipher *child;
};
-static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent);
- struct crypto_blkcipher *child = ctx->child;
+ struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
int err;
- crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_fpu_encrypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
int err;
- struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
- struct crypto_blkcipher *child = ctx->child;
- struct blkcipher_desc desc = {
- .tfm = child,
- .info = desc_in->info,
- .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
- };
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, 0, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
kernel_fpu_begin();
- err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
+ err = crypto_skcipher_encrypt(subreq);
kernel_fpu_end();
+
+ skcipher_request_zero(subreq);
return err;
}
-static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_fpu_decrypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
int err;
- struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
- struct crypto_blkcipher *child = ctx->child;
- struct blkcipher_desc desc = {
- .tfm = child,
- .info = desc_in->info,
- .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
- };
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, 0, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
kernel_fpu_begin();
- err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
+ err = crypto_skcipher_decrypt(subreq);
kernel_fpu_end();
+
+ skcipher_request_zero(subreq);
return err;
}
-static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
+static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *cipher;
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher_spawn *spawn;
+ struct crypto_skcipher *cipher;
- cipher = crypto_spawn_blkcipher(spawn);
+ spawn = skcipher_instance_ctx(inst);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
+
return 0;
}
-static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
+}
+
+static void crypto_fpu_free(struct skcipher_instance *inst)
{
- struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_blkcipher(ctx->child);
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
}
-static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
+static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct crypto_skcipher_spawn *spawn;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) &
+ algt->mask)
+ return -EINVAL;
+
+ if (!(algt->mask & CRYPTO_ALG_INTERNAL))
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = crypto_alloc_instance("fpu", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- inst->alg.cra_flags = alg->cra_flags;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = alg->cra_type;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
- inst->alg.cra_init = crypto_fpu_init_tfm;
- inst->alg.cra_exit = crypto_fpu_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;
-
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ goto out_free_inst;
-static void crypto_fpu_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ alg = crypto_skcipher_spawn_alg(spawn);
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu",
+ &alg->base);
+ if (err)
+ goto out_drop_skcipher;
+
+ inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
+
+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
+
+ inst->alg.init = crypto_fpu_init_tfm;
+ inst->alg.exit = crypto_fpu_exit_tfm;
+
+ inst->alg.setkey = crypto_fpu_setkey;
+ inst->alg.encrypt = crypto_fpu_encrypt;
+ inst->alg.decrypt = crypto_fpu_decrypt;
+
+ inst->free = crypto_fpu_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_skcipher;
+
+out:
+ return err;
+
+out_drop_skcipher:
+ crypto_drop_skcipher(spawn);
+out_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_fpu_tmpl = {
.name = "fpu",
- .alloc = crypto_fpu_alloc,
- .free = crypto_fpu_free,
+ .create = crypto_fpu_create,
.module = THIS_MODULE,
};
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 6a85598931b5..260a060d7275 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -27,10 +27,10 @@
#include <linux/module.h>
#include <crypto/b128ops.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
-#include <crypto/scatterwalk.h>
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
@@ -339,6 +339,41 @@ done:
return nbytes;
}
+static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+ void *ctx,
+ struct skcipher_walk *walk)
+{
+ const unsigned int bsize = 128 / 8;
+ unsigned int nbytes = walk->nbytes;
+ u128 *src = walk->src.virt.addr;
+ u128 *dst = walk->dst.virt.addr;
+ unsigned int num_blocks, func_bytes;
+ unsigned int i;
+
+ /* Process multi-block batch */
+ for (i = 0; i < gctx->num_funcs; i++) {
+ num_blocks = gctx->funcs[i].num_blocks;
+ func_bytes = bsize * num_blocks;
+
+ if (nbytes >= func_bytes) {
+ do {
+ gctx->funcs[i].fn_u.xts(ctx, dst, src,
+ walk->iv);
+
+ src += num_blocks;
+ dst += num_blocks;
+ nbytes -= func_bytes;
+ } while (nbytes >= func_bytes);
+
+ if (nbytes < bsize)
+ goto done;
+ }
+ }
+
+done:
+ return nbytes;
+}
+
/* for implementations implementing faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
@@ -379,6 +414,43 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req,
+ common_glue_func_t tweak_fn, void *tweak_ctx,
+ void *crypt_ctx)
+{
+ const unsigned int bsize = 128 / 8;
+ struct skcipher_walk walk;
+ bool fpu_enabled = false;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ nbytes = walk.nbytes;
+ if (!nbytes)
+ return err;
+
+ /* set minimum length to bsize, for tweak_fn */
+ fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled,
+ nbytes < bsize ? bsize : nbytes);
+
+ /* calculate first value of T */
+ tweak_fn(tweak_ctx, walk.iv, walk.iv);
+
+ while (nbytes) {
+ nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);
+
+ err = skcipher_walk_done(&walk, nbytes);
+ nbytes = walk.nbytes;
+ }
+
+ glue_fpu_end(fpu_enabled);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
+
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
common_glue_func_t fn)
{
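
The multi-block batch loop in __glue_xts_req_128bit() walks the function table in the order listed (conventionally the widest batch first) and consumes as many whole batches as the remaining length allows. The stand-alone sketch below models only that batching policy; the function table, block counts and byte lengths are illustrative and are not the kernel's structures.

#include <stdio.h>

struct batch_fn {
        unsigned int num_blocks;            /* blocks processed per call */
        void (*fn)(unsigned int nblocks);   /* stand-in for fn_u.xts()   */
};

static void do_blocks(unsigned int nblocks)
{
        printf("processed %u block(s)\n", nblocks);
}

int main(void)
{
        const unsigned int bsize = 16;                  /* 128-bit blocks  */
        struct batch_fn funcs[] = { { 8, do_blocks }, { 1, do_blocks } };
        unsigned int nbytes = 9 * bsize + 5;            /* 9 blocks + tail */

        for (unsigned int i = 0; i < sizeof(funcs) / sizeof(funcs[0]); i++) {
                unsigned int func_bytes = bsize * funcs[i].num_blocks;

                while (nbytes >= func_bytes) {
                        funcs[i].fn(funcs[i].num_blocks);
                        nbytes -= func_bytes;
                }
                if (nbytes < bsize)
                        break;
        }
        printf("%u byte(s) left for the walk to handle\n", nbytes);
        return 0;
}
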
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index 9e5b67127a09..acf9fdf01671 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -114,7 +114,7 @@ static inline void sha1_init_digest(uint32_t *digest)
}
static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
- uint32_t total_len)
+ uint64_t total_len)
{
uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index 98a35bcc6f4a..13590ccf965c 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -125,7 +125,7 @@ struct sha1_hash_ctx {
/* error flag */
int error;
- uint32_t total_length;
+ uint64_t total_length;
const void *incoming_buffer;
uint32_t incoming_buffer_length;
uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 6f97fb33ae21..7926a226b120 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -115,7 +115,7 @@ inline void sha256_init_digest(uint32_t *digest)
}
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
- uint32_t total_len)
+ uint64_t total_len)
{
uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
index edd252b73206..aabb30320af0 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -125,7 +125,7 @@ struct sha256_hash_ctx {
/* error flag */
int error;
- uint32_t total_length;
+ uint64_t total_length;
const void *incoming_buffer;
uint32_t incoming_buffer_length;
uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index d210174a52b0..9c1bb6d58141 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -117,7 +117,7 @@ inline void sha512_init_digest(uint64_t *digest)
}
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
- uint32_t total_len)
+ uint64_t total_len)
{
uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
index 9d4b2c8208d5..e4653f5eec3f 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -119,7 +119,7 @@ struct sha512_hash_ctx {
/* error flag */
int error;
- uint32_t total_length;
+ uint64_t total_length;
const void *incoming_buffer;
uint32_t incoming_buffer_length;
uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
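
The uint32_t to uint64_t widening of total_length (and of the matching sha*_pad() parameter) matters once more than 4 GiB has been hashed: a 32-bit byte counter wraps, so the length encoded into the final pad block would be wrong. A small worked example of the truncation, using an arbitrary 5 GiB input:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t total_len64 = 5ULL << 30;            /* 5 GiB hashed so far */
        uint32_t total_len32 = (uint32_t)total_len64; /* wraps to 1 GiB      */

        printf("64-bit length: %llu bytes\n", (unsigned long long)total_len64);
        printf("32-bit length: %u bytes (wrapped)\n", total_len32);
        printf("bit length in the pad block would be off by %llu bits\n",
               (unsigned long long)((total_len64 - total_len32) * 8));
        return 0;
}
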
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 9a9e5884066c..05ed3d393da7 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -90,8 +90,8 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8
- .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
- addq $-(15*8+\addskip), %rsp
+ .macro ALLOC_PT_GPREGS_ON_STACK
+ addq $-(15*8), %rsp
.endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
@@ -147,15 +147,6 @@ For 32-bit we have the following conventions - kernel is built with
movq 5*8+\offset(%rsp), %rbx
.endm
- .macro ZERO_EXTRA_REGS
- xorl %r15d, %r15d
- xorl %r14d, %r14d
- xorl %r13d, %r13d
- xorl %r12d, %r12d
- xorl %ebp, %ebp
- xorl %ebx, %ebx
- .endm
-
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11
movq 6*8(%rsp), %r11
@@ -201,6 +192,26 @@ For 32-bit we have the following conventions - kernel is built with
.byte 0xf1
.endm
+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
+ * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * the original rbp.
+ */
+.macro ENCODE_FRAME_POINTER ptregs_offset=0
+#ifdef CONFIG_FRAME_POINTER
+ .if \ptregs_offset
+ leaq \ptregs_offset(%rsp), %rbp
+ .else
+ mov %rsp, %rbp
+ .endif
+ orq $0x1, %rbp
+#endif
+.endm
+
#endif /* CONFIG_X86_64 */
/*
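
The ENCODE_FRAME_POINTER comment describes a tagging scheme: setting the low bit turns the saved frame pointer into a marker that actually points at pt_regs. Below is a stand-alone sketch of how a consumer could recognise and decode such a value; the structure layout and helper names are invented for illustration and this is not the kernel's unwinder code.

#include <stdint.h>
#include <stdio.h>

struct pt_regs_stub { unsigned long ip, sp; };   /* placeholder layout */

static unsigned long encode_frame_pointer(struct pt_regs_stub *regs)
{
        return (unsigned long)regs | 0x1;            /* like "orq $0x1, %rbp" */
}

static struct pt_regs_stub *decode_frame_pointer(unsigned long bp)
{
        if (!(bp & 0x1))
                return NULL;                         /* ordinary frame pointer */
        return (struct pt_regs_stub *)(bp & ~0x1UL); /* pt_regs in disguise    */
}

int main(void)
{
        struct pt_regs_stub regs = { .ip = 0x1234, .sp = 0x5678 };
        unsigned long bp = encode_frame_pointer(&regs);
        struct pt_regs_stub *found = decode_frame_pointer(bp);

        printf("encoded bp has LSB set: %d\n", (int)(bp & 1));
        printf("decoded ip = %#lx\n", found ? found->ip : 0UL);
        return 0;
}
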
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21b352a11b49..acc0c6f36f3f 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -45,6 +45,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
+#include <asm/frame.h>
.section .entry.text, "ax"
@@ -175,6 +176,22 @@
SET_KERNEL_GS %edx
.endm
+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
+ * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
+ * original rbp.
+ */
+.macro ENCODE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
+ mov %esp, %ebp
+ orl $0x1, %ebp
+#endif
+.endm
+
.macro RESTORE_INT_REGS
popl %ebx
popl %ecx
@@ -238,6 +255,23 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because it's an
+ * asmlinkage function so its argument has to be pushed on the stack. This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+ FRAME_BEGIN
+
+ pushl %eax
+ call schedule_tail
+ popl %eax
+
+ FRAME_END
+ ret
+ENDPROC(schedule_tail_wrapper)
+/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -245,9 +279,7 @@ END(__switch_to_asm)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- pushl %eax
- call schedule_tail
- popl %eax
+ call schedule_tail_wrapper
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
@@ -307,13 +339,13 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
jnz restore_all
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
- jmp need_resched
+ jmp .Lneed_resched
END(resume_kernel)
#endif
@@ -334,7 +366,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
*/
ENTRY(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
- jmp sysenter_past_esp
+ jmp .Lsysenter_past_esp
#endif
/*
@@ -371,7 +403,7 @@ ENTRY(xen_sysenter_target)
*/
ENTRY(entry_SYSENTER_32)
movl TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
pushfl /* pt_regs->flags (except IF = 0) */
@@ -504,9 +536,9 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp restore_nocheck", "", X86_BUG_ESPFIX
+ ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
/*
@@ -518,22 +550,23 @@ restore_all_notrace:
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je ldt_ss # returning to user-space with LDT SS
+ je .Lldt_ss # returning to user-space with LDT SS
#endif
-restore_nocheck:
+.Lrestore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
-irq_return:
+.Lirq_return:
INTERRUPT_RETURN
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
- jmp error_code
+ jmp common_exception
.previous
- _ASM_EXTABLE(irq_return, iret_exc)
+ _ASM_EXTABLE(.Lirq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
/*
* Setup and switch to ESPFIX stack
*
@@ -562,7 +595,7 @@ ldt_ss:
*/
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
- jmp restore_nocheck
+ jmp .Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)
@@ -624,6 +657,7 @@ common_interrupt:
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
movl %esp, %eax
call do_IRQ
@@ -635,6 +669,7 @@ ENTRY(name) \
ASM_CLAC; \
pushl $~(nr); \
SAVE_ALL; \
+ ENCODE_FRAME_POINTER; \
TRACE_IRQS_OFF \
movl %esp, %eax; \
call fn; \
@@ -659,7 +694,7 @@ ENTRY(coprocessor_error)
ASM_CLAC
pushl $0
pushl $do_coprocessor_error
- jmp error_code
+ jmp common_exception
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
@@ -673,14 +708,14 @@ ENTRY(simd_coprocessor_error)
#else
pushl $do_simd_coprocessor_error
#endif
- jmp error_code
+ jmp common_exception
END(simd_coprocessor_error)
ENTRY(device_not_available)
ASM_CLAC
pushl $-1 # mark this as an int
pushl $do_device_not_available
- jmp error_code
+ jmp common_exception
END(device_not_available)
#ifdef CONFIG_PARAVIRT
@@ -694,59 +729,59 @@ ENTRY(overflow)
ASM_CLAC
pushl $0
pushl $do_overflow
- jmp error_code
+ jmp common_exception
END(overflow)
ENTRY(bounds)
ASM_CLAC
pushl $0
pushl $do_bounds
- jmp error_code
+ jmp common_exception
END(bounds)
ENTRY(invalid_op)
ASM_CLAC
pushl $0
pushl $do_invalid_op
- jmp error_code
+ jmp common_exception
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
ASM_CLAC
pushl $0
pushl $do_coprocessor_segment_overrun
- jmp error_code
+ jmp common_exception
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
ASM_CLAC
pushl $do_invalid_TSS
- jmp error_code
+ jmp common_exception
END(invalid_TSS)
ENTRY(segment_not_present)
ASM_CLAC
pushl $do_segment_not_present
- jmp error_code
+ jmp common_exception
END(segment_not_present)
ENTRY(stack_segment)
ASM_CLAC
pushl $do_stack_segment
- jmp error_code
+ jmp common_exception
END(stack_segment)
ENTRY(alignment_check)
ASM_CLAC
pushl $do_alignment_check
- jmp error_code
+ jmp common_exception
END(alignment_check)
ENTRY(divide_error)
ASM_CLAC
pushl $0 # no error code
pushl $do_divide_error
- jmp error_code
+ jmp common_exception
END(divide_error)
#ifdef CONFIG_X86_MCE
@@ -754,7 +789,7 @@ ENTRY(machine_check)
ASM_CLAC
pushl $0
pushl machine_check_vector
- jmp error_code
+ jmp common_exception
END(machine_check)
#endif
@@ -762,13 +797,14 @@ ENTRY(spurious_interrupt_bug)
ASM_CLAC
pushl $0
pushl $do_spurious_interrupt_bug
- jmp error_code
+ jmp common_exception
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
/*
@@ -823,6 +859,7 @@ ENTRY(xen_failsafe_callback)
jmp iret_exc
5: pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
+ ENCODE_FRAME_POINTER
jmp ret_from_exception
.section .fixup, "ax"
@@ -882,7 +919,7 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
-ftrace_ret:
+.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -952,7 +989,7 @@ GLOBAL(ftrace_regs_call)
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
- jmp ftrace_ret
+ jmp .Lftrace_ret
popf
jmp ftrace_stub
@@ -963,7 +1000,7 @@ ENTRY(mcount)
jb ftrace_stub /* Paging not enabled yet? */
cmpl $ftrace_stub, ftrace_trace_function
- jnz trace
+ jnz .Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
@@ -976,7 +1013,7 @@ ftrace_stub:
ret
/* taken from glibc */
-trace:
+.Ltrace:
pushl %eax
pushl %ecx
pushl %edx
@@ -1027,7 +1064,7 @@ return_to_handler:
ENTRY(trace_page_fault)
ASM_CLAC
pushl $trace_do_page_fault
- jmp error_code
+ jmp common_exception
END(trace_page_fault)
#endif
@@ -1035,7 +1072,10 @@ ENTRY(page_fault)
ASM_CLAC
pushl $do_page_fault
ALIGN
-error_code:
+ jmp common_exception
+END(page_fault)
+
+common_exception:
/* the function address is in %gs's slot on the stack */
pushl %fs
pushl %es
@@ -1047,6 +1087,7 @@ error_code:
pushl %edx
pushl %ecx
pushl %ebx
+ ENCODE_FRAME_POINTER
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
@@ -1064,7 +1105,7 @@ error_code:
movl %esp, %eax # pt_regs pointer
call *%edi
jmp ret_from_exception
-END(page_fault)
+END(common_exception)
ENTRY(debug)
/*
@@ -1079,6 +1120,7 @@ ENTRY(debug)
ASM_CLAC
pushl $-1 # mark this as an int
SAVE_ALL
+ ENCODE_FRAME_POINTER
xorl %edx, %edx # error code 0
movl %esp, %eax # pt_regs pointer
@@ -1094,11 +1136,11 @@ ENTRY(debug)
.Ldebug_from_sysenter_stack:
/* We're on the SYSENTER stack. Switch off. */
- movl %esp, %ebp
+ movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
TRACE_IRQS_OFF
call do_debug
- movl %ebp, %esp
+ movl %ebx, %esp
jmp ret_from_exception
END(debug)
@@ -1116,11 +1158,12 @@ ENTRY(nmi)
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl %eax
- je nmi_espfix_stack
+ je .Lnmi_espfix_stack
#endif
pushl %eax # pt_regs->orig_ax
SAVE_ALL
+ ENCODE_FRAME_POINTER
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1132,21 +1175,21 @@ ENTRY(nmi)
/* Not on SYSENTER stack. */
call do_nmi
- jmp restore_all_notrace
+ jmp .Lrestore_all_notrace
.Lnmi_from_sysenter_stack:
/*
* We're on the SYSENTER stack. Switch off. No one (not even debug)
* is using the thread stack right now, so it's safe for us to use it.
*/
- movl %esp, %ebp
+ movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
- movl %ebp, %esp
- jmp restore_all_notrace
+ movl %ebx, %esp
+ jmp .Lrestore_all_notrace
#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
/*
* create the pointer to lss back
*/
@@ -1159,12 +1202,13 @@ nmi_espfix_stack:
.endr
pushl %eax
SAVE_ALL
+ ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
- jmp irq_return
+ jmp .Lirq_return
#endif
END(nmi)
@@ -1172,6 +1216,7 @@ ENTRY(int3)
ASM_CLAC
pushl $-1 # mark this as an int
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1181,14 +1226,14 @@ END(int3)
ENTRY(general_protection)
pushl $do_general_protection
- jmp error_code
+ jmp common_exception
END(general_protection)
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
ASM_CLAC
pushl $do_async_page_fault
- jmp error_code
+ jmp common_exception
END(async_page_fault)
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ef766a358b37..5b219707c2f2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -38,12 +38,6 @@
#include <asm/export.h>
#include <linux/err.h>
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_64BIT 0x80000000
-#define __AUDIT_ARCH_LE 0x40000000
-
.code64
.section .entry.text, "ax"
@@ -469,6 +463,7 @@ END(irq_entries_start)
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
SAVE_EXTRA_REGS
+ ENCODE_FRAME_POINTER
testb $3, CS(%rsp)
jz 1f
@@ -985,6 +980,7 @@ ENTRY(xen_failsafe_callback)
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
SAVE_EXTRA_REGS
+ ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
@@ -1028,6 +1024,7 @@ ENTRY(paranoid_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
+ ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
rdmsr
@@ -1075,6 +1072,7 @@ ENTRY(error_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
+ ENCODE_FRAME_POINTER 8
xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1257,6 +1255,7 @@ ENTRY(nmi)
pushq %r13 /* pt_regs->r13 */
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
+ ENCODE_FRAME_POINTER
/*
* At this point we no longer need to worry about stack damage
@@ -1270,11 +1269,10 @@ ENTRY(nmi)
/*
* Return back to user mode. We must *not* do the normal exit
- * work, because we don't want to enable interrupts. Fortunately,
- * do_nmi doesn't modify pt_regs.
+ * work, because we don't want to enable interrupts.
*/
SWAPGS
- jmp restore_c_regs_and_iret
+ jmp restore_regs_and_iret
.Lnmi_from_kernel:
/*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 23c881caabd1..40121d14d34d 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -109,7 +109,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
return VM_FAULT_SIGBUS;
if (sym_offset == image->sym_vvar_page) {
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+ ret = vm_insert_pfn(vma, vmf->address,
__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
} else if (sym_offset == image->sym_pvclock_page) {
struct pvclock_vsyscall_time_info *pvti =
@@ -117,7 +117,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
ret = vm_insert_pfn(
vma,
- (unsigned long)vmf->virtual_address,
+ vmf->address,
__pa(pvti) >> PAGE_SHIFT);
}
}
@@ -161,8 +161,6 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
text_start = addr - image->sym_vvar_start;
- current->mm->context.vdso = (void __user *)text_start;
- current->mm->context.vdso_image = image;
/*
* MAYWRITE to allow gdb to COW and set breakpoints
@@ -189,14 +187,12 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
do_munmap(mm, text_start, image->size);
+ } else {
+ current->mm->context.vdso = (void __user *)text_start;
+ current->mm->context.vdso_image = image;
}
up_fail:
- if (ret) {
- current->mm->context.vdso = NULL;
- current->mm->context.vdso_image = NULL;
- }
-
up_write(&mm->mmap_sem);
return ret;
}
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f5f4b3fbbbc2..afb222b63cae 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
pr_cont("Fam15h ");
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
break;
-
+ case 0x17:
+ pr_cont("Fam17h ");
+ /*
+ * In family 17h, there are no event constraints in the PMC hardware.
+ * We fall back to using the default amd_get_event_constraints.
+ */
+ break;
default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..f1c22584a46f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -69,7 +69,7 @@ u64 x86_perf_event_update(struct perf_event *event)
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
- s64 delta;
+ u64 delta;
if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
@@ -365,7 +365,11 @@ int x86_add_exclusive(unsigned int what)
{
int i;
- if (x86_pmu.lbr_pt_coexist)
+ /*
+ * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
+ * LBR and BTS are still mutually exclusive.
+ */
+ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return 0;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
@@ -388,7 +392,7 @@ fail_unlock:
void x86_del_exclusive(unsigned int what)
{
- if (x86_pmu.lbr_pt_coexist)
+ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
@@ -2299,7 +2303,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
static unsigned long get_segment_base(unsigned int segment)
{
struct desc_struct *desc;
- int idx = segment >> 3;
+ unsigned int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
@@ -2352,7 +2356,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
frame.next_frame = 0;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 8))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2366,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, cs_base + frame.return_address);
fp = compat_ptr(ss_base + frame.next_frame);
}
@@ -2413,7 +2414,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2424,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, frame.return_address);
fp = (void __user *)frame.next_frame;
}
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a74a2dbc0180..cb8522290e6a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4034,7 +4034,7 @@ __init int intel_pmu_init(void)
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
- x86_pmu.max_period = x86_pmu.cntval_mask;
+ x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
x86_pmu.perfctr = MSR_IA32_PMC0;
pr_cont("full-width counters, ");
}
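
The max_period change caps the sampling period at half of the counter range when full-width counter writes are available; this appears intended to keep programmed values out of the counter's top half, where the update path's shift and sign handling could misread them. A worked example with a 48-bit counter width (the width is just a typical value, assumed for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int cntval_bits = 48;                         /* assumed counter width */
        uint64_t cntval_mask = (1ULL << cntval_bits) - 1;

        uint64_t old_max_period = cntval_mask;        /* before the patch */
        uint64_t new_max_period = cntval_mask >> 1;   /* after the patch  */

        printf("counter mask:   %#llx\n", (unsigned long long)cntval_mask);
        printf("old max period: %#llx\n", (unsigned long long)old_max_period);
        printf("new max period: %#llx (half the counter range)\n",
               (unsigned long long)new_max_period);
        return 0;
}
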
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4f5ac726335f..da51e5a3e2ff 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -540,6 +540,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
/*
- * We use the interrupt regs as a base because the PEBS record
- * does not contain a full regs set, specifically it seems to
- * lack segment descriptors, which get used by things like
- * user_mode().
+ * We use the interrupt regs as a base because the PEBS record does not
+ * contain a full regs set, specifically it seems to lack segment
+ * descriptors, which get used by things like user_mode().
*
- * In the simple case fix up only the IP and BP,SP regs, for
- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+ *
+ * We must however always use BP,SP from iregs for the unwinder to stay
+ * sane; the record BP,SP can point into thin air when the record is
+ * from a previous PMI context or an (I)RET happened between the record
+ * and PMI.
*/
*regs = *iregs;
regs->flags = pebs->flags;
set_linear_ip(regs, pebs->ip);
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->dx = pebs->dx;
regs->si = pebs->si;
regs->di = pebs->di;
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
- regs->flags = pebs->flags;
+ /*
+ * Per the above; only set BP,SP if we don't need callchains.
+ *
+ * XXX: does this make sense?
+ */
+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ regs->bp = pebs->bp;
+ regs->sp = pebs->sp;
+ }
+
+ /*
+ * Preserve PERF_EFLAGS_VM from set_linear_ip().
+ */
+ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;
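
Because the BP/SP handling is spread over two hunks above, here is a condensed sketch of the resulting policy in setup_pebs_sample_data(): start from the interrupt regs, take the precise IP from the PEBS record, and copy BP/SP from the record only when no callchain is requested. Names are shortened stand-ins and the PERF_EFLAGS_VM preservation detail is omitted; this is a reading aid, not the kernel function.

#include <stdio.h>

struct regs_stub { unsigned long ip, bp, sp, flags; };

#define SAMPLE_REGS_INTR  0x1u
#define SAMPLE_CALLCHAIN  0x2u

static void fixup_sample_regs(struct regs_stub *regs,
                              const struct regs_stub *iregs,
                              const struct regs_stub *pebs,
                              unsigned int sample_type)
{
        *regs = *iregs;          /* base: interrupt regs (valid segments, BP/SP) */
        regs->ip = pebs->ip;     /* precise IP from the PEBS record              */

        if (sample_type & SAMPLE_REGS_INTR) {
                if (!(sample_type & SAMPLE_CALLCHAIN)) {
                        regs->bp = pebs->bp;   /* unwinder not needed here */
                        regs->sp = pebs->sp;
                }
                regs->flags = pebs->flags;
        }
}

int main(void)
{
        struct regs_stub iregs = { .ip = 1, .bp = 2, .sp = 3, .flags = 4 };
        struct regs_stub pebs  = { .ip = 9, .bp = 8, .sp = 7, .flags = 6 };
        struct regs_stub out;

        fixup_sample_regs(&out, &iregs, &pebs,
                          SAMPLE_REGS_INTR | SAMPLE_CALLCHAIN);
        printf("ip=%lu bp=%lu sp=%lu (BP/SP kept from iregs)\n",
               out.ip, out.bp, out.sp);
        return 0;
}
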
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c5047b8f777b..1c1b9fe705c8 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -36,13 +36,6 @@ static DEFINE_PER_CPU(struct pt, pt_ctx);
static struct pt_pmu pt_pmu;
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
-};
-
/*
* Capabilities of Intel PT hardware, such as number of address bits or
* supported output schemes, are cached and exported to userspace as "caps"
@@ -64,21 +57,21 @@ static struct pt_cap_desc {
u8 reg;
u32 mask;
} pt_caps[] = {
- PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff),
- PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)),
- PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)),
- PT_CAP(ip_filtering, 0, CR_EBX, BIT(2)),
- PT_CAP(mtc, 0, CR_EBX, BIT(3)),
- PT_CAP(ptwrite, 0, CR_EBX, BIT(4)),
- PT_CAP(power_event_trace, 0, CR_EBX, BIT(5)),
- PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
- PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
- PT_CAP(single_range_output, 0, CR_ECX, BIT(2)),
- PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)),
- PT_CAP(num_address_ranges, 1, CR_EAX, 0x3),
- PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000),
- PT_CAP(cycle_thresholds, 1, CR_EBX, 0xffff),
- PT_CAP(psb_periods, 1, CR_EBX, 0xffff0000),
+ PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff),
+ PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)),
+ PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)),
+ PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)),
+ PT_CAP(mtc, 0, CPUID_EBX, BIT(3)),
+ PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)),
+ PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)),
+ PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
+ PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
+ PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
+ PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
+ PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
+ PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
+ PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
static u32 pt_cap_get(enum pt_capabilities cap)
@@ -213,10 +206,10 @@ static int __init pt_pmu_hw_init(void)
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
- &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
+ &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
}
ret = -ENOMEM;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
*/
static int uncore_pmu_event_init(struct perf_event *event);
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
- return event->pmu->event_init == uncore_pmu_event_init;
+ return &box->pmu->pmu == event->pmu;
}
static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
n = box->n_events;
- if (is_uncore_event(leader)) {
+ if (is_box_event(box, leader)) {
box->event_list[n] = leader;
n++;
}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (!is_uncore_event(event) ||
+ if (!is_box_event(box, event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 5f845eef9a4d..a3dcc12bef4a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -8,8 +8,12 @@
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
-#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f
-#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
+#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
+#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
+#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
+#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
snb_uncore_imc_event_start(event, 0);
- box->n_events++;
-
return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- int i;
-
snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
- for (i = 0; i < box->n_events; i++) {
- if (event == box->event_list[i]) {
- --box->n_events;
- break;
- }
- }
}
int snb_pci2phy_map_init(int devid)
@@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* IMC */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
- IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */
+ IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
+ IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
+ IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
+ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
+ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
{ /* end marker */ }
};
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..bcbb1d2ae10b 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
* Per register state.
*/
struct er_account {
- raw_spinlock_t lock; /* per-core: protect structure */
+ raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
@@ -604,7 +604,7 @@ struct x86_pmu {
u64 lbr_sel_mask; /* LBR_SELECT valid bits */
const int *lbr_sel_map; /* lbr_select mappings */
bool lbr_double_abort; /* duplicated lbr aborts */
- bool lbr_pt_coexist; /* LBR may coexist with PT */
+ bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
/*
* Intel PT/LBR/BTS are exclusive
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 2cfed174e3c9..2b892e2313a9 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -6,10 +6,6 @@ generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
-genhdr-y += unistd_32.h
-genhdr-y += unistd_64.h
-genhdr-y += unistd_x32.h
-
generic-y += clkdev.h
generic-y += cputime.h
generic-y += dma-contiguous.h
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 5391b0ae7cc3..395b69551fce 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
- else if (amd_e400_c1e_detected)
+ else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
return 1;
else
return max_cstate;
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 5e828da2e18f..00c88a01301d 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -21,6 +21,10 @@ extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long);
+extern int amd_smn_read(u16 node, u32 address, u32 *value);
+extern int amd_smn_write(u16 node, u32 address, u32 value);
+extern int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
+
struct amd_l3_cache {
unsigned indices;
u8 subcaches[4];
@@ -55,6 +59,7 @@ struct threshold_bank {
};
struct amd_northbridge {
+ struct pci_dev *root;
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
@@ -66,7 +71,6 @@ struct amd_northbridge_info {
u64 flags;
struct amd_northbridge *nb;
};
-extern struct amd_northbridge_info amd_northbridges;
#define AMD_NB_GART BIT(0)
#define AMD_NB_L3_INDEX_DISABLE BIT(1)
@@ -74,20 +78,9 @@ extern struct amd_northbridge_info amd_northbridges;
#ifdef CONFIG_AMD_NB
-static inline u16 amd_nb_num(void)
-{
- return amd_northbridges.num;
-}
-
-static inline bool amd_nb_has_feature(unsigned feature)
-{
- return ((amd_northbridges.flags & feature) == feature);
-}
-
-static inline struct amd_northbridge *node_to_amd_nb(int node)
-{
- return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
-}
+u16 amd_nb_num(void);
+bool amd_nb_has_feature(unsigned int feature);
+struct amd_northbridge *node_to_amd_nb(int node);
static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
{
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f5aaf6c83222..0c5fbc68e82d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,7 +11,6 @@
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
-#include <asm/idle.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -196,7 +195,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
{
- wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}
static inline u32 native_apic_msr_read(u32 reg)
@@ -332,6 +331,7 @@ struct apic {
* on write for EOI.
*/
void (*eoi_write)(u32 reg, u32 v);
+ void (*native_eoi_write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
@@ -639,7 +639,6 @@ extern void irq_exit(void);
static inline void entering_irq(void)
{
irq_enter();
- exit_idle();
}
static inline void entering_ack_irq(void)
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 03d269bed941..24118c0b4640 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -272,7 +272,6 @@ struct compat_shmid64_ds {
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
-#ifdef CONFIG_X86_X32_ABI
typedef struct user_regs_struct compat_elf_gregset_t;
/* Full regset -- prstatus on x32, otherwise on ia32 */
@@ -281,10 +280,9 @@ typedef struct user_regs_struct compat_elf_gregset_t;
do { *(int *) (((void *) &((S)->pr_reg)) + R) = (V); } \
while (0)
+#ifdef CONFIG_X86_X32_ABI
#define COMPAT_USE_64BIT_TIME \
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
-#else
-typedef struct user_regs_struct32 compat_elf_gregset_t;
#endif
/*
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 1d2b69fc0ceb..d59c15c3defd 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -204,6 +204,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit)
#define static_cpu_has_bug(bit) static_cpu_has((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
+#define boot_cpu_set_bug(bit) set_cpu_cap(&boot_cpu_data, (bit))
#define MAX_CPU_FEATURES (NCAPINTS * 32)
#define cpu_have_feature boot_cpu_has
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index a39629206864..59ac427960d4 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -193,6 +192,7 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
@@ -226,6 +226,7 @@
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -279,8 +280,10 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions */
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
@@ -311,4 +314,6 @@
#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 03bb1065c335..29e53ea7d764 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -5,8 +5,8 @@
#ifndef _CRYPTO_GLUE_HELPER_H
#define _CRYPTO_GLUE_HELPER_H
+#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
-#include <linux/crypto.h>
#include <asm/fpu/api.h>
#include <crypto/b128ops.h>
@@ -69,6 +69,31 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
return true;
}
+static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
+ int fpu_blocks_limit,
+ struct skcipher_walk *walk,
+ bool fpu_enabled, unsigned int nbytes)
+{
+ if (likely(fpu_blocks_limit < 0))
+ return false;
+
+ if (fpu_enabled)
+ return true;
+
+ /*
+ * Vector-registers are only used when chunk to be processed is large
+ * enough, so do not enable FPU until it is necessary.
+ */
+ if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
+ return false;
+
+ /* prevent sleeping if FPU is in use */
+ skcipher_walk_atomise(walk);
+
+ kernel_fpu_begin();
+ return true;
+}
+
static inline void glue_fpu_end(bool fpu_enabled)
{
if (fpu_enabled)
@@ -139,6 +164,18 @@ extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx);
+extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ common_glue_func_t tweak_fn, void *tweak_ctx,
+ void *crypt_ctx);
+
+extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req,
+ common_glue_func_t tweak_fn, void *tweak_ctx,
+ void *crypt_ctx);
+
extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
le128 *iv, common_glue_func_t fn);
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 476b574de99e..ec23d8e1297c 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,13 +1,17 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
-#ifdef CONFIG_EFI
+/*
+ * E820_X_MAX is the maximum size of the extended E820 table. The extended
+ * table may contain up to 3 extra E820 entries per possible NUMA node, so we
+ * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128.
+ * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h.
+ */
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
-#else /* ! CONFIG_EFI */
-#define E820_X_MAX E820MAX
-#endif
+
#include <uapi/asm/e820.h>
+
#ifndef __ASSEMBLY__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map *e820;
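
A quick worked example of the E820_X_MAX sizing described in the comment above. The standard 128 entries (E820MAX) and the three-entries-per-node factor come from that comment; the MAX_NUMNODES value below is just an illustrative kernel configuration.

#include <stdio.h>

int main(void)
{
        int e820max = 128;         /* standard table size (E820MAX)         */
        int max_numnodes = 64;     /* example, e.g. CONFIG_NODES_SHIFT = 6  */
        int e820_x_max = e820max + 3 * max_numnodes;

        printf("E820_X_MAX = %d + 3 * %d = %d entries\n",
               e820max, max_numnodes, e820_x_max);
        return 0;
}
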
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 389d700b961e..e99675b9c861 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -210,12 +210,18 @@ static inline bool efi_is_64bit(void)
return __efi_early()->is64;
}
+#define efi_table_attr(table, attr, instance) \
+ (efi_is_64bit() ? \
+ ((table##_64_t *)(unsigned long)instance)->attr : \
+ ((table##_32_t *)(unsigned long)instance)->attr)
+
+#define efi_call_proto(protocol, f, instance, ...) \
+ __efi_early()->call(efi_table_attr(protocol, f, instance), \
+ instance, ##__VA_ARGS__)
+
#define efi_call_early(f, ...) \
- __efi_early()->call(efi_is_64bit() ? \
- ((efi_boot_services_64_t *)(unsigned long) \
- __efi_early()->boot_services)->f : \
- ((efi_boot_services_32_t *)(unsigned long) \
- __efi_early()->boot_services)->f, __VA_ARGS__)
+ __efi_early()->call(efi_table_attr(efi_boot_services, f, \
+ __efi_early()->boot_services), __VA_ARGS__)
#define __efi_call_early(f, ...) \
__efi_early()->call((unsigned long)f, __VA_ARGS__);
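
The efi_table_attr()/efi_call_proto() macros above resolve the same member name from either the 32-bit or the 64-bit layout of an EFI table, chosen at run time. The stand-alone sketch below reproduces only that idiom; the demo_proto_* structures and is64() helper are invented, and nothing here is the EFI API itself.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t query_mode; uint32_t set_mode; } demo_proto_32_t;
typedef struct { uint64_t query_mode; uint64_t set_mode; } demo_proto_64_t;

static int is64(void) { return 1; }     /* stand-in for efi_is_64bit() */

#define demo_table_attr(table, attr, instance)                  \
        (is64() ?                                               \
         ((table##_64_t *)(unsigned long)(instance))->attr :    \
         ((table##_32_t *)(unsigned long)(instance))->attr)

int main(void)
{
        demo_proto_64_t proto = { .query_mode = 0x1000, .set_mode = 0x2000 };
        unsigned long long fn = demo_table_attr(demo_proto, query_mode, &proto);

        printf("selected member: %#llx\n", fn);
        return 0;
}
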
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 1429a7c736db..0877ae018fc9 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -27,16 +27,6 @@ extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
/*
- * Some instructions like VIA's padlock instructions generate a spurious
- * DNA fault but don't modify SSE registers. And these instructions
- * get used from interrupt context as well. To prevent these kernel instructions
- * in interrupt context interacting wrongly with other user/kernel fpu usage, we
- * should use them only in the context of irq_ts_save/restore()
- */
-extern int irq_ts_save(void);
-extern void irq_ts_restore(int TS_state);
-
-/*
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
*
* If 'feature_name' is set then put a human-readable description of
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 2737366ea583..d4a684997497 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return static_cpu_has(X86_FEATURE_EAGER_FPU);
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -484,42 +479,42 @@ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size)
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
- * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
- * on this CPU.
+ * The in-register FPU state for an FPU context on a CPU is assumed to be
+ * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
+ * matches the FPU.
*
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
+ * If the FPU register state is valid, the kernel can skip restoring the
+ * FPU state from memory.
+ *
+ * Any code that clobbers the FPU registers or updates the in-memory
+ * FPU state for a task MUST let the rest of the kernel know that the
+ * FPU registers are no longer valid for this task.
+ *
+ * Either one of these invalidation functions is enough. Invalidate
+ * a resource you control: CPU if using the CPU for something else
+ * (with preemption disabled), FPU for the current task, or a task that
+ * is prevented from running by the current task.
*/
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+static inline void __cpu_invalidate_fpregs_state(void)
{
- per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
+ __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
-static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
-{
- return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
-}
-
-
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
+static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
- if (!use_eager_fpu())
- clts();
+ fpu->last_cpu = -1;
}
-static inline void __fpregs_deactivate_hw(void)
+static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
- if (!use_eager_fpu())
- stts();
+ return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
-static inline void __fpregs_deactivate(struct fpu *fpu)
+/*
+ * These generally need preemption protection to work;
+ * try to avoid using them on their own:
+ */
+static inline void fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,8 +523,7 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
trace_x86_fpu_regs_deactivated(fpu);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
-static inline void __fpregs_activate(struct fpu *fpu)
+static inline void fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
@@ -554,51 +548,19 @@ static inline int fpregs_active(void)
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void fpregs_activate(struct fpu *fpu)
-{
- __fpregs_activate_hw();
- __fpregs_activate(fpu);
-}
-
-static inline void fpregs_deactivate(struct fpu *fpu)
-{
- __fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
-}
-
-/*
* FPU state switching for scheduling.
*
* This is a two-stage process:
*
- * - switch_fpu_prepare() saves the old state and
- * sets the new state of the CR0.TS bit. This is
- * done within the context of the old process.
+ * - switch_fpu_prepare() saves the old state.
+ * This is done within the context of the old process.
*
* - switch_fpu_finish() restores the new state as
* necessary.
*/
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- fpu_switch_t fpu;
-
- /*
- * If the task has used the math, pre-load the FPU on xsave processors
- * or if the past 5 consecutive context-switches used math.
- */
- fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -608,29 +570,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
-
- /* Don't change CR0.TS if we just switch! */
- if (fpu.preload) {
- new_fpu->counter++;
- __fpregs_activate(new_fpu);
- trace_x86_fpu_regs_activated(new_fpu);
- prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
- }
- } else {
- old_fpu->counter = 0;
+ } else
old_fpu->last_cpu = -1;
- if (fpu.preload) {
- new_fpu->counter++;
- if (fpu_want_lazy_restore(new_fpu, cpu))
- fpu.preload = 0;
- else
- prefetch(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
- }
- return fpu;
}
/*
@@ -638,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
*/
/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
*/
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
- if (fpu_switch.preload)
- copy_kernel_to_fpregs(&new_fpu->state);
+ bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active;
+
+ if (preload) {
+ if (!fpregs_state_valid(new_fpu, cpu))
+ copy_kernel_to_fpregs(&new_fpu->state);
+ fpregs_activate(new_fpu);
+ }
}
/*
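The rewritten comment describes a two-key check: the in-register state belongs to a task only while the per-CPU owner pointer still points at its struct fpu and fpu->last_cpu still names this CPU, and either side may drop its own key. A compact single-CPU model of that contract (per-CPU storage collapsed to one variable; all names below are local to the sketch):

#include <stdbool.h>
#include <stdio.h>

struct fpu { int last_cpu; };

/* The per-CPU owner pointer, collapsed to a single CPU for the model. */
static struct fpu *fpu_fpregs_owner_ctx;

static void cpu_invalidate(void)            { fpu_fpregs_owner_ctx = NULL; }
static void fpu_invalidate(struct fpu *fpu) { fpu->last_cpu = -1; }

static bool fpregs_state_valid(struct fpu *fpu, int cpu)
{
        return fpu == fpu_fpregs_owner_ctx && cpu == fpu->last_cpu;
}

int main(void)
{
        struct fpu task = { .last_cpu = 0 };

        fpu_fpregs_owner_ctx = &task;                  /* regs loaded for task on CPU 0 */
        printf("%d\n", fpregs_state_valid(&task, 0));  /* 1: restore can be skipped */

        cpu_invalidate();                              /* CPU clobbered the registers */
        printf("%d\n", fpregs_state_valid(&task, 0));  /* 0: reload from memory */

        fpu_fpregs_owner_ctx = &task;                  /* valid again ...               */
        fpu_invalidate(&task);                         /* ... until the in-memory state changes */
        printf("%d\n", fpregs_state_valid(&task, 0));  /* 0 */
        return 0;
}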
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486b02f9..3c80f5b9c09d 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@ struct fpu {
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
@@ -340,29 +329,6 @@ struct fpu {
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 430bacf73074..1b2799e0699a 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -21,21 +21,16 @@
/* Supervisor features */
#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT)
-/* Supported features which support lazy state saving */
-#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
+/* All currently supported features */
+#define XCNTXT_MASK (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
- XFEATURE_MASK_Hi16_ZMM)
-
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \
- XFEATURE_MASK_BNDCSR | \
- XFEATURE_MASK_PKRU)
-
-/* All currently supported features */
-#define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
deleted file mode 100644
index c5d1785373ed..000000000000
--- a/arch/x86/include/asm/idle.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_IDLE_H
-#define _ASM_X86_IDLE_H
-
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
-#ifdef CONFIG_X86_64
-void enter_idle(void);
-void exit_idle(void);
-#else /* !CONFIG_X86_64 */
-static inline void enter_idle(void) { }
-static inline void exit_idle(void) { }
-static inline void __exit_idle(void) { }
-#endif /* CONFIG_X86_64 */
-
-void amd_e400_remove_cpu(int cpu);
-
-#endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 5b6753d1f7f4..49da9f497b90 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -17,6 +17,7 @@
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
extern void intel_mid_pwr_power_off(void);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index d31881188431..29a594a3b82a 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -21,7 +21,6 @@ enum die_val {
DIE_NMIUNKNOWN,
};
-extern void printk_address(unsigned long address);
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4b20f7304b9c..7892530cbacf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -191,6 +191,8 @@ enum {
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -198,6 +200,13 @@ enum {
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+
+#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
+ PFERR_USER_MASK | \
+ PFERR_WRITE_MASK | \
+ PFERR_PRESENT_MASK)
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
@@ -948,7 +957,6 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
bool (*invpcid_supported)(void);
- void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -958,8 +966,6 @@ struct kvm_x86_ops {
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
- u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
int (*check_intercept)(struct kvm_vcpu *vcpu,
@@ -1065,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+bool pdptrs_changed(struct kvm_vcpu *vcpu);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1127,7 +1134,8 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
@@ -1206,7 +1214,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
@@ -1361,7 +1369,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
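PFERR_GUEST_FINAL/PFERR_GUEST_PAGE sit at bit positions 32 and 33, which is why their masks use 1ULL and why kvm_mmu_page_fault() now takes a u64 error_code: shifting a 32-bit 1 by 32 would be undefined. A minimal stand-alone check of the wide masks:

#include <stdint.h>
#include <stdio.h>

#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT  33

/* 1ULL is required: (1U << 32) would overflow a 32-bit int. */
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK  (1ULL << PFERR_GUEST_PAGE_BIT)

int main(void)
{
        uint64_t error_code = PFERR_GUEST_PAGE_MASK | 0x3; /* page bit + low flags */

        printf("final set: %d, page set: %d\n",
               !!(error_code & PFERR_GUEST_FINAL_MASK),
               !!(error_code & PFERR_GUEST_PAGE_MASK));
        return 0;
}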
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24a235c..d74747b031ec 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -29,9 +29,20 @@ struct kvm_page_track_notifier_node {
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
+ * @node: this node
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes);
+ int bytes, struct kvm_page_track_notifier_node *node);
+ /*
+ * It is called when a memory slot is being moved or removed;
+ * users can drop write-protection for the pages in that memory slot.
+ *
+ * @kvm: the kvm where memory slot being moved or removed
+ * @slot: the memory slot being moved or removed
+ * @node: this node
+ */
+ void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
};
void kvm_page_track_init(struct kvm *kvm);
@@ -58,4 +69,5 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ef01fef3eebc..6c119cfae218 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -9,7 +9,6 @@
#define LHCALL_FLUSH_TLB 5
#define LHCALL_LOAD_IDT_ENTRY 6
#define LHCALL_SET_STACK 7
-#define LHCALL_TS 8
#define LHCALL_SET_CLOCKEVENT 9
#define LHCALL_HALT 10
#define LHCALL_SET_PMD 13
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9bd7ff5ffbcc..5132f2a6c0a2 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -252,8 +252,10 @@ static inline void cmci_recheck(void) {}
#ifdef CONFIG_X86_MCE_AMD
void mce_amd_feature_init(struct cpuinfo_x86 *c);
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
#else
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
#endif
int mce_available(struct cpuinfo_x86 *c);
@@ -293,9 +295,7 @@ void do_machine_check(struct pt_regs *, long);
/*
* Threshold handler
*/
-
extern void (*mce_threshold_vector)(void);
-extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);
@@ -356,27 +356,31 @@ enum smca_bank_types {
N_SMCA_BANK_TYPES
};
-struct smca_bank_name {
- const char *name; /* Short name for sysfs */
- const char *long_name; /* Long name for pretty-printing */
-};
-
-extern struct smca_bank_name smca_bank_names[N_SMCA_BANK_TYPES];
+#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))
-#define HWID_MCATYPE(hwid, mcatype) ((hwid << 16) | mcatype)
-
-struct smca_hwid_mcatype {
+struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */
};
-struct smca_bank_info {
- struct smca_hwid_mcatype *type;
- u32 type_instance;
+struct smca_bank {
+ struct smca_hwid *hwid;
+ /* Instance ID */
+ u32 id;
};
-extern struct smca_bank_info smca_banks[MAX_NR_BANKS];
+extern struct smca_bank smca_banks[MAX_NR_BANKS];
+
+extern const char *smca_get_long_name(enum smca_bank_types t);
+
+extern int mce_threshold_create_device(unsigned int cpu);
+extern int mce_threshold_remove_device(unsigned int cpu);
+
+#else
+
+static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
+static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
#endif
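The replacement HWID_MCATYPE() also parenthesizes its arguments, so an expression argument expands with the intended grouping. In-tree callers pass plain constants, so this is macro hygiene rather than a live bug; the old form is reproduced below only to make the precedence difference visible:

#include <stdio.h>

#define HWID_MCATYPE_OLD(hwid, mcatype) ((hwid << 16) | mcatype)
#define HWID_MCATYPE(hwid, mcatype)     (((hwid) << 16) | (mcatype))

int main(void)
{
        /* With an OR-expression as the hwid argument the old macro
         * groups the shift wrongly; the parenthesized form does not. */
        printf("old: %#x\n", HWID_MCATYPE_OLD(0xa0 | 0x0f, 0x5)); /* 0xf00a5  */
        printf("new: %#x\n", HWID_MCATYPE(0xa0 | 0x0f, 0x5));     /* 0xaf0005 */
        return 0;
}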
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index da0d81fa0b54..38711df3bcb5 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -20,6 +20,15 @@ do { \
(u32)((u64)(val)), \
(u32)((u64)(val) >> 32))
+struct ucode_patch {
+ struct list_head plist;
+ void *data; /* Intel uses only this one */
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+extern struct list_head microcode_cache;
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
@@ -55,12 +64,7 @@ struct ucode_cpu_info {
void *mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];
-
-#ifdef CONFIG_MICROCODE
-int __init microcode_init(void);
-#else
-static inline int __init microcode_init(void) { return 0; };
-#endif
+struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
@@ -131,11 +135,13 @@ static inline unsigned int x86_cpuid_family(void)
}
#ifdef CONFIG_MICROCODE
+int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else
+static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 15eb75484cc0..3e3e20be829a 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -40,38 +40,18 @@ struct microcode_amd {
unsigned int mpb[0];
};
-static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
- unsigned int sig)
-{
- int i = 0;
-
- if (!equiv_cpu_table)
- return 0;
-
- while (equiv_cpu_table[i].installed_cpu != 0) {
- if (sig == equiv_cpu_table[i].installed_cpu)
- return equiv_cpu_table[i].equiv_cpu;
-
- i++;
- }
- return 0;
-}
-
-extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
-extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-
#define PATCH_MAX_SIZE PAGE_SIZE
#ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family);
-extern void load_ucode_amd_ap(void);
-extern int __init save_microcode_in_initrd_amd(void);
+extern void load_ucode_amd_ap(unsigned int family);
+extern int __init save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(void);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
-static inline void load_ucode_amd_ap(void) {}
-static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+static inline void load_ucode_amd_ap(unsigned int family) {}
+static inline int __init
+save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
void reload_ucode_amd(void) {}
#endif
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 5e69154c9f07..195becc6f780 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,10 +52,6 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
-extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
-extern int microcode_sanity_check(void *mc, int print_err);
-extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
-
#ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8e0a9fe86de4..306c7e12af55 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -47,7 +47,7 @@ struct ldt_struct {
* allocations, but it's not worth trying to optimize.
*/
struct desc_struct *entries;
- int size;
+ unsigned int size;
};
/*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760ca1f2..710273c617b8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -37,6 +37,10 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
/* Intel MSRs. Some also available on other CPUs */
+
+#define MSR_PPIN_CTL 0x0000004e
+#define MSR_PPIN 0x0000004f
+
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index b5fee97813cd..db0b90c3b03e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -70,14 +70,14 @@ extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
-extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
-static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif
static inline unsigned long long native_read_msr(unsigned int msr)
@@ -115,22 +115,36 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
}
/* Can be uninlined because referenced by paravirt */
-notrace static inline void native_write_msr(unsigned int msr,
- unsigned low, unsigned high)
+static inline void notrace
+__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+/* Can be uninlined because referenced by paravirt */
+static inline void notrace
+native_write_msr(unsigned int msr, u32 low, u32 high)
+{
+ __native_write_msr_notrace(msr, low, high);
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
+static inline void
+wrmsr_notrace(unsigned int msr, u32 low, u32 high)
+{
+ __native_write_msr_notrace(msr, low, high);
+}
+
/* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
- unsigned low, unsigned high)
+static inline int notrace
+native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
int err;
+
asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
@@ -223,7 +237,7 @@ do { \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
-static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
native_write_msr(msr, low, high);
}
@@ -231,13 +245,13 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
#define rdmsrl(msr, val) \
((val) = native_read_msr((msr)))
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrl(unsigned int msr, u64 val)
{
native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
/* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
return native_write_msr_safe(msr, low, high);
}
@@ -252,7 +266,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
__err; \
})
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
int err;
@@ -325,12 +339,12 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
- rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+ rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
- wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+ wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
u32 *l, u32 *h)
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
deleted file mode 100644
index 7d3a48275394..000000000000
--- a/arch/x86/include/asm/mutex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_X86_32
-# include <asm/mutex_32.h>
-#else
-# include <asm/mutex_64.h>
-#endif
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
deleted file mode 100644
index e9355a84fc67..000000000000
--- a/arch/x86/include/asm/mutex_32.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Assembly implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_X86_MUTEX_32_H
-#define _ASM_X86_MUTEX_32_H
-
-#include <asm/alternative.h>
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fn> if it
- * wasn't 1 originally. This function MUST leave the value lower than 1
- * even when the "1" assertion wasn't true.
- */
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
- " jns 1f \n" \
- " call " #fail_fn "\n" \
- "1:\n" \
- : "=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
-} while (0)
-
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return -1;
- else
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value
- * to 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
- " jg 1f\n" \
- " call " #fail_fn "\n" \
- "1:\n" \
- : "=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
-} while (0)
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- */
-static inline int __mutex_fastpath_trylock(atomic_t *count,
- int (*fail_fn)(atomic_t *))
-{
- /* cmpxchg because it never induces a false contention state. */
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
- return 1;
-
- return 0;
-}
-
-#endif /* _ASM_X86_MUTEX_32_H */
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
deleted file mode 100644
index d9850758464e..000000000000
--- a/arch/x86/include/asm/mutex_64.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Assembly implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_X86_MUTEX_64_H
-#define _ASM_X86_MUTEX_64_H
-
-/**
- * __mutex_fastpath_lock - decrement and call function if negative
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is negative
- *
- * Atomically decrements @v and calls <fail_fn> if the result is negative.
- */
-#ifdef CC_HAVE_ASM_GOTO
-static inline void __mutex_fastpath_lock(atomic_t *v,
- void (*fail_fn)(atomic_t *))
-{
- asm_volatile_goto(LOCK_PREFIX " decl %0\n"
- " jns %l[exit]\n"
- : : "m" (v->counter)
- : "memory", "cc"
- : exit);
- fail_fn(v);
-exit:
- return;
-}
-#else
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
- " jns 1f \n" \
- " call " #fail_fn "\n" \
- "1:" \
- : "=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-#endif
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return -1;
- else
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - increment and call function if nonpositive
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is nonpositive
- *
- * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
- */
-#ifdef CC_HAVE_ASM_GOTO
-static inline void __mutex_fastpath_unlock(atomic_t *v,
- void (*fail_fn)(atomic_t *))
-{
- asm_volatile_goto(LOCK_PREFIX " incl %0\n"
- " jg %l[exit]\n"
- : : "m" (v->counter)
- : "memory", "cc"
- : exit);
- fail_fn(v);
-exit:
- return;
-}
-#else
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
- " jg 1f\n" \
- " call " #fail_fn "\n" \
- "1:" \
- : "=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-#endif
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
- * if it wasn't 1 originally. [the fallback function is never used on
- * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
- */
-static inline int __mutex_fastpath_trylock(atomic_t *count,
- int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
- return 1;
-
- return 0;
-}
-
-#endif /* _ASM_X86_MUTEX_64_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ce932812f142..1eea6ca40694 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -41,11 +41,6 @@ static inline void set_debugreg(unsigned long val, int reg)
PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
-static inline void clts(void)
-{
- PVOP_VCALL0(pv_cpu_ops.clts);
-}
-
static inline unsigned long read_cr0(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
@@ -678,6 +673,11 @@ static __always_inline void pv_kick(int cpu)
PVOP_VCALL1(pv_lock_ops.kick, cpu);
}
+static __always_inline bool pv_vcpu_is_preempted(int cpu)
+{
+ return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+}
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0e4979..bb2de45a60f2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -103,8 +103,6 @@ struct pv_cpu_ops {
unsigned long (*get_debugreg)(int regno);
void (*set_debugreg)(int regno, unsigned long value);
- void (*clts)(void);
-
unsigned long (*read_cr0)(void);
void (*write_cr0)(unsigned long);
@@ -310,6 +308,8 @@ struct pv_lock_ops {
void (*wait)(u8 *ptr, u8 val);
void (*kick)(int cpu);
+
+ struct paravirt_callee_save vcpu_is_preempted;
};
/* This contains all the paravirt structures: we get a convenient
@@ -508,6 +508,18 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op) ((void)op)
#endif
+#define PVOP_RETMASK(rettype) \
+ ({ unsigned long __mask = ~0UL; \
+ switch (sizeof(rettype)) { \
+ case 1: __mask = 0xffUL; break; \
+ case 2: __mask = 0xffffUL; break; \
+ case 4: __mask = 0xffffffffUL; break; \
+ default: break; \
+ } \
+ __mask; \
+ })
+
+
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
pre, post, ...) \
({ \
@@ -535,7 +547,7 @@ int paravirt_disable_iospace(void);
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
- __ret = (rettype)__eax; \
+ __ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
} \
__ret; \
})
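PVOP_RETMASK() exists because a paravirt callee returning a bool or other narrow type is only obliged to set the low byte(s) of the return register; masking by sizeof(rettype) discards whatever sits above before the cast. The selection logic is plain C (the statement expression needs GCC or Clang) and the register value below is fabricated to show the failure mode:

#include <stdbool.h>
#include <stdio.h>

#define RETMASK(rettype)                                   \
        ({ unsigned long __mask = ~0UL;                    \
           switch (sizeof(rettype)) {                      \
           case 1: __mask = 0xffUL; break;                 \
           case 2: __mask = 0xffffUL; break;               \
           case 4: __mask = 0xffffffffUL; break;           \
           default: break;                                 \
           }                                               \
           __mask; })

int main(void)
{
        /* Callee returned false in the low byte, junk above it. */
        unsigned long long rax = 0xdeadbeef00ULL;

        printf("unmasked cast: %d\n", (int)(bool)rax);                    /* 1: wrong */
        printf("masked cast:   %d\n", (int)(bool)(rax & RETMASK(bool)));  /* 0: right */
        return 0;
}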
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 84f58de08c2b..9fa03604b2b3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -507,17 +507,6 @@ do { \
#endif
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define x86_test_and_clear_bit_percpu(bit, var) \
-({ \
- bool old__; \
- asm volatile("btr %2,"__percpu_arg(1)"\n\t" \
- CC_SET(c) \
- : CC_OUT(c) (old__), "+m" (var) \
- : "dIr" (bit)); \
- old__; \
-})
-
static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
const unsigned long __percpu *addr)
{
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 17f218645701..ec1f3c651150 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -24,7 +24,13 @@ static __always_inline int preempt_count(void)
static __always_inline void preempt_count_set(int pc)
{
- raw_cpu_write_4(__preempt_count, pc);
+ int old, new;
+
+ do {
+ old = raw_cpu_read_4(__preempt_count);
+ new = (old & PREEMPT_NEED_RESCHED) |
+ (pc & ~PREEMPT_NEED_RESCHED);
+ } while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}
/*
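The reworked preempt_count_set() keeps the PREEMPT_NEED_RESCHED bit from the old value while replacing the rest, retrying via cmpxchg if the count changed underneath. The merge step is plain bit arithmetic and can be modeled outside the kernel; the loop collapses to one pass here because nothing races, and the flag value is only illustrative:

#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000u  /* illustrative bit value */

static unsigned int preempt_count = 2 | PREEMPT_NEED_RESCHED;

/* Replace the count but carry the resched bit over from the old value. */
static void preempt_count_set_model(unsigned int pc)
{
        unsigned int old = preempt_count;
        unsigned int new = (old & PREEMPT_NEED_RESCHED) |
                           (pc  & ~PREEMPT_NEED_RESCHED);

        preempt_count = new;  /* the kernel does this with raw_cpu_cmpxchg_4() */
}

int main(void)
{
        preempt_count_set_model(0);   /* reset the count, e.g. for a fresh task */
        printf("count=%u, resched flag preserved=%d\n",
               preempt_count & ~PREEMPT_NEED_RESCHED,
               !!(preempt_count & PREEMPT_NEED_RESCHED));
        return 0;
}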
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf17f6a..6aa741fbe1df 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -137,6 +137,17 @@ struct cpuinfo_x86 {
u32 microcode;
};
+struct cpuid_regs {
+ u32 eax, ebx, ecx, edx;
+};
+
+enum cpuid_regs_idx {
+ CPUID_EAX = 0,
+ CPUID_EBX,
+ CPUID_ECX,
+ CPUID_EDX,
+};
+
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
@@ -178,6 +189,9 @@ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern u32 get_scattered_cpuid_leaf(unsigned int level,
+ unsigned int sub_leaf,
+ enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
@@ -588,8 +602,6 @@ static __always_inline void cpu_relax(void)
rep_nop();
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
@@ -621,10 +633,9 @@ static inline void sync_core(void)
}
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_amd_e400_c1e_mask(void);
+extern void amd_e400_c1e_apic_setup(void);
extern unsigned long boot_option_idle_override;
-extern bool amd_e400_c1e_detected;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
IDLE_POLL};
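struct cpuid_regs and enum cpuid_regs_idx simply give the four CPUID output registers stable names, so get_scattered_cpuid_leaf() can return the requested one. The same shape can be filled from user space with the compiler's <cpuid.h> wrapper (a sketch; it assumes a GCC/Clang toolchain that provides __get_cpuid_count, and leaf 7 is just an example):

#include <cpuid.h>     /* GCC/Clang builtin wrapper */
#include <stdio.h>

struct cpuid_regs { unsigned int eax, ebx, ecx, edx; };
enum cpuid_regs_idx { CPUID_EAX = 0, CPUID_EBX, CPUID_ECX, CPUID_EDX };

int main(void)
{
        struct cpuid_regs r = { 0 };

        /* Leaf 7, sub-leaf 0: structured extended feature flags. */
        if (!__get_cpuid_count(7, 0, &r.eax, &r.ebx, &r.ecx, &r.edx)) {
                puts("leaf not supported");
                return 1;
        }
        printf("EBX=%#x ECX=%#x\n", r.ebx, r.ecx);
        return 0;
}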
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index eaba08076030..c343ab52579f 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
{
pv_queued_spin_unlock(lock);
}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return pv_vcpu_is_preempted(cpu);
+}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 19a2224f9e16..12af3e35edfa 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -6,11 +6,6 @@
#include <asm/nops.h>
-static inline void native_clts(void)
-{
- asm volatile("clts");
-}
-
/*
* Volatile isn't enough to prevent the compiler from reordering the
* read/write functions for the control registers and messing everything up.
@@ -208,16 +203,8 @@ static inline void load_gs_index(unsigned selector)
#endif
-/* Clear the 'TS' bit */
-static inline void clts(void)
-{
- native_clts();
-}
-
#endif/* CONFIG_PARAVIRT */
-#define stts() write_cr0(read_cr0() | X86_CR0_TS)
-
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 37f2e0b377ad..a3269c897ec5 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -30,8 +30,7 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
int get_stack_info(unsigned long *stack, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask);
-void stack_type_str(enum stack_type type, const char **begin,
- const char **end);
+const char *stack_type_name(enum stack_type type);
static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
{
@@ -43,8 +42,6 @@ static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
addr + len > begin && addr + len <= end);
}
-extern int kstack_depth_to_print;
-
#ifdef CONFIG_X86_32
#define STACKSLOTS_PER_LINE 8
#else
@@ -86,9 +83,6 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl);
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl);
-
extern unsigned int code_bytes;
/* The form of the top of the frame on the stack */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index cf75871d2f81..6358a85e2270 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -146,4 +146,36 @@ struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
+extern bool x86_topology_update;
+
+#ifdef CONFIG_SCHED_MC_PRIO
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
+extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
+
+/* Interface to set priority of a cpu */
+void sched_set_itmt_core_prio(int prio, int core_cpu);
+
+/* Interface to notify scheduler that system supports ITMT */
+int sched_set_itmt_support(void);
+
+/* Interface to notify scheduler that system revokes ITMT support */
+void sched_clear_itmt_support(void);
+
+#else /* CONFIG_SCHED_MC_PRIO */
+
+#define sysctl_sched_itmt_enabled 0
+static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
+{
+}
+static inline int sched_set_itmt_support(void)
+{
+ return 0;
+}
+static inline void sched_clear_itmt_support(void)
+{
+}
+#endif /* CONFIG_SCHED_MC_PRIO */
+
#endif /* _ASM_X86_TOPOLOGY_H */
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1f5bf6..342e59789fcd 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
- __field(int, counter)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
__entry->fpu = fpu;
__entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
- __entry->counter = fpu->counter;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
- __entry->counter,
__entry->xfeatures,
__entry->xcomp_bv
)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index faf3687f1035..ea148313570f 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -68,6 +68,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
+#else
+# define WARN_ON_IN_IRQ()
+#endif
+
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@@ -88,8 +94,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- likely(!__range_not_ok(addr, size, user_addr_max()))
+#define access_ok(type, addr, size) \
+({ \
+ WARN_ON_IN_IRQ(); \
+ likely(!__range_not_ok(addr, size, user_addr_max())); \
+})
/*
* These are the main single-value transfer routines. They automatically
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 46de9ac4b990..c5a7f3a930dd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -13,6 +13,7 @@ struct unwind_state {
int graph_idx;
#ifdef CONFIG_FRAME_POINTER
unsigned long *bp;
+ struct pt_regs *regs;
#else
unsigned long *sp;
#endif
@@ -47,7 +48,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
if (unwind_done(state))
return NULL;
- return state->bp + 1;
+ return state->regs ? &state->regs->ip : state->bp + 1;
+}
+
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return NULL;
+
+ return state->regs;
}
#else /* !CONFIG_FRAME_POINTER */
@@ -58,6 +67,11 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
return NULL;
}
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ return NULL;
+}
+
#endif /* CONFIG_FRAME_POINTER */
#endif /* _ASM_X86_UNWIND_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e728699db774..3a01996db58f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
* works on all CPUs. This is volatile so that it orders
* correctly wrt barrier() and to keep gcc from cleverly
* hoisting it out of the calling function.
+ *
+ * If RDPID is available, use it.
*/
- asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ alternative_io ("lsl %[p],%[seg]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
return p;
}
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index a002b07a7099..2b5b2d4b924e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,7 @@
#define VMX_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/asm/vmx.h>
@@ -60,6 +61,7 @@
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_DESC 0x00000004
#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
@@ -110,6 +112,36 @@
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
#define VMX_MISC_ACTIVITY_HLT 0x00000040
+static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
+{
+ return vmx_basic & GENMASK_ULL(30, 0);
+}
+
+static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
+{
+ return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
+}
+
+static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
+{
+ return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+}
+
+static inline int vmx_misc_cr3_count(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
+}
+
+static inline int vmx_misc_max_msr(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
+}
+
+static inline int vmx_misc_mseg_revid(u64 vmx_misc)
+{
+ return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
+}
+
/* VMCS Encodings */
enum vmcs_field {
VIRTUAL_PROCESSOR_ID = 0x00000000,
@@ -399,10 +431,11 @@ enum vmcs_field {
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
#define VMX_NR_VPIDS (1 << 16)
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
#define VMX_VPID_EXTENT_ALL_CONTEXT 2
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL 3
-#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
#define VMX_EPT_EXTENT_SHIFT 24
@@ -419,8 +452,10 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
#define VMX_VPID_INVVPID_BIT (1ull << 0) /* (32 - 32) */
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT (1ull << 8) /* (40 - 32) */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */
#define VMX_EPT_DEFAULT_GAW 3
#define VMX_EPT_MAX_GAW 0x4
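The new vmx_basic_*/vmx_misc_* helpers are plain field extraction with GENMASK_ULL(). The same arithmetic can be checked in isolation; the GENMASK_ULL below is an equivalent local definition and the MSR value is made up:

#include <stdint.h>
#include <stdio.h>

/* Equivalent to the kernel's GENMASK_ULL(): bits h..l set. */
#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint32_t vmx_basic_vmcs_size(uint64_t vmx_basic)
{
        return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

int main(void)
{
        /* Made-up IA32_VMX_BASIC value: VMCS size 0x400 in bits 44:32. */
        uint64_t vmx_basic = (0x400ULL << 32) | 0x12;

        printf("VMCS region size: %u bytes\n", vmx_basic_vmcs_size(vmx_basic));
        return 0;
}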
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c18ce67495fa..b10bf319ed20 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,6 +7,7 @@
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI 4
+#define SETUP_APPLE_PROPERTIES 5
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 94dc8ca434e0..1421a6585126 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -45,7 +45,9 @@ struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
- __u32 pad[12];
+ __u8 preempted;
+ __u8 u8_pad[3];
+ __u32 pad[11];
};
#define KVM_STEAL_ALIGNMENT_BITS 5
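The preempted byte is carved out of the existing padding, so the size of the shared structure does not change: one __u8 plus three pad bytes replace exactly one of the twelve __u32 pad words. Modeling just the fields visible in this hunk makes the invariant checkable:

#include <stdint.h>

/* Tail of kvm_steal_time before and after the change. */
struct tail_old {
        uint64_t steal;
        uint32_t version;
        uint32_t flags;
        uint32_t pad[12];
};

struct tail_new {
        uint64_t steal;
        uint32_t version;
        uint32_t flags;
        uint8_t  preempted;
        uint8_t  u8_pad[3];
        uint32_t pad[11];
};

/* 12 * 4 bytes of padding == 1 + 3 + 11 * 4 bytes. */
_Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
               "guest/host ABI size must not change");

int main(void)
{
        return 0;
}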
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 69a6e07e3149..eb6247a7009b 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -28,6 +28,7 @@ struct mce {
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
__u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index ae135de547f5..835aa51c7f6e 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -6,10 +6,8 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
-#ifdef CONFIG_CHECKPOINT_RESTORE
-# define ARCH_MAP_VDSO_X32 0x2001
-# define ARCH_MAP_VDSO_32 0x2002
-# define ARCH_MAP_VDSO_64 0x2003
-#endif
+#define ARCH_MAP_VDSO_X32 0x2001
+#define ARCH_MAP_VDSO_32 0x2002
+#define ARCH_MAP_VDSO_64 0x2003
#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 37fee272618f..14458658e988 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -65,6 +65,8 @@
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EOI_INDUCED 45
+#define EXIT_REASON_GDTR_IDTR 46
+#define EXIT_REASON_LDTR_TR 47
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
@@ -113,6 +115,8 @@
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
+ { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
+ { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_INVEPT, "INVEPT" }, \
@@ -129,6 +133,7 @@
{ EXIT_REASON_XRSTORS, "XRSTORS" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
+#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d75bdbf..05110c1097ae 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
+obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
ifdef CONFIG_FRAME_POINTER
obj-y += unwind_frame.o
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
index c280df6b2aa2..ea3046e0b0cf 100644
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -24,9 +24,6 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
struct acpi_hest_ia_corrected *cmc;
struct acpi_hest_ia_error_bank *mc_bank;
- if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
- return 0;
-
cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
if (!cmc->enabled)
return 0;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 931ced8ca345..4764fa56924d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,7 @@ int acpi_fix_pin2_polarity __initdata;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif
+#ifdef CONFIG_X86_IO_APIC
/*
* Locks related to IOAPIC hotplug
* Hotplug side:
@@ -88,6 +89,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
* ->ioapic_lock
*/
static DEFINE_MUTEX(acpi_ioapic_lock);
+#endif
/* --------------------------------------------------------------------------
Boot-time Configuration
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 169963f471bb..50b8ed0317a3 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -109,6 +109,15 @@ ENTRY(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15
+#ifdef CONFIG_KASAN
+ /*
+ * The suspend path may have poisoned some areas deeper in the stack,
+ * which we now need to unpoison.
+ */
+ movq %rsp, %rdi
+ call kasan_unpoison_task_stack_below
+#endif
+
xorl %eax, %eax
addq $8, %rsp
FRAME_END
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4fdf6230d93c..458da8509b75 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -13,8 +13,20 @@
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
+#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
+
+/* Protect the PCI config register pairs used for SMN and DF indirect access. */
+static DEFINE_MUTEX(smn_mutex);
+
static u32 *flush_words;
+static const struct pci_device_id amd_root_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
+ {}
+};
+
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -24,9 +36,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
-EXPORT_SYMBOL(amd_nb_misc_ids);
+EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
@@ -34,6 +47,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{}
};
@@ -44,8 +58,25 @@ const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ }
};
-struct amd_northbridge_info amd_northbridges;
-EXPORT_SYMBOL(amd_northbridges);
+static struct amd_northbridge_info amd_northbridges;
+
+u16 amd_nb_num(void)
+{
+ return amd_northbridges.num;
+}
+EXPORT_SYMBOL_GPL(amd_nb_num);
+
+bool amd_nb_has_feature(unsigned int feature)
+{
+ return ((amd_northbridges.flags & feature) == feature);
+}
+EXPORT_SYMBOL_GPL(amd_nb_has_feature);
+
+struct amd_northbridge *node_to_amd_nb(int node)
+{
+ return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
+}
+EXPORT_SYMBOL_GPL(node_to_amd_nb);
static struct pci_dev *next_northbridge(struct pci_dev *dev,
const struct pci_device_id *ids)
@@ -58,13 +89,106 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
return dev;
}
+static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
+{
+ struct pci_dev *root;
+ int err = -ENODEV;
+
+ if (node >= amd_northbridges.num)
+ goto out;
+
+ root = node_to_amd_nb(node)->root;
+ if (!root)
+ goto out;
+
+ mutex_lock(&smn_mutex);
+
+ err = pci_write_config_dword(root, 0x60, address);
+ if (err) {
+ pr_warn("Error programming SMN address 0x%x.\n", address);
+ goto out_unlock;
+ }
+
+ err = (write ? pci_write_config_dword(root, 0x64, *value)
+ : pci_read_config_dword(root, 0x64, value));
+ if (err)
+ pr_warn("Error %s SMN address 0x%x.\n",
+ (write ? "writing to" : "reading from"), address);
+
+out_unlock:
+ mutex_unlock(&smn_mutex);
+
+out:
+ return err;
+}
+
+int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return __amd_smn_rw(node, address, value, false);
+}
+EXPORT_SYMBOL_GPL(amd_smn_read);
+
+int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return __amd_smn_rw(node, address, &value, true);
+}
+EXPORT_SYMBOL_GPL(amd_smn_write);
+
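
amd_smn_read()/amd_smn_write() give other kernel code (EDAC, hwmon and friends) serialized access to the System Management Network behind the Fam17h root device. A brief kernel-style usage sketch; the register offset and the caller are hypothetical:

/* Hypothetical caller: read one SMN register on node 0 and report it.
 * 0x12345678 is a placeholder offset, not a documented register. */
static int example_read_smn_reg(void)
{
        u32 val;
        int err;

        err = amd_smn_read(0, 0x12345678, &val);
        if (err)
                return err;

        pr_info("SMN reg = 0x%08x\n", val);
        return 0;
}
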
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): Constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
+ * and FICAD HI registers but so far we only need the LO register.
+ */
+int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ struct pci_dev *F4;
+ u32 ficaa;
+ int err = -ENODEV;
+
+ if (node >= amd_northbridges.num)
+ goto out;
+
+ F4 = node_to_amd_nb(node)->link;
+ if (!F4)
+ goto out;
+
+ ficaa = 1;
+ ficaa |= reg & 0x3FC;
+ ficaa |= (func & 0x7) << 11;
+ ficaa |= instance_id << 16;
+
+ mutex_lock(&smn_mutex);
+
+ err = pci_write_config_dword(F4, 0x5C, ficaa);
+ if (err) {
+ pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+ goto out_unlock;
+ }
+
+ err = pci_read_config_dword(F4, 0x98, lo);
+ if (err)
+ pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+out_unlock:
+ mutex_unlock(&smn_mutex);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(amd_df_indirect_read);
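
Both helpers follow the same indirect-access pattern: program an index register through PCI config space under smn_mutex, then read or write the paired data register. For DF access the FICAA word packs the target register offset, PCI function and instance ID into fixed bit fields. A small standalone sketch of that encoding with a worked example; the field layout is taken from the code above, the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the FICAA encoding used above: bit 0 selects "access by
 * instance id", bits [9:2] carry the dword-aligned register offset,
 * bits [13:11] the PCI function and bits [23:16] the instance id of
 * the target coherent slave. */
static uint32_t example_build_ficaa(uint16_t reg, uint8_t func, uint8_t instance_id)
{
        uint32_t ficaa = 1;                       /* access by instance id */

        ficaa |= reg & 0x3FC;                     /* register offset */
        ficaa |= (func & 0x7) << 11;              /* D18F<func> */
        ficaa |= (uint32_t)instance_id << 16;

        return ficaa;
}

int main(void)
{
        /* D18F0x110 (DramBaseAddress) on instance 0, as read in
         * umc_normaddr_to_sysaddr() further down. */
        printf("FICAA = 0x%08x\n", example_build_ficaa(0x110, 0, 0));
        return 0;
}
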
+
int amd_cache_northbridges(void)
{
u16 i = 0;
struct amd_northbridge *nb;
- struct pci_dev *misc, *link;
+ struct pci_dev *root, *misc, *link;
- if (amd_nb_num())
+ if (amd_northbridges.num)
return 0;
misc = NULL;
@@ -74,15 +198,17 @@ int amd_cache_northbridges(void)
if (!i)
return -ENODEV;
- nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
return -ENOMEM;
amd_northbridges.nb = nb;
amd_northbridges.num = i;
- link = misc = NULL;
- for (i = 0; i != amd_nb_num(); i++) {
+ link = misc = root = NULL;
+ for (i = 0; i != amd_northbridges.num; i++) {
+ node_to_amd_nb(i)->root = root =
+ next_northbridge(root, amd_root_ids);
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
@@ -139,13 +265,13 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
{
u32 address;
u64 base, msr;
- unsigned segn_busn_bits;
+ unsigned int segn_busn_bits;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return NULL;
/* assume all cpus from fam10h have mmconfig */
- if (boot_cpu_data.x86 < 0x10)
+ if (boot_cpu_data.x86 < 0x10)
return NULL;
address = MSR_FAM10H_MMIO_CONF_BASE;
@@ -226,14 +352,14 @@ static void amd_cache_gart(void)
if (!amd_nb_has_feature(AMD_NB_GART))
return;
- flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+ flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
if (!flush_words) {
amd_northbridges.flags &= ~AMD_NB_GART;
pr_notice("Cannot initialize GART flush words, GART support disabled\n");
return;
}
- for (i = 0; i != amd_nb_num(); i++)
+ for (i = 0; i != amd_northbridges.num; i++)
pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
@@ -246,18 +372,20 @@ void amd_flush_garts(void)
if (!amd_nb_has_feature(AMD_NB_GART))
return;
- /* Avoid races between AGP and IOMMU. In theory it's not needed
- but I'm not sure if the hardware won't lose flush requests
- when another is pending. This whole thing is so expensive anyways
- that it doesn't matter to serialize more. -AK */
+ /*
+ * Avoid races between AGP and IOMMU. In theory it's not needed
+ * but I'm not sure if the hardware won't lose flush requests
+ * when another is pending. This whole thing is so expensive anyways
+ * that it doesn't matter to serialize more. -AK
+ */
spin_lock_irqsave(&gart_lock, flags);
flushed = 0;
- for (i = 0; i < amd_nb_num(); i++) {
+ for (i = 0; i < amd_northbridges.num; i++) {
pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
flush_words[i] | 1);
flushed++;
}
- for (i = 0; i < amd_nb_num(); i++) {
+ for (i = 0; i < amd_northbridges.num; i++) {
u32 w;
/* Make sure the hardware actually executed the flush*/
for (;;) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 88c657b057e2..bb47e5eacd44 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -48,7 +48,6 @@
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
-#include <asm/idle.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
@@ -894,11 +893,13 @@ void __init setup_boot_APIC_clock(void)
/* Setup the lapic or request the broadcast */
setup_APIC_timer();
+ amd_e400_c1e_apic_setup();
}
void setup_secondary_APIC_clock(void)
{
setup_APIC_timer();
+ amd_e400_c1e_apic_setup();
}
/*
@@ -2263,6 +2264,7 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
/* Should happen once for each apic */
WARN_ON((*drv)->eoi_write == eoi_write);
+ (*drv)->native_eoi_write = (*drv)->eoi_write;
(*drv)->eoi_write = eoi_write;
}
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 48e6d84f173e..945e512a112a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -48,7 +48,6 @@
#include <linux/bootmem.h>
#include <asm/irqdomain.h>
-#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index aeef53ce93e1..35690a168cf7 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -815,9 +815,9 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
l = li;
}
addr1 = (base << shift) +
- f * (unsigned long)(1 << m_io);
+ f * (1ULL << m_io);
addr2 = (base << shift) +
- (l + 1) * (unsigned long)(1 << m_io);
+ (l + 1) * (1ULL << m_io);
pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
id, fi, li, lnasid, addr1, addr2);
if (max_io < l)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c7364bd633e1..643818a7688b 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -906,14 +906,14 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
static int use_apm_idle; /* = 0 */
static unsigned int last_jiffies; /* = 0 */
static unsigned int last_stime; /* = 0 */
- cputime_t stime;
+ cputime_t stime, utime;
int apm_idle_done = 0;
unsigned int jiffies_since_last_check = jiffies - last_jiffies;
unsigned int bucket;
recalc:
- task_cputime(current, NULL, &stime);
+ task_cputime(current, &utime, &stime);
if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
use_apm_idle = 0;
} else if (jiffies_since_last_check > idle_period) {
@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
- if (apm_bios_call(&call))
+ if (apm_bios_call(&call)) {
+ if (!call.err)
+ return APM_NO_ERROR;
return call.err;
+ }
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4a8697f7d4ef..33b63670bf09 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -20,13 +20,11 @@ obj-y := intel_cacheinfo.o scattered.o topology.o
obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
+obj-y += bugs.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_X86_32) += bugs.o
-obj-$(CONFIG_X86_64) += bugs_64.o
-
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b81fe2d63e15..71cae73a5076 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,6 +20,10 @@
#include "cpu.h"
+static const int amd_erratum_383[];
+static const int amd_erratum_400[];
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
@@ -314,11 +318,30 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->x86_max_cores /= smp_num_siblings;
c->cpu_core_id = ebx & 0xff;
+
+ /*
+ * We may have multiple LLCs if L3 caches exist, so check if we
+ * have an L3 cache by looking at the L3 cache CPUID leaf.
+ */
+ if (cpuid_edx(0x80000006)) {
+ if (c->x86 == 0x17) {
+ /*
+ * LLC is at the core complex level.
+ * Core complex id is ApicId[3].
+ */
+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+ } else {
+ /* LLC is at the node level. */
+ per_cpu(cpu_llc_id, cpu) = node_id;
+ }
+ }
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
node_id = value & 7;
+
+ per_cpu(cpu_llc_id, cpu) = node_id;
} else
return;
@@ -329,9 +352,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cus_per_node = c->x86_max_cores / nodes_per_socket;
- /* store NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = node_id;
-
/* core id has to be in the [0 .. cores_per_node - 1] range */
c->cpu_core_id %= cus_per_node;
}
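
The new Fam17h branch derives the last-level-cache ID directly from the APIC ID, because on these parts the L3 is shared per core complex and the complex number starts at ApicId bit 3. A short, standalone worked example of that derivation:

#include <stdio.h>

/* On Fam17h the L3 is per core complex, so the LLC id is the APIC id
 * with the low three bits (thread/core within the complex) stripped. */
static unsigned int example_fam17h_llc_id(unsigned int apicid)
{
        return apicid >> 3;
}

int main(void)
{
        /* APIC ids 0x00..0x07 share one L3; 0x08 starts the next complex. */
        printf("apicid 0x07 -> llc id %u\n", example_fam17h_llc_id(0x07));
        printf("apicid 0x08 -> llc id %u\n", example_fam17h_llc_id(0x08));
        return 0;
}
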
@@ -347,7 +367,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
unsigned bits;
int cpu = smp_processor_id();
- unsigned int socket_id, core_complex_id;
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
@@ -357,18 +376,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
amd_get_topology(c);
-
- /*
- * Fix percpu cpu_llc_id here as LLC topology is different
- * for Fam17h systems.
- */
- if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
- return;
-
- socket_id = (c->apicid >> bits) - 1;
- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
-
- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
#endif
}
@@ -589,11 +596,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
/* F16h erratum 793, CVE-2013-6885 */
if (c->x86 == 0x16 && c->x86_model <= 0xf)
msr_set_bit(MSR_AMD64_LS_CFG, 15);
-}
-static const int amd_erratum_383[];
-static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+ /*
+ * Check whether the machine is affected by erratum 400. This is
+ * used to select the proper idle routine and to enable the check
+ * whether the machine is affected in arch_post_acpi_init(), which
+ * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
+ */
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
+ set_cpu_bug(c, X86_BUG_AMD_E400);
+}
static void init_amd_k8(struct cpuinfo_x86 *c)
{
@@ -774,9 +786,6 @@ static void init_amd(struct cpuinfo_x86 *c)
if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
- if (cpu_has_amd_erratum(c, amd_erratum_400))
- set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
-
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
/* 3DNow or LM implies PREFETCHW */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bd17db15a2c1..a44ef52184df 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -16,15 +16,19 @@
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
void __init check_bugs(void)
{
identify_boot_cpu();
-#ifndef CONFIG_SMP
- pr_info("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+ }
+
+#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
*
@@ -40,4 +44,18 @@ void __init check_bugs(void)
alternative_instructions();
fpu__init_check_bugs();
+#else /* CONFIG_X86_64 */
+ alternative_instructions();
+
+ /*
+ * Make sure the first 2MB area is not mapped by huge pages
+ * There are typically fixed size MTRRs in there and overlapping
+ * MTRRs into large pages causes slow downs.
+ *
+ * Right now we don't do that with gbpages because there seems
+ * very little benefit for that case.
+ */
+ if (!direct_gbpages)
+ set_memory_4k((unsigned long)__va(0), 1);
+#endif
}
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
deleted file mode 100644
index a972ac4c7e7d..000000000000
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- * Copyright (C) 2000 SuSE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/alternative.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/mtrr.h>
-#include <asm/cacheflush.h>
-
-void __init check_bugs(void)
-{
- identify_boot_cpu();
-#if !defined(CONFIG_SMP)
- pr_info("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- alternative_instructions();
-
- /*
- * Make sure the first 2MB area is not mapped by huge pages
- * There are typically fixed size MTRRs in there and overlapping
- * MTRRs into large pages causes slow downs.
- *
- * Right now we don't do that with gbpages because there seems
- * very little benefit for that case.
- */
- if (!direct_gbpages)
- set_memory_4k((unsigned long)__va(0), 1);
-}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bd910a7dd0a..729f92ba8224 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
}
/*
+ * The physical to logical package id mapping is initialized from the
+ * acpi/mptables information. Make sure that CPUID actually agrees with
+ * that.
+ */
+static void sanitize_package_id(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int pkg, apicid, cpu = smp_processor_id();
+
+ apicid = apic->cpu_present_to_apicid(cpu);
+ pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+ if (apicid != c->initial_apicid) {
+ pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+ cpu, apicid, c->initial_apicid);
+ c->initial_apicid = apicid;
+ }
+ if (pkg != c->phys_proc_id) {
+ pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
+ cpu, pkg, c->phys_proc_id);
+ c->phys_proc_id = pkg;
+ }
+ c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+#else
+ c->logical_proc_id = 0;
+#endif
+}
+
+/*
* This does the hard work of actually picking apart the CPU stuff...
*/
static void identify_cpu(struct cpuinfo_x86 *c)
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
- /* The boot/hotplug time assigment got cleared, restore it */
- c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
+ sanitize_package_id(c);
}
/*
@@ -1144,7 +1172,6 @@ void enable_sep_cpu(void)
void __init identify_boot_cpu(void)
{
identify_cpu(&boot_cpu_data);
- init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();
@@ -1162,51 +1189,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
}
-struct msr_range {
- unsigned min;
- unsigned max;
-};
-
-static const struct msr_range msr_range_array[] = {
- { 0x00000000, 0x00000418},
- { 0xc0000000, 0xc000040b},
- { 0xc0010000, 0xc0010142},
- { 0xc0011000, 0xc001103b},
-};
-
-static void __print_cpu_msr(void)
-{
- unsigned index_min, index_max;
- unsigned index;
- u64 val;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
- index_min = msr_range_array[i].min;
- index_max = msr_range_array[i].max;
-
- for (index = index_min; index < index_max; index++) {
- if (rdmsrl_safe(index, &val))
- continue;
- pr_info(" MSR%08x: %016llx\n", index, val);
- }
- }
-}
-
-static int show_msr;
-
-static __init int setup_show_msr(char *arg)
-{
- int num;
-
- get_option(&arg, &num);
-
- if (num > 0)
- show_msr = num;
- return 1;
-}
-__setup("show_msr=", setup_show_msr);
-
static __init int setup_noclflush(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
@@ -1240,14 +1222,6 @@ void print_cpu_info(struct cpuinfo_x86 *c)
pr_cont(", stepping: 0x%x)\n", c->x86_mask);
else
pr_cont(")\n");
-
- print_cpu_msr(c);
-}
-
-void print_cpu_msr(struct cpuinfo_x86 *c)
-{
- if (c->cpu_index < show_msr)
- __print_cpu_msr();
}
static __init int setup_disablecpuid(char *arg)
@@ -1462,11 +1436,8 @@ void cpu_init(void)
*/
cr4_init_shadow();
- /*
- * Load microcode on this cpu if a valid microcode is available.
- * This is early microcode loading procedure.
- */
- load_ucode_ap();
+ if (cpu)
+ load_ucode_ap();
t = &per_cpu(cpu_tss, cpu);
oist = &per_cpu(orig_ist, cpu);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index de6626c18e42..be6337156502 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
+ this_cpu_ci->cpu_map_populated = true;
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 631356c8cca4..c7efbcfbeda6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -311,7 +311,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
*msg = s->msg;
s->covered = 1;
if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
- if (panic_on_oops || tolerant < 1)
+ if (tolerant < 1)
return MCE_PANIC_SEVERITY;
}
return s->sev;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a7fdf453d895..00ef43233e03 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -43,6 +43,7 @@
#include <linux/export.h>
#include <linux/jump_label.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
@@ -135,6 +136,9 @@ void mce_setup(struct mce *m)
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+ if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
+ rdmsrl(MSR_PPIN, m->ppin);
}
DEFINE_PER_CPU(struct mce, injectm);
@@ -207,8 +211,12 @@ EXPORT_SYMBOL_GPL(mce_inject_log);
static struct notifier_block mce_srao_nb;
+static atomic_t num_notifiers;
+
void mce_register_decode_chain(struct notifier_block *nb)
{
+ atomic_inc(&num_notifiers);
+
/* Ensure SRAO notifier has the highest priority in the decode chain. */
if (nb != &mce_srao_nb && nb->priority == INT_MAX)
nb->priority -= 1;
@@ -219,6 +227,8 @@ EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
+ atomic_dec(&num_notifiers);
+
atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
@@ -270,17 +280,17 @@ struct mca_msr_regs msr_ops = {
.misc = misc_reg
};
-static void print_mce(struct mce *m)
+static void __print_mce(struct mce *m)
{
- int ret = 0;
-
- pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- m->extcpu, m->mcgstatus, m->bank, m->status);
+ pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
+ m->extcpu,
+ (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
+ m->mcgstatus, m->bank, m->status);
if (m->ip) {
pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
- m->cs, m->ip);
+ m->cs, m->ip);
if (m->cs == __KERNEL_CS)
print_symbol("{%s}", m->ip);
@@ -308,6 +318,13 @@ static void print_mce(struct mce *m)
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
+}
+
+static void print_mce(struct mce *m)
+{
+ int ret = 0;
+
+ __print_mce(m);
/*
* Print out human-readable details about the MCE error,
@@ -499,7 +516,7 @@ int mce_available(struct cpuinfo_x86 *c)
static void mce_schedule_work(void)
{
- if (!mce_gen_pool_empty() && keventd_up())
+ if (!mce_gen_pool_empty())
schedule_work(&mce_work);
}
@@ -569,6 +586,32 @@ static struct notifier_block mce_srao_nb = {
.priority = INT_MAX,
};
+static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *m = (struct mce *)data;
+
+ if (!m)
+ return NOTIFY_DONE;
+
+ /*
+ * Run the default notifier if we have only the SRAO
+ * notifier and us registered.
+ */
+ if (atomic_read(&num_notifiers) > 2)
+ return NOTIFY_DONE;
+
+ __print_mce(m);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mce_default_nb = {
+ .notifier_call = mce_default_notifier,
+ /* lowest prio, we want it to run last. */
+ .priority = 0,
+};
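
The num_notifiers check works because the SRAO notifier and this default notifier are always on the chain, so any third registration means a real decoder (EDAC, mcelog, ...) has claimed the error and the raw dump can be skipped. A hedged, kernel-style sketch of how such an external decoder typically hooks in; everything except mce_register_decode_chain() and the struct mce fields is illustrative:

static int example_decoder_notify(struct notifier_block *nb,
                                  unsigned long val, void *data)
{
        struct mce *m = data;

        if (!m)
                return NOTIFY_DONE;

        pr_info("example decoder: bank %d, status 0x%016llx\n",
                m->bank, (unsigned long long)m->status);

        return NOTIFY_OK;
}

static struct notifier_block example_decoder_nb = {
        .notifier_call = example_decoder_notify,
};

static int __init example_decoder_init(void)
{
        /* Bumps num_notifiers above 2, silencing the default dump above. */
        mce_register_decode_chain(&example_decoder_nb);
        return 0;
}
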
+
/*
* Read ADDR and MISC registers.
*/
@@ -667,6 +710,15 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
mce_gather_info(&m, NULL);
+ /*
+ * m.tsc was set in mce_setup(). Clear it if not requested.
+ *
+ * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
+ * that dance.
+ */
+ if (!(flags & MCP_TIMESTAMP))
+ m.tsc = 0;
+
for (i = 0; i < mca_cfg.banks; i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b))
continue;
@@ -674,14 +726,12 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
m.misc = 0;
m.addr = 0;
m.bank = i;
- m.tsc = 0;
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
if (!(m.status & MCI_STATUS_VAL))
continue;
-
/*
* Uncorrected or signalled events are handled by the exception
* handler when it is enabled, so don't process those here.
@@ -696,9 +746,6 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
mce_read_aux(&m, i);
- if (!(flags & MCP_TIMESTAMP))
- m.tsc = 0;
-
severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
@@ -1355,7 +1402,7 @@ static void mce_timer_fn(unsigned long data)
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
+ machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
if (mce_intel_cmci_poll()) {
iv = mce_adjust_timer(iv);
@@ -1745,6 +1792,14 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
add_timer_on(t, cpu);
}
+static void __mcheck_cpu_setup_timer(void)
+{
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
+ unsigned int cpu = smp_processor_id();
+
+ setup_pinned_timer(t, mce_timer_fn, cpu);
+}
+
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
@@ -1796,7 +1851,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_clear_banks();
- __mcheck_cpu_init_timer();
+ __mcheck_cpu_setup_timer();
}
/*
@@ -2138,6 +2193,7 @@ int __init mcheck_init(void)
{
mcheck_intel_therm_init();
mce_register_decode_chain(&mce_srao_nb);
+ mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
INIT_WORK(&mce_work, mce_process_work);
@@ -2255,8 +2311,6 @@ static struct bus_type mce_subsys = {
DEFINE_PER_CPU(struct device *, mce_device);
-void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
-
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
return container_of(attr, struct mce_bank, attr);
@@ -2409,6 +2463,10 @@ static int mce_device_create(unsigned int cpu)
if (!mce_available(&boot_cpu_data))
return -EIO;
+ dev = per_cpu(mce_device, cpu);
+ if (dev)
+ return 0;
+
dev = kzalloc(sizeof *dev, GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2468,28 +2526,25 @@ static void mce_device_remove(unsigned int cpu)
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void mce_disable_cpu(void *h)
+static void mce_disable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
-
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_clear();
vendor_disable_error_reporting();
}
-static void mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
int i;
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_reenable();
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
@@ -2499,45 +2554,43 @@ static void mce_reenable_cpu(void *h)
}
}
-/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int mce_cpu_dead(unsigned int cpu)
+{
+ mce_intel_hcpu_update(cpu);
+
+ /* intentionally ignoring frozen here */
+ if (!cpuhp_tasks_frozen)
+ cmci_rediscover();
+ return 0;
+}
+
+static int mce_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
struct timer_list *t = &per_cpu(mce_timer, cpu);
+ int ret;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- mce_device_create(cpu);
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- break;
- case CPU_DEAD:
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- mce_device_remove(cpu);
- mce_intel_hcpu_update(cpu);
+ mce_device_create(cpu);
- /* intentionally ignoring frozen here */
- if (!(action & CPU_TASKS_FROZEN))
- cmci_rediscover();
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- mce_start_timer(cpu, t);
- break;
+ ret = mce_threshold_create_device(cpu);
+ if (ret) {
+ mce_device_remove(cpu);
+ return ret;
}
-
- return NOTIFY_OK;
+ mce_reenable_cpu();
+ mce_start_timer(cpu, t);
+ return 0;
}
-static struct notifier_block mce_cpu_notifier = {
- .notifier_call = mce_cpu_callback,
-};
+static int mce_cpu_pre_down(unsigned int cpu)
+{
+ struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ mce_disable_cpu();
+ del_timer_sync(t);
+ mce_threshold_remove_device(cpu);
+ mce_device_remove(cpu);
+ return 0;
+}
static __init void mce_init_banks(void)
{
@@ -2559,8 +2612,8 @@ static __init void mce_init_banks(void)
static __init int mcheck_init_device(void)
{
+ enum cpuhp_state hp_online;
int err;
- int i = 0;
if (!mce_available(&boot_cpu_data)) {
err = -EIO;
@@ -2578,23 +2631,16 @@ static __init int mcheck_init_device(void)
if (err)
goto err_out_mem;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = mce_device_create(i);
- if (err) {
- /*
- * Register notifier anyway (and do not unreg it) so
- * that we don't leave undeleted timers, see notifier
- * callback above.
- */
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
- goto err_device_create;
- }
- }
+ err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
+ mce_cpu_dead);
+ if (err)
+ goto err_out_mem;
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
+ mce_cpu_online, mce_cpu_pre_down);
+ if (err < 0)
+ goto err_out_online;
+ hp_online = err;
register_syscore_ops(&mce_syscore_ops);
@@ -2607,16 +2653,10 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
+ cpuhp_remove_state(hp_online);
-err_device_create:
- /*
- * We didn't keep track of which devices were created above, but
- * even if we had, the set of online cpus might have changed.
- * Play safe and remove for every possible cpu, since
- * mce_device_remove() will do the right thing.
- */
- for_each_possible_cpu(i)
- mce_device_remove(i);
+err_out_online:
+ cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
err_out_mem:
free_cpumask_var(mce_device_initialized);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9b5403462936..ffacfdcacb85 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -24,7 +24,6 @@
#include <asm/amd_nb.h>
#include <asm/apic.h>
-#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
@@ -55,6 +54,8 @@
/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF 0xF000
+static bool thresholding_en;
+
static const char * const th_names[] = {
"load_store",
"insn_fetch",
@@ -69,7 +70,12 @@ static const char * const smca_umc_block_names[] = {
"misc_umc"
};
-struct smca_bank_name smca_bank_names[] = {
+struct smca_bank_name {
+ const char *name; /* Short name for sysfs */
+ const char *long_name; /* Long name for pretty-printing */
+};
+
+static struct smca_bank_name smca_names[] = {
[SMCA_LS] = { "load_store", "Load Store Unit" },
[SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
[SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
@@ -84,9 +90,25 @@ struct smca_bank_name smca_bank_names[] = {
[SMCA_PSP] = { "psp", "Platform Security Processor" },
[SMCA_SMU] = { "smu", "System Management Unit" },
};
-EXPORT_SYMBOL_GPL(smca_bank_names);
-static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
+const char *smca_get_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_names[t].name;
+}
+
+const char *smca_get_long_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_names[t].long_name;
+}
+EXPORT_SYMBOL_GPL(smca_get_long_name);
+
+static struct smca_hwid smca_hwid_mcatypes[] = {
/* { bank_type, hwid_mcatype, xec_bitmap } */
/* ZN Core (HWID=0xB0) MCA types */
@@ -116,7 +138,7 @@ static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
{ SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 },
};
-struct smca_bank_info smca_banks[MAX_NR_BANKS];
+struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);
/*
@@ -142,35 +164,34 @@ static void default_deferred_error_interrupt(void)
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
-/*
- * CPU Initialization
- */
-
static void get_smca_bank_info(unsigned int bank)
{
unsigned int i, hwid_mcatype, cpu = smp_processor_id();
- struct smca_hwid_mcatype *type;
- u32 high, instanceId;
- u16 hwid, mcatype;
+ struct smca_hwid *s_hwid;
+ u32 high, instance_id;
/* Collect bank_info using CPU 0 for now. */
if (cpu)
return;
- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instanceId, &high)) {
+ if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
return;
}
- hwid = high & MCI_IPID_HWID;
- mcatype = (high & MCI_IPID_MCATYPE) >> 16;
- hwid_mcatype = HWID_MCATYPE(hwid, mcatype);
+ hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
+ (high & MCI_IPID_MCATYPE) >> 16);
for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
- type = &smca_hwid_mcatypes[i];
- if (hwid_mcatype == type->hwid_mcatype) {
- smca_banks[bank].type = type;
- smca_banks[bank].type_instance = instanceId;
+ s_hwid = &smca_hwid_mcatypes[i];
+ if (hwid_mcatype == s_hwid->hwid_mcatype) {
+
+ WARN(smca_banks[bank].hwid,
+ "Bank %s already initialized!\n",
+ smca_get_name(s_hwid->bank_type));
+
+ smca_banks[bank].hwid = s_hwid;
+ smca_banks[bank].id = instance_id;
break;
}
}
@@ -533,6 +554,206 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
deferred_error_interrupt_enable(c);
}
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
+{
+ u64 dram_base_addr, dram_limit_addr, dram_hole_base;
+ /* We start from the normalized address */
+ u64 ret_addr = norm_addr;
+
+ u32 tmp;
+
+ u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
+ u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
+ u8 intlv_addr_sel, intlv_addr_bit;
+ u8 num_intlv_bits, hashed_bit;
+ u8 lgcy_mmio_hole_en, base = 0;
+ u8 cs_mask, cs_id = 0;
+ bool hash_enabled = false;
+
+ /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
+ if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
+ goto out_err;
+
+ /* Remove HiAddrOffset from normalized address, if enabled: */
+ if (tmp & BIT(0)) {
+ u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;
+
+ if (norm_addr >= hi_addr_offset) {
+ ret_addr -= hi_addr_offset;
+ base = 1;
+ }
+ }
+
+ /* Read D18F0x110 (DramBaseAddress). */
+ if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
+ goto out_err;
+
+ /* Check if address range is valid. */
+ if (!(tmp & BIT(0))) {
+ pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
+ __func__, tmp);
+ goto out_err;
+ }
+
+ lgcy_mmio_hole_en = tmp & BIT(1);
+ intlv_num_chan = (tmp >> 4) & 0xF;
+ intlv_addr_sel = (tmp >> 8) & 0x7;
+ dram_base_addr = (tmp & GENMASK_ULL(31, 12)) << 16;
+
+ /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
+ if (intlv_addr_sel > 3) {
+ pr_err("%s: Invalid interleave address select %d.\n",
+ __func__, intlv_addr_sel);
+ goto out_err;
+ }
+
+ /* Read D18F0x114 (DramLimitAddress). */
+ if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
+ goto out_err;
+
+ intlv_num_sockets = (tmp >> 8) & 0x1;
+ intlv_num_dies = (tmp >> 10) & 0x3;
+ dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
+
+ intlv_addr_bit = intlv_addr_sel + 8;
+
+ /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
+ switch (intlv_num_chan) {
+ case 0: intlv_num_chan = 0; break;
+ case 1: intlv_num_chan = 1; break;
+ case 3: intlv_num_chan = 2; break;
+ case 5: intlv_num_chan = 3; break;
+ case 7: intlv_num_chan = 4; break;
+
+ case 8: intlv_num_chan = 1;
+ hash_enabled = true;
+ break;
+ default:
+ pr_err("%s: Invalid number of interleaved channels %d.\n",
+ __func__, intlv_num_chan);
+ goto out_err;
+ }
+
+ num_intlv_bits = intlv_num_chan;
+
+ if (intlv_num_dies > 2) {
+ pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
+ __func__, intlv_num_dies);
+ goto out_err;
+ }
+
+ num_intlv_bits += intlv_num_dies;
+
+ /* Add a bit if sockets are interleaved. */
+ num_intlv_bits += intlv_num_sockets;
+
+ /* Assert num_intlv_bits <= 4 */
+ if (num_intlv_bits > 4) {
+ pr_err("%s: Invalid interleave bits %d.\n",
+ __func__, num_intlv_bits);
+ goto out_err;
+ }
+
+ if (num_intlv_bits > 0) {
+ u64 temp_addr_x, temp_addr_i, temp_addr_y;
+ u8 die_id_bit, sock_id_bit, cs_fabric_id;
+
+ /*
+ * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
+ * This is the fabric id for this coherent slave. Use
+ * umc/channel# as instance id of the coherent slave
+ * for FICAA.
+ */
+ if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
+ goto out_err;
+
+ cs_fabric_id = (tmp >> 8) & 0xFF;
+ die_id_bit = 0;
+
+ /* If interleaved over more than 1 channel: */
+ if (intlv_num_chan) {
+ die_id_bit = intlv_num_chan;
+ cs_mask = (1 << die_id_bit) - 1;
+ cs_id = cs_fabric_id & cs_mask;
+ }
+
+ sock_id_bit = die_id_bit;
+
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (intlv_num_dies || intlv_num_sockets)
+ if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
+ goto out_err;
+
+ /* If interleaved over more than 1 die. */
+ if (intlv_num_dies) {
+ sock_id_bit = die_id_bit + intlv_num_dies;
+ die_id_shift = (tmp >> 24) & 0xF;
+ die_id_mask = (tmp >> 8) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
+ }
+
+ /* If interleaved over more than 1 socket. */
+ if (intlv_num_sockets) {
+ socket_id_shift = (tmp >> 28) & 0xF;
+ socket_id_mask = (tmp >> 16) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
+ }
+
+ /*
+ * The pre-interleaved address consists of XXXXXXIIIYYYYY
+ * where III is the ID for this CS, and XXXXXXYYYYY are the
+ * address bits from the post-interleaved address.
+ * "num_intlv_bits" has been calculated to tell us how many "I"
+ * bits there are. "intlv_addr_bit" tells us how many "Y" bits
+ * there are (where "I" starts).
+ */
+ temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
+ temp_addr_i = (cs_id << intlv_addr_bit);
+ temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
+ ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
+ }
+
+ /* Add dram base address */
+ ret_addr += dram_base_addr;
+
+ /* If legacy MMIO hole enabled */
+ if (lgcy_mmio_hole_en) {
+ if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
+ goto out_err;
+
+ dram_hole_base = tmp & GENMASK(31, 24);
+ if (ret_addr >= dram_hole_base)
+ ret_addr += (BIT_ULL(32) - dram_hole_base);
+ }
+
+ if (hash_enabled) {
+ /* Save some parentheses and grab ls-bit at the end. */
+ hashed_bit = (ret_addr >> 12) ^
+ (ret_addr >> 18) ^
+ (ret_addr >> 21) ^
+ (ret_addr >> 30) ^
+ cs_id;
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
+ ret_addr ^= BIT(intlv_addr_bit);
+ }
+
+ /* Is the calculated system address above the DRAM limit address? */
+ if (ret_addr > dram_limit_addr)
+ goto out_err;
+
+ *sys_addr = ret_addr;
+ return 0;
+
+out_err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
+
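
The core of the translation is the bit splice in the "num_intlv_bits > 0" branch: the normalized address is split at intlv_addr_bit, the interleave selector (cs_id) is inserted there, and the upper bits are shifted left by num_intlv_bits to make room. A standalone sketch of just that step, with a worked example; the values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Re-insert the interleave select bits: the result is X..X | I..I | Y..Y,
 * where Y are the low intlv_addr_bit bits of the normalized address,
 * I are num_intlv_bits of cs_id, and X are the remaining upper bits. */
static uint64_t example_deinterleave(uint64_t norm_addr, unsigned int cs_id,
                                     unsigned int intlv_addr_bit,
                                     unsigned int num_intlv_bits)
{
        uint64_t y = norm_addr & ((1ULL << intlv_addr_bit) - 1);
        uint64_t i = (uint64_t)cs_id << intlv_addr_bit;
        uint64_t x = (norm_addr >> intlv_addr_bit)
                        << (intlv_addr_bit + num_intlv_bits);

        return x | i | y;
}

int main(void)
{
        /* Two channels interleaved on address bit 8 (intlv_addr_sel == 0):
         * channel 1, normalized address 0x1234 -> 0x2534. */
        printf("sys offset = 0x%llx\n",
               (unsigned long long)example_deinterleave(0x1234, 1, 8, 1));
        return 0;
}
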
static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
@@ -645,6 +866,7 @@ static void amd_threshold_interrupt(void)
{
u32 low = 0, high = 0, address = 0;
unsigned int bank, block, cpu = smp_processor_id();
+ struct thresh_restart tr;
/* assume first bank caused it */
for (bank = 0; bank < mca_cfg.banks; ++bank) {
@@ -681,6 +903,11 @@ static void amd_threshold_interrupt(void)
log:
__log_error(bank, false, true, ((u64)high << 32) | low);
+
+ /* Reset threshold block after logging error. */
+ memset(&tr, 0, sizeof(tr));
+ tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
+ threshold_restart_bank(&tr);
}
/*
@@ -826,10 +1053,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return th_names[bank];
}
- if (!smca_banks[bank].type)
+ if (!smca_banks[bank].hwid)
return NULL;
- bank_type = smca_banks[bank].type->bank_type;
+ bank_type = smca_banks[bank].hwid->bank_type;
if (b && bank_type == SMCA_UMC) {
if (b->block < ARRAY_SIZE(smca_umc_block_names))
@@ -838,8 +1065,8 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
}
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
- "%s_%x", smca_bank_names[bank_type].name,
- smca_banks[bank].type_instance);
+ "%s_%x", smca_get_name(bank_type),
+ smca_banks[bank].id);
return buf_mcatype;
}
@@ -1010,31 +1237,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
return err;
}
-/* create dir/files for all valid threshold banks */
-static int threshold_create_device(unsigned int cpu)
-{
- unsigned int bank;
- struct threshold_bank **bp;
- int err = 0;
-
- bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
- GFP_KERNEL);
- if (!bp)
- return -ENOMEM;
-
- per_cpu(threshold_banks, cpu) = bp;
-
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- err = threshold_create_bank(cpu, bank);
- if (err)
- return err;
- }
-
- return err;
-}
-
static void deallocate_threshold_block(unsigned int cpu,
unsigned int bank)
{
@@ -1102,48 +1304,71 @@ free_out:
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
-static void threshold_remove_device(unsigned int cpu)
+int mce_threshold_remove_device(unsigned int cpu)
{
unsigned int bank;
+ if (!thresholding_en)
+ return 0;
+
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
threshold_remove_bank(cpu, bank);
}
kfree(per_cpu(threshold_banks, cpu));
+ per_cpu(threshold_banks, cpu) = NULL;
+ return 0;
}
-/* get notified when a cpu comes on/off */
-static void
-amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
+/* create dir/files for all valid threshold banks */
+int mce_threshold_create_device(unsigned int cpu)
{
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- threshold_create_device(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- threshold_remove_device(cpu);
- break;
- default:
- break;
+ unsigned int bank;
+ struct threshold_bank **bp;
+ int err = 0;
+
+ if (!thresholding_en)
+ return 0;
+
+ bp = per_cpu(threshold_banks, cpu);
+ if (bp)
+ return 0;
+
+ bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
+ GFP_KERNEL);
+ if (!bp)
+ return -ENOMEM;
+
+ per_cpu(threshold_banks, cpu) = bp;
+
+ for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ continue;
+ err = threshold_create_bank(cpu, bank);
+ if (err)
+ goto err;
}
+ return err;
+err:
+ mce_threshold_remove_device(cpu);
+ return err;
}
static __init int threshold_init_device(void)
{
unsigned lcpu = 0;
+ if (mce_threshold_vector == amd_threshold_interrupt)
+ thresholding_en = true;
+
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
- int err = threshold_create_device(lcpu);
+ int err = mce_threshold_create_device(lcpu);
if (err)
return err;
}
- threshold_cpu_callback = amd_64_threshold_cpu_callback;
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 1defb8ea882c..190b3e6cef4d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -11,6 +11,8 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
+#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
@@ -130,7 +132,7 @@ bool mce_intel_cmci_poll(void)
* Reset the counter if we've logged an error in the last poll
* during the storm.
*/
- if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)))
+ if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
else
this_cpu_dec(cmci_backoff_cnt);
@@ -342,7 +344,7 @@ void cmci_recheck(void)
return;
local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
+ machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
local_irq_restore(flags);
}
@@ -464,11 +466,46 @@ static void intel_clear_lmce(void)
wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}
+static void intel_ppin_init(struct cpuinfo_x86 *c)
+{
+ unsigned long long val;
+
+ /*
+ * Even if testing the presence of the MSR would be enough, we don't
+ * want to risk the situation where other models reuse this MSR for
+ * other purposes.
+ */
+ switch (c->x86_model) {
+ case INTEL_FAM6_IVYBRIDGE_X:
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_BROADWELL_XEON_D:
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_SKYLAKE_X:
+ if (rdmsrl_safe(MSR_PPIN_CTL, &val))
+ return;
+
+ if ((val & 3UL) == 1UL) {
+ /* PPIN available but disabled: */
+ return;
+ }
+
+ /* If PPIN is disabled, but not locked, try to enable: */
+ if (!(val & 3UL)) {
+ wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
+ rdmsrl_safe(MSR_PPIN_CTL, &val);
+ }
+
+ if ((val & 3UL) == 2UL)
+ set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
+ }
+}
+
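
The enable logic above treats the low two bits of MSR_PPIN_CTL as a lock/enable pair: 01 means the PPIN is locked off, 00 means it can still be enabled by setting bit 1, and 10 means it is usable. A compact truth-table sketch of that state machine; the bit meanings are inferred from the behaviour of the code, not quoted from documentation:

#include <stdio.h>

/* Interpret PPIN_CTL[1:0] the way intel_ppin_init() above does;
 * bit 0 is assumed to be a lock bit, bit 1 the enable bit. */
static const char *example_ppin_state(unsigned long long ctl)
{
        switch (ctl & 3) {
        case 0: return "disabled, unlocked: try to enable by setting bit 1";
        case 1: return "disabled and locked: give up";
        case 2: return "enabled: set X86_FEATURE_INTEL_PPIN";
        default: return "enabled but locked: left alone by the code above";
        }
}

int main(void)
{
        for (unsigned long long v = 0; v < 4; v++)
                printf("PPIN_CTL[1:0] = %llu -> %s\n", v, example_ppin_state(v));
        return 0;
}
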
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
intel_init_thermal(c);
intel_init_cmci();
intel_init_lmce();
+ intel_ppin_init(c);
}
void mce_intel_feature_clear(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6b9dc4d18ccc..465aca8be009 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -26,7 +26,6 @@
#include <asm/processor.h>
#include <asm/apic.h>
-#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
@@ -271,58 +270,32 @@ static void thermal_throttle_remove_dev(struct device *dev)
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-thermal_throttle_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int thermal_throttle_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
- int err = 0;
-
- dev = get_cpu_device(cpu);
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- err = thermal_throttle_add_dev(dev, cpu);
- WARN_ON(err);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- thermal_throttle_remove_dev(dev);
- break;
- }
- return notifier_from_errno(err);
+ struct device *dev = get_cpu_device(cpu);
+
+ return thermal_throttle_add_dev(dev, cpu);
}
-static struct notifier_block thermal_throttle_cpu_notifier =
+static int thermal_throttle_offline(unsigned int cpu)
{
- .notifier_call = thermal_throttle_cpu_callback,
-};
+ struct device *dev = get_cpu_device(cpu);
+
+ thermal_throttle_remove_dev(dev);
+ return 0;
+}
static __init int thermal_throttle_init_device(void)
{
- unsigned int cpu = 0;
- int err;
+ int ret;
if (!atomic_read(&therm_throt_en))
return 0;
- cpu_notifier_register_begin();
-
- /* connect live CPUs to sysfs */
- for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
- WARN_ON(err);
- }
-
- __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
- cpu_notifier_register_done();
-
- return 0;
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
+ thermal_throttle_online,
+ thermal_throttle_offline);
+ return ret < 0 ? ret : 0;
}
device_initcall(thermal_throttle_init_device);
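
The thermal-throttle conversion above is the standard recipe for replacing a CPU notifier with the hotplug state machine: a dynamic AP-online state with paired online/offline callbacks, where a negative return from cpuhp_setup_state() is an error and a non-negative one is the allocated state number. A kernel-style sketch of the same pattern for a generic per-CPU resource; the "example" names are hypothetical, cpuhp_setup_state()/cpuhp_remove_state() are the real API:

static enum cpuhp_state example_hp_online;

static int example_cpu_online(unsigned int cpu)
{
        /* allocate/enable the per-CPU resource here */
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        /* tear the per-CPU resource down again */
        return 0;
}

static int __init example_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                example_cpu_online, example_cpu_offline);
        if (ret < 0)
                return ret;

        example_hp_online = ret;        /* needed later for cpuhp_remove_state() */
        return 0;
}
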
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index fcf9ae9384f4..9beb092d68a5 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -6,7 +6,6 @@
#include <asm/irq_vectors.h>
#include <asm/apic.h>
-#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
index 220b1a508513..ba12e8aa4a45 100644
--- a/arch/x86/kernel/cpu/microcode/Makefile
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -1,4 +1,4 @@
microcode-y := core.o
obj-$(CONFIG_MICROCODE) += microcode.o
-microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
+microcode-$(CONFIG_MICROCODE_INTEL) += intel.o
microcode-$(CONFIG_MICROCODE_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 017bda12caae..6f353bdb3a25 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -5,6 +5,7 @@
* CPUs and later.
*
* Copyright (C) 2008-2011 Advanced Micro Devices Inc.
+ * 2013-2016 Borislav Petkov <bp@alien8.de>
*
* Author: Peter Oruba <peter.oruba@amd.com>
*
@@ -39,64 +40,25 @@
static struct equiv_cpu_entry *equiv_cpu_table;
-struct ucode_patch {
- struct list_head plist;
- void *data;
- u32 patch_id;
- u16 equiv_cpu;
-};
-
-static LIST_HEAD(pcache);
-
/*
* This points to the current valid container of microcode patches which we will
- * save from the initrd before jettisoning its contents.
+ * save from the initrd/builtin before jettisoning its contents.
*/
-static u8 *container;
-static size_t container_size;
-static bool ucode_builtin;
+struct container {
+ u8 *data;
+ size_t size;
+} cont;
static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;
-static struct cpio_data ucode_cpio;
-
-static struct cpio_data __init find_ucode_in_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- char *path;
- void *start;
- size_t size;
-
- /*
- * Microcode patch container file is prepended to the initrd in cpio
- * format. See Documentation/x86/early-microcode.txt
- */
- static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
-
-#ifdef CONFIG_X86_32
- struct boot_params *p;
-
- /*
- * On 32-bit, early load occurs before paging is turned on so we need
- * to use physical addresses.
- */
- p = (struct boot_params *)__pa_nodebug(&boot_params);
- path = (char *)__pa_nodebug(ucode_path);
- start = (void *)p->hdr.ramdisk_image;
- size = p->hdr.ramdisk_size;
-#else
- path = ucode_path;
- start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
- size = boot_params.hdr.ramdisk_size;
-#endif /* !CONFIG_X86_32 */
-
- return find_cpio_data(path, start, size, NULL);
-#else
- return (struct cpio_data){ NULL, 0, "" };
-#endif
-}
+/*
+ * Microcode patch container file is prepended to the initrd in cpio
+ * format. See Documentation/x86/early-microcode.txt
+ */
+static const char
+ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
static size_t compute_container_size(u8 *data, u32 total_size)
{
@@ -135,48 +97,48 @@ static size_t compute_container_size(u8 *data, u32 total_size)
return size;
}
+static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
+ unsigned int sig)
+{
+ int i = 0;
+
+ if (!equiv_cpu_table)
+ return 0;
+
+ while (equiv_cpu_table[i].installed_cpu != 0) {
+ if (sig == equiv_cpu_table[i].installed_cpu)
+ return equiv_cpu_table[i].equiv_cpu;
+
+ i++;
+ }
+ return 0;
+}
+
/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
+ * This scans the ucode blob for the proper container as we can have multiple
+ * containers glued together.
*/
-static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
+static struct container
+find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
{
+ struct container ret = { NULL, 0 };
+ u32 eax, ebx, ecx, edx;
struct equiv_cpu_entry *eq;
- size_t *cont_sz;
- u32 *header;
- u8 *data, **cont;
- u8 (*patch)[PATCH_MAX_SIZE];
- u16 eq_id = 0;
int offset, left;
- u32 rev, eax, ebx, ecx, edx;
- u32 *new_rev;
-
-#ifdef CONFIG_X86_32
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- cont_sz = (size_t *)__pa_nodebug(&container_size);
- cont = (u8 **)__pa_nodebug(&container);
- patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
-#else
- new_rev = &ucode_new_rev;
- cont_sz = &container_size;
- cont = &container;
- patch = &amd_ucode_patch;
-#endif
+ u16 eq_id = 0;
+ u32 *header;
+ u8 *data;
data = ucode;
left = size;
header = (u32 *)data;
+
/* find equiv cpu table */
if (header[0] != UCODE_MAGIC ||
header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
header[2] == 0) /* size */
- return;
+ return ret;
eax = 0x00000001;
ecx = 0;
@@ -185,7 +147,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
while (left > 0) {
eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
- *cont = data;
+ ret.data = data;
/* Advance past the container header */
offset = header[2] + CONTAINER_HDR_SZ;
@@ -194,15 +156,15 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
eq_id = find_equiv_id(eq, eax);
if (eq_id) {
- this_equiv_id = eq_id;
- *cont_sz = compute_container_size(*cont, left + offset);
+ ret.size = compute_container_size(ret.data, left + offset);
/*
* truncate how much we need to iterate over in the
* ucode update loop below
*/
- left = *cont_sz - offset;
- break;
+ left = ret.size - offset;
+ *ret_id = eq_id;
+ return ret;
}
/*
@@ -212,6 +174,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
*/
while (left > 0) {
header = (u32 *)data;
+
if (header[0] == UCODE_MAGIC &&
header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
break;
@@ -226,14 +189,64 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
ucode = data;
}
- if (!eq_id) {
- *cont = NULL;
- *cont_sz = 0;
- return;
- }
+ return ret;
+}
+
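
find_proper_container() relies on the container layout: a 12-byte header (magic, table type, table size) followed by the equivalence-CPU table, then a sequence of patch sections each preceded by its own type/size header, until the data runs out or the magic of the next glued-on container appears. A rough standalone sketch of that size computation; the header sizes and the magic value are assumptions inferred from the code above, not quoted definitions:

#include <stdint.h>
#include <stddef.h>

#define EX_UCODE_MAGIC        0x00414d44u   /* assumed value of UCODE_MAGIC */
#define EX_CONTAINER_HDR_SZ   12            /* magic + type + equiv-table size */
#define EX_SECTION_HDR_SZ     8             /* patch type + patch size */

/* Walk one container and return how many bytes it occupies within a blob
 * that may hold several containers glued together. Illustrative only. */
static size_t example_container_size(const uint8_t *data, size_t total)
{
        const uint32_t *hdr = (const uint32_t *)data;
        size_t size = hdr[2] + EX_CONTAINER_HDR_SZ;     /* skip equiv table */

        while (size + EX_SECTION_HDR_SZ <= total) {
                const uint32_t *sec = (const uint32_t *)(data + size);

                if (sec[0] == EX_UCODE_MAGIC)           /* next container */
                        break;

                size += sec[1] + EX_SECTION_HDR_SZ;     /* skip this patch */
        }
        return size;
}
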
+static int __apply_microcode_amd(struct microcode_amd *mc_amd)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+
+ /* verify patch application was successful */
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ if (rev != mc_amd->hdr.patch_id)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
+static struct container
+apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
+{
+ struct container ret = { NULL, 0 };
+ u8 (*patch)[PATCH_MAX_SIZE];
+ int offset, left;
+ u32 rev, *header;
+ u8 *data;
+ u16 eq_id = 0;
+ u32 *new_rev;
+
+#ifdef CONFIG_X86_32
+ new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+ patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+#else
+ new_rev = &ucode_new_rev;
+ patch = &amd_ucode_patch;
+#endif
if (check_current_patch_level(&rev, true))
- return;
+ return (struct container){ NULL, 0 };
+
+ ret = find_proper_container(ucode, size, &eq_id);
+ if (!eq_id)
+ return (struct container){ NULL, 0 };
+
+ this_equiv_id = eq_id;
+ header = (u32 *)ret.data;
+
+ /* We're pointing to an equiv table, skip over it. */
+ data = ret.data + header[2] + CONTAINER_HDR_SZ;
+ left = ret.size - (header[2] + CONTAINER_HDR_SZ);
while (left > 0) {
struct microcode_amd *mc;
@@ -252,8 +265,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
*new_rev = rev;
if (save_patch)
- memcpy(patch, mc,
- min_t(u32, header[1], PATCH_MAX_SIZE));
+ memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE));
}
}
@@ -261,10 +273,10 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
data += offset;
left -= offset;
}
+ return ret;
}
-static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
- unsigned int family)
+static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -281,47 +293,45 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
void __init load_ucode_amd_bsp(unsigned int family)
{
+ struct ucode_cpu_info *uci;
struct cpio_data cp;
- bool *builtin;
- void **data;
- size_t *size;
+ const char *path;
+ bool use_pa;
-#ifdef CONFIG_X86_32
- data = (void **)__pa_nodebug(&ucode_cpio.data);
- size = (size_t *)__pa_nodebug(&ucode_cpio.size);
- builtin = (bool *)__pa_nodebug(&ucode_builtin);
-#else
- data = &ucode_cpio.data;
- size = &ucode_cpio.size;
- builtin = &ucode_builtin;
-#endif
+ if (IS_ENABLED(CONFIG_X86_32)) {
+ uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
+ path = (const char *)__pa_nodebug(ucode_path);
+ use_pa = true;
+ } else {
+ uci = ucode_cpu_info;
+ path = ucode_path;
+ use_pa = false;
+ }
- *builtin = load_builtin_amd_microcode(&cp, family);
- if (!*builtin)
- cp = find_ucode_in_initrd();
+ if (!get_builtin_microcode(&cp, family))
+ cp = find_microcode_in_initrd(path, use_pa);
if (!(cp.data && cp.size))
return;
- *data = cp.data;
- *size = cp.size;
+ /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
+ uci->cpu_sig.sig = cpuid_eax(1);
- apply_ucode_in_initrd(cp.data, cp.size, true);
+ apply_microcode_early_amd(cp.data, cp.size, true);
}
#ifdef CONFIG_X86_32
/*
* On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
- * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
+ * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
+ * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
* which is used upon resume from suspend.
*/
-void load_ucode_amd_ap(void)
+void load_ucode_amd_ap(unsigned int family)
{
struct microcode_amd *mc;
- size_t *usize;
- void **ucode;
+ struct cpio_data cp;
mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
@@ -329,60 +339,63 @@ void load_ucode_amd_ap(void)
return;
}
- ucode = (void *)__pa_nodebug(&container);
- usize = (size_t *)__pa_nodebug(&container_size);
+ if (!get_builtin_microcode(&cp, family))
+ cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
- if (!*ucode || !*usize)
+ if (!(cp.data && cp.size))
return;
- apply_ucode_in_initrd(*ucode, *usize, false);
-}
-
-static void __init collect_cpu_sig_on_bsp(void *arg)
-{
- unsigned int cpu = smp_processor_id();
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- uci->cpu_sig.sig = cpuid_eax(0x00000001);
-}
-
-static void __init get_bsp_sig(void)
-{
- unsigned int bsp = boot_cpu_data.cpu_index;
- struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
- if (!uci->cpu_sig.sig)
- smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+ /*
+ * This will populate amd_ucode_patch above so that subsequent APs can
+ * use it directly instead of going down this path again.
+ */
+ apply_microcode_early_amd(cp.data, cp.size, true);
}
#else
-void load_ucode_amd_ap(void)
+void load_ucode_amd_ap(unsigned int family)
{
- unsigned int cpu = smp_processor_id();
struct equiv_cpu_entry *eq;
struct microcode_amd *mc;
- u8 *cont = container;
u32 rev, eax;
u16 eq_id;
- /* Exit if called on the BSP. */
- if (!cpu)
+ /* 64-bit runs with paging enabled, thus early==false. */
+ if (check_current_patch_level(&rev, false))
return;
- if (!container)
- return;
+ /* First AP hasn't cached it yet, go through the blob. */
+ if (!cont.data) {
+ struct cpio_data cp = { NULL, 0, "" };
- /*
- * 64-bit runs with paging enabled, thus early==false.
- */
- if (check_current_patch_level(&rev, false))
- return;
+ if (cont.size == -1)
+ return;
- /* Add CONFIG_RANDOMIZE_MEMORY offset. */
- if (!ucode_builtin)
- cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+reget:
+ if (!get_builtin_microcode(&cp, family)) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ cp = find_cpio_data(ucode_path, (void *)initrd_start,
+ initrd_end - initrd_start, NULL);
+#endif
+ if (!(cp.data && cp.size)) {
+ /*
+ * Mark it so that other APs do not scan again
+ * for no real reason and slow down boot
+ * needlessly.
+ */
+ cont.size = -1;
+ return;
+ }
+ }
+
+ cont = apply_microcode_early_amd(cp.data, cp.size, false);
+ if (!(cont.data && cont.size)) {
+ cont.size = -1;
+ return;
+ }
+ }
eax = cpuid_eax(0x00000001);
- eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
+ eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
eq_id = find_equiv_id(eq, eax);
if (!eq_id)
@@ -397,61 +410,50 @@ void load_ucode_amd_ap(void)
}
} else {
- if (!ucode_cpio.data)
- return;
/*
* AP has a different equivalence ID than BSP, looks like
* mixed-steppings silicon so go through the ucode blob anew.
*/
- apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
+ goto reget;
}
}
-#endif
+#endif /* CONFIG_X86_32 */
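The 64-bit AP path above caches its scan result and uses cont.size == -1 as a "scanned already, found nothing" marker so later APs skip the blob walk. A small standalone sketch of that negative-size sentinel, with a hypothetical expensive_scan() standing in for the initrd search:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Cached scan result shared by all callers (single writer at early boot). */
static struct {
        void  *data;
        size_t size;    /* 0 = not scanned yet, (size_t)-1 = scanned, nothing found */
} cache;

/* Hypothetical expensive scan, standing in for the container walk above. */
static bool expensive_scan(void **data, size_t *size)
{
        *data = NULL;
        *size = 0;
        return false;   /* pretend nothing was found */
}

static void *get_cached(size_t *size)
{
        if (cache.size == (size_t)-1)   /* already failed once: don't rescan */
                return NULL;

        if (!cache.data) {
                if (!expensive_scan(&cache.data, &cache.size)) {
                        cache.size = (size_t)-1;        /* remember the failure */
                        return NULL;
                }
        }

        *size = cache.size;
        return cache.data;
}

int main(void)
{
        size_t sz;

        if (!get_cached(&sz))
                puts("first lookup: nothing found, failure cached");
        if (!get_cached(&sz))
                puts("second lookup: returns immediately from the cached failure");
        return 0;
}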
+
+static enum ucode_state
+load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-int __init save_microcode_in_initrd_amd(void)
+int __init save_microcode_in_initrd_amd(unsigned int fam)
{
- unsigned long cont;
- int retval = 0;
enum ucode_state ret;
- u8 *cont_va;
- u32 eax;
+ int retval = 0;
+ u16 eq_id;
- if (!container)
- return -EINVAL;
+ if (!cont.data) {
+ if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
+ struct cpio_data cp = { NULL, 0, "" };
-#ifdef CONFIG_X86_32
- get_bsp_sig();
- cont = (unsigned long)container;
- cont_va = __va(container);
-#else
- /*
- * We need the physical address of the container for both bitness since
- * boot_params.hdr.ramdisk_image is a physical address.
- */
- cont = __pa_nodebug(container);
- cont_va = container;
+#ifdef CONFIG_BLK_DEV_INITRD
+ cp = find_cpio_data(ucode_path, (void *)initrd_start,
+ initrd_end - initrd_start, NULL);
#endif
- /*
- * Take into account the fact that the ramdisk might get relocated and
- * therefore we need to recompute the container's position in virtual
- * memory space.
- */
- if (relocated_ramdisk)
- container = (u8 *)(__va(relocated_ramdisk) +
- (cont - boot_params.hdr.ramdisk_image));
- else
- container = cont_va;
+ if (!(cp.data && cp.size)) {
+ cont.size = -1;
+ return -EINVAL;
+ }
- /* Add CONFIG_RANDOMIZE_MEMORY offset. */
- if (!ucode_builtin)
- container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+ cont = find_proper_container(cp.data, cp.size, &eq_id);
+ if (!eq_id) {
+ cont.size = -1;
+ return -EINVAL;
+ }
- eax = cpuid_eax(0x00000001);
- eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ } else
+ return -EINVAL;
+ }
- ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
+ ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
if (ret != UCODE_OK)
retval = -EINVAL;
@@ -459,8 +461,8 @@ int __init save_microcode_in_initrd_amd(void)
* This will be freed any msec now, stash patches for the current
* family and switch to patch cache for cpu hotplug, etc later.
*/
- container = NULL;
- container_size = 0;
+ cont.data = NULL;
+ cont.size = 0;
return retval;
}
@@ -478,8 +480,10 @@ void reload_ucode_amd(void)
return;
mc = (struct microcode_amd *)amd_ucode_patch;
+ if (!mc)
+ return;
- if (mc && rev < mc->hdr.patch_id) {
+ if (rev < mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
ucode_new_rev = mc->hdr.patch_id;
pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
@@ -513,7 +517,7 @@ static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
struct ucode_patch *p;
- list_for_each_entry(p, &pcache, plist)
+ list_for_each_entry(p, &microcode_cache, plist)
if (p->equiv_cpu == equiv_cpu)
return p;
return NULL;
@@ -523,7 +527,7 @@ static void update_cache(struct ucode_patch *new_patch)
{
struct ucode_patch *p;
- list_for_each_entry(p, &pcache, plist) {
+ list_for_each_entry(p, &microcode_cache, plist) {
if (p->equiv_cpu == new_patch->equiv_cpu) {
if (p->patch_id >= new_patch->patch_id)
/* we already have the latest patch */
@@ -536,14 +540,14 @@ static void update_cache(struct ucode_patch *new_patch)
}
}
/* no patch found, add it */
- list_add_tail(&new_patch->plist, &pcache);
+ list_add_tail(&new_patch->plist, &microcode_cache);
}
static void free_cache(void)
{
struct ucode_patch *p, *tmp;
- list_for_each_entry_safe(p, tmp, &pcache, plist) {
+ list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
__list_del(p->plist.prev, p->plist.next);
kfree(p->data);
kfree(p);
@@ -663,21 +667,7 @@ bool check_current_patch_level(u32 *rev, bool early)
return ret;
}
-int __apply_microcode_amd(struct microcode_amd *mc_amd)
-{
- u32 rev, dummy;
-
- native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
-
- /* verify patch application was successful */
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- if (rev != mc_amd->hdr.patch_id)
- return -1;
-
- return 0;
-}
-
-int apply_microcode_amd(int cpu)
+static int apply_microcode_amd(int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_amd *mc_amd;
@@ -860,7 +850,8 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+static enum ucode_state
+load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 5ce5155f0695..6996413c78c3 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
* 2006 Shaohua Li <shaohua.li@intel.com>
- * 2013-2015 Borislav Petkov <bp@alien8.de>
+ * 2013-2016 Borislav Petkov <bp@alien8.de>
*
* X86 CPU microcode early update for Linux:
*
@@ -39,12 +39,15 @@
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
+#include <asm/setup.h>
-#define MICROCODE_VERSION "2.01"
+#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr;
+LIST_HEAD(microcode_cache);
+
/*
* Synchronization.
*
@@ -167,7 +170,7 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
- load_ucode_amd_ap();
+ load_ucode_amd_ap(family);
break;
default:
break;
@@ -185,7 +188,7 @@ static int __init save_microcode_in_initrd(void)
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd();
+ return save_microcode_in_initrd_amd(c->x86);
break;
default:
break;
@@ -194,6 +197,58 @@ static int __init save_microcode_in_initrd(void)
return -EINVAL;
}
+struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start = 0;
+ size_t size;
+
+#ifdef CONFIG_X86_32
+ struct boot_params *params;
+
+ if (use_pa)
+ params = (struct boot_params *)__pa_nodebug(&boot_params);
+ else
+ params = &boot_params;
+
+ size = params->hdr.ramdisk_size;
+
+ /*
+ * Set start only if we have an initrd image. We cannot use initrd_start
+ * because it is not set that early yet.
+ */
+ if (size)
+ start = params->hdr.ramdisk_image;
+
+# else /* CONFIG_X86_64 */
+ size = (unsigned long)boot_params.ext_ramdisk_size << 32;
+ size |= boot_params.hdr.ramdisk_size;
+
+ if (size) {
+ start = (unsigned long)boot_params.ext_ramdisk_image << 32;
+ start |= boot_params.hdr.ramdisk_image;
+
+ start += PAGE_OFFSET;
+ }
+# endif
+
+ /*
+ * Did we relocate the ramdisk?
+ *
+ * The ramdisk can be relocated *after* microcode has been applied on the
+ * BSP, so rely on use_pa (i.e., use physical addresses) - even if it is
+ * not absolutely correct - to determine whether the ramdisk relocation
+ * has already been done.
+ */
+ if (!use_pa && relocated_ramdisk)
+ start = initrd_start;
+
+ return find_cpio_data(path, (void *)start, size, NULL);
+#else /* !CONFIG_BLK_DEV_INITRD */
+ return (struct cpio_data){ NULL, 0, "" };
+#endif
+}
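On 64-bit the ramdisk address is split across boot_params.ext_ramdisk_image (high half) and hdr.ramdisk_image (low half), and the resulting physical address is then shifted into the direct mapping. A compilable sketch of that composition; the PAGE_OFFSET value and the field contents are illustrative assumptions only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the real value depends on the kernel's memory layout. */
#define EXAMPLE_PAGE_OFFSET 0xffff880000000000ULL

/* Rebuild a 64-bit physical address from the two 32-bit boot_params fields. */
static uint64_t ramdisk_phys(uint32_t ext_image, uint32_t image)
{
        return ((uint64_t)ext_image << 32) | image;
}

int main(void)
{
        uint32_t ext_image = 0x00000001;        /* hypothetical high half */
        uint32_t image     = 0x7f000000;        /* hypothetical low half  */
        uint64_t phys = ramdisk_phys(ext_image, image);

        /* On 64-bit, early code reaches it through the direct mapping. */
        uint64_t virt = phys + EXAMPLE_PAGE_OFFSET;

        printf("phys=0x%llx virt=0x%llx\n",
               (unsigned long long)phys, (unsigned long long)virt);
        return 0;
}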
+
void reload_early_microcode(void)
{
int vendor, family;
@@ -453,16 +508,17 @@ static struct attribute_group mc_attr_group = {
static void microcode_fini_cpu(int cpu)
{
- microcode_ops->microcode_fini_cpu(cpu);
+ if (microcode_ops->microcode_fini_cpu)
+ microcode_ops->microcode_fini_cpu(cpu);
}
static enum ucode_state microcode_resume_cpu(int cpu)
{
- pr_debug("CPU%d updated upon resume\n", cpu);
-
if (apply_microcode_on_target(cpu))
return UCODE_ERROR;
+ pr_debug("CPU%d updated upon resume\n", cpu);
+
return UCODE_OK;
}
@@ -496,6 +552,9 @@ static enum ucode_state microcode_update_cpu(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ /* Refresh CPU microcode revision after resume. */
+ collect_cpu_info(cpu);
+
if (uci->valid)
return microcode_resume_cpu(cpu);
@@ -579,12 +638,7 @@ static int mc_cpu_down_prep(unsigned int cpu)
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
- /*
- * When a CPU goes offline, don't free up or invalidate the copy of
- * the microcode in kernel memory, so that we can reuse it when the
- * CPU comes back online without unnecessarily requesting the userspace
- * for it again.
- */
+
return 0;
}
@@ -649,8 +703,7 @@ int __init microcode_init(void)
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
- pr_info("Microcode Update Driver: v" MICROCODE_VERSION
- " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
+ pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
return 0;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index cdc0deab00c9..54d50c3694d8 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -39,125 +39,83 @@
#include <asm/setup.h>
#include <asm/msr.h>
-/*
- * Temporary microcode blobs pointers storage. We note here during early load
- * the pointers to microcode blobs we've got from whatever storage (detached
- * initrd, builtin). Later on, we put those into final storage
- * mc_saved_data.mc_saved.
- *
- * Important: those are offsets from the beginning of initrd or absolute
- * addresses within the kernel image when built-in.
- */
-static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
-
-static struct mc_saved_data {
- unsigned int num_saved;
- struct microcode_intel **mc_saved;
-} mc_saved_data;
+static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
-/* Microcode blobs within the initrd. 0 if builtin. */
-static struct ucode_blobs {
- unsigned long start;
- bool valid;
-} blobs;
+/* Current microcode patch used in early patching */
+struct microcode_intel *intel_ucode_patch;
-/* Go through saved patches and find the one suitable for the current CPU. */
-static enum ucode_state
-find_microcode_patch(struct microcode_intel **saved,
- unsigned int num_saved, struct ucode_cpu_info *uci)
+static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
+ unsigned int s2, unsigned int p2)
{
- struct microcode_intel *ucode_ptr, *new_mc = NULL;
- struct microcode_header_intel *mc_hdr;
- int new_rev, ret, i;
-
- new_rev = uci->cpu_sig.rev;
-
- for (i = 0; i < num_saved; i++) {
- ucode_ptr = saved[i];
- mc_hdr = (struct microcode_header_intel *)ucode_ptr;
-
- ret = has_newer_microcode(ucode_ptr,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- new_rev);
- if (!ret)
- continue;
-
- new_rev = mc_hdr->rev;
- new_mc = ucode_ptr;
- }
+ if (s1 != s2)
+ return false;
- if (!new_mc)
- return UCODE_NFOUND;
+ /* Processor flags are either both 0 ... */
+ if (!p1 && !p2)
+ return true;
- uci->mc = (struct microcode_intel *)new_mc;
- return UCODE_OK;
+ /* ... or they intersect. */
+ return p1 & p2;
}
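The matching rule above is: signatures must be equal, and the platform-flag masks must either both be zero or share at least one bit. A tiny self-contained check of that rule (the signature and flag values are invented):

#include <stdbool.h>
#include <stdio.h>

static bool sigs_match(unsigned int s1, unsigned int p1,
                       unsigned int s2, unsigned int p2)
{
        if (s1 != s2)
                return false;
        if (!p1 && !p2)         /* neither side declares platform flags */
                return true;
        return p1 & p2;         /* otherwise the flag masks must intersect */
}

int main(void)
{
        /* Same signature, overlapping platform flags: match. */
        printf("%d\n", sigs_match(0x306c3, 0x2, 0x306c3, 0x32));        /* 1 */
        /* Same signature, disjoint flags: no match. */
        printf("%d\n", sigs_match(0x306c3, 0x1, 0x306c3, 0x32));        /* 0 */
        /* Different signature: no match regardless of flags. */
        printf("%d\n", sigs_match(0x306c3, 0x0, 0x406e3, 0x0));         /* 0 */
        return 0;
}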
-static inline void
-copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
- unsigned long off, int num_saved)
+/*
+ * Returns 1 if update has been found, 0 otherwise.
+ */
+static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
+ struct microcode_header_intel *mc_hdr = mc;
+ struct extended_sigtable *ext_hdr;
+ struct extended_signature *ext_sig;
int i;
- for (i = 0; i < num_saved; i++)
- mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + off);
-}
-
-#ifdef CONFIG_X86_32
-static void
-microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
-{
- int i;
- struct microcode_intel ***mc_saved;
+ if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
+ return 1;
- mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);
+ /* Look for ext. headers: */
+ if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
+ return 0;
- for (i = 0; i < mcs->num_saved; i++) {
- struct microcode_intel *p;
+ ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
+ ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
- p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
- mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
+ for (i = 0; i < ext_hdr->count; i++) {
+ if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
+ return 1;
+ ext_sig++;
}
+ return 0;
}
-#endif
-static enum ucode_state
-load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- unsigned long offset, struct ucode_cpu_info *uci)
+/*
+ * Returns 1 if update has been found, 0 otherwise.
+ */
+static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
- unsigned int count = mcs->num_saved;
+ struct microcode_header_intel *mc_hdr = mc;
- if (!mcs->mc_saved) {
- copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
+ if (mc_hdr->rev <= new_rev)
+ return 0;
- return find_microcode_patch(mc_saved_tmp, count, uci);
- } else {
-#ifdef CONFIG_X86_32
- microcode_phys(mc_saved_tmp, mcs);
- return find_microcode_patch(mc_saved_tmp, count, uci);
-#else
- return find_microcode_patch(mcs->mc_saved, count, uci);
-#endif
- }
+ return find_matching_signature(mc, csig, cpf);
}
/*
* Given CPU signature and a microcode patch, this function finds if the
* microcode patch has matching family and model with the CPU.
+ *
+ * %true - if there's a match
+ * %false - otherwise
*/
-static enum ucode_state
-matching_model_microcode(struct microcode_header_intel *mc_header,
- unsigned long sig)
+static bool microcode_matches(struct microcode_header_intel *mc_header,
+ unsigned long sig)
{
- unsigned int fam, model;
- unsigned int fam_ucode, model_ucode;
- struct extended_sigtable *ext_header;
unsigned long total_size = get_totalsize(mc_header);
unsigned long data_size = get_datasize(mc_header);
- int ext_sigcount, i;
+ struct extended_sigtable *ext_header;
+ unsigned int fam_ucode, model_ucode;
struct extended_signature *ext_sig;
+ unsigned int fam, model;
+ int ext_sigcount, i;
fam = x86_family(sig);
model = x86_model(sig);
@@ -166,11 +124,11 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
model_ucode = x86_model(mc_header->sig);
if (fam == fam_ucode && model == model_ucode)
- return UCODE_OK;
+ return true;
/* Look for ext. headers: */
if (total_size <= data_size + MC_HEADER_SIZE)
- return UCODE_NFOUND;
+ return false;
ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
@@ -181,192 +139,242 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
model_ucode = x86_model(ext_sig->sig);
if (fam == fam_ucode && model == model_ucode)
- return UCODE_OK;
+ return true;
ext_sig++;
}
- return UCODE_NFOUND;
+ return false;
}
-static int
-save_microcode(struct mc_saved_data *mcs,
- struct microcode_intel **mc_saved_src,
- unsigned int num_saved)
+static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
- int i, j;
- struct microcode_intel **saved_ptr;
- int ret;
+ struct ucode_patch *p;
- if (!num_saved)
- return -EINVAL;
+ p = kzalloc(size, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
- /*
- * Copy new microcode data.
- */
- saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
- if (!saved_ptr)
- return -ENOMEM;
-
- for (i = 0; i < num_saved; i++) {
- struct microcode_header_intel *mc_hdr;
- struct microcode_intel *mc;
- unsigned long size;
-
- if (!mc_saved_src[i]) {
- ret = -EINVAL;
- goto err;
- }
+ p->data = kmemdup(data, size, GFP_KERNEL);
+ if (!p->data) {
+ kfree(p);
+ return ERR_PTR(-ENOMEM);
+ }
- mc = mc_saved_src[i];
- mc_hdr = &mc->hdr;
- size = get_totalsize(mc_hdr);
+ return p;
+}
- saved_ptr[i] = kmemdup(mc, size, GFP_KERNEL);
- if (!saved_ptr[i]) {
- ret = -ENOMEM;
- goto err;
+static void save_microcode_patch(void *data, unsigned int size)
+{
+ struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+ struct ucode_patch *iter, *tmp, *p;
+ bool prev_found = false;
+ unsigned int sig, pf;
+
+ mc_hdr = (struct microcode_header_intel *)data;
+
+ list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
+ mc_saved_hdr = (struct microcode_header_intel *)iter->data;
+ sig = mc_saved_hdr->sig;
+ pf = mc_saved_hdr->pf;
+
+ if (find_matching_signature(data, sig, pf)) {
+ prev_found = true;
+
+ if (mc_hdr->rev <= mc_saved_hdr->rev)
+ continue;
+
+ p = __alloc_microcode_buf(data, size);
+ if (IS_ERR(p))
+ pr_err("Error allocating buffer %p\n", data);
+ else
+ list_replace(&iter->plist, &p->plist);
}
}
/*
- * Point to newly saved microcode.
+ * No previous patch for this signature was found in the list cache; save
+ * the newly found one.
*/
- mcs->mc_saved = saved_ptr;
- mcs->num_saved = num_saved;
-
- return 0;
-
-err:
- for (j = 0; j <= i; j++)
- kfree(saved_ptr[j]);
- kfree(saved_ptr);
-
- return ret;
+ if (!prev_found) {
+ p = __alloc_microcode_buf(data, size);
+ if (IS_ERR(p))
+ pr_err("Error allocating buffer for %p\n", data);
+ else
+ list_add_tail(&p->plist, &microcode_cache);
+ }
}
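save_microcode_patch() keeps at most one cached patch per signature and only replaces it when the new revision is higher. The same policy, sketched with a plain array instead of the kernel's list-based cache so it runs standalone; the signatures and revisions are made up:

#include <stdio.h>

struct patch {
        unsigned int sig;       /* CPU signature the patch applies to */
        unsigned int rev;       /* patch revision */
};

#define CACHE_MAX 8
static struct patch cache[CACHE_MAX];
static int cache_len;

/* Keep at most one entry per signature, always the highest revision. */
static void cache_save(struct patch new)
{
        int i;

        for (i = 0; i < cache_len; i++) {
                if (cache[i].sig != new.sig)
                        continue;
                if (new.rev > cache[i].rev)     /* newer: replace in place */
                        cache[i] = new;
                return;                         /* older or equal: keep what we have */
        }

        if (cache_len < CACHE_MAX)              /* first patch for this signature */
                cache[cache_len++] = new;
}

int main(void)
{
        cache_save((struct patch){ 0x306c3, 0x1c });
        cache_save((struct patch){ 0x306c3, 0x22 });    /* replaces 0x1c */
        cache_save((struct patch){ 0x306c3, 0x20 });    /* ignored, older */

        printf("sig=0x%x rev=0x%x (entries=%d)\n",
               cache[0].sig, cache[0].rev, cache_len);
        return 0;
}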
-/*
- * A microcode patch in ucode_ptr is saved into mc_saved
- * - if it has matching signature and newer revision compared to an existing
- * patch mc_saved.
- * - or if it is a newly discovered microcode patch.
- *
- * The microcode patch should have matching model with CPU.
- *
- * Returns: The updated number @num_saved of saved microcode patches.
- */
-static unsigned int _save_mc(struct microcode_intel **mc_saved,
- u8 *ucode_ptr, unsigned int num_saved)
+static int microcode_sanity_check(void *mc, int print_err)
{
- struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- unsigned int sig, pf;
- int found = 0, i;
+ unsigned long total_size, data_size, ext_table_size;
+ struct microcode_header_intel *mc_header = mc;
+ struct extended_sigtable *ext_header = NULL;
+ u32 sum, orig_sum, ext_sigcount = 0, i;
+ struct extended_signature *ext_sig;
- mc_hdr = (struct microcode_header_intel *)ucode_ptr;
+ total_size = get_totalsize(mc_header);
+ data_size = get_datasize(mc_header);
- for (i = 0; i < num_saved; i++) {
- mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
- sig = mc_saved_hdr->sig;
- pf = mc_saved_hdr->pf;
+ if (data_size + MC_HEADER_SIZE > total_size) {
+ if (print_err)
+ pr_err("Error: bad microcode data file size.\n");
+ return -EINVAL;
+ }
- if (!find_matching_signature(ucode_ptr, sig, pf))
- continue;
+ if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
+ if (print_err)
+ pr_err("Error: invalid/unknown microcode update format.\n");
+ return -EINVAL;
+ }
- found = 1;
+ ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
+ if (ext_table_size) {
+ u32 ext_table_sum = 0;
+ u32 *ext_tablep;
- if (mc_hdr->rev <= mc_saved_hdr->rev)
- continue;
+ if ((ext_table_size < EXT_HEADER_SIZE)
+ || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
+ if (print_err)
+ pr_err("Error: truncated extended signature table.\n");
+ return -EINVAL;
+ }
+
+ ext_header = mc + MC_HEADER_SIZE + data_size;
+ if (ext_table_size != exttable_size(ext_header)) {
+ if (print_err)
+ pr_err("Error: extended signature table size mismatch.\n");
+ return -EFAULT;
+ }
+
+ ext_sigcount = ext_header->count;
/*
- * Found an older ucode saved earlier. Replace it with
- * this newer one.
+ * Check extended table checksum: the sum of all dwords that
+ * comprise a valid table must be 0.
*/
- mc_saved[i] = (struct microcode_intel *)ucode_ptr;
- break;
+ ext_tablep = (u32 *)ext_header;
+
+ i = ext_table_size / sizeof(u32);
+ while (i--)
+ ext_table_sum += ext_tablep[i];
+
+ if (ext_table_sum) {
+ if (print_err)
+ pr_warn("Bad extended signature table checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Calculate the checksum of update data and header. The checksum of
+ * valid update data and header including the extended signature table
+ * must be 0.
+ */
+ orig_sum = 0;
+ i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
+ while (i--)
+ orig_sum += ((u32 *)mc)[i];
+
+ if (orig_sum) {
+ if (print_err)
+ pr_err("Bad microcode data checksum, aborting.\n");
+ return -EINVAL;
}
- /* Newly detected microcode, save it to memory. */
- if (i >= num_saved && !found)
- mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
+ if (!ext_table_size)
+ return 0;
- return num_saved;
+ /*
+ * Check extended signature checksum: 0 => valid.
+ */
+ for (i = 0; i < ext_sigcount; i++) {
+ ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
+ EXT_SIGNATURE_SIZE * i;
+
+ sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+ if (sum) {
+ if (print_err)
+ pr_err("Bad extended signature checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
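Both checksums above rely on the same convention: the producer stores the additive inverse of the running 32-bit sum, so a valid blob sums to zero modulo 2^32. A short standalone demonstration with a four-dword toy blob:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum every 32-bit word; a well-formed blob sums to 0 (mod 2^32). */
static uint32_t dword_sum(const uint32_t *p, size_t dwords)
{
        uint32_t sum = 0;

        while (dwords--)
                sum += p[dwords];
        return sum;
}

int main(void)
{
        uint32_t blob[4] = { 0x11111111, 0x22222222, 0x00000003, 0 };

        /* Producer side: store the additive inverse of the running sum. */
        blob[3] = (uint32_t)-dword_sum(blob, 3);

        /* Consumer side: the full sum must now be zero, else reject. */
        printf("checksum %s\n", dword_sum(blob, 4) ? "bad" : "ok");
        return 0;
}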
/*
* Get microcode matching with BSP's model. Only CPUs with the same model as
* BSP can stay in the platform.
*/
-static enum ucode_state __init
-get_matching_model_microcode(unsigned long start, void *data, size_t size,
- struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_cpu_info *uci)
+static struct microcode_intel *
+scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
struct microcode_header_intel *mc_header;
- unsigned int num_saved = mcs->num_saved;
- enum ucode_state state = UCODE_OK;
- unsigned int leftover = size;
- u8 *ucode_ptr = data;
+ struct microcode_intel *patch = NULL;
unsigned int mc_size;
- int i;
-
- while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
- if (leftover < sizeof(mc_header))
+ while (size) {
+ if (size < sizeof(struct microcode_header_intel))
break;
- mc_header = (struct microcode_header_intel *)ucode_ptr;
+ mc_header = (struct microcode_header_intel *)data;
mc_size = get_totalsize(mc_header);
- if (!mc_size || mc_size > leftover ||
- microcode_sanity_check(ucode_ptr, 0) < 0)
+ if (!mc_size ||
+ mc_size > size ||
+ microcode_sanity_check(data, 0) < 0)
break;
- leftover -= mc_size;
+ size -= mc_size;
- /*
- * Since APs with same family and model as the BSP may boot in
- * the platform, we need to find and save microcode patches
- * with the same family and model as the BSP.
- */
- if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
- ucode_ptr += mc_size;
+ if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
+ data += mc_size;
continue;
}
- num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
+ if (save) {
+ save_microcode_patch(data, mc_size);
+ goto next;
+ }
- ucode_ptr += mc_size;
- }
- if (leftover) {
- state = UCODE_ERROR;
- return state;
- }
+ if (!patch) {
+ if (!has_newer_microcode(data,
+ uci->cpu_sig.sig,
+ uci->cpu_sig.pf,
+ uci->cpu_sig.rev))
+ goto next;
- if (!num_saved) {
- state = UCODE_NFOUND;
- return state;
- }
+ } else {
+ struct microcode_header_intel *phdr = &patch->hdr;
- for (i = 0; i < num_saved; i++)
- mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
+ if (!has_newer_microcode(data,
+ phdr->sig,
+ phdr->pf,
+ phdr->rev))
+ goto next;
+ }
+
+ /* We have a newer patch, save it. */
+ patch = data;
- mcs->num_saved = num_saved;
+next:
+ data += mc_size;
+ }
- return state;
+ if (size)
+ return NULL;
+
+ return patch;
}
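scan_microcode() walks a blob of concatenated variable-size records, each carrying its own total size, and bails out if a record claims more bytes than remain. A generic, runnable sketch of that walk with a hypothetical rec_hdr layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Each record starts with its own total size; records are simply concatenated. */
struct rec_hdr {
        uint32_t total_size;
        uint32_t id;
};

static void walk(const uint8_t *data, size_t size)
{
        while (size >= sizeof(struct rec_hdr)) {
                struct rec_hdr hdr;

                memcpy(&hdr, data, sizeof(hdr));        /* avoid unaligned access */
                if (!hdr.total_size || hdr.total_size > size)
                        break;                          /* truncated or corrupt: stop */

                printf("record id=%u, %u bytes\n", hdr.id, hdr.total_size);

                data += hdr.total_size;                 /* hop to the next record */
                size -= hdr.total_size;
        }
}

int main(void)
{
        uint8_t blob[2 * sizeof(struct rec_hdr)];
        struct rec_hdr a = { sizeof(a), 1 }, b = { sizeof(b), 2 };

        memcpy(blob, &a, sizeof(a));
        memcpy(blob + sizeof(a), &b, sizeof(b));
        walk(blob, sizeof(blob));
        return 0;
}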
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
unsigned int family, model;
- struct cpu_signature csig;
+ struct cpu_signature csig = { 0 };
unsigned int eax, ebx, ecx, edx;
- csig.sig = 0;
- csig.pf = 0;
- csig.rev = 0;
-
memset(uci, 0, sizeof(*uci));
eax = 0x00000001;
@@ -374,8 +382,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_cpuid(&eax, &ebx, &ecx, &edx);
csig.sig = eax;
- family = x86_family(csig.sig);
- model = x86_model(csig.sig);
+ family = x86_family(eax);
+ model = x86_model(eax);
if ((model >= 5) || (family > 6)) {
/* get processor flags from MSR 0x17 */
@@ -401,40 +409,41 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
static void show_saved_mc(void)
{
#ifdef DEBUG
- int i, j;
+ int i = 0, j;
unsigned int sig, pf, rev, total_size, data_size, date;
struct ucode_cpu_info uci;
+ struct ucode_patch *p;
- if (!mc_saved_data.num_saved) {
+ if (list_empty(&microcode_cache)) {
pr_debug("no microcode data saved.\n");
return;
}
- pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);
collect_cpu_info_early(&uci);
- sig = uci.cpu_sig.sig;
- pf = uci.cpu_sig.pf;
- rev = uci.cpu_sig.rev;
+ sig = uci.cpu_sig.sig;
+ pf = uci.cpu_sig.pf;
+ rev = uci.cpu_sig.rev;
pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
- for (i = 0; i < mc_saved_data.num_saved; i++) {
+ list_for_each_entry(p, &microcode_cache, plist) {
struct microcode_header_intel *mc_saved_header;
struct extended_sigtable *ext_header;
- int ext_sigcount;
struct extended_signature *ext_sig;
+ int ext_sigcount;
+
+ mc_saved_header = (struct microcode_header_intel *)p->data;
- mc_saved_header = (struct microcode_header_intel *)
- mc_saved_data.mc_saved[i];
- sig = mc_saved_header->sig;
- pf = mc_saved_header->pf;
- rev = mc_saved_header->rev;
- total_size = get_totalsize(mc_saved_header);
- data_size = get_datasize(mc_saved_header);
- date = mc_saved_header->date;
+ sig = mc_saved_header->sig;
+ pf = mc_saved_header->pf;
+ rev = mc_saved_header->rev;
+ date = mc_saved_header->date;
+
+ total_size = get_totalsize(mc_saved_header);
+ data_size = get_datasize(mc_saved_header);
pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
- i, sig, pf, rev, total_size,
+ i++, sig, pf, rev, total_size,
date & 0xffff,
date >> 24,
(date >> 16) & 0xff);
@@ -443,7 +452,7 @@ static void show_saved_mc(void)
if (total_size <= data_size + MC_HEADER_SIZE)
continue;
- ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
+ ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
ext_sigcount = ext_header->count;
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
@@ -456,85 +465,43 @@ static void show_saved_mc(void)
ext_sig++;
}
-
}
#endif
}
/*
- * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
- * hot added or resumes.
- *
- * Please make sure this mc should be a valid microcode patch before calling
- * this function.
+ * Save this microcode patch. It will be loaded early when a CPU is
+ * hot-added or resumes.
*/
-static void save_mc_for_early(u8 *mc)
+static void save_mc_for_early(u8 *mc, unsigned int size)
{
#ifdef CONFIG_HOTPLUG_CPU
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
- unsigned int mc_saved_count_init;
- unsigned int num_saved;
- struct microcode_intel **mc_saved;
- int ret, i;
-
mutex_lock(&x86_cpu_microcode_mutex);
- mc_saved_count_init = mc_saved_data.num_saved;
- num_saved = mc_saved_data.num_saved;
- mc_saved = mc_saved_data.mc_saved;
-
- if (mc_saved && num_saved)
- memcpy(mc_saved_tmp, mc_saved,
- num_saved * sizeof(struct microcode_intel *));
- /*
- * Save the microcode patch mc in mc_save_tmp structure if it's a newer
- * version.
- */
- num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
-
- /*
- * Save the mc_save_tmp in global mc_saved_data.
- */
- ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
- if (ret) {
- pr_err("Cannot save microcode patch.\n");
- goto out;
- }
-
+ save_microcode_patch(mc, size);
show_saved_mc();
- /*
- * Free old saved microcode data.
- */
- if (mc_saved) {
- for (i = 0; i < mc_saved_count_init; i++)
- kfree(mc_saved[i]);
- kfree(mc_saved);
- }
-
-out:
mutex_unlock(&x86_cpu_microcode_mutex);
#endif
}
-static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
+static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
-#ifdef CONFIG_X86_64
- unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
+ unsigned int eax = 1, ebx, ecx = 0, edx;
char name[30];
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+
native_cpuid(&eax, &ebx, &ecx, &edx);
sprintf(name, "intel-ucode/%02x-%02x-%02x",
x86_family(eax), x86_model(eax), x86_stepping(eax));
return get_builtin_firmware(cp, name);
-#else
- return false;
-#endif
}
/*
@@ -570,8 +537,7 @@ void show_ucode_info_early(void)
}
/*
- * At this point, we can not call printk() yet. Keep microcode patch number in
- * mc_saved_data.mc_saved and delay printing microcode info in
+ * At this point, we cannot call printk() yet. Delay printing microcode info in
* show_ucode_info_early() until printk() works.
*/
static void print_ucode(struct ucode_cpu_info *uci)
@@ -648,206 +614,140 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
return 0;
}
-/*
- * This function converts microcode patch offsets previously stored in
- * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
- */
int __init save_microcode_in_initrd_intel(void)
{
- struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
- unsigned int count = mc_saved_data.num_saved;
- unsigned long offset = 0;
- int ret;
-
- if (!count)
- return 0;
+ struct ucode_cpu_info uci;
+ struct cpio_data cp;
/*
- * We have found a valid initrd but it might've been relocated in the
- * meantime so get its updated address.
+ * AP loading didn't find any microcode patch; there is nothing to save.
*/
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && blobs.valid)
- offset = initrd_start;
-
- copy_ptrs(mc_saved, mc_tmp_ptrs, offset, count);
+ if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
+ return 0;
- ret = save_microcode(&mc_saved_data, mc_saved, count);
- if (ret)
- pr_err("Cannot save microcode patches from initrd.\n");
- else
- show_saved_mc();
+ if (!load_builtin_intel_microcode(&cp))
+ cp = find_microcode_in_initrd(ucode_path, false);
- return ret;
-}
+ if (!(cp.data && cp.size))
+ return 0;
-static __init enum ucode_state
-__scan_microcode_initrd(struct cpio_data *cd, struct ucode_blobs *blbp)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
- char *p = IS_ENABLED(CONFIG_X86_32) ? (char *)__pa_nodebug(ucode_name)
- : ucode_name;
-# ifdef CONFIG_X86_32
- unsigned long start = 0, size;
- struct boot_params *params;
+ collect_cpu_info_early(&uci);
- params = (struct boot_params *)__pa_nodebug(&boot_params);
- size = params->hdr.ramdisk_size;
+ scan_microcode(cp.data, cp.size, &uci, true);
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
- start = (size ? params->hdr.ramdisk_image : 0);
+ show_saved_mc();
-# else /* CONFIG_X86_64 */
- unsigned long start = 0, size;
+ return 0;
+}
- size = (u64)boot_params.ext_ramdisk_size << 32;
- size |= boot_params.hdr.ramdisk_size;
- if (size) {
- start = (u64)boot_params.ext_ramdisk_image << 32;
- start |= boot_params.hdr.ramdisk_image;
+/*
+ * Return a pointer to the microcode patch matching this CPU, or NULL if
+ * none was found.
+ */
+static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+{
+ static const char *path;
+ struct cpio_data cp;
+ bool use_pa;
- start += PAGE_OFFSET;
+ if (IS_ENABLED(CONFIG_X86_32)) {
+ path = (const char *)__pa_nodebug(ucode_path);
+ use_pa = true;
+ } else {
+ path = ucode_path;
+ use_pa = false;
}
-# endif
-
- *cd = find_cpio_data(p, (void *)start, size, NULL);
- if (cd->data) {
- blbp->start = start;
- blbp->valid = true;
- return UCODE_OK;
- } else
-#endif /* CONFIG_BLK_DEV_INITRD */
- return UCODE_ERROR;
-}
+ /* try built-in microcode first */
+ if (!load_builtin_intel_microcode(&cp))
+ cp = find_microcode_in_initrd(path, use_pa);
-static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_cpu_info *uci, struct ucode_blobs *blbp)
-{
- struct cpio_data cd = { NULL, 0, "" };
- enum ucode_state ret;
+ if (!(cp.data && cp.size))
+ return NULL;
- /* try built-in microcode first */
- if (load_builtin_intel_microcode(&cd))
- /*
- * Invalidate blobs as we might've gotten an initrd too,
- * supplied by the boot loader, by mistake or simply forgotten
- * there. That's fine, we ignore it since we've found builtin
- * microcode already.
- */
- blbp->valid = false;
- else {
- ret = __scan_microcode_initrd(&cd, blbp);
- if (ret != UCODE_OK)
- return ret;
- }
+ collect_cpu_info_early(uci);
- return get_matching_model_microcode(blbp->start, cd.data, cd.size,
- mcs, mc_ptrs, uci);
+ return scan_microcode(cp.data, cp.size, uci, false);
}
-static void __init
-_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_blobs *blbp)
+void __init load_ucode_intel_bsp(void)
{
+ struct microcode_intel *patch;
struct ucode_cpu_info uci;
- enum ucode_state ret;
-
- collect_cpu_info_early(&uci);
- ret = scan_microcode(mcs, mc_ptrs, &uci, blbp);
- if (ret != UCODE_OK)
+ patch = __load_ucode_intel(&uci);
+ if (!patch)
return;
- ret = load_microcode(mcs, mc_ptrs, blbp->start, &uci);
- if (ret != UCODE_OK)
- return;
+ uci.mc = patch;
apply_microcode_early(&uci, true);
}
-void __init load_ucode_intel_bsp(void)
+void load_ucode_intel_ap(void)
{
- struct ucode_blobs *blobs_p;
- struct mc_saved_data *mcs;
- unsigned long *ptrs;
+ struct microcode_intel *patch, **iup;
+ struct ucode_cpu_info uci;
-#ifdef CONFIG_X86_32
- mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- ptrs = (unsigned long *)__pa_nodebug(&mc_tmp_ptrs);
- blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
-#else
- mcs = &mc_saved_data;
- ptrs = mc_tmp_ptrs;
- blobs_p = &blobs;
-#endif
+ if (IS_ENABLED(CONFIG_X86_32))
+ iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
+ else
+ iup = &intel_ucode_patch;
+
+reget:
+ if (!*iup) {
+ patch = __load_ucode_intel(&uci);
+ if (!patch)
+ return;
+
+ *iup = patch;
+ }
- _load_ucode_intel_bsp(mcs, ptrs, blobs_p);
+ uci.mc = *iup;
+
+ if (apply_microcode_early(&uci, true)) {
+ /* Mixed-silicon system? Try to refetch the proper patch: */
+ *iup = NULL;
+
+ goto reget;
+ }
}
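The reget loop above implements a simple policy: use the patch cached by an earlier AP, and if applying it fails (mixed-stepping system), drop the cache and rescan once. A standalone sketch of that retry pattern with stubbed-out scan and apply steps; everything named here is hypothetical:

#include <stdbool.h>
#include <stdio.h>

static const char *cached;      /* patch cached by the first AP to get here */

/* Hypothetical stand-ins for the container scan and the apply step. */
static const char *scan_for_patch(void)
{
        return "patch-for-this-stepping";
}

static bool apply_patch(const char *patch, bool fits)
{
        (void)patch;
        return fits;            /* pretend success depends on the stepping */
}

static void load_on_ap(bool cached_one_fits)
{
retry:
        if (!cached) {
                cached = scan_for_patch();
                if (!cached)
                        return;
        }

        if (!apply_patch(cached, cached_one_fits)) {
                /* Cached patch was for another stepping: drop it, scan again. */
                cached = NULL;
                cached_one_fits = true; /* assume the rescan finds the right one */
                goto retry;
        }

        puts("patch applied");
}

int main(void)
{
        load_on_ap(false);      /* first apply fails and triggers the refetch */
        return 0;
}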
-void load_ucode_intel_ap(void)
+static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
- struct ucode_blobs *blobs_p;
- unsigned long *ptrs, start = 0;
- struct mc_saved_data *mcs;
- struct ucode_cpu_info uci;
- enum ucode_state ret;
+ struct microcode_header_intel *phdr;
+ struct ucode_patch *iter, *tmp;
-#ifdef CONFIG_X86_32
- mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- ptrs = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
- blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
-#else
- mcs = &mc_saved_data;
- ptrs = mc_tmp_ptrs;
- blobs_p = &blobs;
-#endif
-
- /*
- * If there is no valid ucode previously saved in memory, no need to
- * update ucode on this AP.
- */
- if (!mcs->num_saved)
- return;
+ list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
- if (blobs_p->valid) {
- start = blobs_p->start;
+ phdr = (struct microcode_header_intel *)iter->data;
- /*
- * Pay attention to CONFIG_RANDOMIZE_MEMORY=y as it shuffles
- * physmem mapping too and there we have the initrd.
- */
- start += PAGE_OFFSET - __PAGE_OFFSET_BASE;
- }
+ if (phdr->rev <= uci->cpu_sig.rev)
+ continue;
- collect_cpu_info_early(&uci);
- ret = load_microcode(mcs, ptrs, start, &uci);
- if (ret != UCODE_OK)
- return;
+ if (!find_matching_signature(phdr,
+ uci->cpu_sig.sig,
+ uci->cpu_sig.pf))
+ continue;
- apply_microcode_early(&uci, true);
+ return iter->data;
+ }
+ return NULL;
}
void reload_ucode_intel(void)
{
+ struct microcode_intel *p;
struct ucode_cpu_info uci;
- enum ucode_state ret;
-
- if (!mc_saved_data.num_saved)
- return;
collect_cpu_info_early(&uci);
- ret = find_microcode_patch(mc_saved_data.mc_saved,
- mc_saved_data.num_saved, &uci);
- if (ret != UCODE_OK)
+ p = find_patch(&uci);
+ if (!p)
return;
+ uci.mc = p;
+
apply_microcode_early(&uci, false);
}
@@ -879,24 +779,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
return 0;
}
-/*
- * return 0 - no update found
- * return 1 - found update
- */
-static int get_matching_mc(struct microcode_intel *mc, int cpu)
-{
- struct cpu_signature cpu_sig;
- unsigned int csig, cpf, crev;
-
- collect_cpu_info(cpu, &cpu_sig);
-
- csig = cpu_sig.sig;
- cpf = cpu_sig.pf;
- crev = cpu_sig.rev;
-
- return has_newer_microcode(mc, csig, cpf, crev);
-}
-
static int apply_microcode_intel(int cpu)
{
struct microcode_intel *mc;
@@ -911,16 +793,12 @@ static int apply_microcode_intel(int cpu)
uci = ucode_cpu_info + cpu;
mc = uci->mc;
- if (!mc)
- return 0;
-
- /*
- * Microcode on this CPU could be updated earlier. Only apply the
- * microcode patch in mc when it is newer than the one on this
- * CPU.
- */
- if (!get_matching_mc(mc, cpu))
- return 0;
+ if (!mc) {
+ /* Look for a newer patch in our cache: */
+ mc = find_patch(uci);
+ if (!mc)
+ return 0;
+ }
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@@ -962,7 +840,6 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
- enum ucode_state state = UCODE_OK;
unsigned int curr_mc_size = 0;
unsigned int csig, cpf;
@@ -1015,14 +892,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
if (leftover) {
vfree(new_mc);
- state = UCODE_ERROR;
- goto out;
+ return UCODE_ERROR;
}
- if (!new_mc) {
- state = UCODE_NFOUND;
- goto out;
- }
+ if (!new_mc)
+ return UCODE_NFOUND;
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
@@ -1032,12 +906,12 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc);
+ save_mc_for_early(new_mc, curr_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
-out:
- return state;
+
+ return UCODE_OK;
}
static int get_ucode_fw(void *to, const void *from, size_t n)
@@ -1081,20 +955,11 @@ request_microcode_user(int cpu, const void __user *buf, size_t size)
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
-static void microcode_fini_cpu(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- vfree(uci->mc);
- uci->mc = NULL;
-}
-
static struct microcode_ops microcode_intel_ops = {
.request_microcode_user = request_microcode_user,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode_intel,
- .microcode_fini_cpu = microcode_fini_cpu,
};
struct microcode_ops * __init init_intel_microcode(void)
@@ -1109,4 +974,3 @@ struct microcode_ops * __init init_intel_microcode(void)
return &microcode_intel_ops;
}
-
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
deleted file mode 100644
index 406cb6c0d9dd..000000000000
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Intel CPU Microcode Update Driver for Linux
- *
- * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
- * H Peter Anvin" <hpa@zytor.com>
- *
- * This driver allows to upgrade microcode on Intel processors
- * belonging to IA-32 family - PentiumPro, Pentium II,
- * Pentium III, Xeon, Pentium 4, etc.
- *
- * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
- * Software Developer's Manual
- * Order Number 253668 or free download from:
- *
- * http://developer.intel.com/Assets/PDF/manual/253668.pdf
- *
- * For more information, go to http://www.urbanmyth.org/microcode
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-#include <linux/firmware.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-
-#include <asm/microcode_intel.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-
-static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
-
-int microcode_sanity_check(void *mc, int print_err)
-{
- unsigned long total_size, data_size, ext_table_size;
- struct microcode_header_intel *mc_header = mc;
- struct extended_sigtable *ext_header = NULL;
- u32 sum, orig_sum, ext_sigcount = 0, i;
- struct extended_signature *ext_sig;
-
- total_size = get_totalsize(mc_header);
- data_size = get_datasize(mc_header);
-
- if (data_size + MC_HEADER_SIZE > total_size) {
- if (print_err)
- pr_err("Error: bad microcode data file size.\n");
- return -EINVAL;
- }
-
- if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
- if (print_err)
- pr_err("Error: invalid/unknown microcode update format.\n");
- return -EINVAL;
- }
-
- ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
- if (ext_table_size) {
- u32 ext_table_sum = 0;
- u32 *ext_tablep;
-
- if ((ext_table_size < EXT_HEADER_SIZE)
- || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
- if (print_err)
- pr_err("Error: truncated extended signature table.\n");
- return -EINVAL;
- }
-
- ext_header = mc + MC_HEADER_SIZE + data_size;
- if (ext_table_size != exttable_size(ext_header)) {
- if (print_err)
- pr_err("Error: extended signature table size mismatch.\n");
- return -EFAULT;
- }
-
- ext_sigcount = ext_header->count;
-
- /*
- * Check extended table checksum: the sum of all dwords that
- * comprise a valid table must be 0.
- */
- ext_tablep = (u32 *)ext_header;
-
- i = ext_table_size / sizeof(u32);
- while (i--)
- ext_table_sum += ext_tablep[i];
-
- if (ext_table_sum) {
- if (print_err)
- pr_warn("Bad extended signature table checksum, aborting.\n");
- return -EINVAL;
- }
- }
-
- /*
- * Calculate the checksum of update data and header. The checksum of
- * valid update data and header including the extended signature table
- * must be 0.
- */
- orig_sum = 0;
- i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
- while (i--)
- orig_sum += ((u32 *)mc)[i];
-
- if (orig_sum) {
- if (print_err)
- pr_err("Bad microcode data checksum, aborting.\n");
- return -EINVAL;
- }
-
- if (!ext_table_size)
- return 0;
-
- /*
- * Check extended signature checksum: 0 => valid.
- */
- for (i = 0; i < ext_sigcount; i++) {
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
- EXT_SIGNATURE_SIZE * i;
-
- sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
- (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
- if (sum) {
- if (print_err)
- pr_err("Bad extended signature checksum, aborting.\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int find_matching_signature(void *mc, unsigned int csig, int cpf)
-{
- struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
- struct extended_signature *ext_sig;
- int i;
-
- if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
-
- /* Look for ext. headers: */
- if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
-
- ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
- ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
-
- for (i = 0; i < ext_hdr->count; i++) {
- if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
- ext_sig++;
- }
- return 0;
-}
-
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
-{
- struct microcode_header_intel *mc_hdr = mc;
-
- if (mc_hdr->rev <= new_rev)
- return 0;
-
- return find_matching_signature(mc, csig, cpf);
-}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f44c5a50ab8..6c044543545e 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -25,7 +25,6 @@
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
-#include <asm/idle.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 1db8dc490b66..d1316f9c8329 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -17,11 +17,17 @@ struct cpuid_bit {
u32 sub_leaf;
};
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
+/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
+static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { 0, 0, 0, 0, 0 }
};
void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
@@ -30,18 +36,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
u32 regs[4];
const struct cpuid_bit *cb;
- static const struct cpuid_bit cpuid_bits[] = {
- { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
- { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
- { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
- { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
- { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
- { 0, 0, 0, 0, 0 }
- };
-
for (cb = cpuid_bits; cb->feature; cb++) {
/* Verify that the level is valid */
@@ -50,10 +44,35 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
max_level > (cb->level | 0xffff))
continue;
- cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
- &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+ cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
+ &regs[CPUID_EBX], &regs[CPUID_ECX],
+ &regs[CPUID_EDX]);
if (regs[cb->reg] & (1 << cb->bit))
set_cpu_cap(c, cb->feature);
}
}
+
+u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf,
+ enum cpuid_regs_idx reg)
+{
+ const struct cpuid_bit *cb;
+ u32 cpuid_val = 0;
+
+ for (cb = cpuid_bits; cb->feature; cb++) {
+
+ if (level > cb->level)
+ continue;
+
+ if (level < cb->level)
+ break;
+
+ if (reg == cb->reg && sub_leaf == cb->sub_leaf) {
+ if (cpu_has(&boot_cpu_data, cb->feature))
+ cpuid_val |= BIT(cb->bit);
+ }
+ }
+
+ return cpuid_val;
+}
+EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf);
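get_scattered_cpuid_leaf() reconstructs a CPUID register from individually stored feature flags by walking the same table used at detection time. A userspace sketch of that table-driven rebuild; the table entries and the has_feature() predicate are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* One scattered feature: which leaf/register/bit it originally came from. */
struct scattered_bit {
        const char *name;
        uint32_t    leaf;
        int         reg;        /* 0=EAX 1=EBX 2=ECX 3=EDX */
        int         bit;
};

/* Hypothetical table, sorted by leaf like the kernel's cpuid_bits[]. */
static const struct scattered_bit bits[] = {
        { "APERFMPERF", 0x00000006, 2, 0 },
        { "EPB",        0x00000006, 2, 3 },
        { "HW_PSTATE",  0x80000007, 3, 7 },
        { NULL, 0, 0, 0 }
};

/* Rebuild the register value for one leaf from individually stored flags. */
static uint32_t rebuild_leaf(uint32_t leaf, int reg,
                             int (*has_feature)(const char *name))
{
        const struct scattered_bit *b;
        uint32_t val = 0;

        for (b = bits; b->name; b++) {
                if (b->leaf != leaf || b->reg != reg)
                        continue;
                if (has_feature(b->name))
                        val |= 1u << b->bit;
        }
        return val;
}

static int fake_has_feature(const char *name)
{
        return name[0] == 'E';  /* pretend only EPB is set */
}

int main(void)
{
        printf("leaf 0x6 ECX = 0x%x\n", rebuild_leaf(0x6, 2, fake_has_feature));
        return 0;
}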
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 5130985b758b..891f4dad7b2c 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,11 +24,16 @@
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/clocksource.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
+#include <asm/timer.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "vmware: " fmt
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -48,6 +53,8 @@
"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \
"memory");
+static unsigned long vmware_tsc_khz __ro_after_init;
+
static inline int __vmware_platform(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -57,35 +64,80 @@ static inline int __vmware_platform(void)
static unsigned long vmware_get_tsc_khz(void)
{
- uint64_t tsc_hz, lpj;
- uint32_t eax, ebx, ecx, edx;
+ return vmware_tsc_khz;
+}
- VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+#ifdef CONFIG_PARAVIRT
+static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
+static int vmw_sched_clock __initdata = 1;
- tsc_hz = eax | (((uint64_t)ebx) << 32);
- do_div(tsc_hz, 1000);
- BUG_ON(tsc_hz >> 32);
- pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
- (unsigned long) tsc_hz / 1000,
- (unsigned long) tsc_hz % 1000);
-
- if (!preset_lpj) {
- lpj = ((u64)tsc_hz * 1000);
- do_div(lpj, HZ);
- preset_lpj = lpj;
- }
+static __init int setup_vmw_sched_clock(char *s)
+{
+ vmw_sched_clock = 0;
+ return 0;
+}
+early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
+
+static unsigned long long vmware_sched_clock(void)
+{
+ unsigned long long ns;
- return tsc_hz;
+ ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
+ vmware_cyc2ns.cyc2ns_shift);
+ ns -= vmware_cyc2ns.cyc2ns_offset;
+ return ns;
}
+static void __init vmware_sched_clock_setup(void)
+{
+ struct cyc2ns_data *d = &vmware_cyc2ns;
+ unsigned long long tsc_now = rdtsc();
+
+ clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
+ vmware_tsc_khz, NSEC_PER_MSEC, 0);
+ d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
+ d->cyc2ns_shift);
+
+ pv_time_ops.sched_clock = vmware_sched_clock;
+ pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
+}
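The sched-clock path converts TSC cycles to nanoseconds with one multiply and one shift; clocks_calc_mult_shift() picks the factors so that (cycles * mul) >> shift approximates cycles * 10^6 / tsc_khz. A standalone sketch with hand-picked factors for an assumed 2.5 GHz TSC:

#include <stdint.h>
#include <stdio.h>

/*
 * ns = cycles * 10^6 / tsc_khz, computed as (cycles * mul) >> shift so the
 * hot path needs only a multiply and a shift. The factors below are chosen
 * by hand for an assumed 2.5 GHz TSC.
 */
#define TSC_KHZ   2500000ULL
#define C2N_SHIFT 22
#define C2N_MUL   ((1000000ULL << C2N_SHIFT) / TSC_KHZ) /* ~0.4 ns/cycle, scaled */

static uint64_t cycles_to_ns(uint64_t cycles)
{
        /* 128-bit intermediate omitted for brevity; fine for small deltas. */
        return (cycles * C2N_MUL) >> C2N_SHIFT;
}

int main(void)
{
        /* 2.5e9 cycles at 2.5 GHz is one second, i.e. roughly 1e9 ns. */
        printf("%llu ns\n", (unsigned long long)cycles_to_ns(2500000000ULL));
        return 0;
}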
+
+static void __init vmware_paravirt_ops_setup(void)
+{
+ pv_info.name = "VMware hypervisor";
+ pv_cpu_ops.io_delay = paravirt_nop;
+
+ if (vmware_tsc_khz && vmw_sched_clock)
+ vmware_sched_clock_setup();
+}
+#else
+#define vmware_paravirt_ops_setup() do {} while (0)
+#endif
+
static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
+ uint64_t lpj, tsc_khz;
VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
if (ebx != UINT_MAX) {
+ lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
+ do_div(tsc_khz, 1000);
+ WARN_ON(tsc_khz >> 32);
+ pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
+ (unsigned long) tsc_khz / 1000,
+ (unsigned long) tsc_khz % 1000);
+
+ if (!preset_lpj) {
+ do_div(lpj, HZ);
+ preset_lpj = lpj;
+ }
+
+ vmware_tsc_khz = tsc_khz;
x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+ x86_platform.calibrate_cpu = vmware_get_tsc_khz;
+
#ifdef CONFIG_X86_LOCAL_APIC
/* Skip lapic calibration since we know the bus frequency. */
lapic_timer_frequency = ecx / HZ;
@@ -96,6 +148,8 @@ static void __init vmware_platform_setup(void)
pr_warn("Failed to get TSC freq from the hypervisor\n");
}
+ vmware_paravirt_ops_setup();
+
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2836de390f95..0931a105ffe1 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -45,10 +45,7 @@
#include <asm/msr.h>
static struct class *cpuid_class;
-
-struct cpuid_regs {
- u32 eax, ebx, ecx, edx;
-};
+static enum cpuhp_state cpuhp_cpuid_state;
static void cpuid_smp_cpuid(void *cmd_block)
{
@@ -115,7 +112,7 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static int cpuid_device_create(int cpu)
+static int cpuid_device_create(unsigned int cpu)
{
struct device *dev;
@@ -124,35 +121,12 @@ static int cpuid_device_create(int cpu)
return PTR_ERR_OR_ZERO(dev);
}
-static void cpuid_device_destroy(int cpu)
+static int cpuid_device_destroy(unsigned int cpu)
{
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ return 0;
}
-static int cpuid_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- err = cpuid_device_create(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- cpuid_device_destroy(cpu);
- break;
- }
- return notifier_from_errno(err);
-}
-
-static struct notifier_block cpuid_class_cpu_notifier =
-{
- .notifier_call = cpuid_class_cpu_callback,
-};
-
static char *cpuid_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
@@ -160,15 +134,13 @@ static char *cpuid_devnode(struct device *dev, umode_t *mode)
static int __init cpuid_init(void)
{
- int i, err = 0;
- i = 0;
+ int err;
if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
"cpu/cpuid", &cpuid_fops)) {
printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
CPUID_MAJOR);
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
cpuid_class = class_create(THIS_MODULE, "cpuid");
if (IS_ERR(cpuid_class)) {
@@ -177,45 +149,28 @@ static int __init cpuid_init(void)
}
cpuid_class->devnode = cpuid_devnode;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = cpuid_device_create(i);
- if (err != 0)
- goto out_class;
- }
- __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/cpuid:online",
+ cpuid_device_create, cpuid_device_destroy);
+ if (err < 0)
+ goto out_class;
- err = 0;
- goto out;
+ cpuhp_cpuid_state = err;
+ return 0;
out_class:
- i = 0;
- for_each_online_cpu(i) {
- cpuid_device_destroy(i);
- }
- cpu_notifier_register_done();
class_destroy(cpuid_class);
out_chrdev:
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
-out:
return err;
}
+module_init(cpuid_init);
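The conversion above replaces the old CPU notifier with a dynamic hotplug state: cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) runs the startup callback on every online CPU, returns the allocated state, and cpuhp_remove_state() tears it down again. A minimal kernel-module-style sketch of the same pattern; the callbacks and the "demo:online" name are hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>
#include <linux/module.h>

static enum cpuhp_state demo_hp_state;

/* Called for each CPU as it comes online (and once per CPU at setup time). */
static int demo_cpu_online(unsigned int cpu)
{
        pr_info("demo: CPU%u online\n", cpu);
        return 0;
}

/* Called before a CPU goes offline. */
static int demo_cpu_prep_down(unsigned int cpu)
{
        pr_info("demo: CPU%u going down\n", cpu);
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        /* Dynamic state: the allocated slot is returned and kept for removal. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                demo_cpu_online, demo_cpu_prep_down);
        if (ret < 0)
                return ret;

        demo_hp_state = ret;
        return 0;
}

static void __exit demo_exit(void)
{
        cpuhp_remove_state(demo_hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");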
static void __exit cpuid_exit(void)
{
- int cpu = 0;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cpuid_device_destroy(cpu);
+ cpuhp_remove_state(cpuhp_cpuid_state);
class_destroy(cpuid_class);
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
- __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
- cpu_notifier_register_done();
}
-
-module_init(cpuid_init);
module_exit(cpuid_exit);
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9b7cf5c28f5f..0cfd01d2754c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,7 +22,6 @@
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
bool in_task_stack(unsigned long *stack, struct task_struct *task,
@@ -46,14 +45,7 @@ static void printk_stack_address(unsigned long address, int reliable,
char *log_lvl)
{
touch_nmi_watchdog();
- printk("%s [<%p>] %s%pB\n",
- log_lvl, (void *)address, reliable ? "" : "? ",
- (void *)address);
-}
-
-void printk_address(unsigned long address)
-{
- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
+ printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
@@ -67,6 +59,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
+ stack = stack ? : get_stack_pointer(task, regs);
/*
* Iterate through the stacks, starting with the current stack pointer.
@@ -82,8 +75,8 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - softirq stack
* - hardirq stack
*/
- for (; stack; stack = stack_info.next_sp) {
- const char *str_begin, *str_end;
+ for (regs = NULL; stack; stack = stack_info.next_sp) {
+ const char *stack_name;
/*
* If we overflowed the task stack into a guard page, jump back
@@ -95,9 +88,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (get_stack_info(stack, task, &stack_info, &visit_mask))
break;
- stack_type_str(stack_info.type, &str_begin, &str_end);
- if (str_begin)
- printk("%s <%s> ", log_lvl, str_begin);
+ stack_name = stack_type_name(stack_info.type);
+ if (stack_name)
+ printk("%s <%s>\n", log_lvl, stack_name);
/*
* Scan the stack, printing any text addresses we find. At the
@@ -112,13 +105,22 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
- unsigned long addr = *stack;
+ unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);
if (!__kernel_text_address(addr))
continue;
+ /*
+ * Don't print regs->ip again if it was already printed
+ * by __show_regs() below.
+ */
+ if (regs && stack == &regs->ip) {
+ unwind_next_frame(&state);
+ continue;
+ }
+
if (stack == ret_addr_p)
reliable = 1;
@@ -146,10 +148,15 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* of the addresses will just be printed as unreliable.
*/
unwind_next_frame(&state);
+
+ /* if the frame has entry regs, print them */
+ regs = unwind_get_entry_regs(&state);
+ if (regs)
+ __show_regs(regs, 0);
}
- if (str_end)
- printk("%s <%s> ", log_lvl, str_end);
+ if (stack_name)
+ printk("%s </%s>\n", log_lvl, stack_name);
}
}
@@ -164,12 +171,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp && task == current)
sp = get_stack_pointer(current, NULL);
- show_stack_log_lvl(task, NULL, sp, "");
+ show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}
void show_stack_regs(struct pt_regs *regs)
{
- show_stack_log_lvl(current, regs, NULL, "");
+ show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -261,14 +268,11 @@ int __die(const char *str, struct pt_regs *regs, long err)
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
}
- printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
- print_symbol("%s", regs->ip);
- printk(" SS:ESP %04x:%08lx\n", ss, sp);
+ printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
+ (void *)regs->ip, ss, sp);
#else
/* Executive summary in case the oops scrolled away */
- printk(KERN_ALERT "RIP ");
- printk_address(regs->ip);
- printk(" RSP <%016lx>\n", regs->sp);
+ printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
return 0;
}
@@ -291,22 +295,6 @@ void die(const char *str, struct pt_regs *regs, long err)
oops_end(flags, regs, sig);
}
-static int __init kstack_setup(char *s)
-{
- ssize_t ret;
- unsigned long val;
-
- if (!s)
- return -EINVAL;
-
- ret = kstrtoul(s, 0, &val);
- if (ret)
- return ret;
- kstack_depth_to_print = val;
- return 0;
-}
-early_param("kstack", kstack_setup);
-
static int __init code_bytes_setup(char *s)
{
ssize_t ret;
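The dumpstack rework drops the raw hex stack dump and the kstack= parameter in favour of a purely symbolic trace printed via %pB, and folds show_stack_log_lvl() into show_trace_log_lvl(). A hedged way to exercise the new output from a test module is simply to call dump_stack(), which on x86 ends up in show_trace_log_lvl(); the module name here is made up.

#include <linux/module.h>
#include <linux/printk.h>

static int __init stackdemo_init(void)
{
	/* Prints the current call trace; with this patch the entries are
	 * emitted symbolically (%pB) rather than as raw hex stack words. */
	pr_info("stackdemo: dumping the current call trace\n");
	dump_stack();
	return 0;
}
module_init(stackdemo_init);

static void __exit stackdemo_exit(void) { }
module_exit(stackdemo_exit);

MODULE_LICENSE("GPL");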
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 06eb322b5f9f..bb3b5b9a6899 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -16,18 +16,15 @@
#include <asm/stacktrace.h>
-void stack_type_str(enum stack_type type, const char **begin, const char **end)
+const char *stack_type_name(enum stack_type type)
{
- switch (type) {
- case STACK_TYPE_IRQ:
- case STACK_TYPE_SOFTIRQ:
- *begin = "IRQ";
- *end = "EOI";
- break;
- default:
- *begin = NULL;
- *end = NULL;
- }
+ if (type == STACK_TYPE_IRQ)
+ return "IRQ";
+
+ if (type == STACK_TYPE_SOFTIRQ)
+ return "SOFTIRQ";
+
+ return NULL;
}
static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
@@ -109,8 +106,10 @@ recursion_check:
* just break out and report an unknown stack type.
*/
if (visit_mask) {
- if (*visit_mask & (1UL << info->type))
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
goto unknown;
+ }
*visit_mask |= 1UL << info->type;
}
@@ -121,36 +120,6 @@ unknown:
return -EINVAL;
}
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
-{
- unsigned long *stack;
- int i;
-
- if (!try_get_task_stack(task))
- return;
-
- sp = sp ? : get_stack_pointer(task, regs);
-
- stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(stack))
- break;
- if ((i % STACKSLOTS_PER_LINE) == 0) {
- if (i != 0)
- pr_cont("\n");
- printk("%s %08lx", log_lvl, *stack++);
- } else
- pr_cont(" %08lx", *stack++);
- touch_nmi_watchdog();
- }
- pr_cont("\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
-
- put_task_stack(task);
-}
-
-
void show_regs(struct pt_regs *regs)
{
int i;
@@ -168,8 +137,7 @@ void show_regs(struct pt_regs *regs)
unsigned char c;
u8 *ip;
- pr_emerg("Stack:\n");
- show_stack_log_lvl(current, regs, NULL, KERN_EMERG);
+ show_trace_log_lvl(current, regs, NULL, KERN_EMERG);
pr_emerg("Code:");
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 36cf1a498227..fac189efcc34 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -28,23 +28,17 @@ static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
[DEBUG_STACK - 1] = DEBUG_STKSZ
};
-void stack_type_str(enum stack_type type, const char **begin, const char **end)
+const char *stack_type_name(enum stack_type type)
{
BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
- switch (type) {
- case STACK_TYPE_IRQ:
- *begin = "IRQ";
- *end = "EOI";
- break;
- case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
- *begin = exception_stack_names[type - STACK_TYPE_EXCEPTION];
- *end = "EOE";
- break;
- default:
- *begin = NULL;
- *end = NULL;
- }
+ if (type == STACK_TYPE_IRQ)
+ return "IRQ";
+
+ if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
+ return exception_stack_names[type - STACK_TYPE_EXCEPTION];
+
+ return NULL;
}
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
@@ -128,8 +122,10 @@ recursion_check:
* just break out and report an unknown stack type.
*/
if (visit_mask) {
- if (*visit_mask & (1UL << info->type))
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
goto unknown;
+ }
*visit_mask |= 1UL << info->type;
}
@@ -140,56 +136,6 @@ unknown:
return -EINVAL;
}
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
-{
- unsigned long *irq_stack_end;
- unsigned long *irq_stack;
- unsigned long *stack;
- int i;
-
- if (!try_get_task_stack(task))
- return;
-
- irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
- irq_stack = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));
-
- sp = sp ? : get_stack_pointer(task, regs);
-
- stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
- unsigned long word;
-
- if (stack >= irq_stack && stack <= irq_stack_end) {
- if (stack == irq_stack_end) {
- stack = (unsigned long *) (irq_stack_end[-1]);
- pr_cont(" <EOI> ");
- }
- } else {
- if (kstack_end(stack))
- break;
- }
-
- if (probe_kernel_address(stack, word))
- break;
-
- if ((i % STACKSLOTS_PER_LINE) == 0) {
- if (i != 0)
- pr_cont("\n");
- printk("%s %016lx", log_lvl, word);
- } else
- pr_cont(" %016lx", word);
-
- stack++;
- touch_nmi_watchdog();
- }
-
- pr_cont("\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
-
- put_task_stack(task);
-}
-
void show_regs(struct pt_regs *regs)
{
int i;
@@ -207,8 +153,7 @@ void show_regs(struct pt_regs *regs)
unsigned char c;
u8 *ip;
- printk(KERN_DEFAULT "Stack:\n");
- show_stack_log_lvl(current, regs, NULL, KERN_DEFAULT);
+ show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
printk(KERN_DEFAULT "Code: ");
diff --git a/arch/x86/kernel/fpu/bugs.c b/arch/x86/kernel/fpu/bugs.c
index aad34aafc0e0..d913047f832c 100644
--- a/arch/x86/kernel/fpu/bugs.c
+++ b/arch/x86/kernel/fpu/bugs.c
@@ -23,17 +23,12 @@ static double __initdata y = 3145727.0;
*/
void __init fpu__init_check_bugs(void)
{
- u32 cr0_saved;
s32 fdiv_bug;
/* kernel_fpu_begin/end() relies on patched alternative instructions. */
if (!boot_cpu_has(X86_FEATURE_FPU))
return;
- /* We might have CR0::TS set already, clear it: */
- cr0_saved = read_cr0();
- write_cr0(cr0_saved & ~X86_CR0_TS);
-
kernel_fpu_begin();
/*
@@ -56,8 +51,6 @@ void __init fpu__init_check_bugs(void)
kernel_fpu_end();
- write_cr0(cr0_saved);
-
if (fdiv_bug) {
set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
pr_warn("Hmm, FPU with FDIV bug\n");
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 47004010ad5d..e4e97a5355ce 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -58,27 +58,9 @@ static bool kernel_fpu_disabled(void)
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
@@ -125,8 +107,7 @@ void __kernel_fpu_begin(void)
*/
copy_fpregs_to_fpstate(fpu);
} else {
- this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
+ __cpu_invalidate_fpregs_state();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -137,8 +118,6 @@ void __kernel_fpu_end(void)
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
@@ -159,35 +138,6 @@ void kernel_fpu_end(void)
EXPORT_SYMBOL_GPL(kernel_fpu_end);
/*
- * CR0::TS save/restore functions:
- */
-int irq_ts_save(void)
-{
- /*
- * If in process context and not atomic, we can take a spurious DNA fault.
- * Otherwise, doing clts() in process context requires disabling preemption
- * or some heavy lifting like kernel_fpu_begin()
- */
- if (!in_atomic())
- return 0;
-
- if (read_cr0() & X86_CR0_TS) {
- clts();
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_ts_save);
-
-void irq_ts_restore(int TS_state)
-{
- if (TS_state)
- stts();
-}
-EXPORT_SYMBOL_GPL(irq_ts_restore);
-
-/*
* Save the FPU state (mark it for reload if necessary):
*
* This only ever gets called for the current task.
@@ -200,10 +150,7 @@ void fpu__save(struct fpu *fpu)
trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&fpu->state);
- else
- fpregs_deactivate(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
}
}
trace_x86_fpu_after_save(fpu);
@@ -247,7 +194,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
@@ -260,8 +206,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
* Save current FPU registers directly into the child
@@ -283,10 +228,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
memcpy(&src_fpu->state, &dst_fpu->state,
fpu_kernel_xstate_size);
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&src_fpu->state);
- else
- fpregs_deactivate(src_fpu);
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
@@ -366,7 +308,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
if (fpu->fpstate_active) {
/* Invalidate any lazy state: */
- fpu->last_cpu = -1;
+ __fpu_invalidate_fpregs_state(fpu);
} else {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
@@ -409,7 +351,7 @@ void fpu__current_fpstate_write_begin(void)
* ensures we will not be lazy and skip a XRSTOR in the
* future.
*/
- fpu->last_cpu = -1;
+ __fpu_invalidate_fpregs_state(fpu);
}
/*
@@ -459,7 +401,6 @@ void fpu__restore(struct fpu *fpu)
trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
}
@@ -477,7 +418,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
@@ -521,14 +461,14 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
- /* FPU state will be reallocated lazily at the first use. */
- fpu__drop(fpu);
- } else {
- if (!fpu->fpstate_active) {
- fpu__activate_curr(fpu);
- user_fpu_begin();
- }
+ fpu__drop(fpu);
+
+ /*
+ * Make sure fpstate is cleared and initialized.
+ */
+ if (static_cpu_has(X86_FEATURE_FPU)) {
+ fpu__activate_curr(fpu);
+ user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}
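With lazy FPU restore gone, kernel_fpu_begin()/kernel_fpu_end() no longer touch CR0.TS; they only save and later restore the current task's register state. A hedged sketch of the usage pattern in a hypothetical module (any real SSE/AVX code between the calls must be built with SIMD-enabled compiler flags, as the in-tree crypto drivers do):

#include <asm/fpu/api.h>
#include <linux/module.h>

static int __init fpudemo_init(void)
{
	/*
	 * kernel_fpu_begin() disables preemption and saves the current
	 * task's FPU registers; kernel_fpu_end() re-enables preemption.
	 * Any SIMD instructions must sit between the two calls.
	 */
	kernel_fpu_begin();
	/* ... SIMD work would go here ... */
	kernel_fpu_end();
	return 0;
}
module_init(fpudemo_init);

static void __exit fpudemo_exit(void) { }
module_exit(fpudemo_exit);

MODULE_LICENSE("GPL");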
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 2f2b8c7ccb85..60dece392b3a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -10,18 +10,6 @@
#include <linux/init.h>
/*
- * Initialize the TS bit in CR0 according to the style of context-switches
- * we are using:
- */
-static void fpu__init_cpu_ctx_switch(void)
-{
- if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
- stts();
- else
- clts();
-}
-
-/*
* Initialize the registers found in all CPUs, CR0 and CR4:
*/
static void fpu__init_cpu_generic(void)
@@ -58,7 +46,6 @@ void fpu__init_cpu(void)
{
fpu__init_cpu_generic();
fpu__init_cpu_xstate();
- fpu__init_cpu_ctx_switch();
}
/*
@@ -233,82 +220,16 @@ static void __init fpu__init_system_xstate_size_legacy(void)
}
/*
- * FPU context switching strategies:
- *
- * Against popular belief, we don't do lazy FPU saves, due to the
- * task migration complications it brings on SMP - we only do
- * lazy FPU restores.
- *
- * 'lazy' is the traditional strategy, which is based on setting
- * CR0::TS to 1 during context-switch (instead of doing a full
- * restore of the FPU state), which causes the first FPU instruction
- * after the context switch (whenever it is executed) to fault - at
- * which point we lazily restore the FPU state into FPU registers.
- *
- * Tasks are of course under no obligation to execute FPU instructions,
- * so it can easily happen that another context-switch occurs without
- * a single FPU instruction being executed. If we eventually switch
- * back to the original task (that still owns the FPU) then we have
- * not only saved the restores along the way, but we also have the
- * FPU ready to be used for the original task.
- *
- * 'lazy' is deprecated because it's almost never a performance win
- * and it's much more complicated than 'eager'.
- *
- * 'eager' switching is by default on all CPUs, there we switch the FPU
- * state during every context switch, regardless of whether the task
- * has used FPU instructions in that time slice or not. This is done
- * because modern FPU context saving instructions are able to optimize
- * state saving and restoration in hardware: they can detect both
- * unused and untouched FPU state and optimize accordingly.
- *
- * [ Note that even in 'lazy' mode we might optimize context switches
- * to use 'eager' restores, if we detect that a task is using the FPU
- * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
- */
-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
-
-/*
* Find supported xfeatures based on cpu features and command-line input.
* This must be called after fpu__init_parse_early_param() is called and
* xfeatures_mask is enumerated.
*/
u64 __init fpu__get_supported_xfeatures_mask(void)
{
- /* Support all xfeatures known to us */
- if (eagerfpu != DISABLE)
- return XCNTXT_MASK;
-
- /* Warning of xfeatures being disabled for no eagerfpu mode */
- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
- pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
- xfeatures_mask & XFEATURE_MASK_EAGER);
- }
-
- /* Return a mask that masks out all features requiring eagerfpu mode */
- return ~XFEATURE_MASK_EAGER;
+ return XCNTXT_MASK;
}
-/*
- * Disable features dependent on eagerfpu.
- */
-static void __init fpu__clear_eager_fpu_features(void)
-{
- setup_clear_cpu_cap(X86_FEATURE_MPX);
-}
-
-/*
- * Pick the FPU context switching strategy:
- *
- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
- * the following is true:
- *
- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
- * FPU switching has a relatively low cost compared to a plain xsave;
- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
- * switching. Should the kernel boot with noxsaveopt, we support MPX
- * with eager FPU switching at a higher cost.
- */
+/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
static bool on_boot_cpu __initdata = 1;
@@ -317,17 +238,6 @@ static void __init fpu__init_system_ctx_switch(void)
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
-
- if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
- eagerfpu = ENABLE;
-
- if (xfeatures_mask & XFEATURE_MASK_EAGER)
- eagerfpu = ENABLE;
-
- if (eagerfpu == ENABLE)
- setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-
- printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
}
/*
@@ -336,11 +246,6 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
- if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
- eagerfpu = DISABLE;
- fpu__clear_eager_fpu_features();
- }
-
if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU);
@@ -375,14 +280,6 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
*/
fpu__init_cpu();
- /*
- * But don't leave CR0::TS set yet, as some of the FPU setup
- * methods depend on being able to execute FPU instructions
- * that will fault on a set TS, such as the FXSAVE in
- * fpu__init_system_mxcsr().
- */
- clts();
-
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a184c210efba..83c23c230b4c 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -340,11 +340,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 095ef7ddd6ae..1d7770447b3e 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -65,6 +65,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_AVX);
setup_clear_cpu_cap(X86_FEATURE_AVX2);
setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA);
setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
@@ -73,6 +74,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
setup_clear_cpu_cap(X86_FEATURE_MPX);
setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI);
setup_clear_cpu_cap(X86_FEATURE_PKU);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
@@ -890,15 +892,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
*/
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
- /*
- * For most XSAVE components, this would be an arduous task:
- * brining fpstate up to date with fpregs, updating fpstate,
- * then re-populating fpregs. But, for components that are
- * never lazily managed, we can just access the fpregs
- * directly. PKRU is never managed lazily, so we can just
- * manipulate it directly. Make sure it stays that way.
- */
- WARN_ON_ONCE(!use_eager_fpu());
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..4e8577d03372 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -63,6 +63,8 @@
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
+#define SIZEOF_PTREGS 17*4
+
/*
* Number of possible pages in the lowmem region.
*
@@ -248,19 +250,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
#ifdef CONFIG_PARAVIRT
/* This can only trip for a broken bootloader... */
cmpw $0x207, pa(boot_params + BP_version)
- jb default_entry
+ jb .Ldefault_entry
/* Paravirt-compatible boot parameters. Look to see what architecture
we're booting under. */
movl pa(boot_params + BP_hardware_subarch), %eax
cmpl $num_subarch_entries, %eax
- jae bad_subarch
+ jae .Lbad_subarch
movl pa(subarch_entries)(,%eax,4), %eax
subl $__PAGE_OFFSET, %eax
jmp *%eax
-bad_subarch:
+.Lbad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
/* Unknown implementation; there's really
@@ -270,14 +272,14 @@ WEAK(xen_entry)
__INITDATA
subarch_entries:
- .long default_entry /* normal x86/PC */
+ .long .Ldefault_entry /* normal x86/PC */
.long lguest_entry /* lguest hypervisor */
.long xen_entry /* Xen hypervisor */
- .long default_entry /* Moorestown MID */
+ .long .Ldefault_entry /* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
- jmp default_entry
+ jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_HOTPLUG_CPU
@@ -289,7 +291,8 @@ num_subarch_entries = (. - subarch_entries) / 4
ENTRY(start_cpu0)
movl initial_stack, %ecx
movl %ecx, %esp
- jmp *(initial_code)
+ call *(initial_code)
+1: jmp 1b
ENDPROC(start_cpu0)
#endif
@@ -317,7 +320,7 @@ ENTRY(startup_32_smp)
call load_ucode_ap
#endif
-default_entry:
+.Ldefault_entry:
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
X86_CR0_PG)
@@ -347,7 +350,7 @@ default_entry:
pushfl
popl %eax # get EFLAGS
testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set?
- jz enable_paging # hw disallowed setting of ID bit
+ jz .Lenable_paging # hw disallowed setting of ID bit
# which means no CPUID and no CR4
xorl %eax,%eax
@@ -357,13 +360,13 @@ default_entry:
movl $1,%eax
cpuid
andl $~1,%edx # Ignore CPUID.FPU
- jz enable_paging # No flags or only CPUID.FPU = no CR4
+ jz .Lenable_paging # No flags or only CPUID.FPU = no CR4
movl pa(mmu_cr4_features),%eax
movl %eax,%cr4
testb $X86_CR4_PAE, %al # check if PAE is enabled
- jz enable_paging
+ jz .Lenable_paging
/* Check if extended functions are implemented */
movl $0x80000000, %eax
@@ -371,7 +374,7 @@ default_entry:
/* Value must be in the range 0x80000001 to 0x8000ffff */
subl $0x80000001, %eax
cmpl $(0x8000ffff-0x80000001), %eax
- ja enable_paging
+ ja .Lenable_paging
/* Clear bogus XD_DISABLE bits */
call verify_cpu
@@ -380,7 +383,7 @@ default_entry:
cpuid
/* Execute Disable bit supported? */
btl $(X86_FEATURE_NX & 31), %edx
- jnc enable_paging
+ jnc .Lenable_paging
/* Setup EFER (Extended Feature Enable Register) */
movl $MSR_EFER, %ecx
@@ -390,7 +393,7 @@ default_entry:
/* Make changes effective */
wrmsr
-enable_paging:
+.Lenable_paging:
/*
* Enable paging
@@ -419,7 +422,7 @@ enable_paging:
*/
movb $4,X86 # at least 486
cmpl $-1,X86_CPUID
- je is486
+ je .Lis486
/* get vendor info */
xorl %eax,%eax # call CPUID with 0 -> return vendor ID
@@ -430,7 +433,7 @@ enable_paging:
movl %ecx,X86_VENDOR_ID+8 # last 4 chars
orl %eax,%eax # do we have processor info as well?
- je is486
+ je .Lis486
movl $1,%eax # Use the CPUID instruction to get CPU type
cpuid
@@ -444,7 +447,7 @@ enable_paging:
movb %cl,X86_MASK
movl %edx,X86_CAPABILITY
-is486:
+.Lis486:
movl $0x50022,%ecx # set AM, WP, NE and MP
movl %cr0,%eax
andl $0x80000011,%eax # Save PG,PE,ET
@@ -470,8 +473,9 @@ is486:
xorl %eax,%eax # Clear LDT
lldt %ax
- pushl $0 # fake return address for unwinder
- jmp *(initial_code)
+ call *(initial_code)
+1: jmp 1b
+ENDPROC(startup_32_smp)
#include "verify_cpu.S"
@@ -665,14 +669,17 @@ __PAGE_ALIGNED_BSS
initial_pg_pmd:
.fill 1024*KPMDS,4,0
#else
-ENTRY(initial_page_table)
+.globl initial_page_table
+initial_page_table:
.fill 1024,4,0
#endif
initial_pg_fixmap:
.fill 1024,4,0
-ENTRY(empty_zero_page)
+.globl empty_zero_page
+empty_zero_page:
.fill 4096,1,0
-ENTRY(swapper_pg_dir)
+.globl swapper_pg_dir
+swapper_pg_dir:
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)
@@ -706,7 +713,12 @@ ENTRY(initial_page_table)
.data
.balign 4
ENTRY(initial_stack)
- .long init_thread_union+THREAD_SIZE
+ /*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
+ * unwinder reliably detect the end of the stack.
+ */
+ .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
+ TOP_OF_KERNEL_STACK_PADDING;
__INITRODATA
int_msg:
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b4421cc191b0..90de28841242 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -66,13 +66,8 @@ startup_64:
* tables and then reload them.
*/
- /*
- * Setup stack for verify_cpu(). "-8" because initial_stack is defined
- * this way, see below. Our best guess is a NULL ptr for stack
- * termination heuristics and we don't want to break anything which
- * might depend on it (kgdb, ...).
- */
- leaq (__end_init_task - 8)(%rip), %rsp
+ /* Set up the stack for verify_cpu(), similar to initial_stack below */
+ leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
/* Sanitize CPU configuration */
call verify_cpu
@@ -117,20 +112,20 @@ startup_64:
movq %rdi, %rax
shrq $PGDIR_SHIFT, %rax
- leaq (4096 + _KERNPG_TABLE)(%rbx), %rdx
+ leaq (PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
movq %rdx, 0(%rbx,%rax,8)
movq %rdx, 8(%rbx,%rax,8)
- addq $4096, %rdx
+ addq $PAGE_SIZE, %rdx
movq %rdi, %rax
shrq $PUD_SHIFT, %rax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, 4096(%rbx,%rax,8)
+ movq %rdx, PAGE_SIZE(%rbx,%rax,8)
incl %eax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, 4096(%rbx,%rax,8)
+ movq %rdx, PAGE_SIZE(%rbx,%rax,8)
- addq $8192, %rbx
+ addq $PAGE_SIZE * 2, %rbx
movq %rdi, %rax
shrq $PMD_SHIFT, %rdi
addq $(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
@@ -147,6 +142,9 @@ startup_64:
decl %ecx
jnz 1b
+ test %rbp, %rbp
+ jz .Lskip_fixup
+
/*
* Fixup the kernel text+data virtual addresses. Note that
* we might write invalid pmds, when the kernel is relocated
@@ -154,9 +152,9 @@ startup_64:
* beyond _end.
*/
leaq level2_kernel_pgt(%rip), %rdi
- leaq 4096(%rdi), %r8
+ leaq PAGE_SIZE(%rdi), %r8
/* See if it is a valid page table entry */
-1: testb $1, 0(%rdi)
+1: testb $_PAGE_PRESENT, 0(%rdi)
jz 2f
addq %rbp, 0(%rdi)
/* Go to the next page */
@@ -167,6 +165,7 @@ startup_64:
/* Fixup phys_base */
addq %rbp, phys_base(%rip)
+.Lskip_fixup:
movq $(early_level4_pgt - __START_KERNEL_map), %rax
jmp 1f
ENTRY(secondary_startup_64)
@@ -265,13 +264,17 @@ ENTRY(secondary_startup_64)
movl $MSR_GS_BASE,%ecx
movl initial_gs(%rip),%eax
movl initial_gs+4(%rip),%edx
- wrmsr
+ wrmsr
/* rsi is pointer to real mode structure with interesting info.
pass it to C */
movq %rsi, %rdi
-
- /* Finally jump to run C code and to be on real kernel address
+ jmp start_cpu
+ENDPROC(secondary_startup_64)
+
+ENTRY(start_cpu)
+ /*
+ * Jump to run C code and to be on a real kernel address.
* Since we are running on identity-mapped space we have to jump
* to the full 64bit address, this is only possible as indirect
* jump. In addition we need to ensure %cs is set so we make this
@@ -295,12 +298,13 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
- movq initial_code(%rip),%rax
- pushq $0 # fake return address to stop unwinder
+ call 1f # put return address on stack for unwinder
+1: xorq %rbp, %rbp # clear frame pointer
+ movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
lretq
-ENDPROC(secondary_startup_64)
+ENDPROC(start_cpu)
#include "verify_cpu.S"
@@ -308,15 +312,11 @@ ENDPROC(secondary_startup_64)
/*
* Boot CPU0 entry point. It's called from play_dead(). Everything has been set
* up already except stack. We just set up stack here. Then call
- * start_secondary().
+ * start_secondary() via start_cpu().
*/
ENTRY(start_cpu0)
- movq initial_stack(%rip),%rsp
- movq initial_code(%rip),%rax
- pushq $0 # fake return address to stop unwinder
- pushq $__KERNEL_CS # set correct cs
- pushq %rax # target address in negative space
- lretq
+ movq initial_stack(%rip), %rsp
+ jmp start_cpu
ENDPROC(start_cpu0)
#endif
@@ -328,7 +328,11 @@ ENDPROC(start_cpu0)
GLOBAL(initial_gs)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(initial_stack)
- .quad init_thread_union+THREAD_SIZE-8
+ /*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
+ * unwinder reliably detect the end of the stack.
+ */
+ .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
__FINITDATA
bad_address:
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 9f669fdd2010..7c6e9ffe4424 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -14,7 +14,6 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
-#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 9ebd0b0e73d9..6b0678a541e2 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -16,7 +16,6 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/io_apic.h>
-#include <asm/idle.h>
#include <asm/apic.h>
int sysctl_panic_on_stackoverflow;
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
new file mode 100644
index 000000000000..cb9c1ed1d391
--- /dev/null
+++ b/arch/x86/kernel/itmt.c
@@ -0,0 +1,215 @@
+/*
+ * itmt.c: Support Intel Turbo Boost Max Technology 3.0
+ *
+ * (C) Copyright 2016 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * On platforms supporting Intel Turbo Boost Max Technology 3.0 (ITMT),
+ * the maximum turbo frequencies of some cores in a CPU package may be
+ * higher than for the other cores in the same package. In that case,
+ * better performance can be achieved by making the scheduler prefer
+ * to run tasks on the CPUs with higher max turbo frequencies.
+ *
+ * This file provides functions and data structures for enabling the
+ * scheduler to favor scheduling on cores that can be boosted to a higher
+ * frequency under ITMT.
+ */
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/cpuset.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/nodemask.h>
+
+static DEFINE_MUTEX(itmt_update_mutex);
+DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
+
+/* Boolean to track if system has ITMT capabilities */
+static bool __read_mostly sched_itmt_capable;
+
+/*
+ * Boolean to control whether we want to move processes to CPUs capable
+ * of higher turbo frequency for cpus supporting Intel Turbo Boost Max
+ * Technology 3.0.
+ *
+ * It can be set via /proc/sys/kernel/sched_itmt_enabled
+ */
+unsigned int __read_mostly sysctl_sched_itmt_enabled;
+
+static int sched_itmt_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ unsigned int old_sysctl;
+ int ret;
+
+ mutex_lock(&itmt_update_mutex);
+
+ if (!sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return -EINVAL;
+ }
+
+ old_sysctl = sysctl_sched_itmt_enabled;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+
+ return ret;
+}
+
+static unsigned int zero;
+static unsigned int one = 1;
+static struct ctl_table itmt_kern_table[] = {
+ {
+ .procname = "sched_itmt_enabled",
+ .data = &sysctl_sched_itmt_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_itmt_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {}
+};
+
+static struct ctl_table itmt_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = itmt_kern_table,
+ },
+ {}
+};
+
+static struct ctl_table_header *itmt_sysctl_header;
+
+/**
+ * sched_set_itmt_support() - Indicate platform supports ITMT
+ *
+ * This function is used by the OS to indicate to the scheduler that the platform
+ * is capable of supporting the ITMT feature.
+ *
+ * The current scheme has the pstate driver detect if the system
+ * is ITMT capable and call sched_set_itmt_support().
+ *
+ * This must be done only after sched_set_itmt_core_prio
+ * has been called to set the cpus' priorities.
+ * It must not be called with the CPU hotplug lock
+ * held, as we need to acquire the lock to rebuild sched domains
+ * later.
+ *
+ * Return: 0 on success
+ */
+int sched_set_itmt_support(void)
+{
+ mutex_lock(&itmt_update_mutex);
+
+ if (sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return 0;
+ }
+
+ itmt_sysctl_header = register_sysctl_table(itmt_root_table);
+ if (!itmt_sysctl_header) {
+ mutex_unlock(&itmt_update_mutex);
+ return -ENOMEM;
+ }
+
+ sched_itmt_capable = true;
+
+ sysctl_sched_itmt_enabled = 1;
+
+ if (sysctl_sched_itmt_enabled) {
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+
+ return 0;
+}
+
+/**
+ * sched_clear_itmt_support() - Revoke platform's support of ITMT
+ *
+ * This function is used by the OS to indicate that it has
+ * revoked the platform's support of the ITMT feature.
+ *
+ * It must not be called with the CPU hotplug lock
+ * held, as we need to acquire the lock to rebuild sched domains
+ * later.
+ */
+void sched_clear_itmt_support(void)
+{
+ mutex_lock(&itmt_update_mutex);
+
+ if (!sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return;
+ }
+ sched_itmt_capable = false;
+
+ if (itmt_sysctl_header) {
+ unregister_sysctl_table(itmt_sysctl_header);
+ itmt_sysctl_header = NULL;
+ }
+
+ if (sysctl_sched_itmt_enabled) {
+ /* disable sched_itmt if we are no longer ITMT capable */
+ sysctl_sched_itmt_enabled = 0;
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+}
+
+int arch_asym_cpu_priority(int cpu)
+{
+ return per_cpu(sched_core_priority, cpu);
+}
+
+/**
+ * sched_set_itmt_core_prio() - Set CPU priority based on ITMT
+ * @prio: Priority of cpu core
+ * @core_cpu: The cpu number associated with the core
+ *
+ * The pstate driver will find out the max boost frequency
+ * and call this function to set a priority proportional
+ * to the max boost frequency. CPUs with a higher boost
+ * frequency will receive a higher priority.
+ *
+ * No need to rebuild sched domain after updating
+ * the CPU priorities. The sched domains have no
+ * dependency on CPU priorities.
+ */
+void sched_set_itmt_core_prio(int prio, int core_cpu)
+{
+ int cpu, i = 1;
+
+ for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
+ int smt_prio;
+
+ /*
+ * Ensure that the siblings are moved to the end
+ * of the priority chain and only used when
+ * all other high priority cpus are out of capacity.
+ */
+ smt_prio = prio * smp_num_siblings / i;
+ per_cpu(sched_core_priority, cpu) = smt_prio;
+ i++;
+ }
+}
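The sibling priority formula in sched_set_itmt_core_prio() above divides the core priority by the sibling index, so SMT siblings sort behind their primary thread in the priority chain. A small userspace illustration of that arithmetic (the priority value 39 and the two-sibling SMT width are made-up inputs):

#include <stdio.h>

int main(void)
{
	int prio = 39;              /* hypothetical per-core priority from the pstate driver */
	int smp_num_siblings = 2;   /* hypothetical SMT width */

	/* Mirrors: smt_prio = prio * smp_num_siblings / i, with i = 1, 2, ... */
	for (int i = 1; i <= smp_num_siblings; i++)
		printf("sibling %d: smt_prio = %d\n", i, prio * smp_num_siblings / i);

	/* Sibling 1 gets 78, sibling 2 gets 39, so the second hyperthread is
	 * only preferred once the primary threads are out of capacity. */
	return 0;
}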
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc854e39..36bc66416021 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -42,7 +42,6 @@
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
-#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
@@ -267,13 +266,11 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
prev_state = exception_enter();
- exit_idle();
kvm_async_pf_task_wait((u32)read_cr2());
exception_exit(prev_state);
break;
case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();
- exit_idle();
kvm_async_pf_task_wake((u32)read_cr2());
rcu_irq_exit();
break;
@@ -308,7 +305,7 @@ static void kvm_register_steal_time(void)
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
-static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
/**
* This relies on __test_and_clear_bit to modify the memory
@@ -319,7 +316,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
*/
if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
return;
- apic_write(APIC_EOI, APIC_EOI_ACK);
+ apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
static void kvm_guest_cpu_init(void)
@@ -592,6 +589,14 @@ out:
local_irq_restore(flags);
}
+__visible bool __kvm_vcpu_is_preempted(int cpu)
+{
+ struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+ return !!src->preempted;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+
/*
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
*/
@@ -608,6 +613,11 @@ void __init kvm_spinlock_init(void)
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_lock_ops.wait = kvm_wait;
pv_lock_ops.kick = kvm_kick_cpu;
+
+ if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ pv_lock_ops.vcpu_is_preempted =
+ PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+ }
}
static __init int kvm_spinlock_init_jump(void)
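kvm.c now exposes __kvm_vcpu_is_preempted() through pv_lock_ops so that spin-wait loops can bail out when the lock holder's vCPU has been scheduled out by the host. A heavily hedged sketch of how a consumer would use the generic vcpu_is_preempted() hook (the loop and the owner_cpu argument are illustrative, not taken from this patch):

#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * Spin briefly for a resource, but give up early if the CPU that owns it
 * is a preempted vCPU: spinning there only burns host CPU time.
 */
static bool example_spin_on(volatile int *flag, int owner_cpu)
{
	while (!*flag) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* fall back to sleeping/blocking */
		cpu_relax();
	}
	return true;
}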
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6707039b9032..d4a15831ac58 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -34,10 +34,10 @@ static void flush_ldt(void *current_mm)
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
-static struct ldt_struct *alloc_ldt_struct(int size)
+static struct ldt_struct *alloc_ldt_struct(unsigned int size)
{
struct ldt_struct *new_ldt;
- int alloc_size;
+ unsigned int alloc_size;
if (size > LDT_ENTRIES)
return NULL;
@@ -93,7 +93,7 @@ static void free_ldt_struct(struct ldt_struct *ldt)
paravirt_free_ldt(ldt->entries, ldt->size);
if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(ldt->entries);
+ vfree_atomic(ldt->entries);
else
free_page((unsigned long)ldt->entries);
kfree(ldt);
@@ -207,11 +207,11 @@ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct *mm = current->mm;
+ struct ldt_struct *new_ldt, *old_ldt;
+ unsigned int oldsize, newsize;
+ struct user_desc ldt_info;
struct desc_struct ldt;
int error;
- struct user_desc ldt_info;
- int oldsize, newsize;
- struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -249,7 +249,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
old_ldt = mm->context.ldt;
oldsize = old_ldt ? old_ldt->size : 0;
- newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+ newsize = max(ldt_info.entry_number + 1, oldsize);
error = -ENOMEM;
new_ldt = alloc_ldt_struct(newsize);
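The ldt.c changes only switch the size bookkeeping to unsigned int and free large LDTs with vfree_atomic(); the modify_ldt() ABI that write_ldt() serves is unchanged. For reference, a small userspace caller of that ABI (the segment parameters are arbitrary example values):

#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;      /* first LDT slot */
	desc.base_addr = 0;
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;    /* flat 4 GiB writable data segment */

	/* func 1 = write an LDT entry; serviced by write_ldt() above. */
	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) {
		perror("modify_ldt");
		return 1;
	}
	puts("installed one LDT entry");
	return 0;
}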
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 8c1f218926d7..307b1f4543de 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -328,7 +328,7 @@ void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
- VMCOREINFO_SYMBOL(phys_base);
+ VMCOREINFO_NUMBER(phys_base);
VMCOREINFO_SYMBOL(init_level4_pgt);
#ifdef CONFIG_NUMA
@@ -337,9 +337,7 @@ void arch_crash_save_vmcoreinfo(void)
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
kaslr_offset());
- VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
- VMCOREINFO_VMALLOC_START(VMALLOC_START);
- VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
+ VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
}
/* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7f3550acde1b..f5e3ff835cc8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -44,6 +44,7 @@
#include <asm/msr.h>
static struct class *msr_class;
+static enum cpuhp_state cpuhp_msr_state;
static ssize_t msr_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -180,7 +181,7 @@ static const struct file_operations msr_fops = {
.compat_ioctl = msr_ioctl,
};
-static int msr_device_create(int cpu)
+static int msr_device_create(unsigned int cpu)
{
struct device *dev;
@@ -189,34 +190,12 @@ static int msr_device_create(int cpu)
return PTR_ERR_OR_ZERO(dev);
}
-static void msr_device_destroy(int cpu)
+static int msr_device_destroy(unsigned int cpu)
{
device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ return 0;
}
-static int msr_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- err = msr_device_create(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- msr_device_destroy(cpu);
- break;
- }
- return notifier_from_errno(err);
-}
-
-static struct notifier_block __refdata msr_class_cpu_notifier = {
- .notifier_call = msr_class_cpu_callback,
-};
-
static char *msr_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
@@ -224,13 +203,11 @@ static char *msr_devnode(struct device *dev, umode_t *mode)
static int __init msr_init(void)
{
- int i, err = 0;
- i = 0;
+ int err;
if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
pr_err("unable to get major %d for msr\n", MSR_MAJOR);
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
msr_class = class_create(THIS_MODULE, "msr");
if (IS_ERR(msr_class)) {
@@ -239,44 +216,28 @@ static int __init msr_init(void)
}
msr_class->devnode = msr_devnode;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = msr_device_create(i);
- if (err != 0)
- goto out_class;
- }
- __register_hotcpu_notifier(&msr_class_cpu_notifier);
- cpu_notifier_register_done();
-
- err = 0;
- goto out;
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/msr:online",
+ msr_device_create, msr_device_destroy);
+ if (err < 0)
+ goto out_class;
+ cpuhp_msr_state = err;
+ return 0;
out_class:
- i = 0;
- for_each_online_cpu(i)
- msr_device_destroy(i);
- cpu_notifier_register_done();
+ cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
-out:
return err;
}
+module_init(msr_init);
static void __exit msr_exit(void)
{
- int cpu = 0;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- msr_device_destroy(cpu);
+ cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
- __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
- cpu_notifier_register_done();
}
-
-module_init(msr_init);
module_exit(msr_exit)
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a003b793..6d4bf812af45 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
native_queued_spin_unlock(lock);
}
-
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
bool pv_is_native_spin_unlock(void)
@@ -21,12 +20,25 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
+
+bool pv_is_native_vcpu_is_preempted(void)
+{
+ return pv_lock_ops.vcpu_is_preempted.func ==
+ __raw_callee_save___native_vcpu_is_preempted;
+}
+
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = paravirt_nop,
+ .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bbf3d5933eaa..a1bfba0f7234 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -328,7 +328,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
- .clts = native_clts,
.read_cr0 = native_read_cr0,
.write_cr0 = native_write_cr0,
.read_cr4 = native_read_cr4,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 920c6ae08592..d33ef165b1f8 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -8,10 +8,10 @@ DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -27,6 +27,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -48,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_mmu_ops, read_cr2);
PATCH_SITE(pv_mmu_ops, read_cr3);
PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, clts);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
if (pv_is_native_spin_unlock()) {
@@ -56,9 +56,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ goto patch_default;
+
+ case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted()) {
+ start = start_pv_lock_ops_vcpu_is_preempted;
+ end = end_pv_lock_ops_vcpu_is_preempted;
+ goto patch_site;
+ }
+ goto patch_default;
#endif
default:
+patch_default:
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
break;
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index bb3840cedb4f..f4fcf26c9fce 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -21,6 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -36,6 +36,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -58,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_mmu_ops, read_cr2);
PATCH_SITE(pv_mmu_ops, read_cr3);
PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, clts);
PATCH_SITE(pv_mmu_ops, flush_tlb_single);
PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -68,9 +68,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ goto patch_default;
+
+ case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted()) {
+ start = start_pv_lock_ops_vcpu_is_preempted;
+ end = end_pv_lock_ops_vcpu_is_preempted;
+ goto patch_site;
+ }
+ goto patch_default;
#endif
default:
+patch_default:
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0888a879120f..43c36d8a6ae2 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -23,7 +23,6 @@
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
-#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
@@ -65,23 +64,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-#ifdef CONFIG_X86_64
-static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-#endif
-
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
@@ -251,39 +233,9 @@ static inline void play_dead(void)
}
#endif
-#ifdef CONFIG_X86_64
-void enter_idle(void)
-{
- this_cpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
- if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
- return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
- /* idle loop has pid 0 */
- if (current->pid)
- return;
- __exit_idle();
-}
-#endif
-
void arch_cpu_idle_enter(void)
{
local_touch_nmi();
- enter_idle();
-}
-
-void arch_cpu_idle_exit(void)
-{
- __exit_idle();
}
void arch_cpu_idle_dead(void)
@@ -336,59 +288,33 @@ void stop_this_cpu(void *dummy)
halt();
}
-bool amd_e400_c1e_detected;
-EXPORT_SYMBOL(amd_e400_c1e_detected);
-
-static cpumask_var_t amd_e400_c1e_mask;
-
-void amd_e400_remove_cpu(int cpu)
-{
- if (amd_e400_c1e_mask != NULL)
- cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
-}
-
/*
- * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
- * pending message MSR. If we detect C1E, then we handle it the same
- * way as C3 power states (local apic timer and TSC stop)
+ * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
+ * states (local apic timer and TSC stop).
*/
static void amd_e400_idle(void)
{
- if (!amd_e400_c1e_detected) {
- u32 lo, hi;
-
- rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
-
- if (lo & K8_INTP_C1E_ACTIVE_MASK) {
- amd_e400_c1e_detected = true;
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halt in AMD C1E");
- pr_info("System has AMD C1E enabled\n");
- }
+ /*
+ * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
+ * gets set after static_cpu_has() places have been converted via
+ * alternatives.
+ */
+ if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+ default_idle();
+ return;
}
- if (amd_e400_c1e_detected) {
- int cpu = smp_processor_id();
+ tick_broadcast_enter();
- if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
- cpumask_set_cpu(cpu, amd_e400_c1e_mask);
- /* Force broadcast so ACPI can not interfere. */
- tick_broadcast_force();
- pr_info("Switch to broadcast mode on CPU%d\n", cpu);
- }
- tick_broadcast_enter();
-
- default_idle();
+ default_idle();
- /*
- * The switch back from broadcast mode needs to be
- * called with interrupts disabled.
- */
- local_irq_disable();
- tick_broadcast_exit();
- local_irq_enable();
- } else
- default_idle();
+ /*
+ * The switch back from broadcast mode needs to be called with
+ * interrupts disabled.
+ */
+ local_irq_disable();
+ tick_broadcast_exit();
+ local_irq_enable();
}
/*
@@ -448,8 +374,7 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
if (x86_idle || boot_option_idle_override == IDLE_POLL)
return;
- if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
- /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle;
} else if (prefer_mwait_c1_over_halt(c)) {
@@ -459,11 +384,37 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
x86_idle = default_idle;
}
-void __init init_amd_e400_c1e_mask(void)
+void amd_e400_c1e_apic_setup(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+ pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
+ local_irq_disable();
+ tick_broadcast_force();
+ local_irq_enable();
+ }
+}
+
+void __init arch_post_acpi_subsys_init(void)
{
- /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
- if (x86_idle == amd_e400_idle)
- zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
+ u32 lo, hi;
+
+ if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
+ return;
+
+ /*
+ * AMD E400 detection needs to happen after ACPI has been enabled. If
+ * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
+ * MSR_K8_INT_PENDING_MSG.
+ */
+ rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+ if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
+ return;
+
+ boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
+
+ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halt in AMD C1E");
+ pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
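arch_post_acpi_subsys_init() above probes MSR_K8_INT_PENDING_MSG once ACPI is up and sets X86_BUG_AMD_APIC_C1E if the C1E-active bits are found. The same check can be reproduced from userspace through the msr device; the MSR index 0xc0010055 and the 0x18000000 mask are assumed to match the kernel's MSR_K8_INT_PENDING_MSG and K8_INTP_C1E_ACTIVE_MASK definitions and should be verified against msr-index.h before relying on them.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_K8_INT_PENDING_MSG   0xc0010055	/* assumed to match msr-index.h */
#define K8_INTP_C1E_ACTIVE_MASK  0x18000000ULL	/* assumed to match the kernel mask */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The read fails on non-AMD CPUs, which also answers the question. */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_K8_INT_PENDING_MSG) != sizeof(val)) {
		perror("reading MSR_K8_INT_PENDING_MSG");
		return 1;
	}
	printf("AMD C1E is %s\n", (val & K8_INTP_C1E_ACTIVE_MASK) ? "active" : "not active");
	close(fd);
	return 0;
}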
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..d0d744108594 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -49,7 +49,6 @@
#include <asm/tlbflush.h>
#include <asm/cpu.h>
-#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
@@ -72,10 +71,9 @@ void __show_regs(struct pt_regs *regs, int all)
savesegment(gs, gs);
}
- printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
- (u16)regs->cs, regs->ip, regs->flags,
- smp_processor_id());
- print_symbol("EIP is at %s\n", regs->ip);
+ printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
+ printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
+ smp_processor_id());
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);
@@ -232,11 +230,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- fpu_switch_t fpu_switch;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +292,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);
this_cpu_write(current_task, next_p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b3760b3c1ca0..a76b65e3e615 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -44,7 +44,6 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
-#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
@@ -61,10 +60,15 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned int fsindex, gsindex;
unsigned int ds, cs, es;
- printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
- printk_address(regs->ip);
- printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
- regs->sp, regs->flags);
+ printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
+ (void *)regs->ip);
+ printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
+ regs->sp, regs->flags);
+ if (regs->orig_ax != -1)
+ pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
+ else
+ pr_cont("\n");
+
printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->ax, regs->bx, regs->cx);
printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
@@ -265,9 +269,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
unsigned prev_fsindex, prev_gsindex;
- fpu_switch_t fpu_switch;
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -417,7 +420,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
prev->gsbase = 0;
prev->gsindex = prev_gsindex;
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);
/*
* Switch the PDA and FPU contexts.
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 79c6311cd912..5b21cb7d84d6 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -64,6 +64,15 @@ void mach_get_cmos_time(struct timespec *now)
unsigned int status, year, mon, day, hour, min, sec, century = 0;
unsigned long flags;
+ /*
+ * If pm_trace abused the RTC as storage, set the timespec to 0,
+ * which tells the caller that this RTC value is unusable.
+ */
+ if (!pm_trace_rtc_valid()) {
+ now->tv_sec = now->tv_nsec = 0;
+ return;
+ }
+
spin_lock_irqsave(&rtc_lock, flags);
/*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9c337b0e8ba7..4cfba947d774 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -985,6 +985,30 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Memory used by the kernel cannot be hot-removed because Linux
+ * cannot migrate the kernel pages. When memory hotplug is
+ * enabled, we should prevent memblock from allocating memory
+ * for the kernel.
+ *
+ * ACPI SRAT records all hotpluggable memory ranges. But before
+ * SRAT is parsed, we don't know about it.
+ *
+ * The kernel image is loaded into memory very early, and that
+ * cannot be prevented. So on NUMA systems, we mark every node
+ * the kernel resides in as un-hotpluggable.
+ *
+ * Since a single node on a modern server can hold tens of
+ * gigabytes of memory, we can assume the memory around the
+ * kernel image is also un-hotpluggable. So before SRAT is
+ * parsed, just allocate memory near the kernel image to do our
+ * best to keep the kernel away from hotpluggable memory.
+ */
+ if (movable_node_is_enabled())
+ memblock_set_bottom_up(true);
+#endif
+
x86_report_nx();
/* after early param, so could get panic from serial */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2bbd27f89802..9820d6d977c6 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -1,7 +1,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
@@ -12,6 +12,7 @@
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
+#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index c00cb64bc0a1..68f8cc222f25 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,10 +261,8 @@ static inline void __smp_reschedule_interrupt(void)
__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
- irq_enter();
ack_APIC_irq();
__smp_reschedule_interrupt();
- irq_exit();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42f5eb7b4f6c..0c37d4fd01b2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -58,7 +58,6 @@
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
-#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
@@ -109,6 +108,17 @@ static bool logical_packages_frozen __read_mostly;
/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
+/* Flag to indicate if a complete sched domain rebuild is required */
+bool x86_topology_update;
+
+int arch_update_cpu_topology(void)
+{
+ int retval = x86_topology_update;
+
+ x86_topology_update = false;
+ return retval;
+}
+
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
unsigned long flags;
@@ -471,22 +481,42 @@ static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+static inline int x86_sched_itmt_flags(void)
+{
+ return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
+}
+
+#ifdef CONFIG_SCHED_MC
+static int x86_core_flags(void)
+{
+ return cpu_core_flags() | x86_sched_itmt_flags();
+}
+#endif
+#ifdef CONFIG_SCHED_SMT
+static int x86_smt_flags(void)
+{
+ return cpu_smt_flags() | x86_sched_itmt_flags();
+}
+#endif
+#endif
+
static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
{ NULL, },
};
static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
@@ -821,14 +851,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
return (send_status | accept_status);
}
-void smp_announce(void)
-{
- int num_nodes = num_online_nodes();
-
- printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
- num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
-}
-
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
@@ -964,9 +986,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
int cpu0_nmi_registered = 0;
unsigned long timeout;
- idle->thread.sp = (unsigned long) (((struct pt_regs *)
- (THREAD_SIZE + task_stack_page(idle))) - 1);
-
+ idle->thread.sp = (unsigned long)task_pt_regs(idle);
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
initial_stack = idle->thread.sp;
@@ -1111,7 +1131,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
return err;
/* the FPU context is blank, nobody can own it */
- __cpu_disable_lazy_restore(cpu);
+ per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
common_cpu_up(cpu, tidle);
@@ -1331,7 +1351,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
default_setup_apic_routing();
cpu0_logical_apicid = apic_bsp_setup(false);
- pr_info("CPU%d: ", 0);
+ pr_info("CPU0: ");
print_cpu_info(&cpu_data(0));
if (is_uv_system())
@@ -1575,7 +1595,6 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- amd_e400_remove_cpu(raw_smp_processor_id());
/* Ack it */
(void)cpu_report_death();
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f84de7..85195d447a92 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
{
struct platform_device *pd;
struct resource res;
- unsigned long len;
+ u64 base, size;
+ u32 length;
- /* don't use lfb_size as it may contain the whole VMEM instead of only
- * the part that is occupied by the framebuffer */
- len = mode->height * mode->stride;
- len = PAGE_ALIGN(len);
- if (len > (u64)si->lfb_size << 16) {
+ /*
+ * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+ * upper half of the base address. Assemble the address, then make sure
+ * it is valid and we can actually access it.
+ */
+ base = si->lfb_base;
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)si->ext_lfb_base << 32;
+ if (!base || (u64)(resource_size_t)base != base) {
+ printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Don't use lfb_size as IORESOURCE size, since it may contain the
+ * entire VMEM, and thus require huge mappings. Use just the part we
+ * need, that is, the part where the framebuffer is located. But verify
+ * that it does not exceed the advertised VMEM.
+ * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+ * historical reasons.
+ */
+ size = si->lfb_size;
+ if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ size <<= 16;
+ length = mode->height * mode->stride;
+ length = PAGE_ALIGN(length);
+ if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = simplefb_resname;
- res.start = si->lfb_base;
- res.end = si->lfb_base + len - 1;
+ res.start = base;
+ res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
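For illustration of the checks added above (assembling the 64-bit framebuffer base from its two halves, and mapping only the bytes the mode actually needs while verifying they fit in the VRAM the firmware advertised), a minimal standalone sketch follows; the struct and field names are simplified stand-ins for screen_info, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

struct fb_info_example {
	uint32_t lfb_base;        /* low 32 bits of the framebuffer base */
	uint32_t ext_lfb_base;    /* upper 32 bits, if the firmware provides them */
	bool     has_64bit_base;
	uint64_t advertised_vram; /* total VRAM reported by the firmware */
};

/* Assemble the usable base address from its two halves. */
static uint64_t fb_base(const struct fb_info_example *si)
{
	uint64_t base = si->lfb_base;

	if (si->has_64bit_base)
		base |= (uint64_t)si->ext_lfb_base << 32;
	return base;
}

/* Map only height * stride bytes, and check that it fits in the advertised VRAM. */
static bool fb_length_ok(const struct fb_info_example *si,
			 uint32_t height, uint32_t stride)
{
	uint64_t len = (uint64_t)height * stride;

	/* round up to a page boundary, as the kernel does before mapping */
	len = (len + EXAMPLE_PAGE_SIZE - 1) & ~(uint64_t)(EXAMPLE_PAGE_SIZE - 1);
	return len <= si->advertised_vram;
}

int main(void)
{
	struct fb_info_example si = {
		.lfb_base = 0xe0000000u, .ext_lfb_base = 0x1u,
		.has_64bit_base = true, .advertised_vram = 16ULL << 20,
	};

	printf("base=%#llx, 1024x768x32bpp fits: %d\n",
	       (unsigned long long)fb_base(&si),
	       fb_length_ok(&si, 768, 1024 * 4));
	return 0;
}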
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bd4e3d4d3625..bf0c6d049080 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -853,6 +853,8 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
+ unsigned long cr0;
+
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_MATH_EMULATION
@@ -866,10 +868,20 @@ do_device_not_available(struct pt_regs *regs, long error_code)
return;
}
#endif
- fpu__restore(&current->thread.fpu); /* interrupts still off */
-#ifdef CONFIG_X86_32
- cond_local_irq_enable(regs);
-#endif
+
+ /* This should not happen. */
+ cr0 = read_cr0();
+ if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
+ /* Try to fix it up and carry on. */
+ write_cr0(cr0 & ~X86_CR0_TS);
+ } else {
+ /*
+ * Something terrible happened, and we're better off trying
+ * to kill the task than getting stuck in a never-ending
+ * loop of #NM faults.
+ */
+ die("unexpected #NM exception", regs, error_code);
+ }
}
NOKPROBE_SYMBOL(do_device_not_available);
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index a2456d4d286a..ea7b7f9a3b9e 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -14,13 +14,55 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (unwind_done(state))
return 0;
+ if (state->regs && user_mode(state->regs))
+ return 0;
+
addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
addr_p);
- return __kernel_text_address(addr) ? addr : 0;
+ if (!__kernel_text_address(addr)) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
+ (void *)addr, addr_p, state->task->comm,
+ state->task->pid);
+ return 0;
+ }
+
+ return addr;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
+static size_t regs_size(struct pt_regs *regs)
+{
+ /* x86_32 regs from kernel mode are two words shorter: */
+ if (IS_ENABLED(CONFIG_X86_32) && !user_mode(regs))
+ return sizeof(*regs) - 2*sizeof(long);
+
+ return sizeof(*regs);
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+ unsigned long bp = (unsigned long)state->bp;
+ unsigned long regs = (unsigned long)task_pt_regs(state->task);
+
+ return bp == regs - FRAME_HEADER_SIZE;
+}
+
+/*
+ * This determines if the frame pointer actually contains an encoded pointer to
+ * pt_regs on the stack. See ENCODE_FRAME_POINTER.
+ */
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+ unsigned long regs = (unsigned long)bp;
+
+ if (!(regs & 0x1))
+ return NULL;
+
+ return (struct pt_regs *)(regs & ~0x1);
+}
+
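The helper above relies on the entry code tagging a frame pointer that really points at pt_regs by setting its lowest bit (ENCODE_FRAME_POINTER); since the structure is word-aligned, bit 0 is otherwise always clear. A minimal standalone sketch of that round trip, using an illustrative struct instead of the kernel's pt_regs:

#include <stdint.h>
#include <stdio.h>

struct fake_regs { unsigned long ip, sp, bp; };

/* Tag a pointer to a register frame by setting bit 0. */
static uintptr_t encode_frame_pointer(struct fake_regs *regs)
{
	return (uintptr_t)regs | 0x1;
}

/* Recover the register frame, or NULL for an ordinary stack frame. */
static struct fake_regs *decode_frame_pointer(uintptr_t bp)
{
	if (!(bp & 0x1))
		return NULL;
	return (struct fake_regs *)(bp & ~(uintptr_t)0x1);
}

int main(void)
{
	struct fake_regs regs = { 0 };
	uintptr_t bp = encode_frame_pointer(&regs);

	printf("decoded %p, original %p\n",
	       (void *)decode_frame_pointer(bp), (void *)&regs);
	return 0;
}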
static bool update_stack_state(struct unwind_state *state, void *addr,
size_t len)
{
@@ -43,26 +85,117 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long *next_bp;
+ struct pt_regs *regs;
+ unsigned long *next_bp, *next_frame;
+ size_t next_len;
+ enum stack_type prev_type = state->stack_info.type;
if (unwind_done(state))
return false;
- next_bp = (unsigned long *)*state->bp;
+ /* have we reached the end? */
+ if (state->regs && user_mode(state->regs))
+ goto the_end;
+
+ if (is_last_task_frame(state)) {
+ regs = task_pt_regs(state->task);
+
+ /*
+ * kthreads (other than the boot CPU's idle thread) have some
+ * partial regs at the end of their stack which were placed
+ * there by copy_thread_tls(). But the regs don't have any
+ * useful information, so we can skip them.
+ *
+ * This user_mode() check is slightly broader than a PF_KTHREAD
+ * check because it also catches the awkward situation where a
+ * newly forked kthread transitions into a user task by calling
+ * do_execve(), which eventually clears PF_KTHREAD.
+ */
+ if (!user_mode(regs))
+ goto the_end;
+
+ /*
+ * We're almost at the end, but not quite: there's still the
+ * syscall regs frame. Entry code doesn't encode the regs
+ * pointer for syscalls, so we have to set it manually.
+ */
+ state->regs = regs;
+ state->bp = NULL;
+ return true;
+ }
+
+ /* get the next frame pointer */
+ if (state->regs)
+ next_bp = (unsigned long *)state->regs->bp;
+ else
+ next_bp = (unsigned long *)*state->bp;
+
+ /* is the next frame pointer an encoded pointer to pt_regs? */
+ regs = decode_frame_pointer(next_bp);
+ if (regs) {
+ next_frame = (unsigned long *)regs;
+ next_len = sizeof(*regs);
+ } else {
+ next_frame = next_bp;
+ next_len = FRAME_HEADER_SIZE;
+ }
/* make sure the next frame's data is accessible */
- if (!update_stack_state(state, next_bp, FRAME_HEADER_SIZE))
- return false;
+ if (!update_stack_state(state, next_frame, next_len)) {
+ /*
+ * Don't warn on bad regs->bp. An interrupt in entry code
+ * might cause a false positive warning.
+ */
+ if (state->regs)
+ goto the_end;
+
+ goto bad_address;
+ }
+
+ /* Make sure it only unwinds up and doesn't overlap the last frame: */
+ if (state->stack_info.type == prev_type) {
+ if (state->regs && (void *)next_frame < (void *)state->regs + regs_size(state->regs))
+ goto bad_address;
+
+ if (state->bp && (void *)next_frame < (void *)state->bp + FRAME_HEADER_SIZE)
+ goto bad_address;
+ }
/* move to the next frame */
- state->bp = next_bp;
+ if (regs) {
+ state->regs = regs;
+ state->bp = NULL;
+ } else {
+ state->bp = next_bp;
+ state->regs = NULL;
+ }
+
return true;
+
+bad_address:
+ if (state->regs) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
+ state->regs, state->task->comm,
+ state->task->pid, next_frame);
+ } else {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
+ state->bp, state->task->comm,
+ state->task->pid, next_frame);
+ }
+the_end:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long *first_frame)
{
+ unsigned long *bp, *frame;
+ size_t len;
+
memset(state, 0, sizeof(*state));
state->task = task;
@@ -73,12 +206,22 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
}
/* set up the starting stack frame */
- state->bp = get_frame_pointer(task, regs);
+ bp = get_frame_pointer(task, regs);
+ regs = decode_frame_pointer(bp);
+ if (regs) {
+ state->regs = regs;
+ frame = (unsigned long *)regs;
+ len = sizeof(*regs);
+ } else {
+ state->bp = bp;
+ frame = bp;
+ len = FRAME_HEADER_SIZE;
+ }
/* initialize stack info and make sure the frame data is accessible */
- get_stack_info(state->bp, state->task, &state->stack_info,
+ get_stack_info(frame, state->task, &state->stack_info,
&state->stack_mask);
- update_stack_state(state, state->bp, FRAME_HEADER_SIZE);
+ update_stack_state(state, frame, len);
/*
* The caller can provide the address of the first frame directly
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 2d721e533cf4..22881ddcbb9f 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -7,11 +7,15 @@
unsigned long unwind_get_return_address(struct unwind_state *state)
{
+ unsigned long addr;
+
if (unwind_done(state))
return 0;
+ addr = READ_ONCE_NOCHECK(*state->sp);
+
return ftrace_graph_ret_addr(state->task, &state->graph_idx,
- *state->sp, state->sp);
+ addr, state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
@@ -23,9 +27,12 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
do {
- for (state->sp++; state->sp < info->end; state->sp++)
- if (__kernel_text_address(*state->sp))
+ for (state->sp++; state->sp < info->end; state->sp++) {
+ unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
+ if (__kernel_text_address(addr))
return true;
+ }
state->sp = info->next_sp;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index dbf67f64d5ec..e79f15f108a8 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -91,10 +91,10 @@ SECTIONS
/* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ _stext = .;
/* bootstrapping code */
HEAD_TEXT
. = ALIGN(8);
- _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index afa7bbb596cd..b2d3cf1ef54a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
+#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -65,6 +65,11 @@ u64 kvm_supported_xcr0(void)
#define F(x) bit(X86_FEATURE_##x)
+/* These are scattered features in cpufeatures.h. */
+#define KVM_CPUID_BIT_AVX512_4VNNIW 2
+#define KVM_CPUID_BIT_AVX512_4FMAPS 3
+#define KF(x) bit(KVM_CPUID_BIT_##x)
+
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -81,6 +86,10 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
best->ecx |= F(OSXSAVE);
}
+ best->edx &= ~F(APIC);
+ if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
+ best->edx |= F(APIC);
+
if (apic) {
if (best->ecx & F(TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
@@ -114,8 +123,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- if (use_eager_fpu())
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
@@ -376,6 +384,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ecx*/
const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -458,12 +470,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled)
entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
} else {
entry->ebx = 0;
entry->ecx = 0;
+ entry->edx = 0;
}
entry->eax = 0;
- entry->edx = 0;
break;
}
case 9:
@@ -863,17 +877,17 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
- u32 function, eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
- function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4e95d3eb2955..56628a44668b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -158,9 +158,11 @@
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
+#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
-#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
-#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
+#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
+#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
@@ -446,6 +448,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
+/*
+ * XXX: inoutclob user must know where the argument is being expanded.
+ * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ */
+#define asm_safe(insn, inoutclob...) \
+({ \
+ int _fault = 0; \
+ \
+ asm volatile("1:" insn "\n" \
+ "2:\n" \
+ ".pushsection .fixup, \"ax\"\n" \
+ "3: movl $1, %[_fault]\n" \
+ " jmp 2b\n" \
+ ".popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [_fault] "+qm"(_fault) inoutclob ); \
+ \
+ _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
+})
+
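For context, later hunks in this same file convert their open-coded fixup sections to this macro; the call sites below look like:

	rc = asm_safe("fwait");
	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));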
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
@@ -632,21 +654,26 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
- * subject to the same check.
+ * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
+ * 512 bytes of data must be aligned to a 16 byte boundary.
*/
-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
- if (likely(size < 16))
- return false;
+ u64 alignment = ctxt->d & AlignMask;
- if (ctxt->d & Aligned)
- return true;
- else if (ctxt->d & Unaligned)
- return false;
- else if (ctxt->d & Avx)
- return false;
- else
- return true;
+ if (likely(size < 16))
+ return 1;
+
+ switch (alignment) {
+ case Unaligned:
+ case Avx:
+ return 1;
+ case Aligned16:
+ return 16;
+ case Aligned:
+ default:
+ return size;
+ }
}
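The rewrite above packs what used to be three independent flag bits into a single 3-bit field at bit 41, so the new Aligned16 class fits without spending another bit of the opcode flags. A small standalone sketch of that encoding, with illustrative names rather than the emulator's:

#include <stdint.h>
#include <stdio.h>

/* One 3-bit field at bit 41 instead of one flag bit per alignment class. */
#define EX_ALIGN_MASK	((uint64_t)7 << 41)
#define EX_ALIGNED	((uint64_t)1 << 41)
#define EX_UNALIGNED	((uint64_t)2 << 41)
#define EX_AVX		((uint64_t)3 << 41)
#define EX_ALIGNED16	((uint64_t)4 << 41)

static unsigned int required_alignment(uint64_t flags, unsigned int size)
{
	if (size < 16)		/* small operands are never align-checked */
		return 1;

	switch (flags & EX_ALIGN_MASK) {
	case EX_UNALIGNED:
	case EX_AVX:
		return 1;
	case EX_ALIGNED16:
		return 16;
	case EX_ALIGNED:
	default:
		return size;
	}
}

int main(void)
{
	printf("fxsave-style op: align to %u\n",
	       required_alignment(EX_ALIGNED16, 512));
	printf("movdqa-style op: align to %u\n",
	       required_alignment(EX_ALIGNED, 16));
	return 0;
}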
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -704,7 +731,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
- if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+ if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
@@ -2105,16 +2132,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2145,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
@@ -2189,14 +2208,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2226,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
@@ -3856,6 +3869,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int check_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+ u32 eax = 1, ebx, ecx = 0, edx;
+
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ if (!(edx & FFL(FXSR)))
+ return emulate_ud(ctxt);
+
+ if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+ return emulate_nm(ctxt);
+
+ /*
+ * Rather than work around the lack of fxsave64/fxrstor64 on old
+ * compilers, simply don't emulate a case that should never be hit.
+ */
+ if (ctxt->mode >= X86EMUL_MODE_PROT64)
+ return X86EMUL_UNHANDLEABLE;
+
+ return X86EMUL_CONTINUE;
+}
+
+/*
+ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
+ * 1) 16 bit mode
+ * 2) 32 bit mode
+ * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
+ * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
+ * save and restore
+ * 3) 64-bit mode with REX.W prefix
+ * - like (2), but XMM 8-15 are being saved and restored
+ * 4) 64-bit mode without REX.W prefix
+ * - like (3), but FIP and FDP are 64 bit
+ *
+ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
+ * desired result. (4) is not emulated.
+ *
+ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
+ * and FPU DS) should match.
+ */
+static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ size_t size;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ ctxt->ops->get_fpu(ctxt);
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
+ size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
+ else
+ size = offsetof(struct fxregs_state, xmm_space[0]);
+
+ return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+}
+
+static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
+ struct fxregs_state *new)
+{
+ int rc = X86EMUL_CONTINUE;
+ struct fxregs_state old;
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ /*
+ * A 64-bit host will restore XMM 8-15, which is not correct for
+ * non-64-bit guests. Load the current values so that XMM 8-15 are
+ * preserved across the fxrstor.
+ */
+#ifdef CONFIG_X86_64
+ /* XXX: accessing XMM 8-15 very awkwardly */
+ memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
+#endif
+
+ /*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
+ * does save and restore MXCSR.
+ */
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
+ memcpy(new->xmm_space, old.xmm_space, 8 * 16);
+
+ return rc;
+}
+
+static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+{
+ struct fxregs_state fx_state;
+ int rc;
+
+ rc = check_fxsr(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (fx_state.mxcsr >> 16)
+ return emulate_gp(ctxt, 0);
+
+ ctxt->ops->get_fpu(ctxt);
+
+ if (ctxt->mode < X86EMUL_MODE_PROT64)
+ rc = fxrstor_fixup(ctxt, &fx_state);
+
+ if (rc == X86EMUL_CONTINUE)
+ rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+
+ ctxt->ops->put_fpu(ctxt);
+
+ return rc;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -4208,7 +4346,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};
static const struct group_dual group15 = { {
- N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+ I(ModRM | Aligned16, em_fxsave),
+ I(ModRM | Aligned16, em_fxrstor),
+ N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@@ -5045,7 +5185,7 @@ done_prefixes:
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
- if (ctxt->rip_relative)
+ if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
@@ -5080,21 +5220,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
- bool fault = false;
+ int rc;
ctxt->ops->get_fpu(ctxt);
- asm volatile("1: fwait \n\t"
- "2: \n\t"
- ".pushsection .fixup,\"ax\" \n\t"
- "3: \n\t"
- "movb $1, %[fault] \n\t"
- "jmp 2b \n\t"
- ".popsection \n\t"
- _ASM_EXTABLE(1b, 3b)
- : [fault]"+qm"(fault));
+ rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
- if (unlikely(fault))
+ if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 42b1c83741c8..99cde5220e07 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -291,7 +291,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
return ret;
}
-int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
+static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
struct kvm_lapic_irq irq;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 16a7134eedac..a78b445ce411 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
*/
smp_mb();
if (atomic_dec_if_positive(&ps->pending) > 0)
- kthread_queue_work(&pit->worker, &pit->expired);
+ kthread_queue_work(pit->worker, &pit->expired);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
if (atomic_read(&ps->reinject))
atomic_inc(&ps->pending);
- kthread_queue_work(&pt->worker, &pt->expired);
+ kthread_queue_work(pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -667,10 +667,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pid_nr = pid_vnr(pid);
put_pid(pid);
- kthread_init_worker(&pit->worker);
- pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
- "kvm-pit/%d", pid_nr);
- if (IS_ERR(pit->worker_task))
+ pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+ if (IS_ERR(pit->worker))
goto fail_kthread;
kthread_init_work(&pit->expired, pit_do_work);
@@ -713,7 +711,7 @@ fail_register_speaker:
fail_register_pit:
mutex_unlock(&kvm->slots_lock);
kvm_pit_set_reinject(pit, false);
- kthread_stop(pit->worker_task);
+ kthread_destroy_worker(pit->worker);
fail_kthread:
kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
@@ -730,8 +728,7 @@ void kvm_free_pit(struct kvm *kvm)
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
- kthread_flush_work(&pit->expired);
- kthread_stop(pit->worker_task);
+ kthread_destroy_worker(pit->worker);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 2f5af0798326..600bee9dcbbd 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,8 +44,7 @@ struct kvm_pit {
struct kvm_kpit_state pit_state;
int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
- struct kthread_worker worker;
- struct task_struct *worker_task;
+ struct kthread_worker *worker;
struct kthread_work expired;
};
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1a22de70f7f7..6e219e5c07d2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 7d2692a49657..1cc6e54436db 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPUS];
+ u8 vectors[KVM_MAX_VCPU_ID];
};
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b144b58..6c0191615f23 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_pic *pic = pic_irqchip(kvm);
+
+ /*
+ * XXX: rejecting pic routes when pic isn't in use would be better,
+ * but the default routing table is installed while kvm->arch.vpic is
+ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
+ */
+ if (!pic)
+ return -1;
+
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!ioapic)
+ return -1;
+
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
line_status);
}
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}
+static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ if (!level)
+ return -1;
+
+ return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
+}
+
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq irq;
int r;
- if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
- return -EWOULDBLOCK;
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_HV_SINT:
+ return kvm_hv_set_sint(e, kvm, irq_source_id, level,
+ line_status);
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
+ case KVM_IRQ_ROUTING_MSI:
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
- kvm_set_msi_irq(kvm, e, &irq);
+ kvm_set_msi_irq(kvm, e, &irq);
- if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
- return r;
- else
- return -EWOULDBLOCK;
+ if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+ return r;
+ break;
+
+ default:
+ break;
+ }
+
+ return -EWOULDBLOCK;
}
int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- if (!level)
- return -1;
-
- return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
- int irq_source_id, int level, bool line_status)
-{
- switch (irq->type) {
- case KVM_IRQ_ROUTING_HV_SINT:
- return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
- line_status);
- default:
- return -EWOULDBLOCK;
- }
-}
-
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
kvm_hv_irq_routing_update(kvm);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 23b99f305382..34a66b2d47e6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
*mask = dest_id & 0xff;
return true;
case KVM_APIC_MODE_XAPIC_CLUSTER:
- *cluster = map->xapic_cluster_map[dest_id >> 4];
+ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
*mask = dest_id & 0xf;
return true;
default:
@@ -342,9 +342,11 @@ void __kvm_apic_update_irr(u32 *pir, void *regs)
u32 i, pir_val;
for (i = 0; i <= 7; i++) {
- pir_val = xchg(&pir[i], 0);
- if (pir_val)
+ pir_val = READ_ONCE(pir[i]);
+ if (pir_val) {
+ pir_val = xchg(&pir[i], 0);
*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+ }
}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
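The change above skips the atomic xchg for PIR words that are zero, which is the common case, by peeking with a plain READ_ONCE first. A rough standalone analogue using C11 atomics rather than the kernel's primitives:

#include <stdatomic.h>
#include <stdint.h>

/*
 * Drain one word of a pending-interrupt bitmap: cheap relaxed load first,
 * atomic exchange only when the word is actually non-zero.
 */
static uint32_t drain_pending_word(_Atomic uint32_t *slot)
{
	uint32_t val = atomic_load_explicit(slot, memory_order_relaxed);

	if (!val)
		return 0;
	return atomic_exchange_explicit(slot, 0, memory_order_acquire);
}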
@@ -1090,7 +1092,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
- ktime_t remaining;
+ ktime_t remaining, now;
s64 ns;
u32 tmcct;
@@ -1101,7 +1103,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
apic->lapic_timer.period == 0)
return 0;
- remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+ now = ktime_get();
+ remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
@@ -1332,7 +1335,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_save(flags);
- now = apic->lapic_timer.timer.base->get_time();
+ now = ktime_get();
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
@@ -1347,6 +1350,79 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_restore(flags);
}
+static void start_sw_period(struct kvm_lapic *apic)
+{
+ if (!apic->lapic_timer.period)
+ return;
+
+ if (apic_lvtt_oneshot(apic) &&
+ ktime_after(ktime_get(),
+ apic->lapic_timer.target_expiration)) {
+ apic_timer_expired(apic);
+ return;
+ }
+
+ hrtimer_start(&apic->lapic_timer.timer,
+ apic->lapic_timer.target_expiration,
+ HRTIMER_MODE_ABS_PINNED);
+}
+
+static bool set_target_expiration(struct kvm_lapic *apic)
+{
+ ktime_t now;
+ u64 tscl = rdtsc();
+
+ now = ktime_get();
+ apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
+ * APIC_BUS_CYCLE_NS * apic->divide_count;
+
+ if (!apic->lapic_timer.period)
+ return false;
+
+ /*
+ * Do not allow the guest to program periodic timers with a small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (apic_lvtt_period(apic)) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (apic->lapic_timer.period < min_period) {
+ pr_info_ratelimited(
+ "kvm: vcpu %i: requested %lld ns "
+ "lapic timer period limited to %lld ns\n",
+ apic->vcpu->vcpu_id,
+ apic->lapic_timer.period, min_period);
+ apic->lapic_timer.period = min_period;
+ }
+ }
+
+ apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+ PRIx64 ", "
+ "timer initial count 0x%x, period %lldns, "
+ "expire @ 0x%016" PRIx64 ".\n", __func__,
+ APIC_BUS_CYCLE_NS, ktime_to_ns(now),
+ kvm_lapic_get_reg(apic, APIC_TMICT),
+ apic->lapic_timer.period,
+ ktime_to_ns(ktime_add_ns(now,
+ apic->lapic_timer.period)));
+
+ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
+ nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+ apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
+
+ return true;
+}
+
+static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+{
+ apic->lapic_timer.tscdeadline +=
+ nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+ apic->lapic_timer.target_expiration =
+ ktime_add_ns(apic->lapic_timer.target_expiration,
+ apic->lapic_timer.period);
+}
+
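advance_periodic_target_expiration() steps the deadline forward from the previous target rather than from the current time, so any latency in handling one expiry does not accumulate as drift. A small standalone sketch of that idea in plain nanoseconds, leaving out the kernel's ktime/TSC plumbing:

#include <stdint.h>
#include <stdio.h>

struct periodic_timer {
	uint64_t period_ns;
	uint64_t target_ns;	/* absolute time of the next expiry */
};

/* Advance from the previous target, not from the current time. */
static void advance_target(struct periodic_timer *t)
{
	t->target_ns += t->period_ns;
}

int main(void)
{
	struct periodic_timer t = { .period_ns = 1000000, .target_ns = 5000000 };
	uint64_t now = 5000300;	/* the expiry was handled 300 ns late */

	(void)now;
	advance_target(&t);
	/* next target is 6000000, not now + period = 6000300 */
	printf("next expiry at %llu ns\n", (unsigned long long)t.target_ns);
	return 0;
}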
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
@@ -1356,52 +1432,59 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
-static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
+static void cancel_hv_timer(struct kvm_lapic *apic)
{
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}
-void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- WARN_ON(swait_active(&vcpu->wq));
- cancel_hv_tscdeadline(apic);
- apic_timer_expired(apic);
-}
-EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
-
-static bool start_hv_tscdeadline(struct kvm_lapic *apic)
+static bool start_hv_timer(struct kvm_lapic *apic)
{
u64 tscdeadline = apic->lapic_timer.tscdeadline;
- if (atomic_read(&apic->lapic_timer.pending) ||
+ if ((atomic_read(&apic->lapic_timer.pending) &&
+ !apic_lvtt_period(apic)) ||
kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_tscdeadline(apic);
+ cancel_hv_timer(apic);
} else {
apic->lapic_timer.hv_timer_in_use = true;
hrtimer_cancel(&apic->lapic_timer.timer);
/* In case the sw timer triggered in the window */
- if (atomic_read(&apic->lapic_timer.pending))
- cancel_hv_tscdeadline(apic);
+ if (atomic_read(&apic->lapic_timer.pending) &&
+ !apic_lvtt_period(apic))
+ cancel_hv_timer(apic);
}
trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
apic->lapic_timer.hv_timer_in_use);
return apic->lapic_timer.hv_timer_in_use;
}
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ WARN_ON(swait_active(&vcpu->wq));
+ cancel_hv_timer(apic);
+ apic_timer_expired(apic);
+
+ if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+ advance_periodic_target_expiration(apic);
+ if (!start_hv_timer(apic))
+ start_sw_period(apic);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
WARN_ON(apic->lapic_timer.hv_timer_in_use);
- if (apic_lvtt_tscdeadline(apic))
- start_hv_tscdeadline(apic);
+ start_hv_timer(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
@@ -1413,62 +1496,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
if (!apic->lapic_timer.hv_timer_in_use)
return;
- cancel_hv_tscdeadline(apic);
+ cancel_hv_timer(apic);
if (atomic_read(&apic->lapic_timer.pending))
return;
- start_sw_tscdeadline(apic);
+ if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+ start_sw_period(apic);
+ else if (apic_lvtt_tscdeadline(apic))
+ start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
static void start_apic_timer(struct kvm_lapic *apic)
{
- ktime_t now;
-
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
- /* lapic timer in oneshot or periodic mode */
- now = apic->lapic_timer.timer.base->get_time();
- apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
- * APIC_BUS_CYCLE_NS * apic->divide_count;
-
- if (!apic->lapic_timer.period)
- return;
- /*
- * Do not allow the guest to program periodic timers with small
- * interval, since the hrtimers are not throttled by the host
- * scheduler.
- */
- if (apic_lvtt_period(apic)) {
- s64 min_period = min_timer_period_us * 1000LL;
-
- if (apic->lapic_timer.period < min_period) {
- pr_info_ratelimited(
- "kvm: vcpu %i: requested %lld ns "
- "lapic timer period limited to %lld ns\n",
- apic->vcpu->vcpu_id,
- apic->lapic_timer.period, min_period);
- apic->lapic_timer.period = min_period;
- }
- }
-
- hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, apic->lapic_timer.period),
- HRTIMER_MODE_ABS_PINNED);
-
- apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
- PRIx64 ", "
- "timer initial count 0x%x, period %lldns, "
- "expire @ 0x%016" PRIx64 ".\n", __func__,
- APIC_BUS_CYCLE_NS, ktime_to_ns(now),
- kvm_lapic_get_reg(apic, APIC_TMICT),
- apic->lapic_timer.period,
- ktime_to_ns(ktime_add_ns(now,
- apic->lapic_timer.period)));
+ if (set_target_expiration(apic) &&
+ !(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
+ start_sw_period(apic);
} else if (apic_lvtt_tscdeadline(apic)) {
- if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
+ if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
start_sw_tscdeadline(apic);
}
}
@@ -1701,13 +1750,22 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
* LAPIC interface
*----------------------------------------------------------------------
*/
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (!lapic_in_kernel(vcpu))
+ return 0;
+
+ return apic->lapic_timer.tscdeadline;
+}
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
- apic_lvtt_period(apic))
+ if (!lapic_in_kernel(vcpu) ||
+ !apic_lvtt_tscdeadline(apic))
return 0;
return apic->lapic_timer.tscdeadline;
@@ -1748,14 +1806,17 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!apic) {
+ if (!apic)
value |= MSR_IA32_APICBASE_BSP;
- vcpu->arch.apic_base = value;
- return;
- }
vcpu->arch.apic_base = value;
+ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
+ kvm_update_cpuid(vcpu);
+
+ if (!apic)
+ return;
+
/* update jump label if enable bit changes */
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE) {
@@ -1909,6 +1970,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
apic_timer_expired(apic);
if (lapic_is_periodic(apic)) {
+ advance_periodic_target_expiration(apic);
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
return HRTIMER_RESTART;
} else
@@ -1993,6 +2055,10 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
kvm_apic_local_deliver(apic, APIC_LVTT);
if (apic_lvtt_tscdeadline(apic))
apic->lapic_timer.tscdeadline = 0;
+ if (apic_lvtt_oneshot(apic)) {
+ apic->lapic_timer.tscdeadline = 0;
+ apic->lapic_timer.target_expiration = ktime_set(0, 0);
+ }
atomic_set(&apic->lapic_timer.pending, 0);
}
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index f60d01c29d51..e0c80233b3e1 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -15,6 +15,7 @@
struct kvm_timer {
struct hrtimer timer;
s64 period; /* unit: ns */
+ ktime_t target_expiration;
u32 timer_mode;
u32 timer_mode_mask;
u64 tscdeadline;
@@ -85,6 +86,7 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e986b4e4..7012de4a1fed 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1660,17 +1660,9 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
* This has some overhead, but not as much as the cost of swapping
* out actively used pages or breaking up actively used hugepages.
*/
- if (!shadow_accessed_mask) {
- /*
- * We are holding the kvm->mmu_lock, and we are blowing up
- * shadow PTEs. MMU notifier consumers need to be kept at bay.
- * This is correct as long as we don't decouple the mmu_lock
- * protected regions (like invalidate_range_start|end does).
- */
- kvm->mmu_notifier_seq++;
+ if (!shadow_accessed_mask)
return kvm_handle_hva_range(kvm, start, end, 0,
kvm_unmap_rmapp);
- }
return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}
@@ -4405,7 +4397,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ const u8 *new, int bytes,
+ struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@@ -4508,7 +4501,7 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
int r, emulation_type = EMULTYPE_RETRY;
@@ -4527,12 +4520,28 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
return r;
}
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+ false);
if (r < 0)
return r;
if (!r)
return 1;
+ /*
+ * Before emulating the instruction, check if the error code
+ * was due to a RO violation while translating the guest page.
+ * This can occur when using nested virtualization with nested
+ * paging in both guests. If true, we simply unprotect the page
+ * and resume the guest.
+ *
+ * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+ * in PFERR_NESTED_GUEST_PAGE)
+ */
+ if (error_code == PFERR_NESTED_GUEST_PAGE) {
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+ return 1;
+ }
+
if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
emulate:
@@ -4617,11 +4626,19 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
init_kvm_mmu(vcpu);
}
+static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+}
+
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
+ node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}
@@ -4958,7 +4975,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
* zap all shadow pages.
*/
if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
- printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539c3714..4a1c13eaa518 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -106,6 +106,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm_flush_remote_tlbs(kvm);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
/*
* remove the guest page from the tracking pool which stops the interception
@@ -135,6 +136,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
@@ -181,6 +183,7 @@ kvm_page_track_register_notifier(struct kvm *kvm,
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
/*
* stop receiving the event interception. It is the opposed operation of
@@ -199,6 +202,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
/*
* Notify the node that write access is intercepted and write emulation is
@@ -222,6 +226,31 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
- n->track_write(vcpu, gpa, new, bytes);
+ n->track_write(vcpu, gpa, new, bytes, n);
+ srcu_read_unlock(&head->track_srcu, idx);
+}
+
+/*
+ * Notify the node that the memory slot is being removed or moved so that it
+ * can drop write-protection for the pages in the memory slot.
+ *
+ * The node should figure out by itself whether it has any write-protected
+ * pages in this slot.
+ */
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_page_track_notifier_head *head;
+ struct kvm_page_track_notifier_node *n;
+ int idx;
+
+ head = &kvm->arch.track_notifier_head;
+
+ if (hlist_empty(&head->track_notifier_list))
+ return;
+
+ idx = srcu_read_lock(&head->track_srcu);
+ hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+ if (n->track_flush_slot)
+ n->track_flush_slot(kvm, slot, n);
srcu_read_unlock(&head->track_srcu, idx);
}
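kvm_page_track_flush_slot() above walks the notifier list and calls a node's hook only if it registered one; track_write and track_flush_slot are both optional. A minimal sketch of that optional-callback shape, with the SRCU protection left out and illustrative type names:

#include <stddef.h>

struct ex_slot;	/* opaque, stands in for kvm_memory_slot */

struct ex_track_node {
	struct ex_track_node *next;
	/* Each callback may be NULL if the user doesn't care about the event. */
	void (*track_write)(struct ex_track_node *n, unsigned long gpa,
			    const void *new, int bytes);
	void (*track_flush_slot)(struct ex_track_node *n, struct ex_slot *slot);
};

static void ex_flush_slot(struct ex_track_node *head, struct ex_slot *slot)
{
	struct ex_track_node *n;

	for (n = head; n; n = n->next)
		if (n->track_flush_slot)
			n->track_flush_slot(n, slot);
}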
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8157a36ab09..08a4d3ab3455 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- svm->vmcb->control.tsc_offset += adjustment;
- if (is_guest_mode(vcpu))
- svm->nested.hsave->control.tsc_offset += adjustment;
- else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset - adjustment,
- svm->vmcb->control.tsc_offset);
-
- mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
static void avic_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb;
@@ -2089,7 +2074,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = svm->vmcb->control.exit_info_2;
- u32 error_code;
+ u64 error_code;
int r = 1;
switch (svm->apf_reason) {
@@ -2285,7 +2270,7 @@ static int io_interception(struct vcpu_svm *svm)
++svm->vcpu.stat.io_exits;
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
- if (string || in)
+ if (string)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
@@ -2293,7 +2278,8 @@ static int io_interception(struct vcpu_svm *svm)
svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
- return kvm_fast_pio_out(vcpu, size, port);
+ return in ? kvm_fast_pio_in(vcpu, size, port)
+ : kvm_fast_pio_out(vcpu, size, port);
}
static int nmi_interception(struct vcpu_svm *svm)
@@ -3165,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
static int wbinvd_interception(struct vcpu_svm *svm)
{
- kvm_emulate_wbinvd(&svm->vcpu);
- return 1;
+ return kvm_emulate_wbinvd(&svm->vcpu);
}
static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3253,8 +3238,7 @@ static int task_switch_interception(struct vcpu_svm *svm)
static int cpuid_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
- kvm_emulate_cpuid(&svm->vcpu);
- return 1;
+ return kvm_emulate_cpuid(&svm->vcpu);
}
static int iret_interception(struct vcpu_svm *svm)
@@ -3290,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);
err = kvm_rdpmc(&svm->vcpu);
- kvm_complete_insn_gp(&svm->vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3389,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
}
kvm_register_write(&svm->vcpu, reg, val);
}
- kvm_complete_insn_gp(&svm->vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static int dr_interception(struct vcpu_svm *svm)
@@ -3449,12 +3429,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
return 0;
}
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
- struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
- return vmcb->control.tsc_offset + host_tsc;
-}
-
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5396,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
.write_tsc_offset = svm_write_tsc_offset,
- .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
- .read_l1_tsc = svm_read_l1_tsc,
.set_tdp_cr3 = set_tdp_cr3,
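
The svm.c hunks above share one conversion: exit handlers no longer return a hard-coded 1 but propagate the value of kvm_complete_insn_gp()/kvm_emulate_*(), which in this series may return 0 when a single-step KVM_EXIT_DEBUG has to reach userspace. A minimal sketch of the resulting handler shape, assuming svm.c's usual context; demo_emulate() is a hypothetical helper.

static int demo_interception(struct vcpu_svm *svm)
{
	int err = demo_emulate(&svm->vcpu);	/* hypothetical emulation step */

	/*
	 * Injects #GP when err != 0; otherwise skips the instruction and
	 * returns 0 if a debug exit must be delivered to userspace.
	 */
	return kvm_complete_insn_gp(&svm->vcpu, err);
}
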
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf1b16dbc98a..aae43c6f2472 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,6 +132,22 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
+/*
+ * Hyper-V requires all of these, so mark them as supported even though
+ * they are just treated the same as all-context.
+ */
+#define VMX_VPID_EXTENT_SUPPORTED_MASK \
+ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -187,6 +203,7 @@ struct vmcs {
*/
struct loaded_vmcs {
struct vmcs *vmcs;
+ struct vmcs *shadow_vmcs;
int cpu;
int launched;
struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +428,6 @@ struct nested_vmx {
* memory during VMXOFF, VMCLEAR, VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
- struct vmcs *current_shadow_vmcs;
/*
* Indicates if the shadow vmcs must be updated with the
* data hold by vmcs12
@@ -421,7 +437,6 @@ struct nested_vmx {
/* vmcs02_list cache of VMCSs recently used to run L2 guests */
struct list_head vmcs02_pool;
int vmcs02_num;
- u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
@@ -447,23 +462,31 @@ struct nested_vmx {
u16 vpid02;
u16 last_vpid;
+ /*
+ * We only store the "true" versions of the VMX capability MSRs. We
+ * generate the "non-true" versions by setting the must-be-1 bits
+ * according to the SDM.
+ */
u32 nested_vmx_procbased_ctls_low;
u32 nested_vmx_procbased_ctls_high;
- u32 nested_vmx_true_procbased_ctls_low;
u32 nested_vmx_secondary_ctls_low;
u32 nested_vmx_secondary_ctls_high;
u32 nested_vmx_pinbased_ctls_low;
u32 nested_vmx_pinbased_ctls_high;
u32 nested_vmx_exit_ctls_low;
u32 nested_vmx_exit_ctls_high;
- u32 nested_vmx_true_exit_ctls_low;
u32 nested_vmx_entry_ctls_low;
u32 nested_vmx_entry_ctls_high;
- u32 nested_vmx_true_entry_ctls_low;
u32 nested_vmx_misc_low;
u32 nested_vmx_misc_high;
u32 nested_vmx_ept_caps;
u32 nested_vmx_vpid_caps;
+ u64 nested_vmx_basic;
+ u64 nested_vmx_cr0_fixed0;
+ u64 nested_vmx_cr0_fixed1;
+ u64 nested_vmx_cr4_fixed0;
+ u64 nested_vmx_cr4_fixed1;
+ u64 nested_vmx_vmcs_enum;
};
#define POSTED_INTR_ON 0
@@ -521,6 +544,12 @@ static inline void pi_set_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+static inline void pi_clear_on(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
@@ -921,16 +950,32 @@ static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
-static unsigned long *vmx_io_bitmap_a;
-static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-static unsigned long *vmx_vmread_bitmap;
-static unsigned long *vmx_vmwrite_bitmap;
+enum {
+ VMX_IO_BITMAP_A,
+ VMX_IO_BITMAP_B,
+ VMX_MSR_BITMAP_LEGACY,
+ VMX_MSR_BITMAP_LONGMODE,
+ VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
+ VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
+ VMX_MSR_BITMAP_LEGACY_X2APIC,
+ VMX_MSR_BITMAP_LONGMODE_X2APIC,
+ VMX_VMREAD_BITMAP,
+ VMX_VMWRITE_BITMAP,
+ VMX_BITMAP_NR
+};
+
+static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
+
+#define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A])
+#define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B])
+#define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
+#define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
+#define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
+#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
+#define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
+#define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
+#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
+#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;
@@ -1419,6 +1464,8 @@ static void vmcs_clear(struct vmcs *vmcs)
static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
loaded_vmcs->cpu = -1;
loaded_vmcs->launched = 0;
}
@@ -2144,12 +2191,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
- /*
- * If the FPU is not active (through the host task or
- * the guest vcpu), then restore the cr0.TS bit.
- */
- if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
- stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
@@ -2528,14 +2569,14 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
} else {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
}
} else {
if (is_long_mode(vcpu))
@@ -2605,20 +2646,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
}
/*
- * Like guest_read_tsc, but always returns L1's notion of the timestamp
- * counter, even if a nested guest (L2) is currently running.
- */
-static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
- u64 tsc_offset;
-
- tsc_offset = is_guest_mode(vcpu) ?
- to_vmx(vcpu)->nested.vmcs01_tsc_offset :
- vmcs_read64(TSC_OFFSET);
- return host_tsc + tsc_offset;
-}
-
-/*
* writes 'offset' into guest's timestamp counter offset register
*/
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -2631,7 +2658,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
* to the newly set TSC to get L2's TSC.
*/
struct vmcs12 *vmcs12;
- to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
/* recalculate vmcs02.TSC_OFFSET: */
vmcs12 = get_vmcs12(vcpu);
vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2670,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
}
}
-static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
- u64 offset = vmcs_read64(TSC_OFFSET);
-
- vmcs_write64(TSC_OFFSET, offset + adjustment);
- if (is_guest_mode(vcpu)) {
- /* Even when running L2, the adjustment needs to apply to L1 */
- to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
- } else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
- offset + adjustment);
-}
-
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -2739,9 +2752,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
- vmx->nested.nested_vmx_true_exit_ctls_low =
- vmx->nested.nested_vmx_exit_ctls_low &
- ~VM_EXIT_SAVE_DEBUG_CONTROLS;
+ vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2760,9 +2771,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
- vmx->nested.nested_vmx_true_entry_ctls_low =
- vmx->nested.nested_vmx_entry_ctls_low &
- ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+ vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
@@ -2795,8 +2804,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
- vmx->nested.nested_vmx_true_procbased_ctls_low =
- vmx->nested.nested_vmx_procbased_ctls_low &
+ vmx->nested.nested_vmx_procbased_ctls_low &=
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/* secondary cpu-based controls */
@@ -2807,6 +2815,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
+ SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
@@ -2838,8 +2847,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
*/
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
- VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
- VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ VMX_VPID_EXTENT_SUPPORTED_MASK;
else
vmx->nested.nested_vmx_vpid_caps = 0;
@@ -2856,14 +2864,52 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
vmx->nested.nested_vmx_misc_high = 0;
+
+ /*
+ * This MSR reports some information about VMX support. We
+ * should return information about the VMX we emulate for the
+ * guest, and the VMCS structure we give it - not about the
+ * VMX support of the underlying hardware.
+ */
+ vmx->nested.nested_vmx_basic =
+ VMCS12_REVISION |
+ VMX_BASIC_TRUE_CTLS |
+ ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+ (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+
+ if (cpu_has_vmx_basic_inout())
+ vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
+
+ /*
+ * These MSRs specify bits which the guest must keep fixed on
+ * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+ * We picked the standard core2 setting.
+ */
+#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
+ vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
+ vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
+
+ /* These MSRs specify bits which the guest must keep fixed off. */
+ rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
+ rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
+
+ /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+ vmx->nested.nested_vmx_vmcs_enum = 0x2e;
+}
+
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+ return ((val & fixed1) | fixed0) == val;
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
- /*
- * Bits 0 in high must be 0, and bits 1 in low must be 1.
- */
- return ((control & high) | low) == control;
+ return fixed_bits_valid(control, low, high);
}
static inline u64 vmx_control_msr(u32 low, u32 high)
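
A tiny worked example of the fixed-bit rule that fixed_bits_valid() encodes, with illustrative values only (not taken from the patch): fixed0 = 0x05 marks bits that must be 1, fixed1 = 0x0d marks bits that may be 1.

/* Sketch only: demo of ((val & fixed1) | fixed0) == val. */
static inline bool demo_fixed_ok(u64 val)
{
	const u64 fixed0 = 0x05;	/* must-be-1 bits */
	const u64 fixed1 = 0x0d;	/* allowed-1 bits */

	return ((val & fixed1) | fixed0) == val;
}
/* demo_fixed_ok(0x0d) is true; demo_fixed_ok(0x07) is false (bit 1 is not allowed-1). */
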
@@ -2871,87 +2917,285 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
return low | ((u64)high << 32);
}
-/* Returns 0 on success, non-0 otherwise. */
-static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
+{
+ superset &= mask;
+ subset &= mask;
+
+ return (superset | subset) == superset;
+}
+
+static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved =
+ /* feature (except bit 48; see below) */
+ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
+ /* reserved */
+ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+ u64 vmx_basic = vmx->nested.nested_vmx_basic;
+
+ if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+ return -EINVAL;
+
+ /*
+ * KVM does not emulate a version of VMX that constrains physical
+ * addresses of VMX structures (e.g. VMCS) to 32-bits.
+ */
+ if (data & BIT_ULL(48))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_revision_id(vmx_basic) !=
+ vmx_basic_vmcs_revision_id(data))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_basic = data;
+ return 0;
+}
+
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 supported;
+ u32 *lowp, *highp;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
+ highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
+ highp = &vmx->nested.nested_vmx_procbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ lowp = &vmx->nested.nested_vmx_exit_ctls_low;
+ highp = &vmx->nested.nested_vmx_exit_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ lowp = &vmx->nested.nested_vmx_entry_ctls_low;
+ highp = &vmx->nested.nested_vmx_entry_ctls_high;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
+ highp = &vmx->nested.nested_vmx_secondary_ctls_high;
+ break;
+ default:
+ BUG();
+ }
+
+ supported = vmx_control_msr(*lowp, *highp);
+
+ /* Check must-be-1 bits are still 1. */
+ if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
+ return -EINVAL;
+
+ /* Check must-be-0 bits are still 0. */
+ if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
+ return -EINVAL;
+
+ *lowp = data;
+ *highp = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved_bits =
+ /* feature */
+ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
+ BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
+ /* reserved */
+ GENMASK_ULL(13, 9) | BIT_ULL(31);
+ u64 vmx_misc;
+
+ vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
+ vmx->nested.nested_vmx_misc_high);
+
+ if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+ return -EINVAL;
+
+ if ((vmx->nested.nested_vmx_pinbased_ctls_high &
+ PIN_BASED_VMX_PREEMPTION_TIMER) &&
+ vmx_misc_preemption_timer_rate(data) !=
+ vmx_misc_preemption_timer_rate(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_misc_low = data;
+ vmx->nested.nested_vmx_misc_high = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
+{
+ u64 vmx_ept_vpid_cap;
+
+ vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
+ vmx->nested.nested_vmx_vpid_caps);
+
+ /* Every bit is either reserved or a feature bit. */
+ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
+ return -EINVAL;
+
+ vmx->nested.nested_vmx_ept_caps = data;
+ vmx->nested.nested_vmx_vpid_caps = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 *msr;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_CR0_FIXED0:
+ msr = &vmx->nested.nested_vmx_cr0_fixed0;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ msr = &vmx->nested.nested_vmx_cr4_fixed0;
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * 1 bits (which indicate bits that "must-be-1" during VMX operation)
+ * must be 1 in the restored value.
+ */
+ if (!is_bitwise_subset(data, *msr, -1ULL))
+ return -EINVAL;
+
+ *msr = data;
+ return 0;
+}
+
+/*
+ * Called when userspace is restoring VMX MSRs.
+ *
+ * Returns 0 on success, non-0 otherwise.
+ */
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
switch (msr_index) {
case MSR_IA32_VMX_BASIC:
+ return vmx_restore_vmx_basic(vmx, data);
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ /*
+ * The "non-true" VMX capability MSRs are generated from the
+ * "true" MSRs, so we do not support restoring them directly.
+ *
+ * If userspace wants to emulate VMX_BASIC[55]=0, userspace
+ * should restore the "true" MSRs with the must-be-1 bits
+ * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
+ * DEFAULT SETTINGS".
+ */
+ return -EINVAL;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ return vmx_restore_control_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_MISC:
+ return vmx_restore_vmx_misc(vmx, data);
+ case MSR_IA32_VMX_CR0_FIXED0:
+ case MSR_IA32_VMX_CR4_FIXED0:
+ return vmx_restore_fixed0_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_CR0_FIXED1:
+ case MSR_IA32_VMX_CR4_FIXED1:
+ /*
+ * These MSRs are generated based on the vCPU's CPUID, so we
+ * do not support restoring them directly.
+ */
+ return -EINVAL;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ return vmx_restore_vmx_ept_vpid_cap(vmx, data);
+ case MSR_IA32_VMX_VMCS_ENUM:
+ vmx->nested.nested_vmx_vmcs_enum = data;
+ return 0;
+ default:
/*
- * This MSR reports some information about VMX support. We
- * should return information about the VMX we emulate for the
- * guest, and the VMCS structure we give it - not about the
- * VMX support of the underlying hardware.
+ * The rest of the VMX capability MSRs do not support restore.
*/
- *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
- ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
- (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
- if (cpu_has_vmx_basic_inout())
- *pdata |= VMX_BASIC_INOUT;
+ return -EINVAL;
+ }
+}
+
+/* Returns 0 on success, non-0 otherwise. */
+static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_BASIC:
+ *pdata = vmx->nested.nested_vmx_basic;
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
+ *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_procbased_ctls_low,
- vmx->nested.nested_vmx_procbased_ctls_high);
- break;
case MSR_IA32_VMX_PROCBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
+ *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_exit_ctls_low,
- vmx->nested.nested_vmx_exit_ctls_high);
- break;
case MSR_IA32_VMX_EXIT_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
+ if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
+ *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- *pdata = vmx_control_msr(
- vmx->nested.nested_vmx_true_entry_ctls_low,
- vmx->nested.nested_vmx_entry_ctls_high);
- break;
case MSR_IA32_VMX_ENTRY_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
+ if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
+ *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
break;
case MSR_IA32_VMX_MISC:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_misc_low,
vmx->nested.nested_vmx_misc_high);
break;
- /*
- * These MSRs specify bits which the guest must keep fixed (on or off)
- * while L1 is in VMXON mode (in L1's root mode, or running an L2).
- * We picked the standard core2 setting.
- */
-#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
-#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
case MSR_IA32_VMX_CR0_FIXED0:
- *pdata = VMXON_CR0_ALWAYSON;
+ *pdata = vmx->nested.nested_vmx_cr0_fixed0;
break;
case MSR_IA32_VMX_CR0_FIXED1:
- *pdata = -1ULL;
+ *pdata = vmx->nested.nested_vmx_cr0_fixed1;
break;
case MSR_IA32_VMX_CR4_FIXED0:
- *pdata = VMXON_CR4_ALWAYSON;
+ *pdata = vmx->nested.nested_vmx_cr4_fixed0;
break;
case MSR_IA32_VMX_CR4_FIXED1:
- *pdata = -1ULL;
+ *pdata = vmx->nested.nested_vmx_cr4_fixed1;
break;
case MSR_IA32_VMX_VMCS_ENUM:
- *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+ *pdata = vmx->nested.nested_vmx_vmcs_enum;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
*pdata = vmx_control_msr(
@@ -3134,7 +3378,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx_leave_nested(vcpu);
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- return 1; /* they are read-only */
+ if (!msr_info->host_initiated)
+ return 1; /* they are read-only */
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_XSS:
if (!vmx_xsaves_supported())
return 1;
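
Putting the last two hunks together: userspace can only restore the "true" capability MSRs, and only via host-initiated writes, while reads of the "non-true" variants are generated from the stored "true" values. A condensed sketch of that derivation, assuming vmx.c's context; the helper name is hypothetical.

/* Sketch only: how a "non-true" pin-based read is derived from the "true" value. */
static u64 demo_pinbased_ctls(struct vcpu_vmx *vmx, bool true_msr)
{
	u64 val = vmx_control_msr(vmx->nested.nested_vmx_pinbased_ctls_low,
				  vmx->nested.nested_vmx_pinbased_ctls_high);

	if (!true_msr)
		val |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;	/* SDM default-1 class */
	return val;
}
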
@@ -3562,6 +3810,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}
static void free_kvm_area(void)
@@ -3895,6 +4144,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_dirty);
}
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+ fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid nested_cr4_valid
+#define nested_host_cr4_valid nested_cr4_valid
+
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -4023,8 +4306,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!nested_vmx_allowed(vcpu))
return 1;
}
- if (to_vmx(vcpu)->nested.vmxon &&
- ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+ if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
return 1;
vcpu->arch.cr4 = cr4;
@@ -4601,41 +4884,6 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
}
}
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R)
- /* read-low */
- __set_bit(msr, msr_bitmap + 0x000 / f);
-
- if (type & MSR_TYPE_W)
- /* write-low */
- __set_bit(msr, msr_bitmap + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R)
- /* read-high */
- __set_bit(msr, msr_bitmap + 0x400 / f);
-
- if (type & MSR_TYPE_W)
- /* write-high */
- __set_bit(msr, msr_bitmap + 0xc00 / f);
-
- }
-}
-
/*
* If a msr is allowed by L0, we should check whether it is allowed by L1.
* The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -4691,48 +4939,18 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
msr, MSR_TYPE_R | MSR_TYPE_W);
}
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
+static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
{
if (apicv_active) {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
- } else {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- }
-}
-
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
-{
- if (apicv_active) {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
+ msr, type);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
+ msr, type);
} else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_R);
- }
-}
-
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
-{
- if (apicv_active) {
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
+ msr, type);
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
- } else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
- msr, MSR_TYPE_W);
+ msr, type);
}
}
@@ -4854,9 +5072,15 @@ static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!pi_test_and_clear_on(&vmx->pi_desc))
+ if (!pi_test_on(&vmx->pi_desc))
return;
+ pi_clear_on(&vmx->pi_desc);
+ /*
+ * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+ * But on x86 this is just a compiler barrier anyway.
+ */
+ smp_mb__after_atomic();
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
}
@@ -4871,9 +5095,11 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
u32 low32, high32;
unsigned long tmpl;
struct desc_ptr dt;
- unsigned long cr4;
+ unsigned long cr0, cr4;
- vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
+ cr0 = read_cr0();
+ WARN_ON(cr0 & X86_CR0_TS);
+ vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
/* Save the most likely value for this task's CR4 in the VMCS. */
@@ -5613,7 +5839,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
static int handle_io(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- int size, in, string;
+ int size, in, string, ret;
unsigned port;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5627,9 +5853,14 @@ static int handle_io(struct kvm_vcpu *vcpu)
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
- skip_emulated_instruction(vcpu);
- return kvm_fast_pio_out(vcpu, size, port);
+ ret = kvm_skip_emulated_instruction(vcpu);
+
+ /*
+ * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_fast_pio_out(vcpu, size, port) && ret;
}
static void
@@ -5643,18 +5874,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- unsigned long always_on = VMXON_CR0_ALWAYSON;
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
- SECONDARY_EXEC_UNRESTRICTED_GUEST &&
- nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
- always_on &= ~(X86_CR0_PE | X86_CR0_PG);
- return (val & always_on) == always_on;
-}
-
/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
@@ -5673,7 +5892,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
- if (!nested_cr0_valid(vcpu, val))
+ if (!nested_guest_cr0_valid(vcpu, val))
return 1;
if (kvm_set_cr0(vcpu, val))
@@ -5682,8 +5901,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
return 0;
} else {
if (to_vmx(vcpu)->nested.vmxon &&
- ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+ !nested_host_cr0_valid(vcpu, val))
return 1;
+
return kvm_set_cr0(vcpu, val);
}
}
@@ -5727,6 +5947,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
int cr;
int reg;
int err;
+ int ret;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
@@ -5738,25 +5959,27 @@ static int handle_cr(struct kvm_vcpu *vcpu)
switch (cr) {
case 0:
err = handle_set_cr0(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 3:
err = kvm_set_cr3(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 4:
err = handle_set_cr4(vcpu, val);
- kvm_complete_insn_gp(vcpu, err);
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = (u8)val;
err = kvm_set_cr8(vcpu, cr8);
- kvm_complete_insn_gp(vcpu, err);
+ ret = kvm_complete_insn_gp(vcpu, err);
if (lapic_in_kernel(vcpu))
- return 1;
+ return ret;
if (cr8_prev <= cr8)
- return 1;
+ return ret;
+ /*
+ * TODO: we might be squashing a
+ * KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
@@ -5765,23 +5988,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
case 2: /* clts */
handle_clts(vcpu);
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
- skip_emulated_instruction(vcpu);
vmx_fpu_activate(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
case 1: /*mov from cr*/
switch (cr) {
case 3:
val = kvm_read_cr3(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
case 8:
val = kvm_get_cr8(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
case 3: /* lmsw */
@@ -5789,8 +6009,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
kvm_lmsw(vcpu, val);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
default:
break;
}
@@ -5861,8 +6080,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
return 1;
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
@@ -5894,8 +6112,7 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
- kvm_emulate_cpuid(vcpu);
- return 1;
+ return kvm_emulate_cpuid(vcpu);
}
static int handle_rdmsr(struct kvm_vcpu *vcpu)
@@ -5916,8 +6133,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_wrmsr(struct kvm_vcpu *vcpu)
@@ -5937,8 +6153,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
}
trace_kvm_msr_write(ecx, data);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
@@ -5982,8 +6197,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
kvm_mmu_invlpg(vcpu, exit_qualification);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_rdpmc(struct kvm_vcpu *vcpu)
@@ -5991,15 +6205,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
int err;
err = kvm_rdpmc(vcpu);
- kvm_complete_insn_gp(vcpu, err);
-
- return 1;
+ return kvm_complete_insn_gp(vcpu, err);
}
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
- kvm_emulate_wbinvd(vcpu);
- return 1;
+ return kvm_emulate_wbinvd(vcpu);
}
static int handle_xsetbv(struct kvm_vcpu *vcpu)
@@ -6008,20 +6219,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
- skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
return 1;
}
static int handle_xsaves(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
+ kvm_skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
static int handle_xrstors(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
+ kvm_skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
@@ -6042,8 +6253,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
(offset == APIC_EOI)) {
kvm_lapic_set_eoi(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
}
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -6191,9 +6401,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6378,50 +6587,13 @@ static __init int hardware_setup(void)
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
kvm_define_shared_msr(i, vmx_msr_index[i]);
- vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_a)
- return r;
+ for (i = 0; i < VMX_BITMAP_NR; i++) {
+ vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_bitmap[i])
+ goto out;
+ }
vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_b)
- goto out;
-
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
- goto out3;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out4;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out5;
-
- vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
- goto out6;
-
- vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmread_bitmap)
- goto out7;
-
- vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmwrite_bitmap)
- goto out8;
-
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -6439,7 +6611,7 @@ static __init int hardware_setup(void)
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out9;
+ goto out;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -6502,39 +6674,34 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
- memcpy(vmx_msr_bitmap_legacy_x2apic,
+ memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
+ memcpy(vmx_msr_bitmap_longmode_x2apic_apicv,
vmx_msr_bitmap_longmode, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+ for (msr = 0x800; msr <= 0x8ff; msr++) {
+ if (msr == 0x839 /* TMCCT */)
+ continue;
+ vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true);
+ }
+
/*
- * enable_apicv && kvm_vcpu_apicv_active()
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
*/
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr, true);
+ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true);
+ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839, true);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808, true);
/* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b, true);
+ vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true);
/* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f, true);
-
- /*
- * (enable_apicv && !kvm_vcpu_apicv_active()) ||
- * !enable_apicv
- */
- /* TPR */
- vmx_disable_intercept_msr_read_x2apic(0x808, false);
- vmx_disable_intercept_msr_write_x2apic(0x808, false);
+ vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
if (enable_ept) {
kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
@@ -6581,42 +6748,19 @@ static __init int hardware_setup(void)
return alloc_kvm_area();
-out9:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out8:
- free_page((unsigned long)vmx_vmread_bitmap);
-out7:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-out6:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
- free_page((unsigned long)vmx_io_bitmap_b);
out:
- free_page((unsigned long)vmx_io_bitmap_a);
+ for (i = 0; i < VMX_BITMAP_NR; i++)
+ free_page((unsigned long)vmx_bitmap[i]);
return r;
}
static __exit void hardware_unsetup(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
- free_page((unsigned long)vmx_io_bitmap_b);
- free_page((unsigned long)vmx_io_bitmap_a);
- free_page((unsigned long)vmx_vmwrite_bitmap);
- free_page((unsigned long)vmx_vmread_bitmap);
+ int i;
+
+ for (i = 0; i < VMX_BITMAP_NR; i++)
+ free_page((unsigned long)vmx_bitmap[i]);
free_kvm_area();
}
@@ -6630,16 +6774,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
if (ple_gap)
grow_ple_window(vcpu);
- skip_emulated_instruction(vcpu);
kvm_vcpu_on_spin(vcpu);
-
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_nop(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_mwait(struct kvm_vcpu *vcpu)
@@ -6696,6 +6837,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
+ item->vmcs02.shadow_vmcs = NULL;
if (!item->vmcs02.vmcs) {
kfree(item);
return NULL;
@@ -6945,8 +7087,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
page = nested_get_page(vcpu, vmptr);
@@ -6954,8 +7095,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*(u32 *)kmap(page) != VMCS12_REVISION) {
nested_vmx_failInvalid(vcpu);
kunmap(page);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
kunmap(page);
vmx->nested.vmxon_ptr = vmptr;
@@ -6964,30 +7104,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
case EXIT_REASON_VMPTRLD:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
break;
default:
@@ -7043,8 +7179,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
@@ -7072,7 +7207,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
vmcs_clear(shadow_vmcs);
- vmx->nested.current_shadow_vmcs = shadow_vmcs;
+ vmx->vmcs01.shadow_vmcs = shadow_vmcs;
}
INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7084,9 +7219,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.vmxon = true;
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
out_shadow_vmcs:
kfree(vmx->nested.cached_vmcs12);
@@ -7174,8 +7308,11 @@ static void free_nested(struct vcpu_vmx *vmx)
free_page((unsigned long)vmx->nested.msr_bitmap);
vmx->nested.msr_bitmap = NULL;
}
- if (enable_shadow_vmcs)
- free_vmcs(vmx->nested.current_shadow_vmcs);
+ if (enable_shadow_vmcs) {
+ vmcs_clear(vmx->vmcs01.shadow_vmcs);
+ free_vmcs(vmx->vmcs01.shadow_vmcs);
+ vmx->vmcs01.shadow_vmcs = NULL;
+ }
kfree(vmx->nested.cached_vmcs12);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
@@ -7202,9 +7339,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
free_nested(to_vmx(vcpu));
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMCLEAR instruction */
@@ -7243,9 +7379,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
nested_free_vmcs02(vmx, vmptr);
- skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -7352,7 +7487,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
int i;
unsigned long field;
u64 field_value;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
const unsigned long *fields = shadow_read_write_fields;
const int num_fields = max_shadow_read_write_fields;
@@ -7401,7 +7536,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
int i, q;
unsigned long field;
u64 field_value = 0;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
vmcs_load(shadow_vmcs);
@@ -7443,7 +7578,6 @@ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->nested.current_vmptr == -1ull) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
return 0;
}
return 1;
@@ -7457,17 +7591,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (!nested_vmx_check_vmcs12(vcpu))
+ return kvm_skip_emulated_instruction(vcpu);
+
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/*
* Now copy part of this value to register or memory, as requested.
@@ -7487,8 +7622,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
@@ -7507,10 +7641,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
u64 field_value = 0;
struct x86_exception e;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (!nested_vmx_check_vmcs12(vcpu))
+ return kvm_skip_emulated_instruction(vcpu);
+
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
@@ -7530,19 +7666,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (vmcs_field_readonly(field)) {
nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMPTRLD instruction */
@@ -7563,8 +7696,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
new_vmcs12 = kmap(page);
if (new_vmcs12->revision_id != VMCS12_REVISION) {
@@ -7572,8 +7704,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
nested_release_page_clean(page);
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
nested_release_vmcs12(vmx);
@@ -7591,14 +7722,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
- __pa(vmx->nested.current_shadow_vmcs));
+ __pa(vmx->vmcs01.shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
}
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the VMPTRST instruction */
@@ -7623,8 +7753,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return 1;
}
nested_vmx_succeed(vcpu);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* Emulate the INVEPT instruction */
@@ -7659,11 +7788,10 @@ static int handle_invept(struct kvm_vcpu *vcpu)
types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* According to the Intel VMX instruction reference, the memory
@@ -7694,8 +7822,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
break;
}
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -7720,13 +7847,13 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
- types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
+ types = (vmx->nested.nested_vmx_vpid_caps &
+ VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- skip_emulated_instruction(vcpu);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
/* according to the intel vmx instruction reference, the memory
@@ -7742,23 +7869,26 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
}
switch (type) {
+ case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
- /*
- * Old versions of KVM use the single-context version so we
- * have to support it; just treat it the same as all-context.
- */
+ case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
+ if (!vpid) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ break;
case VMX_VPID_EXTENT_ALL_CONTEXT:
- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
- nested_vmx_succeed(vcpu);
break;
default:
- /* Trap individual address invalidation invvpid calls */
- BUG_ON(1);
- break;
+ WARN_ON_ONCE(1);
+ return kvm_skip_emulated_instruction(vcpu);
}
- skip_emulated_instruction(vcpu);
- return 1;
+ __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+ nested_vmx_succeed(vcpu);
+
+ return kvm_skip_emulated_instruction(vcpu);
}
static int handle_pml_full(struct kvm_vcpu *vcpu)
@@ -8097,6 +8227,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
+ case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
@@ -8646,11 +8778,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
register void *__sp asm(_ASM_SP);
- /*
- * If external interrupt exists, IF bit is set in rflags/eflags on the
- * interrupt stack frame, and interrupt will be enabled on a return
- * from interrupt handler.
- */
if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
unsigned int vector;
@@ -8835,7 +8962,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host);
}
-void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl;
@@ -9156,6 +9283,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
+ vmx->loaded_vmcs->shadow_vmcs = NULL;
if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
if (!vmm_exclusive)
@@ -9304,6 +9432,50 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
(new_ctl & ~mask) | (cur_ctl & mask));
}
+/*
+ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
+ * (indicating "allowed-1") if they are supported in the guest's CPUID.
+ */
+static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_cpuid_entry2 *entry;
+
+ vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff;
+ vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE;
+
+#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
+ if (entry && (entry->_reg & (_cpuid_mask))) \
+ vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask); \
+} while (0)
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+ cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
+ cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
+ cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
+ cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
+ cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
+ cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
+ cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
+ cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
+ cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
+ cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
+ cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
+ cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+ cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
+ cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
+ cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
+ cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
+ /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
+ cr4_fixed1_update(bit(11), ecx, bit(2));
+
+#undef cr4_fixed1_update
+}
+
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
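
For readability, one concrete expansion of the cr4_fixed1_update() helper macro added above, using the SMEP line as the example (illustrative only):

/* cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)) expands to: */
do {
	if (entry && (entry->ebx & (bit(X86_FEATURE_SMEP))))
		vmx->nested.nested_vmx_cr4_fixed1 |= (X86_CR4_SMEP);
} while (0);
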
@@ -9345,6 +9517,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
else
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+ if (nested_vmx_allowed(vcpu))
+ nested_vmx_cr_fixed1_bits_update(vcpu);
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9799,6 +9974,49 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
return 0;
}
+static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ unsigned long invalid_mask;
+
+ invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+ return (val & invalid_mask) == 0;
+}
+
+/*
+ * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
+ * emulating VM entry into a guest with EPT enabled.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+ unsigned long *entry_failure_code)
+{
+ if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+ if (!nested_cr3_valid(vcpu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+ return 1;
+ }
+
+ /*
+ * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+ * must not be dereferenced.
+ */
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+ !nested_ept) {
+ if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return 1;
+ }
+ }
+
+ vcpu->arch.cr3 = cr3;
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ }
+
+ kvm_mmu_reset_context(vcpu);
+ return 0;
+}
+
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -9807,11 +10025,15 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
* needs. In addition to modifying the active vmcs (which is vmcs02), this
* function also has additional necessary side-effects, like setting various
* vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
*/
-static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ unsigned long *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
+ bool nested_ept_enabled = false;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -9976,6 +10198,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_intr_status);
}
+ nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -9989,6 +10212,15 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx_set_constant_host_state(vmx);
/*
+ * Set the MSR load/store lists to match L0's settings.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+
+ /*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
* we wrote last (vmx->host_rsp). This cache is no longer relevant
@@ -10061,9 +10293,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
- vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+ vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -10094,15 +10326,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
nested_ept_init_mmu_context(vcpu);
}
- if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
- vcpu->arch.efer = vmcs12->guest_ia32_efer;
- else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
- vcpu->arch.efer |= (EFER_LMA | EFER_LME);
- else
- vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
- /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
- vmx_set_efer(vcpu, vcpu->arch.efer);
-
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
* TS bit (for lazy fpu) and bits which we consider mandatory enabled.
@@ -10117,8 +10340,20 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
- /* shadow page tables on either EPT or shadow page tables */
- kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->guest_ia32_efer;
+ else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /* Shadow page tables on either EPT or shadow page tables. */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+ entry_failure_code))
+ return 1;
+
kvm_mmu_reset_context(vcpu);
if (!enable_ept)
@@ -10136,6 +10371,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+ return 0;
}
/*
@@ -10150,12 +10386,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct loaded_vmcs *vmcs02;
bool ia32e;
u32 msr_entry_idx;
+ unsigned long exit_qualification;
- if (!nested_vmx_check_permission(vcpu) ||
- !nested_vmx_check_vmcs12(vcpu))
+ if (!nested_vmx_check_permission(vcpu))
return 1;
- skip_emulated_instruction(vcpu);
+ if (!nested_vmx_check_vmcs12(vcpu))
+ goto out;
+
vmcs12 = get_vmcs12(vcpu);
if (enable_shadow_vmcs)
@@ -10175,37 +10413,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- return 1;
+ goto out;
}
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
- vmx->nested.nested_vmx_true_procbased_ctls_low,
+ vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
!vmx_control_verify(vmcs12->secondary_vm_exec_control,
vmx->nested.nested_vmx_secondary_ctls_low,
@@ -10214,33 +10452,34 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high) ||
!vmx_control_verify(vmcs12->vm_exit_controls,
- vmx->nested.nested_vmx_true_exit_ctls_low,
+ vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high) ||
!vmx_control_verify(vmcs12->vm_entry_controls,
- vmx->nested.nested_vmx_true_entry_ctls_low,
+ vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high))
{
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- return 1;
+ goto out;
}
- if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
- ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+ !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+ !nested_cr3_valid(vcpu, vmcs12->host_cr3)) {
nested_vmx_failValid(vcpu,
VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
- return 1;
+ goto out;
}
- if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
- ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+ !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
if (vmcs12->vmcs_link_pointer != -1ull) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
- return 1;
+ goto out;
}
/*
@@ -10260,7 +10499,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
}
@@ -10278,7 +10517,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
+ goto out;
}
}
@@ -10291,10 +10530,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (!vmcs02)
return -ENOMEM;
+ /*
+ * After this point, the trap flag no longer triggers a singlestep trap
+ * on the vm entry instructions. Don't call
+ * kvm_skip_emulated_instruction.
+ */
+ skip_emulated_instruction(vcpu);
enter_guest_mode(vcpu);
- vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
-
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
@@ -10307,7 +10550,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx_segment_cache_clear(vmx);
- prepare_vmcs02(vcpu, vmcs12);
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, exit_qualification);
+ return 1;
+ }
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
@@ -10334,6 +10583,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 1;
+
+out:
+ return kvm_skip_emulated_instruction(vcpu);
}
/*
@@ -10639,6 +10891,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
+ unsigned long entry_failure_code;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -10676,8 +10929,12 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
nested_ept_uninit_mmu_context(vcpu);
- kvm_set_cr3(vcpu, vmcs12->host_cr3);
- kvm_mmu_reset_context(vcpu);
+ /*
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
@@ -10778,6 +11035,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 vm_inst_error = 0;
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
@@ -10790,6 +11048,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ if (unlikely(vmx->fail))
+ vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
+
vmx_load_vmcs01(vcpu);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
@@ -10818,7 +11079,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
/* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -10866,7 +11129,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
if (unlikely(vmx->fail)) {
vmx->fail = 0;
- nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+ nested_vmx_failValid(vcpu, vm_inst_error);
} else
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
@@ -11339,8 +11602,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.write_tsc_offset = vmx_write_tsc_offset,
- .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
- .read_l1_tsc = vmx_read_l1_tsc,
.set_tdp_cr3 = vmx_set_cr3,
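
Editor's note: nested_vmx_cr_fixed1_bits_update() above widens the CR4_FIXED1 mask according to the guest's CPUID, and the reworked nested_guest_cr4_valid()/nested_host_cr4_valid() checks then apply the usual VMX fixed-bit rule to vmcs12. A minimal sketch of that rule, for illustration only (cr4_is_valid is a hypothetical name, not part of this patch):

	/*
	 * A CR4 value is acceptable under VMX iff every set bit is
	 * permitted by the FIXED1 mask and every bit required by the
	 * FIXED0 mask is set.
	 */
	static inline bool cr4_is_valid(u64 val, u64 fixed0, u64 fixed1)
	{
		return ((val & fixed1) == val) && ((val & fixed0) == fixed0);
	}
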
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e375235d81c9..1f0d2383f5ee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;
+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
@@ -425,12 +434,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
kvm_inject_gp(vcpu, 0);
else
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
@@ -564,7 +575,7 @@ out:
}
EXPORT_SYMBOL_GPL(load_pdptrs);
-static bool pdptrs_changed(struct kvm_vcpu *vcpu)
+bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
bool changed = true;
@@ -590,6 +601,7 @@ out:
return changed;
}
+EXPORT_SYMBOL_GPL(pdptrs_changed);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
@@ -1409,7 +1421,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+ return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
@@ -1547,7 +1559,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{
- kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+ kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1567,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0);
adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
- kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+ adjust_tsc_offset_guest(vcpu, adjustment);
}
#ifdef CONFIG_X86_64
@@ -1724,18 +1736,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
static u64 __get_kvmclock_ns(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
struct kvm_arch *ka = &kvm->arch;
- s64 ns;
+ struct pvclock_vcpu_time_info hv_clock;
- if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
- u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
- } else {
- ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ if (!ka->use_master_clock) {
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+ return ktime_get_boot_ns() + ka->kvmclock_offset;
}
- return ns;
+ hv_clock.tsc_timestamp = ka->master_cycle_now;
+ hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+ &hv_clock.tsc_shift,
+ &hv_clock.tsc_to_system_mul);
+ return __pvclock_read_cycles(&hv_clock, rdtsc());
}
u64 get_kvmclock_ns(struct kvm *kvm)
@@ -2057,6 +2074,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
+ vcpu->arch.st.steal.preempted = 0;
+
if (vcpu->arch.st.steal.version & 1)
vcpu->arch.st.steal.version += 1; /* first time write, random junk */
@@ -2162,7 +2181,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
- u64 gpa_offset;
struct kvm_arch *ka = &vcpu->kvm->arch;
kvmclock_reset(vcpu);
@@ -2184,8 +2202,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- gpa_offset = data & ~(PAGE_MASK | 1);
-
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
@@ -2262,7 +2278,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
@@ -2280,11 +2296,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
+ vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
return 1;
} else {
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
msr, data);
break;
}
@@ -2492,7 +2508,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
+ vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
+ msr_info->index);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
@@ -2596,7 +2613,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
- case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2639,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
+ case KVM_CAP_ADJUST_CLOCK:
+ r = KVM_CLOCK_TSC_STABLE;
+ break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
* and SMM handlers might indeed rely on 4G segment limits,
@@ -2794,7 +2813,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
- kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_get_lapic_target_expiration_tsc(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
/*
* On a host with synchronized TSC, there is no need to update
@@ -2810,8 +2829,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
+static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ vcpu->arch.st.steal.preempted = 1;
+
+ kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal.preempted,
+ offsetof(struct kvm_steal_time, preempted),
+ sizeof(vcpu->arch.st.steal.preempted));
+}
+
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_steal_time_set_preempted(vcpu);
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
@@ -3415,6 +3448,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
+ int idx;
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3456,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4139,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
- now_ns = get_kvmclock_ns(kvm);
+ local_irq_disable();
+ now_ns = __get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
- user_ns.flags = 0;
+ user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
+ local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
@@ -4795,7 +4833,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
-int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
+static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4815,8 +4853,8 @@ int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return kvm_emulate_wbinvd_noskip(vcpu);
+ kvm_emulate_wbinvd_noskip(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -5060,11 +5098,6 @@ static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_disable();
kvm_load_guest_fpu(emul_to_vcpu(ctxt));
- /*
- * CR0.TS may reference the host fpu state, not the guest fpu state,
- * so it may be clear at this point.
- */
- clts();
}
static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
@@ -5419,7 +5452,6 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
- vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
/*
* "Certain debug exceptions may clear bit 0-3. The
* remaining contents of the DR6 register are never
@@ -5432,6 +5464,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
}
}
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ int r = EMULATE_DONE;
+
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+ return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -5617,6 +5660,49 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
+static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
+{
+ unsigned long val;
+
+ /* We should only ever be called with arch.pio.count equal to 1 */
+ BUG_ON(vcpu->arch.pio.count != 1);
+
+ /* For size less than 4 we merge, else we zero extend */
+ val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
+ : 0;
+
+ /*
+ * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
+ * the copy and tracing
+ */
+ emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
+ vcpu->arch.pio.port, &val, 1);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+
+ return 1;
+}
+
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
+{
+ unsigned long val;
+ int ret;
+
+ /* For size less than 4 we merge, else we zero extend */
+ val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+
+ ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
+ &val, 1);
+ if (ret) {
+ kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ return ret;
+ }
+
+ vcpu->arch.complete_userspace_io = complete_fast_pio_in;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
+
static int kvmclock_cpu_down_prep(unsigned int cpu)
{
__this_cpu_write(cpu_tsc_khz, 0);
@@ -5966,8 +6052,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return kvm_vcpu_halt(vcpu);
+ int ret = kvm_skip_emulated_instruction(vcpu);
+ /*
+ * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_vcpu_halt(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
@@ -5998,9 +6088,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
- int op_64_bit, r = 1;
+ int op_64_bit, r;
- kvm_x86_ops->skip_emulated_instruction(vcpu);
+ r = kvm_skip_emulated_instruction(vcpu);
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
@@ -7386,34 +7476,24 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu->guest_fpu_loaded) {
- vcpu->fpu_counter = 0;
+ if (!vcpu->guest_fpu_loaded)
return;
- }
vcpu->guest_fpu_loaded = 0;
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!use_eager_fpu()) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
kvmclock_reset(vcpu);
- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
kvm_x86_ops->vcpu_free(vcpu);
+ free_cpumask_var(wbinvd_dirty_mask);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
@@ -8153,7 +8233,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ kvm_page_track_flush_slot(kvm, slot);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
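
Editor's note: an illustrative sketch (not part of the patch) of the call pattern the vmx.c and x86.c hunks above converge on -- exit handlers now return the value of kvm_skip_emulated_instruction() instead of skipping and unconditionally returning 1, so a GUESTDBG_SINGLESTEP-triggered #DB is surfaced as a userspace exit rather than silently dropped; handle_example_exit is a hypothetical handler name:

	static int handle_example_exit(struct kvm_vcpu *vcpu)
	{
		/* ...emulate the exiting instruction's side effects here... */

		/*
		 * Returns 1 (keep running the vcpu loop) when the skip
		 * completed with EMULATE_DONE, 0 when a debug exit must be
		 * delivered to userspace first.
		 */
		return kvm_skip_emulated_instruction(vcpu);
	}
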
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 25da5bc8d83d..4ca0d78adcf0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -497,38 +497,24 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
* a whole series of functions like read_cr0() and write_cr0().
*
* We start with cr0. cr0 allows you to turn on and off all kinds of basic
- * features, but Linux only really cares about one: the horrifically-named Task
- * Switched (TS) bit at bit 3 (ie. 8)
+ * features, but the only cr0 bit that Linux ever used at runtime was the
+ * horrifically-named Task Switched (TS) bit at bit 3 (ie. 8)
*
* What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
* the floating point unit is used. Which allows us to restore FPU state
- * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
- * name like "FPUTRAP bit" be a little less cryptic?
+ * lazily after a task switch if we wanted to, but wouldn't a name like
+ * "FPUTRAP bit" be a little less cryptic?
*
- * We store cr0 locally because the Host never changes it. The Guest sometimes
- * wants to read it and we'd prefer not to bother the Host unnecessarily.
+ * Fortunately, Linux keeps it simple and doesn't use TS, so we can ignore
+ * cr0.
*/
-static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
- lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
- current_cr0 = val;
}
static unsigned long lguest_read_cr0(void)
{
- return current_cr0;
-}
-
-/*
- * Intel provided a special instruction to clear the TS bit for people too cool
- * to use write_cr0() to do it. This "clts" instruction is faster, because all
- * the vowels have been optimized out.
- */
-static void lguest_clts(void)
-{
- lazy_hcall1(LHCALL_TS, 0);
- current_cr0 &= ~X86_CR0_TS;
+ return 0;
}
/*
@@ -1432,7 +1418,6 @@ __init void lguest_init(void)
pv_cpu_ops.load_tls = lguest_load_tls;
pv_cpu_ops.get_debugreg = lguest_get_debugreg;
pv_cpu_ops.set_debugreg = lguest_set_debugreg;
- pv_cpu_ops.clts = lguest_clts;
pv_cpu_ops.read_cr0 = lguest_read_cr0;
pv_cpu_ops.write_cr0 = lguest_write_cr0;
pv_cpu_ops.read_cr4 = lguest_read_cr4;
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b48f88..c5959576c315 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,53 +16,6 @@
#include <asm/smap.h>
#include <asm/export.h>
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
- mov PER_CPU_VAR(current_task), %rax
- movq %rdi,%rcx
- addq %rdx,%rcx
- jc bad_to_user
- cmpq TASK_addr_limit(%rax),%rcx
- ja bad_to_user
- ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string", \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_to_user)
-EXPORT_SYMBOL(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
- mov PER_CPU_VAR(current_task), %rax
- movq %rsi,%rcx
- addq %rdx,%rcx
- jc bad_from_user
- cmpq TASK_addr_limit(%rax),%rcx
- ja bad_from_user
- ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string", \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_from_user)
-EXPORT_SYMBOL(_copy_from_user)
-
-
- .section .fixup,"ax"
- /* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
- movl %edx,%ecx
- xorl %eax,%eax
- rep
- stosb
-bad_to_user:
- movl %edx,%eax
- ret
-ENDPROC(bad_from_user)
- .previous
-
/*
* copy_user_generic_unrolled - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient micro
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index d1dee753b949..07764255b611 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -113,14 +113,14 @@ int msr_clear_bit(u32 msr, u8 bit)
}
#ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned msr, u64 val, int failed)
+void do_trace_write_msr(unsigned int msr, u64 val, int failed)
{
trace_write_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_write_msr);
EXPORT_TRACEPOINT_SYMBOL(write_msr);
-void do_trace_read_msr(unsigned msr, u64 val, int failed)
+void do_trace_read_msr(unsigned int msr, u64 val, int failed)
{
trace_read_msr(msr, val, failed);
}
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index b4908789484e..c074799bddae 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -34,3 +34,52 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else
+ memset(to, 0, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_from_user);
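
Editor's note: this hunk and the usercopy_32.c/copy_user_64.S hunks around it move the generic _copy_to_user()/_copy_from_user() wrappers into the shared usercopy.c; the caller-visible contract is unchanged. A minimal usage sketch, for illustration only (struct demo and demo_read are hypothetical):

	struct demo { int a, b; };

	/* A non-zero return means that many bytes were left uncopied. */
	static long demo_read(void __user *ubuf)
	{
		struct demo d = { .a = 1, .b = 2 };

		if (copy_to_user(ubuf, &d, sizeof(d)))
			return -EFAULT;
		return 0;
	}
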
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 3bc7baf2a711..0b281217c890 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -640,52 +640,3 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_from_user);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 79ae939970d3..fcd06f7526de 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (early_recursion_flag > 2)
goto halt_loop;
- if (regs->cs != __KERNEL_CS)
+ /*
+ * Old CPUs leave the high bits of CS on the stack
+ * undefined. I'm not sure which CPUs do this, but at least
+ * the 486 DX works this way.
+ */
+ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;
/*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9f72ca3b2669..17c55a536fdd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -679,8 +679,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk(KERN_CONT "paging request");
printk(KERN_CONT " at %p\n", (void *) address);
- printk(KERN_ALERT "IP:");
- printk_address(regs->ip);
+ printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
dump_pagetable(address);
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 83e701f160a9..efc32bc6862b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -986,20 +986,17 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
return 0;
}
-int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
enum page_cache_mode pcm;
if (!pat_enabled())
- return 0;
+ return;
/* Set prot based on lookup */
pcm = lookup_memtype(pfn_t_to_phys(pfn));
*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
cachemode2protval(pcm));
-
- return 0;
}
/*
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index f88ce0e5efd9..2dab69a706ec 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -141,8 +141,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
* Called from the FPU code when creating a fresh set of FPU
* registers. This is called from a very specific context where
 * we know the FPU registers are safe for use and we can use PKRU
- * directly. The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
*/
void copy_init_pkru_to_fpregs(void)
{
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index fe04a04dab8e..e76d1af60f7a 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -853,7 +853,7 @@ xadd: if (is_imm8(insn->off))
func = (u8 *) __bpf_call_base + imm32;
jmp_offset = func - (image + addrs[i]);
if (seen_ld_abs) {
- reload_skb_data = bpf_helper_changes_skb_data(func);
+ reload_skb_data = bpf_helper_changes_pkt_data(func);
if (reload_skb_data) {
EMIT1(0x57); /* push %rdi */
jmp_offset += 22; /* pop, mov, sub, mov */
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 28c04123b6dd..ffdbc4836b4f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -339,10 +339,11 @@ fail:
return 0;
}
-static void nmi_cpu_setup(void *dummy)
+static void nmi_cpu_setup(void)
{
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
nmi_cpu_save_registers(msrs);
raw_spin_lock(&oprofilefs_lock);
model->setup_ctrs(model, msrs);
@@ -369,7 +370,7 @@ static void nmi_cpu_restore_registers(struct op_msrs *msrs)
}
}
-static void nmi_cpu_shutdown(void *dummy)
+static void nmi_cpu_shutdown(void)
{
unsigned int v;
int cpu = smp_processor_id();
@@ -387,20 +388,26 @@ static void nmi_cpu_shutdown(void *dummy)
nmi_cpu_restore_registers(msrs);
}
-static void nmi_cpu_up(void *dummy)
+static int nmi_cpu_online(unsigned int cpu)
{
+ local_irq_disable();
if (nmi_enabled)
- nmi_cpu_setup(dummy);
+ nmi_cpu_setup();
if (ctr_running)
- nmi_cpu_start(dummy);
+ nmi_cpu_start(NULL);
+ local_irq_enable();
+ return 0;
}
-static void nmi_cpu_down(void *dummy)
+static int nmi_cpu_down_prep(unsigned int cpu)
{
+ local_irq_disable();
if (ctr_running)
- nmi_cpu_stop(dummy);
+ nmi_cpu_stop(NULL);
if (nmi_enabled)
- nmi_cpu_shutdown(dummy);
+ nmi_cpu_shutdown();
+ local_irq_enable();
+ return 0;
}
static int nmi_create_files(struct dentry *root)
@@ -433,26 +440,7 @@ static int nmi_create_files(struct dentry *root)
return 0;
}
-static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
- void *data)
-{
- int cpu = (unsigned long)data;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
- break;
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block oprofile_cpu_nb = {
- .notifier_call = oprofile_cpu_notifier
-};
+static enum cpuhp_state cpuhp_nmi_online;
static int nmi_setup(void)
{
@@ -495,20 +483,17 @@ static int nmi_setup(void)
if (err)
goto fail;
- cpu_notifier_register_begin();
-
- /* Use get/put_online_cpus() to protect 'nmi_enabled' */
- get_online_cpus();
nmi_enabled = 1;
/* make nmi_enabled visible to the nmi handler: */
smp_mb();
- on_each_cpu(nmi_cpu_setup, NULL, 1);
- __register_cpu_notifier(&oprofile_cpu_nb);
- put_online_cpus();
-
- cpu_notifier_register_done();
-
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
+ nmi_cpu_online, nmi_cpu_down_prep);
+ if (err < 0)
+ goto fail_nmi;
+ cpuhp_nmi_online = err;
return 0;
+fail_nmi:
+ unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
free_msrs();
return err;
@@ -518,17 +503,9 @@ static void nmi_shutdown(void)
{
struct op_msrs *msrs;
- cpu_notifier_register_begin();
-
- /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
- get_online_cpus();
- on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+ cpuhp_remove_state(cpuhp_nmi_online);
nmi_enabled = 0;
ctr_running = 0;
- __unregister_cpu_notifier(&oprofile_cpu_nb);
- put_online_cpus();
-
- cpu_notifier_register_done();
/* make variables visible to the nmi handler: */
smp_mb();
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index c20d2cc7ef64..ae387e5ee6f7 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -327,35 +327,18 @@ static int __init early_root_info_init(void)
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
-static void enable_pci_io_ecs(void *unused)
+static int amd_bus_cpu_online(unsigned int cpu)
{
u64 reg;
+
rdmsrl(MSR_AMD64_NB_CFG, reg);
if (!(reg & ENABLE_CF8_EXT_CFG)) {
reg |= ENABLE_CF8_EXT_CFG;
wrmsrl(MSR_AMD64_NB_CFG, reg);
}
+ return 0;
}
-static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- int cpu = (long)hcpu;
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block amd_cpu_notifier = {
- .notifier_call = amd_cpu_notify,
-};
-
static void __init pci_enable_pci_io_ecs(void)
{
#ifdef CONFIG_AMD_NB
@@ -385,7 +368,7 @@ static void __init pci_enable_pci_io_ecs(void)
static int __init pci_io_ecs_init(void)
{
- int cpu;
+ int ret;
/* assume all cpus from fam10h have IO ECS */
if (boot_cpu_data.x86 < 0x10)
@@ -395,12 +378,9 @@ static int __init pci_io_ecs_init(void)
if (early_pci_allowed())
pci_enable_pci_io_ecs();
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
- (void *)(long)cpu);
- __register_cpu_notifier(&amd_cpu_notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/amd_bus:online",
+ amd_bus_cpu_online, NULL);
+ WARN_ON(ret < 0);
pci_probe |= PCI_HAS_IO_ECS;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab98077a..e1fb269c87af 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -264,8 +264,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
error:
- dev_err(&dev->dev,
- "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
+ type == PCI_CAP_ID_MSI ? "" : "-X", irq);
return irq;
}
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index b27bccd4390f..821cb41f00e6 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -89,7 +89,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
- unsigned short *capabilites)
+ u32 *capabilites)
{
#ifdef CONFIG_EARLY_PRINTK
/*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index bf99aa7005eb..936a488d6cf6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
int count = 0, pg_shift = 0;
void *new_memmap = NULL;
efi_status_t status;
- phys_addr_t pa;
+ unsigned long pa;
efi.systab = NULL;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 58b0f801f66f..319148bd4b05 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+ bool bad_size;
+
+ if (!va)
+ return 0;
+
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+ /*
+ * A fully aligned variable on the stack is guaranteed not to
+ * cross a page boundary. Try to catch strings on the stack by
+ * checking that 'size' is a power of two.
+ */
+ bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+
+ WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+
+ return slow_virt_to_phys(va);
+}
+
+#define virt_to_phys_or_null(addr) \
+ virt_to_phys_or_null_size((addr), sizeof(*(addr)))
+
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
- phys_tc = virt_to_phys(tc);
+ phys_tm = virt_to_phys_or_null(tm);
+ phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
spin_lock(&rtc_lock);
- phys_enabled = virt_to_phys(enabled);
- phys_pending = virt_to_phys(pending);
- phys_tm = virt_to_phys(tm);
+ phys_enabled = virt_to_phys_or_null(enabled);
+ phys_pending = virt_to_phys_or_null(pending);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
spin_lock(&rtc_lock);
- phys_tm = virt_to_phys(tm);
+ phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
return status;
}
+static unsigned long efi_name_size(efi_char16_t *name)
+{
+ return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
- phys_data_size = virt_to_phys(data_size);
- phys_vendor = virt_to_phys(vendor);
- phys_name = virt_to_phys(name);
- phys_attr = virt_to_phys(attr);
- phys_data = virt_to_phys(data);
+ phys_data_size = virt_to_phys_or_null(data_size);
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
- phys_name = virt_to_phys(name);
- phys_vendor = virt_to_phys(vendor);
- phys_data = virt_to_phys(data);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
/* If data_size is > sizeof(u32) we've got problems */
status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
- phys_name_size = virt_to_phys(name_size);
- phys_vendor = virt_to_phys(vendor);
- phys_name = virt_to_phys(name);
+ phys_name_size = virt_to_phys_or_null(name_size);
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
efi_status_t status;
u32 phys_count;
- phys_count = virt_to_phys(count);
+ phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
{
u32 phys_data;
- phys_data = virt_to_phys(data);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- phys_storage = virt_to_phys(storage_space);
- phys_remaining = virt_to_phys(remaining_space);
- phys_max = virt_to_phys(max_variable_size);
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 429d08be7848..dd6cfa4ad3ac 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index de734134bc8d..3f1f1c77d090 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -1,5 +1,5 @@
/*
- * platform_wdt.c: Watchdog platform library file
+ * Intel Merrifield watchdog platform device library file
*
* (C) Copyright 2014 Intel Corporation
* Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/intel-mid_wdt.h>
+
#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
#define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
.probe = tangier_probe,
};
-static int __init register_mid_wdt(void)
+static int wdt_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
{
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
- wdt_dev.dev.platform_data = &tangier_pdata;
- return platform_device_register(&wdt_dev);
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&wdt_dev);
+ return 0;
}
- return -ENODEV;
+ return platform_device_register(&wdt_dev);
}
+static struct notifier_block wdt_scu_notifier = {
+ .notifier_call = wdt_scu_status_change,
+};
+
+static int __init register_mid_wdt(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ wdt_dev.dev.platform_data = &tangier_pdata;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before watchdog device
+ * can be registered:
+ */
+ intel_scu_notifier_add(&wdt_scu_notifier);
+
+ return 0;
+}
rootfs_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5d3b45ad1c03..ef03852ea6e8 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -270,7 +270,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
+
+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ struct mid_pwr *pwr = midpwr;
+ int id, reg, bit;
+ u32 power;
+
+ if (!pwr || !pwr->available)
+ return PCI_UNKNOWN;
+
+ id = intel_mid_pwr_get_lss_id(pdev);
+ if (id < 0)
+ return PCI_UNKNOWN;
+
+ reg = (id * LSS_PWS_BITS) / 32;
+ bit = (id * LSS_PWS_BITS) % 32;
+ power = mid_pwr_get_state(pwr, reg);
+ return (__force pci_power_t)((power >> bit) & 3);
+}
void intel_mid_pwr_power_off(void)
{
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 55130846ac87..c0533fbc39e3 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -196,6 +196,7 @@ static int xo15_sci_remove(struct acpi_device *device)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int xo15_sci_resume(struct device *dev)
{
/* Enable all EC events */
@@ -207,6 +208,7 @@ static int xo15_sci_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 9e42842e924a..766d4d3529a1 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -19,7 +19,6 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
-#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index cd5173a2733f..8410e7d0a5b5 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -387,8 +387,8 @@ static void uv_nmi_dump_cpu_ip_hdr(void)
/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
- pr_info("UV: %4d %6d %-32.32s ", cpu, current->pid, current->comm);
- printk_address(regs->ip);
+ pr_info("UV: %4d %6d %-32.32s %pS",
+ cpu, current->pid, current->comm, (void *)regs->ip);
}
/*
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 9634557a5444..ded2e8272382 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -11,6 +11,10 @@
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+
+#include <crypto/hash.h>
#include <asm/init.h>
#include <asm/proto.h>
@@ -177,14 +181,86 @@ int pfn_is_nosave(unsigned long pfn)
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
+#define MD5_DIGEST_SIZE 16
+
struct restore_data_record {
unsigned long jump_address;
unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
+ u8 e820_digest[MD5_DIGEST_SIZE];
};
-#define RESTORE_MAGIC 0x123456789ABCDEF0UL
+#define RESTORE_MAGIC 0x23456789ABCDEF01UL
+
+#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+/**
+ * get_e820_md5 - calculate md5 according to given e820 map
+ *
+ * @map: the e820 map to be calculated
+ * @buf: the md5 result to be stored to
+ */
+static int get_e820_md5(struct e820map *map, void *buf)
+{
+ struct scatterlist sg;
+ struct crypto_ahash *tfm;
+ int size;
+ int ret = 0;
+
+ tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ {
+ AHASH_REQUEST_ON_STACK(req, tfm);
+ size = offsetof(struct e820map, map)
+ + sizeof(struct e820entry) * map->nr_map;
+ ahash_request_set_tfm(req, tfm);
+ sg_init_one(&sg, (u8 *)map, size);
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_crypt(req, &sg, buf, size);
+
+ if (crypto_ahash_digest(req))
+ ret = -EINVAL;
+ ahash_request_zero(req);
+ }
+ crypto_free_ahash(tfm);
+
+ return ret;
+}
+
+static void hibernation_e820_save(void *buf)
+{
+ get_e820_md5(e820_saved, buf);
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+ int ret;
+ u8 result[MD5_DIGEST_SIZE];
+
+ memset(result, 0, MD5_DIGEST_SIZE);
+ /* If there is no digest in suspend kernel, let it go. */
+ if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+ return false;
+
+ ret = get_e820_md5(e820_saved, result);
+ if (ret)
+ return true;
+
+ return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+}
+#else
+static void hibernation_e820_save(void *buf)
+{
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+ /* If md5 is not builtin for restore kernel, let it go. */
+ return false;
+}
+#endif
/**
* arch_hibernation_header_save - populate the architecture specific part
@@ -201,6 +277,9 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
rdr->jump_address_phys = __pa_symbol(&restore_registers);
rdr->cr3 = restore_cr3;
rdr->magic = RESTORE_MAGIC;
+
+ hibernation_e820_save(rdr->e820_digest);
+
return 0;
}
@@ -216,5 +295,16 @@ int arch_hibernation_header_restore(void *addr)
restore_jump_address = rdr->jump_address;
jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
- return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
+
+ if (rdr->magic != RESTORE_MAGIC) {
+ pr_crit("Unrecognized hibernate image header format!\n");
+ return -EINVAL;
+ }
+
+ if (hibernation_e820_mismatch(rdr->e820_digest)) {
+ pr_crit("Hibernate inconsistent memory map detected!\n");
+ return -ENODEV;
+ }
+
+ return 0;
}
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index ac58c1616408..555b9fa0ad43 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
KBUILD_CFLAGS += -m$(BITS)
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 1ac76479c266..8730c2882fff 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -275,6 +275,8 @@ static void do_inject(void)
unsigned int cpu = i_mce.extcpu;
u8 b = i_mce.bank;
+ rdtscll(i_mce.tsc);
+
if (i_mce.misc)
i_mce.status |= MCI_STATUS_MISCV;
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 25012abc3409..4463fa72db94 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -69,7 +69,7 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
# ---------------------------------------------------------------------------
-KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
+KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
-I$(srctree)/arch/x86/boot
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index ba70ff232917..1972565ab106 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -269,7 +269,8 @@ int main(int argc, char **argv)
insns++;
}
- fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+ fprintf((errors) ? stderr : stdout,
+ "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
prog,
(errors) ? "Failure" : "Success",
insns,
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
index f59590645b68..1d23bf953a4a 100644
--- a/arch/x86/tools/relocs.h
+++ b/arch/x86/tools/relocs.h
@@ -16,7 +16,7 @@
#include <regex.h>
#include <tools/le_byteshift.h>
-void die(char *fmt, ...);
+void die(char *fmt, ...) __attribute__((noreturn));
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c
index 56f04db0c9c0..ecf31e0358c8 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/test_get_len.c
@@ -167,7 +167,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Warning: decoded and checked %d"
" instructions with %d warnings\n", insns, warnings);
else
- fprintf(stderr, "Succeed: decoded and checked %d"
+ fprintf(stdout, "Success: decoded and checked %d"
" instructions\n", insns);
return 0;
}
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 233ee09c1ce8..c77db2288982 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -26,7 +26,6 @@ static inline void rep_nop(void)
}
#define cpu_relax() rep_nop()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(t) (&(t)->thread.regs)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bdd855685403..ced7027b3fbc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -980,17 +980,6 @@ static void xen_io_delay(void)
{
}
-static void xen_clts(void)
-{
- struct multicall_space mcs;
-
- mcs = xen_mc_entry(0);
-
- MULTI_fpu_taskswitch(mcs.mc, 0);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
static unsigned long xen_read_cr0(void)
@@ -1233,8 +1222,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.set_debugreg = xen_set_debugreg,
.get_debugreg = xen_get_debugreg,
- .clts = xen_clts,
-
.read_cr0 = xen_read_cr0,
.write_cr0 = xen_write_cr0,
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 0e98e5d241d0..a9fafb5c8738 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -19,7 +19,6 @@
int xen_swiotlb __read_mostly;
static struct dma_map_ops xen_swiotlb_dma_ops = {
- .mapping_error = xen_swiotlb_dma_mapping_error,
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fca0827..8c394e30e5fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,7 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
unsigned long xen_released_pages;
/* E820 map used during setting up memory. */
-static struct e820entry xen_e820_map[E820MAX] __initdata;
+static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
static u32 xen_e820_map_entries __initdata;
/*
@@ -750,7 +750,7 @@ char * __init xen_memory_setup(void)
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(max_pfn);
- memmap.nr_entries = E820MAX;
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
op = xen_initial_domain() ?
@@ -923,7 +923,7 @@ char * __init xen_auto_xlated_memory_setup(void)
int i;
int rc;
- memmap.nr_entries = E820MAX;
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3d6e0064cbfc..e8a9ea7d7a21 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -114,6 +114,7 @@ void xen_uninit_lock_cpu(int cpu)
per_cpu(irq_name, cpu) = NULL;
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
/*
* Our init of PV spinlocks is split in two init functions due to us
@@ -137,6 +138,7 @@ void __init xen_init_spinlocks(void)
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_lock_ops.wait = xen_qlock_wait;
pv_lock_ops.kick = xen_qlock_kick;
+ pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
/*
diff --git a/arch/xtensa/include/asm/mutex.h b/arch/xtensa/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/xtensa/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b42d68bfe3cf..86ffcd68e496 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -206,7 +206,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/* Special register access. */
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 81435d995e11..9fdbe1fe0473 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -101,4 +101,6 @@
#define SO_CNX_ADVICE 53
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index de9b14b2d348..cd400af4a6b2 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
#define __NR_pwritev2 347
__SYSCALL(347, sys_pwritev2, 6)
-#define __NR_syscall_count 348
+#define __NR_pkey_mprotect 348
+__SYSCALL(348, sys_pkey_mprotect, 4)
+#define __NR_pkey_alloc 349
+__SYSCALL(349, sys_pkey_alloc, 2)
+#define __NR_pkey_free 350
+__SYSCALL(350, sys_pkey_free, 1)
+
+#define __NR_syscall_count 351
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1e68806d6695..6a16decf278f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -189,7 +189,9 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
{
dma_addr_t dma_handle = page_to_phys(page) + offset;
- xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+
return dma_handle;
}
@@ -197,7 +199,8 @@ static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
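
For context, DMA_ATTR_SKIP_CPU_SYNC lets a caller take over cache maintenance itself. A minimal, hypothetical driver-side sketch (dev, buf and len are assumptions of this example):

#include <linux/dma-mapping.h>

static dma_addr_t map_then_sync(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Skip the implicit sync at map time; we sync explicitly below. */
	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return handle;	/* caller re-checks with dma_mapping_error() */

	/* ... late modifications to buf would go here ... */

	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
	return handle;
}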
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 9a5bcd0381a7..be81e69b25bc 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -172,10 +172,11 @@ void __init time_init(void)
{
of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
- printk("Calibrating CPU frequency ");
+ pr_info("Calibrating CPU frequency ");
calibrate_ccount();
- printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
- (int)(ccount_freq/10000)%100);
+ pr_cont("%d.%02d MHz\n",
+ (int)ccount_freq / 1000000,
+ (int)(ccount_freq / 10000) % 100);
#else
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
void calibrate_delay(void)
{
loops_per_jiffy = ccount_freq / HZ;
- printk("Calibrating delay loop (skipped)... "
- "%lu.%02lu BogoMIPS preset\n",
- loops_per_jiffy/(1000000/HZ),
- (loops_per_jiffy/(10000/HZ)) % 100);
+ pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+ loops_per_jiffy / (1000000 / HZ),
+ (loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index d02fc304b31c..ce37d5b899fe 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)
for (i = 0; i < 16; i++) {
if ((i % 8) == 0)
- printk(KERN_INFO "a%02d:", i);
- printk(KERN_CONT " %08lx", regs->areg[i]);
+ pr_info("a%02d:", i);
+ pr_cont(" %08lx", regs->areg[i]);
}
- printk(KERN_CONT "\n");
-
- printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
- regs->pc, regs->ps, regs->depc, regs->excvaddr);
- printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
- regs->lbeg, regs->lend, regs->lcount, regs->sar);
+ pr_cont("\n");
+ pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+ regs->pc, regs->ps, regs->depc, regs->excvaddr);
+ pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+ regs->lbeg, regs->lend, regs->lcount, regs->sar);
if (user_mode(regs))
- printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
- regs->windowbase, regs->windowstart, regs->wmask,
- regs->syscall);
+ pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+ regs->windowbase, regs->windowstart, regs->wmask,
+ regs->syscall);
}
static int show_trace_cb(struct stackframe *frame, void *data)
{
if (kernel_text_address(frame->pc)) {
- printk(" [<%08lx>] ", frame->pc);
- print_symbol("%s\n", frame->pc);
+ pr_cont(" [<%08lx>]", frame->pc);
+ print_symbol(" %s\n", frame->pc);
}
return 0;
}
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
if (!sp)
sp = stack_pointer(task);
- printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
-#endif
+ pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
- printk("\n");
+#ifndef CONFIG_KALLSYMS
+ pr_cont("\n");
+#endif
}
-/*
- * This routine abuses get_user()/put_user() to reference pointers
- * with at least a bit of error checking ...
- */
-
static int kstack_depth_to_print = 24;
void show_stack(struct task_struct *task, unsigned long *sp)
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
sp = stack_pointer(task);
stack = sp;
- printk("\nStack: ");
+ pr_info("Stack:\n");
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("%08lx ", *sp++);
+ pr_cont(" %08lx", *sp++);
+ if (i % 8 == 7)
+ pr_cont("\n");
}
- printk("\n");
show_trace(task, stack);
}
-void show_code(unsigned int *pc)
-{
- long i;
-
- printk("\nCode:");
-
- for(i = -3 ; i < 6 ; i++) {
- unsigned long insn;
- if (__get_user(insn, pc + i)) {
- printk(" (Bad address in pc)\n");
- break;
- }
- printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
- }
-}
-
DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
- int nl = 0;
console_verbose();
spin_lock_irq(&die_lock);
- printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
- nl = 1;
-#endif
- if (nl)
- printk("\n");
+ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+ IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
show_regs(regs);
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
diff --git a/block/Kconfig b/block/Kconfig
index 1d4d624492fc..8bf114a3858a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,6 +5,7 @@ menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
select SBITMAP
+ select SRCU
help
Provide block layer support for the kernel.
@@ -89,6 +90,14 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
+config BLK_DEV_ZONED
+ bool "Zoned block device support"
+ ---help---
+ Block layer zoned block device support. This option enables
+ support for ZAC/ZBC host-managed and host-aware zoned block devices.
+
+ Say yes here if you have a ZAC or ZBC storage device.
+
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
@@ -112,6 +121,32 @@ config BLK_CMDLINE_PARSER
See Documentation/block/cmdline-partition.txt for more information.
+config BLK_WBT
+ bool "Enable support for block device writeback throttling"
+ default n
+ ---help---
+	Enabling this option allows the block layer to throttle buffered
+	background writeback from the VM, making it smoother and reducing
+	its impact on foreground operations. The throttling is done
+	dynamically, using an algorithm loosely based on CoDel that factors
+	in the realtime performance of the disk.
+
+config BLK_WBT_SQ
+ bool "Single queue writeback throttling"
+ default n
+ depends on BLK_WBT
+ ---help---
+	Enable writeback throttling by default on legacy single-queue devices.
+
+config BLK_WBT_MQ
+ bool "Multiqueue writeback throttling"
+ default y
+ depends on BLK_WBT
+ ---help---
+ Enable writeback throttling by default on multiqueue devices.
+	Multiqueue currently doesn't have support for IO scheduling, so
+	enabling this option is recommended.
+
menu "Partition Types"
source "block/partitions/Kconfig"
diff --git a/block/Makefile b/block/Makefile
index 36acdd7545be..a827f988c4e6 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o \
+ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
@@ -23,3 +23,5 @@ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
+obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
+obj-$(CONFIG_BLK_WBT) += blk-wbt.o
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 63f72f00c72e..5384713d48bc 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -172,7 +172,7 @@ bool bio_integrity_enabled(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- if (!bio_is_rw(bio))
+ if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
return false;
/* Already protected? */
diff --git a/block/bio.c b/block/bio.c
index db85c5753a76..2b375020fc49 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -270,11 +270,15 @@ static void bio_free(struct bio *bio)
}
}
-void bio_init(struct bio *bio)
+void bio_init(struct bio *bio, struct bio_vec *table,
+ unsigned short max_vecs)
{
memset(bio, 0, sizeof(*bio));
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
+
+ bio->bi_io_vec = table;
+ bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
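
For context, with the new signature a driver that embeds a bio can hand bio_init() its own inline vector table instead of poking bi_io_vec and bi_max_vecs by hand. A minimal sketch (the struct and function names are hypothetical):

#include <linux/bio.h>
#include <linux/kernel.h>

struct my_cmd {
	struct bio	bio;
	struct bio_vec	bvecs[4];	/* inline vectors owned by the caller */
};

static void my_cmd_prep_bio(struct my_cmd *cmd, struct block_device *bdev,
			    sector_t sector)
{
	bio_init(&cmd->bio, cmd->bvecs, ARRAY_SIZE(cmd->bvecs));
	cmd->bio.bi_bdev = bdev;
	cmd->bio.bi_iter.bi_sector = sector;
	/* bio_add_page() can now append up to ARRAY_SIZE(cmd->bvecs) pages. */
}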
@@ -480,7 +484,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
return NULL;
bio = p + front_pad;
- bio_init(bio);
+ bio_init(bio, NULL, 0);
if (nr_iovecs > inline_vecs) {
unsigned long idx = 0;
@@ -670,6 +674,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
break;
case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
@@ -847,6 +852,55 @@ done:
}
EXPORT_SYMBOL(bio_add_page);
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins as many pages from *iter as fit in @bio's bvec array and appends them
+ * to it. The pages will have to be released using put_page() when done.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+ struct page **pages = (struct page **)bv;
+ size_t offset, diff;
+ ssize_t size;
+
+ size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
+ if (unlikely(size <= 0))
+ return size ? size : -EFAULT;
+ nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /*
+ * Deep magic below: We need to walk the pinned pages backwards
+ * because we are abusing the space allocated for the bio_vecs
+ * for the page array. Because the bio_vecs are larger than the
+ * page pointers by definition this will always work. But it also
+ * means we can't use bio_add_page, so any changes to its semantics
+ * need to be reflected here as well.
+ */
+ bio->bi_iter.bi_size += size;
+ bio->bi_vcnt += nr_pages;
+
+ diff = (nr_pages * PAGE_SIZE - offset) - size;
+ while (nr_pages--) {
+ bv[nr_pages].bv_page = pages[nr_pages];
+ bv[nr_pages].bv_len = PAGE_SIZE;
+ bv[nr_pages].bv_offset = 0;
+ }
+
+ bv[0].bv_offset += offset;
+ bv[0].bv_len -= offset;
+ if (diff)
+ bv[bio->bi_vcnt - 1].bv_len -= diff;
+
+ iov_iter_advance(iter, size);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+
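For context, a minimal direct-I/O style caller sketch for the new helper; the function name and parameters are assumptions of this example, and a single call pins at most as many pages as the bio has free vector slots:

#include <linux/bio.h>
#include <linux/uio.h>
#include <linux/gfp.h>
#include <linux/err.h>

static struct bio *pin_iter_to_bio(struct block_device *bdev, sector_t sector,
				   struct iov_iter *iter, unsigned int op)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, op, 0);

	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret) {
		bio_put(bio);
		return ERR_PTR(ret);
	}

	/* On completion the caller must put_page() each pinned bvec page. */
	return bio;
}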
struct submit_bio_ret {
struct completion event;
int error;
@@ -1786,15 +1840,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
- /*
- * Discards need a mutable bio_vec to accommodate the payload
- * required by the DSM TRIM and UNMAP commands.
- */
- if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
- split = bio_clone_bioset(bio, gfp, bs);
- else
- split = bio_clone_fast(bio, gfp, bs);
-
+ split = bio_clone_fast(bio, gfp, bs);
if (!split)
return NULL;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b08ccbb9393a..8ba0af780e88 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
- blkcg->css.id, GFP_NOWAIT);
+ blkcg->css.id,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
}
spin_lock_init(&blkcg->lock);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1240,7 +1241,7 @@ pd_prealloc:
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
diff --git a/block/blk-core.c b/block/blk-core.c
index 14d7c0740dc0..61ba08c58b64 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wbt.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -145,13 +146,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
if (error)
bio->bi_error = error;
- if (unlikely(rq->cmd_flags & REQ_QUIET))
+ if (unlikely(rq->rq_flags & RQF_QUIET))
bio_set_flag(bio, BIO_QUIET);
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}
@@ -882,6 +883,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
+ wbt_exit(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -899,7 +901,7 @@ EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
- if (rq->cmd_flags & REQ_ELVPRIV) {
+ if (rq->rq_flags & RQF_ELVPRIV) {
elv_put_request(rl->q, rq);
if (rq->elv.icq)
put_io_context(rq->elv.icq->ioc);
@@ -961,14 +963,14 @@ static void __freed_request(struct request_list *rl, int sync)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_list *rl, int op, unsigned int flags)
+static void freed_request(struct request_list *rl, bool sync,
+ req_flags_t rq_flags)
{
struct request_queue *q = rl->q;
- int sync = rw_is_sync(op, flags);
q->nr_rqs[sync]--;
rl->count[sync]--;
- if (flags & REQ_ELVPRIV)
+ if (rq_flags & RQF_ELVPRIV)
q->nr_rqs_elvpriv--;
__freed_request(rl, sync);
@@ -1056,8 +1058,7 @@ static struct io_context *rq_ioc(struct bio *bio)
/**
* __get_request - get a free request
* @rl: request list to allocate from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1068,22 +1069,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *__get_request(struct request_list *rl, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
int may_queue;
+ req_flags_t rq_flags = RQF_ALLOCED;
if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
- may_queue = elv_may_queue(q, op, op_flags);
+ may_queue = elv_may_queue(q, op);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -1127,7 +1128,7 @@ static struct request *__get_request(struct request_list *rl, int op,
/*
* Decide whether the new request will be managed by elevator. If
- * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
+ * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
* prevent the current elevator from being destroyed until the new
* request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe.
@@ -1136,14 +1137,14 @@ static struct request *__get_request(struct request_list *rl, int op,
* it will be created after releasing queue_lock.
*/
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
- op_flags |= REQ_ELVPRIV;
+ rq_flags |= RQF_ELVPRIV;
q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}
if (blk_queue_io_stat(q))
- op_flags |= REQ_IO_STAT;
+ rq_flags |= RQF_IO_STAT;
spin_unlock_irq(q->queue_lock);
/* allocate and init request */
@@ -1153,10 +1154,12 @@ static struct request *__get_request(struct request_list *rl, int op,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
- req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
+ blk_rq_set_prio(rq, ioc);
+ rq->cmd_flags = op;
+ rq->rq_flags = rq_flags;
/* init elvpriv */
- if (op_flags & REQ_ELVPRIV) {
+ if (rq_flags & RQF_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
if (ioc)
icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1195,7 +1198,7 @@ fail_elvpriv:
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
__func__, dev_name(q->backing_dev_info.dev));
- rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->rq_flags &= ~RQF_ELVPRIV;
rq->elv.icq = NULL;
spin_lock_irq(q->queue_lock);
@@ -1212,7 +1215,7 @@ fail_alloc:
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(rl, op, op_flags);
+ freed_request(rl, is_sync, rq_flags);
/*
* in the very unlikely event that allocation failed and no
@@ -1230,8 +1233,7 @@ rq_starved:
/**
* get_request - get a free request
* @q: request_queue to allocate request from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1242,18 +1244,17 @@ rq_starved:
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
- rq = __get_request(rl, op, op_flags, bio, gfp_mask);
+ rq = __get_request(rl, op, bio, gfp_mask);
if (!IS_ERR(rq))
return rq;
@@ -1295,7 +1296,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
- rq = get_request(q, rw, 0, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
return rq;
@@ -1346,8 +1347,9 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->issue_stat);
- if (rq->cmd_flags & REQ_QUEUED)
+ if (rq->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, rq);
BUG_ON(blk_queued_rq(rq));
@@ -1409,7 +1411,7 @@ EXPORT_SYMBOL_GPL(part_round_stats);
#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
- if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
pm_runtime_mark_last_busy(rq->q->dev);
}
#else
@@ -1421,6 +1423,8 @@ static inline void blk_pm_put_request(struct request *rq) {}
*/
void __blk_put_request(struct request_queue *q, struct request *req)
{
+ req_flags_t rq_flags = req->rq_flags;
+
if (unlikely(!q))
return;
@@ -1436,20 +1440,21 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
+ wbt_done(q->rq_wb, &req->issue_stat);
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
*/
- if (req->cmd_flags & REQ_ALLOCED) {
- unsigned int flags = req->cmd_flags;
- int op = req_op(req);
+ if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
+ bool sync = op_is_sync(req->cmd_flags);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
- freed_request(rl, op, flags);
+ freed_request(rl, sync, rq_flags);
blk_put_rl(rl);
}
}
@@ -1471,38 +1476,6 @@ void blk_put_request(struct request *req)
}
EXPORT_SYMBOL(blk_put_request);
-/**
- * blk_add_request_payload - add a payload to a request
- * @rq: request to update
- * @page: page backing the payload
- * @offset: offset in page
- * @len: length of the payload.
- *
- * This allows to later add a payload to an already submitted request by
- * a block driver. The driver needs to take care of freeing the payload
- * itself.
- *
- * Note that this is a quite horrible hack and nothing but handling of
- * discard requests should ever use it.
- */
-void blk_add_request_payload(struct request *rq, struct page *page,
- int offset, unsigned int len)
-{
- struct bio *bio = rq->bio;
-
- bio->bi_io_vec->bv_page = page;
- bio->bi_io_vec->bv_offset = offset;
- bio->bi_io_vec->bv_len = len;
-
- bio->bi_iter.bi_size = len;
- bio->bi_vcnt = 1;
- bio->bi_phys_segments = 1;
-
- rq->__data_len = rq->resid_len = len;
- rq->nr_phys_segments = 1;
-}
-EXPORT_SYMBOL_GPL(blk_add_request_payload);
-
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
@@ -1649,24 +1622,23 @@ out:
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
-
- req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
req->__sector = bio->bi_iter.bi_sector;
- req->ioprio = bio_prio(bio);
+ if (ioprio_valid(bio_prio(bio)))
+ req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
- int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
+ int el_ret, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
+ unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1719,30 +1691,22 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
- /*
- * This sync check and mask will be re-done in init_request_from_bio(),
- * but we need to set it earlier to expose the sync flag to the
- * rq allocator and io schedulers.
- */
- if (sync)
- rw_flags |= REQ_SYNC;
-
- /*
- * Add in META/PRIO flags, if set, before we get to the IO scheduler
- */
- rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
+ wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
/*
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
+ req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
if (IS_ERR(req)) {
+ __wbt_done(q->rq_wb, wb_acct);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
+ wbt_track(&req->issue_stat, wb_acct);
+
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -1759,11 +1723,16 @@ get_rq:
/*
* If this is the first request added after a plug, fire
* of a plug trace.
+ *
+	 * @request_count may become stale because the task may have been
+	 * scheduled out, so check the plug list again.
*/
- if (!request_count)
+ if (!request_count || list_empty(&plug->list))
trace_block_plug(q);
else {
- if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ struct request *last = list_entry_rq(plug->list.prev);
+ if (request_count >= BLK_MAX_REQUEST_COUNT ||
+ blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}
@@ -1788,7 +1757,12 @@ static inline void blk_partition_remap(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
- if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ /*
+ * Zone reset does not include bi_size so bio_sectors() is always 0.
+ * Include a test for the reset op code and perform the remap if needed.
+ */
+ if (bdev != bdev->bd_contains &&
+ (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
struct hd_struct *p = bdev->bd_part;
bio->bi_iter.bi_sector += p->start_sect;
@@ -1942,6 +1916,15 @@ generic_make_request_checks(struct bio *bio)
if (!bdev_write_same(bio->bi_bdev))
goto not_supported;
break;
+ case REQ_OP_ZONE_REPORT:
+ case REQ_OP_ZONE_RESET:
+ if (!bdev_is_zoned(bio->bi_bdev))
+ goto not_supported;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ if (!bdev_write_zeroes_sectors(bio->bi_bdev))
+ goto not_supported;
+ break;
default:
break;
}
@@ -2210,7 +2193,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
unsigned int bytes = 0;
struct bio *bio;
- if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
return blk_rq_bytes(rq);
/*
@@ -2253,7 +2236,7 @@ void blk_account_io_done(struct request *req)
* normal IO on queueing nor completion. Accounting the
* containing request is enough.
*/
- if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
+ if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
struct hd_struct *part;
@@ -2281,7 +2264,7 @@ static struct request *blk_pm_peek_request(struct request_queue *q,
struct request *rq)
{
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
- (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+ (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
return NULL;
else
return rq;
@@ -2357,13 +2340,13 @@ struct request *blk_peek_request(struct request_queue *q)
if (!rq)
break;
- if (!(rq->cmd_flags & REQ_STARTED)) {
+ if (!(rq->rq_flags & RQF_STARTED)) {
/*
* This is the first time the device driver
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
- if (rq->cmd_flags & REQ_SORTED)
+ if (rq->rq_flags & RQF_SORTED)
elv_activate_rq(q, rq);
/*
@@ -2371,7 +2354,7 @@ struct request *blk_peek_request(struct request_queue *q)
* it, a request that has been delayed should
* not be passed by new incoming requests
*/
- rq->cmd_flags |= REQ_STARTED;
+ rq->rq_flags |= RQF_STARTED;
trace_block_rq_issue(q, rq);
}
@@ -2380,7 +2363,7 @@ struct request *blk_peek_request(struct request_queue *q)
q->boundary_rq = NULL;
}
- if (rq->cmd_flags & REQ_DONTPREP)
+ if (rq->rq_flags & RQF_DONTPREP)
break;
if (q->dma_drain_size && blk_rq_bytes(rq)) {
@@ -2403,11 +2386,11 @@ struct request *blk_peek_request(struct request_queue *q)
/*
* the request may have been (partially) prepped.
* we need to keep this request in the front to
- * avoid resource deadlock. REQ_STARTED will
+ * avoid resource deadlock. RQF_STARTED will
* prevent other fs requests from passing this one.
*/
if (q->dma_drain_size && blk_rq_bytes(rq) &&
- !(rq->cmd_flags & REQ_DONTPREP)) {
+ !(rq->rq_flags & RQF_DONTPREP)) {
/*
* remove the space for the drain we added
* so that we don't add it again
@@ -2420,7 +2403,7 @@ struct request *blk_peek_request(struct request_queue *q)
} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
@@ -2475,6 +2458,12 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
+ if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
+ blk_stat_set_issue_time(&req->issue_stat);
+ req->rq_flags |= RQF_STATS;
+ wbt_issue(req->q->rq_wb, &req->issue_stat);
+ }
+
/*
* We are now handing the request to the hardware, initialize
* resid_len to full count and add the timeout handler.
@@ -2557,7 +2546,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->errors = 0;
if (error && req->cmd_type == REQ_TYPE_FS &&
- !(req->cmd_flags & REQ_QUIET)) {
+ !(req->rq_flags & RQF_QUIET)) {
char *error_type;
switch (error) {
@@ -2623,6 +2612,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
return false;
}
+ WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
+
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
@@ -2630,7 +2621,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
- if (req->cmd_flags & REQ_MIXED_MERGE) {
+ if (req->rq_flags & RQF_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
@@ -2683,7 +2674,7 @@ void blk_unprep_request(struct request *req)
{
struct request_queue *q = req->q;
- req->cmd_flags &= ~REQ_DONTPREP;
+ req->rq_flags &= ~RQF_DONTPREP;
if (q->unprep_rq_fn)
q->unprep_rq_fn(q, req);
}
@@ -2694,8 +2685,13 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
- if (req->cmd_flags & REQ_QUEUED)
- blk_queue_end_tag(req->q, req);
+ struct request_queue *q = req->q;
+
+ if (req->rq_flags & RQF_STATS)
+ blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
+
+ if (req->rq_flags & RQF_QUEUED)
+ blk_queue_end_tag(q, req);
BUG_ON(blk_queued_rq(req));
@@ -2704,18 +2700,19 @@ void blk_finish_request(struct request *req, int error)
blk_delete_timer(req);
- if (req->cmd_flags & REQ_DONTPREP)
+ if (req->rq_flags & RQF_DONTPREP)
blk_unprep_request(req);
blk_account_io_done(req);
- if (req->end_io)
+ if (req->end_io) {
+ wbt_done(req->q->rq_wb, &req->issue_stat);
req->end_io(req, error);
- else {
+ } else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
- __blk_put_request(req->q, req);
+ __blk_put_request(q, req);
}
}
EXPORT_SYMBOL(blk_finish_request);
@@ -2939,8 +2936,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- req_set_op(rq, bio_op(bio));
-
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3024,8 +3019,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- req_set_op_attrs(dst, req_op(src),
- (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
+ dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -3303,52 +3297,6 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
-{
- struct blk_plug *plug;
- long state;
- unsigned int queue_num;
- struct blk_mq_hw_ctx *hctx;
-
- if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
- !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
- return false;
-
- queue_num = blk_qc_t_to_queue_num(cookie);
- hctx = q->queue_hw_ctx[queue_num];
- hctx->poll_considered++;
-
- plug = current->plug;
- if (plug)
- blk_flush_plug_list(plug, false);
-
- state = current->state;
- while (!need_resched()) {
- int ret;
-
- hctx->poll_invoked++;
-
- ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
- if (ret > 0) {
- hctx->poll_success++;
- set_current_state(TASK_RUNNING);
- return true;
- }
-
- if (signal_pending_state(state, current))
- set_current_state(TASK_RUNNING);
-
- if (current->state == TASK_RUNNING)
- return true;
- if (ret < 0)
- break;
- cpu_relax();
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
#ifdef CONFIG_PM
/**
* blk_pm_runtime_init - Block layer runtime PM initialization routine
@@ -3530,8 +3478,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);
int __init blk_dev_init(void)
{
- BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct request, cmd_flags));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+ FIELD_SIZEOF(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7ea04325d02f..3ecb00a6cf45 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
rq->errors = -ENXIO;
__blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3c882cbc7541..20b7c7a02f1c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -56,7 +56,7 @@
* Once while executing DATA and again after the whole sequence is
* complete. The first completion updates the contained bio but doesn't
* finish it so that the bio submitter is notified only after the whole
- * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
+ * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
* req_bio_endio().
*
* The above peculiarity requires that each FLUSH/FUA request has only one
@@ -127,17 +127,14 @@ static void blk_flush_restore_request(struct request *rq)
rq->bio = rq->biotail;
/* make @rq a normal request */
- rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+ rq->rq_flags &= ~RQF_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io;
}
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
- struct request_queue *q = rq->q;
-
- blk_mq_add_to_requeue_list(rq, add_front);
- blk_mq_kick_requeue_list(q);
+ blk_mq_add_to_requeue_list(rq, add_front, true);
return false;
} else {
if (add_front)
@@ -330,7 +327,8 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}
flush_rq->cmd_type = REQ_TYPE_FS;
- req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
+ flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
+ flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
@@ -368,7 +366,7 @@ static void flush_data_end_io(struct request *rq, int error)
elv_completed_request(q, rq);
/* for avoiding double accounting */
- rq->cmd_flags &= ~REQ_STARTED;
+ rq->rq_flags &= ~RQF_STARTED;
/*
* After populating an empty queue, kick it to avoid stall. Read
@@ -426,6 +424,13 @@ void blk_insert_flush(struct request *rq)
rq->cmd_flags &= ~REQ_FUA;
/*
+ * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
+ * of those flags, we have to set REQ_SYNC to avoid skewing
+ * the request accounting.
+ */
+ rq->cmd_flags |= REQ_SYNC;
+
+ /*
* An empty flush handed down from a stacking driver may
* translate into nothing if the underlying device does not
* advertise a write-back cache. In this case, simply
@@ -449,7 +454,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_insert_request(rq, false, false, true);
+ blk_mq_insert_request(rq, false, true, false);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
@@ -461,7 +466,7 @@ void blk_insert_flush(struct request *rq)
*/
memset(&rq->flush, 0, sizeof(rq->flush));
INIT_LIST_HEAD(&rq->flush.list);
- rq->cmd_flags |= REQ_FLUSH_SEQ;
+ rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
if (q->mq_ops) {
rq->end_io = mq_flush_data_end_io;
@@ -513,7 +518,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
bio = bio_alloc(gfp_mask, 0);
bio->bi_bdev = bdev;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
ret = submit_bio_wait(bio);
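
For context, a caller that just wants the write-back cache flushed keeps using blkdev_issue_flush(); after this change it boils down to the empty REQ_OP_WRITE | REQ_PREFLUSH bio shown above (the wrapper name is hypothetical):

#include <linux/blkdev.h>

static int flush_device_cache(struct block_device *bdev)
{
	/* NULL: we do not care which sector a failed flush reports. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}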
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 46fe9248410d..ed89c8f4b2a0 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
- enum req_op op;
+ unsigned int op;
int alignment;
sector_t bs_mask;
@@ -80,7 +80,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
- bio = next_bio(bio, 1, gfp_mask);
+ bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio_set_op_attrs(bio, op, 0);
@@ -137,24 +137,24 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
EXPORT_SYMBOL(blkdev_issue_discard);
/**
- * blkdev_issue_write_same - queue a write same operation
+ * __blkdev_issue_write_same - generate a number of bios with the same page
* @bdev: target blockdev
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
* @page: page containing data to write
+ * @biop: pointer to anchor bio
*
* Description:
- * Issue a write same request for the sectors in question.
+ * Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
*/
-int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask,
- struct page *page)
+static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page,
+ struct bio **biop)
{
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
- struct bio *bio = NULL;
- int ret = 0;
+ struct bio *bio = *biop;
sector_t bs_mask;
if (!q)
@@ -164,6 +164,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
+ if (!bdev_write_same(bdev))
+ return -EOPNOTSUPP;
+
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
@@ -185,32 +188,112 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
+ cond_resched();
}
- if (bio) {
+ *biop = bio;
+ return 0;
+}
+
+/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev: target blockdev
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @page: page containing data
+ *
+ * Description:
+ * Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask,
+ struct page *page)
+{
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret;
+
+ blk_start_plug(&plug);
+ ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
+ &bio);
+ if (ret == 0 && bio) {
ret = submit_bio_wait(bio);
bio_put(bio);
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
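
For context, a minimal caller sketch that fills a block range with one repeated byte pattern through the public wrapper above (the helper name is hypothetical):

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int fill_range_with_byte(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, u8 byte)
{
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);	/* lowmem, so page_address() is valid */
	if (!page)
		return -ENOMEM;
	memset(page_address(page), byte, PAGE_SIZE);

	/* Fails with -EOPNOTSUPP when the device lacks WRITE SAME. */
	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL, page);

	__free_page(page);
	return ret;
}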
/**
- * blkdev_issue_zeroout - generate number of zero filed write bios
+ * __blkdev_issue_write_zeroes - generate a number of bios with WRITE ZEROES
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
+ * @biop: pointer to anchor bio
*
* Description:
- * Generate and issue number of bios with zerofiled pages.
+ * Generate and issue a number of bios (REQ_OP_WRITE_ZEROES) with zero-filled pages.
*/
+static int __blkdev_issue_write_zeroes(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+ struct bio **biop)
+{
+ struct bio *bio = *biop;
+ unsigned int max_write_zeroes_sectors;
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (!q)
+ return -ENXIO;
+
+ /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
+ max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
+
+ if (max_write_zeroes_sectors == 0)
+ return -EOPNOTSUPP;
+
+ while (nr_sects) {
+ bio = next_bio(bio, 0, gfp_mask);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio_set_op_attrs(bio, REQ_OP_WRITE_ZEROES, 0);
-static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask)
+ if (nr_sects > max_write_zeroes_sectors) {
+ bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
+ nr_sects -= max_write_zeroes_sectors;
+ sector += max_write_zeroes_sectors;
+ } else {
+ bio->bi_iter.bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+ cond_resched();
+ }
+
+ *biop = bio;
+ return 0;
+}
+
+/**
+ * __blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev: blockdev to issue
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @biop: pointer to anchor bio
+ * @discard: discard flag
+ *
+ * Description:
+ * Generate and issue a number of bios with zero-filled pages.
+ */
+int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
+ bool discard)
{
int ret;
- struct bio *bio = NULL;
+ int bi_size = 0;
+ struct bio *bio = *biop;
unsigned int sz;
sector_t bs_mask;
@@ -218,6 +301,24 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
+ if (discard) {
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+ BLKDEV_DISCARD_ZERO, biop);
+ if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+ goto out;
+ }
+
+ ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
+ biop);
+ if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+ goto out;
+
+ ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ ZERO_PAGE(0), biop);
+ if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+ goto out;
+
+ ret = 0;
while (nr_sects != 0) {
bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
gfp_mask);
@@ -227,21 +328,20 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
- ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
- nr_sects -= ret >> 9;
- sector += ret >> 9;
- if (ret < (sz << 9))
+ bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+ nr_sects -= bi_size >> 9;
+ sector += bi_size >> 9;
+ if (bi_size < (sz << 9))
break;
}
+ cond_resched();
}
- if (bio) {
- ret = submit_bio_wait(bio);
- bio_put(bio);
- return ret;
- }
- return 0;
+ *biop = bio;
+out:
+ return ret;
}
+EXPORT_SYMBOL(__blkdev_issue_zeroout);
/**
* blkdev_issue_zeroout - zero-fill a block range
@@ -258,26 +358,27 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
* the discard request fail, if the discard flag is not set, or if
* discard_zeroes_data is not supported, this function will resort to
* zeroing the blocks manually, thus provisioning (allocating,
- * anchoring) them. If the block device supports the WRITE SAME command
- * blkdev_issue_zeroout() will use it to optimize the process of
+ * anchoring) them. If the block device supports WRITE ZEROES or WRITE SAME
+ * command(s), blkdev_issue_zeroout() will use it to optimize the process of
* clearing the block range. Otherwise the zeroing will be performed
* using regular WRITE calls.
*/
-
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
- if (discard) {
- if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
- BLKDEV_DISCARD_ZERO))
- return 0;
- }
+ int ret;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
- if (bdev_write_same(bdev) &&
- blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
- ZERO_PAGE(0)) == 0)
- return 0;
+ blk_start_plug(&plug);
+ ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
+ &bio, discard);
+ if (ret == 0 && bio) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+ blk_finish_plug(&plug);
- return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+ return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
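
For context, a minimal caller sketch; with discard=true the fallback chain in __blkdev_issue_zeroout() above is discard (only when it is guaranteed to zero), then WRITE ZEROES, then WRITE SAME of the zero page, then plain zero-filled writes (the wrapper name is hypothetical):

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int zero_range(struct block_device *bdev, sector_t sector,
		      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS,
				    true /* try discard first */);
}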
diff --git a/block/blk-map.c b/block/blk-map.c
index b8657fa8dc9a..0acb6640ead7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
+ rq->cmd_flags &= REQ_OP_MASK;
+ rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -118,6 +120,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct iov_iter i;
int ret;
+ if (!iter_is_iovec(iter))
+ goto fail;
+
if (map_data)
copy = true;
else if (iov_iter_alignment(iter) & align)
@@ -135,11 +140,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
} while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
- rq->cmd_flags |= REQ_COPY_USER;
+ rq->rq_flags |= RQF_COPY_USER;
return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
+fail:
rq->bio = NULL;
return -EINVAL;
}
@@ -232,7 +238,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
if (do_copy)
- rq->cmd_flags |= REQ_COPY_USER;
+ rq->rq_flags |= RQF_COPY_USER;
ret = blk_rq_append_bio(rq, bio);
if (unlikely(ret)) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2642e5fc8b69..182398cb1524 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -199,6 +199,10 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
case REQ_OP_SECURE_ERASE:
split = blk_bio_discard_split(q, *bio, bs, &nsegs);
break;
+ case REQ_OP_WRITE_ZEROES:
+ split = NULL;
+ nsegs = (*bio)->bi_phys_segments;
+ break;
case REQ_OP_WRITE_SAME:
split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
break;
@@ -237,15 +241,14 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio)
return 0;
- /*
- * This should probably be returning 0, but blk_add_request_payload()
- * (Christoph!!!!)
- */
- if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
- return 1;
-
- if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return 0;
+ case REQ_OP_WRITE_SAME:
return 1;
+ }
fbio = bio;
cluster = blk_queue_cluster(q);
@@ -402,38 +405,21 @@ new_segment:
*bvprv = *bvec;
}
+static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
+ struct scatterlist *sglist, struct scatterlist **sg)
+{
+ *sg = sglist;
+ sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+ return 1;
+}
+
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
struct bvec_iter iter;
- int nsegs, cluster;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- /*
- * This is a hack - drivers should be neither modifying the
- * biovec, nor relying on bi_vcnt - but because of
- * blk_add_request_payload(), a discard bio may or may not have
- * a payload we need to set up here (thank you Christoph) and
- * bi_vcnt is really the only way of telling if we need to.
- */
- if (!bio->bi_vcnt)
- return 0;
- /* Fall through */
- case REQ_OP_WRITE_SAME:
- *sg = sglist;
- bvec = bio_iovec(bio);
- sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- return 1;
- default:
- break;
- }
+ int cluster = blk_queue_cluster(q), nsegs = 0;
for_each_bio(bio)
bio_for_each_segment(bvec, bio, iter)
@@ -453,10 +439,14 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg = NULL;
int nsegs = 0;
- if (rq->bio)
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+ else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
+ nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+ else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
- if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+ if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
@@ -486,12 +476,19 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* Something must have been wrong if the figured number of
* segment is bigger than number of req's physical segments
*/
- WARN_ON(nsegs > rq->nr_phys_segments);
+ WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
+static void req_set_nomerge(struct request_queue *q, struct request *req)
+{
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+}
+
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
@@ -512,9 +509,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
return 1;
no_merge:
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
+ req_set_nomerge(q, req);
return 0;
}
@@ -528,9 +523,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
+ req_set_nomerge(q, req);
return 0;
}
if (!bio_flagged(req->biotail, BIO_SEG_VALID))
@@ -552,9 +545,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
+ req_set_nomerge(q, req);
return 0;
}
if (!bio_flagged(bio, BIO_SEG_VALID))
@@ -634,7 +625,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;
- if (rq->cmd_flags & REQ_MIXED_MERGE)
+ if (rq->rq_flags & RQF_MIXED_MERGE)
return;
/*
@@ -647,7 +638,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
bio->bi_opf |= ff;
}
- rq->cmd_flags |= REQ_MIXED_MERGE;
+ rq->rq_flags |= RQF_MIXED_MERGE;
}
static void blk_account_io_merge(struct request *req)
@@ -709,7 +700,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
* makes sure that all involved bios have mixable attributes
* set properly.
*/
- if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+ if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
(req->cmd_flags & REQ_FAILFAST_MASK) !=
(next->cmd_flags & REQ_FAILFAST_MASK)) {
blk_rq_set_mixed_merge(req);
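
The hunks above route requests that carry exactly one bio_vec (RQF_SPECIAL_PAYLOAD payloads and WRITE_SAME) through __blk_bvec_map_sg(), which is not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, modelled on the removed REQ_OP_WRITE_SAME branch; the signature is inferred from the callers above and may differ from the real one.

	static int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
				     struct scatterlist *sglist,
				     struct scatterlist **sg)
	{
		/* Map exactly one bio_vec, as the old WRITE_SAME special case did. */
		*sg = sglist;
		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		return 1;
	}
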
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 19b1d9c5f07e..8e61e8640e17 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -87,6 +87,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
free_cpumask_var(cpus);
return 0;
}
+EXPORT_SYMBOL_GPL(blk_mq_map_queues);
/*
* We have no quick way of doing reverse lookups. This is only used at
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 01fb455d3377..eacd3af72099 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -259,6 +259,47 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}
+static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_ctx *ctx;
+ unsigned int i;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ blk_stat_init(&ctx->stat[BLK_STAT_READ]);
+ blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+ }
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
+ const char *page, size_t count)
+{
+ blk_mq_stat_clear(hctx);
+ return count;
+}
+
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+ return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+ pre, (long long) stat->nr_samples,
+ (long long) stat->mean, (long long) stat->min,
+ (long long) stat->max);
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ struct blk_rq_stat stat[2];
+ ssize_t ret;
+
+ blk_stat_init(&stat[BLK_STAT_READ]);
+ blk_stat_init(&stat[BLK_STAT_WRITE]);
+
+ blk_hctx_stat_get(hctx, stat);
+
+ ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+ ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -317,6 +358,11 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.show = blk_mq_hw_sysfs_poll_show,
.store = blk_mq_hw_sysfs_poll_store,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
+ .attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
+ .show = blk_mq_hw_sysfs_stat_show,
+ .store = blk_mq_hw_sysfs_stat_store,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -327,6 +373,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
&blk_mq_hw_sysfs_poll.attr,
+ &blk_mq_hw_sysfs_stat.attr,
NULL,
};
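
The new per-hctx "stats" attribute is both readable and writable: a read reports the aggregated read/write latency statistics in the format produced by print_stat() above, and any write clears them via blk_mq_stat_clear(). A small userspace sketch of reading the file follows; the device name and hctx index in the path are only examples.

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative path; substitute the real device and hctx number. */
		FILE *f = fopen("/sys/block/nvme0n1/mq/0/stats", "r");
		char line[256];

		if (!f)
			return 1;
		/*
		 * Expected output, per print_stat():
		 *   read : samples=..., mean=..., min=..., max=...
		 *   write: samples=..., mean=..., min=..., max=...
		 */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
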
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f3d27a6dee09..4bf850e8d6b5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,8 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#include "blk-stat.h"
+#include "blk-wbt.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
@@ -115,6 +117,33 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent the struct request end_io()
+ * callback from being invoked. Additionally, new queue_rq() calls may
+ * still occur unless the queue has been stopped first.
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ bool rcu = false;
+
+ blk_mq_stop_hw_queues(q);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ synchronize_srcu(&hctx->queue_rq_srcu);
+ else
+ rcu = true;
+ }
+ if (rcu)
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
+
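
A minimal sketch of how a driver might pair the new blk_mq_quiesce_queue() with the existing restart helpers when it needs to change state that its queue_rq() handler reads; the driver-side function and the state being changed are hypothetical.

	static void my_driver_reconfigure(struct request_queue *q)
	{
		/*
		 * Stops the hardware queues and waits for all ongoing
		 * ->queue_rq() calls to finish (RCU/SRCU, as above).
		 */
		blk_mq_quiesce_queue(q);

		/* ... update whatever state ->queue_rq() dereferences ... */

		/* Restart the queues that blk_mq_quiesce_queue() stopped. */
		blk_mq_start_stopped_hw_queues(q, true);
	}
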
void blk_mq_wake_waiters(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
@@ -139,17 +168,15 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
EXPORT_SYMBOL(blk_mq_can_queue);
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
- struct request *rq, int op,
- unsigned int op_flags)
+ struct request *rq, unsigned int op)
{
- if (blk_queue_io_stat(q))
- op_flags |= REQ_IO_STAT;
-
INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = q;
rq->mq_ctx = ctx;
- req_set_op_attrs(rq, op, op_flags);
+ rq->cmd_flags = op;
+ if (blk_queue_io_stat(q))
+ rq->rq_flags |= RQF_IO_STAT;
/* do not touch atomic flags, it needs atomic ops against the timer */
rq->cpu = -1;
INIT_HLIST_NODE(&rq->hash);
@@ -184,11 +211,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->end_io_data = NULL;
rq->next_rq = NULL;
- ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+ ctx->rq_dispatched[op_is_sync(op)]++;
}
static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
{
struct request *rq;
unsigned int tag;
@@ -198,12 +225,12 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
rq = data->hctx->tags->rqs[tag];
if (blk_mq_tag_busy(data->hctx)) {
- rq->cmd_flags = REQ_MQ_INFLIGHT;
+ rq->rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
}
rq->tag = tag;
- blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+ blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
return rq;
}
@@ -226,7 +253,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
blk_mq_put_ctx(ctx);
if (!rq) {
@@ -278,7 +305,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
if (!rq) {
ret = -EWOULDBLOCK;
goto out_queue_exit;
@@ -298,11 +325,14 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
const int tag = rq->tag;
struct request_queue *q = rq->q;
- if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+ if (rq->rq_flags & RQF_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
- rq->cmd_flags = 0;
+
+ wbt_done(q->rq_wb, &rq->issue_stat);
+ rq->rq_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
blk_mq_put_tag(hctx, ctx, tag);
blk_queue_exit(q);
}
@@ -328,6 +358,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
blk_account_io_done(rq);
if (rq->end_io) {
+ wbt_done(rq->q->rq_wb, &rq->issue_stat);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -378,10 +409,27 @@ static void blk_mq_ipi_complete_request(struct request *rq)
put_cpu();
}
+static void blk_mq_stat_add(struct request *rq)
+{
+ if (rq->rq_flags & RQF_STATS) {
+ /*
+ * We could use rq->mq_ctx here, but there's less of a risk
+ * of races if we have the completion event add the stats
+ * to the local software queue.
+ */
+ struct blk_mq_ctx *ctx;
+
+ ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
+ blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
+ }
+}
+
static void __blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ blk_mq_stat_add(rq);
+
if (!q->softirq_done_fn)
blk_mq_end_request(rq, rq->errors);
else
@@ -425,6 +473,12 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
+ if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+ blk_stat_set_issue_time(&rq->issue_stat);
+ rq->rq_flags |= RQF_STATS;
+ wbt_issue(q->rq_wb, &rq->issue_stat);
+ }
+
blk_add_timer(rq);
/*
@@ -460,6 +514,7 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
+ wbt_requeue(q->rq_wb, &rq->issue_stat);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -467,12 +522,12 @@ static void __blk_mq_requeue_request(struct request *rq)
}
}
-void blk_mq_requeue_request(struct request *rq)
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
__blk_mq_requeue_request(rq);
BUG_ON(blk_queued_rq(rq));
- blk_mq_add_to_requeue_list(rq, true);
+ blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
@@ -489,10 +544,10 @@ static void blk_mq_requeue_work(struct work_struct *work)
spin_unlock_irqrestore(&q->requeue_lock, flags);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
- if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+ if (!(rq->rq_flags & RQF_SOFTBARRIER))
continue;
- rq->cmd_flags &= ~REQ_SOFTBARRIER;
+ rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
blk_mq_insert_request(rq, true, false, false);
}
@@ -503,14 +558,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
blk_mq_insert_request(rq, false, false, false);
}
- /*
- * Use the start variant of queue running here, so that running
- * the requeue work will kick stopped queues.
- */
- blk_mq_start_hw_queues(q);
+ blk_mq_run_hw_queues(q, false);
}
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list)
{
struct request_queue *q = rq->q;
unsigned long flags;
@@ -519,24 +571,21 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
* We abuse this flag that is otherwise used by the I/O scheduler to
* request head insertion from the workqueue.
*/
- BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+ BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
spin_lock_irqsave(&q->requeue_lock, flags);
if (at_head) {
- rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->requeue_list);
} else {
list_add_tail(&rq->queuelist, &q->requeue_list);
}
spin_unlock_irqrestore(&q->requeue_lock, flags);
-}
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
-void blk_mq_cancel_requeue_work(struct request_queue *q)
-{
- cancel_delayed_work_sync(&q->requeue_work);
+ if (kick_requeue_list)
+ blk_mq_kick_requeue_list(q);
}
-EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
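
With the new kick_requeue_list argument, a caller can requeue a batch of requests without kicking the requeue work for each one and then run it once at the end. A sketch of that pattern; the batching function itself is hypothetical.

	static void my_driver_requeue_batch(struct request_queue *q,
					    struct list_head *rqs)
	{
		struct request *rq, *next;

		/* Queue each request for requeue, but defer the kick... */
		list_for_each_entry_safe(rq, next, rqs, queuelist) {
			list_del_init(&rq->queuelist);
			blk_mq_requeue_request(rq, false);
		}

		/* ...then run the requeue work once for the whole batch. */
		blk_mq_kick_requeue_list(q);
	}
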
void blk_mq_kick_requeue_list(struct request_queue *q)
{
@@ -772,44 +821,13 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
-/*
- * Run this hardware queue, pulling any software queues mapped to it in.
- * Note that this function currently has various problems around ordering
- * of IO. In particular, we'd like FIFO behaviour on handling existing
- * items on the hctx->dispatch list. Ignore that for now.
- */
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
struct request_queue *q = hctx->queue;
struct request *rq;
- LIST_HEAD(rq_list);
LIST_HEAD(driver_list);
struct list_head *dptr;
- int queued;
-
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
- return;
-
- WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
- cpu_online(hctx->next_cpu));
-
- hctx->run++;
-
- /*
- * Touch any software queue that has pending entries.
- */
- flush_busy_ctxs(hctx, &rq_list);
-
- /*
- * If we have previous entries on our dispatch list, grab them
- * and stuff them at the front for more fair dispatch.
- */
- if (!list_empty_careful(&hctx->dispatch)) {
- spin_lock(&hctx->lock);
- if (!list_empty(&hctx->dispatch))
- list_splice_init(&hctx->dispatch, &rq_list);
- spin_unlock(&hctx->lock);
- }
+ int queued, ret = BLK_MQ_RQ_QUEUE_OK;
/*
* Start off with dptr being NULL, so we start the first request
@@ -821,16 +839,15 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
* Now process all the entries, sending them to the driver.
*/
queued = 0;
- while (!list_empty(&rq_list)) {
+ while (!list_empty(list)) {
struct blk_mq_queue_data bd;
- int ret;
- rq = list_first_entry(&rq_list, struct request, queuelist);
+ rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
bd.rq = rq;
bd.list = dptr;
- bd.last = list_empty(&rq_list);
+ bd.last = list_empty(list);
ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
@@ -838,7 +855,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
queued++;
break;
case BLK_MQ_RQ_QUEUE_BUSY:
- list_add(&rq->queuelist, &rq_list);
+ list_add(&rq->queuelist, list);
__blk_mq_requeue_request(rq);
break;
default:
@@ -856,7 +873,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
* We've done the first request. If we have more than 1
* left in the list, set dptr to defer issue.
*/
- if (!dptr && rq_list.next != rq_list.prev)
+ if (!dptr && list->next != list->prev)
dptr = &driver_list;
}
@@ -866,10 +883,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
* Any items that need requeuing? Stuff them into hctx->dispatch,
* that is where we will continue on next queue run.
*/
- if (!list_empty(&rq_list)) {
+ if (!list_empty(list)) {
spin_lock(&hctx->lock);
- list_splice(&rq_list, &hctx->dispatch);
+ list_splice(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
+
/*
* the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
* it's possible the queue is stopped and restarted again
@@ -881,6 +899,61 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
**/
blk_mq_run_hw_queue(hctx, true);
}
+
+ return ret != BLK_MQ_RQ_QUEUE_BUSY;
+}
+
+/*
+ * Run this hardware queue, pulling any software queues mapped to it in.
+ * Note that this function currently has various problems around ordering
+ * of IO. In particular, we'd like FIFO behaviour on handling existing
+ * items on the hctx->dispatch list. Ignore that for now.
+ */
+static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
+{
+ LIST_HEAD(rq_list);
+ LIST_HEAD(driver_list);
+
+ if (unlikely(blk_mq_hctx_stopped(hctx)))
+ return;
+
+ hctx->run++;
+
+ /*
+ * Touch any software queue that has pending entries.
+ */
+ flush_busy_ctxs(hctx, &rq_list);
+
+ /*
+ * If we have previous entries on our dispatch list, grab them
+ * and stuff them at the front for more fair dispatch.
+ */
+ if (!list_empty_careful(&hctx->dispatch)) {
+ spin_lock(&hctx->lock);
+ if (!list_empty(&hctx->dispatch))
+ list_splice_init(&hctx->dispatch, &rq_list);
+ spin_unlock(&hctx->lock);
+ }
+
+ blk_mq_dispatch_rq_list(hctx, &rq_list);
+}
+
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ int srcu_idx;
+
+ WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+ cpu_online(hctx->next_cpu));
+
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+ rcu_read_lock();
+ blk_mq_process_rq_list(hctx);
+ rcu_read_unlock();
+ } else {
+ srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+ blk_mq_process_rq_list(hctx);
+ srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+ }
}
/*
@@ -895,7 +968,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
- int cpu = hctx->next_cpu, next_cpu;
+ int next_cpu;
next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -903,8 +976,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
- return cpu;
}
return hctx->next_cpu;
@@ -912,8 +983,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
- !blk_mq_hw_queue_mapped(hctx)))
+ if (unlikely(blk_mq_hctx_stopped(hctx) ||
+ !blk_mq_hw_queue_mapped(hctx)))
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -938,7 +1009,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
queue_for_each_hw_ctx(q, hctx, i) {
if ((!blk_mq_hctx_has_pending(hctx) &&
list_empty_careful(&hctx->dispatch)) ||
- test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ blk_mq_hctx_stopped(hctx))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -946,6 +1017,26 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);
+/**
+ * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
+ * @q: request queue.
+ *
+ * The caller is responsible for serializing this function against
+ * blk_mq_{start,stop}_hw_queue().
+ */
+bool blk_mq_queue_stopped(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ if (blk_mq_hctx_stopped(hctx))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(blk_mq_queue_stopped);
+
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
cancel_work(&hctx->run_work);
@@ -982,18 +1073,23 @@ void blk_mq_start_hw_queues(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);
+void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ if (!blk_mq_hctx_stopped(hctx))
+ return;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+ blk_mq_run_hw_queue(hctx, async);
+}
+EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
+
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
- continue;
-
- clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- blk_mq_run_hw_queue(hctx, async);
- }
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@@ -1155,7 +1251,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
- blk_account_io_start(rq, 1);
+ blk_account_io_start(rq, true);
}
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1190,40 +1286,27 @@ insert_rq:
}
}
-struct blk_map_ctx {
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
-};
-
static struct request *blk_mq_map_request(struct request_queue *q,
struct bio *bio,
- struct blk_map_ctx *data)
+ struct blk_mq_alloc_data *data)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
- int op = bio_data_dir(bio);
- int op_flags = 0;
- struct blk_mq_alloc_data alloc_data;
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
- if (rw_is_sync(bio_op(bio), bio->bi_opf))
- op_flags |= REQ_SYNC;
-
- trace_block_getrq(q, bio, op);
- blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
+ trace_block_getrq(q, bio, bio->bi_opf);
+ blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
+ rq = __blk_mq_alloc_request(data, bio->bi_opf);
- data->hctx = alloc_data.hctx;
- data->ctx = alloc_data.ctx;
data->hctx->queued++;
return rq;
}
-static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
@@ -1235,6 +1318,9 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
};
blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
+ if (blk_mq_hctx_stopped(hctx))
+ goto insert;
+
/*
* For OK queue, we are done. For error, kill it. Any other
* error (busy), just add it to our list as we previously
@@ -1243,7 +1329,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
ret = q->mq_ops->queue_rq(hctx, &bd);
if (ret == BLK_MQ_RQ_QUEUE_OK) {
*cookie = new_cookie;
- return 0;
+ return;
}
__blk_mq_requeue_request(rq);
@@ -1252,10 +1338,11 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*cookie = BLK_QC_T_NONE;
rq->errors = -EIO;
blk_mq_end_request(rq, rq->errors);
- return 0;
+ return;
}
- return -1;
+insert:
+ blk_mq_insert_request(rq, false, true, true);
}
/*
@@ -1265,14 +1352,15 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
- struct blk_map_ctx data;
+ struct blk_mq_alloc_data data;
struct request *rq;
- unsigned int request_count = 0;
+ unsigned int request_count = 0, srcu_idx;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1287,9 +1375,15 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
+ wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ __wbt_done(q->rq_wb, wb_acct);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->issue_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1312,7 +1406,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);
/*
- * We do limited pluging. If the bio can be merged, do that.
+ * We do limited plugging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
* issued. So the plug list will have one request at most
*/
@@ -1332,9 +1426,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_put_ctx(data.ctx);
if (!old_rq)
goto done;
- if (!blk_mq_direct_issue_request(old_rq, &cookie))
- goto done;
- blk_mq_insert_request(old_rq, false, true, true);
+
+ if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
+ rcu_read_lock();
+ blk_mq_try_issue_directly(old_rq, &cookie);
+ rcu_read_unlock();
+ } else {
+ srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
+ blk_mq_try_issue_directly(old_rq, &cookie);
+ srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
+ }
goto done;
}
@@ -1359,13 +1460,14 @@ done:
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;
- struct blk_map_ctx data;
+ struct blk_mq_alloc_data data;
struct request *rq;
blk_qc_t cookie;
+ unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1382,9 +1484,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
+ wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq))
+ if (unlikely(!rq)) {
+ __wbt_done(q->rq_wb, wb_acct);
return BLK_QC_T_NONE;
+ }
+
+ wbt_track(&rq->issue_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1401,13 +1509,25 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
*/
plug = current->plug;
if (plug) {
+ struct request *last = NULL;
+
blk_mq_bio_to_request(rq, bio);
+
+ /*
+ * @request_count may have become stale if the task was scheduled
+ * out, so check the list again.
+ */
+ if (list_empty(&plug->mq_list))
+ request_count = 0;
if (!request_count)
trace_block_plug(q);
+ else
+ last = list_entry_rq(plug->mq_list.prev);
blk_mq_put_ctx(data.ctx);
- if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+ blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}
@@ -1485,7 +1605,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&tags->page_list);
tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
@@ -1511,7 +1631,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1532,7 +1652,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
* Allow kmemleak to scan these pages as they contain pointers
* to additional allocations like via ops->init_request().
*/
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
@@ -1613,6 +1733,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ cleanup_srcu_struct(&hctx->queue_rq_srcu);
+
blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
sbitmap_free(&hctx->ctx_map);
@@ -1693,6 +1816,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
flush_start_tag + hctx_idx, node))
goto free_fq;
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ init_srcu_struct(&hctx->queue_rq_srcu);
+
return 0;
free_fq:
@@ -1723,6 +1849,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
spin_lock_init(&__ctx->lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
+ blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
+ blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
/* If the cpu isn't online, the cpu is mapped to first hctx */
if (!cpu_online(i))
@@ -1742,7 +1870,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
static void blk_mq_map_swqueue(struct request_queue *q,
const struct cpumask *online_mask)
{
- unsigned int i;
+ unsigned int i, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -1765,6 +1893,21 @@ static void blk_mq_map_swqueue(struct request_queue *q,
if (!cpumask_test_cpu(i, online_mask))
continue;
+ hctx_idx = q->mq_map[i];
+ /* unmapped hw queue can be remapped after CPU topo changed */
+ if (!set->tags[hctx_idx]) {
+ set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
+
+ /*
+ * If tags initialization fails for some hctx,
+ * that hctx won't be brought online. In this
+ * case, remap the current ctx to hctx[0] which
+ * is guaranteed to always have tags allocated
+ */
+ if (!set->tags[hctx_idx])
+ q->mq_map[i] = 0;
+ }
+
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@@ -1781,7 +1924,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
* disable it and free the request entries.
*/
if (!hctx->nr_ctx) {
- if (set->tags[i]) {
+ /* Never unmap queue 0. We need it as a
+ * fallback in case tag allocation for a
+ * newly remapped hctx fails.
+ */
+ if (i && set->tags[i]) {
blk_mq_free_rq_map(set, set->tags[i], i);
set->tags[i] = NULL;
}
@@ -1789,9 +1936,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;
}
- /* unmapped hw queue can be remapped after CPU topo changed */
- if (!set->tags[i])
- set->tags[i] = blk_mq_init_rq_map(set, i);
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
@@ -2018,6 +2162,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
*/
q->nr_requests = set->queue_depth;
+ /*
+ * Default to classic polling
+ */
+ q->poll_nsec = -1;
+
if (set->ops->complete)
blk_queue_softirq_done(q, set->ops->complete);
@@ -2053,6 +2202,8 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
+ wbt_exit(q);
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
@@ -2099,16 +2250,9 @@ static void blk_mq_queue_reinit_work(void)
*/
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_freeze_queue_start(q);
- list_for_each_entry(q, &all_q_list, all_q_node) {
+ list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_freeze_queue_wait(q);
- /*
- * timeout handler can't touch hw queue during the
- * reinitialization
- */
- del_timer_sync(&q->timeout);
- }
-
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q, &cpuhp_online_new);
@@ -2353,6 +2497,165 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
+static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct blk_rq_stat stat[2];
+ unsigned long ret = 0;
+
+ /*
+ * If stats collection isn't on, don't sleep but turn it on for
+ * future users
+ */
+ if (!blk_stat_enable(q))
+ return 0;
+
+ /*
+ * We shouldn't have to do this once per IO; optimize this to
+ * just use the current window of stats until it changes
+ */
+ memset(&stat, 0, sizeof(stat));
+ blk_hctx_stat_get(hctx, stat);
+
+ /*
+ * As an optimistic guess, use half of the mean service time
+ * for this type of request. We can (and should) make this smarter.
+ * For instance, if the completion latencies are tight, we can
+ * get closer than just half the mean. This is especially
+ * important on devices where the completion latencies are longer
+ * than ~10 usec.
+ */
+ if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
+ ret = (stat[BLK_STAT_READ].mean + 1) / 2;
+ else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
+ ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+
+ return ret;
+}
+
+static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct hrtimer_sleeper hs;
+ enum hrtimer_mode mode;
+ unsigned int nsecs;
+ ktime_t kt;
+
+ if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+ return false;
+
+ /*
+ * poll_nsec can be:
+ *
+ * -1: don't ever hybrid sleep
+ * 0: use half of prev avg
+ * >0: use this specific value
+ */
+ if (q->poll_nsec == -1)
+ return false;
+ else if (q->poll_nsec > 0)
+ nsecs = q->poll_nsec;
+ else
+ nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+
+ if (!nsecs)
+ return false;
+
+ set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+
+ /*
+ * This will be replaced with the stats tracking code, using
+ * 'avg_completion_time / 2' as the pre-sleep target.
+ */
+ kt = ktime_set(0, nsecs);
+
+ mode = HRTIMER_MODE_REL;
+ hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+ hrtimer_set_expires(&hs.timer, kt);
+
+ hrtimer_init_sleeper(&hs, current);
+ do {
+ if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ hrtimer_start_expires(&hs.timer, mode);
+ if (hs.task)
+ io_schedule();
+ hrtimer_cancel(&hs.timer);
+ mode = HRTIMER_MODE_ABS;
+ } while (hs.task && !signal_pending(current));
+
+ __set_current_state(TASK_RUNNING);
+ destroy_hrtimer_on_stack(&hs.timer);
+ return true;
+}
+
+static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+ struct request_queue *q = hctx->queue;
+ long state;
+
+ /*
+ * If we sleep, have the caller restart the poll loop to reset
+ * the state. Like for the other success return cases, the
+ * caller is responsible for checking if the IO completed. If
+ * the IO isn't complete, we'll get called again and will go
+ * straight to the busy poll loop.
+ */
+ if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
+ return true;
+
+ hctx->poll_considered++;
+
+ state = current->state;
+ while (!need_resched()) {
+ int ret;
+
+ hctx->poll_invoked++;
+
+ ret = q->mq_ops->poll(hctx, rq->tag);
+ if (ret > 0) {
+ hctx->poll_success++;
+ set_current_state(TASK_RUNNING);
+ return true;
+ }
+
+ if (signal_pending_state(state, current))
+ set_current_state(TASK_RUNNING);
+
+ if (current->state == TASK_RUNNING)
+ return true;
+ if (ret < 0)
+ break;
+ cpu_relax();
+ }
+
+ return false;
+}
+
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_plug *plug;
+ struct request *rq;
+
+ if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+ !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ return false;
+
+ plug = current->plug;
+ if (plug)
+ blk_flush_plug_list(plug, false);
+
+ hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+ rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+
+ return __blk_mq_poll(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll);
+
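
blk_mq_poll() takes the blk_qc_t cookie handed back at submission time and, optionally after the hybrid sleep above, busy-polls the driver until it reports progress. Below is a sketch of the loop a submitter might run; the completion test is a placeholder for whatever mechanism the caller uses to detect that its I/O finished.

	static void my_wait_for_completion(struct request_queue *q, blk_qc_t cookie,
					   bool (*io_done)(void *), void *data)
	{
		while (!io_done(data)) {
			/* False means the cookie isn't pollable or we should yield. */
			if (!blk_mq_poll(q, cookie))
				break;
			cpu_relax();
		}
	}
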
void blk_mq_disable_hotplug(void)
{
mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e5d25249028c..63e9116cddbd 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,6 +1,8 @@
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
+#include "blk-stat.h"
+
struct blk_mq_tag_set;
struct blk_mq_ctx {
@@ -18,6 +20,7 @@ struct blk_mq_ctx {
/* incremented at completion time */
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
+ struct blk_rq_stat stat[2];
struct request_queue *queue;
struct kobject kobj;
@@ -28,6 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
/*
* CPU hotplug helpers
@@ -38,7 +42,6 @@ void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
*/
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
@@ -100,6 +103,11 @@ static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
data->hctx = hctx;
}
+static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
+{
+ return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
return hctx->nr_ctx && hctx->tags;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f679ae122843..529e55f52a03 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include "blk.h"
+#include "blk-wbt.h"
unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
@@ -95,6 +96,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
+ lim->max_write_zeroes_sectors = 0;
lim->max_discard_sectors = 0;
lim->max_hw_discard_sectors = 0;
lim->discard_granularity = 0;
@@ -107,6 +109,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->io_opt = 0;
lim->misaligned = 0;
lim->cluster = 1;
+ lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);
@@ -130,6 +133,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
+ lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -249,6 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors;
+ q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@@ -298,6 +303,19 @@ void blk_queue_max_write_same_sectors(struct request_queue *q,
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
/**
+ * blk_queue_max_write_zeroes_sectors - set max sectors for a single
+ * write zeroes
+ * @q: the request queue for the device
+ * @max_write_zeroes_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+ unsigned int max_write_zeroes_sectors)
+{
+ q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+
+/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
@@ -525,6 +543,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
+ t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
+ b->max_write_zeroes_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -630,6 +650,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->discard_granularity;
}
+ if (b->chunk_sectors)
+ t->chunk_sectors = min_not_zero(t->chunk_sectors,
+ b->chunk_sectors);
+
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
@@ -832,6 +856,19 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
/**
+ * blk_set_queue_depth - tell the block layer about the device queue depth
+ * @q: the request queue for the device
+ * @depth: queue depth
+ */
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
+{
+ q->queue_depth = depth;
+ wbt_set_queue_depth(q->rq_wb, depth);
+}
+EXPORT_SYMBOL(blk_set_queue_depth);
+
+/**
* blk_queue_write_cache - configure queue's write cache
* @q: the request queue for the device
* @wc: write back cache on or off
@@ -851,6 +888,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
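
The new helpers above are intended to be called from a driver's queue setup path. A sketch of such a setup function; the function name and the numeric limits are made up.

	static void my_driver_set_limits(struct request_queue *q)
	{
		/* Advertise REQ_OP_WRITE_ZEROES support, up to 1 MiB per command. */
		blk_queue_max_write_zeroes_sectors(q, 2048);

		/* Tell the block layer (and wbt) how deep the device queue is. */
		blk_set_queue_depth(q, 64);

		/* Volatile write cache with FUA; also updates wbt's cache hint. */
		blk_queue_write_cache(q, true, true);
	}
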
diff --git a/block/blk-stat.c b/block/blk-stat.c
new file mode 100644
index 000000000000..9b43efb8933f
--- /dev/null
+++ b/block/blk-stat.c
@@ -0,0 +1,256 @@
+/*
+ * Block stat tracking code
+ *
+ * Copyright (C) 2016 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/blk-mq.h>
+
+#include "blk-stat.h"
+#include "blk-mq.h"
+
+static void blk_stat_flush_batch(struct blk_rq_stat *stat)
+{
+ const s32 nr_batch = READ_ONCE(stat->nr_batch);
+ const s32 nr_samples = READ_ONCE(stat->nr_samples);
+
+ if (!nr_batch)
+ return;
+ if (!nr_samples)
+ stat->mean = div64_s64(stat->batch, nr_batch);
+ else {
+ stat->mean = div64_s64((stat->mean * nr_samples) +
+ stat->batch,
+ nr_batch + nr_samples);
+ }
+
+ stat->nr_samples += nr_batch;
+ stat->nr_batch = stat->batch = 0;
+}
+
+static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+{
+ if (!src->nr_samples)
+ return;
+
+ blk_stat_flush_batch(src);
+
+ dst->min = min(dst->min, src->min);
+ dst->max = max(dst->max, src->max);
+
+ if (!dst->nr_samples)
+ dst->mean = src->mean;
+ else {
+ dst->mean = div64_s64((src->mean * src->nr_samples) +
+ (dst->mean * dst->nr_samples),
+ dst->nr_samples + src->nr_samples);
+ }
+ dst->nr_samples += src->nr_samples;
+}
+
+static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ uint64_t latest = 0;
+ int i, j, nr;
+
+ blk_stat_init(&dst[BLK_STAT_READ]);
+ blk_stat_init(&dst[BLK_STAT_WRITE]);
+
+ nr = 0;
+ do {
+ uint64_t newest = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+ blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
+ if (!ctx->stat[BLK_STAT_READ].nr_samples &&
+ !ctx->stat[BLK_STAT_WRITE].nr_samples)
+ continue;
+ if (ctx->stat[BLK_STAT_READ].time > newest)
+ newest = ctx->stat[BLK_STAT_READ].time;
+ if (ctx->stat[BLK_STAT_WRITE].time > newest)
+ newest = ctx->stat[BLK_STAT_WRITE].time;
+ }
+ }
+
+ /*
+ * No samples
+ */
+ if (!newest)
+ break;
+
+ if (newest > latest)
+ latest = newest;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ if (ctx->stat[BLK_STAT_READ].time == newest) {
+ blk_stat_sum(&dst[BLK_STAT_READ],
+ &ctx->stat[BLK_STAT_READ]);
+ nr++;
+ }
+ if (ctx->stat[BLK_STAT_WRITE].time == newest) {
+ blk_stat_sum(&dst[BLK_STAT_WRITE],
+ &ctx->stat[BLK_STAT_WRITE]);
+ nr++;
+ }
+ }
+ }
+ /*
+ * If we race on finding an entry, just loop back again.
+ * Should be very rare.
+ */
+ } while (!nr);
+
+ dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+}
+
+void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+ if (q->mq_ops)
+ blk_mq_stat_get(q, dst);
+ else {
+ blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
+ blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
+ memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
+ sizeof(struct blk_rq_stat));
+ memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
+ sizeof(struct blk_rq_stat));
+ }
+}
+
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
+{
+ struct blk_mq_ctx *ctx;
+ unsigned int i, nr;
+
+ nr = 0;
+ do {
+ uint64_t newest = 0;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+ blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
+ if (!ctx->stat[BLK_STAT_READ].nr_samples &&
+ !ctx->stat[BLK_STAT_WRITE].nr_samples)
+ continue;
+
+ if (ctx->stat[BLK_STAT_READ].time > newest)
+ newest = ctx->stat[BLK_STAT_READ].time;
+ if (ctx->stat[BLK_STAT_WRITE].time > newest)
+ newest = ctx->stat[BLK_STAT_WRITE].time;
+ }
+
+ if (!newest)
+ break;
+
+ hctx_for_each_ctx(hctx, ctx, i) {
+ if (ctx->stat[BLK_STAT_READ].time == newest) {
+ blk_stat_sum(&dst[BLK_STAT_READ],
+ &ctx->stat[BLK_STAT_READ]);
+ nr++;
+ }
+ if (ctx->stat[BLK_STAT_WRITE].time == newest) {
+ blk_stat_sum(&dst[BLK_STAT_WRITE],
+ &ctx->stat[BLK_STAT_WRITE]);
+ nr++;
+ }
+ }
+ /*
+ * If we race on finding an entry, just loop back again.
+ * Should be very rare, as the window is only updated
+ * occasionally
+ */
+ } while (!nr);
+}
+
+static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
+{
+ stat->min = -1ULL;
+ stat->max = stat->nr_samples = stat->mean = 0;
+ stat->batch = stat->nr_batch = 0;
+ stat->time = time_now & BLK_STAT_NSEC_MASK;
+}
+
+void blk_stat_init(struct blk_rq_stat *stat)
+{
+ __blk_stat_init(stat, ktime_to_ns(ktime_get()));
+}
+
+static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
+{
+ return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
+}
+
+bool blk_stat_is_current(struct blk_rq_stat *stat)
+{
+ return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
+}
+
+void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
+{
+ s64 now, value;
+
+ now = __blk_stat_time(ktime_to_ns(ktime_get()));
+ if (now < blk_stat_time(&rq->issue_stat))
+ return;
+
+ if (!__blk_stat_is_current(stat, now))
+ __blk_stat_init(stat, now);
+
+ value = now - blk_stat_time(&rq->issue_stat);
+ if (value > stat->max)
+ stat->max = value;
+ if (value < stat->min)
+ stat->min = value;
+
+ if (stat->batch + value < stat->batch ||
+ stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
+ blk_stat_flush_batch(stat);
+
+ stat->batch += value;
+ stat->nr_batch++;
+}
+
+void blk_stat_clear(struct request_queue *q)
+{
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ blk_stat_init(&ctx->stat[BLK_STAT_READ]);
+ blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+ }
+ }
+ } else {
+ blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
+ blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+ }
+}
+
+void blk_stat_set_issue_time(struct blk_issue_stat *stat)
+{
+ stat->time = (stat->time & BLK_STAT_MASK) |
+ (ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
+}
+
+/*
+ * Enable stat tracking; return whether it was already enabled
+ */
+bool blk_stat_enable(struct request_queue *q)
+{
+ if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+ set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/block/blk-stat.h b/block/blk-stat.h
new file mode 100644
index 000000000000..a2050a0a5314
--- /dev/null
+++ b/block/blk-stat.h
@@ -0,0 +1,42 @@
+#ifndef BLK_STAT_H
+#define BLK_STAT_H
+
+/*
+ * ~0.13s window as a power-of-2 (2^27 nsecs)
+ */
+#define BLK_STAT_NSEC 134217728ULL
+#define BLK_STAT_NSEC_MASK ~(BLK_STAT_NSEC - 1)
+
+/*
+ * Upper 3 bits can be used elsewhere
+ */
+#define BLK_STAT_RES_BITS 3
+#define BLK_STAT_SHIFT (64 - BLK_STAT_RES_BITS)
+#define BLK_STAT_TIME_MASK ((1ULL << BLK_STAT_SHIFT) - 1)
+#define BLK_STAT_MASK ~BLK_STAT_TIME_MASK
+
+enum {
+ BLK_STAT_READ = 0,
+ BLK_STAT_WRITE,
+};
+
+void blk_stat_add(struct blk_rq_stat *, struct request *);
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
+void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
+void blk_stat_clear(struct request_queue *);
+void blk_stat_init(struct blk_rq_stat *);
+bool blk_stat_is_current(struct blk_rq_stat *);
+void blk_stat_set_issue_time(struct blk_issue_stat *);
+bool blk_stat_enable(struct request_queue *);
+
+static inline u64 __blk_stat_time(u64 time)
+{
+ return time & BLK_STAT_TIME_MASK;
+}
+
+static inline u64 blk_stat_time(struct blk_issue_stat *stat)
+{
+ return __blk_stat_time(stat->time);
+}
+
+#endif
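
The stats code buckets samples into ~134 ms windows by masking the timestamp with BLK_STAT_NSEC_MASK and keeps the top BLK_STAT_RES_BITS of the issue time free for flags. A small standalone illustration of the window bucketing; the timestamps are arbitrary example values.

	#include <stdio.h>
	#include <stdint.h>

	#define BLK_STAT_NSEC		134217728ULL		/* 2^27 ns, ~0.13 s */
	#define BLK_STAT_NSEC_MASK	(~(BLK_STAT_NSEC - 1))

	int main(void)
	{
		uint64_t t1 = 1000000000ULL;	/* 1.000 s */
		uint64_t t2 = 1050000000ULL;	/* 1.050 s, same 2^27 ns window */
		uint64_t t3 = 1100000000ULL;	/* 1.100 s, next window */

		/* Two samples land in the same window iff their masked times match. */
		printf("t1/t2 same window: %d\n",
		       (t1 & BLK_STAT_NSEC_MASK) == (t2 & BLK_STAT_NSEC_MASK));
		printf("t1/t3 same window: %d\n",
		       (t1 & BLK_STAT_NSEC_MASK) == (t3 & BLK_STAT_NSEC_MASK));
		return 0;
	}
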
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9cc8d7c5439a..1dbce057592d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wbt.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+ int err;
+ s64 v;
+
+ err = kstrtos64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -130,6 +144,11 @@ static ssize_t queue_physical_block_size_show(struct request_queue *q, char *pag
return queue_var_show(queue_physical_block_size(q), page);
}
+static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->limits.chunk_sectors, page);
+}
+
static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_io_min(q), page);
@@ -192,6 +211,11 @@ static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
(unsigned long long)q->limits.max_write_same_sectors << 9);
}
+static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
+}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
@@ -212,6 +236,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
+ q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(q->queue_lock);
return ret;
@@ -257,6 +282,18 @@ QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
+static ssize_t queue_zoned_show(struct request_queue *q, char *page)
+{
+ switch (blk_queue_zoned_model(q)) {
+ case BLK_ZONED_HA:
+ return sprintf(page, "host-aware\n");
+ case BLK_ZONED_HM:
+ return sprintf(page, "host-managed\n");
+ default:
+ return sprintf(page, "none\n");
+ }
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -319,6 +356,38 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret;
}
+static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
+{
+ int val;
+
+ if (q->poll_nsec == -1)
+ val = -1;
+ else
+ val = q->poll_nsec / 1000;
+
+ return sprintf(page, "%d\n", val);
+}
+
+static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ int err, val;
+
+ if (!q->mq_ops || !q->mq_ops->poll)
+ return -EINVAL;
+
+ err = kstrtoint(page, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (val == -1)
+ q->poll_nsec = -1;
+ else
+ q->poll_nsec = val * 1000;
+
+ return count;
+}
+
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
@@ -347,6 +416,50 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ struct rq_wb *rwb;
+ ssize_t ret;
+ s64 val;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+ if (val < -1)
+ return -EINVAL;
+
+ rwb = q->rq_wb;
+ if (!rwb) {
+ ret = wbt_init(q);
+ if (ret)
+ return ret;
+
+ rwb = q->rq_wb;
+ if (!rwb)
+ return -EINVAL;
+ }
+
+ if (val == -1)
+ rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+ else if (val >= 0)
+ rwb->min_lat_nsec = val * 1000ULL;
+
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ rwb->enable_state = WBT_STATE_ON_MANUAL;
+
+ wbt_update_limits(rwb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -384,6 +497,26 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_dax(q), page);
}
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+ return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+ pre, (long long) stat->nr_samples,
+ (long long) stat->mean, (long long) stat->min,
+ (long long) stat->max);
+}
+
+static ssize_t queue_stats_show(struct request_queue *q, char *page)
+{
+ struct blk_rq_stat stat[2];
+ ssize_t ret;
+
+ blk_queue_stat_get(q, stat);
+
+ ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+ ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -443,6 +576,11 @@ static struct queue_sysfs_entry queue_physical_block_size_entry = {
.show = queue_physical_block_size_show,
};
+static struct queue_sysfs_entry queue_chunk_sectors_entry = {
+ .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
+ .show = queue_chunk_sectors_show,
+};
+
static struct queue_sysfs_entry queue_io_min_entry = {
.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
.show = queue_io_min_show,
@@ -479,12 +617,22 @@ static struct queue_sysfs_entry queue_write_same_max_entry = {
.show = queue_write_same_max_show,
};
+static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
+ .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
+ .show = queue_write_zeroes_max_show,
+};
+
static struct queue_sysfs_entry queue_nonrot_entry = {
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_nonrot,
.store = queue_store_nonrot,
};
+static struct queue_sysfs_entry queue_zoned_entry = {
+ .attr = {.name = "zoned", .mode = S_IRUGO },
+ .show = queue_zoned_show,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
.show = queue_nomerges_show,
@@ -515,6 +663,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.store = queue_poll_store,
};
+static struct queue_sysfs_entry queue_poll_delay_entry = {
+ .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_poll_delay_show,
+ .store = queue_poll_delay_store,
+};
+
static struct queue_sysfs_entry queue_wc_entry = {
.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
.show = queue_wc_show,
@@ -526,6 +680,17 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_stats_entry = {
+ .attr = {.name = "stats", .mode = S_IRUGO },
+ .show = queue_stats_show,
+};
+
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -538,6 +703,7 @@ static struct attribute *default_attrs[] = {
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
+ &queue_chunk_sectors_entry.attr,
&queue_io_min_entry.attr,
&queue_io_opt_entry.attr,
&queue_discard_granularity_entry.attr,
@@ -545,7 +711,9 @@ static struct attribute *default_attrs[] = {
&queue_discard_max_hw_entry.attr,
&queue_discard_zeroes_data_entry.attr,
&queue_write_same_max_entry.attr,
+ &queue_write_zeroes_max_entry.attr,
&queue_nonrot_entry.attr,
+ &queue_zoned_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
@@ -553,6 +721,9 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_dax_entry.attr,
+ &queue_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
+ &queue_poll_delay_entry.attr,
NULL,
};
@@ -627,6 +798,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
+ wbt_exit(q);
bdi_exit(&q->backing_dev_info);
blkcg_exit_queue(q);
@@ -667,6 +839,23 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+static void blk_wb_init(struct request_queue *q)
+{
+#ifndef CONFIG_BLK_WBT_MQ
+ if (q->mq_ops)
+ return;
+#endif
+#ifndef CONFIG_BLK_WBT_SQ
+ if (q->request_fn)
+ return;
+#endif
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ wbt_init(q);
+}
+
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -706,6 +895,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_dev(dev, q);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;
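
The new queue attributes are tuned from userspace in microseconds: io_poll_delay takes -1 (classic polling), 0 (adaptive hybrid sleep), or a fixed delay, and wbt_lat_usec takes -1 to fall back to the default latency target or a target in usec. A userspace sketch of setting them; the device path and the values are only examples.

	#include <stdio.h>

	/* Write one value to a queue attribute; path layout is the usual sysfs one. */
	static int set_queue_attr(const char *attr, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "/sys/block/nvme0n1/queue/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%s\n", val);
		return fclose(f);
	}

	int main(void)
	{
		set_queue_attr("io_poll_delay", "0");	 /* adaptive hybrid polling */
		set_queue_attr("wbt_lat_usec", "75000"); /* 75 ms latency target */
		return 0;
	}
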
diff --git a/block/blk-tag.c b/block/blk-tag.c
index f0344e6939d5..bae1decb6ec3 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -270,7 +270,7 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
BUG_ON(tag >= bqt->real_max_depth);
list_del_init(&rq->queuelist);
- rq->cmd_flags &= ~REQ_QUEUED;
+ rq->rq_flags &= ~RQF_QUEUED;
rq->tag = -1;
if (unlikely(bqt->tag_index[tag] == NULL))
@@ -316,7 +316,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
unsigned max_depth;
int tag;
- if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+ if (unlikely((rq->rq_flags & RQF_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
__func__, rq,
@@ -371,7 +371,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
*/
bqt->next_tag = (tag + 1) % bqt->max_depth;
- rq->cmd_flags |= REQ_QUEUED;
+ rq->rq_flags |= RQF_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
blk_start_request(rq);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a3ea8260c94c..a6bb4fe326c3 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -818,13 +818,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
tg->io_disp[rw]++;
/*
- * REQ_THROTTLED is used to prevent the same bio to be throttled
+ * BIO_THROTTLED is used to prevent the same bio from being throttled
* more than once as a throttled bio will go through blk-throtl the
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
*/
- if (!(bio->bi_opf & REQ_THROTTLED))
- bio->bi_opf |= REQ_THROTTLED;
+ if (!bio_flagged(bio, BIO_THROTTLED))
+ bio_set_flag(bio, BIO_THROTTLED);
}
/**
@@ -1401,7 +1401,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
WARN_ON_ONCE(!rcu_read_lock_held());
/* see throtl_charge_bio() */
- if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
+ if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
goto out;
spin_lock_irq(q->queue_lock);
@@ -1480,7 +1480,7 @@ out:
* being issued.
*/
if (!throttled)
- bio->bi_opf &= ~REQ_THROTTLED;
+ bio_clear_flag(bio, BIO_THROTTLED);
return throttled;
}
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
new file mode 100644
index 000000000000..6e82769f4042
--- /dev/null
+++ b/block/blk-wbt.c
@@ -0,0 +1,750 @@
+/*
+ * Buffered writeback throttling, loosely based on CoDel. We can't drop
+ * packets for IO scheduling, so the logic is something like this:
+ *
+ * - Monitor latencies in a defined window of time.
+ * - If the minimum latency in the above window exceeds some target, increment
+ * scaling step and scale down queue depth by a factor of 2x. The monitoring
+ * window is then shrunk to 100 / sqrt(scaling step + 1).
+ * - For any window where we don't have solid data on what the latencies
+ * look like, retain status quo.
+ * - If latencies look good, decrement scaling step.
+ * - If we're only doing writes, allow the scaling step to go negative. This
+ * will temporarily boost write performance, snapping back to a stable
+ * scaling step of 0 if reads show up or the heavy writers finish. Unlike
+ * positive scaling steps where we shrink the monitoring window, a negative
+ * scaling step retains the default step==0 window size.
+ *
+ * Copyright (C) 2016 Jens Axboe
+ *
+ */
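
To make the scaling rules in the header comment above concrete, here is a small standalone userspace sketch (illustration only, not part of the patch) that mirrors the depth and window arithmetic the code below uses, for the default depth of 16 and the default 100 msec window; the helper names are invented for the example.

#include <stdio.h>
#include <math.h>

/* Illustrative only: mirrors the depth/window scaling described above. */
static unsigned int scaled_depth(unsigned int max_depth, int step)
{
	if (step > 0)
		return 1 + ((max_depth - 1) >> step);	/* roughly halve per positive step */
	return 1 + ((max_depth - 1) << -step);		/* boost for a write-only workload */
}

static double scaled_window_msec(double win_msec, int step)
{
	if (step <= 0)
		return win_msec;			/* negative steps keep the default window */
	return win_msec / sqrt((double)(step + 1));
}

int main(void)
{
	for (int step = -1; step <= 3; step++)
		printf("step %2d: depth %2u, window %5.1f msec\n",
		       step, scaled_depth(16, step),
		       scaled_window_msec(100.0, step));
	return 0;
}

The output walks step -1 through 3 (31/16/8/4/2 requests, 100/100/70.7/57.7/50 msec); the real code additionally clamps a boosted, negative-step depth to 3/4 of the device queue depth, as calc_wb_limits() further down shows.
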
+#include <linux/kernel.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <linux/swap.h>
+
+#include "blk-wbt.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/wbt.h>
+
+enum {
+ /*
+ * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
+ * from here depending on device stats
+ */
+ RWB_DEF_DEPTH = 16,
+
+ /*
+ * 100msec window
+ */
+ RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
+
+ /*
+ * Disregard stats, if we don't meet this minimum
+ */
+ RWB_MIN_WRITE_SAMPLES = 3,
+
+ /*
+ * If we have this number of consecutive windows with not enough
+ * information to scale up or down, scale up.
+ */
+ RWB_UNKNOWN_BUMP = 5,
+};
+
+static inline bool rwb_enabled(struct rq_wb *rwb)
+{
+ return rwb && rwb->wb_normal != 0;
+}
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, int below)
+{
+ int cur = atomic_read(v);
+
+ for (;;) {
+ int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
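
The cmpxchg loop above is the classic bounded-increment pattern. A hedged userspace equivalent using C11 atomics (illustration only, not kernel code) looks like this:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative C11 analogue of the cmpxchg loop above. */
static bool inc_below(atomic_int *v, int below)
{
	int cur = atomic_load(v);

	while (cur < below) {
		/* Success: we incremented while staying under the limit. */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
		/* 'cur' was reloaded by the failed CAS; loop and re-check. */
	}
	return false;
}
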
+
+static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+{
+ if (rwb_enabled(rwb)) {
+ const unsigned long cur = jiffies;
+
+ if (cur != *var)
+ *var = cur;
+ }
+}
+
+/*
+ * If a task was rate throttled in balance_dirty_pages() within the last
+ * second or so, use that to indicate a higher cleaning rate.
+ */
+static bool wb_recent_wait(struct rq_wb *rwb)
+{
+ struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+
+ return time_before(jiffies, wb->dirty_sleep + HZ);
+}
+
+static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
+{
+ return &rwb->rq_wait[is_kswapd];
+}
+
+static void rwb_wake_all(struct rq_wb *rwb)
+{
+ int i;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++) {
+ struct rq_wait *rqw = &rwb->rq_wait[i];
+
+ if (waitqueue_active(&rqw->wait))
+ wake_up_all(&rqw->wait);
+ }
+}
+
+void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
+{
+ struct rq_wait *rqw;
+ int inflight, limit;
+
+ if (!(wb_acct & WBT_TRACKED))
+ return;
+
+ rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
+ inflight = atomic_dec_return(&rqw->inflight);
+
+ /*
+ * wbt got disabled with IO in flight. Wake up any potential
+ * waiters, we don't have to do more than that.
+ */
+ if (unlikely(!rwb_enabled(rwb))) {
+ rwb_wake_all(rwb);
+ return;
+ }
+
+ /*
+ * If the device does write back caching, drop further down
+ * before we wake people up.
+ */
+ if (rwb->wc && !wb_recent_wait(rwb))
+ limit = 0;
+ else
+ limit = rwb->wb_normal;
+
+ /*
+ * Don't wake anyone up if we are above the normal limit.
+ */
+ if (inflight && inflight >= limit)
+ return;
+
+ if (waitqueue_active(&rqw->wait)) {
+ int diff = limit - inflight;
+
+ if (!inflight || diff >= rwb->wb_background / 2)
+ wake_up_all(&rqw->wait);
+ }
+}
+
+/*
+ * Called on completion of a request. Note that it's also called when
+ * a request is merged, since the merged request then gets freed.
+ */
+void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+ if (!rwb)
+ return;
+
+ if (!wbt_is_tracked(stat)) {
+ if (rwb->sync_cookie == stat) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+
+ if (wbt_is_read(stat))
+ wb_timestamp(rwb, &rwb->last_comp);
+ wbt_clear_state(stat);
+ } else {
+ WARN_ON_ONCE(stat == rwb->sync_cookie);
+ __wbt_done(rwb, wbt_stat_to_mask(stat));
+ wbt_clear_state(stat);
+ }
+}
+
+/*
+ * Return true, if we can't increase the depth further by scaling
+ */
+static bool calc_wb_limits(struct rq_wb *rwb)
+{
+ unsigned int depth;
+ bool ret = false;
+
+ if (!rwb->min_lat_nsec) {
+ rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
+ return false;
+ }
+
+ /*
+ * For QD=1 devices, this is a special case. It's important for those
+ * to have one request ready when one completes, so force a depth of
+ * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
+ * since the device can't have more than that in flight. If we're
+ * scaling down, then keep a setting of 1/1/1.
+ */
+ if (rwb->queue_depth == 1) {
+ if (rwb->scale_step > 0)
+ rwb->wb_max = rwb->wb_normal = 1;
+ else {
+ rwb->wb_max = rwb->wb_normal = 2;
+ ret = true;
+ }
+ rwb->wb_background = 1;
+ } else {
+ /*
+ * scale_step == 0 is our default state. If we have suffered
+ * latency spikes, step will be > 0, and we shrink the
+ * allowed write depths. If step is < 0, we're only doing
+ * writes, and we allow a temporarily higher depth to
+ * increase performance.
+ */
+ depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
+ if (rwb->scale_step > 0)
+ depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
+ else if (rwb->scale_step < 0) {
+ unsigned int maxd = 3 * rwb->queue_depth / 4;
+
+ depth = 1 + ((depth - 1) << -rwb->scale_step);
+ if (depth > maxd) {
+ depth = maxd;
+ ret = true;
+ }
+ }
+
+ /*
+ * Set our max/normal/bg queue depths based on how far
+ * we have scaled down (->scale_step).
+ */
+ rwb->wb_max = depth;
+ rwb->wb_normal = (rwb->wb_max + 1) / 2;
+ rwb->wb_background = (rwb->wb_max + 3) / 4;
+ }
+
+ return ret;
+}
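
As an illustrative calculation (not output from the patch): with a device queue depth of 32 the starting depth is min(16, 32) = 16, giving wb_max/wb_normal/wb_background = 16/8/4; at scale_step 2 the depth drops to 1 + (15 >> 2) = 4, giving 4/2/1; and at scale_step -1 the boosted depth of 31 is clamped to 3 * 32 / 4 = 24, giving 24/12/6 and marking scaled_max.
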
+
+static inline bool stat_sample_valid(struct blk_rq_stat *stat)
+{
+ /*
+ * We need at least one read sample, and a minimum of
+ * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
+ * that it's writes impacting us, and not just some sole read on
+ * a device that is in a lower power state.
+ */
+ return stat[BLK_STAT_READ].nr_samples >= 1 &&
+ stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+}
+
+static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
+{
+ u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+
+ if (!issue || !rwb->sync_cookie)
+ return 0;
+
+ now = ktime_to_ns(ktime_get());
+ return now - issue;
+}
+
+enum {
+ LAT_OK = 1,
+ LAT_UNKNOWN,
+ LAT_UNKNOWN_WRITES,
+ LAT_EXCEEDED,
+};
+
+static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
+{
+ struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+ u64 thislat;
+
+ /*
+ * If our stored sync issue exceeds the window size, or it
+ * exceeds our min target AND we haven't logged any entries,
+ * flag the latency as exceeded. wbt works off completion latencies,
+ * but for a flooded device, a single sync IO can take a long time
+ * to complete after being issued. If this time exceeds our
+ * monitoring window AND we didn't see any other completions in that
+ * window, then count that sync IO as a violation of the latency.
+ */
+ thislat = rwb_sync_issue_lat(rwb);
+ if (thislat > rwb->cur_win_nsec ||
+ (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+ trace_wbt_lat(bdi, thislat);
+ return LAT_EXCEEDED;
+ }
+
+ /*
+ * No read/write mix, if stat isn't valid
+ */
+ if (!stat_sample_valid(stat)) {
+ /*
+ * If we had writes in this stat window and the window is
+ * current, we're only doing writes. If a task recently
+		 * waited or still has writes in flight, consider us doing
+ * just writes as well.
+ */
+ if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+ wb_recent_wait(rwb) || wbt_inflight(rwb))
+ return LAT_UNKNOWN_WRITES;
+ return LAT_UNKNOWN;
+ }
+
+ /*
+ * If the 'min' latency exceeds our target, step down.
+ */
+ if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
+ trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+ trace_wbt_stat(bdi, stat);
+ return LAT_EXCEEDED;
+ }
+
+ if (rwb->scale_step)
+ trace_wbt_stat(bdi, stat);
+
+ return LAT_OK;
+}
+
+static int latency_exceeded(struct rq_wb *rwb)
+{
+ struct blk_rq_stat stat[2];
+
+ blk_queue_stat_get(rwb->queue, stat);
+ return __latency_exceeded(rwb, stat);
+}
+
+static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
+{
+ struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+
+ trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
+ rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+}
+
+static void scale_up(struct rq_wb *rwb)
+{
+ /*
+ * Hit max in previous round, stop here
+ */
+ if (rwb->scaled_max)
+ return;
+
+ rwb->scale_step--;
+ rwb->unknown_cnt = 0;
+ blk_stat_clear(rwb->queue);
+
+ rwb->scaled_max = calc_wb_limits(rwb);
+
+ rwb_wake_all(rwb);
+
+ rwb_trace_step(rwb, "step up");
+}
+
+/*
+ * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
+ * had a latency violation.
+ */
+static void scale_down(struct rq_wb *rwb, bool hard_throttle)
+{
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+ * ->scale_step from going to crazy values, if the device can't
+ * keep up.
+ */
+ if (rwb->wb_max == 1)
+ return;
+
+ if (rwb->scale_step < 0 && hard_throttle)
+ rwb->scale_step = 0;
+ else
+ rwb->scale_step++;
+
+ rwb->scaled_max = false;
+ rwb->unknown_cnt = 0;
+ blk_stat_clear(rwb->queue);
+ calc_wb_limits(rwb);
+ rwb_trace_step(rwb, "step down");
+}
+
+static void rwb_arm_timer(struct rq_wb *rwb)
+{
+ unsigned long expires;
+
+ if (rwb->scale_step > 0) {
+ /*
+ * We should speed this up, using some variant of a fast
+ * integer inverse square root calculation. Since we only do
+ * this for every window expiration, it's not a huge deal,
+ * though.
+ */
+ rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
+ int_sqrt((rwb->scale_step + 1) << 8));
+ } else {
+ /*
+ * For step < 0, we don't want to increase/decrease the
+ * window size.
+ */
+ rwb->cur_win_nsec = rwb->win_nsec;
+ }
+
+ expires = jiffies + nsecs_to_jiffies(rwb->cur_win_nsec);
+ mod_timer(&rwb->window_timer, expires);
+}
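
As a worked note on the fixed-point trick above: int_sqrt((scale_step + 1) << 8) equals int_sqrt(256 * (step + 1)), i.e. 16 * sqrt(step + 1) truncated, so dividing win_nsec << 4 by it approximates win_nsec / sqrt(step + 1) without floating point. For example (illustrative numbers), at step 1 the divisor is int_sqrt(512) = 22, shrinking a 100 msec window to about 72.7 msec (the exact value would be 70.7 msec); at step 3 the divisor is int_sqrt(1024) = 32, giving exactly 50 msec.
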
+
+static void wb_timer_fn(unsigned long data)
+{
+ struct rq_wb *rwb = (struct rq_wb *) data;
+ unsigned int inflight = wbt_inflight(rwb);
+ int status;
+
+ status = latency_exceeded(rwb);
+
+ trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+ inflight);
+
+ /*
+ * If we exceeded the latency target, step down. If we did not,
+ * step one level up. If we don't know enough to say either exceeded
+ * or ok, then don't do anything.
+ */
+ switch (status) {
+ case LAT_EXCEEDED:
+ scale_down(rwb, true);
+ break;
+ case LAT_OK:
+ scale_up(rwb);
+ break;
+ case LAT_UNKNOWN_WRITES:
+ /*
+		 * We started at the center step, but don't have a valid
+ * read/write sample, but we do have writes going on.
+ * Allow step to go negative, to increase write perf.
+ */
+ scale_up(rwb);
+ break;
+ case LAT_UNKNOWN:
+ if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
+ break;
+ /*
+		 * We get here when we previously scaled the depth, and we
+		 * currently don't have a valid read/write sample. For that
+ * case, slowly return to center state (step == 0).
+ */
+ if (rwb->scale_step > 0)
+ scale_up(rwb);
+ else if (rwb->scale_step < 0)
+ scale_down(rwb, false);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Re-arm timer, if we have IO in flight
+ */
+ if (rwb->scale_step || inflight)
+ rwb_arm_timer(rwb);
+}
+
+void wbt_update_limits(struct rq_wb *rwb)
+{
+ rwb->scale_step = 0;
+ rwb->scaled_max = false;
+ calc_wb_limits(rwb);
+
+ rwb_wake_all(rwb);
+}
+
+static bool close_io(struct rq_wb *rwb)
+{
+ const unsigned long now = jiffies;
+
+ return time_before(now, rwb->last_issue + HZ / 10) ||
+ time_before(now, rwb->last_comp + HZ / 10);
+}
+
+#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
+
+static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
+{
+ unsigned int limit;
+
+ /*
+ * At this point we know it's a buffered write. If this is
+	 * kswapd trying to free memory, or REQ_SYNC is set, then
+ * it's WB_SYNC_ALL writeback, and we'll use the max limit for
+ * that. If the write is marked as a background write, then use
+ * the idle limit, or go to normal if we haven't had competing
+ * IO for a bit.
+ */
+ if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
+ limit = rwb->wb_max;
+ else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
+ /*
+ * If less than 100ms since we completed unrelated IO,
+ * limit us to half the depth for background writeback.
+ */
+ limit = rwb->wb_background;
+ } else
+ limit = rwb->wb_normal;
+
+ return limit;
+}
+
+static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
+ wait_queue_t *wait, unsigned long rw)
+{
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __wbt_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
+ return true;
+ }
+
+ /*
+ * If the waitqueue is already active and we are not the next
+ * in line to be woken up, wait for our turn.
+ */
+ if (waitqueue_active(&rqw->wait) &&
+ rqw->wait.task_list.next != &wait->task_list)
+ return false;
+
+ return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+}
+
+/*
+ * Block if we will exceed our limit, or if we are currently waiting for
+ * the timer to kick off queuing again.
+ */
+static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+{
+ struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
+ DEFINE_WAIT(wait);
+
+ if (may_queue(rwb, rqw, &wait, rw))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rqw->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (may_queue(rwb, rqw, &wait, rw))
+ break;
+
+ if (lock)
+ spin_unlock_irq(lock);
+
+ io_schedule();
+
+ if (lock)
+ spin_lock_irq(lock);
+ } while (1);
+
+ finish_wait(&rqw->wait, &wait);
+}
+
+static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
+{
+ const int op = bio_op(bio);
+
+ /*
+ * If not a WRITE, do nothing
+ */
+ if (op != REQ_OP_WRITE)
+ return false;
+
+ /*
+ * Don't throttle WRITE_ODIRECT
+ */
+ if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
+ return false;
+
+ return true;
+}
+
+/*
+ * Returns true if the IO request should be accounted, false if not.
+ * May sleep, if we have exceeded the writeback limits. Caller can pass
+ * in an irq held spinlock, if it holds one when calling this function.
+ * If we do sleep, we'll release and re-grab it.
+ */
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+{
+	enum wbt_flags ret = 0;
+
+ if (!rwb_enabled(rwb))
+ return 0;
+
+ if (bio_op(bio) == REQ_OP_READ)
+ ret = WBT_READ;
+
+ if (!wbt_should_throttle(rwb, bio)) {
+ if (ret & WBT_READ)
+ wb_timestamp(rwb, &rwb->last_issue);
+ return ret;
+ }
+
+ __wbt_wait(rwb, bio->bi_opf, lock);
+
+ if (!timer_pending(&rwb->window_timer))
+ rwb_arm_timer(rwb);
+
+ if (current_is_kswapd())
+ ret |= WBT_KSWAPD;
+
+ return ret | WBT_TRACKED;
+}
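
Elsewhere in this series the block core consumes the returned flags roughly as sketched below. This fragment is a simplified illustration, not the actual blk-core.c hunks; it assumes the issue_stat field added to struct request by the accompanying statistics patches and the usual kernel headers.

/* Simplified caller sketch; error paths and the real call sites are omitted. */
static void example_submit_path(struct request_queue *q, struct bio *bio,
				struct request *rq)
{
	enum wbt_flags wb_acct;

	/* May sleep until the write fits under the current depth limit. */
	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);

	/* Stash the flags so completion can credit the right inflight count. */
	wbt_track(&rq->issue_stat, wb_acct);
}

On completion, wbt_done() above reads the same flags back via wbt_stat_to_mask() and wakes any throttled writers.
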
+
+void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+ if (!rwb_enabled(rwb))
+ return;
+
+ /*
+	 * Track sync issue, so we can react quicker if a sync IO takes a
+	 * long time to complete.
+ * Note that this is just a hint. 'stat' can go away when the
+ * request completes, so it's important we never dereference it. We
+ * only use the address to compare with, which is why we store the
+ * sync_issue time locally.
+ */
+ if (wbt_is_read(stat) && !rwb->sync_issue) {
+ rwb->sync_cookie = stat;
+ rwb->sync_issue = blk_stat_time(stat);
+ }
+}
+
+void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+ if (!rwb_enabled(rwb))
+ return;
+ if (stat == rwb->sync_cookie) {
+ rwb->sync_issue = 0;
+ rwb->sync_cookie = NULL;
+ }
+}
+
+void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+{
+ if (rwb) {
+ rwb->queue_depth = depth;
+ wbt_update_limits(rwb);
+ }
+}
+
+void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
+{
+ if (rwb)
+ rwb->wc = write_cache_on;
+}
+
+/*
+ * Disable wbt, if enabled by default. Only called from CFQ, if we have
+ * cgroups enabled
+ */
+void wbt_disable_default(struct request_queue *q)
+{
+ struct rq_wb *rwb = q->rq_wb;
+
+ if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+ del_timer_sync(&rwb->window_timer);
+ rwb->win_nsec = rwb->min_lat_nsec = 0;
+ wbt_update_limits(rwb);
+ }
+}
+EXPORT_SYMBOL_GPL(wbt_disable_default);
+
+u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+ /*
+ * We default to 2msec for non-rotational storage, and 75msec
+ * for rotational storage.
+ */
+ if (blk_queue_nonrot(q))
+ return 2000000ULL;
+ else
+ return 75000000ULL;
+}
+
+int wbt_init(struct request_queue *q)
+{
+ struct rq_wb *rwb;
+ int i;
+
+ /*
+ * For now, we depend on the stats window being larger than
+ * our monitoring window. Ensure that this isn't inadvertently
+ * violated.
+ */
+ BUILD_BUG_ON(RWB_WINDOW_NSEC > BLK_STAT_NSEC);
+ BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
+
+ rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+ if (!rwb)
+ return -ENOMEM;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++) {
+ atomic_set(&rwb->rq_wait[i].inflight, 0);
+ init_waitqueue_head(&rwb->rq_wait[i].wait);
+ }
+
+ setup_timer(&rwb->window_timer, wb_timer_fn, (unsigned long) rwb);
+ rwb->wc = 1;
+ rwb->queue_depth = RWB_DEF_DEPTH;
+ rwb->last_comp = rwb->last_issue = jiffies;
+ rwb->queue = q;
+ rwb->win_nsec = RWB_WINDOW_NSEC;
+ rwb->enable_state = WBT_STATE_ON_DEFAULT;
+ wbt_update_limits(rwb);
+
+ /*
+ * Assign rwb, and turn on stats tracking for this queue
+ */
+ q->rq_wb = rwb;
+ blk_stat_enable(q);
+
+ rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+
+ wbt_set_queue_depth(rwb, blk_queue_depth(q));
+ wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+
+ return 0;
+}
+
+void wbt_exit(struct request_queue *q)
+{
+ struct rq_wb *rwb = q->rq_wb;
+
+ if (rwb) {
+ del_timer_sync(&rwb->window_timer);
+ q->rq_wb = NULL;
+ kfree(rwb);
+ }
+}
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
new file mode 100644
index 000000000000..65f1de519f67
--- /dev/null
+++ b/block/blk-wbt.h
@@ -0,0 +1,171 @@
+#ifndef WB_THROTTLE_H
+#define WB_THROTTLE_H
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/ktime.h>
+
+#include "blk-stat.h"
+
+enum wbt_flags {
+ WBT_TRACKED = 1, /* write, tracked for throttling */
+ WBT_READ = 2, /* read */
+ WBT_KSWAPD = 4, /* write, from kswapd */
+
+ WBT_NR_BITS = 3, /* number of bits */
+};
+
+enum {
+ WBT_NUM_RWQ = 2,
+};
+
+/*
+ * Enable states. Either off, or on by default (done at init time),
+ * or on through manual setup in sysfs.
+ */
+enum {
+ WBT_STATE_ON_DEFAULT = 1,
+ WBT_STATE_ON_MANUAL = 2,
+};
+
+static inline void wbt_clear_state(struct blk_issue_stat *stat)
+{
+ stat->time &= BLK_STAT_TIME_MASK;
+}
+
+static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
+{
+ return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
+}
+
+static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
+{
+ stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
+}
+
+static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
+{
+ return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
+}
+
+static inline bool wbt_is_read(struct blk_issue_stat *stat)
+{
+ return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
+}
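
The helpers above piggy-back the wbt flags on the reserved top bits of the 64-bit issue timestamp. The real BLK_STAT_SHIFT and mask values live in blk-stat.h and are not shown in this patch, so the sketch below uses an invented shift purely to illustrate the pack/unpack idea.

#include <stdint.h>
#include <assert.h>

#define EX_SHIFT	61			/* hypothetical: 3 reserved bits on top */
#define EX_TIME_MASK	((1ULL << EX_SHIFT) - 1)

int main(void)
{
	uint64_t stat = 123456789ULL;		/* nanosecond issue timestamp */

	/* wbt_track() analogue: OR the flags into the reserved top bits. */
	stat |= (uint64_t)(1 /* WBT_TRACKED */ | 4 /* WBT_KSWAPD */) << EX_SHIFT;
	assert((stat >> EX_SHIFT) & 1);			/* wbt_is_tracked() analogue */
	assert((stat & EX_TIME_MASK) == 123456789ULL);	/* timestamp still intact */

	/* wbt_clear_state() analogue: drop the flag bits, keep the time. */
	stat &= EX_TIME_MASK;
	assert(!((stat >> EX_SHIFT) & 1));
	return 0;
}
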
+
+struct rq_wait {
+ wait_queue_head_t wait;
+ atomic_t inflight;
+};
+
+struct rq_wb {
+ /*
+ * Settings that govern how we throttle
+ */
+ unsigned int wb_background; /* background writeback */
+ unsigned int wb_normal; /* normal writeback */
+ unsigned int wb_max; /* max throughput writeback */
+ int scale_step;
+ bool scaled_max;
+
+ short enable_state; /* WBT_STATE_* */
+
+ /*
+ * Number of consecutive periods where we don't have enough
+ * information to make a firm scale up/down decision.
+ */
+ unsigned int unknown_cnt;
+
+ u64 win_nsec; /* default window size */
+ u64 cur_win_nsec; /* current window size */
+
+ struct timer_list window_timer;
+
+ s64 sync_issue;
+ void *sync_cookie;
+
+ unsigned int wc;
+ unsigned int queue_depth;
+
+ unsigned long last_issue; /* last non-throttled issue */
+ unsigned long last_comp; /* last non-throttled comp */
+ unsigned long min_lat_nsec;
+ struct request_queue *queue;
+ struct rq_wait rq_wait[WBT_NUM_RWQ];
+};
+
+static inline unsigned int wbt_inflight(struct rq_wb *rwb)
+{
+ unsigned int i, ret = 0;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ ret += atomic_read(&rwb->rq_wait[i].inflight);
+
+ return ret;
+}
+
+#ifdef CONFIG_BLK_WBT
+
+void __wbt_done(struct rq_wb *, enum wbt_flags);
+void wbt_done(struct rq_wb *, struct blk_issue_stat *);
+enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
+int wbt_init(struct request_queue *);
+void wbt_exit(struct request_queue *);
+void wbt_update_limits(struct rq_wb *);
+void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
+void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
+void wbt_disable_default(struct request_queue *);
+
+void wbt_set_queue_depth(struct rq_wb *, unsigned int);
+void wbt_set_write_cache(struct rq_wb *, bool);
+
+u64 wbt_default_latency_nsec(struct request_queue *);
+
+#else
+
+static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
+{
+}
+static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
+ spinlock_t *lock)
+{
+ return 0;
+}
+static inline int wbt_init(struct request_queue *q)
+{
+ return -EINVAL;
+}
+static inline void wbt_exit(struct request_queue *q)
+{
+}
+static inline void wbt_update_limits(struct rq_wb *rwb)
+{
+}
+static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline void wbt_disable_default(struct request_queue *q)
+{
+}
+static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+{
+}
+static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
+{
+}
+static inline u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+ return 0;
+}
+
+#endif /* CONFIG_BLK_WBT */
+
+#endif
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
new file mode 100644
index 000000000000..472211fa183a
--- /dev/null
+++ b/block/blk-zoned.c
@@ -0,0 +1,348 @@
+/*
+ * Zoned block device handling
+ *
+ * Copyright (c) 2015, Hannes Reinecke
+ * Copyright (c) 2015, SUSE Linux GmbH
+ *
+ * Copyright (c) 2016, Damien Le Moal
+ * Copyright (c) 2016, Western Digital
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/blkdev.h>
+
+static inline sector_t blk_zone_start(struct request_queue *q,
+ sector_t sector)
+{
+ sector_t zone_mask = blk_queue_zone_size(q) - 1;
+
+ return sector & ~zone_mask;
+}
+
+/*
+ * Check that a zone report belongs to the partition.
+ * If yes, fix its start sector and write pointer, copy it into the
+ * zone information array and return true. Return false otherwise.
+ */
+static bool blkdev_report_zone(struct block_device *bdev,
+ struct blk_zone *rep,
+ struct blk_zone *zone)
+{
+ sector_t offset = get_start_sect(bdev);
+
+ if (rep->start < offset)
+ return false;
+
+ rep->start -= offset;
+ if (rep->start + rep->len > bdev->bd_part->nr_sects)
+ return false;
+
+ if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ rep->wp = rep->start + rep->len;
+ else
+ rep->wp -= offset;
+ memcpy(zone, rep, sizeof(struct blk_zone));
+
+ return true;
+}
+
+/**
+ * blkdev_report_zones - Get zones information
+ * @bdev: Target block device
+ * @sector: Sector from which to report zones
+ * @zones: Array of zone structures where to return the zones information
+ * @nr_zones: Number of zone structures in the zone array
+ * @gfp_mask: Memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Get zone information starting from the zone containing @sector.
+ * The number of zone information reported may be less than the number
+ *    The number of zones reported may be less than the number
+ * returned in @nr_zones.
+ */
+int blkdev_report_zones(struct block_device *bdev,
+ sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct blk_zone_report_hdr *hdr;
+ unsigned int nrz = *nr_zones;
+ struct page *page;
+ unsigned int nr_rep;
+ size_t rep_bytes;
+ unsigned int nr_pages;
+ struct bio *bio;
+ struct bio_vec *bv;
+ unsigned int i, n, nz;
+ unsigned int ofst;
+ void *addr;
+ int ret;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_is_zoned(q))
+ return -EOPNOTSUPP;
+
+ if (!nrz)
+ return 0;
+
+ if (sector > bdev->bd_part->nr_sects) {
+ *nr_zones = 0;
+ return 0;
+ }
+
+ /*
+ * The zone report has a header. So make room for it in the
+ * payload. Also make sure that the report fits in a single BIO
+ * that will not be split down the stack.
+ */
+ rep_bytes = sizeof(struct blk_zone_report_hdr) +
+ sizeof(struct blk_zone) * nrz;
+ rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
+ if (rep_bytes > (queue_max_sectors(q) << 9))
+ rep_bytes = queue_max_sectors(q) << 9;
+
+ nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
+ rep_bytes >> PAGE_SHIFT);
+ nr_pages = min_t(unsigned int, nr_pages,
+ queue_max_segments(q));
+
+ bio = bio_alloc(gfp_mask, nr_pages);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = blk_zone_start(q, sector);
+ bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
+
+ for (i = 0; i < nr_pages; i++) {
+ page = alloc_page(gfp_mask);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
+ __free_page(page);
+ break;
+ }
+ }
+
+ if (i == 0)
+ ret = -ENOMEM;
+ else
+ ret = submit_bio_wait(bio);
+ if (ret)
+ goto out;
+
+ /*
+ * Process the report result: skip the header and go through the
+	 * reported zones to fix up the zone information for
+ * partitions. At the same time, return the zone information into
+ * the zone array.
+ */
+ n = 0;
+ nz = 0;
+ nr_rep = 0;
+ bio_for_each_segment_all(bv, bio, i) {
+
+ if (!bv->bv_page)
+ break;
+
+ addr = kmap_atomic(bv->bv_page);
+
+ /* Get header in the first page */
+ ofst = 0;
+ if (!nr_rep) {
+ hdr = (struct blk_zone_report_hdr *) addr;
+ nr_rep = hdr->nr_zones;
+ ofst = sizeof(struct blk_zone_report_hdr);
+ }
+
+ /* Fixup and report zones */
+ while (ofst < bv->bv_len &&
+ n < nr_rep && nz < nrz) {
+ if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
+ nz++;
+ ofst += sizeof(struct blk_zone);
+ n++;
+ }
+
+ kunmap_atomic(addr);
+
+ if (n >= nr_rep || nz >= nrz)
+ break;
+
+ }
+
+ *nr_zones = nz;
+out:
+ bio_for_each_segment_all(bv, bio, i)
+ __free_page(bv->bv_page);
+ bio_put(bio);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_report_zones);
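
A hedged sketch of an in-kernel caller follows; the surrounding function is invented for illustration, assumes the usual kernel headers (blkdev.h, slab.h), and real users such as drivers or filesystems would add their own sizing and error handling.

/* Illustration only: dump the first few zones of a zoned block device. */
static int example_dump_zones(struct block_device *bdev)
{
	unsigned int nr_zones = 16;
	struct blk_zone *zones;
	unsigned int i;
	int ret;

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
	if (!ret) {
		for (i = 0; i < nr_zones; i++)
			pr_info("zone %u: start %llu len %llu wp %llu\n", i,
				(unsigned long long)zones[i].start,
				(unsigned long long)zones[i].len,
				(unsigned long long)zones[i].wp);
	}

	kfree(zones);
	return ret;
}
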
+
+/**
+ * blkdev_reset_zones - Reset zones write pointer
+ * @bdev: Target block device
+ * @sector: Start sector of the first zone to reset
+ * @nr_sectors: Number of sectors, at least the length of one zone
+ * @gfp_mask: Memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Reset the write pointer of the zones contained in the range
+ * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
+ * is valid, but the specified range should not contain conventional zones.
+ */
+int blkdev_reset_zones(struct block_device *bdev,
+ sector_t sector, sector_t nr_sectors,
+ gfp_t gfp_mask)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ sector_t zone_sectors;
+ sector_t end_sector = sector + nr_sectors;
+ struct bio *bio;
+ int ret;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_is_zoned(q))
+ return -EOPNOTSUPP;
+
+ if (end_sector > bdev->bd_part->nr_sects)
+ /* Out of range */
+ return -EINVAL;
+
+ /* Check alignment (handle eventual smaller last zone) */
+ zone_sectors = blk_queue_zone_size(q);
+ if (sector & (zone_sectors - 1))
+ return -EINVAL;
+
+ if ((nr_sectors & (zone_sectors - 1)) &&
+ end_sector != bdev->bd_part->nr_sects)
+ return -EINVAL;
+
+ while (sector < end_sector) {
+
+ bio = bio_alloc(gfp_mask, 0);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
+
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+
+ if (ret)
+ return ret;
+
+ sector += zone_sectors;
+
+ /* This may take a while, so be nice to others */
+ cond_resched();
+
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+
+/**
+ * BLKREPORTZONE ioctl processing.
+ * Called from blkdev_ioctl.
+ */
+int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct request_queue *q;
+ struct blk_zone_report rep;
+ struct blk_zone *zones;
+ int ret;
+
+ if (!argp)
+ return -EINVAL;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+
+ if (!rep.nr_zones)
+ return -EINVAL;
+
+ zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ ret = blkdev_report_zones(bdev, rep.sector,
+ zones, &rep.nr_zones,
+ GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (rep.nr_zones) {
+ if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
+ sizeof(struct blk_zone) * rep.nr_zones))
+ ret = -EFAULT;
+ }
+
+ out:
+ kfree(zones);
+
+ return ret;
+}
+
+/**
+ * BLKRESETZONE ioctl processing.
+ * Called from blkdev_ioctl.
+ */
+int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct request_queue *q;
+ struct blk_zone_range zrange;
+
+ if (!argp)
+ return -EINVAL;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
+ return -EFAULT;
+
+ return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
+ GFP_KERNEL);
+}
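
From userspace the two new ioctls are reachable through the uapi definitions this series adds (assumed here to come from <linux/blkzoned.h>). The following sketch is illustrative only and elides most error handling.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>	/* assumed uapi header from this series */

int main(int argc, char **argv)
{
	struct blk_zone_report *rep;
	unsigned int nr = 8, i;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDWR);
	if (fd < 0)
		return 1;

	/* Report header plus room for 'nr' zone descriptors. */
	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;
	rep->nr_zones = nr;

	if (ioctl(fd, BLKREPORTZONE, rep) == 0 && rep->nr_zones) {
		for (i = 0; i < rep->nr_zones; i++)
			printf("zone %u: start %llu len %llu wp %llu\n", i,
			       (unsigned long long)rep->zones[i].start,
			       (unsigned long long)rep->zones[i].len,
			       (unsigned long long)rep->zones[i].wp);

		/* Reset the write pointer of the first reported zone. */
		struct blk_zone_range range = {
			.sector = rep->zones[0].start,
			.nr_sectors = rep->zones[0].len,
		};
		ioctl(fd, BLKRESETZONE, &range);
	}

	free(rep);
	close(fd);
	return 0;
}
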
diff --git a/block/blk.h b/block/blk.h
index 74444c49078f..041185e5f129 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -111,6 +111,7 @@ void blk_account_io_done(struct request *req);
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
REQ_ATOM_STARTED,
+ REQ_ATOM_POLL_SLEPT,
};
/*
@@ -130,7 +131,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
+#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
void blk_insert_flush(struct request *rq);
@@ -247,7 +248,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
- (rq->cmd_flags & REQ_IO_STAT) &&
+ (rq->rq_flags & RQF_IO_STAT) &&
(rq->cmd_type == REQ_TYPE_FS);
}
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 650f427d915b..9d652a992316 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -32,8 +32,13 @@
* bsg_destroy_job - routine to teardown/delete a bsg job
* @job: bsg_job that is to be torn down
*/
-static void bsg_destroy_job(struct bsg_job *job)
+static void bsg_destroy_job(struct kref *kref)
{
+ struct bsg_job *job = container_of(kref, struct bsg_job, kref);
+ struct request *rq = job->req;
+
+ blk_end_request_all(rq, rq->errors);
+
put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list);
@@ -41,6 +46,18 @@ static void bsg_destroy_job(struct bsg_job *job)
kfree(job);
}
+void bsg_job_put(struct bsg_job *job)
+{
+ kref_put(&job->kref, bsg_destroy_job);
+}
+EXPORT_SYMBOL_GPL(bsg_job_put);
+
+int bsg_job_get(struct bsg_job *job)
+{
+ return kref_get_unless_zero(&job->kref);
+}
+EXPORT_SYMBOL_GPL(bsg_job_get);
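
A hedged sketch of how a transport driver might use the new get/put pair to keep a job alive across an asynchronous completion; struct example_lld, its workqueue, and the work function are invented for the illustration.

/* Illustration only: hold an extra reference across deferred completion. */
static void example_queue_bsg_work(struct bsg_job *job, struct example_lld *lld)
{
	/*
	 * Keep the job alive until our work function has run, since
	 * bsg_softirq_done() drops the request's own reference.
	 */
	if (!bsg_job_get(job))
		return;

	lld->pending_job = job;
	queue_work(lld->wq, &lld->bsg_work);
}

static void example_bsg_work_fn(struct work_struct *work)
{
	struct example_lld *lld = container_of(work, struct example_lld, bsg_work);
	struct bsg_job *job = lld->pending_job;

	/* ... talk to the hardware, fill in job->reply ... */
	bsg_job_done(job, 0, 0);	/* completes the block request */
	bsg_job_put(job);		/* drop the extra reference taken above */
}
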
+
/**
* bsg_job_done - completion routine for bsg requests
* @job: bsg_job that is complete
@@ -83,8 +100,7 @@ static void bsg_softirq_done(struct request *rq)
{
struct bsg_job *job = rq->special;
- blk_end_request_all(rq, rq->errors);
- bsg_destroy_job(job);
+ bsg_job_put(job);
}
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
@@ -142,6 +158,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
job->dev = dev;
/* take a reference for the request */
get_device(job->dev);
+ kref_init(&job->kref);
return 0;
failjob_rls_rqst_payload:
@@ -161,6 +178,8 @@ failjob_rls_job:
* Drivers/subsys should pass this to the queue init function.
*/
void bsg_request_fn(struct request_queue *q)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
struct device *dev = q->queuedata;
struct request *req;
diff --git a/block/bsg.c b/block/bsg.c
index d214e929ce18..8a05a404ae70 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -176,7 +176,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
@@ -226,7 +226,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
- ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+ ret = bsg_validate_sgv4_hdr(hdr, &rw);
if (ret)
return ERR_PTR(ret);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e24d880306c..c73a6fcaeb9d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -16,6 +16,7 @@
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
+#include "blk-wbt.h"
/*
* tunables
@@ -667,10 +668,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op,
- int op_flags)
+ struct cfq_group *curr_cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, 1);
cfqg_stats_end_empty_time(&cfqg->stats);
cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}
@@ -684,30 +685,29 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
#endif
}
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags)
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op)
{
struct cfqg_stats *stats = &cfqg->stats;
unsigned long long now = sched_clock();
if (time_after64(now, io_start_time))
- blkg_rwstat_add(&stats->service_time, op, op_flags,
- now - io_start_time);
+ blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
if (time_after64(io_start_time, start_time))
- blkg_rwstat_add(&stats->wait_time, op, op_flags,
+ blkg_rwstat_add(&stats->wait_time, op,
io_start_time - start_time);
}
@@ -786,16 +786,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op, int op_flags) { }
+ struct cfq_group *curr_cfqg, unsigned int op) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags) { }
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -913,15 +913,6 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
}
/*
- * We regard a request as SYNC, if it's either a read or has the SYNC bit
- * set (in which case it could also be direct WRITE).
- */
-static inline bool cfq_bio_sync(struct bio *bio)
-{
- return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
-}
-
-/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
@@ -1596,7 +1587,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
struct cfq_group_data *cgd;
- cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+ cgd = kzalloc(sizeof(*cgd), gfp);
if (!cgd)
return NULL;
return &cgd->cpd;
@@ -2474,10 +2465,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
- req_op(rq), rq->cmd_flags);
+ rq->cmd_flags);
}
static struct request *
@@ -2491,7 +2482,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
if (!cic)
return NULL;
- cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
if (cfqq)
return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
@@ -2530,7 +2521,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -2565,7 +2556,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
}
static void
@@ -2588,7 +2579,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -2605,13 +2596,14 @@ static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
+ bool is_sync = op_is_sync(bio->bi_opf);
struct cfq_io_cq *cic;
struct cfq_queue *cfqq;
/*
* Disallow merge of a sync bio into an async request.
*/
- if (cfq_bio_sync(bio) && !rq_is_sync(rq))
+ if (is_sync && !rq_is_sync(rq))
return false;
/*
@@ -2622,7 +2614,7 @@ static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
if (!cic)
return false;
- cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ cfqq = cic_to_cfqq(cic, is_sync);
return cfqq == RQ_CFQQ(rq);
}
@@ -3771,9 +3763,11 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
uint64_t serial_nr;
+ bool nonroot_cg;
rcu_read_lock();
serial_nr = bio_blkcg(bio)->css.serial_nr;
+ nonroot_cg = bio_blkcg(bio) != &blkcg_root;
rcu_read_unlock();
/*
@@ -3784,6 +3778,14 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
return;
/*
+ * If we have a non-root cgroup, we can depend on that to
+ * do proper throttling of writes. Turn off wbt for that
+ * case, if it was enabled by default.
+ */
+ if (nonroot_cg)
+ wbt_disable_default(cfqd->queue);
+
+ /*
* Drop reference to queues. New queues will be assigned in new
* group upon arrival of fresh requests.
*/
@@ -3854,7 +3856,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
goto out;
}
- cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
cfqd->queue->node);
if (!cfqq) {
cfqq = &cfqd->oom_cfqq;
@@ -3923,6 +3926,12 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
+static inline bool req_noidle(struct request *req)
+{
+ return req_op(req) == REQ_OP_WRITE &&
+ (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
+}
+
/*
* Disable idle window if the process thinks too long or seeks so much that
* it doesn't matter
@@ -3944,7 +3953,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfqq->queued[0] + cfqq->queued[1] >= 4)
cfq_mark_cfqq_deep(cfqq);
- if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+ if (cfqq->next_rq && req_noidle(cfqq->next_rq))
enable_idle = 0;
else if (!atomic_read(&cic->icq.ioc->active_ref) ||
!cfqd->cfq_slice_idle ||
@@ -4142,7 +4151,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -4229,8 +4238,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
const int sync = rq_is_sync(rq);
u64 now = ktime_get_ns();
- cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
- !!(rq->cmd_flags & REQ_NOIDLE));
+ cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
cfq_update_hw_tag(cfqd);
@@ -4240,8 +4248,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
- rq_io_start_time_ns(rq), req_op(rq),
- rq->cmd_flags);
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -4319,14 +4326,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_schedule_dispatch(cfqd);
}
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
{
/*
* If REQ_PRIO is set, boost class and prio level, if it's below
* BE/NORM. If prio is not set, restore the potentially boosted
* class/prio level.
*/
- if (!(op_flags & REQ_PRIO)) {
+ if (!(op & REQ_PRIO)) {
cfqq->ioprio_class = cfqq->org_ioprio_class;
cfqq->ioprio = cfqq->org_ioprio;
} else {
@@ -4347,7 +4354,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
+static int cfq_may_queue(struct request_queue *q, unsigned int op)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -4364,10 +4371,10 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
if (!cic)
return ELV_MQUEUE_MAY;
- cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
+ cfqq = cic_to_cfqq(cic, op_is_sync(op));
if (cfqq) {
cfq_init_prio_data(cfqq, cic);
- cfqq_boost_on_prio(cfqq, op_flags);
+ cfqq_boost_on_prio(cfqq, op);
return __cfq_may_queue(cfqq);
}
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a56fd7..40f0c04e5ad3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -245,31 +245,31 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
- rq->cmd_flags &= ~REQ_HASHED;
+ rq->rq_flags &= ~RQF_HASHED;
}
-static void elv_rqhash_del(struct request_queue *q, struct request *rq)
+void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
-static void elv_rqhash_add(struct request_queue *q, struct request *rq)
+void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
- rq->cmd_flags |= REQ_HASHED;
+ rq->rq_flags |= RQF_HASHED;
}
-static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
+void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
-static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
+struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
struct hlist_node *next;
@@ -352,7 +352,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
- int stop_flags;
if (q->last_merge == rq)
q->last_merge = NULL;
@@ -362,7 +361,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
q->nr_sorted--;
boundary = q->end_sector;
- stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
@@ -370,7 +368,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
- if (pos->cmd_flags & stop_flags)
+ if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
break;
if (blk_rq_pos(rq) >= boundary) {
if (blk_rq_pos(pos) < boundary)
@@ -510,7 +508,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
- const int next_sorted = next->cmd_flags & REQ_SORTED;
+ const int next_sorted = next->rq_flags & RQF_SORTED;
if (next_sorted && e->type->ops.elevator_merge_req_fn)
e->type->ops.elevator_merge_req_fn(q, rq, next);
@@ -537,13 +535,13 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
- if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq->q->nr_pending--;
}
static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
- if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+ if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
(q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
}
@@ -563,11 +561,11 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
- if (rq->cmd_flags & REQ_SORTED)
+ if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
- rq->cmd_flags &= ~REQ_STARTED;
+ rq->rq_flags &= ~RQF_STARTED;
blk_pm_requeue_request(rq);
@@ -597,13 +595,13 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
rq->q = q;
- if (rq->cmd_flags & REQ_SOFTBARRIER) {
+ if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
- } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+ } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
(where == ELEVATOR_INSERT_SORT ||
where == ELEVATOR_INSERT_SORT_MERGE))
where = ELEVATOR_INSERT_BACK;
@@ -611,12 +609,12 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
switch (where) {
case ELEVATOR_INSERT_REQUEUE:
case ELEVATOR_INSERT_FRONT:
- rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
break;
case ELEVATOR_INSERT_BACK:
- rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->rq_flags |= RQF_SOFTBARRIER;
elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head);
/*
@@ -642,7 +640,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(rq->cmd_type != REQ_TYPE_FS);
- rq->cmd_flags |= REQ_SORTED;
+ rq->rq_flags |= RQF_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
@@ -659,7 +657,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
break;
case ELEVATOR_INSERT_FLUSH:
- rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->rq_flags |= RQF_SOFTBARRIER;
blk_insert_flush(rq);
break;
default:
@@ -716,12 +714,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
e->type->ops.elevator_put_req_fn(rq);
}
-int elv_may_queue(struct request_queue *q, int op, int op_flags)
+int elv_may_queue(struct request_queue *q, unsigned int op)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_may_queue_fn)
- return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
+ return e->type->ops.elevator_may_queue_fn(q, op);
return ELV_MQUEUE_MAY;
}
@@ -735,7 +733,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
- if ((rq->cmd_flags & REQ_SORTED) &&
+ if ((rq->rq_flags & RQF_SORTED) &&
e->type->ops.elevator_completed_req_fn)
e->type->ops.elevator_completed_req_fn(q, rq);
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 755119c3c1b9..f856963204f4 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -519,6 +519,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
BLKDEV_DISCARD_SECURE);
case BLKZEROOUT:
return blk_ioctl_zeroout(bdev, mode, arg);
+ case BLKREPORTZONE:
+ return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
+ case BLKRESETZONE:
+ return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
case HDIO_GETGEO:
return blkdev_getgeo(bdev, argp);
case BLKRAGET:
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 71d9ed9df8da..d7beb6bbbf66 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -430,6 +430,56 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
return 0;
}
+static bool part_zone_aligned(struct gendisk *disk,
+ struct block_device *bdev,
+ sector_t from, sector_t size)
+{
+ unsigned int zone_size = bdev_zone_size(bdev);
+
+ /*
+ * If this function is called, then the disk is a zoned block device
+ * (host-aware or host-managed). This can be detected even if the
+ * zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
+ * set). In this case, however, only host-aware devices will be seen
+ * as a block device is not created for host-managed devices. Without
+ * zoned block device support, host-aware drives can still be used as
+ * regular block devices (no zone operation) and their zone size will
+ * be reported as 0. Allow this case.
+ */
+ if (!zone_size)
+ return true;
+
+ /*
+	 * Check partition start and size alignment. If the drive has a
+ * smaller last runt zone, ignore it and allow the partition to
+ * use it. Check the zone size too: it should be a power of 2 number
+ * of sectors.
+ */
+ if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+ u32 rem;
+
+ div_u64_rem(from, zone_size, &rem);
+ if (rem)
+ return false;
+ if ((from + size) < get_capacity(disk)) {
+ div_u64_rem(size, zone_size, &rem);
+ if (rem)
+ return false;
+ }
+
+ } else {
+
+ if (from & (zone_size - 1))
+ return false;
+ if ((from + size) < get_capacity(disk) &&
+ (size & (zone_size - 1)))
+ return false;
+
+ }
+
+ return true;
+}
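
As an illustrative example of the check above: with a 256 MiB zone size (524288 sectors), a partition must start on a multiple of 524288 and, unless it ends at the device capacity (where a smaller runt zone is tolerated), its size must also be a multiple of 524288; otherwise rescan_partitions() below skips the partition with the new "not zone aligned" warning.
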
+
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
{
struct parsed_partitions *state = NULL;
@@ -529,6 +579,21 @@ rescan:
}
}
+ /*
+ * On a zoned block device, partitions should be aligned on the
+ * device zone size (i.e. zone boundary crossing not allowed).
+ * Otherwise, resetting the write pointer of the last zone of
+ * one partition may impact the following partition.
+ */
+ if (bdev_is_zoned(bdev) &&
+ !part_zone_aligned(disk, bdev, from, size)) {
+ printk(KERN_WARNING
+ "%s: p%d start %llu+%llu is not zone aligned\n",
+ disk->disk_name, p, (unsigned long long) from,
+ (unsigned long long) size);
+ continue;
+ }
+
part = add_partition(disk, p, from, size,
state->parts[p].flags,
&state->parts[p].info);
diff --git a/crypto/842.c b/crypto/842.c
index 98e387efb8c8..bc26dc942821 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/sw842.h>
+#include <crypto/internal/scompress.h>
struct crypto842_ctx {
- char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */
+ void *wmem; /* working memory for compress */
};
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+ struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->wmem = crypto842_alloc_ctx(NULL);
+ if (IS_ERR(ctx->wmem))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+ struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto842_free_ctx(NULL, ctx->wmem);
+}
+
static int crypto842_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
return sw842_compress(src, slen, dst, dlen, ctx->wmem);
}
+static int crypto842_scompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
static int crypto842_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
@@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
return sw842_decompress(src, slen, dst, dlen);
}
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return sw842_decompress(src, slen, dst, dlen);
+}
+
static struct crypto_alg alg = {
.cra_name = "842",
.cra_driver_name = "842-generic",
@@ -59,20 +108,48 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
+ .cra_init = crypto842_init,
+ .cra_exit = crypto842_exit,
.cra_u = { .compress = {
.coa_compress = crypto842_compress,
.coa_decompress = crypto842_decompress } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = crypto842_alloc_ctx,
+ .free_ctx = crypto842_free_ctx,
+ .compress = crypto842_scompress,
+ .decompress = crypto842_sdecompress,
+ .base = {
+ .cra_name = "842",
+ .cra_driver_name = "842-scomp",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init crypto842_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 84d71482bf08..160f08e721cc 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -24,7 +24,7 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
- depends on MODULE_SIG
+ depends on (MODULE_SIG || !MODULES)
help
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
@@ -102,6 +102,15 @@ config CRYPTO_KPP
select CRYPTO_ALGAPI
select CRYPTO_KPP2
+config CRYPTO_ACOMP2
+ tristate
+ select CRYPTO_ALGAPI2
+
+config CRYPTO_ACOMP
+ tristate
+ select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
+
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
select CRYPTO_KPP2
+ select CRYPTO_ACOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -236,10 +246,14 @@ config CRYPTO_ABLK_HELPER
tristate
select CRYPTO_CRYPTD
+config CRYPTO_SIMD
+ tristate
+ select CRYPTO_CRYPTD
+
config CRYPTO_GLUE_HELPER_X86
tristate
depends on X86
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
config CRYPTO_ENGINE
tristate
@@ -437,7 +451,7 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
-config CRYPT_CRC32C_VPMSUM
+config CRYPTO_CRC32C_VPMSUM
tristate "CRC32c CRC algorithm (powerpc64)"
depends on PPC64 && ALTIVEC
select CRYPTO_HASH
@@ -928,14 +942,13 @@ config CRYPTO_AES_X86_64
config CRYPTO_AES_NI_INTEL
tristate "AES cipher algorithms (AES-NI)"
depends on X86
+ select CRYPTO_AEAD
select CRYPTO_AES_X86_64 if 64BIT
select CRYPTO_AES_586 if !64BIT
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86 if 64BIT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Use Intel AES-NI instructions for AES algorithm.
@@ -1568,6 +1581,7 @@ comment "Compression"
config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select ZLIB_INFLATE
select ZLIB_DEFLATE
help
@@ -1579,6 +1593,7 @@ config CRYPTO_DEFLATE
config CRYPTO_LZO
tristate "LZO compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZO_COMPRESS
select LZO_DECOMPRESS
help
@@ -1587,6 +1602,7 @@ config CRYPTO_LZO
config CRYPTO_842
tristate "842 compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select 842_COMPRESS
select 842_DECOMPRESS
help
@@ -1595,6 +1611,7 @@ config CRYPTO_842
config CRYPTO_LZ4
tristate "LZ4 compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZ4_COMPRESS
select LZ4_DECOMPRESS
help
@@ -1603,6 +1620,7 @@ config CRYPTO_LZ4
config CRYPTO_LZ4HC
tristate "LZ4HC compression algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
select LZ4HC_COMPRESS
select LZ4_DECOMPRESS
help
diff --git a/crypto/Makefile b/crypto/Makefile
index 99cc64ac70ef..b8f0e3eb0791 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
+$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
@@ -50,6 +51,10 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
+crypto_acompress-y := acompress.o
+crypto_acompress-y += scompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
+
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -138,3 +143,5 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+crypto_simd-y := simd.o
+obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000000000000..887783d8e9a9
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,169 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_acomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ return crypto_init_scomp_ops_async(tfm);
+
+ acomp->compress = alg->compress;
+ acomp->decompress = alg->decompress;
+ acomp->dst_free = alg->dst_free;
+ acomp->reqsize = alg->reqsize;
+
+ if (alg->exit)
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (alg->init)
+ return alg->init(acomp);
+
+ return 0;
+}
+
+static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+ int extsize = crypto_alg_extsize(alg);
+
+ if (alg->cra_type != &crypto_acomp_type)
+ extsize += sizeof(struct crypto_scomp *);
+
+ return extsize;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+ .extsize = crypto_acomp_extsize,
+ .init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_acomp_show,
+#endif
+ .report = crypto_acomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
+ .type = CRYPTO_ALG_TYPE_ACOMPRESS,
+ .tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req *req;
+
+ req = __acomp_request_alloc(acomp);
+ if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+ return crypto_acomp_scomp_alloc_ctx(req);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ crypto_acomp_scomp_free_ctx(req);
+
+ if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
+ acomp->dst_free(req->dst);
+ req->dst = NULL;
+ }
+
+ __acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_acomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_acomp);
+
+int crypto_unregister_acomp(struct acomp_alg *alg)
+{
+ return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous compression type");
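
A rough caller-side sketch of the asynchronous interface registered above, assuming the helpers declared by the companion include/crypto/acompress.h header (crypto_alloc_acomp, acomp_request_alloc, acomp_request_set_params, crypto_acomp_compress) and, for brevity, the crypto_req_done/crypto_wait_req completion helpers that later kernels provide; a contemporary caller would open-code the completion instead:

	#include <crypto/acompress.h>
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Sketch: compress one linear buffer through an acomp transform.
	 * On success the produced length is available in req->dlen.
	 */
	static int example_acomp_compress(const char *alg, void *src,
					  unsigned int slen, void *dst,
					  unsigned int dlen)
	{
		struct scatterlist sg_src, sg_dst;
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_acomp(alg, 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg_src, src, slen);
		sg_init_one(&sg_dst, dst, dlen);
		acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);

		/* Truly asynchronous implementations return -EINPROGRESS;
		 * crypto_wait_req() turns that into a synchronous wait.
		 */
		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

		acomp_request_free(req);
	out_free_tfm:
		crypto_free_acomp(tfm);
		return ret;
	}
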
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 6e39d9c05b98..ccb85e1798f2 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
type = alg->cra_flags;
- /* This piece of crap needs to disappear into per-type test hooks. */
- if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
- CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
- ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
- alg->cra_ablkcipher.ivsize))
+ /* Do not test internal algorithms. */
+ if (type & CRYPTO_ALG_INTERNAL)
type |= CRYPTO_ALG_TESTED;
param->type = type;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 80a0f1a78551..668ef402c6eb 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
- return ctx->used >= ctx->aead_assoclen + as;
+ /*
+ * The minimum amount of memory needed for an AEAD cipher is
+ * the AAD and in case of decryption the tag.
+ */
+ return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
static void aead_reset_ctx(struct aead_ctx *ctx)
@@ -132,28 +136,27 @@ static void aead_wmem_wakeup(struct sock *sk)
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct alg_sock *ask = alg_sk(sk);
struct aead_ctx *ctx = ask->private;
long timeout;
- DEFINE_WAIT(wait);
int err = -ERESTARTSYS;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
+ add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
if (signal_pending(current))
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, !ctx->more)) {
+ if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
err = 0;
break;
}
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -416,7 +419,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
unsigned int i, reqlen = GET_REQ_SIZE(tfm);
int err = -ENOMEM;
unsigned long used;
- size_t outlen;
+ size_t outlen = 0;
size_t usedpages = 0;
lock_sock(sk);
@@ -426,12 +429,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
goto unlock;
}
- used = ctx->used;
- outlen = used;
-
if (!aead_sufficient_data(ctx))
goto unlock;
+ used = ctx->used;
+ if (ctx->enc)
+ outlen = used + as;
+ else
+ outlen = used - as;
+
req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;
@@ -445,15 +451,16 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
aead_async_cb, sk);
- used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+ used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
- areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+ areq->tsgl = sock_kmalloc(sk,
+ sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
GFP_KERNEL);
if (unlikely(!areq->tsgl))
goto free;
- sg_init_table(areq->tsgl, sgl->cur);
+ sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
for (i = 0; i < sgl->cur; i++)
sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
sgl->sg[i].length, sgl->sg[i].offset);
@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
areq->tsgls = sgl->cur;
/* create rx sgls */
- while (iov_iter_count(&msg->msg_iter)) {
+ while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
last_rsgl = rsgl;
- /* we do not need more iovecs as we have sufficient memory */
- if (outlen <= usedpages)
- break;
-
iov_iter_advance(&msg->msg_iter, err);
}
- err = -EINVAL;
+
/* ensure output buffer is sufficiently large */
- if (usedpages < outlen)
- goto free;
+ if (usedpages < outlen) {
+ err = -EINVAL;
+ goto unlock;
+ }
aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
areq->iv);
@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
goto unlock;
}
+ /* data length provided by caller via sendmsg/sendpage */
used = ctx->used;
/*
@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
if (!aead_sufficient_data(ctx))
goto unlock;
- outlen = used;
+ /*
+ * Calculate the minimum output buffer size holding the result of the
+ * cipher operation. When encrypting data, the receiving buffer is
+ * larger by the tag length compared to the input buffer as the
+ * encryption operation generates the tag. For decryption, the input
+ * buffer provides the tag which is consumed resulting in only the
+ * plaintext without a buffer for the tag returned to the caller.
+ */
+ if (ctx->enc)
+ outlen = used + as;
+ else
+ outlen = used - as;
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
- used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+ used -= ctx->aead_assoclen;
/* convert iovecs of output buffers into scatterlists */
- while (iov_iter_count(&msg->msg_iter)) {
+ while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
last_rsgl = rsgl;
- /* we do not need more iovecs as we have sufficient memory */
- if (outlen <= usedpages)
- break;
iov_iter_advance(&msg->msg_iter, err);
}
- err = -EINVAL;
/* ensure output buffer is sufficiently large */
- if (usedpages < outlen)
+ if (usedpages < outlen) {
+ err = -EINVAL;
goto unlock;
+ }
sg_mark_end(sgl->sg + sgl->cur - 1);
aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
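
To make the new buffer accounting concrete, a worked example with illustrative numbers (gcm(aes), 16-byte tag so as = 16, 16 bytes of AAD, 64 bytes of payload):

	encryption:  used   = assoclen + ptlen       = 16 + 64      = 80
	             outlen = used + as              = 80 + 16      = 96   (AAD + ciphertext + tag)
	decryption:  used   = assoclen + ctlen + as  = 16 + 64 + 16 = 96
	             outlen = used - as              = 96 - 16      = 80   (AAD + plaintext)

This is why aead_sufficient_data() only insists on the tag being present for decryption, and why the rx scatterlist loops above stop collecting pages once usedpages reaches outlen.
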
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 2d8466f9e49b..d19b09cdf284 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- if (ctx->more) {
+ if (!result && !ctx->more) {
+ err = af_alg_wait_for_completion(
+ crypto_ahash_init(&ctx->req),
+ &ctx->completion);
+ if (err)
+ goto unlock;
+ }
+
+ if (!result || ctx->more) {
ctx->more = 0;
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
- } else if (!result) {
- err = af_alg_wait_for_completion(
- crypto_ahash_digest(&ctx->req),
- &ctx->completion);
}
err = memcpy_to_msg(msg, ctx->result, len);
- hash_free_result(sk, ctx);
-
unlock:
+ hash_free_result(sk, ctx);
release_sock(sk);
return err ?: len;
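
The recvmsg() change above handles the no-data case with an explicit init + final instead of a digest call over a NULL scatterlist, and frees the result buffer on every unlock path. The case in question, as a hypothetical userspace sketch (error handling omitted): reading from a hash socket without sending any data should simply return the digest of the empty message.

	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		unsigned char digest[32];
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		/* No send() at all: this read should yield SHA-256("") */
		read(opfd, digest, sizeof(digest));

		close(opfd);
		close(tfmfd);
		return 0;
	}
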
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 28556fce4267..a9e79d8eff87 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -199,26 +199,26 @@ static void skcipher_free_sgl(struct sock *sk)
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
- long timeout;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err = -ERESTARTSYS;
+ long timeout;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
if (signal_pending(current))
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
+ if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
err = 0;
break;
}
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
@@ -242,10 +242,10 @@ static void skcipher_wmem_wakeup(struct sock *sk)
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
long timeout;
- DEFINE_WAIT(wait);
int err = -ERESTARTSYS;
if (flags & MSG_DONTWAIT) {
@@ -254,17 +254,17 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
if (signal_pending(current))
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, ctx->used)) {
+ if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
err = 0;
break;
}
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -566,8 +566,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
* need to expand */
tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
GFP_KERNEL);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto free;
+ }
sg_init_table(tmp, tx_nents * 2);
for (x = 0; x < tx_nents; x++)
diff --git a/crypto/api.c b/crypto/api.c
index bbc147cb5dec..b16ce1653284 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
if (!name)
return ERR_PTR(-ENOENT);
+ type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
- type &= mask;
alg = crypto_alg_lookup(name, type, mask);
if (!alg) {
@@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
- if (type) {
- if (tfm->exit)
- tfm->exit(tfm);
- return;
- }
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- crypto_exit_cipher_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- crypto_exit_compress_ops(tfm);
- break;
-
- default:
- BUG();
- }
+ if (type && tfm->exit)
+ tfm->exit(tfm);
}
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index fd76b5fc3b3a..d3a989e718f5 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -121,6 +121,7 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_req;
+ ret = -ENOMEM;
outlen = crypto_akcipher_maxsize(tfm);
output = kmalloc(outlen, GFP_KERNEL);
if (!output)
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 865f46ea724f..c80765b211cf 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
return cert;
error_decode:
- kfree(cert->pub->key);
kfree(ctx);
error_no_ctx:
x509_free_certificate(cert);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a7e1ac786c5d..875470b0e026 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher2(&ictx->enc);
+ enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
@@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 121010ac9962..6f8f6b86bfe2 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(auth))
return PTR_ERR(auth);
- enc = crypto_spawn_skcipher2(&ictx->enc);
+ enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
@@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_auth;
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 780ee27b2d43..68f751a41a84 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -1,7 +1,7 @@
/*
* CBC: Cipher Block Chaining mode
*
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -10,191 +10,78 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_cbc_ctx {
struct crypto_cipher *child;
};
-static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
+static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), dst, iv);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- return nbytes;
-}
-
-static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
-{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- crypto_xor(src, iv, bsize);
- fn(crypto_cipher_tfm(tfm), src, src);
- iv = src;
-
- src += bsize;
- } while ((nbytes -= bsize) >= bsize);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
+ crypto_cipher_encrypt_one(ctx->child, dst, src);
}
-static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
- else
- nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
+ return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
}
-static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
+static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- fn(crypto_cipher_tfm(tfm), dst, src);
- crypto_xor(dst, iv, bsize);
- iv = src;
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- memcpy(walk->iv, iv, bsize);
-
- return nbytes;
-}
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm)
-{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
- int bsize = crypto_cipher_blocksize(tfm);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
-
- /* Start of the last block. */
- src += nbytes - (nbytes & (bsize - 1)) - bsize;
- memcpy(last_iv, src, bsize);
-
- for (;;) {
- fn(crypto_cipher_tfm(tfm), src, src);
- if ((nbytes -= bsize) < bsize)
- break;
- crypto_xor(src, src - bsize, bsize);
- src -= bsize;
- }
-
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
-
- return nbytes;
+ crypto_cipher_decrypt_one(ctx->child, dst, src);
}
-static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes)) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
- else
- nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while (walk.nbytes) {
+ err = crypto_cbc_decrypt_blocks(&walk, tfm,
+ crypto_cbc_decrypt_one);
+ err = skcipher_walk_done(&walk, err);
}
return err;
}
-static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
@@ -205,72 +92,94 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+static void crypto_cbc_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct skcipher_instance *inst;
+ struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
if (err)
- return ERR_PTR(err);
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
+ err = PTR_ERR(alg);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ goto err_free_inst;
- inst = ERR_PTR(-EINVAL);
- if (!is_power_of_2(alg->cra_blocksize))
- goto out_put_alg;
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
- inst = crypto_alloc_instance("cbc", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = -EINVAL;
+ if (!is_power_of_2(alg->cra_blocksize))
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
/* We access the data as u32s when xoring. */
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
- inst->alg.cra_init = crypto_cbc_init_tfm;
- inst->alg.cra_exit = crypto_cbc_exit_tfm;
+ inst->alg.init = crypto_cbc_init_tfm;
+ inst->alg.exit = crypto_cbc_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
+ inst->alg.setkey = crypto_cbc_setkey;
+ inst->alg.encrypt = crypto_cbc_encrypt;
+ inst->alg.decrypt = crypto_cbc_decrypt;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ inst->free = crypto_cbc_free;
-static void crypto_cbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_cbc_tmpl = {
.name = "cbc",
- .alloc = crypto_cbc_alloc,
- .free = crypto_cbc_free,
+ .create = crypto_cbc_create,
.module = THIS_MODULE,
};
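
The blkcipher walk code removed above is subsumed by the inline helpers in the new crypto/cbc.h (crypto_cbc_encrypt_walk(), crypto_cbc_decrypt_blocks()); the per-block chaining they implement is unchanged. As a free-standing illustration, not the header's implementation, with E() standing for the underlying block cipher:

	#include <crypto/algapi.h>
	#include <linux/string.h>

	/* Illustrative only: one out-of-place CBC encrypt segment,
	 * nbytes a multiple of bsize.
	 *   C[i] = E(P[i] xor C[i-1]),  C[-1] = IV
	 */
	static void cbc_encrypt_segment(u8 *dst, const u8 *src,
					unsigned int nbytes, u8 *iv,
					unsigned int bsize,
					void (*E)(u8 *out, const u8 *in))
	{
		while (nbytes >= bsize) {
			crypto_xor(iv, src, bsize);	/* iv ^= P[i]          */
			E(dst, iv);			/* C[i] = E(P[i] ^ iv) */
			memcpy(iv, dst, bsize);		/* chain C[i] forward  */
			src += bsize;
			dst += bsize;
			nbytes -= bsize;
		}
	}

Decryption is the mirror image, P[i] = D(C[i]) xor C[i-1], which is what crypto_cbc_decrypt_blocks() walks through for each segment above.
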
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d8575ef5c..26b924d1e582 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctr = crypto_spawn_skcipher2(&ictx->ctr);
+ ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_cipher;
@@ -544,9 +544,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_cipher;
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index e899ef51dc8e..db1bc3147bc4 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
if (IS_ERR(poly))
return PTR_ERR(poly);
- chacha = crypto_spawn_skcipher2(&ictx->chacha);
+ chacha = crypto_spawn_skcipher(&ictx->chacha);
if (IS_ERR(chacha)) {
crypto_free_ahash(poly);
return PTR_ERR(chacha);
@@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_poly;
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 39541e0e537d..94fa3551476b 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
return 0;
}
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 7a8bfbd548f6..04080dca8f0c 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
unsigned int bs = crypto_shash_blocksize(parent);
- __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+ __be64 *consts = PTR_ALIGN((void *)ctx->ctx,
+ (alignmask | (__alignof__(__be64) - 1)) + 1);
u64 _const[2];
int i, err = 0;
u8 msb_mask, gfmask;
@@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1);
+ u8 *consts = PTR_ALIGN((void *)tctx->ctx,
+ (alignmask | (__alignof__(__be64) - 1)) + 1);
u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
u8 *prev = odds + bs;
unsigned int offset = 0;
@@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
case 8:
break;
default:
+ err = -EINVAL;
goto out_put_alg;
}
@@ -257,7 +260,8 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto out_free_inst;
- alignmask = alg->cra_alignmask | (sizeof(long) - 1);
+ /* We access the data as u32s when xoring. */
+ alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -269,7 +273,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ alg->cra_blocksize * 2;
inst->alg.base.cra_ctxsize =
- ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1)
+ ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
+ + ((alignmask | (__alignof__(__be64) - 1)) &
+ ~(crypto_tfm_ctx_alignment() - 1))
+ alg->cra_blocksize * 2;
inst->alg.base.cra_init = cmac_init_tfm;
diff --git a/crypto/compress.c b/crypto/compress.c
index c33f0763a956..f2d522924a07 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
return 0;
}
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0c654e59f215..0508c48a45c4 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -17,9 +17,9 @@
*
*/
-#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
@@ -48,6 +48,11 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue;
};
+struct skcipherd_instance_ctx {
+ struct crypto_skcipher_spawn spawn;
+ struct cryptd_queue *queue;
+};
+
struct hashd_instance_ctx {
struct crypto_shash_spawn spawn;
struct cryptd_queue *queue;
@@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
+struct cryptd_skcipher_ctx {
+ atomic_t refcnt;
+ struct crypto_skcipher *child;
+};
+
+struct cryptd_skcipher_request_ctx {
+ crypto_completion_t complete;
+};
+
struct cryptd_hash_ctx {
atomic_t refcnt;
struct crypto_shash *child;
@@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
{
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
- struct crypto_tfm *tfm;
atomic_t *refcnt;
bool may_backlog;
@@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
if (!atomic_read(refcnt))
goto out_put_cpu;
- tfm = request->tfm;
atomic_inc(refcnt);
out_put_cpu:
@@ -432,6 +444,216 @@ out_put_alg:
return err;
}
+static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
+ int err;
+
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ int refcnt = atomic_read(&ctx->refcnt);
+
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+
+ if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(tfm);
+}
+
+static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+ int err)
+{
+ struct skcipher_request *req = skcipher_request_cast(base);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ req->base.complete = rctx->complete;
+
+out:
+ cryptd_skcipher_complete(req, err);
+}
+
+static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+ int err)
+{
+ struct skcipher_request *req = skcipher_request_cast(base);
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = ctx->child;
+ SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ req->base.complete = rctx->complete;
+
+out:
+ cryptd_skcipher_complete(req, err);
+}
+
+static int cryptd_skcipher_enqueue(struct skcipher_request *req,
+ crypto_completion_t compl)
+{
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cryptd_queue *queue;
+
+ queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
+ rctx->complete = req->base.complete;
+ req->base.complete = compl;
+
+ return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
+{
+ return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
+}
+
+static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
+{
+ return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
+}
+
+static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
+ struct crypto_skcipher_spawn *spawn = &ictx->spawn;
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
+
+ cipher = crypto_spawn_skcipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ crypto_skcipher_set_reqsize(
+ tfm, sizeof(struct cryptd_skcipher_request_ctx));
+ return 0;
+}
+
+static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
+}
+
+static void cryptd_skcipher_free(struct skcipher_instance *inst)
+{
+ struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->spawn);
+}
+
+static int cryptd_create_skcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct skcipherd_instance_ctx *ctx;
+ struct skcipher_instance *inst;
+ struct skcipher_alg *alg;
+ const char *name;
+ u32 type;
+ u32 mask;
+ int err;
+
+ type = 0;
+ mask = CRYPTO_ALG_ASYNC;
+
+ cryptd_check_internal(tb, &type, &mask);
+
+ name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = skcipher_instance_ctx(inst);
+ ctx->queue = queue;
+
+ crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+ if (err)
+ goto out_free_inst;
+
+ alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+ err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
+ if (err)
+ goto out_drop_skcipher;
+
+ inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+
+ inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+ inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+
+ inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
+
+ inst->alg.init = cryptd_skcipher_init_tfm;
+ inst->alg.exit = cryptd_skcipher_exit_tfm;
+
+ inst->alg.setkey = cryptd_skcipher_setkey;
+ inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
+ inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
+
+ inst->free = cryptd_skcipher_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err) {
+out_drop_skcipher:
+ crypto_drop_skcipher(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+ return err;
+}
+
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_create_blkcipher(tmpl, tb, &queue);
+ if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER)
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
+
+ return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_create_hash(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_AEAD:
@@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct cryptd_skcipher_ctx *ctx;
+ struct crypto_skcipher *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
+ tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = crypto_skcipher_ctx(tfm);
+ atomic_set(&ctx->refcnt, 1);
+
+ return container_of(tfm, struct cryptd_skcipher, base);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
+
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
+
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
+
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
+{
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+ if (atomic_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
+
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
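
The new cryptd skcipher helpers follow the existing ablkcipher/ahash pattern. A minimal, hypothetical sketch of a user; in-tree users (e.g. the AES-NI glue via crypto_simd) pass CRYPTO_ALG_INTERNAL type/mask and an internal child name instead:

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	static int example_use_cryptd_skcipher(void)
	{
		struct cryptd_skcipher *ctfm;
		struct crypto_skcipher *child;

		ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(ctfm))
			return PTR_ERR(ctfm);

		/* &ctfm->base is an ordinary async skcipher; requests
		 * submitted on it run from the cryptd workqueue.  The
		 * synchronous child is available for may-sleep paths.
		 */
		child = cryptd_skcipher_child(ctfm);
		(void)child;

		cryptd_free_skcipher(ctfm);
		return 0;
	}
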
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 6989ba0046df..f1bf3418d968 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* If another context is idling then defer */
if (engine->idling) {
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
goto out;
}
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* Only do teardown in the thread */
if (!in_kthread) {
- kthread_queue_work(&engine->kworker,
+ kthread_queue_work(engine->kworker,
&engine->pump_requests);
goto out;
}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
ret = ablkcipher_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
ret = ahash_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
req->base.complete(&req->base, err);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
req->base.complete(&req->base, err);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- kthread_queue_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
return 0;
}
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
spin_lock_init(&engine->queue_lock);
- kthread_init_worker(&engine->kworker);
- engine->kworker_task = kthread_run(kthread_worker_fn,
- &engine->kworker, "%s",
- engine->name);
- if (IS_ERR(engine->kworker_task)) {
+ engine->kworker = kthread_create_worker(0, "%s", engine->name);
+ if (IS_ERR(engine->kworker)) {
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
- sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+ sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
}
return engine;
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
if (ret)
return ret;
- kthread_flush_worker(&engine->kworker);
- kthread_stop(engine->kworker_task);
+ kthread_destroy_worker(engine->kworker);
return 0;
}
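
The crypto_engine conversion above replaces the embedded kthread_worker plus hand-created task with the managed kthread_create_worker()/kthread_destroy_worker() API. The general pattern, as a standalone sketch unrelated to crypto_engine itself:

	#include <linux/kthread.h>
	#include <linux/err.h>

	static void example_work_fn(struct kthread_work *work)
	{
		/* runs in the dedicated worker thread */
	}

	static int example_worker_pattern(void)
	{
		struct kthread_worker *worker;
		struct kthread_work work;

		worker = kthread_create_worker(0, "example-pump");
		if (IS_ERR(worker))
			return PTR_ERR(worker);

		kthread_init_work(&work, example_work_fn);
		kthread_queue_work(worker, &work);

		kthread_flush_worker(worker);	/* wait for queued work      */
		kthread_destroy_worker(worker);	/* stop and free the thread  */
		return 0;
	}
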
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 1c5705481c69..a90404a0c5ff 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -112,6 +112,21 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
+ case CRYPTO_ALG_TYPE_ACOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
case CRYPTO_ALG_TYPE_AKCIPHER:
if (crypto_report_akcipher(skb, alg))
goto nla_put_failure;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ff4d21eddb83..a9a7a44f2783 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
unsigned long align;
unsigned int reqsize;
- cipher = crypto_spawn_skcipher2(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher2(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_free_inst;
diff --git a/crypto/cts.c b/crypto/cts.c
index 51976187b2bf..00254d76b21b 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,7 +290,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
unsigned bsize;
unsigned align;
- cipher = crypto_spawn_skcipher2(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -348,9 +348,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher2(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_free_inst;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 95d8d37c5021..f942cb391890 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
@@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
vfree(ctx->decomp_stream.workspace);
}
-static int deflate_init(struct crypto_tfm *tfm)
+static int __deflate_init(void *ctx)
{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
ret = deflate_comp_init(ctx);
@@ -116,19 +116,55 @@ out:
return ret;
}
-static void deflate_exit(struct crypto_tfm *tfm)
+static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
+{
+ struct deflate_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __deflate_init(ctx);
+ if (ret) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ return ctx;
+}
+
+static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ return __deflate_init(ctx);
+}
+
+static void __deflate_exit(void *ctx)
+{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ __deflate_exit(ctx);
+ kzfree(ctx);
+}
+
+static void deflate_exit(struct crypto_tfm *tfm)
+{
+ struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __deflate_exit(ctx);
+}
+
+static int __deflate_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->comp_stream;
ret = zlib_deflateReset(stream);
@@ -153,12 +189,27 @@ out:
return ret;
}
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+ return __deflate_compress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __deflate_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __deflate_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->decomp_stream;
ret = zlib_inflateReset(stream);
@@ -194,6 +245,21 @@ out:
return ret;
}
+static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+ return __deflate_decompress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __deflate_decompress(src, slen, dst, dlen, ctx);
+}
+
static struct crypto_alg alg = {
.cra_name = "deflate",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -206,14 +272,39 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = deflate_alloc_ctx,
+ .free_ctx = deflate_free_ctx,
+ .compress = deflate_scompress,
+ .decompress = deflate_sdecompress,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init deflate_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_init(deflate_mod_init);
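
deflate now follows the same pattern as 842 above: the stateful working memory moves into alloc_ctx()/free_ctx() so a single tfm can serve many requests concurrently. A skeletal, purely hypothetical scomp backend for reference; the callback signatures are the ones used by the deflate and 842 code above:

	#include <crypto/internal/scompress.h>
	#include <linux/errno.h>
	#include <linux/module.h>
	#include <linux/string.h>

	/* Hypothetical no-op "nullcomp" backend: alloc_ctx() returns a
	 * per-request working context (or ERR_PTR), free_ctx() releases
	 * it, and compress()/decompress() touch only that context.
	 */
	static void *nullcomp_alloc_ctx(struct crypto_scomp *tfm)
	{
		return NULL;	/* this backend needs no working memory */
	}

	static void nullcomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
	{
	}

	static int nullcomp_copy(struct crypto_scomp *tfm, const u8 *src,
				 unsigned int slen, u8 *dst,
				 unsigned int *dlen, void *ctx)
	{
		if (*dlen < slen)
			return -ENOSPC;
		memcpy(dst, src, slen);
		*dlen = slen;
		return 0;
	}

	static struct scomp_alg nullcomp = {
		.alloc_ctx	= nullcomp_alloc_ctx,
		.free_ctx	= nullcomp_free_ctx,
		.compress	= nullcomp_copy,
		.decompress	= nullcomp_copy,
		.base		= {
			.cra_name	 = "nullcomp",
			.cra_driver_name = "nullcomp-scomp",
			.cra_module	 = THIS_MODULE,
		},
	};
	/* registered/unregistered with crypto_register_scomp(&nullcomp) /
	 * crypto_unregister_scomp(&nullcomp), exactly as deflate does above. */
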
diff --git a/crypto/dh.c b/crypto/dh.c
index 9d19360e7189..ddcb528ab2cc 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req)
if (req->src) {
base = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!base) {
- ret = EINVAL;
+ ret = -EINVAL;
goto err_free_val;
}
} else {
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fb33f7d3b052..8a4d98b4adba 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -262,6 +262,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
#define DRBG_CTR_NULL_LEN 128
+#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -1644,6 +1645,9 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
kfree(drbg->ctr_null_value_buf);
drbg->ctr_null_value = NULL;
+ kfree(drbg->outscratchpadbuf);
+ drbg->outscratchpadbuf = NULL;
+
return 0;
}
@@ -1708,6 +1712,15 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
alignmask + 1);
+ drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
+ GFP_KERNEL);
+ if (!drbg->outscratchpadbuf) {
+ drbg_fini_sym_kernel(drbg);
+ return -ENOMEM;
+ }
+ drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
+ alignmask + 1);
+
return alignmask;
}
@@ -1737,15 +1750,16 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *outbuf, u32 outlen)
{
struct scatterlist sg_in;
+ int ret;
sg_init_one(&sg_in, inbuf, inlen);
while (outlen) {
- u32 cryptlen = min_t(u32, inlen, outlen);
+ u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
struct scatterlist sg_out;
- int ret;
- sg_init_one(&sg_out, outbuf, cryptlen);
+ /* Output buffer may not be valid for SGL, use scratchpad */
+ sg_init_one(&sg_out, drbg->outscratchpad, cryptlen);
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
ret = crypto_skcipher_encrypt(drbg->ctr_req);
@@ -1761,14 +1775,20 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
break;
}
default:
- return ret;
+ goto out;
}
init_completion(&drbg->ctr_completion);
+ memcpy(outbuf, drbg->outscratchpad, cryptlen);
+
outlen -= cryptlen;
+ outbuf += cryptlen;
}
+ ret = 0;
- return 0;
+out:
+ memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+ return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/gcm.c b/crypto/gcm.c
index f624ac98c94e..b7ad808be3d4 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(ghash))
return PTR_ERR(ghash);
- ctr = crypto_spawn_skcipher2(&ictx->ctr);
+ ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_hash;
@@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (err)
goto err_drop_ghash;
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* We only support 16-byte blocks. */
+ err = -EINVAL;
if (crypto_skcipher_alg_ivsize(ctr) != 16)
goto out_put_ctr;
/* Not a stream cipher? */
- err = -EINVAL;
if (ctr->base.cra_blocksize != 1)
goto out_put_ctr;
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 5276607c72d0..72015fee533d 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe);
* t[1][BYTE] contains g*x^8*BYTE
* ..
* t[15][BYTE] contains g*x^120*BYTE */
-struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
-{
- struct gf128mul_64k *t;
- int i, j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- for (i = 0; i < 16; i++) {
- t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
- if (!t->t[i]) {
- gf128mul_free_64k(t);
- t = NULL;
- goto out;
- }
- }
-
- t->t[0]->t[128] = *g;
- for (j = 64; j > 0; j >>= 1)
- gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
-
- for (i = 0;;) {
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[i]->t[j + k],
- &t->t[i]->t[j], &t->t[i]->t[k]);
-
- if (++i >= 16)
- break;
-
- for (j = 128; j > 0; j >>= 1) {
- t->t[i]->t[j] = t->t[i - 1]->t[j];
- gf128mul_x8_lle(&t->t[i]->t[j]);
- }
- }
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_64k_lle);
-
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
{
struct gf128mul_64k *t;
@@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t)
int i;
for (i = 0; i < 16; i++)
- kfree(t->t[i]);
- kfree(t);
+ kzfree(t->t[i]);
+ kzfree(t);
}
EXPORT_SYMBOL(gf128mul_free_64k);
-void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i;
-
- *r = t->t[0]->t[ap[0]];
- for (i = 1; i < 16; ++i)
- be128_xor(r, r, &t->t[i]->t[ap[i]]);
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_64k_lle);
-
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
{
u8 *ap = (u8 *)a;
diff --git a/crypto/internal.h b/crypto/internal.h
index 7eefcdb00227..f07320423191 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
-void crypto_exit_compress_ops(struct crypto_tfm *tfm);
-
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index c4938497eedb..787dccca3715 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -39,7 +39,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/fips.h>
#include <linux/time.h>
#include <linux/crypto.h>
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 6f9908a7ebcb..ecd8474018e3 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -17,7 +17,8 @@
*
* The test vectors are included in the testing module tcrypt.[ch] */
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -29,11 +30,30 @@
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>
+#define LRW_BUFFER_SIZE 128u
+
struct priv {
- struct crypto_cipher *child;
+ struct crypto_skcipher *child;
struct lrw_table_ctx table;
};
+struct rctx {
+ be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
+
+ be128 t;
+
+ be128 *ext;
+
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ unsigned int left;
+
+ struct skcipher_request subreq;
+};
+
static inline void setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ (0x80 -
@@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx)
}
EXPORT_SYMBOL_GPL(lrw_free_table);
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct priv *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->child;
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
if (err)
return err;
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
return lrw_init_table(&ctx->table, tweak);
}
-struct sinfo {
- be128 t;
- struct crypto_tfm *tfm;
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
-
static inline void inc(be128 *iv)
{
be64_add_cpu(&iv->b, 1);
@@ -109,13 +123,6 @@ static inline void inc(be128 *iv)
be64_add_cpu(&iv->a, 1);
}
-static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
-{
- be128_xor(dst, &s->t, src); /* PP <- T xor P */
- s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */
- be128_xor(dst, dst, &s->t); /* C <- T xor CC */
-}
-
/* this returns the number of consecutive 1 bits starting
* from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
@@ -135,83 +142,263 @@ static inline int get_index128(be128 *block)
return x;
}
-static int crypt(struct blkcipher_desc *d,
- struct blkcipher_walk *w, struct priv *ctx,
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
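+/*
+ * Second half of one LRW chunk: XOR the tweak values saved by pre_crypt
+ * back into the child cipher's output and, if data remains, re-point the
+ * destination scatterlist at the unprocessed tail.
+ */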
+static int post_crypt(struct skcipher_request *req)
{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
+ const int bs = LRW_BLOCK_SIZE;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned offset;
int err;
- unsigned int avail;
+
+ subreq = &rctx->subreq;
+ err = skcipher_walk_virt(&w, subreq, false);
+
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wdst;
+
+ wdst = w.dst.virt.addr;
+
+ do {
+ be128_xor(wdst, buf++, wdst);
+ wdst++;
+ } while ((avail -= bs) >= bs);
+
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ rctx->left -= subreq->cryptlen;
+
+ if (err || !rctx->left)
+ goto out;
+
+ rctx->dst = rctx->dstbuf;
+
+ scatterwalk_done(&w.out, 0, 1);
+ sg = w.out.sg;
+ offset = w.out.offset;
+
+ if (rctx->dst != sg) {
+ rctx->dst[0] = *sg;
+ sg_unmark_end(rctx->dst);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ }
+ rctx->dst[0].length -= offset - sg->offset;
+ rctx->dst[0].offset = offset;
+
+out:
+ return err;
+}
+
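+/*
+ * First half of one LRW chunk: for each block save the current tweak T,
+ * write T xor P to the destination and step T via the precomputed mulinc
+ * table, then aim the subrequest at the child cipher to process the
+ * destination in place. If more data remains, the source scatterlist is
+ * advanced past the bytes already consumed.
+ */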
+static int pre_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
const int bs = LRW_BLOCK_SIZE;
- struct sinfo s = {
- .tfm = crypto_cipher_tfm(ctx->child),
- .fn = fn
- };
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned cryptlen;
+ unsigned offset;
be128 *iv;
- u8 *wsrc;
- u8 *wdst;
+ bool more;
+ int err;
- err = blkcipher_walk_virt(d, w);
- if (!(avail = w->nbytes))
- return err;
+ subreq = &rctx->subreq;
+ skcipher_request_set_tfm(subreq, tfm);
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ cryptlen = subreq->cryptlen;
+ more = rctx->left > cryptlen;
+ if (!more)
+ cryptlen = rctx->left;
- /* calculate first value of T */
- iv = (be128 *)w->iv;
- s.t = *iv;
+ skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+ cryptlen, req->iv);
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&s.t, ctx->table.table);
+ err = skcipher_walk_virt(&w, subreq, false);
+ iv = w.iv;
- goto first;
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wsrc;
+ be128 *wdst;
+
+ wsrc = w.src.virt.addr;
+ wdst = w.dst.virt.addr;
- for (;;) {
do {
+ *buf++ = rctx->t;
+ be128_xor(wdst++, &rctx->t, wsrc++);
+
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&s.t, &s.t,
+ be128_xor(&rctx->t, &rctx->t,
&ctx->table.mulinc[get_index128(iv)]);
inc(iv);
+ } while ((avail -= bs) >= bs);
-first:
- lrw_round(&s, wdst, wsrc);
+ err = skcipher_walk_done(&w, avail);
+ }
- wsrc += bs;
- wdst += bs;
- } while ((avail -= bs) >= bs);
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+ cryptlen, NULL);
- err = blkcipher_walk_done(d, w, avail);
- if (!(avail = w->nbytes))
- break;
+ if (err || !more)
+ goto out;
+
+ rctx->src = rctx->srcbuf;
+
+ scatterwalk_done(&w.in, 0, 1);
+ sg = w.in.sg;
+ offset = w.in.offset;
+
+ if (rctx->src != sg) {
+ rctx->src[0] = *sg;
+ sg_unmark_end(rctx->src);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ }
+ rctx->src[0].length -= offset - sg->offset;
+ rctx->src[0].offset = offset;
+
+out:
+ return err;
+}
+
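+/*
+ * Per-request setup: install the completion callback, pick the chunk size
+ * (allocating an external buffer when the request exceeds LRW_BUFFER_SIZE)
+ * and compute the initial tweak T = I * Key2.
+ */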
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+ gfp_t gfp;
+
+ subreq = &rctx->subreq;
+ skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ rctx->ext = NULL;
+
+ subreq->cryptlen = LRW_BUFFER_SIZE;
+ if (req->cryptlen > LRW_BUFFER_SIZE) {
+ subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+ rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ }
+
+ rctx->src = req->src;
+ rctx->dst = req->dst;
+ rctx->left = req->cryptlen;
+
+ /* calculate first value of T */
+ memcpy(&rctx->t, req->iv, sizeof(rctx->t));
+
+ /* T <- I*Key2 */
+ gf128mul_64k_bbe(&rctx->t, ctx->table.table);
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ return 0;
+}
+
+static void exit_crypt(struct skcipher_request *req)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->left = 0;
+
+ if (rctx->ext)
+ kfree(rctx->ext);
+}
+
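+/*
+ * Drive the chunked loop of pre_crypt, child encrypt and post_crypt until
+ * the whole request is handled, an error occurs or the child cipher goes
+ * asynchronous (encrypt_done then resumes the loop).
+ */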
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
}
+ exit_crypt(req);
return err;
}
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_encrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+ return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx,
- crypto_cipher_alg(ctx->child)->cia_encrypt);
+ exit_crypt(req);
+ return err;
}
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void decrypt_done(struct crypto_async_request *areq, int err)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_decrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx,
- crypto_cipher_alg(ctx->child)->cia_decrypt);
+ skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+ return do_decrypt(req, init_crypt(req, decrypt_done));
}
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -293,95 +480,161 @@ first:
}
EXPORT_SYMBOL_GPL(lrw_crypt);
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct priv *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *cipher;
- cipher = crypto_spawn_cipher(spawn);
+ cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- crypto_free_cipher(cipher);
- return -EINVAL;
- }
-
ctx->child = cipher;
+
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
+ sizeof(struct rctx));
+
return 0;
}
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_tfm_ctx(tfm);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
lrw_free_table(&ctx->table);
- crypto_free_cipher(ctx->child);
+ crypto_free_skcipher(ctx->child);
+}
+
+static void free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
}
-static struct crypto_instance *alloc(struct rtattr **tb)
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct crypto_skcipher_spawn *spawn;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
+ char ecb_name[CRYPTO_MAX_ALG_NAME];
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err == -ENOENT) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_skcipher(spawn, ecb_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ }
+
if (err)
- return ERR_PTR(err);
+ goto err_free_inst;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ alg = crypto_skcipher_spawn_alg(spawn);
- inst = crypto_alloc_instance("lrw", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = -EINVAL;
+ if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
+ if (crypto_skcipher_alg_ivsize(alg))
+ goto err_drop_spawn;
- if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
- else inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
+ &alg->base);
+ if (err)
+ goto err_drop_spawn;
- if (!(alg->cra_blocksize % 4))
- inst->alg.cra_alignmask |= 3;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize =
- alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
- inst->alg.cra_blkcipher.max_keysize =
- alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+ err = -EINVAL;
+ cipher_name = alg->base.cra_name;
- inst->alg.cra_ctxsize = sizeof(struct priv);
+ /* Alas we screwed up the naming so we have to mangle the
+ * cipher name.
+ */
+ if (!strncmp(cipher_name, "ecb(", 4)) {
+ unsigned len;
- inst->alg.cra_init = init_tfm;
- inst->alg.cra_exit = exit_tfm;
+ len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+ if (len < 2 || len >= sizeof(ecb_name))
+ goto err_drop_spawn;
- inst->alg.cra_blkcipher.setkey = setkey;
- inst->alg.cra_blkcipher.encrypt = encrypt;
- inst->alg.cra_blkcipher.decrypt = decrypt;
+ if (ecb_name[len - 1] != ')')
+ goto err_drop_spawn;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ ecb_name[len - 1] = 0;
-static void free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
+ }
+
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+ (__alignof__(u64) - 1);
+
+ inst->alg.ivsize = LRW_BLOCK_SIZE;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+ LRW_BLOCK_SIZE;
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
+ LRW_BLOCK_SIZE;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+ inst->alg.init = init_tfm;
+ inst->alg.exit = exit_tfm;
+
+ inst->alg.setkey = setkey;
+ inst->alg.encrypt = encrypt;
+ inst->alg.decrypt = decrypt;
+
+ inst->free = free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_skcipher(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_tmpl = {
.name = "lrw",
- .alloc = alloc,
- .free = free,
+ .create = create,
.module = THIS_MODULE,
};
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbceaf3104..99c1b2cc2976 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -23,36 +23,53 @@
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
struct lz4_ctx {
void *lz4_comp_mem;
};
+static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = vmalloc(LZ4_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lz4_init(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS);
- if (!ctx->lz4_comp_mem)
+ ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lz4_comp_mem))
return -ENOMEM;
return 0;
}
+static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ vfree(ctx);
+}
+
static void lz4_exit(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
- vfree(ctx->lz4_comp_mem);
+
+ lz4_free_ctx(NULL, ctx->lz4_comp_mem);
}
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
- err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem);
+ err = lz4_compress(src, slen, dst, &tmp_len, ctx);
if (err < 0)
return -EINVAL;
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
+}
+
+static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int err;
size_t tmp_len = *dlen;
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
}
+static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
static struct crypto_alg alg_lz4 = {
.cra_name = "lz4",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = {
.coa_decompress = lz4_decompress_crypto } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = lz4_alloc_ctx,
+ .free_ctx = lz4_free_ctx,
+ .compress = lz4_scompress,
+ .decompress = lz4_sdecompress,
+ .base = {
+ .cra_name = "lz4",
+ .cra_driver_name = "lz4-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init lz4_mod_init(void)
{
- return crypto_register_alg(&alg_lz4);
+ int ret;
+
+ ret = crypto_register_alg(&alg_lz4);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg_lz4);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lz4_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4);
+ crypto_unregister_scomp(&scomp);
}
module_init(lz4_mod_init);
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index a1d3b5bd3d85..75ffc4a3f786 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -22,37 +22,53 @@
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
struct lz4hc_ctx {
void *lz4hc_comp_mem;
};
+static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = vmalloc(LZ4HC_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lz4hc_init(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS);
- if (!ctx->lz4hc_comp_mem)
+ ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lz4hc_comp_mem))
return -ENOMEM;
return 0;
}
+static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ vfree(ctx);
+}
+
static void lz4hc_exit(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
- vfree(ctx->lz4hc_comp_mem);
+ lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
}
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen;
int err;
- err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem);
+ err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
if (err < 0)
return -EINVAL;
@@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lz4hc_compress_crypto(src, slen, dst, dlen,
+ ctx->lz4hc_comp_mem);
+}
+
+static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
int err;
size_t tmp_len = *dlen;
@@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
return err;
}
+static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst,
+ unsigned int *dlen)
+{
+ return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
static struct crypto_alg alg_lz4hc = {
.cra_name = "lz4hc",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +136,39 @@ static struct crypto_alg alg_lz4hc = {
.coa_decompress = lz4hc_decompress_crypto } }
};
+static struct scomp_alg scomp = {
+ .alloc_ctx = lz4hc_alloc_ctx,
+ .free_ctx = lz4hc_free_ctx,
+ .compress = lz4hc_scompress,
+ .decompress = lz4hc_sdecompress,
+ .base = {
+ .cra_name = "lz4hc",
+ .cra_driver_name = "lz4hc-scomp",
+ .cra_module = THIS_MODULE,
+ }
+};
+
static int __init lz4hc_mod_init(void)
{
- return crypto_register_alg(&alg_lz4hc);
+ int ret;
+
+ ret = crypto_register_alg(&alg_lz4hc);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg_lz4hc);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lz4hc_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4hc);
+ crypto_unregister_scomp(&scomp);
}
module_init(lz4hc_mod_init);
diff --git a/crypto/lzo.c b/crypto/lzo.c
index c3f3dd9a28c5..168df784da84 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -22,40 +22,55 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/lzo.h>
+#include <crypto/internal/scompress.h>
struct lzo_ctx {
void *lzo_comp_mem;
};
+static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+{
+ void *ctx;
+
+ ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
+ if (!ctx)
+ ctx = vmalloc(LZO1X_MEM_COMPRESS);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ return ctx;
+}
+
static int lzo_init(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
- GFP_KERNEL | __GFP_NOWARN);
- if (!ctx->lzo_comp_mem)
- ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
- if (!ctx->lzo_comp_mem)
+ ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
+ if (IS_ERR(ctx->lzo_comp_mem))
return -ENOMEM;
return 0;
}
+static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+ kvfree(ctx);
+}
+
static void lzo_exit(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
- kvfree(ctx->lzo_comp_mem);
+ lzo_free_ctx(NULL, ctx->lzo_comp_mem);
}
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lzo_compress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
+ err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
return 0;
}
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
+}
+
+static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lzo_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __lzo_decompress(const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
int err;
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
@@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
*dlen = tmp_len;
return 0;
+}
+static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ return __lzo_decompress(src, slen, dst, dlen);
+}
+
+static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return __lzo_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
@@ -88,18 +130,43 @@ static struct crypto_alg alg = {
.cra_init = lzo_init,
.cra_exit = lzo_exit,
.cra_u = { .compress = {
- .coa_compress = lzo_compress,
- .coa_decompress = lzo_decompress } }
+ .coa_compress = lzo_compress,
+ .coa_decompress = lzo_decompress } }
+};
+
+static struct scomp_alg scomp = {
+ .alloc_ctx = lzo_alloc_ctx,
+ .free_ctx = lzo_free_ctx,
+ .compress = lzo_scompress,
+ .decompress = lzo_sdecompress,
+ .base = {
+ .cra_name = "lzo",
+ .cra_driver_name = "lzo-scomp",
+ .cra_module = THIS_MODULE,
+ }
};
static int __init lzo_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int ret;
+
+ ret = crypto_register_alg(&alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_scomp(&scomp);
+ if (ret) {
+ crypto_unregister_alg(&alg);
+ return ret;
+ }
+
+ return ret;
}
static void __exit lzo_mod_fini(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_scomp(&scomp);
}
module_init(lzo_mod_init);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 94ee44acd465..c207458d6299 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -254,18 +254,22 @@ out_free_inst:
goto out;
}
-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
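+/*
+ * Returns false if the type attribute cannot be read or if the
+ * CRYPTO_ALG_INTERNAL bit is not set in both type and mask.
+ */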
+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
u32 *mask)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return;
- if ((algt->type & CRYPTO_ALG_INTERNAL))
- *type |= CRYPTO_ALG_INTERNAL;
- if ((algt->mask & CRYPTO_ALG_INTERNAL))
- *mask |= CRYPTO_ALG_INTERNAL;
+ return false;
+
+ *type |= algt->type & CRYPTO_ALG_INTERNAL;
+ *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+
+ if (*type & *mask & CRYPTO_ALG_INTERNAL)
+ return true;
+ else
+ return false;
}
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
u32 mask = 0;
int err;
- mcryptd_check_internal(tb, &type, &mask);
+ if (!mcryptd_check_internal(tb, &type, &mask))
+ return -EINVAL;
halg = ahash_attr_alg(tb[1], type, mask);
if (IS_ERR(halg))
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index f654965f0933..e4538e07f7ca 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -14,40 +14,37 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_pcbc_ctx {
struct crypto_cipher *child;
};
-static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
-static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -56,7 +53,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
do {
crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), dst, iv);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
memcpy(iv, dst, bsize);
crypto_xor(iv, src, bsize);
@@ -67,12 +64,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -82,7 +77,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
do {
memcpy(tmpbuf, src, bsize);
crypto_xor(iv, src, bsize);
- fn(crypto_cipher_tfm(tfm), src, iv);
+ crypto_cipher_encrypt_one(tfm, src, iv);
memcpy(iv, tmpbuf, bsize);
crypto_xor(iv, src, bsize);
@@ -94,38 +89,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
+ nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
child);
else
- nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
+ nbytes = crypto_pcbc_encrypt_segment(req, &walk,
child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
@@ -133,7 +124,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
u8 *iv = walk->iv;
do {
- fn(crypto_cipher_tfm(tfm), dst, src);
+ crypto_cipher_decrypt_one(tfm, dst, src);
crypto_xor(dst, iv, bsize);
memcpy(iv, src, bsize);
crypto_xor(iv, dst, bsize);
@@ -147,21 +138,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
+ struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
- crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
- u8 tmpbuf[bsize];
+ u8 tmpbuf[bsize] __attribute__ ((aligned(__alignof__(u32))));
do {
memcpy(tmpbuf, src, bsize);
- fn(crypto_cipher_tfm(tfm), src, src);
+ crypto_cipher_decrypt_one(tfm, src, src);
crypto_xor(src, iv, bsize);
memcpy(iv, tmpbuf, bsize);
crypto_xor(iv, src, bsize);
@@ -174,37 +163,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
return nbytes;
}
-static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_pcbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
+ nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
child);
else
- nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
+ nbytes = crypto_pcbc_decrypt_segment(req, &walk,
child);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
@@ -215,68 +202,98 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
+static void crypto_pcbc_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
- if (err)
- return ERR_PTR(err);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) &
+ ~CRYPTO_ALG_INTERNAL)
+ return -EINVAL;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER |
+ (algt->type & CRYPTO_ALG_INTERNAL),
+ CRYPTO_ALG_TYPE_MASK |
+ (algt->mask & CRYPTO_ALG_INTERNAL));
+ err = PTR_ERR(alg);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
- inst = crypto_alloc_instance("pcbc", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL;
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
/* We access the data as u32s when xoring. */
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
- inst->alg.cra_init = crypto_pcbc_init_tfm;
- inst->alg.cra_exit = crypto_pcbc_exit_tfm;
+ inst->alg.init = crypto_pcbc_init_tfm;
+ inst->alg.exit = crypto_pcbc_exit_tfm;
- inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+ inst->alg.setkey = crypto_pcbc_setkey;
+ inst->alg.encrypt = crypto_pcbc_encrypt;
+ inst->alg.decrypt = crypto_pcbc_decrypt;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ inst->free = crypto_pcbc_free;
-static void crypto_pcbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_pcbc_tmpl = {
.name = "pcbc",
- .alloc = crypto_pcbc_alloc,
- .free = crypto_pcbc_free,
+ .create = crypto_pcbc_create,
.module = THIS_MODULE,
};
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2df9835dfbc0..b1c2d57dc734 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -17,6 +17,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
static inline u64 mlt(u64 a, u64 b)
{
@@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask)
return v & mask;
}
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff;
- dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03;
- dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff;
- dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff;
- dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
+ dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff;
+ dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03;
+ dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff;
+ dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff;
+ dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff;
}
static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
- dctx->s[0] = le32_to_cpuvp(key + 0);
- dctx->s[1] = le32_to_cpuvp(key + 4);
- dctx->s[2] = le32_to_cpuvp(key + 8);
- dctx->s[3] = le32_to_cpuvp(key + 12);
+ dctx->s[0] = get_unaligned_le32(key + 0);
+ dctx->s[1] = get_unaligned_le32(key + 4);
+ dctx->s[2] = get_unaligned_le32(key + 8);
+ dctx->s[3] = get_unaligned_le32(key + 12);
}
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
@@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
/* h += m[i] */
- h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff;
- h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff;
- h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff;
- h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff;
- h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
+ h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
+ h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
/* h *= r */
d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 52ce17a3dd63..c16c94f88733 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
sg = scatterwalk_ffwd(tmp, sg, start);
- if (sg_page(sg) == virt_to_page(buf) &&
- sg->offset == offset_in_page(buf))
- return;
-
scatterwalk_start(&walk, sg);
scatterwalk_copychunks(buf, &walk, nbytes, out);
scatterwalk_done(&walk, out, 0);
diff --git a/crypto/scompress.c b/crypto/scompress.c
new file mode 100644
index 000000000000..35e396d154b7
--- /dev/null
+++ b/crypto/scompress.c
@@ -0,0 +1,356 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/vmalloc.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_scomp_type;
+static void * __percpu *scomp_src_scratches;
+static void * __percpu *scomp_dst_scratches;
+static int scomp_scratch_users;
+static DEFINE_MUTEX(scomp_lock);
+
+#ifdef CONFIG_NET
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_comp rscomp;
+
+ strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rscomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : scomp\n");
+}
+
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static void crypto_scomp_free_scratches(void * __percpu *scratches)
+{
+ int i;
+
+ if (!scratches)
+ return;
+
+ for_each_possible_cpu(i)
+ vfree(*per_cpu_ptr(scratches, i));
+
+ free_percpu(scratches);
+}
+
+static void * __percpu *crypto_scomp_alloc_scratches(void)
+{
+ void * __percpu *scratches;
+ int i;
+
+ scratches = alloc_percpu(void *);
+ if (!scratches)
+ return NULL;
+
+ for_each_possible_cpu(i) {
+ void *scratch;
+
+ scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!scratch)
+ goto error;
+ *per_cpu_ptr(scratches, i) = scratch;
+ }
+
+ return scratches;
+
+error:
+ crypto_scomp_free_scratches(scratches);
+ return NULL;
+}
+
+static void crypto_scomp_free_all_scratches(void)
+{
+ if (!--scomp_scratch_users) {
+ crypto_scomp_free_scratches(scomp_src_scratches);
+ crypto_scomp_free_scratches(scomp_dst_scratches);
+ scomp_src_scratches = NULL;
+ scomp_dst_scratches = NULL;
+ }
+}
+
+static int crypto_scomp_alloc_all_scratches(void)
+{
+ if (!scomp_scratch_users++) {
+ scomp_src_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_src_scratches)
+ return -ENOMEM;
+ scomp_dst_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_dst_scratches)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void crypto_scomp_sg_free(struct scatterlist *sgl)
+{
+ int i, n;
+ struct page *page;
+
+ if (!sgl)
+ return;
+
+ n = sg_nents(sgl);
+ for_each_sg(sgl, sgl, n, i) {
+ page = sg_page(sgl);
+ if (page)
+ __free_page(page);
+ }
+
+ kfree(sgl);
+}
+
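+/*
+ * Allocate a scatterlist of single pages large enough to hold @size bytes;
+ * used when the caller did not supply a destination buffer.
+ */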
+static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
+{
+ struct scatterlist *sgl;
+ struct page *page;
+ int i, n;
+
+ n = ((size - 1) >> PAGE_SHIFT) + 1;
+
+ sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, n);
+
+ for (i = 0; i < n; i++) {
+ page = alloc_page(gfp);
+ if (!page)
+ goto err;
+ sg_set_page(sgl + i, page, PAGE_SIZE, 0);
+ }
+
+ return sgl;
+
+err:
+ sg_mark_end(sgl + i);
+ crypto_scomp_sg_free(sgl);
+ return NULL;
+}
+
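+/*
+ * Bridge an acomp request to a synchronous scomp algorithm: linearize the
+ * source scatterlist into a per-CPU scratch buffer, run the scomp
+ * (de)compression and copy the result back out, allocating a destination
+ * scatterlist if none was supplied.
+ */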
+static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+ const int cpu = get_cpu();
+ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ int ret;
+
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (req->dst && !req->dlen) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ req->dlen = SCOMP_SCRATCH_SIZE;
+
+ scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+ if (dir)
+ ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ if (!ret) {
+ if (!req->dst) {
+ req->dst = crypto_scomp_sg_alloc(req->dlen,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+ 1);
+ }
+out:
+ put_cpu();
+ return ret;
+}
+
+static int scomp_acomp_compress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 1);
+}
+
+static int scomp_acomp_decompress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 0);
+}
+
+static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_scomp(*ctx);
+}
+
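+/*
+ * Set up an acomp transform that is backed by a synchronous scomp
+ * algorithm, so scomp implementations are reachable through the acomp API.
+ */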
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ scomp = crypto_create_tfm(calg, &crypto_scomp_type);
+ if (IS_ERR(scomp)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(scomp);
+ }
+
+ *ctx = scomp;
+ tfm->exit = crypto_exit_scomp_ops_async;
+
+ crt->compress = scomp_acomp_compress;
+ crt->decompress = scomp_acomp_decompress;
+ crt->dst_free = crypto_scomp_sg_free;
+ crt->reqsize = sizeof(void *);
+
+ return 0;
+}
+
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx;
+
+ ctx = crypto_scomp_alloc_ctx(scomp);
+ if (IS_ERR(ctx)) {
+ kfree(req);
+ return NULL;
+ }
+
+ *req->__ctx = ctx;
+
+ return req;
+}
+
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx = *req->__ctx;
+
+ if (ctx)
+ crypto_scomp_free_ctx(scomp, ctx);
+}
+
+static const struct crypto_type crypto_scomp_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_scomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_scomp_show,
+#endif
+ .report = crypto_scomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_SCOMPRESS,
+ .tfmsize = offsetof(struct crypto_scomp, base),
+};
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int ret = -ENOMEM;
+
+ mutex_lock(&scomp_lock);
+ if (crypto_scomp_alloc_all_scratches())
+ goto error;
+
+ base->cra_type = &crypto_scomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
+
+ ret = crypto_register_alg(base);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&scomp_lock);
+ return ret;
+
+error:
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_scomp);
+
+int crypto_unregister_scomp(struct scomp_alg *alg)
+{
+ int ret;
+
+ mutex_lock(&scomp_lock);
+ ret = crypto_unregister_alg(&alg->base);
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Synchronous compression type");
diff --git a/crypto/simd.c b/crypto/simd.c
new file mode 100644
index 000000000000..88203370a62f
--- /dev/null
+++ b/crypto/simd.c
@@ -0,0 +1,226 @@
+/*
+ * Shared crypto simd helpers
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Based on aesni-intel_glue.c by:
+ * Copyright (C) 2008, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+#include <crypto/cryptd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <asm/simd.h>
+
+struct simd_skcipher_alg {
+ const char *ialg_name;
+ struct skcipher_alg alg;
+};
+
+struct simd_skcipher_ctx {
+ struct cryptd_skcipher *cryptd_tfm;
+};
+
+static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, key_len);
+ crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int simd_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq;
+ struct crypto_skcipher *child;
+
+ subreq = skcipher_request_ctx(req);
+ *subreq = *req;
+
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+ skcipher_request_set_tfm(subreq, child);
+
+ return crypto_skcipher_encrypt(subreq);
+}
+
+static int simd_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_request *subreq;
+ struct crypto_skcipher *child;
+
+ subreq = skcipher_request_ctx(req);
+ *subreq = *req;
+
+ if (!may_use_simd() ||
+ (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+ skcipher_request_set_tfm(subreq, child);
+
+ return crypto_skcipher_decrypt(subreq);
+}
+
+static void simd_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ cryptd_free_skcipher(ctx->cryptd_tfm);
+}
+
+static int simd_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cryptd_skcipher *cryptd_tfm;
+ struct simd_skcipher_alg *salg;
+ struct skcipher_alg *alg;
+ unsigned reqsize;
+
+ alg = crypto_skcipher_alg(tfm);
+ salg = container_of(alg, struct simd_skcipher_alg, alg);
+
+ cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ ctx->cryptd_tfm = cryptd_tfm;
+
+ reqsize = sizeof(struct skcipher_request);
+ reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
+ return 0;
+}
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename)
+{
+ struct simd_skcipher_alg *salg;
+ struct crypto_skcipher *tfm;
+ struct skcipher_alg *ialg;
+ struct skcipher_alg *alg;
+ int err;
+
+ tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ ialg = crypto_skcipher_alg(tfm);
+
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+ goto out_put_tfm;
+ }
+
+ salg->ialg_name = basename;
+ alg = &salg->alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ drvname) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_priority = ialg->base.cra_priority;
+ alg->base.cra_blocksize = ialg->base.cra_blocksize;
+ alg->base.cra_alignmask = ialg->base.cra_alignmask;
+ alg->base.cra_module = ialg->base.cra_module;
+ alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
+
+ alg->ivsize = ialg->ivsize;
+ alg->chunksize = ialg->chunksize;
+ alg->min_keysize = ialg->min_keysize;
+ alg->max_keysize = ialg->max_keysize;
+
+ alg->init = simd_skcipher_init;
+ alg->exit = simd_skcipher_exit;
+
+ alg->setkey = simd_skcipher_setkey;
+ alg->encrypt = simd_skcipher_encrypt;
+ alg->decrypt = simd_skcipher_decrypt;
+
+ err = crypto_register_skcipher(alg);
+ if (err)
+ goto out_free_salg;
+
+out_put_tfm:
+ crypto_free_skcipher(tfm);
+ return salg;
+
+out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+ goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
+
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+ const char *basename)
+{
+ char drvname[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+ CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ return simd_skcipher_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create);
+
+void simd_skcipher_free(struct simd_skcipher_alg *salg)
+{
+ crypto_unregister_skcipher(&salg->alg);
+ kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_free);
+
+MODULE_LICENSE("GPL");
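simd_skcipher_create() and simd_skcipher_free(), exported above, let an arch glue module wrap its CRYPTO_ALG_INTERNAL skcipher with a cryptd-backed async front end instead of the old ablk_helper. A rough sketch of the expected call pattern follows; "__cbc-aes-myarch" is an invented internal driver name, and a real glue module is assumed to have registered that internal algorithm first.

#include <linux/err.h>
#include <linux/module.h>
#include <crypto/internal/simd.h>

static struct simd_skcipher_alg *simd_alg;

static int __init myarch_aes_glue_init(void)
{
	/* Wrap the hypothetical internal "__cbc-aes-myarch" skcipher that
	 * this module is assumed to have registered already. */
	simd_alg = simd_skcipher_create("cbc(aes)", "__cbc-aes-myarch");
	return PTR_ERR_OR_ZERO(simd_alg);
}

static void __exit myarch_aes_glue_exit(void)
{
	simd_skcipher_free(simd_alg);
}

module_init(myarch_aes_glue_init);
module_exit(myarch_aes_glue_exit);
MODULE_LICENSE("GPL");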
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018dcaee..0e1e6c35188e 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -14,9 +14,12 @@
*
*/
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
@@ -24,6 +27,545 @@
#include "internal.h"
+enum {
+ SKCIPHER_WALK_PHYS = 1 << 0,
+ SKCIPHER_WALK_SLOW = 1 << 1,
+ SKCIPHER_WALK_COPY = 1 << 2,
+ SKCIPHER_WALK_DIFF = 1 << 3,
+ SKCIPHER_WALK_SLEEP = 1 << 4,
+};
+
+struct skcipher_walk_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ u8 *data;
+ u8 buffer[];
+};
+
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
+{
+ if (PageHighMem(scatterwalk_page(walk)))
+ kunmap_atomic(vaddr);
+}
+
+static inline void *skcipher_map(struct scatter_walk *walk)
+{
+ struct page *page = scatterwalk_page(walk);
+
+ return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
+ offset_in_page(walk->offset);
+}
+
+static inline void skcipher_map_src(struct skcipher_walk *walk)
+{
+ walk->src.virt.addr = skcipher_map(&walk->in);
+}
+
+static inline void skcipher_map_dst(struct skcipher_walk *walk)
+{
+ walk->dst.virt.addr = skcipher_map(&walk->out);
+}
+
+static inline void skcipher_unmap_src(struct skcipher_walk *walk)
+{
+ skcipher_unmap(&walk->in, walk->src.virt.addr);
+}
+
+static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
+{
+ skcipher_unmap(&walk->out, walk->dst.virt.addr);
+}
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+{
+ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
+ return max(start, end_page);
+}
+
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ u8 *addr;
+
+ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ addr = skcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize,
+ (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+ return 0;
+}
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err)
+{
+ unsigned int n = walk->nbytes - err;
+ unsigned int nbytes;
+
+ nbytes = walk->total - n;
+
+ if (unlikely(err < 0)) {
+ nbytes = 0;
+ n = 0;
+ } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+unmap_src:
+ skcipher_unmap_src(walk);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ skcipher_unmap_dst(walk);
+ goto unmap_src;
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ skcipher_map_dst(walk);
+ memcpy(walk->dst.virt.addr, walk->page, n);
+ skcipher_unmap_dst(walk);
+ } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+ if (WARN_ON(err)) {
+ err = -EINVAL;
+ nbytes = 0;
+ } else
+ n = skcipher_done_slow(walk, n);
+ }
+
+ if (err > 0)
+ err = 0;
+
+ walk->total = nbytes;
+ walk->nbytes = nbytes;
+
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+ scatterwalk_done(&walk->in, 0, nbytes);
+ scatterwalk_done(&walk->out, 1, nbytes);
+
+ if (nbytes) {
+ crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+ CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ return skcipher_walk_next(walk);
+ }
+
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+void skcipher_walk_complete(struct skcipher_walk *walk, int err)
+{
+ struct skcipher_walk_buffer *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+ u8 *data;
+
+ if (err)
+ goto done;
+
+ data = p->data;
+ if (!data) {
+ data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
+ data = skcipher_get_spot(data, walk->chunksize);
+ }
+
+ scatterwalk_copychunks(data, &p->dst, p->len, 1);
+
+ if (offset_in_page(p->data) + p->len + walk->chunksize >
+ PAGE_SIZE)
+ free_page((unsigned long)p->data);
+
+done:
+ list_del(&p->entry);
+ kfree(p);
+ }
+
+ if (!err && walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_complete);
+
+static void skcipher_queue_write(struct skcipher_walk *walk,
+ struct skcipher_walk_buffer *p)
+{
+ p->dst = walk->out;
+ list_add_tail(&p->entry, &walk->buffers);
+}
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ bool phys = walk->flags & SKCIPHER_WALK_PHYS;
+ unsigned alignmask = walk->alignmask;
+ struct skcipher_walk_buffer *p;
+ unsigned a;
+ unsigned n;
+ u8 *buffer;
+ void *v;
+
+ if (!phys) {
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (buffer)
+ goto ok;
+ }
+
+ /* Start with the minimum alignment of kmalloc. */
+ a = crypto_tfm_ctx_alignment() - 1;
+ n = bsize;
+
+ if (phys) {
+ /* Calculate the minimum alignment of p->buffer. */
+ a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
+ n += sizeof(*p);
+ }
+
+ /* Minimum size to align p->buffer by alignmask. */
+ n += alignmask & ~a;
+
+ /* Minimum size to ensure p->buffer does not straddle a page. */
+ n += (bsize - 1) & ~(alignmask | a);
+
+ v = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!v)
+ return skcipher_walk_done(walk, -ENOMEM);
+
+ if (phys) {
+ p = v;
+ p->len = bsize;
+ skcipher_queue_write(walk, p);
+ buffer = p->buffer;
+ } else {
+ walk->buffer = v;
+ buffer = v;
+ }
+
+ok:
+ walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
+ walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
+ walk->src.virt.addr = walk->dst.virt.addr;
+
+ scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ struct skcipher_walk_buffer *p;
+ u8 *tmp = walk->page;
+
+ skcipher_map_src(walk);
+ memcpy(tmp, walk->src.virt.addr, walk->nbytes);
+ skcipher_unmap_src(walk);
+
+ walk->src.virt.addr = tmp;
+ walk->dst.virt.addr = tmp;
+
+ if (!(walk->flags & SKCIPHER_WALK_PHYS))
+ return 0;
+
+ p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
+ if (!p)
+ return -ENOMEM;
+
+ p->data = walk->page;
+ p->len = walk->nbytes;
+ skcipher_queue_write(walk, p);
+
+ if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
+ PAGE_SIZE)
+ walk->page = NULL;
+ else
+ walk->page += walk->nbytes;
+
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ walk->src.phys.page = scatterwalk_page(&walk->in);
+ walk->src.phys.offset = offset_in_page(walk->in.offset);
+ walk->dst.phys.page = scatterwalk_page(&walk->out);
+ walk->dst.phys.offset = offset_in_page(walk->out.offset);
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ return 0;
+
+ diff = walk->src.phys.offset - walk->dst.phys.offset;
+ diff |= walk->src.virt.page - walk->dst.virt.page;
+
+ skcipher_map_src(walk);
+ walk->dst.virt.addr = walk->src.virt.addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ skcipher_map_dst(walk);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+ int err;
+
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+
+ n = walk->total;
+ bsize = min(walk->chunksize, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ err = skcipher_next_slow(walk, bsize);
+ goto set_phys_lowmem;
+ }
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+
+ walk->nbytes = min_t(unsigned, n,
+ PAGE_SIZE - offset_in_page(walk->page));
+ walk->flags |= SKCIPHER_WALK_COPY;
+ err = skcipher_next_copy(walk);
+ goto set_phys_lowmem;
+ }
+
+ walk->nbytes = n;
+
+ return skcipher_next_fast(walk);
+
+set_phys_lowmem:
+ if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
+ walk->src.phys.page = virt_to_page(walk->src.virt.addr);
+ walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
+ walk->src.phys.offset &= PAGE_SIZE - 1;
+ walk->dst.phys.offset &= PAGE_SIZE - 1;
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_next);
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned a = crypto_tfm_ctx_alignment() - 1;
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned bs = walk->chunksize;
+ unsigned aligned_bs;
+ unsigned size;
+ u8 *iv;
+
+ aligned_bs = ALIGN(bs, alignmask);
+
+ /* Minimum size to align buffer by alignmask. */
+ size = alignmask & ~a;
+
+ if (walk->flags & SKCIPHER_WALK_PHYS)
+ size += ivsize;
+ else {
+ size += aligned_bs + ivsize;
+
+ /* Minimum size to ensure buffer does not straddle a page. */
+ size += (bs - 1) & ~(alignmask | a);
+ }
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1);
+ iv = skcipher_get_spot(iv, bs) + aligned_bs;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+ walk->nbytes = 0;
+
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+ walk->nbytes = walk->total;
+
+ return skcipher_walk_next(walk);
+}
+
+static int skcipher_walk_skcipher(struct skcipher_walk *walk,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ scatterwalk_start(&walk->in, req->src);
+ scatterwalk_start(&walk->out, req->dst);
+
+ walk->total = req->cryptlen;
+ walk->iv = req->iv;
+ walk->oiv = req->iv;
+
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+ walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ SKCIPHER_WALK_SLEEP : 0;
+
+ walk->blocksize = crypto_skcipher_blocksize(tfm);
+ walk->chunksize = crypto_skcipher_chunksize(tfm);
+ walk->ivsize = crypto_skcipher_ivsize(tfm);
+ walk->alignmask = crypto_skcipher_alignmask(tfm);
+
+ return skcipher_walk_first(walk);
+}
+
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req, bool atomic)
+{
+ int err;
+
+ walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+ err = skcipher_walk_skcipher(walk, req);
+
+ walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_virt);
+
+void skcipher_walk_atomise(struct skcipher_walk *walk)
+{
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
+
+int skcipher_walk_async(struct skcipher_walk *walk,
+ struct skcipher_request *req)
+{
+ walk->flags |= SKCIPHER_WALK_PHYS;
+
+ INIT_LIST_HEAD(&walk->buffers);
+
+ return skcipher_walk_skcipher(walk, req);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_async);
+
+static int skcipher_walk_aead_common(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int err;
+
+ walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+ scatterwalk_start(&walk->in, req->src);
+ scatterwalk_start(&walk->out, req->dst);
+
+ scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
+ scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+
+ walk->iv = req->iv;
+ walk->oiv = req->iv;
+
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+ walk->flags |= SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+ walk->blocksize = crypto_aead_blocksize(tfm);
+ walk->chunksize = crypto_aead_chunksize(tfm);
+ walk->ivsize = crypto_aead_ivsize(tfm);
+ walk->alignmask = crypto_aead_alignmask(tfm);
+
+ err = skcipher_walk_first(walk);
+
+ if (atomic)
+ walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+ return err;
+}
+
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+ bool atomic)
+{
+ walk->total = req->cryptlen;
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead);
+
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ walk->total = req->cryptlen;
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
+
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ walk->total = req->cryptlen - crypto_aead_authsize(tfm);
+
+ return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
+
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
if (alg->cra_type == &crypto_blkcipher_type)
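The skcipher_walk helpers exported above (skcipher_walk_virt(), skcipher_walk_done() and friends) are used the same way the old blkcipher walk was: start the walk, consume walk.nbytes per iteration, then hand the unprocessed remainder back to skcipher_walk_done(). A hedged sketch of that loop inside a cipher's encrypt handler is shown below; process_blocks() is a hypothetical helper standing in for the real per-block work.

static int my_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* process_blocks() (hypothetical) encrypts whole blocks from
		 * walk.src.virt.addr into walk.dst.virt.addr using walk.iv and
		 * returns the number of trailing bytes it did not consume. */
		nbytes = process_blocks(walk.dst.virt.addr, walk.src.virt.addr,
					nbytes, walk.iv);

		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}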
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 62dffa0028ac..f616ad74cce7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -33,6 +33,7 @@
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/acompress.h>
#include "internal.h"
@@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
*/
#define IDX1 32
#define IDX2 32400
-#define IDX3 1
+#define IDX3 1511
#define IDX4 8193
#define IDX5 22222
#define IDX6 17101
@@ -1442,6 +1443,126 @@ out:
return ret;
}
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+ struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+ const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+ unsigned int i;
+ char *output;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct tcrypt_result result;
+
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ for (i = 0; i < ctcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = ctemplate[i].inlen;
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, ctemplate[i].input, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_compress(req));
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != ctemplate[i].outlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, ctemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ acomp_request_free(req);
+ }
+
+ for (i = 0; i < dtcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = dtemplate[i].inlen;
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, dtemplate[i].input, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ if (ret) {
+ pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != dtemplate[i].outlen) {
+ pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Decompression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ acomp_request_free(req);
+ goto out;
+ }
+
+ acomp_request_free(req);
+ }
+
+ ret = 0;
+
+out:
+ kfree(output);
+ return ret;
+}
+
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
@@ -1509,7 +1630,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
struct crypto_aead *tfm;
int err = 0;
- tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1538,7 +1659,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
struct crypto_cipher *tfm;
int err = 0;
- tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1567,7 +1688,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
struct crypto_skcipher *tfm;
int err = 0;
- tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1593,22 +1714,38 @@ out:
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *tfm;
+ struct crypto_comp *comp;
+ struct crypto_acomp *acomp;
int err;
+ u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
+
+ if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
+ } else {
+ comp = crypto_alloc_comp(driver, type, mask);
+ if (IS_ERR(comp)) {
+ pr_err("alg: comp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(comp));
+ return PTR_ERR(comp);
+ }
- tfm = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_comp(tfm, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
+ err = test_comp(comp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
- crypto_free_comp(tfm);
+ crypto_free_comp(comp);
+ }
return err;
}
@@ -1618,7 +1755,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
struct crypto_ahash *tfm;
int err;
- tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1646,7 +1783,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
if (err)
goto out;
- tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
@@ -1688,7 +1825,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
struct crypto_rng *rng;
int err;
- rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
@@ -1715,7 +1852,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
if (!buf)
return -ENOMEM;
- drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
@@ -1909,7 +2046,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
struct crypto_kpp *tfm;
int err = 0;
- tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
@@ -2068,7 +2205,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
struct crypto_akcipher *tfm;
int err = 0;
- tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+ tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
@@ -2091,88 +2228,6 @@ static int alg_test_null(const struct alg_test_desc *desc,
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
- .alg = "__cbc-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__cbc-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__driver-cbc-camellia-aesni",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-camellia-aesni-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-cbc-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__driver-ecb-camellia-aesni",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-camellia-aesni-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-cast5-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-cast6-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-avx2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-serpent-sse2",
- .test = alg_test_null,
- }, {
- .alg = "__driver-ecb-twofish-avx",
- .test = alg_test_null,
- }, {
- .alg = "__driver-gcm-aes-aesni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "__ghash-pclmulqdqni",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.suite = {
@@ -2659,55 +2714,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
- .alg = "cryptd(__driver-cbc-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__driver-cbc-camellia-aesni)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-cbc-serpent-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__driver-ecb-camellia-aesni)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-cast5-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-cast6-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-avx2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-serpent-sse2)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-ecb-twofish-avx)",
- .test = alg_test_null,
- }, {
- .alg = "cryptd(__driver-gcm-aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
- .alg = "cryptd(__ghash-pclmulqdqni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -3034,10 +3040,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
- .alg = "ecb(__aes-aesni)",
- .test = alg_test_null,
- .fips_allowed = 1,
- }, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e64a4ef9d8ca..9b656be7f52f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1334,36 +1334,50 @@ static struct hash_testvec rmd320_tv_template[] = {
}
};
-#define CRCT10DIF_TEST_VECTORS 3
+#define CRCT10DIF_TEST_VECTORS ARRAY_SIZE(crct10dif_tv_template)
static struct hash_testvec crct10dif_tv_template[] = {
{
- .plaintext = "abc",
- .psize = 3,
-#ifdef __LITTLE_ENDIAN
- .digest = "\x3b\x44",
-#else
- .digest = "\x44\x3b",
-#endif
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
-#ifdef __LITTLE_ENDIAN
- .digest = "\x70\x4b",
-#else
- .digest = "\x4b\x70",
-#endif
- }, {
- .plaintext =
- "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
- .psize = 56,
-#ifdef __LITTLE_ENDIAN
- .digest = "\xe3\x9c",
-#else
- .digest = "\x9c\xe3",
-#endif
- .np = 2,
- .tap = { 28, 28 }
+ .plaintext = "abc",
+ .psize = 3,
+ .digest = (u8 *)(u16 []){ 0x443b },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 79,
+ .digest = (u8 *)(u16 []){ 0x4b70 },
+ .np = 2,
+ .tap = { 63, 16 },
+ }, {
+ .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
+ "ddddddddddddd",
+ .psize = 56,
+ .digest = (u8 *)(u16 []){ 0x9ce3 },
+ .np = 8,
+ .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 319,
+ .digest = (u8 *)(u16 []){ 0x44c6 },
+ }, {
+ .plaintext = "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "1234567890123456789012345678901234567890"
+ "123456789012345678901234567890123456789",
+ .psize = 319,
+ .digest = (u8 *)(u16 []){ 0x44c6 },
+ .np = 4,
+ .tap = { 1, 255, 57, 6 },
}
};
diff --git a/crypto/xts.c b/crypto/xts.c
index 305343f22a02..410a2e299085 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -13,7 +13,8 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,140 +26,320 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
+#define XTS_BUFFER_SIZE 128u
+
struct priv {
- struct crypto_cipher *child;
+ struct crypto_skcipher *child;
struct crypto_cipher *tweak;
};
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+struct xts_instance_ctx {
+ struct crypto_skcipher_spawn spawn;
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct rctx {
+ be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+
+ be128 t;
+
+ be128 *ext;
+
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ unsigned int left;
+
+ struct skcipher_request subreq;
+};
+
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
- struct priv *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->tweak;
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child;
+ struct crypto_cipher *tweak;
int err;
- err = xts_check_key(parent, key, keylen);
+ err = xts_verify_key(parent, key, keylen);
if (err)
return err;
+ keylen /= 2;
+
/* we need two cipher instances: one to compute the initial 'tweak'
* by encrypting the IV (usually the 'plain' iv) and the other
* one to encrypt and decrypt the data */
/* tweak cipher, uses Key2 i.e. the second half of *key */
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ tweak = ctx->tweak;
+ crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key + keylen/2, keylen/2);
+ err = crypto_cipher_setkey(tweak, key + keylen, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
+ CRYPTO_TFM_RES_MASK);
if (err)
return err;
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
+ /* data cipher, uses Key1 i.e. the first half of *key */
child = ctx->child;
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
- /* data cipher, uses Key1 i.e. the first half of *key */
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen/2);
- if (err)
- return err;
+ return err;
+}
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+static int post_crypt(struct skcipher_request *req)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
+ const int bs = XTS_BLOCK_SIZE;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned offset;
+ int err;
- return 0;
-}
+ subreq = &rctx->subreq;
+ err = skcipher_walk_virt(&w, subreq, false);
-struct sinfo {
- be128 *t;
- struct crypto_tfm *tfm;
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wdst;
-static inline void xts_round(struct sinfo *s, void *dst, const void *src)
-{
- be128_xor(dst, s->t, src); /* PP <- T xor P */
- s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */
- be128_xor(dst, dst, s->t); /* C <- T xor CC */
+ wdst = w.dst.virt.addr;
+
+ do {
+ be128_xor(wdst, buf++, wdst);
+ wdst++;
+ } while ((avail -= bs) >= bs);
+
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ rctx->left -= subreq->cryptlen;
+
+ if (err || !rctx->left)
+ goto out;
+
+ rctx->dst = rctx->dstbuf;
+
+ scatterwalk_done(&w.out, 0, 1);
+ sg = w.out.sg;
+ offset = w.out.offset;
+
+ if (rctx->dst != sg) {
+ rctx->dst[0] = *sg;
+ sg_unmark_end(rctx->dst);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ }
+ rctx->dst[0].length -= offset - sg->offset;
+ rctx->dst[0].offset = offset;
+
+out:
+ return err;
}
-static int crypt(struct blkcipher_desc *d,
- struct blkcipher_walk *w, struct priv *ctx,
- void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+static int pre_crypt(struct skcipher_request *req)
{
- int err;
- unsigned int avail;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 *buf = rctx->ext ?: rctx->buf;
+ struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
- struct sinfo s = {
- .tfm = crypto_cipher_tfm(ctx->child),
- .fn = fn
- };
- u8 *wsrc;
- u8 *wdst;
-
- err = blkcipher_walk_virt(d, w);
- if (!w->nbytes)
- return err;
+ struct skcipher_walk w;
+ struct scatterlist *sg;
+ unsigned cryptlen;
+ unsigned offset;
+ bool more;
+ int err;
- s.t = (be128 *)w->iv;
- avail = w->nbytes;
+ subreq = &rctx->subreq;
+ cryptlen = subreq->cryptlen;
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ more = rctx->left > cryptlen;
+ if (!more)
+ cryptlen = rctx->left;
- /* calculate first value of T */
- tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
+ skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+ cryptlen, NULL);
- goto first;
+ err = skcipher_walk_virt(&w, subreq, false);
- for (;;) {
- do {
- gf128mul_x_ble(s.t, s.t);
+ while (w.nbytes) {
+ unsigned int avail = w.nbytes;
+ be128 *wsrc;
+ be128 *wdst;
-first:
- xts_round(&s, wdst, wsrc);
+ wsrc = w.src.virt.addr;
+ wdst = w.dst.virt.addr;
- wsrc += bs;
- wdst += bs;
+ do {
+ *buf++ = rctx->t;
+ be128_xor(wdst++, &rctx->t, wsrc++);
+ gf128mul_x_ble(&rctx->t, &rctx->t);
} while ((avail -= bs) >= bs);
- err = blkcipher_walk_done(d, w, avail);
- if (!w->nbytes)
- break;
+ err = skcipher_walk_done(&w, avail);
+ }
+
+ skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+ cryptlen, NULL);
- avail = w->nbytes;
+ if (err || !more)
+ goto out;
- wsrc = w->src.virt.addr;
- wdst = w->dst.virt.addr;
+ rctx->src = rctx->srcbuf;
+
+ scatterwalk_done(&w.in, 0, 1);
+ sg = w.in.sg;
+ offset = w.in.offset;
+
+ if (rctx->src != sg) {
+ rctx->src[0] = *sg;
+ sg_unmark_end(rctx->src);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
}
+ rctx->src[0].length -= offset - sg->offset;
+ rctx->src[0].offset = offset;
+out:
return err;
}
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+ gfp_t gfp;
+
+ subreq = &rctx->subreq;
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ rctx->ext = NULL;
+
+ subreq->cryptlen = XTS_BUFFER_SIZE;
+ if (req->cryptlen > XTS_BUFFER_SIZE) {
+ subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+ rctx->ext = kmalloc(subreq->cryptlen, gfp);
+ }
+
+ rctx->src = req->src;
+ rctx->dst = req->dst;
+ rctx->left = req->cryptlen;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
- crypto_cipher_alg(ctx->child)->cia_encrypt);
+ /* calculate first value of T */
+ crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+ return 0;
}
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static void exit_crypt(struct skcipher_request *req)
{
- struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk w;
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->left = 0;
- blkcipher_walk_init(&w, dst, src, nbytes);
- return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
- crypto_cipher_alg(ctx->child)->cia_decrypt);
+ if (rctx->ext)
+ kzfree(rctx->ext);
+}
+
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
+
+ exit_crypt(req);
+ return err;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_encrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+ return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
+{
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq;
+
+ subreq = &rctx->subreq;
+
+ while (!err && rctx->left) {
+ err = pre_crypt(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ post_crypt(req);
+
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return err;
+ }
+
+ exit_crypt(req);
+ return err;
+}
+
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct skcipher_request *req = areq->data;
+ struct skcipher_request *subreq;
+ struct rctx *rctx;
+
+ rctx = skcipher_request_ctx(req);
+ subreq = &rctx->subreq;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+ err = do_decrypt(req, err ?: post_crypt(req));
+ if (rctx->left)
+ return;
+
+ skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+ return do_decrypt(req, init_crypt(req, decrypt_done));
}
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -233,112 +414,168 @@ first:
}
EXPORT_SYMBOL_GPL(xts_crypt);
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct priv *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- crypto_free_cipher(cipher);
- return -EINVAL;
- }
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *child;
+ struct crypto_cipher *tweak;
- ctx->child = cipher;
+ child = crypto_spawn_skcipher(&ictx->spawn);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher)) {
- crypto_free_cipher(ctx->child);
- return PTR_ERR(cipher);
- }
+ ctx->child = child;
- /* this check isn't really needed, leave it here just in case */
- if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
- crypto_free_cipher(cipher);
- crypto_free_cipher(ctx->child);
- *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return -EINVAL;
+ tweak = crypto_alloc_cipher(ictx->name, 0, 0);
+ if (IS_ERR(tweak)) {
+ crypto_free_skcipher(ctx->child);
+ return PTR_ERR(tweak);
}
- ctx->tweak = cipher;
+ ctx->tweak = tweak;
+
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
+ sizeof(struct rctx));
return 0;
}
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct priv *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->child);
crypto_free_cipher(ctx->tweak);
}
-static struct crypto_instance *alloc(struct rtattr **tb)
+static void free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct xts_instance_ctx *ctx;
+ struct skcipher_alg *alg;
+ const char *cipher_name;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return PTR_ERR(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = skcipher_instance_ctx(inst);
+
+ crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+ err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err == -ENOENT) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ }
+
if (err)
- return ERR_PTR(err);
+ goto err_free_inst;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ alg = crypto_skcipher_spawn_alg(&ctx->spawn);
- inst = crypto_alloc_instance("xts", alg);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = -EINVAL;
+ if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
+ goto err_drop_spawn;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
+ if (crypto_skcipher_alg_ivsize(alg))
+ goto err_drop_spawn;
- if (alg->cra_alignmask < 7)
- inst->alg.cra_alignmask = 7;
- else
- inst->alg.cra_alignmask = alg->cra_alignmask;
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
+ &alg->base);
+ if (err)
+ goto err_drop_spawn;
- inst->alg.cra_type = &crypto_blkcipher_type;
+ err = -EINVAL;
+ cipher_name = alg->base.cra_name;
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize =
- 2 * alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize =
- 2 * alg->cra_cipher.cia_max_keysize;
+ /* Alas we screwed up the naming so we have to mangle the
+ * cipher name.
+ */
+ if (!strncmp(cipher_name, "ecb(", 4)) {
+ unsigned len;
- inst->alg.cra_ctxsize = sizeof(struct priv);
+ len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+ if (len < 2 || len >= sizeof(ctx->name))
+ goto err_drop_spawn;
- inst->alg.cra_init = init_tfm;
- inst->alg.cra_exit = exit_tfm;
+ if (ctx->name[len - 1] != ')')
+ goto err_drop_spawn;
- inst->alg.cra_blkcipher.setkey = setkey;
- inst->alg.cra_blkcipher.encrypt = encrypt;
- inst->alg.cra_blkcipher.decrypt = decrypt;
+ ctx->name[len - 1] = 0;
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
+ return -ENAMETOOLONG;
+ } else
+ goto err_drop_spawn;
-static void free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+ (__alignof__(u64) - 1);
+
+ inst->alg.ivsize = XTS_BLOCK_SIZE;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+ inst->alg.init = init_tfm;
+ inst->alg.exit = exit_tfm;
+
+ inst->alg.setkey = setkey;
+ inst->alg.encrypt = encrypt;
+ inst->alg.decrypt = decrypt;
+
+ inst->free = free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_skcipher(&ctx->spawn);
+err_free_inst:
kfree(inst);
+ goto out;
}
static struct crypto_template crypto_tmpl = {
.name = "xts",
- .alloc = alloc,
- .free = free,
+ .create = create,
.module = THIS_MODULE,
};
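With the conversion above, "xts(aes)" instances are plain skciphers, so callers drive them through the generic skcipher request API. The following is a synchronous-only sketch, not part of the patch: a real user must also handle -EINPROGRESS/-EBUSY completions as the cryptd/simd paths earlier in this patch do, and buf, len and the 256-bit key length are illustrative values. setkey() splits the key in half exactly as the new setkey() above does.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

static int xts_encrypt_buf(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 2 x AES-128: first half is the data key, second half the tweak key. */
	err = crypto_skcipher_setkey(tfm, key, 32);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}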
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20bee7dc..060026a02f59 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -107,7 +107,7 @@ obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_RTC_LIB) += rtc/
obj-y += i2c/ media/
obj-$(CONFIG_PPS) += pps/
-obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
+obj-y += ptp/
obj-$(CONFIG_W1) += w1/
obj-y += power/
obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 535e7828445a..83e5f7e1a20d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
config ACPI_REV_OVERRIDE_POSSIBLE
- bool "Allow supported ACPI revision to be overriden"
+ bool "Allow supported ACPI revision to be overridden"
depends on X86
default y
help
@@ -342,7 +342,7 @@ config ACPI_DEBUG
Use the acpi.debug_layer and acpi.debug_level kernel command-line
parameters documented in Documentation/acpi/debug.txt and
- Documentation/kernel-parameters.txt to control the type and
+ Documentation/admin-guide/kernel-parameters.rst to control the type and
amount of debug output.
config ACPI_PCI_SLOT
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d58fbf7f04e6..26696b693e63 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -77,6 +77,11 @@ static const struct apd_device_desc cz_i2c_desc = {
.fixed_clk_rate = 133000000,
};
+static const struct apd_device_desc wt_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 150000000,
+};
+
static struct property_entry uart_properties[] = {
PROPERTY_ENTRY_U32("reg-io-width", 4),
PROPERTY_ENTRY_U32("reg-shift", 2),
@@ -122,7 +127,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
int ret;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
@@ -139,14 +144,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev))
return 1;
@@ -162,7 +161,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
/* Generic apd devices */
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
{ "AMD0010", APD_ADDR(cz_i2c_desc) },
- { "AMDI0010", APD_ADDR(cz_i2c_desc) },
+ { "AMDI0010", APD_ADDR(wt_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 552010288135..8ea836c046f8 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
dev_desc = (const struct lpss_device_desc *)id->driver_data;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
return 1;
}
@@ -724,13 +718,14 @@ static int acpi_lpss_resume_early(struct device *dev)
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
+#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
static void lpss_iosf_enter_d3_state(void)
{
u32 value1 = 0;
- u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+ u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D3hot;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
/*
@@ -774,8 +769,9 @@ exit:
static void lpss_iosf_exit_d3_state(void)
{
- u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
- u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+ u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
+ LPSS_GPIODEF0_DMA_LLP;
+ u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
u32 value2 = LPSS_PMCSR_D0;
u32 mask2 = LPSS_PMCSR_Dx_MASK;
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b200ae1f3c6f..b4c1a6a51da4 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of built-in properties.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
*
* Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.properties = properties;
if (acpi_dma_supported(adev))
pdevinfo.dma_mask = DMA_BIT_MASK(32);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c5557d070954..201292e67ee8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -43,17 +43,6 @@
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
-#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#define ACPI_VIDEO_NOTIFY_CYCLE 0x82
-#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83
-#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84
-
-#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85
-#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
-#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
-#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
#define MAX_NAME_LEN 20
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 92fa47c6498c..8a0049d5cdf3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -243,9 +243,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context);
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
- u8 acpi_ns_locked);
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
/*
* evsci - SCI (System Control Interrupt) handling/dispatch
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 750fa824d42c..edbb42e251a6 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -240,10 +240,6 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-/* Maximum number of While() loop iterations before forced abort */
-
-ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -318,6 +314,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_emit_external_opcodes, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_disassembler_optimizations, TRUE);
ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index dff1207a6078..792660054992 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -765,7 +765,7 @@ union acpi_parse_value {
union acpi_parse_value value; /* Value or args associated with the opcode */\
u8 arg_list_length; /* Number of elements in the arg list */\
ACPI_DISASM_ONLY_MEMBERS (\
- u8 disasm_flags; /* Used during AML disassembly */\
+ u16 disasm_flags; /* Used during AML disassembly */\
u8 disasm_opcode; /* Subtype used for disassembly */\
char *operator_symbol;/* Used for C-style operator name strings */\
char aml_op_name[16]) /* Op name (debug only) */
@@ -868,14 +868,15 @@ struct acpi_parse_state {
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMETER_LIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x08
-#define ACPI_PARSEOP_CLOSING_PAREN 0x10
-#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x20
-#define ACPI_PARSEOP_ASSIGNMENT 0x40
-#define ACPI_PARSEOP_ELSEIF 0x80
+#define ACPI_PARSEOP_IGNORE 0x0001
+#define ACPI_PARSEOP_PARAMETER_LIST 0x0002
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x0004
+#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x0008
+#define ACPI_PARSEOP_CLOSING_PAREN 0x0010
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x0020
+#define ACPI_PARSEOP_ASSIGNMENT 0x0040
+#define ACPI_PARSEOP_ELSEIF 0x0080
+#define ACPI_PARSEOP_LEGACY_ASL_ONLY 0x0100
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index bb7fca1c8ba3..7affdcdfcc81 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -292,6 +292,9 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer);
+
+acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
struct acpi_buffer *buffer, u8 no_trailing);
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index e85953b6fa0e..7dd527f8ca1d 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -127,10 +127,11 @@ acpi_status
acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
- acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
u8 flags, u8 override, u32 *table_index);
+acpi_status acpi_tb_unload_table(u32 table_index);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0a1b53c9ee0e..845afb180a7e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -232,6 +232,8 @@ const char *acpi_ut_get_region_name(u8 space_id);
const char *acpi_ut_get_event_name(u32 event_id);
+const char *acpi_ut_get_argument_type_name(u32 arg_type);
+
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ceb4f7365f7f..6bd8d4bcff65 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -240,6 +240,7 @@
#define ARGP_QWORDDATA 0x11
#define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */
#define ARGP_NAME_OR_REF 0x13 /* For object_type only */
+#define ARGP_MAX 0x13
/*
* Resolved argument types for the AML Interpreter
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 54d48b90de2c..5de3f10cab03 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -221,8 +221,8 @@ acpi_ds_initialize_objects(u32 table_index,
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- 0, acpi_ds_init_one_object, NULL, &info,
- NULL);
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ds_init_one_object, NULL, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 4cc9d989a114..77fd7c84ec39 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -84,7 +84,7 @@ acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
/* Namespace is NOT locked */
- status = acpi_ev_initialize_region(obj_desc, FALSE);
+ status = acpi_ev_initialize_region(obj_desc);
return (status);
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index e36218206bb0..651f35a66cc2 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -609,18 +609,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ev_initialize_region
- (acpi_ns_get_attached_object(node), FALSE);
-
- if (ACPI_FAILURE(status)) {
- /*
- * If AE_NOT_EXIST is returned, it is not fatal
- * because many regions get created before a handler
- * is installed for said region.
- */
- if (AE_NOT_EXIST == status) {
- status = AE_OK;
- }
- }
+ (acpi_ns_get_attached_object(node));
break;
case AML_NAME_OP:
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 75ddd160a716..a9092251ce80 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -479,7 +479,6 @@ acpi_ev_default_region_setup(acpi_handle handle,
* FUNCTION: acpi_ev_initialize_region
*
* PARAMETERS: region_obj - Region we are initializing
- * acpi_ns_locked - Is namespace locked?
*
* RETURN: Status
*
@@ -497,19 +496,28 @@ acpi_ev_default_region_setup(acpi_handle handle,
* MUTEX: Interpreter should be unlocked, because we may run the _REG
* method for this region.
*
+ * NOTE: Possible non-compliance:
+ * There is a behavior conflict in automatic _REG execution:
+ * 1. When the interpreter is evaluating a method, we can only
+ * automatically run _REG for the following case:
+ * operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ * 2. When the interpreter is loading a table, we can also
+ * automatically run _REG for the following case:
+ * operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ * Though this may not comply with the de-facto standard, the
+ * logic is kept in order not to trigger regressions, and callers
+ * of this function must take care to preserve it.
+ *
******************************************************************************/
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
- u8 acpi_ns_locked)
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj)
{
union acpi_operand_object *handler_obj;
union acpi_operand_object *obj_desc;
acpi_adr_space_type space_id;
struct acpi_namespace_node *node;
- acpi_status status;
- ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+ ACPI_FUNCTION_TRACE(ev_initialize_region);
if (!region_obj) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -580,39 +588,17 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
handler_obj, region_obj,
obj_desc));
- status =
- acpi_ev_attach_region(handler_obj,
- region_obj,
- acpi_ns_locked);
+ (void)acpi_ev_attach_region(handler_obj,
+ region_obj, FALSE);
/*
* Tell all users that this region is usable by
* running the _REG method
*/
- if (acpi_ns_locked) {
- status =
- acpi_ut_release_mutex
- (ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
acpi_ex_exit_interpreter();
- status =
- acpi_ev_execute_reg_method(region_obj,
- ACPI_REG_CONNECT);
+ (void)acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_CONNECT);
acpi_ex_enter_interpreter();
-
- if (acpi_ns_locked) {
- status =
- acpi_ut_acquire_mutex
- (ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
return_ACPI_STATUS(AE_OK);
}
}
@@ -622,12 +608,15 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
node = node->parent;
}
- /* If we get here, there is no handler for this region */
-
+ /*
+ * If we get here, there is no handler for this region. This is not
+ * fatal because many regions get created before a handler is installed
+ * for said region.
+ */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"No handler for RegionType %s(%X) (RegionObj %p)\n",
acpi_ut_get_region_name(space_id), space_id,
region_obj));
- return_ACPI_STATUS(AE_NOT_EXIST);
+ return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 718428ba0b89..c32c7829878a 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -437,10 +437,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
ACPI_INFO(("Dynamic OEM Table Load:"));
acpi_ex_exit_interpreter();
- status =
- acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
- ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
- TRUE, &table_index);
+ status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+ ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
+ TRUE, &table_index);
acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
@@ -500,7 +499,6 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
acpi_status status = AE_OK;
union acpi_operand_object *table_desc = ddb_handle;
u32 table_index;
- struct acpi_table_header *table;
ACPI_FUNCTION_TRACE(ex_unload_table);
@@ -537,39 +535,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
* strict order requirement against it.
*/
acpi_ex_exit_interpreter();
-
- /* Ensure the table is still loaded */
-
- if (!acpi_tb_is_table_loaded(table_index)) {
- status = AE_NOT_EXIST;
- goto lock_and_exit;
- }
-
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
- }
-
- /* Delete the portion of the namespace owned by this table */
-
- status = acpi_tb_delete_namespace_by_owner(table_index);
- if (ACPI_FAILURE(status)) {
- goto lock_and_exit;
- }
-
- (void)acpi_tb_release_owner_id(table_index);
- acpi_tb_set_table_loaded_flag(table_index, FALSE);
-
-lock_and_exit:
-
- /* Re-acquire the interpreter lock */
-
+ status = acpi_tb_unload_table(table_index);
acpi_ex_enter_interpreter();
/*
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index f03dd41e86d0..94d5d3339845 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -97,6 +97,51 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_handle_to_name
+ *
+ * PARAMETERS: target_handle - Handle of named object whose name is
+ * to be found
+ * buffer - Where the name is returned
+ *
+ * RETURN: Status, Buffer is filled with name if status is AE_OK
+ *
+ * DESCRIPTION: Build and return the single-segment (4-character) ACPI name
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+ const char *node_name;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_handle_to_name, target_handle);
+
+ node = acpi_ns_validate_handle(target_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Just copy the ACPI name from the Node and zero terminate it */
+
+ node_name = acpi_ut_get_node_name(node);
+ ACPI_MOVE_NAME(buffer->pointer, node_name);
+ ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_handle_to_pathname
*
* PARAMETERS: target_handle - Handle of named object whose name is
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 76a1bd4bb070..e525cbe7d83b 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -158,8 +158,6 @@ acpi_status
acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
{
acpi_status status;
- struct acpi_namespace_node *node;
- const char *node_name;
/* Parameter validation */
@@ -172,18 +170,6 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
return (status);
}
- if (name_type == ACPI_FULL_PATHNAME ||
- name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
-
- /* Get the full pathname (From the namespace root) */
-
- status = acpi_ns_handle_to_pathname(handle, buffer,
- name_type ==
- ACPI_FULL_PATHNAME ? FALSE :
- TRUE);
- return (status);
- }
-
/*
* Wants the single segment ACPI name.
* Validate handle and convert to a namespace Node
@@ -193,27 +179,20 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
return (status);
}
- node = acpi_ns_validate_handle(handle);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Validate/Allocate/Clear caller buffer */
-
- status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ if (name_type == ACPI_FULL_PATHNAME ||
+ name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
- /* Just copy the ACPI name from the Node and zero terminate it */
+ /* Get the full pathname (From the namespace root) */
- node_name = acpi_ut_get_node_name(node);
- ACPI_MOVE_NAME(buffer->pointer, node_name);
- ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
- status = AE_OK;
+ status = acpi_ns_handle_to_pathname(handle, buffer,
+ name_type ==
+ ACPI_FULL_PATHNAME ? FALSE :
+ TRUE);
+ } else {
+ /* Get the single name */
-unlock_and_exit:
+ status = acpi_ns_handle_to_name(handle, buffer);
+ }
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
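With this rework, acpi_get_name() resolves full pathnames through acpi_ns_handle_to_pathname() and single-segment names through the new acpi_ns_handle_to_name() helper. A hedged caller sketch (example_print_names() is invented for illustration; error handling is abbreviated):

static void example_print_names(acpi_handle handle)
{
	struct acpi_buffer full = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer single = { ACPI_ALLOCATE_BUFFER, NULL };

	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &full)))
		pr_info("path: %s\n", (char *)full.pointer);

	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &single)))
		pr_info("name: %s\n", (char *)single.pointer);

	kfree(full.pointer);	/* kfree(NULL) is a no-op */
	kfree(single.pointer);
}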
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index d9ca8c2aa2d3..82b0b5710979 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -832,9 +832,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
*
* FUNCTION: acpi_tb_install_and_load_table
*
- * PARAMETERS: table - Pointer to the table
- * address - Physical address of the table
+ * PARAMETERS: address - Physical address of the table
* flags - Allocation flags of the table
+ * override - Whether override should be performed
* table_index - Where table index is returned
*
* RETURN: Status
@@ -844,15 +844,13 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
******************************************************************************/
acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
- acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
u8 flags, u8 override, u32 *table_index)
{
acpi_status status;
u32 i;
- acpi_owner_id owner_id;
- ACPI_FUNCTION_TRACE(acpi_load_table);
+ ACPI_FUNCTION_TRACE(tb_install_and_load_table);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
@@ -864,45 +862,60 @@ acpi_tb_install_and_load_table(struct acpi_table_header *table,
goto unlock_and_exit;
}
- /*
- * Note: Now table is "INSTALLED", it must be validated before
- * using.
- */
- status = acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ status = acpi_tb_load_table(i, acpi_gbl_root_node);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+unlock_and_exit:
+ *table_index = i;
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- status = acpi_ns_load_table(i, acpi_gbl_root_node);
+ return_ACPI_STATUS(status);
+}
- /* Execute any module-level code that was found in the table */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_unload_table
+ *
+ * PARAMETERS: table_index - Table index
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Unload an ACPI table
+ *
+ ******************************************************************************/
- if (!acpi_gbl_parse_table_as_term_list
- && acpi_gbl_group_module_level_code) {
- acpi_ns_exec_module_code_list();
- }
+acpi_status acpi_tb_unload_table(u32 table_index)
+{
+ acpi_status status = AE_OK;
+ struct acpi_table_header *table;
- /*
- * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
- * responsible for discovering any new wake GPEs by running _PRW methods
- * that may have been loaded by this table.
- */
- status = acpi_tb_get_owner_id(i, &owner_id);
- if (ACPI_SUCCESS(status)) {
- acpi_ev_update_gpes(owner_id);
+ ACPI_FUNCTION_TRACE(tb_unload_table);
+
+ /* Ensure the table is still loaded */
+
+ if (!acpi_tb_is_table_loaded(table_index)) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
/* Invoke table handler if present */
if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+ table,
+ acpi_gbl_table_handler_context);
+ }
}
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-unlock_and_exit:
- *table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ /* Delete the portion of the namespace owned by this table */
+
+ status = acpi_tb_delete_namespace_by_owner(table_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ (void)acpi_tb_release_owner_id(table_index);
+ acpi_tb_set_table_loaded_flag(table_index, FALSE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 046c4d0394ee..5fb838e592dc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
u32 i;
/*
- * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which
+ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
* should be zero are indeed zero. This will workaround BIOSs that
* inadvertently place values in these fields.
*
* The ACPI 1.0 reserved fields that will be zeroed are the bytes located
* at offset 45, 55, 95, and the word located at offset 109, 110.
*
- * Note: The FADT revision value is unreliable because of BIOS errors.
- * The table length is instead used as the final word on the version.
- *
- * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
+ * Note: The FADT revision value is unreliable. Only the length can be
+ * trusted.
*/
- if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) {
+ if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4ab6b9cd0aec..d5adb7ac4684 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,6 +167,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
+ u32 i;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -178,6 +179,21 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+ /*
+ * Enforce the OS early boot logic required by some hosts: if a table
+ * below is reported as not invalidated, developers should fix the
+ * issue by invoking acpi_put_table() for the reported table during
+ * the early boot stage.
+ */
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+ if (acpi_gbl_root_table_list.tables[i].pointer) {
+ ACPI_ERROR((AE_INFO,
+ "Table [%4.4s] is not invalidated during early boot stage",
+ acpi_gbl_root_table_list.tables[i].
+ signature.ascii));
+ }
+ }
+
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 5569f637f669..82019c01a0e5 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -239,7 +239,7 @@ acpi_status acpi_tb_load_namespace(void)
}
if (!tables_failed) {
- ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
+ ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded", tables_loaded));
} else {
ACPI_ERROR((AE_INFO,
"%u table load failures, %u successful",
@@ -250,6 +250,10 @@ acpi_status acpi_tb_load_namespace(void)
status = AE_CTRL_TERMINATE;
}
+#ifdef ACPI_APPLICATION
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\n"));
+#endif
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
@@ -326,10 +330,9 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
/* Install the table and load it into the namespace */
ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
- status =
- acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
- ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
- FALSE, &table_index);
+ status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+ ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+ FALSE, &table_index);
return_ACPI_STATUS(status);
}
@@ -405,37 +408,8 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
break;
}
- /* Ensure the table is actually loaded */
-
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (!acpi_tb_is_table_loaded(i)) {
- status = AE_NOT_EXIST;
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- break;
- }
-
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- acpi_gbl_root_table_list.
- tables[i].pointer,
- acpi_gbl_table_handler_context);
- }
-
- /*
- * Delete all namespace objects owned by this table. Note that
- * these objects can appear anywhere in the namespace by virtue
- * of the AML "Scope" operator. Thus, we need to track ownership
- * by an ID, not simply a position within the hierarchy.
- */
- status = acpi_tb_delete_namespace_by_owner(i);
- if (ACPI_FAILURE(status)) {
- break;
- }
-
- status = acpi_tb_release_owner_id(i);
- acpi_tb_set_table_loaded_flag(i, FALSE);
+ status = acpi_tb_unload_table(i);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
break;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 15728ad8356b..b3d8421cfb80 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdecode")
@@ -532,6 +533,54 @@ const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
return ("Hardware-Specific");
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_argument_type_name
+ *
+ * PARAMETERS: arg_type - an ARGP_* parser argument type
+ *
+ * RETURN: Decoded ARGP_* type
+ *
+ * DESCRIPTION: Decode an ARGP_* parser type, as defined in the amlcode.h file,
+ * and used in the acopcode.h file. For example, ARGP_TERMARG.
+ * Used for debug only.
+ *
+ ******************************************************************************/
+
+static const char *acpi_gbl_argument_type[20] = {
+ /* 00 */ "Unknown ARGP",
+ /* 01 */ "ByteData",
+ /* 02 */ "ByteList",
+ /* 03 */ "CharList",
+ /* 04 */ "DataObject",
+ /* 05 */ "DataObjectList",
+ /* 06 */ "DWordData",
+ /* 07 */ "FieldList",
+ /* 08 */ "Name",
+ /* 09 */ "NameString",
+ /* 0A */ "ObjectList",
+ /* 0B */ "PackageLength",
+ /* 0C */ "SuperName",
+ /* 0D */ "Target",
+ /* 0E */ "TermArg",
+ /* 0F */ "TermList",
+ /* 10 */ "WordData",
+ /* 11 */ "QWordData",
+ /* 12 */ "SimpleName",
+ /* 13 */ "NameOrRef"
+};
+
+const char *acpi_ut_get_argument_type_name(u32 arg_type)
+{
+
+ if (arg_type > ARGP_MAX) {
+ return ("Unknown ARGP");
+ }
+
+ return (acpi_gbl_argument_type[arg_type]);
+}
+
#endif
/*******************************************************************************
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d099a24f776..e53bef6cf53c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -852,6 +852,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
+ } else {
+ ret = NMI_HANDLED;
}
sev = ghes_severity(ghes->estatus->error_severity);
@@ -863,12 +865,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
__process_error(ghes);
ghes_clear_estatus(ghes);
-
- ret = NMI_HANDLED;
}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- irq_work_queue(&ghes_proc_irq_work);
+ if (ret == NMI_HANDLED)
+ irq_work_queue(&ghes_proc_irq_work);
#endif
atomic_dec(&ghes_in_nmi);
return ret;
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 20b3fcf4007c..8f2a98e23bba 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -123,7 +123,13 @@ EXPORT_SYMBOL_GPL(apei_hest_parse);
*/
static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
{
- return arch_apei_enable_cmcff(hest_hdr, data);
+ if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
+ return 0;
+
+ if (!acpi_disable_cmcff)
+ return !arch_apei_enable_cmcff(hest_hdr, data);
+
+ return 0;
}
struct ghes_arr {
@@ -232,8 +238,9 @@ void __init acpi_hest_init(void)
goto err;
}
- if (!acpi_disable_cmcff)
- apei_hest_parse(hest_parse_cmc, NULL);
+ rc = apei_hest_parse(hest_parse_cmc, NULL);
+ if (rc)
+ goto err;
if (!ghes_disable) {
rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 93ecae55fe6a..05fe9ebfb9b5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -430,39 +430,24 @@ static int acpi_battery_get_status(struct acpi_battery *battery)
return 0;
}
-static int acpi_battery_get_info(struct acpi_battery *battery)
+
+static int extract_battery_info(const int use_bix,
+ struct acpi_battery *battery,
+ const struct acpi_buffer *buffer)
{
int result = -EFAULT;
- acpi_status status = 0;
- char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags) ?
- "_BIX" : "_BIF";
-
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- if (!acpi_battery_present(battery))
- return 0;
- mutex_lock(&battery->lock);
- status = acpi_evaluate_object(battery->device->handle, name,
- NULL, &buffer);
- mutex_unlock(&battery->lock);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
- return -ENODEV;
- }
-
- if (battery_bix_broken_package)
- result = extract_package(battery, buffer.pointer,
+ if (use_bix && battery_bix_broken_package)
+ result = extract_package(battery, buffer->pointer,
extended_info_offsets + 1,
ARRAY_SIZE(extended_info_offsets) - 1);
- else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
- result = extract_package(battery, buffer.pointer,
+ else if (use_bix)
+ result = extract_package(battery, buffer->pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
else
- result = extract_package(battery, buffer.pointer,
+ result = extract_package(battery, buffer->pointer,
info_offsets, ARRAY_SIZE(info_offsets));
- kfree(buffer.pointer);
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
@@ -483,6 +468,45 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
return result;
}
+static int acpi_battery_get_info(struct acpi_battery *battery)
+{
+ const int xinfo = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+ int use_bix;
+ int result = -ENODEV;
+
+ if (!acpi_battery_present(battery))
+ return 0;
+
+
+ for (use_bix = xinfo ? 1 : 0; use_bix >= 0; use_bix--) {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status = AE_ERROR;
+
+ mutex_lock(&battery->lock);
+ status = acpi_evaluate_object(battery->device->handle,
+ use_bix ? "_BIX":"_BIF",
+ NULL, &buffer);
+ mutex_unlock(&battery->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s",
+ use_bix ? "_BIX":"_BIF"));
+ } else {
+ result = extract_battery_info(use_bix,
+ battery,
+ &buffer);
+
+ kfree(buffer.pointer);
+ break;
+ }
+ }
+
+ if (!result && !use_bix && xinfo)
+ pr_warn(FW_BUG "The _BIX method is broken, using _BIF.\n");
+
+ return result;
+}
+
static int acpi_battery_get_state(struct acpi_battery *battery)
{
int result = 0;
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67bad61a7..4421f7c9981c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
},
},
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+ },
+ },
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 3520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+ },
+ },
+ /*
+ * Resolves a quirk with the Dell Latitude 3350 that
+ * causes the ethernet adapter to not function.
+ */
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Latitude 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+ },
+ },
#endif
{}
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 56190d00fd87..5cbefd7621f0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -331,6 +331,16 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+#ifdef CONFIG_X86
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
+ }
+#endif
+
+ if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d0d0504b7c89..3ca0729f7e0e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -776,21 +776,25 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
init_waitqueue_head(&pcc_data.pcc_write_wait_q);
}
- /* Plug PSD data into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
/* Add per logical CPU nodes for reading its feedback counters. */
cpu_dev = get_cpu_device(pr->id);
- if (!cpu_dev)
+ if (!cpu_dev) {
+ ret = -EINVAL;
goto out_free;
+ }
+
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
- if (ret)
+ if (ret) {
+ per_cpu(cpc_desc_ptr, pr->id) = NULL;
goto out_free;
+ }
kfree(output.pointer);
return 0;
@@ -824,6 +828,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
void __iomem *addr;
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
+ if (!cpc_ptr)
+ return;
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 7b2c48fde4e2..24418932612e 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -52,7 +52,7 @@ struct acpi_data_node_attr {
static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf)
{
- return acpi_object_path(dn->handle, buf);
+ return dn->handle ? acpi_object_path(dn->handle, buf) : 0;
}
DATA_NODE_ATTR(path);
@@ -105,10 +105,10 @@ static void acpi_expose_nondev_subnodes(struct kobject *kobj,
init_completion(&dn->kobj_done);
ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype,
kobj, "%s", dn->name);
- if (ret)
- acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
- else
+ if (!ret)
acpi_expose_nondev_subnodes(&dn->kobj, &dn->data);
+ else if (dn->handle)
+ acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
}
}
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..86364097e236 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
return 1;
}
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index e24ea4e796e4..7fceb3b4691b 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -82,8 +82,8 @@ static const struct genl_multicast_group acpi_event_mcgrps[] = {
{ .name = ACPI_GENL_MCAST_GROUP_NAME, },
};
-static struct genl_family acpi_event_genl_family = {
- .id = GENL_ID_GENERATE,
+static struct genl_family acpi_event_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
.name = ACPI_GENL_FAMILY_NAME,
.version = ACPI_GENL_VERSION,
.maxattr = ACPI_GENL_ATTR_MAX,
@@ -144,7 +144,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
-static int acpi_event_genetlink_init(void)
+static int __init acpi_event_genetlink_init(void)
{
return genl_register_family(&acpi_event_genl_family);
}
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 71a7d07c28c9..312c4b4dc363 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,7 +94,7 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
return to_acpi_device(acpi_desc->dev);
}
-static int xlat_status(void *buf, unsigned int cmd, u32 status)
+static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
struct nd_cmd_clear_error *clear_err;
struct nd_cmd_ars_status *ars_status;
@@ -113,7 +113,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
if ((status >> 16 & flags) == 0)
return -ENOTTY;
- break;
+ return 0;
case ND_CMD_ARS_START:
/* ARS is in progress */
if ((status & 0xffff) == NFIT_ARS_START_BUSY)
@@ -122,7 +122,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
/* Command failed */
if (status & 0xffff)
return -EIO;
- break;
+ return 0;
case ND_CMD_ARS_STATUS:
ars_status = buf;
/* Command failed */
@@ -146,7 +146,8 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
* then just continue with the returned results.
*/
if (status == NFIT_ARS_STATUS_INTR) {
- if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
+ if (ars_status->out_length >= 40 && (ars_status->flags
+ & NFIT_ARS_F_OVERFLOW))
return -ENOSPC;
return 0;
}
@@ -154,7 +155,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
/* Unknown status */
if (status >> 16)
return -EIO;
- break;
+ return 0;
case ND_CMD_CLEAR_ERROR:
clear_err = buf;
if (status & 0xffff)
@@ -163,7 +164,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
return -EIO;
if (clear_err->length > clear_err->cleared)
return clear_err->cleared;
- break;
+ return 0;
default:
break;
}
@@ -174,9 +175,18 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
return 0;
}
-static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
- struct nvdimm *nvdimm, unsigned int cmd, void *buf,
- unsigned int buf_len, int *cmd_rc)
+static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
+ u32 status)
+{
+ if (!nvdimm)
+ return xlat_bus_status(buf, cmd, status);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
union acpi_object in_obj, in_buf, *out_obj;
@@ -298,7 +308,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
for (i = 0, offset = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
- (u32 *) out_obj->buffer.pointer);
+ (u32 *) out_obj->buffer.pointer,
+ out_obj->buffer.length - offset);
if (offset + out_size > out_obj->buffer.length) {
dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
@@ -333,7 +344,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
*/
rc = buf_len - offset - in_buf.buffer.length;
if (cmd_rc)
- *cmd_rc = xlat_status(buf, cmd, fw_status);
+ *cmd_rc = xlat_status(nvdimm, buf, cmd,
+ fw_status);
} else {
dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
__func__, dimm_name, cmd_name, buf_len,
@@ -343,7 +355,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
} else {
rc = 0;
if (cmd_rc)
- *cmd_rc = xlat_status(buf, cmd, fw_status);
+ *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
}
out:
@@ -351,6 +363,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
return rc;
}
+EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
static const char *spa_type_name(u16 type)
{
@@ -2001,19 +2014,32 @@ static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
return cmd_rc;
}
-static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
+static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_status *ars_status)
{
+ struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
int rc;
u32 i;
+ /*
+ * The first record starts at a 44-byte offset from the start of the
+ * payload.
+ */
+ if (ars_status->out_length < 44)
+ return 0;
for (i = 0; i < ars_status->num_records; i++) {
+ /* only process full records */
+ if (ars_status->out_length
+ < 44 + sizeof(struct nd_ars_record) * (i + 1))
+ break;
rc = nvdimm_bus_add_poison(nvdimm_bus,
ars_status->records[i].err_address,
ars_status->records[i].length);
if (rc)
return rc;
}
+ if (i < ars_status->num_records)
+ dev_warn(acpi_desc->dev, "detected truncated ars results\n");
return 0;
}
@@ -2266,8 +2292,7 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
if (rc < 0 && rc != -ENOSPC)
return rc;
- if (ars_status_process_records(acpi_desc->nvdimm_bus,
- acpi_desc->ars_status))
+ if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
return -ENOMEM;
return 0;
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 14296f5267c8..fc29c2e9832e 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -240,5 +240,7 @@ const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
void __acpi_nvdimm_notify(struct device *dev, u32 event);
+int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
#endif /* __NFIT_H__ */
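Since acpi_nfit_ctl() is now exported and declared in nfit.h, other nfit code (for example a test module) can drive bus commands through the same status-translation path. A hedged sketch, assuming an initialised struct acpi_nfit_desc is available (example_query_ars_cap() is invented for illustration):

static int example_query_ars_cap(struct acpi_nfit_desc *acpi_desc,
				 u64 address, u64 length)
{
	struct nd_cmd_ars_cap cmd = {
		.address = address,
		.length = length,
	};
	int cmd_rc, rc;

	/* nvdimm == NULL selects the bus command path (xlat_bus_status). */
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			   &cmd, sizeof(cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}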
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 416953a42510..9a4c6abee63e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -181,15 +181,15 @@ void acpi_os_vprintf(const char *fmt, va_list args)
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
- if (kstrtoul(arg, 16, &acpi_rsdp))
- return -EINVAL;
- return 0;
+ return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
+ acpi_physical_address pa = 0;
+
#ifdef CONFIG_KEXEC
if (acpi_rsdp)
return acpi_rsdp;
@@ -198,21 +198,14 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
return efi.acpi;
- else {
- printk(KERN_ERR PREFIX
- "System description tables not found\n");
- return 0;
- }
+ pr_err(PREFIX "System description tables not found\n");
} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
- acpi_physical_address pa = 0;
-
acpi_find_root_pointer(&pa);
- return pa;
}
- return 0;
+ return pa;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2237d3f24f0e..5c8aa9cf62d7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -141,7 +141,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (amd_e400_c1e_detected)
+ if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea39fdc..f0b4a981b8d3 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -157,7 +157,7 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
status, NULL);
}
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
@@ -168,7 +168,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
*/
if (event_flag)
acpi_processor_ppc_ost(pr->handle, 1);
- return 0;
+ return;
}
ret = acpi_processor_get_platform_limit(pr);
@@ -182,10 +182,8 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
else
acpi_processor_ppc_ost(pr->handle, 0);
}
- if (ret < 0)
- return (ret);
- else
- return cpufreq_update_policy(pr->id);
+ if (ret >= 0)
+ cpufreq_update_policy(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
@@ -465,11 +463,33 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
-int acpi_processor_notify_smm(struct module *calling_module)
+
+int acpi_processor_pstate_control(void)
{
acpi_status status;
- static int is_done = 0;
+ if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
+ return 0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ (u32)acpi_gbl_FADT.pstate_control, 8);
+ if (ACPI_SUCCESS(status))
+ return 1;
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Failed to write pstate_control [0x%x] to smi_command [0x%x]",
+ acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+ return -EIO;
+}
+
+int acpi_processor_notify_smm(struct module *calling_module)
+{
+ static int is_done = 0;
+ int result;
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
return -EBUSY;
@@ -492,26 +512,15 @@ int acpi_processor_notify_smm(struct module *calling_module)
is_done = -EIO;
- /* Can't write pstate_control to smi_command if either value is zero */
- if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
+ result = acpi_processor_pstate_control();
+ if (!result) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
module_put(calling_module);
return 0;
}
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
- acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
-
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- (u32) acpi_gbl_FADT.pstate_control, 8);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Failed to write pstate_control [0x%x] to "
- "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
- acpi_gbl_FADT.smi_command));
+ if (result < 0) {
module_put(calling_module);
- return status;
+ return result;
}
/* Success. If there's no _PPC, we need to fear nothing, so
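The new acpi_processor_pstate_control() helper factors out the FADT SMI handshake, returning 0 when there is nothing to write, 1 on success and -EIO on failure, so acpi_processor_notify_smm() and other callers share one path. A hedged caller sketch (example_claim_pstate_control() is invented for illustration):

static int example_claim_pstate_control(void)
{
	int ret = acpi_processor_pstate_control();

	if (ret <= 0)	/* 0: no SMI port/pstate_control, <0: write failed */
		return ret;

	pr_debug("P-state control transferred from SMM to the OS\n");
	return 0;
}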
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 03f5ec11ab31..3afddcd834ef 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -41,14 +41,13 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
static bool acpi_extract_properties(const union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_ok(acpi_handle scope,
- const union acpi_object *link,
- struct list_head *list)
+static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+ acpi_handle handle,
+ const union acpi_object *link,
+ struct list_head *list)
{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
struct acpi_data_node *dn;
- acpi_handle handle;
- acpi_status status;
+ bool result;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
@@ -58,43 +57,75 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
dn->fwnode.type = FWNODE_ACPI_DATA;
INIT_LIST_HEAD(&dn->data.subnodes);
- status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
- &handle);
- if (ACPI_FAILURE(status))
- goto fail;
+ result = acpi_extract_properties(desc, &dn->data);
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- goto fail;
+ if (handle) {
+ acpi_handle scope;
+ acpi_status status;
- if (acpi_extract_properties(buf.pointer, &dn->data))
- dn->handle = handle;
+ /*
+ * The scope for the subnode object lookup is the one of the
+ * namespace node (device) containing the object that has
+ * returned the package. That is, it's the scope of that
+ * object's parent.
+ */
+ status = acpi_get_parent(handle, &scope);
+ if (ACPI_SUCCESS(status)
+ && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data))
+ result = true;
+ } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data)) {
+ result = true;
+ }
- /*
- * The scope for the subnode object lookup is the one of the namespace
- * node (device) containing the object that has returned the package.
- * That is, it's the scope of that object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, buf.pointer, &dn->data))
+ if (result) {
dn->handle = handle;
-
- if (dn->handle) {
- dn->data.pointer = buf.pointer;
+ dn->data.pointer = desc;
list_add_tail(&dn->sibling, list);
return true;
}
+ kfree(dn);
acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
+ return false;
+}
+
+static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+ const union acpi_object *link,
+ struct list_head *list)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ acpi_status status;
+
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list))
+ return true;
- fail:
ACPI_FREE(buf.pointer);
- kfree(dn);
return false;
}
+static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ const union acpi_object *link,
+ struct list_head *list)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!scope)
+ return false;
+
+ status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
+ &handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return acpi_nondev_subnode_data_ok(handle, link, list);
+}
+
static int acpi_add_nondev_subnodes(acpi_handle scope,
const union acpi_object *links,
struct list_head *list)
@@ -103,15 +134,37 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link;
+ const union acpi_object *link, *desc;
+ acpi_handle handle;
+ bool result;
link = &links->package.elements[i];
- /* Only two elements allowed, both must be strings. */
- if (link->package.count == 2
- && link->package.elements[0].type == ACPI_TYPE_STRING
- && link->package.elements[1].type == ACPI_TYPE_STRING
- && acpi_nondev_subnode_ok(scope, link, list))
- ret = true;
+ /* Only two elements allowed. */
+ if (link->package.count != 2)
+ continue;
+
+ /* The first one must be a string. */
+ if (link->package.elements[0].type != ACPI_TYPE_STRING)
+ continue;
+
+ /* The second one may be a string, a reference or a package. */
+ switch (link->package.elements[1].type) {
+ case ACPI_TYPE_STRING:
+ result = acpi_nondev_subnode_ok(scope, link, list);
+ break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ handle = link->package.elements[1].reference.handle;
+ result = acpi_nondev_subnode_data_ok(handle, link, list);
+ break;
+ case ACPI_TYPE_PACKAGE:
+ desc = &link->package.elements[1];
+ result = acpi_nondev_subnode_extract(desc, NULL, link, list);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ ret = ret || result;
}
return ret;
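After this change the second element of a _DSD hierarchical-data link may be a string path, an object reference, or an inline package, and each valid link becomes an acpi_data_node child of the device. A hedged consumer-side sketch using the generic fwnode helpers ("SUB0" and "example-rate" are hypothetical names):

static int example_read_subnode_rate(struct device *dev, u32 *rate)
{
	struct fwnode_handle *child;

	child = device_get_named_child_node(dev, "SUB0");
	if (!child)
		return -ENODEV;

	return fwnode_property_read_u32(child, "example-rate", rate);
}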
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 035ac646d8db..3d1856f1f4d0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
if (!is_spi_i2c_slave) {
- acpi_create_platform_device(device);
+ acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
blocking_notifier_call_chain(&acpi_reconfig_chain,
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index deb0ff78eba8..9b6cebe227a0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
}
}
-static void acpi_sleep_pts_switch(u32 acpi_state)
-{
- acpi_status status;
-
- status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- /*
- * OS can't evaluate the _PTS object correctly. Some warning
- * message will be printed. But it won't break anything.
- */
- printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
- }
-}
-
-static int sleep_notify_reboot(struct notifier_block *this,
+static int tts_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
acpi_sleep_tts_switch(ACPI_STATE_S5);
-
- acpi_sleep_pts_switch(ACPI_STATE_S5);
-
return NOTIFY_DONE;
}
-static struct notifier_block sleep_notifier = {
- .notifier_call = sleep_notify_reboot,
+static struct notifier_block tts_notifier = {
+ .notifier_call = tts_notify_reboot,
.next = NULL,
.priority = 0,
};
@@ -691,6 +674,14 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
+ /*
+ * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
+ * the default suspend mode was not selected from the command line.
+ */
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
+ mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_default = PM_SUSPEND_FREEZE;
+
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
@@ -916,9 +907,9 @@ int __init acpi_sleep_init(void)
pr_info(PREFIX "(supports%s)\n", supported);
/*
- * Register the sleep_notifier to reboot notifier list so that the _TTS
- * and _PTS object can also be evaluated when the system enters S5.
+ * Register the tts_notifier to reboot notifier list so that the _TTS
+ * object can also be evaluated when the system enters S5.
*/
- register_reboot_notifier(&sleep_notifier);
+ register_reboot_notifier(&tts_notifier);
return 0;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a6b36fc53aec..02ded25c82e4 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -296,6 +296,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
},
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
+ .callback = video_detect_force_native,
+ .ident = "Dell XPS 17 L702X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
+ },
+ },
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
+ /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
+ .callback = video_detect_force_native,
+ .ident = "HP Pavilion dv6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
+ },
+ },
+
{ },
};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9669fc7c19df..2fc52407306c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,6 +46,8 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include <linux/ahci-remap.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -1400,6 +1402,40 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
+static void ahci_remap_check(struct pci_dev *pdev, int bar,
+ struct ahci_host_priv *hpriv)
+{
+ int i, count = 0;
+ u32 cap;
+
+ /*
+ * Check if this device might have remapped NVMe devices.
+ */
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
+ pci_resource_len(pdev, bar) < SZ_512K ||
+ bar != AHCI_PCI_BAR_STANDARD ||
+ !(readl(hpriv->mmio + AHCI_VSCAP) & 1))
+ return;
+
+ cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
+ for (i = 0; i < AHCI_MAX_REMAP; i++) {
+ if ((cap & (1 << i)) == 0)
+ continue;
+ if (readl(hpriv->mmio + ahci_remap_dcc(i))
+ != PCI_CLASS_STORAGE_EXPRESS)
+ continue;
+
+ /* We've found a remapped device */
+ count++;
+ }
+
+ if (!count)
+ return;
+
+ dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+ dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+}
+
static int ahci_get_irq_vector(struct ata_host *host, int port)
{
return pci_irq_vector(to_pci_dev(host->dev), port);
@@ -1436,13 +1472,6 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
"ahci: MRSM is on, fallback to single MSI\n");
pci_free_irq_vectors(pdev);
}
-
- /*
- * -ENOSPC indicated we don't have enough vectors. Don't bother
- * trying a single vectors for any other error:
- */
- if (nvec < 0 && nvec != -ENOSPC)
- return nvec;
}
/*
@@ -1548,6 +1577,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ /* detect remapped nvme devices */
+ ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 1eba8dff875e..9884c8c6e934 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -46,11 +46,13 @@
#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
+#define LS1046A_SATA_ECC_DIS 0x80000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
AHCI_LS2080A,
+ AHCI_LS1046A,
};
struct ahci_qoriq_priv {
@@ -63,6 +65,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -175,6 +178,13 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+
+ case AHCI_LS1046A:
+ writel(LS1046A_SATA_ECC_DIS, qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
}
return 0;
@@ -204,9 +214,9 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
- if (qoriq_priv->type == AHCI_LS1021A) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sata-ecc");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "sata-ecc");
+ if (res) {
qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(qoriq_priv->ecc_addr))
return PTR_ERR(qoriq_priv->ecc_addr);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0d028ead99e8..ee7db3119b18 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -140,6 +140,7 @@ EXPORT_SYMBOL_GPL(ahci_shost_attrs);
struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 223a770f78f3..9cd0a2d41816 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -129,7 +129,7 @@ static int ata_force_tbl_size;
static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
-MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
@@ -739,6 +739,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
* @n_block: Number of blocks
* @tf_flags: RW/FUA etc...
* @tag: tag
+ * @class: IO priority class
*
* LOCKING:
* None.
@@ -753,7 +754,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
*/
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag)
+ unsigned int tag, int class)
{
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
@@ -785,6 +786,12 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
tf->device = ATA_LBA;
if (tf->flags & ATA_TFLAG_FUA)
tf->device |= 1 << 7;
+
+ if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
+ if (class == IOPRIO_CLASS_RT)
+ tf->hob_nsect |= ATA_PRIO_HIGH <<
+ ATA_SHIFT_PRIO;
+ }
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -2156,6 +2163,37 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
}
}
+static void ata_dev_config_ncq_prio(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ return;
+ }
+
+ err_mask = ata_read_log_page(dev,
+ ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_SATA_SETTINGS,
+ ap->sector_buf,
+ 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get Identify Device data, Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+
+ if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
+ dev->flags |= ATA_DFLAG_NCQ_PRIO;
+ } else {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+ ata_dev_dbg(dev, "SATA page does not support priority\n");
+ }
+
+}
+
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
@@ -2205,6 +2243,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
ata_dev_config_ncq_send_recv(dev);
if (ata_id_has_ncq_non_data(dev->id))
ata_dev_config_ncq_non_data(dev);
+ if (ata_id_has_ncq_prio(dev->id))
+ ata_dev_config_ncq_prio(dev);
}
return 0;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9cceb4a875a5..1f863e757ee4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
+#include <linux/ioprio.h>
#include "libata.h"
#include "libata-transport.h"
@@ -270,6 +271,83 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ bool ncq_prio_enable;
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+ spin_unlock_irq(ap->lock);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ int rc;
+
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if ((input < 0) || (input > 1))
+ return -EINVAL;
+
+ ap = ata_shost_to_port(sdev->host);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev))
+ return -ENODEV;
+
+ spin_lock_irq(ap->lock);
+ if (input)
+ dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+ else
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+ dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+ dev->link->eh_info.flags |= ATA_EHI_QUIET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irq(ap->lock);
+
+ ata_port_wait_eh(ap);
+
+ if (input) {
+ spin_lock_irq(ap->lock);
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ rc = -EIO;
+ }
+ spin_unlock_irq(ap->lock);
+ }
+
+ return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+ ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
u8 sk, u8 asc, u8 ascq)
{
@@ -401,6 +479,7 @@ EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -1088,7 +1167,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
desc[1] = tf->command; /* status */
desc[2] = tf->device;
desc[3] = tf->nsect;
- desc[0] = 0;
+ desc[7] = 0;
if (tf->flags & ATA_TFLAG_LBA48) {
desc[8] |= 0x80;
if (tf->hob_nsect)
@@ -1159,6 +1238,7 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+ sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
@@ -1755,6 +1835,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
+ struct request *rq = scmd->request;
+ int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
unsigned int tf_flags = 0;
u64 block;
u32 n_block;
@@ -1821,7 +1903,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
qc->nbytes = n_block * scmd->device->sector_size;
rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
- qc->tag);
+ qc->tag, class);
+
if (likely(rc == 0))
return 0;
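
The ncq_prio_enable attribute added above only arms the feature per device; the high-priority bit is applied per command from the submitting context's I/O priority class (IOPRIO_CLASS_RT is mapped to ATA_PRIO_HIGH in ata_build_rw_tf()). As a rough illustration only — not part of the patch; the device path and buffer size are made up, and real-time I/O priority needs sufficient privileges — a user-space program could tag its reads via the raw ioprio_set syscall:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_RT		1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cl, data)	(((cl) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	char buf[4096];

	/* Tag all further I/O from this process as real-time class, level 0. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
		perror("ioprio_set");
		return EXIT_FAILURE;
	}

	/* Reads issued now carry IOPRIO_CLASS_RT; with ncq_prio_enable set,
	 * libata requests ATA_PRIO_HIGH for the resulting NCQ commands. */
	int fd = open("/dev/sda", O_RDONLY);	/* illustrative device */
	if (fd < 0 || read(fd, buf, sizeof(buf)) < 0) {
		perror("read");
		return EXIT_FAILURE;
	}
	close(fd);
	return 0;
}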
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 3b301a48007c..8f3a5596dd67 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -66,7 +66,7 @@ extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag);
+ unsigned int tag, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 139d20778b29..d4caa23f5a88 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -11,19 +11,26 @@
*
* TODO:
* - dmaengine support
- * - check if timing stuff needed
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi_host.h>
+
#include <linux/ata.h>
+#include <linux/clk.h>
#include <linux/libata.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
#define DRV_NAME "pata_imx"
+#define PATA_IMX_ATA_TIME_OFF 0x00
+#define PATA_IMX_ATA_TIME_ON 0x01
+#define PATA_IMX_ATA_TIME_1 0x02
+#define PATA_IMX_ATA_TIME_2W 0x03
+#define PATA_IMX_ATA_TIME_2R 0x04
+#define PATA_IMX_ATA_TIME_AX 0x05
+#define PATA_IMX_ATA_TIME_PIO_RDX 0x06
+#define PATA_IMX_ATA_TIME_4 0x07
+#define PATA_IMX_ATA_TIME_9 0x08
+
#define PATA_IMX_ATA_CONTROL 0x24
#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
@@ -33,6 +40,10 @@
#define PATA_IMX_DRIVE_DATA 0xA0
#define PATA_IMX_DRIVE_CONTROL 0xD8
+static u32 pio_t4[] = { 30, 20, 15, 10, 10 };
+static u32 pio_t9[] = { 20, 15, 10, 10, 10 };
+static u32 pio_tA[] = { 35, 35, 35, 35, 35 };
+
struct pata_imx_priv {
struct clk *clk;
/* timings/interrupt/control regs */
@@ -40,28 +51,49 @@ struct pata_imx_priv {
u32 ata_ctl;
};
-static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+static void pata_imx_set_timing(struct ata_device *adev,
+ struct pata_imx_priv *priv)
+{
+ struct ata_timing timing;
+ unsigned long clkrate;
+ u32 T, mode;
+
+ clkrate = clk_get_rate(priv->clk);
+
+ if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
+ !clkrate)
+ return;
+
+ T = 1000000000 / clkrate;
+ ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
+
+ mode = adev->pio_mode - XFER_PIO_0;
+
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
+ writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
+ writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
+ writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
+ writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
+
+ writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
+ writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
+ writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
+}
+
+static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- struct ata_device *dev;
- struct ata_port *ap = link->ap;
struct pata_imx_priv *priv = ap->host->private_data;
u32 val;
- ata_for_each_dev(dev, link, ENABLED) {
- dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
- dev->xfer_shift = ATA_SHIFT_PIO;
- dev->flags |= ATA_DFLAG_PIO;
-
- val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
- if (ata_pio_need_iordy(dev))
- val |= PATA_IMX_ATA_CTRL_IORDY_EN;
- else
- val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
- __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+ pata_imx_set_timing(adev, priv);
- ata_dev_info(dev, "configured for PIO\n");
- }
- return 0;
+ val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ if (ata_pio_need_iordy(adev))
+ val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+ else
+ val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+ __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
}
static struct scsi_host_template pata_imx_sht = {
@@ -72,7 +104,7 @@ static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
- .set_mode = pata_imx_set_mode,
+ .set_piomode = pata_imx_set_piomode,
};
static void pata_imx_setup_port(struct ata_ioports *ioaddr)
@@ -128,7 +160,7 @@ static int pata_imx_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->ops = &pata_imx_port_ops;
- ap->pio_mask = ATA_PIO0;
+ ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index efc48bf89d51..823e938c9a78 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4090,7 +4090,20 @@ static int mv_platform_probe(struct platform_device *pdev)
/* allocate host */
if (pdev->dev.of_node) {
- of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
+ rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
+ &n_ports);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "error parsing nr-ports property: %d\n", rc);
+ return rc;
+ }
+
+ if (n_ports <= 0) {
+ dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
+ n_ports);
+ return -EINVAL;
+ }
+
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
} else {
mv_platform_data = dev_get_platdata(&pdev->dev);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index f2aaf9e32a36..40c2d561417b 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1727,7 +1727,7 @@ static int eni_do_init(struct atm_dev *dev)
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
- return error;
+ return -ENOMEM;
}
eni_dev->ioaddr = base;
eni_dev->base_diff = real_base - (unsigned long) base;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ce43ae3e87b3..445505d9ea07 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2143,6 +2143,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
lanai->base = (bus_addr_t) ioremap(raw_base, LANAI_MAPPING_SIZE);
if (lanai->base == NULL) {
printk(KERN_ERR DEV_LABEL ": couldn't remap I/O space\n");
+ result = -ENOMEM;
goto error_pci;
}
/* 3.3: Reset lanai and PHY */
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 6ac2b2b1e8de..5ad037c07ec7 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -584,7 +584,7 @@ static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data32);
}
-static DEVICE_ATTR(console, 0644, console_show, console_store);
+static DEVICE_ATTR_RW(console);
#define SOLOS_ATTR_RO(x) static DEVICE_ATTR(x, 0444, solos_param_show, NULL);
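
The switch to DEVICE_ATTR_RW(console) leans on the macro's naming convention: it declares dev_attr_console with 0644 permissions and binds it to the existing console_show()/console_store() callbacks. A minimal sketch of the same pattern for a hypothetical attribute named foo (the attribute and stored value are illustrative, not from this driver):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static int foo_value;

/* DEVICE_ATTR_RW(foo) requires callbacks named foo_show() and foo_store(). */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", foo_value);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &foo_value))
		return -EINVAL;
	return count;
}

/* Expands to struct device_attribute dev_attr_foo with S_IRUGO | S_IWUSR. */
static DEVICE_ATTR_RW(foo);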
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 10e1b9eee10e..4ef4c5caed4f 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -128,4 +128,17 @@ config IMG_ASCII_LCD
development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
from Imagination Technologies.
+config HT16K33
+ tristate "Holtek Ht16K33 LED controller with keyscan"
+ depends on FB && OF && I2C && INPUT
+ select FB_SYS_FOPS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select INPUT_MATRIXKMAP
+ select FB_BACKLIGHT
+ help
+ Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
+ LED controller driver with keyscan.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 3127175c89df..cb3dd847713b 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_HT16K33) += ht16k33.o
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
new file mode 100644
index 000000000000..eeb323f56c07
--- /dev/null
+++ b/drivers/auxdisplay/ht16k33.c
@@ -0,0 +1,563 @@
+/*
+ * HT16K33 driver
+ *
+ * Author: Robin van der Gracht <robin@protonic.nl>
+ *
+ * Copyright: (C) 2016 Protonic Holland.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/workqueue.h>
+#include <linux/mm.h>
+
+/* Registers */
+#define REG_SYSTEM_SETUP 0x20
+#define REG_SYSTEM_SETUP_OSC_ON BIT(0)
+
+#define REG_DISPLAY_SETUP 0x80
+#define REG_DISPLAY_SETUP_ON BIT(0)
+
+#define REG_ROWINT_SET 0xA0
+#define REG_ROWINT_SET_INT_EN BIT(0)
+#define REG_ROWINT_SET_INT_ACT_HIGH BIT(1)
+
+#define REG_BRIGHTNESS 0xE0
+
+/* Defines */
+#define DRIVER_NAME "ht16k33"
+
+#define MIN_BRIGHTNESS 0x1
+#define MAX_BRIGHTNESS 0x10
+
+#define HT16K33_MATRIX_LED_MAX_COLS 8
+#define HT16K33_MATRIX_LED_MAX_ROWS 16
+#define HT16K33_MATRIX_KEYPAD_MAX_COLS 3
+#define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12
+
+#define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8)
+#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
+
+struct ht16k33_keypad {
+ struct input_dev *dev;
+ spinlock_t lock;
+ struct delayed_work work;
+ uint32_t cols;
+ uint32_t rows;
+ uint32_t row_shift;
+ uint32_t debounce_ms;
+ uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+};
+
+struct ht16k33_fbdev {
+ struct fb_info *info;
+ uint32_t refresh_rate;
+ uint8_t *buffer;
+ uint8_t *cache;
+ struct delayed_work work;
+};
+
+struct ht16k33_priv {
+ struct i2c_client *client;
+ struct ht16k33_keypad keypad;
+ struct ht16k33_fbdev fbdev;
+ struct workqueue_struct *workqueue;
+};
+
+static struct fb_fix_screeninfo ht16k33_fb_fix = {
+ .id = DRIVER_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = HT16K33_MATRIX_LED_MAX_ROWS,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ht16k33_fb_var = {
+ .xres = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres = HT16K33_MATRIX_LED_MAX_COLS,
+ .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
+ .yres_virtual = HT16K33_MATRIX_LED_MAX_COLS,
+ .bits_per_pixel = 1,
+ .red = { 0, 1, 0 },
+ .green = { 0, 1, 0 },
+ .blue = { 0, 1, 0 },
+ .left_margin = 0,
+ .right_margin = 0,
+ .upper_margin = 0,
+ .lower_margin = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static int ht16k33_display_on(struct ht16k33_priv *priv)
+{
+ uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON;
+
+ return i2c_smbus_write_byte(priv->client, data);
+}
+
+static int ht16k33_display_off(struct ht16k33_priv *priv)
+{
+ return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP);
+}
+
+static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ queue_delayed_work(priv->workqueue, &fbdev->work,
+ msecs_to_jiffies(HZ / fbdev->refresh_rate));
+}
+
+static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
+{
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ queue_delayed_work(priv->workqueue, &keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+}
+
+/*
+ * This gets the fb data from the cache and copies it to the ht16k33 display RAM
+ */
+static void ht16k33_fb_update(struct work_struct *work)
+{
+ struct ht16k33_fbdev *fbdev =
+ container_of(work, struct ht16k33_fbdev, work.work);
+ struct ht16k33_priv *priv =
+ container_of(fbdev, struct ht16k33_priv, fbdev);
+
+ uint8_t *p1, *p2;
+ int len, pos = 0, first = -1;
+
+ p1 = fbdev->cache;
+ p2 = fbdev->buffer;
+
+ /* Search for the first byte with changes */
+ while (pos < HT16K33_FB_SIZE && first < 0) {
+ if (*(p1++) - *(p2++))
+ first = pos;
+ pos++;
+ }
+
+ /* No changes found */
+ if (first < 0)
+ goto requeue;
+
+ len = HT16K33_FB_SIZE - first;
+ p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
+ p2 = fbdev->buffer + HT16K33_FB_SIZE - 1;
+
+ /* Determine i2c transfer length */
+ while (len > 1) {
+ if (*(p1--) - *(p2--))
+ break;
+ len--;
+ }
+
+ p1 = fbdev->cache + first;
+ p2 = fbdev->buffer + first;
+ if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2))
+ memcpy(p1, p2, len);
+requeue:
+ ht16k33_fb_queue(priv);
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ /*
+ * Schedule an immediate key scan to capture current key state;
+ * columns will be activated and IRQs will be enabled after the scan.
+ */
+ queue_delayed_work(priv->workqueue, &keypad->work, 0);
+ return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+ struct ht16k33_priv *priv = input_get_drvdata(dev);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+
+ cancel_delayed_work(&keypad->work);
+ /*
+ * ht16k33_keypad_scan() will leave IRQs enabled;
+ * we should disable them now.
+ */
+ disable_irq_nosync(priv->client->irq);
+}
+
+static int ht16k33_initialize(struct ht16k33_priv *priv)
+{
+ uint8_t byte;
+ int err;
+ uint8_t data[HT16K33_MATRIX_LED_MAX_COLS * 2];
+
+ /* Clear RAM (8 * 16 bits) */
+ memset(data, 0, sizeof(data));
+ err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data);
+ if (err)
+ return err;
+
+ /* Turn on internal oscillator */
+ byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP;
+ err = i2c_smbus_write_byte(priv->client, byte);
+ if (err)
+ return err;
+
+ /* Configure INT pin */
+ byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH;
+ if (priv->client->irq > 0)
+ byte |= REG_ROWINT_SET_INT_EN;
+ return i2c_smbus_write_byte(priv->client, byte);
+}
+
+/*
+ * This gets the keys from the keypad and reports them to the input subsystem
+ */
+static void ht16k33_keypad_scan(struct work_struct *work)
+{
+ struct ht16k33_keypad *keypad =
+ container_of(work, struct ht16k33_keypad, work.work);
+ struct ht16k33_priv *priv =
+ container_of(keypad, struct ht16k33_priv, keypad);
+ const unsigned short *keycodes = keypad->dev->keycode;
+ uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+ uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+ int row, col, code;
+ bool reschedule = false;
+
+ if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
+ dev_err(&priv->client->dev, "Failed to read key data\n");
+ goto end;
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+ if (new_state[col])
+ reschedule = true;
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+ while (bits_changed) {
+ row = ffs(bits_changed) - 1;
+ code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+ input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad->dev, keycodes[code],
+ new_state[col] & BIT(row));
+ bits_changed &= ~BIT(row);
+ }
+ }
+ input_sync(keypad->dev);
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+end:
+ if (reschedule)
+ ht16k33_keypad_queue(priv);
+ else
+ enable_irq(priv->client->irq);
+}
+
+static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
+{
+ struct ht16k33_priv *priv = dev;
+
+ disable_irq_nosync(priv->client->irq);
+ ht16k33_keypad_queue(priv);
+
+ return IRQ_HANDLED;
+}
+
+static int ht16k33_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK || brightness == 0) {
+ return ht16k33_display_off(priv);
+ }
+
+ ht16k33_display_on(priv);
+ return i2c_smbus_write_byte(priv->client,
+ REG_BRIGHTNESS | (brightness - 1));
+}
+
+static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi)
+{
+ struct ht16k33_priv *priv = bl_get_data(bl);
+
+ return (fi == NULL) || (fi->par == priv);
+}
+
+static const struct backlight_ops ht16k33_bl_ops = {
+ .update_status = ht16k33_bl_update_status,
+ .check_fb = ht16k33_bl_check_fb,
+};
+
+static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct ht16k33_priv *priv = info->par;
+
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(priv->fbdev.buffer));
+}
+
+static struct fb_ops ht16k33_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_mmap = ht16k33_mmap,
+};
+
+static int ht16k33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ uint32_t rows, cols, dft_brightness;
+ struct backlight_device *bl;
+ struct backlight_properties bl_props;
+ struct ht16k33_priv *priv;
+ struct ht16k33_keypad *keypad;
+ struct ht16k33_fbdev *fbdev;
+ struct device_node *node = client->dev.of_node;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "No IRQ specified\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+ fbdev = &priv->fbdev;
+ keypad = &priv->keypad;
+
+ priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
+ if (priv->workqueue == NULL)
+ return -ENOMEM;
+
+ err = ht16k33_initialize(priv);
+ if (err)
+ goto err_destroy_wq;
+
+ /* Framebuffer (2 bytes per column) */
+ BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
+ fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
+ if (!fbdev->buffer) {
+ err = -ENOMEM;
+ goto err_free_fbdev;
+ }
+
+ fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
+ if (!fbdev->cache) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ fbdev->info = framebuffer_alloc(0, &client->dev);
+ if (!fbdev->info) {
+ err = -ENOMEM;
+ goto err_fbdev_buffer;
+ }
+
+ err = of_property_read_u32(node, "refresh-rate-hz",
+ &fbdev->refresh_rate);
+ if (err) {
+ dev_err(&client->dev, "refresh rate not specified\n");
+ goto err_fbdev_info;
+ }
+ fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+
+ INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update);
+ fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->screen_base = (char __iomem *) fbdev->buffer;
+ fbdev->info->screen_size = HT16K33_FB_SIZE;
+ fbdev->info->fix = ht16k33_fb_fix;
+ fbdev->info->var = ht16k33_fb_var;
+ fbdev->info->pseudo_palette = NULL;
+ fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+ fbdev->info->par = priv;
+
+ err = register_framebuffer(fbdev->info);
+ if (err)
+ goto err_fbdev_info;
+
+ /* Keypad */
+ keypad->dev = devm_input_allocate_device(&client->dev);
+ if (!keypad->dev) {
+ err = -ENOMEM;
+ goto err_fbdev_unregister;
+ }
+
+ keypad->dev->name = DRIVER_NAME"-keypad";
+ keypad->dev->id.bustype = BUS_I2C;
+ keypad->dev->open = ht16k33_keypad_start;
+ keypad->dev->close = ht16k33_keypad_stop;
+
+ if (!of_get_property(node, "linux,no-autorepeat", NULL))
+ __set_bit(EV_REP, keypad->dev->evbit);
+
+ err = of_property_read_u32(node, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ if (err) {
+ dev_err(&client->dev, "key debounce delay not specified\n");
+ goto err_fbdev_unregister;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ ht16k33_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, priv);
+ if (err) {
+ dev_err(&client->dev, "irq request failed %d, error %d\n",
+ client->irq, err);
+ goto err_fbdev_unregister;
+ }
+
+ disable_irq_nosync(client->irq);
+ rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+ cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+ err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+ if (err)
+ goto err_fbdev_unregister;
+
+ err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+ keypad->dev);
+ if (err) {
+ dev_err(&client->dev, "failed to build keymap\n");
+ goto err_fbdev_unregister;
+ }
+
+ input_set_drvdata(keypad->dev, priv);
+ keypad->rows = rows;
+ keypad->cols = cols;
+ keypad->row_shift = get_count_order(cols);
+ INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
+
+ err = input_register_device(keypad->dev);
+ if (err)
+ goto err_fbdev_unregister;
+
+ /* Backlight */
+ memset(&bl_props, 0, sizeof(struct backlight_properties));
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.max_brightness = MAX_BRIGHTNESS;
+
+ bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl",
+ &client->dev, priv,
+ &ht16k33_bl_ops, &bl_props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ err = PTR_ERR(bl);
+ goto err_keypad_unregister;
+ }
+
+ err = of_property_read_u32(node, "default-brightness-level",
+ &dft_brightness);
+ if (err) {
+ dft_brightness = MAX_BRIGHTNESS;
+ } else if (dft_brightness > MAX_BRIGHTNESS) {
+ dev_warn(&client->dev,
+ "invalid default brightness level: %u, using %u\n",
+ dft_brightness, MAX_BRIGHTNESS);
+ dft_brightness = MAX_BRIGHTNESS;
+ }
+
+ bl->props.brightness = dft_brightness;
+ ht16k33_bl_update_status(bl);
+
+ ht16k33_fb_queue(priv);
+ return 0;
+
+err_keypad_unregister:
+ input_unregister_device(keypad->dev);
+err_fbdev_unregister:
+ unregister_framebuffer(fbdev->info);
+err_fbdev_info:
+ framebuffer_release(fbdev->info);
+err_fbdev_buffer:
+ free_page((unsigned long) fbdev->buffer);
+err_free_fbdev:
+ kfree(fbdev);
+err_destroy_wq:
+ destroy_workqueue(priv->workqueue);
+
+ return err;
+}
+
+static int ht16k33_remove(struct i2c_client *client)
+{
+ struct ht16k33_priv *priv = i2c_get_clientdata(client);
+ struct ht16k33_keypad *keypad = &priv->keypad;
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+ ht16k33_keypad_stop(keypad->dev);
+
+ cancel_delayed_work(&fbdev->work);
+ unregister_framebuffer(fbdev->info);
+ framebuffer_release(fbdev->info);
+ free_page((unsigned long) fbdev->buffer);
+
+ destroy_workqueue(priv->workqueue);
+ return 0;
+}
+
+static const struct i2c_device_id ht16k33_i2c_match[] = {
+ { "ht16k33", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
+
+static const struct of_device_id ht16k33_of_match[] = {
+ { .compatible = "holtek,ht16k33", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ht16k33_of_match);
+
+static struct i2c_driver ht16k33_driver = {
+ .probe = ht16k33_probe,
+ .remove = ht16k33_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(ht16k33_of_match),
+ },
+ .id_table = ht16k33_i2c_match,
+};
+module_i2c_driver(ht16k33_driver);
+
+MODULE_DESCRIPTION("Holtek HT16K33 driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
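
ht16k33_fb_update() above keeps I2C traffic down by transferring only the span between the first and last byte that differ between the shadow cache and the framebuffer. The same windowing logic as a standalone sketch (function name and types are illustrative, not part of the driver):

#include <stddef.h>

/* Return the length of the changed region and store its offset in *start;
 * returns 0 when cache and buffer are identical (nothing to transfer). */
static size_t dirty_window(const unsigned char *cache,
			   const unsigned char *buf,
			   size_t size, size_t *start)
{
	size_t first = 0, last = size;

	/* First byte that differs. */
	while (first < size && cache[first] == buf[first])
		first++;
	if (first == size)
		return 0;

	/* Last byte that differs; stops at 'first' at the latest. */
	while (last > first && cache[last - 1] == buf[last - 1])
		last--;

	*start = first;
	return last - first;	/* minimal payload for the block write */
}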
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0f5bfd..d718ae4b907a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+source "drivers/base/test/Kconfig"
+
config SYS_HYPERVISOR
bool
default n
@@ -237,6 +239,7 @@ config GENERIC_CPU_AUTOPROBE
config SOC_BUS
bool
+ select GLOB
source "drivers/base/regmap/Kconfig"
@@ -250,11 +253,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
-config FENCE_TRACE
- bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
- Enable the FENCE_TRACE printks. This will add extra
+ Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba20b396..f2816f6ff76a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-y += test/
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db388bd1c..ada9dce34e6d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
+extern struct device_attribute dev_attr_deferred_probe;
+
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index e9fd32e91668..1e3903d0d994 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *cache_size;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (cache_size)
+ this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+ const __be32 *line_size;
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (line_size)
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *nr_sets;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ if (nr_sets)
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+ int index;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ cache_size(this_leaf);
+ cache_get_line_size(this_leaf);
+ cache_nr_sets(this_leaf);
+ cache_associativity(this_leaf);
+ }
+}
#else
+static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
- int ret;
+ int ret = 0;
+
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
- ret = cache_setup_of_node(cpu);
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ /* No cache property/hierarchy support yet in ACPI */
+ ret = -ENOTSUPP;
if (ret)
return ret;
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
+static void cache_override_properties(unsigned int cpu)
+{
+ if (of_have_populated_dt())
+ return cache_of_override_properties(cpu);
+}
+
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
- cpu);
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
+
+ cache_override_properties(cpu);
return 0;
free_ci:
@@ -498,57 +628,30 @@ err:
return rc;
}
-static void cache_remove_dev(unsigned int cpu)
+static int cacheinfo_cpu_online(unsigned int cpu)
{
- if (!cpumask_test_cpu(cpu, &cache_dev_map))
- return;
- cpumask_clear_cpu(cpu, &cache_dev_map);
+ int rc = detect_cache_attributes(cpu);
- cpu_cache_sysfs_exit(cpu);
+ if (rc)
+ return rc;
+ rc = cache_add_dev(cpu);
+ if (rc)
+ free_cache_attributes(cpu);
+ return rc;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
+ if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+ cpu_cache_sysfs_exit(cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = detect_cache_attributes(cpu);
- if (!rc)
- rc = cache_add_dev(cpu);
- break;
- case CPU_DEAD:
- cache_remove_dev(cpu);
- free_cache_attributes(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ free_cache_attributes(cpu);
+ return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
- int cpu, rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = detect_cache_attributes(cpu);
- if (rc)
- goto out;
- rc = cache_add_dev(cpu);
- if (rc) {
- free_cache_attributes(cpu);
- pr_err("error populating cacheinfo..cpu%d\n", cpu);
- goto out;
- }
- }
- __hotcpu_notifier(cacheinfo_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
-
device_initcall(cacheinfo_sysfs_init);
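
The conversion above replaces the CPU notifier with the cpuhp state machine: cpuhp_setup_state() registers an online/teardown pair and also runs the online callback for every CPU that is already up. A hedged sketch of the same registration pattern in an unrelated module (state name and callbacks are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u is online\n", cpu);
	return 0;		/* non-zero would unwind the registration */
}

static int demo_cpu_prep_down(unsigned int cpu)
{
	pr_info("demo: cpu%u is going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* Dynamic state: the online callback also runs for CPUs already up.
	 * A real module would save the returned state number and pass it to
	 * cpuhp_remove_state() on exit. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_prep_down);
	return ret < 0 ? ret : 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");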
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e32bebc..a2b2896693d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
kfree(cp);
return error;
}
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
error = add_class_attrs(class_get(cls));
class_put(cls);
return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
+ class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a568673..020ea7f05520 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc). Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ if (WARN_ON(dev == target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (WARN_ON(link->consumer == target))
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+ * Devices that have not been registered yet will be put to the ends
+ * of the lists during the registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_reorder_to_tail(link->consumer, NULL);
+
+ return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it. The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+ ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse dependency between the consumer and the supplier already in
+ * the graph, return NULL.
+ */
+ if (!device_pm_initialized(supplier)
+ || device_is_dependent(consumer, supplier)) {
+ link = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+ if (link->consumer == consumer)
+ goto out;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ kfree(link);
+ link = NULL;
+ goto out;
+ }
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+ link->status = DL_STATE_NONE;
+ } else {
+ switch (supplier->links.status) {
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * Balance the decrementation of the supplier's
+ * runtime PM usage counter after consumer probe
+ * in driver_probe_device().
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ }
+
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+ * It is necessary to hold dpm_list locked throughout all that or else
+ * we may end up suspending with a wrong ordering of it.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
+
+static void device_link_free(struct device_link *link)
+{
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+ device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_pm_lock();
+ __device_link_del(link);
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node)
+ if (link->status == DL_STATE_CONSUMER_PROBE)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It only can go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE)
+ __device_link_del(link);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+ device_links_write_lock();
+ __device_links_no_driver(dev);
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(link);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(link);
+ }
+
+ device_links_write_unlock();
+}
+
+/* Device links support end. */
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
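For orientation on the device-links machinery added above: the state machine is driven entirely by links that consumers (or bus code) create. Below is a minimal sketch of a consumer probe using device_link_add() and DL_FLAG_AUTOREMOVE as introduced earlier in this series; my_consumer_probe() and the way the supplier is obtained are hypothetical.

/* Sketch only, not part of the patch. */
#include <linux/device.h>

static int my_consumer_probe(struct device *dev, struct device *supplier)
{
	struct device_link *link;

	/*
	 * Managed link: the core moves it through AVAILABLE ->
	 * CONSUMER_PROBE -> ACTIVE, and DL_FLAG_AUTOREMOVE makes
	 * __device_links_no_driver() delete it when this consumer
	 * unbinds, so the supplier-side cleanup never sees it.
	 */
	link = device_link_add(dev, supplier, DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EINVAL;	/* link refused or allocation failed */

	/* ... regular probe work ... */
	return 0;
}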
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
+ error = device_create_file(dev, &dev_attr_deferred_probe);
+ if (error)
+ goto err_remove_online;
+
return 0;
+ err_remove_online:
+ device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
+
+ device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d22a7260f42b..a8b258e5407b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static ssize_t deferred_probe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool value;
+
+ mutex_lock(&deferred_probe_mutex);
+ value = !list_empty(&dev->p->deferred_probe);
+ mutex_unlock(&deferred_probe_mutex);
+
+ return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
+
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
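The deferred_probe attribute added above is read like any other sysfs file; a throwaway userspace check could look like the sketch below (the device path is a made-up example).

/* Illustrative userspace snippet; the sysfs path is hypothetical. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/example.0/deferred_probe", "r");
	int deferred = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &deferred) == 1)
		printf("probe deferred: %s\n", deferred ? "yes" : "no");
	fclose(f);
	return 0;
}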
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@@ -324,7 +338,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
- bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ !drv->suppress_bind_attrs;
if (defer_all_probes) {
/*
@@ -337,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
+ ret = device_links_check_suppliers(dev);
+ if (ret)
+ return ret;
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -383,7 +402,7 @@ re_probe:
if (test_remove) {
test_remove = false;
- if (dev->bus && dev->bus->remove)
+ if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
@@ -415,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
+ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@@ -507,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -517,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
+ pm_runtime_put_suppliers(dev);
return ret;
}
@@ -771,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@@ -780,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ device_links_unbind_consumers(dev);
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv)
+ return;
+ }
+
pm_runtime_get_sync(dev);
+ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@@ -795,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -811,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+}
+
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@@ -825,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
+ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -852,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374fd1838..7be310f7db73 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(disabled);
-static struct class_attribute devcd_class_attrs[] = {
- __ATTR_RW(disabled),
- __ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
- .class_attrs = devcd_class_attrs,
+ .class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8fc654f0807b..71d577025285 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/percpu.h>
#include "base.h"
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, unsigned long addr)
&devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+ struct devres *devr = container_of(data, struct devres, data);
+
+ return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
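A minimal sketch of how a driver could use the new managed per-CPU helpers; the stats structure and probe function are hypothetical, and the call goes through __devm_alloc_percpu() directly rather than any wrapper macro.

#include <linux/device.h>
#include <linux/percpu.h>

struct my_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static int my_probe(struct device *dev)
{
	struct my_stats __percpu *stats;

	stats = __devm_alloc_percpu(dev, sizeof(struct my_stats),
				    __alignof__(struct my_stats));
	if (!stats)
		return -ENOMEM;

	/*
	 * Released by devm_percpu_release() on driver detach; an explicit
	 * devm_free_percpu(dev, stats) is only needed for early teardown.
	 */
	this_cpu_inc(stats->rx_packets);
	return 0;
}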
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c80986..efd71cf4fdea 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760a4278..4497d263209f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum {
+enum fw_status {
+ FW_STATUS_UNKNOWN,
FW_STATUS_LOADING,
FW_STATUS_DONE,
- FW_STATUS_ABORT,
+ FW_STATUS_ABORTED,
};
static int loading_timeout = 60; /* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine that holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct swait_queue_head wq;
+ enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+ init_swait_queue_head(&fw_st->wq);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+ return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+ long ret;
+
+ ret = swait_event_interruptible_timeout(fw_st->wq,
+ __fw_state_is_done(READ_ONCE(fw_st->status)),
+ timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+ enum fw_status status)
+{
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st) false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+ return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
/* firmware behavior options */
#define FW_OPT_UEVENT (1U << 0)
#define FW_OPT_NOWAIT (1U << 1)
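Put together, the replacement for the old completion/bitmask pair reduces to the pattern below. This is a condensed illustration of the helpers defined above, not additional patch content; "buf" stands for the struct firmware_buf that __allocate_fw_buf() returns.

static int fw_state_example(struct firmware_buf *buf)
{
	fw_state_init(&buf->fw_st);	/* FW_STATUS_UNKNOWN */
	fw_state_start(&buf->fw_st);	/* FW_STATUS_LOADING */
	/* ... fill buf->data and buf->size ... */
	fw_state_done(&buf->fw_st);	/* wakes every waiter */

	/* A concurrent request for the same image only needs to: */
	return fw_state_wait(&buf->fw_st);	/* 0, or -ENOENT if aborted */
}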
@@ -145,9 +223,8 @@ struct firmware_cache {
struct firmware_buf {
struct kref ref;
struct list_head list;
- struct completion completion;
struct firmware_cache *fwc;
- unsigned long status;
+ struct fw_state fw_st;
void *data;
size_t size;
size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
buf->fwc = fwc;
buf->data = dbuf;
buf->allocated_size = size;
- init_completion(&buf->completion);
+ fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
#endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static void fw_finish_direct_load(struct device *device,
- struct firmware_buf *buf)
-{
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
-}
-
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
}
dev_dbg(device, "direct-loading %s\n", buf->fw_id);
buf->size = size;
- fw_finish_direct_load(device, buf);
+ fw_state_done(&buf->fw_st);
break;
}
__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
* There is a small window in which user can write to 'loading'
* between loading done and disappearance of 'loading'
*/
- if (test_bit(FW_STATUS_DONE, &buf->status))
+ if (fw_state_is_done(&buf->fw_st))
return;
list_del_init(&buf->pending_list);
- set_bit(FW_STATUS_ABORT, &buf->status);
- complete_all(&buf->completion);
+ fw_state_aborted(&buf->fw_st);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
fw_priv->buf = NULL;
}
-#define is_fw_load_aborted(buf) \
- test_bit(FW_STATUS_ABORT, &(buf)->status)
-
static LIST_HEAD(pending_fw_head);
/* reboot notifier to avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(timeout);
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR_RW(timeout),
- __ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class firmware_class = {
.name = "firmware",
- .class_attrs = firmware_class_attrs,
+ .class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
mutex_lock(&fw_lock);
if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ loading = fw_state_is_loading(&fw_priv->buf->fw_st);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (!fw_state_is_done(&fw_buf->fw_st)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ fw_state_start(&fw_buf->fw_st);
}
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ if (fw_state_is_loading(&fw_buf->fw_st)) {
int rc;
- set_bit(FW_STATUS_DONE, &fw_buf->status);
- clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
/*
* Several loading requests may be pending on
* one same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
*/
list_del_init(&fw_buf->pending_list);
if (rc) {
- set_bit(FW_STATUS_ABORT, &fw_buf->status);
+ fw_state_aborted(&fw_buf->fw_st);
written = rc;
+ } else {
+ fw_state_done(&fw_buf->fw_st);
}
- complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
ret_count = -ENODEV;
goto out;
}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
retval = -ENODEV;
goto out;
}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
- if (retval == -ERESTARTSYS || !retval) {
+ retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ if (retval < 0) {
mutex_lock(&fw_lock);
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
- } else if (retval > 0) {
- retval = 0;
}
- if (is_fw_load_aborted(buf))
+ if (fw_state_is_aborted(&buf->fw_st))
retval = -EAGAIN;
else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
return -ENOENT;
}
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
- int ret = 0;
-
- mutex_lock(&fw_lock);
- while (!test_bit(FW_STATUS_DONE, &buf->status)) {
- if (is_fw_load_aborted(buf)) {
- ret = -ENOENT;
- break;
- }
- mutex_unlock(&fw_lock);
- ret = wait_for_completion_interruptible(&buf->completion);
- mutex_lock(&fw_lock);
- }
- mutex_unlock(&fw_lock);
- return ret;
-}
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
firmware->priv = buf;
if (ret > 0) {
- ret = sync_cached_firmware_buf(buf);
+ ret = fw_state_wait(&buf->fw_st);
if (!ret) {
fw_set_page_data(buf, firmware);
return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
struct firmware_buf *buf = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || is_fw_load_aborted(buf)) {
+ if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0c5c22..bb69e58c29f3 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
int ret;
start_pfn = section_nr_to_pfn(phys_index);
- first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066e4215..5711708532db 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -39,6 +39,105 @@
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
+struct genpd_lock_ops {
+ void (*lock)(struct generic_pm_domain *genpd);
+ void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+ int (*lock_interruptible)(struct generic_pm_domain *genpd);
+ void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+ mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+ int depth)
+{
+ mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+ .lock = genpd_lock_mtx,
+ .lock_nested = genpd_lock_nested_mtx,
+ .lock_interruptible = genpd_lock_interruptible_mtx,
+ .unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p) p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p) p->lock_ops->unlock(p)
+
+#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ bool ret;
+
+ ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+ /* Warn once for each IRQ safe dev in no sleep domain */
+ if (ret)
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
+ return ret;
+}
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
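For reference, a provider opts into the spinlock-based lock_ops simply by setting the IRQ-safe flag before registration. A minimal sketch with made-up callbacks follows; GENPD_FLAG_IRQ_SAFE itself is declared in the header part of this series.

static int my_pd_power_on(struct generic_pm_domain *pd)
{
	return 0;	/* toggle the real power switch here */
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my_pd",
	.flags		= GENPD_FLAG_IRQ_SAFE,	/* genpd_lock_init() picks genpd_spin_ops */
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static int my_pd_register(void)
{
	/* This domain may now be powered on/off with interrupts disabled. */
	return pm_genpd_init(&my_pd, NULL, true);
}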
@@ -200,9 +299,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
genpd_sd_counter_inc(master);
- mutex_lock_nested(&master->lock, depth + 1);
+ genpd_lock_nested(master, depth + 1);
ret = genpd_poweron(master, depth + 1);
- mutex_unlock(&master->lock);
+ genpd_unlock(master);
if (ret) {
genpd_sd_counter_dec(master);
@@ -255,9 +354,9 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
spin_unlock_irq(&dev->power.lock);
if (!IS_ERR(genpd)) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->max_off_time_changed = true;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
dev = dev->parent;
@@ -303,7 +402,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
+ /*
+ * Do not allow the PM domain to be powered off when an IRQ-safe
+ * device is part of a non-IRQ-safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
@@ -354,9 +458,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, true);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -466,15 +570,15 @@ static int genpd_runtime_suspend(struct device *dev)
}
/*
- * If power.irq_safe is set, this routine will be run with interrupts
- * off, so it can't use mutexes.
+ * If power.irq_safe is set, this routine may be run with
+ * IRQs disabled, so suspend only if the PM domain is also irq_safe.
*/
- if (dev->power.irq_safe)
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
return 0;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, false);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
@@ -503,15 +607,18 @@ static int genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- /* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe) {
+ /*
+ * As we don't power off a non-IRQ-safe domain that holds
+ * an IRQ-safe device, we don't need to restore power to it.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
timed = false;
goto out;
}
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
ret = genpd_poweron(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
return ret;
@@ -546,10 +653,11 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!dev->power.irq_safe) {
- mutex_lock(&genpd->lock);
+ if (!pm_runtime_is_irq_safe(dev) ||
+ (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ genpd_lock(genpd);
genpd_poweroff(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -732,20 +840,20 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
if (ret) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -936,13 +1044,13 @@ static void pm_genpd_complete(struct device *dev)
pm_generic_complete(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
if (!genpd->prepared_count)
genpd_queue_power_off_work(genpd);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -1071,7 +1179,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1088,7 +1196,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1130,7 +1238,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1145,14 +1253,14 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1183,12 +1291,23 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
+ /*
+ * If the domain can be powered on/off in an IRQ safe
+ * context, ensure that the subdomain can also be
+ * powered on/off in that context.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1211,8 +1330,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
if (ret)
kfree(link);
return ret;
@@ -1250,8 +1369,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1275,13 +1394,39 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+ struct genpd_power_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ genpd->states = state;
+ genpd->state_count = 1;
+ genpd->free = state;
+
+ return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_ops = &genpd_mtx_ops;
+ }
+}
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1293,13 +1438,15 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
+ int ret;
+
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
- mutex_init(&genpd->lock);
+ genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
@@ -1325,19 +1472,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
- if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
- pr_warn("Initial state index out of bounds.\n");
- genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
- }
-
- if (genpd->state_count > GENPD_MAX_NUM_STATES) {
- pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
- genpd->state_count = GENPD_MAX_NUM_STATES;
- }
-
/* Use only one "off" state if there were no states declared */
- if (genpd->state_count == 0)
- genpd->state_count = 1;
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret)
+ return ret;
+ }
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
@@ -1354,16 +1494,16 @@ static int genpd_remove(struct generic_pm_domain *genpd)
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->has_provider) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("Provider present, unable to remove %s\n", genpd->name);
return -EBUSY;
}
if (!list_empty(&genpd->master_links) || genpd->device_count) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
@@ -1375,8 +1515,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
}
list_del(&genpd->gpd_list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
+ kfree(genpd->free);
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -1890,21 +2031,117 @@ int genpd_dev_pm_attach(struct device *dev)
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to add to PM domain %s: %d",
+ pd->name, ret);
goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- mutex_lock(&pd->lock);
+ genpd_lock(pd);
ret = genpd_poweron(pd, 0);
- mutex_unlock(&pd->lock);
+ genpd_unlock(pd);
out:
return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+static const struct of_device_id idle_state_match[] = {
+ { .compatible = "domain-idle-state", },
+ { }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ struct device_node *state_node)
+{
+ int err;
+ u32 residency;
+ u32 entry_latency, exit_latency;
+ const struct of_device_id *match_id;
+
+ match_id = of_match_node(idle_state_match, state_node);
+ if (!match_id)
+ return -EINVAL;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %s missing entry-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %s missing exit-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+ genpd_state->residency_ns = 1000 * residency;
+
+ genpd_state->power_on_latency_ns = 1000 * exit_latency;
+ genpd_state->power_off_latency_ns = 1000 * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+}
+
+/**
+ * of_genpd_parse_idle_states: Return array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the device states parsed from the OF node. The memory for the states
+ * is allocated by this function, and it is the caller's responsibility to
+ * free it after use.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ struct genpd_power_state *st;
+ struct device_node *np;
+ int i = 0;
+ int err, ret;
+ int count;
+ struct of_phandle_iterator it;
+
+ count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (count <= 0)
+ return -EINVAL;
+
+ st = kcalloc(count, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ /* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ ret = genpd_parse_state(&st[i++], np);
+ if (ret) {
+ pr_err("Parsing idle state node %s failed with err %d\n",
+ np->full_name, ret);
+ of_node_put(np);
+ kfree(st);
+ return ret;
+ }
+ }
+
+ *n = count;
+ *states = st;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
+
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
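A genpd provider would typically feed the parsed states straight into its domain before registering it. A rough sketch follows; error handling is trimmed and the provider names are hypothetical.

static int my_provider_setup(struct device_node *np,
			     struct generic_pm_domain *pd)
{
	struct genpd_power_state *states;
	int ret, state_count;

	ret = of_genpd_parse_idle_states(np, &states, &state_count);
	if (ret)
		return ret;	/* no "domain-idle-states" phandles, or parse error */

	/* The array is owned by the caller and must be freed when done. */
	pd->states = states;
	pd->state_count = state_count;

	return pm_genpd_init(pd, NULL, false);
}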
@@ -1958,7 +2195,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
char state[16];
int ret;
- ret = mutex_lock_interruptible(&genpd->lock);
+ ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
@@ -1984,7 +2221,9 @@ static int pm_genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
@@ -1995,7 +2234,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
seq_puts(s, "\n");
exit:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e44944f4be77..48c6294e9c34 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+ dpm_wait(dev->parent, async);
+ dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
* reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1027,6 +1085,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ dpm_wait_for_subordinate(dev, async);
+
if (async_error)
goto Complete;
@@ -1038,8 +1098,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1232,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
+ dpm_wait_for_subordinate(dev, async);
+
if (async_error)
goto Complete;
@@ -1185,8 +1245,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1454,16 +1528,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
+ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
- complete_all(&dev->power.completion);
if (error)
async_error = error;
+ complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
return error;
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 4c7c6da7a989..35ff06283738 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -93,6 +93,8 @@ struct opp_table *_find_opp_table(struct device *dev)
* Return: voltage in micro volt corresponding to the opp, else
* return 0
*
+ * This is useful only for devices with a single power supply.
+ *
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. This means that opp which could have been fetched by
* opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
@@ -112,7 +114,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
- v = tmp_opp->u_volt;
+ v = tmp_opp->supplies[0].u_volt;
return v;
}
@@ -210,6 +212,24 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+static int _get_regulator_count(struct device *dev)
+{
+ struct opp_table *opp_table;
+ int count;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ count = opp_table->regulator_count;
+ else
+ count = 0;
+
+ rcu_read_unlock();
+
+ return count;
+}
+
/**
* dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
* @dev: device for which we do this operation
@@ -222,34 +242,51 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
- struct regulator *reg;
+ struct regulator *reg, **regulators;
unsigned long latency_ns = 0;
- unsigned long min_uV = ~0, max_uV = 0;
- int ret;
+ int ret, i, count;
+ struct {
+ unsigned long min;
+ unsigned long max;
+ } *uV;
+
+ count = _get_regulator_count(dev);
+
+ /* Regulator may not be required for the device */
+ if (!count)
+ return 0;
+
+ regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
+ if (!regulators)
+ return 0;
+
+ uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
+ if (!uV)
+ goto free_regulators;
rcu_read_lock();
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
rcu_read_unlock();
- return 0;
+ goto free_uV;
}
- reg = opp_table->regulator;
- if (IS_ERR(reg)) {
- /* Regulator may not be required for device */
- rcu_read_unlock();
- return 0;
- }
+ memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
- list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
- if (!opp->available)
- continue;
+ for (i = 0; i < count; i++) {
+ uV[i].min = ~0;
+ uV[i].max = 0;
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
- if (opp->u_volt_min < min_uV)
- min_uV = opp->u_volt_min;
- if (opp->u_volt_max > max_uV)
- max_uV = opp->u_volt_max;
+ if (opp->supplies[i].u_volt_min < uV[i].min)
+ uV[i].min = opp->supplies[i].u_volt_min;
+ if (opp->supplies[i].u_volt_max > uV[i].max)
+ uV[i].max = opp->supplies[i].u_volt_max;
+ }
}
rcu_read_unlock();
@@ -258,9 +295,16 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
* The caller needs to ensure that opp_table (and hence the regulator)
* isn't freed, while we are executing this routine.
*/
- ret = regulator_set_voltage_time(reg, min_uV, max_uV);
- if (ret > 0)
- latency_ns = ret * 1000;
+ for (i = 0; reg = regulators[i], i < count; i++) {
+ ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
+ if (ret > 0)
+ latency_ns += ret * 1000;
+ }
+
+free_uV:
+ kfree(uV);
+free_regulators:
+ kfree(regulators);
return latency_ns;
}
@@ -542,8 +586,7 @@ unlock:
}
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
- unsigned long u_volt, unsigned long u_volt_min,
- unsigned long u_volt_max)
+ struct dev_pm_opp_supply *supply)
{
int ret;
@@ -554,14 +597,78 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return 0;
}
- dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
- u_volt, u_volt_max);
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
+ supply->u_volt_min, supply->u_volt, supply->u_volt_max);
- ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
- u_volt_max);
+ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
+ supply->u_volt, supply->u_volt_max);
if (ret)
dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
- __func__, u_volt_min, u_volt, u_volt_max, ret);
+ __func__, supply->u_volt_min, supply->u_volt,
+ supply->u_volt_max, ret);
+
+ return ret;
+}
+
+static inline int
+_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long old_freq, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ }
+
+ return ret;
+}
+
+static int _generic_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
+ struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct regulator *reg = data->regulators[0];
+ struct device *dev = data->dev;
+ int ret;
+
+ /* This function only supports a single regulator per device */
+ if (WARN_ON(data->regulator_count > 1)) {
+ dev_err(dev, "multiple regulators are not supported\n");
+ return -EINVAL;
+ }
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
+ if (ret)
+ goto restore_voltage;
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply->u_volt)
+ _set_opp_voltage(dev, reg, old_supply);
return ret;
}
@@ -579,12 +686,13 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
+ unsigned long freq, old_freq;
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_opp *old_opp, *opp;
- struct regulator *reg;
+ struct regulator **regulators;
+ struct dev_pm_set_opp_data *data;
struct clk *clk;
- unsigned long freq, old_freq;
- unsigned long u_volt, u_volt_min, u_volt_max;
- int ret;
+ int ret, size;
if (unlikely(!target_freq)) {
dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -633,55 +741,41 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return ret;
}
- u_volt = opp->u_volt;
- u_volt_min = opp->u_volt_min;
- u_volt_max = opp->u_volt_max;
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
+ old_freq, freq);
- reg = opp_table->regulator;
+ regulators = opp_table->regulators;
- rcu_read_unlock();
-
- /* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
-
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
+ /* Only frequency scaling */
+ if (!regulators) {
+ rcu_read_unlock();
+ return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
}
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_freq;
- }
+ if (opp_table->set_opp)
+ set_opp = opp_table->set_opp;
+ else
+ set_opp = _generic_set_opp;
+
+ data = opp_table->set_opp_data;
+ data->regulators = regulators;
+ data->regulator_count = opp_table->regulator_count;
+ data->clk = clk;
+ data->dev = dev;
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*opp->supplies) * opp_table->regulator_count;
+ if (IS_ERR(old_opp))
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
- return 0;
+ data->new_opp.rate = freq;
+ memcpy(data->new_opp.supplies, opp->supplies, size);
-restore_freq:
- if (clk_set_rate(clk, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- if (!IS_ERR(old_opp))
- _set_opp_voltage(dev, reg, old_opp->u_volt,
- old_opp->u_volt_min, old_opp->u_volt_max);
+ rcu_read_unlock();
- return ret;
+ return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
@@ -764,9 +858,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
_of_init_opp_table(opp_table, dev);
- /* Set regulator to a non-NULL error value */
- opp_table->regulator = ERR_PTR(-ENXIO);
-
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
if (IS_ERR(opp_table->clk)) {
@@ -815,7 +906,10 @@ static void _remove_opp_table(struct opp_table *opp_table)
if (opp_table->prop_name)
return;
- if (!IS_ERR(opp_table->regulator))
+ if (opp_table->regulators)
+ return;
+
+ if (opp_table->set_opp)
return;
/* Release clk */
@@ -924,34 +1018,50 @@ struct dev_pm_opp *_allocate_opp(struct device *dev,
struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
+ int count, supply_size;
+ struct opp_table *table;
- /* allocate new OPP node */
- opp = kzalloc(sizeof(*opp), GFP_KERNEL);
- if (!opp)
+ table = _add_opp_table(dev);
+ if (!table)
return NULL;
- INIT_LIST_HEAD(&opp->node);
+ /* Allocate space for at least one supply */
+ count = table->regulator_count ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * count;
- *opp_table = _add_opp_table(dev);
- if (!*opp_table) {
- kfree(opp);
+ /* allocate new OPP node and supplies structures */
+ opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+ if (!opp) {
+ kfree(table);
return NULL;
}
+ /* Put the supplies at the end of the OPP structure as an empty array */
+ opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+ INIT_LIST_HEAD(&opp->node);
+
+ *opp_table = table;
+
return opp;
}
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct opp_table *opp_table)
{
- struct regulator *reg = opp_table->regulator;
-
- if (!IS_ERR(reg) &&
- !regulator_is_supported_voltage(reg, opp->u_volt_min,
- opp->u_volt_max)) {
- pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
- __func__, opp->u_volt_min, opp->u_volt_max);
- return false;
+ struct regulator *reg;
+ int i;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+
+ if (!regulator_is_supported_voltage(reg,
+ opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max);
+ return false;
+ }
}
return true;
@@ -983,11 +1093,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->u_volt, opp->available,
- new_opp->rate, new_opp->u_volt, new_opp->available);
+ __func__, opp->rate, opp->supplies[0].u_volt,
+ opp->available, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->available);
- return opp->available && new_opp->u_volt == opp->u_volt ?
- 0 : -EEXIST;
+ /* Should we compare voltages for all regulators here ? */
+ return opp->available &&
+ new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
}
new_opp->opp_table = opp_table;
@@ -1054,9 +1166,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
- new_opp->u_volt = u_volt;
- new_opp->u_volt_min = u_volt - tol;
- new_opp->u_volt_max = u_volt + tol;
+ new_opp->supplies[0].u_volt = u_volt;
+ new_opp->supplies[0].u_volt_min = u_volt - tol;
+ new_opp->supplies[0].u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
@@ -1300,13 +1412,47 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+static int _allocate_set_opp_data(struct opp_table *opp_table)
+{
+ struct dev_pm_set_opp_data *data;
+ int len, count = opp_table->regulator_count;
+
+ if (WARN_ON(!count))
+ return -EINVAL;
+
+ /* space for set_opp_data */
+ len = sizeof(*data);
+
+ /* space for old_opp.supplies and new_opp.supplies */
+ len += 2 * sizeof(struct dev_pm_opp_supply) * count;
+
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->old_opp.supplies = (void *)(data + 1);
+ data->new_opp.supplies = data->old_opp.supplies + count;
+
+ opp_table->set_opp_data = data;
+
+ return 0;
+}
+
+static void _free_set_opp_data(struct opp_table *opp_table)
+{
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+}
+
/**
- * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * dev_pm_opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
- * @name: Name of the regulator.
+ * @names: Array of pointers to the names of the regulators.
+ * @count: Number of regulators.
*
* In order to support OPP switching, OPP layer needs to know the name of the
- * device's regulator, as the core would be required to switch voltages as well.
+ * device's regulators, as the core would be required to switch voltages as
+ * well.
*
* This must be called before any OPPs are initialized for the device.
*
@@ -1316,11 +1462,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[],
+ unsigned int count)
{
struct opp_table *opp_table;
struct regulator *reg;
- int ret;
+ int ret, i;
mutex_lock(&opp_table_lock);
@@ -1336,22 +1484,146 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
goto err;
}
- /* Already have a regulator set */
- if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ /* Already have regulators set */
+ if (opp_table->regulators) {
ret = -EBUSY;
goto err;
}
- /* Allocate the regulator */
- reg = regulator_get_optional(dev, name);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: no regulator (%s) found: %d\n",
- __func__, name, ret);
+
+ opp_table->regulators = kmalloc_array(count,
+ sizeof(*opp_table->regulators),
+ GFP_KERNEL);
+ if (!opp_table->regulators) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < count; i++) {
+ reg = regulator_get_optional(dev, names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, names[i], ret);
+ goto free_regulators;
+ }
+
+ opp_table->regulators[i] = reg;
+ }
+
+ opp_table->regulator_count = count;
+
+ /* Allocate block only once to pass to set_opp() routines */
+ ret = _allocate_set_opp_data(opp_table);
+ if (ret)
+ goto free_regulators;
+
+ mutex_unlock(&opp_table_lock);
+ return opp_table;
+
+free_regulators:
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
+
+/**
+ * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
+ * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+{
+ int i;
+
+ mutex_lock(&opp_table_lock);
+
+ if (!opp_table->regulators) {
+ pr_err("%s: Doesn't have regulators set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ regulator_put(opp_table->regulators[i]);
+
+ _free_set_opp_data(opp_table);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
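
For reference, a hedged sketch of how a driver might adopt the new API pair; the supply names and the surrounding error handling are illustrative only, not taken from any real consumer:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/pm_opp.h>

    /* Hypothetical supply names; a real driver takes these from its binding. */
    static const char * const example_supplies[] = { "vdd-cpu", "vdd-mem" };

    static int example_setup_opp(struct device *dev)
    {
        struct opp_table *opp_table;

        opp_table = dev_pm_opp_set_regulators(dev, example_supplies,
                                              ARRAY_SIZE(example_supplies));
        if (IS_ERR(opp_table))
            return PTR_ERR(opp_table);    /* may be -EPROBE_DEFER */

        /* ... add or parse OPPs, register a cpufreq/devfreq user, etc. ... */

        /* and on teardown, hand the regulators back */
        dev_pm_opp_put_regulators(opp_table);
        return 0;
    }
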
+
+/**
+ * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is useful to support complex platforms (like platforms with multiple
+ * regulators per device), instead of the generic OPP set rate helper.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ if (!set_opp)
+ return -EINVAL;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
goto err;
}
- opp_table->regulator = reg;
+ /* Already have custom set_opp helper */
+ if (WARN_ON(opp_table->set_opp)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->set_opp = set_opp;
mutex_unlock(&opp_table_lock);
return 0;
@@ -1363,11 +1635,12 @@ unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
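
A hedged sketch of what a custom set_opp() helper could look like for a two-supply platform, assuming the dev_pm_set_opp_data layout added alongside this series (old_opp/new_opp supplies, regulators[], clk); only the scale-up ordering is shown, and a real helper would also handle scaling down and restore state on failure:

    #include <linux/clk.h>
    #include <linux/pm_opp.h>
    #include <linux/regulator/consumer.h>

    static int example_set_opp(struct dev_pm_set_opp_data *data)
    {
        struct dev_pm_opp_supply *sup = data->new_opp.supplies;
        int ret;

        /* raise both supplies first, then switch the clock */
        ret = regulator_set_voltage_triplet(data->regulators[0],
                                            sup[0].u_volt_min, sup[0].u_volt,
                                            sup[0].u_volt_max);
        if (ret)
            return ret;

        ret = regulator_set_voltage_triplet(data->regulators[1],
                                            sup[1].u_volt_min, sup[1].u_volt,
                                            sup[1].u_volt_max);
        if (ret)
            return ret;

        return clk_set_rate(data->clk, data->new_opp.rate);
    }

Such a helper would then be registered once at probe time with dev_pm_opp_register_set_opp_helper(dev, example_set_opp).
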
/**
- * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
- * @dev: Device for which regulator was set.
+ * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ * set_opp helper
+ * @dev: Device for which custom set_opp helper has to be cleared.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -1375,7 +1648,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-void dev_pm_opp_put_regulator(struct device *dev)
+void dev_pm_opp_register_put_opp_helper(struct device *dev)
{
struct opp_table *opp_table;
@@ -1389,16 +1662,16 @@ void dev_pm_opp_put_regulator(struct device *dev)
goto unlock;
}
- if (IS_ERR(opp_table->regulator)) {
- dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ if (!opp_table->set_opp) {
+ dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
+ __func__);
goto unlock;
}
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- regulator_put(opp_table->regulator);
- opp_table->regulator = ERR_PTR(-ENXIO);
+ opp_table->set_opp = NULL;
/* Try freeing opp_table if this was the last blocking resource */
_remove_opp_table(opp_table);
@@ -1406,7 +1679,7 @@ void dev_pm_opp_put_regulator(struct device *dev)
unlock:
mutex_unlock(&opp_table_lock);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ef1ae6b52042..95f433db4ac7 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/limits.h>
+#include <linux/slab.h>
#include "opp.h"
@@ -34,6 +35,46 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
+static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ struct dentry *d;
+ int i = 0;
+ char *name;
+
+ /* Always create at least supply-0 directory */
+ do {
+ name = kasprintf(GFP_KERNEL, "supply-%d", i);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+
+ kfree(name);
+
+ if (!d)
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max))
+ return false;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp))
+ return false;
+ } while (++i < opp_table->regulator_count);
+
+ return true;
+}
+
int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
@@ -63,16 +104,7 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
return -ENOMEM;
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ if (!opp_debug_create_supplies(opp, opp_table, d))
return -ENOMEM;
if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211e6fcd..3f7d2591b173 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <linux/export.h>
#include "opp.h"
@@ -101,16 +102,16 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
return true;
}
-/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
struct opp_table *opp_table)
{
- u32 microvolt[3] = {0};
- u32 val;
- int count, ret;
+ u32 *microvolt, *microamp = NULL;
+ int supplies, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
+ supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
+
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -128,34 +129,29 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
return 0;
}
- count = of_property_count_u32_elems(opp->np, name);
- if (count < 0) {
+ vcount = of_property_count_u32_elems(opp->np, name);
+ if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, count);
- return count;
+ __func__, name, vcount);
+ return vcount;
}
- /* There can be one or three elements here */
- if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
- __func__, name, count);
+ /* There can be one or three elements per supply */
+ if (vcount != supplies && vcount != supplies * 3) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, vcount, supplies);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+ microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
+ if (!microvolt)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
if (ret) {
dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- return -EINVAL;
- }
-
- opp->u_volt = microvolt[0];
-
- if (count == 1) {
- opp->u_volt_min = opp->u_volt;
- opp->u_volt_max = opp->u_volt;
- } else {
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+ ret = -EINVAL;
+ goto free_microvolt;
}
/* Search for "opp-microamp-<name>" */
@@ -172,10 +168,59 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
prop = of_find_property(opp->np, name, NULL);
}
- if (prop && !of_property_read_u32(opp->np, name, &val))
- opp->u_amp = val;
+ if (prop) {
+ icount = of_property_count_u32_elems(opp->np, name);
+ if (icount < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
+ name, icount);
+ ret = icount;
+ goto free_microvolt;
+ }
- return 0;
+ if (icount != supplies) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, icount, supplies);
+ ret = -EINVAL;
+ goto free_microvolt;
+ }
+
+ microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
+ if (!microamp) {
+ ret = -ENOMEM;
+ goto free_microvolt;
+ }
+
+ ret = of_property_read_u32_array(opp->np, name, microamp,
+ icount);
+ if (ret) {
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__,
+ name, ret);
+ ret = -EINVAL;
+ goto free_microamp;
+ }
+ }
+
+ for (i = 0, j = 0; i < supplies; i++) {
+ opp->supplies[i].u_volt = microvolt[j++];
+
+ if (vcount == supplies) {
+ opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
+ opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
+ } else {
+ opp->supplies[i].u_volt_min = microvolt[j++];
+ opp->supplies[i].u_volt_max = microvolt[j++];
+ }
+
+ if (microamp)
+ opp->supplies[i].u_amp = microamp[i];
+ }
+
+free_microamp:
+ kfree(microamp);
+free_microvolt:
+ kfree(microvolt);
+
+ return ret;
}
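
A worked example of the unpacking loop above may help; the voltages and node layout are made up:

    /*
     * Hypothetical two-regulator node using the triplet form (vcount = 6):
     *
     *	opp-microvolt = <975000 970000 985000>,		- supply 0: target min max
     *			<965000 960000 975000>;		- supply 1: target min max
     *
     * The loop then fills:
     *	supplies[0].u_volt = 975000, .u_volt_min = 970000, .u_volt_max = 985000
     *	supplies[1].u_volt = 965000, .u_volt_min = 960000, .u_volt_max = 975000
     * with j advancing by three per supply; in the single-value form
     * (vcount == supplies), min and max simply repeat the target value.
     */
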
/**
@@ -198,7 +243,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
{
/*
* TODO: Support for multiple OPP tables.
@@ -303,9 +348,9 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
- new_opp->u_volt_min, new_opp->u_volt_max,
- new_opp->clock_latency_ns);
+ __func__, new_opp->turbo, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
+ new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
/*
* Notify the changes in the availability of the operable
@@ -562,7 +607,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
np = _of_get_opp_desc_node(cpu_dev);
if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
return -ENOENT;
}
@@ -587,7 +632,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
tmp_np = _of_get_opp_desc_node(tcpu_dev);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
__func__);
ret = -ENOENT;
goto put_cpu_node;
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index fabd5ca1a083..af9f2b849a66 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -61,10 +61,7 @@ extern struct list_head opp_tables;
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
* @rate: Frequency in hertz
- * @u_volt: Target voltage in microvolts corresponding to this OPP
- * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
- * @u_amp: Maximum current drawn by the device in microamperes
+ * @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @opp_table: points back to the opp_table struct this opp belongs to
@@ -83,10 +80,8 @@ struct dev_pm_opp {
bool suspend;
unsigned long rate;
- unsigned long u_volt;
- unsigned long u_volt_min;
- unsigned long u_volt_max;
- unsigned long u_amp;
+ struct dev_pm_opp_supply *supplies;
+
unsigned long clock_latency_ns;
struct opp_table *opp_table;
@@ -144,7 +139,10 @@ enum opp_table_access {
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
- * @regulator: Supply regulator
+ * @regulators: Supply regulators
+ * @regulator_count: Number of power supply regulators
+ * @set_opp: Platform specific set_opp callback
+ * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -179,7 +177,11 @@ struct opp_table {
unsigned int supported_hw_count;
const char *prop_name;
struct clk *clk;
- struct regulator *regulator;
+ struct regulator **regulators;
+ unsigned int regulator_count;
+
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
+ struct dev_pm_set_opp_data *set_opp_data;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -190,7 +192,6 @@ struct opp_table {
/* Routines internal to opp core */
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-struct device_node *_of_get_opp_desc_node(struct device *dev);
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 50e30e7b059d..a46e97e515c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+ WAKE_IRQ_DEDICATED_MANAGED)
+
struct wake_irq {
struct device *dev;
+ unsigned int status;
int irq;
- bool dedicated_irq:1;
};
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}
+static inline void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+}
+
+static inline void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -127,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -146,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e459cb..58fcc758334e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
struct dev_pm_qos_request *req;
if (val < 0) {
- ret = -EINVAL;
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 82a081ea4317..872eac4cb1df 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
+
+#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@@ -241,7 +243,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EACCES;
else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
+ else if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
@@ -258,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+ link->rpm_active)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ if (retval < 0) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ link->rpm_active = true;
+ }
+ return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->rpm_active &&
+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ pm_runtime_put(link->supplier);
+ link->rpm_active = false;
+ }
+}
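
These helpers act on device links created elsewhere with DL_FLAG_PM_RUNTIME; a hedged sketch of how a consumer driver might create such a link with the device_link_add() API from the same series (the flag choice is illustrative):

    #include <linux/device.h>
    #include <linux/errno.h>

    static int example_link_supplier(struct device *consumer,
                                     struct device *supplier)
    {
        struct device_link *link;

        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
        if (!link)
            return -ENODEV;

        /* runtime-resuming the consumer now resumes the supplier first */
        return 0;
    }
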
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -266,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval;
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- else
+ } else {
spin_unlock_irq(&dev->power.lock);
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval)
+ goto fail;
+
+ device_links_read_unlock(idx);
+ }
+ }
+
retval = cb(dev);
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- else
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
spin_lock_irq(&dev->power.lock);
+ }
return retval;
}
@@ -515,7 +592,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@@ -554,7 +631,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -712,8 +789,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's runtime PM is disabled or it
- * is set to ignore children.
+ * Resume the parent if it has runtime PM enabled and has not
+ * been set to ignore its children.
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children) {
@@ -737,12 +814,12 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1104,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out_set;
if (status == RPM_SUSPENDED) {
- /* It always is possible to set the status to 'suspended'. */
+ /*
+ * It is invalid to suspend a device with an active child,
+ * unless it has been set to ignore its children.
+ */
+ if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count)) {
+ dev_err(dev, "runtime PM trying to suspend device but active child\n");
+ error = -EBUSY;
+ goto out;
+ }
+
if (parent) {
atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children;
@@ -1447,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
}
/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->rpm_active) {
+ pm_runtime_put_noidle(dev);
+ link->rpm_active = false;
+ }
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_put(link->supplier);
+
+ device_links_read_unlock(idx);
+}
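
These two exported helpers are meant to bracket work during which the suppliers must stay powered (the driver core, for instance, brackets driver probing with them); a hedged sketch of the pattern:

    #include <linux/pm_runtime.h>

    /* Illustrative bracketing; 'work' stands for any supplier-dependent step. */
    static int example_with_suppliers(struct device *dev,
                                      int (*work)(struct device *dev))
    {
        int ret;

        pm_runtime_get_suppliers(dev);    /* resume and reference all suppliers */
        ret = work(dev);
        pm_runtime_put_suppliers(dev);    /* drop the references again */

        return ret;
    }
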
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
@@ -1478,6 +1653,16 @@ int pm_runtime_force_suspend(struct device *dev)
if (ret)
goto err;
+ /*
+ * Increase the runtime PM usage count for the device's parent, in case
+ * the device is found to be in use when system suspend is invoked.
+ * This tells pm_runtime_force_resume() to resume the parent
+ * immediately, which is needed to be able to resume its children,
+ * instead of deferring the resume to be managed via runtime PM.
+ */
+ if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
+ pm_runtime_get_noresume(dev->parent);
+
pm_runtime_set_suspended(dev);
return 0;
err:
@@ -1487,16 +1672,20 @@ err:
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
- * pm_runtime_force_resume - Force a device into resume state.
+ * pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power. We update the runtime PM
- * status and re-enables runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. To distinguish that, we check whether the runtime PM
+ * usage count is greater than 1 (the PM core increases the usage count in the
+ * system PM prepare phase), as that indicates a real user (such as a subsystem,
+ * driver, userspace, etc.) is using it. If that is the case, the device is
+ * expected to be used on system resume as well, so we resume it here.
+ * Otherwise, we defer the resume to be managed via runtime PM.
*
- * Typically this function may be invoked from a system resume callback to make
- * sure the device is put into full power state.
+ * Typically this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
@@ -1513,6 +1702,17 @@ int pm_runtime_force_resume(struct device *dev)
if (!pm_runtime_status_suspended(dev))
goto out;
+ /*
+ * Decrease the parent's runtime PM usage count, if we increased it
+ * during system suspend in pm_runtime_force_suspend().
+ */
+ if (atomic_read(&dev->power.usage_count) > 1) {
+ if (dev->parent)
+ pm_runtime_put_noidle(dev->parent);
+ } else {
+ goto out;
+ }
+
ret = pm_runtime_set_active(dev);
if (ret)
goto out;
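
The usual consumer of this helper pair is a driver that reuses its runtime PM path for system sleep; a hedged minimal sketch (the runtime callbacks are stand-ins):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int example_runtime_suspend(struct device *dev)
    {
        /* put the device into its low-power state here */
        return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
        /* bring the device back to full power here */
        return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(example_runtime_suspend,
                           example_runtime_resume, NULL)
    };
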
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..33b4b902741a 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value)) {
+ if (kstrtos32(buf, 0, &value) == 0) {
+ /* Users can't write negative values directly */
+ if (value < 0)
+ return -EINVAL;
+ } else {
if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index efec10b49d59..1cda505d6a85 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -10,6 +10,7 @@
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
+#include <linux/suspend.h>
#include <linux/mc146818rtc.h>
@@ -74,6 +75,9 @@
#define DEVSEED (7919)
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
@@ -104,6 +108,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int dev
time.tm_min = (n % 20) * 3;
n /= 20;
mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
return n ? -1 : 0;
}
@@ -239,9 +244,31 @@ int show_trace_dev_match(char *buf, size_t size)
return ret;
}
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
return 0;
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0d77cd6fd8d1..404d94c6c8bc 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (wirq->dedicated_irq)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq);
+ wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+ }
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
return err;
err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_suspend() to enable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
*
* Note that for runtime_suspend() the wake-up interrupts
* should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,20 +234,73 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
* dev_pm_disable_wake_irq - Disable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_resume() to disable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
*/
void dev_pm_disable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
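
With the new MANAGED handling, a typical driver only registers the dedicated wake IRQ once and lets the runtime PM core arm it; a hedged probe-time sketch (the IRQ index and ordering are illustrative):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    #include <linux/pm_wakeirq.h>

    static int example_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        int irq, ret;

        irq = platform_get_irq(pdev, 1);    /* hypothetical dedicated wake IRQ */
        if (irq < 0)
            return irq;

        device_init_wakeup(dev, true);
        ret = dev_pm_set_dedicated_wake_irq(dev, irq);
        if (ret)
            return ret;

        pm_runtime_enable(dev);
        /* the core now toggles the wake IRQ across rpm_suspend()/rpm_resume() */
        return 0;
    }
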
/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables the wakeirq conditionally. We need to enable the wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ return;
+
+ if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+ goto enable;
+ } else if (can_change_status) {
+ wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+ goto enable;
+ }
+
+ return;
+
+enable:
+ enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ disable_irq_nosync(wirq->irq);
+}
+
+/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2aa8d1..bf9ba26981a5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -811,7 +811,7 @@ void pm_print_active_wakeup_sources(void)
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_info("active wakeup source: %s\n", ws->name);
+ pr_debug("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -822,7 +822,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_info("last active wakeup source: %s\n",
+ pr_debug("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
}
@@ -905,7 +905,7 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
split_counters(&cnt, &inpr);
if (inpr == 0 || signal_pending(current))
break;
-
+ pm_print_active_wakeup_sources();
schedule();
}
finish_wait(&wakeup_count_wait_queue, &wait);
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 6f77d7319fc6..4ff311374c4a 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -236,15 +236,13 @@ static int regcache_lzo_read(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
@@ -275,15 +273,13 @@ static int regcache_lzo_write(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index b63f23e6ad61..dc26e5949a32 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
+#include <linux/glob.h>
static DEFINE_IDA(soc_ida);
@@ -113,6 +114,12 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
struct soc_device *soc_dev;
int ret;
+ if (!soc_bus_type.p) {
+ ret = bus_register(&soc_bus_type);
+ if (ret)
+ goto out1;
+ }
+
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
if (!soc_dev) {
ret = -ENOMEM;
@@ -156,6 +163,78 @@ void soc_device_unregister(struct soc_device *soc_dev)
static int __init soc_bus_register(void)
{
+ if (soc_bus_type.p)
+ return 0;
+
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const struct soc_device_attribute *match = arg;
+
+ if (match->machine &&
+ (!soc_dev->attr->machine ||
+ !glob_match(match->machine, soc_dev->attr->machine)))
+ return 0;
+
+ if (match->family &&
+ (!soc_dev->attr->family ||
+ !glob_match(match->family, soc_dev->attr->family)))
+ return 0;
+
+ if (match->revision &&
+ (!soc_dev->attr->revision ||
+ !glob_match(match->revision, soc_dev->attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+ (!soc_dev->attr->soc_id ||
+ !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches)
+{
+ int ret = 0;
+
+ if (!matches)
+ return NULL;
+
+ while (!ret) {
+ if (!(matches->machine || matches->family ||
+ matches->revision || matches->soc_id))
+ break;
+ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+ soc_device_match_one);
+ if (!ret)
+ matches++;
+ else
+ return matches;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
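
A hedged sketch of the intended use, with made-up family/revision strings; only a non-NULL return is checked, so nothing beyond the documented behaviour is assumed:

    #include <linux/sys_soc.h>
    #include <linux/types.h>

    /* Made-up strings; glob patterns such as "1.*" match via glob_match(). */
    static const struct soc_device_attribute example_quirk_list[] = {
        { .family = "Example SoC", .revision = "1.*" },
        { /* sentinel */ }
    };

    static bool example_needs_quirk(void)
    {
        return soc_device_match(example_quirk_list) != NULL;
    }
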
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000000..9aa0d45a60db
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko
+
+ If unsure say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000000..90477c5fd9f9
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000000..304d5c2bd5e9
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_info(&pdev->dev, "done sleeping\n");
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+ ktime_t calltime, delta;
+ unsigned long long duration;
+ int error;
+
+ pr_info("registering first asynchronous device...\n");
+
+ async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+ NULL, 0);
+ if (IS_ERR(async_dev_1)) {
+ error = PTR_ERR(async_dev_1);
+ pr_err("failed to create async_dev_1: %d", error);
+ return error;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ error = platform_driver_register(&async_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_1;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second asynchronous device...\n");
+ calltime = ktime_get();
+ async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+ NULL, 0);
+ if (IS_ERR(async_dev_2)) {
+ error = PTR_ERR(async_dev_2);
+ pr_err("failed to create async_dev_2: %d", error);
+ goto err_unregister_async_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous driver...\n");
+
+ error = platform_driver_register(&sync_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous device...\n");
+ calltime = ktime_get();
+ sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+ NULL, 0);
+ if (IS_ERR(sync_dev_1)) {
+ error = PTR_ERR(sync_dev_1);
+ pr_err("failed to create sync_dev_1: %d", error);
+ goto err_unregister_sync_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe was too quick\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_sync_dev_1;
+ }
+
+ pr_info("completed successfully");
+
+ return 0;
+
+err_unregister_sync_dev_1:
+ platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+ platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+ platform_device_unregister(async_dev_1);
+
+ return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+ platform_device_unregister(async_dev_1);
+ platform_device_unregister(async_dev_2);
+ platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index df3c97cb4c99..d6ec1c546f5b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -118,51 +118,19 @@ static int topology_add_dev(unsigned int cpu)
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
-static void topology_remove_dev(unsigned int cpu)
+static int topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
-}
-
-static int topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rc = topology_add_dev(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- topology_remove_dev(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ return 0;
}
static int topology_sysfs_init(void)
{
- int cpu;
- int rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = topology_add_dev(cpu);
- if (rc)
- goto out;
- }
- __hotcpu_notifier(topology_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+ "base/topology:prepare", topology_add_dev,
+ topology_remove_dev);
}
device_initcall(topology_sysfs_init);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index bd46569e0e52..925842996986 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b6ef86..223ff2fcae7e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -384,9 +384,12 @@ config BLK_DEV_RAM_DAX
allocated from highmem (only a problem for highmem systems).
config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media"
+ tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
help
+ Note: This driver is deprecated and will be removed from the
+ kernel in the near future!
+
If you have a CDROM/DVD drive that supports packet writing, say
Y to include support. It should work with any MMC/Mt Fuji
compliant ATAPI or SCSI drive, which is just about any newer
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
return n;
}
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios. Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
- struct bio_vec bv;
- struct page *page;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- /* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel.
- */
- page = compound_head(bv.bv_page);
- page_ref_inc(page);
- }
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
- struct page *page;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- page = compound_head(bv.bv_page);
- page_ref_dec(page);
- }
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
- bio_pageinc(bio);
}
static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
if (buf == d->ip.buf)
d->ip.buf = NULL;
rq = buf->rq;
- bio_pagedec(buf->bio);
mempool_free(buf, d->bufpool);
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c76d4016eeb..ad793f35632c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -395,44 +395,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
#define brd_direct_access NULL
#endif
-static int brd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int error;
- struct brd_device *brd = bdev->bd_disk->private_data;
-
- if (cmd != BLKFLSBUF)
- return -ENOTTY;
-
- /*
- * ram device BLKFLSBUF has special semantics, we want to actually
- * release and destroy the ramdisk data.
- */
- mutex_lock(&brd_mutex);
- mutex_lock(&bdev->bd_mutex);
- error = -EBUSY;
- if (bdev->bd_openers <= 1) {
- /*
- * Kill the cache first, so it isn't written back to the
- * device.
- *
- * Another thread might instantiate more buffercache here,
- * but there is not much we can do to close that race.
- */
- kill_bdev(bdev);
- brd_free_pages(brd);
- error = 0;
- }
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&brd_mutex);
-
- return error;
-}
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
- .ioctl = brd_ioctl,
.direct_access = brd_direct_access,
};
@@ -443,8 +408,8 @@ static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-module_param(rd_size, int, S_IRUGO);
+unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
static int max_part = 1;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302e56e3..a18de9d727b0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
}
#if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
- int i;
- unsigned char *x;
-
- if (hex)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % xmargin) == 0 && i>0) printk("\n");
- if ((i % xmargin) == 0) printk("0x%04x:", i);
- printk(" %02x", *x);
- x++;
- }
- printk("\n");
- }
- if (ascii)
- {
- x = c;
- for (i=0;i<len;i++)
- {
- if ((i % amargin) == 0 && i>0) printk("\n");
- if ((i % amargin) == 0) printk("0x%04x:", i);
- if (*x > 26 && *x < 128) printk("%c", *x);
- else printk(".");
- x++;
- }
- printk("\n");
- }
-}
-
static void
print_cmd(CommandList_struct *cp)
{
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
printk("sgtot:%d\n", cp->Header.SGTotal);
printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
cp->Header.Tag.lower);
- printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- cp->Header.LUN.LunAddrBytes[0],
- cp->Header.LUN.LunAddrBytes[1],
- cp->Header.LUN.LunAddrBytes[2],
- cp->Header.LUN.LunAddrBytes[3],
- cp->Header.LUN.LunAddrBytes[4],
- cp->Header.LUN.LunAddrBytes[5],
- cp->Header.LUN.LunAddrBytes[6],
- cp->Header.LUN.LunAddrBytes[7]);
+ printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
printk("CDBLen:%d\n", cp->Request.CDBLen);
printk("Type:%d\n",cp->Request.Type.Type);
printk("Attr:%d\n",cp->Request.Type.Attribute);
printk(" Dir:%d\n",cp->Request.Type.Direction);
printk("Timeout:%d\n",cp->Request.Timeout);
- printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- cp->Request.CDB[0], cp->Request.CDB[1],
- cp->Request.CDB[2], cp->Request.CDB[3],
- cp->Request.CDB[4], cp->Request.CDB[5],
- cp->Request.CDB[6], cp->Request.CDB[7],
- cp->Request.CDB[8], cp->Request.CDB[9],
- cp->Request.CDB[10], cp->Request.CDB[11],
- cp->Request.CDB[12], cp->Request.CDB[13],
- cp->Request.CDB[14], cp->Request.CDB[15]),
+ printk("CDB: %16ph\n", cp->Request.CDB);
printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
}
-
#endif
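
The %16ph used above is the generic %*ph printk extension, which hex-dumps a fixed-size buffer in one format specifier; a hedged mini example:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void example_dump_cdb(void)
    {
        u8 cdb[16] = { 0x28, 0x00, 0x12, 0x34 };

        /* prints the 16 bytes as "28 00 12 34 00 00 ..." */
        printk(KERN_INFO "CDB: %16ph\n", cdb);
    }
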
static int
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
"reported\n", c);
break;
case CMD_INVALID: {
- /* print_bytes(c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
/* We get CMD_INVALID if you address a non-existent tape drive instead
of a selection timeout (no response). You will see this if you yank
out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
dev_warn(&h->pdev->dev,
"%p is reported invalid (probably means "
"target device no longer present)\n", c);
- /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
- print_cmd(c); */
+ /*
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+ print_cmd(c);
+ */
}
break;
case CMD_PROTOCOL_ERR:
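As background on the conversion above: the "%*ph" printk extension prints up to 64 bytes of a buffer as space-separated hex ("%*phN" omits the separators), and the commented-out debug dumps now point at print_hex_dump() for anything longer. A minimal sketch with a hypothetical helper name, not part of this patch, assuming only the stock printk facilities:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only -- not part of this patch. */
static void demo_dump_cdb(const u8 *cdb, size_t len)
{
	/* %16ph expands to 16 space-separated hex bytes (64-byte limit per specifier) */
	printk(KERN_INFO "CDB: %16ph\n", cdb);

	/* print_hex_dump() handles arbitrary lengths, 16 bytes per row here */
	print_hex_dump(KERN_INFO, "cdb: ", DUMP_PREFIX_OFFSET,
		       16, 1, cdb, len, false);
}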
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 2d3d50ab74bf..8d7bcfa49c12 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -148,7 +148,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
op_flags |= REQ_FUA | REQ_PREFLUSH;
- op_flags |= REQ_SYNC | REQ_NOIDLE;
+ op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
drbd_update_congested(connection);
}
do {
- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 942384f34e22..c7728dd77230 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
bio->bi_bdev = device->ldev->backing_bdev;
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+ bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1648,20 +1648,8 @@ next_bio:
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
- if (!bio_add_page(bio, page, len, 0)) {
- /* A single page must always be possible!
- * But in case it fails anyways,
- * we deal with it, and complain (below). */
- if (bio->bi_vcnt == 0) {
- drbd_err(device,
- "bio_add_page failed for len=%u, "
- "bi_vcnt=0 (bi_sector=%llu)\n",
- len, (uint64_t)bio->bi_iter.bi_sector);
- err = -ENOSPC;
- goto fail;
- }
+ if (!bio_add_page(bio, page, len, 0))
goto next_bio;
- }
data_size -= len;
sector += len >> 9;
--nr_pages;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4ced4a2..a391a3cfb3fe 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3806,14 +3806,10 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio);
- bio.bi_io_vec = &bio_vec;
- bio_vec.bv_page = page;
- bio_vec.bv_len = size;
- bio_vec.bv_offset = 0;
- bio.bi_vcnt = 1;
- bio.bi_iter.bi_size = size;
+ bio_init(&bio, &bio_vec, 1);
bio.bi_bdev = bdev;
+ bio_add_page(&bio, page, size, 0);
+
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
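The floppy conversion above relies on the bio_init() signature that takes a bvec table and an entry count, letting bio_add_page() do the bookkeeping that was previously open-coded. A sketch of the same pattern with hypothetical names, assuming the 4.10-era bio API:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical example: a one-segment bio built on the stack. */
static void demo_onstack_bio(struct block_device *bdev, struct page *page,
			     unsigned int size)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);		/* bio owns a one-entry bvec table */
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = 0;
	bio_add_page(&bio, page, size, 0);	/* fills the bvec, updates bi_vcnt and bi_size */
	/* caller sets bi_opf / bi_end_io and submits as required */
}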
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1b7a90ba11..4af818766797 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1646,7 +1646,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
if (lo->lo_state != Lo_bound)
- return -EIO;
+ return BLK_MQ_RQ_QUEUE_ERROR;
switch (req_op(cmd->rq)) {
case REQ_OP_FLUSH:
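The loop change above matters because a blk-mq .queue_rq handler is expected to report failures with the BLK_MQ_RQ_QUEUE_* return codes, not a raw negative errno, so the core accounts the request correctly. A sketch of the convention with a hypothetical driver hook, assuming the 4.10-era blk-mq interface:

#include <linux/blk-mq.h>

static bool demo_dev_ready(struct blk_mq_hw_ctx *hctx);	/* hypothetical readiness check */

static int demo_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	if (!demo_dev_ready(hctx))
		return BLK_MQ_RQ_QUEUE_ERROR;	/* not -EIO */

	blk_mq_start_request(bd->rq);
	/* ... issue bd->rq to the hardware ... */
	return BLK_MQ_RQ_QUEUE_OK;
}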
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3cfd879267b2..f96ab717534c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2035,18 +2035,14 @@ static int exec_drive_taskfile(struct driver_data *dd,
taskout = req_task->out_size;
taskin = req_task->in_size;
/* 130560 = 512 * 0xFF*/
- if (taskin > 130560 || taskout > 130560) {
- err = -EINVAL;
- goto abort;
- }
+ if (taskin > 130560 || taskout > 130560)
+ return -EINVAL;
if (taskout) {
outbuf = memdup_user(buf + outtotal, taskout);
- if (IS_ERR(outbuf)) {
- err = PTR_ERR(outbuf);
- outbuf = NULL;
- goto abort;
- }
+ if (IS_ERR(outbuf))
+ return PTR_ERR(outbuf);
+
outbuf_dma = pci_map_single(dd->pdev,
outbuf,
taskout,
@@ -3937,8 +3933,10 @@ static int mtip_block_initialize(struct driver_data *dd)
/* Generate the disk name, implemented same as in sd.c */
do {
- if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
+ rv = -ENOMEM;
goto ida_get_error;
+ }
spin_lock(&rssd_index_lock);
rv = ida_get_new(&rssd_index_ida, &index);
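The mtip32xx cleanup above leans on memdup_user(), which rolls the kmalloc() plus copy_from_user() pair into one call and reports failure via ERR_PTR(), so the caller needs no separate NULL-ing or unwind label. A sketch with a hypothetical wrapper name:

#include <linux/string.h>
#include <linux/err.h>

static void *demo_copy_in(const void __user *ubuf, size_t len)
{
	void *buf = memdup_user(ubuf, len);	/* ERR_PTR() on failure */

	if (IS_ERR(buf))
		return buf;			/* caller checks IS_ERR()/PTR_ERR() */
	/* ... use buf, kfree(buf) when finished ... */
	return buf;
}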
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 19a16b2dbb91..99c84468f154 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,26 +41,34 @@
#include <linux/nbd.h>
+struct nbd_sock {
+ struct socket *sock;
+ struct mutex tx_lock;
+};
+
#define NBD_TIMEDOUT 0
#define NBD_DISCONNECT_REQUESTED 1
+#define NBD_DISCONNECTED 2
+#define NBD_RUNNING 3
struct nbd_device {
u32 flags;
unsigned long runtime_flags;
- struct socket * sock; /* If == NULL, device is not ready, yet */
+ struct nbd_sock **socks;
int magic;
struct blk_mq_tag_set tag_set;
- struct mutex tx_lock;
+ struct mutex config_lock;
struct gendisk *disk;
- int blksize;
+ int num_connections;
+ atomic_t recv_threads;
+ wait_queue_head_t recv_wq;
+ loff_t blksize;
loff_t bytesize;
- /* protects initialization and shutdown of the socket */
- spinlock_t sock_lock;
struct task_struct *task_recv;
- struct task_struct *task_send;
+ struct task_struct *task_setup;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
@@ -69,7 +77,7 @@ struct nbd_device {
struct nbd_cmd {
struct nbd_device *nbd;
- struct list_head list;
+ struct completion send_complete;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -126,7 +134,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
- int blocksize, int nr_blocks)
+ loff_t blocksize, loff_t nr_blocks)
{
int ret;
@@ -135,7 +143,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
return ret;
nbd->blksize = blocksize;
- nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+ nbd->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd, bdev);
@@ -159,22 +167,20 @@ static void nbd_end_request(struct nbd_cmd *cmd)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- struct socket *sock;
-
- spin_lock(&nbd->sock_lock);
+ int i;
- if (!nbd->sock) {
- spin_unlock(&nbd->sock_lock);
+ if (nbd->num_connections == 0)
+ return;
+ if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
return;
- }
-
- sock = nbd->sock;
- dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- nbd->sock = NULL;
- spin_unlock(&nbd->sock_lock);
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
+ for (i = 0; i < nbd->num_connections; i++) {
+ struct nbd_sock *nsock = nbd->socks[i];
+ mutex_lock(&nsock->tx_lock);
+ kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
+ mutex_unlock(&nsock->tx_lock);
+ }
+ dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
@@ -182,42 +188,38 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
- struct socket *sock = NULL;
-
- spin_lock(&nbd->sock_lock);
+ dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
-
- if (nbd->sock) {
- sock = nbd->sock;
- get_file(sock->file);
- }
-
- spin_unlock(&nbd->sock_lock);
- if (sock) {
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sockfd_put(sock);
- }
-
req->errors++;
- dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+
+ /*
+ * If our disconnect packet times out then we're already holding the
+ * config_lock and could deadlock here, so just set an error and return,
+ * we'll handle shutting everything down later.
+ */
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ return BLK_EH_HANDLED;
+ mutex_lock(&nbd->config_lock);
+ sock_shutdown(nbd);
+ mutex_unlock(&nbd->config_lock);
return BLK_EH_HANDLED;
}
/*
* Send or receive packet.
*/
-static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
- int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
+ int size, int msg_flags)
{
- struct socket *sock = nbd->sock;
+ struct socket *sock = nbd->socks[index]->sock;
int result;
struct msghdr msg;
struct kvec iov;
unsigned long pflags = current->flags;
if (unlikely(!sock)) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted %s on closed socket in sock_xmit\n",
(send ? "send" : "recv"));
return -EINVAL;
@@ -254,29 +256,29 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
return result;
}
-static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
- int flags)
+static inline int sock_send_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec, int flags)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+ result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
bvec->bv_len, flags);
kunmap(bvec->bv_page);
return result;
}
/* always call with the tx_lock held */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
int result, flags;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
+ struct bio *bio;
u32 type;
+ u32 tag = blk_mq_unique_tag(req);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- type = NBD_CMD_DISC;
- else if (req_op(req) == REQ_OP_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
@@ -288,73 +290,89 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(type);
- if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
+ if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &req->tag, sizeof(req->tag));
+ memcpy(request.handle, &tag, sizeof(tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
- result = sock_xmit(nbd, 1, &request, sizeof(request),
+ result = sock_xmit(nbd, index, 1, &request, sizeof(request),
(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
return -EIO;
}
- if (type == NBD_CMD_WRITE) {
- struct req_iterator iter;
+ if (type != NBD_CMD_WRITE)
+ return 0;
+
+ flags = 0;
+ bio = req->bio;
+ while (bio) {
+ struct bio *next = bio->bi_next;
+ struct bvec_iter iter;
struct bio_vec bvec;
- /*
- * we are really probing at internals to determine
- * whether to set MSG_MORE or not...
- */
- rq_for_each_segment(bvec, req, iter) {
- flags = 0;
- if (!rq_iter_last(bvec, iter))
+
+ bio_for_each_segment(bvec, bio, iter) {
+ bool is_last = !next && bio_iter_last(bvec, iter);
+
+ if (is_last)
flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
- result = sock_send_bvec(nbd, &bvec, flags);
+ result = sock_send_bvec(nbd, index, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
return -EIO;
}
+ /*
+ * The completion might already have come in,
+ * so break for the last one instead of letting
+ * the iterator do it. This prevents use-after-free
+ * of the bio.
+ */
+ if (is_last)
+ break;
}
+ bio = next;
}
return 0;
}
-static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
+static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
+ struct bio_vec *bvec)
{
int result;
void *kaddr = kmap(bvec->bv_page);
- result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
- MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
+ bvec->bv_len, MSG_WAITALL);
kunmap(bvec->bv_page);
return result;
}
/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
int result;
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
u16 hwq;
- int tag;
+ u32 tag;
reply.magic = 0;
- result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
+ result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
if (result <= 0) {
- dev_err(disk_to_dev(nbd->disk),
- "Receive control failed (result %d)\n", result);
+ if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
+ !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ dev_err(disk_to_dev(nbd->disk),
+ "Receive control failed (result %d)\n", result);
return ERR_PTR(result);
}
@@ -364,7 +382,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(int));
+ memcpy(&tag, reply.handle, sizeof(u32));
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -376,7 +394,6 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
-
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
@@ -390,7 +407,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, &bvec);
+ result = sock_recv_bvec(nbd, index, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -400,6 +417,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
cmd, bvec.bv_len);
}
+ } else {
+ /* See the comment in nbd_queue_rq. */
+ wait_for_completion(&cmd->send_complete);
}
return cmd;
}
@@ -418,25 +438,24 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
+struct recv_thread_args {
+ struct work_struct work;
+ struct nbd_device *nbd;
+ int index;
+};
+
+static void recv_work(struct work_struct *work)
{
+ struct recv_thread_args *args = container_of(work,
+ struct recv_thread_args,
+ work);
+ struct nbd_device *nbd = args->nbd;
struct nbd_cmd *cmd;
- int ret;
+ int ret = 0;
BUG_ON(nbd->magic != NBD_MAGIC);
-
- sk_set_memalloc(nbd->sock->sk);
-
- ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
- if (ret) {
- dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- return ret;
- }
-
- nbd_size_update(nbd, bdev);
-
while (1) {
- cmd = nbd_read_stat(nbd);
+ cmd = nbd_read_stat(nbd, args->index);
if (IS_ERR(cmd)) {
ret = PTR_ERR(cmd);
break;
@@ -445,10 +464,14 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_end_request(cmd);
}
- nbd_size_clear(nbd, bdev);
-
- device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- return ret;
+ /*
+ * We got an error, shut everybody down if this wasn't the result of a
+ * disconnect request.
+ */
+ if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+ sock_shutdown(nbd);
+ atomic_dec(&nbd->recv_threads);
+ wake_up(&nbd->recv_wq);
}
static void nbd_clear_req(struct request *req, void *data, bool reserved)
@@ -466,51 +489,60 @@ static void nbd_clear_que(struct nbd_device *nbd)
{
BUG_ON(nbd->magic != NBD_MAGIC);
- /*
- * Because we have set nbd->sock to NULL under the tx_lock, all
- * modifications to the list must have completed by now.
- */
- BUG_ON(nbd->sock);
-
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
-static void nbd_handle_cmd(struct nbd_cmd *cmd)
+static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
+ struct nbd_sock *nsock;
+
+ if (index >= nbd->num_connections) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on invalid socket\n");
+ goto error_out;
+ }
+
+ if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
+ goto error_out;
+ }
- if (req->cmd_type != REQ_TYPE_FS)
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_DRV_PRIV)
goto error_out;
- if (rq_data_dir(req) == WRITE &&
+ if (req->cmd_type == REQ_TYPE_FS &&
+ rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
- dev_err(disk_to_dev(nbd->disk),
- "Write on read-only\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Write on read-only\n");
goto error_out;
}
req->errors = 0;
- mutex_lock(&nbd->tx_lock);
- nbd->task_send = current;
- if (unlikely(!nbd->sock)) {
- mutex_unlock(&nbd->tx_lock);
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ nsock = nbd->socks[index];
+ mutex_lock(&nsock->tx_lock);
+ if (unlikely(!nsock->sock)) {
+ mutex_unlock(&nsock->tx_lock);
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
goto error_out;
}
- if (nbd_send_cmd(nbd, cmd) != 0) {
- dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
+ if (nbd_send_cmd(nbd, cmd, index) != 0) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed\n");
req->errors++;
nbd_end_request(cmd);
}
- nbd->task_send = NULL;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nsock->tx_lock);
return;
@@ -524,39 +556,70 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ /*
+ * Since we look at the bio's to send the request over the network we
+ * need to make sure the completion work doesn't mark this request done
+ * before we are done doing our send. This keeps us from dereferencing
+ * freed data if we have particularly fast completions (ie we get the
+ * completion before we exit sock_xmit on the last bvec) or in the case
+ * that the server is misbehaving (or there was an error) before we're
+ * done sending everything over the wire.
+ */
+ init_completion(&cmd->send_complete);
blk_mq_start_request(bd->rq);
- nbd_handle_cmd(cmd);
+ nbd_handle_cmd(cmd, hctx->queue_num);
+ complete(&cmd->send_complete);
+
return BLK_MQ_RQ_QUEUE_OK;
}
-static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
- int ret = 0;
-
- spin_lock_irq(&nbd->sock_lock);
+ struct nbd_sock **socks;
+ struct nbd_sock *nsock;
- if (nbd->sock) {
- ret = -EBUSY;
- goto out;
+ if (!nbd->task_setup)
+ nbd->task_setup = current;
+ if (nbd->task_setup != current) {
+ dev_err(disk_to_dev(nbd->disk),
+			"Device being set up by another task");
+ return -EINVAL;
}
- nbd->sock = sock;
+ socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
+ sizeof(struct nbd_sock *), GFP_KERNEL);
+ if (!socks)
+ return -ENOMEM;
+ nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+ if (!nsock)
+ return -ENOMEM;
-out:
- spin_unlock_irq(&nbd->sock_lock);
+ nbd->socks = socks;
+
+ mutex_init(&nsock->tx_lock);
+ nsock->sock = sock;
+ socks[nbd->num_connections++] = nsock;
- return ret;
+ return 0;
}
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
nbd->runtime_flags = 0;
nbd->blksize = 1024;
nbd->bytesize = 0;
set_capacity(nbd->disk, 0);
nbd->flags = 0;
nbd->tag_set.timeout = 0;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}
@@ -582,48 +645,68 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
blk_queue_write_cache(nbd->disk->queue, false, false);
}
+static void send_disconnects(struct nbd_device *nbd)
+{
+ struct nbd_request request = {};
+ int i, ret;
+
+ request.magic = htonl(NBD_REQUEST_MAGIC);
+ request.type = htonl(NBD_CMD_DISC);
+
+ for (i = 0; i < nbd->num_connections; i++) {
+ ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+ if (ret <= 0)
+ dev_err(disk_to_dev(nbd->disk),
+ "Send disconnect failed %d\n", ret);
+ }
+}
+
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
-/* Must be called with tx_lock held */
-
+/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case NBD_DISCONNECT: {
- struct request *sreq;
-
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
- sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
- if (!sreq)
- return -ENOMEM;
-
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
fsync_bdev(bdev);
- mutex_lock(&nbd->tx_lock);
- sreq->cmd_type = REQ_TYPE_DRV_PRIV;
+ mutex_lock(&nbd->config_lock);
/* Check again after getting mutex back. */
- if (!nbd->sock) {
- blk_mq_free_request(sreq);
+ if (!nbd->socks)
return -EINVAL;
- }
- set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
-
- nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
- blk_mq_free_request(sreq);
+ if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
+ &nbd->runtime_flags))
+ send_disconnects(nbd);
return 0;
}
-
+
case NBD_CLEAR_SOCK:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
+ nbd_bdev_reset(bdev);
+ /*
+ * We want to give the run thread a chance to wait for everybody
+	 * to clean up and then do its own cleanup.
+ */
+ if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
+ int i;
+
+ for (i = 0; i < nbd->num_connections; i++)
+ kfree(nbd->socks[i]);
+ kfree(nbd->socks);
+ nbd->socks = NULL;
+ nbd->num_connections = 0;
+ nbd->task_setup = NULL;
+ }
return 0;
case NBD_SET_SOCK: {
@@ -633,7 +716,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!sock)
return err;
- err = nbd_set_socket(nbd, sock);
+ err = nbd_add_socket(nbd, sock);
if (!err && max_part)
bdev->bd_invalidated = 1;
@@ -648,7 +731,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SIZE:
return nbd_size_set(nbd, bdev, nbd->blksize,
- arg / nbd->blksize);
+ div_s64(arg, nbd->blksize));
case NBD_SET_SIZE_BLOCKS:
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -662,26 +745,61 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_DO_IT: {
- int error;
+ struct recv_thread_args *args;
+ int num_connections = nbd->num_connections;
+ int error = 0, i;
if (nbd->task_recv)
return -EBUSY;
- if (!nbd->sock)
+ if (!nbd->socks)
return -EINVAL;
+ if (num_connections > 1 &&
+ !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
+ dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
+ error = -EINVAL;
+ goto out_err;
+ }
- /* We have to claim the device under the lock */
+ set_bit(NBD_RUNNING, &nbd->runtime_flags);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
+ args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
+ if (!args) {
+ error = -ENOMEM;
+ goto out_err;
+ }
nbd->task_recv = current;
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
nbd_parse_flags(nbd, bdev);
+ error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
+ if (error) {
+ dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+ goto out_recv;
+ }
+
+ nbd_size_update(nbd, bdev);
+
nbd_dev_dbg_init(nbd);
- error = nbd_thread_recv(nbd, bdev);
+ for (i = 0; i < num_connections; i++) {
+ sk_set_memalloc(nbd->socks[i]->sock->sk);
+ atomic_inc(&nbd->recv_threads);
+ INIT_WORK(&args[i].work, recv_work);
+ args[i].nbd = nbd;
+ args[i].index = i;
+ queue_work(system_long_wq, &args[i].work);
+ }
+ wait_event_interruptible(nbd->recv_wq,
+ atomic_read(&nbd->recv_threads) == 0);
+ for (i = 0; i < num_connections; i++)
+ flush_work(&args[i].work);
nbd_dev_dbg_close(nbd);
-
- mutex_lock(&nbd->tx_lock);
+ nbd_size_clear(nbd, bdev);
+ device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+out_recv:
+ mutex_lock(&nbd->config_lock);
nbd->task_recv = NULL;
-
+out_err:
sock_shutdown(nbd);
nbd_clear_que(nbd);
kill_bdev(bdev);
@@ -694,7 +812,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
error = -ETIMEDOUT;
nbd_reset(nbd);
-
return error;
}
@@ -726,9 +843,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
BUG_ON(nbd->magic != NBD_MAGIC);
- mutex_lock(&nbd->tx_lock);
+ mutex_lock(&nbd->config_lock);
error = __nbd_ioctl(bdev, nbd, cmd, arg);
- mutex_unlock(&nbd->tx_lock);
+ mutex_unlock(&nbd->config_lock);
return error;
}
@@ -748,8 +865,6 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
if (nbd->task_recv)
seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
- if (nbd->task_send)
- seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
return 0;
}
@@ -817,7 +932,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
@@ -873,9 +988,7 @@ static int nbd_init_request(void *data, struct request *rq,
unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
cmd->nbd = data;
- INIT_LIST_HEAD(&cmd->list);
return 0;
}
@@ -985,13 +1098,13 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
- spin_lock_init(&nbd_dev[i].sock_lock);
- mutex_init(&nbd_dev[i].tx_lock);
+ mutex_init(&nbd_dev[i].config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
+ init_waitqueue_head(&nbd_dev[i].recv_wq);
nbd_reset(&nbd_dev[i]);
add_disk(disk);
}
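The reworked nbd reply path above matches responses to requests by round-tripping a 32-bit blk-mq cookie in the wire handle instead of copying a bare tag. A sketch of that lookup, assuming the stock blk-mq tag helpers and hypothetical names:

#include <linux/blk-mq.h>

/* Submit side (sketch): u32 cookie = blk_mq_unique_tag(rq); placed in the handle. */
static struct request *demo_cookie_to_rq(struct blk_mq_tag_set *set, u32 cookie)
{
	u16 hwq = blk_mq_unique_tag_to_hwq(cookie);
	u32 tag = blk_mq_unique_tag_to_tag(cookie);

	if (hwq >= set->nr_hw_queues)
		return NULL;			/* bogus reply from the server */
	return blk_mq_tag_to_rq(set->tags[hwq], tag);
}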
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ba6f4a2e73db..4943ee22716e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -577,6 +577,7 @@ static void null_nvm_unregister(struct nullb *nullb)
#else
static int null_nvm_register(struct nullb *nullb)
{
+ pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 90fa4ac149db..95c98de92971 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -721,7 +721,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
rq->timeout = 60*HZ;
if (cgc->quiet)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
if (rq->errors)
@@ -944,39 +944,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
}
}
-/*
- * Copy all data for this packet to pkt->pages[], so that
- * a) The number of required segments for the write bio is minimized, which
- * is necessary for some scsi controllers.
- * b) The data can be used as cache to avoid read requests if we receive a
- * new write request for the same zone.
- */
-static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
-{
- int f, p, offs;
-
- /* Copy all data to pkt->pages[] */
- p = 0;
- offs = 0;
- for (f = 0; f < pkt->frames; f++) {
- if (bvec[f].bv_page != pkt->pages[p]) {
- void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
- void *vto = page_address(pkt->pages[p]) + offs;
- memcpy(vto, vfrom, CD_FRAMESIZE);
- kunmap_atomic(vfrom);
- bvec[f].bv_page = pkt->pages[p];
- bvec[f].bv_offset = offs;
- } else {
- BUG_ON(bvec[f].bv_offset != offs);
- }
- offs += CD_FRAMESIZE;
- if (offs >= PAGE_SIZE) {
- offs = 0;
- p++;
- }
- }
-}
-
static void pkt_end_io_read(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
@@ -1298,7 +1265,6 @@ try_next_bio:
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
@@ -1308,9 +1274,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* XXX: locking? */
for (f = 0; f < pkt->frames; f++) {
- bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+ struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+ if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
BUG();
}
pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
@@ -1327,12 +1294,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
- pkt_make_local_copy(pkt, bvec);
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt->cache_valid = 1;
- } else {
+ else
pkt->cache_valid = 0;
- }
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3822eae102db..abf805e332e2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -36,7 +36,6 @@
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
-#include <linux/scatterlist.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
@@ -270,8 +269,6 @@ struct skd_device {
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
- skd_irq_type_t irq_type;
- u32 msix_count;
struct skd_msix_entry *msix_entries;
struct pci_dev *pdev;
@@ -2138,12 +2135,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__,
- i, bp[i + 0], bp[i + 1], bp[i + 2],
- bp[i + 3], bp[i + 4], bp[i + 5],
- bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d msg[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2164,7 +2157,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
qcmd |= FIT_QCMD_MSGSIZE_64;
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
@@ -2177,11 +2169,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__, i,
- bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
- bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+ pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
+ skdev->name, __func__, __LINE__, i, &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2955,8 +2944,8 @@ static void skd_completion_worker(struct work_struct *work)
static void skd_isr_msg_from_dev(struct skd_device *skdev);
-irqreturn_t
-static skd_isr(int irq, void *ptr)
+static irqreturn_t
+skd_isr(int irq, void *ptr)
{
struct skd_device *skdev;
u32 intstat;
@@ -3821,10 +3810,6 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
*/
struct skd_msix_entry {
- int have_irq;
- u32 vector;
- u32 entry;
- struct skd_device *rsp;
char isr_name[30];
};
@@ -3853,193 +3838,121 @@ static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
{ "(Queue Full 3)", skd_qfull_isr },
};
-static void skd_release_msix(struct skd_device *skdev)
-{
- struct skd_msix_entry *qentry;
- int i;
-
- if (skdev->msix_entries) {
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
-
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
- }
-
- kfree(skdev->msix_entries);
- }
-
- if (skdev->msix_count)
- pci_disable_msix(skdev->pdev);
-
- skdev->msix_count = 0;
- skdev->msix_entries = NULL;
-}
-
static int skd_acquire_msix(struct skd_device *skdev)
{
int i, rc;
struct pci_dev *pdev = skdev->pdev;
- struct msix_entry *entries;
- struct skd_msix_entry *qentry;
-
- entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
- entries[i].entry = i;
- rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc) {
+ rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
pr_err("(%s): failed to enable MSI-X %d\n",
skd_name(skdev), rc);
- goto msix_out;
+ goto out;
}
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
- skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
- skdev->msix_count, GFP_KERNEL);
+ skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
+ sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
pr_err("(%s): msix table allocation error\n",
skd_name(skdev));
- goto msix_out;
- }
-
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
- qentry->rsp = NULL;
- qentry->have_irq = 0;
- pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name,
- i, qentry->vector, qentry->entry);
+ goto out;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ struct skd_msix_entry *qentry = &skdev->msix_entries[i];
+
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
"%s%d-msix %s", DRV_NAME, skdev->devno,
msix_entries[i].name);
- rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
+
+ rc = devm_request_irq(&skdev->pdev->dev,
+ pci_irq_vector(skdev->pdev, i),
+ msix_entries[i].handler, 0,
+ qentry->isr_name, skdev);
if (rc) {
pr_err("(%s): Unable to register(%d) MSI-X "
"handler %d: %s\n",
skd_name(skdev), rc, i, qentry->isr_name);
goto msix_out;
- } else {
- qentry->have_irq = 1;
- qentry->rsp = skdev;
}
}
+
pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, skdev->msix_count);
+ pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
- if (entries)
- kfree(entries);
- skd_release_msix(skdev);
+ while (--i >= 0)
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
+out:
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
+ struct pci_dev *pdev = skdev->pdev;
+ unsigned int irq_flag = PCI_IRQ_LEGACY;
int rc;
- struct pci_dev *pdev;
-
- pdev = skdev->pdev;
- skdev->msix_count = 0;
-RETRY_IRQ_TYPE:
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
+ if (skd_isr_type == SKD_IRQ_MSIX) {
rc = skd_acquire_msix(skdev);
if (!rc)
- pr_info("(%s): MSI-X %d irqs enabled\n",
- skd_name(skdev), skdev->msix_count);
- else {
- pr_err(
- "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_MSI;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_MSI:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
- DRV_NAME, skdev->devno);
- rc = pci_enable_msi_range(pdev, 1, 1);
- if (rc > 0) {
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
- skdev->isr_name, skdev);
- if (rc) {
- pci_disable_msi(pdev);
- pr_err(
- "(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
- goto RETRY_IRQ_LEGACY;
- }
- pr_info("(%s): MSI irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- } else {
-RETRY_IRQ_LEGACY:
- pr_err(
- "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_LEGACY:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name),
- "%s%d-legacy", DRV_NAME, skdev->devno);
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- IRQF_SHARED, skdev->isr_name, skdev);
- if (!rc)
- pr_info("(%s): LEGACY irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- else
- pr_err("(%s): request LEGACY irq error %d\n",
- skd_name(skdev), rc);
- break;
- default:
- pr_info("(%s): irq_type %d invalid, re-set to %d\n",
- skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
+ return 0;
+
+ pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+ skd_name(skdev), rc);
}
- return rc;
+
+ snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
+ skdev->devno);
+
+ if (skd_isr_type != SKD_IRQ_LEGACY)
+ irq_flag |= PCI_IRQ_MSI;
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
+ if (rc < 0) {
+ pr_err("(%s): failed to allocate the MSI interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+ pdev->msi_enabled ? 0 : IRQF_SHARED,
+ skdev->isr_name, skdev);
+ if (rc) {
+ pci_free_irq_vectors(pdev);
+ pr_err("(%s): failed to allocate interrupt %d\n",
+ skd_name(skdev), rc);
+ return rc;
+ }
+
+ return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
- skd_release_msix(skdev);
- break;
- case SKD_IRQ_MSI:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- pci_disable_msi(skdev->pdev);
- break;
- case SKD_IRQ_LEGACY:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- break;
- default:
- pr_err("(%s): wrong irq type %d!",
- skd_name(skdev), skdev->irq_type);
- break;
+ struct pci_dev *pdev = skdev->pdev;
+
+ if (skdev->msix_entries) {
+ int i;
+
+ for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ skdev);
+ }
+
+ kfree(skdev->msix_entries);
+ skdev->msix_entries = NULL;
+ } else {
+ devm_free_irq(&pdev->dev, pdev->irq, skdev);
}
+
+ pci_free_irq_vectors(pdev);
}
/*
@@ -4402,7 +4315,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- skdev->irq_type = skd_isr_type;
sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
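The skd interrupt rework above is a straight conversion to the pci_alloc_irq_vectors() family, which hides the MSI-X/MSI/legacy differences behind one allocation call plus pci_irq_vector() lookups. A sketch of the pattern with hypothetical names, assuming only those PCI helpers:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int demo_setup_irqs(struct pci_dev *pdev, irq_handler_t handler,
			   void *data, unsigned int nvec)
{
	int i, rc;

	rc = pci_alloc_irq_vectors(pdev, 1, nvec,
				   PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (rc < 0)
		return rc;			/* no interrupt type could be enabled */

	for (i = 0; i < rc; i++) {
		int err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
					   handler, 0, "demo-irq", data);
		if (err) {
			while (--i >= 0)
				devm_free_irq(&pdev->dev,
					      pci_irq_vector(pdev, i), data);
			pci_free_irq_vectors(pdev);
			return err;
		}
	}
	return rc;				/* vectors actually granted */
}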
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be90e15854ed..46f4c719fed9 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
+ if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
static int init_vq(struct virtio_blk *vblk)
{
- int err = 0;
+ int err;
int i;
vq_callback_t **callbacks;
const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
- vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+ vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
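The virtio-blk change above swaps open-coded kmalloc(n * size) calls for kmalloc_array(), whose only difference is an overflow check on the multiplication. A one-function sketch with a hypothetical name:

#include <linux/slab.h>

/* Hypothetical: returns NULL instead of silently wrapping when n is huge. */
static int *demo_alloc_table(size_t n)
{
	return kmalloc_array(n, sizeof(int), GFP_KERNEL);	/* kcalloc() also zeroes */
}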
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee752597..726c32e35db9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
case BLKIF_OP_WRITE:
ring->st_wr_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_ODIRECT;
+ operation_flags = REQ_SYNC | REQ_IDLE;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
- operation_flags = WRITE_FLUSH;
+ operation_flags = REQ_PREFLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
- if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
}
/* Wait on all outstanding I/O's and once that has been completed
- * issue the WRITE_FLUSH.
+ * issue the flush.
*/
if (drain)
xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation_flags != WRITE_FLUSH);
+ BUG_ON(operation_flags != REQ_PREFLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d86f1e..415e79b69d34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
int err;
- int state = 0, discard_enable;
+ int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev;
struct request_queue *q = bdev_get_queue(bdev);
- err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
- &discard_enable);
- if (err == 1 && !discard_enable)
+ if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- err = xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-persistent", "%u", &pers_grants);
- if (err <= 0)
- pers_grants = 0;
-
+ pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
+ 0);
be->blkif->vbd.feature_gnt_persistent = pers_grants;
be->blkif->vbd.overflow_max_grants = 0;
/*
* Read the number of hardware queues from frontend.
*/
- err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
- "%u", &requested_num_queues);
- if (err < 0) {
- requested_num_queues = 1;
- } else {
- if (requested_num_queues > xenblk_max_queues
- || requested_num_queues == 0) {
- /* Buggy or malicious guest. */
- xenbus_dev_fatal(dev, err,
- "guest requested %u queues, exceeding the maximum of %u.",
- requested_num_queues, xenblk_max_queues);
- return -ENOSYS;
- }
+ requested_num_queues = xenbus_read_unsigned(dev->otherend,
+ "multi-queue-num-queues",
+ 1);
+ if (requested_num_queues > xenblk_max_queues
+ || requested_num_queues == 0) {
+ /* Buggy or malicious guest. */
+ xenbus_dev_fatal(dev, err,
+ "guest requested %u queues, exceeding the maximum of %u.",
+ requested_num_queues, xenblk_max_queues);
+ return -ENOSYS;
}
be->blkif->nr_rings = requested_num_queues;
if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9908597c5209..b2bdfa81f929 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
const char *message = NULL;
struct xenbus_transaction xbt;
int err;
- unsigned int i, max_page_order = 0;
- unsigned int ring_page_order = 0;
+ unsigned int i, max_page_order;
+ unsigned int ring_page_order;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "max-ring-page-order", "%u", &max_page_order);
- if (err != 1)
- info->nr_ring_pages = 1;
- else {
- ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
- info->nr_ring_pages = 1 << ring_page_order;
- }
+ max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+ "max-ring-page-order", 0);
+ ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+ info->nr_ring_pages = 1 << ring_page_order;
for (i = 0; i < info->nr_rings; i++) {
struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ again:
static int negotiate_mq(struct blkfront_info *info)
{
- unsigned int backend_max_queues = 0;
- int err;
+ unsigned int backend_max_queues;
unsigned int i;
BUG_ON(info->nr_rings);
/* Check if backend supports multiple queues. */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &backend_max_queues);
- if (err < 0)
- backend_max_queues = 1;
-
+ backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+ "multi-queue-max-queues", 1);
info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
/* We need at least one ring. */
if (!info->nr_rings)
@@ -2043,8 +2035,9 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_mq_requeue_request(req);
+ blk_mq_requeue_request(req, false);
}
+ blk_mq_start_stopped_hw_queues(info->rq, true);
blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
@@ -2195,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
int err;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int discard_secure;
info->feature_discard = 1;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2206,10 +2198,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%u", &discard_secure);
- if (err > 0)
- info->feature_secdiscard = !!discard_secure;
+ info->feature_secdiscard =
+ !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
+ 0);
}
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2301,16 +2292,11 @@ out_of_memory:
*/
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
- int err;
- int barrier, flush, discard, persistent;
unsigned int indirect_segments;
info->feature_flush = 0;
info->feature_fua = 0;
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier);
-
/*
* If there's no "feature-barrier" defined, then it means
* we're dealing with a very old backend which writes
@@ -2318,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (err > 0 && barrier) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
info->feature_flush = 1;
info->feature_fua = 1;
}
@@ -2327,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
* And if there is "feature-flush-cache" use that above
* barriers.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush);
-
- if (err > 0 && flush) {
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
+ 0)) {
info->feature_flush = 1;
info->feature_fua = 0;
}
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard);
-
- if (err > 0 && discard)
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%d", &persistent);
- if (err <= 0)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
+ info->feature_persistent =
+ xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u",
- &indirect_segments);
- if (err <= 0)
- info->max_indirect_segments = 0;
- else
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-max-indirect-segments", 0);
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
}
/*
@@ -2420,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
* provide this. Assume physical sector size to be the same as
* sector_size in that case.
*/
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "physical-sector-size", "%u", &physical_sector_size);
- if (err != 1)
- physical_sector_size = sector_size;
-
+ physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+ "physical-sector-size",
+ sector_size);
blkfront_gather_backend_features(info);
for (i = 0; i < info->nr_rings; i++) {
err = blkfront_setup_indirect(&info->rinfo[i]);
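The xenstore reads above all collapse into xenbus_read_unsigned(), which returns the node's value or the supplied default when the node is absent or unparsable, removing the err/xenbus_scanf() dance. A sketch with a hypothetical caller:

#include <xen/xenbus.h>

static unsigned int demo_read_persistent(struct xenbus_device *dev)
{
	/* third argument is the fallback if "feature-persistent" is missing */
	return xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
}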
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..12046f4f00e4 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -160,82 +160,56 @@ int zcomp_decompress(struct zcomp_strm *zstrm,
dst, &dst_len);
}
-static int __zcomp_cpu_notifier(struct zcomp *comp,
- unsigned long action, unsigned long cpu)
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- break;
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
- break;
- default:
- break;
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ return 0;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ return 0;
}
-static int zcomp_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu = (unsigned long)pcpu;
- struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm;
- return __zcomp_cpu_notifier(comp, action, cpu);
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ return 0;
}
static int zcomp_init(struct zcomp *comp)
{
- unsigned long cpu;
int ret;
- comp->notifier.notifier_call = zcomp_cpu_notifier;
-
comp->stream = alloc_percpu(struct zcomp_strm *);
if (!comp->stream)
return -ENOMEM;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
- if (ret == NOTIFY_BAD)
- goto cleanup;
- }
- __register_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ if (ret < 0)
+ goto cleanup;
return 0;
cleanup:
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
+ free_percpu(comp->stream);
+ return ret;
}
void zcomp_destroy(struct zcomp *comp)
{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
-
+ cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
free_percpu(comp->stream);
kfree(comp);
}
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..41c1002a7d7d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -19,11 +19,12 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm * __percpu *stream;
- struct notifier_block notifier;
-
const char *name;
+ struct hlist_node node;
};
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04365b17ee67..15f58ab44d0b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/cpuhotplug.h>
#include "zram_drv.h"
@@ -1403,7 +1404,8 @@ static ssize_t hot_remove_store(struct class *class,
zram = idr_find(&zram_index_idr, dev_id);
if (zram) {
ret = zram_remove(zram);
- idr_remove(&zram_index_idr, dev_id);
+ if (!ret)
+ idr_remove(&zram_index_idr, dev_id);
} else {
ret = -ENODEV;
}
@@ -1412,8 +1414,14 @@ static ssize_t hot_remove_store(struct class *class,
return ret ? ret : count;
}
+/*
+ * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
+ * that reading from this file alters the state of your system -- it creates
+ * a new, uninitialized zram device and returns that device's device_id (or
+ * an error code if it fails to create a new device).
+ */
static struct class_attribute zram_control_class_attrs[] = {
- __ATTR_RO(hot_add),
+ __ATTR(hot_add, 0400, hot_add_show, NULL),
__ATTR_WO(hot_remove),
__ATTR_NULL,
};
@@ -1436,15 +1444,22 @@ static void destroy_devices(void)
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
int ret;
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+ if (ret < 0)
+ return ret;
+
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return ret;
}
@@ -1452,6 +1467,7 @@ static int __init zram_init(void)
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return -EBUSY;
}
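The zram/zcomp conversion above moves from raw CPU notifiers to a multi-instance hotplug state: the state is registered once with its prepare/dead callbacks, and each compressor then hangs an hlist node off it. A sketch mirroring that split, with hypothetical callback and function names:

#include <linux/cpuhotplug.h>

static int demo_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
static int demo_cpu_dead(unsigned int cpu, struct hlist_node *node);

static int demo_hotplug_init(void)		/* once, at module init */
{
	return cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE,
				       "block/demo:prepare",
				       demo_cpu_up_prepare, demo_cpu_dead);
}

static int demo_register_instance(struct hlist_node *node)	/* per device */
{
	/* callbacks now run for this node on every online and future CPU */
	return cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, node);
}

/* teardown order: cpuhp_state_remove_instance(), then cpuhp_remove_multi_state() */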
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index f742384b53f7..fc3caf4541ba 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -32,7 +32,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/slab.h>
#include <linux/of_irq.h>
#define BTM_HEADER_LEN 4
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index a2c921faaa12..910ec968f022 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -733,9 +733,7 @@ static int bcsp_open(struct hci_uart *hu)
skb_queue_head_init(&bcsp->rel);
skb_queue_head_init(&bcsp->unrel);
- init_timer(&bcsp->tbcsp);
- bcsp->tbcsp.function = bcsp_timed_event;
- bcsp->tbcsp.data = (u_long)hu;
+ setup_timer(&bcsp->tbcsp, bcsp_timed_event, (u_long)hu);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0879d64b1caf..90d0456b6744 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -204,9 +204,7 @@ static int h5_open(struct hci_uart *hu)
h5_reset_rx(h5);
- init_timer(&h5->timer);
- h5->timer.function = h5_timed_event;
- h5->timer.data = (unsigned long)hu;
+ setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);
h5->tx_win = H5_TX_WIN_MAX;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 6c867fbc56a7..05c230719a47 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -438,14 +438,11 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- init_timer(&qca->wake_retrans_timer);
- qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
- qca->wake_retrans_timer.data = (u_long)hu;
+ setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
+ (u_long)hu);
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
- init_timer(&qca->tx_idle_timer);
- qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
- qca->tx_idle_timer.data = (u_long)hu;
+ setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
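
The three hci_uart hunks above (hci_bcsp, hci_h5, hci_qca) are the same mechanical conversion: an open-coded init_timer() plus manual .function/.data assignments collapse into a single setup_timer() call. A sketch of the before/after shape on a hypothetical driver struct, using the timer API of this kernel generation (before the later timer_setup()/from_timer() rework):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_port {
	struct timer_list retrans_timer;
};

static void demo_retrans_timeout(unsigned long arg)
{
	struct demo_port *port = (struct demo_port *)arg;

	/* handle the timeout, then re-arm as a retransmit timer would */
	mod_timer(&port->retrans_timer, jiffies + HZ);
}

static void demo_open_old(struct demo_port *port)
{
	/* pre-conversion: three statements */
	init_timer(&port->retrans_timer);
	port->retrans_timer.function = demo_retrans_timeout;
	port->retrans_timer.data = (unsigned long)port;
}

static void demo_open_new(struct demo_port *port)
{
	/* post-conversion: identical behaviour, one call */
	setup_timer(&port->retrans_timer, demo_retrans_timeout,
		    (unsigned long)port);
}
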
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index dcc09739a54e..fde005ef9d36 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -17,7 +17,6 @@ config DEVMEM
config DEVKMEM
bool "/dev/kmem virtual device support"
- default y
help
Say Y here if you want to support the /dev/kmem device. The
/dev/kmem device is rarely used, but can be used for certain
@@ -542,6 +541,7 @@ config HANGCHECK_TIMER
config MMTIMER
tristate "MMTIMER Memory mapped RTC for SGI Altix"
depends on IA64_GENERIC || IA64_SGI_SN2
+ depends on POSIX_TIMERS
default y
help
The mmtimer device allows direct userspace access to the
@@ -578,7 +578,7 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
config TILE_SROM
- bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ tristate "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
default y
---help---
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 199b8e99f7d7..737187865269 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
unsigned long pa;
struct page *page;
- dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
- + agp->aperture.bus_base;
+ dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5136a7..ceff2fc524b1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -168,7 +168,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 0fcc9e69a346..661c82cde0f2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
return 0;
}
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
@@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (ret)
return ret;
- writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+ atmel_trng_enable(trng);
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
@@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
return 0;
err_register:
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return ret;
}
@@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
hwrng_unregister(&trng->rng);
- writel(TRNG_KEY, trng->base + TRNG_CR);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev)
static int atmel_trng_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(trng->clk);
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ atmel_trng_enable(trng);
+
+ return 0;
}
static const struct dev_pm_ops atmel_trng_pm_ops = {
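
The atmel-rng hunks factor the TRNG_CR writes into atmel_trng_enable()/atmel_trng_disable() helpers and make the power paths symmetric: suspend disables the block before clk_disable_unprepare(), resume checks clk_prepare_enable()'s return value before re-enabling the hardware, and the probe error path uses clk_disable_unprepare() to match. A minimal sketch of that symmetric suspend/resume pattern for a hypothetical clocked device; the register offset and enable bit are placeholders, not Atmel's:

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>

#define DEMO_CR		0x00		/* placeholder control register */
#define DEMO_CR_ENABLE	BIT(0)

struct demo_rng {
	void __iomem *base;
	struct clk *clk;
};

static void demo_rng_enable(struct demo_rng *d)
{
	writel(DEMO_CR_ENABLE, d->base + DEMO_CR);
}

static void demo_rng_disable(struct demo_rng *d)
{
	writel(0, d->base + DEMO_CR);
}

static int demo_rng_suspend(struct device *dev)
{
	struct demo_rng *d = dev_get_drvdata(dev);

	demo_rng_disable(d);		/* quiesce the IP first */
	clk_disable_unprepare(d->clk);	/* then gate its clock */
	return 0;
}

static int demo_rng_resume(struct device *dev)
{
	struct demo_rng *d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(d->clk);	/* may fail: propagate it */
	if (ret)
		return ret;

	demo_rng_enable(d);
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_rng_pm_ops, demo_rng_suspend, demo_rng_resume);
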
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89de5b4..f9766415ff10 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
+ memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
+ memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
+ memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;
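
The hw_random core hunks add a memset() of the shared rng_buffer/rng_fillbuf once their contents have been pushed into the entropy pool or copied to user space, so stale random bytes do not linger in long-lived kernel buffers. A small sketch of the same "use, then scrub" idea on a hypothetical consumer of the hwrng read callback; it scrubs a stack copy with memzero_explicit(), the variant that cannot be optimized away (the patch itself uses plain memset on buffers that remain reachable):

#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_pull_entropy(struct hwrng *rng, u8 *dst, size_t len)
{
	u8 tmp[64];
	int n;

	n = rng->read(rng, tmp, min(len, sizeof(tmp)), true);
	if (n > 0) {
		memcpy(dst, tmp, n);
		/* scrub the temporary copy once it has been consumed */
		memzero_explicit(tmp, sizeof(tmp));
	}
	return n;
}
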
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 58bef39f7286..119d698439ae 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = {
{ .compatible = "amlogic,meson-rng", },
{},
};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
static struct platform_driver meson_rng_driver = {
.probe = meson_rng_probe,
@@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = {
module_platform_driver(meson_rng_driver);
-MODULE_ALIAS("platform:meson-rng");
MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986402eb..841fee845ec9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
- /* no room for word data */
- if (maxsize < WORD_SZ)
- return 0;
-
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f5c26a5f6875..3ad86fdf954e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <asm/io.h>
@@ -63,10 +64,13 @@
#define OMAP2_RNG_OUTPUT_SIZE 0x4
#define OMAP4_RNG_OUTPUT_SIZE 0x8
+#define EIP76_RNG_OUTPUT_SIZE 0x10
enum {
- RNG_OUTPUT_L_REG = 0,
- RNG_OUTPUT_H_REG,
+ RNG_OUTPUT_0_REG = 0,
+ RNG_OUTPUT_1_REG,
+ RNG_OUTPUT_2_REG,
+ RNG_OUTPUT_3_REG,
RNG_STATUS_REG,
RNG_INTMASK_REG,
RNG_INTACK_REG,
@@ -82,7 +86,7 @@ enum {
};
static const u16 reg_map_omap2[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
+ [RNG_OUTPUT_0_REG] = 0x0,
[RNG_STATUS_REG] = 0x4,
[RNG_CONFIG_REG] = 0x28,
[RNG_REV_REG] = 0x3c,
@@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = {
};
static const u16 reg_map_omap4[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
- [RNG_OUTPUT_H_REG] = 0x4,
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
[RNG_STATUS_REG] = 0x8,
[RNG_INTMASK_REG] = 0xc,
[RNG_INTACK_REG] = 0x10,
@@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = {
[RNG_SYSCONFIG_REG] = 0x1FE4,
};
+static const u16 reg_map_eip76[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_OUTPUT_2_REG] = 0x8,
+ [RNG_OUTPUT_3_REG] = 0xc,
+ [RNG_STATUS_REG] = 0x10,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x7c,
+};
+
struct omap_rng_dev;
/**
* struct omap_rng_pdata - RNG IP block-specific data
@@ -127,6 +148,8 @@ struct omap_rng_dev {
void __iomem *base;
struct device *dev;
const struct omap_rng_pdata *pdata;
+ struct hwrng rng;
+ struct clk *clk;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
__raw_writel(val, priv->base + priv->pdata->regs[reg]);
}
-static int omap_rng_data_present(struct hwrng *rng, int wait)
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
{
struct omap_rng_dev *priv;
- int data, i;
+ int i, present;
priv = (struct omap_rng_dev *)rng->priv;
+ if (max < priv->pdata->data_size)
+ return 0;
+
for (i = 0; i < 20; i++) {
- data = priv->pdata->data_present(priv);
- if (data || !wait)
+ present = priv->pdata->data_present(priv);
+ if (present || !wait)
break;
- /* RNG produces data fast enough (2+ MBit/sec, even
- * during "rngtest" loads, that these delays don't
- * seem to trigger. We *could* use the RNG IRQ, but
- * that'd be higher overhead ... so why bother?
- */
+
udelay(10);
}
- return data;
-}
-
-static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct omap_rng_dev *priv;
- u32 data_size, i;
-
- priv = (struct omap_rng_dev *)rng->priv;
- data_size = priv->pdata->data_size;
+ if (!present)
+ return 0;
- for (i = 0; i < data_size / sizeof(u32); i++)
- data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+ memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+ priv->pdata->data_size);
if (priv->pdata->regs[RNG_INTACK_REG])
omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
- return data_size;
+
+ return priv->pdata->data_size;
}
static int omap_rng_init(struct hwrng *rng)
@@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng)
priv->pdata->cleanup(priv);
}
-static struct hwrng omap_rng_ops = {
- .name = "omap",
- .data_present = omap_rng_data_present,
- .data_read = omap_rng_data_read,
- .init = omap_rng_init,
- .cleanup = omap_rng_cleanup,
-};
static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
{
@@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ /* Number of 512 bit blocks of raw Noise Source output data that must
+ * be processed by either the Conditioning Function or the
+ * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+ * output value.
+ */
+ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+ /* Number of FRO samples that are XOR-ed together into one bit to be
+ * shifted into the main shift register
+ */
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ /* Enable all available FROs */
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+ /* Enable TRNG */
+ val = RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
static int omap4_rng_init(struct omap_rng_dev *priv)
{
u32 val;
@@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
+static struct omap_rng_pdata eip76_rng_pdata = {
+ .regs = (u16 *)reg_map_eip76,
+ .data_size = EIP76_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = eip76_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
static const struct of_device_id omap_rng_of_match[] = {
{
.compatible = "ti,omap2-rng",
@@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = {
.compatible = "ti,omap4-rng",
.data = &omap4_rng_pdata,
},
+ {
+ .compatible = "inside-secure,safexcel-eip76",
+ .data = &eip76_rng_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rng_of_match);
@@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
priv->pdata = match->data;
- if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+ if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+ of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "%s: error getting IRQ resource - %d\n",
@@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+ priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ dev_err(&pdev->dev, "unable to enable the clk, "
+ "err = %d\n", err);
+ }
}
return 0;
}
@@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- omap_rng_ops.priv = (unsigned long)priv;
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
+
+ priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
@@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->rng.name) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev)
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
- goto err_ioremap;
+ goto err_register;
- ret = hwrng_register(&omap_rng_ops);
+ ret = hwrng_register(&priv->rng);
if (ret)
goto err_register;
- dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+ dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
omap_rng_read(priv, RNG_REV_REG));
return 0;
err_register:
priv->base = NULL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&omap_rng_ops);
+ hwrng_unregister(&priv->rng);
priv->pdata->cleanup(priv);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
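
The omap-rng rework replaces the legacy .data_present/.data_read pair and the single global struct hwrng with a per-device struct hwrng embedded in the driver's private data and one .read callback that polls for readiness and copies data_size bytes with memcpy_fromio(). A condensed sketch of that newer hwrng contract; the register offsets, output size, and names here are made up for illustration:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/io.h>

#define DEMO_OUTPUT		0x00	/* placeholder registers */
#define DEMO_STATUS		0x10
#define DEMO_STATUS_RDY		BIT(0)
#define DEMO_OUTPUT_SIZE	8

struct demo_rng_dev {
	void __iomem *base;
	struct hwrng rng;		/* one hwrng instance per device */
};

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct demo_rng_dev *priv = (struct demo_rng_dev *)rng->priv;
	int i, present = 0;

	if (max < DEMO_OUTPUT_SIZE)
		return 0;

	for (i = 0; i < 20; i++) {
		present = readl(priv->base + DEMO_STATUS) & DEMO_STATUS_RDY;
		if (present || !wait)
			break;
		udelay(10);
	}
	if (!present)
		return 0;

	memcpy_fromio(data, priv->base + DEMO_OUTPUT, DEMO_OUTPUT_SIZE);
	return DEMO_OUTPUT_SIZE;
}

static int demo_rng_register(struct device *dev, struct demo_rng_dev *priv)
{
	priv->rng.name = dev_name(dev);
	priv->rng.read = demo_rng_read;
	priv->rng.priv = (unsigned long)priv;

	return devm_hwrng_register(dev, &priv->rng);
}

Embedding the hwrng in the private struct is what lets several instances (OMAP2, OMAP4, EIP76) coexist, which the old file-scope omap_rng_ops could not.
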
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 11dc9b7c09ce..9b5e68a71d01 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
u32 t;
unsigned int timeout = RNG_TIMEOUT;
- if (max < 8)
- return 0;
-
do {
t = readl(priv->base + RNGRCNT) & RCNT_MASK;
if (t == 64) {
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 63ce51d09af1..d9f46b437cc2 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -28,7 +28,6 @@
static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
u64 buffer[PLPAR_HCALL_BUFSIZE];
- size_t size = max < 8 ? max : 8;
int rc;
rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
@@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
- memcpy(data, buffer, size);
+ memcpy(data, buffer, 8);
/* The hypervisor interface returns 64 bits */
- return size;
+ return 8;
}
/**
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 44ce80606944..d1f5bb534e0e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -70,21 +70,17 @@ enum {
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
*
- * Padlock instructions can generate a spurious DNA fault, so
- * we have to call them in the context of irq_ts_save/restore()
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
*/
static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
- int ts_state;
-
- ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
- irq_ts_restore(ts_state);
return eax_out;
}
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b49e61320952..fc9e8891eae3 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
}
static const struct of_device_id bt_bmc_match[] = {
- { .compatible = "aspeed,ast2400-bt-bmc" },
+ { .compatible = "aspeed,ast2400-ibt-bmc" },
{ },
};
@@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver);
MODULE_DEVICE_TABLE(of, bt_bmc_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
-MODULE_DESCRIPTION("Linux device interface to the BT interface");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1786574536b2..a21407de46ae 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -989,4 +989,3 @@ module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
-MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886819f5..92e53acf2cd2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -158,15 +158,16 @@ struct seq_table {
* Store the information in a msgid (long) to allow us to find a
* sequence table entry from the msgid.
*/
-#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
do { \
- seq = ((msgid >> 26) & 0x3f); \
- seqid = (msgid & 0x3fffff); \
+ seq = (((msgid) >> 26) & 0x3f); \
+ seqid = ((msgid) & 0x3ffffff); \
} while (0)
-#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
struct ipmi_channel {
unsigned char medium;
@@ -4645,3 +4646,4 @@ MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
" interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
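
The ipmi_msghandler macros pack a 6-bit sequence-table index and a 26-bit sequence id into one long msgid. The old STORE macro masked the index with 0xff even though GET only recovers 6 bits, and the old GET/NEXT macros masked the id to 22 bits (0x3fffff) while STORE packed 26 (0x3ffffff); the corrected macros make the two directions agree: 6 bits in positions 31..26, 26 bits in 25..0. A small worked round-trip check, written as plain userspace C purely for illustration:

#include <assert.h>

#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)

int main(void)
{
	long msgid = STORE_SEQ_IN_MSGID(0x15, 0x3ffffff);
	unsigned int seq, seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
	assert(seq == 0x15);		/* all 6 index bits survive */
	assert(seqid == 0x3ffffff);	/* all 26 id bits survive */
	assert(NEXT_SEQID(seqid) == 0);	/* the id wraps cleanly at 2^26 */
	return 0;
}

With the old 0x3fffff masks the second and third assertions would fail for any id using the top four bits, which is the inconsistency the patch removes.
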
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c0146012..2a7c425ddfa7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -789,7 +789,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -812,7 +812,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -1764,7 +1764,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
s = strchr(*curr, ',');
if (!s) {
- printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+ pr_warn(PFX "No hotmod %s given.\n", name);
return -EINVAL;
}
*s = '\0';
@@ -1777,7 +1777,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
}
}
- printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
return -EINVAL;
}
@@ -1788,16 +1788,12 @@ static int check_hotmod_int_op(const char *curr, const char *option,
if (strcmp(curr, name) == 0) {
if (!option) {
- printk(KERN_WARNING PFX
- "No option given for '%s'\n",
- curr);
+ pr_warn(PFX "No option given for '%s'\n", curr);
return -EINVAL;
}
*val = simple_strtoul(option, &n, 0);
if ((*n != '\0') || (*option == '\0')) {
- printk(KERN_WARNING PFX
- "Bad option given for '%s'\n",
- curr);
+ pr_warn(PFX "Bad option given for '%s'\n", curr);
return -EINVAL;
}
return 1;
@@ -1877,8 +1873,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
addr = simple_strtoul(curr, &n, 0);
if ((*n != '\0') || (*curr == '\0')) {
- printk(KERN_WARNING PFX "Invalid hotmod address"
- " '%s'\n", curr);
+ pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
break;
}
@@ -1921,9 +1916,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
continue;
rv = -EINVAL;
- printk(KERN_WARNING PFX
- "Invalid hotmod option '%s'\n",
- curr);
+ pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
goto out;
}
@@ -2003,7 +1996,7 @@ static int hardcode_find_bmc(void)
return -ENOMEM;
info->addr_source = SI_HARDCODED;
- printk(KERN_INFO PFX "probing via hardcoded address\n");
+ pr_info(PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -2012,9 +2005,8 @@ static int hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, was invalid: %s\n",
- i, si_type[i]);
+ pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type[i]);
kfree(info);
continue;
}
@@ -2030,9 +2022,8 @@ static int hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, but port and address were "
- "not set or set to zero.\n", i);
+ pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ i);
kfree(info);
continue;
}
@@ -2173,18 +2164,18 @@ static int try_init_spmi(struct SPMITable *spmi)
int rv;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
}
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
+ pr_err(PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
info->addr_source = SI_SPMI;
- printk(KERN_INFO PFX "probing via SPMI\n");
+ pr_info(PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2201,8 +2192,8 @@ static int try_init_spmi(struct SPMITable *spmi)
kfree(info);
return -EIO;
default:
- printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2238,15 +2229,15 @@ static int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
+ pr_warn(PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
rv = add_smi(info);
if (rv)
@@ -2356,12 +2347,12 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data\n");
+ pr_err(PFX "Could not allocate SI data\n");
return;
}
info->addr_source = SI_SMBIOS;
- printk(KERN_INFO PFX "probing via SMBIOS\n");
+ pr_info(PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2391,8 +2382,8 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
- ipmi_data->addr_space);
+ pr_warn(PFX "Unknown SMBIOS I/O Address type: %d\n",
+ ipmi_data->addr_space);
return;
}
info->io.addr_data = ipmi_data->base_addr;
@@ -2410,9 +2401,9 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info->irq_setup = std_irq_setup;
pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
if (add_smi(info))
kfree(info);
@@ -3141,9 +3132,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from get"
- " global enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3154,8 +3143,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING PFX "Invalid return from get global"
- " enables command, cannot enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3173,9 +3161,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from set"
- " global, enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3185,8 +3171,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING PFX "Invalid return from get global,"
- "enables command, not enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3463,8 +3448,16 @@ static int is_new_interface(struct smi_info *info)
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
- if (e->io.addr_data == info->io.addr_data)
+ if (e->io.addr_data == info->io.addr_data) {
+ /*
+ * This is a cheap hack, ACPI doesn't have a defined
+ * slave address but SMBIOS does. Pick it up from
+ * any source that has it available.
+ */
+ if (info->slave_addr && !e->slave_addr)
+ e->slave_addr = info->slave_addr;
return 0;
+ }
}
return 1;
@@ -3474,17 +3467,18 @@ static int add_smi(struct smi_info *new_smi)
{
int rv = 0;
- printk(KERN_INFO PFX "Adding %s-specified %s state machine",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_CONT " duplicate interface\n");
+ pr_info(PFX "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
rv = -EBUSY;
goto out_err;
}
- printk(KERN_CONT "\n");
+ pr_info(PFX "Adding %s-specified %s state machine\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
@@ -3502,15 +3496,14 @@ static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
+ char *init_name = NULL;
- printk(KERN_INFO PFX "Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
+ pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
switch (new_smi->si_type) {
case SI_KCS:
@@ -3531,11 +3524,30 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
+ /* Do this early so it's available for logs. */
+ if (!new_smi->dev) {
+ init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", 0);
+
+ /*
+ * If we don't already have a device from something
+ * else (like PCI), then register a new one.
+ */
+ new_smi->pdev = platform_device_alloc("ipmi_si",
+ new_smi->intf_num);
+ if (!new_smi->pdev) {
+ pr_err(PFX "Unable to allocate platform device\n");
+ goto out_err;
+ }
+ new_smi->dev = &new_smi->pdev->dev;
+ new_smi->dev->driver = &ipmi_driver.driver;
+ /* Nulled by device_add() */
+ new_smi->dev->init_name = init_name;
+ }
+
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR PFX
- "Could not allocate state machine memory\n");
+ pr_err(PFX "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3545,14 +3557,14 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR PFX "Could not set up I/O space\n");
+ dev_err(new_smi->dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "Interface detection failed\n");
+ dev_err(new_smi->dev, "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3564,8 +3576,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "There appears to be no BMC"
- " at this location\n");
+ dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
goto out_err;
}
@@ -3604,27 +3615,12 @@ static int try_smi_init(struct smi_info *new_smi)
atomic_set(&new_smi->req_events, 1);
}
- if (!new_smi->dev) {
- /*
- * If we don't already have a device from something
- * else (like PCI), then register a new one.
- */
- new_smi->pdev = platform_device_alloc("ipmi_si",
- new_smi->intf_num);
- if (!new_smi->pdev) {
- printk(KERN_ERR PFX
- "Unable to allocate platform device\n");
- goto out_err;
- }
- new_smi->dev = &new_smi->pdev->dev;
- new_smi->dev->driver = &ipmi_driver.driver;
-
+ if (new_smi->pdev) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR PFX
- "Unable to register system interface device:"
- " %d\n",
- rv);
+ dev_err(new_smi->dev,
+ "Unable to register system interface device: %d\n",
+ rv);
goto out_err;
}
new_smi->dev_registered = true;
@@ -3668,6 +3664,9 @@ static int try_smi_init(struct smi_info *new_smi)
dev_info(new_smi->dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->si_type]);
+ WARN_ON(new_smi->dev->init_name != NULL);
+ kfree(init_name);
+
return 0;
out_err_stop_timer:
@@ -3712,8 +3711,14 @@ out_err:
if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
new_smi->dev_registered = false;
+ new_smi->pdev = NULL;
+ } else if (new_smi->pdev) {
+ platform_device_put(new_smi->pdev);
+ new_smi->pdev = NULL;
}
+ kfree(init_name);
+
return rv;
}
@@ -3732,8 +3737,7 @@ static int init_ipmi_si(void)
if (si_tryplatform) {
rv = platform_driver_register(&ipmi_driver);
if (rv) {
- printk(KERN_ERR PFX "Unable to register "
- "driver: %d\n", rv);
+ pr_err(PFX "Unable to register driver: %d\n", rv);
return rv;
}
}
@@ -3753,7 +3757,7 @@ static int init_ipmi_si(void)
}
}
- printk(KERN_INFO "IPMI System Interface driver.\n");
+ pr_info("IPMI System Interface driver.\n");
/* If the user gave us a device, they presumably want us to use it */
if (!hardcode_find_bmc())
@@ -3763,8 +3767,7 @@ static int init_ipmi_si(void)
if (si_trypci) {
rv = pci_register_driver(&ipmi_pci_driver);
if (rv)
- printk(KERN_ERR PFX "Unable to register "
- "PCI driver: %d\n", rv);
+ pr_err(PFX "Unable to register PCI driver: %d\n", rv);
else
pci_registered = true;
}
@@ -3826,8 +3829,7 @@ static int init_ipmi_si(void)
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
- printk(KERN_WARNING PFX
- "Unable to find any System Interface(s)\n");
+ pr_warn(PFX "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
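
Most of the ipmi_si_intf.c churn above is a logging cleanup: multi-line printk(KERN_WARNING PFX ...) calls become single pr_warn()/pr_err()/pr_info() calls, user-visible strings are unwrapped onto one line so they remain easy to grep, and messages tied to a probed device move to dev_err()/dev_info() once a struct device exists. A short sketch of the three forms, with PFX standing in for the driver's own prefix as in the diff:

#include <linux/device.h>
#include <linux/printk.h>

#define PFX "demo: "

static void demo_log_styles(struct device *dev, int rv)
{
	/* old style: explicit level macro plus prefix, often wrapped */
	printk(KERN_WARNING PFX "Unable to register driver: %d\n", rv);

	/* preferred: pr_*() helper, message kept on one line */
	pr_warn(PFX "Unable to register driver: %d\n", rv);

	/* once a device is known, attach the message to it */
	dev_err(dev, "Unable to register driver: %d\n", rv);
}
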
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673ffff00be..cca6e5bc1cea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -174,7 +174,6 @@ enum ssif_stat_indexes {
};
struct ssif_addr_info {
- unsigned short addr;
struct i2c_board_info binfo;
char *adapter_name;
int debug;
@@ -1154,10 +1153,6 @@ static bool ssif_dbg_probe;
module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
-static int use_thread;
-module_param(use_thread, int, 0);
-MODULE_PARM_DESC(use_thread, "Use the thread interface.");
-
static bool ssif_tryacpi = true;
module_param_named(tryacpi, ssif_tryacpi, bool, 0);
MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
@@ -1405,6 +1400,34 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
return false;
}
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+ struct ssif_addr_info *info;
+
+ if (slave_addr)
+ return slave_addr;
+
+ /*
+ * Came in without a slave address, search around to see if
+ * the other sources have a slave address. This lets us pick
+ * up an SMBIOS slave address when using ACPI.
+ */
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr != client->addr)
+ continue;
+ if (info->adapter_name && client->adapter->name &&
+ strcmp_nospace(info->adapter_name,
+ client->adapter->name))
+ continue;
+ if (info->slave_addr) {
+ slave_addr = info->slave_addr;
+ break;
+ }
+ }
+
+ return slave_addr;
+}
+
/*
* Global enables we care about.
*/
@@ -1447,6 +1470,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ slave_addr = find_slave_address(client, slave_addr);
+
pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
@@ -1935,7 +1960,7 @@ static int decode_dmi(const struct dmi_device *dmi_dev)
slave_addr = data[6];
}
- return new_ssif_client(myaddr, NULL, 0, 0, SI_SMBIOS);
+ return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
}
static void dmi_iterator(void)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f3f92d5fcda0..a697ca0cab1e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* be because another thread has installed the pte first, so it
* is no problem.
*/
- vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ vm_insert_pfn(vma, vmf->address, pfn);
return VM_FAULT_NOPAGE;
}
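
Both the alpha-agp and mspec hunks track the same mm API change: struct vm_fault no longer exposes a user pointer named virtual_address, it carries the faulting address directly as vmf->address (an unsigned long), so the casts disappear. A minimal sketch of a .fault handler in the two-argument form these drivers use at this point; the pfn lookup is a placeholder, and ignoring a racing insertion mirrors the mspec comment above:

#include <linux/mm.h>

static unsigned long demo_addr_to_pfn(unsigned long offset)
{
	return 0;	/* placeholder: translate the offset to a real pfn */
}

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vma->vm_start;
	unsigned long pfn = demo_addr_to_pfn(offset);

	/* a racing thread may have installed the pte already; that's fine */
	vm_insert_pfn(vma, vmf->address, pfn);

	return VM_FAULT_NOPAGE;
}
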
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 8d3dfb0c8a26..1d1e7da8ad27 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,6 +43,17 @@ config CARDMAN_4040
(http://www.omnikey.com/), or a current development version of OpenCT
(http://www.opensc-project.org/opensc).
+config SCR24X
+ tristate "SCR24x Chip Card Interface support"
+ depends on PCMCIA
+ help
+ Enable support for the SCR24x PCMCIA Chip Card Interface.
+
+ To compile this driver as a module, choose M here.
+ The module will be called scr24x_cs.
+
+ If unsure say N.
+
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
depends on PCMCIA && NETDEVICES && TTY
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae20985d57..5b836bc21406 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
+obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c115217c79ae..e051fc8aa7d7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -14,7 +14,7 @@
* (C) 2000,2001,2002,2003,2004 Omnikey AG
*
* (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
- * - Adhere to Kernel CodingStyle
+ * - Adhere to Kernel process/coding-style.rst
* - Port to 2.6.13 "new" style PCMCIA
* - Check for copy_{from,to}_user return values
* - Use nonseekable_open()
@@ -151,7 +151,7 @@ static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
/* This table doesn't use spaces after the comma between fields and thus
- * violates CodingStyle. However, I don't really think wrapping it around will
+ * violates process/coding-style.rst. However, I don't really think wrapping it around will
* make it any clearer to read -HW */
static unsigned char fi_di_table[10][14] = {
/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
new file mode 100644
index 000000000000..f6b43d9350f0
--- /dev/null
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -0,0 +1,373 @@
+/*
+ * SCR24x PCMCIA Smart Card Reader Driver
+ *
+ * Copyright (C) 2005-2006 TL Sudheendran
+ * Copyright (C) 2016 Lubomir Rintel
+ *
+ * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#define CCID_HEADER_SIZE 10
+#define CCID_LENGTH_OFFSET 1
+#define CCID_MAX_LEN 271
+
+#define SCR24X_DATA(n) (1 + n)
+#define SCR24X_CMD_STATUS 7
+#define CMD_START 0x40
+#define CMD_WRITE_BYTE 0x41
+#define CMD_READ_BYTE 0x42
+#define STATUS_BUSY 0x80
+
+struct scr24x_dev {
+ struct device *dev;
+ struct cdev c_dev;
+ unsigned char buf[CCID_MAX_LEN];
+ int devno;
+ struct mutex lock;
+ struct kref refcnt;
+ u8 __iomem *regs;
+};
+
+#define SCR24X_DEVS 8
+static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
+
+static struct class *scr24x_class;
+static dev_t scr24x_devt;
+
+static void scr24x_delete(struct kref *kref)
+{
+ struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
+ refcnt);
+
+ kfree(dev);
+}
+
+static int scr24x_wait_ready(struct scr24x_dev *dev)
+{
+ u_char status;
+ int timeout = 100;
+
+ do {
+ status = ioread8(dev->regs + SCR24X_CMD_STATUS);
+ if (!(status & STATUS_BUSY))
+ return 0;
+
+ msleep(20);
+ } while (--timeout);
+
+ return -EIO;
+}
+
+static int scr24x_open(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = container_of(inode->i_cdev,
+ struct scr24x_dev, c_dev);
+
+ kref_get(&dev->refcnt);
+ filp->private_data = dev;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int scr24x_release(struct inode *inode, struct file *filp)
+{
+ struct scr24x_dev *dev = filp->private_data;
+
+ /* We must not take the dev->lock here as scr24x_delete()
+ * might be called to remove the dev structure altogether.
+ * We don't need the lock anyway, since after the reference
+ * acquired in probe() is released in remove() the chrdev
+ * is already unregistered and no one can possibly acquire
+ * a reference via open() anymore. */
+ kref_put(&dev->refcnt, scr24x_delete);
+ return 0;
+}
+
+static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
+{
+ size_t i, y;
+ int ret;
+
+ for (i = offset; i < limit; i += 5) {
+ iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ for (y = 0; y < 5 && i + y < limit; y++)
+ dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
+ }
+
+ return 0;
+}
+
+static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ int ret;
+ int len;
+
+ if (count < CCID_HEADER_SIZE)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ len = CCID_HEADER_SIZE;
+ ret = read_chunk(dev, 0, len);
+ if (ret < 0)
+ goto out;
+
+ len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
+ if (len > sizeof(dev->buf)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = read_chunk(dev, CCID_HEADER_SIZE, len);
+ if (ret < 0)
+ goto out;
+
+ if (len < count)
+ count = len;
+
+ if (copy_to_user(buf, dev->buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static ssize_t scr24x_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scr24x_dev *dev = filp->private_data;
+ size_t i, y;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->lock))
+ return -ERESTARTSYS;
+
+ if (!dev->dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (count > sizeof(dev->buf)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(dev->buf, buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < count; i += 5) {
+ for (y = 0; y < 5 && i + y < count; y++)
+ iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
+
+ iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
+ ret = scr24x_wait_ready(dev);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static const struct file_operations scr24x_fops = {
+ .owner = THIS_MODULE,
+ .read = scr24x_read,
+ .write = scr24x_write,
+ .open = scr24x_open,
+ .release = scr24x_release,
+ .llseek = no_llseek,
+};
+
+static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
+{
+ if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
+ return -ENODEV;
+ return pcmcia_request_io(link);
+}
+
+static int scr24x_probe(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
+ if (dev->devno >= SCR24X_DEVS) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mutex_init(&dev->lock);
+ kref_init(&dev->refcnt);
+
+ link->priv = dev;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
+ if (ret < 0)
+ goto err;
+
+ dev->dev = &link->dev;
+ dev->regs = devm_ioport_map(&link->dev,
+ link->resource[PCMCIA_IOPORT_0]->start,
+ resource_size(link->resource[PCMCIA_IOPORT_0]));
+ if (!dev->regs) {
+ ret = -EIO;
+ goto err;
+ }
+
+ cdev_init(&dev->c_dev, &scr24x_fops);
+ dev->c_dev.owner = THIS_MODULE;
+ dev->c_dev.ops = &scr24x_fops;
+ ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
+ if (ret < 0)
+ goto err;
+
+ ret = pcmcia_enable_device(link);
+ if (ret < 0) {
+ pcmcia_disable_device(link);
+ goto err;
+ }
+
+ device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
+ NULL, "scr24x%d", dev->devno);
+
+ dev_info(&link->dev, "SCR24x Chip Card Interface\n");
+ return 0;
+
+err:
+ if (dev->devno < SCR24X_DEVS)
+ clear_bit(dev->devno, scr24x_minors);
+ kfree (dev);
+ return ret;
+}
+
+static void scr24x_remove(struct pcmcia_device *link)
+{
+ struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
+
+ device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
+ mutex_lock(&dev->lock);
+ pcmcia_disable_device(link);
+ cdev_del(&dev->c_dev);
+ clear_bit(dev->devno, scr24x_minors);
+ dev->dev = NULL;
+ mutex_unlock(&dev->lock);
+
+ kref_put(&dev->refcnt, scr24x_delete);
+}
+
+static const struct pcmcia_device_id scr24x_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
+ 0x53cb94f9, 0xbfdf89a5),
+ PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
+ PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
+ PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
+
+static struct pcmcia_driver scr24x_driver = {
+ .owner = THIS_MODULE,
+ .name = "scr24x_cs",
+ .probe = scr24x_probe,
+ .remove = scr24x_remove,
+ .id_table = scr24x_ids,
+};
+
+static int __init scr24x_init(void)
+{
+ int ret;
+
+ scr24x_class = class_create(THIS_MODULE, "scr24x");
+ if (IS_ERR(scr24x_class))
+ return PTR_ERR(scr24x_class);
+
+ ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
+ if (ret < 0) {
+ class_destroy(scr24x_class);
+ return ret;
+ }
+
+ ret = pcmcia_register_driver(&scr24x_driver);
+ if (ret < 0) {
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+ }
+
+ return ret;
+}
+
+static void __exit scr24x_exit(void)
+{
+ pcmcia_unregister_driver(&scr24x_driver);
+ unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+ class_destroy(scr24x_class);
+}
+
+module_init(scr24x_init);
+module_exit(scr24x_exit);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
+MODULE_LICENSE("GPL");
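
The new scr24x driver above shows the usual lifetime pattern for a char device whose hardware can vanish: the private struct is refcounted with a kref, open() takes a reference, release() drops it, and remove() clears the "device present" state under the lock before dropping the probe-time reference, so the memory outlives the PCMCIA card for as long as any file descriptor stays open. A stripped-down sketch of just that lifetime handling; the names are illustrative, not the driver's:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_dev {
	struct cdev c_dev;
	struct mutex lock;
	struct kref refcnt;
	bool present;			/* cleared when the hardware goes away */
};

static void demo_delete(struct kref *kref)
{
	kfree(container_of(kref, struct demo_dev, refcnt));
}

static int demo_open(struct inode *inode, struct file *filp)
{
	struct demo_dev *dev = container_of(inode->i_cdev,
					    struct demo_dev, c_dev);

	kref_get(&dev->refcnt);		/* pin the struct for this fd */
	filp->private_data = dev;
	return nonseekable_open(inode, filp);
}

static int demo_release(struct inode *inode, struct file *filp)
{
	struct demo_dev *dev = filp->private_data;

	kref_put(&dev->refcnt, demo_delete);
	return 0;
}

static void demo_remove(struct demo_dev *dev)
{
	mutex_lock(&dev->lock);
	dev->present = false;		/* later I/O returns -ENODEV */
	cdev_del(&dev->c_dev);
	mutex_unlock(&dev->lock);

	kref_put(&dev->refcnt, demo_delete);	/* drop probe()'s reference */
}
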
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d28922df01d7..a7dd5f4f2c5a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4248,7 +4248,6 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
- .ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..02819e0703c8 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -86,6 +86,9 @@ struct pp_struct {
long default_inactivity;
};
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
/* pp_struct.flags bitfields */
#define PP_CLAIMED (1<<0)
#define PP_EXCL (1<<1)
@@ -294,7 +297,7 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
- printk(KERN_WARNING "%s: no associated port!\n", name);
+ pr_warn("%s: no associated port!\n", name);
kfree(name);
return -ENXIO;
}
@@ -305,10 +308,10 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
+ kfree(name);
if (!pdev) {
- printk(KERN_WARNING "%s: failed to register device!\n", name);
- kfree(name);
+ pr_warn("%s: failed to register device!\n", name);
return -ENXIO;
}
@@ -748,10 +751,7 @@ static int pp_release(struct inode *inode, struct file *file)
}
if (pp->pdev) {
- const char *name = pp->pdev->name;
-
parport_unregister_device(pp->pdev);
- kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
@@ -792,13 +792,29 @@ static const struct file_operations pp_fops = {
static void pp_attach(struct parport *port)
{
- device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
- NULL, "parport%d", port->number);
+ struct device *ret;
+
+ if (devices[port->number])
+ return;
+
+ ret = device_create(ppdev_class, port->dev,
+ MKDEV(PP_MAJOR, port->number), NULL,
+ "parport%d", port->number);
+ if (IS_ERR(ret)) {
+ pr_err("Failed to create device parport%d\n",
+ port->number);
+ return;
+ }
+ devices[port->number] = ret;
}
static void pp_detach(struct parport *port)
{
+ if (!devices[port->number])
+ return;
+
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+ devices[port->number] = NULL;
}
static int pp_probe(struct pardevice *par_dev)
@@ -825,8 +841,7 @@ static int __init ppdev_init(void)
int err = 0;
if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
- printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
- PP_MAJOR);
+ pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
return -EIO;
}
ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -836,11 +851,11 @@ static int __init ppdev_init(void)
}
err = parport_register_driver(&pp_driver);
if (err < 0) {
- printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
+ pr_warn(CHRDEV ": unable to register with parport\n");
goto out_class;
}
- printk(KERN_INFO PP_VERSION "\n");
+ pr_info(PP_VERSION "\n");
goto out;
out_class:
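
The ppdev change caches the struct device returned by device_create() in a per-port table, both to avoid creating the node twice when a port is reported more than once and to actually check the return value: device_create() returns an ERR_PTR() on failure, never NULL, so it must be tested with IS_ERR(). A sketch of that attach/detach bookkeeping for a hypothetical class; the major number and limits are placeholders:

#include <linux/device.h>
#include <linux/err.h>

#define DEMO_MAX_PORTS	16
#define DEMO_MAJOR	99		/* placeholder major number */

static struct class *demo_class;
static struct device *demo_devices[DEMO_MAX_PORTS];

static void demo_attach(int number)
{
	struct device *dev;

	if (demo_devices[number])	/* already created for this port */
		return;

	dev = device_create(demo_class, NULL, MKDEV(DEMO_MAJOR, number),
			    NULL, "demo%d", number);
	if (IS_ERR(dev)) {		/* ERR_PTR, not NULL, on failure */
		pr_err("demo: failed to create device demo%d\n", number);
		return;
	}
	demo_devices[number] = dev;
}

static void demo_detach(int number)
{
	if (!demo_devices[number])
		return;

	device_destroy(demo_class, MKDEV(DEMO_MAJOR, number));
	demo_devices[number] = NULL;
}
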
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 10e56323f390..ec07f0e99732 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -285,7 +285,7 @@ scdrv_write(struct file *file, const char __user *buf,
DECLARE_WAITQUEUE(wait, current);
if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&sd->sd_wlock);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
up(&sd->sd_wbs);
return -EAGAIN;
}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 398800edb2cc..3d4cca64b2d4 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -312,7 +312,8 @@ ATTRIBUTE_GROUPS(srom_dev);
static char *srom_devnode(struct device *dev, umode_t *mode)
{
- *mode = S_IRUGO | S_IWUSR;
+ if (mode)
+ *mode = 0644;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9faa0b1e7766..277186d3b668 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -32,7 +32,7 @@ config TCG_TIS_CORE
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
- depends on X86
+ depends on X86 || OF
select TCG_TIS_CORE
---help---
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a385fb8c17de..a05b1ebd0b26 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,16 +2,10 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o
-
-ifdef CONFIG_ACPI
- tpm-y += tpm_eventlog.o tpm_acpi.o
-else
-ifdef CONFIG_TCG_IBMVTPM
- tpm-y += tpm_eventlog.o tpm_of.o
-endif
-endif
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+ tpm_eventlog.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
+tpm-$(CONFIG_OF) += tpm_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e5950131bd90..a77262d31911 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(tpm_put_ops);
*
* The return'd chip has been tpm_try_get_ops'd and must be released via
* tpm_put_ops
- */
+ */
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *chip, *res = NULL;
@@ -103,7 +103,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ chip = idr_find(&dev_nums_idr, chip_num);
if (chip && !tpm_try_get_ops(chip))
res = chip;
}
@@ -127,6 +127,7 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
+ kfree(chip->log.bios_event_log);
kfree(chip);
}
@@ -276,27 +277,6 @@ static void tpm_del_char_device(struct tpm_chip *chip)
up_write(&chip->ops_sem);
}
-static int tpm1_chip_register(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return 0;
-
- tpm_sysfs_add_device(chip);
-
- chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
-
- return 0;
-}
-
-static void tpm1_chip_unregister(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
-}
-
static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
@@ -363,20 +343,20 @@ int tpm_chip_register(struct tpm_chip *chip)
return rc;
}
- rc = tpm1_chip_register(chip);
- if (rc)
+ tpm_sysfs_add_device(chip);
+
+ rc = tpm_bios_log_setup(chip);
+ if (rc != 0 && rc != -ENODEV)
return rc;
tpm_add_ppi(chip);
rc = tpm_add_char_device(chip);
if (rc) {
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
return rc;
}
- chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
tpm_chip_unregister(chip);
@@ -402,12 +382,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
- if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
- return;
-
tpm_del_legacy_sysfs(chip);
-
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
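
Two details of the tpm-chip.c hunks generalize well: chip lookup now calls idr_find() directly instead of the internal idr_find_slowpath() helper, and the BIOS event-log buffer is freed in tpm_dev_release() alongside the chip itself, so it lives exactly as long as the structure that owns it. A small idr usage sketch in the same spirit, with a hypothetical object type and a mutex guarding the idr as the TPM code does:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_chip {
	int dev_num;
};

static DEFINE_IDR(demo_nums_idr);
static DEFINE_MUTEX(demo_idr_lock);

static struct demo_chip *demo_chip_alloc(void)
{
	struct demo_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

	if (!chip)
		return NULL;

	mutex_lock(&demo_idr_lock);
	chip->dev_num = idr_alloc(&demo_nums_idr, chip, 0, 256, GFP_KERNEL);
	mutex_unlock(&demo_idr_lock);

	if (chip->dev_num < 0) {
		kfree(chip);
		return NULL;
	}
	return chip;
}

static struct demo_chip *demo_chip_find(int dev_num)
{
	struct demo_chip *chip;

	mutex_lock(&demo_idr_lock);
	chip = idr_find(&demo_nums_idr, dev_num);	/* plain lookup */
	mutex_unlock(&demo_idr_lock);
	return chip;
}

static void demo_chip_free(struct demo_chip *chip)
{
	mutex_lock(&demo_idr_lock);
	idr_remove(&demo_nums_idr, chip->dev_num);
	mutex_unlock(&demo_idr_lock);
	kfree(chip);
}
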
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8de61876f633..a2688ac2b48f 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#include "tpm_eventlog.h"
@@ -356,6 +357,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_lock(&chip->tpm_mutex);
+ if (chip->dev.parent)
+ pm_runtime_get_sync(chip->dev.parent);
+
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(&chip->dev,
@@ -397,6 +401,9 @@ out_recv:
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
+ if (chip->dev.parent)
+ pm_runtime_put_sync(chip->dev.parent);
+
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_unlock(&chip->tpm_mutex);
return rc;
@@ -437,26 +444,29 @@ static const struct tpm_input_header tpm_getcap_header = {
.ordinal = TPM_ORD_GET_CAP
};
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
tpm_cmd.header.in = tpm_getcap_header;
- if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
- tpm_cmd.params.getcap_in.cap = subcap_id;
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
- tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_FLAG);
else
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_PROP);
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = subcap_id;
+ tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
}
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
desc);
@@ -488,12 +498,14 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
int tpm_get_timeouts(struct tpm_chip *chip)
{
- struct tpm_cmd_t tpm_cmd;
+ cap_t cap;
unsigned long new_timeout[4];
unsigned long old_timeout[4];
- struct duration_t *duration_cap;
ssize_t rc;
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
@@ -506,46 +518,30 @@ int tpm_get_timeouts(struct tpm_chip *chip)
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- NULL);
-
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(&chip->dev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- 0, NULL);
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
}
- if (rc) {
- dev_err(&chip->dev,
- "A TPM error (%zd) occurred attempting to determine the timeouts\n",
- rc);
- goto duration;
- }
-
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return -EINVAL;
+ if (rc)
+ return rc;
- old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
- old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
- old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
- old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+ old_timeout[0] = be32_to_cpu(cap.timeout.a);
+ old_timeout[1] = be32_to_cpu(cap.timeout.b);
+ old_timeout[2] = be32_to_cpu(cap.timeout.c);
+ old_timeout[3] = be32_to_cpu(cap.timeout.d);
memcpy(new_timeout, old_timeout, sizeof(new_timeout));
/*
@@ -583,29 +579,17 @@ int tpm_get_timeouts(struct tpm_chip *chip)
chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
-duration:
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
-
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- "attempting to determine the durations");
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations");
if (rc)
return rc;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return -EINVAL;
-
- duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->duration[TPM_SHORT] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
chip->duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
chip->duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
@@ -619,6 +603,8 @@ duration:
chip->duration_adjusted = true;
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -726,6 +712,14 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static const struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
+};
+
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or ANY
@@ -736,14 +730,6 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* isn't, protect against the chip disappearing, by incrementing
* the module usage count.
*/
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_RESULT_SIZE 34
-static const struct tpm_input_header pcrextend_header = {
- .tag = TPM_TAG_RQU_COMMAND,
- .length = cpu_to_be32(34),
- .ordinal = TPM_ORD_PCR_EXTEND
-};
-
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
@@ -813,9 +799,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
continue;
}
- if (rc < TPM_HEADER_SIZE)
- return -EFAULT;
-
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4af9fb2..848ad6580b46 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -193,7 +193,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
be32_to_cpu(cap.manufacturer_id));
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (!rc) {
str += sprintf(str,
@@ -204,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
@@ -284,6 +284,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c97f6a6..1ae976894257 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -35,11 +35,14 @@
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include "tpm_eventlog.h"
+
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
+ TPM_NUM_EVENT_LOG_FILES = 3,
};
enum tpm_timeout {
@@ -139,10 +142,15 @@ enum tpm2_startup_types {
#define TPM_PPI_VERSION_LEN 3
enum tpm_chip_flags {
- TPM_CHIP_FLAG_REGISTERED = BIT(0),
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+};
+
+struct tpm_chip_seqops {
+ struct tpm_chip *chip;
+ const struct seq_operations *seqops;
};
struct tpm_chip {
@@ -156,6 +164,10 @@ struct tpm_chip {
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+ struct tpm_bios_log log;
+ struct tpm_chip_seqops bin_log_seqops;
+ struct tpm_chip_seqops ascii_log_seqops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -171,7 +183,7 @@ struct tpm_chip {
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- struct dentry **bios_dir;
+ struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
const struct attribute_group *groups[3];
unsigned int groups_cnt;
@@ -282,21 +294,20 @@ typedef union {
} cap_t;
enum tpm_capabilities {
- TPM_CAP_FLAG = cpu_to_be32(4),
- TPM_CAP_PROP = cpu_to_be32(5),
- CAP_VERSION_1_1 = cpu_to_be32(0x06),
- CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
};
enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
- TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
- TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
- TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
- TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
- TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
- TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
};
struct tpm_getcap_params_in {
@@ -484,7 +495,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags);
ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
unsigned int flags, const char *desc);
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
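The tpm.h hunk above stores the capability constants in host byte order and leaves the cpu_to_be32() conversion to the single place where the wire structure is filled in (see the tpm_getcap() change earlier). A small sketch of that convention follows, with invented MY_CAP_* constants and a getcap_in structure that only mimics the real command layout.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Invented wire structure: big-endian on the wire, host-order constants. */
struct getcap_in {
	__be32 cap;
	__be32 subcap_size;
	__be32 subcap;
};

#define MY_CAP_PROP			5	/* host order, like TPM_CAP_PROP above */
#define MY_CAP_PROP_TIS_TIMEOUT		0x115

static void fill_getcap(struct getcap_in *in, u32 cap, u32 subcap)
{
	/* the only place where host order is converted to wire order */
	in->cap = cpu_to_be32(cap);
	in->subcap_size = cpu_to_be32(4);
	in->subcap = cpu_to_be32(subcap);
}

Keeping the constants in host order means callers can compare and pass them without caring about endianness, and sparse can check the __be32 fields at the one conversion point.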
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58c939..da5b782a9731 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -680,7 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
* @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 565a9478cb94..b7718c95fd0b 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -6,10 +6,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog extended by the TCG BIOS of PC platform
+ * Access to the event log extended by the TCG BIOS of PC platform
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,29 +46,28 @@ struct acpi_tcpa {
};
/* read binary bios log */
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_acpi(struct tpm_chip *chip)
{
struct acpi_tcpa *buff;
acpi_status status;
void __iomem *virt;
u64 len, start;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- printk(KERN_ERR
- "%s: ERROR - Eventlog already initialized\n",
- __func__);
- return -EFAULT;
- }
+ log = &chip->log;
+
+ /* Unfortunately, ACPI does not associate the event log with a specific
+ * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
status = acpi_get_table(ACPI_SIG_TCPA, 1,
(struct acpi_table_header **)&buff);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
- __func__);
- return -EIO;
- }
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
switch(buff->platform_class) {
case BIOS_SERVER:
@@ -81,29 +81,29 @@ int read_log(struct tpm_bios_log *log)
break;
}
if (!len) {
- printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
}
/* malloc EventLog space */
log->bios_event_log = kmalloc(len, GFP_KERNEL);
- if (!log->bios_event_log) {
- printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
- __func__);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + len;
virt = acpi_os_map_iomem(start, len);
- if (!virt) {
- kfree(log->bios_event_log);
- printk("%s: ERROR - Unable to map memory\n", __func__);
- return -EIO;
- }
+ if (!virt)
+ goto err;
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
return 0;
+
+err:
+ kfree(log->bios_event_log);
+ log->bios_event_log = NULL;
+ return -EIO;
+
}
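tpm_read_log_acpi() above locates the TCPA table, maps the firmware log region and copies it into a kmalloc'd buffer. Here is a condensed sketch of that map-and-copy step, assuming the physical start address and length have already been read from the table; error handling is reduced to returning NULL, collapsing the driver's -ENOMEM/-EIO distinction.

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Copy a firmware-owned log out of physical memory; start/len would come
 * from the TCPA table located with acpi_get_table(). */
static void *copy_firmware_log(u64 start, size_t len)
{
	void __iomem *virt;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return NULL;

	virt = acpi_os_map_iomem(start, len);
	if (!virt) {
		kfree(buf);
		return NULL;
	}

	memcpy_fromio(buf, virt, len);
	acpi_os_unmap_iomem(virt, len);

	return buf;
}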
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870af916c..717b6b47c042 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -83,7 +84,71 @@ struct crb_priv {
u32 cmd_size;
};
-static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
+/**
+ * crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * We do not wait here, since a subsequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with the ACPI start method.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->cca->req);
+ /* we don't really care when this settles */
+
+ return 0;
+}
+
+/**
+ * crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll until the device acknowledges it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with the ACPI start method.
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __maybe_unused crb_cmd_ready(struct device *dev,
+ struct crb_priv *priv)
+{
+ ktime_t stop, start;
+
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->cca->req);
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(TPM2_TIMEOUT_C));
+ do {
+ if (!(ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY))
+ return 0;
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ if (ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
static u8 crb_status(struct tpm_chip *chip)
{
@@ -196,21 +261,6 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_DRV_STS_COMPLETE,
};
-static int crb_init(struct acpi_device *device, struct crb_priv *priv)
-{
- struct tpm_chip *chip;
-
- chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- dev_set_drvdata(&chip->dev, priv);
- chip->acpi_dev_handle = device->handle;
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- return tpm_chip_register(chip);
-}
-
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
struct resource *io_res = data;
@@ -249,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct list_head resources;
struct resource io_res;
struct device *dev = &device->dev;
+ u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
u64 rsp_pa;
@@ -276,12 +327,27 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (IS_ERR(priv->cca))
return PTR_ERR(priv->cca);
- cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
- (u64) ioread32(&priv->cca->cmd_pa_low);
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+ */
+ ret = crb_cmd_ready(dev, priv);
+ if (ret)
+ return ret;
+
+ pa_high = ioread32(&priv->cca->cmd_pa_high);
+ pa_low = ioread32(&priv->cca->cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
cmd_size = ioread32(&priv->cca->cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
- if (IS_ERR(priv->cmd))
- return PTR_ERR(priv->cmd);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
rsp_pa = le64_to_cpu(rsp_pa);
@@ -289,7 +355,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (cmd_pa != rsp_pa) {
priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
- return PTR_ERR_OR_ZERO(priv->rsp);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
}
/* According to the PTP specification, overlapping command and response
@@ -297,18 +364,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if (cmd_size != rsp_size) {
dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+
priv->cmd_size = cmd_size;
priv->rsp = priv->cmd;
- return 0;
+
+out:
+ crb_go_idle(dev, priv);
+
+ return ret;
}
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
+ struct tpm_chip *chip;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
@@ -346,7 +420,33 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
return rc;
- return crb_init(device, priv);
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = crb_cmd_ready(dev, priv);
+ if (rc)
+ return rc;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ rc = tpm_chip_register(chip);
+ if (rc) {
+ crb_go_idle(dev, priv);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return rc;
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -356,9 +456,34 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
+ pm_runtime_disable(dev);
+
return 0;
}
+#ifdef CONFIG_PM
+static int crb_pm_runtime_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_go_idle(dev, priv);
+}
+
+static int crb_pm_runtime_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_cmd_ready(dev, priv);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+ SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+};
+
static struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index e7228863290e..11bb1138a828 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -7,10 +7,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog created by a system's firmware / BIOS
+ * Access to the event log created by a system's firmware / BIOS
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -72,7 +73,8 @@ static const char* tcpa_pc_event_id_strings[] = {
static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcpa_event *event;
@@ -119,7 +121,8 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *limit = log->bios_event_log_end;
u32 converted_event_size;
u32 converted_event_type;
@@ -260,13 +263,10 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
static int tpm_bios_measurements_release(struct inode *inode,
struct file *file)
{
- struct seq_file *seq = file->private_data;
- struct tpm_bios_log *log = seq->private;
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
- if (log) {
- kfree(log->bios_event_log);
- kfree(log);
- }
+ put_device(&chip->dev);
return seq_release(inode, file);
}
@@ -304,151 +304,159 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_binary_bios_measurements_show,
};
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
+static int tpm_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
- struct tpm_bios_log *log;
struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
-
- if ((err = read_log(log)))
- goto out_free;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
/* now register seq file */
- err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ err = seq_open(file, seqops);
if (!err) {
seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ seq->private = chip;
}
-out:
return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
}
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
- .open = tpm_ascii_bios_measurements_open,
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tpm_bios_measurements_release,
};
-static int tpm_binary_bios_measurements_open(struct inode *inode,
- struct file *file)
+static int tpm_read_log(struct tpm_chip *chip)
{
- int err;
- struct tpm_bios_log *log;
- struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
+ int rc;
- if ((err = read_log(log)))
- goto out_free;
-
- /* now register seq file */
- err = seq_open(file, &tpm_binary_b_measurments_seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
}
-out:
- return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
-}
-
-static const struct file_operations tpm_binary_bios_measurements_ops = {
- .open = tpm_binary_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
-};
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
-static int is_bad(void *p)
-{
- if (!p)
- return 1;
- if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
- return 1;
- return 0;
+ return tpm_read_log_of(chip);
}
-struct dentry **tpm_bios_log_setup(const char *name)
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
{
- struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
-
- tpm_dir = securityfs_create_dir(name, NULL);
- if (is_bad(tpm_dir))
- goto out;
-
- bin_file =
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_read_log(chip);
+ if (rc)
+ return rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
securityfs_create_file("binary_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_binary_bios_measurements_ops);
- if (is_bad(bin_file))
- goto out_tpm;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
- ascii_file =
+ chip->bios_dir[cnt] =
securityfs_create_file("ascii_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_ascii_bios_measurements_ops);
- if (is_bad(ascii_file))
- goto out_bin;
-
- ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
- if (!ret)
- goto out_ascii;
-
- ret[0] = ascii_file;
- ret[1] = bin_file;
- ret[2] = tpm_dir;
-
- return ret;
-
-out_ascii:
- securityfs_remove(ascii_file);
-out_bin:
- securityfs_remove(bin_file);
-out_tpm:
- securityfs_remove(tpm_dir);
-out:
- return NULL;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
}
-void tpm_bios_log_teardown(struct dentry **lst)
+void tpm_bios_log_teardown(struct tpm_chip *chip)
{
int i;
-
- for (i = 0; i < 3; i++)
- securityfs_remove(lst[i]);
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and to check it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
}
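tpm_bios_log_teardown() above works around the fact that securityfs_remove() does not synchronise against a concurrent open(): i_private is cleared under inode_lock() before removal, and the open path re-checks it under the same lock, so open() either takes a device reference or fails with -ENODEV. Here is a stripped-down sketch of that handshake, storing a bare struct device pointer in i_private instead of the driver's tpm_chip_seqops.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/security.h>

static int my_open(struct inode *inode, struct file *file)
{
	struct device *dev;

	inode_lock(inode);
	dev = inode->i_private;
	if (!dev) {
		inode_unlock(inode);
		return -ENODEV;		/* the file is being torn down */
	}
	get_device(dev);		/* dropped again in ->release() */
	inode_unlock(inode);

	file->private_data = dev;
	return 0;
}

static void my_teardown(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	inode_lock(inode);
	inode->i_private = NULL;	/* later opens now fail with -ENODEV */
	inode_unlock(inode);

	securityfs_remove(dentry);
}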
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 8de62b09be51..1660d74ea79a 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -73,20 +73,24 @@ enum tcpa_pc_event_ids {
HOST_TABLE_OF_DEVICES,
};
-int read_log(struct tpm_bios_log *log);
-
-#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
- defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(const char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
#else
-static inline struct dentry **tpm_bios_log_setup(const char *name)
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
{
- return NULL;
+ return -ENODEV;
}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
{
+ return -ENODEV;
}
#endif
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+
#endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 570f30c5c5f4..7dee42d7b5e0 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -2,6 +2,7 @@
* Copyright 2012 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
@@ -20,55 +21,38 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
const u32 *sizep;
const u64 *basep;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
- return -EFAULT;
- }
-
- np = of_find_node_by_name(NULL, "vtpm");
- if (!np) {
- pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
return -ENODEV;
- }
sizep = of_get_property(np, "linux,sml-size", NULL);
- if (sizep == NULL) {
- pr_err("%s: ERROR - SML size not found\n", __func__);
- goto cleanup_eio;
- }
- if (*sizep == 0) {
- pr_err("%s: ERROR - event log area empty\n", __func__);
- goto cleanup_eio;
- }
-
basep = of_get_property(np, "linux,sml-base", NULL);
- if (basep == NULL) {
- pr_err("%s: ERROR - SML not found\n", __func__);
- goto cleanup_eio;
+ if (sizep == NULL && basep == NULL)
+ return -ENODEV;
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ if (*sizep == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
}
log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
- if (!log->bios_event_log) {
- pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
- __func__);
- of_node_put(np);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + *sizep;
memcpy(log->bios_event_log, __va(*basep), *sizep);
- of_node_put(np);
return 0;
-
-cleanup_eio:
- of_node_put(np);
- return -EIO;
}
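tpm_read_log_of() above requires both the linux,sml-base and linux,sml-size properties on the parent node and treats a missing pair as "no event log". The sketch below reads the same properties, but uses the of_property_read_u32()/of_property_read_u64() helpers instead of raw of_get_property(), and assumes, as the driver does, that the log sits in the kernel's linear mapping so __va() is valid.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

static int read_sml(struct device_node *np, void **log, size_t *log_size)
{
	u32 size;
	u64 base;
	bool has_size, has_base;

	has_size = !of_property_read_u32(np, "linux,sml-size", &size);
	has_base = !of_property_read_u64(np, "linux,sml-base", &base);
	if (!has_size && !has_base)
		return -ENODEV;		/* firmware simply has no event log */
	if (!has_size || !has_base || size == 0)
		return -EIO;		/* half-specified or empty log */

	*log = kmemdup(__va((unsigned long)base), size, GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	*log_size = size;
	return 0;
}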
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index eaf5730d79eb..0127af130cb1 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,6 +28,8 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -354,12 +356,21 @@ static int tpm_tis_plat_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
.remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
},
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index e3bf31b37138..7993678954a2 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -180,12 +180,19 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0, burstcnt, rc;
- while (size < count &&
- wait_for_tpm_stat(chip,
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->timeout_c,
- &priv->read_queue, true) == 0) {
- burstcnt = min_t(int, get_burstcount(chip), count - size);
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + size);
@@ -229,8 +236,11 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(&chip->dev, "Error left over data\n");
@@ -271,7 +281,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
while (count < len - 1) {
- burstcnt = min_t(int, get_burstcount(chip), len - count - 1);
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
@@ -279,8 +295,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count += burstcnt;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -293,8 +312,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
if (rc < 0)
goto out_err;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -755,20 +777,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
- /* Very early on issue a command to the TPM in polling mode to make
- * sure it works. May as well use that command to set the proper
- * timeouts for the driver.
- */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
+ /* Before doing irq testing issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 9a940332c157..5463b58af26e 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
*
* Author: Stefan Berger <stefanb@us.ibm.com>
*
@@ -41,6 +42,7 @@ struct proxy_dev {
long state; /* internal state */
#define STATE_OPENED_FLAG BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
size_t req_len; /* length of queued TPM request */
size_t resp_len; /* length of queued TPM response */
@@ -369,12 +371,9 @@ static void vtpm_proxy_work(struct work_struct *work)
rc = tpm_chip_register(proxy_dev->chip);
if (rc)
- goto err;
-
- return;
-
-err:
- vtpm_proxy_fops_undo_open(proxy_dev);
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
}
/*
@@ -515,7 +514,8 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
*/
vtpm_proxy_fops_undo_open(proxy_dev);
- tpm_chip_unregister(proxy_dev->chip);
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
vtpm_proxy_delete_proxy_dev(proxy_dev);
}
@@ -524,6 +524,50 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Code related to the control device /dev/vtpmx
*/
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
+
/*
* vtpmx_fops_ioctl: ioctl on /dev/vtpmx
*
@@ -531,34 +575,11 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Returns 0 on success, a negative error code otherwise.
*/
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
+ unsigned long arg)
{
- void __user *argp = (void __user *)arg;
- struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
- struct vtpm_proxy_new_dev vtpm_new_dev;
- struct file *file;
-
switch (ioctl) {
case VTPM_PROXY_IOC_NEW_DEV:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- vtpm_new_dev_p = argp;
- if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
- sizeof(vtpm_new_dev)))
- return -EFAULT;
- file = vtpm_proxy_create_device(&vtpm_new_dev);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
- sizeof(vtpm_new_dev))) {
- put_unused_fd(vtpm_new_dev.fd);
- fput(file);
- return -EFAULT;
- }
-
- fd_install(vtpm_new_dev.fd, file);
- return 0;
-
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
default:
return -ENOIOCTLCMD;
}
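vtpmx_ioc_new_dev() above keeps the usual ordering for handing a new file descriptor to userspace: reserve the fd, build the file, copy the result out, and only then make the fd visible. A generic sketch of that ordering follows; struct my_result, my_new_dev() and the "[my-dev]" anon-inode name are invented, and the real driver builds its file inside vtpm_proxy_create_device().

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

struct my_result {
	int fd;				/* handed back to userspace */
};

static long my_new_dev(struct my_result __user *argp,
		       const struct file_operations *my_fops)
{
	struct my_result res;
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve, but do not publish */
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[my-dev]", my_fops, NULL, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	res.fd = fd;
	if (copy_to_user(argp, &res, sizeof(res))) {
		put_unused_fd(fd);	/* the fd never became visible */
		fput(file);
		return -EFAULT;
	}

	fd_install(fd, file);		/* point of no return */
	return 0;
}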
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f483bba..5aaa268f3a78 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
chip = dev_get_drvdata(&dev->dev);
- tpm_chip_unregister(chip);
ring_free(priv);
return rv;
}
@@ -337,18 +336,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
static void backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- int val;
-
switch (backend_state) {
case XenbusStateInitialised:
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
- if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-protocol-v2", "%d", &val) < 0)
- val = 0;
- if (!val) {
+ if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+ 0)) {
xenbus_dev_fatal(dev, -EINVAL,
"vTPM protocol 2 required");
return;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d433b1db1fdd..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->inbuf_lock);
+ do {
+ spin_lock_irq(&port->inbuf_lock);
+ buf = virtqueue_detach_unused_buf(port->in_vq);
+ spin_unlock_irq(&port->inbuf_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
/* Free pending buffers from the out-queue. */
- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->outvq_lock);
+ do {
+ spin_lock_irq(&port->outvq_lock);
+ buf = virtqueue_detach_unused_buf(port->out_vq);
+ spin_unlock_irq(&port->outvq_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
}
/*
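The virtio_console change above stops freeing buffers while the in/out queue spinlocks are held: each iteration detaches one unused buffer under the lock and frees it after dropping the lock, presumably because free_buf() may sleep. Below is a generic sketch of that detach-then-free loop; struct my_queue and detach_one() are stand-ins for the virtqueue and virtqueue_detach_unused_buf(), and kfree() stands in for free_buf().

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_queue {
	void *priv;		/* opaque stand-in for struct virtqueue */
};

static void *detach_one(struct my_queue *q)
{
	/* stand-in for virtqueue_detach_unused_buf(); NULL when empty */
	return NULL;
}

static void drain_queue(spinlock_t *lock, struct my_queue *q)
{
	void *buf;

	do {
		spin_lock_irq(lock);
		buf = detach_one(q);
		spin_unlock_irq(lock);
		if (buf)
			kfree(buf);	/* freed with the lock released; the real
					 * free_buf() may sleep */
	} while (buf);
}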
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 6a8ac04bedeb..56c1998ced3e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -33,7 +33,7 @@ source "drivers/clk/versatile/Kconfig"
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77620/77686/77802 MFD"
- depends on MFD_MAX77686 || MFD_MAX77620
+ depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
---help---
This driver supports Maxim 77620/77686/77802 crystal oscillator
clock.
@@ -119,7 +119,7 @@ config COMMON_CLK_CS2000_CP
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS1X/S5M8767 MFD"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
---help---
This driver supports S2MPS11/S2MPS14/S5M8767 crystal oscillator
clock. These multi-function devices have two (S2MPS14) or three
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index f21e9b7afd1a..b5ae5311b0a2 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -1,7 +1,6 @@
config CLK_BCM_63XX
bool "Broadcom BCM63xx clock support"
depends on ARCH_BCM_63XX || COMPILE_TEST
- depends on COMMON_CLK
select COMMON_CLK_IPROC
default ARCH_BCM_63XX
help
@@ -11,27 +10,22 @@ config CLK_BCM_63XX
config CLK_BCM_KONA
bool "Broadcom Kona CCU clock support"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
- depends on COMMON_CLK
- default y
+ default ARCH_BCM_MOBILE
help
Enable common clock framework support for Broadcom SoCs
using "Kona" style clock control units, including those
in the BCM281xx and BCM21664 families.
config COMMON_CLK_IPROC
- bool "Broadcom iProc clock support"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on COMMON_CLK
- default ARCH_BCM_IPROC
+ bool
help
Enable common clock framework support for Broadcom SoCs
based on the iProc architecture
-if COMMON_CLK_IPROC
-
config CLK_BCM_CYGNUS
bool "Broadcom Cygnus clock support"
depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_CYGNUS
help
Enable common clock framework support for the Broadcom Cygnus SoC
@@ -39,6 +33,7 @@ config CLK_BCM_CYGNUS
config CLK_BCM_NSP
bool "Broadcom Northstar/Northstar Plus clock support"
depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_5301X || ARCH_BCM_NSP
help
Enable common clock framework support for the Broadcom Northstar and
@@ -47,8 +42,7 @@ config CLK_BCM_NSP
config CLK_BCM_NS2
bool "Broadcom Northstar 2 clock support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select COMMON_CLK_IPROC
default ARCH_BCM_IPROC
help
Enable common clock framework support for the Broadcom Northstar 2 SoC
-
-endif
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 8c7763fd9efc..0d14409097e7 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -436,6 +436,9 @@ struct bcm2835_clock_data {
const char *const *parents;
int num_mux_parents;
+ /* Bitmap encoding which parents accept rate change propagation. */
+ unsigned int set_rate_parent;
+
u32 ctl_reg;
u32 div_reg;
@@ -751,7 +754,9 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
cprman_write(cprman, data->cm_reg,
(cprman_read(cprman, data->cm_reg) &
~data->load_mask) | data->hold_mask);
- cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
+ cprman_write(cprman, data->a2w_reg,
+ cprman_read(cprman, data->a2w_reg) |
+ A2W_PLL_CHANNEL_DISABLE);
spin_unlock(&cprman->regs_lock);
}
@@ -1015,10 +1020,60 @@ bcm2835_clk_is_pllc(struct clk_hw *hw)
return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
}
+static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ u32 *div,
+ unsigned long *prate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ unsigned long best_rate = 0;
+ u32 curdiv, mindiv, maxdiv;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
+ if (!(BIT(parent_idx) & data->set_rate_parent)) {
+ *prate = clk_hw_get_rate(parent);
+ *div = bcm2835_clock_choose_div(hw, rate, *prate, true);
+
+ return bcm2835_clock_rate_from_divisor(clock, *prate,
+ *div);
+ }
+
+ if (data->frac_bits)
+ dev_warn(cprman->dev,
+ "frac bits are not used when propagating rate change");
+
+ /* clamp to min divider of 2 if we're dealing with a mash clock */
+ mindiv = data->is_mash_clock ? 2 : 1;
+ maxdiv = BIT(data->int_bits) - 1;
+
+ /* TODO: Be smart, and only test a subset of the available divisors. */
+ for (curdiv = mindiv; curdiv <= maxdiv; curdiv++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = clk_hw_round_rate(parent, rate * curdiv);
+ tmp_rate /= curdiv;
+ if (curdiv == mindiv ||
+ (tmp_rate > best_rate && tmp_rate <= rate))
+ best_rate = tmp_rate;
+
+ if (best_rate == rate)
+ break;
+ }
+
+ *div = curdiv << CM_DIV_FRAC_BITS;
+ *prate = curdiv * best_rate;
+
+ return best_rate;
+}
+
static int bcm2835_clock_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct clk_hw *parent, *best_parent = NULL;
bool current_parent_is_pllc;
unsigned long rate, best_rate = 0;
@@ -1046,9 +1101,8 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
continue;
- prate = clk_hw_get_rate(parent);
- div = bcm2835_clock_choose_div(hw, req->rate, prate, true);
- rate = bcm2835_clock_rate_from_divisor(clock, prate, div);
+ rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
+ &div, &prate);
if (rate > best_rate && rate <= req->rate) {
best_parent = parent;
best_prate = prate;
@@ -1260,6 +1314,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
init.name = data->name;
init.flags = data->flags | CLK_IGNORE_UNUSED;
+ /*
+ * Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate
+ * rate changes on at least one of the parents.
+ */
+ if (data->set_rate_parent)
+ init.flags |= CLK_SET_RATE_PARENT;
+
if (data->is_vpu_clock) {
init.ops = &bcm2835_vpu_clock_clk_ops;
} else {
@@ -1596,7 +1657,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.a2w_reg = A2W_PLLH_AUX,
.load_mask = CM_PLLH_LOADAUX,
.hold_mask = 0,
- .fixed_divider = 10),
+ .fixed_divider = 1),
[BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
.name = "pllh_pix",
.source_pll = "pllh",
@@ -1800,7 +1861,12 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.ctl_reg = CM_VECCTL,
.div_reg = CM_VECDIV,
.int_bits = 4,
- .frac_bits = 0),
+ .frac_bits = 0,
+ /*
+ * Allow rate change propagation only on PLLH_AUX which is
+ * assigned index 7 in the parent array.
+ */
+ .set_rate_parent = BIT(7)),
/* dsi clocks */
[BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK(
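bcm2835_clock_choose_div_and_prate() above brute-forces every integer divider and asks the parent, via clk_hw_round_rate(), what it could deliver for each candidate. A simplified sketch of that search follows; unlike the driver it explicitly remembers which divider produced the best rate, and it leaves out the MASH and fractional-divider handling.

#include <linux/clk-provider.h>

static unsigned long pick_div_and_prate(struct clk_hw *parent,
					unsigned long rate,
					unsigned int mindiv,
					unsigned int maxdiv,
					unsigned int *best_div,
					unsigned long *prate)
{
	unsigned long best_rate = 0;
	unsigned int div;

	for (div = mindiv; div <= maxdiv; div++) {
		/* ask the parent what it could really deliver for this divider */
		unsigned long tmp = clk_hw_round_rate(parent, rate * div) / div;

		if (div == mindiv || (tmp > best_rate && tmp <= rate)) {
			best_rate = tmp;
			*best_div = div;
		}
		if (best_rate == rate)
			break;
	}

	*prate = (unsigned long)*best_div * best_rate;
	return best_rate;
}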
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index b8459c14a1b7..f793b2d9238c 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -216,7 +216,7 @@ static int cdce925_pll_prepare(struct clk_hw *hw)
nn = n * BIT(p);
/* q = int(nn/m) */
q = nn / m;
- if ((q < 16) || (1 > 64)) {
+ if ((q < 16) || (q > 63)) {
pr_debug("%s invalid q=%d\n", __func__, q);
return -EINVAL;
}
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 8f571548870f..3a218c3a06ae 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -53,3 +53,24 @@ void devm_clk_put(struct device *dev, struct clk *clk)
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_clk_put);
+
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id)
+{
+ struct clk **ptr, *clk;
+
+ ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ clk = of_clk_get_by_name(np, con_id);
+ if (!IS_ERR(clk)) {
+ *ptr = clk;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL(devm_get_clk_from_child);
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4e691e35483a..4e0c054a787c 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -145,8 +145,8 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_gate_ops;
init.flags = flags | CLK_IS_BASIC;
- init.parent_names = (parent_name ? &parent_name: NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
/* struct clk_gate assignments */
gate->reg = reg;
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index 47649ac5d399..e51e0023fc6e 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -20,31 +20,43 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/stringify.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <dt-bindings/clock/oxsemi,ox810se.h>
+#include <dt-bindings/clock/oxsemi,ox820.h>
+
/* Standard regmap gate clocks */
-struct clk_oxnas {
+struct clk_oxnas_gate {
struct clk_hw hw;
- signed char bit;
+ unsigned int bit;
struct regmap *regmap;
};
+struct oxnas_stdclk_data {
+ struct clk_hw_onecell_data *onecell_data;
+ struct clk_oxnas_gate **gates;
+ unsigned int ngates;
+ struct clk_oxnas_pll **plls;
+ unsigned int nplls;
+};
+
/* Regmap offsets */
#define CLK_STAT_REGOFFSET 0x24
#define CLK_SET_REGOFFSET 0x2c
#define CLK_CLR_REGOFFSET 0x30
-static inline struct clk_oxnas *to_clk_oxnas(struct clk_hw *hw)
+static inline struct clk_oxnas_gate *to_clk_oxnas_gate(struct clk_hw *hw)
{
- return container_of(hw, struct clk_oxnas, hw);
+ return container_of(hw, struct clk_oxnas_gate, hw);
}
-static int oxnas_clk_is_enabled(struct clk_hw *hw)
+static int oxnas_clk_gate_is_enabled(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
int ret;
unsigned int val;
@@ -55,29 +67,29 @@ static int oxnas_clk_is_enabled(struct clk_hw *hw)
return val & BIT(std->bit);
}
-static int oxnas_clk_enable(struct clk_hw *hw)
+static int oxnas_clk_gate_enable(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
regmap_write(std->regmap, CLK_SET_REGOFFSET, BIT(std->bit));
return 0;
}
-static void oxnas_clk_disable(struct clk_hw *hw)
+static void oxnas_clk_gate_disable(struct clk_hw *hw)
{
- struct clk_oxnas *std = to_clk_oxnas(hw);
+ struct clk_oxnas_gate *std = to_clk_oxnas_gate(hw);
regmap_write(std->regmap, CLK_CLR_REGOFFSET, BIT(std->bit));
}
-static const struct clk_ops oxnas_clk_ops = {
- .enable = oxnas_clk_enable,
- .disable = oxnas_clk_disable,
- .is_enabled = oxnas_clk_is_enabled,
+static const struct clk_ops oxnas_clk_gate_ops = {
+ .enable = oxnas_clk_gate_enable,
+ .disable = oxnas_clk_gate_disable,
+ .is_enabled = oxnas_clk_gate_is_enabled,
};
-static const char *const oxnas_clk_parents[] = {
+static const char *const osc_parents[] = {
"oscillator",
};
@@ -85,63 +97,138 @@ static const char *const eth_parents[] = {
"gmacclk",
};
-#define DECLARE_STD_CLKP(__clk, __parent) \
-static const struct clk_init_data clk_##__clk##_init = { \
- .name = __stringify(__clk), \
- .ops = &oxnas_clk_ops, \
- .parent_names = __parent, \
- .num_parents = ARRAY_SIZE(__parent), \
+#define OXNAS_GATE(_name, _bit, _parents) \
+struct clk_oxnas_gate _name = { \
+ .bit = (_bit), \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &oxnas_clk_gate_ops, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
}
-#define DECLARE_STD_CLK(__clk) DECLARE_STD_CLKP(__clk, oxnas_clk_parents)
+static OXNAS_GATE(ox810se_leon, 0, osc_parents);
+static OXNAS_GATE(ox810se_dma_sgdma, 1, osc_parents);
+static OXNAS_GATE(ox810se_cipher, 2, osc_parents);
+static OXNAS_GATE(ox810se_sata, 4, osc_parents);
+static OXNAS_GATE(ox810se_audio, 5, osc_parents);
+static OXNAS_GATE(ox810se_usbmph, 6, osc_parents);
+static OXNAS_GATE(ox810se_etha, 7, eth_parents);
+static OXNAS_GATE(ox810se_pciea, 8, osc_parents);
+static OXNAS_GATE(ox810se_nand, 9, osc_parents);
+
+static struct clk_oxnas_gate *ox810se_gates[] = {
+ &ox810se_leon,
+ &ox810se_dma_sgdma,
+ &ox810se_cipher,
+ &ox810se_sata,
+ &ox810se_audio,
+ &ox810se_usbmph,
+ &ox810se_etha,
+ &ox810se_pciea,
+ &ox810se_nand,
+};
+
+static OXNAS_GATE(ox820_leon, 0, osc_parents);
+static OXNAS_GATE(ox820_dma_sgdma, 1, osc_parents);
+static OXNAS_GATE(ox820_cipher, 2, osc_parents);
+static OXNAS_GATE(ox820_sd, 3, osc_parents);
+static OXNAS_GATE(ox820_sata, 4, osc_parents);
+static OXNAS_GATE(ox820_audio, 5, osc_parents);
+static OXNAS_GATE(ox820_usbmph, 6, osc_parents);
+static OXNAS_GATE(ox820_etha, 7, eth_parents);
+static OXNAS_GATE(ox820_pciea, 8, osc_parents);
+static OXNAS_GATE(ox820_nand, 9, osc_parents);
+static OXNAS_GATE(ox820_ethb, 10, eth_parents);
+static OXNAS_GATE(ox820_pcieb, 11, osc_parents);
+static OXNAS_GATE(ox820_ref600, 12, osc_parents);
+static OXNAS_GATE(ox820_usbdev, 13, osc_parents);
+
+static struct clk_oxnas_gate *ox820_gates[] = {
+ &ox820_leon,
+ &ox820_dma_sgdma,
+ &ox820_cipher,
+ &ox820_sd,
+ &ox820_sata,
+ &ox820_audio,
+ &ox820_usbmph,
+ &ox820_etha,
+ &ox820_pciea,
+ &ox820_nand,
+ &ox820_etha,
+ &ox820_pciea,
+ &ox820_ref600,
+ &ox820_usbdev,
+};
+
+static struct clk_hw_onecell_data ox810se_hw_onecell_data = {
+ .hws = {
+ [CLK_810_LEON] = &ox810se_leon.hw,
+ [CLK_810_DMA_SGDMA] = &ox810se_dma_sgdma.hw,
+ [CLK_810_CIPHER] = &ox810se_cipher.hw,
+ [CLK_810_SATA] = &ox810se_sata.hw,
+ [CLK_810_AUDIO] = &ox810se_audio.hw,
+ [CLK_810_USBMPH] = &ox810se_usbmph.hw,
+ [CLK_810_ETHA] = &ox810se_etha.hw,
+ [CLK_810_PCIEA] = &ox810se_pciea.hw,
+ [CLK_810_NAND] = &ox810se_nand.hw,
+ },
+ .num = ARRAY_SIZE(ox810se_gates),
+};
+
+static struct clk_hw_onecell_data ox820_hw_onecell_data = {
+ .hws = {
+ [CLK_820_LEON] = &ox820_leon.hw,
+ [CLK_820_DMA_SGDMA] = &ox820_dma_sgdma.hw,
+ [CLK_820_CIPHER] = &ox820_cipher.hw,
+ [CLK_820_SD] = &ox820_sd.hw,
+ [CLK_820_SATA] = &ox820_sata.hw,
+ [CLK_820_AUDIO] = &ox820_audio.hw,
+ [CLK_820_USBMPH] = &ox820_usbmph.hw,
+ [CLK_820_ETHA] = &ox820_etha.hw,
+ [CLK_820_PCIEA] = &ox820_pciea.hw,
+ [CLK_820_NAND] = &ox820_nand.hw,
+ [CLK_820_ETHB] = &ox820_ethb.hw,
+ [CLK_820_PCIEB] = &ox820_pcieb.hw,
+ [CLK_820_REF600] = &ox820_ref600.hw,
+ [CLK_820_USBDEV] = &ox820_usbdev.hw,
+ },
+ .num = ARRAY_SIZE(ox820_gates),
+};
-/* Hardware Bit - Clock association */
-struct clk_oxnas_init_data {
- unsigned long bit;
- const struct clk_init_data *clk_init;
+static struct oxnas_stdclk_data ox810se_stdclk_data = {
+ .onecell_data = &ox810se_hw_onecell_data,
+ .gates = ox810se_gates,
+ .ngates = ARRAY_SIZE(ox810se_gates),
};
-/* Clk init data declaration */
-DECLARE_STD_CLK(leon);
-DECLARE_STD_CLK(dma_sgdma);
-DECLARE_STD_CLK(cipher);
-DECLARE_STD_CLK(sata);
-DECLARE_STD_CLK(audio);
-DECLARE_STD_CLK(usbmph);
-DECLARE_STD_CLKP(etha, eth_parents);
-DECLARE_STD_CLK(pciea);
-DECLARE_STD_CLK(nand);
-
-/* Table index is clock indice */
-static const struct clk_oxnas_init_data clk_oxnas_init[] = {
- [0] = {0, &clk_leon_init},
- [1] = {1, &clk_dma_sgdma_init},
- [2] = {2, &clk_cipher_init},
- /* Skip & Do not touch to DDR clock */
- [3] = {4, &clk_sata_init},
- [4] = {5, &clk_audio_init},
- [5] = {6, &clk_usbmph_init},
- [6] = {7, &clk_etha_init},
- [7] = {8, &clk_pciea_init},
- [8] = {9, &clk_nand_init},
+static struct oxnas_stdclk_data ox820_stdclk_data = {
+ .onecell_data = &ox820_hw_onecell_data,
+ .gates = ox820_gates,
+ .ngates = ARRAY_SIZE(ox820_gates),
};
-struct clk_oxnas_data {
- struct clk_oxnas clk_oxnas[ARRAY_SIZE(clk_oxnas_init)];
- struct clk_onecell_data onecell_data[ARRAY_SIZE(clk_oxnas_init)];
- struct clk *clks[ARRAY_SIZE(clk_oxnas_init)];
+static const struct of_device_id oxnas_stdclk_dt_ids[] = {
+ { .compatible = "oxsemi,ox810se-stdclk", &ox810se_stdclk_data },
+ { .compatible = "oxsemi,ox820-stdclk", &ox820_stdclk_data },
+ { }
};
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk_oxnas_data *clk_oxnas;
+ const struct oxnas_stdclk_data *data;
+ const struct of_device_id *id;
struct regmap *regmap;
+ int ret;
int i;
- clk_oxnas = devm_kzalloc(&pdev->dev, sizeof(*clk_oxnas), GFP_KERNEL);
- if (!clk_oxnas)
- return -ENOMEM;
+ id = of_match_device(oxnas_stdclk_dt_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+ data = id->data;
regmap = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(regmap)) {
@@ -149,32 +236,23 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
- struct clk_oxnas *_clk;
+ for (i = 0; i < data->ngates; ++i)
+ data->gates[i]->regmap = regmap;
- _clk = &clk_oxnas->clk_oxnas[i];
- _clk->bit = clk_oxnas_init[i].bit;
- _clk->hw.init = clk_oxnas_init[i].clk_init;
- _clk->regmap = regmap;
+ for (i = 0; i < data->onecell_data->num; i++) {
+ if (!data->onecell_data->hws[i])
+ continue;
- clk_oxnas->clks[i] =
- devm_clk_register(&pdev->dev, &_clk->hw);
- if (WARN_ON(IS_ERR(clk_oxnas->clks[i])))
- return PTR_ERR(clk_oxnas->clks[i]);
+ ret = devm_clk_hw_register(&pdev->dev,
+ data->onecell_data->hws[i]);
+ if (ret)
+ return ret;
}
- clk_oxnas->onecell_data->clks = clk_oxnas->clks;
- clk_oxnas->onecell_data->clk_num = ARRAY_SIZE(clk_oxnas_init);
-
- return of_clk_add_provider(np, of_clk_src_onecell_get,
- clk_oxnas->onecell_data);
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+ data->onecell_data);
}
-static const struct of_device_id oxnas_stdclk_dt_ids[] = {
- { .compatible = "oxsemi,ox810se-stdclk" },
- { }
-};
-
static struct platform_driver oxnas_stdclk_driver = {
.probe = oxnas_stdclk_probe,
.driver = {
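
For reference, the gate scheme above never read-modify-writes the enable register: enabling writes BIT(bit) into a SET register and disabling writes the same bit into a CLR register, so no lock is needed around the update. A minimal self-contained sketch of that set/clear idiom in plain C (a simulated register stands in for the regmap-backed syscon; all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Simulated SET/CLR register pair: hardware ORs in or clears the written bits. */
static uint32_t clk_enable_state;

static void reg_set_write(uint32_t val) { clk_enable_state |= val; }
static void reg_clr_write(uint32_t val) { clk_enable_state &= ~val; }

static void gate_enable(unsigned int bit)  { reg_set_write(1u << bit); }
static void gate_disable(unsigned int bit) { reg_clr_write(1u << bit); }

int main(void)
{
        gate_enable(7);         /* e.g. the etha gate, bit 7 */
        gate_enable(9);         /* nand, bit 9 */
        gate_disable(7);
        printf("state = 0x%08x\n", (unsigned int)clk_enable_state); /* 0x00000200 */
        return 0;
}
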
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 20b105584f82..d0bf8b1c67de 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -266,6 +266,39 @@ static const struct clockgen_muxinfo ls1043a_hwa2 = {
},
};
+static const struct clockgen_muxinfo ls1046a_hwa1 = {
+ {
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1046a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1012a_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ {},
+ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ }
+};
+
static const struct clockgen_muxinfo t1023_hwa1 = {
{
{},
@@ -489,6 +522,31 @@ static const struct clockgen_chipinfo chipinfo[] = {
.flags = CG_PLL_8BIT,
},
{
+ .compat = "fsl,ls1046a-clockgen",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &t1040_cmux
+ },
+ .hwaccel = {
+ &ls1046a_hwa1, &ls1046a_hwa2
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,ls1012a-clockgen",
+ .cmux_groups = {
+ &ls1012a_cmux
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x03,
+ },
+ {
.compat = "fsl,ls2080a-clockgen",
.cmux_groups = {
&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
@@ -700,6 +758,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
struct mux_hwclock *hwc,
const struct clk_ops *ops,
unsigned long min_rate,
+ unsigned long max_rate,
unsigned long pct80_rate,
const char *fmt, int idx)
{
@@ -728,6 +787,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
continue;
if (rate < min_rate)
continue;
+ if (rate > max_rate)
+ continue;
parent_names[j] = div->name;
hwc->parent_to_clksel[j] = i;
@@ -759,7 +820,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
struct mux_hwclock *hwc;
const struct clockgen_pll_div *div;
unsigned long plat_rate, min_rate;
- u64 pct80_rate;
+ u64 max_rate, pct80_rate;
u32 clksel;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +848,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
return NULL;
}
- pct80_rate = clk_get_rate(div->clk);
- pct80_rate *= 8;
+ max_rate = clk_get_rate(div->clk);
+ pct80_rate = max_rate * 8;
do_div(pct80_rate, 10);
plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +859,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
else
min_rate = plat_rate / 2;
- return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
pct80_rate, "cg-cmux%d", idx);
}
@@ -813,7 +874,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
hwc->reg = cg->regs + 0x20 * idx + 0x10;
hwc->info = cg->info.hwaccel[idx];
- return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
"cg-hwaccel%d", idx);
}
@@ -1270,8 +1331,10 @@ err:
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
/* Legacy nodes */
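
The create_mux_common()/create_one_cmux() changes above bound the candidate parent rates by both a minimum and a maximum, and keep the 80%-of-maximum threshold in a 64-bit value (do_div in the driver) so the multiplication cannot overflow. A self-contained sketch of that selection filter, using made-up rates rather than values from the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative candidate parent rates in Hz. */
static const uint64_t parent_rates[] = {
        400000000, 600000000, 800000000, 1200000000, 1600000000,
};

int main(void)
{
        uint64_t max_rate = 1200000000;          /* rate of the boot-time divider */
        uint64_t pct80_rate = max_rate * 8 / 10; /* 64-bit math, like do_div()    */
        uint64_t min_rate = 300000000;           /* e.g. platform PLL / 2         */
        unsigned int i;

        for (i = 0; i < sizeof(parent_rates) / sizeof(parent_rates[0]); i++) {
                uint64_t rate = parent_rates[i];

                if (rate < min_rate || rate > max_rate)
                        continue;       /* parent not usable for this cmux */
                printf("parent %u: %llu Hz usable%s\n", i,
                       (unsigned long long)rate,
                       rate > pct80_rate ? " (above the 80% mark)" : "");
        }
        return 0;
}
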
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 02d681008401..5eb05dbf59b8 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -19,10 +19,14 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#define STM32F4_RCC_PLLCFGR 0x04
#define STM32F4_RCC_CFGR 0x08
@@ -31,6 +35,8 @@
#define STM32F4_RCC_AHB3ENR 0x38
#define STM32F4_RCC_APB1ENR 0x40
#define STM32F4_RCC_APB2ENR 0x44
+#define STM32F4_RCC_BDCR 0x70
+#define STM32F4_RCC_CSR 0x74
struct stm32f4_gate_data {
u8 offset;
@@ -40,7 +46,7 @@ struct stm32f4_gate_data {
unsigned long flags;
};
-static const struct stm32f4_gate_data stm32f4_gates[] __initconst = {
+static const struct stm32f4_gate_data stm32f429_gates[] __initconst = {
{ STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
{ STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
@@ -120,26 +126,113 @@ static const struct stm32f4_gate_data stm32f4_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
-/*
- * MAX_CLKS is the maximum value in the enumeration below plus the combined
- * hweight of stm32f42xx_gate_map (plus one).
- */
-#define MAX_CLKS 74
+static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
+ { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 20, "ccmdatam", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
+
+ { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
+ { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
+
+ { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
+ CLK_IGNORE_UNUSED },
+ { STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
+ CLK_IGNORE_UNUSED },
+
+ { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 17, "uart2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 18, "uart3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 19, "uart4", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 20, "uart5", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 21, "i2c1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 22, "i2c2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 23, "i2c3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 30, "uart7", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 31, "uart8", "apb1_div" },
+
+ { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 4, "usart1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 5, "usart6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" },
+ { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
+};
-enum { SYSTICK, FCLK };
+enum { SYSTICK, FCLK, CLK_LSI, CLK_LSE, CLK_HSE_RTC, CLK_RTC, END_PRIMARY_CLK };
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
*/
-static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull,
- 0x0000000000000001ull,
- 0x04777f33f6fec9ffull };
+#define MAX_GATE_MAP 3
+
+static const u64 stm32f42xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
+ 0x0000000000000001ull,
+ 0x04777f33f6fec9ffull };
+
+static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
+ 0x0000000000000003ull,
+ 0x0c777f33f6fec9ffull };
+
+static const u64 *stm32f4_gate_map;
+
+static struct clk_hw **clks;
-static struct clk_hw *clks[MAX_CLKS];
static DEFINE_SPINLOCK(stm32f4_clk_lock);
static void __iomem *base;
+static struct regmap *pdrm;
+
/*
* "Multiplier" device for APBx clocks.
*
@@ -256,15 +349,15 @@ static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk)
*/
static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
{
- u64 table[ARRAY_SIZE(stm32f42xx_gate_map)];
+ u64 table[MAX_GATE_MAP];
if (primary == 1) {
- if (WARN_ON(secondary > FCLK))
+ if (WARN_ON(secondary >= END_PRIMARY_CLK))
return -EINVAL;
return secondary;
}
- memcpy(table, stm32f42xx_gate_map, sizeof(table));
+ memcpy(table, stm32f4_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
@@ -276,7 +369,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
table[BIT_ULL_WORD(secondary)] &=
GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0);
- return FCLK + hweight64(table[0]) +
+ return END_PRIMARY_CLK - 1 + hweight64(table[0]) +
(BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
}
@@ -292,6 +385,212 @@ stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data)
return clks[i];
}
+#define to_rgclk(_rgate) container_of(_rgate, struct stm32_rgate, gate)
+
+static inline void disable_power_domain_write_protection(void)
+{
+ if (pdrm)
+ regmap_update_bits(pdrm, 0x00, (1 << 8), (1 << 8));
+}
+
+static inline void enable_power_domain_write_protection(void)
+{
+ if (pdrm)
+ regmap_update_bits(pdrm, 0x00, (1 << 8), (0 << 8));
+}
+
+static inline void software_reset_backup_domain(void)
+{
+ unsigned long val;
+
+ val = readl(base + STM32F4_RCC_BDCR);
+ writel(val | BIT(16), base + STM32F4_RCC_BDCR);
+ writel(val & ~BIT(16), base + STM32F4_RCC_BDCR);
+}
+
+struct stm32_rgate {
+ struct clk_gate gate;
+ u8 bit_rdy_idx;
+};
+
+#define RTC_TIMEOUT 1000000
+
+static int rgclk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_rgate *rgate = to_rgclk(gate);
+ u32 reg;
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ clk_gate_ops.enable(hw);
+
+ ret = readl_relaxed_poll_timeout_atomic(gate->reg, reg,
+ reg & rgate->bit_rdy_idx, 1000, RTC_TIMEOUT);
+
+ enable_power_domain_write_protection();
+ return ret;
+}
+
+static void rgclk_disable(struct clk_hw *hw)
+{
+ clk_gate_ops.disable(hw);
+}
+
+static int rgclk_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops rgclk_ops = {
+ .enable = rgclk_enable,
+ .disable = rgclk_disable,
+ .is_enabled = rgclk_is_enabled,
+};
+
+static struct clk_hw *clk_register_rgate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, u8 bit_rdy_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct stm32_rgate *rgate;
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
+ int ret;
+
+ rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
+ if (!rgate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &rgclk_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ rgate->bit_rdy_idx = bit_rdy_idx;
+
+ rgate->gate.lock = lock;
+ rgate->gate.reg = reg;
+ rgate->gate.bit_idx = bit_idx;
+ rgate->gate.hw.init = &init;
+
+ hw = &rgate->gate.hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(rgate);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static int cclk_gate_enable(struct clk_hw *hw)
+{
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ ret = clk_gate_ops.enable(hw);
+
+ enable_power_domain_write_protection();
+
+ return ret;
+}
+
+static void cclk_gate_disable(struct clk_hw *hw)
+{
+ disable_power_domain_write_protection();
+
+ clk_gate_ops.disable(hw);
+
+ enable_power_domain_write_protection();
+}
+
+static int cclk_gate_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops cclk_gate_ops = {
+ .enable = cclk_gate_enable,
+ .disable = cclk_gate_disable,
+ .is_enabled = cclk_gate_is_enabled,
+};
+
+static u8 cclk_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int cclk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ int ret;
+
+ disable_power_domain_write_protection();
+
+ software_reset_backup_domain();
+
+ ret = clk_mux_ops.set_parent(hw, index);
+
+ enable_power_domain_write_protection();
+
+ return ret;
+}
+
+static const struct clk_ops cclk_mux_ops = {
+ .get_parent = cclk_mux_get_parent,
+ .set_parent = cclk_mux_set_parent,
+};
+
+static struct clk_hw *stm32_register_cclk(struct device *dev, const char *name,
+ const char * const *parent_names, int num_parents,
+ void __iomem *reg, u8 bit_idx, u8 shift, unsigned long flags,
+ spinlock_t *lock)
+{
+ struct clk_hw *hw;
+ struct clk_gate *gate;
+ struct clk_mux *mux;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ hw = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ kfree(gate);
+ hw = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = 0;
+ gate->lock = lock;
+
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = 3;
+ mux->flags = 0;
+
+ hw = clk_hw_register_composite(dev, name, parent_names, num_parents,
+ &mux->hw, &cclk_mux_ops,
+ NULL, NULL,
+ &gate->hw, &cclk_gate_ops,
+ flags);
+
+ if (IS_ERR(hw)) {
+ kfree(gate);
+ kfree(mux);
+ }
+
+fail:
+ return hw;
+}
+
static const char *sys_parents[] __initdata = { "hsi", NULL, "pll" };
static const struct clk_div_table ahb_div_table[] = {
@@ -308,10 +607,46 @@ static const struct clk_div_table apb_div_table[] = {
{ 0 },
};
+static const char *rtc_parents[4] = {
+ "no-clock", "lse", "lsi", "hse-rtc"
+};
+
+struct stm32f4_clk_data {
+ const struct stm32f4_gate_data *gates_data;
+ const u64 *gates_map;
+ int gates_num;
+};
+
+static const struct stm32f4_clk_data stm32f429_clk_data = {
+ .gates_data = stm32f429_gates,
+ .gates_map = stm32f42xx_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f429_gates),
+};
+
+static const struct stm32f4_clk_data stm32f469_clk_data = {
+ .gates_data = stm32f469_gates,
+ .gates_map = stm32f46xx_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f469_gates),
+};
+
+static const struct of_device_id stm32f4_of_match[] = {
+ {
+ .compatible = "st,stm32f42xx-rcc",
+ .data = &stm32f429_clk_data
+ },
+ {
+ .compatible = "st,stm32f469-rcc",
+ .data = &stm32f469_clk_data
+ },
+ {}
+};
+
static void __init stm32f4_rcc_init(struct device_node *np)
{
const char *hse_clk;
int n;
+ const struct of_device_id *match;
+ const struct stm32f4_clk_data *data;
base = of_iomap(np, 0);
if (!base) {
@@ -319,6 +654,25 @@ static void __init stm32f4_rcc_init(struct device_node *np)
return;
}
+ pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(pdrm)) {
+ pdrm = NULL;
+ pr_warn("%s: Unable to get syscfg\n", __func__);
+ }
+
+ match = of_match_node(stm32f4_of_match, np);
+ if (WARN_ON(!match))
+ return;
+
+ data = match->data;
+
+ clks = kmalloc_array(data->gates_num + END_PRIMARY_CLK,
+ sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ goto fail;
+
+ stm32f4_gate_map = data->gates_map;
+
hse_clk = of_clk_get_parent_name(np, 0);
clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0,
@@ -351,11 +705,15 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clks[FCLK] = clk_hw_register_fixed_factor(NULL, "fclk", "ahb_div",
0, 1, 1);
- for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) {
- const struct stm32f4_gate_data *gd = &stm32f4_gates[n];
- unsigned int secondary =
- 8 * (gd->offset - STM32F4_RCC_AHB1ENR) + gd->bit_idx;
- int idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
+ for (n = 0; n < data->gates_num; n++) {
+ const struct stm32f4_gate_data *gd;
+ unsigned int secondary;
+ int idx;
+
+ gd = &data->gates_data[n];
+ secondary = 8 * (gd->offset - STM32F4_RCC_AHB1ENR) +
+ gd->bit_idx;
+ idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
if (idx < 0)
goto fail;
@@ -371,9 +729,44 @@ static void __init stm32f4_rcc_init(struct device_node *np)
}
}
+ clks[CLK_LSI] = clk_register_rgate(NULL, "lsi", "clk-lsi", 0,
+ base + STM32F4_RCC_CSR, 0, 2, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_LSI])) {
+ pr_err("Unable to register lsi clock\n");
+ goto fail;
+ }
+
+ clks[CLK_LSE] = clk_register_rgate(NULL, "lse", "clk-lse", 0,
+ base + STM32F4_RCC_BDCR, 0, 2, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_LSE])) {
+ pr_err("Unable to register lse clock\n");
+ goto fail;
+ }
+
+ clks[CLK_HSE_RTC] = clk_hw_register_divider(NULL, "hse-rtc", "clk-hse",
+ 0, base + STM32F4_RCC_CFGR, 16, 5, 0,
+ &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_HSE_RTC])) {
+ pr_err("Unable to register hse-rtc clock\n");
+ goto fail;
+ }
+
+ clks[CLK_RTC] = stm32_register_cclk(NULL, "rtc", rtc_parents, 4,
+ base + STM32F4_RCC_BDCR, 15, 8, 0, &stm32f4_clk_lock);
+
+ if (IS_ERR(clks[CLK_RTC])) {
+ pr_err("Unable to register rtc clock\n");
+ goto fail;
+ }
+
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
return;
fail:
+ kfree(clks);
iounmap(base);
}
-CLK_OF_DECLARE(stm32f4_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
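
The LSE/LSI gates registered above follow one fixed sequence: lift the backup-domain write protection through the PWR block, set the oscillator enable bit, poll the neighbouring ready flag with a timeout, then restore the protection. A compilable sketch of that sequence with simulated registers (the bit positions and names are illustrative, not lifted from the driver):

#include <stdint.h>
#include <stdio.h>

static uint32_t pwr_cr;         /* stands in for the PWR control register */
static uint32_t rcc_bdcr;       /* stands in for RCC_BDCR (LSEON/LSERDY)  */

#define DBP_BIT         (1u << 8)       /* backup-domain write access enable */
#define LSEON_BIT       (1u << 0)
#define LSERDY_BIT      (1u << 1)

/* Pretend hardware: the oscillator reports ready once it has been enabled. */
static uint32_t bdcr_read(void)
{
        if (rcc_bdcr & LSEON_BIT)
                rcc_bdcr |= LSERDY_BIT;
        return rcc_bdcr;
}

static int lse_enable(void)
{
        long timeout = 1000000;

        pwr_cr |= DBP_BIT;                      /* unlock the backup domain */
        rcc_bdcr |= LSEON_BIT;                  /* set the enable bit       */
        while (!(bdcr_read() & LSERDY_BIT) && --timeout)
                ;                               /* poll the ready flag      */
        pwr_cr &= ~DBP_BIT;                     /* restore write protection */

        return timeout ? 0 : -1;                /* -ETIMEDOUT in the driver */
}

int main(void)
{
        printf("lse_enable() = %d\n", lse_enable());
        return 0;
}
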
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index f4fdac55727c..0621fbfb4beb 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -243,7 +243,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
ret);
- return true;
+ return false;
}
return (ret & WM831X_CLKOUT_ENA) != 0;
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 5daddf5ecc4b..bc37030e38ba 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
- phys_addr_t reg;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
- reg = __pa(pclk->param.csr_reg);
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data |= pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
- pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
data &= ~pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
- pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 3f537a04c6a6..cbed6602172b 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,3 +1,11 @@
+config COMMON_CLK_HI3516CV300
+ tristate "HI3516CV300 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3516cv300.
+
config COMMON_CLK_HI3519
tristate "Hi3519 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -6,6 +14,14 @@ config COMMON_CLK_HI3519
help
Build the clock driver for hi3519.
+config COMMON_CLK_HI3798CV200
+ tristate "Hi3798CV200 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3798cv200.
+
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -23,5 +39,6 @@ config RESET_HISI
config STUB_CLK_HI6220
bool "Hi6220 Stub Clock Driver"
depends on COMMON_CLK_HI6220 && MAILBOX
+ default ARCH_HISI
help
Build the Hisilicon Hi6220 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index e169ec7da023..4eec5e511e4c 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,7 +7,9 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
+obj-$(CONFIG_COMMON_CLK_HI3516CV300) += crg-hi3516cv300.o
obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o
+obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
obj-$(CONFIG_RESET_HISI) += reset.o
obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
new file mode 100644
index 000000000000..2007123832bb
--- /dev/null
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -0,0 +1,330 @@
+/*
+ * Hi3516CV300 Clock and Reset Generator Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/clock/hi3516cv300-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+#include "crg.h"
+#include "reset.h"
+
+/* hi3516CV300 core CRG */
+#define HI3516CV300_INNER_CLK_OFFSET 64
+#define HI3516CV300_FIXED_3M 65
+#define HI3516CV300_FIXED_6M 66
+#define HI3516CV300_FIXED_24M 67
+#define HI3516CV300_FIXED_49P5 68
+#define HI3516CV300_FIXED_50M 69
+#define HI3516CV300_FIXED_83P3M 70
+#define HI3516CV300_FIXED_99M 71
+#define HI3516CV300_FIXED_100M 72
+#define HI3516CV300_FIXED_148P5M 73
+#define HI3516CV300_FIXED_198M 74
+#define HI3516CV300_FIXED_297M 75
+#define HI3516CV300_UART_MUX 76
+#define HI3516CV300_FMC_MUX 77
+#define HI3516CV300_MMC0_MUX 78
+#define HI3516CV300_MMC1_MUX 79
+#define HI3516CV300_MMC2_MUX 80
+#define HI3516CV300_MMC3_MUX 81
+#define HI3516CV300_PWM_MUX 82
+#define HI3516CV300_CRG_NR_CLKS 128
+
+static const struct hisi_fixed_rate_clock hi3516cv300_fixed_rate_clks[] = {
+ { HI3516CV300_FIXED_3M, "3m", NULL, 0, 3000000, },
+ { HI3516CV300_FIXED_6M, "6m", NULL, 0, 6000000, },
+ { HI3516CV300_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HI3516CV300_FIXED_49P5, "49.5m", NULL, 0, 49500000, },
+ { HI3516CV300_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3516CV300_FIXED_83P3M, "83.3m", NULL, 0, 83300000, },
+ { HI3516CV300_FIXED_99M, "99m", NULL, 0, 99000000, },
+ { HI3516CV300_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HI3516CV300_FIXED_148P5M, "148.5m", NULL, 0, 148500000, },
+ { HI3516CV300_FIXED_198M, "198m", NULL, 0, 198000000, },
+ { HI3516CV300_FIXED_297M, "297m", NULL, 0, 297000000, },
+ { HI3516CV300_APB_CLK, "apb", NULL, 0, 50000000, },
+};
+
+static const char *const uart_mux_p[] = {"24m", "6m"};
+static const char *const fmc_mux_p[] = {
+ "24m", "83.3m", "148.5m", "198m", "297m"
+};
+static const char *const mmc_mux_p[] = {"49.5m"};
+static const char *const mmc2_mux_p[] = {"99m", "49.5m"};
+static const char *const pwm_mux_p[] = {"3m", "50m", "24m", "24m"};
+
+static u32 uart_mux_table[] = {0, 1};
+static u32 fmc_mux_table[] = {0, 1, 2, 3, 4};
+static u32 mmc_mux_table[] = {0};
+static u32 mmc2_mux_table[] = {0, 2};
+static u32 pwm_mux_table[] = {0, 1, 2, 3};
+
+static const struct hisi_mux_clock hi3516cv300_mux_clks[] = {
+ { HI3516CV300_UART_MUX, "uart_mux", uart_mux_p, ARRAY_SIZE(uart_mux_p),
+ CLK_SET_RATE_PARENT, 0xe4, 19, 1, 0, uart_mux_table, },
+ { HI3516CV300_FMC_MUX, "fmc_mux", fmc_mux_p, ARRAY_SIZE(fmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc0, 2, 3, 0, fmc_mux_table, },
+ { HI3516CV300_MMC0_MUX, "mmc0_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 4, 2, 0, mmc_mux_table, },
+ { HI3516CV300_MMC1_MUX, "mmc1_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 12, 2, 0, mmc_mux_table, },
+ { HI3516CV300_MMC2_MUX, "mmc2_mux", mmc2_mux_p, ARRAY_SIZE(mmc2_mux_p),
+ CLK_SET_RATE_PARENT, 0xc4, 20, 2, 0, mmc2_mux_table, },
+ { HI3516CV300_MMC3_MUX, "mmc3_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc8, 4, 2, 0, mmc_mux_table, },
+ { HI3516CV300_PWM_MUX, "pwm_mux", pwm_mux_p, ARRAY_SIZE(pwm_mux_p),
+ CLK_SET_RATE_PARENT, 0x38, 2, 2, 0, pwm_mux_table, },
+};
+
+static const struct hisi_gate_clock hi3516cv300_gate_clks[] = {
+
+ { HI3516CV300_UART0_CLK, "clk_uart0", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 15, 0, },
+ { HI3516CV300_UART1_CLK, "clk_uart1", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 16, 0, },
+ { HI3516CV300_UART2_CLK, "clk_uart2", "uart_mux", CLK_SET_RATE_PARENT,
+ 0xe4, 17, 0, },
+
+ { HI3516CV300_SPI0_CLK, "clk_spi0", "100m", CLK_SET_RATE_PARENT,
+ 0xe4, 13, 0, },
+ { HI3516CV300_SPI1_CLK, "clk_spi1", "100m", CLK_SET_RATE_PARENT,
+ 0xe4, 14, 0, },
+
+ { HI3516CV300_FMC_CLK, "clk_fmc", "fmc_mux", CLK_SET_RATE_PARENT,
+ 0xc0, 1, 0, },
+ { HI3516CV300_MMC0_CLK, "clk_mmc0", "mmc0_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 1, 0, },
+ { HI3516CV300_MMC1_CLK, "clk_mmc1", "mmc1_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 9, 0, },
+ { HI3516CV300_MMC2_CLK, "clk_mmc2", "mmc2_mux", CLK_SET_RATE_PARENT,
+ 0xc4, 17, 0, },
+ { HI3516CV300_MMC3_CLK, "clk_mmc3", "mmc3_mux", CLK_SET_RATE_PARENT,
+ 0xc8, 1, 0, },
+
+ { HI3516CV300_ETH_CLK, "clk_eth", NULL, 0, 0xec, 1, 0, },
+
+ { HI3516CV300_DMAC_CLK, "clk_dmac", NULL, 0, 0xd8, 5, 0, },
+ { HI3516CV300_PWM_CLK, "clk_pwm", "pwm_mux", CLK_SET_RATE_PARENT,
+ 0x38, 1, 0, },
+
+ { HI3516CV300_USB2_BUS_CLK, "clk_usb2_bus", NULL, 0, 0xb8, 0, 0, },
+ { HI3516CV300_USB2_OHCI48M_CLK, "clk_usb2_ohci48m", NULL, 0,
+ 0xb8, 1, 0, },
+ { HI3516CV300_USB2_OHCI12M_CLK, "clk_usb2_ohci12m", NULL, 0,
+ 0xb8, 2, 0, },
+ { HI3516CV300_USB2_OTG_UTMI_CLK, "clk_usb2_otg_utmi", NULL, 0,
+ 0xb8, 3, 0, },
+ { HI3516CV300_USB2_HST_PHY_CLK, "clk_usb2_hst_phy", NULL, 0,
+ 0xb8, 4, 0, },
+ { HI3516CV300_USB2_UTMI0_CLK, "clk_usb2_utmi0", NULL, 0, 0xb8, 5, 0, },
+ { HI3516CV300_USB2_PHY_CLK, "clk_usb2_phy", NULL, 0, 0xb8, 7, 0, },
+};
+
+static struct hisi_clock_data *hi3516cv300_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3516CV300_CRG_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = hisi_clk_register_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), clk_data);
+ if (ret)
+ goto unregister_fixed_rate;
+
+ ret = hisi_clk_register_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), clk_data);
+unregister_mux:
+ hisi_clk_unregister_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), clk_data);
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3516cv300_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3516cv300_gate_clks,
+ ARRAY_SIZE(hi3516cv300_gate_clks), crg->clk_data);
+ hisi_clk_unregister_mux(hi3516cv300_mux_clks,
+ ARRAY_SIZE(hi3516cv300_mux_clks), crg->clk_data);
+ hisi_clk_unregister_fixed_rate(hi3516cv300_fixed_rate_clks,
+ ARRAY_SIZE(hi3516cv300_fixed_rate_clks), crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
+ .register_clks = hi3516cv300_clk_register,
+ .unregister_clks = hi3516cv300_clk_unregister,
+};
+
+/* hi3516CV300 sysctrl CRG */
+#define HI3516CV300_SYSCTRL_NR_CLKS 16
+
+static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
+static u32 wdt_mux_table[] = {0, 1};
+
+static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
+ { HI3516CV300_WDT_CLK, "wdt", wdt_mux_p, ARRAY_SIZE(wdt_mux_p),
+ CLK_SET_RATE_PARENT, 0x0, 23, 1, 0, wdt_mux_table, },
+};
+
+static struct hisi_clock_data *hi3516cv300_sysctrl_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3516CV300_SYSCTRL_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks), clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ return clk_data;
+
+unregister_mux:
+ hisi_clk_unregister_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks), clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3516cv300_sysctrl_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_mux(hi3516cv300_sysctrl_mux_clks,
+ ARRAY_SIZE(hi3516cv300_sysctrl_mux_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3516cv300_sysctrl_funcs = {
+ .register_clks = hi3516cv300_sysctrl_clk_register,
+ .unregister_clks = hi3516cv300_sysctrl_clk_unregister,
+};
+
+static const struct of_device_id hi3516cv300_crg_match_table[] = {
+ {
+ .compatible = "hisilicon,hi3516cv300-crg",
+ .data = &hi3516cv300_crg_funcs
+ },
+ {
+ .compatible = "hisilicon,hi3516cv300-sysctrl",
+ .data = &hi3516cv300_sysctrl_funcs
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3516cv300_crg_match_table);
+
+static int hi3516cv300_crg_probe(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg;
+
+ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL);
+ if (!crg)
+ return -ENOMEM;
+
+ crg->funcs = of_device_get_match_data(&pdev->dev);
+ if (!crg->funcs)
+ return -ENOENT;
+
+ crg->rstc = hisi_reset_init(pdev);
+ if (!crg->rstc)
+ return -ENOMEM;
+
+ crg->clk_data = crg->funcs->register_clks(pdev);
+ if (IS_ERR(crg->clk_data)) {
+ hisi_reset_exit(crg->rstc);
+ return PTR_ERR(crg->clk_data);
+ }
+
+ platform_set_drvdata(pdev, crg);
+ return 0;
+}
+
+static int hi3516cv300_crg_remove(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ hisi_reset_exit(crg->rstc);
+ crg->funcs->unregister_clks(pdev);
+ return 0;
+}
+
+static struct platform_driver hi3516cv300_crg_driver = {
+ .probe = hi3516cv300_crg_probe,
+ .remove = hi3516cv300_crg_remove,
+ .driver = {
+ .name = "hi3516cv300-crg",
+ .of_match_table = hi3516cv300_crg_match_table,
+ },
+};
+
+static int __init hi3516cv300_crg_init(void)
+{
+ return platform_driver_register(&hi3516cv300_crg_driver);
+}
+core_initcall(hi3516cv300_crg_init);
+
+static void __exit hi3516cv300_crg_exit(void)
+{
+ platform_driver_unregister(&hi3516cv300_crg_driver);
+}
+module_exit(hi3516cv300_crg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3516CV300 CRG Driver");
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
new file mode 100644
index 000000000000..fc8b5bc2d50d
--- /dev/null
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -0,0 +1,337 @@
+/*
+ * Hi3798CV200 Clock and Reset Generator Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/clock/histb-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+#include "crg.h"
+#include "reset.h"
+
+/* hi3798CV200 core CRG */
+#define HI3798CV200_INNER_CLK_OFFSET 64
+#define HI3798CV200_FIXED_24M 65
+#define HI3798CV200_FIXED_25M 66
+#define HI3798CV200_FIXED_50M 67
+#define HI3798CV200_FIXED_75M 68
+#define HI3798CV200_FIXED_100M 69
+#define HI3798CV200_FIXED_150M 70
+#define HI3798CV200_FIXED_200M 71
+#define HI3798CV200_FIXED_250M 72
+#define HI3798CV200_FIXED_300M 73
+#define HI3798CV200_FIXED_400M 74
+#define HI3798CV200_MMC_MUX 75
+#define HI3798CV200_ETH_PUB_CLK 76
+#define HI3798CV200_ETH_BUS_CLK 77
+#define HI3798CV200_ETH_BUS0_CLK 78
+#define HI3798CV200_ETH_BUS1_CLK 79
+#define HI3798CV200_COMBPHY1_MUX 80
+
+#define HI3798CV200_CRG_NR_CLKS 128
+
+static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
+ { HISTB_OSC_CLK, "clk_osc", NULL, 0, 24000000, },
+ { HISTB_APB_CLK, "clk_apb", NULL, 0, 100000000, },
+ { HISTB_AHB_CLK, "clk_ahb", NULL, 0, 200000000, },
+ { HI3798CV200_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HI3798CV200_FIXED_25M, "25m", NULL, 0, 25000000, },
+ { HI3798CV200_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
+ { HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HI3798CV200_FIXED_200M, "200m", NULL, 0, 200000000, },
+ { HI3798CV200_FIXED_250M, "250m", NULL, 0, 250000000, },
+};
+
+static const char *const mmc_mux_p[] = {
+ "100m", "50m", "25m", "200m", "150m" };
+static u32 mmc_mux_table[] = {0, 1, 2, 3, 6};
+
+static const char *const comphy1_mux_p[] = {
+ "100m", "25m"};
+static u32 comphy1_mux_table[] = {2, 3};
+
+static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
+ { HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
+ { HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
+ comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+};
+
+static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
+ /* UART */
+ { HISTB_UART2_CLK, "clk_uart2", "75m",
+ CLK_SET_RATE_PARENT, 0x68, 4, 0, },
+ /* I2C */
+ { HISTB_I2C0_CLK, "clk_i2c0", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 4, 0, },
+ { HISTB_I2C1_CLK, "clk_i2c1", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 8, 0, },
+ { HISTB_I2C2_CLK, "clk_i2c2", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 12, 0, },
+ { HISTB_I2C3_CLK, "clk_i2c3", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 16, 0, },
+ { HISTB_I2C4_CLK, "clk_i2c4", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x6C, 20, 0, },
+ /* SPI */
+ { HISTB_SPI0_CLK, "clk_spi0", "clk_apb",
+ CLK_SET_RATE_PARENT, 0x70, 0, 0, },
+ /* SDIO */
+ { HISTB_SDIO0_BIU_CLK, "clk_sdio0_biu", "200m",
+ CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
+ { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "mmc_mux",
+ CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
+ /* EMMC */
+ { HISTB_MMC_BIU_CLK, "clk_mmc_biu", "200m",
+ CLK_SET_RATE_PARENT, 0xa0, 0, 0, },
+ { HISTB_MMC_CIU_CLK, "clk_mmc_ciu", "mmc_mux",
+ CLK_SET_RATE_PARENT, 0xa0, 1, 0, },
+ /* PCIE*/
+ { HISTB_PCIE_BUS_CLK, "clk_pcie_bus", "200m",
+ CLK_SET_RATE_PARENT, 0x18c, 0, 0, },
+ { HISTB_PCIE_SYS_CLK, "clk_pcie_sys", "100m",
+ CLK_SET_RATE_PARENT, 0x18c, 1, 0, },
+ { HISTB_PCIE_PIPE_CLK, "clk_pcie_pipe", "250m",
+ CLK_SET_RATE_PARENT, 0x18c, 2, 0, },
+ { HISTB_PCIE_AUX_CLK, "clk_pcie_aux", "24m",
+ CLK_SET_RATE_PARENT, 0x18c, 3, 0, },
+ /* Ethernet */
+ { HI3798CV200_ETH_PUB_CLK, "clk_pub", NULL,
+ CLK_SET_RATE_PARENT, 0xcc, 5, 0, },
+ { HI3798CV200_ETH_BUS_CLK, "clk_bus", "clk_pub",
+ CLK_SET_RATE_PARENT, 0xcc, 0, 0, },
+ { HI3798CV200_ETH_BUS0_CLK, "clk_bus_m0", "clk_bus",
+ CLK_SET_RATE_PARENT, 0xcc, 1, 0, },
+ { HI3798CV200_ETH_BUS1_CLK, "clk_bus_m1", "clk_bus",
+ CLK_SET_RATE_PARENT, 0xcc, 2, 0, },
+ { HISTB_ETH0_MAC_CLK, "clk_mac0", "clk_bus_m0",
+ CLK_SET_RATE_PARENT, 0xcc, 3, 0, },
+ { HISTB_ETH0_MACIF_CLK, "clk_macif0", "clk_bus_m0",
+ CLK_SET_RATE_PARENT, 0xcc, 24, 0, },
+ { HISTB_ETH1_MAC_CLK, "clk_mac1", "clk_bus_m1",
+ CLK_SET_RATE_PARENT, 0xcc, 4, 0, },
+ { HISTB_ETH1_MACIF_CLK, "clk_macif1", "clk_bus_m1",
+ CLK_SET_RATE_PARENT, 0xcc, 25, 0, },
+ /* COMBPHY1 */
+ { HISTB_COMBPHY1_CLK, "clk_combphy1", "combphy1_mux",
+ CLK_SET_RATE_PARENT, 0x188, 8, 0, },
+};
+
+static struct hisi_clock_data *hi3798cv200_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3798CV200_CRG_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = hisi_clk_register_mux(hi3798cv200_mux_clks,
+ ARRAY_SIZE(hi3798cv200_mux_clks),
+ clk_data);
+ if (ret)
+ goto unregister_fixed_rate;
+
+ ret = hisi_clk_register_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
+ clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
+ clk_data);
+unregister_mux:
+ hisi_clk_unregister_mux(hi3798cv200_mux_clks,
+ ARRAY_SIZE(hi3798cv200_mux_clks),
+ clk_data);
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+ clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3798cv200_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
+ crg->clk_data);
+ hisi_clk_unregister_mux(hi3798cv200_mux_clks,
+ ARRAY_SIZE(hi3798cv200_mux_clks),
+ crg->clk_data);
+ hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3798cv200_crg_funcs = {
+ .register_clks = hi3798cv200_clk_register,
+ .unregister_clks = hi3798cv200_clk_unregister,
+};
+
+/* hi3798CV200 sysctrl CRG */
+
+#define HI3798CV200_SYSCTRL_NR_CLKS 16
+
+static const struct hisi_gate_clock hi3798cv200_sysctrl_gate_clks[] = {
+ { HISTB_IR_CLK, "clk_ir", "100m",
+ CLK_SET_RATE_PARENT, 0x48, 4, 0, },
+ { HISTB_TIMER01_CLK, "clk_timer01", "24m",
+ CLK_SET_RATE_PARENT, 0x48, 6, 0, },
+ { HISTB_UART0_CLK, "clk_uart0", "75m",
+ CLK_SET_RATE_PARENT, 0x48, 10, 0, },
+};
+
+static struct hisi_clock_data *hi3798cv200_sysctrl_clk_register(
+ struct platform_device *pdev)
+{
+ struct hisi_clock_data *clk_data;
+ int ret;
+
+ clk_data = hisi_clk_alloc(pdev, HI3798CV200_SYSCTRL_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3798cv200_sysctrl_clk_unregister(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3798cv200_sysctrl_gate_clks,
+ ARRAY_SIZE(hi3798cv200_sysctrl_gate_clks),
+ crg->clk_data);
+}
+
+static const struct hisi_crg_funcs hi3798cv200_sysctrl_funcs = {
+ .register_clks = hi3798cv200_sysctrl_clk_register,
+ .unregister_clks = hi3798cv200_sysctrl_clk_unregister,
+};
+
+static const struct of_device_id hi3798cv200_crg_match_table[] = {
+ { .compatible = "hisilicon,hi3798cv200-crg",
+ .data = &hi3798cv200_crg_funcs },
+ { .compatible = "hisilicon,hi3798cv200-sysctrl",
+ .data = &hi3798cv200_sysctrl_funcs },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3798cv200_crg_match_table);
+
+static int hi3798cv200_crg_probe(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg;
+
+ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL);
+ if (!crg)
+ return -ENOMEM;
+
+ crg->funcs = of_device_get_match_data(&pdev->dev);
+ if (!crg->funcs)
+ return -ENOENT;
+
+ crg->rstc = hisi_reset_init(pdev);
+ if (!crg->rstc)
+ return -ENOMEM;
+
+ crg->clk_data = crg->funcs->register_clks(pdev);
+ if (IS_ERR(crg->clk_data)) {
+ hisi_reset_exit(crg->rstc);
+ return PTR_ERR(crg->clk_data);
+ }
+
+ platform_set_drvdata(pdev, crg);
+ return 0;
+}
+
+static int hi3798cv200_crg_remove(struct platform_device *pdev)
+{
+ struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
+
+ hisi_reset_exit(crg->rstc);
+ crg->funcs->unregister_clks(pdev);
+ return 0;
+}
+
+static struct platform_driver hi3798cv200_crg_driver = {
+ .probe = hi3798cv200_crg_probe,
+ .remove = hi3798cv200_crg_remove,
+ .driver = {
+ .name = "hi3798cv200-crg",
+ .of_match_table = hi3798cv200_crg_match_table,
+ },
+};
+
+static int __init hi3798cv200_crg_init(void)
+{
+ return platform_driver_register(&hi3798cv200_crg_driver);
+}
+core_initcall(hi3798cv200_crg_init);
+
+static void __exit hi3798cv200_crg_exit(void)
+{
+ platform_driver_unregister(&hi3798cv200_crg_driver);
+}
+module_exit(hi3798cv200_crg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3798CV200 CRG Driver");
diff --git a/drivers/clk/hisilicon/crg.h b/drivers/clk/hisilicon/crg.h
new file mode 100644
index 000000000000..e0739717de9a
--- /dev/null
+++ b/drivers/clk/hisilicon/crg.h
@@ -0,0 +1,34 @@
+/*
+ * HiSilicon Clock and Reset Driver Header
+ *
+ * Copyright (c) 2016 HiSilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HISI_CRG_H
+#define __HISI_CRG_H
+
+struct hisi_clock_data;
+struct hisi_reset_controller;
+
+struct hisi_crg_funcs {
+ struct hisi_clock_data* (*register_clks)(struct platform_device *pdev);
+ void (*unregister_clks)(struct platform_device *pdev);
+};
+
+struct hisi_crg_dev {
+ struct hisi_clock_data *clk_data;
+ struct hisi_reset_controller *rstc;
+ const struct hisi_crg_funcs *funcs;
+};
+
+#endif /* __HISI_CRG_H */
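
crg.h keeps the per-SoC register/unregister callbacks behind a const ops structure that the probe function picks from the compatible string (of_device_get_match_data() in the drivers above). A plain-C sketch of that dispatch with stub types and a made-up compatible string:

#include <stdio.h>
#include <string.h>

struct crg_funcs {
        void (*register_clks)(void);
        void (*unregister_clks)(void);
};

static void core_register(void)   { puts("register core CRG clocks"); }
static void core_unregister(void) { puts("unregister core CRG clocks"); }

static const struct crg_funcs core_funcs = {
        .register_clks   = core_register,
        .unregister_clks = core_unregister,
};

struct match_entry {
        const char *compatible;
        const struct crg_funcs *data;
};

static const struct match_entry match_table[] = {
        { "vendor,example-crg", &core_funcs },
        { NULL, NULL },
};

/* Minimal stand-in for of_device_get_match_data(). */
static const struct crg_funcs *get_match_data(const char *compatible)
{
        const struct match_entry *m;

        for (m = match_table; m->compatible; m++)
                if (!strcmp(m->compatible, compatible))
                        return m->data;
        return NULL;
}

int main(void)
{
        const struct crg_funcs *funcs = get_match_data("vendor,example-crg");

        if (funcs) {
                funcs->register_clks();
                funcs->unregister_clks();
        }
        return 0;
}
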
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ce8ea10407e4..42ffc1c92bab 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -156,10 +156,267 @@ static struct clk ** const uart_clks[] __initconst = {
NULL
};
+static int ldb_di_sel_by_clock_id(int clock_id)
+{
+ switch (clock_id) {
+ case IMX6QDL_CLK_PLL5_VIDEO_DIV:
+ if (clk_on_imx6q() &&
+ imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
+ return -ENOENT;
+ return 0;
+ case IMX6QDL_CLK_PLL2_PFD0_352M:
+ return 1;
+ case IMX6QDL_CLK_PLL2_PFD2_396M:
+ return 2;
+ case IMX6QDL_CLK_MMDC_CH1_AXI:
+ return 3;
+ case IMX6QDL_CLK_PLL3_USB_OTG:
+ return 4;
+ default:
+ return -ENOENT;
+ }
+}
+
+static void of_assigned_ldb_sels(struct device_node *node,
+ unsigned int *ldb_di0_sel,
+ unsigned int *ldb_di1_sel)
+{
+ struct of_phandle_args clkspec;
+ int index, rc, num_parents;
+ int parent, child, sel;
+
+ num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
+ "#clock-cells");
+ for (index = 0; index < num_parents; index++) {
+ rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
+ "#clock-cells", index, &clkspec);
+ if (rc < 0) {
+ /* skip empty (null) phandles */
+ if (rc == -ENOENT)
+ continue;
+ else
+ return;
+ }
+ if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
+ pr_err("ccm: parent clock %d not in ccm\n", index);
+ return;
+ }
+ parent = clkspec.args[0];
+
+ rc = of_parse_phandle_with_args(node, "assigned-clocks",
+ "#clock-cells", index, &clkspec);
+ if (rc < 0)
+ return;
+ if (clkspec.np != node || clkspec.args[0] >= IMX6QDL_CLK_END) {
+ pr_err("ccm: child clock %d not in ccm\n", index);
+ return;
+ }
+ child = clkspec.args[0];
+
+ if (child != IMX6QDL_CLK_LDB_DI0_SEL &&
+ child != IMX6QDL_CLK_LDB_DI1_SEL)
+ continue;
+
+ sel = ldb_di_sel_by_clock_id(parent);
+ if (sel < 0) {
+ pr_err("ccm: invalid ldb_di%d parent clock: %d\n",
+ child == IMX6QDL_CLK_LDB_DI1_SEL, parent);
+ continue;
+ }
+
+ if (child == IMX6QDL_CLK_LDB_DI0_SEL)
+ *ldb_di0_sel = sel;
+ if (child == IMX6QDL_CLK_LDB_DI1_SEL)
+ *ldb_di1_sel = sel;
+ }
+}
+
+#define CCM_CCDR 0x04
+#define CCM_CCSR 0x0c
+#define CCM_CS2CDR 0x2c
+
+#define CCDR_MMDC_CH1_MASK BIT(16)
+#define CCSR_PLL3_SW_CLK_SEL BIT(0)
+
+#define CS2CDR_LDB_DI0_CLK_SEL_SHIFT 9
+#define CS2CDR_LDB_DI1_CLK_SEL_SHIFT 12
+
+static void __init imx6q_mmdc_ch1_mask_handshake(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ reg = readl_relaxed(ccm_base + CCM_CCDR);
+ reg |= CCDR_MMDC_CH1_MASK;
+ writel_relaxed(reg, ccm_base + CCM_CCDR);
+}
+
+/*
+ * The only way to disable the MMDC_CH1 clock is to move it to pll3_sw_clk
+ * via periph2_clk2_sel and then to disable pll3_sw_clk by selecting the
+ * bypass clock source, since there is no CG bit for mmdc_ch1.
+ */
+static void mmdc_ch1_disable(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2_CLK2_SEL],
+ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
+
+ /*
+ * Handshake with mmdc_ch1 module must be masked when changing
+ * periph2_clk_sel.
+ */
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_CLK2]);
+
+ /* Disable pll3_sw_clk by selecting the bypass clock source */
+ reg = readl_relaxed(ccm_base + CCM_CCSR);
+ reg |= CCSR_PLL3_SW_CLK_SEL;
+ writel_relaxed(reg, ccm_base + CCM_CCSR);
+}
+
+static void mmdc_ch1_reenable(void __iomem *ccm_base)
+{
+ unsigned int reg;
+
+ /* Enable pll3_sw_clk by disabling the bypass */
+ reg = readl_relaxed(ccm_base + CCM_CCSR);
+ reg &= ~CCSR_PLL3_SW_CLK_SEL;
+ writel_relaxed(reg, ccm_base + CCM_CCSR);
+
+ clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_PRE]);
+}
+
+/*
+ * We have to follow a strict procedure when changing the LDB clock source,
+ * otherwise we risk introducing a glitch that can lock up the LDB divider.
+ * Things to keep in mind:
+ *
+ * 1. The current and new parent clock inputs to the mux must be disabled.
+ * 2. The default clock input for ldb_di0/1_clk_sel is mmdc_ch1_axi, which
+ * has no CG bit.
+ * 3. pll2_pfd2_396m cannot be gated if it is used as the memory clock.
+ * 4. In the RTL implementation of the LDB_DI_CLK_SEL muxes the top four
+ * options are in one mux and the PLL3 option along with three unused
+ * inputs is in a second mux. There is a third mux with two inputs used
+ * to decide between the first and second 4-port mux:
+ *
+ * pll5_video_div 0 --|\
+ * pll2_pfd0_352m 1 --| |_
+ * pll2_pfd2_396m 2 --| | `-|\
+ * mmdc_ch1_axi 3 --|/ | |
+ * | |--
+ * pll3_usb_otg 4 --|\ | |
+ * 5 --| |_,-|/
+ * 6 --| |
+ * 7 --|/
+ *
+ * The ldb_di0/1_clk_sel[1:0] bits control both 4-port muxes at the same time.
+ * The ldb_di0/1_clk_sel[2] bit controls the 2-port mux. The code below
+ * switches the parent to the bottom mux first and then manipulates the top
+ * mux to ensure that no glitch will enter the divider.
+ */
+static void init_ldb_clks(struct device_node *np, void __iomem *ccm_base)
+{
+ unsigned int reg;
+ unsigned int sel[2][4];
+ int i;
+
+ reg = readl_relaxed(ccm_base + CCM_CS2CDR);
+ sel[0][0] = (reg >> CS2CDR_LDB_DI0_CLK_SEL_SHIFT) & 7;
+ sel[1][0] = (reg >> CS2CDR_LDB_DI1_CLK_SEL_SHIFT) & 7;
+
+ sel[0][3] = sel[0][2] = sel[0][1] = sel[0][0];
+ sel[1][3] = sel[1][2] = sel[1][1] = sel[1][0];
+
+ of_assigned_ldb_sels(np, &sel[0][3], &sel[1][3]);
+
+ for (i = 0; i < 2; i++) {
+ /* Warn if a glitch might have been introduced already */
+ if (sel[i][0] != 3) {
+ pr_warn("ccm: ldb_di%d_sel already changed from reset value: %d\n",
+ i, sel[i][0]);
+ }
+
+ if (sel[i][0] == sel[i][3])
+ continue;
+
+ /* Only switch to or from pll2_pfd2_396m if it is disabled */
+ if ((sel[i][0] == 2 || sel[i][3] == 2) &&
+ (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) ==
+ clk[IMX6QDL_CLK_PLL2_PFD2_396M])) {
+ pr_err("ccm: ldb_di%d_sel: couldn't disable pll2_pfd2_396m\n",
+ i);
+ sel[i][3] = sel[i][2] = sel[i][1] = sel[i][0];
+ continue;
+ }
+
+ /* First switch to the bottom mux */
+ sel[i][1] = sel[i][0] | 4;
+
+ /* Then configure the top mux before switching back to it */
+ sel[i][2] = sel[i][3] | 4;
+
+ pr_debug("ccm: switching ldb_di%d_sel: %d->%d->%d->%d\n", i,
+ sel[i][0], sel[i][1], sel[i][2], sel[i][3]);
+ }
+
+ if (sel[0][0] == sel[0][3] && sel[1][0] == sel[1][3])
+ return;
+
+ mmdc_ch1_disable(ccm_base);
+
+ for (i = 1; i < 4; i++) {
+ reg = readl_relaxed(ccm_base + CCM_CS2CDR);
+ reg &= ~((7 << CS2CDR_LDB_DI0_CLK_SEL_SHIFT) |
+ (7 << CS2CDR_LDB_DI1_CLK_SEL_SHIFT));
+ reg |= ((sel[0][i] << CS2CDR_LDB_DI0_CLK_SEL_SHIFT) |
+ (sel[1][i] << CS2CDR_LDB_DI1_CLK_SEL_SHIFT));
+ writel_relaxed(reg, ccm_base + CCM_CS2CDR);
+ }
+
+ mmdc_ch1_reenable(ccm_base);
+}
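/*
 * Worked example (illustrative): switching ldb_di0_sel from its reset value
 * mmdc_ch1_axi (3) to pll5_video_div (0) programs the intermediate values
 * 3 -> 7 -> 4 -> 0. Step one sets bit 2 (7 = 3 | 4) so the 2-port mux hands
 * control to the bottom 4-port mux, step two reprograms the top mux while it
 * is deselected (4 = 0 | 4), and step three clears bit 2 to switch back to
 * the now-stable top mux. MMDC channel 1 is parked on the bypassed
 * pll3_sw_clk for the duration, so neither mux input toggles mid-switch.
 */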
+
+#define CCM_ANALOG_PLL_VIDEO 0xa0
+#define CCM_ANALOG_PFD_480 0xf0
+#define CCM_ANALOG_PFD_528 0x100
+
+#define PLL_ENABLE BIT(13)
+
+#define PFD0_CLKGATE BIT(7)
+#define PFD1_CLKGATE BIT(15)
+#define PFD2_CLKGATE BIT(23)
+#define PFD3_CLKGATE BIT(31)
+
+static void disable_anatop_clocks(void __iomem *anatop_base)
+{
+ unsigned int reg;
+
+ /* Make sure PLL2 PFDs 0-2 are gated */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PFD_528);
+ /* Cannot gate PFD2 if pll2_pfd2_396m is the parent of MMDC clock */
+ if (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) ==
+ clk[IMX6QDL_CLK_PLL2_PFD2_396M])
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE;
+ else
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE | PFD2_CLKGATE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PFD_528);
+
+ /* Make sure PLL3 PFDs 0-3 are gated */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PFD_480);
+ reg |= PFD0_CLKGATE | PFD1_CLKGATE | PFD2_CLKGATE | PFD3_CLKGATE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PFD_480);
+
+ /* Make sure PLL5 is disabled */
+ reg = readl_relaxed(anatop_base + CCM_ANALOG_PLL_VIDEO);
+ reg &= ~PLL_ENABLE;
+ writel_relaxed(reg, anatop_base + CCM_ANALOG_PLL_VIDEO);
+}
+
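The PFD clock-gate bits used above sit in bit 7 of each 8-bit PFD field of the anatop PFD registers, i.e. bits 7, 15, 23 and 31. A minimal sketch of the pattern; the PFD_CLKGATE() helper is hypothetical and not something this patch adds:

/* PFDn gate bit = bit 7 of the n-th 8-bit PFD field */
#define PFD_CLKGATE(n)	BIT(7 + 8 * (n))	/* hypothetical helper, illustration only */
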
static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
- void __iomem *base;
+ void __iomem *anatop_base, *base;
int i;
int ret;
@@ -172,7 +429,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_ANACLK2] = imx_obtain_fixed_clock("anaclk2", 0);
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
- base = of_iomap(np, 0);
+ anatop_base = base = of_iomap(np, 0);
WARN_ON(!base);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
@@ -330,8 +587,20 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
- clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+
+ disable_anatop_clocks(anatop_base);
+
+ imx6q_mmdc_ch1_mask_handshake(base);
+
+ /*
+ * The LDB_DI0/1_SEL muxes are registered read-only due to a hardware
+ * bug. Set the muxes to the requested values before registering the
+ * ldb_di_sel clocks.
+ */
+ init_ldb_clks(np, base);
+
+ clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
@@ -582,12 +851,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk_register_clkdev(clk[IMX6QDL_CLK_ENET_REF], "enet_ref", NULL);
- if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
- clk_on_imx6dl()) {
- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
- }
-
clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000);
if (clk_on_imx6dl())
clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index d1d7787ce211..75c35fb12b60 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -64,6 +64,10 @@ static const char *perclk_sels[] = { "ipg", "osc", };
static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+/* epdc_pre_sels, epdc_sels and esai_sels only exist on i.MX6ULL */
+static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *esai_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
@@ -102,6 +106,17 @@ static u32 share_count_audio;
static u32 share_count_sai1;
static u32 share_count_sai2;
static u32 share_count_sai3;
+static u32 share_count_esai;
+
+static inline int clk_on_imx6ul(void)
+{
+ return of_machine_is_compatible("fsl,imx6ul");
+}
+
+static inline int clk_on_imx6ull(void)
+{
+ return of_machine_is_compatible("fsl,imx6ull");
+}
static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
@@ -238,12 +253,19 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ if (clk_on_imx6ull())
+ clks[IMX6ULL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, esai_sels, ARRAY_SIZE(esai_sels));
clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
- clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
- clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+ clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels));
+ clks[IMX6ULL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
+ }
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
@@ -276,6 +298,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_SAI3_PODF] = imx_clk_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6);
clks[IMX6UL_CLK_SAI1_PRED] = imx_clk_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3);
clks[IMX6UL_CLK_SAI1_PODF] = imx_clk_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6);
+ if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
+ clks[IMX6ULL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
+ }
clks[IMX6UL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
clks[IMX6UL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
clks[IMX6UL_CLK_SAI2_PRED] = imx_clk_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3);
@@ -298,9 +324,15 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
- clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
- clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
- clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
+ clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
+ clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_DCP_CLK] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
+ clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x68, 12);
+ clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x68, 12);
+ }
clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
@@ -309,7 +341,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26);
clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
- clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ if (clk_on_imx6ul())
+ clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ else if (clk_on_imx6ull())
+ clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -328,6 +363,11 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
/* CCGR2 */
+ if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x70, 0, &share_count_esai);
+ clks[IMX6ULL_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai);
+ clks[IMX6ULL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai);
+ }
clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2);
clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
@@ -340,8 +380,13 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* CCGR3 */
clks[IMX6UL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2);
clks[IMX6UL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
- clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4);
- clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4);
+ clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4);
+ } else if (clk_on_imx6ull()) {
+ clks[IMX6ULL_CLK_EPDC_ACLK] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
+ clks[IMX6ULL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
+ }
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
@@ -385,8 +430,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
clks[IMX6UL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
clks[IMX6UL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
- clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6);
- clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8);
+ if (clk_on_imx6ul()) {
+ clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6);
+ clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8);
+ }
clks[IMX6UL_CLK_EIM] = imx_clk_gate2("eim", "eim_slow_podf", base + 0x80, 10);
clks[IMX6UL_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16);
clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14);
@@ -441,7 +488,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
}
clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]);
- clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ if (clk_on_imx6ul())
+ clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ else if (clk_on_imx6ull())
+ clk_set_parent(clks[IMX6ULL_CLK_EPDC_PRE_SEL], clks[IMX6UL_CLK_PLL3_PFD2]);
clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]);
}
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 19f9b622981a..ed3a2df536ea 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
temp64 *= mfn;
do_div(temp64, mfd);
- return (parent_rate * div) + (u32)temp64;
+ return parent_rate * div + (unsigned long)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -234,6 +234,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long max_rate = parent_rate * 54;
u32 div;
u32 mfn, mfd = 1000000;
+ u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate > max_rate)
@@ -241,13 +242,20 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
else if (rate < min_rate)
rate = min_rate;
+ if (parent_rate <= max_mfd)
+ mfd = parent_rate;
+
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
do_div(temp64, parent_rate);
mfn = temp64;
- return parent_rate * div + parent_rate * mfn / mfd;
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return parent_rate * div + (unsigned long)temp64;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -258,11 +266,15 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long max_rate = parent_rate * 54;
u32 val, div;
u32 mfn, mfd = 1000000;
+ u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
if (rate < min_rate || rate > max_rate)
return -EINVAL;
+ if (parent_rate <= max_mfd)
+ mfd = parent_rate;
+
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
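
A rough worked example of the mfd change above (numbers chosen for illustration, not taken from the patch): with parent_rate = 24 MHz and a requested rate of 786432000 Hz (within the 27x-54x range), the calculation becomes

    div = 786432000 / 24000000 = 32                    -> 768000000 Hz from the integer part
    rem = 786432000 - 768000000 = 18432000             -> mfn = rem * mfd / parent_rate = 18432000, since mfd = parent_rate
    rate = 768000000 + 24000000 * 18432000 / 24000000  = 786432000 Hz

Setting mfd to parent_rate makes mfn simply the remainder in Hz, and the final multiply/divide is done on a u64 via do_div(); the equivalent 32-bit expression parent_rate * mfn (24000000 * 768000 with the old mfd of 1000000) would not fit in a 32-bit unsigned long.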
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 3799ff82a9b4..4afad3b96a61 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -75,6 +75,14 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
+static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parents, int num_parents)
+{
+ return clk_register_mux(NULL, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
+ shift, width, CLK_MUX_READ_ONLY, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_fixed_factor(const char *name,
const char *parent, unsigned int mult, unsigned int div)
{
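
The CLK_MUX_READ_ONLY flag in imx_clk_mux_ldb() means the clock framework never writes the LDB_DI[01]_CLK_SEL field itself; the hardware field is only programmed by init_ldb_clks() above. A behavioural sketch, assuming the common clock framework's usual handling of a multi-parent mux without a .set_parent op (this call is an illustration, not part of the patch):

	/* Assumed behaviour, not in the patch: */
	ret = clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL],
			     clk[IMX6QDL_CLK_PLL3_USB_OTG]);
	/* expected to fail (typically -EPERM) unless pll3_usb_otg is already
	 * the parent that init_ldb_clks() programmed */

which is consistent with the removal of the unconditional clk_set_parent() calls for these muxes in imx6q_clocks_init() above.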
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index a26ba2184454..e7e840fb74ea 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -154,7 +154,7 @@ out:
}
/**
- * _of_clk_init - PLL initialisation via DT
+ * _of_pll_clk_init - PLL initialisation via DT
* @node: device tree node for this clock
* @pllctrl: If true, lower 6 bits of multiplier is in pllm register of
* pll controller, else it is in the control register0(bit 11-6)
@@ -235,7 +235,7 @@ CLK_OF_DECLARE(keystone_pll_clock, "ti,keystone,pll-clock",
of_keystone_pll_clk_init);
/**
- * of_keystone_pll_main_clk_init - Main PLL initialisation DT wrapper
+ * of_keystone_main_pll_clk_init - Main PLL initialisation DT wrapper
* @node: device tree node for this clock
*/
static void __init of_keystone_main_pll_clk_init(struct device_node *node)
@@ -267,25 +267,30 @@ static void __init of_pll_div_clk_init(struct device_node *node)
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
pr_err("%s: missing parent clock\n", __func__);
+ iounmap(reg);
return;
}
if (of_property_read_u32(node, "bit-shift", &shift)) {
pr_err("%s: missing 'shift' property\n", __func__);
+ iounmap(reg);
return;
}
if (of_property_read_u32(node, "bit-mask", &mask)) {
pr_err("%s: missing 'bit-mask' property\n", __func__);
+ iounmap(reg);
return;
}
clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
mask, 0, NULL);
- if (clk)
+ if (clk) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- else
+ } else {
pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ iounmap(reg);
+ }
}
CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
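
The three added iounmap() calls plug an I/O-mapping leak on the early error paths of of_pll_div_clk_init(). An equivalent structure, shown only as a sketch of the usual single-exit kernel idiom rather than what this patch does, funnels every failure through one cleanup label:

	/* Single-exit sketch (illustration only, same calls as the function above): */
	if (of_property_read_u32(node, "bit-shift", &shift) ||
	    of_property_read_u32(node, "bit-mask", &mask)) {
		pr_err("%s: missing 'bit-shift' or 'bit-mask' property\n", __func__);
		goto unmap;
	}

	clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
				   mask, 0, NULL);
	if (!clk) {
		pr_err("%s: error registering divider %s\n", __func__, clk_name);
		goto unmap;
	}

	of_clk_add_provider(node, of_clk_src_simple_get, clk);
	return;

unmap:
	iounmap(reg);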
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index f042bd2a6a99..0bd631a41f6a 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -6,6 +6,49 @@ config COMMON_CLK_MEDIATEK
---help---
Mediatek SoCs' clock support.
+config COMMON_CLK_MT2701
+ bool "Clock driver for Mediatek MT2701"
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports Mediatek MT2701 basic clocks.
+
+config COMMON_CLK_MT2701_MMSYS
+ bool "Clock driver for Mediatek MT2701 mmsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 mmsys clocks.
+
+config COMMON_CLK_MT2701_IMGSYS
+ bool "Clock driver for Mediatek MT2701 imgsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 imgsys clocks.
+
+config COMMON_CLK_MT2701_VDECSYS
+ bool "Clock driver for Mediatek MT2701 vdecsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 vdecsys clocks.
+
+config COMMON_CLK_MT2701_HIFSYS
+ bool "Clock driver for Mediatek MT2701 hifsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 hifsys clocks.
+
+config COMMON_CLK_MT2701_ETHSYS
+ bool "Clock driver for Mediatek MT2701 ethsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 ethsys clocks.
+
+config COMMON_CLK_MT2701_BDPSYS
+ bool "Clock driver for Mediatek MT2701 bdpsys"
+ select COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 bdpsys clocks.
+
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
depends on ARCH_MEDIATEK || COMPILE_TEST
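
Each of the new MT2701 sub-system options selects COMMON_CLK_MT2701, which in turn selects COMMON_CLK_MEDIATEK, so enabling a single peripheral clock driver pulls in the required core support. As an illustration (a hypothetical board needing only the Ethernet and host-interface clocks), the resulting .config would contain:

CONFIG_COMMON_CLK_MEDIATEK=y
CONFIG_COMMON_CLK_MT2701=y
CONFIG_COMMON_CLK_MT2701_ETHSYS=y
CONFIG_COMMON_CLK_MT2701_HIFSYS=y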
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 32e7222e7305..19ae7ef79b57 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,4 +1,11 @@
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o
+obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o
+obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o
+obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
+obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
+obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
+obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index d8787bf444eb..934bf0e45e26 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -61,6 +61,22 @@ static void mtk_cg_clr_bit(struct clk_hw *hw)
regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
}
+static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 cgbit = BIT(cg->bit);
+
+ regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, cgbit);
+}
+
+static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 cgbit = BIT(cg->bit);
+
+ regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, 0);
+}
+
static int mtk_cg_enable(struct clk_hw *hw)
{
mtk_cg_clr_bit(hw);
@@ -85,6 +101,30 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_clr_bit_no_setclr(hw);
+
+ return 0;
+}
+
+static void mtk_cg_disable_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_set_bit_no_setclr(hw);
+}
+
+static int mtk_cg_enable_inv_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_set_bit_no_setclr(hw);
+
+ return 0;
+}
+
+static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
+{
+ mtk_cg_clr_bit_no_setclr(hw);
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -97,6 +137,18 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.disable = mtk_cg_disable_inv,
};
+const struct clk_ops mtk_clk_gate_ops_no_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_enable_no_setclr,
+ .disable = mtk_cg_disable_no_setclr,
+};
+
+const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_enable_inv_no_setclr,
+ .disable = mtk_cg_disable_inv_no_setclr,
+};
+
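The new *_no_setclr ops cover clock-gate blocks (such as the ETHSYS and HIFSYS drivers later in this series) that expose only a single status/control register, so the bit is toggled in place with regmap_update_bits() instead of being written through dedicated set/clear registers; the _inv variants treat a set bit as "clock running". A hypothetical gate macro for a block with no set/clear registers and non-inverted polarity (only the _inv form appears in this series; foo_cg_regs is an assumed name) could look like:

#define GATE_FOO(_id, _name, _parent, _shift) {			\
		.id = _id,						\
		.name = _name,						\
		.parent_name = _parent,					\
		.regs = &foo_cg_regs,	/* only .sta_ofs filled in */	\
		.shift = _shift,					\
		.ops = &mtk_clk_gate_ops_no_setclr,			\
	}
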
struct clk *mtk_clk_register_gate(
const char *name,
const char *parent_name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index b1821603b887..72ef89b3ad7b 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -36,6 +36,8 @@ static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
+extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
struct clk *mtk_clk_register_gate(
const char *name,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
new file mode 100644
index 000000000000..fe4964d05b5f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs bdp0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs bdp1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_BDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_BDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate bdp_clks[] = {
+ GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+ GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
+ GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
+ GATE_BDP0(CLK_BDP_WR_VDI_PXL, "wr_vdi_pxl", "hdmi_0_deep340m", 3),
+ GATE_BDP0(CLK_BDP_WR_VDI_DRAM, "wr_vdi_dram", "mm_sel", 4),
+ GATE_BDP0(CLK_BDP_WR_B, "wr_bclk", "mm_sel", 5),
+ GATE_BDP0(CLK_BDP_DGI_IN, "dgi_in", "dpi1_sel", 6),
+ GATE_BDP0(CLK_BDP_DGI_OUT, "dgi_out", "dpi1_sel", 7),
+ GATE_BDP0(CLK_BDP_FMT_MAST_27, "fmt_mast_27", "dpi1_sel", 8),
+ GATE_BDP0(CLK_BDP_FMT_B, "fmt_bclk", "mm_sel", 9),
+ GATE_BDP0(CLK_BDP_OSD_B, "osd_bclk", "mm_sel", 10),
+ GATE_BDP0(CLK_BDP_OSD_DRAM, "osd_dram", "mm_sel", 11),
+ GATE_BDP0(CLK_BDP_OSD_AGENT, "osd_agent", "osd_sel", 12),
+ GATE_BDP0(CLK_BDP_OSD_PXL, "osd_pxl", "dpi1_sel", 13),
+ GATE_BDP0(CLK_BDP_RLE_B, "rle_bclk", "mm_sel", 14),
+ GATE_BDP0(CLK_BDP_RLE_AGENT, "rle_agent", "mm_sel", 15),
+ GATE_BDP0(CLK_BDP_RLE_DRAM, "rle_dram", "mm_sel", 16),
+ GATE_BDP0(CLK_BDP_F27M, "f27m", "di_sel", 17),
+ GATE_BDP0(CLK_BDP_F27M_VDOUT, "f27m_vdout", "di_sel", 18),
+ GATE_BDP0(CLK_BDP_F27_74_74, "f27_74_74", "di_sel", 19),
+ GATE_BDP0(CLK_BDP_F2FS, "f2fs", "di_sel", 20),
+ GATE_BDP0(CLK_BDP_F2FS74_148, "f2fs74_148", "di_sel", 21),
+ GATE_BDP0(CLK_BDP_FB, "fbclk", "mm_sel", 22),
+ GATE_BDP0(CLK_BDP_VDO_DRAM, "vdo_dram", "mm_sel", 23),
+ GATE_BDP0(CLK_BDP_VDO_2FS, "vdo_2fs", "di_sel", 24),
+ GATE_BDP0(CLK_BDP_VDO_B, "vdo_bclk", "mm_sel", 25),
+ GATE_BDP0(CLK_BDP_WR_DI_PXL, "wr_di_pxl", "di_sel", 26),
+ GATE_BDP0(CLK_BDP_WR_DI_DRAM, "wr_di_dram", "mm_sel", 27),
+ GATE_BDP0(CLK_BDP_WR_DI_B, "wr_di_bclk", "mm_sel", 28),
+ GATE_BDP0(CLK_BDP_NR_PXL, "nr_pxl", "nr_sel", 29),
+ GATE_BDP0(CLK_BDP_NR_DRAM, "nr_dram", "mm_sel", 30),
+ GATE_BDP0(CLK_BDP_NR_B, "nr_bclk", "mm_sel", 31),
+ GATE_BDP1(CLK_BDP_RX_F, "rx_fclk", "hadds2_fbclk", 0),
+ GATE_BDP1(CLK_BDP_RX_X, "rx_xclk", "clk26m", 1),
+ GATE_BDP1(CLK_BDP_RXPDT, "rxpdtclk", "hdmi_0_pix340m", 2),
+ GATE_BDP1(CLK_BDP_RX_CSCL_N, "rx_cscl_n", "clk26m", 3),
+ GATE_BDP1(CLK_BDP_RX_CSCL, "rx_cscl", "clk26m", 4),
+ GATE_BDP1(CLK_BDP_RX_DDCSCL_N, "rx_ddcscl_n", "hdmi_scl_rx", 5),
+ GATE_BDP1(CLK_BDP_RX_DDCSCL, "rx_ddcscl", "hdmi_scl_rx", 6),
+ GATE_BDP1(CLK_BDP_RX_VCO, "rx_vcoclk", "hadds2pll_294m", 7),
+ GATE_BDP1(CLK_BDP_RX_DP, "rx_dpclk", "hdmi_0_pll340m", 8),
+ GATE_BDP1(CLK_BDP_RX_P, "rx_pclk", "hdmi_0_pll340m", 9),
+ GATE_BDP1(CLK_BDP_RX_M, "rx_mclk", "hadds2pll_294m", 10),
+ GATE_BDP1(CLK_BDP_RX_PLL, "rx_pllclk", "hdmi_0_pix340m", 11),
+ GATE_BDP1(CLK_BDP_BRG_RT_B, "brg_rt_bclk", "mm_sel", 12),
+ GATE_BDP1(CLK_BDP_BRG_RT_DRAM, "brg_rt_dram", "mm_sel", 13),
+ GATE_BDP1(CLK_BDP_LARBRT_DRAM, "larbrt_dram", "mm_sel", 14),
+ GATE_BDP1(CLK_BDP_TMDS_SYN, "tmds_syn", "hdmi_0_pll340m", 15),
+ GATE_BDP1(CLK_BDP_HDMI_MON, "hdmi_mon", "hdmi_0_pll340m", 16),
+};
+
+static const struct of_device_id of_match_clk_mt2701_bdp[] = {
+ { .compatible = "mediatek,mt2701-bdpsys", },
+ {}
+};
+
+static int clk_mt2701_bdp_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_BDP_NR);
+
+ mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_bdp_drv = {
+ .probe = clk_mt2701_bdp_probe,
+ .driver = {
+ .name = "clk-mt2701-bdp",
+ .of_match_table = of_match_clk_mt2701_bdp,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_bdp_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
new file mode 100644
index 000000000000..877be8715afa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .sta_ofs = 0x0030,
+};
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate eth_clks[] = {
+ GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
+ GATE_ETH(CLK_ETHSYS_ESW, "esw_clk", "ethpll_500m_ck", 6),
+ GATE_ETH(CLK_ETHSYS_GP2, "gp2_clk", "trgpll", 7),
+ GATE_ETH(CLK_ETHSYS_GP1, "gp1_clk", "ethpll_500m_ck", 8),
+ GATE_ETH(CLK_ETHSYS_PCM, "pcm_clk", "ethif_sel", 11),
+ GATE_ETH(CLK_ETHSYS_GDMA, "gdma_clk", "ethif_sel", 14),
+ GATE_ETH(CLK_ETHSYS_I2S, "i2s_clk", "ethif_sel", 17),
+ GATE_ETH(CLK_ETHSYS_CRYPTO, "crypto_clk", "ethif_sel", 29),
+};
+
+static const struct of_device_id of_match_clk_mt2701_eth[] = {
+ { .compatible = "mediatek,mt2701-ethsys", },
+ {}
+};
+
+static int clk_mt2701_eth_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETHSYS_NR);
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_eth_drv = {
+ .probe = clk_mt2701_eth_probe,
+ .driver = {
+ .name = "clk-mt2701-eth",
+ .of_match_table = of_match_clk_mt2701_eth,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
new file mode 100644
index 000000000000..18f3723be3e8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs hif_cg_regs = {
+ .sta_ofs = 0x0030,
+};
+
+#define GATE_HIF(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &hif_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate hif_clks[] = {
+ GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
+ GATE_HIF(CLK_HIFSYS_USB1PHY, "usb1_phy_clk", "ethpll_500m_ck", 22),
+ GATE_HIF(CLK_HIFSYS_PCIE0, "pcie0_clk", "ethpll_500m_ck", 24),
+ GATE_HIF(CLK_HIFSYS_PCIE1, "pcie1_clk", "ethpll_500m_ck", 25),
+ GATE_HIF(CLK_HIFSYS_PCIE2, "pcie2_clk", "ethpll_500m_ck", 26),
+};
+
+static const struct of_device_id of_match_clk_mt2701_hif[] = {
+ { .compatible = "mediatek,mt2701-hifsys", },
+ {}
+};
+
+static int clk_mt2701_hif_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_HIFSYS_NR);
+
+ mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+ return r;
+ }
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt2701_hif_drv = {
+ .probe = clk_mt2701_hif_probe,
+ .driver = {
+ .name = "clk-mt2701-hif",
+ .of_match_table = of_match_clk_mt2701_hif,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_hif_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
new file mode 100644
index 000000000000..b7441c98bda8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x0004,
+ .clr_ofs = 0x0008,
+ .sta_ofs = 0x0000,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
+ GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
+ GATE_IMG(CLK_IMG_JPGDEC, "img_jpgdec", "mm_sel", 6),
+ GATE_IMG(CLK_IMG_VENC_LT, "img_venc_lt", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_VENC, "img_venc", "mm_sel", 9),
+};
+
+static const struct of_device_id of_match_clk_mt2701_img[] = {
+ { .compatible = "mediatek,mt2701-imgsys", },
+ {}
+};
+
+static int clk_mt2701_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_img_drv = {
+ .probe = clk_mt2701_img_probe,
+ .driver = {
+ .name = "clk-mt2701-img",
+ .of_match_table = of_match_clk_mt2701_img,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
new file mode 100644
index 000000000000..fe1f85072fc5
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs disp0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs disp1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_DISP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &disp0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_DISP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &disp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+ GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
+ GATE_DISP0(CLK_MM_MUTEX, "mm_mutex", "mm_sel", 3),
+ GATE_DISP0(CLK_MM_DISP_COLOR, "mm_disp_color", "mm_sel", 4),
+ GATE_DISP0(CLK_MM_DISP_BLS, "mm_disp_bls", "mm_sel", 5),
+ GATE_DISP0(CLK_MM_DISP_WDMA, "mm_disp_wdma", "mm_sel", 6),
+ GATE_DISP0(CLK_MM_DISP_RDMA, "mm_disp_rdma", "mm_sel", 7),
+ GATE_DISP0(CLK_MM_DISP_OVL, "mm_disp_ovl", "mm_sel", 8),
+ GATE_DISP0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 9),
+ GATE_DISP0(CLK_MM_MDP_WROT, "mm_mdp_wrot", "mm_sel", 10),
+ GATE_DISP0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_DISP0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 12),
+ GATE_DISP0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 13),
+ GATE_DISP0(CLK_MM_MDP_RDMA, "mm_mdp_rdma", "mm_sel", 14),
+ GATE_DISP0(CLK_MM_MDP_BLS_26M, "mm_mdp_bls_26m", "pwm_sel", 15),
+ GATE_DISP0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 16),
+ GATE_DISP0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 17),
+ GATE_DISP0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 18),
+ GATE_DISP0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_DISP0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 20),
+ GATE_DISP1(CLK_MM_DSI_ENGINE, "mm_dsi_eng", "mm_sel", 0),
+ GATE_DISP1(CLK_MM_DSI_DIG, "mm_dsi_dig", "dsi0_lntc_dsi", 1),
+ GATE_DISP1(CLK_MM_DPI_DIGL, "mm_dpi_digl", "dpi0_sel", 2),
+ GATE_DISP1(CLK_MM_DPI_ENGINE, "mm_dpi_eng", "mm_sel", 3),
+ GATE_DISP1(CLK_MM_DPI1_DIGL, "mm_dpi1_digl", "dpi1_sel", 4),
+ GATE_DISP1(CLK_MM_DPI1_ENGINE, "mm_dpi1_eng", "mm_sel", 5),
+ GATE_DISP1(CLK_MM_TVE_OUTPUT, "mm_tve_output", "tve_sel", 6),
+ GATE_DISP1(CLK_MM_TVE_INPUT, "mm_tve_input", "dpi0_sel", 7),
+ GATE_DISP1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi1_sel", 8),
+ GATE_DISP1(CLK_MM_HDMI_PLL, "mm_hdmi_pll", "hdmi_sel", 9),
+ GATE_DISP1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll_sel", 10),
+ GATE_DISP1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll_sel", 11),
+ GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
+};
+
+static const struct of_device_id of_match_clk_mt2701_mm[] = {
+ { .compatible = "mediatek,mt2701-mmsys", },
+ {}
+};
+
+static int clk_mt2701_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_mm_drv = {
+ .probe = clk_mt2701_mm_probe,
+ .driver = {
+ .name = "clk-mt2701-mm",
+ .of_match_table = of_match_clk_mt2701_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
new file mode 100644
index 000000000000..d3c0fc9d6f02
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate vdec_clks[] = {
+ GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
+};
+
+static const struct of_device_id of_match_clk_mt2701_vdec[] = {
+ { .compatible = "mediatek,mt2701-vdecsys", },
+ {}
+};
+
+static int clk_mt2701_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_vdec_drv = {
+ .probe = clk_mt2701_vdec_probe,
+ .driver = {
+ .name = "clk-mt2701-vdec",
+ .of_match_table = of_match_clk_mt2701_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
new file mode 100644
index 000000000000..6f26e6a37a6b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+/*
+ * For some clocks we don't care what their actual rates are, and these
+ * clocks may change their rate across different products or scenarios.
+ * So we model such a clock's rate as 0, to denote that it is not an
+ * actual rate.
+ */
+#define DUMMY_RATE 0
+
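Because these fixed clocks are registered with a rate of 0, a consumer reading their rate simply sees 0 and must not derive dividers from it. A minimal sketch (it assumes the consumer already holds the clk handle; the variable name is illustrative):

	unsigned long rate = clk_get_rate(nfi1x_pad);	/* rate == 0, i.e. DUMMY_RATE */
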
+static DEFINE_SPINLOCK(mt2701_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_DPI, "dpi_ck", "clk26m",
+ 108 * MHZ),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", "clk26m",
+ 400 * MHZ),
+ FIXED_CLK(CLK_TOP_VENCPLL, "vencpll_ck", "clk26m",
+ 295750000),
+ FIXED_CLK(CLK_TOP_HDMI_0_PIX340M, "hdmi_0_pix340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_0_DEEP340M, "hdmi_0_deep340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_0_PLL340M, "hdmi_0_pll340m", "clk26m",
+ 340 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_dig_cts", "clk26m",
+ 300 * MHZ),
+ FIXED_CLK(CLK_TOP_HADDS2_FB, "hadds2_fbclk", "clk26m",
+ 27 * MHZ),
+ FIXED_CLK(CLK_TOP_WBG_DIG_416M, "wbg_dig_ck_416m", "clk26m",
+ 416 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC_DSI, "dsi0_lntc_dsi", "clk26m",
+ 143 * MHZ),
+ FIXED_CLK(CLK_TOP_HDMI_SCL_RX, "hdmi_scl_rx", "clk26m",
+ 27 * MHZ),
+ FIXED_CLK(CLK_TOP_AUD_EXT1, "aud_ext1", "clk26m",
+ DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_AUD_EXT2, "aud_ext2", "clk26m",
+ DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_NFI1X_PAD, "nfi1x_pad", "clk26m",
+ DUMMY_RATE),
+};
+
+static const struct mtk_fixed_factor top_fixed_divs[] = {
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4),
+
+ FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_D108, "univpll_d108", "univpll", 1, 108),
+ FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1, 8),
+ FACTOR(CLK_TOP_8BDAC, "8bdac_ck", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll_d3", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll_d3", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1, 8),
+
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8),
+
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "dmpll_ck", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "dmpll_ck", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_X2, "dmpll_x2", "dmpll_ck", 1, 1),
+
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+
+ FACTOR(CLK_TOP_VDECPLL, "vdecpll_ck", "vdecpll", 1, 1),
+ FACTOR(CLK_TOP_TVD2PLL, "tvd2pll_ck", "tvd2pll", 1, 1),
+ FACTOR(CLK_TOP_TVD2PLL_D2, "tvd2pll_d2", "tvd2pll", 1, 2),
+
+ FACTOR(CLK_TOP_MIPIPLL, "mipipll", "dpi_ck", 1, 1),
+ FACTOR(CLK_TOP_MIPIPLL_D2, "mipipll_d2", "dpi_ck", 1, 2),
+ FACTOR(CLK_TOP_MIPIPLL_D4, "mipipll_d4", "dpi_ck", 1, 4),
+
+ FACTOR(CLK_TOP_HDMIPLL, "hdmipll_ck", "hdmitx_dig_cts", 1, 1),
+ FACTOR(CLK_TOP_HDMIPLL_D2, "hdmipll_d2", "hdmitx_dig_cts", 1, 2),
+ FACTOR(CLK_TOP_HDMIPLL_D3, "hdmipll_d3", "hdmitx_dig_cts", 1, 3),
+
+ FACTOR(CLK_TOP_ARMPLL_1P3G, "armpll_1p3g_ck", "armpll", 1, 1),
+
+ FACTOR(CLK_TOP_AUDPLL, "audpll", "audpll_sel", 1, 1),
+ FACTOR(CLK_TOP_AUDPLL_D4, "audpll_d4", "audpll_sel", 1, 4),
+ FACTOR(CLK_TOP_AUDPLL_D8, "audpll_d8", "audpll_sel", 1, 8),
+ FACTOR(CLK_TOP_AUDPLL_D16, "audpll_d16", "audpll_sel", 1, 16),
+ FACTOR(CLK_TOP_AUDPLL_D24, "audpll_d24", "audpll_sel", 1, 24),
+
+ FACTOR(CLK_TOP_AUD1PLL_98M, "aud1pll_98m_ck", "aud1pll", 1, 3),
+ FACTOR(CLK_TOP_AUD2PLL_90M, "aud2pll_90m_ck", "aud2pll", 1, 3),
+ FACTOR(CLK_TOP_HADDS2PLL_98M, "hadds2pll_98m", "hadds2pll", 1, 3),
+ FACTOR(CLK_TOP_HADDS2PLL_294M, "hadds2pll_294m", "hadds2pll", 1, 1),
+ FACTOR(CLK_TOP_ETHPLL_500M, "ethpll_500m_ck", "ethpll", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
+ FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "mmpll_d2",
+ "dmpll_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4",
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vdecpll_ck",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "vencpll_ck",
+ "msdcpll_d2",
+ "mmpll_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_x2_ck",
+ "msdcpll_ck",
+ "clk26m",
+ "syspll_d3",
+ "univpll_d3",
+ "univpll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "msdcpll_d2",
+ "mmpll_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll2_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll3_d2",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll2_d4",
+ "syspll4_d2",
+ "syspll3_d4",
+ "syspll2_d8",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "mipipll",
+ "mipipll_d2",
+ "mipipll_d4",
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const dpi1_parents[] = {
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const tve_parents[] = {
+ "clk26m",
+ "mipipll",
+ "mipipll_d2",
+ "mipipll_d4",
+ "clk26m",
+ "tvdpll_ck",
+ "tvdpll_d2",
+ "tvdpll_d4"
+};
+
+static const char * const hdmi_parents[] = {
+ "clk26m",
+ "hdmipll_ck",
+ "hdmipll_d2",
+ "hdmipll_d3"
+};
+
+static const char * const apll_parents[] = {
+ "clk26m",
+ "audpll",
+ "audpll_d4",
+ "audpll_d8",
+ "audpll_d16",
+ "audpll_d24",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const rtc_parents[] = {
+ "32k_internal",
+ "32k_external",
+ "clk26m",
+ "univpll3_d8"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll3_d2",
+ "syspll2_d4",
+ "univpll3_d4",
+ "syspll4_d4",
+ "clk26m"
+};
+
+static const char * const emmc_hclk_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const flash_parents[] = {
+ "clk26m_d8",
+ "clk26m",
+ "syspll2_d8",
+ "syspll3_d4",
+ "univpll3_d4",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4"
+};
+
+static const char * const di_parents[] = {
+ "clk26m",
+ "tvd2pll_ck",
+ "tvd2pll_d2",
+ "clk26m"
+};
+
+static const char * const nr_osd_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const hdmirx_bist_parents[] = {
+ "clk26m",
+ "syspll_d3",
+ "clk26m",
+ "syspll1_d16",
+ "syspll4_d2",
+ "syspll1_d4",
+ "vencpll_ck",
+ "clk26m"
+};
+
+static const char * const intdir_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll_d2",
+ "univpll_d2"
+};
+
+static const char * const asm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const ms_card_parents[] = {
+ "clk26m",
+ "univpll3_d8",
+ "syspll4_d4"
+};
+
+static const char * const ethif_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "dmpll_ck",
+ "dmpll_d2"
+};
+
+static const char * const hdmirx_parents[] = {
+ "clk26m",
+ "univpll_d52"
+};
+
+static const char * const cmsys_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "syspll2_d2",
+ "syspll1_d4",
+ "syspll3_d2",
+ "syspll2_d4",
+ "syspll1_d8",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const clk_8bdac_parents[] = {
+ "32k_internal",
+ "8bdac_ck",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const aud2dvd_parents[] = {
+ "a1sys_hp_ck",
+ "a2sys_hp_ck"
+};
+
+static const char * const padmclk_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll_d52",
+ "univpll_d108",
+ "univpll2_d8",
+ "univpll2_d16",
+ "univpll2_d32"
+};
+
+static const char * const aud_mux_parents[] = {
+ "clk26m",
+ "aud1pll_98m_ck",
+ "aud2pll_90m_ck",
+ "hadds2pll_98m",
+ "audio_ext1_ck",
+ "audio_ext2_ck"
+};
+
+static const char * const aud_src_parents[] = {
+ "aud_mux1_sel",
+ "aud_mux2_sel"
+};
+
+static const char * const cpu_parents[] = {
+ "clk26m",
+ "armpll",
+ "mainpll",
+ "mmpll"
+};
+
+static const struct mtk_composite top_muxes[] = {
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x0040, 0, 3, 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x0040, 8, 1, 15, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel",
+ ddrphycfg_parents, 0x0040, 16, 1, 23, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents,
+ 0x0040, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x0050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents,
+ 0x0050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents,
+ 0x0050, 16, 3, 23),
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents,
+ 0x0050, 24, 3, 31),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x0060, 0, 1, 7),
+
+ MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi_parents,
+ 0x0060, 8, 3, 15),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents,
+ 0x0060, 16, 2, 23),
+ MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_parents,
+ 0x0060, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_parents,
+ 0x0070, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_parents,
+ 0x0070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", msdc30_parents,
+ 0x0070, 16, 1, 23),
+ MUX_GATE(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x0070, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x0080, 0, 4, 7),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents,
+ 0x0080, 8, 2, 15),
+ MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents,
+ 0x0080, 16, 3, 23),
+ MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
+ 0x0080, 24, 2, 31),
+
+ MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents,
+ 0x0090, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents,
+ 0x0090, 8, 2, 15),
+ MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents,
+ 0x0090, 16, 3, 23),
+
+ MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents,
+ 0x00A0, 0, 2, 7, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel", nfi2x_parents,
+ 0x00A0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_EMMC_HCLK_SEL, "emmc_hclk_sel", emmc_hclk_parents,
+ 0x00A0, 24, 2, 31),
+
+ MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+ 0x00B0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_DI_SEL, "di_sel", di_parents,
+ 0x00B0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_NR_SEL, "nr_sel", nr_osd_parents,
+ 0x00B0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_OSD_SEL, "osd_sel", nr_osd_parents,
+ 0x00B0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_HDMIRX_BIST_SEL, "hdmirx_bist_sel",
+ hdmirx_bist_parents, 0x00C0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_INTDIR_SEL, "intdir_sel", intdir_parents,
+ 0x00C0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_I_SEL, "asm_i_sel", asm_parents,
+ 0x00C0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_parents,
+ 0x00C0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_parents,
+ 0x00D0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MS_CARD_SEL, "ms_card_sel", ms_card_parents,
+ 0x00D0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ETHIF_SEL, "ethif_sel", ethif_parents,
+ 0x00D0, 24, 3, 31),
+
+ MUX_GATE(CLK_TOP_HDMIRX26_24_SEL, "hdmirx26_24_sel", hdmirx_parents,
+ 0x00E0, 0, 1, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_parents,
+ 0x00E0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel", cmsys_parents,
+ 0x00E0, 16, 4, 23),
+
+ MUX_GATE(CLK_TOP_SPI2_SEL, "spi2_sel", spi_parents,
+ 0x00E0, 24, 3, 31),
+ MUX_GATE(CLK_TOP_SPI1_SEL, "spi1_sel", spi_parents,
+ 0x00F0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_8BDAC_SEL, "8bdac_sel", clk_8bdac_parents,
+ 0x00F0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AUD2DVD_SEL, "aud2dvd_sel", aud2dvd_parents,
+ 0x00F0, 16, 1, 23),
+
+ MUX(CLK_TOP_PADMCLK_SEL, "padmclk_sel", padmclk_parents,
+ 0x0100, 0, 3),
+
+ MUX(CLK_TOP_AUD_MUX1_SEL, "aud_mux1_sel", aud_mux_parents,
+ 0x012c, 0, 3),
+ MUX(CLK_TOP_AUD_MUX2_SEL, "aud_mux2_sel", aud_mux_parents,
+ 0x012c, 3, 3),
+ MUX(CLK_TOP_AUDPLL_MUX_SEL, "audpll_sel", aud_mux_parents,
+ 0x012c, 6, 3),
+ MUX_GATE(CLK_TOP_AUD_K1_SRC_SEL, "aud_k1_src_sel", aud_src_parents,
+ 0x012c, 15, 1, 23),
+ MUX_GATE(CLK_TOP_AUD_K2_SRC_SEL, "aud_k2_src_sel", aud_src_parents,
+ 0x012c, 16, 1, 24),
+ MUX_GATE(CLK_TOP_AUD_K3_SRC_SEL, "aud_k3_src_sel", aud_src_parents,
+ 0x012c, 17, 1, 25),
+ MUX_GATE(CLK_TOP_AUD_K4_SRC_SEL, "aud_k4_src_sel", aud_src_parents,
+ 0x012c, 18, 1, 26),
+ MUX_GATE(CLK_TOP_AUD_K5_SRC_SEL, "aud_k5_src_sel", aud_src_parents,
+ 0x012c, 19, 1, 27),
+ MUX_GATE(CLK_TOP_AUD_K6_SRC_SEL, "aud_k6_src_sel", aud_src_parents,
+ 0x012c, 20, 1, 28),
+};
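For reference (not part of the patch): each MUX_GATE()/MUX_GATE_FLAGS() entry above is positional — register offset, mux shift, mux width, gate bit — as the macro parameters in the clk-mtk.h hunk further down show. A minimal sketch of what the first entry implies for the TOPCKGEN register, under that assumption (GENMASK comes from <linux/bits.h>):

    /* Illustration only: CLK_TOP_AXI_SEL selects its parent with bits [2:0]
     * of TOPCKGEN register 0x0040 (a 3-bit index into axi_parents) and is
     * gated by bit 7; CLK_IS_CRITICAL keeps the bus clock always enabled. */
    static inline unsigned int axi_sel_parent_index(u32 clk_cfg_0)
    {
            return (clk_cfg_0 >> 0) & GENMASK(2, 0);    /* shift 0, width 3 */
    }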
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_AUD_EXTCK1_DIV, "audio_ext1_ck", "aud_ext1",
+ 0x0120, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_EXTCK2_DIV, "audio_ext2_ck", "aud_ext2",
+ 0x0120, 8, 8),
+ DIV_ADJ(CLK_TOP_AUD_MUX1_DIV, "aud_mux1_div", "aud_mux1_sel",
+ 0x0120, 16, 8),
+ DIV_ADJ(CLK_TOP_AUD_MUX2_DIV, "aud_mux2_div", "aud_mux2_sel",
+ 0x0120, 24, 8),
+ DIV_ADJ(CLK_TOP_AUD_K1_SRC_DIV, "aud_k1_src_div", "aud_k1_src_sel",
+ 0x0124, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_K2_SRC_DIV, "aud_k2_src_div", "aud_k2_src_sel",
+ 0x0124, 8, 8),
+ DIV_ADJ(CLK_TOP_AUD_K3_SRC_DIV, "aud_k3_src_div", "aud_k3_src_sel",
+ 0x0124, 16, 8),
+ DIV_ADJ(CLK_TOP_AUD_K4_SRC_DIV, "aud_k4_src_div", "aud_k4_src_sel",
+ 0x0124, 24, 8),
+ DIV_ADJ(CLK_TOP_AUD_K5_SRC_DIV, "aud_k5_src_div", "aud_k5_src_sel",
+ 0x0128, 0, 8),
+ DIV_ADJ(CLK_TOP_AUD_K6_SRC_DIV, "aud_k6_src_div", "aud_k6_src_sel",
+ 0x0128, 8, 8),
+};
+
+static const struct mtk_gate_regs top_aud_cg_regs = {
+ .sta_ofs = 0x012C,
+};
+
+#define GATE_TOP_AUD(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top_aud_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
+ 21),
+ GATE_TOP_AUD(CLK_TOP_AUD_44K_TIMING, "a2sys_hp_ck", "aud_mux2_div",
+ 22),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S1_MCLK, "aud_i2s1_mclk", "aud_k1_src_div",
+ 23),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S2_MCLK, "aud_i2s2_mclk", "aud_k2_src_div",
+ 24),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S3_MCLK, "aud_i2s3_mclk", "aud_k3_src_div",
+ 25),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S4_MCLK, "aud_i2s4_mclk", "aud_k4_src_div",
+ 26),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S5_MCLK, "aud_i2s5_mclk", "aud_k5_src_div",
+ 27),
+ GATE_TOP_AUD(CLK_TOP_AUD_I2S6_MCLK, "aud_i2s6_mclk", "aud_k6_src_div",
+ 28),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ clk_data);
+
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+ base, &mt2701_clk_lock, clk_data);
+
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt2701_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x0040,
+ .clr_ofs = 0x0044,
+ .sta_ofs = 0x0048,
+};
+
+#define GATE_ICG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
+ GATE_ICG(CLK_INFRA_SMI, "smi_ck", "mm_sel", 1),
+ GATE_ICG(CLK_INFRA_QAXI_CM4, "cm4_ck", "axi_sel", 2),
+ GATE_ICG(CLK_INFRA_AUD_SPLIN_B, "audio_splin_bck", "hadds2pll_294m", 4),
+ GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "clk26m", 5),
+ GATE_ICG(CLK_INFRA_EFUSE, "efuse_ck", "clk26m", 6),
+ GATE_ICG(CLK_INFRA_L2C_SRAM, "l2c_sram_ck", "mm_sel", 7),
+ GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ GATE_ICG(CLK_INFRA_CONNMCU, "connsys_bus", "wbg_dig_ck_416m", 12),
+ GATE_ICG(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 13),
+ GATE_ICG(CLK_INFRA_RAMBUFIF, "rambufif_ck", "mem_sel", 14),
+ GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "mem_sel", 15),
+ GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
+ GATE_ICG(CLK_INFRA_CEC, "cec_ck", "rtc_sel", 18),
+ GATE_ICG(CLK_INFRA_IRRX, "irrx_ck", "axi_sel", 19),
+ GATE_ICG(CLK_INFRA_PMICSPI, "pmicspi_ck", "pmicspi_sel", 22),
+ GATE_ICG(CLK_INFRA_PMICWRAP, "pmicwrap_ck", "axi_sel", 23),
+ GATE_ICG(CLK_INFRA_DDCCI, "ddcci_ck", "axi_sel", 24),
+};
+
+static const struct mtk_fixed_factor infra_fixed_divs[] = {
+ FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
+};
+
+static struct clk_onecell_data *infra_clk_data;
+
+static void mtk_infrasys_init_early(struct device_node *node)
+{
+ int r, i;
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ infra_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE_DRIVER(mtk_infra, "mediatek,mt2701-infracfg",
+ mtk_infrasys_init_early);
+
+static int mtk_infrasys_init(struct platform_device *pdev)
+{
+ int r, i;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
+ infra_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ }
+ }
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ infra_clk_data);
+ mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ infra_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 2, 0x30);
+
+ return 0;
+}
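A note on the two-stage infracfg setup above: the CLK_OF_DECLARE_DRIVER hook runs at early boot, registers only the fixed factors (clk13m) and pre-fills every other slot with ERR_PTR(-EPROBE_DEFER); the platform driver later replaces the placeholders with the real gate clocks. A rough consumer-side sketch, assuming a device that references one of the infracfg gates (the "cec" clock-name is only illustrative):

    struct clk *clk = of_clk_get_by_name(dev->of_node, "cec");
    if (IS_ERR(clk))
            return PTR_ERR(clk);    /* -EPROBE_DEFER until mtk_infrasys_init() has run */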
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x0010,
+ .sta_ofs = 0x0018,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0x000c,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x001c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_clks[] = {
+ GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
+ GATE_PERI0(CLK_PERI_ETH, "eth_ck", "clk26m", 30),
+ GATE_PERI0(CLK_PERI_SPI0, "spi0_ck", "spi0_sel", 29),
+ GATE_PERI0(CLK_PERI_AUXADC, "auxadc_ck", "clk26m", 28),
+ GATE_PERI0(CLK_PERI_I2C3, "i2c3_ck", "clk26m", 27),
+ GATE_PERI0(CLK_PERI_I2C2, "i2c2_ck", "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C1, "i2c1_ck", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C0, "i2c0_ck", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_BTIF, "btif_ck", "axi_sel", 23),
+ GATE_PERI0(CLK_PERI_UART3, "uart3_ck", "axi_sel", 22),
+ GATE_PERI0(CLK_PERI_UART2, "uart2_ck", "axi_sel", 21),
+ GATE_PERI0(CLK_PERI_UART1, "uart1_ck", "axi_sel", 20),
+ GATE_PERI0(CLK_PERI_UART0, "uart0_ck", "axi_sel", 19),
+ GATE_PERI0(CLK_PERI_NLI, "nli_ck", "axi_sel", 18),
+ GATE_PERI0(CLK_PERI_MSDC50_3, "msdc50_3_ck", "emmc_hclk_sel", 17),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "msdc30_3_ck", "msdc30_3_sel", 16),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "msdc30_2_ck", "msdc30_2_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "msdc30_1_ck", "msdc30_1_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "msdc30_0_ck", "msdc30_0_sel", 13),
+ GATE_PERI0(CLK_PERI_AP_DMA, "ap_dma_ck", "axi_sel", 12),
+ GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
+ GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
+ GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
+ GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
+ GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
+
+ GATE_PERI1(CLK_PERI_FCI, "fci_ck", "ms_card_sel", 11),
+ GATE_PERI1(CLK_PERI_SPI2, "spi2_ck", "spi2_sel", 10),
+ GATE_PERI1(CLK_PERI_SPI1, "spi1_ck", "spi1_sel", 9),
+ GATE_PERI1(CLK_PERI_HOST89_DVD, "host89_dvd_ck", "aud2dvd_sel", 8),
+ GATE_PERI1(CLK_PERI_HOST89_SPI, "host89_spi_ck", "spi0_sel", 7),
+ GATE_PERI1(CLK_PERI_HOST89_INT, "host89_int_ck", "axi_sel", 6),
+ GATE_PERI1(CLK_PERI_FLASH, "flash_ck", "nfi2x_sel", 5),
+ GATE_PERI1(CLK_PERI_NFI_PAD, "nfi_pad_ck", "nfi1x_pad", 4),
+ GATE_PERI1(CLK_PERI_NFI_ECC, "nfi_ecc_ck", "nfi1x_pad", 3),
+ GATE_PERI1(CLK_PERI_GCPU, "gcpu_ck", "axi_sel", 2),
+ GATE_PERI1(CLK_PERI_USB_SLV, "usbslv_ck", "axi_sel", 1),
+ GATE_PERI1(CLK_PERI_USB1_MCU, "usb1_mcu_ck", "axi_sel", 0),
+};
+
+static const char * const uart_ck_sel_parents[] = {
+ "clk26m",
+ "uart_sel",
+};
+
+static const struct mtk_composite peri_muxs[] = {
+ MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents,
+ 0x40c, 0, 1),
+ MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents,
+ 0x40c, 1, 1),
+ MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents,
+ 0x40c, 2, 1),
+ MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents,
+ 0x40c, 3, 1),
+};
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
+ &mt2701_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 2, 0x0);
+
+ return 0;
+}
+
+#define MT8590_PLL_FMAX (2000 * MHZ)
+#define CON0_MT8590_RST_BAR BIT(27)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8590_RST_BAR, \
+ .fmax = MT8590_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ }
+
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000001,
+ PLL_AO, 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000001,
+ HAVE_RST_BAR, 21, 0x210, 4, 0x0, 0x214, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000001,
+ HAVE_RST_BAR, 7, 0x220, 4, 0x0, 0x224, 14),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0x00000001, 0,
+ 21, 0x230, 4, 0x0, 0x234, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x240, 0x24c, 0x00000001, 0,
+ 21, 0x240, 4, 0x0, 0x244, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x250, 0x25c, 0x00000001, 0,
+ 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x270, 0x27c, 0x00000001, 0,
+ 31, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x280, 0x28c, 0x00000001, 0,
+ 31, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_ETHPLL, "ethpll", 0x290, 0x29c, 0x00000001, 0,
+ 31, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x2a0, 0x2ac, 0x00000001, 0,
+ 31, 0x2a0, 4, 0x0, 0x2a4, 0),
+ PLL(CLK_APMIXED_HADDS2PLL, "hadds2pll", 0x2b0, 0x2bc, 0x00000001, 0,
+ 31, 0x2b0, 4, 0x0, 0x2b4, 0),
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x2c0, 0x2cc, 0x00000001, 0,
+ 31, 0x2c0, 4, 0x0, 0x2c4, 0),
+ PLL(CLK_APMIXED_TVD2PLL, "tvd2pll", 0x2d0, 0x2dc, 0x00000001, 0,
+ 21, 0x2d0, 4, 0x0, 0x2d4, 0),
+};
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, apmixed_plls, ARRAY_SIZE(apmixed_plls),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt2701[] = {
+ {
+ .compatible = "mediatek,mt2701-topckgen",
+ .data = mtk_topckgen_init,
+ }, {
+ .compatible = "mediatek,mt2701-infracfg",
+ .data = mtk_infrasys_init,
+ }, {
+ .compatible = "mediatek,mt2701-pericfg",
+ .data = mtk_pericfg_init,
+ }, {
+ .compatible = "mediatek,mt2701-apmixedsys",
+ .data = mtk_apmixedsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt2701_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2701_drv = {
+ .probe = clk_mt2701_probe,
+ .driver = {
+ .name = "clk-mt2701",
+ .of_match_table = of_match_clk_mt2701,
+ },
+};
+
+static int __init clk_mt2701_init(void)
+{
+ return platform_driver_register(&clk_mt2701_drv);
+}
+
+arch_initcall(clk_mt2701_init);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index bb30f7063569..0541df78141c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -58,6 +58,9 @@ void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_clk *rc = &clks[i];
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[rc->id]))
+ continue;
+
clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
rc->rate);
@@ -81,6 +84,9 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_factor *ff = &clks[i];
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[ff->id]))
+ continue;
+
clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
CLK_SET_RATE_PARENT, ff->mult, ff->div);
@@ -116,6 +122,9 @@ int mtk_clk_register_gates(struct device_node *node,
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
+ if (!IS_ERR_OR_NULL(clk_data->clks[gate->id]))
+ continue;
+
clk = mtk_clk_register_gate(gate->name, gate->parent_name,
regmap,
gate->regs->set_ofs,
@@ -232,6 +241,9 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
for (i = 0; i < num; i++) {
const struct mtk_composite *mc = &mcs[i];
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mc->id]))
+ continue;
+
clk = mtk_clk_register_composite(mc, base, lock);
if (IS_ERR(clk)) {
@@ -244,3 +256,31 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
clk_data->clks[mc->id] = clk;
}
}
+
+void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
+ int num, void __iomem *base, spinlock_t *lock,
+ struct clk_onecell_data *clk_data)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_clk_divider *mcd = &mcds[i];
+
+ if (clk_data && !IS_ERR_OR_NULL(clk_data->clks[mcd->id]))
+ continue;
+
+ clk = clk_register_divider(NULL, mcd->name, mcd->parent_name,
+ mcd->flags, base + mcd->div_reg, mcd->div_shift,
+ mcd->div_width, mcd->clk_divider_flags, lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ mcd->name, PTR_ERR(clk));
+ continue;
+ }
+
+ if (clk_data)
+ clk_data->clks[mcd->id] = clk;
+ }
+}
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9f24fcfa304f..f5d6b70ce189 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -87,7 +87,8 @@ struct mtk_composite {
* In case the rate change propagation to parent clocks is undesirable,
* this macro allows to specify the clock flags manually.
*/
-#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) { \
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -106,7 +107,8 @@ struct mtk_composite {
* parent clock by default.
*/
#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) \
- MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, CLK_SET_RATE_PARENT)
+ MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, CLK_SET_RATE_PARENT)
#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
.id = _id, \
@@ -121,7 +123,8 @@ struct mtk_composite {
.flags = CLK_SET_RATE_PARENT, \
}
-#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, _div_width, _div_shift) { \
+#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
+ _div_width, _div_shift) { \
.id = _id, \
.parent = _parent, \
.name = _name, \
@@ -156,12 +159,40 @@ struct mtk_gate {
const struct clk_ops *ops;
};
-int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks,
- int num, struct clk_onecell_data *clk_data);
+int mtk_clk_register_gates(struct device_node *node,
+ const struct mtk_gate *clks, int num,
+ struct clk_onecell_data *clk_data);
+
+struct mtk_clk_divider {
+ int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+
+ u32 div_reg;
+ unsigned char div_shift;
+ unsigned char div_width;
+ unsigned char clk_divider_flags;
+ const struct clk_div_table *clk_div_table;
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+}
+
+void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
+ int num, void __iomem *base, spinlock_t *lock,
+ struct clk_onecell_data *clk_data);
struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
#define HAVE_RST_BAR BIT(0)
+#define PLL_AO BIT(1)
struct mtk_pll_div_table {
u32 div;
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 0c2deac17ce9..a409142e9346 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -301,6 +301,7 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->data = data;
init.name = data->name;
+ init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = &mtk_pll_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 3a51fff1b0e7..0fc75c395957 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -309,19 +309,19 @@ static void __init mmp2_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
- return;
+ goto free_memory;
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
- return;
+ goto unmap_mpmu_region;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
- return;
+ goto unmap_apmu_region;
}
mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
@@ -333,6 +333,15 @@ static void __init mmp2_clk_init(struct device_node *np)
mmp2_axi_periph_clk_init(pxa_unit);
mmp2_clk_reset_init(np, pxa_unit);
+
+ return;
+
+unmap_apmu_region:
+ iounmap(pxa_unit->apmu_base);
+unmap_mpmu_region:
+ iounmap(pxa_unit->mpmu_base);
+free_memory:
+ kfree(pxa_unit);
}
CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 87f2317b2a00..f110c02e83cb 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index e478ff44e170..cede7b4ca3b9 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -216,6 +216,7 @@ static void __init pxa1928_mpmu_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
+ kfree(pxa_unit);
return;
}
@@ -234,6 +235,7 @@ static void __init pxa1928_apmu_clk_init(struct device_node *np)
pxa_unit->apmu_base = of_iomap(np, 0);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
+ kfree(pxa_unit);
return;
}
@@ -254,6 +256,7 @@ static void __init pxa1928_apbc_clk_init(struct device_node *np)
pxa_unit->apbc_base = of_iomap(np, 0);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
+ kfree(pxa_unit);
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index e22a67f76d93..1dcabe95cb67 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -278,25 +278,25 @@ static void __init pxa910_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
- return;
+ goto free_memory;
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
- return;
+ goto unmap_mpmu_region;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
- return;
+ goto unmap_apmu_region;
}
pxa_unit->apbcp_base = of_iomap(np, 3);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
- return;
+ goto unmap_apbc_region;
}
mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
@@ -308,6 +308,17 @@ static void __init pxa910_clk_init(struct device_node *np)
pxa910_axi_periph_clk_init(pxa_unit);
pxa910_clk_reset_init(np, pxa_unit);
+
+ return;
+
+unmap_apbc_region:
+ iounmap(pxa_unit->apbc_base);
+unmap_apmu_region:
+ iounmap(pxa_unit->apmu_base);
+unmap_mpmu_region:
+ iounmap(pxa_unit->mpmu_base);
+free_memory:
+ kfree(pxa_unit);
}
CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index 02023baf86c9..8181b919f062 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -14,7 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -135,34 +135,17 @@ fail0:
return ret;
}
-static int ap806_syscon_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- clk_unregister_fixed_factor(ap806_clks[3]);
- clk_unregister_fixed_rate(ap806_clks[2]);
- clk_unregister_fixed_rate(ap806_clks[1]);
- clk_unregister_fixed_rate(ap806_clks[0]);
-
- return 0;
-}
-
static const struct of_device_id ap806_syscon_of_match[] = {
{ .compatible = "marvell,ap806-system-controller", },
{ }
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver ap806_syscon_driver = {
.probe = ap806_syscon_clk_probe,
- .remove = ap806_syscon_clk_remove,
.driver = {
.name = "marvell-ap806-system-controller",
.of_match_table = ap806_syscon_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(ap806_syscon_driver);
-
-MODULE_DESCRIPTION("Marvell AP806 System Controller driver");
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(ap806_syscon_driver);
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index f2303da7fda7..32e5b43c086f 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -30,7 +30,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -87,7 +87,7 @@ struct cp110_gate_clk {
u8 bit_idx;
};
-#define to_cp110_gate_clk(clk) container_of(clk, struct cp110_gate_clk, hw)
+#define to_cp110_gate_clk(hw) container_of(hw, struct cp110_gate_clk, hw)
static int cp110_gate_enable(struct clk_hw *hw)
{
@@ -123,13 +123,14 @@ static const struct clk_ops cp110_gate_ops = {
.is_enabled = cp110_gate_is_enabled,
};
-static struct clk *cp110_register_gate(const char *name,
- const char *parent_name,
- struct regmap *regmap, u8 bit_idx)
+static struct clk_hw *cp110_register_gate(const char *name,
+ const char *parent_name,
+ struct regmap *regmap, u8 bit_idx)
{
struct cp110_gate_clk *gate;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -146,39 +147,37 @@ static struct clk *cp110_register_gate(const char *name,
gate->bit_idx = bit_idx;
gate->hw.init = &init;
- clk = clk_register(NULL, &gate->hw);
- if (IS_ERR(clk))
+ hw = &gate->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
kfree(gate);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
-static void cp110_unregister_gate(struct clk *clk)
+static void cp110_unregister_gate(struct clk_hw *hw)
{
- struct clk_hw *hw;
-
- hw = __clk_get_hw(clk);
- if (!hw)
- return;
-
- clk_unregister(clk);
+ clk_hw_unregister(hw);
kfree(to_cp110_gate_clk(hw));
}
-static struct clk *cp110_of_clk_get(struct of_phandle_args *clkspec, void *data)
+static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
+ void *data)
{
- struct clk_onecell_data *clk_data = data;
+ struct clk_hw_onecell_data *clk_data = data;
unsigned int type = clkspec->args[0];
unsigned int idx = clkspec->args[1];
if (type == CP110_CLK_TYPE_CORE) {
if (idx > CP110_MAX_CORE_CLOCKS)
return ERR_PTR(-EINVAL);
- return clk_data->clks[idx];
+ return clk_data->hws[idx];
} else if (type == CP110_CLK_TYPE_GATABLE) {
if (idx > CP110_MAX_GATABLE_CLOCKS)
return ERR_PTR(-EINVAL);
- return clk_data->clks[CP110_MAX_CORE_CLOCKS + idx];
+ return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
}
return ERR_PTR(-EINVAL);
@@ -189,8 +188,8 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
struct regmap *regmap;
struct device_node *np = pdev->dev.of_node;
const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
- struct clk_onecell_data *cp110_clk_data;
- struct clk *clk, **cp110_clks;
+ struct clk_hw_onecell_data *cp110_clk_data;
+ struct clk_hw *hw, **cp110_clks;
u32 nand_clk_ctrl;
int i, ret;
@@ -203,80 +202,75 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- cp110_clks = devm_kcalloc(&pdev->dev, sizeof(struct clk *),
- CP110_CLK_NUM, GFP_KERNEL);
- if (!cp110_clks)
- return -ENOMEM;
-
- cp110_clk_data = devm_kzalloc(&pdev->dev,
- sizeof(*cp110_clk_data),
+ cp110_clk_data = devm_kzalloc(&pdev->dev, sizeof(*cp110_clk_data) +
+ sizeof(struct clk_hw *) * CP110_CLK_NUM,
GFP_KERNEL);
if (!cp110_clk_data)
return -ENOMEM;
- cp110_clk_data->clks = cp110_clks;
- cp110_clk_data->clk_num = CP110_CLK_NUM;
+ cp110_clks = cp110_clk_data->hws;
+ cp110_clk_data->num = CP110_CLK_NUM;
- /* Register the APLL which is the root of the clk tree */
+ /* Register the APLL which is the root of the hw tree */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_APLL, &apll_name);
- clk = clk_register_fixed_rate(NULL, apll_name, NULL, 0,
- 1000 * 1000 * 1000);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_rate(NULL, apll_name, NULL, 0,
+ 1000 * 1000 * 1000);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail0;
}
- cp110_clks[CP110_CORE_APLL] = clk;
+ cp110_clks[CP110_CORE_APLL] = hw;
/* PPv2 is APLL/3 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_PPV2, &ppv2_name);
- clk = clk_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail1;
}
- cp110_clks[CP110_CORE_PPV2] = clk;
+ cp110_clks[CP110_CORE_PPV2] = hw;
/* EIP clock is APLL/2 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_EIP, &eip_name);
- clk = clk_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail2;
}
- cp110_clks[CP110_CORE_EIP] = clk;
+ cp110_clks[CP110_CORE_EIP] = hw;
/* Core clock is EIP/2 */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_CORE, &core_name);
- clk = clk_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail3;
}
- cp110_clks[CP110_CORE_CORE] = clk;
+ cp110_clks[CP110_CORE_CORE] = hw;
/* NAND can be either APLL/2.5 or core clock */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_NAND, &nand_name);
if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
- clk = clk_register_fixed_factor(NULL, nand_name,
- apll_name, 0, 2, 5);
+ hw = clk_hw_register_fixed_factor(NULL, nand_name,
+ apll_name, 0, 2, 5);
else
- clk = clk_register_fixed_factor(NULL, nand_name,
- core_name, 0, 1, 1);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = clk_hw_register_fixed_factor(NULL, nand_name,
+ core_name, 0, 1, 1);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail4;
}
- cp110_clks[CP110_CORE_NAND] = clk;
+ cp110_clks[CP110_CORE_NAND] = hw;
for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
const char *parent, *name;
@@ -335,16 +329,16 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
break;
}
- clk = cp110_register_gate(name, parent, regmap, i);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ hw = cp110_register_gate(name, parent, regmap, i);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
goto fail_gate;
}
- cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk;
+ cp110_clks[CP110_MAX_CORE_CLOCKS + i] = hw;
}
- ret = of_clk_add_provider(np, cp110_of_clk_get, cp110_clk_data);
+ ret = of_clk_add_hw_provider(np, cp110_of_clk_get, cp110_clk_data);
if (ret)
goto fail_clk_add;
@@ -355,65 +349,36 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
fail_clk_add:
fail_gate:
for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
- clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
+ hw = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
- if (clk)
- cp110_unregister_gate(clk);
+ if (hw)
+ cp110_unregister_gate(hw);
}
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
fail4:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
fail3:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
fail2:
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
fail1:
- clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
+ clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
fail0:
return ret;
}
-static int cp110_syscon_clk_remove(struct platform_device *pdev)
-{
- struct clk **cp110_clks = platform_get_drvdata(pdev);
- int i;
-
- of_clk_del_provider(pdev->dev.of_node);
-
- for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
- struct clk *clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
-
- if (clk)
- cp110_unregister_gate(clk);
- }
-
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
- clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
- clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
-
- return 0;
-}
-
static const struct of_device_id cp110_syscon_of_match[] = {
{ .compatible = "marvell,cp110-system-controller0", },
{ }
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver cp110_syscon_driver = {
.probe = cp110_syscon_clk_probe,
- .remove = cp110_syscon_clk_remove,
.driver = {
.name = "marvell-cp110-system-controller0",
.of_match_table = cp110_syscon_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(cp110_syscon_driver);
-
-MODULE_DESCRIPTION("Marvell CP110 System Controller 0 driver");
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(cp110_syscon_driver);
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index f7136b94fd0e..27781b49eb82 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -277,12 +277,15 @@ static void __init lpc18xx_ccu_init(struct device_node *np)
}
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ iounmap(reg_base);
return;
+ }
clk_data->num = of_property_count_strings(np, "clock-names");
clk_data->name = kcalloc(clk_data->num, sizeof(char *), GFP_KERNEL);
if (!clk_data->name) {
+ iounmap(reg_base);
kfree(clk_data);
return;
}
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 34c97353cdeb..5b98ff9076f3 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -1282,13 +1282,13 @@ static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
LPC32XX_DEFINE_MUX(PWM1_MUX, PWMCLK_CTRL, 1, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM1_DIV, PWMCLK_CTRL, 4, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM1_GATE, PWMCLK_CTRL, 0, 0),
LPC32XX_DEFINE_COMPOSITE(PWM1, PWM1_MUX, PWM1_DIV, PWM1_GATE),
LPC32XX_DEFINE_MUX(PWM2_MUX, PWMCLK_CTRL, 3, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM2_DIV, PWMCLK_CTRL, 8, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM2_GATE, PWMCLK_CTRL, 2, 0),
LPC32XX_DEFINE_COMPOSITE(PWM2, PWM2_MUX, PWM2_DIV, PWM2_GATE),
@@ -1335,8 +1335,7 @@ static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
LPC32XX_DEFINE_GATE(USB_DIV_GATE, USB_CTRL, 17, 0),
LPC32XX_DEFINE_COMPOSITE(USB_DIV, _NULL, USB_DIV_DIV, USB_DIV_GATE),
- LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL, CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_CLK(SD_GATE, MS_CTRL, BIT(5) | BIT(9), BIT(5) | BIT(9),
0x0, BIT(5) | BIT(9), 0x0, 0x0, clk_mask_ops),
LPC32XX_DEFINE_COMPOSITE(SD, _NULL, SD_DIV, SD_GATE),
@@ -1478,6 +1477,20 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
return clk;
}
+static void __init lpc32xx_clk_div_quirk(u32 reg, u32 div_mask, u32 gate)
+{
+ u32 val;
+
+ regmap_read(clk_regmap, reg, &val);
+
+ if (!(val & div_mask)) {
+ val &= ~gate;
+ val |= BIT(__ffs(div_mask));
+ }
+
+ regmap_update_bits(clk_regmap, reg, gate | div_mask, val);
+}
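A worked trace of the quirk above for the PWM1 case (LPC32XX_CLKPWR_PWMCLK_CTRL, div_mask 0xf0, gate BIT(0), matching the PWM1_DIV/PWM1_GATE definitions earlier in this file; the starting register value is only illustrative):

    val = 0x00000001;               /* gate on, divider field [7:4] == 0      */
    /* !(val & 0xf0), so the quirk clears the gate and forces divider = 1:    */
    val &= ~BIT(0);                 /* val == 0x00000000                      */
    val |= BIT(__ffs(0xf0));        /* val == 0x00000010                      */
    /* regmap_update_bits() then writes 0x10 under mask 0xf1                  */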
+
static void __init lpc32xx_clk_init(struct device_node *np)
{
unsigned int i;
@@ -1517,6 +1530,17 @@ static void __init lpc32xx_clk_init(struct device_node *np)
return;
}
+ /*
+ * The divider part of the PWM and MS clocks needs a quirk: a zero
+ * value in the divider bitfield is formally valid but actually
+ * denotes another clock gate. Instead of adding complexity to the
+ * gate clock, ensure that a zero divider value never occurs at
+ * runtime.
+ */
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf0, BIT(0));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf00, BIT(2));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_MS_CTRL, 0xf, BIT(5) | BIT(9));
+
for (i = 1; i < LPC32XX_CLK_MAX; i++) {
clk[i] = lpc32xx_clk_register(i);
if (IS_ERR(clk[i])) {
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 29cee9e8d4d9..74f64c3c4290 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -18,7 +18,27 @@
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
-DEFINE_SPINLOCK(lock);
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+#define MDREFR_K0DB4 (1 << 29) /* SDCLK0 Divide by 4 Control/Status */
+#define MDREFR_K2FREE (1 << 25) /* SDRAM Free-Running Control */
+#define MDREFR_K1FREE (1 << 24) /* SDRAM Free-Running Control */
+#define MDREFR_K0FREE (1 << 23) /* SDRAM Free-Running Control */
+#define MDREFR_SLFRSH (1 << 22) /* SDRAM Self-Refresh Control/Status */
+#define MDREFR_APD (1 << 20) /* SDRAM/SSRAM Auto-Power-Down Enable */
+#define MDREFR_K2DB2 (1 << 19) /* SDCLK2 Divide by 2 Control/Status */
+#define MDREFR_K2RUN (1 << 18) /* SDCLK2 Run Control/Status */
+#define MDREFR_K1DB2 (1 << 17) /* SDCLK1 Divide by 2 Control/Status */
+#define MDREFR_K1RUN (1 << 16) /* SDCLK1 Run Control/Status */
+#define MDREFR_E1PIN (1 << 15) /* SDCKE1 Level Control/Status */
+#define MDREFR_K0DB2 (1 << 14) /* SDCLK0 Divide by 2 Control/Status */
+#define MDREFR_K0RUN (1 << 13) /* SDCLK0 Run Control/Status */
+#define MDREFR_E0PIN (1 << 12) /* SDCKE0 Level Control/Status */
+#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
+#define MDREFR_DRI_MASK 0xFFF
+
+static DEFINE_SPINLOCK(pxa_clk_lock);
static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
@@ -89,7 +109,7 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
pxa_clk->gate = clks[i].gate;
- pxa_clk->gate.lock = &lock;
+ pxa_clk->gate.lock = &pxa_clk_lock;
clk = clk_register_composite(NULL, clks[i].name,
clks[i].parent_names, 2,
&pxa_clk->hw, &cken_mux_ops,
@@ -106,3 +126,124 @@ void __init clk_pxa_dt_common_init(struct device_node *np)
{
of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
+
+void pxa2xx_core_turbo_switch(bool on)
+{
+ unsigned long flags;
+ unsigned int unused, clkcfg;
+
+ local_irq_save(flags);
+
+ asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+ clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
+ if (on)
+ clkcfg |= CLKCFG_TURBO;
+ clkcfg |= CLKCFG_FCS;
+
+ asm volatile(
+ " b 2f\n"
+ " .align 5\n"
+ "1: mcr p14, 0, %1, c6, c0, 0\n"
+ " b 3f\n"
+ "2: b 1b\n"
+ "3: nop\n"
+ : "=&r" (unused)
+ : "r" (clkcfg)
+ : );
+
+ local_irq_restore(flags);
+}
+
+void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
+ u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ void __iomem *cccr)
+{
+ unsigned int clkcfg = freq->clkcfg;
+ unsigned int unused, preset_mdrefr, postset_mdrefr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
+ * we need to preset the smaller DRI before the change. If we're
+ * speeding up we need to set the larger DRI value after the change.
+ */
+ preset_mdrefr = postset_mdrefr = readl(mdrefr);
+ if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
+ preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
+ preset_mdrefr |= mdrefr_dri(freq->membus_khz);
+ }
+ postset_mdrefr =
+ (postset_mdrefr & ~MDREFR_DRI_MASK) |
+ mdrefr_dri(freq->membus_khz);
+
+ /* If we're dividing the memory clock by two for the SDRAM clock, this
+ * must be set prior to the change. Clearing the divide must be done
+ * after the change.
+ */
+ if (freq->div2) {
+ preset_mdrefr |= MDREFR_DB2_MASK;
+ postset_mdrefr |= MDREFR_DB2_MASK;
+ } else {
+ postset_mdrefr &= ~MDREFR_DB2_MASK;
+ }
+
+ /* Set new the CCCR and prepare CLKCFG */
+ writel(freq->cccr, cccr);
+
+ asm volatile(
+ " ldr r4, [%1]\n"
+ " b 2f\n"
+ " .align 5\n"
+ "1: str %3, [%1] /* preset the MDREFR */\n"
+ " mcr p14, 0, %2, c6, c0, 0 /* set CLKCFG[FCS] */\n"
+ " str %4, [%1] /* postset the MDREFR */\n"
+ " b 3f\n"
+ "2: b 1b\n"
+ "3: nop\n"
+ : "=&r" (unused)
+ : "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
+ "r" (postset_mdrefr)
+ : "r4", "r5");
+
+ local_irq_restore(flags);
+}
+
+int pxa2xx_determine_rate(struct clk_rate_request *req,
+ struct pxa2xx_freq *freqs, int nb_freqs)
+{
+ int i, closest_below = -1, closest_above = -1;
+ unsigned long rate;
+
+ for (i = 0; i < nb_freqs; i++) {
+ rate = freqs[i].cpll;
+ if (rate == req->rate)
+ break;
+ if (rate < req->min_rate)
+ continue;
+ if (rate > req->max_rate)
+ continue;
+ if (rate <= req->rate)
+ closest_below = i;
+ if ((rate >= req->rate) && (closest_above == -1))
+ closest_above = i;
+ }
+
+ req->best_parent_hw = NULL;
+
+ if (i < nb_freqs) {
+ rate = req->rate;
+ } else if (closest_below >= 0) {
+ rate = freqs[closest_below].cpll;
+ } else if (closest_above >= 0) {
+ rate = freqs[closest_above].cpll;
+ } else {
+ pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
+ return -EINVAL;
+ }
+
+ pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
+ req->rate = rate;
+
+ return 0;
+}
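A small usage sketch for the helper above (not part of the patch; it borrows the pxa25x_freqs table added later in this series):

    struct clk_rate_request req = {
            .rate = 300000000, .min_rate = 0, .max_rate = ULONG_MAX,
    };

    /* 298598400 and 398131200 bracket the request; the closest rate at or
     * below wins, so the call succeeds and req.rate becomes 298598400. */
    pxa2xx_determine_rate(&req, pxa25x_freqs, ARRAY_SIZE(pxa25x_freqs));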
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index d1de805df867..2b90c5917b32 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -13,6 +13,11 @@
#ifndef _CLK_PXA_
#define _CLK_PXA_
+#define CLKCFG_TURBO 0x1
+#define CLKCFG_FCS 0x2
+#define CLKCFG_HALFTURBO 0x4
+#define CLKCFG_FASTBUS 0x8
+
#define PARENTS(name) \
static const char *const name ## _parents[] __initconst
#define MUX_RO_RATE_RO_OPS(name, clk_name) \
@@ -35,10 +40,27 @@
NULL, NULL, CLK_GET_RATE_NOCACHE); \
}
-#define RATE_RO_OPS(name, clk_name) \
+#define RATE_RO_OPS(name, clk_name) \
+ static struct clk_hw name ## _rate_hw; \
+ static const struct clk_ops name ## _rate_ops = { \
+ .recalc_rate = name ## _get_rate, \
+ }; \
+ static struct clk * __init clk_register_ ## name(void) \
+ { \
+ return clk_register_composite(NULL, clk_name, \
+ name ## _parents, \
+ ARRAY_SIZE(name ## _parents), \
+ NULL, NULL, \
+ &name ## _rate_hw, &name ## _rate_ops, \
+ NULL, NULL, CLK_GET_RATE_NOCACHE); \
+ }
+
+#define RATE_OPS(name, clk_name) \
static struct clk_hw name ## _rate_hw; \
static struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
+ .set_rate = name ## _set_rate, \
+ .determine_rate = name ## _determine_rate, \
}; \
static struct clk * __init clk_register_ ## name(void) \
{ \
@@ -50,6 +72,24 @@
NULL, NULL, CLK_GET_RATE_NOCACHE); \
}
+#define MUX_OPS(name, clk_name, flags) \
+ static struct clk_hw name ## _mux_hw; \
+ static const struct clk_ops name ## _mux_ops = { \
+ .get_parent = name ## _get_parent, \
+ .set_parent = name ## _set_parent, \
+ .determine_rate = name ## _determine_rate, \
+ }; \
+ static struct clk * __init clk_register_ ## name(void) \
+ { \
+ return clk_register_composite(NULL, clk_name, \
+ name ## _parents, \
+ ARRAY_SIZE(name ## _parents), \
+ &name ## _mux_hw, &name ## _mux_ops, \
+ NULL, NULL, \
+ NULL, NULL, \
+ CLK_GET_RATE_NOCACHE | flags); \
+ }
+
/*
* CKEN clock type
* This clock takes it source from 2 possible parents :
@@ -95,7 +135,15 @@ struct desc_clk_cken {
PXA_CKEN(dev_id, con_id, name, parents, 1, 1, 1, 1, \
NULL, cken_reg, cken_bit, flag)
-static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
+struct pxa2xx_freq {
+ unsigned long cpll;
+ unsigned int membus_khz;
+ unsigned int cccr;
+ unsigned int div2;
+ unsigned int clkcfg;
+};
+
+static inline int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
{
return 0;
}
@@ -105,4 +153,11 @@ extern void clkdev_pxa_register(int ckid, const char *con_id,
extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
void clk_pxa_dt_common_init(struct device_node *np);
+void pxa2xx_core_turbo_switch(bool on);
+void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
+ u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+ void __iomem *cccr);
+int pxa2xx_determine_rate(struct clk_rate_request *req,
+ struct pxa2xx_freq *freqs, int nb_freqs);
+
#endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index a98b98e2a9e4..c53993b6bf87 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <mach/pxa2xx-regs.h>
+#include <mach/smemc.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -30,6 +31,17 @@ enum {
PXA_CORE_TURBO,
};
+#define PXA25x_CLKCFG(T) \
+ (CLKCFG_FCS | \
+ ((T) ? CLKCFG_TURBO : 0))
+#define PXA25x_CCCR(N2, M, L) (N2 << 7 | M << 5 | L)
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/* Define the refresh period in mSec for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+
/*
* Various clock factors driven by the CCCR register.
*/
@@ -48,6 +60,34 @@ static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory"
};
+static int get_sdram_rows(void)
+{
+ static int sdram_rows;
+ unsigned int drac2 = 0, drac0 = 0;
+ u32 mdcnfg;
+
+ if (sdram_rows)
+ return sdram_rows;
+
+ mdcnfg = readl_relaxed(MDCNFG);
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+ return sdram_rows;
+}
+
+static u32 mdrefr_dri(unsigned int freq_khz)
+{
+ u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+
+ return interval / 32;
+}
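A worked example of the refresh arithmetic above, with illustrative MDCNFG contents (one enabled bank, DRAC0 = 1) and the 99,500 kHz memory bus used in pxa25x_freqs below:

    rows     = 1 << (11 + 1)     = 4096
    interval = 99500 * 64 / 4096 = 1554
    DRI      = 1554 / 32         = 48

(The PXA27x copy of this helper rounds with (interval - 31) / 32 instead.)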
+
/*
* Get the clock frequency as reflected by CCCR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -139,6 +179,21 @@ static struct desc_clk_cken pxa25x_clocks[] __initdata = {
clk_pxa25x_memory_parents, 0),
};
+/*
+ * In this table, PXA25x_CCCR(N2, M, L) has the following meaning, where:
+ * - freq_cpll = n * m * l * 3.6864 MHz
+ * - n = N2 / 2
+ * - m = 2^(M - 1), where 1 <= M <= 3
+ * - l = L_clk_mult[L], i.e. { 0, 27, 32, 36, 40, 45, 0, }[L]
+ */
+static struct pxa2xx_freq pxa25x_freqs[] = {
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG */
+ { 99532800, 99500, PXA25x_CCCR(2, 1, 1), 1, PXA25x_CLKCFG(1)},
+ {199065600, 99500, PXA25x_CCCR(4, 1, 1), 0, PXA25x_CLKCFG(1)},
+ {298598400, 99500, PXA25x_CCCR(3, 2, 1), 0, PXA25x_CLKCFG(1)},
+ {398131200, 99500, PXA25x_CCCR(4, 2, 1), 0, PXA25x_CLKCFG(1)},
+};
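Checking one row of this table against the formula in the comment above (pure arithmetic):

    PXA25x_CCCR(4, 2, 1):  n = 4 / 2 = 2,  m = 2^(2-1) = 2,  l = L_clk_mult[1] = 27
    freq_cpll = 2 * 2 * 27 * 3.6864 MHz = 398.1312 MHz = 398131200 Hz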
+
static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
@@ -151,13 +206,24 @@ static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
return PXA_CORE_RUN;
}
-static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static int clk_pxa25x_core_set_parent(struct clk_hw *hw, u8 index)
{
- return parent_rate;
+ if (index > PXA_CORE_TURBO)
+ return -EINVAL;
+
+ pxa2xx_core_turbo_switch(index == PXA_CORE_TURBO);
+
+ return 0;
+}
+
+static int clk_pxa25x_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return __clk_mux_determine_rate(hw, req);
}
+
PARENTS(clk_pxa25x_core) = { "run", "cpll" };
-MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
+MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -182,17 +248,42 @@ static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
m = M_clk_mult[(cccr >> 5) & 0x03];
n2 = N2_clk_mult[(cccr >> 7) & 0x07];
- if (t)
- return m * l * n2 * parent_rate / 2;
- return m * l * parent_rate;
+ return m * l * n2 * parent_rate / 2;
+}
+
+static int clk_pxa25x_cpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return pxa2xx_determine_rate(req, pxa25x_freqs,
+ ARRAY_SIZE(pxa25x_freqs));
+}
+
+static int clk_pxa25x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+
+ pr_debug("%s(rate=%lu parent_rate=%lu)\n", __func__, rate, parent_rate);
+ for (i = 0; i < ARRAY_SIZE(pxa25x_freqs); i++)
+ if (pxa25x_freqs[i].cpll == rate)
+ break;
+
+ if (i >= ARRAY_SIZE(pxa25x_freqs))
+ return -EINVAL;
+
+ pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+
+ return 0;
}
PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
-RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
+RATE_OPS(clk_pxa25x_cpll, "cpll");
static void __init pxa25x_register_core(void)
{
- clk_register_clk_pxa25x_cpll();
- clk_register_clk_pxa25x_run();
+ clkdev_pxa_register(CLK_NONE, "cpll", NULL,
+ clk_register_clk_pxa25x_cpll());
+ clkdev_pxa_register(CLK_NONE, "run", NULL,
+ clk_register_clk_pxa25x_run());
clkdev_pxa_register(CLK_CORE, "core", NULL,
clk_register_clk_pxa25x_core());
}
@@ -214,7 +305,8 @@ static void __init pxa25x_base_clocks_init(void)
{
pxa25x_register_plls();
pxa25x_register_core();
- clk_register_clk_pxa25x_memory();
+ clkdev_pxa_register(CLK_NONE, "system_bus", NULL,
+ clk_register_clk_pxa25x_memory());
}
#define DUMMY_CLK(_con_id, _dev_id, _parent) \
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index c40b1804f58c..25a30194d27a 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -17,6 +17,8 @@
#include <linux/clkdev.h>
#include <linux/of.h>
+#include <mach/smemc.h>
+
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
@@ -45,11 +47,52 @@ enum {
PXA_MEM_RUN,
};
+#define PXA27x_CLKCFG(B, HT, T) \
+ (CLKCFG_FCS | \
+ ((B) ? CLKCFG_FASTBUS : 0) | \
+ ((HT) ? CLKCFG_HALFTURBO : 0) | \
+ ((T) ? CLKCFG_TURBO : 0))
+#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/* Define the refresh period in mSec for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+
static const char * const get_freq_khz[] = {
"core", "run", "cpll", "memory",
"system_bus"
};
+static int get_sdram_rows(void)
+{
+ static int sdram_rows;
+ unsigned int drac2 = 0, drac0 = 0;
+ u32 mdcnfg;
+
+ if (sdram_rows)
+ return sdram_rows;
+
+ mdcnfg = readl_relaxed(MDCNFG);
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+ return sdram_rows;
+}
+
+static u32 mdrefr_dri(unsigned int freq_khz)
+{
+ u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+
+ return (interval - 31) / 32;
+}
+
/*
* Get the clock frequency as reflected by CCSR and the turbo flag.
* We assume these values have been applied via a fcs.
@@ -145,6 +188,42 @@ static struct desc_clk_cken pxa27x_clocks[] __initdata = {
};
+/*
+ * PXA270 definitions
+ *
+ * For the PXA27x:
+ * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
+ *
+ * A = 0 => memory controller clock from table 3-7,
+ * A = 1 => memory controller clock = system bus clock
+ * Run mode frequency = 13 MHz * L
+ * Turbo mode frequency = 13 MHz * L * N
+ * System bus frequency = 13 MHz * L (B = 1, fast bus) or 13 MHz * L / 2 (B = 0)
+ *
+ * In CCCR:
+ * A = 1
+ * L = 16 oscillator to run mode ratio
+ * 2N = 6 2 * (turbo mode to run mode ratio)
+ *
+ * In CCLKCFG:
+ * B = 1 Fast bus mode
+ * HT = 0 Half-Turbo mode
+ * T = 1 Turbo mode
+ *
+ * For now, just support some of the combinations in table 3-7 of
+ * PXA27x Processor Family Developer's Manual to simplify frequency
+ * change sequences.
+ */
+static struct pxa2xx_freq pxa27x_freqs[] = {
+ {104000000, 104000, PXA27x_CCCR(1, 8, 2), 0, PXA27x_CLKCFG(1, 0, 1) },
+ {156000000, 104000, PXA27x_CCCR(1, 8, 3), 0, PXA27x_CLKCFG(1, 0, 1) },
+ {208000000, 208000, PXA27x_CCCR(0, 16, 2), 1, PXA27x_CLKCFG(0, 0, 1) },
+ {312000000, 208000, PXA27x_CCCR(1, 16, 3), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {416000000, 208000, PXA27x_CCCR(1, 16, 4), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {520000000, 208000, PXA27x_CCCR(1, 16, 5), 1, PXA27x_CLKCFG(1, 0, 1) },
+ {624000000, 208000, PXA27x_CCCR(1, 16, 6), 1, PXA27x_CLKCFG(1, 0, 1) },
+};
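One row of this table worked through with the formulas from the comment above:

    PXA27x_CCCR(1, 16, 5):  L = 16, 2N = 5  =>  N = 2.5, A = 1
    run   = 13 MHz * 16       = 208 MHz   (membus column: 208000 kHz)
    turbo = 13 MHz * 16 * 2.5 = 520 MHz   (the 520000000 row)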
+
static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -162,10 +241,35 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
L = l * parent_rate;
N = (L * n2) / 2;
- return t ? N : L;
+ return N;
+}
+
+static int clk_pxa27x_cpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return pxa2xx_determine_rate(req, pxa27x_freqs,
+ ARRAY_SIZE(pxa27x_freqs));
+}
+
+static int clk_pxa27x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+
+ pr_debug("%s(rate=%lu parent_rate=%lu)\n", __func__, rate, parent_rate);
+ for (i = 0; i < ARRAY_SIZE(pxa27x_freqs); i++)
+ if (pxa27x_freqs[i].cpll == rate)
+ break;
+
+ if (i >= ARRAY_SIZE(pxa27x_freqs))
+ return -EINVAL;
+
+ pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+ return 0;
}
+
PARENTS(clk_pxa27x_cpll) = { "osc_13mhz" };
-RATE_RO_OPS(clk_pxa27x_cpll, "cpll");
+RATE_OPS(clk_pxa27x_cpll, "cpll");
static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -217,31 +321,10 @@ static void __init pxa27x_register_plls(void)
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
-static unsigned long clk_pxa27x_core_get_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long clkcfg;
- unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = readl(CCSR);
-
- osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
- asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
- t = clkcfg & (1 << 0);
- ht = clkcfg & (1 << 2);
- b = clkcfg & (1 << 3);
-
- if (osc_forced)
- return parent_rate;
- if (ht)
- return parent_rate / 2;
- else
- return parent_rate;
-}
-
static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
- unsigned int t, ht, b, osc_forced;
+ unsigned int t, ht, osc_forced;
unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -251,14 +334,30 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
ht = clkcfg & (1 << 2);
- b = clkcfg & (1 << 3);
if (ht || t)
return PXA_CORE_TURBO;
return PXA_CORE_RUN;
}
+
+static int clk_pxa27x_core_set_parent(struct clk_hw *hw, u8 index)
+{
+ if (index > PXA_CORE_TURBO)
+ return -EINVAL;
+
+ pxa2xx_core_turbo_switch(index == PXA_CORE_TURBO);
+
+ return 0;
+}
+
+static int clk_pxa27x_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return __clk_mux_determine_rate(hw, req);
+}
+
PARENTS(clk_pxa27x_core) = { "osc_13mhz", "run", "cpll" };
-MUX_RO_RATE_RO_OPS(clk_pxa27x_core, "core");
+MUX_OPS(clk_pxa27x_core, "core", CLK_SET_RATE_PARENT);
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -273,9 +372,10 @@ RATE_RO_OPS(clk_pxa27x_run, "run");
static void __init pxa27x_register_core(void)
{
- clk_register_clk_pxa27x_cpll();
- clk_register_clk_pxa27x_run();
-
+ clkdev_pxa_register(CLK_NONE, "cpll", NULL,
+ clk_register_clk_pxa27x_cpll());
+ clkdev_pxa_register(CLK_NONE, "run", NULL,
+ clk_register_clk_pxa27x_run());
clkdev_pxa_register(CLK_CORE, "core", NULL,
clk_register_clk_pxa27x_core());
}
@@ -294,9 +394,9 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
if (osc_forced)
return parent_rate;
if (b)
- return parent_rate / 2;
- else
return parent_rate;
+ else
+ return parent_rate / 2;
}
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
@@ -385,8 +485,10 @@ static void __init pxa27x_base_clocks_init(void)
{
pxa27x_register_plls();
pxa27x_register_core();
- clk_register_clk_pxa27x_system_bus();
- clk_register_clk_pxa27x_memory();
+ clkdev_pxa_register(CLK_NONE, "system_bus", NULL,
+ clk_register_clk_pxa27x_system_bus());
+ clkdev_pxa_register(CLK_NONE, "memory", NULL,
+ clk_register_clk_pxa27x_memory());
clk_register_clk_pxa27x_lcd_base();
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 0146d3c2547f..5fb8d7430908 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -2,6 +2,9 @@ config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+config QCOM_RPMCC
+ bool
+
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
@@ -9,6 +12,32 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine that
+ manages shared SoC resources to keep the SoC in its lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory, accepts clock requests, aggregates them, and turns the
+ clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960, etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine that
+ manages shared SoC resources to keep the SoC in its lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory, accepts clock requests, aggregates them, and turns the
+ clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974, etc.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -132,6 +161,14 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8994
+ tristate "MSM8994 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8994 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
config MSM_GCC_8996
tristate "MSM8996 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1fb1f5476cb0..1c3e222b917b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -24,8 +24,11 @@ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6a03eaf7a93..47a1da3739ce 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -18,17 +18,21 @@
#include <linux/delay.h>
#include "clk-alpha-pll.h"
+#include "common.h"
#define PLL_MODE 0x00
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
+# define PLL_OFFLINE_REQ BIT(7)
# define PLL_LOCK_COUNT_SHIFT 8
# define PLL_LOCK_COUNT_MASK 0x3f
# define PLL_BIAS_COUNT_SHIFT 14
# define PLL_BIAS_COUNT_MASK 0x3f
# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_FSM_ENA BIT(20)
# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_OFFLINE_ACK BIT(28)
# define PLL_ACTIVE_FLAG BIT(30)
# define PLL_LOCK_DET BIT(31)
@@ -46,6 +50,7 @@
#define PLL_USER_CTL_U 0x14
#define PLL_CONFIG_CTL 0x18
+#define PLL_CONFIG_CTL_U 0x20
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
@@ -55,6 +60,7 @@
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
+#define ALPHA_16BIT_MASK 0xffff
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
@@ -62,9 +68,10 @@
#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll_postdiv, clkr)
-static int wait_for_pll(struct clk_alpha_pll *pll)
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
{
- u32 val, mask, off;
+ u32 val, off;
int count;
int ret;
const char *name = clk_hw_get_name(&pll->clkr.hw);
@@ -74,26 +81,148 @@ static int wait_for_pll(struct clk_alpha_pll *pll)
if (ret)
return ret;
- if (val & PLL_VOTE_FSM_ENA)
- mask = PLL_ACTIVE_FLAG;
- else
- mask = PLL_LOCK_DET;
-
- /* Wait for pll to enable. */
for (count = 100; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
return ret;
- if ((val & mask) == mask)
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
return 0;
udelay(1);
}
- WARN(1, "%s didn't enable after voting for it!\n", name);
+ WARN(1, "%s failed to %s!\n", name, action);
return -ETIMEDOUT;
}
+#define wait_for_pll_enable_active(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable")
+
+#define wait_for_pll_enable_lock(pll) \
+ wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+
+#define wait_for_pll_disable(pll) \
+ wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
+
+#define wait_for_pll_offline(pll) \
+ wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
+
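The reworked wait_for_pll() polls a single status bit either until it is set (the enable and offline cases) or, when inverse is passed, until it clears (the disable case); the wrapper macros above just pick the bit and the polarity. A minimal stand-alone sketch of that decision, with a fake register value standing in for the regmap read:

    #include <stdbool.h>
    #include <stdio.h>

    #define PLL_ACTIVE_FLAG (1u << 30)

    /* Same check as the loop body in wait_for_pll(): succeed when the masked
     * bit matches the requested polarity. */
    static bool pll_poll_done(unsigned int val, unsigned int mask, bool inverse)
    {
        if (inverse)
            return !(val & mask);         /* waiting for the bit to clear */
        return (val & mask) == mask;      /* waiting for the bit to be set */
    }

    int main(void)
    {
        unsigned int mode = PLL_ACTIVE_FLAG;   /* pretend the PLL reports active */

        printf("enable done:  %d\n", pll_poll_done(mode, PLL_ACTIVE_FLAG, false)); /* 1 */
        printf("disable done: %d\n", pll_poll_done(mode, PLL_ACTIVE_FLAG, true));  /* 0 */
        return 0;
    }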
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val, mask;
+ u32 off = pll->offset;
+
+ regmap_write(regmap, off + PLL_L_VAL, config->l);
+ regmap_write(regmap, off + PLL_ALPHA_VAL, config->alpha);
+ regmap_write(regmap, off + PLL_CONFIG_CTL, config->config_ctl_val);
+ regmap_write(regmap, off + PLL_CONFIG_CTL_U, config->config_ctl_hi_val);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->vco_val;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->vco_mask;
+
+ regmap_update_bits(regmap, off + PLL_USER_CTL, mask, val);
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ qcom_pll_set_fsm_mode(regmap, off + PLL_MODE, 6, 0);
+}
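A hedged sketch of how a clock-controller driver might call the new clk_alpha_pll_configure() helper at probe time. The struct fields match the alpha_pll_config definition added to clk-alpha-pll.h further down, but the values themselves are placeholders, not taken from any real SoC clock plan:

    #include <linux/regmap.h>

    #include "clk-alpha-pll.h"

    /* Placeholder values for illustration only. */
    static const struct alpha_pll_config example_pll_config = {
        .l                = 0x20,    /* integer multiplier (hypothetical) */
        .alpha            = 0x0,     /* no fractional part */
        .config_ctl_val   = 0x0,     /* SoC-specific in practice */
        .main_output_mask = BIT(0),
        .early_output_mask = BIT(3),
    };

    static void example_configure(struct clk_alpha_pll *pll, struct regmap *regmap)
    {
        /* Writes L/ALPHA/CONFIG_CTL and the USER_CTL output bits; if the PLL
         * has SUPPORTS_FSM_MODE set, FSM voting is enabled as well. */
        clk_alpha_pll_configure(pll, regmap, &example_pll_config);
    }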
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ val |= PLL_FSM_ENA;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ)
+ val &= ~PLL_OFFLINE_REQ;
+
+ ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ return wait_for_pll_enable_active(pll);
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+
+ if (pll->flags & SUPPORTS_OFFLINE_REQ) {
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+
+ ret = wait_for_pll_offline(pll);
+ if (ret)
+ return;
+ }
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+
+ wait_for_pll_disable(pll);
+}
+
+static int pll_is_enabled(struct clk_hw *hw, u32 mask)
+{
+ int ret;
+ u32 val, off;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_ACTIVE_FLAG);
+}
+
+static int clk_alpha_pll_is_enabled(struct clk_hw *hw)
+{
+ return pll_is_enabled(hw, PLL_LOCK_DET);
+}
+
static int clk_alpha_pll_enable(struct clk_hw *hw)
{
int ret;
@@ -112,7 +241,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
ret = clk_enable_regmap(hw);
if (ret)
return ret;
- return wait_for_pll(pll);
+ return wait_for_pll_enable_active(pll);
}
/* Skip if already enabled */
@@ -136,7 +265,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
- ret = wait_for_pll(pll);
+ ret = wait_for_pll_enable_lock(pll);
if (ret)
return ret;
@@ -234,9 +363,14 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low);
- regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, &high);
- a = (u64)high << 32 | low;
- a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ if (pll->flags & SUPPORTS_16BIT_ALPHA) {
+ a = low & ALPHA_16BIT_MASK;
+ } else {
+ regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U,
+ &high);
+ a = (u64)high << 32 | low;
+ a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ }
}
return alpha_pll_calc_rate(prate, l, a);
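The two branches above differ only in how the fractional value is packed: the default layout keeps a 40-bit register image that is shifted down by ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH (8) bits to the driver's 32-bit representation, while SUPPORTS_16BIT_ALPHA keeps just the low 16 bits. A stand-alone sketch of the round trip, using an arbitrary fraction:

    #include <stdint.h>
    #include <stdio.h>

    #define ALPHA_REG_BITWIDTH  40
    #define ALPHA_BITWIDTH      32
    #define ALPHA_16BIT_MASK    0xffff

    int main(void)
    {
        uint64_t a = 0x12345678;    /* arbitrary 32-bit fractional value */

        /* Default layout: 40-bit register image split into low/high words,
         * as written by clk_alpha_pll_set_rate() and read back above. */
        uint64_t reg40 = a << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
        uint32_t low = (uint32_t)reg40;
        uint32_t high = (uint32_t)(reg40 >> 32);
        uint64_t back = (((uint64_t)high << 32) | low) >>
                        (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

        /* SUPPORTS_16BIT_ALPHA: only the low 16 bits are kept. */
        uint32_t a16 = (uint32_t)a & ALPHA_16BIT_MASK;

        printf("40-bit round trip: %#llx -> %#llx\n",
               (unsigned long long)a, (unsigned long long)back);
        printf("16-bit alpha:      %#x\n", a16);
        return 0;
    }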
@@ -257,11 +391,15 @@ static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
-
regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
- regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, a);
- regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
+
+ if (pll->flags & SUPPORTS_16BIT_ALPHA) {
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL,
+ a & ALPHA_16BIT_MASK);
+ } else {
+ a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
+ }
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
PLL_VCO_MASK << PLL_VCO_SHIFT,
@@ -294,12 +432,23 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .is_enabled = clk_alpha_pll_hwfsm_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
static unsigned long
clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 90ce2016e1a0..d6e1ee2c7348 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -34,6 +34,10 @@ struct clk_alpha_pll {
const struct pll_vco *vco_table;
size_t num_vco;
+#define SUPPORTS_OFFLINE_REQ BIT(0)
+#define SUPPORTS_16BIT_ALPHA BIT(1)
+#define SUPPORTS_FSM_MODE BIT(2)
+ u8 flags;
struct clk_regmap clkr;
};
@@ -51,7 +55,28 @@ struct clk_alpha_pll_postdiv {
struct clk_regmap clkr;
};
+struct alpha_pll_config {
+ u32 l;
+ u32 alpha;
+ u32 config_ctl_val;
+ u32 config_ctl_hi_val;
+ u32 main_output_mask;
+ u32 aux_output_mask;
+ u32 aux2_output_mask;
+ u32 early_output_mask;
+ u32 pre_div_val;
+ u32 pre_div_mask;
+ u32 post_div_val;
+ u32 post_div_mask;
+ u32 vco_val;
+ u32 vco_mask;
+};
+
extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+
#endif
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 5b940d629045..cb6cb8710daf 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -23,16 +23,11 @@
#include <asm/div64.h>
#include "clk-pll.h"
+#include "common.h"
#define PLL_OUTCTRL BIT(0)
#define PLL_BYPASSNL BIT(1)
#define PLL_RESET_N BIT(2)
-#define PLL_LOCK_COUNT_SHIFT 8
-#define PLL_LOCK_COUNT_MASK 0x3f
-#define PLL_BIAS_COUNT_SHIFT 14
-#define PLL_BIAS_COUNT_MASK 0x3f
-#define PLL_VOTE_FSM_ENA BIT(20)
-#define PLL_VOTE_FSM_RESET BIT(21)
static int clk_pll_enable(struct clk_hw *hw)
{
@@ -228,26 +223,6 @@ const struct clk_ops clk_pll_vote_ops = {
};
EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
-static void
-clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap, u8 lock_count)
-{
- u32 val;
- u32 mask;
-
- /* De-assert reset to FSM */
- regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
-
- /* Program bias count and lock count */
- val = 1 << PLL_BIAS_COUNT_SHIFT | lock_count << PLL_LOCK_COUNT_SHIFT;
- mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
- mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
- regmap_update_bits(regmap, pll->mode_reg, mask, val);
-
- /* Enable PLL FSM voting */
- regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_ENA,
- PLL_VOTE_FSM_ENA);
-}
-
static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
const struct pll_config *config)
{
@@ -280,7 +255,7 @@ void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
{
clk_pll_configure(pll, regmap, config);
if (fsm_mode)
- clk_pll_set_fsm_mode(pll, regmap, 8);
+ qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 8);
}
EXPORT_SYMBOL_GPL(clk_pll_configure_sr);
@@ -289,7 +264,7 @@ void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
{
clk_pll_configure(pll, regmap, config);
if (fsm_mode)
- clk_pll_set_fsm_mode(pll, regmap, 0);
+ qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 0);
}
EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b904c335cda4..1b3e8d265bdb 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -173,6 +173,7 @@ struct clk_rcg2 {
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a071bba8018c..1a0985ae20d2 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -47,6 +47,11 @@
#define N_REG 0xc
#define D_REG 0x10
+enum freq_policy {
+ FLOOR,
+ CEIL,
+};
+
static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -176,15 +181,26 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, hid_div);
}
-static int _freq_tbl_determine_rate(struct clk_hw *hw,
- const struct freq_tbl *f, struct clk_rate_request *req)
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
+ enum freq_policy policy)
{
unsigned long clk_flags, rate = req->rate;
struct clk_hw *p;
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int index;
- f = qcom_find_freq(f, rate);
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(f, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(f, rate);
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (!f)
return -EINVAL;
@@ -221,7 +237,15 @@ static int clk_rcg2_determine_rate(struct clk_hw *hw,
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
+}
+
+static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
@@ -265,12 +289,23 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
-static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ enum freq_policy policy)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
- f = qcom_find_freq(rcg->freq_tbl, rate);
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (!f)
return -EINVAL;
@@ -280,13 +315,25 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- return __clk_rcg2_set_rate(hw, rate);
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
- return __clk_rcg2_set_rate(hw, rate);
+ return __clk_rcg2_set_rate(hw, rate, CEIL);
+}
+
+static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
const struct clk_ops clk_rcg2_ops = {
@@ -300,6 +347,17 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_floor_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+
static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -323,7 +381,7 @@ static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
pr_err("%s: RCG did not turn on\n", name);
/* set clock rate */
- ret = __clk_rcg2_set_rate(hw, rate);
+ ret = __clk_rcg2_set_rate(hw, rate, CEIL);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
new file mode 100644
index 000000000000..df3e5fe8442a
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+
+struct clk_rpm {
+ const int rpm_clk_id;
+ const bool active_only;
+ unsigned long rate;
+ bool enabled;
+ bool branch;
+ struct clk_rpm *peer;
+ struct clk_hw hw;
+ struct qcom_rpm *rpm;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+struct rpm_clk_desc {
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+static int clk_rpm_handoff(struct clk_rpm *r)
+{
+ int ret;
+ u32 value = INT_MAX;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active-set vote and restore the peer-only rate */
+ ret = clk_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
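Each RPM clock keeps an active-set and a sleep-set vote, and every request is aggregated with its peer before being sent to the RPM. As a worked example of the max() aggregation above: if the active-only peer has voted 100 MHz and this clock prepares at 50 MHz, the RPM ends up with an active-set vote of 100 MHz and a sleep-set vote of 50 MHz, because the active-only peer contributes zero to the sleep set. A stand-alone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of to_active_sleep(): active-only clocks vote 0 for the sleep set. */
    static void to_active_sleep(bool active_only, unsigned long rate,
                                unsigned long *active, unsigned long *sleep)
    {
        *active = rate;
        *sleep = active_only ? 0 : rate;
    }

    int main(void)
    {
        unsigned long this_a, this_s, peer_a, peer_s;

        to_active_sleep(false, 50000000, &this_a, &this_s);    /* this clock */
        to_active_sleep(true, 100000000, &peer_a, &peer_s);    /* active-only peer */

        printf("active-set vote: %lu\n", this_a > peer_a ? this_a : peer_a);  /* 100000000 */
        printf("sleep-set vote:  %lu\n", this_s > peer_s ? this_s : peer_s);  /*  50000000 */
        return 0;
    }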
+
+static void clk_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
+static int clk_rpm_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static const struct clk_ops clk_rpm_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .set_rate = clk_rpm_set_rate,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_rpm_branch_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+/* apq8064 */
+DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+
+static struct clk_rpm *apq8064_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
+ [RPM_CFPB_CLK] = &apq8064_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
+ [RPM_EBI1_CLK] = &apq8064_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
+ [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
+ [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
+ [RPM_SFPB_CLK] = &apq8064_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
+ [RPM_QDSS_CLK] = &apq8064_qdss_clk,
+ [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_apq8064 = {
+ .clks = apq8064_clks,
+ .num_clks = ARRAY_SIZE(apq8064_clks),
+};
+
+static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
+
+static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct rpm_cc *rcc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rcc->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+static int rpm_clk_probe(struct platform_device *pdev)
+{
+ struct rpm_cc *rcc;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_rpm *rpm;
+ struct clk_rpm **rpm_clks;
+ const struct rpm_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ rcc->clks = rpm_clks;
+ rcc->num_clks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i])
+ continue;
+
+ rpm_clks[i]->rpm = rpm;
+
+ ret = clk_rpm_handoff(rpm_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i])
+ continue;
+
+ ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
+ rcc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-rpm",
+ .of_match_table = rpm_clk_match_table,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+core_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-rpm");
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 000000000000..07e2cc6ed781
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
+ key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_name, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
+ stat_id, r, key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \
+ r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+struct clk_smd_rpm {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch;
+ struct clk_smd_rpm *peer;
+ struct clk_hw hw;
+ unsigned long rate;
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+struct rpm_smd_clk_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_smd_clk_lock);
+
+static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(INT_MAX),
+ };
+
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active-set vote and restore the peer-only rate */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+}
+
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
+
+static const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+/* msm8916 */
+DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8916_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
+ .clks = msm8916_clks,
+ .num_clks = ARRAY_SIZE(msm8916_clks),
+};
+
+static const struct of_device_id rpm_smd_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
+
+static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct rpm_cc *rcc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rcc->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
+static int rpm_smd_clk_probe(struct platform_device *pdev)
+{
+ struct rpm_cc *rcc;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_smd_rpm *rpm;
+ struct clk_smd_rpm **rpm_smd_clks;
+ const struct rpm_smd_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_smd_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ rcc->clks = rpm_smd_clks;
+ rcc->num_clks = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i])
+ continue;
+
+ rpm_smd_clks[i]->rpm = rpm;
+
+ ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i])
+ continue;
+
+ ret = devm_clk_hw_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_smdrpm_clk_hw_get,
+ rcc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_smd_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_smd_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-smd-rpm",
+ .of_match_table = rpm_smd_clk_match_table,
+ },
+ .probe = rpm_smd_clk_probe,
+ .remove = rpm_smd_clk_remove,
+};
+
+static int __init rpm_smd_clk_init(void)
+{
+ return platform_driver_register(&rpm_smd_clk_driver);
+}
+core_initcall(rpm_smd_clk_init);
+
+static void __exit rpm_smd_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_smd_clk_driver);
+}
+module_exit(rpm_smd_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-smd-rpm");
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index fffcbaf0fba7..cfab7b400381 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -46,6 +46,22 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
}
EXPORT_SYMBOL_GPL(qcom_find_freq);
+const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate)
+{
+ const struct freq_tbl *best = NULL;
+
+ for ( ; f->freq; f++) {
+ if (rate >= f->freq)
+ best = f;
+ else
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
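With qcom_find_freq_floor() in place, the two lookup policies used in clk-rcg2.c differ only in rounding direction: CEIL picks the first table entry at or above the requested rate, FLOOR the last entry at or below it. A stand-alone sketch with a hypothetical three-entry table (the ceiling walk below mirrors what qcom_find_freq() is assumed to do, including falling back to the fastest entry for out-of-range requests):

    #include <stdio.h>

    struct freq { unsigned long freq; };    /* simplified stand-in for struct freq_tbl */

    /* Same walk as qcom_find_freq_floor(): last entry <= rate. */
    static const struct freq *find_floor(const struct freq *f, unsigned long rate)
    {
        const struct freq *best = NULL;

        for (; f->freq; f++) {
            if (rate >= f->freq)
                best = f;
            else
                break;
        }
        return best;
    }

    /* Ceiling lookup: first entry >= rate, else the last (fastest) entry. */
    static const struct freq *find_ceil(const struct freq *f, unsigned long rate)
    {
        for (; f->freq; f++)
            if (rate <= f->freq)
                return f;
        return f - 1;
    }

    int main(void)
    {
        static const struct freq tbl[] = {
            { 100000000 }, { 200000000 }, { 400000000 }, { 0 }
        };

        printf("floor(150 MHz) = %lu\n", find_floor(tbl, 150000000)->freq);  /* 100000000 */
        printf("ceil(150 MHz)  = %lu\n", find_ceil(tbl, 150000000)->freq);   /* 200000000 */
        return 0;
    }

The floor variant is what the sdcc*_apps_clk_src entries further down switch to, presumably so that a request slightly above a supported rate rounds down instead of overclocking the card.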
+
int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
{
int i, num_parents = clk_hw_get_num_parents(hw);
@@ -74,6 +90,27 @@ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
}
EXPORT_SYMBOL_GPL(qcom_cc_map);
+void
+qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = bias_count << PLL_BIAS_COUNT_SHIFT |
+ lock_count << PLL_LOCK_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(map, reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
+}
+EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
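The helper packs the two counters into the PLL MODE register and then enables FSM voting. For the clk_pll_configure_sr() caller earlier in this patch (bias_count = 1, lock_count = 8), the programmed value and mask work out as follows (stand-alone sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define PLL_LOCK_COUNT_SHIFT  8
    #define PLL_LOCK_COUNT_MASK   0x3f
    #define PLL_BIAS_COUNT_SHIFT  14
    #define PLL_BIAS_COUNT_MASK   0x3f

    int main(void)
    {
        unsigned int bias_count = 1, lock_count = 8;    /* clk_pll_configure_sr() case */

        uint32_t val = bias_count << PLL_BIAS_COUNT_SHIFT |
                       lock_count << PLL_LOCK_COUNT_SHIFT;
        uint32_t mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT |
                        PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;

        printf("val  = %#x\n", val);     /* 0x4800: bias = 1, lock = 8 */
        printf("mask = %#x\n", mask);    /* 0xfff00 */
        return 0;
    }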
+
static void qcom_cc_del_clk_provider(void *data)
{
of_clk_del_provider(data);
@@ -153,15 +190,12 @@ int qcom_cc_register_board_clk(struct device *dev, const char *path,
const char *name, unsigned long rate)
{
bool add_factor = true;
- struct device_node *node;
-
- /* The RPM clock driver will add the factor clock if present */
- if (IS_ENABLED(CONFIG_QCOM_RPMCC)) {
- node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc");
- if (of_device_is_available(node))
- add_factor = false;
- of_node_put(node);
- }
+
+ /*
+ * TODO: The RPM clock driver currently does not support the xo clock.
+ * When xo is added to the RPM clock driver, we should change this
+ * function to skip registration of xo factor clocks.
+ */
return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
}
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index ae9bdeb21f29..23c1927669ba 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -22,6 +22,13 @@ struct freq_tbl;
struct clk_hw;
struct parent_map;
+#define PLL_LOCK_COUNT_SHIFT 8
+#define PLL_LOCK_COUNT_MASK 0x3f
+#define PLL_BIAS_COUNT_SHIFT 14
+#define PLL_BIAS_COUNT_MASK 0x3f
+#define PLL_VOTE_FSM_ENA BIT(20)
+#define PLL_VOTE_FSM_RESET BIT(21)
+
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
@@ -34,6 +41,10 @@ struct qcom_cc_desc {
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
unsigned long rate);
+extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+ unsigned long rate);
+extern void
+qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 070037a29ea5..486d9610355c 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -1142,7 +1142,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1156,7 +1156,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1170,7 +1170,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1184,7 +1184,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index b593065de8db..33d09138f5e5 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -185,8 +185,7 @@ static struct clk_branch gcc_audio_pwm_clk = {
};
static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_i2c_apps_clk[] = {
- F(19200000, P_XO, 1, 2, 5),
- F(24000000, P_XO, 1, 1, 2),
+ F(19050000, P_FEPLL200, 10.5, 1, 1),
{ }
};
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 52a7d3959875..28eb200d0f1e 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -2990,11 +2990,11 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
- ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000);
+ ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 25000000);
if (ret)
return ret;
- ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000);
+ ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 25000000);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 5c4e193164d4..628e6ca276ec 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1107,7 +1107,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1132,7 +1132,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 00915209e7c5..348e30da4f18 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -872,7 +872,7 @@ static struct clk_init_data sdcc1_apps_clk_src_init = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
};
static struct clk_rcg2 sdcc1_apps_clk_src = {
@@ -894,7 +894,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -908,7 +908,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -922,7 +922,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
new file mode 100644
index 000000000000..8afd8304a070
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -0,0 +1,2300 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL4,
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const char * const gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+};
+
+static const char * const gcc_xo_gpll0_gpll4[] = {
+ "xo",
+ "gpll0",
+ "gpll4",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "xo",
+ .parent_names = (const char *[]) { "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x00000,
+ .clkr = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll0_early",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x00000,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll0",
+ .parent_names = (const char *[]) { "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x1dc0,
+ .clkr = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll4_early",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x1dc0,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gpll4",
+ .parent_names = (const char *[]) { "gpll4_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(171430000, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x1d68,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(125000000, P_GPLL0, 1, 5, 24),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x03d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0660,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x064c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0760,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x074c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0860,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x084c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x08e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x08cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63160000, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x068c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x070c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x078c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x080c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x088c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x090c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x09a0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x098c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0a20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0aa0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0b20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0ba0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x09cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0a4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x0acc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x0b4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x0bcc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x0c4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "blsp2_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x1904,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp2_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x1944,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_gp3_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x1984,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gp3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
+ F(1011000, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x1b00,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_0_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
+ F(125000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_pipe_clk_src = {
+ .cmd_rcgr = 0x1adc,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_pipe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_0_pipe_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
+ F(1011000, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x1b80,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_1_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_1_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pcie_1_pipe_clk_src = {
+ .cmd_rcgr = 0x1b5c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pcie_pipe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pcie_1_pipe_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x0cd0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(192000000, P_GPLL4, 2, 0, 0),
+ F(384000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x04d0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x0510,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x0550,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x0590,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F(105500, P_XO, 1, 1, 182),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x0d90,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "tsif_ref_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x03e8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1414,
+ .hid_width = 5,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = (const char *[]) { "xo" },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x0490,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data)
+ {
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0648,
+ .clkr = {
+ .enable_reg = 0x0648,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x0644,
+ .clkr = {
+ .enable_reg = 0x0644,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x06c8,
+ .clkr = {
+ .enable_reg = 0x06c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x06c4,
+ .clkr = {
+ .enable_reg = 0x06c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0748,
+ .clkr = {
+ .enable_reg = 0x0748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0744,
+ .clkr = {
+ .enable_reg = 0x0744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x07c8,
+ .clkr = {
+ .enable_reg = 0x07c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x07c4,
+ .clkr = {
+ .enable_reg = 0x07c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0848,
+ .clkr = {
+ .enable_reg = 0x0848,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0844,
+ .clkr = {
+ .enable_reg = 0x0844,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x08c8,
+ .clkr = {
+ .enable_reg = 0x08c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x08c4,
+ .clkr = {
+ .enable_reg = 0x08c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0684,
+ .clkr = {
+ .enable_reg = 0x0684,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0704,
+ .clkr = {
+ .enable_reg = 0x0704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0784,
+ .clkr = {
+ .enable_reg = 0x0784,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0804,
+ .clkr = {
+ .enable_reg = 0x0804,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0884,
+ .clkr = {
+ .enable_reg = 0x0884,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0904,
+ .clkr = {
+ .enable_reg = 0x0904,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp1_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x0944,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0988,
+ .clkr = {
+ .enable_reg = 0x0988,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0984,
+ .clkr = {
+ .enable_reg = 0x0984,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0a08,
+ .clkr = {
+ .enable_reg = 0x0a08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0a04,
+ .clkr = {
+ .enable_reg = 0x0a04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0a88,
+ .clkr = {
+ .enable_reg = 0x0a88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0a84,
+ .clkr = {
+ .enable_reg = 0x0a84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x0b08,
+ .clkr = {
+ .enable_reg = 0x0b08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x0b04,
+ .clkr = {
+ .enable_reg = 0x0b04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0b88,
+ .clkr = {
+ .enable_reg = 0x0b88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x0b84,
+ .clkr = {
+ .enable_reg = 0x0b84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x0c08,
+ .clkr = {
+ .enable_reg = 0x0c08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x0c04,
+ .clkr = {
+ .enable_reg = 0x0c04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x09c4,
+ .clkr = {
+ .enable_reg = 0x09c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0a44,
+ .clkr = {
+ .enable_reg = 0x0a44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x0ac4,
+ .clkr = {
+ .enable_reg = 0x0ac4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+ .halt_reg = 0x0b44,
+ .clkr = {
+ .enable_reg = 0x0b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart4_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+ .halt_reg = 0x0bc4,
+ .clkr = {
+ .enable_reg = 0x0bc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart5_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+ .halt_reg = 0x0c44,
+ .clkr = {
+ .enable_reg = 0x0c44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_blsp2_uart6_apps_clk",
+ .parent_names = (const char *[]) {
+ "blsp2_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x1900,
+ .clkr = {
+ .enable_reg = 0x1900,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]) {
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x1940,
+ .clkr = {
+ .enable_reg = 0x1940,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]) {
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x1980,
+ .clkr = {
+ .enable_reg = 0x1980,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]) {
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x1ad4,
+ .clkr = {
+ .enable_reg = 0x1ad4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]) {
+ "pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x1ad8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1ad8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]) {
+ "pcie_0_pipe_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x1b54,
+ .clkr = {
+ .enable_reg = 0x1b54,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]) {
+ "pcie_1_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x1b58,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1b58,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_names = (const char *[]) {
+ "pcie_1_pipe_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x0ccc,
+ .clkr = {
+ .enable_reg = 0x0ccc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]) {
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x04c4,
+ .clkr = {
+ .enable_reg = 0x04c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x0504,
+ .clkr = {
+ .enable_reg = 0x0504,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x0544,
+ .clkr = {
+ .enable_reg = 0x0544,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x0584,
+ .clkr = {
+ .enable_reg = 0x0584,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]) {
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
+ .halt_reg = 0x1d7c,
+ .clkr = {
+ .enable_reg = 0x1d7c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sys_noc_ufs_axi_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+ .halt_reg = 0x03fc,
+ .clkr = {
+ .enable_reg = 0x03fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sys_noc_usb3_axi_clk",
+ .parent_names = (const char *[]) {
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x0d88,
+ .clkr = {
+ .enable_reg = 0x0d88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]) {
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x1d48,
+ .clkr = {
+ .enable_reg = 0x1d48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_cfg_clk = {
+ .halt_reg = 0x1d54,
+ .clkr = {
+ .enable_reg = 0x1d54,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_rx_cfg_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_cfg_clk = {
+ .halt_reg = 0x1d50,
+ .clkr = {
+ .enable_reg = 0x1d50,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_tx_cfg_clk",
+ .parent_names = (const char *[]) {
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x03c8,
+ .clkr = {
+ .enable_reg = 0x03c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]) {
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x03d0,
+ .clkr = {
+ .enable_reg = 0x03d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]) {
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x1408,
+ .clkr = {
+ .enable_reg = 0x1408,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]) {
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x0484,
+ .clkr = {
+ .enable_reg = 0x0484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]) {
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+ [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+ [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr,
+ [PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr,
+ [PCIE_1_AUX_CLK_SRC] = &pcie_1_aux_clk_src.clkr,
+ [PCIE_1_PIPE_CLK_SRC] = &pcie_1_pipe_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+ [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+ [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+ [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+};
+
+static const struct regmap_config gcc_msm8994_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8994_desc = {
+ .config = &gcc_msm8994_regmap_config,
+ .clks = gcc_msm8994_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8994_clocks),
+};
+
+static const struct of_device_id gcc_msm8994_match_table[] = {
+ { .compatible = "qcom,gcc-msm8994" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8994_match_table);
+
+static int gcc_msm8994_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+
+ clk = devm_clk_register(dev, &xo.hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return qcom_cc_probe(pdev, &gcc_msm8994_desc);
+}
+
+static struct platform_driver gcc_msm8994_driver = {
+ .probe = gcc_msm8994_probe,
+ .driver = {
+ .name = "gcc-msm8994",
+ .of_match_table = gcc_msm8994_match_table,
+ },
+};
+
+static int __init gcc_msm8994_init(void)
+{
+ return platform_driver_register(&gcc_msm8994_driver);
+}
+core_initcall(gcc_msm8994_init);
+
+static void __exit gcc_msm8994_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8994_driver);
+}
+module_exit(gcc_msm8994_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8994 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8994");
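
The new gcc-msm8994.c above builds its frequency tables with F(f, s, h, m, n), where the half-integer hardware pre-divider h is stored as 2*h - 1 and m/n describe the optional M/N fractional divider (m = n = 0 means the M/N counter is bypassed). The arithmetic can be checked against the tables themselves: assuming the usual 600 MHz GPLL0 (consistent with entries such as F(3686400, P_GPLL0, 1, 96, 15625), since 600 MHz * 96 / 15625 = 3.6864 MHz), F(171430000, P_GPLL0, 3.5, 0, 0) decodes to 600 MHz / 3.5 ≈ 171.43 MHz and F(125000000, P_GPLL0, 1, 5, 24) to 600 MHz * 5 / 24 = 125 MHz. The standalone sketch below reproduces that decoding; the struct and field names and the 600 MHz parent rate are illustrative assumptions, not the kernel's struct freq_tbl.

    /* Decode an F()-style entry: rate = src / pre_div * m / n,
     * where the stored pre-divider field is 2 * pre_div - 1. */
    #include <stdio.h>

    struct freq_entry {
        unsigned long freq;       /* target rate in Hz (for comparison) */
        unsigned long src_rate;   /* assumed parent rate in Hz */
        unsigned int pre_div_reg; /* 2 * divider - 1, as F() writes it */
        unsigned int m, n;        /* 0/0 = M/N counter bypassed */
    };

    static unsigned long decode(const struct freq_entry *e)
    {
        /* work in doubled units so x.5 dividers stay integral */
        unsigned long long rate = (unsigned long long)e->src_rate * 2;

        rate /= e->pre_div_reg + 1;         /* divide by the real pre-divider */
        if (e->m && e->n)
            rate = rate * e->m / e->n;      /* apply the M/N fraction */
        return (unsigned long)rate;
    }

    int main(void)
    {
        /* F(171430000, P_GPLL0, 3.5, 0, 0) from ftbl_ufs_axi_clk_src */
        struct freq_entry ufs = { 171430000, 600000000, 2 * 3.5 - 1, 0, 0 };
        /* F(125000000, P_GPLL0, 1, 5, 24) from ftbl_usb30_master_clk_src */
        struct freq_entry usb = { 125000000, 600000000, 2 * 1 - 1, 5, 24 };

        printf("target %lu Hz -> decoded %lu Hz\n", ufs.freq, decode(&ufs));
        printf("target %lu Hz -> decoded %lu Hz\n", usb.freq, decode(&usb));
        return 0;
    }

The same encoding explains the odd-looking register-level dividers elsewhere in the file (e.g. a pre-divider of 12.5 stored as 24), without any floating point in the driver itself.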
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index fe03e6fbc7df..4b1fc1730d29 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -460,14 +460,22 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
+static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.cmd_rcgr = 0x13024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
.parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
@@ -497,7 +505,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.name = "sdcc2_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -511,7 +519,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.name = "sdcc3_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll4,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -535,7 +543,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.name = "sdcc4_apps_clk_src",
.parent_names = gcc_xo_gpll0,
.num_parents = 2,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1230,10 +1238,18 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
+static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 ufs_ice_core_clk_src = {
.cmd_rcgr = 0x76014,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
.parent_names = gcc_xo_gpll0,
@@ -1242,10 +1258,19 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
},
};
+static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(256000000, P_GPLL4, 1.5, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 qspi_ser_clk_src = {
.cmd_rcgr = 0x8b00c,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
.parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index f12d7b2bddd7..288186cce0ae 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -30,6 +30,7 @@
#define SW_OVERRIDE_MASK BIT(2)
#define HW_CONTROL_MASK BIT(1)
#define SW_COLLAPSE_MASK BIT(0)
+#define GMEM_CLAMP_IO_MASK BIT(0)
/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL (0x2 << 20)
@@ -55,6 +56,13 @@ static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
return !!(val & PWR_ON_MASK);
}
+static int gdsc_hwctrl(struct gdsc *sc, bool en)
+{
+ u32 val = en ? HW_CONTROL_MASK : 0;
+
+ return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
+}
+
static int gdsc_toggle_logic(struct gdsc *sc, bool en)
{
int ret;
@@ -140,6 +148,18 @@ static inline void gdsc_clear_mem_on(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}
+static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
+{
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_CLAMP_IO_MASK, 0);
+}
+
+static inline void gdsc_assert_clamp_io(struct gdsc *sc)
+{
+ regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
+ GMEM_CLAMP_IO_MASK, 1);
+}
+
static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
@@ -148,6 +168,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_ON)
return gdsc_deassert_reset(sc);
+ if (sc->flags & CLAMP_IO)
+ gdsc_deassert_clamp_io(sc);
+
ret = gdsc_toggle_logic(sc, true);
if (ret)
return ret;
@@ -164,20 +187,39 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
+ /* Turn on HW trigger mode if supported */
+ if (sc->flags & HW_CTRL)
+ return gdsc_hwctrl(sc, true);
+
return 0;
}
static int gdsc_disable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
+ /* Turn off HW trigger mode if supported */
+ if (sc->flags & HW_CTRL) {
+ ret = gdsc_hwctrl(sc, false);
+ if (ret < 0)
+ return ret;
+ }
+
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
- return gdsc_toggle_logic(sc, false);
+ ret = gdsc_toggle_logic(sc, false);
+ if (ret)
+ return ret;
+
+ if (sc->flags & CLAMP_IO)
+ gdsc_assert_clamp_io(sc);
+
+ return 0;
}
static int gdsc_init(struct gdsc *sc)
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 3bf497c36bdf..39648348e5ec 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -39,6 +39,7 @@ struct gdsc {
struct regmap *regmap;
unsigned int gdscr;
unsigned int gds_hw_ctrl;
+ unsigned int clamp_io_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
const u8 pwrsts;
@@ -50,6 +51,8 @@ struct gdsc {
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
const u8 flags;
#define VOTABLE BIT(0)
+#define CLAMP_IO BIT(1)
+#define HW_CTRL BIT(2)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index db3998e5e2d8..977e98eadbeb 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -443,7 +443,7 @@ static int lcc_ipq806x_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
/* Configure the rate of PLL4 if the bootloader hasn't already */
- val = regmap_read(regmap, 0x0, &val);
+ regmap_read(regmap, 0x0, &val);
if (!val)
clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
/* Enable PLL4 source on the LPASS Primary PLL Mux */
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index ca97e1151797..9b97246287a7 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2945,6 +2945,7 @@ static struct gdsc venus_core0_gdsc = {
.name = "venus_core0",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc venus_core1_gdsc = {
@@ -2955,6 +2956,7 @@ static struct gdsc venus_core1_gdsc = {
.name = "venus_core1",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc camss_gdsc = {
@@ -3034,6 +3036,28 @@ static struct gdsc mdss_gdsc = {
.pwrsts = PWRSTS_OFF_ON,
};
+static struct gdsc gpu_gdsc = {
+ .gdscr = 0x4034,
+ .gds_hw_ctrl = 0x4038,
+ .pd = {
+ .name = "gpu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x4024,
+ .clamp_io_ctrl = 0x4300,
+ .cxcs = (unsigned int []){ 0x4028 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMPLL0_EARLY] = &mmpll0_early.clkr,
[MMPLL0_PLL] = &mmpll0.clkr,
@@ -3223,6 +3247,8 @@ static struct gdsc *mmcc_msm8996_gdscs[] = {
[CPP_GDSC] = &cpp_gdsc,
[FD_GDSC] = &fd_gdsc,
[MDSS_GDSC] = &mdss_gdsc,
+ [GPU_GDSC] = &gpu_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
};
static const struct qcom_reset_map mmcc_msm8996_resets[] = {
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 41a12d376799..2586dfa0026b 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -1,5 +1,7 @@
config CLK_RENESAS_CPG_MSSR
bool
+ default y if ARCH_R8A7743
+ default y if ARCH_R8A7745
default y if ARCH_R8A7795
default y if ARCH_R8A7796
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 90dd0db7d9c6..1072f7653c0c 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o clk-div6.o
obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7743) += r8a7743-cpg-mssr.o rcar-gen2-cpg.o
+obj-$(CONFIG_ARCH_R8A7745) += r8a7745-cpg-mssr.o rcar-gen2-cpg.o
obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 40e3a501a50e..886a8380e912 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -12,6 +12,7 @@
#include <linux/clk/renesas.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+#include <linux/soc/renesas/rcar-rst.h>
struct r8a7778_cpg {
struct clk_onecell_data data;
@@ -83,6 +84,18 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
struct clk **clks;
unsigned int i;
int num_clks;
+ u32 mode;
+
+ if (rcar_rst_read_mode_pins(&mode))
+ return;
+
+ BUG_ON(!(mode & BIT(19)));
+
+ cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
+ (!!(mode & BIT(12)) << 1) |
+ (!!(mode & BIT(11)));
+ cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
+ (!!(mode & BIT(1)));
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
@@ -130,16 +143,3 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
r8a7778_cpg_clocks_init);
-
-void __init r8a7778_clocks_init(u32 mode)
-{
- BUG_ON(!(mode & BIT(19)));
-
- cpg_mode_rates = (!!(mode & BIT(18)) << 2) |
- (!!(mode & BIT(12)) << 1) |
- (!!(mode & BIT(11)));
- cpg_mode_divs = (!!(mode & BIT(2)) << 1) |
- (!!(mode & BIT(1)));
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index cf2a37df03b1..27fbfafaf2cd 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7779-clock.h>
@@ -88,8 +89,6 @@ static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 };
* Initialization
*/
-static u32 cpg_mode __initdata;
-
static struct clk * __init
r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
const struct cpg_clk_config *config,
@@ -127,6 +126,10 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
struct clk **clks;
unsigned int i, plla_mult;
int num_clks;
+ u32 mode;
+
+ if (rcar_rst_read_mode_pins(&mode))
+ return;
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
@@ -148,8 +151,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(cpg_mode)];
- plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(cpg_mode)];
+ config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(mode)];
+ plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(mode)];
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -173,10 +176,3 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
}
CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks",
r8a7779_cpg_clocks_init);
-
-void __init r8a7779_clocks_init(u32 mode)
-{
- cpg_mode = mode;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 00e6aba4b9c0..f39519edc645 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/soc/renesas/rcar-rst.h>
struct rcar_gen2_cpg {
struct clk_onecell_data data;
@@ -364,6 +365,23 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
4, 0, table, &cpg->lock);
}
+/*
+ * Reset register definitions.
+ */
+#define MODEMR 0xe6160060
+
+static u32 __init rcar_gen2_read_mode_pins(void)
+{
+ void __iomem *modemr = ioremap_nocache(MODEMR, 4);
+ u32 mode;
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+
+ return mode;
+}
+
static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
{
const struct cpg_pll_config *config;
@@ -372,6 +390,13 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
unsigned int i;
int num_clks;
+ if (rcar_rst_read_mode_pins(&cpg_mode)) {
+ /* Backward-compatibility with old DT */
+ pr_warn("%s: failed to obtain mode pins from RST\n",
+ np->full_name);
+ cpg_mode = rcar_gen2_read_mode_pins();
+ }
+
num_clks = of_property_count_strings(np, "clock-output-names");
if (num_clks < 0) {
pr_err("%s: failed to count clocks\n", __func__);
@@ -420,10 +445,3 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
}
CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
rcar_gen2_cpg_clocks_init);
-
-void __init rcar_gen2_clocks_init(u32 mode)
-{
- cpg_mode = mode;
-
- of_clk_init(NULL);
-}
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
new file mode 100644
index 000000000000..6dc0b3082aa6
--- /dev/null
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -0,0 +1,270 @@
+/*
+ * r8a7743 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7743-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7743_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7743_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7743_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7743_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7743_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7743_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7743_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7743_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("zg", R8A7743_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7743_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7743_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7743_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7743_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7743_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7743_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7743_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7743_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7743_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7743_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7743_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7743_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7743_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7743_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7743_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7743_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7743_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+};
+
+static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7743_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7743_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7743_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7743_CLK_P),
+ DEF_MOD("3dg", 112, R8A7743_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7743_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7743_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7743_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7743_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7743_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7743_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7743_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7743_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7743_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7743_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7743_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7743_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7743_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7743_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7743_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7743_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7743_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7743_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7743_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7743_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7743_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7743_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7743_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7743_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7743_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7743_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7743_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7743_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7743_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7743_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7743_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7743_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7743_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7743_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7743_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7743_CLK_HP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7743_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7743_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7743_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7743_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7743_CLK_P),
+ DEF_MOD("scif4", 715, R8A7743_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7743_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7743_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7743_CLK_P),
+ DEF_MOD("scif2", 719, R8A7743_CLK_P),
+ DEF_MOD("scif1", 720, R8A7743_CLK_P),
+ DEF_MOD("scif0", 721, R8A7743_CLK_P),
+ DEF_MOD("du1", 723, R8A7743_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7743_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7743_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7743_CLK_ZX),
+ DEF_MOD("vin2", 809, R8A7743_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7743_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7743_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7743_CLK_HP),
+ DEF_MOD("ether", 813, R8A7743_CLK_P),
+ DEF_MOD("sata1", 814, R8A7743_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7743_CLK_ZS),
+ DEF_MOD("gpio7", 904, R8A7743_CLK_CP),
+ DEF_MOD("gpio6", 905, R8A7743_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7743_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7743_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7743_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7743_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7743_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7743_CLK_CP),
+ DEF_MOD("can1", 915, R8A7743_CLK_P),
+ DEF_MOD("can0", 916, R8A7743_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7743_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7743_CLK_HP),
+ DEF_MOD("iicdvfs", 926, R8A7743_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A7743_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7743_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7743_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7743_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7743_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7743_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7743_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7743_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7743_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7743_CLK_MP),
+};
+
+static const unsigned int r8a7743_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
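+/*
+ * Example: a board strapped with MD14=1, MD13=0, MD19=0 (EXTAL = 26 MHz / 2)
+ * yields index 0b100 = 4, selecting cpg_pll_configs[4] = { 2, 240, 122 }
+ * below, i.e. EXTAL div 2, PLL1 x240/2 and PLL3 x122.
+ */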
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult */
+ { 1, 208, 106, },
+ { 1, 208, 88, },
+ { 1, 156, 80, },
+ { 1, 156, 66, },
+ { 2, 240, 122, },
+ { 2, 240, 102, },
+ { 2, 208, 106, },
+ { 2, 208, 88, },
+};
+
+static int __init r8a7743_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7743_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7743_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7743_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7743_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7743_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7743_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7743_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7743_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
new file mode 100644
index 000000000000..2f15ba786c3b
--- /dev/null
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -0,0 +1,259 @@
+/*
+ * r8a7745 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7745-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7745_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7745_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("lb", R8A7745_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7745_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7745_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7745_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7745_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7745_CLK_Z2, CLK_PLL0, 1, 1),
+ DEF_FIXED("zg", R8A7745_CLK_ZG, CLK_PLL1, 6, 1),
+ DEF_FIXED("zx", R8A7745_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7745_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7745_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7745_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7745_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7745_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A7745_CLK_CP, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7745_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7745_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7745_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7745_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7745_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cpex", R8A7745_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7745_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7745_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7745_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7745_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7745_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+};
+
+static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7745_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7745_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7745_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7745_CLK_P),
+ DEF_MOD("3dg", 112, R8A7745_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7745_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7745_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7745_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7745_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7745_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7745_CLK_CP),
+ DEF_MOD("vsp1du0", 128, R8A7745_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7745_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7745_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7745_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7745_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7745_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7745_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7745_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7745_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7745_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7745_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7745_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7745_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7745_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7745_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7745_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7745_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7745_CLK_HP),
+ DEF_MOD("iic1", 323, R8A7745_CLK_HP),
+ DEF_MOD("cmt1", 329, R8A7745_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7745_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7745_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7745_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7745_CLK_ZS),
+ DEF_MOD("audio-dmac0", 502, R8A7745_CLK_HP),
+ DEF_MOD("pwm", 523, R8A7745_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7745_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7745_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7745_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7745_CLK_P),
+ DEF_MOD("scif4", 715, R8A7745_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7745_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7745_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7745_CLK_P),
+ DEF_MOD("scif2", 719, R8A7745_CLK_P),
+ DEF_MOD("scif1", 720, R8A7745_CLK_P),
+ DEF_MOD("scif0", 721, R8A7745_CLK_P),
+ DEF_MOD("du0", 724, R8A7745_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7745_CLK_ZX),
+ DEF_MOD("vin1", 810, R8A7745_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7745_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7745_CLK_HP),
+ DEF_MOD("ether", 813, R8A7745_CLK_P),
+ DEF_MOD("gpio6", 905, R8A7745_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7745_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7745_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7745_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7745_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7745_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7745_CLK_CP),
+ DEF_MOD("can1", 915, R8A7745_CLK_P),
+ DEF_MOD("can0", 916, R8A7745_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7745_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7745_CLK_HP),
+ DEF_MOD("i2c4", 927, R8A7745_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7745_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7745_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7745_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7745_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7745_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7745_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7745_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7745_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7745_CLK_MP),
+};
+
+static const unsigned int r8a7745_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *2
+ *---------------------------------------------------
+ * 0 0 0 15 x200/3 x208/2 x106
+ * 0 0 1 15 x200/3 x208/2 x88
+ * 0 1 0 20 x150/3 x156/2 x80
+ * 0 1 1 20 x150/3 x156/2 x66
+ * 1 0 0 26 / 2 x230/3 x240/2 x122
+ * 1 0 1 26 / 2 x230/3 x240/2 x102
+ * 1 1 0 30 / 2 x200/3 x208/2 x106
+ * 1 1 1 30 / 2 x200/3 x208/2 x88
+ *
+ * *1 : Table 7.5b indicates VCO output (PLL0 = VCO/3)
+ * *2 : Table 7.5b indicates VCO output (PLL1 = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult PLL0 mult */
+ { 1, 208, 106, 200 },
+ { 1, 208, 88, 200 },
+ { 1, 156, 80, 150 },
+ { 1, 156, 66, 150 },
+ { 2, 240, 122, 230 },
+ { 2, 240, 102, 230 },
+ { 2, 208, 106, 200 },
+ { 2, 208, 88, 200 },
+};
+
+static int __init r8a7745_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 3, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7745_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7745_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7745_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7745_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7745_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7745_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7745_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7745_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index f255e451e8ca..50698a7d9074 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
@@ -97,7 +98,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
- DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
+ DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A7795_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
@@ -311,7 +312,12 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
- u32 cpg_mode = rcar_gen3_read_mode_pins();
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
if (!cpg_pll_config->extal_div) {
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index eb347ed265f2..7d298c57a3e0 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
@@ -102,6 +103,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_DIV6P1("csi0", R8A7796_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+
DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
@@ -109,6 +112,14 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A7796_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A7796_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A7796_CLK_S3D4),
+ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
DEF_MOD("cmt1", 302, R8A7796_CLK_R),
@@ -120,7 +131,47 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("sdif0", 314, R8A7796_CLK_SD0),
DEF_MOD("rwdt0", 402, R8A7796_CLK_R),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A7796_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A7796_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A7796_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A7796_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A7796_CLK_S3D2),
+ DEF_MOD("hscif4", 516, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A7796_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A7796_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7796_CLK_CP),
+ DEF_MOD("fcpvd2", 601, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvd1", 602, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A7796_CLK_S0D1),
+ DEF_MOD("fcpci0", 617, R8A7796_CLK_S0D2),
+ DEF_MOD("fcpcs", 619, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd2", 621, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A7796_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A7796_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1),
+ DEF_MOD("csi20", 714, R8A7796_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A7796_CLK_CSI0),
+ DEF_MOD("du2", 722, R8A7796_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A7796_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A7796_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A7796_CLK_S2D1),
+ DEF_MOD("vin7", 804, R8A7796_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A7796_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A7796_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A7796_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A7796_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A7796_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A7796_CLK_S0D2),
DEF_MOD("etheravb", 812, R8A7796_CLK_S0D6),
DEF_MOD("gpio7", 905, R8A7796_CLK_S3D4),
DEF_MOD("gpio6", 906, R8A7796_CLK_S3D4),
@@ -130,6 +181,13 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A7796_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A7796_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A7796_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c4", 927, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A7796_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A7796_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A7796_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A7796_CLK_S3D2),
};
static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
@@ -190,7 +248,12 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
static int __init r8a7796_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
- u32 cpg_mode = rcar_gen3_read_mode_pins();
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
if (!cpg_pll_config->extal_div) {
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
new file mode 100644
index 000000000000..123b1e622179
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -0,0 +1,371 @@
+/*
+ * R-Car Gen2 Clock Pulse Generator
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+#define CPG_FRQCRB 0x0004
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_SDCKCR 0x0074
+#define CPG_PLL0CR 0x00d8
+#define CPG_PLL0CR_STC_SHIFT 24
+#define CPG_PLL0CR_STC_MASK (0x7f << CPG_PLL0CR_STC_SHIFT)
+#define CPG_FRQCRC 0x00e0
+#define CPG_FRQCRC_ZFC_SHIFT 8
+#define CPG_FRQCRC_ZFC_MASK (0x1f << CPG_FRQCRC_ZFC_SHIFT)
+#define CPG_ADSPCKCR 0x025c
+#define CPG_RCANCKCR 0x0270
+
+static spinlock_t cpg_lock;
+
+/*
+ * Z Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
+ * parent - fixed parent. No clk_set_parent support
+ */
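+
+/*
+ * Example: with a 1500 MHz parent and FRQCRC.ZFC = 2, mult = 32 - 2 = 30,
+ * so the Z clock runs at 1500 MHz * 30 / 32 = 1406.25 MHz.
+ */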
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int val;
+
+ val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
+ mult = 32 - val;
+
+ return div_u64((u64)parent_rate * mult, 32);
+}
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long prate = *parent_rate;
+ unsigned int mult;
+
+ if (!prate)
+ prate = 1;
+
+ mult = div_u64((u64)rate * 32, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return *parent_rate / 32 * mult;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val, kick;
+ unsigned int i;
+
+ mult = div_u64((u64)rate * 32, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ val = readl(zclk->reg);
+ val &= ~CPG_FRQCRC_ZFC_MASK;
+ val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
+ writel(val, zclk->reg);
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independent of the CPU rate.
+ * Since this value might depend on the external xtal rate, the PLL1
+ * rate or even the other emulation clocks' rates, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = base + CPG_FRQCRC;
+ zclk->kick_reg = base + CPG_FRQCRB;
+ zclk->hw.init = &init;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+static struct clk * __init cpg_rcan_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_fixed_factor *fixed;
+ struct clk_gate *gate;
+ struct clk *clk;
+
+ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return ERR_PTR(-ENOMEM);
+
+ fixed->mult = 1;
+ fixed->div = 6;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(fixed);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gate->reg = base + CPG_RCANCKCR;
+ gate->bit_idx = 8;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &fixed->hw, &clk_fixed_factor_ops,
+ &gate->hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(gate);
+ kfree(fixed);
+ }
+
+ return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+ { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
+ { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
+ { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_divider *div;
+ struct clk_gate *gate;
+ struct clk *clk;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ div->reg = base + CPG_ADSPCKCR;
+ div->width = 4;
+ div->table = cpg_adsp_div_table;
+ div->lock = &cpg_lock;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(div);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gate->reg = base + CPG_ADSPCKCR;
+ gate->bit_idx = 8;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(gate);
+ kfree(div);
+ }
+
+ return clk;
+}
+
+/* SDHI divisors */
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_sd01_div_table[] = {
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 12, 10 },
+ { 0, 0 },
+};
+
+static const struct rcar_gen2_cpg_pll_config *cpg_pll_config __initdata;
+static unsigned int cpg_pll0_div __initdata;
+static u32 cpg_mode __initdata;
+
+struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct clk **clks,
+ void __iomem *base)
+{
+ const struct clk_div_table *table = NULL;
+ const struct clk *parent;
+ const char *parent_name;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ unsigned int shift;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ switch (core->type) {
+ /* R-Car Gen2 */
+ case CLK_TYPE_GEN2_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_GEN2_PLL0:
+ /*
+ * PLL0 is a configurable multiplier clock except on R-Car
+ * V2H/E2. Register the PLL0 clock as a fixed factor clock for
+ * now as there's no generic multiplier clock implementation and
+ * we currently have no need to change the multiplier value.
+ */
+ mult = cpg_pll_config->pll0_mult;
+ div = cpg_pll0_div;
+ if (!mult) {
+ u32 pll0cr = readl(base + CPG_PLL0CR);
+
+ mult = (((pll0cr & CPG_PLL0CR_STC_MASK) >>
+ CPG_PLL0CR_STC_SHIFT) + 1) * 2;
+ }
+ break;
+
+ case CLK_TYPE_GEN2_PLL1:
+ mult = cpg_pll_config->pll1_mult / 2;
+ break;
+
+ case CLK_TYPE_GEN2_PLL3:
+ mult = cpg_pll_config->pll3_mult;
+ break;
+
+ case CLK_TYPE_GEN2_Z:
+ return cpg_z_clk_register(core->name, parent_name, base);
+
+ case CLK_TYPE_GEN2_LB:
+ div = cpg_mode & BIT(18) ? 36 : 24;
+ break;
+
+ case CLK_TYPE_GEN2_ADSP:
+ return cpg_adsp_clk_register(core->name, parent_name, base);
+
+ case CLK_TYPE_GEN2_SDH:
+ table = cpg_sdh_div_table;
+ shift = 8;
+ break;
+
+ case CLK_TYPE_GEN2_SD0:
+ table = cpg_sd01_div_table;
+ shift = 4;
+ break;
+
+ case CLK_TYPE_GEN2_SD1:
+ table = cpg_sd01_div_table;
+ shift = 0;
+ break;
+
+ case CLK_TYPE_GEN2_QSPI:
+ div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) ?
+ 8 : 10;
+ break;
+
+ case CLK_TYPE_GEN2_RCAN:
+ return cpg_rcan_clk_register(core->name, parent_name, base);
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!table)
+ return clk_register_fixed_factor(NULL, core->name, parent_name,
+ 0, mult, div);
+ else
+ return clk_register_divider_table(NULL, core->name,
+ parent_name, 0,
+ base + CPG_SDCKCR, shift, 4,
+ 0, table, &cpg_lock);
+}
+
+int __init rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
+ unsigned int pll0_div, u32 mode)
+{
+ cpg_pll_config = config;
+ cpg_pll0_div = pll0_div;
+ cpg_mode = mode;
+
+ spin_lock_init(&cpg_lock);
+
+ return 0;
+}
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
new file mode 100644
index 000000000000..9eba07ff8b11
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -0,0 +1,43 @@
+/*
+ * R-Car Gen2 Clock Pulse Generator
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __CLK_RENESAS_RCAR_GEN2_CPG_H__
+#define __CLK_RENESAS_RCAR_GEN2_CPG_H__
+
+enum rcar_gen2_clk_types {
+ CLK_TYPE_GEN2_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_GEN2_PLL0,
+ CLK_TYPE_GEN2_PLL1,
+ CLK_TYPE_GEN2_PLL3,
+ CLK_TYPE_GEN2_Z,
+ CLK_TYPE_GEN2_LB,
+ CLK_TYPE_GEN2_ADSP,
+ CLK_TYPE_GEN2_SDH,
+ CLK_TYPE_GEN2_SD0,
+ CLK_TYPE_GEN2_SD1,
+ CLK_TYPE_GEN2_QSPI,
+ CLK_TYPE_GEN2_RCAN,
+};
+
+struct rcar_gen2_cpg_pll_config {
+ unsigned int extal_div;
+ unsigned int pll1_mult;
+ unsigned int pll3_mult;
+ unsigned int pll0_mult; /* leave as zero if PLL0CR exists */
+};
+
+struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base);
+int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
+ unsigned int pll0_div, u32 mode);
+
+#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index bb4f2f9a8c2f..742f6dc7c156 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -98,7 +98,7 @@ static int cpg_sd_clock_enable(struct clk_hw *hw)
u32 val, sd_fc;
unsigned int i;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
sd_fc = val & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
@@ -111,7 +111,7 @@ static int cpg_sd_clock_enable(struct clk_hw *hw)
val &= ~(CPG_SD_STP_MASK);
val |= clock->div_table[i].val & CPG_SD_STP_MASK;
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -120,14 +120,14 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+ writel(readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
+ return !(readl(clock->reg) & CPG_SD_STP_MASK);
}
static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
@@ -138,7 +138,7 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
u32 val, sd_fc;
unsigned int i;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
sd_fc = val & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
@@ -189,10 +189,10 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= clock->div_num)
return -EINVAL;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -333,23 +333,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
__clk_get_name(parent), 0, mult, div);
}
-/*
- * Reset register definitions.
- */
-#define MODEMR 0xe6160060
-
-u32 __init rcar_gen3_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr)
{
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f699085147d1..f788f481dd42 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -33,7 +33,6 @@ struct rcar_gen3_cpg_pll_config {
#define CPG_RCKCR 0x240
-u32 rcar_gen3_read_mode_pins(void);
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e1365e7491ae..8359ce75db7a 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -33,9 +33,9 @@
#include "clk-div6.h"
#ifdef DEBUG
-#define WARN_DEBUG(x) do { } while (0)
-#else
#define WARN_DEBUG(x) WARN_ON(x)
+#else
+#define WARN_DEBUG(x) do { } while (0)
#endif
@@ -146,12 +146,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->mstp_lock, flags);
- value = clk_readl(priv->base + SMSTPCR(reg));
+ value = readl(priv->base + SMSTPCR(reg));
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- clk_writel(value, priv->base + SMSTPCR(reg));
+ writel(value, priv->base + SMSTPCR(reg));
spin_unlock_irqrestore(&priv->mstp_lock, flags);
@@ -159,8 +159,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (!(clk_readl(priv->base + MSTPSR(reg)) &
- bitmask))
+ if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
break;
cpu_relax();
}
@@ -190,7 +189,7 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
- value = clk_readl(priv->base + MSTPSR(clock->index / 32));
+ value = readl(priv->base + MSTPSR(clock->index / 32));
return !(value & BIT(clock->index % 32));
}
@@ -309,7 +308,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
return;
fail:
- dev_err(dev, "Failed to register %s clock %s: %ld\n", "core,",
+ dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
core->name, PTR_ERR(clk));
}
@@ -377,7 +376,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
return;
fail:
- dev_err(dev, "Failed to register %s clock %s: %ld\n", "module,",
+ dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
mod->name, PTR_ERR(clk));
kfree(clock);
}
@@ -503,6 +502,18 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
}
static const struct of_device_id cpg_mssr_match[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ {
+ .compatible = "renesas,r8a7743-cpg-mssr",
+ .data = &r8a7743_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ {
+ .compatible = "renesas,r8a7745-cpg-mssr",
+ .data = &r8a7745_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_ARCH_R8A7795
{
.compatible = "renesas,r8a7795-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index ee7edfaf1408..4bb7a80c6469 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -130,6 +130,8 @@ struct cpg_mssr_info {
struct clk **clks, void __iomem *base);
};
+extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
#endif
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index b5f2c8ed12e1..16e098c36f90 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -11,6 +11,7 @@ obj-y += clk-mmc-phase.o
obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-y += clk-rk1108.o
obj-y += clk-rk3036.o
obj-y += clk-rk3188.o
obj-y += clk-rk3228.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 05b3d73bfefa..0e09684d43a5 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,9 +124,18 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
struct clk_notifier_data *ndata)
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
+ const struct rockchip_cpuclk_rate_table *rate;
unsigned long alt_prate, alt_div;
unsigned long flags;
+ /* check validity of the new rate */
+ rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for cpuclk\n",
+ __func__, ndata->new_rate);
+ return -EINVAL;
+ }
+
alt_prate = clk_get_rate(cpuclk->alt_parent);
spin_lock_irqsave(cpuclk->lock, flags);
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 8feba93672c5..e8075359366b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
ddrclk->ddr_flag = ddr_flag;
clk = clk_register(NULL, &ddrclk->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: could not register ddrclk %s\n", __func__, name);
+ if (IS_ERR(clk))
kfree(ddrclk);
- return NULL;
- }
return clk;
}
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 9c1373e81683..6ed605776abd 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -319,7 +319,8 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
- rate->dsmpd != cur.dsmpd || rate->frac != cur.frac) {
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
struct clk *parent = clk_get_parent(hw->clk);
if (!parent) {
@@ -795,7 +796,8 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
- rate->dsmpd != cur.dsmpd || rate->frac != cur.frac) {
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
struct clk *parent = clk_get_parent(hw->clk);
if (!parent) {
diff --git a/drivers/clk/rockchip/clk-rk1108.c b/drivers/clk/rockchip/clk-rk1108.c
new file mode 100644
index 000000000000..92750d798e5d
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk1108.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Andy Yan <andy.yan@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rk1108-cru.h>
+#include "clk.h"
+
+#define RK1108_GRF_SOC_STATUS0 0x480
+
+enum rk1108_plls {
+ apll, dpll, gpll,
+};
+
+static struct rockchip_pll_rate_table rk1108_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK1108_DIV_CORE_MASK 0xf
+#define RK1108_DIV_CORE_SHIFT 4
+
+#define RK1108_CLKSEL0(_core_peri_div) \
+ { \
+ .reg = RK1108_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_core_peri_div, RK1108_DIV_CORE_MASK,\
+ RK1108_DIV_CORE_SHIFT) \
+ }
+
+#define RK1108_CPUCLK_RATE(_prate, _core_peri_div) \
+ { \
+ .prate = _prate, \
+ .divs = { \
+ RK1108_CLKSEL0(_core_peri_div), \
+ }, \
+ }
+
+static struct rockchip_cpuclk_rate_table rk1108_cpuclk_rates[] __initdata = {
+ RK1108_CPUCLK_RATE(816000000, 4),
+ RK1108_CPUCLK_RATE(600000000, 4),
+ RK1108_CPUCLK_RATE(312000000, 4),
+};
+
+static const struct rockchip_cpuclk_reg_data rk1108_cpuclk_data = {
+ .core_reg = RK1108_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 8,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m", "xin24m"};
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr", "apll_ddr" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" };
+PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" };
+PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" };
+PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" };
+PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" };
+PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" };
+PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" };
+PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" };
+PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" };
+PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" };
+PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" };
+PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" };
+PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
+PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" };
+PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" };
+PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" };
+PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
+
+static struct rockchip_pll_clock rk1108_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RK1108_PLL_CON(0),
+ RK1108_PLL_CON(3), 8, 31, 0, rk1108_pll_rates),
+ [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK1108_PLL_CON(8),
+ RK1108_PLL_CON(11), 8, 31, 0, NULL),
+ [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK1108_PLL_CON(16),
+ RK1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk1108_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
+
+static struct rockchip_clk_branch rk1108_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(13), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(14), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_uart2_fracmux __initdata =
+ MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(15), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s0_fracmux __initdata =
+ MUX(0, "i2s0_pre", mux_i2s0_pre_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(5), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s1_fracmux __initdata =
+ MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(6), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_i2s2_fracmux __initdata =
+ MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(7), 12, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = {
+ MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT,
+ RK1108_MISC_CON, 13, 2, MFLAGS),
+ MUX(0, "usb480m", mux_usb480m_pre_p, CLK_SET_RATE_PARENT,
+ RK1108_MISC_CON, 15, 2, MFLAGS),
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ /* PD_CORE */
+ GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclken_dbg", "armclk", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK1108_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_ENMCORE, "aclkenm_core", "armclk", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK1108_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(ACLK_CORE, "aclk_core", "aclkenm_core", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(0, "pclk_dbg", "pclken_dbg", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(11), 1, GFLAGS),
+
+ /* PD_RKVENC */
+
+ /* PD_RKVDEC */
+
+ /* PD_PMU_wrapper */
+ COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(38), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 2, GFLAGS),
+ GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(8), 15, GFLAGS),
+ COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(8), 14, GFLAGS),
+ GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(8), 13, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0,
+ RK1108_CLKSEL_CON(29), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(7), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0,
+ RK1108_CLKSEL_CON(29), 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(7), 3, GFLAGS),
+
+ INVERTER(0, "pclk_vip", "ext_vip",
+ RK1108_CLKSEL_CON(31), 8, IFLAGS),
+ GATE(0, "pclk_isp_pre", "pclk_vip", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(0, "pclk_isp", "pclk_isp_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(0, "dclk_hdmiphy_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0,
+ RK1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(8), 0,
+ RK1108_CLKGATE_CON(2), 1, GFLAGS,
+ &rk1108_i2s0_fracmux),
+ GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "i2s_out", mux_i2s_out_p, 0,
+ RK1108_CLKSEL_CON(5), 15, 1, MFLAGS,
+ RK1108_CLKGATE_CON(2), 3, GFLAGS),
+
+ COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(9), 0,
+ RK1108_CLKGATE_CON(2), 5, GFLAGS,
+ &rk1108_i2s1_fracmux),
+ GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 6, GFLAGS),
+
+ COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(10), 0,
+ RK1108_CLKGATE_CON(2), 9, GFLAGS,
+ &rk1108_i2s2_fracmux),
+ GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT,
+ RK1108_CLKGATE_CON(2), 10, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_bus_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(0, "aclk_bus_src_apll", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "aclk_bus_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0,
+ RK1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0,
+ RK1108_CLKSEL_CON(3), 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0,
+ RK1108_CLKSEL_CON(3), 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 4, GFLAGS),
+
+ COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 1, GFLAGS),
+ COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 3, GFLAGS),
+ COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 5, GFLAGS),
+
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(16), 0,
+ RK1108_CLKGATE_CON(3), 2, GFLAGS,
+ &rk1108_uart0_fracmux),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(17), 0,
+ RK1108_CLKGATE_CON(3), 4, GFLAGS,
+ &rk1108_uart1_fracmux),
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(18), 0,
+ RK1108_CLKGATE_CON(3), 6, GFLAGS,
+ &rk1108_uart2_fracmux),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 12, GFLAGS),
+
+ COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 9, GFLAGS),
+ GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ RK1108_CLKGATE_CON(3), 10, GFLAGS),
+ GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(13), 9, GFLAGS),
+
+ GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(14), 0, GFLAGS),
+
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0,
+ RK1108_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 1, GFLAGS),
+
+ /* PD_DDR */
+ GATE(0, "apll_ddr", "apll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3,
+ DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK1108_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(0), 11, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+
+ /* PD_PERI */
+ COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0,
+ RK1108_CLKSEL_CON(23), 10, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(15), 13, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0,
+ RK1108_CLKSEL_CON(23), 5, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 4, GFLAGS),
+ GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(15), 12, GFLAGS),
+
+ GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED,
+ RK1108_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED,
+ RK1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(15), 11, GFLAGS),
+
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK1108_CLKGATE_CON(5), 0, GFLAGS),
+
+ COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 10, 2, MFLAGS,
+ RK1108_CLKGATE_CON(5), 2, GFLAGS),
+ DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
+ RK1108_CLKSEL_CON(26), 0, 8, DFLAGS),
+
+ COMPOSITE_NODIV(0, "sclk_emmc_src", mux_mmc_src_p, 0,
+ RK1108_CLKSEL_CON(25), 12, 2, MFLAGS,
+ RK1108_CLKGATE_CON(5), 1, GFLAGS),
+ DIV(SCLK_EMMC, "sclk_emmc", "sclk_emmc_src", 0,
+ RK1108_CLKSEL_CON(26), 8, 8, DFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 2, GFLAGS),
+
+ COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK1108_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 3, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0,
+ RK1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ RK1108_CLKGATE_CON(5), 4, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 10, GFLAGS),
+
+ COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0,
+ RK1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS,
+ RK1108_CLKGATE_CON(4), 10, GFLAGS),
+ MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT,
+ RK1108_CLKSEL_CON(24), 8, 2, MFLAGS),
+ GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 7, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK1108_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK1108_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK1108_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK1108_SDIO_CON1, 1),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK1108_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK1108_EMMC_CON1, 1),
+};
+
+static const char *const rk1108_critical_clocks[] __initconst = {
+ "aclk_core",
+ "aclk_bus_src_gpll",
+ "aclk_periph",
+ "hclk_periph",
+ "pclk_periph",
+};
+
+static void __init rk1108_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk1108_pll_clks,
+ ARRAY_SIZE(rk1108_pll_clks),
+ RK1108_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, rk1108_clk_branches,
+ ARRAY_SIZE(rk1108_clk_branches));
+ rockchip_clk_protect_critical(rk1108_critical_clocks,
+ ARRAY_SIZE(rk1108_critical_clocks));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &rk1108_cpuclk_data, rk1108_cpuclk_rates,
+ ARRAY_SIZE(rk1108_cpuclk_rates));
+
+ rockchip_register_softrst(np, 13, reg_base + RK1108_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK1108_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(rk1108_cru, "rockchip,rk1108-cru", rk1108_clk_init);
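rk1108_clk_init() relies on rockchip_clk_protect_critical() to keep the handful of always-needed bus clocks above running even though no device claims them. A rough sketch of what that protection amounts to (assumption: simplified from the shared Rockchip clk code, not part of this patch):

static void protect_critical_clocks(const char *const clocks[], int nclocks)
{
        int i;

        for (i = 0; i < nclocks; i++) {
                struct clk *clk = __clk_lookup(clocks[i]);

                /* the enable reference is intentionally never dropped */
                if (clk)
                        clk_prepare_enable(clk);
        }
}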
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index d0e722a0e8cf..062ef4960244 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3188_pll_rates[] = {
RK3066_PLL_RATE( 504000000, 1, 84, 4),
RK3066_PLL_RATE( 456000000, 1, 76, 4),
RK3066_PLL_RATE( 408000000, 1, 68, 4),
+ RK3066_PLL_RATE( 400000000, 3, 100, 2),
RK3066_PLL_RATE( 384000000, 2, 128, 4),
RK3066_PLL_RATE( 360000000, 1, 60, 4),
RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -306,14 +307,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(0), 2, GFLAGS),
- GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+ GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 3, GFLAGS),
GATE(0, "atclk_cpu", "pclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 6, GFLAGS),
- GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
+ GATE(PCLK_CPU, "pclk_cpu", "pclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 5, GFLAGS),
- GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
+ GATE(HCLK_CPU, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(0), 4, GFLAGS),
COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
@@ -323,12 +324,12 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(31), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 4, GFLAGS),
- GATE(0, "aclk_peri", "aclk_peri_pre", 0,
+ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_pre", 0,
RK2928_CLKGATE_CON(2), 1, GFLAGS),
- COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_pre", 0,
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_pre", 0,
RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_pre", 0,
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_pre", 0,
RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(2), 3, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 8387c7a40bda..3490887b0579 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -77,7 +77,7 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
- RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 1, 125, 3, 1, 1, 0),
RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0),
RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0),
RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0),
@@ -87,12 +87,13 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0),
RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0),
RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0),
- RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 800000000, 1, 100, 3, 1, 1, 0),
RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0),
RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0),
RK3036_PLL_RATE( 676000000, 3, 169, 2, 1, 1, 0),
RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0),
RK3036_PLL_RATE( 594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE( 533250000, 8, 711, 4, 1, 1, 0),
RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0),
RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0),
RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0),
@@ -410,11 +411,11 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(6), 6, GFLAGS),
- GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", CLK_IGNORE_UNUSED,
+ GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", 0,
RK3399_CLKGATE_CON(13), 12, GFLAGS),
- GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", CLK_IGNORE_UNUSED,
+ GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", 0,
RK3399_CLKGATE_CON(13), 12, GFLAGS),
- MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, CLK_IGNORE_UNUSED,
+ MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, 0,
RK3399_CLKSEL_CON(14), 6, 1, MFLAGS),
MUX(0, "upll", mux_pll_src_24m_usbphy480m_p, 0,
@@ -498,7 +499,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(14), 10, GFLAGS),
GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_core_adb400_core_l_2_gic", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 11, GFLAGS),
- GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", 0,
RK3399_CLKGATE_CON(0), 7, GFLAGS),
/* big core */
@@ -539,7 +540,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 2, GFLAGS),
- GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", 0,
RK3399_CLKGATE_CON(1), 7, GFLAGS),
/* gmac */
@@ -675,18 +676,18 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(18), 10, GFLAGS),
- GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", 0,
RK3399_CLKGATE_CON(18), 12, GFLAGS),
GATE(PCLK_CIC, "pclk_cic", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(18), 15, GFLAGS),
GATE(PCLK_DDR_SGRF, "pclk_ddr_sgrf", "pclk_ddr", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(19), 2, GFLAGS),
- GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", 0,
RK3399_CLKGATE_CON(4), 11, GFLAGS),
- GATE(SCLK_DFIMON0_TIMER, "clk_dfimon0_timer", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_DFIMON0_TIMER, "clk_dfimon0_timer", "xin24m", 0,
RK3399_CLKGATE_CON(3), 5, GFLAGS),
- GATE(SCLK_DFIMON1_TIMER, "clk_dfimon1_timer", "xin24m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_DFIMON1_TIMER, "clk_dfimon1_timer", "xin24m", 0,
RK3399_CLKGATE_CON(3), 6, GFLAGS),
/* cci */
@@ -966,7 +967,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(SCLK_INTMEM3, "clk_intmem3", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 5, GFLAGS),
GATE(SCLK_INTMEM4, "clk_intmem4", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 6, GFLAGS),
GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 7, GFLAGS),
- GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", 0, RK3399_CLKGATE_CON(23), 8, GFLAGS),
GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 5, GFLAGS),
GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 6, GFLAGS),
GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 7, GFLAGS),
@@ -980,7 +981,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 8, GFLAGS),
/* pclk_perilp0 gates */
- GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", 0, RK3399_CLKGATE_CON(23), 9, GFLAGS),
/* crypto */
COMPOSITE(SCLK_CRYPTO0, "clk_crypto0", mux_pll_src_cpll_gpll_ppll_p, 0,
@@ -1382,8 +1383,8 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* clk_test */
/* clk_test_pre is controlled by CRU_MISC_CON[3] */
COMPOSITE_NOMUX(0, "clk_test", "clk_test_pre", CLK_IGNORE_UNUSED,
- RK3368_CLKSEL_CON(58), 0, 5, DFLAGS,
- RK3368_CLKGATE_CON(13), 11, GFLAGS),
+ RK3399_CLKSEL_CON(58), 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 11, GFLAGS),
/* ddrc */
GATE(0, "clk_ddrc_lpll_src", "lpll", 0, RK3399_CLKGATE_CON(3),
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 1653edd792a5..d67eecc4ade9 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -34,6 +34,21 @@ struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
+/* register positions shared by RK1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+#define RK1108_PLL_CON(x) ((x) * 0x4)
+#define RK1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
+#define RK1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120)
+#define RK1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180)
+#define RK1108_GLB_SRST_FST 0x1c0
+#define RK1108_GLB_SRST_SND 0x1c4
+#define RK1108_MISC_CON 0x1cc
+#define RK1108_SDMMC_CON0 0x1d8
+#define RK1108_SDMMC_CON1 0x1dc
+#define RK1108_SDIO_CON0 0x1e0
+#define RK1108_SDIO_CON1 0x1e4
+#define RK1108_EMMC_CON0 0x1e8
+#define RK1108_EMMC_CON1 0x1ec
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 96fab6cfb202..6c6afb87b4ce 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -132,28 +132,34 @@ free_clkout:
pr_err("%s: failed to register clkout clock\n", __func__);
}
+/*
+ * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the PMU device tree node, so that the Exynos
+ * PMU platform device can later be properly probed by the PMU driver.
+ */
+
static void __init exynos4_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
exynos4_clkout_init);
static void __init exynos5_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
exynos5_clkout_init);
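The switch to CLK_OF_DECLARE_DRIVER matters because the plain CLK_OF_DECLARE marks the matched node OF_POPULATED, and of_platform_populate() then never creates a platform device for it. A hypothetical skeleton of the second user of the same node, shown only to illustrate why the node has to stay available (driver name and probe body are placeholders):

#include <linux/of.h>
#include <linux/platform_device.h>

static int exynos_pmu_probe(struct platform_device *pdev)
{
        /* placeholder: the real PMU driver does its setup here */
        return 0;
}

static const struct of_device_id exynos_pmu_of_match[] = {
        { .compatible = "samsung,exynos4210-pmu" },
        { /* sentinel */ },
};

static struct platform_driver exynos_pmu_driver = {
        .driver = {
                .name = "exynos-pmu",
                .of_match_table = exynos_pmu_of_match,
        },
        .probe = exynos_pmu_probe,
};
builtin_platform_driver(exynos_pmu_driver);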
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index ea1608682d7f..f096bd7df40c 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -543,7 +543,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* ENABLE_ACLK_TOP */
GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
- ENABLE_ACLK_TOP, 30, 0, 0),
+ ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266",
"div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
29, CLK_IGNORE_UNUSED, 0),
@@ -555,25 +555,25 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266",
ENABLE_ACLK_TOP, 24,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200",
ENABLE_ACLK_TOP, 23,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
ENABLE_ACLK_TOP, 22,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_PERIS_66, "aclk_peris_66", "div_aclk_peris_66_b",
ENABLE_ACLK_TOP, 21,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_MSCL_400, "aclk_mscl_400", "div_aclk_mscl_400",
ENABLE_ACLK_TOP, 19,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_FSYS_200, "aclk_fsys_200", "div_aclk_fsys_200",
ENABLE_ACLK_TOP, 18,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_GSCL_111, "aclk_gscl_111", "div_aclk_gscl_111",
ENABLE_ACLK_TOP, 15,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_GSCL_333, "aclk_gscl_333", "div_aclk_gscl_333",
ENABLE_ACLK_TOP, 14,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -582,7 +582,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_CAM1_400, "aclk_cam1_400", "div_aclk_cam1_400",
ENABLE_ACLK_TOP, 12,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_CAM1_552, "aclk_cam1_552", "div_aclk_cam1_552",
ENABLE_ACLK_TOP, 11,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -591,7 +591,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_CAM0_400, "aclk_cam0_400", "div_aclk_cam0_400",
ENABLE_ACLK_TOP, 9,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_CAM0_552, "aclk_cam0_552", "div_aclk_cam0_552",
ENABLE_ACLK_TOP, 8,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
@@ -600,19 +600,19 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_ISP_400, "aclk_isp_400", "div_aclk_isp_400",
ENABLE_ACLK_TOP, 6,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_HEVC_400, "aclk_hevc_400", "div_aclk_hevc_400",
ENABLE_ACLK_TOP, 5,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_MFC_400, "aclk_mfc_400", "div_aclk_mfc_400",
ENABLE_ACLK_TOP, 3,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_G2D_266, "aclk_g2d_266", "div_aclk_g2d_266",
ENABLE_ACLK_TOP, 2,
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_G2D_400, "aclk_g2d_400", "div_aclk_g2d_400",
ENABLE_ACLK_TOP, 0,
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
/* ENABLE_SCLK_TOP_MSCL */
GATE(CLK_SCLK_JPEG_MSCL, "sclk_jpeg_mscl", "div_sclk_jpeg",
@@ -1385,7 +1385,7 @@ static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_DISP_333, "aclk_disp_333", "div_aclk_disp_333",
ENABLE_ACLK_MIF3, 1,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_CPIF_200, "aclk_cpif_200", "div_aclk_cpif_200",
ENABLE_ACLK_MIF3, 0,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
@@ -1929,7 +1929,7 @@ CLK_OF_DECLARE(exynos5433_cmu_peris, "samsung,exynos5433-cmu-peris",
/* list of all parent clock list */
PNAME(mout_sclk_ufs_mphy_user_p) = { "oscclk", "sclk_ufs_mphy", };
-PNAME(mout_aclk_fsys_200_user_p) = { "oscclk", "div_aclk_fsys_200", };
+PNAME(mout_aclk_fsys_200_user_p) = { "oscclk", "aclk_fsys_200", };
PNAME(mout_sclk_pcie_100_user_p) = { "oscclk", "sclk_pcie_100_fsys",};
PNAME(mout_sclk_ufsunipro_user_p) = { "oscclk", "sclk_ufsunipro_fsys",};
PNAME(mout_sclk_mmc2_user_p) = { "oscclk", "sclk_mmc2_fsys", };
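Most of the exynos5433 changes above replace CLK_IGNORE_UNUSED with CLK_IS_CRITICAL, and the two flags are not equivalent: CLK_IGNORE_UNUSED only skips the boot-time clk_disable_unused() sweep, so the gate is still closed once any consumer enables and later disables it, while CLK_IS_CRITICAL makes the core hold a permanent prepare+enable reference so the enable count can never reach zero. Without the flag, the same effect would need a consumer that pins the clock forever, roughly like this (illustrative only; the con_id string is made up):

static void pin_clock_forever(struct device *dev)
{
        struct clk *clk = clk_get(dev, "aclk_disp_333");

        /* deliberately never balanced with clk_disable_unprepare() */
        if (!IS_ERR(clk))
                clk_prepare_enable(clk);
}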
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index a485f3b284b9..918ba3164da9 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -329,8 +329,10 @@ static void __init st_of_flexgen_setup(struct device_node *np)
return;
parents = flexgen_get_parents(np, &num_parents);
- if (!parents)
+ if (!parents) {
+ iounmap(reg);
return;
+ }
match = of_match_node(flexgen_of_match, np);
if (match) {
@@ -394,6 +396,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
return;
err:
+ iounmap(reg);
if (clk_data)
kfree(clk_data->clks);
kfree(clk_data);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 254d9526c018..8454c6e3dd65 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -35,17 +35,14 @@ config SUNXI_CCU_NK
config SUNXI_CCU_NKM
bool
- select RATIONAL
select SUNXI_CCU_GATE
config SUNXI_CCU_NKMP
bool
- select RATIONAL
select SUNXI_CCU_GATE
config SUNXI_CCU_NM
bool
- select RATIONAL
select SUNXI_CCU_FRAC
select SUNXI_CCU_GATE
@@ -56,6 +53,17 @@ config SUNXI_CCU_MP
# SoC Drivers
+config SUN50I_A64_CCU
+ bool "Support for the Allwinner A64 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default ARM64 && ARCH_SUNXI
+
config SUN6I_A31_CCU
bool "Support for the Allwinner A31/A31s CCU"
select SUNXI_CCU_DIV
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 106cba27c331..24fbc6e5deb8 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o
obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o
# SoC support
+obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
new file mode 100644
index 000000000000..e3c084cc6da5
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun50i-a64.h"
+
+static struct ccu_nkmp pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux",
+ "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUN50I_A64_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static struct ccu_nk pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nk_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nk pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x02c,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nk_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The output function can be changed to something more complex that
+ * we do not handle yet.
+ *
+ * Hardcode the mode so that we don't fall in that case.
+ */
+#define SUN50I_A64_PLL_MIPI_REG 0x040
+
+static struct ccu_nkm pll_mipi_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 4),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .m = _SUNXI_CCU_DIV(0, 4),
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT("pll-mipi", "pll-video0",
+ &ccu_nkm_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic",
+ "osc24M", 0x044,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
+ "osc24M", 0x04c,
+ 8, 7, /* N */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+ "pll-cpux", "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x050, 16, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi", "pll-periph0" };
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph0-2x",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb2_parents[] = { "ahb1", "pll-periph0" };
+static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = {
+ { .index = 1, .div = 2 },
+};
+static struct ccu_mux ahb2_clk = {
+ .mux = {
+ .shift = 0,
+ .width = 1,
+ .fixed_predivs = ahb2_fixed_predivs,
+ .n_predivs = ARRAY_SIZE(ahb2_fixed_predivs),
+ },
+
+ .common = {
+ .reg = 0x05c,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb2",
+ ahb2_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2",
+ 0x060, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1",
+ 0x060, BIT(28), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2",
+ 0x060, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1",
+ 0x064, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1",
+ 0x068, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
+ 0x068, BIT(14), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2",
+ 0x06c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1",
+ 0x070, BIT(7), 0);
+
+static struct clk_div_table ths_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+};
+static const char * const ths_parents[] = { "osc24M" };
+static struct ccu_div ths_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_TABLE(0, 2, ths_div_table),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x074,
+ .hw.init = CLK_HW_INIT_PARENTS("ths",
+ ths_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const mmc_default_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0", };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mmc_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M",
+ 0x0cc, BIT(11), 0);
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M",
+ 0x0cc, BIT(16), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "usb-ohci0",
+ 0x0cc, BIT(17), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+ 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
+ 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
+ 0x100, BIT(3), 0);
+
+static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
+static const u8 tcon0_table[] = { 0, 2, };
+static SUNXI_CCU_MUX_TABLE_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents,
+ tcon0_table, 0x118, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const tcon1_parents[] = { "pll-video0", "pll-video1" };
+static const u8 tcon1_table[] = { 0, 2, };
+static struct ccu_div tcon1_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV(0, 4),
+ .mux = _SUNXI_CCU_MUX_TABLE(24, 2, tcon1_table),
+ .common = {
+ .reg = 0x11c,
+ .hw.init = CLK_HW_INIT_PARENTS("tcon1",
+ tcon1_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents,
+ 0x124, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
+ 0x130, BIT(31), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
+ 0x140, BIT(30), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
+ 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
+ 0x154, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
+
+static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" };
+static const u8 dsi_dphy_table[] = { 0, 2, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy",
+ dsi_dphy_parents, dsi_dphy_table,
+ 0x168, 0, 4, 8, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+/* Fixed Factor clocks */
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0);
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x",
+ "pll-periph1", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x",
+ "pll-video0", 1, 2, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_a64_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_video1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll_hsic_clk.common,
+ &pll_de_clk.common,
+ &pll_ddr1_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &ahb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_ce_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_emac_clk.common,
+ &bus_ts_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ve_clk.common,
+ &bus_tcon0_clk.common,
+ &bus_tcon1_clk.common,
+ &bus_deinterlace_clk.common,
+ &bus_csi_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_de_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_codec_clk.common,
+ &bus_spdif_clk.common,
+ &bus_pio_clk.common,
+ &bus_ths_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_scr_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_dbg_clk.common,
+ &ths_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &ts_clk.common,
+ &ce_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_hsic_clk.common,
+ &usb_hsic_12m_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_deinterlace_clk.common,
+ &dram_ts_clk.common,
+ &de_clk.common,
+ &tcon0_clk.common,
+ &tcon1_clk.common,
+ &deinterlace_clk.common,
+ &csi_misc_clk.common,
+ &csi_sclk_clk.common,
+ &csi_mclk_clk.common,
+ &ve_clk.common,
+ &ac_dig_clk.common,
+ &ac_dig_4x_clk.common,
+ &avs_clk.common,
+ &hdmi_clk.common,
+ &hdmi_ddc_clk.common,
+ &mbus_clk.common,
+ &dsi_dphy_clk.common,
+ &gpu_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_a64_hw_clks = {
+ .hws = {
+ [CLK_OSC_12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_AHB2] = &ahb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw,
+ [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_SCR] = &bus_scr_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_THS] = &ths_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_TCON0] = &tcon0_clk.common.hw,
+ [CLK_TCON1] = &tcon1_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_CSI_MISC] = &csi_misc_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AC_DIG] = &ac_dig_clk.common.hw,
+ [CLK_AC_DIG_4X] = &ac_dig_4x_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_a64_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_HSIC] = { 0x0cc, BIT(2) },
+
+ [RST_DRAM] = { 0x0f4, BIT(31) },
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_CE] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
+ [RST_BUS_TS] = { 0x2c0, BIT(18) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(23) },
+ [RST_BUS_EHCI0] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI1] = { 0x2c0, BIT(25) },
+ [RST_BUS_OHCI0] = { 0x2c0, BIT(28) },
+ [RST_BUS_OHCI1] = { 0x2c0, BIT(29) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON1] = { 0x2c4, BIT(4) },
+ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
+ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
+ [RST_BUS_DE] = { 0x2c4, BIT(12) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_DBG] = { 0x2c4, BIT(31) },
+
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_THS] = { 0x2d0, BIT(8) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+ [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(5) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
+ .ccu_clks = sun50i_a64_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_ccu_clks),
+
+ .hw_clks = &sun50i_a64_hw_clks,
+
+ .resets = sun50i_a64_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
+};
+
+static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *reg;
+ u32 val;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Force the PLL-Audio-1x divider to 4 */
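+ /* The field holds the divider minus one, so the value 3 selects /4. */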
+ val = readl(reg + SUN50I_A64_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN50I_A64_PLL_AUDIO_REG);
+
+ writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+}
+
+static const struct of_device_id sun50i_a64_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-a64-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_a64_ccu_driver = {
+ .probe = sun50i_a64_ccu_probe,
+ .driver = {
+ .name = "sun50i-a64-ccu",
+ .of_match_table = sun50i_a64_ccu_ids,
+ },
+};
+builtin_platform_driver(sun50i_a64_ccu_driver);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
new file mode 100644
index 000000000000..9b3cd24b78d2
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN50I_A64_H_
+#define _CCU_SUN50I_A64_H_
+
+#include <dt-bindings/clock/sun50i-a64-ccu.h>
+#include <dt-bindings/reset/sun50i-a64-ccu.h>
+
+#define CLK_OSC_12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_AUDIO_BASE 2
+#define CLK_PLL_AUDIO 3
+#define CLK_PLL_AUDIO_2X 4
+#define CLK_PLL_AUDIO_4X 5
+#define CLK_PLL_AUDIO_8X 6
+#define CLK_PLL_VIDEO0 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VE 9
+#define CLK_PLL_DDR0 10
+#define CLK_PLL_PERIPH0 11
+#define CLK_PLL_PERIPH0_2X 12
+#define CLK_PLL_PERIPH1 13
+#define CLK_PLL_PERIPH1_2X 14
+#define CLK_PLL_VIDEO1 15
+#define CLK_PLL_GPU 16
+#define CLK_PLL_MIPI 17
+#define CLK_PLL_HSIC 18
+#define CLK_PLL_DE 19
+#define CLK_PLL_DDR1 20
+#define CLK_CPUX 21
+#define CLK_AXI 22
+#define CLK_APB 23
+#define CLK_AHB1 24
+#define CLK_APB1 25
+#define CLK_APB2 26
+#define CLK_AHB2 27
+
+/* All the bus gates are exported */
+
+/* The first bunch of module clocks are exported */
+
+#define CLK_USB_OHCI0_12M 90
+
+#define CLK_USB_OHCI1_12M 92
+
+#define CLK_DRAM 94
+
+/* All the DRAM gates are exported */
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS 112
+
+/* And the DSI and GPU module clocks are exported */
+
+#define CLK_NUMBER (CLK_GPU + 1)
+
+#endif /* _CCU_SUN50I_A64_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..fc75a335a7ce 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -143,7 +143,7 @@ static SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(pll_mipi_clk, "pll-mipi",
4, 2, /* K */
0, 4, /* M */
21, 0, /* mux */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
0x050, 0, 3, axi_div_table, 0);
+#define SUN6I_A31_AHB1_REG 0x054
+
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val &= BIT(16);
writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
+ /* Force AHB1 to PLL6 / 3 */
+ val = readl(reg + SUN6I_A31_AHB1_REG);
+ /* set PLL6 pre-div = 3 */
+ val &= ~GENMASK(7, 6);
+ val |= 0x2 << 6;
+ /* select PLL6 / pre-div */
+ val &= ~GENMASK(13, 12);
+ val |= 0x3 << 12;
+ writel(val, reg + SUN6I_A31_AHB1_REG);
+
sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 2646d980087b..5c6d37bdf247 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -344,10 +344,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
/* TODO: the parent for most of the USB clocks is not known */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
@@ -415,7 +415,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
- 0x140, BIT(31), 0);
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 96b40ca57697..9bd1f78a0547 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -131,7 +131,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
8, 4, /* N */
4, 2, /* K */
0, 4, /* M */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 4d70590f05e3..21c427d86f28 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -394,16 +394,16 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
- 0x0b8, 16, 2, BIT(31), 0);
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
- 0x0c0, 0, 4, BIT(31), 0);
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
0x0cc, BIT(8), 0);
@@ -466,7 +466,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
0x13c, 16, 3, BIT(31), 0);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
- 0x140, BIT(31), 0);
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 34c338832c0d..06540f7cf41c 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -20,7 +20,7 @@
#include "ccu_mux.h"
/**
- * struct _ccu_div - Internal divider description
+ * struct ccu_div_internal - Internal divider description
* @shift: Bit offset of the divider in its register
* @width: Width of the divider field in its register
* @max: Maximum value allowed for that divider. This is the
@@ -36,7 +36,7 @@
* It is basically a wrapper around the clk_divider functions
* arguments.
*/
-struct _ccu_div {
+struct ccu_div_internal {
u8 shift;
u8 width;
@@ -78,7 +78,7 @@ struct _ccu_div {
struct ccu_div {
u32 enable;
- struct _ccu_div div;
+ struct ccu_div_internal div;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index 5c4b10cd15b5..8b5eb7756bf7 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -14,7 +14,7 @@
#include "ccu_frac.h"
bool ccu_frac_helper_is_enabled(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
if (!(common->features & CCU_FEATURE_FRACTIONAL))
return false;
@@ -23,7 +23,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
}
void ccu_frac_helper_enable(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
unsigned long flags;
u32 reg;
@@ -38,7 +38,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
}
void ccu_frac_helper_disable(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
unsigned long flags;
u32 reg;
@@ -53,7 +53,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
}
bool ccu_frac_helper_has_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate)
{
if (!(common->features & CCU_FEATURE_FRACTIONAL))
@@ -63,7 +63,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
}
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
- struct _ccu_frac *cf)
+ struct ccu_frac_internal *cf)
{
u32 reg;
@@ -84,7 +84,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
}
int ccu_frac_helper_set_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate)
{
unsigned long flags;
diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h
index e4c670b1cdfe..7b1ee380156f 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.h
+++ b/drivers/clk/sunxi-ng/ccu_frac.h
@@ -18,7 +18,7 @@
#include "ccu_common.h"
-struct _ccu_frac {
+struct ccu_frac_internal {
u32 enable;
u32 select;
@@ -33,21 +33,21 @@ struct _ccu_frac {
}
bool ccu_frac_helper_is_enabled(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
void ccu_frac_helper_enable(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
void ccu_frac_helper_disable(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
bool ccu_frac_helper_has_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate);
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
- struct _ccu_frac *cf);
+ struct ccu_frac_internal *cf);
int ccu_frac_helper_set_rate(struct ccu_common *common,
- struct _ccu_frac *cf,
+ struct ccu_frac_internal *cf,
unsigned long rate);
#endif /* _CCU_FRAC_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index edf9215ea8cc..915625e97d98 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -29,8 +29,8 @@
struct ccu_mp {
u32 enable;
- struct _ccu_div m;
- struct _ccu_div p;
+ struct ccu_div_internal m;
+ struct ccu_div_internal p;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 010e9424691d..678b6cb49f01 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -13,10 +13,23 @@
#include "ccu_gate.h"
#include "ccu_mult.h"
+struct _ccu_mult {
+ unsigned long mult, min, max;
+};
+
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int *n)
+ struct _ccu_mult *mult)
{
- *n = rate / parent;
+ int _mult;
+
+ _mult = rate / parent;
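+ /* Clamp to the allowed [min, max] multiplier range. */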
+ if (_mult < mult->min)
+ _mult = mult->min;
+
+ if (_mult > mult->max)
+ _mult = mult->max;
+
+ mult->mult = _mult;
}
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
@@ -25,11 +38,13 @@ static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
void *data)
{
struct ccu_mult *cm = data;
- unsigned int n;
+ struct _ccu_mult _cm;
- ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+ _cm.min = 1;
+ _cm.max = 1 << cm->mult.width;
+ ccu_mult_find_best(parent_rate, rate, &_cm);
- return parent_rate * n;
+ return parent_rate * _cm.mult;
}
static void ccu_mult_disable(struct clk_hw *hw)
@@ -83,21 +98,23 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
+ struct _ccu_mult _cm;
unsigned long flags;
- unsigned int n;
u32 reg;
ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
&parent_rate);
- ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+ _cm.min = cm->mult.min;
+ _cm.max = 1 << cm->mult.width;
+ ccu_mult_find_best(parent_rate, rate, &_cm);
spin_lock_irqsave(cm->common.lock, flags);
reg = readl(cm->common.base + cm->common.reg);
reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
- writel(reg | ((n - 1) << cm->mult.shift),
+ writel(reg | ((_cm.mult - 1) << cm->mult.shift),
cm->common.base + cm->common.reg);
spin_unlock_irqrestore(cm->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h
index 5d2c8dc14073..c1a2134bdc71 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.h
+++ b/drivers/clk/sunxi-ng/ccu_mult.h
@@ -4,21 +4,26 @@
#include "ccu_common.h"
#include "ccu_mux.h"
-struct _ccu_mult {
+struct ccu_mult_internal {
u8 shift;
u8 width;
+ u8 min;
};
-#define _SUNXI_CCU_MULT(_shift, _width) \
- { \
- .shift = _shift, \
- .width = _width, \
+#define _SUNXI_CCU_MULT_MIN(_shift, _width, _min) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ .min = _min, \
}
+#define _SUNXI_CCU_MULT(_shift, _width) \
+ _SUNXI_CCU_MULT_MIN(_shift, _width, 1)
+
struct ccu_mult {
u32 enable;
- struct _ccu_mult mult;
+ struct ccu_mult_internal mult;
struct ccu_mux_internal mux;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index d6fafb397489..eaf0fdf78d2b 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,21 +9,24 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
+struct _ccu_nk {
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+};
+
static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int max_k,
- unsigned int *n, unsigned int *k)
+ struct _ccu_nk *nk)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;
unsigned int _k, _n;
- for (_k = 1; _k <= max_k; _k++) {
- for (_n = 1; _n <= max_n; _n++) {
+ for (_k = nk->min_k; _k <= nk->max_k; _k++) {
+ for (_n = nk->min_n; _n <= nk->max_n; _n++) {
unsigned long tmp_rate = parent * _n * _k;
if (tmp_rate > rate)
@@ -37,8 +40,8 @@ static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
}
}
- *k = best_k;
- *n = best_n;
+ nk->k = best_k;
+ nk->n = best_n;
}
static void ccu_nk_disable(struct clk_hw *hw)
@@ -89,16 +92,19 @@ static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
- unsigned int n, k;
+ struct _ccu_nk _nk;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nk->fixed_post_div;
- ccu_nk_find_best(*parent_rate, rate,
- 1 << nk->n.width, 1 << nk->k.width,
- &n, &k);
+ _nk.min_n = nk->n.min;
+ _nk.max_n = 1 << nk->n.width;
+ _nk.min_k = nk->k.min;
+ _nk.max_k = 1 << nk->k.width;
+
+ ccu_nk_find_best(*parent_rate, rate, &_nk);
+ rate = *parent_rate * _nk.n * _nk.k;
- rate = *parent_rate * n * k;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate / nk->fixed_post_div;
@@ -110,15 +116,18 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
unsigned long flags;
- unsigned int n, k;
+ struct _ccu_nk _nk;
u32 reg;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate * nk->fixed_post_div;
- ccu_nk_find_best(parent_rate, rate,
- 1 << nk->n.width, 1 << nk->k.width,
- &n, &k);
+ _nk.min_n = nk->n.min;
+ _nk.max_n = 1 << nk->n.width;
+ _nk.min_k = nk->k.min;
+ _nk.max_k = 1 << nk->k.width;
+
+ ccu_nk_find_best(parent_rate, rate, &_nk);
spin_lock_irqsave(nk->common.lock, flags);
@@ -126,7 +135,7 @@ static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift);
reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift);
- writel(reg | ((k - 1) << nk->k.shift) | ((n - 1) << nk->n.shift),
+ writel(reg | ((_nk.k - 1) << nk->k.shift) | ((_nk.n - 1) << nk->n.shift),
nk->common.base + nk->common.reg);
spin_unlock_irqrestore(nk->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_nk.h b/drivers/clk/sunxi-ng/ccu_nk.h
index 4b52da0c29fe..437836b80696 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.h
+++ b/drivers/clk/sunxi-ng/ccu_nk.h
@@ -30,8 +30,8 @@ struct ccu_nk {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
unsigned int fixed_post_div;
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 059fdc3b4f96..9b840a47a94d 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,15 +9,14 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
struct _ccu_nkm {
- unsigned long n, max_n;
- unsigned long k, max_k;
- unsigned long m, max_m;
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+ unsigned long m, min_m, max_m;
};
static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
@@ -27,22 +26,22 @@ static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
unsigned long best_n = 0, best_k = 0, best_m = 0;
unsigned long _n, _k, _m;
- for (_k = 1; _k <= nkm->max_k; _k++) {
- unsigned long tmp_rate;
-
- rational_best_approximation(rate / _k, parent,
- nkm->max_n, nkm->max_m, &_n, &_m);
-
- tmp_rate = parent * _n * _k / _m;
-
- if (tmp_rate > rate)
- continue;
-
- if ((rate - tmp_rate) < (rate - best_rate)) {
- best_rate = tmp_rate;
- best_n = _n;
- best_k = _k;
- best_m = _m;
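+ /* Exhaustively walk N/K/M, keeping the closest rate that does not exceed the target. */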
+ for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
+ for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
+ for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = parent * _n * _k / _m;
+
+ if (tmp_rate > rate)
+ continue;
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ }
+ }
}
}
@@ -101,8 +100,11 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
struct ccu_nkm *nkm = data;
struct _ccu_nkm _nkm;
+ _nkm.min_n = nkm->n.min;
_nkm.max_n = 1 << nkm->n.width;
+ _nkm.min_k = nkm->k.min;
_nkm.max_k = 1 << nkm->k.width;
+ _nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
ccu_nkm_find_best(parent_rate, rate, &_nkm);
@@ -127,8 +129,11 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
+ _nkm.min_n = nkm->n.min;
_nkm.max_n = 1 << nkm->n.width;
+ _nkm.min_k = nkm->k.min;
_nkm.max_k = 1 << nkm->k.width;
+ _nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
ccu_nkm_find_best(parent_rate, rate, &_nkm);
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
index 35493fddd8ab..34580894f4d1 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.h
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -29,9 +29,9 @@ struct ccu_nkm {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
- struct _ccu_div m;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
+ struct ccu_div_internal m;
struct ccu_mux_internal mux;
struct ccu_common common;
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9769dee99511..684c42da3ebb 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,16 +9,15 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
struct _ccu_nkmp {
- unsigned long n, max_n;
- unsigned long k, max_k;
- unsigned long m, max_m;
- unsigned long p, max_p;
+ unsigned long n, min_n, max_n;
+ unsigned long k, min_k, max_k;
+ unsigned long m, min_m, max_m;
+ unsigned long p, min_p, max_p;
};
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
@@ -28,25 +27,25 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
unsigned long _n, _k, _m, _p;
- for (_k = 1; _k <= nkmp->max_k; _k++) {
- for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
- unsigned long tmp_rate;
-
- rational_best_approximation(rate / _k, parent / _p,
- nkmp->max_n, nkmp->max_m,
- &_n, &_m);
-
- tmp_rate = parent * _n * _k / (_m * _p);
-
- if (tmp_rate > rate)
- continue;
-
- if ((rate - tmp_rate) < (rate - best_rate)) {
- best_rate = tmp_rate;
- best_n = _n;
- best_k = _k;
- best_m = _m;
- best_p = _p;
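+ /* Exhaustive N/K/M/P search; P advances in powers of two. */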
+ for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
+ for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
+ for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
+ for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
+ unsigned long tmp_rate;
+
+ tmp_rate = parent * _n * _k / (_m * _p);
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ best_p = _p;
+ }
+ }
}
}
}
@@ -108,9 +107,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
+ _nkmp.min_n = nkmp->n.min;
_nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.min_k = nkmp->k.min;
_nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.min_m = 1;
_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
@@ -126,9 +129,13 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
+ _nkmp.min_n = 1;
_nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.min_k = 1;
_nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.min_m = 1;
_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
index 5adb0c92a614..a82facbc6144 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -29,10 +29,10 @@ struct ccu_nkmp {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_mult k;
- struct _ccu_div m;
- struct _ccu_div p;
+ struct ccu_mult_internal n;
+ struct ccu_mult_internal k;
+ struct ccu_div_internal m;
+ struct ccu_div_internal p;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index b61bdd8c7a7f..c9f3b6c982f0 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,12 +9,42 @@
*/
#include <linux/clk-provider.h>
-#include <linux/rational.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
+struct _ccu_nm {
+ unsigned long n, min_n, max_n;
+ unsigned long m, min_m, max_m;
+};
+
+static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nm *nm)
+{
+ unsigned long best_rate = 0;
+ unsigned long best_n = 0, best_m = 0;
+ unsigned long _n, _m;
+
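+ /* Try every N/M pair and keep the closest rate not above the request. */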
+ for (_n = nm->min_n; _n <= nm->max_n; _n++) {
+ for (_m = nm->min_m; _m <= nm->max_m; _m++) {
+ unsigned long tmp_rate = parent * _n / _m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_m = _m;
+ }
+ }
+ }
+
+ nm->n = best_n;
+ nm->m = best_m;
+}
+
static void ccu_nm_disable(struct clk_hw *hw)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
@@ -61,24 +91,24 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
- unsigned long max_n, max_m;
- unsigned long n, m;
+ struct _ccu_nm _nm;
- max_n = 1 << nm->n.width;
- max_m = nm->m.max ?: 1 << nm->m.width;
+ _nm.min_n = nm->n.min;
+ _nm.max_n = 1 << nm->n.width;
+ _nm.min_m = 1;
+ _nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rational_best_approximation(rate, *parent_rate, max_n, max_m, &n, &m);
+ ccu_nm_find_best(*parent_rate, rate, &_nm);
- return *parent_rate * n / m;
+ return *parent_rate * _nm.n / _nm.m;
}
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ struct _ccu_nm _nm;
unsigned long flags;
- unsigned long max_n, max_m;
- unsigned long n, m;
u32 reg;
if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
@@ -86,10 +116,12 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
else
ccu_frac_helper_disable(&nm->common, &nm->frac);
- max_n = 1 << nm->n.width;
- max_m = nm->m.max ?: 1 << nm->m.width;
+ _nm.min_n = 1;
+ _nm.max_n = 1 << nm->n.width;
+ _nm.min_m = 1;
+ _nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rational_best_approximation(rate, parent_rate, max_n, max_m, &n, &m);
+ ccu_nm_find_best(parent_rate, rate, &_nm);
spin_lock_irqsave(nm->common.lock, flags);
@@ -97,7 +129,7 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
- writel(reg | ((m - 1) << nm->m.shift) | ((n - 1) << nm->n.shift),
+ writel(reg | ((_nm.m - 1) << nm->m.shift) | ((_nm.n - 1) << nm->n.shift),
nm->common.base + nm->common.reg);
spin_unlock_irqrestore(nm->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index 0b7bcd33a2df..e87fd186da78 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -30,9 +30,9 @@ struct ccu_nm {
u32 enable;
u32 lock;
- struct _ccu_mult n;
- struct _ccu_div m;
- struct _ccu_frac frac;
+ struct ccu_mult_internal n;
+ struct ccu_div_internal m;
+ struct ccu_frac_internal frac;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index e54266cc1c51..4417ae129ac7 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -24,7 +24,7 @@
#include "clk-factors.h"
/**
- * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
+ * sun4i_a10_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
* MOD0 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
else
calcp = 3;
- calcm = (req->parent_rate >> calcp) - 1;
+ calcm = (div >> calcp) - 1;
req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req->m = calcm;
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index c205809ba580..ad1c1cc829cb 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -20,7 +20,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <soc/tegra/fuse.h>
@@ -148,7 +148,6 @@ static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{ .compatible = "nvidia,tegra124-dfll", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra124_dfll_fcpu_of_match);
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
@@ -164,20 +163,4 @@ static struct platform_driver tegra124_dfll_fcpu_driver = {
.pm = &tegra124_dfll_pm_ops,
},
};
-
-static int __init tegra124_dfll_fcpu_init(void)
-{
- return platform_driver_register(&tegra124_dfll_fcpu_driver);
-}
-module_init(tegra124_dfll_fcpu_init);
-
-static void __exit tegra124_dfll_fcpu_exit(void)
-{
- platform_driver_unregister(&tegra124_dfll_fcpu_driver);
-}
-module_exit(tegra124_dfll_fcpu_exit);
-
-MODULE_DESCRIPTION("Tegra124 DFLL clock source driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Aleksandr Frid <afrid@nvidia.com>");
-MODULE_AUTHOR("Paul Walmsley <pwalmsley@nvidia.com>");
+builtin_platform_driver(tegra124_dfll_fcpu_driver);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 624115e82ff9..da9e8e7b5ce5 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -92,19 +92,19 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
/**
* tegra_cvb_add_opp_table - build OPP table from Tegra CVB tables
- * @cvb_tables: array of CVB tables
- * @sz: size of the previously mentioned array
+ * @dev: the struct device * for which the OPP table is built
+ * @tables: array of CVB tables
+ * @count: size of the previously mentioned array
* @process_id: process id of the HW module
* @speedo_id: speedo id of the HW module
* @speedo_value: speedo value of the HW module
- * @max_rate: highest safe clock rate
- * @opp_dev: the struct device * for which the OPP table is built
+ * @max_freq: highest safe clock rate
*
* On Tegra, a CVB table encodes the relationship between operating voltage
* and safe maximal frequency for a given module (e.g. GPU or CPU). This
* function calculates the optimal voltage-frequency operating points
* for the given arguments and exports them via the OPP library for the
- * given @opp_dev. Returns a pointer to the struct cvb_table that matched
+ * given @dev. Returns a pointer to the struct cvb_table that matched
* or an ERR_PTR on failure.
*/
const struct cvb_table *
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 8831e1a05367..11d8aa3ec186 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -22,13 +22,6 @@
#include "clock.h"
-/*
- * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
- * that are sourced by DPLL5, and both of these require this clock
- * to be at 120 MHz for proper operation.
- */
-#define DPLL5_FREQ_FOR_USBHOST 120000000
-
#define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
#define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
@@ -546,14 +539,21 @@ void __init omap3_clk_lock_dpll5(void)
struct clk *dpll5_clk;
struct clk *dpll5_m2_clk;
+ /*
+ * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
+ * that can be worked around using specially crafted dpll5 settings
+ * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
+ * host clock rate; its .set_rate() handler will detect that frequency
+ * and use the errata settings.
+ */
dpll5_clk = clk_get(NULL, "dpll5_ck");
- clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+ clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
clk_prepare_enable(dpll5_clk);
- /* Program dpll5_m2_clk divider for no division */
+ /* Program dpll5_m2_clk divider */
dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
clk_prepare_enable(dpll5_m2_clk);
- clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+ clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
clk_disable_unprepare(dpll5_m2_clk);
clk_disable_unprepare(dpll5_clk);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index bfa17d33ef3b..9fd6043314eb 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -201,7 +201,6 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"),
DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"),
DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"),
- DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"),
DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"),
DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index c77333230bdf..45d05339d583 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
@@ -295,31 +295,17 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
return ret;
}
-static int of_dra7_atl_clk_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
static const struct of_device_id of_dra7_atl_clk_match_tbl[] = {
{ .compatible = "ti,dra7-atl", },
{},
};
-MODULE_DEVICE_TABLE(of, of_dra7_atl_clk_match_tbl);
static struct platform_driver dra7_atl_clk_driver = {
.driver = {
.name = "dra7-atl",
+ .suppress_bind_attrs = true,
.of_match_table = of_dra7_atl_clk_match_tbl,
},
.probe = of_dra7_atl_clk_probe,
- .remove = of_dra7_atl_clk_remove,
};
-
-module_platform_driver(dra7_atl_clk_driver);
-
-MODULE_DESCRIPTION("Clock driver for DRA7 Audio Tracking Logic");
-MODULE_ALIAS("platform:dra7-atl-clock");
-MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dra7_atl_clk_driver);
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 90f3f472ae1c..13c37f48d9d6 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -257,11 +257,20 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
+/*
+ * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define OMAP3_DPLL5_FREQ_FOR_USBHOST 120000000
+
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index);
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 9fc8754a6e61..4b9a419d8e14 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -114,6 +114,18 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.round_rate = &omap2_dpll_round_rate,
};
+static const struct clk_ops omap3_dpll5_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll5_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
static const struct clk_ops omap3_dpll_per_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
@@ -474,7 +486,12 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
- of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
+ if ((of_machine_is_compatible("ti,omap3630") ||
+ of_machine_is_compatible("ti,omap36xx")) &&
+ !strcmp(node->name, "dpll5_ck"))
+ of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
+ else
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
of_ti_omap3_dpll_setup);
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 88f2ce81ba55..4cdd28a25584 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -838,3 +838,70 @@ int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
index);
}
+
+/* Apply DM3730 errata sprz319 advisory 2.1. */
+static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct omap3_dpll5_settings {
+ unsigned int rate, m, n;
+ };
+
+ static const struct omap3_dpll5_settings precomputed[] = {
+ /*
+ * From DM3730 errata advisory 2.1, table 35 and 36.
+ * The N value is increased by 1 compared to the tables as the
+ * errata lists register values while the last_rounded_* fields hold the
+ * real divider value.
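+ * Each entry yields roughly 8 x 120 MHz = 960 MHz (the rate requested by
+ * omap3_clk_lock_dpll5()) for its reference clock.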
+ */
+ { 12000000, 80, 0 + 1 },
+ { 13000000, 443, 5 + 1 },
+ { 19200000, 50, 0 + 1 },
+ { 26000000, 443, 11 + 1 },
+ { 38400000, 25, 0 + 1 }
+ };
+
+ const struct omap3_dpll5_settings *d;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
+ if (parent_rate == precomputed[i].rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(precomputed))
+ return false;
+
+ d = &precomputed[i];
+
+ /* Update the M, N and rounded rate values and program the DPLL. */
+ dd = clk->dpll_data;
+ dd->last_rounded_m = d->m;
+ dd->last_rounded_n = d->n;
+ dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
+ omap3_noncore_dpll_program(clk, 0);
+
+ return true;
+}
+
+/**
+ * omap3_dpll5_set_rate - set rate for omap3 dpll5
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
+ * the DPLL is used for USB host (detected through the requested rate).
+ */
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
+ if (omap3_dpll5_apply_errata(hw, parent_rate))
+ return 0;
+ }
+
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
diff --git a/drivers/clk/uniphier/Makefile b/drivers/clk/uniphier/Makefile
index f27b360329ca..665d1d65a90e 100644
--- a/drivers/clk/uniphier/Makefile
+++ b/drivers/clk/uniphier/Makefile
@@ -1,8 +1,11 @@
obj-y += clk-uniphier-core.o
+
+obj-y += clk-uniphier-cpugear.o
obj-y += clk-uniphier-fixed-factor.o
obj-y += clk-uniphier-fixed-rate.o
obj-y += clk-uniphier-gate.o
obj-y += clk-uniphier-mux.o
+
obj-y += clk-uniphier-sys.o
obj-y += clk-uniphier-mio.o
obj-y += clk-uniphier-peri.o
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 26c53f7963a4..0007218ce6a0 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -27,6 +27,9 @@ static struct clk_hw *uniphier_clk_register(struct device *dev,
const struct uniphier_clk_data *data)
{
switch (data->type) {
+ case UNIPHIER_CLK_TYPE_CPUGEAR:
+ return uniphier_clk_register_cpugear(dev, regmap, data->name,
+ &data->data.cpugear);
case UNIPHIER_CLK_TYPE_FIXED_FACTOR:
return uniphier_clk_register_fixed_factor(dev, data->name,
&data->data.factor);
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
new file mode 100644
index 000000000000..9bff26e0cbb0
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "clk-uniphier.h"
+
+#define UNIPHIER_CLK_CPUGEAR_STAT 0 /* status */
+#define UNIPHIER_CLK_CPUGEAR_SET 4 /* set */
+#define UNIPHIER_CLK_CPUGEAR_UPD 8 /* update */
+#define UNIPHIER_CLK_CPUGEAR_UPD_BIT BIT(0)
+
+struct uniphier_clk_cpugear {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int regbase;
+ unsigned int mask;
+};
+
+#define to_uniphier_clk_cpugear(_hw) \
+ container_of(_hw, struct uniphier_clk_cpugear, hw)
+
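+/*
+ * Switching gears: program the new parent selection in the SET register,
+ * kick the update bit, then poll until the hardware clears it.
+ */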
+static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct uniphier_clk_cpugear *gear = to_uniphier_clk_cpugear(hw);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_write_bits(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
+ gear->mask, index);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
+ UNIPHIER_CLK_CPUGEAR_UPD_BIT,
+ UNIPHIER_CLK_CPUGEAR_UPD_BIT);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
+ val, !(val & UNIPHIER_CLK_CPUGEAR_UPD_BIT),
+ 0, 1);
+}
+
+static u8 uniphier_clk_cpugear_get_parent(struct clk_hw *hw)
+{
+ struct uniphier_clk_cpugear *gear = to_uniphier_clk_cpugear(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(gear->regmap,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_STAT, &val);
+ if (ret)
+ return ret;
+
+ val &= gear->mask;
+
+ return val < num_parents ? val : -EINVAL;
+}
+
+static const struct clk_ops uniphier_clk_cpugear_ops = {
+ .determine_rate = __clk_mux_determine_rate,
+ .set_parent = uniphier_clk_cpugear_set_parent,
+ .get_parent = uniphier_clk_cpugear_get_parent,
+};
+
+struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_cpugear_data *data)
+{
+ struct uniphier_clk_cpugear *gear;
+ struct clk_init_data init;
+ int ret;
+
+ gear = devm_kzalloc(dev, sizeof(*gear), GFP_KERNEL);
+ if (!gear)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &uniphier_clk_cpugear_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = data->parent_names;
+ init.num_parents = data->num_parents;
+
+ gear->regmap = regmap;
+ gear->regbase = data->regbase;
+ gear->mask = data->mask;
+ gear->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &gear->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &gear->hw;
+}
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 5d029991047d..d049316c1c0f 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -125,16 +125,35 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
};
const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 392, 5), /* 1960 MHz */
+ UNIPHIER_CLK_FACTOR("mpll", -1, "ref", 64, 1), /* 1600 MHz */
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */
UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("mpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV3("spll", 3, 4, 8),
+ /* Note: both gear1 and gear4 are spll/4. This is not a bug. */
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8,
+ "cpll/2", "spll/4", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8,
+ "mpll/2", "spll/4", "mpll/3", "spll/3",
+ "spll/4", "spll/8", "mpll/4", "mpll/8"),
{ /* sentinel */ }
};
const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 88, 1), /* ARM: 2200 MHz */
+ UNIPHIER_CLK_FACTOR("gppll", -1, "ref", 52, 1), /* Mali: 1300 MHz */
+ UNIPHIER_CLK_FACTOR("mpll", -1, "ref", 64, 1), /* Codec: 1600 MHz */
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2200 MHz */
+ UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
UNIPHIER_LD20_SYS_CLK_SD,
@@ -147,5 +166,18 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("s2pll", 2, 3, 4, 8),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca72", 32, 0x8000, 0xf, 8,
+ "cpll/2", "spll/2", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8,
+ "cpll/2", "spll/2", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8,
+ "s2pll/2", "spll/2", "s2pll/3", "spll/3",
+ "spll/4", "spll/8", "s2pll/4", "s2pll/8"),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 0244dba1f4cf..01c16ecec48f 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -20,15 +20,24 @@ struct clk_hw;
struct device;
struct regmap;
-#define UNIPHIER_CLK_MUX_MAX_PARENTS 8
+#define UNIPHIER_CLK_CPUGEAR_MAX_PARENTS 16
+#define UNIPHIER_CLK_MUX_MAX_PARENTS 8
enum uniphier_clk_type {
+ UNIPHIER_CLK_TYPE_CPUGEAR,
UNIPHIER_CLK_TYPE_FIXED_FACTOR,
UNIPHIER_CLK_TYPE_FIXED_RATE,
UNIPHIER_CLK_TYPE_GATE,
UNIPHIER_CLK_TYPE_MUX,
};
+struct uniphier_clk_cpugear_data {
+ const char *parent_names[UNIPHIER_CLK_CPUGEAR_MAX_PARENTS];
+ unsigned int num_parents;
+ unsigned int regbase;
+ unsigned int mask;
+};
+
struct uniphier_clk_fixed_factor_data {
const char *parent_name;
unsigned int mult;
@@ -58,6 +67,7 @@ struct uniphier_clk_data {
enum uniphier_clk_type type;
int idx;
union {
+ struct uniphier_clk_cpugear_data cpugear;
struct uniphier_clk_fixed_factor_data factor;
struct uniphier_clk_fixed_rate_data rate;
struct uniphier_clk_gate_data gate;
@@ -65,6 +75,20 @@ struct uniphier_clk_data {
} data;
};
+#define UNIPHIER_CLK_CPUGEAR(_name, _idx, _regbase, _mask, \
+ _num_parents, ...) \
+ { \
+ .name = (_name), \
+ .type = UNIPHIER_CLK_TYPE_CPUGEAR, \
+ .idx = (_idx), \
+ .data.cpugear = { \
+ .parent_names = { __VA_ARGS__ }, \
+ .num_parents = (_num_parents), \
+ .regbase = (_regbase), \
+ .mask = (_mask) \
+ }, \
+ }
+
#define UNIPHIER_CLK_FACTOR(_name, _idx, _parent, _mult, _div) \
{ \
.name = (_name), \
@@ -77,7 +101,6 @@ struct uniphier_clk_data {
}, \
}
-
#define UNIPHIER_CLK_GATE(_name, _idx, _parent, _reg, _bit) \
{ \
.name = (_name), \
@@ -90,7 +113,25 @@ struct uniphier_clk_data {
}, \
}
+#define UNIPHIER_CLK_DIV(parent, div) \
+ UNIPHIER_CLK_FACTOR(parent "/" #div, -1, parent, 1, div)
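+/* e.g. UNIPHIER_CLK_DIV("spll", 4) registers a fixed-factor clock named "spll/4" */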
+
+#define UNIPHIER_CLK_DIV2(parent, div0, div1) \
+ UNIPHIER_CLK_DIV(parent, div0), \
+ UNIPHIER_CLK_DIV(parent, div1)
+
+#define UNIPHIER_CLK_DIV3(parent, div0, div1, div2) \
+ UNIPHIER_CLK_DIV2(parent, div0, div1), \
+ UNIPHIER_CLK_DIV(parent, div2)
+
+#define UNIPHIER_CLK_DIV4(parent, div0, div1, div2, div3) \
+ UNIPHIER_CLK_DIV2(parent, div0, div1), \
+ UNIPHIER_CLK_DIV2(parent, div2, div3)
+struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_cpugear_data *data);
struct clk_hw *uniphier_clk_register_fixed_factor(struct device *dev,
const char *name,
const struct uniphier_clk_fixed_factor_data *data);
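For reference, a minimal sketch (not part of the patch) of what the new UNIPHIER_CLK_DIV4() helper expands to, derived only from the macros added above: each divisor becomes an anonymous fixed-factor clock named "<parent>/<div>", which the UNIPHIER_CLK_CPUGEAR() entries in the LD20 table can then reference as parents by name.

    /* UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8) expands to: */
    UNIPHIER_CLK_FACTOR("cpll/2", -1, "cpll", 1, 2),
    UNIPHIER_CLK_FACTOR("cpll/3", -1, "cpll", 1, 3),
    UNIPHIER_CLK_FACTOR("cpll/4", -1, "cpll", 1, 4),
    UNIPHIER_CLK_FACTOR("cpll/8", -1, "cpll", 1, 8),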
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 73c487da6d2a..02fef6830e72 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -81,6 +81,7 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool arch_counter_suspend_stop;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
@@ -576,7 +577,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter = {
@@ -616,6 +617,8 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ if (!arch_counter_suspend_stop)
+ clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
@@ -907,6 +910,10 @@ static int __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ /* On some systems, the counter stops ticking when in suspend. */
+ arch_counter_suspend_stop = of_property_read_bool(np,
+ "arm,no-tick-in-suspend");
+
return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
@@ -964,8 +971,9 @@ static int __init arch_timer_mem_init(struct device_node *np)
}
ret = -ENXIO;
- base = arch_counter_base = of_iomap(best_frame, 0);
- if (!base) {
+ base = arch_counter_base = of_io_request_and_map(best_frame, 0,
+ "arch_mem_timer");
+ if (IS_ERR(base)) {
pr_err("arch_timer: Can't map frame's registers\n");
goto out;
}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index e71acf231c89..f2f29d2be1cf 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -96,7 +96,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = of_property_read_u32(node, "clock-frequency", &freq);
if (ret) {
pr_err("Can't read clock-frequency");
- return ret;
+ goto err_iounmap;
}
system_clock = base + REG_COUNTER_LO;
@@ -108,13 +108,15 @@ static int __init bcm2835_timer_init(struct device_node *node)
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
pr_err("Can't allocate timer struct\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_iounmap;
}
timer->control = base + REG_CONTROL;
@@ -133,7 +135,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- return ret;
+ goto err_iounmap;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -141,6 +143,10 @@ static int __init bcm2835_timer_init(struct device_node *node)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
return 0;
+
+err_iounmap:
+ iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8afe23b6..920c469f3953 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -12,6 +12,27 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_BRCMSTB_AVS_CPUFREQ
+ tristate "Broadcom STB AVS CPUfreq driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default y
+ help
+ Some Broadcom STB SoCs use a co-processor running proprietary firmware
+ ("AVS") to handle voltage and frequency scaling. This driver provides
+ a standard CPUfreq interface to the firmware.
+
+ Say Y if you have a Broadcom SoC with AVS support for DFS or DVFS.
+
+config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+ bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
+ depends on ARM_BRCMSTB_AVS_CPUFREQ
+ help
+ Enabling this option turns on debug support via debugfs under
+ /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
+ write some AVS mailbox registers through debugfs entries.
+
+ If in doubt, say N.
+
config ARM_DT_BL_CPUFREQ
tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
depends on ARM_BIG_LITTLE_CPUFREQ && OF
@@ -60,14 +81,6 @@ config ARM_IMX6Q_CPUFREQ
If in doubt, say N.
-config ARM_INTEGRATOR
- tristate "CPUfreq driver for ARM Integrator CPUs"
- depends on ARCH_INTEGRATOR
- default y
- help
- This enables the CPUfreq driver for ARM Integrator CPUs.
- If in doubt, say Y.
-
config ARM_KIRKWOOD_CPUFREQ
def_bool MACH_KIRKWOOD
help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..35f71825b7f3 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -6,6 +6,7 @@ config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0a9b6a093646..1e46c3918e7a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -51,12 +51,12 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
-obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 297e9128fe9f..3a98702b7445 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -84,7 +84,6 @@ static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufre
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
{
@@ -104,11 +103,10 @@ static bool boost_state(unsigned int cpu)
return false;
}
-static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+static int boost_set_msr(bool enable)
{
- u32 cpu;
u32 msr_addr;
- u64 msr_mask;
+ u64 msr_mask, val;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
@@ -120,26 +118,31 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
msr_mask = MSR_K7_HWCR_CPB_DIS;
break;
default:
- return;
+ return -EINVAL;
}
- rdmsr_on_cpus(cpumask, msr_addr, msrs);
+ rdmsrl(msr_addr, val);
- for_each_cpu(cpu, cpumask) {
- struct msr *reg = per_cpu_ptr(msrs, cpu);
- if (enable)
- reg->q &= ~msr_mask;
- else
- reg->q |= msr_mask;
- }
+ if (enable)
+ val &= ~msr_mask;
+ else
+ val |= msr_mask;
+
+ wrmsrl(msr_addr, val);
+ return 0;
+}
+
+static void boost_set_msr_each(void *p_en)
+{
+ bool enable = (bool) p_en;
- wrmsr_on_cpus(cpumask, msr_addr, msrs);
+ boost_set_msr(enable);
}
static int set_boost(int val)
{
get_online_cpus();
- boost_set_msrs(val, cpu_online_mask);
+ on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
put_online_cpus();
pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
@@ -536,46 +539,24 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data);
}
-static int boost_notify(struct notifier_block *nb, unsigned long action,
- void *hcpu)
+static int cpufreq_boost_online(unsigned int cpu)
{
- unsigned cpu = (long)hcpu;
- const struct cpumask *cpumask;
-
- cpumask = get_cpu_mask(cpu);
+ /*
+ * On the CPU_UP path we simply keep the boost-disable flag
+ * in sync with the current global state.
+ */
+ return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
+}
+static int cpufreq_boost_down_prep(unsigned int cpu)
+{
/*
* Clear the boost-disable bit on the CPU_DOWN path so that
- * this cpu cannot block the remaining ones from boosting. On
- * the CPU_UP path we simply keep the boost-disable flag in
- * sync with the current global state.
+ * this cpu cannot block the remaining ones from boosting.
*/
-
- switch (action) {
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
- break;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- boost_set_msrs(1, cpumask);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
+ return boost_set_msr(1);
}
-
-static struct notifier_block boost_nb = {
- .notifier_call = boost_notify,
-};
-
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
@@ -922,37 +903,35 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.attr = acpi_cpufreq_attr,
};
+static enum cpuhp_state acpi_cpufreq_online;
+
static void __init acpi_cpufreq_boost_init(void)
{
- if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
- msrs = msrs_alloc();
-
- if (!msrs)
- return;
-
- acpi_cpufreq_driver.set_boost = set_boost;
- acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
- cpu_notifier_register_begin();
+ int ret;
- /* Force all MSRs to the same value */
- boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
- cpu_online_mask);
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+ return;
- __register_cpu_notifier(&boost_nb);
+ acpi_cpufreq_driver.set_boost = set_boost;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
- cpu_notifier_register_done();
+ /*
+ * This calls the online callback on all online CPUs and forces all
+ * MSRs to the same value.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
+ cpufreq_boost_online, cpufreq_boost_down_prep);
+ if (ret < 0) {
+ pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
+ return;
}
+ acpi_cpufreq_online = ret;
}
static void acpi_cpufreq_boost_exit(void)
{
- if (msrs) {
- unregister_cpu_notifier(&boost_nb);
-
- msrs_free(msrs);
- msrs = NULL;
- }
+ if (acpi_cpufreq_online >= 0)
+ cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
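The acpi-cpufreq boost rework above replaces the old CPU notifier with the cpuhp state machine. A minimal skeleton of that pattern, assuming only the generic cpuhp API (the example_* names are made up for illustration):

    #include <linux/cpuhotplug.h>

    static enum cpuhp_state example_hp_state;

    /* Runs on every CPU that comes online (and once per already-online CPU at setup). */
    static int example_online(unsigned int cpu)
    {
            /* re-sync this CPU's state (e.g. an MSR) with the global setting */
            return 0;
    }

    /* Runs on every CPU about to go offline. */
    static int example_prepare_down(unsigned int cpu)
    {
            /* undo per-CPU state so the departing CPU cannot affect the rest */
            return 0;
    }

    static int __init example_init(void)
    {
            int ret;

            /* CPUHP_AP_ONLINE_DYN allocates a dynamic state and returns its id. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/driver:online",
                                    example_online, example_prepare_down);
            if (ret < 0)
                    return ret;
            example_hp_state = ret;
            return 0;
    }

    static void example_exit(void)
    {
            /* remove the state without invoking the teardown callback on each CPU */
            cpuhp_remove_state_nocalls(example_hp_state);
    }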
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
new file mode 100644
index 000000000000..4fda623e55bb
--- /dev/null
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -0,0 +1,1057 @@
+/*
+ * CPU frequency scaling for Broadcom SoCs with AVS firmware that
+ * supports DVS or DVFS
+ *
+ * Copyright (c) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * "AVS" is the name of a firmware developed at Broadcom. It derives
+ * its name from the technique called "Adaptive Voltage Scaling".
+ * Adaptive voltage scaling was the original purpose of this firmware.
+ * The AVS firmware still supports "AVS mode", where all it does is
+ * adaptive voltage scaling. However, on some newer Broadcom SoCs, the
+ * AVS Firmware, despite its unchanged name, also supports DFS mode and
+ * DVFS mode.
+ *
+ * In the context of this document and the related driver, "AVS" by
+ * itself always means the Broadcom firmware and never refers to the
+ * technique called "Adaptive Voltage Scaling".
+ *
+ * The Broadcom STB AVS CPUfreq driver provides voltage and frequency
+ * scaling on Broadcom SoCs using AVS firmware with support for DFS and
+ * DVFS. The AVS firmware is running on its own co-processor. The
+ * driver supports both uniprocessor (UP) and symmetric multiprocessor
+ * (SMP) systems which share clock and voltage across all CPUs.
+ *
+ * Actual voltage and frequency scaling is done solely by the AVS
+ * firmware. This driver does not change frequency or voltage itself.
+ * It provides a standard CPUfreq interface to the rest of the kernel
+ * and to userland. It interfaces with the AVS firmware to effect the
+ * requested changes and to report back the current system status in a
+ * way that is expected by existing tools.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#endif
+
+/* Max number of arguments AVS calls take */
+#define AVS_MAX_CMD_ARGS 4
+/*
+ * This macro is used to generate AVS parameter register offsets. For
+ * x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
+ * access outside of the parameter range. (Offset 0 is the first parameter.)
+ */
+#define AVS_PARAM_MULT(x) ((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
+
+/* AVS Mailbox Register offsets */
+#define AVS_MBOX_COMMAND 0x00
+#define AVS_MBOX_STATUS 0x04
+#define AVS_MBOX_VOLTAGE0 0x08
+#define AVS_MBOX_TEMP0 0x0c
+#define AVS_MBOX_PV0 0x10
+#define AVS_MBOX_MV0 0x14
+#define AVS_MBOX_PARAM(x) (0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
+#define AVS_MBOX_REVISION 0x28
+#define AVS_MBOX_PSTATE 0x2c
+#define AVS_MBOX_HEARTBEAT 0x30
+#define AVS_MBOX_MAGIC 0x34
+#define AVS_MBOX_SIGMA_HVT 0x38
+#define AVS_MBOX_SIGMA_SVT 0x3c
+#define AVS_MBOX_VOLTAGE1 0x40
+#define AVS_MBOX_TEMP1 0x44
+#define AVS_MBOX_PV1 0x48
+#define AVS_MBOX_MV1 0x4c
+#define AVS_MBOX_FREQUENCY 0x50
+
+/* AVS Commands */
+#define AVS_CMD_AVAILABLE 0x00
+#define AVS_CMD_DISABLE 0x10
+#define AVS_CMD_ENABLE 0x11
+#define AVS_CMD_S2_ENTER 0x12
+#define AVS_CMD_S2_EXIT 0x13
+#define AVS_CMD_BBM_ENTER 0x14
+#define AVS_CMD_BBM_EXIT 0x15
+#define AVS_CMD_S3_ENTER 0x16
+#define AVS_CMD_S3_EXIT 0x17
+#define AVS_CMD_BALANCE 0x18
+/* PMAP and P-STATE commands */
+#define AVS_CMD_GET_PMAP 0x30
+#define AVS_CMD_SET_PMAP 0x31
+#define AVS_CMD_GET_PSTATE 0x40
+#define AVS_CMD_SET_PSTATE 0x41
+
+/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
+#define AVS_MODE_AVS 0x0
+#define AVS_MODE_DFS 0x1
+#define AVS_MODE_DVS 0x2
+#define AVS_MODE_DVFS 0x3
+
+/*
+ * PMAP parameter p1
+ * unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10, ndiv_int:9-0
+ */
+#define NDIV_INT_SHIFT 0
+#define NDIV_INT_MASK 0x3ff
+#define PDIV_SHIFT 10
+#define PDIV_MASK 0xf
+#define MDIV_P0_SHIFT 16
+#define MDIV_P0_MASK 0xff
+/*
+ * PMAP parameter p2
+ * mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15-8, mdiv_p1:7-0
+ */
+#define MDIV_P1_SHIFT 0
+#define MDIV_P1_MASK 0xff
+#define MDIV_P2_SHIFT 8
+#define MDIV_P2_MASK 0xff
+#define MDIV_P3_SHIFT 16
+#define MDIV_P3_MASK 0xff
+#define MDIV_P4_SHIFT 24
+#define MDIV_P4_MASK 0xff
+
+/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
+#define AVS_PSTATE_P0 0x0
+#define AVS_PSTATE_P1 0x1
+#define AVS_PSTATE_P2 0x2
+#define AVS_PSTATE_P3 0x3
+#define AVS_PSTATE_P4 0x4
+#define AVS_PSTATE_MAX AVS_PSTATE_P4
+
+/* CPU L2 Interrupt Controller Registers */
+#define AVS_CPU_L2_SET0 0x04
+#define AVS_CPU_L2_INT_MASK BIT(31)
+
+/* AVS Command Status Values */
+#define AVS_STATUS_CLEAR 0x00
+/* Command/notification accepted */
+#define AVS_STATUS_SUCCESS 0xf0
+/* Command/notification rejected */
+#define AVS_STATUS_FAILURE 0xff
+/* Invalid command/notification (unknown) */
+#define AVS_STATUS_INVALID 0xf1
+/* Non-AVS modes are not supported */
+#define AVS_STATUS_NO_SUPP 0xf2
+/* Cannot set P-State until P-Map supplied */
+#define AVS_STATUS_NO_MAP 0xf3
+/* Cannot change P-Map after initial P-Map set */
+#define AVS_STATUS_MAP_SET 0xf4
+/* Max AVS status; higher numbers are used for debugging */
+#define AVS_STATUS_MAX 0xff
+
+/* Other AVS related constants */
+#define AVS_LOOP_LIMIT 10000
+#define AVS_TIMEOUT 300 /* in ms; expected completion is < 10ms */
+#define AVS_FIRMWARE_MAGIC 0xa11600d1
+
+#define BRCM_AVS_CPUFREQ_PREFIX "brcmstb-avs"
+#define BRCM_AVS_CPUFREQ_NAME BRCM_AVS_CPUFREQ_PREFIX "-cpufreq"
+#define BRCM_AVS_CPU_DATA "brcm,avs-cpu-data-mem"
+#define BRCM_AVS_CPU_INTR "brcm,avs-cpu-l2-intr"
+#define BRCM_AVS_HOST_INTR "sw_intr"
+
+struct pmap {
+ unsigned int mode;
+ unsigned int p1;
+ unsigned int p2;
+ unsigned int state;
+};
+
+struct private_data {
+ void __iomem *base;
+ void __iomem *avs_intr_base;
+ struct device *dev;
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+ struct dentry *debugfs;
+#endif
+ struct completion done;
+ struct semaphore sem;
+ struct pmap pmap;
+};
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
+enum debugfs_format {
+ DEBUGFS_NORMAL,
+ DEBUGFS_FLOAT,
+ DEBUGFS_REV,
+};
+
+struct debugfs_data {
+ struct debugfs_entry *entry;
+ struct private_data *priv;
+};
+
+struct debugfs_entry {
+ char *name;
+ u32 offset;
+ fmode_t mode;
+ enum debugfs_format format;
+};
+
+#define DEBUGFS_ENTRY(name, mode, format) { \
+ #name, AVS_MBOX_##name, mode, format \
+}
+
+/*
+ * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
+ */
+#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
+#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
+#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
+#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
+
+/*
+ * This table stores the name, access permissions and offset for each hardware
+ * register and is used to generate debugfs entries.
+ */
+static struct debugfs_entry debugfs_entries[] = {
+ DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
+ DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
+};
+
+static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
+
+static char *__strtolower(char *s)
+{
+ char *p;
+
+ for (p = s; *p; p++)
+ *p = tolower(*p);
+
+ return s;
+}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+static void __iomem *__map_region(const char *name)
+{
+ struct device_node *np;
+ void __iomem *ptr;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return NULL;
+
+ ptr = of_iomap(np, 0);
+ of_node_put(np);
+
+ return ptr;
+}
+
+static int __issue_avs_command(struct private_data *priv, int cmd, bool is_send,
+ u32 args[])
+{
+ unsigned long time_left = msecs_to_jiffies(AVS_TIMEOUT);
+ void __iomem *base = priv->base;
+ unsigned int i;
+ int ret;
+ u32 val;
+
+ ret = down_interruptible(&priv->sem);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure no other command is currently running: cmd is 0 if AVS
+ * co-processor is idle. Due to the guard above, we should almost never
+ * have to wait here.
+ */
+ for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
+ val = readl(base + AVS_MBOX_COMMAND);
+
+ /* Give the caller a chance to retry if AVS is busy. */
+ if (i == AVS_LOOP_LIMIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Clear status before we begin. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* We need to send arguments for this command. */
+ if (args && is_send) {
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ writel(args[i], base + AVS_MBOX_PARAM(i));
+ }
+
+ /* Protect from spurious interrupts. */
+ reinit_completion(&priv->done);
+
+ /* Now issue the command & tell firmware to wake up to process it. */
+ writel(cmd, base + AVS_MBOX_COMMAND);
+ writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
+
+ /* Wait for AVS co-processor to finish processing the command. */
+ time_left = wait_for_completion_timeout(&priv->done, time_left);
+
+ /*
+ * If the AVS status is not in the expected range, it means AVS didn't
+ * complete our command in time, and we return an error. Also, if there
+ * is no "time left", we timed out waiting for the interrupt.
+ */
+ val = readl(base + AVS_MBOX_STATUS);
+ if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
+ dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
+ cmd);
+ dev_err(priv->dev, " Time left: %u ms, AVS status: %#x\n",
+ jiffies_to_msecs(time_left), val);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* This command returned arguments, so we read them back. */
+ if (args && !is_send) {
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ args[i] = readl(base + AVS_MBOX_PARAM(i));
+ }
+
+ /* Clear status to tell AVS co-processor we are done. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* Convert firmware errors to errno's as much as possible. */
+ switch (val) {
+ case AVS_STATUS_INVALID:
+ ret = -EINVAL;
+ break;
+ case AVS_STATUS_NO_SUPP:
+ ret = -ENOTSUPP;
+ break;
+ case AVS_STATUS_NO_MAP:
+ ret = -ENOENT;
+ break;
+ case AVS_STATUS_MAP_SET:
+ ret = -EEXIST;
+ break;
+ case AVS_STATUS_FAILURE:
+ ret = -EIO;
+ break;
+ }
+
+out:
+ up(&priv->sem);
+
+ return ret;
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct private_data *priv = data;
+
+ /* AVS command completed execution. Wake up __issue_avs_command(). */
+ complete(&priv->done);
+
+ return IRQ_HANDLED;
+}
+
+static char *brcm_avs_mode_to_string(unsigned int mode)
+{
+ switch (mode) {
+ case AVS_MODE_AVS:
+ return "AVS";
+ case AVS_MODE_DFS:
+ return "DFS";
+ case AVS_MODE_DVS:
+ return "DVS";
+ case AVS_MODE_DVFS:
+ return "DVFS";
+ }
+ return NULL;
+}
+
+static void brcm_avs_parse_p1(u32 p1, unsigned int *mdiv_p0, unsigned int *pdiv,
+ unsigned int *ndiv)
+{
+ *mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
+ *pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
+ *ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
+}
+
+static void brcm_avs_parse_p2(u32 p2, unsigned int *mdiv_p1,
+ unsigned int *mdiv_p2, unsigned int *mdiv_p3,
+ unsigned int *mdiv_p4)
+{
+ *mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
+ *mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
+ *mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
+ *mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
+}
+
+static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, false, args);
+ if (ret || !pmap)
+ return ret;
+
+ pmap->mode = args[0];
+ pmap->p1 = args[1];
+ pmap->p2 = args[2];
+ pmap->state = args[3];
+
+ return 0;
+}
+
+static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pmap->mode;
+ args[1] = pmap->p1;
+ args[2] = pmap->p2;
+ args[3] = pmap->state;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PMAP, true, args);
+}
+
+static int brcm_avs_get_pstate(struct private_data *priv, unsigned int *pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, false, args);
+ if (ret)
+ return ret;
+ *pstate = args[0];
+
+ return 0;
+}
+
+static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pstate;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
+}
+
+static unsigned long brcm_avs_get_voltage(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_VOLTAGE1);
+}
+
+static unsigned long brcm_avs_get_frequency(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
+}
+
+/*
+ * We determine which frequencies are supported by cycling through all P-states
+ * and reading back what frequency we are running at for each P-state.
+ */
+static struct cpufreq_frequency_table *
+brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned int pstate;
+ int i, ret;
+
+ /* Remember P-state for later */
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ table = devm_kzalloc(dev, (AVS_PSTATE_MAX + 1) * sizeof(*table),
+ GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
+ ret = brcm_avs_set_pstate(priv, i);
+ if (ret)
+ return ERR_PTR(ret);
+ table[i].frequency = brcm_avs_get_frequency(priv->base);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* Restore P-state */
+ ret = brcm_avs_set_pstate(priv, pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return table;
+}
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
+#define MANT(x) (unsigned int)(abs((x)) / 1000)
+#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
+
+static int brcm_avs_debug_show(struct seq_file *s, void *data)
+{
+ struct debugfs_data *dbgfs = s->private;
+ void __iomem *base;
+ u32 val, offset;
+
+ if (!dbgfs) {
+ seq_puts(s, "No device pointer\n");
+ return 0;
+ }
+
+ base = dbgfs->priv->base;
+ offset = dbgfs->entry->offset;
+ val = readl(base + offset);
+ switch (dbgfs->entry->format) {
+ case DEBUGFS_NORMAL:
+ seq_printf(s, "%u\n", val);
+ break;
+ case DEBUGFS_FLOAT:
+ seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
+ break;
+ case DEBUGFS_REV:
+ seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
+ (val >> 16 & 0xff), (val >> 8 & 0xff),
+ val & 0xff);
+ break;
+ }
+ seq_printf(s, "0x%08x\n", val);
+
+ return 0;
+}
+
+#undef MANT
+#undef FRAC
+
+static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct debugfs_data *dbgfs = s->private;
+ struct private_data *priv = dbgfs->priv;
+ void __iomem *base, *avs_intr_base;
+ bool use_issue_command = false;
+ unsigned long val, offset;
+ char str[128];
+ int ret;
+ char *str_ptr = str;
+
+ if (size >= sizeof(str))
+ return -E2BIG;
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return ret;
+
+ base = priv->base;
+ avs_intr_base = priv->avs_intr_base;
+ offset = dbgfs->entry->offset;
+ /*
+ * Special case writing to "command" entry only: if the string starts
+ * with a 'c', we use the driver's __issue_avs_command() function.
+ * Otherwise, we perform a raw write. This should allow testing of raw
+ * access as well as using the higher level function. (Raw access
+ * doesn't clear the firmware return status after issuing the command.)
+ */
+ if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
+ use_issue_command = true;
+ str_ptr++;
+ }
+ if (kstrtoul(str_ptr, 0, &val) != 0)
+ return -EINVAL;
+
+ /*
+ * Setting the P-state is a special case. We need to update the CPU
+ * frequency we report.
+ */
+ if (val == AVS_CMD_SET_PSTATE) {
+ struct cpufreq_policy *policy;
+ unsigned int pstate;
+
+ policy = cpufreq_cpu_get(smp_processor_id());
+ /* Read back the P-state we are about to set */
+ pstate = readl(base + AVS_MBOX_PARAM(0));
+ if (use_issue_command) {
+ ret = brcm_avs_target_index(policy, pstate);
+ return ret ? ret : size;
+ }
+ policy->cur = policy->freq_table[pstate].frequency;
+ }
+
+ if (use_issue_command) {
+ ret = __issue_avs_command(priv, val, false, NULL);
+ } else {
+ /* Locking here is not perfect, but is only for debug. */
+ ret = down_interruptible(&priv->sem);
+ if (ret)
+ return ret;
+
+ writel(val, base + offset);
+ /* We have to wake up the firmware to process a command. */
+ if (offset == AVS_MBOX_COMMAND)
+ writel(AVS_CPU_L2_INT_MASK,
+ avs_intr_base + AVS_CPU_L2_SET0);
+ up(&priv->sem);
+ }
+
+ return ret ? ret : size;
+}
+
+static struct debugfs_entry *__find_debugfs_entry(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
+ if (strcasecmp(debugfs_entries[i].name, name) == 0)
+ return &debugfs_entries[i];
+
+ return NULL;
+}
+
+static int brcm_avs_debug_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_data *data;
+ fmode_t fmode;
+ int ret;
+
+ /*
+ * seq_open(), which is called by single_open(), clears "write" access.
+ * We need write access to some files, so we preserve our access mode
+ * and restore it.
+ */
+ fmode = file->f_mode;
+ /*
+ * Check access permissions even for root. We don't want to be writing
+ * to read-only registers. Access for regular users has already been
+ * checked by the VFS layer.
+ */
+ if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
+ return -EACCES;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ /*
+ * We use the same file system operations for all our debug files. To
+ * produce specific output, we look up the file name upon opening a
+ * debugfs entry and map it to a memory offset. This offset is then used
+ * in the generic "show" function to read a specific register.
+ */
+ data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
+ data->priv = inode->i_private;
+
+ ret = single_open(file, brcm_avs_debug_show, data);
+ if (ret)
+ kfree(data);
+ file->f_mode = fmode;
+
+ return ret;
+}
+
+static int brcm_avs_debug_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_priv = file->private_data;
+ struct debugfs_data *data = seq_priv->private;
+
+ kfree(data);
+ return single_release(inode, file);
+}
+
+static const struct file_operations brcm_avs_debug_ops = {
+ .open = brcm_avs_debug_open,
+ .read = seq_read,
+ .write = brcm_avs_seq_write,
+ .llseek = seq_lseek,
+ .release = brcm_avs_debug_release,
+};
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+ struct dentry *dir;
+ int i;
+
+ if (!priv)
+ return;
+
+ dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+ priv->debugfs = dir;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
+ /*
+ * The DEBUGFS_ENTRY macro generates uppercase strings. We
+ * convert them to lowercase before creating the debugfs
+ * entries.
+ */
+ char *entry = __strtolower(debugfs_entries[i].name);
+ fmode_t mode = debugfs_entries[i].mode;
+
+ if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
+ dir, priv, &brcm_avs_debug_ops)) {
+ priv->debugfs = NULL;
+ debugfs_remove_recursive(dir);
+ break;
+ }
+ }
+}
+
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+
+ if (priv && priv->debugfs) {
+ debugfs_remove_recursive(priv->debugfs);
+ priv->debugfs = NULL;
+ }
+}
+
+#else
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+/*
+ * To ensure the right firmware is running, we need to
+ * - check that the MAGIC value matches what we expect
+ * - check that brcm_avs_get_pmap() doesn't return -ENOTSUPP or -EINVAL
+ * We need to set up our interrupt handling before calling brcm_avs_get_pmap()!
+ */
+static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+{
+ u32 magic;
+ int rc;
+
+ rc = brcm_avs_get_pmap(priv, NULL);
+ magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+ (rc != -EINVAL);
+}
+
+static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct private_data *priv = policy->driver_data;
+
+ return brcm_avs_get_frequency(priv->base);
+}
+
+static int brcm_avs_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return brcm_avs_set_pstate(policy->driver_data,
+ policy->freq_table[index].driver_data);
+}
+
+static int brcm_avs_suspend(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return brcm_avs_get_pmap(priv, &priv->pmap);
+}
+
+static int brcm_avs_resume(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ int ret;
+
+ ret = brcm_avs_set_pmap(priv, &priv->pmap);
+ if (ret == -EEXIST) {
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
+
+ dev_warn(dev, "PMAP was already set\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * All initialization code that we only want to execute once goes here. Setup
+ * code that can be re-tried on every core (if it failed before) can go into
+ * brcm_avs_cpufreq_init().
+ */
+static int brcm_avs_prepare_init(struct platform_device *pdev)
+{
+ struct private_data *priv;
+ struct device *dev;
+ int host_irq, ret;
+
+ dev = &pdev->dev;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ sema_init(&priv->sem, 1);
+ init_completion(&priv->done);
+ platform_set_drvdata(pdev, priv);
+
+ priv->base = __map_region(BRCM_AVS_CPU_DATA);
+ if (!priv->base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_DATA);
+ return -ENOENT;
+ }
+
+ priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
+ if (!priv->avs_intr_base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_INTR);
+ ret = -ENOENT;
+ goto unmap_base;
+ }
+
+ host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
+ if (host_irq < 0) {
+ dev_err(dev, "Couldn't find interrupt %s -- %d\n",
+ BRCM_AVS_HOST_INTR, host_irq);
+ ret = host_irq;
+ goto unmap_intr_base;
+ }
+
+ ret = devm_request_irq(dev, host_irq, irq_handler, IRQF_TRIGGER_RISING,
+ BRCM_AVS_HOST_INTR, priv);
+ if (ret) {
+ dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
+ BRCM_AVS_HOST_INTR, host_irq, ret);
+ goto unmap_intr_base;
+ }
+
+ if (brcm_avs_is_firmware_loaded(priv))
+ return 0;
+
+ dev_err(dev, "AVS firmware is not loaded or doesn't support DVFS\n");
+ ret = -ENODEV;
+
+unmap_intr_base:
+ iounmap(priv->avs_intr_base);
+unmap_base:
+ iounmap(priv->base);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct platform_device *pdev;
+ struct private_data *priv;
+ struct device *dev;
+ int ret;
+
+ pdev = cpufreq_get_driver_data();
+ priv = platform_get_drvdata(pdev);
+ policy->driver_data = priv;
+ dev = &pdev->dev;
+
+ freq_table = brcm_avs_get_freq_table(dev, priv);
+ if (IS_ERR(freq_table)) {
+ ret = PTR_ERR(freq_table);
+ dev_err(dev, "Couldn't determine frequency table (%d).\n", ret);
+ return ret;
+ }
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ /* All cores share the same clock and thus the same policy. */
+ cpumask_setall(policy->cpus);
+
+ ret = __issue_avs_command(priv, AVS_CMD_ENABLE, false, NULL);
+ if (!ret) {
+ unsigned int pstate;
+
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (!ret) {
+ policy->cur = freq_table[pstate].frequency;
+ dev_info(dev, "registered\n");
+ return 0;
+ }
+ }
+
+ dev_err(dev, "couldn't initialize driver (%d)\n", ret);
+
+ return ret;
+}
+
+static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned int pstate;
+
+ if (brcm_avs_get_pstate(priv, &pstate))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%u\n", pstate);
+}
+
+static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
+ pmap.mode);
+}
+
+static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+ struct private_data *priv = policy->driver_data;
+ unsigned int ndiv, pdiv;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+ brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+
+ return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+ pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
+ mdiv_p3, mdiv_p4);
+}
+
+static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
+}
+
+static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
+}
+
+cpufreq_freq_attr_ro(brcm_avs_pstate);
+cpufreq_freq_attr_ro(brcm_avs_mode);
+cpufreq_freq_attr_ro(brcm_avs_pmap);
+cpufreq_freq_attr_ro(brcm_avs_voltage);
+cpufreq_freq_attr_ro(brcm_avs_frequency);
+
+static struct freq_attr *brcm_avs_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &brcm_avs_pstate,
+ &brcm_avs_mode,
+ &brcm_avs_pmap,
+ &brcm_avs_voltage,
+ &brcm_avs_frequency,
+ NULL
+};
+
+static struct cpufreq_driver brcm_avs_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = brcm_avs_target_index,
+ .get = brcm_avs_cpufreq_get,
+ .suspend = brcm_avs_suspend,
+ .resume = brcm_avs_resume,
+ .init = brcm_avs_cpufreq_init,
+ .attr = brcm_avs_cpufreq_attr,
+ .name = BRCM_AVS_CPUFREQ_PREFIX,
+};
+
+static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = brcm_avs_prepare_init(pdev);
+ if (ret)
+ return ret;
+
+ brcm_avs_driver.driver_data = pdev;
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (!ret)
+ brcm_avs_cpufreq_debug_init(pdev);
+
+ return ret;
+}
+
+static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+{
+ struct private_data *priv;
+ int ret;
+
+ ret = cpufreq_unregister_driver(&brcm_avs_driver);
+ if (ret)
+ return ret;
+
+ brcm_avs_cpufreq_debug_exit(pdev);
+
+ priv = platform_get_drvdata(pdev);
+ iounmap(priv->base);
+ iounmap(priv->avs_intr_base);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id brcm_avs_cpufreq_match[] = {
+ { .compatible = BRCM_AVS_CPU_DATA },
+ { }
+};
+MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
+
+static struct platform_driver brcm_avs_cpufreq_platdrv = {
+ .driver = {
+ .name = BRCM_AVS_CPUFREQ_NAME,
+ .of_match_table = brcm_avs_cpufreq_match,
+ },
+ .probe = brcm_avs_cpufreq_probe,
+ .remove = brcm_avs_cpufreq_remove,
+};
+module_platform_driver(brcm_avs_cpufreq_platdrv);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom STB AVS");
+MODULE_LICENSE("GPL");
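To illustrate the PMAP p1 layout that brcm_avs_parse_p1() decodes, a short worked example with a made-up register value (hypothetical, chosen only to exercise the shifts and masks defined in this file):

    u32 p1 = 0x00480c64;                                            /* hypothetical value */
    unsigned int mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;    /* (p1 >> 16) & 0xff  == 0x48 */
    unsigned int pdiv    = (p1 >> PDIV_SHIFT) & PDIV_MASK;          /* (p1 >> 10) & 0xf   == 0x3  */
    unsigned int ndiv    = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;  /* p1 & 0x3ff         == 0x64 */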
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4852d9efe74e..e82bb3c30b92 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -247,3 +247,10 @@ MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
+
+static const struct acpi_device_id cppc_acpi_ids[] = {
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 71267626456b..bc97b6a4b1cf 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,9 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
{ .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
@@ -34,6 +37,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "fsl,imx7d", },
{ .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
@@ -50,6 +55,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "renesas,r7s72100", },
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7745", },
{ .compatible = "renesas,r8a7778", },
{ .compatible = "renesas,r8a7779", },
{ .compatible = "renesas,r8a7790", },
@@ -72,6 +79,12 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "sigma,tango4" },
+ { .compatible = "socionext,uniphier-pro5", },
+ { .compatible = "socionext,uniphier-pxs2", },
+ { .compatible = "socionext,uniphier-ld6b", },
+ { .compatible = "socionext,uniphier-ld11", },
+ { .compatible = "socionext,uniphier-ld20", },
+
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap2", },
@@ -81,6 +94,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "xlnx,zynq-7000", },
+ { .compatible = "zte,zx296718", },
+
{ }
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5c07ae05d69a..269013311e79 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -28,6 +28,7 @@
#include "cpufreq-dt.h"
struct private_data {
+ struct opp_table *opp_table;
struct device *cpu_dev;
struct thermal_cooling_device *cdev;
const char *reg_name;
@@ -143,6 +144,7 @@ static int resources_available(void)
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
+ struct opp_table *opp_table = NULL;
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
@@ -186,8 +188,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
name = find_supply_name(cpu_dev);
if (name) {
- ret = dev_pm_opp_set_regulator(cpu_dev, name);
- if (ret) {
+ opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
policy->cpu, ret);
goto out_put_clk;
@@ -237,6 +240,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
}
priv->reg_name = name;
+ priv->opp_table = opp_table;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
@@ -285,7 +289,7 @@ out_free_priv:
out_free_opp:
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
if (name)
- dev_pm_opp_put_regulator(cpu_dev);
+ dev_pm_opp_put_regulators(opp_table);
out_put_clk:
clk_put(cpu_clk);
@@ -300,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
if (priv->reg_name)
- dev_pm_opp_put_regulator(priv->cpu_dev);
+ dev_pm_opp_put_regulators(priv->opp_table);
clk_put(policy->clk);
kfree(priv);
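In short, the cpufreq-dt change above moves from the single-regulator helper to the array-based one: dev_pm_opp_set_regulators() now returns an opp_table handle (or an ERR_PTR), and that handle, not the device, is what dev_pm_opp_put_regulators() takes on cleanup. A minimal sketch, assuming only the calls visible in this hunk:

    const char *name = "cpu";       /* supply name, e.g. as returned by find_supply_name() */
    struct opp_table *opp_table;

    opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
    if (IS_ERR(opp_table))
            return PTR_ERR(opp_table);

    /* ... build the OPP/frequency table, run the policy ... */

    dev_pm_opp_put_regulators(opp_table);   /* replaces dev_pm_opp_put_regulator(cpu_dev) */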
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb60fbc..cc475eff90b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1526,7 +1526,10 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+
+ if (!policy_is_inactive(policy))
+ ret_freq = __cpufreq_get(policy);
+
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2254,17 +2257,19 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Useful for policy notifiers which have different necessities
* at different times.
*/
-int cpufreq_update_policy(unsigned int cpu)
+void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy new_policy;
- int ret;
if (!policy)
- return -ENODEV;
+ return;
down_write(&policy->rwsem);
+ if (policy_is_inactive(policy))
+ goto unlock;
+
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
new_policy.min = policy->user_policy.min;
@@ -2275,24 +2280,20 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- if (cpufreq_suspended) {
- ret = -EAGAIN;
+ if (cpufreq_suspended)
goto unlock;
- }
+
new_policy.cur = cpufreq_update_current_freq(policy);
- if (WARN_ON(!new_policy.cur)) {
- ret = -EIO;
+ if (WARN_ON(!new_policy.cur))
goto unlock;
- }
}
- ret = cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, &new_policy);
unlock:
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
- return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 13475890d792..992f7c20760f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,16 +37,16 @@ struct cs_dbs_tuners {
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
- struct cpufreq_policy *policy)
+static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
+ struct cpufreq_policy *policy)
{
- unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+ unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows... */
- if (unlikely(freq_target == 0))
- freq_target = DEF_FREQUENCY_STEP;
+ if (unlikely(freq_step == 0))
+ freq_step = DEF_FREQUENCY_STEP;
- return freq_target;
+ return freq_step;
}
/*
@@ -55,10 +55,10 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
* sampling_down_factor, we check, if current idle time is more than 80%
* (default), then we try to decrease frequency
*
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of maximum frequency
+ * Frequency updates happen at minimum steps of 5% (default) of maximum
+ * frequency
*/
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
@@ -66,6 +66,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
+ unsigned int freq_step;
/*
* break out if we 'cannot' reduce the speed as the user might
@@ -82,6 +83,23 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (requested_freq > policy->max || requested_freq < policy->min)
requested_freq = policy->cur;
+ freq_step = get_freq_step(cs_tuners, policy);
+
+ /*
+ * Decrease requested_freq by one freq_step for each idle period during
+ * which we didn't update the frequency.
+ */
+ if (policy_dbs->idle_periods < UINT_MAX) {
+ unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
+
+ if (requested_freq > freq_steps)
+ requested_freq -= freq_steps;
+ else
+ requested_freq = policy->min;
+
+ policy_dbs->idle_periods = UINT_MAX;
+ }
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
dbs_info->down_skip = 0;
@@ -90,7 +108,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (requested_freq == policy->max)
goto out;
- requested_freq += get_freq_target(cs_tuners, policy);
+ requested_freq += freq_step;
if (requested_freq > policy->max)
requested_freq = policy->max;
@@ -106,16 +124,14 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (requested_freq == policy->min)
goto out;
- freq_target = get_freq_target(cs_tuners, policy);
- if (requested_freq > freq_target)
- requested_freq -= freq_target;
+ if (requested_freq > freq_step)
+ requested_freq -= freq_step;
else
requested_freq = policy->min;
@@ -305,7 +321,7 @@ static void cs_start(struct cpufreq_policy *policy)
static struct dbs_governor cs_governor = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
.kobj_type = { .default_attrs = cs_attributes },
- .gov_dbs_timer = cs_dbs_timer,
+ .gov_dbs_update = cs_dbs_update,
.alloc = cs_alloc,
.free = cs_free,
.init = cs_init,
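A worked example of the new idle-period handling in cs_dbs_update(), using made-up numbers: with policy->max = 2000000 kHz and the default 5% freq_step, get_freq_step() yields 100000 kHz; if dbs_update() reported three idle sampling periods, requested_freq is first lowered by 3 * 100000 kHz (clamped at policy->min) before the usual up/down threshold checks run:

    freq_step  = (5 * 2000000) / 100;            /* 100000 kHz */
    freq_steps = 3 * freq_step;                  /* 300000 kHz */
    requested_freq = (requested_freq > freq_steps) ?
                     requested_freq - freq_steps : policy->min;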
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 642dd0f183a8..0196467280bd 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -61,7 +61,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
* entries can't be freed concurrently.
*/
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
/*
* On 32-bit architectures this may race with the
* sample_delay_ns read in dbs_update_util_handler(), but that
@@ -76,7 +76,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
* taken, so it shouldn't be significant.
*/
gov_update_sample_delay(policy_dbs, 0);
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
return count;
@@ -117,7 +117,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int ignore_nice = dbs_data->ignore_nice_load;
- unsigned int max_load = 0;
+ unsigned int max_load = 0, idle_periods = UINT_MAX;
unsigned int sampling_rate, io_busy, j;
/*
@@ -215,9 +215,19 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_load = load;
}
+ if (time_elapsed > 2 * sampling_rate) {
+ unsigned int periods = time_elapsed / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
+ }
+
if (load > max_load)
max_load = load;
}
+
+ policy_dbs->idle_periods = idle_periods;
+
return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
@@ -236,9 +246,9 @@ static void dbs_work_handler(struct work_struct *work)
* Make sure cpufreq_governor_limits() isn't evaluating load or the
* ondemand governor isn't updating the sampling rate in parallel.
*/
- mutex_lock(&policy_dbs->timer_mutex);
- gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
+ gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
+ mutex_unlock(&policy_dbs->update_mutex);
/* Allow the utilization update handler to queue up more work. */
atomic_set(&policy_dbs->work_count, 0);
@@ -348,7 +358,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
return NULL;
policy_dbs->policy = policy;
- mutex_init(&policy_dbs->timer_mutex);
+ mutex_init(&policy_dbs->update_mutex);
atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
INIT_WORK(&policy_dbs->work, dbs_work_handler);
@@ -367,7 +377,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
{
int j;
- mutex_destroy(&policy_dbs->timer_mutex);
+ mutex_destroy(&policy_dbs->update_mutex);
for_each_cpu(j, policy_dbs->policy->related_cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
@@ -547,10 +557,10 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index ef1037e9c92b..f5717ca070cc 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -85,7 +85,7 @@ struct policy_dbs_info {
* Per policy mutex that serializes load evaluation from limit-change
* and work-handler.
*/
- struct mutex timer_mutex;
+ struct mutex update_mutex;
u64 last_sample_time;
s64 sample_delay_ns;
@@ -97,6 +97,7 @@ struct policy_dbs_info {
struct list_head list;
/* Multiplier for increasing sample delay temporarily. */
unsigned int rate_mult;
+ unsigned int idle_periods; /* For conservative */
/* Status indicators */
bool is_shared; /* This object is used by multiple CPUs */
bool work_in_progress; /* Work is being queued up or in progress */
@@ -135,7 +136,7 @@ struct dbs_governor {
*/
struct dbs_data *gdbs_data;
- unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+ unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
struct policy_dbs_info *(*alloc)(void);
void (*free)(struct policy_dbs_info *policy_dbs);
int (*init)(struct dbs_data *dbs_data);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3a1f49f5f4c6..4a017e895296 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -25,7 +25,7 @@
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
-#define MIN_FREQUENCY_UP_THRESHOLD (11)
+#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
static struct od_ops od_ops;
@@ -169,7 +169,7 @@ static void od_update(struct cpufreq_policy *policy)
}
}
-static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
@@ -191,7 +191,7 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
od_update(policy);
if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
+ /* Setup SUB_SAMPLE */
dbs_info->sample_type = OD_SUB_SAMPLE;
return dbs_info->freq_hi_delay_us;
}
@@ -255,11 +255,11 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
- * rate_mult values in od_update() and od_dbs_timer().
+ * rate_mult values in od_update() and od_dbs_update().
*/
- mutex_lock(&policy_dbs->timer_mutex);
+ mutex_lock(&policy_dbs->update_mutex);
policy_dbs->rate_mult = 1;
- mutex_unlock(&policy_dbs->timer_mutex);
+ mutex_unlock(&policy_dbs->update_mutex);
}
return count;
@@ -374,8 +374,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
/*
* In nohz/micro accounting case we set the minimum frequency
- * not depending on HZ, but fixed (very low). The deferred
- * timer might skip some samples if idle/sleeping as needed.
+ * not depending on HZ, but fixed (very low).
*/
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
@@ -415,7 +414,7 @@ static struct od_ops od_ops = {
static struct dbs_governor od_dbs_gov = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
.kobj_type = { .default_attrs = od_attributes },
- .gov_dbs_timer = od_dbs_timer,
+ .gov_dbs_update = od_dbs_update,
.alloc = od_alloc,
.free = od_free,
.init = od_init,
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 06d3abdffd3a..ac284e66839c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -41,6 +41,18 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
return 0;
}
+static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
+{
+ unsigned int count = stats->max_state;
+
+ memset(stats->time_in_state, 0, count * sizeof(u64));
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ memset(stats->trans_table, 0, count * count * sizeof(int));
+#endif
+ stats->last_time = get_jiffies_64();
+ stats->total_trans = 0;
+}
+
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%d\n", policy->stats->total_trans);
@@ -64,6 +76,14 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
return len;
}
+static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ /* We don't care what is written to the attribute. */
+ cpufreq_stats_clear_table(policy->stats);
+ return count;
+}
+
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
@@ -113,10 +133,12 @@ cpufreq_freq_attr_ro(trans_table);
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
+cpufreq_freq_attr_wo(reset);
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
+ &reset.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
&trans_table.attr,
#endif
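[Editorial note] The two hunks above add a write-only "reset" attribute to the cpufreq stats sysfs group; writing anything to it clears time_in_state, trans_table and total_trans. Below is a minimal userspace sketch of how the new attribute could be exercised; the sysfs path and the choice of cpu0 are assumptions for illustration, not part of the patch, and the file is typically writable by root only.

/* Hypothetical userspace helper for the new cpufreq stats "reset" attribute. */
#include <stdio.h>

int main(void)
{
	/* Assumed standard location of the per-CPU cpufreq stats directory. */
	const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/stats/reset";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* The kernel side ignores the value written; any write triggers the reset. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}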
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
deleted file mode 100644
index 79e3ff2771a6..000000000000
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * CPU support functions
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <asm/mach-types.h>
-#include <asm/hardware/icst.h>
-
-static void __iomem *cm_base;
-/* The cpufreq driver only use the OSC register */
-#define INTEGRATOR_HDR_OSC_OFFSET 0x08
-#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
-
-static struct cpufreq_driver integrator_driver;
-
-static const struct icst_params lclk_params = {
- .ref = 24000000,
- .vco_max = ICST525_VCO_MAX_5V,
- .vco_min = ICST525_VCO_MIN,
- .vd_min = 8,
- .vd_max = 132,
- .rd_min = 24,
- .rd_max = 24,
- .s2div = icst525_s2div,
- .idx2s = icst525_idx2s,
-};
-
-static const struct icst_params cclk_params = {
- .ref = 24000000,
- .vco_max = ICST525_VCO_MAX_5V,
- .vco_min = ICST525_VCO_MIN,
- .vd_min = 12,
- .vd_max = 160,
- .rd_min = 24,
- .rd_max = 24,
- .s2div = icst525_s2div,
- .idx2s = icst525_idx2s,
-};
-
-/*
- * Validate the speed policy.
- */
-static int integrator_verify_policy(struct cpufreq_policy *policy)
-{
- struct icst_vco vco;
-
- cpufreq_verify_within_cpu_limits(policy);
-
- vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
- policy->max = icst_hz(&cclk_params, vco) / 1000;
-
- vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
- policy->min = icst_hz(&cclk_params, vco) / 1000;
-
- cpufreq_verify_within_cpu_limits(policy);
- return 0;
-}
-
-
-static int integrator_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- cpumask_t cpus_allowed;
- int cpu = policy->cpu;
- struct icst_vco vco;
- struct cpufreq_freqs freqs;
- u_int cm_osc;
-
- /*
- * Save this threads cpus_allowed mask.
- */
- cpus_allowed = current->cpus_allowed;
-
- /*
- * Bind to the specified CPU. When this call returns,
- * we should be running on the right CPU.
- */
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- BUG_ON(cpu != smp_processor_id());
-
- /* get current setting */
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator())
- vco.s = (cm_osc >> 8) & 7;
- else if (machine_is_cintegrator())
- vco.s = 1;
- vco.v = cm_osc & 255;
- vco.r = 22;
- freqs.old = icst_hz(&cclk_params, vco) / 1000;
-
- /* icst_hz_to_vco rounds down -- so we need the next
- * larger freq in case of CPUFREQ_RELATION_L.
- */
- if (relation == CPUFREQ_RELATION_L)
- target_freq += 999;
- if (target_freq > policy->max)
- target_freq = policy->max;
- vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
- freqs.new = icst_hz(&cclk_params, vco) / 1000;
-
- if (freqs.old == freqs.new) {
- set_cpus_allowed_ptr(current, &cpus_allowed);
- return 0;
- }
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator()) {
- cm_osc &= 0xfffff800;
- cm_osc |= vco.s << 8;
- } else if (machine_is_cintegrator()) {
- cm_osc &= 0xffffff00;
- }
- cm_osc |= vco.v;
-
- __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
- __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
- __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
-
- /*
- * Restore the CPUs allowed mask.
- */
- set_cpus_allowed_ptr(current, &cpus_allowed);
-
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
- return 0;
-}
-
-static unsigned int integrator_get(unsigned int cpu)
-{
- cpumask_t cpus_allowed;
- unsigned int current_freq;
- u_int cm_osc;
- struct icst_vco vco;
-
- cpus_allowed = current->cpus_allowed;
-
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
- BUG_ON(cpu != smp_processor_id());
-
- /* detect memory etc. */
- cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
- if (machine_is_integrator())
- vco.s = (cm_osc >> 8) & 7;
- else
- vco.s = 1;
- vco.v = cm_osc & 255;
- vco.r = 22;
-
- current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
-
- set_cpus_allowed_ptr(current, &cpus_allowed);
-
- return current_freq;
-}
-
-static int integrator_cpufreq_init(struct cpufreq_policy *policy)
-{
-
- /* set default policy and cpuinfo */
- policy->max = policy->cpuinfo.max_freq = 160000;
- policy->min = policy->cpuinfo.min_freq = 12000;
- policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-
- return 0;
-}
-
-static struct cpufreq_driver integrator_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = integrator_verify_policy,
- .target = integrator_set_target,
- .get = integrator_get,
- .init = integrator_cpufreq_init,
- .name = "integrator",
-};
-
-static int __init integrator_cpufreq_probe(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!cm_base)
- return -ENODEV;
-
- return cpufreq_register_driver(&integrator_driver);
-}
-
-static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
-{
- return cpufreq_unregister_driver(&integrator_driver);
-}
-
-static const struct of_device_id integrator_cpufreq_match[] = {
- { .compatible = "arm,core-module-integrator"},
- { },
-};
-
-MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
-
-static struct platform_driver integrator_cpufreq_driver = {
- .driver = {
- .name = "integrator-cpufreq",
- .of_match_table = integrator_cpufreq_match,
- },
- .remove = __exit_p(integrator_cpufreq_remove),
-};
-
-module_platform_driver_probe(integrator_cpufreq_driver,
- integrator_cpufreq_probe);
-
-MODULE_AUTHOR("Russell M. King");
-MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4737520ec823..6acbd4af632e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,8 @@
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
+#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
#define ATOM_TURBO_RATIOS 0x66c
@@ -44,6 +46,7 @@
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#endif
#define FRAC_BITS 8
@@ -52,6 +55,8 @@
#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
+#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -122,6 +127,8 @@ struct sample {
* @scaling: Scaling factor to convert frequency to cpufreq
* frequency units
* @turbo_pstate: Max Turbo P state possible for this platform
+ * @max_freq: @max_pstate frequency in cpufreq units
+ * @turbo_freq: @turbo_pstate frequency in cpufreq units
*
* Stores the per cpu model P state limits and current P state.
*/
@@ -132,6 +139,8 @@ struct pstate_data {
int max_pstate_physical;
int scaling;
int turbo_pstate;
+ unsigned int max_freq;
+ unsigned int turbo_freq;
};
/**
@@ -177,6 +186,48 @@ struct _pid {
};
/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo: User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled: Platform turbo status either from msr
+ * MSR_IA32_MISC_ENABLE or when maximum available pstate
+ * matches the maximum turbo pstate
+ * @max_perf_pct:	Effective maximum performance limit in percentage; this
+ *			is the minimum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @min_perf_pct:	Effective minimum performance limit in percentage; this
+ *			is the maximum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @max_perf:		Scaled value between 0 and 255 for max_perf_pct,
+ *			used to limit the maximum P-state
+ * @min_perf:		Scaled value between 0 and 255 for min_perf_pct,
+ *			used to limit the minimum P-state
+ * @max_policy_pct: The maximum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @max_sysfs_pct: The maximum performance in percentage enforced by
+ * intel pstate sysfs interface, unused when per cpu
+ * controls are enforced
+ * @min_policy_pct: The minimum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @min_sysfs_pct: The minimum performance in percentage enforced by
+ * intel pstate sysfs interface, unused when per cpu
+ * controls are enforced
+ *
+ * Storage for user and policy defined limits.
+ */
+struct perf_limits {
+ int no_turbo;
+ int turbo_disabled;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
+ int min_policy_pct;
+ int min_sysfs_pct;
+};
+
+/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
* @policy: CPUFreq policy value
@@ -194,8 +245,19 @@ struct _pid {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
+ * @perf_limits:	Pointer to the perf_limits unique to this CPU;
+ *			not all fields in the structure are applicable
+ *			when per-CPU controls are enforced
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
+ * @epp_powersave: Last saved HWP energy performance preference
+ * (EPP) or energy performance bias (EPB),
+ * when policy switched to performance
+ * @epp_policy: Last saved policy used to set EPP/EPB
+ * @epp_default: Power on default HWP energy performance
+ * preference/bias
+ * @epp_saved: Saved EPP/EPB during system suspend or CPU offline
+ * operation
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -217,11 +279,16 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
+ struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
#endif
unsigned int iowait_boost;
+ s16 epp_powersave;
+ s16 epp_policy;
+ s16 epp_default;
+ s16 epp_saved;
};
static struct cpudata **all_cpu_data;
@@ -235,7 +302,6 @@ static struct cpudata **all_cpu_data;
* @p_gain_pct: PID proportional gain
* @i_gain_pct: PID integral gain
* @d_gain_pct: PID derivative gain
- * @boost_iowait: Whether or not to use iowait boosting.
*
* Stores per CPU model static PID configuration data.
*/
@@ -247,7 +313,6 @@ struct pstate_adjust_policy {
int p_gain_pct;
int d_gain_pct;
int i_gain_pct;
- bool boost_iowait;
};
/**
@@ -291,58 +356,19 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
+static bool per_cpu_limits __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
-/**
- * struct perf_limits - Store user and policy limits
- * @no_turbo: User requested turbo state from intel_pstate sysfs
- * @turbo_disabled: Platform turbo status either from msr
- * MSR_IA32_MISC_ENABLE or when maximum available pstate
- * matches the maximum turbo pstate
- * @max_perf_pct: Effective maximum performance limit in percentage, this
- * is minimum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct: Effective minimum performance limit in percentage, this
- * is maximum of either limits enforced by cpufreq policy
- * or limits from user set limits via intel_pstate sysfs
- * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct
- * This value is used to limit max pstate
- * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct
- * This value is used to limit min pstate
- * @max_policy_pct: The maximum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @max_sysfs_pct: The maximum performance in percentage enforced by
- * intel pstate sysfs interface
- * @min_policy_pct: The minimum performance in percentage enforced by
- * cpufreq setpolicy interface
- * @min_sysfs_pct: The minimum performance in percentage enforced by
- * intel pstate sysfs interface
- *
- * Storage for user and policy defined limits.
- */
-struct perf_limits {
- int no_turbo;
- int turbo_disabled;
- int max_perf_pct;
- int min_perf_pct;
- int32_t max_perf;
- int32_t min_perf;
- int max_policy_pct;
- int max_sysfs_pct;
- int min_policy_pct;
- int min_sysfs_pct;
-};
-
static struct perf_limits performance_limits = {
.no_turbo = 0,
.turbo_disabled = 0,
.max_perf_pct = 100,
- .max_perf = int_tofp(1),
+ .max_perf = int_ext_tofp(1),
.min_perf_pct = 100,
- .min_perf = int_tofp(1),
+ .min_perf = int_ext_tofp(1),
.max_policy_pct = 100,
.max_sysfs_pct = 100,
.min_policy_pct = 0,
@@ -353,7 +379,7 @@ static struct perf_limits powersave_limits = {
.no_turbo = 0,
.turbo_disabled = 0,
.max_perf_pct = 100,
- .max_perf = int_tofp(1),
+ .max_perf = int_ext_tofp(1),
.min_perf_pct = 0,
.min_perf = 0,
.max_policy_pct = 100,
@@ -368,6 +394,8 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif
+static DEFINE_MUTEX(intel_pstate_limits_lock);
+
#ifdef CONFIG_ACPI
static bool intel_pstate_get_ppc_enable_status(void)
@@ -379,14 +407,67 @@ static bool intel_pstate_get_ppc_enable_status(void)
return acpi_ppc;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return;
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+ if (max_highest_perf <= min_highest_perf) {
+ if (cppc_perf.highest_perf > max_highest_perf)
+ max_highest_perf = cppc_perf.highest_perf;
+
+ if (cppc_perf.highest_perf < min_highest_perf)
+ min_highest_perf = cppc_perf.highest_perf;
+
+ if (max_highest_perf > min_highest_perf) {
+ /*
+ * This code can be run during CPU online under the
+ * CPU hotplug locks, so sched_set_itmt_support()
+ * cannot be called from here. Queue up a work item
+ * to invoke it.
+ */
+ schedule_work(&sched_itmt_work);
+ }
+ }
+}
+#else
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif
+
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int ret;
int i;
- if (hwp_active)
+ if (hwp_active) {
+ intel_pstate_set_itmt_prio(policy->cpu);
return;
+ }
if (!intel_pstate_get_ppc_enable_status())
return;
@@ -459,11 +540,11 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
}
#else
-static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}
-static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif
@@ -559,24 +640,252 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return (s16)ret;
+
+ return (s16)(epb & 0x0f);
+}
+
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+ s16 epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+		 * When hwp_req_data is 0, the caller has not read
+		 * MSR_HWP_REQUEST yet, so read it here to get the EPP.
+ */
+ if (!hwp_req_data) {
+ epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ &hwp_req_data);
+ if (epp)
+ return epp;
+ }
+ epp = (hwp_req_data >> 24) & 0xff;
+ } else {
+ /* When there is no EPP present, HWP uses EPB settings */
+ epp = intel_pstate_get_epb(cpu_data);
+ }
+
+ return epp;
+}
+
+static int intel_pstate_set_epb(int cpu, s16 pref)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return ret;
+
+ epb = (epb & ~0x0f) | pref;
+ wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+
+ return 0;
+}
+
+/*
+ * EPP/EPB display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+static const char * const energy_perf_strings[] = {
+ "default",
+ "performance",
+ "balance_performance",
+ "balance_power",
+ "power",
+ NULL
+};
+
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = intel_pstate_get_epp(cpu_data, 0);
+ if (epp < 0)
+ return epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+ * Range:
+ * 0x00-0x3F : Performance
+ * 0x40-0x7F : Balance performance
+ * 0x80-0xBF : Balance power
+ * 0xC0-0xFF : Power
+	 * The EPP is an 8-bit value, but our ranges restrict the
+	 * values that can be set; effectively only the top two
+	 * bits are used here.
+ */
+ index = (epp >> 6) + 1;
+ } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ /*
+ * Range:
+ * 0x00-0x03 : Performance
+ * 0x04-0x07 : Balance performance
+ * 0x08-0x0B : Balance power
+ * 0x0C-0x0F : Power
+	 * The EPB is a 4-bit value, but our ranges restrict the
+	 * values that can be set; effectively only the top two
+	 * bits are used here.
+ */
+ index = (epp >> 2) + 1;
+ }
+
+ return index;
+}
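[Editorial note] A standalone sketch of the range-to-index mapping implemented by intel_pstate_get_energy_pref_index() above, assuming the same bucket boundaries described in the comments (the top two bits of the 8-bit EPP or 4-bit EPB select one of the four non-default preferences); this is an illustration, not kernel code.

#include <stdio.h>

static int epp_to_index(int epp)	/* 8-bit HWP EPP value */
{
	/* 0x00-0x3F -> 1 (performance) ... 0xC0-0xFF -> 4 (power) */
	return (epp >> 6) + 1;
}

static int epb_to_index(int epb)	/* 4-bit EPB value */
{
	/* 0x00-0x03 -> 1 (performance) ... 0x0C-0x0F -> 4 (power) */
	return (epb >> 2) + 1;
}

int main(void)
{
	printf("EPP 0x80 -> index %d (balance_power)\n", epp_to_index(0x80));
	printf("EPB 0x06 -> index %d (balance_performance)\n", epb_to_index(0x06));
	return 0;
}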
+
+static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index)
+ epp = cpu_data->epp_default;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ u64 value;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
+ if (ret)
+ goto return_pref;
+
+ value &= ~GENMASK_ULL(31, 24);
+
+ /*
+	 * If epp is not the default, convert the index into
+	 * energy_perf_strings to an epp value by shifting it
+	 * left by 6 bits, so that only the top two bits of epp
+	 * are used. The resulting epp then has to be shifted
+	 * left by 24 bits into the EPP field of MSR_HWP_REQUEST.
+ */
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 6;
+
+ value |= (u64)epp << 24;
+ ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+ } else {
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 2;
+ ret = intel_pstate_set_epb(cpu_data->cpu, epp);
+ }
+return_pref:
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int ret = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
+
+ ret += sprintf(&buf[ret], "\n");
+
+ return ret;
+}
+
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ char str_preference[21];
+ int ret, i = 0;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ while (energy_perf_strings[i] != NULL) {
+ if (!strcmp(str_preference, energy_perf_strings[i])) {
+ intel_pstate_set_energy_pref_index(cpu_data, i);
+ return count;
+ }
+ ++i;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ int preference;
+
+ preference = intel_pstate_get_energy_pref_index(cpu_data);
+ if (preference < 0)
+ return preference;
+
+ return sprintf(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+cpufreq_freq_attr_rw(energy_performance_preference);
+
+static struct freq_attr *hwp_cpufreq_attrs[] = {
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
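[Editorial note] Once the attribute array above is wired into the driver (intel_pstate.attr is set further down in this patch), the preference is exposed per policy in sysfs. A hedged userspace sketch follows, assuming the usual per-CPU cpufreq directory layout; the path and cpu0 are illustrative assumptions.

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f;

	/* Assumed path for CPU0; adjust for the CPU of interest. */
	f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
		  "energy_performance_available_preferences", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("available: %s", buf);
		fclose(f);
	}

	f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
		  "energy_performance_preference", "w");
	if (f) {
		/* Must be one of the strings listed above. */
		fputs("balance_power\n", f);
		fclose(f);
	}
	return 0;
}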
+
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
+ struct perf_limits *perf_limits = limits;
u64 value, cap;
for_each_cpu(cpu, cpumask) {
+ int max_perf_pct, min_perf_pct;
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ s16 epp;
+
+ if (per_cpu_limits)
+ perf_limits = all_cpu_data[cpu]->perf_limits;
+
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;
+ max_perf_pct = perf_limits->max_perf_pct;
+ min_perf_pct = perf_limits->min_perf_pct;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- adj_range = limits->min_perf_pct * range / 100;
+ adj_range = min_perf_pct * range / 100;
min = hw_min + adj_range;
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- adj_range = limits->max_perf_pct * range / 100;
+ adj_range = max_perf_pct * range / 100;
max = hw_min + adj_range;
if (limits->no_turbo) {
hw_max = HWP_GUARANTEED_PERF(cap);
@@ -586,6 +895,53 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
+
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
+
+ cpu_data->epp_policy = cpu_data->policy;
+
+ if (cpu_data->epp_saved >= 0) {
+ epp = cpu_data->epp_saved;
+ cpu_data->epp_saved = -EINVAL;
+ goto update_epp;
+ }
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ cpu_data->epp_powersave = epp;
+			/* If the EPP read failed, don't try to write it back */
+ if (epp < 0)
+ goto skip_epp;
+
+ epp = 0;
+ } else {
+			/* skip setting EPP when the saved value is invalid */
+ if (cpu_data->epp_powersave < 0)
+ goto skip_epp;
+
+ /*
+			 * No need to restore EPP when it is not zero.
+			 * This means one of:
+			 *  - the policy has not changed
+			 *  - the user has changed it manually
+			 *  - there was an error reading the EPB
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
+
+ epp = cpu_data->epp_powersave;
+ }
+update_epp:
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
+ }
+skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
}
@@ -598,6 +954,28 @@ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
return 0;
}
+static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+
+ if (!hwp_active)
+ return 0;
+
+ cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
+
+ return 0;
+}
+
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+ if (!hwp_active)
+ return 0;
+
+ all_cpu_data[policy->cpu]->epp_policy = 0;
+
+ return intel_pstate_hwp_set_policy(policy);
+}
+
static void intel_pstate_hwp_set_online_cpus(void)
{
get_online_cpus();
@@ -640,8 +1018,10 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
- if (hwp_active)
+ if (hwp_active ||
+ pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
return;
+
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@@ -714,9 +1094,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
update_turbo_state();
if (limits->turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ mutex_unlock(&intel_pstate_limits_lock);
return -EPERM;
}
@@ -725,6 +1108,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -738,6 +1123,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits->max_perf_pct = min(limits->max_policy_pct,
limits->max_sysfs_pct);
@@ -745,10 +1132,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -762,6 +1152,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
+ mutex_lock(&intel_pstate_limits_lock);
+
limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
limits->min_perf_pct = max(limits->min_policy_pct,
limits->min_sysfs_pct);
@@ -769,10 +1161,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_fp(limits->min_perf_pct, 100);
+ limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
return count;
}
@@ -787,8 +1182,6 @@ define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
&no_turbo.attr,
- &max_perf_pct.attr,
- &min_perf_pct.attr,
&turbo_pct.attr,
&num_pstates.attr,
NULL
@@ -805,9 +1198,26 @@ static void __init intel_pstate_sysfs_expose_params(void)
intel_pstate_kobject = kobject_create_and_add("intel_pstate",
&cpu_subsys.dev_root->kobj);
- BUG_ON(!intel_pstate_kobject);
+ if (WARN_ON(!intel_pstate_kobject))
+ return;
+
rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
- BUG_ON(rc);
+ if (WARN_ON(rc))
+ return;
+
+ /*
+ * If per cpu limits are enforced there are no global limits, so
+ * return without creating max/min_perf_pct attributes
+ */
+ if (per_cpu_limits)
+ return;
+
+ rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
+ WARN_ON(rc);
+
+ rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
+ WARN_ON(rc);
+
}
/************************** sysfs end ************************/
@@ -818,6 +1228,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ cpudata->epp_policy = 0;
+ if (cpudata->epp_default == -EINVAL)
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
static int atom_get_min_pstate(void)
@@ -1045,7 +1458,6 @@ static const struct cpu_defaults silvermont_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1067,7 +1479,6 @@ static const struct cpu_defaults airmont_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1109,7 +1520,6 @@ static const struct cpu_defaults bxt_params = {
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
- .boost_iowait = true,
},
.funcs = {
.get_max = core_get_max_pstate,
@@ -1127,20 +1537,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf;
+ struct perf_limits *perf_limits = limits;
if (limits->no_turbo || limits->turbo_disabled)
max_perf = cpu->pstate.max_pstate;
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
/*
* performance can be limited by user through sysfs, by cpufreq
* policy, or by cpu specific default values determined through
* experimentation.
*/
- max_perf_adj = fp_toint(max_perf * limits->max_perf);
+ max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
- min_perf = fp_toint(max_perf * limits->min_perf);
+ min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
@@ -1178,6 +1592,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
@@ -1316,15 +1732,19 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
-static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
int max_perf, min_perf;
- update_turbo_state();
-
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ return pstate;
+}
+
+static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+ pstate = intel_pstate_prepare_request(cpu, pstate);
if (pstate == cpu->pstate.current_pstate)
return;
@@ -1342,6 +1762,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
+ update_turbo_state();
+
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
@@ -1362,7 +1784,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
- if (pid_params.boost_iowait) {
+ if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
if (flags & SCHED_CPUFREQ_IOWAIT) {
cpu->iowait_boost = int_tofp(1);
} else if (cpu->iowait_boost) {
@@ -1408,6 +1830,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_params),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
{}
};
@@ -1424,11 +1847,26 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
- if (!all_cpu_data[cpunum])
- all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
- GFP_KERNEL);
- if (!all_cpu_data[cpunum])
- return -ENOMEM;
+ cpu = all_cpu_data[cpunum];
+
+ if (!cpu) {
+ unsigned int size = sizeof(struct cpudata);
+
+ if (per_cpu_limits)
+ size += sizeof(struct perf_limits);
+
+ cpu = kzalloc(size, GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ all_cpu_data[cpunum] = cpu;
+ if (per_cpu_limits)
+ cpu->perf_limits = (struct perf_limits *)(cpu + 1);
+
+ cpu->epp_default = -EINVAL;
+ cpu->epp_powersave = -EINVAL;
+ cpu->epp_saved = -EINVAL;
+ }
cpu = all_cpu_data[cpunum];
@@ -1487,18 +1925,57 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
limits->no_turbo = 0;
limits->turbo_disabled = 0;
limits->max_perf_pct = 100;
- limits->max_perf = int_tofp(1);
+ limits->max_perf = int_ext_tofp(1);
limits->min_perf_pct = 100;
- limits->min_perf = int_tofp(1);
+ limits->min_perf = int_ext_tofp(1);
limits->max_policy_pct = 100;
limits->max_sysfs_pct = 100;
limits->min_policy_pct = 0;
limits->min_sysfs_pct = 0;
}
+static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
+ struct perf_limits *limits)
+{
+
+ limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+ policy->cpuinfo.max_freq);
+ limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+ if (policy->max == policy->min) {
+ limits->min_policy_pct = limits->max_policy_pct;
+ } else {
+ limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
+ policy->cpuinfo.max_freq);
+ limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
+ 0, 100);
+ }
+
+ /* Normalize user input to [min_policy_pct, max_policy_pct] */
+ limits->min_perf_pct = max(limits->min_policy_pct,
+ limits->min_sysfs_pct);
+ limits->min_perf_pct = min(limits->max_policy_pct,
+ limits->min_perf_pct);
+ limits->max_perf_pct = min(limits->max_policy_pct,
+ limits->max_sysfs_pct);
+ limits->max_perf_pct = max(limits->min_policy_pct,
+ limits->max_perf_pct);
+
+ /* Make sure min_perf_pct <= max_perf_pct */
+ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+
+ limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+ limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
+ limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+
+ pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
+ limits->max_perf_pct, limits->min_perf_pct);
+}
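[Editorial note] A worked example of the normalization done by intel_pstate_update_perf_limits() above, using plain integer arithmetic in place of the driver's div_ext_fp()/EXT_FRAC_BITS fixed-point conversion (omitted here as an assumption for illustration only). The input frequencies and sysfs percentages below are invented.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int policy_min = 1200000, policy_max = 2000000, cpuinfo_max = 2400000; /* kHz */
	int min_sysfs_pct = 0, max_sysfs_pct = 90;

	int max_policy_pct = clamp_int(DIV_ROUND_UP(policy_max * 100, cpuinfo_max), 0, 100); /* 84 */
	int min_policy_pct = clamp_int(DIV_ROUND_UP(policy_min * 100, cpuinfo_max), 0, 100); /* 50 */

	/* Normalize the user input to [min_policy_pct, max_policy_pct]. */
	int min_perf_pct = min_policy_pct > min_sysfs_pct ? min_policy_pct : min_sysfs_pct;
	if (min_perf_pct > max_policy_pct)
		min_perf_pct = max_policy_pct;
	int max_perf_pct = max_policy_pct < max_sysfs_pct ? max_policy_pct : max_sysfs_pct;
	if (max_perf_pct < min_policy_pct)
		max_perf_pct = min_policy_pct;

	/* Make sure min_perf_pct <= max_perf_pct. */
	if (min_perf_pct > max_perf_pct)
		min_perf_pct = max_perf_pct;

	printf("max_perf_pct=%d min_perf_pct=%d\n", max_perf_pct, min_perf_pct); /* 84 and 50 */
	return 0;
}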
+
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
+ struct perf_limits *perf_limits = NULL;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
@@ -1516,41 +1993,31 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
- limits = &performance_limits;
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (!perf_limits) {
+ limits = &performance_limits;
+ perf_limits = limits;
+ }
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
- intel_pstate_set_performance_limits(limits);
+ intel_pstate_set_performance_limits(perf_limits);
goto out;
}
} else {
pr_debug("set powersave\n");
- limits = &powersave_limits;
- }
-
- limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
- limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
- limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
- policy->cpuinfo.max_freq);
- limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
-
- /* Normalize user input to [min_policy_pct, max_policy_pct] */
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
-
- /* Make sure min_perf_pct <= max_perf_pct */
- limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+ if (!perf_limits) {
+ limits = &powersave_limits;
+ perf_limits = limits;
+ }
- limits->min_perf = div_fp(limits->min_perf_pct, 100);
- limits->max_perf = div_fp(limits->max_perf_pct, 100);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
+ }
+ intel_pstate_update_perf_limits(policy, perf_limits);
out:
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -1565,6 +2032,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_hwp_set_policy(policy);
+ mutex_unlock(&intel_pstate_limits_lock);
+
return 0;
}
@@ -1579,22 +2048,32 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
return 0;
}
+static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+{
+ intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+}
+
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- int cpu_num = policy->cpu;
- struct cpudata *cpu = all_cpu_data[cpu_num];
+ pr_debug("CPU %d exiting\n", policy->cpu);
- pr_debug("CPU %d exiting\n", cpu_num);
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ if (hwp_active)
+ intel_pstate_hwp_save_state(policy);
+ else
+ intel_cpufreq_stop_cpu(policy);
+}
- intel_pstate_clear_update_util_hook(cpu_num);
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ intel_pstate_exit_perf_limits(policy);
- if (hwp_active)
- return;
+ policy->fast_switch_possible = false;
- intel_pstate_set_min_pstate(cpu);
+ return 0;
}
-static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int rc;
@@ -1605,10 +2084,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ /*
+	 * We need a sane value in cpu->perf_limits, so inherit from the
+	 * global perf_limits, which are seeded with values based on
+	 * CONFIG_CPU_FREQ_DEFAULT_GOV_* during boot-up.
+ */
+ if (per_cpu_limits)
+ memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -1621,24 +2103,35 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
intel_pstate_init_acpi_perf_limits(policy);
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
+ policy->fast_switch_possible = true;
+
return 0;
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
- intel_pstate_exit_perf_limits(policy);
+ int ret = __intel_pstate_cpu_init(policy);
+
+ if (ret)
+ return ret;
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
return 0;
}
-static struct cpufreq_driver intel_pstate_driver = {
+static struct cpufreq_driver intel_pstate = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .resume = intel_pstate_hwp_set_policy,
+ .suspend = intel_pstate_hwp_save_state,
+ .resume = intel_pstate_resume,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
@@ -1646,6 +2139,118 @@ static struct cpufreq_driver intel_pstate_driver = {
.name = "intel_pstate",
};
+static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct perf_limits *perf_limits = limits;
+
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ if (per_cpu_limits)
+ perf_limits = cpu->perf_limits;
+
+ intel_pstate_update_perf_limits(policy, perf_limits);
+
+ return 0;
+}
+
+static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
+ struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ unsigned int max_freq;
+
+ update_turbo_state();
+
+ max_freq = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ if (policy->max > max_freq)
+ policy->max = max_freq;
+
+ if (target_freq > max_freq)
+ target_freq = max_freq;
+
+ return target_freq;
+}
+
+static int intel_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct cpufreq_freqs freqs;
+ int target_pstate;
+
+ freqs.old = policy->cur;
+ freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+ target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+ break;
+ case CPUFREQ_RELATION_H:
+ target_pstate = freqs.new / cpu->pstate.scaling;
+ break;
+ default:
+ target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+ break;
+ }
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ if (target_pstate != cpu->pstate.current_pstate) {
+ cpu->pstate.current_pstate = target_pstate;
+ wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+ }
+ cpufreq_freq_transition_end(policy, &freqs, false);
+
+ return 0;
+}
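[Editorial note] The relation handling in intel_cpufreq_target() above reduces to a choice of rounding when converting a frequency request into a P-state. A small sketch of that arithmetic follows; the scaling value of 100000 kHz per P-state step and the target frequency are assumptions for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)		(((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int scaling = 100000;		/* kHz per P-state step (assumed) */
	unsigned int target_freq = 1550000;	/* 1.55 GHz request */

	printf("RELATION_L -> pstate %u\n", DIV_ROUND_UP(target_freq, scaling));	/* 16 */
	printf("RELATION_H -> pstate %u\n", target_freq / scaling);			/* 15 */
	printf("default    -> pstate %u\n", DIV_ROUND_CLOSEST(target_freq, scaling));	/* 16 */
	return 0;
}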
+
+static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int target_pstate;
+
+ target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+ target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ intel_pstate_update_pstate(cpu, target_pstate);
+ return target_freq;
+}
+
+static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int ret = __intel_pstate_cpu_init(policy);
+
+ if (ret)
+ return ret;
+
+ policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+ /* This reflects the intel_pstate_get_cpu_pstates() setting. */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_cpufreq = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_cpufreq_verify_policy,
+ .target = intel_cpufreq_target,
+ .fast_switch = intel_cpufreq_fast_switch,
+ .init = intel_cpufreq_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .stop_cpu = intel_cpufreq_stop_cpu,
+ .name = "intel_cpufreq",
+};
+
+static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
@@ -1672,6 +2277,19 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
pid_params.setpoint = policy->setpoint;
}
+#ifdef CONFIG_ACPI
+static void intel_pstate_use_acpi_profile(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
+ pstate_funcs.get_target_pstate =
+ get_target_pstate_use_cpu_load;
+}
+#else
+static void intel_pstate_use_acpi_profile(void)
+{
+}
+#endif
+
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
@@ -1683,6 +2301,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.get_target_pstate = funcs->get_target_pstate;
+ intel_pstate_use_acpi_profile();
}
#ifdef CONFIG_ACPI
@@ -1796,9 +2415,20 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
return false;
}
+
+static void intel_pstate_request_control_from_smm(void)
+{
+ /*
+ * It may be unsafe to request P-states control from SMM if _PPC support
+ * has not been enabled.
+ */
+ if (acpi_ppc)
+ acpi_processor_pstate_control();
+}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
@@ -1818,6 +2448,7 @@ static int __init intel_pstate_init(void)
if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
copy_cpu_funcs(&core_params.funcs);
hwp_active++;
+ intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
@@ -1850,7 +2481,9 @@ hwp_cpu_matched:
if (!hwp_active && hwp_only)
goto out;
- rc = cpufreq_register_driver(&intel_pstate_driver);
+ intel_pstate_request_control_from_smm();
+
+ rc = cpufreq_register_driver(intel_pstate_driver);
if (rc)
goto out;
@@ -1865,7 +2498,9 @@ out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
- intel_pstate_clear_update_util_hook(cpu);
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_clear_update_util_hook(cpu);
+
kfree(all_cpu_data[cpu]);
}
}
@@ -1881,8 +2516,13 @@ static int __init intel_pstate_setup(char *str)
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable"))
+ if (!strcmp(str, "disable")) {
no_load = 1;
+ } else if (!strcmp(str, "passive")) {
+ pr_info("Passive mode enabled\n");
+ intel_pstate_driver = &intel_cpufreq;
+ no_hwp = 1;
+ }
if (!strcmp(str, "no_hwp")) {
pr_info("HWP disabled\n");
no_hwp = 1;
@@ -1891,6 +2531,8 @@ static int __init intel_pstate_setup(char *str)
force_load = 1;
if (!strcmp(str, "hwp_only"))
hwp_only = 1;
+ if (!strcmp(str, "per_cpu_perf_limits"))
+ per_cpu_limits = true;
#ifdef CONFIG_ACPI
if (!strcmp(str, "support_acpi_ppc"))
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index d3ffde806629..37671b545880 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -42,6 +42,10 @@
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define LPSTATE_SHIFT 48
+#define GPSTATE_SHIFT 56
+#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
+#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -592,7 +596,8 @@ void gpstate_timer_handler(unsigned long data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
struct global_pstate_info *gpstates = policy->driver_data;
- int gpstate_idx;
+ int gpstate_idx, lpstate_idx;
+ unsigned long val;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
@@ -600,21 +605,37 @@ void gpstate_timer_handler(unsigned long data)
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+	 * If the PMCR was last updated via fast_switch(), the value in
+	 * gpstates->last_lpstate_idx may be stale. Hence, read the PMCR
+	 * to get correct data.
+ */
+ val = get_pmspr(SPRN_PMCR);
+ freq_data.gpstate_id = (s8)GET_GPSTATE(val);
+ freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ if (freq_data.gpstate_id == freq_data.pstate_id) {
+ reset_gpstates(policy);
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
+
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
- freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);
- if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
- (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
gpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ lpstate_idx = gpstate_idx;
reset_gpstates(policy);
gpstates->highest_lpstate_idx = gpstate_idx;
} else {
+ lpstate_idx = pstate_to_idx(freq_data.pstate_id);
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
- gpstates->last_lpstate_idx);
+ lpstate_idx);
}
-
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = lpstate_idx;
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
@@ -622,10 +643,6 @@ void gpstate_timer_handler(unsigned long data)
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
- freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
- gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
- gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
-
spin_unlock(&gpstates->gpstate_lock);
/* Timer may get migrated to a different cpu on cpu hot unplug */
@@ -647,8 +664,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
- if (!throttled)
+ if (!throttled) {
+		/*
+		 * We don't want to be preempted while checking
+		 * whether the CPU frequency has been throttled.
+		 */
+ preempt_disable();
powernv_cpufreq_throttle_check(NULL);
+ preempt_enable();
+ }
cur_msec = jiffies_to_msecs(get_jiffies_64());
@@ -752,9 +775,12 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
spin_lock_init(&gpstates->gpstate_lock);
ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
- if (ret < 0)
+ if (ret < 0) {
kfree(policy->driver_data);
+ return ret;
+ }
+ policy->fast_switch_possible = true;
return ret;
}
@@ -897,6 +923,20 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
del_timer_sync(&gpstates->timer);
}
+static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ int index;
+ struct powernv_smp_call_data freq_data;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+ freq_data.pstate_id = powernv_freqs[index].driver_data;
+ freq_data.gpstate_id = powernv_freqs[index].driver_data;
+ set_pstate(&freq_data);
+
+ return powernv_freqs[index].frequency;
+}
+
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
@@ -904,6 +944,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
+ .fast_switch = powernv_fast_switch,
.get = powernv_cpufreq_get,
.stop_cpu = powernv_cpufreq_stop_cpu,
.attr = powernv_cpu_freq_attr,
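[Editorial note] The GET_LPSTATE()/GET_GPSTATE() macros added at the top of the powernv-cpufreq.c diff simply pick bytes 6 and 7 out of the PMCR image, which the timer handler above then treats as signed 8-bit P-states. A standalone sketch of that extraction, with an invented register value:

#include <stdio.h>

#define LPSTATE_SHIFT	48
#define GPSTATE_SHIFT	56
#define GET_LPSTATE(x)	(((x) >> LPSTATE_SHIFT) & 0xFF)
#define GET_GPSTATE(x)	(((x) >> GPSTATE_SHIFT) & 0xFF)

int main(void)
{
	unsigned long long pmcr = 0xF2F8000000000000ULL;	/* example value only */

	/* P-states on POWER are signed 8-bit values, hence the s8 casts in
	 * the driver; here the global P-state is -14 and the local one -8. */
	printf("gpstate = %d\n", (signed char)GET_GPSTATE(pmcr));
	printf("lpstate = %d\n", (signed char)GET_LPSTATE(pmcr));
	return 0;
}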
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7fe442ca38f4..0835a37a5f3a 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -22,7 +22,7 @@
#define POWERNV_THRESHOLD_LATENCY_NS 200000
-struct cpuidle_driver powernv_idle_driver = {
+static struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
.owner = THIS_MODULE,
};
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index c73207abb5a4..62810ff3b00f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -97,7 +97,23 @@ static int find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
-#ifdef CONFIG_SUSPEND
+/**
+ * cpuidle_use_deepest_state - Set/clear governor override flag.
+ * @enable: New value of the flag.
+ *
+ * Set/clear the flag that makes the current CPU use the deepest available
+ * idle state (overriding governors going forward if set).
+ */
+void cpuidle_use_deepest_state(bool enable)
+{
+ struct cpuidle_device *dev;
+
+ preempt_disable();
+ dev = cpuidle_get_device();
+ dev->use_deepest_state = enable;
+ preempt_enable();
+}
+
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
@@ -109,6 +125,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
+#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ffca4fc0061d 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
* state enter function.
*/
idle_state->enter = match_id->data;
+ /*
+ * Since this is not a "coupled" state, it's safe to assume interrupts
+	 * won't be enabled when it exits, allowing the tick to be frozen
+	 * safely. So enter() can also be used as the enter_freeze() callback.
+ */
+ idle_state->enter_freeze = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index fb9f511cca23..4e78263e34a4 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -9,7 +9,6 @@
*/
#include <linux/mutex.h>
-#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
if (cpuidle_curr_governor) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_disable_device(dev);
- module_put(cpuidle_curr_governor->owner);
}
cpuidle_curr_governor = gov;
if (gov) {
- if (!try_module_get(cpuidle_curr_governor->owner))
- return -EINVAL;
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 63bd5a403e22..fe8f08948fcb 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
-#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
.enable = ladder_enable_device,
.select = ladder_select_state,
.reflect = ladder_reflect,
- .owner = THIS_MODULE,
};
/**
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 03d38c291de6..d9b5b9398a0f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,6 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
-#include <linux/module.h>
/*
* Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
.enable = menu_enable_device,
.select = menu_select,
.reflect = menu_reflect,
- .owner = THIS_MODULE,
};
/**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 832a2c3f01ff..c5adc8c9ac43 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -403,8 +403,10 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
/* state statistics */
for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
- if (!kobj)
+ if (!kobj) {
+ ret = -ENOMEM;
goto error_state;
+ }
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39139e9..d10b4ae5e0da 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_out_dma_addr, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device,
- ctx->sa_len * 4,
+ dma_free_coherent(ctx->dev->core_dev->device, size * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
return -ENOMEM;
}
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951bb70b1..0ec04407b533 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
+#define AES_MR_OPMOD_XTS (0x6 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@@ -67,6 +68,9 @@
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
+#define AES_TWR(x) (0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x) (0xd0 + ((x) * 0x04))
+
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..0e3d0d655b96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
AES_FLAGS_ENCRYPT | \
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
+ bool has_xts;
u32 max_burst_size;
};
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
atmel_aes_fn_t ghash_resume;
};
+struct atmel_aes_xts_ctx {
+ struct atmel_aes_base_ctx base;
+
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
struct atmel_aes_reqctx {
unsigned long mode;
};
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
break;
+ case AES_TWR(0):
+ case AES_TWR(1):
+ case AES_TWR(2):
+ case AES_TWR(3):
+ snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+ break;
+
+ case AES_ALPHAR(0):
+ case AES_ALPHAR(1):
+ case AES_ALPHAR(2):
+ case AES_ALPHAR(3):
+ snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+ break;
+
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
char tmp[16];
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
- atmel_aes_reg_name(offset, tmp));
+ atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
}
#endif /* VERBOSE_DEBUG */
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
return err;
}
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
/* MR register must be set before IV registers */
- if (dd->ctx->keylen == AES_KEYSIZE_128)
+ if (keylen == AES_KEYSIZE_128)
valmr |= AES_MR_KEYSIZE_128;
- else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ else if (keylen == AES_KEYSIZE_192)
valmr |= AES_MR_KEYSIZE_192;
else
valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
atmel_aes_write(dd, AES_MR, valmr);
- atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
- SIZE_IN_WORDS(dd->ctx->keylen));
+ atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
atmel_aes_write_block(dd, AES_IVR(0), iv);
}
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv)
+
+{
+ atmel_aes_write_ctrl_key(dd, use_dma, iv,
+ dd->ctx->key, dd->ctx->keylen);
+}
/* CPU transfer */
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
};
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err;
+
+ atmel_aes_set_mode(dd, rctx);
+
+ err = atmel_aes_hw_init(dd);
+ if (err)
+ return atmel_aes_complete(dd, err);
+
+ /* Compute the tweak value from req->info with ecb(aes). */
+ flags = dd->flags;
+ dd->flags &= ~AES_FLAGS_MODE_MASK;
+ dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+ atmel_aes_write_ctrl_key(dd, false, NULL,
+ ctx->key2, ctx->base.keylen);
+ dd->flags = flags;
+
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+ static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ u8 *tweak_bytes = (u8 *)tweak;
+ int i;
+
+ /* Read the computed ciphered tweak value. */
+ atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+ /*
+ * Hardware quirk:
+ * the order of the ciphered tweak bytes needs to be reversed before
+ * writing them into the TWRx registers.
+ */
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+ u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+ tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+ tweak_bytes[i] = tmp;
+ }
+
+ /* Process the data. */
+ atmel_aes_write_ctrl(dd, use_dma, NULL);
+ atmel_aes_write_block(dd, AES_TWR(0), tweak);
+ atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+ if (use_dma)
+ return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int err;
+
+ err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->base.key, key, keylen/2);
+ memcpy(ctx->key2, key + keylen/2, keylen/2);
+ ctx->base.keylen = keylen/2;
+
+ return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ ctx->base.start = atmel_aes_xts_start;
+
+ return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "atmel-xts-aes",
+ .cra_priority = ATMEL_AES_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_xts_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ }
+};
+
+
/* Probe functions */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
+ if (dd->caps.has_xts)
+ crypto_unregister_alg(&aes_xts_alg);
+
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_gcm_alg;
}
+ if (dd->caps.has_xts) {
+ err = crypto_register_alg(&aes_xts_alg);
+ if (err)
+ goto err_aes_xts_alg;
+ }
+
return 0;
+err_aes_xts_alg:
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
+ dd->caps.has_xts = 0;
dd->caps.max_burst_size = 1;
/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
+ dd->caps.has_xts = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:
static int atmel_aes_remove(struct platform_device *pdev)
{
- static struct atmel_aes_dev *aes_dd;
+ struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
if (!aes_dd)
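For context on the new xts(aes) support in the atmel-aes hunks above: atmel_aes_xts_start() first encrypts req->info with the second key half using ecb(aes), then atmel_aes_xts_process_data() writes the byte-reversed result into the TWR registers and the constant 1 into ALPHAR, presumably priming the engine so it can advance the tweak itself for each successive 16-byte block. In a software implementation that advance is a multiplication by x in GF(2^128); a minimal sketch of that primitive, purely illustrative and not part of the driver:

#include <linux/types.h>

/* Multiply a 16-byte XTS tweak (little-endian convention) by x. */
static void xts_tweak_mul_x(u8 t[16])
{
	unsigned int i;
	u8 carry = 0;

	for (i = 0; i < 16; i++) {
		u8 next_carry = t[i] >> 7;

		t[i] = (u8)(t[i] << 1) | carry;
		carry = next_carry;
	}
	if (carry)
		t[0] ^= 0x87;	/* reduction by x^128 + x^7 + x^2 + x + 1 */
}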
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf3024b680..bc0d3569f8d9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
help
Selecting this will enable printing of various debug
information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 156aad167cd6..662fe94cb2f8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -117,8 +86,7 @@
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
+ struct scatterlist *sg, size_t tlen, bool ascii)
{
struct scatterlist *it;
void *it_page;
@@ -137,7 +105,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
}
buf = it_page + it->offset;
- len = min(tlen, it->length);
+ len = min_t(size_t, tlen, it->length);
print_hex_dump(level, prefix_str, prefix_type, rowsize,
groupsize, buf, len, ascii);
tlen -= len;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,52 +130,6 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
@@ -220,147 +141,36 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
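This and the following caamalg hunks all replace the open-coded keys_fit_inline checks with one decision: if the (split) key still fits in what remains of the 64-word descriptor buffer after the job-descriptor overhead, reference it inline via key_virt, otherwise by DMA address via key_dma. A minimal sketch of that decision, with hypothetical names standing in for the alginfo bookkeeping:

#include <linux/types.h>

#define DESC_BUF_MAX	(64 * 4)	/* 64 descriptor words of 4 bytes */

struct keyref {
	bool inline_key;
	const void *virt;
	dma_addr_t dma;
};

static void pick_key_reference(struct keyref *ref, const void *key_virt,
			       dma_addr_t key_dma, unsigned int keylen,
			       unsigned int desc_len, unsigned int job_io_len)
{
	int rem = DESC_BUF_MAX - job_io_len - keylen;

	ref->inline_key = (rem >= (int)desc_len);
	if (ref->inline_key)
		ref->virt = key_virt;	/* key copied into the descriptor */
	else
		ref->dma = key_dma;	/* key fetched by reference */
}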
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+ is_rfc3686, nonce, ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ ctx->adata.key_dma = ctx->key_dma;
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ skip_enc:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,93 +330,30 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ copy_iv:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_givenc:
return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
- ctx->enckeylen = keys.enckeylen;
+ ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
}
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
ret = rfc4106_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
ret = rfc4543_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
- return ret;
+ return 0;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->src_nents > 1 ? 100 : ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/* Check if data are contiguous. */
all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
+ if (!all_contig)
sec4_sg_len = src_nents;
- }
sec4_sg_len += dst_nents;
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
+ req->assoclen + req->cryptlen, 1);
#endif
/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
if (ctx->key_dma &&
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
@@ -4583,6 +3568,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
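
The hunk above adds a capability gate: low-power (LP) CAAM AES blocks do not support the XTS mode of operation, so xts(aes) templates are skipped at registration time. Below is a compile-checkable restatement of that predicate as a standalone sketch; the helper name is hypothetical, and the macros are assumed to come from the driver's existing regs.h/desc.h headers (they all appear in the check above).

#include <linux/types.h>

/*
 * Hypothetical predicate equivalent to the check added in
 * caam_algapi_init(): skip registration when the AES CHA is the
 * low-power variant and the template requests XTS.
 */
static bool caam_skip_xts_on_lp(u32 cha_vid, u32 class1_alg_type)
{
	return ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
	       ((class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_XTS);
}
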
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..f3f48c10b9d6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * so the descriptor buffer needs to be self-patched instead.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
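
A minimal sketch of how a caller might drive this constructor, assuming the struct alginfo fields used elsewhere in this patch (key_virt, key_dma, key_inline, keylen, keylen_pad, algtype) and a caam_ctx whose split key has already been generated, e.g. by gen_split_key() as in aead_setkey() above. The function name is hypothetical and error handling is reduced to the DMA mapping check; it is an illustration, not the driver's actual aead_set_sh_desc() code.

/*
 * Hypothetical caller sketch: point adata at the split key, build the
 * shared descriptor, then DMA-map it for the job ring.
 */
static int example_build_null_enc_shdesc(struct caam_ctx *ctx,
					 unsigned int authsize)
{
	struct alginfo *adata = &ctx->adata;

	adata->key_virt = ctx->key;
	adata->key_dma = ctx->key_dma;
	/* inline the key only when it fits into the shared descriptor */
	adata->key_inline = true;

	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, adata, authsize);

	ctx->sh_desc_enc_dma = dma_map_single(ctx->jrdev, ctx->sh_desc_enc,
					      desc_bytes(ctx->sh_desc_enc),
					      DMA_TO_DEVICE);
	return dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma) ?
	       -ENOMEM : 0;
}
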
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * so the descriptor buffer needs to be self-patched instead.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
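
The comment in init_sh_desc_key_aead() above documents the rfc3686 key convention: the 4-byte nonce is appended to the encryption key, and the keylen handed in still includes those 4 bytes. A tiny sketch of how a caller separates the two, using CTR_RFC3686_NONCE_SIZE from crypto/ctr.h; the helper itself is hypothetical and only illustrates the convention.

#include <linux/types.h>
#include <crypto/ctr.h>		/* CTR_RFC3686_NONCE_SIZE == 4 */

/*
 * Hypothetical illustration of the rfc3686 key layout described above:
 * the nonce sits in the last 4 bytes of the key material, while the
 * value stored as cdata->keylen still counts those bytes.
 */
static const u8 *rfc3686_nonce(const u8 *key, unsigned int keylen)
{
	return key + keylen - CTR_RFC3686_NONCE_SIZE;
}
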
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @geniv: true when the descriptor serves an IV-generating (geniv) transform,
+ * in which case the IV is read from the input sequence
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_rfc3686)
+ goto copy_iv;
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV from class 1 context to the output FIFO */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload the IV */
+ append_seq_fifo_load(desc, ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is already loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
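For orientation, here is a minimal sketch of how a driver-side setkey path might consume one of the constructors exported above. The reduced context structure, its field names and the error handling are illustrative assumptions, not part of this patch; only cnstr_shdsc_gcm_encap() and desc_bytes() come from the CAAM code itself.

#include <linux/dma-mapping.h>
#include "desc_constr.h"	/* desc_bytes(), struct alginfo, MAX_CAAM_DESCSIZE */
#include "caamalg_desc.h"	/* cnstr_shdsc_gcm_encap(), added by this patch */

struct example_gcm_ctx {			/* assumed, reduced tfm context */
	struct device *jrdev;
	struct alginfo cdata;
	unsigned int authsize;
	u8 key[32];
	u32 sh_desc_enc[MAX_CAAM_DESCSIZE];
	dma_addr_t sh_desc_enc_dma;
};

static int example_gcm_set_sh_desc(struct example_gcm_ctx *ctx)
{
	u32 *desc = ctx->sh_desc_enc;

	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;	/* assume the key fits in the descriptor */

	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);

	/* publish the finished shared descriptor to the job ring hardware */
	ctx->sh_desc_enc_dma = dma_map_single(ctx->jrdev, desc,
					      desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		return -ENOMEM;

	return 0;
}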
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
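The DESC_*_LEN macros above let callers budget the shared-descriptor size before choosing between an inlined key (the append_key_as_imm() path) and a DMA-referenced key (the append_key() path). A rough sketch of that decision follows, under the assumption that CAAM_DESC_BYTES_MAX from desc_constr.h bounds the whole descriptor; the function and parameter names are made up for the example and a real caller would also reserve room for the job descriptor I/O commands.

#include "desc_constr.h"	/* CAAM_DESC_BYTES_MAX, struct alginfo */
#include "caamalg_desc.h"	/* DESC_GCM_ENC_LEN */

/* Sketch only: pick key placement for the GCM encap shared descriptor. */
static void example_choose_key_placement(struct alginfo *cdata,
					 void *key, dma_addr_t key_dma)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_GCM_ENC_LEN;

	if (rem_bytes >= (int)cdata->keylen) {
		cdata->key_inline = true;	/* key copied into the descriptor */
		cdata->key_virt = key;
	} else {
		cdata->key_inline = false;	/* key referenced by DMA address */
		cdata->key_dma = key_dma;
	}
}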
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc206969f..e58639ea53b1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true)
+ * import context, read and write to seqout
+ * For ahash update_first and digest (import_ctx = false)
+ * read and write to seqout
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
+
error_free_key:
kfree(hashed_key);
return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
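The completion-callback hunks above replace open-coded offsetof() pointer arithmetic with container_of(). The idiom is shown here on an assumed, reduced version of the edesc structure (the driver's real ahash_edesc carries more fields); only the conversion itself is the point.

#include <linux/kernel.h>	/* container_of() */
#include <linux/types.h>	/* dma_addr_t, u32 */
#include "desc.h"		/* MAX_CAAM_DESCSIZE */

struct example_edesc {			/* reduced, assumed layout */
	int src_nents;
	dma_addr_t sec4_sg_dma;
	u32 hw_desc[MAX_CAAM_DESCSIZE];
};

static struct example_edesc *example_edesc_from_desc(u32 *desc)
{
	/*
	 * Same result as (void *)desc - offsetof(struct example_edesc, hw_desc),
	 * but type-checked against the element type of hw_desc[].
	 */
	return container_of(desc, struct example_edesc, hw_desc[0]);
}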
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..32100c4851dd 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2c7241..41398da3edf4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
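The caamrng.c allocation fix above adds GFP_KERNEL next to GFP_DMA: GFP_DMA is only a zone modifier and is not meant to be passed on its own. A minimal illustration of the restored pattern; the structure name is hypothetical.

#include <linux/slab.h>		/* kmalloc() */
#include <linux/gfp.h>		/* GFP_DMA, GFP_KERNEL */

struct example_rng_buf {		/* assumed DMA-able buffer */
	u8 data[256];
};

static struct example_rng_buf *example_alloc_dma_buf(void)
{
	/* zone modifier (GFP_DMA) combined with a reclaim context (GFP_KERNEL) */
	return kmalloc(sizeof(struct example_rng_buf), GFP_DMA | GFP_KERNEL);
}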
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 72ff19658985..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -558,8 +556,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
@@ -626,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
@@ -640,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
@@ -799,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
&caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -807,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -815,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -832,7 +831,8 @@ caam_remove:
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
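The kick_trng() cleanup above collapses the separate read/clear/set/write-back steps into a single clrsetbits_32() call on rtmctl. For reference, a sketch of what such a helper does; the driver's actual helper lives in regs.h and uses its endian-aware register accessors, so this is an assumed equivalent, not the real definition.

#include <linux/io.h>		/* ioread32(), iowrite32() */

static inline void example_clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
{
	u32 val = ioread32(reg);	/* the driver uses rd_reg32()/wr_reg32() */

	val &= ~clear;			/* e.g. RTMCTL_PRGM -> leave program mode */
	val |= set;			/* e.g. RTMCTL_SAMP_MODE_RAW_ES_SC */
	iowrite32(val, reg);
}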
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b6646bb36..2e6766a1573f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
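The desc.h changes above move several masks to a pre-shifted form (HDR_START_IDX_MASK, HDR_SD_SHARE_MASK and HDR_JD_SHARE_MASK now include their shifts), so a field can be masked directly out of a descriptor header word. A small illustration of the resulting access pattern; the helper names are invented for the example.

#include "desc.h"	/* HDR_* masks and shifts */

static inline u32 example_hdr_start_idx(u32 hdr)
{
	return (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;
}

static inline u32 example_hdr_set_share(u32 hdr, u32 share)
{
	/* share is an already-shifted value such as HDR_SHARE_SERIAL */
	return (hdr & ~HDR_SD_SHARE_MASK) | (share & HDR_SD_SHARE_MASK);
}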
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a78ec1f..b9c8d98ef826 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ * is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
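
For reference, a minimal caller sketch (not taken from this patch) of how the new desc_inline_query() helper is meant to be used: gather the candidate data lengths, let the helper decide what still fits under CAAM_DESC_BYTES_MAX, then derive the key_inline flags from the returned bitmask. The SH_DESC_EXAMPLE_LEN / JOB_DESC_EXAMPLE_LEN constants and the function name are placeholders, not identifiers from the driver.

/* Hypothetical usage sketch; assumes the headers already pulled in by
 * desc_constr.h. Constant values are illustrative only.
 */
#define SH_DESC_EXAMPLE_LEN	(5 * CAAM_CMD_SZ)	/* assumed shared desc base */
#define JOB_DESC_EXAMPLE_LEN	(6 * CAAM_CMD_SZ)	/* assumed job desc length */

static int example_choose_inlining(struct alginfo *auth, struct alginfo *cipher)
{
	unsigned int data_len[2] = { auth->keylen_pad, cipher->keylen };
	u32 inl_mask;

	if (desc_inline_query(SH_DESC_EXAMPLE_LEN, JOB_DESC_EXAMPLE_LEN,
			      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;	/* not even the referenced (pointer) form fits */

	/* bit x of inl_mask corresponds to data_len[x] */
	auth->key_inline = !!(inl_mask & (1 << 0));
	cipher->key_inline = !!(inl_mask & (1 << 1));
	return 0;
}
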
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea83fcc..79a0cc70717f 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..c8604dfadbf5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
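
For reference, a stripped-down sketch (not caam code, all names illustrative) of the threaded-IRQ-to-tasklet pattern the jr.c hunks above apply: the hard handler only acknowledges the hardware and schedules the tasklet, the tasklet drains completions, and teardown kills the tasklet once the interrupt is released.

#include <linux/interrupt.h>

struct demo_ring {
	struct tasklet_struct irqtask;
	int irq;
};

/* Deferred half: runs in softirq context after the hard handler returns */
static void demo_dequeue(unsigned long devarg)
{
	struct demo_ring *ring = (struct demo_ring *)devarg;

	/* drain completed jobs here, then unmask the interrupt */
	(void)ring;
}

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_ring *ring = dev_id;

	/* check/ack the hardware status, then defer the heavy lifting */
	tasklet_schedule(&ring->irqtask);
	return IRQ_HANDLED;
}

static int demo_init(struct demo_ring *ring)
{
	tasklet_init(&ring->irqtask, demo_dequeue, (unsigned long)ring);
	return request_irq(ring->irq, demo_interrupt, IRQF_SHARED,
			   "demo-ring", ring);
}

static void demo_exit(struct demo_ring *ring)
{
	free_irq(ring->irq, ring);	/* no further scheduling after this */
	tasklet_kill(&ring->irqtask);	/* wait out any pending run */
}
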
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..1bb2816a9b4d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..4628f389eb64 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
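
For reference, a minimal sketch (not taken from this patch) of a caller using the reworked gen_split_key() interface: the algorithm selector travels in struct alginfo and the helper derives keylen / keylen_pad itself. With the mdpadlen table above, SHA-1 yields keylen 40 and keylen_pad 48, while SHA-256 yields 64 for both. EXAMPLE_MAX_KEYLEN and the function name are placeholders, not identifiers from the driver.

/* Hypothetical caller sketch for the new gen_split_key() signature. */
#define EXAMPLE_MAX_KEYLEN	128	/* assumed size of the key_out buffer */

static int example_gen_hmac_split_key(struct device *jrdev, u8 *key_out,
				      const u8 *key_in, u32 keylen)
{
	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_SHA256,
	};
	int ret;

	/* The helper fills in adata.keylen (64 for SHA-256) and
	 * adata.keylen_pad (64), and rejects results larger than the
	 * caller's buffer via max_keylen.
	 */
	ret = gen_split_key(jrdev, key_out, &adata, key_in, keylen,
			    EXAMPLE_MAX_KEYLEN);
	if (ret)
		return ret;

	/* key_out now holds adata.keylen_pad bytes of MDHA split key */
	return 0;
}
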
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a356d05..6afa20c4a013 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
#include "regs.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..7bc09989e18a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->sb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..e2ce8190ecc9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
#include "ccp-dev.h"
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
bitmap_set(ccp->lsbmap, start, count);
mutex_unlock(&ccp->sb_mutex);
- return start * LSB_ITEM_SIZE;
+ return start;
}
ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
}
}
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
- int lsbno = start / LSB_SIZE;
-
if (!start)
return;
- if (cmd_q->lsb == lsbno) {
+ if (cmd_q->lsb == start) {
/* An entry from the private LSB */
- bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ bitmap_clear(cmd_q->lsbmap, start, count);
} else {
/* From the shared LSBs */
struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Key (Exponent) is in external memory */
- CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
- CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ /* Exponent is in LSB memory */
+ CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queue used to suspend */
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
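
For reference, a short sketch (not taken from this patch) of the new LSB convention in ccp-dev-v5.c: ccp_lsb_alloc() now returns a slot index rather than a byte offset, descriptor-building code converts that index to an address with LSB_ITEM_SIZE, and ccp_lsb_free() is called with the same slot index. The function name is illustrative only.

/* Illustrative sketch, assumed to live alongside ccp_lsb_alloc()/ccp_lsb_free()
 * in ccp-dev-v5.c so the static helpers are in scope.
 */
static void example_lsb_slot_usage(struct ccp_cmd_queue *cmd_q)
{
	u32 slot = ccp_lsb_alloc(cmd_q, 1);	/* slot index, not a byte offset */

	/* Descriptors address LSB memory in bytes, so the conversion happens
	 * at descriptor-build time, as ccp5_perform_rsa() now does:
	 *	CCP5_CMD_KEY_LO(&desc) = slot * LSB_ITEM_SIZE;
	 *	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
	 */

	ccp_lsb_free(cmd_q, slot, 1);		/* freed by slot index as well */
}
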
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633aae10..511ab042b5e7 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
+ /* Initialize the wait queues */
+ init_waitqueue_head(&ccp->sb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..830f35e6005f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
/* Private LSB that is assigned to this queue, or -1 if none.
* Bitmap for my private LSB, unused otherwise
*/
- unsigned int lsb;
+ int lsb;
DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
- struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
* word 7: upper 16 bits of key pointer; key memory type
*/
struct dword0 {
- __le32 soc:1;
- __le32 ioc:1;
- __le32 rsvd1:1;
- __le32 init:1;
- __le32 eom:1; /* AES/SHA only */
- __le32 function:15;
- __le32 engine:4;
- __le32 prot:1;
- __le32 rsvd2:7;
+ unsigned int soc:1;
+ unsigned int ioc:1;
+ unsigned int rsvd1:1;
+ unsigned int init:1;
+ unsigned int eom:1; /* AES/SHA only */
+ unsigned int function:15;
+ unsigned int engine:4;
+ unsigned int prot:1;
+ unsigned int rsvd2:7;
};
struct dword3 {
- __le32 src_hi:16;
- __le32 src_mem:2;
- __le32 lsb_cxt_id:8;
- __le32 rsvd1:5;
- __le32 fixed:1;
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
};
union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
union dword5 {
struct {
- __le32 dst_hi:16;
- __le32 dst_mem:2;
- __le32 rsvd1:13;
- __le32 fixed:1;
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
} fields;
__le32 sha_len_hi;
};
struct dword7 {
- __le32 key_hi:16;
- __le32 key_mem:2;
- __le32 rsvd1:14;
+ unsigned int key_hi:16;
+ unsigned int key_mem:2;
+ unsigned int rsvd1:14;
};
struct ccp5_desc {
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb9a880..3e104f5aa0c2 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e4ddb921d7b3..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -592,16 +696,18 @@ badkey_err:
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
- int ret = 0;
- struct sge_ofld_txq *q;
struct adapter *adap = netdev2adap(dev);
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ struct sge_uld_txq *txq;
+ int ret = 0;
local_bh_disable();
- q = &adap->sge.ofldtxq[idx];
- spin_lock(&q->sendq.lock);
- if (q->full)
+ txq = &txq_info->uldtxq[idx];
+ spin_lock(&txq->sendq.lock);
+ if (txq->full)
ret = -1;
- spin_unlock(&q->sendq.lock);
+ spin_unlock(&txq->sendq.lock);
local_bh_enable();
return ret;
}
@@ -610,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -620,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -637,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -647,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -674,11 +777,11 @@ static int chcr_device_init(struct chcr_context *ctx)
}
u_ctx = ULD_CTX(ctx);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
- ctx->dev->tx_channel_id = 0;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
spin_unlock(&ctx->dev->lock_chcr_dev);
}
out:
@@ -727,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -779,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -852,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -915,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -929,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -961,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -977,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1021,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1042,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1055,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1073,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+ /* use the key to calculate the ipad and opad. ipad will sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1102,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1122,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
+
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
static int chcr_sha_init(struct ahash_request *areq)
@@ -1153,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1202,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
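+/*
+ * When req->src and req->dst differ, copy the associated data from the
+ * source to the destination scatterlist with the default null skcipher so
+ * that the AAD is also present in the output buffer.
+ */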
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
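+/* Translate the requested ICV length into the hardware HMAC_CTRL encoding. */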
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
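+/*
+ * Build the work request for AES-CBC authenc (and digest_null) requests:
+ * fill the SEC_CPL, load the cipher key (the reverse-round key for decrypt)
+ * and the precomputed ipad/opad into the key context, map the destination
+ * through a phys dsgl and append AAD, IV and payload to the skb.
+ */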
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+ * Input order is AAD, IV and Payload, where the IV is included as
+ * part of the authdata. All other fields are filled according to
+ * the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
+
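+/*
+ * Zero the scratch area of the destination page that is sent in place of an
+ * empty GCM payload; a page reference is taken so the NIC does not free it.
+ */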
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by the NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
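+/*
+ * Encode the message length into the trailing 'csize' bytes of the CCM B0
+ * block in big-endian form.
+ */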
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
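+/*
+ * Construct the CCM B0 block in the scratch pad from the IV: set the tag
+ * length and adata flag bits and append the encoded message length.
+ */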
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
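+/*
+ * Prepare the CCM/RFC4309 IV and the AAD length field in the scratch pad,
+ * generate B0 and clear the counter portion of the IV.
+ */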
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
+
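+/*
+ * Fill the CPL_TX_SEC_PDU for CCM requests, accounting for the extra B0
+ * block and AAD length field that precede the payload.
+ */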
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ /* For CCM, B0 is always present, so AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
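+/*
+ * Sanity-check a CCM request: IV flags for plain CCM, AAD length for
+ * RFC4309 and that a cipher key has been set.
+ */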
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
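+/* Copy B0 (and the AAD length field), AAD, IV and payload into the skb. */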
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ /* B0 and the AAD length field (if AAD is present) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
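+/*
+ * Build the work request for CCM and RFC4309 requests: validate the input,
+ * format B0 and the IV, load the AES key twice into the key context (the
+ * same key drives both the cipher and the CBC-MAC) and map the destination.
+ */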
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
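+/*
+ * Build the work request for GCM and RFC4106 requests: load the AES key and
+ * the precomputed GHASH subkey H into the key context, construct the
+ * SALT | IV | counter block and substitute a zeroed dummy block when the
+ * payload is empty.
+ */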
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+ /* A null payload is not supported by the hardware;
+ * software sends a block-size dummy payload instead.
+ */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ /* prepare a 16 byte iv */
+ /* S A L T | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+ /* SHA1's IPsec authsize is 12 rather than 10, i.e. maxauthsize / 2 does
+ * not hold for SHA1, so the authsize == ICV_12 check must come before
+ * the authsize == (maxauth >> 1) check.
+ */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+
+ crypto_tfm_set_flags((struct crypto_tfm *) tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
+ return 0;
}
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
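+/*
+ * GCM setkey: store the AES key (and the RFC4106 salt), then derive the
+ * GHASH subkey H by encrypting an all-zero block with a software AES
+ * blkcipher for use in the key context.
+ */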
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ pr_err("GCM: Invalid key length %d", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+ /* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous AES
+ * blkcipher; the result is placed in the key context.
+ */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
+
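+/*
+ * authenc setkey: split the combined key, store the cipher key and its
+ * reverse-round form, and precompute the ipad/opad partial hashes that the
+ * hardware expects in the key context.
+ */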
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ /* the key contains both the auth and the cipher key */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+ /* Copy only the encryption key. The authkey is used to generate h(ipad)
+ * and h(opad), so it is not needed again; authkeylen is the size of the
+ * hash digest.
+ */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+ /* Compute the ipad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+ /* Compute the opad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct crypto_authenc_keys keys;
+
+ /* the key contains both the auth and the cipher key */
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
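+/*
+ * Common AEAD submission path: check that a device is bound and that the
+ * crypto queue has room, build the work request via the supplied callback
+ * and hand it to the LLD.
+ */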
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (ctx && !ctx->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+ if (IS_ERR(skb) || skb == NULL) {
+ pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1232,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1259,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1352,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1364,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1376,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1388,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1400,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
/*
@@ -1422,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1456,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ec64fbcdeb49..3c7c51f7bedf 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
-#define TRANSHDR_SIZE(alignedkctx_len)\
- (sizeof(struct ulptx_idata) +\
- sizeof(struct ulp_txpkt) +\
- sizeof(struct fw_crypto_lookaside_wr) +\
- sizeof(struct cpl_tx_sec_pdu) +\
- (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
- (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
- (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
- sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata))
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
- CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
-#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
- SCMD_NUM_IVS_V((nivs)))
+ SCMD_NUM_IVS_V(0))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
* where they indicate the size of the integrity check value (ICV)
*/
enum {
- AES_CCM_ICV_4 = 4,
- AES_CCM_ICV_6 = 6,
- AES_CCM_ICV_8 = 8,
- AES_CCM_ICV_10 = 10,
- AES_CCM_ICV_12 = 12,
- AES_CCM_ICV_14 = 14,
- AES_CCM_ICV_16 = 16
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
};
struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
187, 22
};
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
@@ -412,60 +408,4 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
-{
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k = 0;
- u8 nr, nk;
-
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
-
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++ )
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
- i = 0;
- temp = w_ring[nk - 1];
- while(i + nk < (nr + 1) * 4) {
- if(!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- }
- else if (nk == 8 && (i % 4 == 0))
- temp = aes_ks_subword(temp);
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if(j < 0)
- j += nk;
- }
-}
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index fb5f9bbfa09c..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -42,6 +42,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
@@ -109,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EINVAL;
+ error_status = -EBADMSG;
}
/* call completion callback with failure status */
if (req) {
- if (!chcr_handle_resp(req, input, error_status))
- req->complete(req, error_status);
- else
- return -EINVAL;
+ error_status = chcr_handle_resp(req, input, error_status);
+ req->complete(req, error_status);
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
@@ -126,7 +125,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
int chcr_send_wr(struct sk_buff *skb)
{
- return cxgb4_ofld_send(skb->dev, skb);
+ return cxgb4_crypto_send(skb->dev, skb);
}
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671a4232..c7088a4e0a49 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
struct uld_ctx;
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 reserverd;
+ unsigned char key[0];
+};
+
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
struct chcr_dev {
- /* Request submited to h/w and waiting for response. */
spinlock_t lock_chcr_dev;
- struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d75605da8b..d5af7d64a763 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+#define T5_MAX_AAD_SIZE 512
+
+
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
-#define CHCR_SCMD_CIPHER_MODE_NOP 0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2
+#define AEAD_H_SIZE 16
+
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
-#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
-struct _key_ctx {
- __be32 ctx_hdr;
- u8 salt[MAX_SALT];
- __be64 reserverd;
- unsigned char key[0];
-};
struct ablk_ctx {
- u8 enc;
- unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
- unsigned int dst_nents;
- struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ short int dst_nents;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
};
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+ struct chcr_gcm_ctx gcm[0];
+ struct chcr_authenc_ctx authenc[0];
+};
+
+
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_skcipher *null;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[0];
+};
+
+
+
struct hmac_ctx {
- struct shash_desc *desc;
+ struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
+ struct chcr_aead_ctx aeadctx[0];
};
struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
struct chcr_ahash_req_ctx {
u32 result;
- char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr_len;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 *reqbfr;
+ u8 *skbfr;
+ u8 reqlen;
/* DMA the partial hash in it */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
- void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
+ unsigned int dst_nents;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
};
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
- struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
+ int size,
unsigned short op_type);
+static int chcr_aead_op(struct aead_request *req_base,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
#endif /* __CHCR_CRYPTO_H__ */
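
The new chcr AEAD context relies on overlapping zero-length arrays: struct __aead_ctx exposes both a GCM and an authenc view of the same trailing storage, and struct chcr_aead_ctx carries it as a variable-sized tail. A minimal standalone sketch of that idiom follows; the demo_* names are hypothetical and userspace calloc() stands in for whatever allocator the driver actually uses, assuming GNU C zero-length arrays:

#include <stdlib.h>
#include <string.h>

/* Hypothetical per-mode sub-contexts, mirroring only the layout idea. */
struct demo_gcm_ctx {
	unsigned char ghash_h[16];
};

struct demo_authenc_ctx {
	unsigned char h_iopad[128];
	unsigned char auth_mode;
};

/* Both zero-length arrays alias the same trailing bytes, so the container
 * is sized for whichever variant the transform needs. */
struct demo_variant_ctx {
	struct demo_gcm_ctx gcm[0];
	struct demo_authenc_ctx authenc[0];
};

struct demo_aead_ctx {
	unsigned int enckey_len;
	struct demo_variant_ctx ctx[0];	/* variant-sized tail */
};

static struct demo_aead_ctx *demo_alloc_gcm_ctx(void)
{
	struct demo_aead_ctx *c;

	/* One allocation: fixed header plus room for the GCM variant. */
	c = calloc(1, sizeof(*c) + sizeof(struct demo_gcm_ctx));
	if (!c)
		return NULL;

	memset(c->ctx->gcm->ghash_h, 0, sizeof(c->ctx->gcm->ghash_h));
	return c;
}

An authenc transform would instead size the allocation with sizeof(struct demo_authenc_ctx) and work through c->ctx->authenc.
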
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..a768da7138a1 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 9f284682c091..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_adjust_op(engine, &creq->op_tmpl);
memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
- digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
-
- mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+ if (!sreq->offset) {
+ digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ for (i = 0; i < digsize / 4; i++)
+ writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+ }
if (creq->cache_ptr)
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -312,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+	 * Result is already in the correct endianness when the SA is
+ * used
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -504,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -564,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -635,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -648,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
return 0;
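
The register-readback fallback kept above still has to honour the engine's conventions: MD5 state words come out little-endian, the SHA variants big-endian. A small helper in the same spirit could look like the sketch below (demo_copy_digest is a hypothetical name; only the standard kernel byteorder macros are assumed):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy a digest held as CPU-endian u32 words into the byte order the
 * crypto API expects: little-endian for MD5, big-endian for SHA-*. */
static void demo_copy_digest(void *out, const u32 *state,
			     unsigned int digsize, bool algo_le)
{
	unsigned int i;

	if (algo_le) {
		__le32 *result = out;

		for (i = 0; i < digsize / 4; i++)
			result[i] = cpu_to_le32(state[i]);
	} else {
		__be32 *result = out;

		for (i = 0; i < digsize / 4; i++)
			result[i] = cpu_to_be32(state[i]);
	}
}
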
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..4416b88eca70 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+ * req, just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
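
The reworked mv_cesa_dma_add_result_op() no longer allocates a dedicated IV buffer; it borrows the context of the first CESA_TDMA_OP descriptor already present in the chain. Stripped of the driver specifics, the lookup is just a typed scan over a singly linked list, roughly as in this sketch (the demo_* names are made up):

#include <stddef.h>

#define DEMO_TYPE_MSK	0x7
#define DEMO_TYPE_OP	0x2

struct demo_desc {
	unsigned int flags;
	struct demo_desc *next;
};

/* Walk a singly linked descriptor chain and return the first entry whose
 * type field matches, or NULL if the chain has none. */
static struct demo_desc *demo_find_first(struct demo_desc *head,
					 unsigned int type)
{
	struct demo_desc *d;

	for (d = head; d; d = d->next)
		if ((d->flags & DEMO_TYPE_MSK) == type)
			return d;

	return NULL;
}
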
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce9400a..451fa18c1c7b 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
kthread_stop(cp->queue_th);
err:
- kfree(cp);
cpg = NULL;
return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
clk_put(cp->clk);
}
- kfree(cp);
cpg = NULL;
return 0;
}
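
Switching the old mv_cesa probe path from kzalloc()/kfree() to devm_kzalloc() is what lets the error and remove paths drop their explicit frees: the allocation is released automatically when the device goes away. A minimal probe skeleton using the same managed-allocation pattern (demo_* names are hypothetical) might look like:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
	int dummy;
};

/* With devm_kzalloc() the buffer is tied to the device's lifetime, so
 * neither the probe error paths nor remove() need an explicit kfree(). */
static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

The matching remove() needs no kfree(); the devres core frees priv after remove() returns.
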
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f229f7f7..036057abb257 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
/*
* While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
*/
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.decrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.decrypt);
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
-
padlock_store_cword(&ctx->cword.encrypt);
return err;
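
With CR0.TS no longer an issue, the PadLock ECB/CBC paths reduce to the plain blkcipher walk loop visible in the context above. The sketch below shows that remaining skeleton on its own; xcrypt_chunk() is a hypothetical stand-in for the PadLock primitive, not a real kernel symbol:

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

/* Placeholder for the hardware primitive (e.g. the REP XCRYPT-ECB insn). */
static void xcrypt_chunk(const u8 *in, u8 *out, unsigned int blocks)
{
	/* real code would issue the instruction here */
}

static int demo_ecb_encrypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	/* Process each mapped chunk; any tail smaller than a block is
	 * returned to the walk as unprocessed. */
	while ((nbytes = walk.nbytes)) {
		xcrypt_chunk(walk.src.virt.addr, walk.dst.virt.addr,
			     nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
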
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
struct sha1_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
struct sha256_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA256_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst) \
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA1_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from the input data */
if (len - done >= SHA1_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
src = data + done;
}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA256_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from input data*/
if (len - done >= SHA256_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / 64)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % 64);
src = data + done;
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956ee0ce..1d9ecd368b5b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
if (status & SAHARA_STATUS_MODE_BATCH)
dev_dbg(dev->device, " - Batch Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEDICATED)
- dev_dbg(dev->device, " - Decidated Mode.\n");
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEBUG)
dev_dbg(dev->device, " - Debug Mode.\n");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..0bba6a19d36a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
- dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+ dev_err(dev, is_sec1 ? "pointer not complete error\n"
: "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..55f7c392582f 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
- $(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+ $(call if_changed,perl)
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
- $(call cmd,perl)
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+ $(call if_changed,perl)
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 0e499bfca41c..26ec39ddf21f 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -270,8 +270,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
if (!dax_dev->alive)
return -ENXIO;
- /* prevent private / writable mappings from being established */
- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+ /* prevent private mappings from being established */
+ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info(dev, "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
@@ -328,7 +328,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- unsigned long vaddr = (unsigned long) vmf->virtual_address;
struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
int rc = VM_FAULT_SIGBUS;
@@ -353,7 +352,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- rc = vm_insert_mixed(vma, vaddr, pfn);
+ rc = vm_insert_mixed(vma, vmf->address, pfn);
if (rc == -ENOMEM)
return VM_FAULT_OOM;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 4a15fa5df98b..73c6ce93a0d9 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -78,7 +78,9 @@ static int dax_pmem_probe(struct device *dev)
nsio = to_nd_namespace_io(&ndns->dev);
/* parse the 'pfn' info block via ->rw_bytes */
- devm_nsio_enable(dev, nsio);
+ rc = devm_nsio_enable(dev, nsio);
+ if (rc)
+ return rc;
altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
if (IS_ERR(altmap))
return PTR_ERR(altmap);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index bf3ea7603a58..a324801d6a66 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -850,7 +850,7 @@ err_out:
EXPORT_SYMBOL(devfreq_add_governor);
/**
- * devfreq_remove_device() - Remove devfreq feature from a device.
+ * devfreq_remove_governor() - Remove devfreq feature from a device.
* @governor: the devfreq governor to be removed
*/
int devfreq_remove_governor(struct devfreq_governor *governor)
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 49e712aca0c1..5c3e7b11e8a6 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -190,6 +190,7 @@ static const struct of_device_id exynos_nocp_id_match[] = {
{ .compatible = "samsung,exynos5420-nocp", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, exynos_nocp_id_match);
static struct regmap_config exynos_nocp_regmap_config = {
.reg_bits = 32,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f55cf0eb2a66..107eb91a9415 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
@@ -34,7 +33,6 @@ struct exynos_ppmu {
unsigned int num_events;
struct device *dev;
- struct mutex lock;
struct exynos_ppmu_data ppmu;
};
@@ -90,8 +88,6 @@ struct __exynos_ppmu_events {
PPMU_EVENT(d1-cpu),
PPMU_EVENT(d1-general),
PPMU_EVENT(d1-rt),
-
- { /* sentinel */ },
};
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
@@ -351,6 +347,7 @@ static const struct of_device_id exynos_ppmu_id_match[] = {
},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
{
@@ -463,7 +460,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- mutex_init(&info->lock);
info->dev = &pdev->dev;
/* Parse dt data to get resource */
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 43fcc5a7f515..22b113363ffc 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -188,6 +188,7 @@ static const struct of_device_id rockchip_dfi_id_match[] = {
{ .compatible = "rockchip,rk3399-dfi" },
{ },
};
+MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
static int rockchip_dfi_probe(struct platform_device *pdev)
{
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 29866f7e6d7e..a8ed7792ece2 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -35,7 +35,7 @@ struct exynos_bus {
unsigned int edev_count;
struct mutex lock;
- struct dev_pm_opp *curr_opp;
+ unsigned long curr_freq;
struct regulator *regulator;
struct clk *clk;
@@ -99,7 +99,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
struct dev_pm_opp *new_opp;
- unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+ unsigned long old_freq, new_freq, new_volt, tol;
int ret = 0;
/* Get new opp-bus instance according to new bus clock */
@@ -113,8 +113,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
new_freq = dev_pm_opp_get_freq(new_opp);
new_volt = dev_pm_opp_get_voltage(new_opp);
- old_freq = dev_pm_opp_get_freq(bus->curr_opp);
- old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+ old_freq = bus->curr_freq;
rcu_read_unlock();
if (old_freq == new_freq)
@@ -146,7 +145,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
goto out;
}
}
- bus->curr_opp = new_opp;
+ bus->curr_freq = new_freq;
dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
old_freq/1000, new_freq/1000);
@@ -163,9 +162,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
struct devfreq_event_data edata;
int ret;
- rcu_read_lock();
- stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
- rcu_read_unlock();
+ stat->current_frequency = bus->curr_freq;
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
@@ -226,7 +223,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
}
new_freq = dev_pm_opp_get_freq(new_opp);
- old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ old_freq = bus->curr_freq;
rcu_read_unlock();
if (old_freq == new_freq)
@@ -242,7 +239,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
}
*freq = new_freq;
- bus->curr_opp = new_opp;
+ bus->curr_freq = new_freq;
dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
old_freq/1000, new_freq/1000);
@@ -335,6 +332,7 @@ static int exynos_bus_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
+ struct dev_pm_opp *opp;
unsigned long rate;
int ret;
@@ -352,22 +350,23 @@ static int exynos_bus_parse_of(struct device_node *np,
}
/* Get the freq and voltage from OPP table to scale the bus freq */
- rcu_read_lock();
ret = dev_pm_opp_of_add_table(dev);
if (ret < 0) {
dev_err(dev, "failed to get OPP table\n");
- rcu_read_unlock();
goto err_clk;
}
rate = clk_get_rate(bus->clk);
- bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
- if (IS_ERR(bus->curr_opp)) {
+
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, &rate, 0);
+ if (IS_ERR(opp)) {
dev_err(dev, "failed to find dev_pm_opp\n");
rcu_read_unlock();
- ret = PTR_ERR(bus->curr_opp);
+ ret = PTR_ERR(opp);
goto err_opp;
}
+ bus->curr_freq = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
return 0;
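
Caching bus->curr_freq instead of the dev_pm_opp pointer matters because, in this kernel generation, OPP entries are RCU-protected: the handle returned by devfreq_recommended_opp() is only valid inside the rcu_read_lock() section, so only plain values may be kept afterwards. A minimal sketch of that lookup pattern (demo_pick_initial_freq is a hypothetical helper):

#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Resolve the closest OPP for @rate and cache only its frequency;
 * the struct dev_pm_opp handle must not escape the RCU section. */
static int demo_pick_initial_freq(struct device *dev, unsigned long rate,
				  unsigned long *curr_freq)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &rate, 0);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*curr_freq = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();

	return 0;
}
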
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e24b73d66659..27d2f349b53c 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -80,7 +80,6 @@ struct rk3399_dmcfreq {
struct regulator *vdd_center;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
- struct dev_pm_opp *curr_opp;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -102,9 +101,6 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
target_rate = dev_pm_opp_get_freq(opp);
target_volt = dev_pm_opp_get_voltage(opp);
- dmcfreq->rate = dev_pm_opp_get_freq(dmcfreq->curr_opp);
- dmcfreq->volt = dev_pm_opp_get_voltage(dmcfreq->curr_opp);
-
rcu_read_unlock();
if (dmcfreq->rate == target_rate)
@@ -165,7 +161,9 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
if (err)
dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
- dmcfreq->curr_opp = opp;
+ dmcfreq->rate = target_rate;
+ dmcfreq->volt = target_volt;
+
out:
mutex_unlock(&dmcfreq->lock);
return err;
@@ -414,7 +412,6 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- rcu_read_unlock();
return -EINVAL;
}
@@ -431,12 +428,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
rcu_read_unlock();
return PTR_ERR(opp);
}
+ data->rate = dev_pm_opp_get_freq(opp);
+ data->volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
- data->curr_opp = opp;
rk3399_devfreq_dmc_profile.initial_freq = data->rate;
- data->devfreq = devfreq_add_device(dev,
+ data->devfreq = devm_devfreq_add_device(dev,
&rk3399_devfreq_dmc_profile,
"simple_ondemand",
&data->ondemand_data);
@@ -454,6 +452,7 @@ static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
{ .compatible = "rockchip,rk3399-dmc" },
{ },
};
+MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit synchronization via
- userspace. It enables send/receive 'struct fence' objects to/from
+ userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
- struct fence *fence_excl;
+ struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
@@ -187,20 +187,20 @@ retry:
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
- if (!fence_get_rcu(fence_excl)) {
+ if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
+ } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+ dma_buf_poll_cb)) {
events &= ~pevents;
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
@@ -222,9 +222,9 @@ retry:
goto out;
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- if (!fence_get_rcu(fence)) {
+ if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
@@ -235,13 +235,13 @@ retry:
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
- if (!fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- fence_put(fence);
+ if (!dma_fence_add_callback(fence, &dcb->cb,
+ dma_buf_poll_cb)) {
+ dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
- fence_put(fence);
+ dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */
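
The poll path above illustrates the reworked reference discipline for RCU-visible fences: take a temporary reference with dma_fence_get_rcu(), try to register the callback, then drop the reference, relying on the documented rule that a fence's creator keeps it alive until it signals. A condensed sketch of that pattern (demo_* names are hypothetical):

#include <linux/types.h>
#include <linux/dma-fence.h>

static void demo_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* A real user would wake a waiter or complete work here. */
}

/* Returns true if the callback was queued, false if the fence was
 * already signaled or already on its way to being freed. */
static bool demo_arm_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool armed;

	if (!dma_fence_get_rcu(fence))
		return false;		/* refcount already dropped to zero */

	/* Once registered, the fence stays alive until it signals, so the
	 * temporary reference taken above can be released either way. */
	armed = dma_fence_add_callback(fence, cb, demo_cb) == 0;
	dma_fence_put(fence);

	return armed;
}
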
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@
/*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
- return "fence_array";
+ return "dma_fence_array";
}
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
- struct fence_array_cb *array_cb =
- container_of(cb, struct fence_array_cb, cb);
- struct fence_array *array = array_cb->array;
+ struct dma_fence_array_cb *array_cb =
+ container_of(cb, struct dma_fence_array_cb, cb);
+ struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- fence_signal(&array->base);
- fence_put(&array->base);
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
}
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
- struct fence_array_cb *cb = (void *)(&array[1]);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
- fence_get(&array->base);
- if (fence_add_callback(array->fences[i], &cb[i].cb,
- fence_array_cb_func)) {
- fence_put(&array->base);
+ dma_fence_get(&array->base);
+ if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+ dma_fence_array_cb_func)) {
+ dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
- fence_put(array->fences[i]);
+ dma_fence_put(array->fences[i]);
kfree(array->fences);
- fence_free(fence);
+ dma_fence_free(fence);
}
-const struct fence_ops fence_array_ops = {
- .get_driver_name = fence_array_get_driver_name,
- .get_timeline_name = fence_array_get_timeline_name,
- .enable_signaling = fence_array_enable_signaling,
- .signaled = fence_array_signaled,
- .wait = fence_default_wait,
- .release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+ .get_driver_name = dma_fence_array_get_driver_name,
+ .get_timeline_name = dma_fence_array_get_timeline_name,
+ .enable_signaling = dma_fence_array_enable_signaling,
+ .signaled = dma_fence_array_signaled,
+ .wait = dma_fence_default_wait,
+ .release = dma_fence_array_release,
};
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
- size += num_fences * sizeof(struct fence_array_cb);
+ size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
- fence_init(&array->base, &fence_array_ops, &array->lock,
- context, seqno);
+ dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+ context, seqno);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(dma_fence_array_create);
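
For callers, dma_fence_array_create() takes ownership of the fences array it is given and of the references held in it, putting each fence when the aggregate is released. A short usage sketch (demo_merge_two is a hypothetical helper; it assumes the caller transfers its references to a and b):

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Bundle two already-referenced fences into one aggregate that signals
 * once both of them have signaled. */
static struct dma_fence *demo_merge_two(struct dma_fence *a,
					struct dma_fence *b, u64 context)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = a;
	fences[1] = b;

	array = dma_fence_array_create(2, fences, context, 1, false);
	if (!array) {
		kfree(fences);
		return NULL;
	}

	return &array->base;
}

Passing true for signal_on_any would instead make the aggregate signal as soon as either fence does.
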
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..0212af7997d9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic64_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
/**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ struct dma_fence_cb *cur, *tmp;
int ret = 0;
+ lockdep_assert_held(fence->lock);
+
if (WARN_ON(!fence))
return -EINVAL;
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
- * we might have raced with the unlocked fence_signal,
+ * we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
/**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
- if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct fence_cb *cur, *tmp;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+ struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
/**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@@ -152,78 +154,76 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
if (WARN_ON(timeout < 0))
return -EINVAL;
- if (timeout == 0)
- return fence_is_signaled(fence);
-
- trace_fence_wait_start(fence);
+ trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
- trace_fence_wait_end(fence);
+ trace_dma_fence_wait_end(fence);
return ret;
}
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
{
- struct fence *fence =
- container_of(kref, struct fence, refcount);
+ struct dma_fence *fence =
+ container_of(kref, struct dma_fence, refcount);
- trace_fence_destroy(fence);
+ trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
- fence_free(fence);
+ dma_fence_free(fence);
}
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
/**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
 * this will request sw signaling to be enabled, to make the fence
* complete as soon as possible
*/
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- trace_fence_enable_signal(fence);
+ if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback, no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
@@ -232,15 +232,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
@@ -249,22 +249,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
@@ -278,10 +279,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
/**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
@@ -296,7 +297,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
@@ -311,15 +312,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
@@ -328,25 +329,27 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
- * remaining timeout in jiffies on success.
+ * remaining timeout in jiffies on success. If timeout is zero the value one is
+ * returned if the fence is already signaled for consistency with other
+ * functions taking a jiffies timeout.
*/
signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
- signed long ret = timeout;
+ signed long ret = timeout ? timeout : 1;
bool was_set;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return ret;
spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +358,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
goto out;
}
}
- cb.base.func = fence_default_wait_cb;
+ cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
- while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+ while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
@@ -395,28 +399,34 @@ out:
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+ uint32_t *idx)
{
int i;
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ struct dma_fence *fence = fences[i];
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (idx)
+ *idx = i;
return true;
+ }
}
return false;
}
/**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
* @intr: [in] if true, do an interruptible wait
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: [out] the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -427,8 +437,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+ bool intr, signed long timeout, uint32_t *idx)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@@ -439,8 +449,11 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (fence_is_signaled(fences[i]))
+ if (dma_fence_is_signaled(fences[i])) {
+ if (idx)
+ *idx = i;
return 1;
+ }
return 0;
}
@@ -452,17 +465,19 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
+ struct dma_fence *fence = fences[i];
- if (fence->ops->wait != fence_default_wait) {
+ if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
- if (fence_add_callback(fence, &cb[i].base,
- fence_default_wait_cb)) {
+ if (dma_fence_add_callback(fence, &cb[i].base,
+ dma_fence_default_wait_cb)) {
/* This fence is already signaled */
+ if (idx)
+ *idx = i;
goto fence_rm_cb;
}
}
@@ -473,7 +488,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
- if (fence_test_signaled_any(fences, count))
+ if (dma_fence_test_signaled_any(fences, count, idx))
break;
ret = schedule_timeout(ret);
@@ -486,34 +501,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
- fence_remove_callback(fences[i], &cb[i].base);
+ dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
- * @ops: [in] the fence_ops for operations on this fence
+ * @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
*
* context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
*/
void
-fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +542,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
- trace_fence_init(fence);
+ trace_dma_fence_init(fence);
}
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
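
Everything dma_fence_init() insists on is visible in the BUG_ON above: an irq-safe spinlock plus ops providing wait, enable_signaling, get_driver_name and get_timeline_name. A minimal software-only fence built on those pieces could look like this sketch (demo_* names are hypothetical; the default dma_fence_free() release path is relied on):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_fence_lock);

static const char *demo_get_driver_name(struct dma_fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
	return "demo-timeline";
}

static bool demo_enable_signaling(struct dma_fence *f)
{
	return true;	/* nothing to poke, signaling is purely software */
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
	.enable_signaling = demo_enable_signaling,
	.wait = dma_fence_default_wait,
};

/* Allocate one software-only fence on a freshly allocated context. */
static struct dma_fence *demo_fence_create(void)
{
	struct dma_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	dma_fence_init(f, &demo_fence_ops, &demo_fence_lock,
		       dma_fence_context_alloc(1), 1);
	return f;
}

Such a fence would later be completed with dma_fence_signal() and dropped with dma_fence_put().
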
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af988e5..393817e849ed 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
u32 i;
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
- struct fence *old_fence;
+ struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
- fence_put(old_fence);
+ dma_fence_put(old_fence);
return;
}
}
@@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
unsigned i;
- struct fence *old_fence = NULL;
+ struct dma_fence *old_fence = NULL;
- fence_get(fence);
+ dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
- struct fence *check;
+ struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
kfree_rcu(old, rcu);
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -208,7 +208,7 @@ done:
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
- struct fence *old_fence = reservation_object_get_excl(obj);
+ struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
- fence_put(rcu_dereference_protected(old->shared[i],
+ dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
@@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared)
+ struct dma_fence ***pshared)
{
- unsigned shared_count = 0;
- unsigned retry = 1;
- struct fence **shared = NULL, *fence_excl = NULL;
- int ret = 0;
+ struct dma_fence **shared = NULL;
+ struct dma_fence *fence_excl;
+ unsigned int shared_count;
+ int ret = 1;
- while (retry) {
+ do {
struct reservation_object_list *fobj;
unsigned seq;
+ unsigned int i;
- seq = read_seqcount_begin(&obj->seq);
+ shared_count = i = 0;
rcu_read_lock();
+ seq = read_seqcount_begin(&obj->seq);
+
+ fence_excl = rcu_dereference(obj->fence_excl);
+ if (fence_excl && !dma_fence_get_rcu(fence_excl))
+ goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
- struct fence **nshared;
+ struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
- shared_count = 0;
break;
}
shared = nshared;
- memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
- } else
- shared_count = 0;
- fence_excl = rcu_dereference(obj->fence_excl);
-
- retry = read_seqcount_retry(&obj->seq, seq);
- if (retry)
- goto unlock;
-
- if (!fence_excl || fence_get_rcu(fence_excl)) {
- unsigned i;
for (i = 0; i < shared_count; ++i) {
- if (fence_get_rcu(shared[i]))
- continue;
-
- /* uh oh, refcount failed, abort and retry */
- while (i--)
- fence_put(shared[i]);
-
- if (fence_excl) {
- fence_put(fence_excl);
- fence_excl = NULL;
- }
-
- retry = 1;
- break;
+ shared[i] = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(shared[i]))
+ break;
}
- } else
- retry = 1;
+ }
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+ while (i--)
+ dma_fence_put(shared[i]);
+ dma_fence_put(fence_excl);
+ goto unlock;
+ }
+
+ ret = 0;
unlock:
rcu_read_unlock();
- }
- *pshared_count = shared_count;
- if (shared_count)
- *pshared = shared;
- else {
- *pshared = NULL;
+ } while (ret);
+
+ if (!shared_count) {
kfree(shared);
+ shared = NULL;
}
+
+ *pshared_count = shared_count;
+ *pshared = shared;
*pfence_excl = fence_excl;
return ret;
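As a usage illustration for the reworked snapshot helper above, a hedged sketch of a caller collecting all fences of a reservation object; the helper name my_collect_fences is invented for this example.

#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include <linux/slab.h>

static int my_collect_fences(struct reservation_object *resv)
{
	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned int shared_count, i;
	int ret;

	ret = reservation_object_get_fences_rcu(resv, &excl,
						&shared_count, &shared);
	if (ret)
		return ret;

	/* every fence in the snapshot holds a reference for the caller */

	for (i = 0; i < shared_count; i++)
		dma_fence_put(shared[i]);
	kfree(shared);			/* NULL when shared_count is 0 */
	dma_fence_put(excl);		/* dma_fence_put() tolerates NULL */
	return 0;
}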
@@ -377,12 +368,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
- long ret = timeout;
-
- if (!timeout)
- return reservation_object_test_signaled_rcu(obj, wait_all);
+ long ret = timeout ? timeout : 1;
retry:
fence = NULL;
@@ -397,20 +385,18 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *lfence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &lfence->flags))
continue;
- if (!fence_get_rcu(lfence))
+ if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
- if (fence_is_signaled(lfence)) {
- fence_put(lfence);
+ if (dma_fence_is_signaled(lfence)) {
+ dma_fence_put(lfence);
continue;
}
@@ -420,18 +406,16 @@ retry:
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
- if (!fence_get_rcu(fence_excl))
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence_excl->flags)) {
+ if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
- if (fence_is_signaled(fence_excl))
- fence_put(fence_excl);
+ if (dma_fence_is_signaled(fence_excl))
+ dma_fence_put(fence_excl);
else
fence = fence_excl;
}
@@ -439,8 +423,13 @@ retry:
rcu_read_unlock();
if (fence) {
- ret = fence_wait_timeout(fence, intr, ret);
- fence_put(fence);
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
@@ -454,18 +443,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
- struct fence *fence, *lfence = passed_fence;
+ struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = fence_get_rcu(lfence);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+ fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
- ret = !!fence_is_signaled(fence);
- fence_put(fence);
+ ret = !!dma_fence_is_signaled(fence);
+ dma_fence_put(fence);
}
return ret;
}
@@ -484,12 +473,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
- int ret = true;
+ int ret;
+ rcu_read_lock();
retry:
+ ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
if (test_all) {
unsigned i;
@@ -500,46 +490,35 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
else if (!ret)
break;
}
- /*
- * There could be a read_seqcount_retry here, but nothing cares
- * about whether it's the old or newer fence pointers that are
- * signaled. That race could still have happened after checking
- * read_seqcount_retry. If you care, use ww_mutex_lock.
- */
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
+
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
}
rcu_read_unlock();
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
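A short illustrative caller for the lockless wait path reworked above; the 100 ms timeout and the helper name are arbitrary choices for the sketch.

#include <linux/jiffies.h>
#include <linux/reservation.h>

static int my_wait_idle(struct reservation_object *resv, bool wait_all)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(resv, wait_all,
						  true, /* interruptible */
						  msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;	/* fences did not signal in time */
	return 0;
}

Note the behavioural detail in the hunks above: a zero timeout no longer falls back to reservation_object_test_signaled_rcu() but performs a single non-blocking pass (ret starts at 1).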
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index 71127f8f1626..f47112a64763 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
- fence_free(&f->base);
+ dma_fence_free(&f->base);
}
-static signed long seqno_wait(struct fence *fence, bool intr,
- signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6dc7953..69c5ff36e2f9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
@@ -84,7 +84,7 @@ static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
* Creates a new sync_timeline. Returns the sync_timeline object or NULL in
* case of error.
*/
-struct sync_timeline *sync_timeline_create(const char *name)
+static struct sync_timeline *sync_timeline_create(const char *name)
{
struct sync_timeline *obj;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
- obj->context = fence_context_alloc(1);
+ obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
- if (fence_is_signaled_locked(&pt->base))
+ if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
return pt;
}
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
- fence_free(fence);
+ dma_fence_free(fence);
}
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
@@ -316,8 +316,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
}
sync_file = sync_file_create(&pt->base);
+ dma_fence_put(&pt->base);
if (!sync_file) {
- fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
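The hunk above moves dma_fence_put() ahead of the error check because sync_file_create() now takes its own reference (see the sync_file.c change below). A hedged sketch of the resulting pattern for any fence creator; my_export_fence is illustrative and consumes the caller's reference just as the sw_sync code does.

#include <linux/dma-fence.h>
#include <linux/sync_file.h>

static struct sync_file *my_export_fence(struct dma_fence *fence)
{
	struct sync_file *sync_file;

	sync_file = sync_file_create(fence);	/* takes its own reference */
	dma_fence_put(fence);			/* drop the creator's reference */

	return sync_file;			/* NULL on allocation failure */
}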
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3db6caa..48b20e34fb6d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+ struct dma_fence *fence, bool show)
{
int status = 1;
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
- if (fence_is_signaled_locked(fence))
+ if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(!fence_is_signaled(sync_file->fence)));
+ sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6783aa..26fe8b9907b3 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
- struct fence base;
+ struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e817320..6d802f2d2881 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -54,7 +54,7 @@ err:
return NULL;
}
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
* takes ownership of @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
- sync_file->fence = fence;
+ sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
@@ -121,16 +121,16 @@ err:
* Ensures @fd references a valid sync_file and returns a fence that
 * represents all fences in the sync_file. On error NULL is returned.
*/
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
- struct fence *fence;
+ struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
- fence = fence_get(sync_file->fence);
+ fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
static int sync_file_set_fence(struct sync_file *sync_file,
- struct fence **fences, int num_fences)
+ struct dma_fence **fences, int num_fences)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
/*
* The references for the fences in the new sync_file are held
* in add_fence() during the merge procedure, so for num_fences == 1
* we already own a new reference to the fence. For num_fences > 1
- * we own the reference of the fence_array creation.
+ * we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
- array = fence_array_create(num_fences, fences,
- fence_context_alloc(1), 1, false);
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
if (!array)
return -ENOMEM;
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+ int *num_fences)
{
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+ int *i, struct dma_fence *fence)
{
fences[*i] = fence;
- if (!fence_is_signaled(fence)) {
- fence_get(fence);
+ if (!dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
(*i)++;
}
}
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct fence **fences, **nfences, **a_fences, **b_fences;
+ struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct fence *pt_a = a_fences[i_a];
- struct fence *pt_b = b_fences[i_b];
+ struct dma_fence *pt_a = a_fences[i_a];
+ struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
- fences[i++] = fence_get(a_fences[0]);
+ fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
- fence_remove_callback(sync_file->fence, &sync_file->cb);
- fence_put(sync_file->fence);
+ dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+ dma_fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -305,14 +308,13 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!poll_does_not_wait(wait) &&
- !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
- if (fence_add_callback(sync_file->fence, &sync_file->cb,
- fence_check_cb_func) < 0)
+ if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
- return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+ return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -370,14 +372,14 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
@@ -389,7 +391,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct fence **fences;
+ struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@@ -429,7 +431,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
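For illustration, a hedged sketch of consuming a sync_file fd from the kernel side with the renamed fence API; the helper name and timeout are invented for the example.

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/sync_file.h>

static int my_wait_on_sync_fd(int fd)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	long ret;

	if (!fence)
		return -EINVAL;		/* not a valid sync_file fd */

	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	dma_fence_put(fence);

	if (ret < 0)
		return ret;
	return ret ? 0 : -ETIMEDOUT;
}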
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6bcf564..263495d0adbd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,7 @@ config MMP_TDMA
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select MMP_SRAM if ARCH_MMP
+ select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
@@ -435,6 +436,20 @@ config STE_DMA40
help
Support for ST-Ericsson DMA40 controller
+config ST_FDMA
+ tristate "ST FDMA dmaengine support"
+ depends on ARCH_STI
+ depends on REMOTEPROC
+ select ST_SLIM_REMOTEPROC
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for ST FDMA controller.
+	  It supports 16 independent DMA channels and accepts up to 32 DMA requests.
+
+ Say Y here if you have such a chipset.
+ If unsure, say N.
+
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
depends on ARCH_STM32 || COMPILE_TEST
@@ -479,7 +494,7 @@ config TEGRA20_APB_DMA
or vice versa. It does not support memory to memory data transfer.
config TEGRA210_ADMA
- bool "NVIDIA Tegra210 ADMA support"
+ tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e4dc9cac7ee8..a4fa3360e609 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c31f760..0b7c6ce629a6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ return plchan->cd == chan_id;
+}
+
/*
* Just check that the device is there and active
* TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto out_no_platdata;
}
+ } else {
+ pl08x->slave.filter.map = pl08x->pd->slave_map;
+ pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+ pl08x->slave.filter.fn = pl08x_filter_fn;
}
/* By default, AHB1 only. If dualmaster, from platform */
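A hedged sketch of how board code might feed the new filter map, assuming the slave_map/slave_map_len platform-data fields introduced alongside this hunk; device names and channel data below are hypothetical.

#include <linux/amba/pl08x.h>
#include <linux/dmaengine.h>

static struct pl08x_channel_data uart0_tx_cd = { .bus_id = "uart0_tx" };
static struct pl08x_channel_data uart0_rx_cd = { .bus_id = "uart0_rx" };

static const struct dma_slave_map board_pl08x_slave_map[] = {
	{ "uart.0", "tx", &uart0_tx_cd },
	{ "uart.0", "rx", &uart0_rx_cd },
};

/*
 * The board's struct pl08x_platform_data would then carry:
 *	.slave_map	= board_pl08x_slave_map,
 *	.slave_map_len	= ARRAY_SIZE(board_pl08x_slave_map),
 * and a client resolves its channel through the dmaengine core,
 * which ends up calling pl08x_filter_fn() with the matching param:
 */
static struct dma_chan *uart_request_tx_chan(struct device *uart_dev)
{
	return dma_request_chan(uart_dev, "tx");
}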
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80db29d..1baf3404a365 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
struct at_dma *atdma = to_at_dma(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(struct at_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */
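This hunk (and the matching at_xdmac one just below) replaces dma_pool_alloc() followed by memset() with dma_pool_zalloc(). A minimal sketch of the same conversion; the descriptor layout is illustrative.

#include <linux/dmapool.h>
#include <linux/types.h>

struct my_desc { u32 ctrl; u32 len; };		/* illustrative layout */

static struct my_desc *my_alloc_desc(struct dma_pool *pool, dma_addr_t *phys)
{
	/* dma_pool_zalloc() returns already-zeroed memory, replacing the
	 * old dma_pool_alloc() + memset(desc, 0, sizeof(*desc)) pair. */
	return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
}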
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..7d4e0bcda9af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
- u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->descs_list);
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
}
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index bac5f023013b..d5ba43a87a68 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0)
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
- /* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return error;
+ }
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return;
+ }
WARN_ON(!list_empty(&cdd->pending));
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
struct cppi41_dd *cdd = c->cdd;
int error;
- /* PM runtime paired with dmaengine_desc_get_callback_invoke */
error = pm_runtime_get(cdd->ddev.dev);
if ((error != -EINPROGRESS) && error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
error);
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
push_desc_queue(c);
else
pending_desc(c);
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static u32 get_host_pd0(u32 length)
@@ -1059,8 +1075,8 @@ err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_put_sync(dev);
err_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
@@ -1072,7 +1088,12 @@ err_get_sync:
static int cppi41_dma_remove(struct platform_device *pdev)
{
struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+ int error;
+ error = pm_runtime_get_sync(&pdev->dev);
+ if (error < 0)
+ dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
+ __func__, error);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&cdd->ddev);
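For reference, a minimal sketch of the runtime-PM pairing these cppi41 hunks converge on; the helper name is illustrative.

#include <linux/pm_runtime.h>

static int my_issue_work(struct device *dev)
{
	int error;

	error = pm_runtime_get_sync(dev);
	if (error < 0) {
		/* get_sync raises the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return error;
	}

	/* ... touch the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}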
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6149e5..451f899f74e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread {
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
+ u8 **usrcs;
u8 **dsts;
+ u8 **udsts;
enum dma_transaction_type type;
bool done;
};
@@ -431,6 +433,7 @@ static int dmatest_func(void *data)
ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
+ u8 align = 0;
set_freezable();
@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY) {
+ align = dev->copy_align;
src_cnt = dst_cnt = 1;
- else if (thread->type == DMA_SG)
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
- else if (thread->type == DMA_XOR) {
+ } else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
+ align = dev->xor_align;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+ align = dev->pq_align;
- pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
if (!pq_coefs)
goto err_thread_type;
@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
+
+ thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->usrcs)
+ goto err_usrcs;
+
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->srcs[i])
+ thread->usrcs[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->usrcs[i])
goto err_srcbuf;
+
+ /* align srcs to alignment restriction */
+ if (align)
+ thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+ else
+ thread->srcs[i] = thread->usrcs[i];
}
thread->srcs[i] = NULL;
- thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->dsts)
goto err_dsts;
+
+ thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->udsts)
+ goto err_udsts;
+
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->dsts[i])
+ thread->udsts[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->udsts[i])
goto err_dstbuf;
+
+ /* align dsts to alignment restriction */
+ if (align)
+ thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+ else
+ thread->dsts[i] = thread->udsts[i];
}
thread->dsts[i] = NULL;
@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
- u8 align = 0;
struct scatterlist tx_sg[src_cnt];
struct scatterlist rx_sg[src_cnt];
total_tests++;
- /* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
- align = dev->copy_align;
- else if (thread->type == DMA_XOR)
- align = dev->xor_align;
- else if (thread->type == DMA_PQ)
- align = dev->pq_align;
-
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)
ret = 0;
err_dstbuf:
- for (i = 0; thread->dsts[i]; i++)
- kfree(thread->dsts[i]);
+ for (i = 0; thread->udsts[i]; i++)
+ kfree(thread->udsts[i]);
+ kfree(thread->udsts);
+err_udsts:
kfree(thread->dsts);
err_dsts:
err_srcbuf:
- for (i = 0; thread->srcs[i]; i++)
- kfree(thread->srcs[i]);
+ for (i = 0; thread->usrcs[i]; i++)
+ kfree(thread->usrcs[i]);
+ kfree(thread->usrcs);
+err_usrcs:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
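A hedged sketch of the over-allocate-and-align idiom introduced above: keep the raw pointer for kfree() and hand the aligned pointer to the engine. Parameter names are invented, and align_bytes is assumed to be 0 or a power of two.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

static int my_alloc_aligned_buf(size_t buf_size, unsigned int align_bytes,
				u8 **raw, u8 **aligned)
{
	*raw = kmalloc(buf_size + align_bytes, GFP_KERNEL);
	if (!*raw)
		return -ENOMEM;

	/* keep *raw for kfree(); hand *aligned to the DMA engine */
	*aligned = align_bytes ? PTR_ALIGN(*raw, align_bytes) : *raw;
	return 0;
}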
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a613cb7a..e5adf5d1c34f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
- dwc->nollp = pdata->is_nollp;
+ dwc->nollp = !pdata->multi_block[i];
}
}
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..b1655e40cfa2 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
+ /*
+ * All known devices, which use DT for configuration, support
+ * memory-to-memory transfers. So enable it by default.
+ */
+ pdata->is_memcpy = true;
+
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
return pdata;
}
#else
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd104479f..4e0128c62704 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#define DW_DMA_MAX_NR_CHANNELS 8
+#include "internal.h"
+
#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e18a58068bca..3879f80a4815 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
if (echan->slot[0] < 0) {
dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
+ ret = echan->slot[0];
goto err_slot;
}
@@ -2450,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
int i;
s8 (*queue_priority_mapping)[2];
+ /* re initialize dummy slot to dummy param set */
+ edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
queue_priority_mapping = ecc->info->queue_priority_mapping;
/* Event queue priority mapping */
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1653a2..90d29f90acfb 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
+MODULE_DEVICE_TABLE(of, fsl_re_ids);
static struct platform_driver fsl_re_driver = {
.driver = {
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f045ed..4875fa428e81 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!chip)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
chip->dev = &pdev->dev;
chip->regs = pcim_iomap_table(pdev)[0];
chip->length = pci_resource_len(pdev, 0);
chip->offset = HSU_PCI_CHAN_OFFSET;
- chip->irq = pdev->irq;
-
- pci_enable_msi(pdev);
+ chip->irq = pci_irq_vector(pdev, 0);
ret = hsu_dma_probe(chip);
if (ret)
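A minimal sketch of the pci_alloc_irq_vectors() pattern adopted above; the helper name is illustrative and pci_free_irq_vectors() on teardown is omitted.

#include <linux/pci.h>

static int my_pci_setup_irq(struct pci_dev *pdev, int *irq)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	*irq = pci_irq_vector(pdev, 0);	/* Linux IRQ number of vector 0 */
	return 0;
}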
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1e9c55..54db1411ce73 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!len)
return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
xfer_size);
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!buf_len && !period_len)
return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
struct mdc_tx_desc *mdesc;
struct scatterlist *sg;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
unsigned int i;
if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
mdesc->list_xfer_size += xfer_size;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2bfc05..d1651a50c349 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
+ * @buf_ptail ID of the previous buffer that was processed
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
+ unsigned int buf_ptail;
unsigned int num_bd;
unsigned int period_len;
struct sdma_buffer_descriptor *bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
+ sdmac->buf_ptail = sdmac->buf_tail;
+ sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
- sdmac->buf_tail++;
- sdmac->buf_tail %= sdmac->num_bd;
-
if (error)
sdmac->status = old_status;
}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
u32 residue;
if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_tail) *
+ residue = (sdmac->num_bd - sdmac->buf_ptail) *
sdmac->period_len - sdmac->chn_real_count;
else
residue = sdmac->chn_count - sdmac->chn_real_count;
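With buf_ptail in place, the cyclic residue is computed from the previously completed buffer rather than the one currently being filled. A worked example with illustrative numbers only:

/*
 * Illustrative numbers:
 *   num_bd = 4, period_len = 1024, buf_ptail = 1, chn_real_count = 256
 *
 *   residue = (num_bd - buf_ptail) * period_len - chn_real_count
 *           = (4 - 1) * 1024 - 256
 *           = 2816 bytes still outstanding in the current cycle
 */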
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
#include "../dmaengine.h"
static char *chanerr_str[] = {
+ "DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
"Next Descriptor Address Error",
"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
"Result Guard Tag verification Error",
"Result Application Tag verification Error",
"Result Reference Tag verification Error",
- NULL
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
int i;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
if ((chanerr >> i) & 1) {
- if (chanerr_str[i]) {
- dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
- i, chanerr_str[i]);
- } else
- break;
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
}
}
}
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
- struct ioatdma_device *ioat_dma;
struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
int chunk;
dma_addr_t phys;
u8 *pos;
off_t offs;
- ioat_dma = to_ioatdma_device(chan->device);
-
chunk = idx / IOAT_DESCS_PER_2M;
idx &= (IOAT_DESCS_PER_2M - 1);
offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
- struct dmaengine_result res;
-
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- res.result = DMA_TRANS_NOERROR;
dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback = NULL;
tx->callback_result = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "mapping src buffer failed\n");
+ err = -ENOMEM;
goto free_resources;
}
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_dest)) {
dev_err(dev, "mapping dest buffer failed\n");
+ err = -ENOMEM;
goto unmap_src;
}
flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_dma))
+ if (dma_mapping_error(dev, dest_dma)) {
+ err = -ENOMEM;
goto free_resources;
+ }
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
struct dma_device *dma;
struct dma_chan *c;
struct ioatdma_chan *ioat_chan;
- bool is_raid_device = false;
int err;
u16 val16;
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (ioat_dma->cap & IOAT_CAP_XOR) {
- is_raid_device = true;
dma->max_xor = 8;
dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
}
if (ioat_dma->cap & IOAT_CAP_PQ) {
- is_raid_device = true;
dma->device_prep_dma_pq = ioat_prep_pq;
dma->device_prep_dma_pq_val = ioat_prep_pq_val;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 48fa4cf9f64a..2f3bbc88ff2a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -106,8 +106,6 @@
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
-/* IOAT1 define left for i7300_idle driver to not fail compiling */
-#define IOAT1_CHANSTS_OFFSET 0x04
#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb7934b05..01e25c68dd5a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 818255844a3c..5ba5714d0b7c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
int ret;
for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- unsigned long data;
ch = &mic_dma_dev->mic_ch[i];
- data = (unsigned long)ch;
ch->ch_num = i;
ch->owner = owner;
spin_lock_init(&ch->cleanup_lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..0cb951b743a6 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+ dma_addr_t dma_src, dma_addr_t dma_dst,
+ u32 len, struct mv_xor_desc_slot *prev)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ hw_desc->status = XOR_DESC_DMA_OWNED;
+ hw_desc->phy_next_desc = 0;
+ /* Configure for XOR with only one src address -> MEMCPY */
+ hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+ hw_desc->phy_dest_addr = dma_dst;
+ hw_desc->phy_src_addr[0] = dma_src;
+ hw_desc->byte_count = len;
+
+ if (prev) {
+ struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+ hw_prev->phy_next_desc = desc->async_tx.phys;
+ }
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ /* Enable end-of-descriptor interrupt */
+ hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
- if (async_tx_test_ack(&iter->async_tx))
+ if (async_tx_test_ack(&iter->async_tx)) {
list_move_tail(&iter->node, &mv_chan->free_slots);
+ if (!list_empty(&iter->sg_tx_list)) {
+ list_splice_tail_init(&iter->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
}
return 0;
}
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
- if (!async_tx_test_ack(&desc->async_tx))
+ if (!async_tx_test_ack(&desc->async_tx)) {
/* move this slot to the completed_slots */
list_move_tail(&desc->node, &mv_chan->completed_slots);
- else
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->completed_slots);
+ }
+ } else {
list_move_tail(&desc->node, &mv_chan->free_slots);
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
return 0;
}
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->node);
+ INIT_LIST_HEAD(&slot->sg_tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *new;
+ struct mv_xor_desc_slot *first = NULL;
+ struct mv_xor_desc_slot *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ int desc_cnt = 0;
+ int ret;
+
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+ __func__, dst_sg_len, src_sg_len, flags);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ desc_cnt++;
+ new = mv_chan_alloc_slot(mv_chan);
+ if (!new) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Out of descriptors (desc_cnt=%d)!\n",
+ desc_cnt);
+ goto err;
+ }
+
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+ if (len == 0)
+ goto fetch;
+
+ if (len < MV_XOR_MIN_BYTE_COUNT) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Transfer size of %zu too small!\n", len);
+ goto err;
+ }
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ /* Check if a new window needs to get added for 'dst' */
+ ret = mv_xor_add_io_win(mv_chan, dma_dst);
+ if (ret)
+ goto err;
+
+ /* Check if a new window needs to get added for 'src' */
+ ret = mv_xor_add_io_win(mv_chan, dma_src);
+ if (ret)
+ goto err;
+
+ /* Populate the descriptor */
+ mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+ prev = new;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Set the EOD flag in the last descriptor */
+ mv_xor_desc_config_eod(new);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+
+err:
+ /* Cleanup: Move all descriptors back into the free list */
+ spin_lock_bh(&mv_chan->lock);
+ mv_desc_clean_slot(first, mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
+
+ return NULL;
+}
+
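A hedged sketch of driving the new DMA_SG capability from a client, assuming already-built scatterlists; error unwinding (dma_unmap_sg) is omitted for brevity and the helper name is invented.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_copy_sg(struct dma_chan *chan,
		      struct scatterlist *dst_sg, int dst_nents,
		      struct scatterlist *src_sg, int src_nents)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dst_nents = dma_map_sg(dev, dst_sg, dst_nents, DMA_FROM_DEVICE);
	src_nents = dma_map_sg(dev, src_sg, src_nents, DMA_TO_DEVICE);
	if (!dst_nents || !src_nents)
		return -EIO;

	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
					      src_sg, src_nents,
					      DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}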
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
+ dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
},
};
-
-static int __init mv_xor_init(void)
-{
- return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab222a23..cf921dd6af73 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
*/
struct mv_xor_desc_slot {
struct list_head node;
+ struct list_head sg_tx_list;
enum dma_transaction_type type;
void *hw_desc;
u16 idx;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 09de71519d37..3f45b9bdf201 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -225,6 +225,8 @@ struct nbpf_channel {
struct nbpf_device {
struct dma_device dma_dev;
void __iomem *base;
+ u32 max_burst_mem_read;
+ u32 max_burst_mem_write;
struct clk *clk;
const struct nbpf_config *config;
unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+ enum dma_transfer_direction direction)
{
+ int max_burst = nbpf->config->buffer_size * 8;
+
+ if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ max_burst = min_not_zero(nbpf->max_burst_mem_read,
+ nbpf->max_burst_mem_write);
+ break;
+ case DMA_MEM_TO_DEV:
+ if (nbpf->max_burst_mem_read)
+ max_burst = nbpf->max_burst_mem_read;
+ break;
+ case DMA_DEV_TO_MEM:
+ if (nbpf->max_burst_mem_write)
+ max_burst = nbpf->max_burst_mem_write;
+ break;
+ case DMA_DEV_TO_DEV:
+ default:
+ break;
+ }
+ }
+
/* Maximum supported bursts depend on the buffer size */
- return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+ return min_t(int, __ffs(size), ilog2(max_burst));
}
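
The value returned above is a log2 transfer-unit code: __ffs() of the size bounds it by the transfer's natural alignment, and ilog2() of the effective maximum burst caps it. The same arithmetic as a standalone sketch (GCC builtins stand in for the kernel helpers; size and max_burst_bytes assumed non-zero):

#include <stddef.h>

/* Sketch: derive the log2 transfer-unit code the way nbpf_xfer_ds() does. */
static unsigned int xfer_code(size_t size, unsigned int max_burst_bytes)
{
	unsigned int align = __builtin_ctzl(size);                 /* __ffs(size)      */
	unsigned int burst = 31 - __builtin_clz(max_burst_bytes);  /* ilog2(max_burst) */

	return align < burst ? align : burst;
}

/* e.g. xfer_code(4096, 32) == 5: a 4 KiB, well-aligned transfer is limited
 * to 32-byte (2^5) units by the configured maximum burst. */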
static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
size = burst;
}
- return nbpf_xfer_ds(nbpf, size);
+ return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}
/*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
* transfers we enable the SBE bit and terminate the transfer in our
* .device_pause handler.
*/
- mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
switch (direction) {
case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
if (IS_ERR(nbpf->clk))
return PTR_ERR(nbpf->clk);
+ of_property_read_u32(np, "max-burst-mem-read",
+ &nbpf->max_burst_mem_read);
+ of_property_read_u32(np, "max-burst-mem-write",
+ &nbpf->max_burst_mem_write);
+
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4b1c54..ac68666cd3f4 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -166,6 +166,9 @@ enum {
CSDP_DST_BURST_16 = 1 << 14,
CSDP_DST_BURST_32 = 2 << 14,
CSDP_DST_BURST_64 = 3 << 14,
+ CSDP_WRITE_NON_POSTED = 0 << 16,
+ CSDP_WRITE_POSTED = 1 << 16,
+ CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
CICR_TOUT_IE = BIT(0), /* OMAP1 only */
CICR_DROP_IE = BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
c->running = true;
}
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+ int i;
+ u32 val;
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
val = omap_dma_chan_read(c, CCR);
if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
uint32_t sysconfig;
- unsigned i;
sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
- /* Wait for sDMA FIFO to drain */
- for (i = 0; ; i++) {
- val = omap_dma_chan_read(c, CCR);
- if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
- break;
-
- if (i > 100)
- break;
-
- udelay(5);
- }
-
- if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
- dev_err(c->vc.chan.device->dev,
- "DMA drain did not complete on lch %d\n",
- c->dma_ch);
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
} else {
+ if (!(val & CCR_ENABLE))
+ return -EINVAL;
+
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
+
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
}
mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
-
c->running = false;
+ return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
} else {
txstate->residue = 0;
}
+ if (ret == DMA_IN_PROGRESS && c->paused)
+ ret = DMA_PAUSED;
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
unsigned i, es, en, frame_bytes;
bool ll_failed = false;
u32 burst;
+ u32 port_window, port_window_bytes;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
+ port_window = c->cfg.src_port_window_size;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
+ port_window = c->cfg.dst_port_window_size;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
return NULL;
}
+ /* When the port_window is used, one frame must cover the window */
+ if (port_window) {
+ burst = port_window;
+ port_window_bytes = port_window * es_bytes[es];
+ }
+
/* Now allocate and setup the descriptor. */
d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
if (!d)
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr = c->ccr | CCR_SYNC_FRAME;
if (dir == DMA_DEV_TO_MEM) {
- d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ d->fi = -(port_window_bytes - 1);
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+ } else {
+ d->ccr |= CCR_SRC_AMODE_CONSTANT;
+ }
} else {
- d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+ if (port_window_bytes >= 64)
+ d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 32)
+ d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+ else if (port_window_bytes >= 16)
+ d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+ } else {
+ d->ccr |= CCR_DST_AMODE_CONSTANT;
+ }
}
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
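
To make the double-indexed setup above concrete (numbers are illustrative, and the index convention is taken from the code and comment rather than from hardware documentation): an 8-element window of 4-byte elements gives port_window_bytes = 32, so the driver programs d->ei = 1 and d->fi = -(32 - 1) = -31, stepping through the window one element at a time and walking the port address back to the start of the window after each frame.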
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_TRIGGER_SRC;
d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ if (port_window)
+ d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
}
if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
+ if (port_window && dir == DMA_MEM_TO_DEV) {
+ osg->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ osg->fi = -(port_window_bytes - 1);
+ }
if (d->using_ll) {
osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
omap_dma_stop(c);
}
- if (c->cyclic) {
- c->cyclic = false;
- c->paused = false;
- }
+ c->cyclic = false;
+ c->paused = false;
vchan_get_all_descriptors(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool can_pause = false;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (!c->paused) {
- omap_dma_stop(c);
- c->paused = true;
+ if (!c->desc)
+ goto out;
+
+ if (c->cyclic)
+ can_pause = true;
+
+ /*
+ * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+ * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+ * "When a channel is disabled during a transfer, the channel undergoes
+ * an abort, unless it is hardware-source-synchronized …".
+ * A source-synchronised channel is one where the fetching of data is
+ * under control of the device. In other words, a device-to-memory
+ * transfer. So, a destination-synchronised channel (which would be a
+ * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+ * bit is cleared.
+ * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+ * aborts immediately after completion of current read/write
+ * transactions and then the FIFO is cleaned up." The term "cleaned up"
+ * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+ * are both clear _before_ disabling the channel, otherwise data loss
+ * will occur.
+ * The problem is that if the channel is active, then device activity
+ * can result in DMA activity starting between reading those as both
+ * clear and the write to DMA_CCR to clear the enable bit hitting the
+ * hardware. If the DMA hardware can't drain the data in its FIFO to the
+ * destination, then data loss "might" occur (say if we write to a UART
+ * and the UART is not accepting any further data).
+ */
+ else if (c->desc->dir == DMA_DEV_TO_MEM)
+ can_pause = true;
+
+ if (can_pause && !c->paused) {
+ ret = omap_dma_stop(c);
+ if (!ret)
+ c->paused = true;
}
+out:
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
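
Since pause can now legitimately fail (a non-cyclic MEM_TO_DEV transfer, or a channel with nothing queued), callers should check the return value of the generic wrapper; a minimal client-side sketch, where chan is the client's own (hypothetical) channel:

#include <linux/dmaengine.h>

/* Sketch: a client pausing its channel and tolerating the new
 * "cannot pause" answer instead of treating it as fatal. */
static int try_pause(struct dma_chan *chan)
{
	int err = dmaengine_pause(chan);

	if (err == -EINVAL)	/* e.g. non-cyclic MEM_TO_DEV, nothing queued */
		return 0;	/* leave the transfer running; caller's policy */
	return err;
}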
static int omap_dma_resume(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (c->paused) {
+ if (c->paused && c->desc) {
mb();
/* Restore channel link register */
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan)
omap_dma_start(c, c->desc);
c->paused = false;
+ ret = 0;
}
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index df95727dc2fb..f9028e9d0dfc 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct pch_dma_desc *desc = to_pd_desc(txd);
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
- dma_cookie_t cookie;
spin_lock(&pd_chan->lock);
- cookie = dma_cookie_assign(txd);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, flags, &addr);
+ desc = pci_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
- memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = pd_tx_submit;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 030fe05ed43b..87fd01539fcb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((__le16 *)&buf[1]) = cpu_to_le16(val);
+ buf[1] = val;
+ buf[2] = val >> 8;
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((__le32 *)&buf[2]) = cpu_to_le32(val);
+ buf[2] = val;
+ buf[3] = val >> 8;
+ buf[4] = val >> 16;
+ buf[5] = val >> 24;
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAGO;
buf[0] |= (ns << 1);
-
buf[1] = chan & 0x7;
-
- *((__le32 *)&buf[2]) = cpu_to_le32(addr);
+ buf[2] = addr;
+ buf[3] = addr >> 8;
+ buf[4] = addr >> 16;
+ buf[5] = addr >> 24;
return SZ_DMAGO;
}
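
The rewritten emit helpers store the immediate one byte at a time instead of casting the u8 microcode buffer to __le16/__le32 pointers, which avoids unaligned accesses and endianness casts alike. Pulled out as a standalone helper (illustrative only; the kernel's put_unaligned_le32() serves the same purpose):

#include <stdint.h>

/* Sketch: alignment-safe little-endian 32-bit store into a byte buffer,
 * equivalent to what _emit_MOV() and _emit_GO() now do inline. */
static inline void emit_le32(uint8_t *buf, uint32_t val)
{
	buf[0] = val & 0xff;
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}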
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
static int pl330_add(struct pl330_dmac *pl330)
{
- void __iomem *regs;
int i, ret;
- regs = pl330->base;
-
/* Check if we can handle this DMAC */
if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
}
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+ if (!val)
+ return 0;
+
return val - addr;
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 3f56f9ca4482..b53fb618bbf6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
for (i = 0; i < pdev->nr_chans; i++) {
if (prio != (i & 0xf) >> 2)
continue;
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
phy = &pdev->phys[i];
if (!phy->vchan) {
phy->vchan = pchan;
found = phy;
- if (i < 32)
- legacy_unavailable |= BIT(i);
goto out_unlock;
}
}
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
unsigned long flags;
u32 reg;
- int i;
dev_dbg(&chan->vc.chan.dev->device,
"%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
}
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (i = 0; i < 32; i++)
- if (chan->phy == &pdev->phys[i])
- legacy_unavailable &= ~BIT(i);
chan->phy->vchan = NULL;
chan->phy = NULL;
spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
i = __ffs(dint);
dint &= (dint - 1);
phy = &pdev->phys[i];
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
- if (legacy_unavailable & (BIT(legacy_channel)))
- return -EBUSY;
- legacy_reserved ^= BIT(legacy_channel);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
module_platform_driver(pxad_driver);
MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..3c982c96b4b7 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
+#include <linux/msi.h>
#include "../dmaengine.h"
#include "hidma.h"
@@ -70,6 +71,7 @@
#define HIDMA_ERR_INFO_SW 0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC 10
+#define HIDMA_MSI_INTS 11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
return hidma_ll_inthandler(chirq, lldev);
}
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+ struct hidma_lldev **lldevp = arg;
+ struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+ return hidma_ll_inthandler_msi(chirq, *lldevp,
+ 1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
static ssize_t hidma_show_values(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
return strlen(buf);
}
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
- int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+ device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
struct device_attribute *attrs;
char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
GFP_KERNEL);
if (!attrs)
- return -ENOMEM;
+ return NULL;
name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
if (!name_copy)
- return -ENOMEM;
+ return NULL;
attrs->attr.name = name_copy;
attrs->attr.mode = mode;
attrs->show = hidma_show_values;
sysfs_attr_init(&attrs->attr);
- return device_create_file(dev->ddev.dev, attrs);
+ return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+ dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+ if (!dev->chid_attrs)
+ return -ENOMEM;
+
+ return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+ if (!desc->platform.msi_index) {
+ writel(msg->address_lo, dmadev->dev_evca + 0x118);
+ writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+ writel(msg->data, dmadev->dev_evca + 0x120);
+ }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device *dev = dmadev->ddev.dev;
+ struct msi_desc *desc;
+
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, dev)
+ devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+ platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ int rc;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+
+ rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
+ if (rc)
+ return rc;
+
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (!desc->platform.msi_index)
+ dmadev->msi_virqbase = desc->irq;
+
+ rc = devm_request_irq(&pdev->dev, desc->irq,
+ hidma_chirq_handler_msi,
+ 0, "qcom-hidma-msi",
+ &dmadev->lldev);
+ if (rc) {
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (rc) {
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(&pdev->dev, desc->irq,
+ &dmadev->lldev);
+ }
+ } else {
+ /* Add callback to free MSIs on teardown */
+ hidma_ll_setup_irq(dmadev->lldev, true);
+
+ }
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const char *of_compat;
+ int ret = -EINVAL;
+
+ if (!adev || acpi_disabled) {
+ ret = device_property_read_string(dev, "compatible",
+ &of_compat);
+ if (ret)
+ return false;
+
+ ret = strcmp(of_compat, "qcom,hidma-1.1");
+ } else {
+#ifdef CONFIG_ACPI
+ ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+ }
+ return ret == 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
void __iomem *evca;
void __iomem *trca;
int rc;
+ bool msi;
pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->ddev.device_terminate_all = hidma_terminate_all;
dmadev->ddev.copy_align = 8;
+ /*
+ * Determine the MSI capability of the platform. Old HW doesn't
+ * support MSI.
+ */
+ msi = hidma_msi_capable(&pdev->dev);
+
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
goto dmafree;
}
- rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
- "qcom-hidma", dmadev->lldev);
- if (rc)
- goto uninit;
+ platform_set_drvdata(pdev, dmadev);
+ if (msi)
+ rc = hidma_request_msi(dmadev, pdev);
+
+ if (!msi || rc) {
+ hidma_ll_setup_irq(dmadev->lldev, false);
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+ 0, "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+ }
INIT_LIST_HEAD(&dmadev->ddev.channels);
rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->irq = chirq;
tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
hidma_debug_init(dmadev);
- hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+ hidma_sysfs_init(dmadev);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
- platform_set_drvdata(pdev, dmadev);
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return 0;
uninit:
+ if (msi)
+ hidma_free_msis(dmadev);
+
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
- devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ if (!dmadev->lldev->msi_support)
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ else
+ hidma_free_msis(dmadev);
+
tasklet_kill(&dmadev->task);
+ hidma_sysfs_uninit(dmadev);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
+ {"QCOM8062"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
+ {.compatible = "qcom,hidma-1.1",},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e20716303..c7d014235c32 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
};
struct hidma_lldev {
+ bool msi_support; /* flag indicating MSI support */
bool initialized; /* initialized flag */
u8 trch_state; /* trch_state of the device */
u8 evch_state; /* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
void __iomem *evca; /* Event Channel address */
struct hidma_tre
**pending_tre_list; /* Pointers to pending TREs */
- s32 pending_tre_count; /* Number of TREs pending */
+ atomic_t pending_tre_count; /* Number of TREs pending */
void *tre_ring; /* TRE ring */
dma_addr_t tre_dma; /* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
int irq;
int chidx;
u32 nr_descriptors;
+ int msi_virqbase;
struct hidma_lldev *lldev;
void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
struct dentry *debugfs;
struct dentry *stats;
+ /* sysfs entry for the channel id */
+ struct device_attribute *chid_attrs;
+
/* Task delivering issue_pending */
struct tasklet_struct task;
};
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
void __iomem *trca, void __iomem *evca,
u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5ffd68..3bdcb8056a36 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
- seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+ seq_printf(s, "pending_tre_count=%d\n",
+ atomic_read(&lldev->pending_tre_count));
seq_printf(s, "evca=%p\n", lldev->evca);
seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
void hidma_debug_uninit(struct hidma_dev *dmadev)
{
debugfs_remove_recursive(dmadev->debugfs);
- debugfs_remove_recursive(dmadev->stats);
}
int hidma_debug_init(struct hidma_dev *dmadev)
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24c577b..6645bdf0d151 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
}
}
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
- u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
{
struct hidma_tre *tre;
unsigned long flags;
+ u32 tre_iterator;
spin_lock_irqsave(&lldev->lock, flags);
+
+ tre_iterator = lldev->tre_processed_off;
tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
if (!tre) {
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
* Keep track of pending TREs that SW is expecting to receive
* from HW. We got one now. Decrement our counter.
*/
- lldev->pending_tre_count--;
- if (lldev->pending_tre_count < 0) {
+ if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
dev_warn(lldev->dev, "tre count mismatch on completion");
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
}
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ lldev->tre_ring_size);
+ lldev->tre_processed_off = tre_iterator;
spin_unlock_irqrestore(&lldev->lock, flags);
tre->err_info = err_info;
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
u32 evre_ring_size = lldev->evre_ring_size;
- u32 tre_ring_size = lldev->tre_ring_size;
u32 err_info, err_code, evre_write_off;
- u32 tre_iterator, evre_iterator;
+ u32 evre_iterator;
u32 num_completed = 0;
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
- tre_iterator = lldev->tre_processed_off;
evre_iterator = lldev->evre_processed_off;
if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
err_code =
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
evre_ring_size);
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
evre_write_off =
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
num_completed++;
+
+ /*
+ * An error interrupt might have arrived while we are processing
+ * the completed interrupt.
+ */
+ if (!hidma_ll_isenabled(lldev))
+ break;
}
if (num_completed) {
u32 evre_read_off = (lldev->evre_processed_off +
HIDMA_EVRE_SIZE * num_completed);
- u32 tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
evre_read_off = evre_read_off % evre_ring_size;
- tre_read_off = tre_read_off % tre_ring_size;
-
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
/* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
lldev->evre_processed_off = evre_read_off;
}
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
u8 err_code)
{
- u32 tre_iterator;
- u32 tre_ring_size = lldev->tre_ring_size;
- int num_completed = 0;
- u32 tre_read_off;
-
- tre_iterator = lldev->tre_processed_off;
- while (lldev->pending_tre_count) {
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ while (atomic_read(&lldev->pending_tre_count)) {
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
- num_completed++;
}
- tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
- tre_read_off = tre_read_off % tre_ring_size;
-
- /* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
}
static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
* requests traditionally to the destination, this concept does not apply
* here for this HW.
*/
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
- struct hidma_lldev *lldev = arg;
- u32 status;
- u32 enable;
- u32 cause;
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* Driver completes the txn and intimates the client.*/
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
/*
* Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
* read and write accessors are used for performance reasons due to
* interrupt delivery guarantees. Do not copy this code blindly and
* expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
*/
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
cause = status & enable;
while (cause) {
- if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, disabling...\n",
- cause);
-
- /* Clear out pending interrupts */
- writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
- /* No further submissions. */
- hidma_ll_disable(lldev);
-
- /* Driver completes the txn and intimates the client.*/
- hidma_cleanup_pending_tre(lldev, 0xFF,
- HIDMA_EVRE_STATUS_ERROR);
- goto out;
- }
-
- /*
- * Try to consume as many EVREs as possible.
- */
- hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ hidma_ll_int_handler_internal(lldev, cause);
/*
* Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
cause = status & enable;
}
-out:
+ return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+ struct hidma_lldev *lldev = arg;
+
+ hidma_ll_int_handler_internal(lldev, cause);
return IRQ_HANDLED;
}
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
tre->err_code = 0;
tre->err_info = 0;
tre->queued = 1;
- lldev->pending_tre_count++;
+ atomic_inc(&lldev->pending_tre_count);
lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
% lldev->tre_ring_size;
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
u32 val;
int ret;
- val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
- lldev->evch_state = HIDMA_CH_STATE(val);
- val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
- lldev->trch_state = HIDMA_CH_STATE(val);
-
- /* already suspended by this OS */
- if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
- (lldev->evch_state == HIDMA_CH_SUSPENDED))
- return 0;
-
- /* already stopped by the manager */
- if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
- (lldev->evch_state == HIDMA_CH_STOPPED))
+ /* The channel needs to be in working state */
+ if (!hidma_ll_isenabled(lldev))
return 0;
val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
u32 val;
u32 nr_tres = lldev->nr_tres;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_processed_off = 0;
lldev->evre_processed_off = 0;
lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
writel(HIDMA_EVRE_SIZE * nr_tres,
lldev->evca + HIDMA_EVCA_RING_LEN_REG);
- /* support IRQ only for now */
+ /* configure interrupts */
+ hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+ rc = hidma_ll_enable(lldev);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+ u32 val;
+
+ lldev->msi_support = msi;
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* support IRQ by default */
val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
val &= ~0xF;
- val |= 0x1;
+ if (!lldev->msi_support)
+ val = val | 0x1;
writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
/* clear all pending interrupts and enable them */
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
- return hidma_ll_enable(lldev);
}
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
tasklet_kill(&lldev->task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_write_offset = 0;
rc = hidma_ll_reset(lldev);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e466083..f847d32cc4b5 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
{"QCOM8060"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
+ of_node_get(child);
+ new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child);
-
+ /*
+ * It is assumed that calling of_msi_configure is safe on
+ * platforms with or without MSI support.
+ */
+ of_msi_configure(&new_pdev->dev, child);
+ of_node_put(child);
kfree(res);
res = NULL;
}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
- of_node_put(child);
}
#endif
platform_driver_register(&hidma_mgmt_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 3c579abbabb7..f04c4702d98b 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -289,16 +289,11 @@ static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata;
struct s3c24xx_dma_phy *phy = NULL;
unsigned long flags;
int i;
int ret;
- if (s3cchan->slave)
- cdata = &pdata->channels[s3cchan->id];
-
for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
phy = &s3cdma->phy_chans[i];
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc38cee0..72c649713ace 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
- struct usb_dmac_chan *uchan;
struct dma_chan *chan;
dma_cap_mask_t mask;
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
if (!chan)
return NULL;
- uchan = to_usb_dmac_chan(chan);
-
return chan;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8f62edad51be..a0733ac3edb1 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
struct sirfsoc_dma_chan *schan;
int ch;
int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
schan = &sdma->channels[ch];
if (list_empty(&schan->active))
continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
save->ctrl[ch] = readl_relaxed(sdma->base +
ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
}
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
new file mode 100644
index 000000000000..bfb79bd0c6de
--- /dev/null
+++ b/drivers/dma/st_fdma.c
@@ -0,0 +1,889 @@
+/*
+ * DMA driver for STMicroelectronics STi FDMA controller
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/remoteproc.h>
+
+#include "st_fdma.h"
+
+static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct st_fdma_chan, vchan.chan);
+}
+
+static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct st_fdma_desc, vdesc);
+}
+
+static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
+{
+ struct st_fdma_dev *fdev = fchan->fdev;
+ u32 req_line_cfg = fchan->cfg.req_line;
+ u32 dreq_line;
+ int try = 0;
+
+ /*
+ * dreq_mask is shared for n channels of fdma, so all accesses must be
+ * atomic. If the dreq_mask is changed between ffz and set_bit,
+ * we retry.
+ */
+ do {
+ if (fdev->dreq_mask == ~0L) {
+ dev_err(fdev->dev, "No req lines available\n");
+ return -EINVAL;
+ }
+
+ if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
+ dev_err(fdev->dev, "Invalid or used req line\n");
+ return -EINVAL;
+ } else {
+ dreq_line = req_line_cfg;
+ }
+
+ try++;
+ } while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
+
+ dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
+ dreq_line, fdev->dreq_mask);
+
+ return dreq_line;
+}
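
The claim/retry loop above exists because another channel can grab the requested line between the availability check and the claim; the core of it is an atomic test-and-set over a shared bitmap. A reduced kernel-style sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch: atomically claim request line 'line' from a shared bitmap of
 * nr_lines lines. Returns 0 on success, -EBUSY if another channel won the
 * race (the caller above then fails rather than retrying a different line,
 * since the line number is fixed by the binding). */
static int claim_req_line(unsigned long *mask, unsigned int line,
			  unsigned int nr_lines)
{
	if (line >= nr_lines)
		return -EINVAL;
	if (test_and_set_bit(line, mask))
		return -EBUSY;
	return 0;
}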
+
+static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
+{
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
+ clear_bit(fchan->dreq_line, &fdev->dreq_mask);
+}
+
+static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
+{
+ struct virt_dma_desc *vdesc;
+ unsigned long nbytes, ch_cmd, cmd;
+
+ vdesc = vchan_next_desc(&fchan->vchan);
+ if (!vdesc)
+ return;
+
+ fchan->fdesc = to_st_fdma_desc(vdesc);
+ nbytes = fchan->fdesc->node[0].desc->nbytes;
+ cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
+ ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
+
+ /* start the channel for the descriptor */
+ fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
+ fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
+ writel(cmd,
+ fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);
+
+ dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
+}
+
+static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
+ unsigned long int_sta)
+{
+ unsigned long ch_sta, ch_err;
+ int ch_id = fchan->vchan.chan.chan_id;
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
+ ch_sta &= FDMA_CH_CMD_STA_MASK;
+
+ if (int_sta & FDMA_INT_STA_ERR) {
+ dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
+ fchan->status = DMA_ERROR;
+ return;
+ }
+
+ switch (ch_sta) {
+ case FDMA_CH_CMD_STA_PAUSED:
+ fchan->status = DMA_PAUSED;
+ break;
+
+ case FDMA_CH_CMD_STA_RUNNING:
+ fchan->status = DMA_IN_PROGRESS;
+ break;
+ }
+}
+
+static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
+{
+ struct st_fdma_dev *fdev = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ struct st_fdma_chan *fchan = &fdev->chans[0];
+ unsigned long int_sta, clr;
+
+ int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
+ clr = int_sta;
+
+ for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
+ if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
+ continue;
+
+ spin_lock(&fchan->vchan.lock);
+ st_fdma_ch_sta_update(fchan, int_sta);
+
+ if (fchan->fdesc) {
+ if (!fchan->fdesc->iscyclic) {
+ list_del(&fchan->fdesc->vdesc.node);
+ vchan_cookie_complete(&fchan->fdesc->vdesc);
+ fchan->fdesc = NULL;
+ fchan->status = DMA_COMPLETE;
+ } else {
+ vchan_cyclic_callback(&fchan->fdesc->vdesc);
+ }
+
+ /* Start the next descriptor (if available) */
+ if (!fchan->fdesc)
+ st_fdma_xfer_desc(fchan);
+ }
+
+ spin_unlock(&fchan->vchan.lock);
+ ret = IRQ_HANDLED;
+ }
+
+ fdma_write(fdev, clr, FDMA_INT_CLR_OFST);
+
+ return ret;
+}
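
The handler walks FDMA_INT_STA two bits at a time, one pair per channel; decoded in isolation (the completion/error split within each pair is assumed from the masks used above, so treat the bit positions as illustrative):

#include <linux/printk.h>

/* Sketch: walk a packed per-channel status word the way the handler does,
 * two bits per channel. Bit 0 of each pair is taken to be the completion
 * bit and bit 1 the error bit -- an assumption mirroring the masks above. */
static void decode_int_sta(unsigned long int_sta, unsigned int nr_chans)
{
	unsigned int ch;

	for (ch = 0; ch < nr_chans && int_sta; ch++, int_sta >>= 2) {
		if (int_sta & 0x2)
			pr_debug("fdma: chan %u raised an error\n", ch);
		else if (int_sta & 0x1)
			pr_debug("fdma: chan %u completed a node\n", ch);
	}
}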
+
+static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct st_fdma_dev *fdev = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct st_fdma_chan *fchan;
+ int ret;
+
+ if (dma_spec->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ if (fdev->dma_device.dev->of_node != dma_spec->np)
+ return ERR_PTR(-EINVAL);
+
+ ret = rproc_boot(fdev->slim_rproc->rproc);
+ if (ret == -ENOENT)
+ return ERR_PTR(-EPROBE_DEFER);
+ else if (ret)
+ return ERR_PTR(ret);
+
+ chan = dma_get_any_slave_channel(&fdev->dma_device);
+ if (!chan)
+ goto err_chan;
+
+ fchan = to_st_fdma_chan(chan);
+
+ fchan->cfg.of_node = dma_spec->np;
+ fchan->cfg.req_line = dma_spec->args[0];
+ fchan->cfg.req_ctrl = 0;
+ fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;
+
+ if (dma_spec->args_count > 1)
+ fchan->cfg.req_ctrl = dma_spec->args[1]
+ & FDMA_REQ_CTRL_CFG_MASK;
+
+ if (dma_spec->args_count > 2)
+ fchan->cfg.type = dma_spec->args[2];
+
+ if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
+ fchan->dreq_line = 0;
+ } else {
+ fchan->dreq_line = st_fdma_dreq_get(fchan);
+ if (IS_ERR_VALUE(fchan->dreq_line)) {
+ chan = ERR_PTR(fchan->dreq_line);
+ goto err_chan;
+ }
+ }
+
+ dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
+ fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);
+
+ return chan;
+
+err_chan:
+ rproc_shutdown(fdev->slim_rproc->rproc);
+ return chan;
+
+}
+
+static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct st_fdma_desc *fdesc;
+ int i;
+
+ fdesc = to_st_fdma_desc(vdesc);
+ for (i = 0; i < fdesc->n_nodes; i++)
+ dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
+ fdesc->node[i].pdesc);
+ kfree(fdesc);
+}
+
+static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
+ int sg_len)
+{
+ struct st_fdma_desc *fdesc;
+ int i;
+
+ fdesc = kzalloc(sizeof(*fdesc) +
+ sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+ if (!fdesc)
+ return NULL;
+
+ fdesc->fchan = fchan;
+ fdesc->n_nodes = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
+ GFP_NOWAIT, &fdesc->node[i].pdesc);
+ if (!fdesc->node[i].desc)
+ goto err;
+ }
+ return fdesc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
+ fdesc->node[i].pdesc);
+ kfree(fdesc);
+ return NULL;
+}
+
+static int st_fdma_alloc_chan_res(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+ /* Create the dma pool for descriptor allocation */
+ fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
+ fchan->fdev->dev,
+ sizeof(struct st_fdma_hw_node),
+ __alignof__(struct st_fdma_hw_node),
+ 0);
+
+ if (!fchan->node_pool) {
+ dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
+ fchan->vchan.chan.chan_id, fchan->cfg.type);
+
+ return 0;
+}
+
+static void st_fdma_free_chan_res(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
+ unsigned long flags;
+
+ LIST_HEAD(head);
+
+ dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
+ __func__, fchan->vchan.chan.chan_id);
+
+ if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
+ st_fdma_dreq_put(fchan);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ fchan->fdesc = NULL;
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ dma_pool_destroy(fchan->node_pool);
+ fchan->node_pool = NULL;
+ memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
+
+ rproc_shutdown(rproc);
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ struct st_fdma_hw_node *hw_node;
+
+ if (!len)
+ return NULL;
+
+ fchan = to_st_fdma_chan(chan);
+
+ /* We only require a single descriptor */
+ fdesc = st_fdma_alloc_desc(fchan, 1);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ hw_node = fdesc->node[0].desc;
+ hw_node->next = 0;
+ hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
+ hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+ hw_node->nbytes = len;
+ hw_node->saddr = src;
+ hw_node->daddr = dst;
+ hw_node->generic.length = len;
+ hw_node->generic.sstride = 0;
+ hw_node->generic.dstride = 0;
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
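
For reference, a consumer exercises this memcpy path through the generic dmaengine client API rather than calling the driver directly; a minimal sketch, with chan already requested, dst/src being DMA addresses obtained elsewhere, and callback names hypothetical:

#include <linux/dmaengine.h>

/* Sketch of a generic memcpy client; dst/src come from dma_map_single()
 * or a coherent allocation, done_cb/ctx are the client's own. */
static int submit_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		       size_t len, dma_async_tx_callback done_cb, void *ctx)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = done_cb;
	tx->callback_param = ctx;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}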
+
+static int config_reqctrl(struct st_fdma_chan *fchan,
+ enum dma_transfer_direction direction)
+{
+ u32 maxburst = 0, addr = 0;
+ enum dma_slave_buswidth width;
+ int ch_id = fchan->vchan.chan.chan_id;
+ struct st_fdma_dev *fdev = fchan->fdev;
+
+ switch (direction) {
+
+ case DMA_DEV_TO_MEM:
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
+ maxburst = fchan->scfg.src_maxburst;
+ width = fchan->scfg.src_addr_width;
+ addr = fchan->scfg.src_addr;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
+ maxburst = fchan->scfg.dst_maxburst;
+ width = fchan->scfg.dst_addr_width;
+ addr = fchan->scfg.dst_addr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;
+
+ switch (width) {
+
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
+ fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1);
+ dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);
+
+ fchan->cfg.dev_addr = addr;
+ fchan->cfg.dir = direction;
+
+ dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
+ ch_id, addr, fchan->cfg.req_ctrl);
+
+ return 0;
+}
+
+static void fill_hw_node(struct st_fdma_hw_node *hw_node,
+ struct st_fdma_chan *fchan,
+ enum dma_transfer_direction direction)
+{
+ if (direction == DMA_MEM_TO_DEV) {
+ hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+ hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
+ hw_node->daddr = fchan->cfg.dev_addr;
+ } else {
+ hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
+ hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+ hw_node->saddr = fchan->cfg.dev_addr;
+ }
+
+ hw_node->generic.sstride = 0;
+ hw_node->generic.dstride = 0;
+}
+
+static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
+ size_t len, enum dma_transfer_direction direction)
+{
+ struct st_fdma_chan *fchan;
+
+ if (!chan || !len)
+ return NULL;
+
+ fchan = to_st_fdma_chan(chan);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(fchan->fdev->dev, "bad direction?\n");
+ return NULL;
+ }
+
+ return fchan;
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ int sg_len, i;
+
+ fchan = st_fdma_prep_common(chan, len, direction);
+ if (!fchan)
+ return NULL;
+
+ if (!period_len)
+ return NULL;
+
+ if (config_reqctrl(fchan, direction)) {
+ dev_err(fchan->fdev->dev, "bad width or direction\n");
+ return NULL;
+ }
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0) {
+ dev_err(fchan->fdev->dev, "len is not multiple of period\n");
+ return NULL;
+ }
+
+ sg_len = len / period_len;
+ fdesc = st_fdma_alloc_desc(fchan, sg_len);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ fdesc->iscyclic = true;
+
+ for (i = 0; i < sg_len; i++) {
+ struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
+
+ hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+
+ hw_node->control =
+ FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+ fill_hw_node(hw_node, fchan, direction);
+
+ if (direction == DMA_MEM_TO_DEV)
+ hw_node->saddr = buf_addr + (i * period_len);
+ else
+ hw_node->daddr = buf_addr + (i * period_len);
+
+ hw_node->nbytes = period_len;
+ hw_node->generic.length = period_len;
+ }
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
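
A cyclic user (an audio-style ring buffer, say) hands the whole buffer plus a period length to the generic helper; sketch only, with dmaengine_slave_config() assumed done beforehand and all names hypothetical:

#include <linux/dmaengine.h>

/* Sketch: start a device-to-memory ring of 'periods' periods, each
 * 'period_len' bytes, with a per-period callback. */
static int start_rx_ring(struct dma_chan *chan, dma_addr_t buf,
			 unsigned int periods, size_t period_len,
			 dma_async_tx_callback period_cb, void *ctx)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, periods * period_len,
				       period_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	tx->callback = period_cb;
	tx->callback_param = ctx;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}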
+
+static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct st_fdma_chan *fchan;
+ struct st_fdma_desc *fdesc;
+ struct st_fdma_hw_node *hw_node;
+ struct scatterlist *sg;
+ int i;
+
+ fchan = st_fdma_prep_common(chan, sg_len, direction);
+ if (!fchan)
+ return NULL;
+
+ if (!sgl)
+ return NULL;
+
+ fdesc = st_fdma_alloc_desc(fchan, sg_len);
+ if (!fdesc) {
+ dev_err(fchan->fdev->dev, "no memory for desc\n");
+ return NULL;
+ }
+
+ fdesc->iscyclic = false;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ hw_node = fdesc->node[i].desc;
+
+ hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+ hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+
+ fill_hw_node(hw_node, fchan, direction);
+
+ if (direction == DMA_MEM_TO_DEV)
+ hw_node->saddr = sg_dma_address(sg);
+ else
+ hw_node->daddr = sg_dma_address(sg);
+
+ hw_node->nbytes = sg_dma_len(sg);
+ hw_node->generic.length = sg_dma_len(sg);
+ }
+
+ /* interrupt at end of last node */
+ hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+ return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
+ struct virt_dma_desc *vdesc,
+ bool in_progress)
+{
+ struct st_fdma_desc *fdesc = fchan->fdesc;
+ size_t residue = 0;
+ dma_addr_t cur_addr = 0;
+ int i;
+
+ if (in_progress) {
+ cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ cur_addr &= FDMA_CH_CMD_DATA_MASK;
+ }
+
+ for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
+ if (cur_addr == fdesc->node[i].pdesc) {
+ residue += fnode_read(fchan, FDMA_CNTN_OFST);
+ break;
+ }
+ residue += fdesc->node[i].desc->nbytes;
+ }
+
+ return residue;
+}
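
The residue walk above scans nodes from the tail toward the head until it hits the node the engine is currently executing, then adds that node's remaining count from FDMA_CNTN. Worked through with hypothetical numbers: three 4 KiB nodes, with the engine part-way through node 1 and 1024 bytes left there, gives residue = 4096 (node 2, untouched) + 1024 (node 1, in flight) = 5120 bytes; node 0 has already completed and is never reached by the loop.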
+
+static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ vd = vchan_find_desc(&fchan->vchan, cookie);
+ if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
+ txstate->residue = st_fdma_desc_residue(fchan, vd, true);
+ else if (vd)
+ txstate->residue = st_fdma_desc_residue(fchan, vd, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return ret;
+}
+
+static void st_fdma_issue_pending(struct dma_chan *chan)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
+ st_fdma_xfer_desc(fchan);
+
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+}
+
+static int st_fdma_pause(struct dma_chan *chan)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+ unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+ dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ if (fchan->fdesc)
+ fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int st_fdma_resume(struct dma_chan *chan)
+{
+ unsigned long flags;
+ unsigned long val;
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+
+ dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ if (fchan->fdesc) {
+ val = fchan_read(fchan, FDMA_CH_CMD_OFST);
+ val &= FDMA_CH_CMD_DATA_MASK;
+ fchan_write(fchan, val, FDMA_CH_CMD_OFST);
+ }
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int st_fdma_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+ int ch_id = fchan->vchan.chan.chan_id;
+ unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+ dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
+
+ spin_lock_irqsave(&fchan->vchan.lock, flags);
+ fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+ fchan->fdesc = NULL;
+ vchan_get_all_descriptors(&fchan->vchan, &head);
+ spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fchan->vchan, &head);
+
+ return 0;
+}
+
+static int st_fdma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *slave_cfg)
+{
+ struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+ memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
+ return 0;
+}
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
+ .name = "STiH407",
+ .id = 0,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
+ .name = "STiH407",
+ .id = 1,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
+ .name = "STiH407",
+ .id = 2,
+};
+
+static const struct of_device_id st_fdma_match[] = {
+	{ .compatible = "st,stih407-fdma-mpe31-11",
+	  .data = &fdma_mpe31_stih407_11 },
+	{ .compatible = "st,stih407-fdma-mpe31-12",
+	  .data = &fdma_mpe31_stih407_12 },
+	{ .compatible = "st,stih407-fdma-mpe31-13",
+	  .data = &fdma_mpe31_stih407_13 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_fdma_match);
+
+static int st_fdma_parse_dt(struct platform_device *pdev,
+ const struct st_fdma_driverdata *drvdata,
+ struct st_fdma_dev *fdev)
+{
+ snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
+ drvdata->name, drvdata->id);
+
+ return of_property_read_u32(pdev->dev.of_node, "dma-channels",
+ &fdev->nr_channels);
+}
+#define FDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void st_fdma_free(struct st_fdma_dev *fdev)
+{
+ struct st_fdma_chan *fchan;
+ int i;
+
+ for (i = 0; i < fdev->nr_channels; i++) {
+ fchan = &fdev->chans[i];
+ list_del(&fchan->vchan.chan.device_node);
+ tasklet_kill(&fchan->vchan.task);
+ }
+}
+
+static int st_fdma_probe(struct platform_device *pdev)
+{
+ struct st_fdma_dev *fdev;
+ const struct of_device_id *match;
+ struct device_node *np = pdev->dev.of_node;
+ const struct st_fdma_driverdata *drvdata;
+ int ret, i;
+
+	match = of_match_device(st_fdma_match, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ drvdata = match->data;
+
+ fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return -ENOMEM;
+
+ ret = st_fdma_parse_dt(pdev, drvdata, fdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to find platform data\n");
+ goto err;
+ }
+
+ fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
+ sizeof(struct st_fdma_chan), GFP_KERNEL);
+ if (!fdev->chans)
+ return -ENOMEM;
+
+ fdev->dev = &pdev->dev;
+ fdev->drvdata = drvdata;
+ platform_set_drvdata(pdev, fdev);
+
+ fdev->irq = platform_get_irq(pdev, 0);
+ if (fdev->irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
+ dev_name(&pdev->dev), fdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
+ goto err;
+ }
+
+ fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
+ if (IS_ERR(fdev->slim_rproc)) {
+ ret = PTR_ERR(fdev->slim_rproc);
+ dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
+ goto err;
+ }
+
+ /* Initialise list of FDMA channels */
+ INIT_LIST_HEAD(&fdev->dma_device.channels);
+ for (i = 0; i < fdev->nr_channels; i++) {
+ struct st_fdma_chan *fchan = &fdev->chans[i];
+
+ fchan->fdev = fdev;
+ fchan->vchan.desc_free = st_fdma_free_desc;
+ vchan_init(&fchan->vchan, &fdev->dma_device);
+ }
+
+ /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
+ fdev->dreq_mask = BIT(0) | BIT(31);
+
+ dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
+
+ fdev->dma_device.dev = &pdev->dev;
+ fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
+ fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
+ fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
+ fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
+ fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
+ fdev->dma_device.device_tx_status = st_fdma_tx_status;
+ fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
+ fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
+ fdev->dma_device.device_config = st_fdma_slave_config;
+ fdev->dma_device.device_pause = st_fdma_pause;
+ fdev->dma_device.device_resume = st_fdma_resume;
+
+ fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
+ fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
+ fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ ret = dma_async_device_register(&fdev->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register DMA device (%d)\n", ret);
+ goto err_rproc;
+ }
+
+ ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register controller (%d)\n", ret);
+ goto err_dma_dev;
+ }
+
+ dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
+
+ return 0;
+
+err_dma_dev:
+ dma_async_device_unregister(&fdev->dma_device);
+err_rproc:
+ st_fdma_free(fdev);
+ st_slim_rproc_put(fdev->slim_rproc);
+err:
+ return ret;
+}
+
+static int st_fdma_remove(struct platform_device *pdev)
+{
+ struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
+
+ devm_free_irq(&pdev->dev, fdev->irq, fdev);
+ st_slim_rproc_put(fdev->slim_rproc);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&fdev->dma_device);
+
+ return 0;
+}
+
+static struct platform_driver st_fdma_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = st_fdma_match,
+ },
+ .probe = st_fdma_probe,
+ .remove = st_fdma_remove,
+};
+module_platform_driver(st_fdma_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
+MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_ALIAS("platform:" DRIVER_NAME);
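
The dma_device callbacks wired up in st_fdma_probe() are only ever reached through the generic dmaengine client API. The sketch below is illustrative and not part of this patch: it assumes a hypothetical client device whose device tree node carries a dmas/dma-names entry named "tx" routed to this controller, and notes which driver callback each client call lands in.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical client: queue one scatterlist towards a device FIFO. */
static int example_push_sg(struct device *dev, struct scatterlist *sgl,
			   unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "tx");	/* resolved via st_fdma_of_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);	/* -> st_fdma_slave_config() */

	/* -> st_fdma_prep_slave_sg() */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* -> st_fdma_issue_pending() */

	/* Progress can later be queried through st_fdma_tx_status(). */
	return dma_submit_error(cookie);
}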
diff --git a/drivers/dma/st_fdma.h b/drivers/dma/st_fdma.h
new file mode 100644
index 000000000000..c58e00d4ab37
--- /dev/null
+++ b/drivers/dma/st_fdma.h
@@ -0,0 +1,249 @@
+/*
+ * DMA driver header for STMicroelectronics STi FDMA controller
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DMA_ST_FDMA_H
+#define __DMA_ST_FDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/remoteproc/st_slim_rproc.h>
+#include "virt-dma.h"
+
+#define ST_FDMA_NR_DREQS 32
+#define FW_NAME_SIZE 30
+#define DRIVER_NAME "st-fdma"
+
+/**
+ * struct st_fdma_generic_node - Free running/paced generic node
+ *
+ * @length: Length in bytes of a line in a 2D mem to mem
+ * @sstride: Stride, in bytes, between source lines in a 2D data move
+ * @dstride: Stride, in bytes, between destination lines in a 2D data move
+ */
+struct st_fdma_generic_node {
+ u32 length;
+ u32 sstride;
+ u32 dstride;
+};
+
+/**
+ * struct st_fdma_hw_node - Node structure used by fdma hw
+ *
+ * @next: Pointer to next node
+ * @control: Transfer Control Parameters
+ * @nbytes: Number of bytes to transfer
+ * @saddr: Source address
+ * @daddr: Destination address
+ *
+ * @generic: generic node for free running/paced transfer types
+ *           (two other transfer types are possible, but not yet implemented)
+ *
+ * The node structures must be aligned to a 32-byte boundary
+ */
+struct st_fdma_hw_node {
+ u32 next;
+ u32 control;
+ u32 nbytes;
+ u32 saddr;
+ u32 daddr;
+ union {
+ struct st_fdma_generic_node generic;
+ };
+} __aligned(32);
+
+/*
+ * node control parameters
+ */
+#define FDMA_NODE_CTRL_REQ_MAP_MASK GENMASK(4, 0)
+#define FDMA_NODE_CTRL_REQ_MAP_FREE_RUN 0x0
+#define FDMA_NODE_CTRL_REQ_MAP_DREQ(n)	((n) & FDMA_NODE_CTRL_REQ_MAP_MASK)
+#define FDMA_NODE_CTRL_REQ_MAP_EXT FDMA_NODE_CTRL_REQ_MAP_MASK
+#define FDMA_NODE_CTRL_SRC_MASK GENMASK(6, 5)
+#define FDMA_NODE_CTRL_SRC_STATIC BIT(5)
+#define FDMA_NODE_CTRL_SRC_INCR BIT(6)
+#define FDMA_NODE_CTRL_DST_MASK GENMASK(8, 7)
+#define FDMA_NODE_CTRL_DST_STATIC BIT(7)
+#define FDMA_NODE_CTRL_DST_INCR BIT(8)
+#define FDMA_NODE_CTRL_SECURE BIT(15)
+#define FDMA_NODE_CTRL_PAUSE_EON BIT(30)
+#define FDMA_NODE_CTRL_INT_EON BIT(31)
+
+/**
+ * struct st_fdma_sw_node - descriptor structure for link list
+ *
+ * @pdesc: Physical (DMA) address of the hw node
+ * @desc: CPU pointer to the hw node
+ */
+struct st_fdma_sw_node {
+ dma_addr_t pdesc;
+ struct st_fdma_hw_node *desc;
+};
+
+#define NAME_SZ 10
+
+struct st_fdma_driverdata {
+ u32 id;
+ char name[NAME_SZ];
+};
+
+struct st_fdma_desc {
+ struct virt_dma_desc vdesc;
+ struct st_fdma_chan *fchan;
+ bool iscyclic;
+ unsigned int n_nodes;
+ struct st_fdma_sw_node node[];
+};
+
+enum st_fdma_type {
+ ST_FDMA_TYPE_FREE_RUN,
+ ST_FDMA_TYPE_PACED,
+};
+
+struct st_fdma_cfg {
+ struct device_node *of_node;
+ enum st_fdma_type type;
+ dma_addr_t dev_addr;
+ enum dma_transfer_direction dir;
+ int req_line; /* request line */
+ long req_ctrl; /* Request control */
+};
+
+struct st_fdma_chan {
+ struct st_fdma_dev *fdev;
+ struct dma_pool *node_pool;
+ struct dma_slave_config scfg;
+ struct st_fdma_cfg cfg;
+
+ int dreq_line;
+
+ struct virt_dma_chan vchan;
+ struct st_fdma_desc *fdesc;
+ enum dma_status status;
+};
+
+struct st_fdma_dev {
+ struct device *dev;
+ const struct st_fdma_driverdata *drvdata;
+ struct dma_device dma_device;
+
+ struct st_slim_rproc *slim_rproc;
+
+ int irq;
+
+ struct st_fdma_chan *chans;
+
+ spinlock_t dreq_lock;
+ unsigned long dreq_mask;
+
+ u32 nr_channels;
+ char fw_name[FW_NAME_SIZE];
+};
+
+/* Peripheral Registers */
+
+#define FDMA_CMD_STA_OFST 0xFC0
+#define FDMA_CMD_SET_OFST 0xFC4
+#define FDMA_CMD_CLR_OFST 0xFC8
+#define FDMA_CMD_MASK_OFST 0xFCC
+#define FDMA_CMD_START(ch) (0x1 << (ch << 1))
+#define FDMA_CMD_PAUSE(ch) (0x2 << (ch << 1))
+#define FDMA_CMD_FLUSH(ch) (0x3 << (ch << 1))
+
+#define FDMA_INT_STA_OFST 0xFD0
+#define FDMA_INT_STA_CH 0x1
+#define FDMA_INT_STA_ERR 0x2
+
+#define FDMA_INT_SET_OFST 0xFD4
+#define FDMA_INT_CLR_OFST 0xFD8
+#define FDMA_INT_MASK_OFST 0xFDC
+
+#define fdma_read(fdev, name) \
+ readl((fdev)->slim_rproc->peri + name)
+
+#define fdma_write(fdev, val, name) \
+ writel((val), (fdev)->slim_rproc->peri + name)
+
+/* fchan interface (dmem) */
+#define FDMA_CH_CMD_OFST 0x200
+#define FDMA_CH_CMD_STA_MASK GENMASK(1, 0)
+#define FDMA_CH_CMD_STA_IDLE (0x0)
+#define FDMA_CH_CMD_STA_START (0x1)
+#define FDMA_CH_CMD_STA_RUNNING (0x2)
+#define FDMA_CH_CMD_STA_PAUSED (0x3)
+#define FDMA_CH_CMD_ERR_MASK GENMASK(4, 2)
+#define FDMA_CH_CMD_ERR_INT (0x0 << 2)
+#define FDMA_CH_CMD_ERR_NAND (0x1 << 2)
+#define FDMA_CH_CMD_ERR_MCHI (0x2 << 2)
+#define FDMA_CH_CMD_DATA_MASK GENMASK(31, 5)
+#define fchan_read(fchan, name) \
+ readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * 0x4 \
+ + name)
+
+#define fchan_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * 0x4 \
+ + name)
+
+/* req interface */
+#define FDMA_REQ_CTRL_OFST 0x240
+#define dreq_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + fchan->dreq_line * 0x04 \
+ + name)
+/* node interface */
+#define FDMA_NODE_SZ 128
+#define FDMA_PTRN_OFST 0x800
+#define FDMA_CNTN_OFST 0x808
+#define FDMA_SADDRN_OFST 0x80c
+#define FDMA_DADDRN_OFST 0x810
+#define fnode_read(fchan, name) \
+ readl((fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ + name)
+
+#define fnode_write(fchan, val, name) \
+ writel((val), (fchan)->fdev->slim_rproc->mem[ST_SLIM_DMEM].cpu_addr \
+ + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
+ + name)
+
+/*
+ * request control bits
+ */
+#define FDMA_REQ_CTRL_NUM_OPS_MASK GENMASK(31, 24)
+#define FDMA_REQ_CTRL_NUM_OPS(n) (FDMA_REQ_CTRL_NUM_OPS_MASK & \
+ ((n) << 24))
+#define FDMA_REQ_CTRL_INITIATOR_MASK BIT(22)
+#define FDMA_REQ_CTRL_INIT0 (0x0 << 22)
+#define FDMA_REQ_CTRL_INIT1 (0x1 << 22)
+#define FDMA_REQ_CTRL_INC_ADDR_ON BIT(21)
+#define FDMA_REQ_CTRL_DATA_SWAP_ON BIT(17)
+#define FDMA_REQ_CTRL_WNR BIT(14)
+#define FDMA_REQ_CTRL_OPCODE_MASK GENMASK(7, 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST1 (0x0 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST2 (0x1 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST4 (0x2 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST8 (0x3 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST16 (0x4 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST32 (0x5 << 4)
+#define FDMA_REQ_CTRL_OPCODE_LD_ST64 (0x6 << 4)
+#define FDMA_REQ_CTRL_HOLDOFF_MASK GENMASK(2, 0)
+#define FDMA_REQ_CTRL_HOLDOFF(n) ((n) & FDMA_REQ_CTRL_HOLDOFF_MASK)
+
+/* bits used by client to configure request control */
+#define FDMA_REQ_CTRL_CFG_MASK (FDMA_REQ_CTRL_HOLDOFF_MASK | \
+ FDMA_REQ_CTRL_DATA_SWAP_ON | \
+ FDMA_REQ_CTRL_INC_ADDR_ON | \
+ FDMA_REQ_CTRL_INITIATOR_MASK)
+
+#endif /* __DMA_ST_FDMA_H */
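
The fchan_read()/fchan_write() and fnode_read()/fnode_write() helpers above do not touch the peripheral register bank used by fdma_read()/fdma_write(); they address the SLIM core's data memory (DMEM), where each channel owns a 4-byte command slot from offset 0x200 and, as implied by the FDMA_NODE_SZ stride, a 128-byte node area from offset 0x800. A worked illustration of the resulting offsets and of how the residue walk in st_fdma_desc_residue() uses them (values chosen for illustration, not taken from the datasheet):

/*
 * Illustration only -- effective DMEM offsets for channel 2:
 *
 *   fchan_read(fchan, FDMA_CH_CMD_OFST)  reads dmem + 2 * 0x4 + 0x200 = dmem + 0x208
 *   fnode_read(fchan, FDMA_CNTN_OFST)    reads dmem + 2 * 128 + 0x808 = dmem + 0x908
 *
 * Hypothetical residue for a 3-node scatter-gather descriptor where the
 * channel is currently processing node 1 (4 KiB per node, 1 KiB of node 1
 * still outstanding in FDMA_CNTN):
 *
 *   i = 2: node[2].pdesc != cur_addr  -> residue += 4096
 *   i = 1: node[1].pdesc == cur_addr  -> residue += 1024, stop
 *
 *   reported residue = 5120 bytes (node 1 remainder plus all nodes after it)
 */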
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f4848d..3688d0873a3e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr, sfcr;
+ u32 status, scr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst;
- dma_addr_t src_addr, dst_addr;
u32 dma_scr = 0;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- src_addr = chan->dma_sconfig.src_addr;
- dst_addr = chan->dma_sconfig.dst_addr;
switch (direction) {
case DMA_MEM_TO_DEV:
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 83461994e418..a2358780ab2c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
- v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759d5ffc..380276d078b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 58d3e2b39b5b..6421cc3c7dc1 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -153,13 +153,17 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
if (count == 3) {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Double bit error\n");
+ local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ue_set_mask));
+ local_irq_enable();
} else {
edac_printk(KERN_ALERT, EDAC_MC,
"Inject Single bit error\n");
+ local_irq_disable();
regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
(read_reg | priv->ce_set_mask));
+ local_irq_enable();
}
ptemp[0] = 0x5A5A5A5A;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ee181c53626f..260251177830 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -164,8 +164,23 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
+static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
+{
+ /*
+ * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
+ * are shifted down by 0x5, so scrubval 0x5 is written to the register
+ * as 0x0, scrubval 0x6 as 0x1, etc.
+ */
+ if (scrubval >= 0x5 && scrubval <= 0x14) {
+ scrubval -= 0x5;
+ pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
+ pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
+ } else {
+ pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
+ }
+}
/*
- * scan the scrub rate mapping table for a close or matching bandwidth value to
+ * Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
*/
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
@@ -196,7 +211,9 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x15 && pvt->model == 0x60) {
+ if (pvt->fam == 0x17) {
+ __f17h_set_scrubval(pvt, scrubval);
+ } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -233,18 +250,34 @@ static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
static int get_scrub_rate(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u32 scrubval = 0;
int i, retval = -EINVAL;
+ u32 scrubval = 0;
- if (pvt->fam == 0x15) {
+ switch (pvt->fam) {
+ case 0x15:
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- } else
+ break;
+
+ case 0x17:
+ amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
+ if (scrubval & BIT(0)) {
+ amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
+ scrubval &= 0xF;
+ scrubval += 0x5;
+ } else {
+ scrubval = 0;
+ }
+ break;
+
+ default:
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
+ break;
+ }
scrubval = scrubval & 0x001F;
@@ -682,15 +715,33 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
*/
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
- u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
+ u8 bit;
- bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
- ? 19
- : 17;
+ if (pvt->umc) {
+ u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
- if (pvt->dclr0 & BIT(bit))
- edac_cap = EDAC_FLAG_SECDED;
+ for (i = 0; i < NUM_UMCS; i++) {
+ if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
+ continue;
+
+ umc_en_mask |= BIT(i);
+
+ /* UMC Configuration bit 12 (DimmEccEn) */
+ if (pvt->umc[i].umc_cfg & BIT(12))
+ dimm_ecc_en_mask |= BIT(i);
+ }
+
+ if (umc_en_mask == dimm_ecc_en_mask)
+ edac_cap = EDAC_FLAG_SECDED;
+ } else {
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
+ ? 19
+ : 17;
+
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
+ }
return edac_cap;
}
@@ -729,8 +780,75 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
+static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+{
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ int dimm, size0, size1;
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+ for (dimm = 0; dimm < 4; dimm++) {
+ size0 = 0;
+
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+ size1 = 0;
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+ amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+ dimm * 2, size0,
+ dimm * 2 + 1, size1);
+ }
+}
+
+static void __dump_misc_regs_df(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i, tmp, umc_base;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ umc_base = get_umc_base(i);
+ umc = &pvt->umc[i];
+
+ edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
+ edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
+ edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
+ edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
+
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
+ edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
+
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
+ edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
+ edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
+
+ edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
+ i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
+ (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
+ i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
+ i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
+ i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+
+ if (pvt->dram_type == MEM_LRDDR4) {
+ amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
+ edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
+ i, 1 << ((tmp >> 4) & 0x3));
+ }
+
+ debug_display_dimm_sizes_df(pvt, i);
+ }
+
+ edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
+ pvt->dhar, dhar_base(pvt));
+}
+
/* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
+static void __dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
@@ -750,8 +868,6 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
-
debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
@@ -760,13 +876,25 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
debug_display_dimm_sizes(pvt, 1);
- amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
-
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
+/* Display and decode various NB registers for debug purposes. */
+static void dump_misc_regs(struct amd64_pvt *pvt)
+{
+ if (pvt->umc)
+ __dump_misc_regs_df(pvt);
+ else
+ __dump_misc_regs(pvt);
+
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+
+ amd64_info("using %s syndromes.\n",
+ ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+}
+
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
@@ -789,46 +917,78 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs;
+ int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
prep_chip_selects(pvt);
+ if (pvt->umc) {
+ base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
+ base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
+ mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
+ mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
+ } else {
+ base_reg0 = DCSB0;
+ base_reg1 = DCSB1;
+ mask_reg0 = DCSM0;
+ mask_reg1 = DCSM1;
+ }
+
for_each_chip_select(cs, 0, pvt) {
- int reg0 = DCSB0 + (cs * 4);
- int reg1 = DCSB1 + (cs * 4);
+ int reg0 = base_reg0 + (cs * 4);
+ int reg1 = base_reg1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
+ if (pvt->umc) {
+ if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
+ cs, *base0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
+ cs, *base1, reg1);
+ } else {
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
+
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
: reg0);
+ }
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = DCSM0 + (cs * 4);
- int reg1 = DCSM1 + (cs * 4);
+ int reg0 = mask_reg0 + (cs * 4);
+ int reg1 = mask_reg1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (pvt->umc) {
+ if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
+ cs, *mask0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
+ cs, *mask1, reg1);
+ } else {
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
+
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
: reg0);
+ }
}
}
@@ -881,6 +1041,15 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
+ case 0x17:
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
@@ -1210,6 +1379,19 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
return channels;
}
+static int f17_early_channel_count(struct amd64_pvt *pvt)
+{
+ int i, channels = 0;
+
+ /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
+ for (i = 0; i < NUM_UMCS; i++)
+ channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
+
+ amd64_info("MCT channel count: %d\n", channels);
+
+ return channels;
+}
+
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -1337,6 +1519,23 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
+static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+
+ /* Each mask is used for every two base addresses. */
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+
+ edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+
+ /* Return size in MBs. */
+ return size >> 10;
+}
+
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
@@ -1897,8 +2096,9 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
size0 = 0;
if (dcsb[dimm*2] & DCSB_CS_ENABLE)
- /* For f15m60h, need multiplier for LRDIMM cs_size
- * calculation. We pass 'dimm' value to the dbam_to_cs
+ /*
+	 * For F15m60h, we need the multiplier for the LRDIMM cs_size
+	 * calculation. We pass the dimm value to the dbam_to_cs
* mapper so we can find the multiplier from the
* corresponding DCSM.
*/
@@ -1989,6 +2189,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
+ [F17_CPUS] = {
+ .ctl_name = "F17h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2155,7 +2364,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
-static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
u8 ecc_type)
{
enum hw_event_mc_err_type err_type;
@@ -2165,6 +2374,8 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
err_type = HW_EVENT_ERR_CORRECTED;
else if (ecc_type == 1)
err_type = HW_EVENT_ERR_UNCORRECTED;
+ else if (ecc_type == 3)
+ err_type = HW_EVENT_ERR_DEFERRED;
else {
WARN(1, "Something is rotten in the state of Denmark.\n");
return;
@@ -2181,7 +2392,13 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
string = "Failed to map error addr to a csrow";
break;
case ERR_CHANNEL:
- string = "unknown syndrome - possible error reporting race";
+ string = "Unknown syndrome - possible error reporting race";
+ break;
+ case ERR_SYND:
+ string = "MCA_SYND not valid - unknown syndrome and csrow";
+ break;
+ case ERR_NORM_ADDR:
+ string = "Cannot decode normalized address";
break;
default:
string = "WTF error";
@@ -2227,36 +2444,127 @@ static inline void decode_bus_error(int node_id, struct mce *m)
pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
- __log_bus_error(mci, &err, ecc_type);
+ __log_ecc_error(mci, &err, ecc_type);
+}
+
+/*
+ * To find the UMC channel represented by this bank we need to match on its
+ * instance_id. The instance_id of a bank is held in the lower 32 bits of its
+ * IPID.
+ */
+static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+{
+ u32 umc_instance_id[] = {0x50f00, 0x150f00};
+ u32 instance_id = m->ipid & GENMASK(31, 0);
+ int i, channel = -1;
+
+ for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
+ if (umc_instance_id[i] == instance_id)
+ channel = i;
+
+ return channel;
+}
+
+static void decode_umc_error(int node_id, struct mce *m)
+{
+ u8 ecc_type = (m->status >> 45) & 0x3;
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+ struct err_info err;
+ u64 sys_addr;
+
+ mci = edac_mc_find(node_id);
+ if (!mci)
+ return;
+
+ pvt = mci->pvt_info;
+
+ memset(&err, 0, sizeof(err));
+
+ if (m->status & MCI_STATUS_DEFERRED)
+ ecc_type = 3;
+
+ err.channel = find_umc_channel(pvt, m);
+ if (err.channel < 0) {
+ err.err_code = ERR_CHANNEL;
+ goto log_error;
+ }
+
+ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
+
+ if (!(m->status & MCI_STATUS_SYNDV)) {
+ err.err_code = ERR_SYND;
+ goto log_error;
+ }
+
+ if (ecc_type == 2) {
+ u8 length = (m->synd >> 18) & 0x3f;
+
+ if (length)
+ err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
+ else
+ err.err_code = ERR_CHANNEL;
+ }
+
+ err.csrow = m->synd & 0x7;
+
+log_error:
+ __log_ecc_error(mci, &err, ecc_type);
}
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
+ * Reserve F0 and F6 on systems with a UMC.
*/
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
-{
+static int
+reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
+{
+ if (pvt->umc) {
+ pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
+ if (!pvt->F0) {
+ amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
+ return -ENODEV;
+ }
+
+ pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
+ if (!pvt->F6) {
+ pci_dev_put(pvt->F0);
+ pvt->F0 = NULL;
+
+ amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ return -ENODEV;
+ }
+
+ edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
+ edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
+ edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
+
+ return 0;
+ }
+
/* Reserve the ADDRESS MAP Device */
- pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
+ pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
- amd64_err("error address map device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f1_id);
+ amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
return -ENODEV;
}
/* Reserve the DCT Device */
- pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+ pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
if (!pvt->F2) {
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("error F2 device not found: "
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f2_id);
-
+ amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
return -ENODEV;
}
+
edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
@@ -2266,8 +2574,69 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
- pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F2);
+ if (pvt->umc) {
+ pci_dev_put(pvt->F0);
+ pci_dev_put(pvt->F6);
+ } else {
+ pci_dev_put(pvt->F1);
+ pci_dev_put(pvt->F2);
+ }
+}
+
+static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
+{
+ pvt->ecc_sym_sz = 4;
+
+ if (pvt->umc) {
+ u8 i;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ /* Check enabled channels only: */
+ if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
+ (pvt->umc[i].ecc_ctrl & BIT(7))) {
+ pvt->ecc_sym_sz = 8;
+ break;
+ }
+ }
+
+ return;
+ }
+
+ if (pvt->fam >= 0x10) {
+ u32 tmp;
+
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ /* F16h has only DCT0, so no need to read dbam1. */
+ if (pvt->fam != 0x16)
+ amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too. */
+ if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller.
+ */
+static void __read_mc_regs_df(struct amd64_pvt *pvt)
+{
+ u8 nid = pvt->mc_node_id;
+ struct amd64_umc *umc;
+ u32 i, umc_base;
+
+ /* Read registers from each UMC */
+ for (i = 0; i < NUM_UMCS; i++) {
+
+ umc_base = get_umc_base(i);
+ umc = &pvt->umc[i];
+
+ amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
+ amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
+ amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+ amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+ }
}
/*
@@ -2276,24 +2645,31 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
- unsigned range;
+ unsigned int range;
u64 msr_val;
- u32 tmp;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
- * those are Read-As-Zero
+ * those are Read-As-Zero.
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
- /* check first whether TOP_MEM2 is enabled */
+ /* Check first whether TOP_MEM2 is enabled: */
rdmsrl(MSR_K8_SYSCFG, msr_val);
- if (msr_val & (1U << 21)) {
+ if (msr_val & BIT(21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
- } else
+ } else {
edac_dbg(0, " TOP_MEM2 disabled\n");
+ }
+
+ if (pvt->umc) {
+ __read_mc_regs_df(pvt);
+ amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
+
+ goto skip;
+ }
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
@@ -2322,8 +2698,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
dram_dst_node(pvt, range));
}
- read_dct_base_mask(pvt);
-
amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
@@ -2337,20 +2711,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
- pvt->ecc_sym_sz = 4;
+skip:
+ read_dct_base_mask(pvt);
+
determine_memory_type(pvt);
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
- if (pvt->fam >= 0x10) {
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
- /* F16h has only DCT0, so no need to read dbam1 */
- if (pvt->fam != 0x16)
- amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+ determine_ecc_sym_sz(pvt);
- /* F10h, revD and later can do x8 ECC too */
- if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
- pvt->ecc_sym_sz = 8;
- }
dump_misc_regs(pvt);
}
@@ -2420,20 +2788,22 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
static int init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
- enum edac_type edac_mode;
int i, j, empty = 1;
int nr_pages = 0;
u32 val;
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (!pvt->umc) {
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- pvt->nbcfg = val;
+ pvt->nbcfg = val;
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ }
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2469,14 +2839,18 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
- /*
- * determine whether CHIPKILL or JUST ECC or NO ECC is operating
- */
- if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
- else
- edac_mode = EDAC_NONE;
+ /* Determine DIMM ECC mode: */
+ if (pvt->umc) {
+ if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
+ edac_mode = EDAC_S4ECD4ED;
+ else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
+ edac_mode = EDAC_SECDED;
+
+ } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
+ ? EDAC_S4ECD4ED
+ : EDAC_SECDED;
+ }
for (j = 0; j < pvt->channel_count; j++) {
dimm = csrow->channels[j]->dimm;
@@ -2539,7 +2913,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_warn("%s: error allocating mask\n", __func__);
- return false;
+ return -ENOMEM;
}
get_cpus_on_this_dct_cpumask(cmask, nid);
@@ -2627,7 +3001,6 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
{
u32 value, mask = 0x3; /* UECC/CECC enable */
-
if (!s->nbctl_valid)
return;
@@ -2663,20 +3036,50 @@ static const char *ecc_msg =
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
- u32 value;
- u8 ecc_en = 0;
bool nb_mce_en = false;
+ u8 ecc_en = 0, i;
+ u32 value;
- amd64_read_pci_cfg(F3, NBCFG, &value);
+ if (boot_cpu_data.x86 >= 0x17) {
+ u8 umc_en_mask = 0, ecc_en_mask = 0;
- ecc_en = !!(value & NBCFG_ECC_ENABLE);
- amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
+ for (i = 0; i < NUM_UMCS; i++) {
+ u32 base = get_umc_base(i);
+
+ /* Only check enabled UMCs. */
+ if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
+ continue;
+
+ if (!(value & UMC_SDP_INIT))
+ continue;
+
+ umc_en_mask |= BIT(i);
- nb_mce_en = nb_mce_bank_enabled_on_node(nid);
- if (!nb_mce_en)
- amd64_notice("NB MCE bank disabled, set MSR "
- "0x%08x[4] on node %d to enable.\n",
- MSR_IA32_MCG_CTL, nid);
+ if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
+ continue;
+
+ if (value & UMC_ECC_ENABLED)
+ ecc_en_mask |= BIT(i);
+ }
+
+ /* Check whether at least one UMC is enabled: */
+ if (umc_en_mask)
+ ecc_en = umc_en_mask == ecc_en_mask;
+
+ /* Assume UMC MCA banks are enabled. */
+ nb_mce_en = true;
+ } else {
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
+
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
+ if (!nb_mce_en)
+ amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+ MSR_IA32_MCG_CTL, nid);
+ }
+
+ amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en) {
amd64_notice("%s", ecc_msg);
@@ -2685,6 +3088,27 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
return true;
}
+static inline void
+f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+{
+ u8 i, ecc_en = 1, cpk_en = 1;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
+ cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+ }
+ }
+
+ /* Set chipkill only if ECC is enabled: */
+ if (ecc_en) {
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+ if (cpk_en)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ }
+}
+
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
struct amd64_family_type *fam)
{
@@ -2693,17 +3117,21 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & NBCAP_SECDED)
- mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+ if (pvt->umc) {
+ f17h_determine_edac_ctl_cap(mci, pvt);
+ } else {
+ if (pvt->nbcap & NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & NBCAP_CHIPKILL)
- mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ if (pvt->nbcap & NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ }
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
- mci->dev_name = pci_name(pvt->F2);
+ mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
@@ -2759,6 +3187,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
pvt->ops = &family_types[F16_CPUS].ops;
break;
+ case 0x17:
+ fam_type = &family_types[F17_CPUS];
+ pvt->ops = &family_types[F17_CPUS].ops;
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -2789,6 +3222,7 @@ static int init_one_instance(unsigned int nid)
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct amd64_pvt *pvt = NULL;
+ u16 pci_id1, pci_id2;
int err = 0, ret;
ret = -ENOMEM;
@@ -2804,10 +3238,23 @@ static int init_one_instance(unsigned int nid)
if (!fam_type)
goto err_free;
- ret = -ENODEV;
- err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
+ if (pvt->fam >= 0x17) {
+ pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ pci_id1 = fam_type->f0_id;
+ pci_id2 = fam_type->f6_id;
+ } else {
+ pci_id1 = fam_type->f1_id;
+ pci_id2 = fam_type->f2_id;
+ }
+
+ err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
if (err)
- goto err_free;
+ goto err_post_init;
read_mc_regs(pvt);
@@ -2857,7 +3304,10 @@ static int init_one_instance(unsigned int nid)
if (report_gart_errors)
amd_report_gart_errors(true);
- amd_register_ecc_decoder(decode_bus_error);
+ if (pvt->umc)
+ amd_register_ecc_decoder(decode_umc_error);
+ else
+ amd_register_ecc_decoder(decode_bus_error);
return 0;
@@ -2867,6 +3317,10 @@ err_add_mc:
err_siblings:
free_mc_sibling_devs(pvt);
+err_post_init:
+ if (pvt->fam >= 0x17)
+ kfree(pvt->umc);
+
err_free:
kfree(pvt);
@@ -2893,7 +3347,11 @@ static int probe_one_instance(unsigned int nid)
if (!ecc_enable_override)
goto err_enable;
- amd64_warn("Forcing ECC on!\n");
+ if (boot_cpu_data.x86 >= 0x17) {
+ amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
+ goto err_enable;
+ } else
+ amd64_warn("Forcing ECC on!\n");
if (!enable_ecc_error_reporting(s, nid, F3))
goto err_enable;
@@ -2902,7 +3360,9 @@ static int probe_one_instance(unsigned int nid)
ret = init_one_instance(nid);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
- restore_ecc_error_reporting(s, nid, F3);
+
+ if (boot_cpu_data.x86 < 0x17)
+ restore_ecc_error_reporting(s, nid, F3);
}
return ret;
@@ -2938,7 +3398,11 @@ static void remove_one_instance(unsigned int nid)
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
- amd_unregister_ecc_decoder(decode_bus_error);
+
+ if (pvt->umc)
+ amd_unregister_ecc_decoder(decode_umc_error);
+ else
+ amd_unregister_ecc_decoder(decode_bus_error);
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
@@ -2963,7 +3427,10 @@ static void setup_pci_device(void)
return;
pvt = mci->pvt_info;
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ if (pvt->umc)
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
+ else
+ pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
if (!pci_ctl) {
pr_warn("%s(): Unable to create PCI control\n", __func__);
pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
@@ -2975,6 +3442,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
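
The f17_base_addr_to_cs_size() helper added in the amd64_edac.c hunk above turns a chip-select base/mask register pair into a DIMM size. Because register bits [31:1] carry address bits [39:9], one register step corresponds to 512 bytes, so halving the block count yields kilobytes and a further shift by 10 yields megabytes. A worked example with made-up register values (not taken from real hardware):

/*
 * Illustration only -- f17_base_addr_to_cs_size() arithmetic for a
 * hypothetical 8 GB chip select:
 *
 *   base_addr = 0x00000000
 *   addr_mask = 0x01ffffff
 *
 *   size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1
 *        = (0x00ffffff - 0x0 + 1) >> 1
 *        = 0x00800000                    -> 8388608 kB
 *
 *   size >> 10 = 8192                    -> reported as 8192 MB (8 GB)
 */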
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c08870479054..f14c24d5b140 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -30,10 +30,10 @@
edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
#define amd64_warn(fmt, arg...) \
- edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+ edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)
#define amd64_err(fmt, arg...) \
- edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+ edac_printk(KERN_ERR, "amd64", "Error: " fmt, ##arg)
#define amd64_mc_warn(mci, fmt, arg...) \
edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
@@ -118,6 +118,8 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
+#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
+#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
/*
* Function 1 - Address Map
@@ -202,6 +204,8 @@
#define DCT_SEL_HI 0x114
#define F15H_M60H_SCRCTRL 0x1C8
+#define F17H_SCR_BASE_ADDR 0x48
+#define F17H_SCR_LIMIT_ADDR 0x4C
/*
* Function 3 - Misc Control
@@ -248,6 +252,31 @@
/* MSRs */
#define MSR_MCGCTL_NBE BIT(4)
+/* F17h */
+
+/* F0: */
+#define DF_DHAR 0x104
+
+/* UMC CH register offsets */
+#define UMCCH_BASE_ADDR 0x0
+#define UMCCH_ADDR_MASK 0x20
+#define UMCCH_ADDR_CFG 0x30
+#define UMCCH_DIMM_CFG 0x80
+#define UMCCH_UMC_CFG 0x100
+#define UMCCH_SDP_CTRL 0x104
+#define UMCCH_ECC_CTRL 0x14C
+#define UMCCH_ECC_BAD_SYMBOL 0xD90
+#define UMCCH_UMC_CAP 0xDF0
+#define UMCCH_UMC_CAP_HI 0xDF4
+
+/* UMC CH bitfields */
+#define UMC_ECC_CHIPKILL_CAP BIT(31)
+#define UMC_ECC_ENABLED BIT(30)
+
+#define UMC_SDP_INIT BIT(31)
+
+#define NUM_UMCS 2
+
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -256,6 +285,7 @@ enum amd_families {
F15_M60H_CPUS,
F16_CPUS,
F16_M30H_CPUS,
+ F17_CPUS,
NUM_FAMILIES,
};
@@ -288,11 +318,19 @@ struct chip_select {
u8 m_cnt;
};
+struct amd64_umc {
+ u32 dimm_cfg; /* DIMM Configuration reg */
+ u32 umc_cfg; /* Configuration reg */
+ u32 sdp_ctrl; /* SDP Control reg */
+ u32 ecc_ctrl; /* DRAM ECC Control reg */
+ u32 umc_cap_hi; /* Capabilities High reg */
+};
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
- struct pci_dev *F1, *F2, *F3;
+ struct pci_dev *F0, *F1, *F2, *F3, *F6;
u16 mc_node_id; /* MC index of this MC node */
u8 fam; /* CPU family */
@@ -335,6 +373,8 @@ struct amd64_pvt {
/* cache the dram_type */
enum mem_type dram_type;
+
+ struct amd64_umc *umc; /* UMC registers */
};
enum err_codes {
@@ -342,6 +382,8 @@ enum err_codes {
ERR_NODE = -1,
ERR_CSROW = -2,
ERR_CHANNEL = -3,
+ ERR_SYND = -4,
+ ERR_NORM_ADDR = -5,
};
struct err_info {
@@ -354,6 +396,12 @@ struct err_info {
u32 offset;
};
+static inline u32 get_umc_base(u8 channel)
+{
+ /* ch0: 0x50000, ch1: 0x150000 */
+ return 0x50000 + (!!channel << 20);
+}
+
static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -422,7 +470,7 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
- u16 f1_id, f2_id;
+ u16 f0_id, f1_id, f2_id, f6_id;
struct low_ops ops;
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index c3ee3ad98a63..d2ea9c4f1824 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -482,15 +482,8 @@ void edac_mc_free(struct mem_ctl_info *mci)
}
EXPORT_SYMBOL_GPL(edac_mc_free);
-
-/**
- * find_mci_by_dev
- *
- * scan list of controllers looking for the one that manages
- * the 'dev' device
- * @dev: pointer to a struct device related with the MCI
- */
-struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+/* Caller must hold mem_ctls_mutex */
+static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -506,6 +499,24 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
return NULL;
}
+
+/**
+ * find_mci_by_dev
+ *
+ * scan list of controllers looking for the one that manages
+ * the 'dev' device
+ * @dev: pointer to a struct device related with the MCI
+ */
+struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+{
+ struct mem_ctl_info *ret;
+
+ mutex_lock(&mem_ctls_mutex);
+ ret = __find_mci_by_dev(dev);
+ mutex_unlock(&mem_ctls_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
@@ -588,7 +599,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
insert_before = &mc_devices;
- p = find_mci_by_dev(mci->pdev);
+ p = __find_mci_by_dev(mci->pdev);
if (unlikely(p != NULL))
goto fail0;
@@ -640,26 +651,28 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
*
* If found, return a pointer to the structure.
* Else return NULL.
- *
- * Caller must hold mem_ctls_mutex.
*/
struct mem_ctl_info *edac_mc_find(int idx)
{
+ struct mem_ctl_info *mci = NULL;
struct list_head *item;
- struct mem_ctl_info *mci;
+
+ mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx)
- return mci;
-
+ if (mci->mc_idx == idx) {
+ goto unlock;
+ }
break;
}
}
- return NULL;
+unlock:
+ mutex_unlock(&mem_ctls_mutex);
+ return mci;
}
EXPORT_SYMBOL(edac_mc_find);
@@ -779,7 +792,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
mutex_lock(&mem_ctls_mutex);
/* find the requested mci struct in the global list */
- mci = find_mci_by_dev(dev);
+ mci = __find_mci_by_dev(dev);
if (mci == NULL) {
mutex_unlock(&mem_ctls_mutex);
return NULL;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index daaac2c79ca7..34208f38c5b1 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -8,7 +8,7 @@ static struct amd_decoder_ops *fam_ops;
static u8 xec_mask = 0xf;
static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct mce *m);
+static void (*decode_dram_ecc)(int node_id, struct mce *m);
void amd_report_gart_errors(bool v)
{
@@ -18,16 +18,16 @@ EXPORT_SYMBOL_GPL(amd_report_gart_errors);
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
- nb_bus_decoder = f;
+ decode_dram_ecc = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
- if (nb_bus_decoder) {
- WARN_ON(nb_bus_decoder != f);
+ if (decode_dram_ecc) {
+ WARN_ON(decode_dram_ecc != f);
- nb_bus_decoder = NULL;
+ decode_dram_ecc = NULL;
}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
@@ -763,8 +763,8 @@ static void decode_mc4_mce(struct mce *m)
pr_cont("%s.\n", mc4_mce_desc[xec]);
- if (nb_bus_decoder)
- nb_bus_decoder(node_id, m);
+ if (decode_dram_ecc)
+ decode_dram_ecc(node_id, m);
return;
}
break;
@@ -851,7 +851,7 @@ static void decode_mc6_mce(struct mce *m)
/* Decode errors according to Scalable MCA specification */
static void decode_smca_errors(struct mce *m)
{
- struct smca_hwid_mcatype *type;
+ struct smca_hwid *hwid;
unsigned int bank_type;
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
@@ -862,21 +862,28 @@ static void decode_smca_errors(struct mce *m)
if (boot_cpu_data.x86 >= 0x17 && m->bank == 4)
pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
- type = smca_banks[m->bank].type;
- if (!type)
+ hwid = smca_banks[m->bank].hwid;
+ if (!hwid)
return;
- bank_type = type->bank_type;
- ip_name = smca_bank_names[bank_type].long_name;
+ bank_type = hwid->bank_type;
+ ip_name = smca_get_long_name(bank_type);
pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec);
/* Only print the decode of valid error codes */
if (xec < smca_mce_descs[bank_type].num_descs &&
- (type->xec_bitmap & BIT_ULL(xec))) {
+ (hwid->xec_bitmap & BIT_ULL(xec))) {
pr_emerg(HW_ERR "%s Error: ", ip_name);
pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
}
+
+ /*
+ * amd_get_nb_id() returns the last level cache id.
+ * The last level cache on Fam17h is 1 level below the node.
+ */
+ if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
+ decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);
}
static inline void amd_decode_err_code(u16 ec)
@@ -957,10 +964,13 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 >= 0x15)
- pr_cont("|%s|%s",
- ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
- ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+ if (c->x86 >= 0x15) {
+ pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
+
+ /* F15h, bank4, bit 43 is part of McaStatSubCache. */
+ if (c->x86 != 0x15 || m->bank != 4)
+ pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
+ }
if (boot_cpu_has(X86_FEATURE_SMCA)) {
u32 low, high;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index ff0567526ee3..c62602141f95 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -300,6 +300,22 @@ err:
return res;
}
+static int mpc85xx_pci_err_remove(struct platform_device *op)
+{
+ struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+ edac_dbg(0, "\n");
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, orig_pci_err_cap_dr);
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+ edac_pci_del_device(&op->dev);
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
static const struct platform_device_id mpc85xx_pci_err_match[] = {
{
.name = "mpc85xx-pci-edac"
@@ -309,6 +325,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
+ .remove = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 54775221a01f..c1ad0eb7d5dd 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -23,6 +23,7 @@
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
@@ -3365,12 +3366,13 @@ fail0:
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
static const struct x86_cpu_id sbridge_cpuids[] = {
- ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
- ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
- ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
- ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
- ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
- ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table),
+ ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table),
+ ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 0ff4878c2aa1..9edcb29b3001 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -25,6 +25,7 @@
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>
@@ -262,8 +263,8 @@ fail:
return -ENODEV;
}
-const struct x86_cpu_id skx_cpuids[] = {
- { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
+static const struct x86_cpu_id skx_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -1036,7 +1037,7 @@ static void skx_remove(void)
* search for all the devices we need
* check which DIMMs are present.
*/
-int __init skx_init(void)
+static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index bf19b6e3bd12..5569391ea800 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1602,16 +1602,16 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
- "Mutilple IOB PA read data RAM error\n");
+ "Multiple IOB PA read data RAM error\n");
if (reg & IOBPA_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
dev_err(edac_dev->dev,
- "Mutilple IOB PA write data RAM error\n");
+ "Multiple IOB PA write data RAM error\n");
if (reg & IOBPA_TRANS_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction error\n");
if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
- dev_err(edac_dev->dev, "Mutilple IOB PA transaction error\n");
+ dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..d836d4ce5ee4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
int ret;
- ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ ret = snd_soc_component_force_enable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
bool change;
int ret;
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- ret = snd_soc_dapm_disable_pin(dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350f69e3..d589c5feff3d 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
@@ -36,7 +35,9 @@ struct usb_extcon_info {
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
int id_irq;
+ int vbus_irq;
unsigned long debounce_jiffies;
struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@ static const unsigned int usb_extcon_cable[] = {
EXTCON_NONE,
};
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID, so we have:
+ * Both "USB" and "USB-HOST" can't be set as active at the
+ * same time so if "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive
+ * even if VBUS is on.
+ *
+ * State | ID | VBUS
+ * ----------------------------------------
+ * [1] USB | H | H
+ * [2] none | H | L
+ * [3] USB-HOST | L | H
+ * [4] USB-HOST | L | L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+*/
static void usb_extcon_detect_cable(struct work_struct *work)
{
- int id;
+ int id, vbus;
struct usb_extcon_info *info = container_of(to_delayed_work(work),
struct usb_extcon_info,
wq_detcable);
- /* check ID and update cable state */
- id = gpiod_get_value_cansleep(info->id_gpiod);
- if (id) {
- /*
- * ID = 1 means USB HOST cable detached.
- * As we don't have event for USB peripheral cable attached,
- * we simulate USB peripheral attach here.
- */
+ /* check ID and VBUS and update cable state */
+ id = info->id_gpiod ?
+ gpiod_get_value_cansleep(info->id_gpiod) : 1;
+ vbus = info->vbus_gpiod ?
+ gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+ /* at first we clean states which are no longer active */
+ if (id)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
- extcon_set_state_sync(info->edev, EXTCON_USB, true);
- } else {
- /*
- * ID = 0 means USB HOST cable attached.
- * As we don't have event for USB peripheral cable detached,
- * we simulate USB peripheral detach here.
- */
+ if (!vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+ if (!id) {
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ } else {
+ if (vbus)
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
}
}
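
The table in the comment above fully determines the two extcon states from ID and VBUS, together with the single-GPIO fallbacks (ID assumed high when no ID GPIO exists, VBUS following ID when no VBUS GPIO exists). A small user-space sketch of that decision logic, separate from the patch and using made-up names, just to make the truth table explicit:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct cable_state {
        bool usb;       /* EXTCON_USB      */
        bool usb_host;  /* EXTCON_USB_HOST */
};

/*
 * have_id/have_vbus say which GPIOs are present; id/vbus are their levels.
 * Fallbacks mirror the code above: no ID GPIO -> ID = 1, no VBUS GPIO -> VBUS = ID.
 */
static struct cable_state decide(bool have_id, bool id, bool have_vbus, bool vbus)
{
        bool eff_id = have_id ? id : true;
        bool eff_vbus = have_vbus ? vbus : eff_id;
        struct cable_state s = { false, false };

        if (!eff_id)
                s.usb_host = true;      /* rows [3] and [4] */
        else if (eff_vbus)
                s.usb = true;           /* row [1] */
        /* row [2]: ID high, VBUS low -> neither cable reported */
        return s;
}

int main(void)
{
        /* The four rows of the table, with both GPIOs present. */
        assert(decide(true, true,  true, true).usb       == true);   /* [1] */
        assert(decide(true, true,  true, false).usb      == false);  /* [2] */
        assert(decide(true, false, true, true).usb_host  == true);   /* [3] */
        assert(decide(true, false, true, false).usb_host == true);   /* [4] */
        printf("matches the state table in the comment above\n");
        return 0;
}
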
@@ -101,12 +118,21 @@ static int usb_extcon_probe(struct platform_device *pdev)
return -ENOMEM;
info->dev = dev;
- info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
- if (IS_ERR(info->id_gpiod)) {
- dev_err(dev, "failed to get ID GPIO\n");
- return PTR_ERR(info->id_gpiod);
+ info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+ info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+
+ if (!info->id_gpiod && !info->vbus_gpiod) {
+ dev_err(dev, "failed to get gpios\n");
+ return -ENODEV;
}
+ if (IS_ERR(info->id_gpiod))
+ return PTR_ERR(info->id_gpiod);
+
+ if (IS_ERR(info->vbus_gpiod))
+ return PTR_ERR(info->vbus_gpiod);
+
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,56 @@ static int usb_extcon_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiod_set_debounce(info->id_gpiod,
- USB_GPIO_DEBOUNCE_MS * 1000);
+ if (info->id_gpiod)
+ ret = gpiod_set_debounce(info->id_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+ if (!ret && info->vbus_gpiod)
+ ret = gpiod_set_debounce(info->vbus_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+
if (ret < 0)
info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
- info->id_irq = gpiod_to_irq(info->id_gpiod);
- if (info->id_irq < 0) {
- dev_err(dev, "failed to get ID IRQ\n");
- return info->id_irq;
+ if (info->id_gpiod) {
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ return info->id_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
}
- ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
- usb_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdev->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for ID IRQ\n");
- return ret;
+ if (info->vbus_gpiod) {
+ info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+ if (info->vbus_irq < 0) {
+ dev_err(dev, "failed to get VBUS IRQ\n");
+ return info->vbus_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for VBUS IRQ\n");
+ return ret;
+ }
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, true);
- dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +207,6 @@ static int usb_extcon_remove(struct platform_device *pdev)
struct usb_extcon_info *info = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&info->wq_detcable);
-
- dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
@@ -170,12 +218,32 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = enable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = enable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ disable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
* accessible until resume completes. So disable IRQ.
*/
- disable_irq(info->id_irq);
+ if (info->id_gpiod)
+ disable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq(info->vbus_irq);
return ret;
}
@@ -185,7 +253,28 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- enable_irq(info->id_irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = disable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = disable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ enable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
+ if (info->id_gpiod)
+ enable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq(info->vbus_irq);
+
if (!device_may_wakeup(dev))
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1faae..5d3640264f2d 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
+ if (len <= RFC2374_UNFRAG_HDR_SIZE)
+ return 0;
+
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
+
/* A datagram fragment has been received, now the fun begins. */
+
+ if (len <= RFC2374_FRAG_HDR_SIZE)
+ return 0;
+
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+ if (fg_off + len > dg_size)
+ return 0;
spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
+static int gasp_source_id(__be32 *p)
+{
+ return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+ return be32_to_cpu(p[1]) & 0xffffff;
+}
+
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
- u16 source_node_id;
- u32 specifier_id;
- u32 ver;
unsigned long offset;
unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
- if (specifier_id == IANA_SPECIFIER_ID &&
- (ver == RFC2734_SW_VERSION
+ if (length > IEEE1394_GASP_HDR_SIZE &&
+ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
- || ver == RFC3146_SW_VERSION
+ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
- )) {
- buf_ptr += 2;
- length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ ))
+ fwnet_incoming_packet(dev, buf_ptr + 2,
+ length - IEEE1394_GASP_HDR_SIZE,
+ gasp_source_id(buf_ptr),
context->card->generation, true);
- }
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
@@ -1349,15 +1368,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int fwnet_change_mtu(struct net_device *net, int new_mtu)
-{
- if (new_mtu < 68)
- return -EINVAL;
-
- net->mtu = new_mtu;
- return 0;
-}
-
static const struct ethtool_ops fwnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
@@ -1366,7 +1376,6 @@ static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
- .ndo_change_mtu = fwnet_change_mtu,
};
static void fwnet_init_dev(struct net_device *net)
@@ -1435,7 +1444,6 @@ static int fwnet_probe(struct fw_unit *unit,
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
- unsigned max_mtu;
int ret;
union fwnet_hwaddr *ha;
@@ -1474,13 +1482,9 @@ static int fwnet_probe(struct fw_unit *unit,
goto out;
dev->local_fifo = dev->handler.offset;
- /*
- * Use the RFC 2734 default 1500 octets or the maximum payload
- * as initial MTU
- */
- max_mtu = (1 << (card->max_receive + 1))
- - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
- net->mtu = min(1500U, max_mtu);
+ net->mtu = 1500U;
+ net->min_mtu = ETH_MIN_MTU;
+ net->max_mtu = 0xfff;
/* Set our hardware address while we're at it */
ha = (union fwnet_hwaddr *)net->dev_addr;
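
A note on the dg_size macros reworked above: the on-wire field carries the datagram size minus one, which is what the old call-site comment "??? + 1" was hinting at, and the patch moves the +1/-1 adjustment into the accessors. A tiny stand-alone round-trip check, using copies of the macros adapted to take the raw first quadlet instead of the header struct:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Adapted copies of the reworked macros: the header field stores (dg_size - 1). */
#define get_hdr_dg_size(w0)  ((((w0) & 0x0fff0000) >> 16) + 1)
#define set_hdr_dg_size(dgs) ((uint32_t)(((dgs) - 1) << 16))

int main(void)
{
        /* Encode a 1500-byte datagram size and decode it again. */
        uint32_t w0 = set_hdr_dg_size(1500);

        assert(((w0 >> 16) & 0x0fff) == 1499);  /* the field holds size - 1 */
        assert(get_hdr_dg_size(w0) == 1500);    /* the accessor adds it back */
        printf("dg_size round-trips through the header field\n");
        return 0;
}
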
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index c981be17d3c0..2e78b0b96d74 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -129,7 +129,25 @@ config EFI_TEST
Say Y here to enable the runtime services support via /dev/efi_test.
If unsure, say N.
+config APPLE_PROPERTIES
+ bool "Apple Device Properties"
+ depends on EFI_STUB && X86
+ select EFI_DEV_PATH_PARSER
+ select UCS2_STRING
+ help
+ Retrieve properties from EFI on Apple Macs and assign them to
+ devices, allowing for improved support of Apple hardware.
+ Properties that would otherwise be missing include the
+ Thunderbolt Device ROM and GPU configuration data.
+
+ If unsure, say Y if you have a Mac. Otherwise N.
+
endmenu
config UEFI_CPER
bool
+
+config EFI_DEV_PATH_PARSER
+ bool
+ depends on ACPI
+ default n
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c8a439f6d715..ad67342313ed 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 000000000000..c473f4c5ca34
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,248 @@
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/bootmem.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+ dump_properties = true;
+ return 0;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+ u32 len;
+ u32 prop_count;
+ struct efi_dev_path path[0];
+ /*
+ * followed by key/value pairs, each key and value preceded by u32 len,
+ * len includes itself, value may be empty (in which case its len is 4)
+ */
+};
+
+struct properties_header {
+ u32 len;
+ u32 version;
+ u32 dev_count;
+ struct dev_header dev_header[0];
+};
+
+static u8 one __initdata = 1;
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+ struct device *dev, void *ptr,
+ struct property_entry entry[])
+{
+ int i;
+
+ for (i = 0; i < dev_header->prop_count; i++) {
+ int remaining = dev_header->len - (ptr - (void *)dev_header);
+ u32 key_len, val_len;
+ char *key;
+
+ if (sizeof(key_len) > remaining)
+ break;
+
+ key_len = *(typeof(key_len) *)ptr;
+ if (key_len + sizeof(val_len) > remaining ||
+ key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+ *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+ dev_err(dev, "invalid property name len at %#zx\n",
+ ptr - (void *)dev_header);
+ break;
+ }
+
+ val_len = *(typeof(val_len) *)(ptr + key_len);
+ if (key_len + val_len > remaining ||
+ val_len < sizeof(val_len)) {
+ dev_err(dev, "invalid property val len at %#zx\n",
+ ptr - (void *)dev_header + key_len);
+ break;
+ }
+
+ /* 4 bytes to accommodate UTF-8 code points + null byte */
+ key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+ if (!key) {
+ dev_err(dev, "cannot allocate property name\n");
+ break;
+ }
+ ucs2_as_utf8(key, ptr + sizeof(key_len),
+ key_len - sizeof(key_len));
+
+ entry[i].name = key;
+ entry[i].is_array = true;
+ entry[i].length = val_len - sizeof(val_len);
+ entry[i].pointer.raw_data = ptr + key_len + sizeof(val_len);
+ if (!entry[i].length) {
+ /* driver core doesn't accept empty properties */
+ entry[i].length = 1;
+ entry[i].pointer.raw_data = &one;
+ }
+
+ if (dump_properties) {
+ dev_info(dev, "property: %s\n", entry[i].name);
+ print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, entry[i].pointer.raw_data,
+ entry[i].length, true);
+ }
+
+ ptr += key_len + val_len;
+ }
+
+ if (i != dev_header->prop_count) {
+ dev_err(dev, "got %d device properties, expected %u\n", i,
+ dev_header->prop_count);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ return;
+ }
+
+ dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+ size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+ while (offset + sizeof(struct dev_header) < properties->len) {
+ struct dev_header *dev_header = (void *)properties + offset;
+ struct property_entry *entry = NULL;
+ struct device *dev;
+ size_t len;
+ int ret, i;
+ void *ptr;
+
+ if (offset + dev_header->len > properties->len ||
+ dev_header->len <= sizeof(*dev_header)) {
+ pr_err("invalid len in dev_header at %#zx\n", offset);
+ return -EINVAL;
+ }
+
+ ptr = dev_header->path;
+ len = dev_header->len - sizeof(*dev_header);
+
+ dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len);
+ if (IS_ERR(dev)) {
+ pr_err("device path parse error %ld at %#zx:\n",
+ PTR_ERR(dev), ptr - (void *)dev_header);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ dev = NULL;
+ goto skip_device;
+ }
+
+ entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry) {
+ dev_err(dev, "cannot allocate properties\n");
+ goto skip_device;
+ }
+
+ unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+ if (!entry[0].name)
+ goto skip_device;
+
+ ret = device_add_properties(dev, entry); /* makes deep copy */
+ if (ret)
+ dev_err(dev, "error %d assigning properties\n", ret);
+
+ for (i = 0; entry[i].name; i++)
+ kfree(entry[i].name);
+
+skip_device:
+ kfree(entry);
+ put_device(dev);
+ offset += dev_header->len;
+ }
+
+ return 0;
+}
+
+static int __init map_properties(void)
+{
+ struct properties_header *properties;
+ struct setup_data *data;
+ u32 data_len;
+ u64 pa_data;
+ int ret;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
+ !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ return 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = ioremap(pa_data, sizeof(*data));
+ if (!data) {
+ pr_err("cannot map setup_data header\n");
+ return -ENOMEM;
+ }
+
+ if (data->type != SETUP_APPLE_PROPERTIES) {
+ pa_data = data->next;
+ iounmap(data);
+ continue;
+ }
+
+ data_len = data->len;
+ iounmap(data);
+
+ data = ioremap(pa_data, sizeof(*data) + data_len);
+ if (!data) {
+ pr_err("cannot map setup_data payload\n");
+ return -ENOMEM;
+ }
+
+ properties = (struct properties_header *)data->data;
+ if (properties->version != 1) {
+ pr_err("unsupported version:\n");
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -ENOTSUPP;
+ } else if (properties->len != data_len) {
+ pr_err("length mismatch, expected %u\n", data_len);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -EINVAL;
+ } else
+ ret = unmarshal_devices(properties);
+
+ /*
+ * Can only free the setup_data payload but not its header
+ * to avoid breaking the chain of ->next pointers.
+ */
+ data->len = 0;
+ iounmap(data);
+ free_bootmem_late(pa_data + sizeof(*data), data_len);
+
+ return ret;
+ }
+ return 0;
+}
+
+fs_initcall(map_properties);
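
The unmarshalling above depends on the layout spelled out in the dev_header comment: every key and every value is preceded by a u32 length that counts itself, and an empty value is encoded as a bare length of 4. A minimal user-space walker over that layout with a fabricated little-endian buffer; ASCII keys stand in for the UTF-16 keys the real format uses (those are converted with ucs2_as_utf8() in the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk "len32 | key bytes | len32 | value bytes" cells; each len32 counts itself. */
static void walk_pairs(const uint8_t *p, size_t remaining)
{
        while (remaining >= sizeof(uint32_t)) {
                uint32_t key_len, val_len;

                memcpy(&key_len, p, sizeof(key_len));
                if (key_len < sizeof(key_len) || key_len + sizeof(val_len) > remaining)
                        break;          /* malformed, bail out like the driver does */

                memcpy(&val_len, p + key_len, sizeof(val_len));
                if (val_len < sizeof(val_len) || key_len + val_len > remaining)
                        break;

                printf("key \"%.*s\", %u value byte(s)\n",
                       (int)(key_len - sizeof(key_len)),
                       (const char *)(p + sizeof(key_len)),
                       (unsigned)(val_len - sizeof(val_len)));

                p += key_len + val_len;
                remaining -= key_len + val_len;
        }
}

int main(void)
{
        /* One property "rom" with a 2-byte value, then one with an empty value. */
        uint8_t buf[] = {
                7, 0, 0, 0, 'r', 'o', 'm',      /* key cell:   len 7 = 4 + 3 */
                6, 0, 0, 0, 0xde, 0xad,         /* value cell: len 6 = 4 + 2 */
                8, 0, 0, 0, 'e', 'm', 'p', 't', /* key cell:   len 8 = 4 + 4 */
                4, 0, 0, 0,                     /* empty value: length is just 4 */
        };

        walk_pairs(buf, sizeof(buf));
        return 0;
}
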
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8efe13075c92..f853ad2c4ca0 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -244,8 +244,10 @@ void __init efi_init(void)
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
- if (uefi_init() < 0)
+ if (uefi_init() < 0) {
+ efi_memmap_unmap();
return;
+ }
reserve_regions();
efi_memattr_init();
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d9091a..349dc3e1e52e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -39,7 +39,7 @@ static struct mm_struct efi_mm = {
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
};
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
#include <asm/ptdump.h>
static struct ptdump_info efi_ptdump_info = {
@@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = {
static int __init ptdump_init(void)
{
- return ptdump_register(&efi_ptdump_info, "efi_page_tables");
+ return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
}
device_initcall(ptdump_init);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 000000000000..85d1834ee9b7
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,203 @@
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+struct acpi_hid_uid {
+ struct acpi_device_id hid[2];
+ char uid[11]; /* UINT_MAX + null byte */
+};
+
+static int __init match_acpi_dev(struct device *dev, void *data)
+{
+ struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data;
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ if (acpi_match_device_ids(adev, hid_uid.hid))
+ return 0;
+
+ if (adev->pnp.unique_id)
+ return !strcmp(adev->pnp.unique_id, hid_uid.uid);
+ else
+ return !strcmp("0", hid_uid.uid);
+}
+
+static long __init parse_acpi_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ struct acpi_hid_uid hid_uid = {};
+ struct device *phys_dev;
+
+ if (node->length != 12)
+ return -EINVAL;
+
+ sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
+ node->acpi.hid >> 16);
+ sprintf(hid_uid.uid, "%u", node->acpi.uid);
+
+ *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
+ match_acpi_dev);
+ if (!*child)
+ return -ENODEV;
+
+ phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ if (phys_dev) {
+ get_device(phys_dev);
+ put_device(*child);
+ *child = phys_dev;
+ }
+
+ return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+ unsigned int devfn = *(unsigned int *)data;
+
+ return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ unsigned int devfn;
+
+ if (node->length != 6)
+ return -EINVAL;
+ if (!parent)
+ return -EINVAL;
+
+ devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+ *child = device_find_child(parent, &devfn, match_pci_dev);
+ if (!*child)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path, only
+ * parse_end_path() is supposed to return this.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
+
+static long __init parse_end_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ if (node->length != 4)
+ return -EINVAL;
+ if (node->sub_type != EFI_DEV_END_INSTANCE &&
+ node->sub_type != EFI_DEV_END_ENTIRE)
+ return -EINVAL;
+ if (!parent)
+ return -ENODEV;
+
+ *child = get_device(parent);
+ return node->sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device. If the device was found, its reference count is incremented and a
+ * pointer to it is returned. The caller needs to drop the reference with
+ * put_device() after use. The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed. Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL. Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ * // do something with dev
+ * put_device(dev);
+ * }
+ * if (IS_ERR(dev))
+ * // report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ * %ERR_PTR(-ENODEV) if no device was found,
+ * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
+ size_t *len)
+{
+ struct device *parent = NULL, *child;
+ long ret = 0;
+
+ if (!*len)
+ return NULL;
+
+ while (!ret) {
+ if (*len < 4 || *len < (*node)->length)
+ ret = -EINVAL;
+ else if ((*node)->type == EFI_DEV_ACPI &&
+ (*node)->sub_type == EFI_DEV_BASIC_ACPI)
+ ret = parse_acpi_path(*node, parent, &child);
+ else if ((*node)->type == EFI_DEV_HW &&
+ (*node)->sub_type == EFI_DEV_PCI)
+ ret = parse_pci_path(*node, parent, &child);
+ else if (((*node)->type == EFI_DEV_END_PATH ||
+ (*node)->type == EFI_DEV_END_PATH2))
+ ret = parse_end_path(*node, parent, &child);
+ else
+ ret = -ENOTSUPP;
+
+ put_device(parent);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ parent = child;
+ *node = (void *)*node + (*node)->length;
+ *len -= (*node)->length;
+ }
+
+ if (ret == EFI_DEV_END_ENTIRE)
+ *len = 0;
+
+ return child;
+}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1ac199cd75e7..92914801e388 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -23,7 +23,10 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
+#include <linux/kexec.h>
#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
@@ -48,6 +51,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
+ .rng_seed = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -259,8 +263,10 @@ static __init int efivar_ssdt_load(void)
}
data = kmalloc(size, GFP_KERNEL);
- if (!data)
+ if (!data) {
+ ret = -ENOMEM;
goto free_entry;
+ }
ret = efivar_entry_get(entry, NULL, &size, data);
if (ret) {
@@ -438,6 +444,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{NULL_GUID, NULL, NULL},
};
@@ -499,6 +506,29 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
pr_cont("\n");
set_bit(EFI_CONFIG_TABLES, &efi.flags);
+ if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) {
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ seed = early_memremap(efi.rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+ size = seed->size;
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = early_memremap(efi.rng_seed,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ add_device_randomness(seed->bits, seed->size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ }
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -822,3 +852,47 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+
+#ifdef CONFIG_KEXEC
+static int update_efi_random_seed(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ if (!kexec_in_progress)
+ return NOTIFY_DONE;
+
+ seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
+ if (seed != NULL) {
+ size = min(seed->size, 32U);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = memremap(efi.rng_seed, sizeof(*seed) + size,
+ MEMREMAP_WB);
+ if (seed != NULL) {
+ seed->size = size;
+ get_random_bytes(seed->bits, seed->size);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+ .notifier_call = update_efi_random_seed,
+};
+
+static int register_update_efi_random_seed(void)
+{
+ if (efi.rng_seed == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
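
Both the boot-time consumer and the kexec notifier above read the seed table in two passes: map just the header to learn the payload size, unmap, then remap header plus payload. A rough user-space analogue of that pattern, with a stand-in for (early_)memremap() and invented names throughout, sketched only to show the shape of the two-pass read:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same shape as the seed table above: a size header followed by the payload. */
struct seed_blob {
        uint32_t size;
        uint8_t bits[];
};

/* Stand-in for (early_)memremap(): expose at most `avail` bytes starting at base. */
static const void *map_window(const void *base, size_t avail, size_t want)
{
        return want <= avail ? base : NULL;
}

int main(void)
{
        size_t payload = 32;
        struct seed_blob *src = malloc(sizeof(*src) + payload);
        if (!src)
                return 1;
        src->size = (uint32_t)payload;
        memset(src->bits, 0xab, payload);

        /* Pass 1: map only the header to learn how large the payload is. */
        const struct seed_blob *hdr =
                map_window(src, sizeof(*src) + payload, sizeof(*hdr));
        uint32_t size = hdr ? hdr->size : 0;

        /* Pass 2: remap header plus payload now that the length is known. */
        const struct seed_blob *full =
                map_window(src, sizeof(*src) + payload, sizeof(*full) + size);
        if (full)
                printf("consumed %u seed bytes\n", (unsigned)full->size);

        free(src);
        return 0;
}
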
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 5e23e2d305e7..d564d25df8ab 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
@@ -36,11 +36,11 @@ arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
+lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o random.o
+lib-$(CONFIG_ARM64) += arm64-stub.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 993aa56755f6..b4f7d78f9e8b 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -340,6 +340,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS)
pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ efi_random_get_seed(sys_table);
+
new_fdt_addr = fdt_addr;
status = allocate_new_fdt_and_exit_boot(sys_table, handle,
&new_fdt_addr, dram_base + MAX_FDT_OFFSET,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aded10662020..757badc1debb 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,15 +32,6 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
-/*
- * Allow the platform to override the allocation granularity: this allows
- * systems that have the capability to run with a larger page size to deal
- * with the allocations for initrd and fdt more efficiently.
- */
-#ifndef EFI_ALLOC_ALIGN
-#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
-#endif
-
#define EFI_MMAP_NR_SLACK_SLOTS 8
struct file_info {
@@ -186,14 +177,16 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
again:
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
@@ -208,7 +201,7 @@ again:
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
if (end > max)
end = max;
@@ -278,14 +271,16 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
unsigned long m = (unsigned long)map;
@@ -300,7 +295,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
/*
* Don't allocate at 0x0. It will confuse code that
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index ee49cd23ee63..b98824e3800a 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -15,6 +15,15 @@
*/
#undef __init
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -62,4 +71,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 0c9f58c5ba50..7e72954d5860 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -8,6 +8,7 @@
*/
#include <linux/efi.h>
+#include <linux/log2.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -41,21 +42,23 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
*/
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
- unsigned long align)
+ unsigned long align_shift)
{
- u64 start, end;
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
- start = round_up(md->phys_addr, align);
- end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
- align);
+ region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
- if (start > end)
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
return 0;
- return (end - start + 1) / align;
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
}
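
A quick check of the reworked slot arithmetic in get_entry_num_slots() above, with made-up values: for 4 pages of 4 KiB at 0x1000, a 0x2000-byte allocation and 4 KiB alignment, region_end = 0x4fff, first_slot = 0x1000, last_slot = round_down(0x4fff - 0x2000 + 1, 0x1000) = 0x3000, so the function reports ((0x3000 - 0x1000) >> 12) + 1 = 3 candidate start addresses (0x1000, 0x2000 and 0x3000 all keep the allocation inside the region). A user-space copy of the formula, simplified by dropping the memory-type check and the ULONG_MAX clamp:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static uint64_t round_up_p2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Same computation as the reworked get_entry_num_slots(), power-of-two align only. */
static unsigned long num_slots(uint64_t phys, uint64_t pages,
                               uint64_t size, unsigned int align_shift)
{
        uint64_t align = 1ULL << align_shift;
        uint64_t region_end = phys + pages * PAGE_SIZE - 1;
        uint64_t first_slot = round_up_p2(phys, align);
        uint64_t last_slot = round_down_p2(region_end - size + 1, align);

        if (first_slot > last_slot)
                return 0;
        return (unsigned long)((last_slot - first_slot) >> align_shift) + 1;
}

int main(void)
{
        /* The worked example from the note above. */
        assert(num_slots(0x1000, 4, 0x2000, 12) == 3);
        /* An allocation as large as the region leaves exactly one slot. */
        assert(num_slots(0x1000, 4, 4 * PAGE_SIZE, 12) == 1);
        /* An allocation one page larger than the region leaves none. */
        assert(num_slots(0x1000, 4, 5 * PAGE_SIZE, 12) == 0);
        printf("slot counts match the formula above\n");
        return 0;
}
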
/*
@@ -98,7 +101,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_memory_desc_t *md = (void *)memory_map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, align);
+ slots = get_entry_num_slots(md, size, ilog2(align));
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
}
@@ -141,3 +144,51 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
return status;
}
+
+#define RANDOM_SEED_SIZE 32
+
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+ struct efi_rng_protocol *rng;
+ struct linux_efi_random_seed *seed;
+ efi_status_t status;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + RANDOM_SEED_SIZE,
+ (void **)&seed);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE,
+ seed->bits);
+ if (status == EFI_UNSUPPORTED)
+ /*
+ * Use whatever algorithm we have available if the raw algorithm
+ * is not implemented.
+ */
+ status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE,
+ seed->bits);
+
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ seed->size = RANDOM_SEED_SIZE;
+ status = efi_call_early(install_configuration_table, &rng_table_guid,
+ seed);
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ return EFI_SUCCESS;
+
+err_freepool:
+ efi_call_early(free_pool, seed);
+ return status;
+}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index f61bb52be318..8cd578f62059 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -8,7 +8,6 @@
*
*/
-#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -156,7 +155,7 @@ static long efi_runtime_get_variable(unsigned long arg)
{
struct efi_getvariable __user *getvariable_user;
struct efi_getvariable getvariable;
- unsigned long datasize, prev_datasize, *dz;
+ unsigned long datasize = 0, prev_datasize, *dz;
efi_guid_t vendor_guid, *vd = NULL;
efi_status_t status;
efi_char16_t *name = NULL;
@@ -266,14 +265,10 @@ static long efi_runtime_set_variable(unsigned long arg)
return rv;
}
- data = kmalloc(setvariable.data_size, GFP_KERNEL);
- if (!data) {
+ data = memdup_user(setvariable.data, setvariable.data_size);
+ if (IS_ERR(data)) {
kfree(name);
- return -ENOMEM;
- }
- if (copy_from_user(data, setvariable.data, setvariable.data_size)) {
- rv = -EFAULT;
- goto out;
+ return PTR_ERR(data);
}
status = efi.set_variable(name, &vendor_guid,
@@ -429,7 +424,7 @@ static long efi_runtime_get_nextvariablename(unsigned long arg)
efi_guid_t *vd = NULL;
efi_guid_t vendor_guid;
efi_char16_t *name = NULL;
- int rv;
+ int rv = 0;
getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cd84934774cc..ce861a2853a4 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -13,12 +13,26 @@ config FPGA
if FPGA
+config FPGA_REGION
+ tristate "FPGA Region"
+ depends on OF && FPGA_BRIDGE
+ help
+ FPGA Regions allow loading FPGA images under control of
+ the Device Tree.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
- depends on ARCH_SOCFPGA
+ depends on ARCH_SOCFPGA || COMPILE_TEST
help
FPGA manager driver support for Altera SOCFPGA.
+config FPGA_MGR_SOCFPGA_A10
+ tristate "Altera SoCFPGA Arria10"
+ depends on ARCH_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ FPGA manager driver support for Altera Arria10 SoCFPGA.
+
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
depends on ARCH_ZYNQ || COMPILE_TEST
@@ -26,6 +40,29 @@ config FPGA_MGR_ZYNQ_FPGA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
+config FPGA_BRIDGE
+ tristate "FPGA Bridge Framework"
+ depends on OF
+ help
+ Say Y here if you want to support bridges connected between host
+ processors and FPGAs or between FPGAs.
+
+config SOCFPGA_FPGA_BRIDGE
+ tristate "Altera SoCFPGA FPGA Bridges"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for FPGA bridges for Altera SOCFPGA
+ devices.
+
+config ALTERA_FREEZE_BRIDGE
+ tristate "Altera FPGA Freeze Bridge"
+ depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for Altera FPGA Freeze bridges. A
+ freeze bridge is a bridge that exists in the FPGA fabric to
+ isolate one region of the FPGA from the busses while that
+ region is being reprogrammed.
+
endif # FPGA
endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 8d83fc6b1613..8df07bcf42a6 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -7,4 +7,13 @@ obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+
+# FPGA Bridge Drivers
+obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
+obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
+obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o
+
+# High Level Interfaces
+obj-$(CONFIG_FPGA_REGION) += fpga-region.o
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
new file mode 100644
index 000000000000..d4eeb74388da
--- /dev/null
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -0,0 +1,180 @@
+/*
+ * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages a bridge between an FPGA and the SDRAM used by the ARM
+ * host processor system (HPS).
+ *
+ * The bridge contains 4 read ports, 4 write ports, and 6 command ports.
+ * Reconfiguring these ports requires that no SDRAM transactions occur during
+ * reconfiguration. The code reconfiguring the ports cannot run out of SDRAM
+ * nor can the FPGA access the SDRAM during reconfiguration. This driver does
+ * not support reconfiguring the ports. The ports are configured by code
+ * running out of on chip ram before Linux is started and the configuration
+ * is passed in a handoff register in the system manager.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
+#define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK 0x00003fff
+#define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT 0
+#define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT 4
+#define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT 8
+
+/*
+ * From the Cyclone V HPS Memory Map document:
+ * These registers are used to store handoff information between the
+ * preloader and the OS. These 8 registers can be used to store any
+ * information. The contents of these registers have no impact on
+ * the state of the HPS hardware.
+ */
+#define SYSMGR_ISWGRP_HANDOFF3 (0x8C)
+
+#define F2S_BRIDGE_NAME "fpga2sdram"
+
+struct alt_fpga2sdram_data {
+ struct device *dev;
+ struct regmap *sdrctl;
+ int mask;
+};
+
+static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge)
+{
+ struct alt_fpga2sdram_data *priv = bridge->priv;
+ int value;
+
+ regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value);
+
+ return (value & priv->mask) == priv->mask;
+}
+
+static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv,
+ bool enable)
+{
+ return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST,
+ priv->mask, enable ? priv->mask : 0);
+}
+
+static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_fpga2sdram_enable_set(bridge->priv, enable);
+}
+
+struct prop_map {
+ char *prop_name;
+ u32 *prop_value;
+ u32 prop_max;
+};
+
+static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
+ .enable_set = alt_fpga2sdram_enable_set,
+ .enable_show = alt_fpga2sdram_enable_show,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-fpga2sdram-bridge" },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct alt_fpga2sdram_data *priv;
+ u32 enable;
+ struct regmap *sysmgr;
+ int ret = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl");
+ if (IS_ERR(priv->sdrctl)) {
+ dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n");
+ return PTR_ERR(priv->sdrctl);
+ }
+
+ sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(sysmgr)) {
+ dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(sysmgr);
+ }
+
+ /* Get f2s bridge configuration saved in handoff register */
+ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
+
+ ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+ ret = _alt_fpga2sdram_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(&pdev->dev);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver altera_fpga_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_fpga2sdram_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(altera_fpga_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
new file mode 100644
index 000000000000..8dcd9fb22cb9
--- /dev/null
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -0,0 +1,273 @@
+/*
+ * FPGA Freeze Bridge Controller
+ *
+ * Copyright (C) 2016 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define FREEZE_CSR_STATUS_OFFSET 0
+#define FREEZE_CSR_CTRL_OFFSET 4
+#define FREEZE_CSR_ILLEGAL_REQ_OFFSET 8
+#define FREEZE_CSR_REG_VERSION 12
+
+#define FREEZE_CSR_SUPPORTED_VERSION 2
+
+#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0)
+#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1)
+
+#define FREEZE_CSR_CTRL_FREEZE_REQ BIT(0)
+#define FREEZE_CSR_CTRL_RESET_REQ BIT(1)
+#define FREEZE_CSR_CTRL_UNFREEZE_REQ BIT(2)
+
+#define FREEZE_BRIDGE_NAME "freeze"
+
+struct altera_freeze_br_data {
+ struct device *dev;
+ void __iomem *base_addr;
+ bool enable;
+};
+
+/*
+ * Poll status until status bit is set or we have a timeout.
+ */
+static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv,
+ u32 timeout, u32 req_ack)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_illegal_req_addr = priv->base_addr +
+ FREEZE_CSR_ILLEGAL_REQ_OFFSET;
+ u32 status, illegal, ctrl;
+ int ret = -ETIMEDOUT;
+
+ do {
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal) {
+ dev_err(dev, "illegal request detected 0x%x", illegal);
+
+ writel(1, csr_illegal_req_addr);
+
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal)
+ dev_err(dev, "illegal request not cleared 0x%x",
+ illegal);
+
+ ret = -EINVAL;
+ break;
+ }
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack);
+ status &= req_ack;
+ if (status) {
+ ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET);
+ dev_dbg(dev, "%s request %x acknowledged %x %x\n",
+ __func__, req_ack, status, ctrl);
+ ret = 0;
+ break;
+ }
+
+ udelay(1);
+ } while (timeout--);
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "%s timeout waiting for 0x%x\n",
+ __func__, req_ack);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already disabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not enabled %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_FREEZE_REQ_DONE);
+
+ if (ret)
+ writel(0, csr_ctrl_addr);
+ else
+ writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ writel(0, csr_ctrl_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already enabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not frozen %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ writel(0, csr_ctrl_addr);
+
+ return ret;
+}
+
+/*
+ * enable = 1 : allow traffic through the bridge
+ * enable = 0 : disable traffic through the bridge
+ */
+static int altera_freeze_br_enable_set(struct fpga_bridge *bridge,
+ bool enable)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+ struct fpga_image_info *info = bridge->info;
+ u32 timeout = 0;
+ int ret;
+
+ if (enable) {
+ if (info)
+ timeout = info->enable_timeout_us;
+
+ ret = altera_freeze_br_do_unfreeze(bridge->priv, timeout);
+ } else {
+ if (info)
+ timeout = info->disable_timeout_us;
+
+ ret = altera_freeze_br_do_freeze(bridge->priv, timeout);
+ }
+
+ if (!ret)
+ priv->enable = enable;
+
+ return ret;
+}
+
+static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+
+ return priv->enable;
+}
+
+static struct fpga_bridge_ops altera_freeze_br_br_ops = {
+ .enable_set = altera_freeze_br_enable_set,
+ .enable_show = altera_freeze_br_enable_show,
+};
+
+static const struct of_device_id altera_freeze_br_of_match[] = {
+ { .compatible = "altr,freeze-bridge-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
+
+static int altera_freeze_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct altera_freeze_br_data *priv;
+ struct resource *res;
+ u32 status, revision;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base_addr))
+ return PTR_ERR(priv->base_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
+ priv->enable = 1;
+
+ revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION);
+ if (revision != FREEZE_CSR_SUPPORTED_VERSION)
+ dev_warn(dev,
+ "%s Freeze Controller unexpected revision %d != %d\n",
+ __func__, revision, FREEZE_CSR_SUPPORTED_VERSION);
+
+ return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+}
+
+static int altera_freeze_br_remove(struct platform_device *pdev)
+{
+ fpga_bridge_unregister(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver altera_freeze_br_driver = {
+ .probe = altera_freeze_br_probe,
+ .remove = altera_freeze_br_remove,
+ .driver = {
+ .name = "altera_freeze_br",
+ .of_match_table = of_match_ptr(altera_freeze_br_of_match),
+ },
+};
+
+module_platform_driver(altera_freeze_br_driver);
+
+MODULE_DESCRIPTION("Altera Freeze Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
new file mode 100644
index 000000000000..4b354c79be31
--- /dev/null
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -0,0 +1,222 @@
+/*
+ * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * Includes this patch from the mailing list:
+ * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters
+ * Signed-off-by: Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages bridges on an Altera SoCFPGA between the ARM host
+ * processor system (HPS) and the embedded FPGA.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
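+
+/*
+ * Illustrative consumer-side sketch (not code added by this patch): a caller
+ * holding an exclusive bridge reference disables traffic, reprograms the
+ * FPGA, then re-enables traffic and drops the reference. "region_np", "info"
+ * and reprogram_fpga() are hypothetical placeholders.
+ *
+ * struct fpga_bridge *br;
+ * int ret;
+ *
+ * br = of_fpga_bridge_get(region_np, info);
+ * if (IS_ERR(br))
+ * return PTR_ERR(br);
+ * ret = fpga_bridge_disable(br);
+ * if (!ret)
+ * ret = reprogram_fpga();
+ * if (!ret)
+ * ret = fpga_bridge_enable(br);
+ * fpga_bridge_put(br);
+ */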
+
+#include <linux/clk.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define ALT_L3_REMAP_OFST 0x0
+#define ALT_L3_REMAP_MPUZERO_MSK 0x00000001
+#define ALT_L3_REMAP_H2F_MSK 0x00000008
+#define ALT_L3_REMAP_LWH2F_MSK 0x00000010
+
+#define HPS2FPGA_BRIDGE_NAME "hps2fpga"
+#define LWHPS2FPGA_BRIDGE_NAME "lwhps2fpga"
+#define FPGA2HPS_BRIDGE_NAME "fpga2hps"
+
+struct altera_hps2fpga_data {
+ const char *name;
+ struct reset_control *bridge_reset;
+ struct regmap *l3reg;
+ unsigned int remap_mask;
+ struct clk *clk;
+};
+
+static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ return reset_control_status(priv->bridge_reset);
+}
+
+/* The L3 REMAP register is write only, so keep a cached value. */
+static unsigned int l3_remap_shadow;
+static spinlock_t l3_remap_lock;
+
+static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ bool enable)
+{
+ unsigned long flags;
+ int ret;
+
+ /* bring bridge out of reset */
+ if (enable)
+ ret = reset_control_deassert(priv->bridge_reset);
+ else
+ ret = reset_control_assert(priv->bridge_reset);
+ if (ret)
+ return ret;
+
+ /* Allow bridge to be visible to L3 masters or not */
+ if (priv->remap_mask) {
+ spin_lock_irqsave(&l3_remap_lock, flags);
+ l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK;
+
+ if (enable)
+ l3_remap_shadow |= priv->remap_mask;
+ else
+ l3_remap_shadow &= ~priv->remap_mask;
+
+ ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST,
+ l3_remap_shadow);
+ spin_unlock_irqrestore(&l3_remap_lock, flags);
+ }
+
+ return ret;
+}
+
+static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_hps2fpga_enable_set(bridge->priv, enable);
+}
+
+static const struct fpga_bridge_ops altera_hps2fpga_br_ops = {
+ .enable_set = alt_hps2fpga_enable_set,
+ .enable_show = alt_hps2fpga_enable_show,
+};
+
+static struct altera_hps2fpga_data hps2fpga_data = {
+ .name = HPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_H2F_MSK,
+};
+
+static struct altera_hps2fpga_data lwhps2fpga_data = {
+ .name = LWHPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_LWH2F_MSK,
+};
+
+static struct altera_hps2fpga_data fpga2hps_data = {
+ .name = FPGA2HPS_BRIDGE_NAME,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-hps2fpga-bridge",
+ .data = &hps2fpga_data },
+ { .compatible = "altr,socfpga-lwhps2fpga-bridge",
+ .data = &lwhps2fpga_data },
+ { .compatible = "altr,socfpga-fpga2hps-bridge",
+ .data = &fpga2hps_data },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct altera_hps2fpga_data *priv;
+ const struct of_device_id *of_id;
+ u32 enable;
+ int ret;
+
+ of_id = of_match_device(altera_fpga_of_match, dev);
+ priv = (struct altera_hps2fpga_data *)of_id->data;
+
+ priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ if (IS_ERR(priv->bridge_reset)) {
+ dev_err(dev, "Could not get %s reset control\n", priv->name);
+ return PTR_ERR(priv->bridge_reset);
+ }
+
+ if (priv->remap_mask) {
+ priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
+ if (IS_ERR(priv->l3reg)) {
+ dev_err(dev, "regmap for altr,l3regs lookup failed\n");
+ return PTR_ERR(priv->l3reg);
+ }
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ spin_lock_init(&l3_remap_lock);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+
+ ret = _alt_hps2fpga_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(&pdev->dev);
+ return ret;
+ }
+ }
+ }
+
+ return fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
+ priv);
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ fpga_bridge_unregister(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver alt_fpga_bridge_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_hps2fpga_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(alt_fpga_bridge_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
new file mode 100644
index 000000000000..33ee83e6373c
--- /dev/null
+++ b/drivers/fpga/fpga-bridge.c
@@ -0,0 +1,395 @@
+/*
+ * FPGA Bridge Framework Driver
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_bridge_ida);
+static struct class *fpga_bridge_class;
+
+/* Lock for adding/removing bridges to/from linked lists */
+spinlock_t bridge_list_lock;
+
+static int fpga_bridge_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_bridge_enable - Enable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_enable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "enable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_enable);
+
+/**
+ * fpga_bridge_disable - Disable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_disable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "disable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not a FPGA Bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+
+{
+ struct device *dev;
+ struct fpga_bridge *bridge;
+ int ret = -ENODEV;
+
+ dev = class_find_device(fpga_bridge_class, NULL, np,
+ fpga_bridge_of_node_match);
+ if (!dev)
+ goto err_dev;
+
+ bridge = to_fpga_bridge(dev);
+ if (!bridge)
+ goto err_dev;
+
+ bridge->info = info;
+
+ if (!mutex_trylock(&bridge->mutex)) {
+ ret = -EBUSY;
+ goto err_dev;
+ }
+
+ if (!try_module_get(dev->parent->driver->owner))
+ goto err_ll_mod;
+
+ dev_dbg(&bridge->dev, "get\n");
+
+ return bridge;
+
+err_ll_mod:
+ mutex_unlock(&bridge->mutex);
+err_dev:
+ put_device(dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+/**
+ * fpga_bridge_put - release a reference to a bridge
+ *
+ * @bridge: FPGA bridge
+ */
+void fpga_bridge_put(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "put\n");
+
+ bridge->info = NULL;
+ module_put(bridge->dev.parent->driver->owner);
+ mutex_unlock(&bridge->mutex);
+ put_device(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_put);
+
+/**
+ * fpga_bridges_enable - enable bridges in a list
+ * @bridge_list: list of FPGA bridges
+ *
+ * Enable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_enable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_enable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_enable);
+
+/**
+ * fpga_bridges_disable - disable bridges in a list
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * Disable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_disable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node;
+ int ret;
+
+ list_for_each(node, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+ ret = fpga_bridge_disable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_disable);
+
+/**
+ * fpga_bridges_put - put bridges
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * For each bridge in the list, put the bridge and remove it from the list.
+ * If list is empty, do nothing.
+ */
+void fpga_bridges_put(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ struct list_head *node, *next;
+ unsigned long flags;
+
+ list_for_each_safe(node, next, bridge_list) {
+ bridge = list_entry(node, struct fpga_bridge, node);
+
+ fpga_bridge_put(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_del(&bridge->node);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_put);
+
+/**
+ * fpga_bridge_get_to_list - get a bridge, add it to a list
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = of_fpga_bridge_get(np, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list);
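+
+/*
+ * Sketch of how the list helpers above combine (illustrative only; "br_np",
+ * "nbr" and "info" are hypothetical):
+ *
+ * LIST_HEAD(bridge_list);
+ * int i, ret = 0;
+ *
+ * for (i = 0; i < nbr && !ret; i++)
+ * ret = fpga_bridge_get_to_list(br_np[i], info, &bridge_list);
+ * if (!ret)
+ * ret = fpga_bridges_disable(&bridge_list);
+ * ... program the FPGA here ...
+ * if (!ret)
+ * ret = fpga_bridges_enable(&bridge_list);
+ * fpga_bridges_put(&bridge_list);
+ */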
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ return sprintf(buf, "%s\n", bridge->name);
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ int enable = 1;
+
+ if (bridge->br_ops && bridge->br_ops->enable_show)
+ enable = bridge->br_ops->enable_show(bridge);
+
+ return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *fpga_bridge_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_bridge);
+
+/**
+ * fpga_bridge_register - register a fpga bridge driver
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_register(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv)
+{
+ struct fpga_bridge *bridge;
+ int id, ret = 0;
+
+ if (!name || !strlen(name)) {
+ dev_err(dev, "Attempt to register with no name!\n");
+ return -EINVAL;
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto error_kfree;
+ }
+
+ mutex_init(&bridge->mutex);
+ INIT_LIST_HEAD(&bridge->node);
+
+ bridge->name = name;
+ bridge->br_ops = br_ops;
+ bridge->priv = priv;
+
+ device_initialize(&bridge->dev);
+ bridge->dev.class = fpga_bridge_class;
+ bridge->dev.parent = dev;
+ bridge->dev.of_node = dev->of_node;
+ bridge->dev.id = id;
+ dev_set_drvdata(dev, bridge);
+
+ ret = dev_set_name(&bridge->dev, "br%d", id);
+ if (ret)
+ goto error_device;
+
+ ret = device_add(&bridge->dev);
+ if (ret)
+ goto error_device;
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n",
+ bridge->name);
+
+ return 0;
+
+error_device:
+ ida_simple_remove(&fpga_bridge_ida, id);
+error_kfree:
+ kfree(bridge);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_register);
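+
+/*
+ * Minimal registration sketch for a low level bridge driver (illustrative;
+ * my_br_ops, my_enable_set and my_priv are hypothetical; see the Altera
+ * drivers earlier in this patch for complete users):
+ *
+ * static const struct fpga_bridge_ops my_br_ops = {
+ * .enable_set = my_enable_set,
+ * };
+ *
+ * ret = fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops, my_priv);
+ */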
+
+/**
+ * fpga_bridge_unregister - unregister a fpga bridge driver
+ * @dev: FPGA bridge device from pdev
+ */
+void fpga_bridge_unregister(struct device *dev)
+{
+ struct fpga_bridge *bridge = dev_get_drvdata(dev);
+
+ /*
+ * If the low level driver provides a method for putting bridge into
+ * a desired state upon unregister, do it.
+ */
+ if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ bridge->br_ops->fpga_bridge_remove(bridge);
+
+ device_unregister(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
+
+static void fpga_bridge_dev_release(struct device *dev)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
+}
+
+static int __init fpga_bridge_dev_init(void)
+{
+ spin_lock_init(&bridge_list_lock);
+
+ fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
+ if (IS_ERR(fpga_bridge_class))
+ return PTR_ERR(fpga_bridge_class);
+
+ fpga_bridge_class->dev_groups = fpga_bridge_groups;
+ fpga_bridge_class->dev_release = fpga_bridge_dev_release;
+
+ return 0;
+}
+
+static void __exit fpga_bridge_dev_exit(void)
+{
+ class_destroy(fpga_bridge_class);
+ ida_destroy(&fpga_bridge_ida);
+}
+
+MODULE_DESCRIPTION("FPGA Bridge Driver");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_bridge_dev_init);
+module_exit(fpga_bridge_dev_exit);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 953dc9195937..f0a69d3e60a5 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -32,19 +32,20 @@ static struct class *fpga_mgr_class;
/**
* fpga_mgr_buf_load - load fpga from image in buffer
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @buf: buffer contain fpga image
* @count: byte count of buf
*
* Step the low level fpga manager through the device-specific steps of getting
* an FPGA ready to be configured, writing the image to it, then doing whatever
* post-configuration steps necessary. This code assumes the caller got the
- * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
+ * not an error code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
- size_t count)
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
{
struct device *dev = &mgr->dev;
int ret;
@@ -52,10 +53,12 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
/*
* Call the low level driver's write_init function. This will do the
* device-specific things to get the FPGA into the state where it is
- * ready to receive an FPGA image.
+ * ready to receive an FPGA image. The low level driver only gets to
+ * see the first initial_header_size bytes in the buffer.
*/
mgr->state = FPGA_MGR_STATE_WRITE_INIT;
- ret = mgr->mops->write_init(mgr, flags, buf, count);
+ ret = mgr->mops->write_init(mgr, info, buf,
+ min(mgr->mops->initial_header_size, count));
if (ret) {
dev_err(dev, "Error preparing FPGA for writing\n");
mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
@@ -78,7 +81,7 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
* steps to finish and set the FPGA into operating mode.
*/
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
- ret = mgr->mops->write_complete(mgr, flags);
+ ret = mgr->mops->write_complete(mgr, info);
if (ret) {
dev_err(dev, "Error after writing image data to FPGA\n");
mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
@@ -93,17 +96,19 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
/**
* fpga_mgr_firmware_load - request firmware and load to fpga
* @mgr: fpga manager
- * @flags: flags setting fpga confuration modes
+ * @info: fpga image specific information
* @image_name: name of image file on the firmware search path
*
* Request an FPGA image using the firmware class, then write out to the FPGA.
* Update the state before each step to provide info on what step failed if
* there is a failure. This code assumes the caller got the mgr pointer
- * from of_fpga_mgr_get() and checked that it is not an error code.
+ * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error
+ * code.
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *image_name)
{
struct device *dev = &mgr->dev;
@@ -121,7 +126,7 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
return ret;
}
- ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
+ ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size);
release_firmware(fw);
@@ -181,30 +186,11 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-static int fpga_mgr_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-/**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
- * @node: device node
- *
- * Given a device node, get an exclusive reference to a fpga mgr.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+struct fpga_manager *__fpga_mgr_get(struct device *dev)
{
struct fpga_manager *mgr;
- struct device *dev;
int ret = -ENODEV;
- dev = class_find_device(fpga_mgr_class, NULL, node,
- fpga_mgr_of_node_match);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
mgr = to_fpga_manager(dev);
if (!mgr)
goto err_dev;
@@ -226,6 +212,55 @@ err_dev:
put_device(dev);
return ERR_PTR(ret);
}
+
+static int fpga_mgr_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * @dev: parent device that fpga mgr was registered with
+ *
+ * Given a device, get an exclusive reference to a fpga mgr.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *fpga_mgr_get(struct device *dev)
+{
+ struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
+ fpga_mgr_dev_match);
+ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(mgr_dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_get);
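+
+/*
+ * Illustrative caller sketch for the info-based interface (the parent device
+ * "parent_dev" and the firmware file name are hypothetical):
+ *
+ * struct fpga_image_info info = { .flags = FPGA_MGR_PARTIAL_RECONFIG };
+ * struct fpga_manager *mgr = fpga_mgr_get(parent_dev);
+ * int ret;
+ *
+ * if (!IS_ERR(mgr)) {
+ * ret = fpga_mgr_firmware_load(mgr, &info, "soc_image.rbf");
+ * fpga_mgr_put(mgr);
+ * }
+ */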
+
+static int fpga_mgr_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * @node: device node
+ *
+ * Given a device node, get an exclusive reference to a fpga mgr.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_mgr_class, NULL, node,
+ fpga_mgr_of_node_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(dev);
+}
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
/**
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
new file mode 100644
index 000000000000..3222fdbad75a
--- /dev/null
+++ b/drivers/fpga/fpga-region.c
@@ -0,0 +1,603 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @info: fpga image specific information
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_image_info *info;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+static DEFINE_IDA(fpga_region_ida);
+static struct class *fpga_region_class;
+
+static const struct of_device_id fpga_region_of_match[] = {
+ { .compatible = "fpga-region", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ *
+ * Caller will need to put_device(&region->dev) when done.
+ *
+ * Returns FPGA Region struct or NULL.
+ */
+static struct fpga_region *fpga_region_find(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_region_class, NULL, np,
+ fpga_region_of_node_match);
+ if (!dev)
+ return NULL;
+
+ return to_fpga_region(dev);
+}
+
+/**
+ * fpga_region_get - get an exclusive reference to a fpga region
+ * @region: FPGA Region struct
+ *
+ * Caller should call fpga_region_put() when done with region.
+ *
+ * Return fpga_region struct if successful.
+ * Return -EBUSY if someone already has a reference to the region.
+ * Return -ENODEV if the region's module reference cannot be taken.
+ */
+static struct fpga_region *fpga_region_get(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ if (!mutex_trylock(&region->mutex)) {
+ dev_dbg(dev, "%s: FPGA Region already in use\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ get_device(dev);
+ of_node_get(dev->of_node);
+ if (!try_module_get(dev->parent->driver->owner)) {
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev_dbg(&region->dev, "get\n");
+
+ return region;
+}
+
+/**
+ * fpga_region_put - release a reference to a region
+ *
+ * @region: FPGA region
+ */
+static void fpga_region_put(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ dev_dbg(&region->dev, "put\n");
+
+ module_put(dev->parent->driver->owner);
+ of_node_put(dev->of_node);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+}
+
+/**
+ * fpga_region_get_manager - get exclusive reference for FPGA manager
+ * @region: FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *mgr_node;
+ struct fpga_manager *mgr;
+
+ of_node_get(np);
+ while (np) {
+ if (of_device_is_compatible(np, "fpga-region")) {
+ mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+ if (mgr_node) {
+ mgr = of_fpga_mgr_get(mgr_node);
+ of_node_put(np);
+ return mgr;
+ }
+ }
+ np = of_get_next_parent(np);
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ * @overlay: device node of the overlay
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property. Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int fpga_region_get_bridges(struct fpga_region *region,
+ struct device_node *overlay)
+{
+ struct device *dev = &region->dev;
+ struct device_node *region_np = dev->of_node;
+ struct device_node *br, *np, *parent_br = NULL;
+ int i, ret;
+
+ /* If parent is a bridge, add to list */
+ ret = fpga_bridge_get_to_list(region_np->parent, region->info,
+ &region->bridge_list);
+ if (ret == -EBUSY)
+ return ret;
+
+ if (!ret)
+ parent_br = region_np->parent;
+
+ /* If overlay has a list of bridges, use it. */
+ if (of_parse_phandle(overlay, "fpga-bridges", 0))
+ np = overlay;
+ else
+ np = region_np;
+
+ for (i = 0; ; i++) {
+ br = of_parse_phandle(np, "fpga-bridges", i);
+ if (!br)
+ break;
+
+ /* If parent bridge is in list, skip it. */
+ if (br == parent_br)
+ continue;
+
+ /* If node is a bridge, get it and add to list */
+ ret = fpga_bridge_get_to_list(br, region->info,
+ &region->bridge_list);
+
+ /* If any of the bridges are in use, give up */
+ if (ret == -EBUSY) {
+ fpga_bridges_put(&region->bridge_list);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * fpga_region_program_fpga - program FPGA
+ * @region: FPGA region
+ * @firmware_name: name of FPGA image firmware file
+ * @overlay: device node of the overlay
+ *
+ * Program an FPGA using information in the device tree.
+ * Function assumes that there is a firmware-name property.
+ *
+ * Return 0 for success or negative error code.
+ */
+static int fpga_region_program_fpga(struct fpga_region *region,
+ const char *firmware_name,
+ struct device_node *overlay)
+{
+ struct fpga_manager *mgr;
+ int ret;
+
+ region = fpga_region_get(region);
+ if (IS_ERR(region)) {
+ pr_err("failed to get fpga region\n");
+ return PTR_ERR(region);
+ }
+
+ mgr = fpga_region_get_manager(region);
+ if (IS_ERR(mgr)) {
+ pr_err("failed to get fpga region manager\n");
+ return PTR_ERR(mgr);
+ }
+
+ ret = fpga_region_get_bridges(region, overlay);
+ if (ret) {
+ pr_err("failed to get fpga region bridges\n");
+ goto err_put_mgr;
+ }
+
+ ret = fpga_bridges_disable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to disable region bridges\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+ if (ret) {
+ pr_err("failed to load fpga image\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_bridges_enable(&region->bridge_list);
+ if (ret) {
+ pr_err("failed to enable region bridges\n");
+ goto err_put_br;
+ }
+
+ fpga_mgr_put(mgr);
+ fpga_region_put(region);
+
+ return 0;
+
+err_put_br:
+ fpga_bridges_put(&region->bridge_list);
+err_put_mgr:
+ fpga_mgr_put(mgr);
+ fpga_region_put(region);
+
+ return ret;
+}
+
+/**
+ * child_regions_with_firmware - check child FPGA regions for firmware-name
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+ struct device_node *child_region;
+ const char *child_firmware_name;
+ int ret = 0;
+
+ of_node_get(overlay);
+
+ child_region = of_find_matching_node(overlay, fpga_region_of_match);
+ while (child_region) {
+ if (!of_property_read_string(child_region, "firmware-name",
+ &child_firmware_name)) {
+ ret = -EINVAL;
+ break;
+ }
+ child_region = of_find_matching_node(child_region,
+ fpga_region_of_match);
+ }
+
+ of_node_put(child_region);
+
+ if (ret)
+ pr_err("firmware-name not allowed in child FPGA region: %s",
+ child_region->full_name);
+
+ return ret;
+}
+
+/**
+ * fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeted to a FPGA Region is about to be
+ * applied. Function will check the properties that will be added to the FPGA
+ * region. If the checks pass, it will program the FPGA.
+ *
+ * The checks are:
+ * The overlay must add either firmware-name or external-fpga-config property
+ * to the FPGA Region.
+ *
+ * firmware-name : program the FPGA
+ * external-fpga-config : FPGA is already programmed
+ *
+ * The overlay can add other FPGA regions, but child FPGA regions cannot have a
+ * firmware-name property since those regions don't exist yet.
+ *
+ * If the overlay breaks the rules, the notifier returns an error and the
+ * overlay is rejected before it goes into the main tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int fpga_region_notify_pre_apply(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ const char *firmware_name = NULL;
+ struct fpga_image_info *info;
+ int ret;
+
+ info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ region->info = info;
+
+ /* Reject overlay if child FPGA Regions have firmware-name property */
+ ret = child_regions_with_firmware(nd->overlay);
+ if (ret)
+ return ret;
+
+ /* Read FPGA region properties from the overlay */
+ if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ if (of_property_read_bool(nd->overlay, "external-fpga-config"))
+ info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+ of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
+
+ of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
+ &info->enable_timeout_us);
+
+ of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
+ &info->disable_timeout_us);
+
+ /* If FPGA was externally programmed, don't specify firmware */
+ if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
+ pr_err("error: specified firmware and external-fpga-config");
+ return -EINVAL;
+ }
+
+ /* FPGA is already configured externally. We're done. */
+ if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
+ return 0;
+
+ /* If we got this far, we should be programming the FPGA */
+ if (!firmware_name) {
+ pr_err("should specify firmware-name or external-fpga-config\n");
+ return -EINVAL;
+ }
+
+ return fpga_region_program_fpga(region, firmware_name, nd->overlay);
+}
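+
+/*
+ * An overlay passing the checks above is expected to add properties such as
+ * the following to the FPGA region node (values and labels illustrative):
+ *
+ * firmware-name = "soc_image.rbf";
+ * partial-fpga-config;
+ * region-unfreeze-timeout-us = <10>;
+ * region-freeze-timeout-us = <10>;
+ * fpga-bridges = <&fpga_bridge1>;
+ */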
+
+/**
+ * fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void fpga_region_notify_post_remove(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ fpga_bridges_disable(&region->bridge_list);
+ fpga_bridges_put(&region->bridge_list);
+ devm_kfree(&region->dev, region->info);
+ region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb: notifier block
+ * @action: notifier action
+ * @arg: reconfig data
+ *
+ * This notifier handles programming a FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_overlay_notify_data *nd = arg;
+ struct fpga_region *region;
+ int ret;
+
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+ break;
+ case OF_OVERLAY_POST_APPLY:
+ pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_PRE_REMOVE:
+ pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_POST_REMOVE:
+ pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+ break;
+ default: /* should not happen */
+ return NOTIFY_OK;
+ }
+
+ region = fpga_region_find(nd->target);
+ if (!region)
+ return NOTIFY_OK;
+
+ ret = 0;
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ ret = fpga_region_notify_pre_apply(region, nd);
+ break;
+
+ case OF_OVERLAY_POST_REMOVE:
+ fpga_region_notify_post_remove(region, nd);
+ break;
+ }
+
+ put_device(&region->dev);
+
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+ .notifier_call = of_fpga_region_notify,
+};
+
+static int fpga_region_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fpga_region *region;
+ int id, ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_kfree;
+ }
+
+ mutex_init(&region->mutex);
+ INIT_LIST_HEAD(&region->bridge_list);
+
+ device_initialize(&region->dev);
+ region->dev.class = fpga_region_class;
+ region->dev.parent = dev;
+ region->dev.of_node = np;
+ region->dev.id = id;
+ dev_set_drvdata(dev, region);
+
+ ret = dev_set_name(&region->dev, "region%d", id);
+ if (ret)
+ goto err_remove;
+
+ ret = device_add(&region->dev);
+ if (ret)
+ goto err_remove;
+
+ of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+ dev_info(dev, "FPGA Region probed\n");
+
+ return 0;
+
+err_remove:
+ ida_simple_remove(&fpga_region_ida, id);
+err_kfree:
+ kfree(region);
+
+ return ret;
+}
+
+static int fpga_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+
+ device_unregister(&region->dev);
+
+ return 0;
+}
+
+static struct platform_driver fpga_region_driver = {
+ .probe = fpga_region_probe,
+ .remove = fpga_region_remove,
+ .driver = {
+ .name = "fpga-region",
+ .of_match_table = of_match_ptr(fpga_region_of_match),
+ },
+};
+
+static void fpga_region_dev_release(struct device *dev)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_simple_remove(&fpga_region_ida, region->dev.id);
+ kfree(region);
+}
+
+/**
+ * fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ */
+static int __init fpga_region_init(void)
+{
+ int ret;
+
+ fpga_region_class = class_create(THIS_MODULE, "fpga_region");
+ if (IS_ERR(fpga_region_class))
+ return PTR_ERR(fpga_region_class);
+
+ fpga_region_class->dev_release = fpga_region_dev_release;
+
+ ret = of_overlay_notifier_register(&fpga_region_of_nb);
+ if (ret)
+ goto err_class;
+
+ ret = platform_driver_register(&fpga_region_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+err_class:
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+ return ret;
+}
+
+static void __exit fpga_region_exit(void)
+{
+ platform_driver_unregister(&fpga_region_driver);
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+ class_destroy(fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+}
+
+subsys_initcall(fpga_region_init);
+module_exit(fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
new file mode 100644
index 000000000000..f8770af0f6b5
--- /dev/null
+++ b/drivers/fpga/socfpga-a10.c
@@ -0,0 +1,557 @@
+/*
+ * FPGA Manager Driver for Altera Arria10 SoCFPGA
+ *
+ * Copyright (C) 2015-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#define A10_FPGAMGR_DCLKCNT_OFST 0x08
+#define A10_FPGAMGR_DCLKSTAT_OFST 0x0c
+#define A10_FPGAMGR_IMGCFG_CTL_00_OFST 0x70
+#define A10_FPGAMGR_IMGCFG_CTL_01_OFST 0x74
+#define A10_FPGAMGR_IMGCFG_CTL_02_OFST 0x78
+#define A10_FPGAMGR_IMGCFG_STAT_OFST 0x80
+
+#define A10_FPGAMGR_DCLKSTAT_DCLKDONE BIT(0)
+
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS BIT(1)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE BIT(2)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG BIT(8)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK (BIT(16) | BIT(17))
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT 16
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH BIT(24)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT 24
+
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR BIT(0)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE BIT(1)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE BIT(2)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN BIT(4)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN BIT(6)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY BIT(9)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE BIT(10)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR BIT(11)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN BIT(12)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK (BIT(16) | BIT(17) | BIT(18))
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT 16
+
+/* FPGA CD Ratio Value */
+#define CDRATIO_x1 0x0
+#define CDRATIO_x2 0x1
+#define CDRATIO_x4 0x2
+#define CDRATIO_x8 0x3
+
+/* Configuration width 16/32 bit */
+#define CFGWDTH_32 1
+#define CFGWDTH_16 0
+
+/*
+ * struct a10_fpga_priv - private data for fpga manager
+ * @regmap: regmap for register access
+ * @fpga_data_addr: iomap for single address data register to FPGA
+ * @clk: clock
+ */
+struct a10_fpga_priv {
+ struct regmap *regmap;
+ void __iomem *fpga_data_addr;
+ struct clk *clk;
+};
+
+static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ return true;
+ }
+ return false;
+}
+
+static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ case A10_FPGAMGR_IMGCFG_STAT_OFST:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config socfpga_a10_fpga_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = socfpga_a10_fpga_writeable_reg,
+ .readable_reg = socfpga_a10_fpga_readable_reg,
+ .max_register = A10_FPGAMGR_IMGCFG_STAT_OFST,
+ .cache_type = REGCACHE_NONE,
+};
+
+/*
+ * from the register map description of cdratio in imgcfg_ctrl_02:
+ * Normal Configuration : 32bit Passive Parallel
+ * Partial Reconfiguration : 16bit Passive Parallel
+ */
+static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv,
+ int width)
+{
+ width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT;
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width);
+}
+
+static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv,
+ u32 count)
+{
+ u32 val;
+
+ /* Clear any existing DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+
+ /* Issue the requested DCLK count. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count);
+
+ /* Wait until the DCLK count is done. */
+ regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val,
+ val, 1, 100);
+
+ /* Clear DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+}
+
+#define RBF_ENCRYPTION_MODE_OFFSET 69
+#define RBF_DECOMPRESS_OFFSET 229
+
+static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream encrypted? */
+ return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0;
+}
+
+static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_DECOMPRESS_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream compressed? */
+ return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1);
+}
+
+static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width,
+ bool encrypt, bool compress)
+{
+ unsigned int cd_ratio;
+
+ /*
+ * cd ratio is dependent on cfg width and whether the bitstream
+ * is encrypted and/or compressed.
+ *
+ * | width | encr. | compr. | cd ratio |
+ * | 16 | 0 | 0 | 1 |
+ * | 16 | 0 | 1 | 4 |
+ * | 16 | 1 | 0 | 2 |
+ * | 16 | 1 | 1 | 4 |
+ * | 32 | 0 | 0 | 1 |
+ * | 32 | 0 | 1 | 8 |
+ * | 32 | 1 | 0 | 4 |
+ * | 32 | 1 | 1 | 8 |
+ */
+ if (!compress && !encrypt)
+ return CDRATIO_x1;
+
+ if (compress)
+ cd_ratio = CDRATIO_x4;
+ else
+ cd_ratio = CDRATIO_x2;
+
+ /* If 32 bit, double the cd ratio by incrementing the field */
+ if (cfg_width == CFGWDTH_32)
+ cd_ratio += 1;
+
+ return cd_ratio;
+}
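+
+/*
+ * Worked example for the table above: a compressed, unencrypted stream starts
+ * at CDRATIO_x4; at CFGWDTH_16 it stays there (row 16|0|1|4), while at
+ * CFGWDTH_32 the increment bumps it to CDRATIO_x8 (row 32|0|1|8).
+ */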
+
+static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr,
+ unsigned int cfg_width,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cd_ratio;
+ int encrypt, compress;
+
+ encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4);
+ if (encrypt < 0)
+ return -EINVAL;
+
+ compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4);
+ if (compress < 0)
+ return -EINVAL;
+
+ cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress);
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK,
+ cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT);
+
+ return 0;
+}
+
+static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv)
+{
+ u32 val;
+
+ regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val);
+
+ return val;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* Start the FPGA programming by initializing the FPGA manager */
+static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cfg_width;
+ u32 msel, stat, mask;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ cfg_width = CFGWDTH_16;
+ else
+ return -EINVAL;
+
+ /* Check for passive parallel (msel == 000 or 001) */
+ msel = socfpga_a10_fpga_read_stat(priv);
+ msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK;
+ msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT;
+ if ((msel != 0) && (msel != 1)) {
+ dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel);
+ return -EINVAL;
+ }
+
+ /* Make sure no external devices are interfering */
+ stat = socfpga_a10_fpga_read_stat(priv);
+ mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN |
+ A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN;
+ if ((stat & mask) != mask)
+ return -EINVAL;
+
+ /* Set cfg width */
+ socfpga_a10_fpga_set_cfg_width(priv, cfg_width);
+
+ /* Determine cd ratio from bitstream header and set cd ratio */
+ ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear s2f_nce to enable chip select. Leave pr_request
+ * unasserted and override disabled.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Set cfg_ctrl to enable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL);
+
+ /*
+ * Disable overrides not needed for pr.
+ * s2f_config==1 leaves reset deasserted.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG);
+
+ /* Enable override for data, dclk, nce, and pr_request to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Assert pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST);
+
+ /* Provide 2048 DCLKs before starting the config data streaming. */
+ socfpga_a10_fpga_generate_dclks(priv, 0x7ff);
+
+ /* Wait for pr_ready */
+ return socfpga_a10_fpga_wait_for_pr_ready(priv);
+}
+
+/*
+ * write data to the FPGA data register
+ */
+static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->fpga_data_addr);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks */
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg;
+ int ret;
+
+ /* Wait for pr_done */
+ ret = socfpga_a10_fpga_wait_for_pr_done(priv);
+
+ /* Clear pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Disable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0);
+
+ /* Deassert chip select */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE);
+
+ /* Disable data, dclk, nce, and pr_request override to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Return any errors regarding pr_done or pr_error */
+ if (ret)
+ return ret;
+
+ /* Final check */
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) {
+ dev_dbg(&mgr->dev,
+ "Timeout in final check. Status=%08xf\n", reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return FPGA_MGR_STATE_WRITE;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR)
+ return FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+
+ if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
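+/*
+ * With initial_header_size set below, fpga_mgr_buf_load() passes write_init()
+ * at most (RBF_DECOMPRESS_OFFSET + 1) * 4 = 920 bytes, enough to cover the
+ * encryption and compression words parsed by socfpga_a10_fpga_set_cdratio().
+ */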
+static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = {
+ .initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4,
+ .state = socfpga_a10_fpga_state,
+ .write_init = socfpga_a10_fpga_write_init,
+ .write = socfpga_a10_fpga_write,
+ .write_complete = socfpga_a10_fpga_write_complete,
+};
+
+static int socfpga_a10_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a10_fpga_priv *priv;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* First mmio base is for register access */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ /* Second mmio base is for writing FPGA image data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->fpga_data_addr))
+ return PTR_ERR(priv->fpga_data_addr);
+
+ /* regmap for register access */
+ priv->regmap = devm_regmap_init_mmio(dev, reg_base,
+ &socfpga_a10_fpga_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return -ENODEV;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+}
+
+static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct a10_fpga_priv *priv = mgr->priv;
+
+ fpga_mgr_unregister(&pdev->dev);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_a10_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-a10-fpga-mgr", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
+
+static struct platform_driver socfpga_a10_fpga_driver = {
+ .probe = socfpga_a10_fpga_probe,
+ .remove = socfpga_a10_fpga_remove,
+ .driver = {
+ .name = "socfpga_a10_fpga_manager",
+ .of_match_table = socfpga_a10_fpga_of_match,
+ },
+};
+
+module_platform_driver(socfpga_a10_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 27d2ff28132c..b6672e66cda6 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -407,13 +407,14 @@ static int socfpga_fpga_reset(struct fpga_manager *mgr)
/*
* Prepare the FPGA to receive the configuration data.
*/
-static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags,
+static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct socfpga_fpga_priv *priv = mgr->priv;
int ret;
- if (flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
return -EINVAL;
}
@@ -478,7 +479,7 @@ static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
}
static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
- u32 flags)
+ struct fpga_image_info *info)
{
struct socfpga_fpga_priv *priv = mgr->priv;
u32 status;
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index c2fb4120bd62..1812bf7614e1 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -118,7 +118,6 @@
#define FPGA_RST_NONE_MASK 0x0
struct zynq_fpga_priv {
- struct device *dev;
int irq;
struct clk *clk;
@@ -175,7 +174,8 @@ static irqreturn_t zynq_fpga_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
+static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
const char *buf, size_t count)
{
struct zynq_fpga_priv *priv;
@@ -189,7 +189,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
return err;
/* don't globally reset PL if we're doing partial reconfig */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* assert AXI interface resets */
regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
FPGA_RST_ALL_MASK);
@@ -217,7 +217,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
@@ -231,7 +231,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for !PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
goto out_err;
}
@@ -245,7 +245,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
INIT_POLL_DELAY,
INIT_POLL_TIMEOUT);
if (err) {
- dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
goto out_err;
}
}
@@ -262,7 +262,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
/* check that we have room in the command queue */
status = zynq_fpga_read(priv, STATUS_OFFSET);
if (status & STATUS_DMA_Q_F) {
- dev_err(priv->dev, "DMA command queue full");
+ dev_err(&mgr->dev, "DMA command queue full\n");
err = -EBUSY;
goto out_err;
}
@@ -295,7 +295,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
in_count = count;
priv = mgr->priv;
- kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL);
+ kbuf =
+ dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -331,19 +332,19 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- dev_err(priv->dev, "Error configuring FPGA");
+ dev_err(&mgr->dev, "Error configuring FPGA\n");
err = -EFAULT;
}
clk_disable(priv->clk);
out_free:
- dma_free_coherent(priv->dev, in_count, kbuf, dma_addr);
-
+ dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
return err;
}
-static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
+static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
{
struct zynq_fpga_priv *priv = mgr->priv;
int err;
@@ -364,7 +365,7 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
return err;
/* for the partial reconfig case we didn't touch the level shifters */
- if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
/* enable level shifters from PL to PS */
regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
LVL_SHFTR_ENABLE_PL_TO_PS);
@@ -416,8 +417,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->dev = dev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->io_base))
@@ -426,7 +425,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
"syscon");
if (IS_ERR(priv->slcr)) {
- dev_err(dev, "unable to get zynq-slcr regmap");
+ dev_err(dev, "unable to get zynq-slcr regmap\n");
return PTR_ERR(priv->slcr);
}
@@ -434,38 +433,41 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "No IRQ available");
+ dev_err(dev, "No IRQ available\n");
return priv->irq;
}
- err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0,
- dev_name(dev), priv);
- if (err) {
- dev_err(dev, "unable to request IRQ");
- return err;
- }
-
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
- dev_err(dev, "input clock not found");
+ dev_err(dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(dev, "unable to enable clock");
+ dev_err(dev, "unable to enable clock\n");
return err;
}
/* unlock the device */
zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
+ zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+ err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "unable to request IRQ\n");
+ clk_disable_unprepare(priv->clk);
+ return err;
+ }
+
clk_disable(priv->clk);
err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
&zynq_fpga_ops, priv);
if (err) {
- dev_err(dev, "unable to register FPGA manager");
+ dev_err(dev, "unable to register FPGA manager\n");
clk_unprepare(priv->clk);
return err;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d011cb89d25e..d5d36549ecc1 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,10 +22,6 @@ menuconfig GPIOLIB
if GPIOLIB
-config GPIO_DEVRES
- def_bool y
- depends on HAS_IOMEM
-
config OF_GPIO
def_bool y
depends on OF
@@ -171,7 +167,7 @@ config GPIO_DWAPB
config GPIO_EM
tristate "Emma Mobile GPIO"
- depends on ARM && OF_GPIO
+ depends on (ARCH_EMEV2 || COMPILE_TEST) && OF_GPIO
help
Say yes here to support GPIO on Renesas Emma Mobile SoCs.
@@ -455,7 +451,7 @@ config GPIO_VR41XX
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select MFD_CORE
select MFD_VX855
help
@@ -524,6 +520,7 @@ config GPIO_ZYNQ
config GPIO_ZX
bool "ZTE ZX GPIO support"
+ depends on ARCH_ZX || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO device on ZTE ZX SoCs.
@@ -607,7 +604,7 @@ config GPIO_IT87
config GPIO_SCH
tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
- depends on PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select MFD_CORE
select LPC_SCH
help
@@ -781,16 +778,13 @@ config GPIO_PCF857X
platform-neutral GPIO calls.
config GPIO_SX150X
- bool "Semtech SX150x I2C GPIO expander"
- depends on I2C=y
- select GPIOLIB_IRQCHIP
+ bool "Semtech SX150x I2C GPIO expander (deprecated)"
+ depends on PINCTRL && I2C=y
+ select PINCTRL_SX150X
default n
help
- Say yes here to provide support for Semtech SX150-series I2C
- GPIO expanders. Compatible models include:
-
- 8 bits: sx1508q
- 16 bits: sx1509q
+ Say yes here to provide support for Semtech SX150x-series I2C
+ GPIO expanders. The GPIO driver was replaced by a Pinctrl version.
config GPIO_TPIC2810
tristate "TPIC2810 8-Bit I2C GPO expander"
@@ -802,6 +796,7 @@ config GPIO_TPIC2810
config GPIO_TS4900
tristate "Technologic Systems FPGA I2C GPIO"
+ depends on SOC_IMX6 || COMPILE_TEST
select REGMAP_I2C
help
Say yes here to enable the GPIO driver for Technologic's FPGA core.
@@ -818,6 +813,14 @@ config GPIO_ADP5520
This option enables support for on-chip GPIO found
on Analog Devices ADP5520 PMICs.
+config GPIO_ALTERA_A10SR
+ tristate "Altera Arria10 System Resource GPIO"
+ depends on MFD_ALTERA_A10SR
+ help
+ Driver for Arria10 Development Kit GPIO expansion which
+ includes reads of pushbuttons and DIP switches as well
+ as writes to LEDs.
+
config GPIO_ARIZONA
tristate "Wolfson Microelectronics Arizona class devices"
depends on MFD_ARIZONA
@@ -826,7 +829,7 @@ config GPIO_ARIZONA
config GPIO_CRYSTAL_COVE
tristate "GPIO support for Crystal Cove PMIC"
- depends on INTEL_SOC_PMIC
+ depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
select GPIOLIB_IRQCHIP
help
Support for GPIO pins on Crystal Cove PMIC.
@@ -839,6 +842,7 @@ config GPIO_CRYSTAL_COVE
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
+ depends on X86 || MIPS || COMPILE_TEST
depends on MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
@@ -931,7 +935,7 @@ config GPIO_MAX77620
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
- depends on MFD_INTEL_MSIC
+ depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
help
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
@@ -1032,7 +1036,7 @@ config GPIO_UCB1400
config GPIO_WHISKEY_COVE
tristate "GPIO support for Whiskey Cove PMIC"
- depends on INTEL_SOC_PMIC
+ depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
select GPIOLIB_IRQCHIP
help
Support for GPIO pins on Whiskey Cove PMIC.
@@ -1071,6 +1075,7 @@ menu "PCI GPIO expanders"
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
+ depends on X86 || COMPILE_TEST
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.
@@ -1112,6 +1117,7 @@ config GPIO_MERRIFIELD
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+ depends on X86 || COMPILE_TEST
select GENERIC_IRQ_CHIP
help
ML7213 is a companion chip for the Intel Atom E6xx series.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ab28a2daeacc..a7676b82de6f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,9 +2,10 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIO_DEVRES) += devres.o
+obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
+obj-$(CONFIG_GPIOLIB) += gpiolib-devprop.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
+obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
@@ -102,7 +104,6 @@ obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
-obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 8ff7b0d3eac6..89863ea25de1 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -468,17 +468,19 @@ static int adnp_irq_setup(struct adnp *adnp)
return err;
}
- err = gpiochip_irqchip_add(chip,
- &adnp_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add_nested(chip,
+ &adnp_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (err) {
dev_err(chip->parent,
"could not connect irqchip to gpiochip\n");
return err;
}
+ gpiochip_set_nested_irqchip(chip, &adnp_irq_chip, adnp->client->irq);
+
return 0;
}
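The adnp change above is repeated almost verbatim in several of the I2C expander drivers that follow (crystalcove, max732x, mcp23s08, pca953x, pcf857x, stmpe): gpiochip_irqchip_add() plus gpiochip_set_chained_irqchip() become the _nested variants, since the parent interrupt of a sleeping-bus expander is a threaded IRQ rather than a chained one. A minimal sketch of the resulting pattern, assuming a hypothetical expander whose gpio_chip is already registered (the demo_* names are placeholders):

#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static struct irq_chip demo_irq_chip = {
        .name = "demo-expander",
        /* .irq_mask/.irq_unmask/.irq_set_type would go here */
};

static int demo_irq_setup(struct gpio_chip *gc, struct i2c_client *client)
{
        int err;

        /* Nested: per-line handlers run from the threaded parent IRQ. */
        err = gpiochip_irqchip_add_nested(gc, &demo_irq_chip, 0,
                                          handle_simple_irq, IRQ_TYPE_NONE);
        if (err)
                return err;

        /* Tie the nested irqchip to the expander's parent interrupt. */
        gpiochip_set_nested_irqchip(gc, &demo_irq_chip, client->irq);

        return 0;
}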
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
new file mode 100644
index 000000000000..9e1a138fed53
--- /dev/null
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * GPIO driver for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from gpio-tps65910.c
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/altera-a10sr.h>
+#include <linux/module.h>
+
+/**
+ * struct altr_a10sr_gpio - Altera Max5 GPIO device private data structure
+ * @gp: : instance of the gpio_chip
+ * @regmap: the regmap from the parent device.
+ */
+struct altr_a10sr_gpio {
+ struct gpio_chip gp;
+ struct regmap *regmap;
+};
+
+static int altr_a10sr_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->regmap, ALTR_A10SR_PBDSW_REG, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset - ALTR_A10SR_LED_VALID_SHIFT));
+}
+
+static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
+ value ? BIT(ALTR_A10SR_LED_VALID_SHIFT + offset)
+ : 0);
+}
+
+static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int nr)
+{
+ if (nr >= (ALTR_A10SR_IN_VALID_RANGE_LO - ALTR_A10SR_LED_VALID_SHIFT))
+ return 0;
+ return -EINVAL;
+}
+
+static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int nr, int value)
+{
+ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+ return 0;
+ return -EINVAL;
+}
+
+static struct gpio_chip altr_a10sr_gc = {
+ .label = "altr_a10sr_gpio",
+ .owner = THIS_MODULE,
+ .get = altr_a10sr_gpio_get,
+ .set = altr_a10sr_gpio_set,
+ .direction_input = altr_a10sr_gpio_direction_input,
+ .direction_output = altr_a10sr_gpio_direction_output,
+ .can_sleep = true,
+ .ngpio = 12,
+ .base = -1,
+};
+
+static int altr_a10sr_gpio_probe(struct platform_device *pdev)
+{
+ struct altr_a10sr_gpio *gpio;
+ int ret;
+ struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent);
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->regmap = a10sr->regmap;
+
+ gpio->gp = altr_a10sr_gc;
+
+ gpio->gp.of_node = pdev->dev.of_node;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+}
+
+static const struct of_device_id altr_a10sr_gpio_of_match[] = {
+ { .compatible = "altr,a10sr-gpio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, altr_a10sr_gpio_of_match);
+
+static struct platform_driver altr_a10sr_gpio_driver = {
+ .probe = altr_a10sr_gpio_probe,
+ .driver = {
+ .name = "altr_a10sr_gpio",
+ .of_match_table = of_match_ptr(altr_a10sr_gpio_of_match),
+ },
+};
+module_platform_driver(altr_a10sr_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>");
+MODULE_DESCRIPTION("Altera Arria10 System Resource Chip GPIO");
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 482462889c8f..1f91557717a6 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -137,15 +137,10 @@ static int arizona_gpio_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
- goto err;
+ return ret;
}
- platform_set_drvdata(pdev, arizona_gpio);
-
- return ret;
-
-err:
- return ret;
+ return 0;
}
static struct platform_driver arizona_gpio_driver = {
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
index d9c2a517c6df..4a346b7b4172 100644
--- a/drivers/gpio/gpio-axp209.c
+++ b/drivers/gpio/gpio-axp209.c
@@ -64,13 +64,9 @@ static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct axp20x_gpio *gpio = gpiochip_get_data(chip);
unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
+ int ret;
- ret = regmap_read(gpio->regmap, reg, &val);
+ ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7c446d118cd6..2197368cc899 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -351,8 +351,8 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
return retval;
}
- gpiochip_irqchip_add(&cg->chip, &crystalcove_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ gpiochip_irqchip_add_nested(&cg->chip, &crystalcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler,
IRQF_ONESHOT, KBUILD_MODNAME, cg);
@@ -362,6 +362,8 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
return retval;
}
+ gpiochip_set_nested_irqchip(&cg->chip, &crystalcove_irqchip, irq);
+
return 0;
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index dd262f00295d..9191056548fe 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -40,6 +40,7 @@ struct davinci_gpio_regs {
typedef struct irq_chip *(*gpio_get_irq_chip_cb_t)(unsigned int irq);
#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
+#define MAX_LABEL_SIZE 20
static void __iomem *gpio_base;
@@ -201,6 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
struct davinci_gpio_regs __iomem *regs;
struct device *dev = &pdev->dev;
struct resource *res;
+ char label[MAX_LABEL_SIZE];
pdata = davinci_gpio_get_pdata(pdev);
if (!pdata) {
@@ -237,7 +239,10 @@ static int davinci_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_base);
for (i = 0, base = 0; base < ngpio; i++, base += 32) {
- chips[i].chip.label = "DaVinci";
+ snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", i);
+ chips[i].chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
+ if (!chips[i].chip.label)
+ return -ENOMEM;
chips[i].chip.direction_input = davinci_direction_in;
chips[i].chip.get = davinci_gpio_get;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index f7a60a441e95..5d38b08d1ee2 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -467,7 +467,6 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.irq_not_threaded = true;
dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 00b022c9acb3..a254d5b07b94 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -471,9 +471,4 @@ static struct platform_driver etraxfs_gpio_driver = {
.probe = etraxfs_gpio_probe,
};
-static int __init etraxfs_gpio_init(void)
-{
- return platform_driver_register(&etraxfs_gpio_driver);
-}
-
-device_initcall(etraxfs_gpio_init);
+builtin_platform_driver(etraxfs_gpio_driver);
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 0b4df6051097..271356effb2e 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -17,7 +17,7 @@
#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/init.h>
struct egpio_chip {
int reg_start;
@@ -160,10 +160,14 @@ static int egpio_get(struct gpio_chip *chip, unsigned offset)
bit = egpio_bit(ei, offset);
reg = egpio->reg_start + egpio_pos(ei, offset);
- value = egpio_readw(ei, reg);
- pr_debug("readw(%p + %x) = %x\n",
- ei->base_addr, reg << ei->bus_shift, value);
- return !!(value & bit);
+ if (test_bit(offset, &egpio->is_out)) {
+ return !!(egpio->cached_values & (1 << offset));
+ } else {
+ value = egpio_readw(ei, reg);
+ pr_debug("readw(%p + %x) = %x\n",
+ ei->base_addr, reg << ei->bus_shift, value);
+ return !!(value & bit);
+ }
}
static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -225,6 +229,15 @@ static int egpio_direction_output(struct gpio_chip *chip,
}
}
+static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct egpio_chip *egpio;
+
+ egpio = gpiochip_get_data(chip);
+
+ return !test_bit(offset, &egpio->is_out);
+}
+
static void egpio_write_cache(struct egpio_info *ei)
{
int i;
@@ -327,6 +340,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
+ chip->get_direction = egpio_get_direction;
chip->base = pdata->chip[i].gpio_base;
chip->ngpio = pdata->chip[i].num_gpios;
@@ -367,24 +381,6 @@ fail:
return ret;
}
-static int __exit egpio_remove(struct platform_device *pdev)
-{
- struct egpio_info *ei = platform_get_drvdata(pdev);
- unsigned int irq, irq_end;
-
- if (ei->chained_irq) {
- irq_end = ei->irq_start + ei->nirqs;
- for (irq = ei->irq_start; irq < irq_end; irq++) {
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
- irq_set_chained_handler(ei->chained_irq, NULL);
- device_init_wakeup(&pdev->dev, 0);
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int egpio_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -416,8 +412,8 @@ static int egpio_resume(struct platform_device *pdev)
static struct platform_driver egpio_driver = {
.driver = {
.name = "htc-egpio",
+ .suppress_bind_attrs = true,
},
- .remove = __exit_p(egpio_remove),
.suspend = egpio_suspend,
.resume = egpio_resume,
};
@@ -426,15 +422,5 @@ static int __init egpio_init(void)
{
return platform_driver_probe(&egpio_driver, egpio_probe);
}
-
-static void __exit egpio_exit(void)
-{
- platform_driver_unregister(&egpio_driver);
-}
-
/* start early for dependencies */
subsys_initcall(egpio_init);
-module_exit(egpio_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kevin O'Connor <kevin@koconnor.net>");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 164de64b11fc..a1e44c221f66 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -421,9 +421,4 @@ static struct pci_driver intel_gpio_driver = {
},
};
-static int __init intel_gpio_init(void)
-{
- return pci_register_driver(&intel_gpio_driver);
-}
-
-device_initcall(intel_gpio_init);
+builtin_pci_driver(intel_gpio_driver);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index a9aaf9d822b4..4ea4c6a1313b 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -520,20 +520,19 @@ static int max732x_irq_setup(struct max732x_chip *chip,
client->irq);
return ret;
}
- ret = gpiochip_irqchip_add(&chip->gpio_chip,
- &max732x_irq_chip,
- irq_base,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&chip->gpio_chip,
+ &max732x_irq_chip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gpio_chip,
- &max732x_irq_chip,
- client->irq,
- NULL);
+ gpiochip_set_nested_irqchip(&chip->gpio_chip,
+ &max732x_irq_chip,
+ client->irq);
}
return 0;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index b46b436cb97f..ec8de4190db9 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -21,9 +21,6 @@ struct max77620_gpio {
struct gpio_chip gpio_chip;
struct regmap *rmap;
struct device *dev;
- int gpio_irq;
- int irq_base;
- int gpio_base;
};
static const struct regmap_irq max77620_gpio_irqs[] = {
@@ -254,7 +251,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->rmap = chip->rmap;
mgpio->dev = &pdev->dev;
- mgpio->gpio_irq = gpio_irq;
mgpio->gpio_chip.label = pdev->name;
mgpio->gpio_chip.parent = &pdev->dev;
@@ -268,7 +264,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
mgpio->gpio_chip.base = -1;
- mgpio->irq_base = -1;
#ifdef CONFIG_OF_GPIO
mgpio->gpio_chip.of_node = pdev->dev.parent->of_node;
#endif
@@ -281,9 +276,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
return ret;
}
- mgpio->gpio_base = mgpio->gpio_chip.base;
- ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, mgpio->gpio_irq,
- IRQF_ONESHOT, mgpio->irq_base,
+ ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, gpio_irq,
+ IRQF_ONESHOT, -1,
&max77620_gpio_irq_chip,
&chip->gpio_irq_data);
if (ret < 0) {
@@ -296,6 +290,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
static const struct platform_device_id max77620_gpio_devtype[] = {
{ .name = "max77620-gpio", },
+ { .name = "max20024-gpio", },
{},
};
MODULE_DEVICE_TABLE(platform, max77620_gpio_devtype);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index d55af50e7034..ffb73f688ae1 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -217,8 +217,4 @@ static struct platform_driver mb86s70_gpio_driver = {
.remove = mb86s70_gpio_remove,
};
-static int __init mb86s70_gpio_init(void)
-{
- return platform_driver_register(&mb86s70_gpio_driver);
-}
-device_initcall(mb86s70_gpio_init);
+builtin_platform_driver(mb86s70_gpio_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99d37b56c258..504550665091 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -473,21 +473,20 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
- err = gpiochip_irqchip_add(chip,
- &mcp23s08_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add_nested(chip,
+ &mcp23s08_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (err) {
dev_err(chip->parent,
"could not connect irqchip to gpiochip: %d\n", err);
return err;
}
- gpiochip_set_chained_irqchip(chip,
- &mcp23s08_irq_chip,
- mcp->irq,
- NULL);
+ gpiochip_set_nested_irqchip(chip,
+ &mcp23s08_irq_chip,
+ mcp->irq);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 45b51278b8ee..69e0f4ace465 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -161,6 +162,34 @@ static int mrfld_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+
+ return (readl(gpdr) & BIT(offset % 32)) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
+static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned int debounce)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *gfbr = gpio_reg(chip, offset, GFBR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ if (debounce)
+ value = readl(gfbr) & ~BIT(offset % 32);
+ else
+ value = readl(gfbr) | BIT(offset % 32);
+ writel(value, gfbr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
static void mrfld_irq_ack(struct irq_data *d)
{
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
@@ -384,6 +413,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.direction_output = mrfld_gpio_direction_output;
priv->chip.get = mrfld_gpio_get;
priv->chip.set = mrfld_gpio_set;
+ priv->chip.get_direction = mrfld_gpio_get_direction;
+ priv->chip.set_debounce = mrfld_gpio_set_debounce;
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
@@ -411,7 +442,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (retval) {
dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
return retval;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
- u32 mask = ~(1 << (d->irq - gc->irq_base));
+ u32 mask = d->mask;
irq_gc_lock(gc);
- writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
irq_gc_unlock(gc);
}
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = mvchip->irqbase + i;
+ irq = irq_find_mapping(mvchip->domain, i);
if (!(cause & (1 << i)))
continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct irq_chip_type *ct;
struct clk *clk;
unsigned int ngpios;
+ bool have_irqs;
int soc_variant;
int i, cpu, id;
int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
else
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+ /* Some gpio controllers do not provide irq support */
+ have_irqs = of_irq_count(np) != 0;
+
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
mvchip->chip.set = mvebu_gpio_set;
- mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ if (have_irqs)
+ mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
- if (!of_irq_count(np))
+ if (!have_irqs)
return 0;
- /* Setup the interrupt handlers. Each chip can have up to 4
- * interrupt handlers, with each handler dealing with 8 GPIO
- * pins. */
- for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
-
- if (irq < 0)
- continue;
- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
- mvchip);
- }
-
- mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
- if (mvchip->irqbase < 0) {
- dev_err(&pdev->dev, "no irqs\n");
- return mvchip->irqbase;
+ mvchip->domain =
+ irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ if (!mvchip->domain) {
+ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ mvchip->chip.label);
+ return -ENODEV;
}
- gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
- mvchip->membase, handle_level_irq);
- if (!gc) {
- dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- return -ENOMEM;
+ err = irq_alloc_domain_generic_chips(
+ mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+ mvchip->chip.label);
+ goto err_domain;
}
+ /* NOTE: The common accessors cannot be used because of the percpu
+ * access to the mask registers
+ */
+ gc = irq_get_domain_generic_chip(mvchip->domain, 0);
gc->private = mvchip;
ct = &gc->chip_types[0];
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
- IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins.
+ */
+ for (i = 0; i < 4; i++) {
+ int irq = platform_get_irq(pdev, i);
- /* Setup irq domain on top of the generic chip. */
- mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
- mvchip->irqbase,
- &irq_domain_simple_ops,
- mvchip);
- if (!mvchip->domain) {
- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
- mvchip->chip.label);
- err = -ENODEV;
- goto err_generic_chip;
+ if (irq < 0)
+ continue;
+ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+ mvchip);
}
return 0;
-err_generic_chip:
- irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
- IRQ_LEVEL | IRQ_NOPROBE);
- kfree(gc);
+err_domain:
+ irq_domain_remove(mvchip->domain);
return err;
}
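The mvebu rework above drops the irq_alloc_descs()/irq_domain_add_simple() pair in favour of a linear domain backed by domain generic chips, so the chained handler now translates hardware irq numbers through irq_find_mapping() instead of adding them to a driver-private irqbase. A minimal sketch of that handler shape, with the demo_* names and a 32-bit cause register assumed for illustration:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct demo_gpio_bank {
        void __iomem *cause_reg;        /* edge/level cause register */
        struct irq_domain *domain;      /* linear domain, one hwirq per pin */
};

/* Hypothetical chained handler in the post-conversion shape: hwirqs are
 * looked up in the linear domain rather than offset from an irqbase.
 */
static void demo_gpio_irq_handler(struct irq_desc *desc)
{
        struct demo_gpio_bank *bank = irq_desc_get_handler_data(desc);
        struct irq_chip *parent_chip = irq_desc_get_chip(desc);
        unsigned long cause;
        int i;

        chained_irq_enter(parent_chip, desc);

        cause = readl_relaxed(bank->cause_reg);
        for_each_set_bit(i, &cause, 32)
                generic_handle_irq(irq_find_mapping(bank->domain, i));

        chained_irq_exit(parent_chip, desc);
}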
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index ee1724806f46..1e8fde8cb803 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -87,10 +87,15 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
u32 val;
u32 pin_mask = 1 << d->hwirq;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct mxs_gpio_port *port = gc->private;
void __iomem *pin_addr;
int edge;
+ if (!(ct->type & type))
+ if (irq_setup_alt_chip(d, type))
+ return -EINVAL;
+
port->both_edges &= ~pin_mask;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -119,10 +124,13 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
/* set level or edge */
pin_addr = port->base + PINCTRL_IRQLEV(port);
- if (edge & GPIO_INT_LEV_MASK)
+ if (edge & GPIO_INT_LEV_MASK) {
writel(pin_mask, pin_addr + MXS_SET);
- else
+ writel(pin_mask, port->base + PINCTRL_IRQEN(port) + MXS_SET);
+ } else {
writel(pin_mask, pin_addr + MXS_CLR);
+ writel(pin_mask, port->base + PINCTRL_PIN2IRQ(port) + MXS_SET);
+ }
/* set polarity */
pin_addr = port->base + PINCTRL_IRQPOL(port);
@@ -202,21 +210,37 @@ static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base,
+ gc = irq_alloc_generic_chip("gpio-mxs", 2, irq_base,
port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
gc->private = port;
- ct = gc->chip_types;
+ ct = &gc->chip_types[0];
+ ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_set_type = mxs_gpio_set_irq_type;
+ ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.enable = PINCTRL_PIN2IRQ(port) + MXS_SET;
+ ct->regs.disable = PINCTRL_PIN2IRQ(port) + MXS_CLR;
+
+ ct = &gc->chip_types[1];
+ ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
ct->chip.irq_set_type = mxs_gpio_set_irq_type;
ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
- ct->regs.mask = PINCTRL_IRQEN(port);
+ ct->regs.enable = PINCTRL_IRQEN(port) + MXS_SET;
+ ct->regs.disable = PINCTRL_IRQEN(port) + MXS_CLR;
+ ct->handler = handle_level_irq;
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
IRQ_NOREQUEST, 0);
@@ -297,11 +321,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
}
port->base = base;
- /*
- * select the pin interrupt functionality but initially
- * disable the interrupts
- */
- writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+ /* initially disable the interrupts */
+ writel(0, port->base + PINCTRL_PIN2IRQ(port));
writel(0, port->base + PINCTRL_IRQEN(port));
/* clear address has to be used to clear IRQSTAT bits */
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e422568e14ad..d5d72d84b719 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -74,6 +74,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
{ "max7313", 16 | PCA953X_TYPE | PCA_INT, },
{ "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "max7318", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
@@ -372,14 +373,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
- memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
+ memcpy(reg_val, chip->reg_output, NBANK(chip));
for (bank = 0; bank < NBANK(chip); bank++) {
bank_mask = mask[bank / sizeof(*mask)] >>
((bank % sizeof(*mask)) * 8);
if (bank_mask) {
bank_val = bits[bank / sizeof(*bits)] >>
((bank % sizeof(*bits)) * 8);
+ bank_val &= bank_mask;
reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
@@ -607,7 +609,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (client->irq && irq_base != -1
&& (chip->driver_data & PCA_INT)) {
-
ret = pca953x_read_regs(chip,
chip->regs->input, chip->irq_stat);
if (ret)
@@ -635,20 +636,20 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return ret;
}
- ret = gpiochip_irqchip_add(&chip->gpio_chip,
- &pca953x_irq_chip,
- irq_base,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&client->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gpio_chip,
- &pca953x_irq_chip,
- client->irq, NULL);
+ gpiochip_set_nested_irqchip(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ client->irq);
}
return 0;
@@ -907,6 +908,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "maxim,max7312", .data = OF_953X(16, PCA_INT), },
{ .compatible = "maxim,max7313", .data = OF_953X(16, PCA_INT), },
{ .compatible = "maxim,max7315", .data = OF_953X( 8, PCA_INT), },
+ { .compatible = "maxim,max7318", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,pca6107", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index d168410e2338..895af42a4513 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -378,9 +378,10 @@ static int pcf857x_probe(struct i2c_client *client,
/* Enable irqchip if we have an interrupt */
if (client->irq) {
- status = gpiochip_irqchip_add(&gpio->chip, &pcf857x_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
+ status = gpiochip_irqchip_add_nested(&gpio->chip,
+ &pcf857x_irq_chip,
+ 0, handle_level_irq,
+ IRQ_TYPE_NONE);
if (status) {
dev_err(&client->dev, "cannot add irqchip\n");
goto fail;
@@ -393,8 +394,8 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;
- gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip,
- client->irq, NULL);
+ gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+ client->irq);
gpio->irq_parent = client->irq;
}
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 6e3c1430616f..0a6bfd2b06e5 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -23,7 +23,6 @@
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
@@ -50,11 +49,12 @@ struct pl061_context_save_regs {
};
#endif
-struct pl061_gpio {
+struct pl061 {
spinlock_t lock;
void __iomem *base;
struct gpio_chip gc;
+ int parent_irq;
#ifdef CONFIG_PM
struct pl061_context_save_regs csave_regs;
@@ -63,22 +63,22 @@ struct pl061_gpio {
static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return !(readb(chip->base + GPIODIR) & BIT(offset));
+ return !(readb(pl061->base + GPIODIR) & BIT(offset));
}
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readb(chip->base + GPIODIR);
+ spin_lock_irqsave(&pl061->lock, flags);
+ gpiodir = readb(pl061->base + GPIODIR);
gpiodir &= ~(BIT(offset));
- writeb(gpiodir, chip->base + GPIODIR);
- spin_unlock_irqrestore(&chip->lock, flags);
+ writeb(gpiodir, pl061->base + GPIODIR);
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -86,44 +86,44 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&chip->lock, flags);
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
- gpiodir = readb(chip->base + GPIODIR);
+ spin_lock_irqsave(&pl061->lock, flags);
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+ gpiodir = readb(pl061->base + GPIODIR);
gpiodir |= BIT(offset);
- writeb(gpiodir, chip->base + GPIODIR);
+ writeb(gpiodir, pl061->base + GPIODIR);
/*
* gpio value is set again, because pl061 doesn't allow to set value of
* a gpio pin before configuring it in OUT mode.
*/
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
- spin_unlock_irqrestore(&chip->lock, flags);
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return !!readb(chip->base + (BIT(offset + 2)));
+ return !!readb(pl061->base + (BIT(offset + 2)));
}
static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
{
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- writeb(!!value << offset, chip->base + (BIT(offset + 2)));
+ writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d);
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -143,11 +143,11 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
}
- spin_lock_irqsave(&chip->lock, flags);
+ spin_lock_irqsave(&pl061->lock, flags);
- gpioiev = readb(chip->base + GPIOIEV);
- gpiois = readb(chip->base + GPIOIS);
- gpioibe = readb(chip->base + GPIOIBE);
+ gpioiev = readb(pl061->base + GPIOIEV);
+ gpiois = readb(pl061->base + GPIOIS);
+ gpioibe = readb(pl061->base + GPIOIBE);
if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
bool polarity = trigger & IRQ_TYPE_LEVEL_HIGH;
@@ -199,11 +199,11 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
offset);
}
- writeb(gpiois, chip->base + GPIOIS);
- writeb(gpioibe, chip->base + GPIOIBE);
- writeb(gpioiev, chip->base + GPIOIEV);
+ writeb(gpiois, pl061->base + GPIOIS);
+ writeb(gpioibe, pl061->base + GPIOIBE);
+ writeb(gpioiev, pl061->base + GPIOIEV);
- spin_unlock_irqrestore(&chip->lock, flags);
+ spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -213,12 +213,12 @@ static void pl061_irq_handler(struct irq_desc *desc)
unsigned long pending;
int offset;
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
chained_irq_enter(irqchip, desc);
- pending = readb(chip->base + GPIOMIS);
+ pending = readb(pl061->base + GPIOMIS);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(irq_find_mapping(gc->irqdomain,
@@ -231,27 +231,27 @@ static void pl061_irq_handler(struct irq_desc *desc)
static void pl061_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&chip->lock);
- gpioie = readb(chip->base + GPIOIE) & ~mask;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ gpioie = readb(pl061->base + GPIOIE) & ~mask;
+ writeb(gpioie, pl061->base + GPIOIE);
+ spin_unlock(&pl061->lock);
}
static void pl061_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&chip->lock);
- gpioie = readb(chip->base + GPIOIE) | mask;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ gpioie = readb(pl061->base + GPIOIE) | mask;
+ writeb(gpioie, pl061->base + GPIOIE);
+ spin_unlock(&pl061->lock);
}
/**
@@ -265,19 +265,20 @@ static void pl061_irq_unmask(struct irq_data *d)
static void pl061_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct pl061_gpio *chip = gpiochip_get_data(gc);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
- spin_lock(&chip->lock);
- writeb(mask, chip->base + GPIOIC);
- spin_unlock(&chip->lock);
+ spin_lock(&pl061->lock);
+ writeb(mask, pl061->base + GPIOIC);
+ spin_unlock(&pl061->lock);
}
static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061 *pl061 = gpiochip_get_data(gc);
- return irq_set_irq_wake(gc->irq_parent, state);
+ return irq_set_irq_wake(pl061->parent_irq, state);
}
static struct irq_chip pl061_irqchip = {
@@ -292,81 +293,60 @@ static struct irq_chip pl061_irqchip = {
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
- struct pl061_platform_data *pdata = dev_get_platdata(dev);
- struct pl061_gpio *chip;
- int ret, irq, i, irq_base;
+ struct pl061 *pl061;
+ int ret, irq;
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (chip == NULL)
+ pl061 = devm_kzalloc(dev, sizeof(*pl061), GFP_KERNEL);
+ if (pl061 == NULL)
return -ENOMEM;
- if (pdata) {
- chip->gc.base = pdata->gpio_base;
- irq_base = pdata->irq_base;
- if (irq_base <= 0) {
- dev_err(&adev->dev, "invalid IRQ base in pdata\n");
- return -ENODEV;
- }
- } else {
- chip->gc.base = -1;
- irq_base = 0;
- }
-
- chip->base = devm_ioremap_resource(dev, &adev->res);
- if (IS_ERR(chip->base))
- return PTR_ERR(chip->base);
+ pl061->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(pl061->base))
+ return PTR_ERR(pl061->base);
- spin_lock_init(&chip->lock);
+ spin_lock_init(&pl061->lock);
if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
+ pl061->gc.request = gpiochip_generic_request;
+ pl061->gc.free = gpiochip_generic_free;
}
- chip->gc.get_direction = pl061_get_direction;
- chip->gc.direction_input = pl061_direction_input;
- chip->gc.direction_output = pl061_direction_output;
- chip->gc.get = pl061_get_value;
- chip->gc.set = pl061_set_value;
- chip->gc.ngpio = PL061_GPIO_NR;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
-
- ret = gpiochip_add_data(&chip->gc, chip);
+ pl061->gc.base = -1;
+ pl061->gc.get_direction = pl061_get_direction;
+ pl061->gc.direction_input = pl061_direction_input;
+ pl061->gc.direction_output = pl061_direction_output;
+ pl061->gc.get = pl061_get_value;
+ pl061->gc.set = pl061_set_value;
+ pl061->gc.ngpio = PL061_GPIO_NR;
+ pl061->gc.label = dev_name(dev);
+ pl061->gc.parent = dev;
+ pl061->gc.owner = THIS_MODULE;
+
+ ret = gpiochip_add_data(&pl061->gc, pl061);
if (ret)
return ret;
/*
* irq_chip support
*/
- writeb(0, chip->base + GPIOIE); /* disable irqs */
+ writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
if (irq < 0) {
dev_err(&adev->dev, "invalid IRQ\n");
return -ENODEV;
}
+ pl061->parent_irq = irq;
- ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip,
- irq_base, handle_bad_irq,
+ ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
+ 0, handle_bad_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_info(&adev->dev, "could not add irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&chip->gc, &pl061_irqchip,
+ gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
irq, pl061_irq_handler);
- for (i = 0; i < PL061_GPIO_NR; i++) {
- if (pdata) {
- if (pdata->directions & (BIT(i)))
- pl061_direction_output(&chip->gc, i,
- pdata->values & (BIT(i)));
- else
- pl061_direction_input(&chip->gc, i);
- }
- }
-
- amba_set_drvdata(adev, chip);
+ amba_set_drvdata(adev, pl061);
dev_info(&adev->dev, "PL061 GPIO chip @%pa registered\n",
&adev->res.start);
@@ -376,20 +356,20 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
#ifdef CONFIG_PM
static int pl061_suspend(struct device *dev)
{
- struct pl061_gpio *chip = dev_get_drvdata(dev);
+ struct pl061 *pl061 = dev_get_drvdata(dev);
int offset;
- chip->csave_regs.gpio_data = 0;
- chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
- chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
- chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
- chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
- chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
+ pl061->csave_regs.gpio_data = 0;
+ pl061->csave_regs.gpio_dir = readb(pl061->base + GPIODIR);
+ pl061->csave_regs.gpio_is = readb(pl061->base + GPIOIS);
+ pl061->csave_regs.gpio_ibe = readb(pl061->base + GPIOIBE);
+ pl061->csave_regs.gpio_iev = readb(pl061->base + GPIOIEV);
+ pl061->csave_regs.gpio_ie = readb(pl061->base + GPIOIE);
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (BIT(offset)))
- chip->csave_regs.gpio_data |=
- pl061_get_value(&chip->gc, offset) << offset;
+ if (pl061->csave_regs.gpio_dir & (BIT(offset)))
+ pl061->csave_regs.gpio_data |=
+ pl061_get_value(&pl061->gc, offset) << offset;
}
return 0;
@@ -397,22 +377,22 @@ static int pl061_suspend(struct device *dev)
static int pl061_resume(struct device *dev)
{
- struct pl061_gpio *chip = dev_get_drvdata(dev);
+ struct pl061 *pl061 = dev_get_drvdata(dev);
int offset;
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
- if (chip->csave_regs.gpio_dir & (BIT(offset)))
- pl061_direction_output(&chip->gc, offset,
- chip->csave_regs.gpio_data &
+ if (pl061->csave_regs.gpio_dir & (BIT(offset)))
+ pl061_direction_output(&pl061->gc, offset,
+ pl061->csave_regs.gpio_data &
(BIT(offset)));
else
- pl061_direction_input(&chip->gc, offset);
+ pl061_direction_input(&pl061->gc, offset);
}
- writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
- writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
- writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
- writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);
+ writeb(pl061->csave_regs.gpio_is, pl061->base + GPIOIS);
+ writeb(pl061->csave_regs.gpio_ibe, pl061->base + GPIOIBE);
+ writeb(pl061->csave_regs.gpio_iev, pl061->base + GPIOIEV);
+ writeb(pl061->csave_regs.gpio_ie, pl061->base + GPIOIE);
return 0;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 5b0042776ec7..16cbc5702865 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -413,7 +413,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
- stmpe->regs[STMPE_IDX_GPEDR_LSB + i],
+ stmpe->regs[STMPE_IDX_GPEDR_MSB] + i,
status[i]);
}
}
@@ -484,21 +484,20 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask & BIT(i))
clear_bit(i, stmpe_gpio->chip.irq_valid_mask);
}
- ret = gpiochip_irqchip_add(&stmpe_gpio->chip,
- &stmpe_gpio_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
+ &stmpe_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev,
"could not connect irqchip to gpiochip\n");
goto out_disable;
}
- gpiochip_set_chained_irqchip(&stmpe_gpio->chip,
- &stmpe_gpio_irq_chip,
- irq,
- NULL);
+ gpiochip_set_nested_irqchip(&stmpe_gpio->chip,
+ &stmpe_gpio_irq_chip,
+ irq);
}
platform_set_drvdata(pdev, stmpe_gpio);
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
deleted file mode 100644
index af95de89db01..000000000000
--- a/drivers/gpio/gpio-sx150x.c
+++ /dev/null
@@ -1,792 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * Driver for Semtech SX150X I2C GPIO Expanders
- *
- * Author: Gregory Bean <gbean@codeaurora.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
-
-#define NO_UPDATE_PENDING -1
-
-/* The chip models of sx150x */
-#define SX150X_123 0
-#define SX150X_456 1
-#define SX150X_789 2
-
-struct sx150x_123_pri {
- u8 reg_pld_mode;
- u8 reg_pld_table0;
- u8 reg_pld_table1;
- u8 reg_pld_table2;
- u8 reg_pld_table3;
- u8 reg_pld_table4;
- u8 reg_advance;
-};
-
-struct sx150x_456_pri {
- u8 reg_pld_mode;
- u8 reg_pld_table0;
- u8 reg_pld_table1;
- u8 reg_pld_table2;
- u8 reg_pld_table3;
- u8 reg_pld_table4;
- u8 reg_advance;
-};
-
-struct sx150x_789_pri {
- u8 reg_drain;
- u8 reg_polarity;
- u8 reg_clock;
- u8 reg_misc;
- u8 reg_reset;
- u8 ngpios;
-};
-
-struct sx150x_device_data {
- u8 model;
- u8 reg_pullup;
- u8 reg_pulldn;
- u8 reg_dir;
- u8 reg_data;
- u8 reg_irq_mask;
- u8 reg_irq_src;
- u8 reg_sense;
- u8 ngpios;
- union {
- struct sx150x_123_pri x123;
- struct sx150x_456_pri x456;
- struct sx150x_789_pri x789;
- } pri;
-};
-
-/**
- * struct sx150x_platform_data - config data for SX150x driver
- * @gpio_base: The index number of the first GPIO assigned to this
- * GPIO expander. The expander will create a block of
- * consecutively numbered gpios beginning at the given base,
- * with the size of the block depending on the model of the
- * expander chip.
- * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
- * instead of as an oscillator, increasing the size of the
- * GP(I)O pool created by this expander by one. The
- * output-only GPO pin will be added at the end of the block.
- * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
- * for each IO line in the expander. Setting the bit at
- * position n will enable the pull-up for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_pulldn_ena: A bit-mask which enables or disables the pull-down
- * resistor for each IO line in the expander. Setting the
- * bit at position n will enable the pull-down for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_polarity: A bit-mask which enables polarity inversion for each IO line
- * in the expander. Setting the bit at position n inverts
- * the polarity of that IO line, while clearing it results
- * in normal polarity. For chips with fewer than 16 IO pins,
- * high-end bits are ignored.
- * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
- * is connected, via which it reports interrupt events
- * across all GPIO lines. This must be a real,
- * pre-existing IRQ line.
- * Setting this value < 0 disables the irq_chip functionality
- * of the driver.
- * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
- * IRQ lines will appear. Similarly to gpio_base, the expander
- * will create a block of irqs beginning at this number.
- * This value is ignored if irq_summary is < 0.
- * @reset_during_probe: If set to true, the driver will trigger a full
- * reset of the chip at the beginning of the probe
- * in order to place it in a known state.
- */
-struct sx150x_platform_data {
- unsigned gpio_base;
- bool oscio_is_gpo;
- u16 io_pullup_ena;
- u16 io_pulldn_ena;
- u16 io_polarity;
- int irq_summary;
- unsigned irq_base;
- bool reset_during_probe;
-};
-
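A minimal illustration of the fields documented above, written as a hypothetical board-file initialization (the structure name and values are invented for this sketch, not taken from the patch):

	static struct sx150x_platform_data board_sx1509_pdata = {
		.gpio_base          = 200,	/* first GPIO number claimed by the expander */
		.oscio_is_gpo       = false,	/* keep OSCIO as an oscillator pin */
		.io_pullup_ena      = 0x000f,	/* pull-ups on IO0..IO3 */
		.io_pulldn_ena      = 0x0000,
		.io_polarity        = 0x0000,	/* normal polarity everywhere */
		.irq_summary        = -1,	/* < 0 disables the irq_chip functionality */
		.reset_during_probe = true,	/* start from a known chip state */
	};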
-struct sx150x_chip {
- struct gpio_chip gpio_chip;
- struct i2c_client *client;
- const struct sx150x_device_data *dev_cfg;
- int irq_summary;
- int irq_base;
- int irq_update;
- u32 irq_sense;
- u32 irq_masked;
- u32 dev_sense;
- u32 dev_masked;
- struct irq_chip irq_chip;
- struct mutex lock;
-};
-
-static const struct sx150x_device_data sx150x_devices[] = {
- [0] = { /* sx1508q */
- .model = SX150X_789,
- .reg_pullup = 0x03,
- .reg_pulldn = 0x04,
- .reg_dir = 0x07,
- .reg_data = 0x08,
- .reg_irq_mask = 0x09,
- .reg_irq_src = 0x0c,
- .reg_sense = 0x0b,
- .pri.x789 = {
- .reg_drain = 0x05,
- .reg_polarity = 0x06,
- .reg_clock = 0x0f,
- .reg_misc = 0x10,
- .reg_reset = 0x7d,
- },
- .ngpios = 8,
- },
- [1] = { /* sx1509q */
- .model = SX150X_789,
- .reg_pullup = 0x07,
- .reg_pulldn = 0x09,
- .reg_dir = 0x0f,
- .reg_data = 0x11,
- .reg_irq_mask = 0x13,
- .reg_irq_src = 0x19,
- .reg_sense = 0x17,
- .pri.x789 = {
- .reg_drain = 0x0b,
- .reg_polarity = 0x0d,
- .reg_clock = 0x1e,
- .reg_misc = 0x1f,
- .reg_reset = 0x7d,
- },
- .ngpios = 16
- },
- [2] = { /* sx1506q */
- .model = SX150X_456,
- .reg_pullup = 0x05,
- .reg_pulldn = 0x07,
- .reg_dir = 0x03,
- .reg_data = 0x01,
- .reg_irq_mask = 0x09,
- .reg_irq_src = 0x0f,
- .reg_sense = 0x0d,
- .pri.x456 = {
- .reg_pld_mode = 0x21,
- .reg_pld_table0 = 0x23,
- .reg_pld_table1 = 0x25,
- .reg_pld_table2 = 0x27,
- .reg_pld_table3 = 0x29,
- .reg_pld_table4 = 0x2b,
- .reg_advance = 0xad,
- },
- .ngpios = 16
- },
- [3] = { /* sx1502q */
- .model = SX150X_123,
- .reg_pullup = 0x02,
- .reg_pulldn = 0x03,
- .reg_dir = 0x01,
- .reg_data = 0x00,
- .reg_irq_mask = 0x05,
- .reg_irq_src = 0x08,
- .reg_sense = 0x07,
- .pri.x123 = {
- .reg_pld_mode = 0x10,
- .reg_pld_table0 = 0x11,
- .reg_pld_table1 = 0x12,
- .reg_pld_table2 = 0x13,
- .reg_pld_table3 = 0x14,
- .reg_pld_table4 = 0x15,
- .reg_advance = 0xad,
- },
- .ngpios = 8,
- },
-};
-
-static const struct i2c_device_id sx150x_id[] = {
- {"sx1508q", 0},
- {"sx1509q", 1},
- {"sx1506q", 2},
- {"sx1502q", 3},
- {}
-};
-
-static const struct of_device_id sx150x_of_match[] = {
- { .compatible = "semtech,sx1508q" },
- { .compatible = "semtech,sx1509q" },
- { .compatible = "semtech,sx1506q" },
- { .compatible = "semtech,sx1502q" },
- {},
-};
-
-static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
-{
- s32 err = i2c_smbus_write_byte_data(client, reg, val);
-
- if (err < 0)
- dev_warn(&client->dev,
- "i2c write fail: can't write %02x to %02x: %d\n",
- val, reg, err);
- return err;
-}
-
-static s32 sx150x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- s32 err = i2c_smbus_read_byte_data(client, reg);
-
- if (err >= 0)
- *val = err;
- else
- dev_warn(&client->dev,
- "i2c read fail: can't read from %02x: %d\n",
- reg, err);
- return err;
-}
-
-static inline bool offset_is_oscio(struct sx150x_chip *chip, unsigned offset)
-{
- return (chip->dev_cfg->ngpios == offset);
-}
-
-/*
- * These utility functions solve the common problem of locating and setting
- * configuration bits. Configuration bits are grouped into registers
- * whose indexes increase downwards. For example, with eight-bit registers,
- * sixteen gpios would have their config bits grouped in the following order:
- * REGISTER N-1 [ f e d c b a 9 8 ]
- * N [ 7 6 5 4 3 2 1 0 ]
- *
- * For multi-bit configurations, the pattern gets wider:
- * REGISTER N-3 [ f f e e d d c c ]
- * N-2 [ b b a a 9 9 8 8 ]
- * N-1 [ 7 7 6 6 5 5 4 4 ]
- * N [ 3 3 2 2 1 1 0 0 ]
- *
- * Given the address of the starting register 'N', the index of the gpio
- * whose configuration we seek to change, and the width in bits of that
- * configuration, these functions allow us to locate the correct
- * register and mask the correct bits.
- */
-static inline void sx150x_find_cfg(u8 offset, u8 width,
- u8 *reg, u8 *mask, u8 *shift)
-{
- *reg -= offset * width / 8;
- *mask = (1 << width) - 1;
- *shift = (offset * width) % 8;
- *mask <<= *shift;
-}
-
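A worked instance of the helper above may make the arithmetic easier to follow; the register value is taken from the sx1509q table later in this file, the rest is an illustrative sketch:

	u8 reg = 0x17, mask, shift;	/* sx1509q reg_sense, 2 bits of sense config per line */

	sx150x_find_cfg(5, 2, &reg, &mask, &shift);
	/* reg = 0x17 - (5 * 2 / 8) = 0x16, shift = (5 * 2) % 8 = 2, mask = 0x3 << 2 = 0x0c */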
-static s32 sx150x_write_cfg(struct sx150x_chip *chip,
- u8 offset, u8 width, u8 reg, u8 val)
-{
- u8 mask;
- u8 data;
- u8 shift;
- s32 err;
-
- sx150x_find_cfg(offset, width, &reg, &mask, &shift);
- err = sx150x_i2c_read(chip->client, reg, &data);
- if (err < 0)
- return err;
-
- data &= ~mask;
- data |= (val << shift) & mask;
- return sx150x_i2c_write(chip->client, reg, data);
-}
-
-static int sx150x_get_io(struct sx150x_chip *chip, unsigned offset)
-{
- u8 reg = chip->dev_cfg->reg_data;
- u8 mask;
- u8 data;
- u8 shift;
- s32 err;
-
- sx150x_find_cfg(offset, 1, &reg, &mask, &shift);
- err = sx150x_i2c_read(chip->client, reg, &data);
- if (err >= 0)
- err = (data & mask) != 0 ? 1 : 0;
-
- return err;
-}
-
-static void sx150x_set_oscio(struct sx150x_chip *chip, int val)
-{
- sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x789.reg_clock,
- (val ? 0x1f : 0x10));
-}
-
-static void sx150x_set_io(struct sx150x_chip *chip, unsigned offset, int val)
-{
- sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_data,
- (val ? 1 : 0));
-}
-
-static int sx150x_io_input(struct sx150x_chip *chip, unsigned offset)
-{
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_dir,
- 1);
-}
-
-static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val)
-{
- int err;
-
- err = sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_data,
- (val ? 1 : 0));
- if (err >= 0)
- err = sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->reg_dir,
- 0);
- return err;
-}
-
-static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = -EINVAL;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_get_io(chip, offset);
- mutex_unlock(&chip->lock);
- }
-
- return (status < 0) ? status : !!status;
-}
-
-static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
-
- mutex_lock(&chip->lock);
- if (offset_is_oscio(chip, offset))
- sx150x_set_oscio(chip, val);
- else
- sx150x_set_io(chip, offset, val);
- mutex_unlock(&chip->lock);
-}
-
-static int sx150x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
-
-	/* On the SX150X 789 we can set open drain */
- if (chip->dev_cfg->model != SX150X_789)
- return -ENOTSUPP;
-
- if (mode == LINE_MODE_PUSH_PULL)
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->pri.x789.reg_drain,
- 0);
-
- if (mode == LINE_MODE_OPEN_DRAIN)
- return sx150x_write_cfg(chip,
- offset,
- 1,
- chip->dev_cfg->pri.x789.reg_drain,
- 1);
- return -ENOTSUPP;
-}
-
-static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = -EINVAL;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_io_input(chip, offset);
- mutex_unlock(&chip->lock);
- }
- return status;
-}
-
-static int sx150x_gpio_direction_output(struct gpio_chip *gc,
- unsigned offset,
- int val)
-{
- struct sx150x_chip *chip = gpiochip_get_data(gc);
- int status = 0;
-
- if (!offset_is_oscio(chip, offset)) {
- mutex_lock(&chip->lock);
- status = sx150x_io_output(chip, offset, val);
- mutex_unlock(&chip->lock);
- }
- return status;
-}
-
-static void sx150x_irq_mask(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n = d->hwirq;
-
- chip->irq_masked |= (1 << n);
- chip->irq_update = n;
-}
-
-static void sx150x_irq_unmask(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n = d->hwirq;
-
- chip->irq_masked &= ~(1 << n);
- chip->irq_update = n;
-}
-
-static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n, val = 0;
-
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- return -EINVAL;
-
- n = d->hwirq;
-
- if (flow_type & IRQ_TYPE_EDGE_RISING)
- val |= 0x1;
- if (flow_type & IRQ_TYPE_EDGE_FALLING)
- val |= 0x2;
-
- chip->irq_sense &= ~(3UL << (n * 2));
- chip->irq_sense |= val << (n * 2);
- chip->irq_update = n;
- return 0;
-}
-
-static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
-{
- struct sx150x_chip *chip = (struct sx150x_chip *)dev_id;
- unsigned nhandled = 0;
- unsigned sub_irq;
- unsigned n;
- s32 err;
- u8 val;
- int i;
-
- for (i = (chip->dev_cfg->ngpios / 8) - 1; i >= 0; --i) {
- err = sx150x_i2c_read(chip->client,
- chip->dev_cfg->reg_irq_src - i,
- &val);
- if (err < 0)
- continue;
-
- sx150x_i2c_write(chip->client,
- chip->dev_cfg->reg_irq_src - i,
- val);
- for (n = 0; n < 8; ++n) {
- if (val & (1 << n)) {
- sub_irq = irq_find_mapping(
- chip->gpio_chip.irqdomain,
- (i * 8) + n);
- handle_nested_irq(sub_irq);
- ++nhandled;
- }
- }
- }
-
- return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
-}
-
-static void sx150x_irq_bus_lock(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
-
- mutex_lock(&chip->lock);
-}
-
-static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct sx150x_chip *chip = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- unsigned n;
-
- if (chip->irq_update == NO_UPDATE_PENDING)
- goto out;
-
- n = chip->irq_update;
- chip->irq_update = NO_UPDATE_PENDING;
-
- /* Avoid updates if nothing changed */
- if (chip->dev_sense == chip->irq_sense &&
- chip->dev_masked == chip->irq_masked)
- goto out;
-
- chip->dev_sense = chip->irq_sense;
- chip->dev_masked = chip->irq_masked;
-
- if (chip->irq_masked & (1 << n)) {
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
- } else {
- sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
- sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
- chip->irq_sense >> (n * 2));
- }
-out:
- mutex_unlock(&chip->lock);
-}
-
-static void sx150x_init_chip(struct sx150x_chip *chip,
- struct i2c_client *client,
- kernel_ulong_t driver_data,
- struct sx150x_platform_data *pdata)
-{
- mutex_init(&chip->lock);
-
- chip->client = client;
- chip->dev_cfg = &sx150x_devices[driver_data];
- chip->gpio_chip.parent = &client->dev;
- chip->gpio_chip.label = client->name;
- chip->gpio_chip.direction_input = sx150x_gpio_direction_input;
- chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
- chip->gpio_chip.get = sx150x_gpio_get;
- chip->gpio_chip.set = sx150x_gpio_set;
- chip->gpio_chip.set_single_ended = sx150x_gpio_set_single_ended;
- chip->gpio_chip.base = pdata->gpio_base;
- chip->gpio_chip.can_sleep = true;
- chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
-#ifdef CONFIG_OF_GPIO
- chip->gpio_chip.of_node = client->dev.of_node;
- chip->gpio_chip.of_gpio_n_cells = 2;
-#endif
- if (pdata->oscio_is_gpo)
- ++chip->gpio_chip.ngpio;
-
- chip->irq_chip.name = client->name;
- chip->irq_chip.irq_mask = sx150x_irq_mask;
- chip->irq_chip.irq_unmask = sx150x_irq_unmask;
- chip->irq_chip.irq_set_type = sx150x_irq_set_type;
- chip->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
- chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
- chip->irq_summary = -1;
- chip->irq_base = -1;
- chip->irq_masked = ~0;
- chip->irq_sense = 0;
- chip->dev_masked = ~0;
- chip->dev_sense = 0;
- chip->irq_update = NO_UPDATE_PENDING;
-}
-
-static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
-{
- int err = 0;
- unsigned n;
-
- for (n = 0; err >= 0 && n < (chip->dev_cfg->ngpios / 8); ++n)
- err = sx150x_i2c_write(chip->client, base - n, cfg >> (n * 8));
- return err;
-}
-
-static int sx150x_reset(struct sx150x_chip *chip)
-{
- int err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->pri.x789.reg_reset,
- 0x12);
- if (err < 0)
- return err;
-
- err = i2c_smbus_write_byte_data(chip->client,
- chip->dev_cfg->pri.x789.reg_reset,
- 0x34);
- return err;
-}
-
-static int sx150x_init_hw(struct sx150x_chip *chip,
- struct sx150x_platform_data *pdata)
-{
- int err = 0;
-
- if (pdata->reset_during_probe) {
- err = sx150x_reset(chip);
- if (err < 0)
- return err;
- }
-
- if (chip->dev_cfg->model == SX150X_789)
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x789.reg_misc,
- 0x01);
- else if (chip->dev_cfg->model == SX150X_456)
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x456.reg_advance,
- 0x04);
- else
- err = sx150x_i2c_write(chip->client,
- chip->dev_cfg->pri.x123.reg_advance,
- 0x00);
- if (err < 0)
- return err;
-
- err = sx150x_init_io(chip, chip->dev_cfg->reg_pullup,
- pdata->io_pullup_ena);
- if (err < 0)
- return err;
-
- err = sx150x_init_io(chip, chip->dev_cfg->reg_pulldn,
- pdata->io_pulldn_ena);
- if (err < 0)
- return err;
-
- if (chip->dev_cfg->model == SX150X_789) {
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x789.reg_polarity,
- pdata->io_polarity);
- if (err < 0)
- return err;
- } else if (chip->dev_cfg->model == SX150X_456) {
- /* Set all pins to work in normal mode */
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x456.reg_pld_mode,
- 0);
- if (err < 0)
- return err;
- } else {
- /* Set all pins to work in normal mode */
- err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x123.reg_pld_mode,
- 0);
- if (err < 0)
- return err;
- }
-
-
- if (pdata->oscio_is_gpo)
- sx150x_set_oscio(chip, 0);
-
- return err;
-}
-
-static int sx150x_install_irq_chip(struct sx150x_chip *chip,
- int irq_summary,
- int irq_base)
-{
- int err;
-
- chip->irq_summary = irq_summary;
- chip->irq_base = irq_base;
-
- /* Add gpio chip to irq subsystem */
- err = gpiochip_irqchip_add(&chip->gpio_chip,
- &chip->irq_chip, chip->irq_base,
- handle_edge_irq, IRQ_TYPE_EDGE_BOTH);
- if (err) {
- dev_err(&chip->client->dev,
- "could not connect irqchip to gpiochip\n");
- return err;
- }
-
- err = devm_request_threaded_irq(&chip->client->dev,
- irq_summary, NULL, sx150x_irq_thread_fn,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_FALLING,
- chip->irq_chip.name, chip);
- if (err < 0) {
- chip->irq_summary = -1;
- chip->irq_base = -1;
- }
-
- return err;
-}
-
-static int sx150x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WRITE_WORD_DATA;
- struct sx150x_platform_data *pdata;
- struct sx150x_chip *chip;
- int rc;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata)
- return -EINVAL;
-
- if (!i2c_check_functionality(client->adapter, i2c_funcs))
- return -ENOSYS;
-
- chip = devm_kzalloc(&client->dev,
- sizeof(struct sx150x_chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- sx150x_init_chip(chip, client, id->driver_data, pdata);
- rc = sx150x_init_hw(chip, pdata);
- if (rc < 0)
- return rc;
-
- rc = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
- if (rc)
- return rc;
-
- if (pdata->irq_summary >= 0) {
- rc = sx150x_install_irq_chip(chip,
- pdata->irq_summary,
- pdata->irq_base);
- if (rc < 0)
- return rc;
- }
-
- i2c_set_clientdata(client, chip);
-
- return 0;
-}
-
-static struct i2c_driver sx150x_driver = {
- .driver = {
- .name = "sx150x",
- .of_match_table = of_match_ptr(sx150x_of_match),
- },
- .probe = sx150x_probe,
- .id_table = sx150x_id,
-};
-
-static int __init sx150x_init(void)
-{
- return i2c_add_driver(&sx150x_driver);
-}
-subsys_initcall(sx150x_init);
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 5a5a6cb00eea..be97101c2c9a 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !!(ret & BIT(pos));
+ return !(ret & BIT(pos));
}
static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
@@ -337,21 +337,20 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&tc3589x_gpio->chip,
- &tc3589x_gpio_irq_chip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&tc3589x_gpio->chip,
+ &tc3589x_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev,
"could not connect irqchip to gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&tc3589x_gpio->chip,
- &tc3589x_gpio_irq_chip,
- irq,
- NULL);
+ gpiochip_set_nested_irqchip(&tc3589x_gpio->chip,
+ &tc3589x_gpio_irq_chip,
+ irq);
platform_set_drvdata(pdev, tc3589x_gpio);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 3edb09cb9ee0..521fbe338589 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -283,8 +283,4 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-static int __init gpio_vf610_init(void)
-{
- return platform_driver_register(&vf610_gpio_driver);
-}
-device_initcall(gpio_vf610_init);
+builtin_platform_driver(vf610_gpio_driver);
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index d0ddba7a9d08..34baee5b1dd6 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -426,8 +426,8 @@ static int wcove_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&wg->chip, &wcove_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add_nested(&wg->chip, &wcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "Failed to add irqchip: %d\n", ret);
return ret;
@@ -446,6 +446,8 @@ static int wcove_gpio_probe(struct platform_device *pdev)
return ret;
}
+ gpiochip_set_nested_irqchip(&wg->chip, &wcove_irqchip, virq);
+
return 0;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 72a4b326fd0d..a3faefa44f68 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -468,7 +468,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
int ret;
memset(&args, 0, sizeof(args));
- ret = acpi_node_get_property_reference(fwnode, propname, index, &args);
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
+ &args);
if (ret) {
struct acpi_device *adev = to_acpi_device_node(fwnode);
@@ -483,13 +484,13 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* on returned args.
*/
lookup->adev = args.adev;
- if (args.nargs >= 2) {
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- /* 3rd argument, if present is used to specify active_low. */
- if (args.nargs >= 3)
- lookup->active_low = !!args.args[2];
- }
+ if (args.nargs != 3)
+ return -EPROTO;
+
+ lookup->index = args.args[0];
+ lookup->pin_index = args.args[1];
+ lookup->active_low = !!args.args[2];
+
return 0;
}
@@ -859,6 +860,77 @@ static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
}
}
+static struct gpio_desc *acpi_gpiochip_parse_own_gpio(
+ struct acpi_gpio_chip *achip, struct fwnode_handle *fwnode,
+ const char **name, unsigned int *lflags, unsigned int *dflags)
+{
+ struct gpio_chip *chip = achip->chip;
+ struct gpio_desc *desc;
+ u32 gpios[2];
+ int ret;
+
+ *lflags = 0;
+ *dflags = 0;
+ *name = NULL;
+
+ ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
+ ARRAY_SIZE(gpios));
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, gpios[0]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ desc = gpiochip_get_desc(chip, ret);
+ if (IS_ERR(desc))
+ return desc;
+
+ if (gpios[1])
+ *lflags |= GPIO_ACTIVE_LOW;
+
+ if (fwnode_property_present(fwnode, "input"))
+ *dflags |= GPIOD_IN;
+ else if (fwnode_property_present(fwnode, "output-low"))
+ *dflags |= GPIOD_OUT_LOW;
+ else if (fwnode_property_present(fwnode, "output-high"))
+ *dflags |= GPIOD_OUT_HIGH;
+ else
+ return ERR_PTR(-EINVAL);
+
+ fwnode_property_read_string(fwnode, "line-name", name);
+
+ return desc;
+}
+
+static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
+{
+ struct gpio_chip *chip = achip->chip;
+ struct fwnode_handle *fwnode;
+
+ device_for_each_child_node(chip->parent, fwnode) {
+ unsigned int lflags, dflags;
+ struct gpio_desc *desc;
+ const char *name;
+ int ret;
+
+ if (!fwnode_property_present(fwnode, "gpio-hog"))
+ continue;
+
+ desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
+ &lflags, &dflags);
+ if (IS_ERR(desc))
+ continue;
+
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret) {
+ dev_err(chip->parent, "Failed to hog GPIO\n");
+ fwnode_handle_put(fwnode);
+ return;
+ }
+ }
+}
+
void acpi_gpiochip_add(struct gpio_chip *chip)
{
struct acpi_gpio_chip *acpi_gpio;
@@ -889,7 +961,11 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
return;
}
+ if (!chip->names)
+ devprop_gpiochip_set_names(chip);
+
acpi_gpiochip_request_regions(acpi_gpio);
+ acpi_gpiochip_scan_gpios(acpi_gpio);
acpi_walk_dep_device_list(handle);
}
@@ -918,18 +994,27 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
-static unsigned int acpi_gpio_package_count(const union acpi_object *obj)
+static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
const union acpi_object *end = element + obj->package.count;
unsigned int count = 0;
while (element < end) {
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE)
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ element += 3;
+ /* Fallthrough */
+ case ACPI_TYPE_INTEGER:
+ element++;
count++;
+ break;
- element++;
+ default:
+ return -EPROTO;
+ }
}
+
return count;
}
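For reference, a sketch of the package layout this walks (an assumption drawn from the lookup code earlier in this file, which expects a reference followed by exactly three integer arguments; a bare integer acts as a hole for an absent GPIO):

	/*
	 *	Package () {
	 *		^GPO1, 0, 19, 1,	// reference + index, pin, active_low -> one entry
	 *		0,			// plain integer hole                 -> one entry
	 *	}
	 *
	 * acpi_gpio_package_count() returns 2 for a package like this, and
	 * -EPROTO if any other object type appears.
	 */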
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
new file mode 100644
index 000000000000..27f383bda7d9
--- /dev/null
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -0,0 +1,67 @@
+/*
+ * Device property helpers for GPIO chips.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+
+#include "gpiolib.h"
+
+/**
+ * devprop_gpiochip_set_names - Set GPIO line names using device properties
+ * @chip: GPIO chip whose lines should be named, if possible
+ *
+ * Looks for device property "gpio-line-names" and if it exists assigns
+ * GPIO line names for the chip. The memory allocated for the assigned
+ * names belongs to the underlying firmware node and should not be released
+ * by the caller.
+ */
+void devprop_gpiochip_set_names(struct gpio_chip *chip)
+{
+ struct gpio_device *gdev = chip->gpiodev;
+ const char **names;
+ int ret, i;
+
+ if (!chip->parent) {
+ dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
+ return;
+ }
+
+ ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ NULL, 0);
+ if (ret < 0)
+ return;
+
+ if (ret != gdev->ngpio) {
+ dev_warn(chip->parent,
+ "names %d do not match number of GPIOs %d\n", ret,
+ gdev->ngpio);
+ return;
+ }
+
+ names = kcalloc(gdev->ngpio, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return;
+
+ ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ names, gdev->ngpio);
+ if (ret < 0) {
+ dev_warn(chip->parent, "failed to read GPIO line names\n");
+ kfree(names);
+ return;
+ }
+
+ for (i = 0; i < gdev->ngpio; i++)
+ gdev->descs[i].name = names[i];
+
+ kfree(names);
+}
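For a four-line chip, a firmware node along the following lines would satisfy the helper; this is an illustrative device tree fragment (node and line names made up here), shown as a comment since the property must supply exactly one string per GPIO or the helper warns and returns:

	/*
	 *	gpio-expander@20 {
	 *		gpio-controller;
	 *		#gpio-cells = <2>;
	 *		gpio-line-names = "power-led", "user-button",
	 *				  "sd-card-detect", "spare";
	 *	};
	 */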
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..92b185f19232 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
#include "gpiolib.h"
-static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ struct of_phandle_args *gpiospec = data;
+
+ return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
-static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+static struct gpio_chip *of_find_gpiochip_by_xlate(
+ struct of_phandle_args *gpiospec)
{
- return gpiochip_find(np, of_gpiochip_match_node);
+ return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
}
static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
return ERR_PTR(ret);
}
- chip = of_find_gpiochip_by_node(gpiospec.np);
+ chip = of_find_gpiochip_by_xlate(&gpiospec);
if (!chip) {
desc = ERR_PTR(-EPROBE_DEFER);
goto out;
@@ -222,51 +226,6 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
}
/**
- * of_gpiochip_set_names() - set up the names of the lines
- * @chip: GPIO chip whose lines should be named, if possible
- */
-static void of_gpiochip_set_names(struct gpio_chip *gc)
-{
- struct gpio_device *gdev = gc->gpiodev;
- struct device_node *np = gc->of_node;
- int i;
- int nstrings;
-
- nstrings = of_property_count_strings(np, "gpio-line-names");
- if (nstrings <= 0)
- /* Lines names not present */
- return;
-
- /* This is normally not what you want */
- if (gdev->ngpio != nstrings)
- dev_info(&gdev->dev, "gpio-line-names specifies %d line "
- "names but there are %d lines on the chip\n",
- nstrings, gdev->ngpio);
-
- /*
- * Make sure to not index beyond the end of the number of descriptors
- * of the GPIO device.
- */
- for (i = 0; i < gdev->ngpio; i++) {
- const char *name;
- int ret;
-
- ret = of_property_read_string_index(np,
- "gpio-line-names",
- i,
- &name);
- if (ret) {
- if (ret != -ENODATA)
- dev_err(&gdev->dev,
- "unable to name line %d: %d\n",
- i, ret);
- break;
- }
- gdev->descs[i].name = name;
- }
-}
-
-/**
* of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
* @chip: gpio chip to act on
*
@@ -292,8 +251,10 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
continue;
ret = gpiod_hog(desc, name, lflags, dflags);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
return ret;
+ }
}
return 0;
@@ -522,7 +483,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
/* If the chip defines names itself, these take precedence */
if (!chip->names)
- of_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip);
of_node_get(chip->of_node);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 20e09b7c2de3..f4c26c7826cd 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/timekeeping.h>
@@ -423,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
+ struct file *file;
int fd, i, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -499,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
i--;
lh->numdescs = handlereq.lines;
- fd = anon_inode_getfd("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_descs;
}
+ file = anon_inode_getfile("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- ret = -EFAULT;
- goto out_free_descs;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->numdescs);
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
gpiod_free(lh->descs[i]);
@@ -721,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
+ struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
@@ -815,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
- fd = anon_inode_getfd("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_irq;
}
+ file = anon_inode_getfile("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- ret = -EFAULT;
- goto out_free_irq;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_irq:
free_irq(le->irq, le);
out_free_desc:
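Both hunks above switch to the same reserve-then-install pattern; a condensed sketch of the ordering, with generic placeholder names (example_fops, priv, ip are not from the patch):

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);	/* reserve an fd number only */
	if (fd < 0)
		return fd;
	file = anon_inode_getfile("example", &example_fops, priv, O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	if (copy_to_user(ip, &fd, sizeof(fd))) {
		fput(file);		/* drops the only reference, so ->release() cleans up */
		put_unused_fd(fd);	/* the fd was never installed, just recycle the number */
		return -EFAULT;
	}
	fd_install(fd, file);		/* publish last: only now does userspace see the fd */
	return 0;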
@@ -953,7 +986,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
return -ENODEV;
get_device(&gdev->dev);
filp->private_data = gdev;
- return 0;
+
+ return nonseekable_open(inode, filp);
}
/**
@@ -978,7 +1012,7 @@ static const struct file_operations gpio_fileops = {
.release = gpio_chrdev_release,
.open = gpio_chrdev_open,
.owner = THIS_MODULE,
- .llseek = noop_llseek,
+ .llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gpio_ioctl_compat,
@@ -1479,7 +1513,7 @@ static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
}
/**
- * gpiochip_set_chained_irqchip() - sets a chained irqchip to a gpiochip
+ * gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip chain to
* @irqchip: the irqchip to chain to the gpiochip
* @parent_irq: the irq number corresponding to the parent IRQ for this
@@ -1488,10 +1522,10 @@ static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
*/
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- int parent_irq,
- irq_flow_handler_t parent_handler)
+static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler)
{
unsigned int offset;
@@ -1515,7 +1549,7 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq_parent = parent_irq;
+ gpiochip->irq_chained_parent = parent_irq;
}
/* Set the parent IRQ for all affected IRQs */
@@ -1526,9 +1560,48 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
parent_irq);
}
}
+
+/**
+ * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
+ * @gpiochip: the gpiochip to set the irqchip chain to
+ * @irqchip: the irqchip to chain to the gpiochip
+ * @parent_irq: the irq number corresponding to the parent IRQ for this
+ * chained irqchip
+ * @parent_handler: the parent interrupt handler for the accumulated IRQ
+ * coming out of the gpiochip. If the interrupt is nested rather than
+ * cascaded, pass NULL in this handler argument
+ */
+void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler)
+{
+ gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
+ parent_handler);
+}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
/**
+ * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
+ * @gpiochip: the gpiochip to set the irqchip nested handler to
+ * @irqchip: the irqchip to nest to the gpiochip
+ * @parent_irq: the irq number corresponding to the parent IRQ for this
+ * nested irqchip
+ */
+void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq)
+{
+ if (!gpiochip->irq_nested) {
+ chip_err(gpiochip, "tried to nest a chained gpiochip\n");
+ return;
+ }
+ gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
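A minimal sketch of how a sleeping (for instance I2C-attached) GPIO expander driver would pair the two nested calls; the foo_* identifiers are hypothetical, while the call sequence follows the stmpe, tc3589x and wcove conversions earlier in this patch:

	static int foo_gpio_add_irqchip(struct foo_gpio *fg, int parent_irq)
	{
		int ret;

		/* IRQs for this chip are handled in a nested thread context */
		ret = gpiochip_irqchip_add_nested(&fg->chip, &foo_irq_chip, 0,
						  handle_simple_irq, IRQ_TYPE_NONE);
		if (ret)
			return ret;

		/* threaded handler demultiplexes lines via handle_nested_irq() */
		ret = devm_request_threaded_irq(fg->dev, parent_irq, NULL,
						foo_irq_thread_fn, IRQF_ONESHOT,
						"foo-gpio", fg);
		if (ret)
			return ret;

		gpiochip_set_nested_irqchip(&fg->chip, &foo_irq_chip, parent_irq);
		return 0;
	}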
+
+/**
* gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
* @d: the irqdomain used by this irqchip
* @irq: the global irq number used by this GPIO irqchip irq
@@ -1550,8 +1623,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
*/
irq_set_lockdep_class(irq, chip->lock_key);
irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
- /* Chips that can sleep need nested thread handlers */
- if (chip->can_sleep && !chip->irq_not_threaded)
+ /* Chips that use nested thread handlers have them marked */
+ if (chip->irq_nested)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
@@ -1569,7 +1642,7 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *chip = d->host_data;
- if (chip->can_sleep)
+ if (chip->irq_nested)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
@@ -1624,9 +1697,9 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
acpi_gpiochip_free_interrupts(gpiochip);
- if (gpiochip->irq_parent) {
- irq_set_chained_handler(gpiochip->irq_parent, NULL);
- irq_set_handler_data(gpiochip->irq_parent, NULL);
+ if (gpiochip->irq_chained_parent) {
+ irq_set_chained_handler(gpiochip->irq_chained_parent, NULL);
+ irq_set_handler_data(gpiochip->irq_chained_parent, NULL);
}
/* Remove all IRQ mappings and delete the domain */
@@ -1650,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/**
- * gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1658,6 +1731,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
+ * @nested: whether this is a nested irqchip calling handle_nested_irq()
+ * in its IRQ handler
* @lock_key: lockdep class
*
* This function closely associates a certain irqchip with a certain
@@ -1679,6 +1754,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
+ bool nested,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
@@ -1693,6 +1769,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
+ gpiochip->irq_nested = nested;
of_node = gpiochip->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
@@ -2190,6 +2267,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
+ int val = !!value;
int ret;
/* GPIOs used for IRQs shall not be set as output */
@@ -2209,7 +2287,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
goto set_output_value;
}
/* Emulate open drain by not actively driving the line high */
- if (value)
+ if (val)
return gpiod_direction_input(desc);
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
@@ -2220,7 +2298,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
goto set_output_value;
}
/* Emulate open source by not actively driving the line low */
- if (!value)
+ if (!val)
return gpiod_direction_input(desc);
} else {
/* Make sure to disable open drain/source hardware, if any */
@@ -2238,10 +2316,10 @@ set_output_value:
return -EIO;
}
- ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), value);
+ ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
- trace_gpio_value(desc_to_gpio(desc), 0, value);
+ trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
}
@@ -2281,6 +2359,8 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
VALIDATE_DESC(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
+ else
+ value = !!value;
return _gpiod_direction_output_raw(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -2704,8 +2784,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (IS_ERR(desc))
return PTR_ERR(desc);
- /* Flush direction if something changed behind our back */
- if (chip->get_direction) {
+ /*
+ * If it's fast: flush the direction setting if something changed
+ * behind our back
+ */
+ if (!chip->can_sleep && chip->get_direction) {
int dir = chip->get_direction(chip, offset);
if (dir)
@@ -2722,6 +2805,15 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
set_bit(FLAG_USED_AS_IRQ, &desc->flags);
+
+ /*
+ * If the consumer has not set up a label (such as when the
+ * IRQ is referenced from .to_irq()) we set up a label here
+ * so it is clear this is used as an interrupt.
+ */
+ if (!desc->label)
+ desc_set_label(desc, "interrupt");
+
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -2736,10 +2828,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
*/
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
+
+ /* If we only had this marking, erase it */
+ if (desc->label && !strcmp(desc->label, "interrupt"))
+ desc_set_label(desc, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -3134,7 +3233,7 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* Process flags */
if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
status = gpiod_direction_output(desc,
- dflags & GPIOD_FLAGS_BIT_DIR_VAL);
+ !!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
else
status = gpiod_direction_input(desc);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 346fbda39220..d10eaf520860 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -209,6 +209,8 @@ static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
+void devprop_gpiochip_set_names(struct gpio_chip *chip);
+
/* With descriptor prefix */
#define gpiod_emerg(desc, fmt, ...) \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 483059a22b1b..ebfe8404c25f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,6 +12,7 @@ menuconfig DRM
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
+ select SYNC_FILE
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -33,6 +34,20 @@ config DRM_DP_AUX_CHARDEV
read and write values to arbitrary DPCD registers on the DP aux
channel.
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM=y
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -223,6 +238,12 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
+source "drivers/gpu/drm/zte/Kconfig"
+
+source "drivers/gpu/drm/mxsfb/Kconfig"
+
+source "drivers/gpu/drm/meson/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c720454017..b9ae4280de9d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,13 +9,14 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
- drm_plane.o drm_color_mgmt.o
+ drm_plane.o drm_color_mgmt.o drm_print.o \
+ drm_dumb_buffers.o drm_mode_config.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,6 +24,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -79,6 +81,7 @@ obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
+obj-$(CONFIG_DRM_MESON) += meson/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
@@ -86,3 +89,5 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
+obj-$(CONFIG_DRM_ZTE) += zte/
+obj-$(CONFIG_DRM_MXSFB) += mxsfb/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..63208e5c1588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
#include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -88,15 +92,16 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
-extern int amdgpu_powercontainment;
+extern int amdgpu_no_evict;
+extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
-extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
struct amdgpu_device;
struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
bool amdgpu_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+ bool late_initialized;
+ bool hang;
+};
+
struct amdgpu_ip_block_version {
- enum amd_ip_block_type type;
- u32 major;
- u32 minor;
- u32 rev;
+ const enum amd_ip_block_type type;
+ const u32 major;
+ const u32 minor;
+ const u32 rev;
const struct amd_ip_funcs *funcs;
};
+struct amdgpu_ip_block {
+ struct amdgpu_ip_block_status status;
+ const struct amdgpu_ip_block_version *version;
+};
+
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor);
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
void (*set_rptr)(struct amdgpu_device *adev);
};
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
- void (*set_wptr)(struct amdgpu_ring *ring);
- /* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- /* command emit functions */
- void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
- void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
- uint64_t seq, unsigned flags);
- void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
- uint64_t pd_addr);
- void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
- void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
- /* testing functions */
- int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring, long timeout);
- /* insert NOP packets */
- void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
- /* note usage for clock and power gating */
- void (*begin_use)(struct amdgpu_ring *ring);
- void (*end_use)(struct amdgpu_ring *ring);
- void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
- unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
/*
* BIOS.
*/
@@ -364,47 +337,6 @@ struct amdgpu_clock {
};
/*
- * Fences.
- */
-struct amdgpu_fence_driver {
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
- /* sync_seq is protected by ring emission lock */
- uint32_t sync_seq;
- atomic_t last_seq;
- bool initialized;
- struct amdgpu_irq_src *irq_src;
- unsigned irq_type;
- struct timer_list fallback_timer;
- unsigned num_fences_mask;
- spinlock_t lock;
- struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
* BO.
*/
struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- struct fence *last_pt_update;
+ struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
@@ -459,12 +391,12 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
struct list_head va;
/* Constant after initialization */
- struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -543,7 +475,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct fence *fence;
+ struct dma_fence *fence;
};
/*
@@ -561,27 +493,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
- DECLARE_HASHTABLE(fences, 4);
- struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
- void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
@@ -703,10 +614,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct fence *excl;
+ struct dma_fence *excl;
unsigned shared_count;
- struct fence **shared;
- struct fence_cb cb;
+ struct dma_fence **shared;
+ struct dma_fence_cb cb;
bool async;
};
@@ -723,14 +634,6 @@ struct amdgpu_ib {
uint32_t flags;
};
-enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE
-};
-
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -742,214 +645,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f);
-
-struct amdgpu_ring {
- struct amdgpu_device *adev;
- const struct amdgpu_ring_funcs *funcs;
- struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
-
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
- unsigned ring_size;
- unsigned max_dw;
- int count_dw;
- uint64_t gpu_addr;
- uint32_t align_mask;
- uint32_t ptr_mask;
- bool ready;
- u32 nop;
- u32 idx;
- u32 me;
- u32 pipe;
- u32 queue;
- struct amdgpu_bo *mqd_obj;
- u32 doorbell_index;
- bool use_doorbell;
- unsigned wptr_offs;
- unsigned fence_offs;
- uint64_t current_ctx;
- enum amdgpu_ring_type type;
- char name[16];
- unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
- struct amdgpu_bo_list_entry entry;
- uint64_t addr;
- uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
- /* tree of virtual addresses mapped */
- struct rb_root va;
-
- /* protecting invalidated */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct fence *page_directory_fence;
- uint64_t last_eviction_counter;
-
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
- /* protecting freed */
- spinlock_t freed_lock;
-
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
-
- /* client id */
- u64 client_id;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct fence *first;
- struct amdgpu_sync active;
- struct fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
- /* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
- uint32_t max_pfn;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
- /* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
+ struct dma_fence **f);
/*
* context related structures
@@ -957,7 +653,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
- struct fence **fences;
+ struct dma_fence **fences;
struct amd_sched_entity entity;
};
@@ -966,7 +662,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
- struct fence **fences;
+ struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
@@ -982,8 +678,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1093,6 +789,16 @@ struct amdgpu_scratch {
/*
* GFX configurations
*/
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+ uint32_t rb_backend_disable;
+ uint32_t user_rb_backend_disable;
+ uint32_t raster_config;
+ uint32_t raster_config_1;
+};
+
struct amdgpu_gca_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
@@ -1121,6 +827,8 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};
struct amdgpu_cu_info {
@@ -1133,6 +841,9 @@ struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
+ void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
+ void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
};
struct amdgpu_gfx {
@@ -1181,23 +892,13 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f);
+ struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f);
+ struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
/*
* CS.
@@ -1225,7 +926,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
@@ -1245,7 +946,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
- struct fence *fence; /* the hw fence */
+ struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
@@ -1294,354 +995,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
- AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amdgpu_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
- enum amdgpu_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
@@ -1862,6 +1215,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1939,14 +1294,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-struct amdgpu_ip_block_status {
- bool valid;
- bool sw;
- bool hw;
- bool late_initialized;
- bool hang;
-};
-
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -1985,6 +1332,7 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
+ uint32_t bios_size;
bool is_atom_bios;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -2102,9 +1450,8 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
- const struct amdgpu_ip_block_version *ip_blocks;
+ struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
- struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
@@ -2127,6 +1474,11 @@ struct amdgpu_device {
};
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev,
@@ -2278,15 +1630,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
-#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -2301,108 +1650,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
/* Common functions */
@@ -2433,8 +1682,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
-int amdgpu_ttm_global_init(struct amdgpu_device *adev);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
@@ -2471,6 +1718,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
+int amdgpu_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
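Two patterns in the amdgpu.h hunks above recur throughout the rest of the patch: every struct fence becomes struct dma_fence, and the struct amdgpu_device *adev back-pointer is dropped from struct amdgpu_bo in favour of the new amdgpu_ttm_adev() helper, which recovers the device from the embedded TTM bo_device via container_of(). A minimal standalone sketch of the back-pointer-removal idiom, using hypothetical example_* types (only amdgpu_ttm_adev() and mman.bdev come from the patch itself):

#include <linux/kernel.h>	/* container_of() */

struct example_mman   { int placeholder; };
struct example_device { struct example_mman mman; };

/* Recover the owning device from a pointer to its embedded member,
 * the same way the new amdgpu_ttm_adev() does for mman.bdev. */
static inline struct example_device *
example_device_from_mman(struct example_mman *mman)
{
	return container_of(mman, struct example_device, mman);
}

Deriving the device from the embedded member means struct amdgpu_bo no longer has to carry (and keep consistent) its own adev pointer.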
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..06879d1dcabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- const struct amdgpu_ip_block_version *ip_version =
+ const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
- if (!ip_version)
+ if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
- ip_version->major, ip_version->minor);
+ ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
{
int i, ret;
struct device *dev;
-
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
@@ -456,7 +459,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@@ -472,3 +475,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+};
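The ACP conversion above is the template the series applies to every IP block: the amd_ip_funcs table goes static and only a const struct amdgpu_ip_block_version is exported, while callers go through amdgpu_get_ip_block() and dereference ip_block->version. A hedged sketch of the consumer side, condensed from the acp_hw_init() hunk (the -ENODEV fallback from the hunk is omitted; the wrapper name is hypothetical):

static int example_acp_hw_init(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block =
		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)		/* block not present on this ASIC */
		return -EINVAL;

	/* version information now lives behind ip_block->version */
	return amd_acp_hw_init(adev->acp.cgs_device,
			       ip_block->version->major,
			       ip_block->version->minor);
}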
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
- GET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
- GET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = voltage_level;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- /* 0xff01 is a flag rather then an actual voltage */
- if (voltage_level == 0xff01)
- return;
-
- switch (crev) {
- case 1:
- args.v1.ucVoltageType = voltage_type;
- args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
- args.v1.ucVoltageIndex = volt_index;
- break;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- case 3:
- args.v3.ucVoltageType = voltage_type;
- args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
- args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return;
- }
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
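The new amdgpu_atombios_scratch_regs_engine_hung() above is a plain read-modify-write of one flag in a BIOS scratch register. Its generic shape, with hypothetical rd()/wr() accessors standing in for RREG32()/WREG32():

#include <linux/types.h>

/* Fetch the register, set or clear exactly one flag, write it back. */
static void example_update_flag(u32 (*rd)(u32 reg),
				void (*wr)(u32 reg, u32 val),
				u32 reg, u32 flag, bool set)
{
	u32 tmp = rd(reg);

	if (set)
		tmp |= flag;
	else
		tmp &= ~flag;

	wr(reg, tmp);
}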
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38..70e9acef5d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type);
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index dae35a96a694..6c343a933182 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -34,6 +34,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
@@ -555,17 +560,25 @@ static bool amdgpu_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -573,6 +586,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
amdgpu_atpx_init();
return true;
}
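The ATPX change above only drops the legacy power_cntl method when at least one upstream PCIe port of a VGA/display device reports bridge_d3, i.e. when bridge runtime PM can actually power the dGPU down. A standalone sketch of the detection loop (the helper name is hypothetical; pci_get_class(), pci_upstream_bridge() and bridge_d3 are the interfaces the hunk relies on):

#include <linux/pci.h>

static bool example_any_vga_bridge_d3(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_dev *parent;
	bool d3_supported = false;

	/* pci_get_class() iterates all matching devices and manages the
	 * reference count of the cursor for us. */
	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		parent = pci_upstream_bridge(pdev);
		d3_supported |= parent && parent->bridge_d3;
	}

	return d3_supported;
}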
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
- fence_put(fence);
+ dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
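The benchmark conversion above is representative of the mechanical half of this patch: fence_wait()/fence_put() become dma_fence_wait()/dma_fence_put() with unchanged semantics. A minimal sketch of the get/wait/put discipline, assuming <linux/dma-fence.h>; the helper name and the one-second timeout are illustrative:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int example_sync_to_fence(struct dma_fence *fence)
{
	long r;

	if (!fence)
		return 0;

	dma_fence_get(fence);	/* hold a reference across the wait */
	r = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	dma_fence_put(fence);	/* drop our reference */

	if (r == 0)
		return -ETIMEDOUT;	/* wait timed out */

	return r < 0 ? r : 0;	/* -ERESTARTSYS on signal, 0 on success */
}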
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 2b6afe123f3d..4f973a9c7b87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -70,10 +70,11 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
adev->bios = kmalloc(size, GFP_KERNEL);
- if (adev->bios == NULL) {
+ if (!adev->bios) {
iounmap(bios);
return false;
}
+ adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
return true;
@@ -103,6 +104,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
pci_unmap_rom(adev->pdev, bios);
return false;
}
+ adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
return true;
@@ -135,6 +137,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
DRM_ERROR("no memory to allocate for BIOS\n");
return false;
}
+ adev->bios_size = len;
/* read complete BIOS */
return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
@@ -159,6 +162,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
if (adev->bios == NULL) {
return false;
}
+ adev->bios_size = size;
return true;
}
@@ -273,6 +277,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
kfree(adev->bios);
return false;
}
+ adev->bios_size = size;
return true;
}
#else
@@ -334,6 +339,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+ adev->bios_size = vhdr->ImageLength;
ret = !!adev->bios;
out_unmap:
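Every BIOS-fetch path above now records adev->bios_size immediately after the adev->bios allocation succeeds, so later consumers can bound their reads instead of trusting header fields alone. A hedged sketch of the invariant with a hypothetical structure (kmemdup() is the real kernel helper; the types and helper name are illustrative):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_vbios {
	u8  *image;	/* owned copy of the ROM */
	u32  size;	/* always kept in sync with image */
};

static bool example_store_vbios(struct example_vbios *v,
				const void *rom, size_t len)
{
	v->image = kmemdup(rom, len, GFP_KERNEL);
	if (!v->image)
		return false;

	v->size = len;	/* record the size the moment the copy succeeds */
	return true;
}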
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 651115dcce12..c02db01f6583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
+ entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682f..9ada56c16a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
- || adev->asic_type == CHIP_POLARIS10)
- result = AMDGPU_UCODE_ID_CP_MEC2;
- else
+ /* For VI, JT2 should be the same as JT1 because:
+ 1) MEC2 and MEC1 use exactly the same FW.
+ 2) JT2 is not patched but JT1 is.
+ */
+ if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
+ else
+ result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
+ case CGS_UCODE_ID_STORAGE:
+ result = AMDGPU_UCODE_ID_STORAGE;
+ break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@@ -715,7 +723,7 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
enum cgs_ucode_id type)
{
CGS_FUNC_ADEV;
- uint16_t fw_version;
+ uint16_t fw_version = 0;
switch (type) {
case CGS_UCODE_ID_SDMA0:
@@ -745,9 +753,11 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
case CGS_UCODE_ID_RLC_G:
fw_version = adev->gfx.rlc_fw_version;
break;
+ case CGS_UCODE_ID_STORAGE:
+ break;
default:
DRM_ERROR("firmware type %d do not have version\n", type);
- fw_version = 0;
+ break;
}
return fw_version;
}
@@ -776,12 +786,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
- info->mc_addr = gpu_addr;
+
+ info->kptr = ucode->kaddr;
info->image_size = data_size;
+ info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+ if (CGS_UCODE_ID_CP_MEC == type)
+ info->image_size = (header->jt_offset) << 2;
+
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@@ -795,10 +811,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TOPAZ:
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+ strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+ ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+ strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
@@ -851,6 +876,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_sriov_vf(adev);
+}
+
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@@ -1204,6 +1235,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
+ amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
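The SMC firmware selection above special-cases a handful of PCI device/revision pairs that need the "_k" variants on Topaz and Tonga. The same decision can be written as a small quirk table, which keeps the per-ASIC switch readable as more boards are added; the entries below are taken from the hunk, while the type and helper names are hypothetical:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

struct example_smc_quirk {
	u16 device;
	u8  revision;
	const char *fw_name;
};

static const struct example_smc_quirk example_smc_quirks[] = {
	{ 0x6900, 0x81, "amdgpu/topaz_k_smc.bin" },
	{ 0x6900, 0x83, "amdgpu/topaz_k_smc.bin" },
	{ 0x6907, 0x87, "amdgpu/topaz_k_smc.bin" },
	{ 0x6939, 0xf1, "amdgpu/tonga_k_smc.bin" },
	{ 0x6938, 0xf1, "amdgpu/tonga_k_smc.bin" },
};

static const char *example_pick_smc_fw(u16 device, u8 revision,
					const char *fallback)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_smc_quirks); i++)
		if (example_smc_quirks[i].device == device &&
		    example_smc_quirks[i].revision == revision)
			return example_smc_quirks[i].fw_name;

	return fallback;
}

A table also makes the device/revision-to-firmware mapping auditable at a glance.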
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e41..8d1cf2d3e663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->ddc_bus->has_aux) {
+ if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
- if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
- return encoder;
- }
-
- /* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
- if (encoder) {
- amdgpu_connector_add_common_modes(encoder, connector);
- }
-
- return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
- return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
- return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
- .get_modes = amdgpu_connector_virtual_get_modes,
- .mode_valid = amdgpu_connector_virtual_mode_valid,
- .best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
- .dpms = amdgpu_connector_virtual_dpms,
- .detect = amdgpu_connector_virtual_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = amdgpu_connector_virtual_set_property,
- .destroy = amdgpu_connector_destroy,
- .force = amdgpu_connector_virtual_force,
-};
-
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
- case DRM_MODE_CONNECTOR_VIRTUAL:
- amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
- if (!amdgpu_dig_connector)
- goto failed;
- amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
- subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..29d6d84d1c28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ retry:
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo_list_entry *lobj)
+ struct amdgpu_bo *validated)
{
- uint32_t domain = lobj->robj->allowed_domains;
+ uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
- if (candidate == lobj)
+ if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_cs_parser *p = param;
+ int r;
+
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ if (r)
+ return r;
+
+ if (bo->shadow)
+ r = amdgpu_cs_bo_validate(p, bo->shadow);
+
+ return r;
+}
+
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
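The new amdgpu_cs_validate() above folds the "validate, evict something on -ENOMEM, retry" loop plus the shadow-BO pass into a single callback, so the amdgpu_vm_validate_pt_bos() call added further down can reuse it for page-table BOs. The retry skeleton in isolation, with hypothetical callback types:

#include <linux/errno.h>
#include <linux/types.h>

/* validate() returns 0 or a negative errno; try_evict() frees up space and
 * returns true while there is still something left to evict. */
static int example_validate_with_retry(void *ctx, void *obj,
				       int  (*validate)(void *ctx, void *obj),
				       bool (*try_evict)(void *ctx, void *obj))
{
	int r;

	do {
		r = validate(ctx, obj);
	} while (r == -ENOMEM && try_evict(ctx, obj));

	return r;
}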
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (bo->shadow) {
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
- }
-
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -519,7 +530,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
if (unlikely(r != 0)) {
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
}
@@ -593,14 +605,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
+ r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+ amdgpu_cs_validate, p);
+ if (r) {
+ DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ goto error_validate;
+ }
+
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -719,7 +736,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
- fence_put(parser->fence);
+ dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -756,7 +773,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
- struct fence *f;
+ struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -806,13 +823,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
- p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- } else {
+ }
+
+ if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -823,16 +841,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
return amdgpu_cs_sync_rings(p);
}
-static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
-{
- if (r == -EDEADLK) {
- r = amdgpu_gpu_reset(adev);
- if (!r)
- r = -EAGAIN;
- }
- return r;
-}
-
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser)
{
@@ -901,7 +909,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -916,9 +924,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- ib->gpu_addr = chunk_ib->va_start;
}
+ ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -926,8 +934,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -956,7 +964,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -978,7 +986,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
- fence_put(fence);
+ dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1008,7 +1016,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
- p->fence = fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1036,29 +1044,29 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(&parser, r, false);
- r = amdgpu_cs_handle_lockup(adev, r);
- return r;
- }
- r = amdgpu_cs_parser_bos(&parser, data);
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else if (r && r != -ERESTARTSYS)
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- else if (!r) {
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
+ goto out;
}
- if (!r) {
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r)
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ r = amdgpu_cs_parser_bos(&parser, data);
+ if (r) {
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to process the buffer list %d!\n", r);
+ goto out;
}
+ reserved_buffers = true;
+ r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
+ r = amdgpu_cs_dependencies(adev, &parser);
+ if (r) {
+ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ goto out;
+ }
+
for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
@@ -1070,7 +1078,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
- r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
@@ -1091,7 +1098,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1107,8 +1114,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
} else
r = 1;
@@ -1123,6 +1130,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
}
/**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_fence *user)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct dma_fence *fence;
+ int r;
+
+ r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+ user->ring, &ring);
+ if (r)
+ return ERR_PTR(r);
+
+ ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+ if (ctx == NULL)
+ return ERR_PTR(-EINVAL);
+
+ fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+ amdgpu_ctx_put(ctx);
+
+ return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ uint32_t fence_count = wait->in.fence_count;
+ unsigned int i;
+ long r = 1;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+
+ return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_wait_fences *wait,
+ struct drm_amdgpu_fence *fences)
+{
+ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+ uint32_t fence_count = wait->in.fence_count;
+ uint32_t first = ~0;
+ struct dma_fence **array;
+ unsigned int i;
+ long r;
+
+ /* Prepare the fence array */
+ array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+ if (array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < fence_count; i++) {
+ struct dma_fence *fence;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ goto err_free_fence_array;
+ } else if (fence) {
+ array[i] = fence;
+ } else { /* NULL, the fence has already been signaled */
+ r = 1;
+ goto out;
+ }
+ }
+
+ r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+ &first);
+ if (r < 0)
+ goto err_free_fence_array;
+
+out:
+ memset(wait, 0, sizeof(*wait));
+ wait->out.status = (r > 0);
+ wait->out.first_signaled = first;
+ /* set return value 0 to indicate success */
+ r = 0;
+
+err_free_fence_array:
+ for (i = 0; i < fence_count; i++)
+ dma_fence_put(array[i]);
+ kfree(array);
+
+ return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_wait_fences *wait = data;
+ uint32_t fence_count = wait->in.fence_count;
+ struct drm_amdgpu_fence *fences_user;
+ struct drm_amdgpu_fence *fences;
+ int r;
+
+ /* Get the fences from userspace */
+ fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+ GFP_KERNEL);
+ if (fences == NULL)
+ return -ENOMEM;
+
+ fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ if (copy_from_user(fences, fences_user,
+ sizeof(struct drm_amdgpu_fence) * fence_count)) {
+ r = -EFAULT;
+ goto err_free_fences;
+ }
+
+ if (wait->in.wait_all)
+ r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+ else
+ r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+ kfree(fences);
+
+ return r;
+}
+
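A minimal userspace sketch of driving the new wait-fences ioctl. This is illustrative only and not part of the patch: it assumes the matching DRM_IOCTL_AMDGPU_WAIT_FENCES definition and union drm_amdgpu_wait_fences layout from the amdgpu UAPI header, libdrm's drmIoctl(), and an fd/ctx_id/sequence number obtained from an earlier context-create and CS submit.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <amdgpu_drm.h>	/* header path may differ between installs */

	static int example_wait_any(int fd, uint32_t ctx_id,
				    uint64_t seq0, uint64_t seq1)
	{
		struct drm_amdgpu_fence f[2] = {
			{ .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = seq0 },
			{ .ctx_id = ctx_id, .ip_type = AMDGPU_HW_IP_GFX, .seq_no = seq1 },
		};
		union drm_amdgpu_wait_fences args;

		memset(&args, 0, sizeof(args));
		args.in.fences = (uintptr_t)f;		/* user pointer passed as u64 */
		args.in.fence_count = 2;
		args.in.wait_all = 0;			/* any one fence is enough */
		args.in.timeout_ns = 1000000000ull;	/* 1s */

		if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
			return -1;

		/* Per the handler above, out.status is non-zero when the wait
		 * finished before the timeout; first_signaled indexes into f[]. */
		if (args.out.status)
			printf("fence %u signaled\n", args.out.first_signaled);
		return 0;
	}

With wait_all = 1 the same call waits for every fence in the array; per the handler above, first_signaled is then left at zero.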
+/**
* amdgpu_cs_find_bo_va - find bo_va for VM address
*
* @parser: command submission parser context
@@ -1195,6 +1376,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ continue;
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r))
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct fence*), GFP_KERNEL);
+ sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
- break;
+ goto failed;
}
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
- }
return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+ return r;
}
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
- fence_put(ctx->rings[i].fences[j]);
+ dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
- struct fence *other = NULL;
+ struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
- r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
- fence_get(fence);
+ dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
- fence_put(other);
+ dma_fence_put(other);
return seq;
}
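A worked example of the ring indexing above (not part of the patch), assuming amdgpu_sched_jobs is at its usual default of 32:

	seq = 70  ->  idx = 70 & (32 - 1) = 6

Before storing the new fence in fences[6], the code waits on whatever was stored there previously (seq 38 = 70 - 32), so a context can never run more than amdgpu_sched_jobs submissions ahead of the GPU.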
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- struct fence *fence;
+ struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
- fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..cc8aafd9cb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
- if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
- amdgpu_bo_kunmap(adev->wb.wb_obj);
- amdgpu_bo_unpin(adev->wb.wb_obj);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- }
- amdgpu_bo_unref(&adev->wb.wb_obj);
- adev->wb.wb = NULL;
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+ &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->wb.wb_obj);
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->wb.wb_obj, &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->wb.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
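The reserve/pin/kmap boilerplate deleted here is exactly what amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() wrap. A minimal sketch of the same pattern for some other small kernel-owned GTT buffer (buffer size and variable names are illustrative, not from this patch):

	struct amdgpu_bo *bo;
	uint64_t gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;

	/* ... use cpu_ptr for CPU access, gpu_addr for the GPU ... */

	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);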
@@ -658,12 +636,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return false;
if (amdgpu_passthrough(adev)) {
- /* for FIJI: In whole GPU pass-through virtualization case
- * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
- * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
- * but if we force vPost do in pass-through case, the driver reload will hang.
- * whether doing vPost depends on amdgpu_card_posted if smc version is above
- * 00160e00 for FIJI.
+ /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
+ * reboot some old SMC firmware still needs the driver to do vPost or the
+ * GPU hangs; SMC firmware above version 22.15 doesn't have this flaw, so
+ * we force vPost only for SMC versions below 22.15.
*/
if (adev->asic_type == CHIP_FIJI) {
int err;
@@ -674,22 +650,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
- if (fw_ver >= 0x00160e00)
- return !amdgpu_card_posted(adev);
+ if (fw_ver < 0x00160e00)
+ return true;
}
- } else {
- /* in bare-metal case, amdgpu_card_posted return false
- * after system reboot/boot, and return true if driver
- * reloaded.
- * we shouldn't do vPost after driver reload otherwise GPU
- * could hang.
- */
- if (amdgpu_card_posted(adev))
- return false;
}
-
- /* we assume vPost is neede for all other cases */
- return true;
+ return !amdgpu_card_posted(adev);
}
/**
@@ -1051,6 +1016,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
+
+ if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
+ !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+ dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+ amdgpu_vram_page_split);
+ amdgpu_vram_page_split = 1024;
+ }
}
/**
@@ -1125,11 +1097,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1145,11 +1117,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1164,10 +1136,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1183,23 +1155,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type)
- return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type)
+ if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1220,38 +1191,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
- const struct amdgpu_ip_block_version *ip_block;
- ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
- if (ip_block && ((ip_block->major > major) ||
- ((ip_block->major == major) &&
- (ip_block->minor >= minor))))
+ if (ip_block && ((ip_block->version->major > major) ||
+ ((ip_block->version->major == major) &&
+ (ip_block->version->minor >= minor))))
return 0;
return 1;
}
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
+{
+ if (!ip_block_version)
+ return -EINVAL;
+
+ adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+ return 0;
+}
+
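With amdgpu_ip_block_add() in place, per-ASIC setup code can register its IP blocks one at a time against adev->ip_blocks instead of pointing the device at a static array. A sketch of how such a set_ip_blocks path might look; the *_ip_block symbols are illustrative placeholders, not names introduced by this patch:

	static int example_set_ip_blocks(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_ip_block_add(adev, &example_common_ip_block);
		if (r)
			return r;

		r = amdgpu_ip_block_add(adev, &example_gmc_ip_block);
		if (r)
			return r;

		return amdgpu_ip_block_add(adev, &example_gfx_ip_block);
	}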
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
- char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
- while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+ pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
+ long num_crtc;
+ int res = -1;
+
adev->enable_virtual_display = true;
+
+ if (pciaddname_tmp)
+ res = kstrtol(pciaddname_tmp, 10,
+ &num_crtc);
+
+ if (!res) {
+ if (num_crtc < 1)
+ num_crtc = 1;
+ if (num_crtc > 6)
+ num_crtc = 6;
+ adev->mode_info.num_crtc = num_crtc;
+ } else {
+ adev->mode_info.num_crtc = 1;
+ }
break;
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display);
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
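Putting the parsing above together, the virtual_display module parameter now takes an optional per-device CRTC count after a comma, with entries still separated by semicolons. An illustrative invocation (the PCI addresses are made up):

	amdgpu.virtual_display=0000:01:00.0,3;0000:02:00.0

The first device gets three virtual CRTCs, the second falls back to the default of one, and any requested count is clamped to the 1..6 range by the code above.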
@@ -1261,7 +1269,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_whether_enable_virtual_display(adev);
+ amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1313,33 +1321,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_status = kcalloc(adev->num_ip_blocks,
- sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
- if (adev->ip_block_status == NULL)
- return -ENOMEM;
-
- if (adev->ip_blocks == NULL) {
- DRM_ERROR("No IP blocks found!\n");
- return r;
- }
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else {
- if (adev->ip_blocks[i].funcs->early_init) {
- r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->early_init) {
+ r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1355,22 +1354,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].sw = true;
+ adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1380,22 +1380,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1406,25 +1407,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->late_init) {
- r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].late_initialized = true;
+ adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
- if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1439,74 +1441,83 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
+
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+
+ adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].sw = false;
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.sw = false;
+ adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].late_initialized)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].funcs->late_fini)
- adev->ip_blocks[i].funcs->late_fini((void *)adev);
- adev->ip_block_status[i].late_initialized = false;
+ if (adev->ip_blocks[i].version->funcs->late_fini)
+ adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
}
-static int amdgpu_suspend(struct amdgpu_device *adev)
+int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;
@@ -1518,21 +1529,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
- r = adev->ip_blocks[i].funcs->suspend(adev);
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1544,11 +1557,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->resume(adev);
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1599,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
- adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1859,8 +1873,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_status);
- adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1956,9 +1968,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
amdgpu_bo_evict_vram(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2010,6 +2026,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
return r;
}
}
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
if (!amdgpu_card_posted(adev) || !resume) {
@@ -2096,13 +2113,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_block_status[i].hang =
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
- if (adev->ip_block_status[i].hang) {
- DRM_INFO("IP block:%d is hang!\n", i);
+ if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+ adev->ip_blocks[i].status.hang =
+ adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang) {
+ DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2114,11 +2131,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2132,13 +2149,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
- if (adev->ip_block_status[i].hang) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
}
@@ -2152,11 +2169,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->soft_reset) {
- r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2170,11 +2187,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->post_soft_reset)
- r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->post_soft_reset)
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
@@ -2193,7 +2210,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct fence **fence)
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2268,8 +2285,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
}
if (need_full_reset) {
- /* save scratch */
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_suspend(adev);
retry:
@@ -2279,8 +2294,9 @@ retry:
amdgpu_display_stop_mc_access(adev, &save);
amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
}
-
+ amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2288,8 +2304,6 @@ retry:
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
- /* restore scratch */
- amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(adev);
@@ -2312,30 +2326,30 @@ retry:
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct fence *fence = NULL, *next = NULL;
+ struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't comleted\n");
break;
}
}
- fence_put(fence);
+ dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't comleted\n");
}
- fence_put(fence);
+ dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2485,9 +2499,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- adev->ddev->control->debugfs_root,
- adev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
adev->ddev->primary->debugfs_root,
adev->ddev->primary);
#endif
@@ -2502,9 +2513,6 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
for (i = 0; i < adev->debugfs_count; i++) {
drm_debugfs_remove_files(adev->debugfs[i].files,
adev->debugfs[i].num_files,
- adev->ddev->control);
- drm_debugfs_remove_files(adev->debugfs[i].files,
- adev->debugfs[i].num_files,
adev->ddev->primary);
}
#endif
@@ -2531,6 +2539,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2539,8 +2554,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
- if (sh_bank >= adev->gfx.config.max_sh_per_se ||
- se_bank >= adev->gfx.config.max_shader_engines)
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2587,10 +2602,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2609,6 +2659,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
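With this change the write path decodes the same bank/PG-lock bits from the file offset as the read path. A sketch of composing such an offset from userspace (illustrative: the debugfs path and the bank indices are made up; the bit layout follows the decode above):

	/* bits  0..17  register byte offset
	 * bit      23  take the PM mutex around the access
	 * bits 24..33  SE index        (0x3FF selects all/broadcast)
	 * bits 34..43  SH index        (0x3FF selects all/broadcast)
	 * bits 44..53  instance index  (0x3FF selects all/broadcast)
	 * bit      62  bank selection above is valid
	 */
	uint32_t value;
	uint64_t pos = (reg_offset & 0x3FFFF) |
		       ((uint64_t)(se_index & 0x3FF) << 24) |
		       ((uint64_t)0x3FF << 34) |	/* all SHs */
		       ((uint64_t)0x3FF << 44) |	/* all instances */
		       (1ULL << 62);

	pread(fd, &value, sizeof(value), pos);	/* fd: .../dri/<n>/amdgpu_regs */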
@@ -2871,6 +2929,116 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0x7F);
+ se = ((*pos >> 7) & 0xFF);
+ sh = ((*pos >> 15) & 0xFF);
+ cu = ((*pos >> 23) & 0xFF);
+ wave = ((*pos >> 31) & 0xFF);
+ simd = ((*pos >> 37) & 0xFF);
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0xFFF); /* in dwords */
+ se = ((*pos >> 12) & 0xFF);
+ sh = ((*pos >> 20) & 0xFF);
+ cu = ((*pos >> 28) & 0xFF);
+ wave = ((*pos >> 36) & 0xFF);
+ simd = ((*pos >> 44) & 0xFF);
+ thread = ((*pos >> 52) & 0xFF);
+ bank = ((*pos >> 60) & 1);
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[offset++];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
+
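The new amdgpu_gpr file packs its whole read request into the file offset as well. A sketch of composing it, following the decode above (the indices are illustrative and assumed to fit their 8-bit fields):

	/* bits  0..11  dword offset into the GPR file
	 * bits 12..19  SE      bits 20..27  SH      bits 28..35  CU
	 * bits 36..43  wave    bits 44..51  SIMD    bits 52..59  thread
	 * bit      60  bank: 0 = VGPRs, 1 = SGPRs
	 */
	uint32_t regs[16];
	uint64_t pos = (uint64_t)dword_offset |
		       ((uint64_t)se   << 12) | ((uint64_t)sh     << 20) |
		       ((uint64_t)cu   << 28) | ((uint64_t)wave   << 36) |
		       ((uint64_t)simd << 44) | ((uint64_t)thread << 52) |
		       ((uint64_t)bank << 60);

	pread(fd, regs, sizeof(regs), pos);	/* fd: .../dri/<n>/amdgpu_gpr */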
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2908,6 +3076,17 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3094,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2924,6 +3105,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..581601ca6b89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
- fence_put(f);
+ dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct fence **f)
+ struct dma_fence **f)
{
- struct fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
- struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
- struct drm_crtc *crtc = &amdgpuCrtc->base;
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
- if (amdgpuCrtc->enabled &&
+ if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
- amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -187,7 +187,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+ r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
DRM_ERROR("failed to pin new abo buffer before flip\n");
@@ -244,9 +244,9 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- fence_put(work->excl);
+ dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
- fence_put(work->shared[i]);
+ dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3..6ca0333ca4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
- for (i = 0; i < states->numEntries; i++) {
- if (i >= AMDGPU_MAX_VCE_LEVELS)
- break;
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
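This helper is meant to be plugged straight into the get_vce_clock_state hook that the reworked amdgpu_dpm.h below adds to struct amdgpu_dpm_funcs. A sketch of the wiring and of a caller walking the states; everything except amdgpu_get_vce_clock_state() and the hook itself is illustrative, and the evclk/ecclk fields are assumed to be carried over from the old VCE state struct:

	static const struct amdgpu_dpm_funcs example_dpm_funcs = {
		/* ... the other callbacks ... */
		.get_vce_clock_state = amdgpu_get_vce_clock_state,
	};

	static void example_dump_vce_states(struct amdgpu_device *adev)
	{
		struct amd_vce_state *state;
		unsigned i = 0;

		/* the helper returns NULL once idx passes num_of_vce_states */
		while ((state = amdgpu_get_vce_clock_state(adev, i++)))
			DRM_INFO("VCE state %u: evclk %u, ecclk %u\n",
				 i - 1, state->evclk, state->ecclk);
	}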
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619..955d6f21e2b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,453 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+enum amdgpu_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_EXTERNAL,
+ THERMAL_TYPE_EXTERNAL_GPIO,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+ THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+ THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+ AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+ AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+ AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+ AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+#define SCLK_DEEP_SLEEP_MASK 0x8
+
+struct amdgpu_ps {
+ u32 caps; /* vbios flags */
+ u32 class; /* vbios flags */
+ u32 class2; /* vbios flags */
+ /* UVD clocks */
+ u32 vclk;
+ u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
+ bool vce_active;
+ enum amd_vce_level vce_level;
+ /* asic priv */
+ void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+ /* thermal interrupt work */
+ struct work_struct work;
+ /* low temperature threshold */
+ int min_temp;
+ /* high temperature threshold */
+ int max_temp;
+ /* was last interrupt low to high or high to low */
+ bool high_to_low;
+ /* interrupt source */
+ struct amdgpu_irq_src irq;
+};
+
+enum amdgpu_clk_action
+{
+ AMDGPU_SCLK_UP = 1,
+ AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+ u32 sclk;
+ u32 mclk;
+ enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci;
+};
+
+struct amdgpu_clock_array {
+ u32 count;
+ u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+ u32 clk;
+ u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+ u32 count;
+ struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
+};
+
+struct amdgpu_cac_leakage_table {
+ u32 count;
+ union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+ u16 voltage;
+ u32 sclk;
+ u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+ u32 count;
+ struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+ u8 ppm_design;
+ u16 cpu_core_number;
+ u32 platform_tdp;
+ u32 small_ac_platform_tdp;
+ u32 platform_tdc;
+ u32 small_ac_platform_tdc;
+ u32 apu_tdp;
+ u32 dgpu_tdp;
+ u32 dgpu_ulv_power;
+ u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+ struct amdgpu_clock_array valid_sclk_values;
+ struct amdgpu_clock_array valid_mclk_values;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+ u32 mclk_sclk_ratio;
+ u32 sclk_mclk_delta;
+ u16 vddc_vddci_delta;
+ u16 min_vddc_for_pcie_gen2;
+ struct amdgpu_cac_leakage_table cac_leakage_table;
+ struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+ struct amdgpu_ppm_table *ppm_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+ u16 t_min;
+ u16 t_med;
+ u16 t_high;
+ u16 pwm_min;
+ u16 pwm_med;
+ u16 pwm_high;
+ u8 t_hyst;
+ u32 cycle_delay;
+ u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
+ bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+ AMDGPU_PCIE_GEN1 = 0,
+ AMDGPU_PCIE_GEN2 = 1,
+ AMDGPU_PCIE_GEN3 = 2,
+ AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+ AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+ AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+ AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+ int (*get_temperature)(struct amdgpu_device *adev);
+ int (*pre_set_power_state)(struct amdgpu_device *adev);
+ int (*set_power_state)(struct amdgpu_device *adev);
+ void (*post_set_power_state)(struct amdgpu_device *adev);
+ void (*display_configuration_changed)(struct amdgpu_device *adev);
+ u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+ u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+ void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+ void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ bool (*vblank_too_short)(struct amdgpu_device *adev);
+ void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+ void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+ void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+ void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+ u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+ int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+ int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*check_state_equal)(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal);
+
+ struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+ (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+ int num_ps;
+ /* current power state that is active */
+ struct amdgpu_ps *current_ps;
+ /* requested power state */
+ struct amdgpu_ps *requested_ps;
+ /* boot up power state */
+ struct amdgpu_ps *boot_ps;
+ /* default uvd power state */
+ struct amdgpu_ps *uvd_ps;
+ /* vce requirements */
+ u32 num_of_vce_states;
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+ enum amd_vce_level vce_level;
+ enum amd_pm_state_type state;
+ enum amd_pm_state_type user_state;
+ enum amd_pm_state_type last_state;
+ enum amd_pm_state_type last_user_state;
+ u32 platform_caps;
+ u32 voltage_response_time;
+ u32 backbias_response_time;
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ struct amdgpu_dpm_dynamic_state dyn_state;
+ struct amdgpu_dpm_fan fan;
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ u32 near_tdp_limit_adjusted;
+ u32 sq_ramping_threshold;
+ u32 cac_leakage;
+ u16 tdp_od_limit;
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+ bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+ bool vce_active;
+ /* thermal handling */
+ struct amdgpu_dpm_thermal thermal;
+ /* forced levels */
+ enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+ struct mutex mutex;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 default_sclk;
+ u32 default_mclk;
+ struct amdgpu_i2c_chan *i2c_bus;
+ /* internal thermal controller on rv6xx+ */
+ enum amdgpu_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
+ /* dpm */
+ bool dpm_enabled;
+ bool sysfs_initialized;
+ struct amdgpu_dpm dpm;
+ const struct firmware *fw; /* SMC firmware */
+ uint32_t fw_version;
+ const struct amdgpu_dpm_funcs *funcs;
+ uint32_t pcie_gen_mask;
+ uint32_t pcie_mlw_mask;
+ struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@@ -82,4 +529,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif
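
The block of amdgpu_dpm_* wrapper macros above hides two backends behind one call site: when powerplay is enabled the call goes through the pp_funcs table with pp_handle as context, otherwise it falls back to the legacy per-ASIC table in adev->pm.funcs (or to -EINVAL where no fallback exists). A minimal standalone sketch of that dispatch shape, using illustrative names rather than the driver's real types:

#include <stdio.h>

/* Illustrative stand-ins for the two backends; not the driver's real types. */
struct pp_funcs  { int (*get_temperature)(void *pp_handle); };
struct dpm_funcs { int (*get_temperature)(void *dev); };

struct device_ctx {
	int pp_enabled;              /* mirrors adev->pp_enabled          */
	void *pp_handle;             /* mirrors adev->powerplay.pp_handle */
	const struct pp_funcs  *pp;  /* mirrors adev->powerplay.pp_funcs  */
	const struct dpm_funcs *dpm; /* mirrors adev->pm.funcs            */
};

/* Same shape as the amdgpu_dpm_get_temperature() macro: take the
 * powerplay path when it is enabled, fall back to the legacy table. */
#define ctx_get_temperature(c) \
	((c)->pp_enabled ? \
	 (c)->pp->get_temperature((c)->pp_handle) : \
	 (c)->dpm->get_temperature((c)))

static int pp_temp(void *pp_handle) { (void)pp_handle; return 61; }
static int dpm_temp(void *dev)      { (void)dev;       return 58; }

static const struct pp_funcs  pp_table  = { .get_temperature = pp_temp  };
static const struct dpm_funcs dpm_table = { .get_temperature = dpm_temp };

int main(void)
{
	struct device_ctx c = { .pp_enabled = 1, .pp_handle = NULL,
				.pp = &pp_table, .dpm = &dpm_table };

	printf("powerplay path: %d C\n", ctx_get_temperature(&c));
	c.pp_enabled = 0;
	printf("legacy path:    %d C\n", ctx_get_temperature(&c));
	return 0;
}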
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..8cb937b2bfcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for memory query info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -85,12 +86,13 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
-int amdgpu_powercontainment = 1;
-int amdgpu_sclk_deep_sleep_en = 1;
+int amdgpu_no_evict = 0;
+int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -177,14 +182,14 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
-MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
-module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
-
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
-MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
-module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
+module_param_named(no_evict, amdgpu_no_evict, int, 0444);
+
+MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
+module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+ {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
@@ -479,12 +486,15 @@ amdgpu_pci_remove(struct pci_dev *pdev)
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = dev->dev_private;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_pci_remove(pdev);
+ amdgpu_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -735,8 +745,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
- amdgpu_sync_init();
- amdgpu_fence_slab_init();
+ int r;
+
+ r = amdgpu_sync_init();
+ if (r)
+ goto error_sync;
+
+ r = amdgpu_fence_slab_init();
+ if (r)
+ goto error_fence;
+
+ r = amd_sched_fence_slab_init();
+ if (r)
+ goto error_sched;
+
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -748,6 +770,15 @@ static int __init amdgpu_init(void)
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
+
+error_sched:
+ amdgpu_fence_slab_fini();
+
+error_fence:
+ amdgpu_sync_fini();
+
+error_sync:
+ return r;
}
static void __exit amdgpu_exit(void)
@@ -756,6 +787,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
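
The amdgpu_init() rework above replaces unchecked calls with the usual kernel goto-unwind pattern: each step that fails jumps to a label that releases only the resources set up before it, in reverse order. A compact, self-contained sketch of that control flow, with stub functions standing in for the real init/fini pairs:

#include <stdio.h>

/* Illustrative init/fini pairs standing in for amdgpu_sync_init(),
 * amdgpu_fence_slab_init() and amd_sched_fence_slab_init(). */
static int  sync_init(void)       { puts("sync init");       return 0; }
static void sync_fini(void)       { puts("sync fini");       }
static int  fence_slab_init(void) { puts("fence slab init"); return 0; }
static void fence_slab_fini(void) { puts("fence slab fini"); }
static int  sched_slab_init(void) { puts("sched slab init"); return -12; /* simulate -ENOMEM */ }

/* Same unwinding shape as the reworked amdgpu_init(): tear down only what
 * was already set up, in reverse order, then propagate the error. */
static int module_init_sketch(void)
{
	int r;

	r = sync_init();
	if (r)
		goto error_sync;

	r = fence_slab_init();
	if (r)
		goto error_fence;

	r = sched_slab_init();
	if (r)
		goto error_sched;

	return 0;

error_sched:
	fence_slab_fini();
error_fence:
	sync_fini();
error_sync:
	return r;
}

int main(void)
{
	printf("init returned %d\n", module_init_sketch());
	return 0;
}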
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9fb8aa4d6bae..24629bec181a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -75,27 +75,21 @@ amdgpufb_release(struct fb_info *info, int user)
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = amdgpufb_open,
.fb_release = amdgpufb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = 255;
break;
@@ -110,7 +104,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,20 +133,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@@ -176,7 +171,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+ ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
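
The amdgpu_align_pitch() change above moves the bytes-per-pixel handling into the helper: callers now pass cpp (DIV_ROUND_UP(bpp, 8)) and get the pitch back in bytes, instead of an aligned width in pixels that they had to multiply themselves. A worked example of the arithmetic; the pitch masks for cpp 2, 3 and 4 are assumptions chosen for illustration:

#include <stdio.h>

/* Simplified version of the reworked helper: the caller passes bytes per
 * pixel (cpp) and receives the pitch in bytes. */
static int align_pitch(int width, int cpp)
{
	int pitch_mask;

	switch (cpp) {
	case 1:  pitch_mask = 255; break;
	case 2:  pitch_mask = 127; break;
	case 3:
	case 4:  pitch_mask = 63;  break;
	default: pitch_mask = 0;   break;
	}

	width += pitch_mask;
	width &= ~pitch_mask;      /* round the width up to the alignment */
	return width * cpp;        /* pitch in bytes, not pixels          */
}

int main(void)
{
	/* 1366 px wide, 32 bpp -> cpp = (32 + 7) / 8 = 4 bytes per pixel */
	int cpp = (32 + 7) / 8;

	/* 1366 rounds up to 1408 pixels, so the pitch is 1408 * 4 = 5632 */
	printf("pitch = %d bytes\n", align_pitch(1366, cpp));
	return 0;
}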
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..7b60fb79c3a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
- struct fence base;
+ struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@@ -68,13 +68,14 @@ int amdgpu_fence_slab_init(void)
void amdgpu_fence_slab_fini(void)
{
+ rcu_barrier();
kmem_cache_destroy(amdgpu_fence_slab);
}
/*
* Cast helper
*/
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -130,11 +131,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence *old, **ptr;
+ struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +144,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
- fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ dma_fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@@ -155,12 +156,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
- if (old && !fence_is_signaled(old)) {
+ if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
- fence_wait(old, false);
+ dma_fence_wait(old, false);
}
- rcu_assign_pointer(*ptr, fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@@ -211,7 +212,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -224,13 +225,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = fence_signal(fence);
+ r = dma_fence_signal(fence);
if (!r)
- FENCE_TRACE(fence, "signaled from irq context\n");
+ DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
- fence_put(fence);
+ dma_fence_put(fence);
} while (last_seq != seq);
}
@@ -260,7 +261,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
int r;
if (!seq)
@@ -269,14 +270,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
- if (!fence || !fence_get_rcu(fence)) {
+ if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
- r = fence_wait(fence, false);
- fence_put(fence);
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -381,24 +382,27 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission,
- timeout, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
+ /* No need to setup the GPU scheduler for KIQ ring */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+ timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+ if (timeout == 0) {
+ /*
+ * FIXME:
+ * Delayed workqueue cannot use it directly,
+ * so the scheduler will not use delayed workqueue if
+ * MAX_SCHEDULE_TIMEOUT is set.
+ * Currently keep it simple and silly.
+ */
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+ r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission,
+ timeout, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
}
return 0;
@@ -452,7 +456,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[j]);
+ dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@@ -541,12 +545,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@@ -560,7 +564,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +572,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -582,7 +586,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@@ -595,16 +599,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
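
The amdgpu_fence.c hunks above are part of the tree-wide struct fence -> struct dma_fence rename: every type, helper and trace macro gains a dma_ prefix while the call pattern (init, get, signal, wait, put) keeps its shape. A minimal kernel-context sketch of that pattern with the renamed API; it would have to live in a module, and the names, context and sequence numbers are made up rather than the driver's own fence implementation:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *demo_get_driver_name(struct dma_fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
	return "demo-ring";
}

static bool demo_enable_signaling(struct dma_fence *f)
{
	return true;	/* nothing to arm; the fence is signaled explicitly */
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name   = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
	.enable_signaling  = demo_enable_signaling,
	.wait              = dma_fence_default_wait,
	/* default .release frees the fence with kfree_rcu() */
};

static DEFINE_SPINLOCK(demo_fence_lock);

static int demo_fence_roundtrip(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;

	/* context/seqno are invented here; amdgpu derives them per ring */
	dma_fence_init(f, &demo_fence_ops, &demo_fence_lock,
		       dma_fence_context_alloc(1), 1);

	dma_fence_get(f);		/* reference held by a "waiter"      */
	dma_fence_signal(f);		/* what amdgpu_fence_process() does  */
	dma_fence_wait(f, false);	/* already signaled, returns at once */
	dma_fence_put(f);		/* drop the waiter's reference       */
	dma_fence_put(f);		/* drop the creator's reference      */
	return 0;
}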
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b..964d2a946ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3b454e..cd62f6ffde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = abo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- if (timeout == 0)
- ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
- else
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@@ -470,6 +469,16 @@ out:
return r;
}
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+ unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+
+ return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@@ -480,7 +489,8 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va, uint32_t operation)
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
- list_for_each_entry(entry, &duplicates, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
+ r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+ NULL);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@@ -538,8 +544,6 @@ error_print:
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct ttm_validate_buffer tv, tv_pd;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
@@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
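
The VA-update rework above replaces the open-coded walk over the duplicates list with amdgpu_vm_validate_pt_bos() plus a small callback, amdgpu_gem_va_check(), which bails out with -ERESTARTSYS as soon as a page-table BO turns out to have been evicted to system memory. A standalone sketch of that callback-driven validation, with made-up types in place of the driver's:

#include <stdio.h>

/* Illustrative stand-ins: a "BO" with its current memory domain, and a
 * validator that walks a list and calls a per-BO check, the way
 * amdgpu_vm_validate_pt_bos() now calls amdgpu_gem_va_check(). */
enum domain { DOMAIN_VRAM, DOMAIN_GTT, DOMAIN_CPU };

struct bo { enum domain domain; };

static int validate_bos(struct bo *bos, int count,
			int (*check)(void *param, struct bo *bo), void *param)
{
	int i, r;

	for (i = 0; i < count; i++) {
		r = check(param, &bos[i]);
		if (r)
			return r;	/* stop at the first failing BO */
	}
	return 0;
}

/* Same policy as amdgpu_gem_va_check(): anything swapped out to system
 * memory means "abort now and let the next command submission handle it". */
static int va_check(void *param, struct bo *bo)
{
	(void)param;
	return bo->domain == DOMAIN_CPU ? -512 /* -ERESTARTSYS */ : 0;
}

int main(void)
{
	struct bo page_tables[] = {
		{ DOMAIN_VRAM }, { DOMAIN_GTT }, { DOMAIN_CPU },
	};

	printf("validate returned %d\n",
	       validate_bos(page_tables, 3, va_check, NULL));
	return 0;
}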
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c70..01a42b6a69a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09..e02044086445 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+ unsigned max_sh);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778..00f46b0e076d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -164,10 +164,13 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
+ if (!node) {
+ r = -ENOMEM;
+ goto err_out;
+ }
node->start = AMDGPU_BO_INVALID_OFFSET;
+ node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -175,12 +178,20 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) {
kfree(node);
mem->mm_node = NULL;
+ r = 0;
+ goto err_out;
}
} else {
mem->start = node->start;
}
return 0;
+err_out:
+ spin_lock(&mgr->lock);
+ mgr->available += mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ return r;
}
/**
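
The amdgpu_gtt_mgr_new() hunk above adds a rollback path: the GTT pages were already subtracted from mgr->available before the node allocation, so every error exit has to add them back under the lock. A small self-contained model of that reserve-then-roll-back accounting, with a made-up structure and hard-coded error codes:

#include <stdio.h>

struct gtt_mgr { long available; };

static int gtt_mgr_new(struct gtt_mgr *mgr, long num_pages, int fail_alloc)
{
	int r = 0;

	if (mgr->available < num_pages)
		return -28;			/* -ENOSPC */
	mgr->available -= num_pages;		/* reserve up front */

	if (fail_alloc) {			/* e.g. kzalloc() failed */
		r = -12;			/* -ENOMEM */
		goto err_out;
	}
	return 0;

err_out:
	mgr->available += num_pages;		/* give the reservation back */
	return r;
}

int main(void)
{
	struct gtt_mgr mgr = { .available = 1024 };

	printf("ok path:   r=%d available=%ld\n",
	       gtt_mgr_new(&mgr, 256, 0), mgr.available);
	printf("fail path: r=%d available=%ld\n",
	       gtt_mgr_new(&mgr, 256, 1), mgr.available);
	return 0;
}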
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f)
+ struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f)
+ struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
- num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+ alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
- if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..fb902932f571 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
adev->irq.installed = false;
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
return r;
}
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
@@ -422,15 +424,6 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return 0;
}
-bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type)
-{
- if ((type >= src->num_types) || !src->enabled_types)
- return false;
- return atomic_inc_return(&src->enabled_types[type]) == 1;
-}
-
/**
* amdgpu_irq_put - disable interrupt
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index f016464035b8..1642f4108297 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -88,9 +88,6 @@ int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
-bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type);
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct fence *f;
+ struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f)
+ struct dma_fence **f)
{
int r;
job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
- *f = fence_get(&job->base.s_fence->finished);
+ *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
- struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
- fence_put(job->fence);
- job->fence = fence_get(fence);
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..9af87eaf8ee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((amdgpu_runtime_pm != 0) &&
amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0))
flags |= AMD_IS_PX;
@@ -306,10 +308,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid) {
+ ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +347,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid &&
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -411,6 +413,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_MEMORY: {
+ struct drm_amdgpu_memory_info mem;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.usable_heap_size =
+ adev->mc.real_vram_size - adev->vram_pin_size;
+ mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->mc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+ adev->mc.visible_vram_size -
+ (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ atomic64_read(&adev->vram_vis_usage);
+ mem.cpu_accessible_vram.max_allocation =
+ mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+ mem.gtt.total_heap_size = adev->mc.gtt_size;
+ mem.gtt.usable_heap_size =
+ adev->mc.gtt_size - adev->gart_pin_size;
+ mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+ return copy_to_user(out, &mem,
+ min((size_t)size, sizeof(mem)))
+ ? -EFAULT : 0;
+ }
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@@ -459,10 +491,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* return all clocks in KHz */
dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
- dev_info.max_engine_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
- dev_info.max_memory_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+ dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->pm.default_sclk * 10;
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (amdgpu_sriov_vf(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,50 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+ unsigned i;
+ struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ struct amd_vce_state *vce_state;
+
+ for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+ vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+ if (vce_state) {
+ vce_clk_table.entries[i].sclk = vce_state->sclk;
+ vce_clk_table.entries[i].mclk = vce_state->mclk;
+ vce_clk_table.entries[i].eclk = vce_state->evclk;
+ vce_clk_table.num_valid_entries++;
+ }
+ }
+
+ return copy_to_user(out, &vce_clk_table,
+ min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_VBIOS: {
+ uint32_t bios_size = adev->bios_size;
+
+ switch (info->vbios_info.type) {
+ case AMDGPU_INFO_VBIOS_SIZE:
+ return copy_to_user(out, &bios_size,
+ min((size_t)size, sizeof(bios_size)))
+ ? -EFAULT : 0;
+ case AMDGPU_INFO_VBIOS_IMAGE: {
+ uint8_t *bios;
+ uint32_t bios_offset = info->vbios_info.offset;
+
+ if (bios_offset >= bios_size)
+ return -EINVAL;
+
+ bios = adev->bios + bios_offset;
+ return copy_to_user(out, bios,
+ min((size_t)size, (size_t)(bios_size - bios_offset)))
+ ? -EFAULT : 0;
+ }
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n",
+ info->vbios_info.type);
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -775,6 +851,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
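
The new AMDGPU_INFO_MEMORY query above reports, for each heap (VRAM, CPU-visible VRAM, GTT), the total size, the usable size after subtracting pinned memory, the current usage, and a suggested per-allocation cap of three quarters of the usable size. A small worked example of that arithmetic with invented sizes:

#include <stdio.h>
#include <stdint.h>

struct heap_info {
	uint64_t total_heap_size;
	uint64_t usable_heap_size;
	uint64_t heap_usage;
	uint64_t max_allocation;
};

/* usable = total - pinned; max_allocation = 3/4 of usable */
static struct heap_info fill_heap(uint64_t total, uint64_t pinned, uint64_t used)
{
	struct heap_info h;

	h.total_heap_size  = total;
	h.usable_heap_size = total - pinned;
	h.heap_usage       = used;
	h.max_allocation   = h.usable_heap_size * 3 / 4;
	return h;
}

int main(void)
{
	uint64_t mib = 1024 * 1024;
	/* 4 GiB of VRAM with 256 MiB pinned and 1 GiB currently in use */
	struct heap_info vram = fill_heap(4096 * mib, 256 * mib, 1024 * mib);

	printf("usable:    %llu MiB\n",
	       (unsigned long long)(vram.usable_heap_size / mib));
	printf("max alloc: %llu MiB\n",
	       (unsigned long long)(vram.max_allocation / mib));
	return 0;
}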
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b..202b4176b74e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -271,8 +271,6 @@ struct amdgpu_display_funcs {
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
- /* is dce hung */
- bool (*is_display_hung)(struct amdgpu_device *adev);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@@ -341,8 +339,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
- struct hrtimer vblank_timer;
- enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +409,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+ /* for virtual dce */
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f3efb1c5dae9..bf79b73e1538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&bo->adev->shadow_list_lock);
+ mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
- mutex_unlock(&bo->adev->shadow_list_lock);
+ mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@@ -121,20 +122,14 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned lpfn = 0;
- if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
- !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- places[c].fpfn = visible_pfn;
- places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
- TTM_PL_FLAG_TOPDOWN;
- c++;
- }
+ /* This forces a reallocation if the flag wasn't set before */
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +200,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(abo->adev, &abo->placement,
- abo->placements, domain, abo->flags);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+ amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+ domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +242,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +349,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
- bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -374,39 +371,36 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
+
+ if (!resv) {
+ bool locked;
+
+ reservation_object_init(&bo->tbo.ttm_resv);
+ locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
+ WARN_ON(!locked);
+ }
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- if (unlikely(r != 0)) {
+ acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
+ &amdgpu_ttm_bo_destroy);
+ if (unlikely(r != 0))
return r;
- }
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
- struct fence *fence;
-
- if (adev->mman.buffer_funcs_ring == NULL ||
- !adev->mman.buffer_funcs_ring->ready) {
- r = -EBUSY;
- goto fail_free;
- }
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto fail_free;
+ struct dma_fence *fence;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ if (unlikely(r))
goto fail_unreserve;
- amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
- amdgpu_bo_unreserve(bo);
- fence_put(bo->tbo.moving);
- bo->tbo.moving = fence_get(fence);
- fence_put(fence);
+ dma_fence_put(bo->tbo.moving);
+ bo->tbo.moving = dma_fence_get(fence);
+ dma_fence_put(fence);
}
+ if (!resv)
+ ww_mutex_unlock(&bo->tbo.resv->lock);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
@@ -414,8 +408,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return 0;
fail_unreserve:
- amdgpu_bo_unreserve(bo);
-fail_free:
+ ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
}
@@ -491,7 +484,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -523,7 +516,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -616,6 +609,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@@ -643,18 +637,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
- bo->adev->mc.visible_vram_size)) {
+ adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
- bo->adev->mc.visible_vram_size))
+ adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
- lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +665,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@@ -682,11 +678,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@@ -700,10 +696,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
- dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -715,16 +712,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@@ -854,6 +851,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -861,21 +859,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(abo->adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@@ -884,13 +882,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
- adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ /* TODO: figure out how to map scattered VRAM to the CPU */
+ if ((offset + size) <= adev->mc.visible_vram_size &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -898,6 +897,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@@ -931,7 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@@ -959,6 +959,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
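
Several hunks above revolve around AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS: with vram_page_split, VRAM buffers may now be scattered across multiple pieces, so pinning forces the flag and amdgpu_bo_gpu_offset() warns when a non-contiguous VRAM BO is asked for a single GPU address. A toy model of that invariant; the flag value, struct layout and error return are inventions of the sketch (the driver itself only warns):

#include <stdio.h>
#include <stdint.h>

#define CREATE_VRAM_CONTIGUOUS (1u << 0)

struct bo {
	uint32_t flags;
	uint64_t offset;	/* start of the (single) VRAM piece */
	int      in_vram;
};

/* A single GPU offset only makes sense for a contiguous VRAM buffer. */
static int bo_gpu_offset(const struct bo *bo, uint64_t *offset)
{
	if (bo->in_vram && !(bo->flags & CREATE_VRAM_CONTIGUOUS)) {
		fprintf(stderr, "WARN: scattered VRAM BO has no single offset\n");
		return -22;	/* -EINVAL in this sketch */
	}
	*offset = bo->offset;
	return 0;
}

int main(void)
{
	struct bo scanout   = { .flags = CREATE_VRAM_CONTIGUOUS,
				.offset = 0x100000, .in_vram = 1 };
	struct bo scattered = { .flags = 0, .offset = 0, .in_vram = 1 };
	uint64_t off;

	if (!bo_gpu_offset(&scanout, &off))
		printf("scanout offset: 0x%llx\n", (unsigned long long)off);
	bo_gpu_offset(&scattered, &off);	/* hits the warning path */
	return 0;
}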
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+ dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence, bool direct);
+ struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct);
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct fence *fence);
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc88..723ae682bf25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -737,6 +737,21 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return sprintf(buf, "%i\n", speed);
}
+static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 speed;
+
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%i\n", speed);
+}
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -744,6 +759,7 @@ static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -753,6 +769,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
@@ -804,6 +821,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
+ /* requires powerplay */
+ if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
+ return 0;
+
return effective_mode;
}
@@ -986,10 +1007,10 @@ restart_search:
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
- int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
+ bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1009,46 +1030,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != adev->pm.dpm.vce_active)
- goto force;
- if (adev->flags & AMD_IS_APU) {
- /* for APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs ==
- adev->pm.dpm.current_active_crtcs) {
- return;
- } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
- (adev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
-
-force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1040,21 @@ force:
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
+ amdgpu_dpm_display_configuration_changed(adev);
+
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
+ if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
+ equal = false;
- /* wait for the rings to drain */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ if (equal)
+ return;
- /* program the new power state */
amdgpu_dpm_set_power_state(adev);
-
- /* update current power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
amdgpu_dpm_post_set_power_state(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -1135,7 +1106,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1247,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ int i = 0;
if (!adev->pm.dpm_enabled)
return;
- if (adev->pp_enabled) {
- int i = 0;
+ amdgpu_display_bandwidth_update(adev);
- amdgpu_display_bandwidth_update(adev);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+ if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
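
The pm changes add a fan1_input hwmon attribute that reports the fan speed in RPM through amdgpu_dpm_get_fan_speed_rpm(). A userspace read of such an attribute could look like the sketch below; the hwmon path is an assumption and varies per system:

#include <stdio.h>

int main(void)
{
	/* Path is an example only; the hwmon index depends on the system. */
	const char *path = "/sys/class/hwmon/hwmon0/fan1_input";
	FILE *f = fopen(path, "r");
	long rpm;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &rpm) == 1)
		printf("fan speed: %ld RPM\n", rpm);
	fclose(f);
	return 0;
}
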
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa7..fc592c2b0e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -155,9 +155,6 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
- if (adev->pp_enabled)
- adev->pm.dpm_enabled = true;
-
return ret;
}
@@ -187,6 +184,9 @@ static int amdgpu_pp_hw_init(void *handle)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
+ if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
+ adev->pm.dpm_enabled = true;
+
return ret;
}
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+};
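
amdgpu_pp_ip_funcs becomes static and the powerplay block is instead exported as a versioned amdgpu_ip_block_version descriptor. A self-contained sketch of that descriptor-table pattern (all types and values below are illustrative, not the driver's own):

#include <stdio.h>

struct ip_funcs {
	const char *name;
	int (*hw_init)(void *handle);
};

struct ip_block_version {
	int type;
	unsigned major, minor, rev;
	const struct ip_funcs *funcs;
};

static int dummy_hw_init(void *handle)
{
	(void)handle;
	return 0;
}

static const struct ip_funcs smc_funcs = {
	.name = "powerplay",
	.hw_init = dummy_hw_init,
};

static const struct ip_block_version smc_block = {
	.type = 1, .major = 1, .minor = 0, .rev = 0,
	.funcs = &smc_funcs,
};

int main(void)
{
	const struct ip_block_version *blocks[] = { &smc_block };

	for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		printf("%s v%u.%u.%u init -> %d\n",
		       blocks[i]->funcs->name, blocks[i]->major,
		       blocks[i]->minor, blocks[i]->rev,
		       blocks[i]->funcs->hw_init(NULL));
	return 0;
}
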
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99..c0c4bfdcdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
*
*/
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 7700dc22f243..3826d5aea0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret = 0;
+ long ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+
/* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
amdgpu_bo_unreserve(bo);
return ret;
}
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
return;
amdgpu_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
}
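
The prime path now waits for all shared fences before pinning an imported BO and tracks sharing with prime_shared_count. A toy pin/unpin counter showing the same bookkeeping (userspace sketch, not the kernel API):

#include <assert.h>
#include <stdio.h>

struct toy_bo {
	unsigned pin_count;
	unsigned prime_shared_count;
};

static int toy_pin(struct toy_bo *bo)
{
	bo->pin_count++;
	bo->prime_shared_count++;	/* only bumped when the pin succeeded */
	return 0;
}

static void toy_unpin(struct toy_bo *bo)
{
	assert(bo->pin_count);
	bo->pin_count--;
	if (bo->prime_shared_count)	/* never underflow the share count */
		bo->prime_shared_count--;
}

int main(void)
{
	struct toy_bo bo = { 0, 1 };	/* imported BOs start with one share */

	toy_pin(&bo);
	toy_unpin(&bo);
	printf("pins=%u shares=%u\n", bo.pin_count, bo.prime_shared_count);
	return 0;
}
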
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62..4c992826d2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->align_mask)
- ib->ptr[ib->length_dw++] = ring->nop;
+ while (ib->length_dw & ring->funcs->align_mask)
+ ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
- count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
- count %= ring->align_mask + 1;
+ count = ring->funcs->align_mask + 1 -
+ (ring->wptr & ring->funcs->align_mask);
+ count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type)
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type)
{
int r;
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
- ring->align_mask = align_mask;
- ring->nop = nop;
- ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
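
align_mask, nop and the ring type move from struct amdgpu_ring into the shared const amdgpu_ring_funcs, but the padding arithmetic itself is unchanged. The pad-count formula in isolation:

#include <stdint.h>
#include <stdio.h>

/* Number of NOP dwords needed to bring wptr up to the fetch alignment. */
static uint32_t pad_count(uint32_t wptr, uint32_t align_mask)
{
	uint32_t count = align_mask + 1 - (wptr & align_mask);

	return count % (align_mask + 1);	/* already aligned -> 0 NOPs */
}

int main(void)
{
	const uint32_t align_mask = 0x7;	/* pad to an 8-dword fetch size */

	for (uint32_t wptr = 8; wptr <= 16; wptr++)
		printf("wptr %2u -> pad %u\n", wptr, pad_count(wptr, align_mask));
	return 0;
}
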
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 000000000000..574f0b79c690
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS 16
+#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_COMPUTE_RINGS 8
+#define AMDGPU_MAX_VCE_RINGS 3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+
+enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA,
+ AMDGPU_RING_TYPE_UVD,
+ AMDGPU_RING_TYPE_VCE,
+ AMDGPU_RING_TYPE_KIQ
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint32_t sync_seq;
+ atomic_t last_seq;
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+ struct timer_list fallback_timer;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct dma_fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+ enum amdgpu_ring_type type;
+ uint32_t align_mask;
+ u32 nop;
+
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct amdgpu_ring *ring);
+ u32 (*get_wptr)(struct amdgpu_ring *ring);
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+ void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+ /* testing functions */
+ int (*test_ring)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler sched;
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr_offs;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned max_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+ u32 queue;
+ struct amdgpu_bo *mqd_obj;
+ u32 doorbell_index;
+ bool use_doorbell;
+ unsigned wptr_offs;
+ unsigned fence_offs;
+ uint64_t current_ctx;
+ char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
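
The new header keeps the ring buffer as a power-of-two array addressed through ptr_mask. A minimal circular-write sketch built on the same masking idea (sizes and values below are made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 8				/* must be a power of two */

struct toy_ring {
	uint32_t buf[RING_DWORDS];
	uint32_t wptr;
	uint32_t ptr_mask;			/* RING_DWORDS - 1 */
};

static void toy_ring_write(struct toy_ring *ring, uint32_t v)
{
	ring->buf[ring->wptr & ring->ptr_mask] = v;
	ring->wptr++;				/* wraps naturally via the mask */
}

int main(void)
{
	struct toy_ring ring = { .wptr = 0, .ptr_mask = RING_DWORDS - 1 };

	for (uint32_t i = 0; i < 10; i++)	/* wraps after 8 writes */
		toy_ring_write(&ring, 0xDEAD0000u | i);

	for (uint32_t i = 0; i < RING_DWORDS; i++)
		printf("slot %" PRIu32 ": 0x%08" PRIx32 "\n", i, ring.buf[i]);
	return 0;
}
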
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..de9f919ae336 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- fence_put(sa_bo->fence);
+ dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(sa_bo->fence)) {
+ !dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct fence **fences,
+ struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(sa_bo->fence)) {
+ if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@@ -327,9 +327,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return -EINVAL;
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if ((*sa_bo) == NULL) {
+ if (!(*sa_bo))
return -ENOMEM;
- }
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
@@ -356,14 +355,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
- fences[count++] = fence_get(fences[i]);
+ fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
- t = fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = dma_fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
for (i = 0; i < count; ++i)
- fence_put(fences[i]);
+ dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(fence)) {
+ if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
- (*sa_bo)->fence = fence_get(fence);
+ (*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
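
amdgpu_sa still buckets unsignaled suballocations into per-context free lists via fence->context modulo the number of lists; only the fence type and helpers change here. The bucketing in isolation (the list count is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM_FENCE_LISTS 32	/* stands in for AMDGPU_SA_NUM_FENCE_LISTS */

int main(void)
{
	/* Fence contexts are just monotonically allocated 64-bit ids. */
	uint64_t contexts[] = { 3, 35, 64, 1000 };

	for (unsigned i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++)
		printf("context %llu -> flist[%llu]\n",
		       (unsigned long long)contexts[i],
		       (unsigned long long)(contexts[i] % NUM_FENCE_LISTS));
	return 0;
}
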
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
- struct fence *fence;
+ struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+ struct dma_fence *fence)
{
- if (*keep && fence_is_later(*keep, fence))
+ if (*keep && dma_fence_is_later(*keep, fence))
return;
- fence_put(*keep);
- *keep = fence_get(fence);
+ dma_fence_put(*keep);
+ *keep = dma_fence_get(fence);
}
/**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f)
+ struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ e->fence = dma_fence_get(f);
return 0;
}
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
+ struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
- if (fence_is_signaled(f)) {
+ if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
- fence_put(f);
+ dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
* Get and removes the next fence from the sync object not signaled yet.
*/
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- struct fence *f;
+ struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!fence_is_signaled(f))
+ if (!dma_fence_is_signaled(f))
return f;
- fence_put(f);
+ dma_fence_put(f);
}
return NULL;
}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
- fence_put(e->fence);
+ dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
- fence_put(sync->last_vm_update);
+ dma_fence_put(sync->last_vm_update);
}
/**
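
amdgpu_sync_keep_later() keeps whichever of two fences completes later and drops the reference to the one it replaces. A userspace sketch of that keep-later pattern, using plain sequence numbers in place of dma_fence_is_later():

#include <stdio.h>

struct toy_fence { unsigned long seqno; };

static int is_later(const struct toy_fence *a, const struct toy_fence *b)
{
	return a->seqno > b->seqno;	/* stand-in for dma_fence_is_later() */
}

static void keep_later(const struct toy_fence **keep,
		       const struct toy_fence *fence)
{
	if (*keep && is_later(*keep, fence))
		return;			/* existing fence is already the later one */
	*keep = fence;			/* the real code also moves the reference */
}

int main(void)
{
	struct toy_fence a = { .seqno = 10 }, b = { .seqno = 7 };
	const struct toy_fence *keep = NULL;

	keep_later(&keep, &a);
	keep_later(&keep, &b);		/* ignored, a is later */
	printf("kept seqno %lu\n", keep->seqno);
	return 0;
}
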
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 000000000000..605be266e07f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+ DECLARE_HASHTABLE(fences, 4);
+ struct dma_fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+ struct amdgpu_sync *sync,
+ struct reservation_object *resv,
+ void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -216,7 +216,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..bc70f80260d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,7 +34,6 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
-#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@@ -51,16 +50,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
- struct amdgpu_mman *mman;
- struct amdgpu_device *adev;
-
- mman = container_of(bdev, struct amdgpu_mman, bdev);
- adev = container_of(mman, struct amdgpu_device, mman);
- return adev;
-}
-
/*
* Global memory.
@@ -75,7 +64,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
-int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@@ -150,7 +139,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -168,7 +157,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +184,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@@ -213,7 +203,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +219,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
- abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@@ -260,63 +250,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem,
+ uint64_t *addr)
{
- struct amdgpu_device *adev;
- struct amdgpu_ring *ring;
- uint64_t old_start, new_start;
- struct fence *fence;
int r;
- adev = amdgpu_get_adev(bo->bdev);
- ring = adev->mman.buffer_funcs_ring;
-
- switch (old_mem->mem_type) {
+ switch (mem->mem_type) {
case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, old_mem);
+ r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+ *addr = mm_node->start << PAGE_SHIFT;
+ *addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, new_mem);
- if (r)
- return r;
- case TTM_PL_VRAM:
- new_start = (u64)new_mem->start << PAGE_SHIFT;
- new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ struct drm_mm_node *old_mm, *new_mm;
+ uint64_t old_start, old_size, new_start, new_size;
+ unsigned long num_pages;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ old_mm = old_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+ if (r)
+ return r;
+ old_size = old_mm->size;
+
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence, false);
+ new_mm = new_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
+ new_size = new_mm->size;
+
+ num_pages = new_mem->num_pages;
+ while (num_pages) {
+ unsigned long cur_pages = min(old_size, new_size);
+ struct dma_fence *next;
+
+ r = amdgpu_copy_buffer(ring, old_start, new_start,
+ cur_pages * PAGE_SIZE,
+ bo->resv, &next, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ num_pages -= cur_pages;
+ if (!num_pages)
+ break;
+
+ old_size -= cur_pages;
+ if (!old_size) {
+ r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+ &old_start);
+ if (r)
+ goto error;
+ old_size = old_mm->size;
+ } else {
+ old_start += cur_pages * PAGE_SIZE;
+ }
+
+ new_size -= cur_pages;
+ if (!new_size) {
+ r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+ &new_start);
+ if (r)
+ goto error;
+
+ new_size = new_mm->size;
+ } else {
+ new_start += cur_pages * PAGE_SIZE;
+ }
+ }
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
- fence_put(fence);
+ dma_fence_put(fence);
+ return r;
+
+error:
+ if (fence)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
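
The reworked amdgpu_move_blit() no longer assumes contiguous VRAM: it walks the source and destination drm_mm node lists in parallel and copies min(remaining in source node, remaining in destination node) per step. The same walk as a standalone program (sizes in pages, values made up):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Source/destination scatter lists, in pages; totals must match. */
	unsigned long old_nodes[] = { 4, 2, 6 };
	unsigned long new_nodes[] = { 3, 3, 3, 3 };
	unsigned long num_pages = 12;
	unsigned oi = 0, ni = 0;
	unsigned long old_left = old_nodes[0], new_left = new_nodes[0];

	while (num_pages) {
		unsigned long cur = min_ul(old_left, new_left);

		printf("copy %lu pages (src node %u, dst node %u)\n", cur, oi, ni);
		num_pages -= cur;
		if (!num_pages)
			break;

		old_left -= cur;
		if (!old_left)
			old_left = old_nodes[++oi];	/* next source node */

		new_left -= cur;
		if (!new_left)
			new_left = new_nodes[++ni];	/* next destination node */
	}
	return 0;
}
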
@@ -332,7 +374,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -379,7 +421,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -422,7 +464,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@@ -475,7 +517,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -607,7 +649,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@@ -639,7 +681,7 @@ release_sg:
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -799,7 +841,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -843,7 +885,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -889,7 +931,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -1012,7 +1054,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1071,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@@ -1060,12 +1102,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ /* Check each drm MM node individually */
+ while (num_pages) {
+ if (place->fpfn < (node->start + node->size) &&
+ !(place->lpfn && place->lpfn <= node->start))
+ return true;
+
+ num_pages -= node->size;
+ ++node;
+ }
+
+ return false;
+ }
+
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
+ .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@@ -1083,6 +1150,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+ }
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@@ -1119,7 +1190,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -1247,7 +1319,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1294,7 +1366,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
- job->fence = fence_get(*fence);
+ job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@@ -1313,28 +1385,40 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct reservation_object *resv,
- struct fence **fence)
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct dma_fence **fence)
{
- struct amdgpu_device *adev = bo->adev;
- struct amdgpu_job *job;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint32_t max_bytes, byte_count;
- uint64_t dst_offset;
+ struct drm_mm_node *mm_node;
+ unsigned long num_pages;
unsigned int num_loops, num_dw;
- unsigned int i;
+
+ struct amdgpu_job *job;
int r;
- byte_count = bo->tbo.num_pages << PAGE_SHIFT;
- max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
- num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+ if (!ring->ready) {
+ DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
+ num_pages = bo->tbo.num_pages;
+ mm_node = bo->tbo.mem.mm_node;
+ num_loops = 0;
+ while (num_pages) {
+ uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+
+ num_loops += DIV_ROUND_UP(byte_count, max_bytes);
+ num_pages -= mm_node->size;
+ ++mm_node;
+ }
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
/* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw += 64;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
@@ -1342,28 +1426,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
}
}
- dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
- for (i = 0; i < num_loops; i++) {
- uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+ num_pages = bo->tbo.num_pages;
+ mm_node = bo->tbo.mem.mm_node;
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_offset, cur_size_in_bytes);
+ while (num_pages) {
+ uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+ uint64_t dst_addr;
- dst_offset += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
+ r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
+ &bo->tbo.mem, &dst_addr);
+ if (r)
+ return r;
+
+ while (byte_count) {
+ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_addr, cur_size_in_bytes);
+
+ dst_addr += cur_size_in_bytes;
+ byte_count -= cur_size_in_bytes;
+ }
+
+ num_pages -= mm_node->size;
+ ++mm_node;
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
@@ -1554,8 +1653,3 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
-
-u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
-{
- return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
-}
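
amdgpu_fill_buffer() is reworked the same way: the job is sized by summing DIV_ROUND_UP(node bytes, fill_max_bytes) over every drm_mm node rather than over one contiguous range. The sizing step on its own (fill limit and node sizes are assumed values):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Node sizes in pages and a per-command fill limit. */
	uint64_t node_pages[] = { 100, 7, 2048 };
	uint32_t max_bytes = 0x1fffff;		/* ~2 MiB per fill packet */
	unsigned num_loops = 0;

	for (unsigned i = 0; i < sizeof(node_pages) / sizeof(node_pages[0]); i++) {
		uint64_t byte_count = node_pages[i] << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
	}
	printf("fill commands needed: %u\n", num_loops);
	return 0;
}
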
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78..0f0b38191fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+ return 0;
+
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ uint64_t mc_addr, void *kptr)
+{
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ const struct common_firmware_header *comm_hdr = NULL;
+ uint8_t* src_addr = NULL;
+ uint8_t* dst_addr = NULL;
+
+ if (NULL == ucode->fw)
+ return 0;
+
+ comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ dst_addr = ucode->kaddr +
+ ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+ PAGE_SIZE);
+ src_addr = (uint8_t *)ucode->fw->data +
+ le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+ (le32_to_cpu(header->jt_offset) * 4);
+ memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+ return 0;
+}
+
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
- err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+ fw_buf_ptr + fw_offset);
+ fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+ }
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
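
For the MEC microcode, amdgpu_ucode_patch_jt() copies the jump table to the page-aligned offset right after the microcode image inside the firmware BO. The offset arithmetic as a self-contained example (all header values invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Pretend firmware blob: the header says where the code and JT live. */
	uint32_t ucode_offset = 256;		/* ucode_array_offset_bytes */
	uint32_t ucode_size   = 5000;		/* ucode_size_bytes         */
	uint32_t jt_offset    = 1250;		/* jt_offset, in dwords     */
	uint32_t jt_size      = 64;		/* jt_size, in dwords       */

	static uint8_t fw[16384];		/* raw firmware file        */
	static uint8_t bo[16384];		/* firmware buffer object   */

	memset(fw + ucode_offset + jt_offset * 4, 0xAB, jt_size * 4);

	/* Microcode occupies [0, ucode_size); the JT goes on the next page. */
	memcpy(bo, fw + ucode_offset, ucode_size);
	memcpy(bo + ALIGN(ucode_size, PAGE_SIZE),
	       fw + ucode_offset + jt_offset * 4,
	       jt_size * 4);

	printf("JT copied to BO offset %u\n", ALIGN(ucode_size, PAGE_SIZE));
	return 0;
}
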
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa..a8a4230729f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..a81dfaeeb8c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- fence_wait(fence, false);
- fence_put(fence);
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -360,6 +360,18 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
}
}
+static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
+{
+ uint32_t lo, hi;
+ uint64_t addr;
+
+ lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
+ hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+ addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
+
+ return addr;
+}
+
/**
* amdgpu_uvd_cs_pass1 - first parsing round
*
@@ -372,14 +384,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
- uint32_t cmd, lo, hi;
- uint64_t addr;
+ uint32_t cmd;
+ uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
- addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
@@ -698,18 +706,16 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
- uint32_t cmd, lo, hi;
+ uint32_t cmd;
uint64_t start, end;
- uint64_t addr;
+ uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
- addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
-
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL)
+ if (mapping == NULL) {
+ DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
return -EINVAL;
+ }
start = amdgpu_bo_gpu_offset(bo);
@@ -876,6 +882,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
+ parser->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@@ -890,10 +899,13 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
- /* first round, make sure the buffers are actually in the UVD segment */
- r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
- if (r)
- return r;
+ /* first round only required on chips without UVD 64 bit address support */
+ if (!parser->adev->uvd.address_64_bit) {
+ /* first round, make sure the buffers are actually in the UVD segment */
+ r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
+ if (r)
+ return r;
+ }
/* second round, patch buffer addresses into the command stream */
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
@@ -909,14 +921,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -931,7 +943,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (!bo->adev->uvd.address_64_bit) {
+ if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@@ -960,7 +972,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -975,9 +987,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
- *fence = fence_get(f);
+ *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
- fence_put(f);
+ dma_fence_put(f);
return 0;
@@ -993,7 +1005,7 @@ err:
crash the vcpu so just try to emmit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1002,7 +1014,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1042,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1051,7 +1064,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1128,7 +1142,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1157,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -1154,7 +1168,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
- fence_put(fence);
+ dma_fence_put(fence);
error:
return r;
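
The UVD parser gains amdgpu_uvd_get_addr_from_ctx(), which assembles the 64-bit buffer address from the lo/hi dwords before both parsing passes use it. The assembly step on its own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t addr_from_dwords(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	/* Example dwords as they would appear in the IB (values made up). */
	printf("0x%" PRIx64 "\n", addr_from_dwords(0xdeadbeef, 0x1));
	return 0;
}
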
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
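The amdgpu_uvd.c/.h hunks above are part of the tree-wide rename of struct fence to struct dma_fence: only the type and the fence_*() helpers change, while the reference-counting discipline stays the same. A minimal sketch of that get/wait/put pattern, purely illustrative and not part of the patch (example_wait_for_submission is a made-up helper; it assumes 'f' is a valid fence obtained from a submission):

#include <linux/dma-fence.h>
#include <linux/errno.h>

static int example_wait_for_submission(struct dma_fence *f, long timeout)
{
	long r;

	f = dma_fence_get(f);			/* hold a reference while waiting */
	r = dma_fence_wait_timeout(f, false, timeout);
	dma_fence_put(f);			/* drop our reference again */

	if (r == 0)
		return -ETIMEDOUT;		/* wait timed out */
	if (r < 0)
		return r;			/* interrupted or error */
	return 0;				/* fence signaled */
}

This mirrors the dma_fence_wait_timeout()/dma_fence_put() sequence used in amdgpu_uvd_ring_test_ib() above.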
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -476,12 +477,12 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
+ p->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@@ -788,6 +792,96 @@ out:
}
/**
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ * @ib_idx: indirect buffer to parse
+ *
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ int session_idx = -1;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+ int i, r = 0, idx = 0;
+
+ while (idx < ib->length_dw) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ r = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: /* session */
+ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
+ break;
+
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ break;
+
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
+ break;
+
+ default:
+ break;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ idx += len / 4;
+ }
+
+ if (allocated & ~created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ amdgpu_ib_free(p->adev, ib, NULL);
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
+ return r;
+}
+
+/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
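The new amdgpu_vce_ring_parse_cs_vm() above validates VCE command streams without touching the frame data itself, so all it can do is track session lifetimes with one bit per handle slot in the created/destroyed/allocated masks. A hedged sketch of that bookkeeping, not part of the patch (example_check_sessions is a hypothetical helper):

#include <linux/errno.h>
#include <linux/types.h>

/* A slot that ends up allocated but was never created by the IB is rejected,
 * matching the -ENOENT path in the parser above. */
static int example_check_sessions(u32 created, u32 allocated)
{
	if (allocated & ~created)
		return -ENOENT;		/* new session without create command */

	/* destroyed slots may now be released by the caller */
	return 0;
}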
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..1dda9321bd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
uint64_t num_evictions;
unsigned i;
+ int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
- return;
+ return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- list_add(&entry->tv.head, duplicates);
+ r = validate(param, bo);
+ if (r)
+ return r;
}
+ return 0;
}
/**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct fence *updates = sync->last_vm_update;
+ struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
- struct fence **fences;
+ struct dma_fence **fences;
unsigned i;
int r = 0;
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct fence_array *array;
+ struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
- fence_get(fences[j]);
+ dma_fence_get(fences[j]);
- array = fence_array_create(i, fences, fence_context,
+ array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
- fence_put(fences[j]);
+ dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- fence_put(&array->base);
+ dma_fence_put(&array->base);
if (r)
goto error;
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
- struct fence *flushed;
+ struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
- !fence_is_signaled(id->last_flush))
+ !dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
- (!flushed || fence_is_later(updates, flushed)))
+ (!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- fence_put(id->first);
- id->first = fence_get(fence);
+ dma_fence_put(id->first);
+ id->first = dma_fence_get(fence);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = NULL;
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -341,9 +346,9 @@ error:
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block_version *ip_block;
+ const struct amdgpu_ip_block *ip_block;
- if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
- if (ip_block->major <= 7) {
+ if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
- } else if (ip_block->major == 8) {
+ } else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
- struct fence *fence;
+ struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@@ -525,70 +530,6 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
}
/**
- * amdgpu_vm_clear_bo - initially clear the page dir/table
- *
- * @adev: amdgpu_device pointer
- * @bo: bo to clear
- *
- * need to reserve bo first before calling it.
- */
-static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
-{
- struct amdgpu_ring *ring;
- struct fence *fence = NULL;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- unsigned entries;
- uint64_t addr;
- int r;
-
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- return r;
-
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto error;
-
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (r)
- goto error;
-
- addr = amdgpu_bo_gpu_offset(bo);
- entries = amdgpu_bo_size(bo) / 8;
-
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
- if (r)
- goto error;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- params.ib = &job->ibs[0];
- amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
- WARN_ON(job->ibs[0].length_dw > 64);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(bo, fence, true);
- fence_put(fence);
- return 0;
-
-error_free:
- amdgpu_job_free(job);
-
-error:
- return r;
-}
-
-/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
* @pages_addr: optional DMA address to use for lookup
@@ -612,32 +553,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- bool shadow)
+/*
+ * amdgpu_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
+ struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
- vm->page_directory;
- uint64_t pd_addr;
+ uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
- uint64_t last_pde = ~0, last_pt = ~0;
+ uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int r;
- if (!pd)
- return 0;
-
- r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
- if (r)
- return r;
-
- pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
@@ -645,6 +589,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
+ pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ if (shadow) {
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ ndw *= 2;
+ } else {
+ shadow_addr = 0;
+ }
+
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
@@ -655,30 +610,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
- struct amdgpu_bo *shadow = bo->shadow;
+ struct amdgpu_bo *pt_shadow = bo->shadow;
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ r = amdgpu_ttm_bind(&pt_shadow->tbo,
+ &pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
- if (!shadow) {
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- } else {
- if (vm->page_tables[pt_idx].shadow_addr == pt)
- continue;
- vm->page_tables[pt_idx].shadow_addr = pt;
- }
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+
+ vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +637,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ if (shadow)
+ amdgpu_vm_do_set_ptes(&params,
+ last_shadow,
+ last_pt, count,
+ incr,
+ AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
@@ -693,34 +651,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
count = 1;
last_pde = pde;
+ last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
- if (count)
+ if (count) {
+ if (vm->page_directory->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
+ }
- if (params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ return 0;
+ }
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
- amdgpu_bo_fence(pd, fence, true);
- fence_put(vm->page_directory_fence);
- vm->page_directory_fence = fence_get(fence);
- fence_put(fence);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- } else {
- amdgpu_job_free(job);
- }
+ amdgpu_bo_fence(vm->page_directory, fence, true);
+ dma_fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = dma_fence_get(fence);
+ dma_fence_put(fence);
return 0;
@@ -729,29 +697,6 @@ error_free:
return r;
}
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- int r;
-
- r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
- if (r)
- return r;
- return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -781,11 +726,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +749,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +874,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +990,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- fence_put(*fence);
- *fence = fence_get(f);
+ dma_fence_put(*fence);
+ *fence = dma_fence_get(f);
}
- fence_put(f);
+ dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1010,8 @@ error_free:
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
* @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1019,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags, uint64_t addr,
- struct fence **fence)
+ uint32_t flags,
+ struct drm_mm_node *nodes,
+ struct dma_fence **fence)
{
- const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
- uint64_t src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1097,23 +1041,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr + (addr >> 12) * 8;
- addr = 0;
+ pfn = mapping->offset >> PAGE_SHIFT;
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
}
- addr += mapping->offset;
- if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, mapping->it.last,
- flags, addr, fence);
+ do {
+ uint64_t max_entries;
+ uint64_t addr, last;
- while (start != mapping->it.last + 1) {
- uint64_t last;
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+ (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ } else {
+ addr = 0;
+ max_entries = S64_MAX;
+ }
- last = min((uint64_t)mapping->it.last, start + max_size - 1);
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr +
+ (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ else
+ max_entries = min(max_entries, 16ull * 1024ull);
+ addr = 0;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ addr += adev->vm_manager.vram_base_offset;
+ }
+ addr += pfn << PAGE_SHIFT;
+
+ last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1082,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+ pfn += last - start + 1;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = last + 1;
- addr += max_size * AMDGPU_GPU_PAGE_SIZE;
- }
+
+ } while (unlikely(start != mapping->it.last + 1));
return 0;
}
@@ -1147,40 +1113,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
- struct fence *exclusive;
- uint64_t addr;
+ struct drm_mm_node *nodes;
+ struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
- addr = 0;
+ nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
- addr = (u64)mem->start << PAGE_SHIFT;
- switch (mem->mem_type) {
- case TTM_PL_TT:
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
- break;
-
- case TTM_PL_VRAM:
- addr += adev->vm_manager.vram_base_offset;
- break;
-
- default:
- break;
}
-
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == bo_va->bo->adev) ? flags : 0;
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1146,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
- mapping, flags, addr,
+ mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1361,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- entry = &vm->page_tables[pt_idx].entry;
- if (entry->robj)
+ if (vm->page_tables[pt_idx].bo)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1426,27 +1382,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(vm->page_directory);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
- if (r) {
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
- goto error_free;
- }
-
- if (pt->shadow) {
- r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
- if (r) {
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
- goto error_free;
- }
- }
-
- entry->robj = pt;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
+ vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1483,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
- fence_put(bo_va->last_pt_update);
+ dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1562,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1635,24 +1573,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
goto error_free_page_directory;
- r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
- if (r)
- goto error_unreserve;
-
- if (vm->page_directory->shadow) {
- r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
- if (r)
- goto error_unreserve;
- }
-
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
amdgpu_bo_unreserve(vm->page_directory);
return 0;
-error_unreserve:
- amdgpu_bo_unreserve(vm->page_directory);
-
error_free_page_directory:
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
@@ -1697,7 +1622,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+ struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1634,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
- fence_put(vm->page_directory_fence);
+ dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1658,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
- adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,8 +1681,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- fence_put(adev->vm_manager.ids[i].first);
+ dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- fence_put(id->flushed_updates);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
}
}
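amdgpu_vm_validate_pt_bos() above replaces the old scheme of appending page-table BOs to a duplicates list: when evictions have happened since the last submission, the caller now supplies a validation callback that is invoked for every page-table BO. A sketch of a hypothetical caller, under the assumption that revalidating the BO back into VRAM is all the callback needs to do (the example_* names are made up, not part of the patch):

#include "amdgpu.h"

static int example_validate_pt(void *param, struct amdgpu_bo *bo)
{
	/* 'param' could carry per-submission state; unused in this sketch */
	amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	return ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
}

static int example_validate_vm(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	return amdgpu_vm_validate_pt_bos(adev, vm, example_validate_pt, NULL);
}

The real command-submission path can reuse whatever validation routine it already applies to user BOs, which is the point of passing a callback instead of building a list.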
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..adbc2f5e5c7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of contiguous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
+struct amdgpu_vm_pt {
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+};
+
+struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root va;
+
+ /* protecting invalidated */
+ spinlock_t status_lock;
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+ /* contains the page directory */
+ struct amdgpu_bo *page_directory;
+ unsigned max_pde_used;
+ struct dma_fence *page_directory_fence;
+ uint64_t last_eviction_counter;
+
+ /* array of page tables, one for each page directory entry */
+ struct amdgpu_vm_pt *page_tables;
+
+ /* for id and flush management per ring */
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+ /* protecting freed */
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
+};
+
+struct amdgpu_vm_id {
+ struct list_head list;
+ struct dma_fence *first;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
+ uint32_t max_pfn;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+ /* vm pte handling */
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+
+#endif
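The new amdgpu_vm.h above describes a two-level layout: one page directory whose entries point to page tables of AMDGPU_VM_PTE_COUNT entries each, selected by the amdgpu_vm_block_size parameter. A small illustrative sketch of the index arithmetic this implies, mirroring the "addr >> amdgpu_vm_block_size" expressions in amdgpu_vm.c (the example_* helpers and the 'block_size' argument standing in for the module parameter are hypothetical):

#include <linux/types.h>

static u64 example_pd_index(u64 gpu_page, unsigned int block_size)
{
	return gpu_page >> block_size;			/* which page table */
}

static u64 example_pt_index(u64 gpu_page, unsigned int block_size)
{
	return gpu_page & ((1ULL << block_size) - 1);	/* entry inside it */
}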
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 000000000000..d710226a0fff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_vram_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm *mm = &mgr->mm;
+ struct drm_mm_node *nodes;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ unsigned i;
+ int r;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+ place->lpfn || amdgpu_vram_page_split == -1) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+ } else {
+ pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+ mem->page_alignment);
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+ nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+ for (i = 0; i < num_nodes; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+ else
+ sflags |= DRM_MM_SEARCH_BEST;
+
+ r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+ alignment, 0,
+ place->fpfn, lpfn,
+ sflags, aflags);
+ if (unlikely(r))
+ goto error;
+
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
+ mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = nodes;
+
+ return 0;
+
+error:
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+ kfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object to free the range for
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+
+ if (!mem->mm_node)
+ return;
+
+ spin_lock(&mgr->lock);
+ while (pages) {
+ pages -= nodes->size;
+ drm_mm_remove_node(nodes);
+ ++nodes;
+ }
+ spin_unlock(&mgr->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+ amdgpu_vram_mgr_init,
+ amdgpu_vram_mgr_fini,
+ amdgpu_vram_mgr_new,
+ amdgpu_vram_mgr_del,
+ amdgpu_vram_mgr_debug
+};
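The new VRAM manager above no longer hands out one contiguous range per BO: unless AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS is set, it splits the allocation into several drm_mm nodes and stores them as an array in mem->mm_node, with mem->start set to AMDGPU_BO_INVALID_OFFSET when there is more than one node. Consumers therefore walk the node array, as amdgpu_vm_bo_split_mapping() does earlier in this patch. A hedged sketch of that walk (example_node_for_pfn is a made-up helper, not part of the patch):

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo_api.h>

/* Find the node containing page offset *pfn into the BO; on return *pfn is
 * the remaining offset inside that node.  node->size is in pages here. */
static struct drm_mm_node *example_node_for_pfn(struct ttm_mem_reg *mem,
						unsigned long *pfn)
{
	struct drm_mm_node *node = mem->mm_node;

	while (*pfn >= node->size) {
		*pfn -= node->size;
		++node;
	}
	return node;
}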
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74..8c9bc75a9c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
+#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561..bda9e3de191e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
sclk = ps->performance_levels[0].sclk;
}
+ if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+ sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+ if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+ mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
+ if (amdgpu_ci_is_smc_running(adev)) {
+ DRM_INFO("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
break;
@@ -4075,7 +4083,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
} else {
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ if (pi->uvd_enabled) {
pi->uvd_enabled = false;
pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -4190,8 +4198,10 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
+ int ret = 0;
if (!gate) {
+ /* turn the clocks on when decoding */
if (pi->caps_uvd_dpm ||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4213,14 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
WREG32_SMC(ixDPM_TABLE_475, tmp);
+ ret = ci_enable_uvd_dpm(adev, true);
+ } else {
+ ret = ci_enable_uvd_dpm(adev, false);
+ if (ret)
+ return ret;
}
- return ci_enable_uvd_dpm(adev, !gate);
+ return ret;
}
static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4262,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, true);
} else {
+ ret = ci_enable_vce_dpm(adev, false);
+ if (ret)
+ return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- if (ret)
- return ret;
-
- ret = ci_enable_vce_dpm(adev, false);
}
}
return ret;
@@ -5219,6 +5233,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5245,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5283,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
- if (amdgpu_ci_is_smc_running(adev))
- return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
ci_enable_voltage_control(adev);
ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5703,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -5874,7 +5888,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -5977,7 +5991,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
break;
default:
- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
@@ -6069,7 +6083,7 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
activity_percent = activity_percent > 100 ? 100 : activity_percent;
}
- seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
seq_printf(m, "power level avg sclk: %u mclk: %u\n",
sclk, mclk);
@@ -6094,6 +6108,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
amdgpu_dpm_print_ps_status(adev, rps);
}
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+ const struct ci_pl *ci_cpl2)
+{
+ return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+ (ci_cpl1->sclk == ci_cpl2->sclk) &&
+ (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+ (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct ci_ps *ci_cps;
+ struct ci_ps *ci_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ ci_cps = ci_get_ps(cps);
+ ci_rps = ci_get_ps(rps);
+
+ if (ci_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < ci_cps->performance_level_count; i++) {
+ if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+ &(ci_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -6236,6 +6300,8 @@ static int ci_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
@@ -6287,12 +6353,19 @@ static int ci_dpm_suspend(void *handle)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- ci_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+ adev->pm.dpm.last_state = adev->pm.dpm.state;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+
}
+
return 0;
}
@@ -6310,6 +6383,8 @@ static int ci_dpm_resume(void *handle)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
+ adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+ adev->pm.dpm.state = adev->pm.dpm.last_state;
mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6719,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.set_sclk_od = ci_dpm_set_sclk_od,
.get_mclk_od = ci_dpm_get_mclk_od,
.set_mclk_od = ci_dpm_set_mclk_od,
+ .check_state_equal = ci_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6739,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
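ci_dpm now exposes a check_state_equal callback so the shared DPM code can skip reprogramming the SMC when the requested power state matches the current one. A sketch of how such a caller might look, assuming the hook keeps the ci_check_state_equal() signature shown above and that the funcs table lives at adev->pm.funcs as for the other DPM hooks (example_state_unchanged is hypothetical, not the actual amdgpu_pm code):

#include "amdgpu.h"

static bool example_state_unchanged(struct amdgpu_device *adev,
				    struct amdgpu_ps *cur,
				    struct amdgpu_ps *req)
{
	const struct amdgpu_dpm_funcs *funcs = adev->pm.funcs;
	bool equal = false;

	if (funcs->check_state_equal &&
	    !funcs->check_state_equal(adev, cur, req, &equal))
		return equal;

	return false;	/* unknown: assume a full state switch is needed */
}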
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79..302df85893ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* cik_asic_reset - soft reset GPU
*
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- cik_set_bios_scratch_engine_hung(adev, true);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = cik_gpu_pci_config_reset(adev);
- cik_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
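Note on the hunk above: the per-ASIC bonaire/hawaii/kabini/mullins/kaveri_ip_blocks[] and *_vd[] tables are dropped in favour of incremental amdgpu_ip_block_add() calls, so the virtual-display decision collapses into a single if/else per ASIC instead of a second duplicated array. The snippet below is a minimal, self-contained sketch of that registration pattern; the struct layout, field names and the add helper are simplified stand-ins, not the kernel's actual amdgpu definitions.

/* Simplified sketch of version-tagged IP block registration (assumed layout). */
#include <stdio.h>

#define MAX_IP_BLOCKS 16

struct ip_block_version {
	const char *name;
	int major, minor, rev;
};

struct device_ctx {
	const struct ip_block_version *blocks[MAX_IP_BLOCKS];
	int num_blocks;
	int virtual_display;
};

static int ip_block_add(struct device_ctx *dev, const struct ip_block_version *v)
{
	if (dev->num_blocks >= MAX_IP_BLOCKS)
		return -1;			/* table full */
	dev->blocks[dev->num_blocks++] = v;
	return 0;
}

static const struct ip_block_version dce_hw   = { "dce",         8, 2, 0 };
static const struct ip_block_version dce_virt = { "dce_virtual", 8, 2, 0 };
static const struct ip_block_version gfx      = { "gfx",         7, 2, 0 };

int main(void)
{
	struct device_ctx dev = { .virtual_display = 1 };
	int i;

	/* One add() call per block; only the display block differs per mode. */
	ip_block_add(&dev, dev.virtual_display ? &dce_virt : &dce_hw);
	ip_block_add(&dev, &gfx);

	for (i = 0; i < dev.num_blocks; i++)
		printf("%s v%d.%d.%d\n", dev.blocks[i]->name,
		       dev.blocks[i]->major, dev.blocks[i]->minor,
		       dev.blocks[i]->rev);
	return 0;
}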
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a0327..c4989f51ecef 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864..319b32cdea84 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
if (adev->irq.ih_funcs == NULL)
adev->irq.ih_funcs = &cik_ih_funcs;
}
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec244..1d9ddee2868e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..4c34dbc7a254 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
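The fence_* to dma_fence_* changes in these two hunks track the rename of struct fence to struct dma_fence in the dma-buf core; the semantics are unchanged (dma_fence_wait_timeout() returns 0 on timeout, a negative errno on error, and the remaining timeout otherwise, and dma_fence_put() drops the reference). A hedged sketch of that wait-and-release flow in isolation, assuming kernel context; it is not a drop-in replacement for the IB test above.

/* Pattern sketch: wait on a submitted fence with a timeout, then release it. */
static int wait_ib_fence(struct dma_fence *f, long timeout)
{
	long r;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0)		/* timed out */
		r = -ETIMEDOUT;
	else if (r > 0)		/* signaled within the timeout */
		r = 0;
	/* r < 0: interrupted or other error, pass it through */

	dma_fence_put(f);	/* drop the reference taken at submit time */
	return (int)r;
}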
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
- 6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
- 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
- .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
- .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+};
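In cik_sdma.c the per-call get_emit_ib_size()/get_dma_frame_size() callbacks are folded into constant .emit_ib_size and .emit_frame_size fields of amdgpu_ring_funcs, and the ring type, alignment mask and NOP packet move into the same table, which is why amdgpu_ring_init() loses those arguments and insert_nop now reads ring->funcs->nop. A small standalone sketch of the same "constants in the ops table" idea; the struct and field names here are illustrative, not the amdgpu definitions.

#include <stdio.h>

struct ring_ops {
	unsigned int align_mask;	/* was a per-ring field */
	unsigned int nop;		/* NOP packet used for padding */
	unsigned int emit_ib_size;	/* was get_emit_ib_size() */
	unsigned int emit_frame_size;	/* was get_dma_frame_size() */
};

struct ring {
	const struct ring_ops *ops;
};

static const struct ring_ops sdma_ops = {
	.align_mask	 = 0xf,
	.nop		 = 0x0,
	.emit_ib_size	 = 7 + 4,
	.emit_frame_size = 6 + 3 + 6 + 12 + 9 * 3,
};

int main(void)
{
	struct ring r = { .ops = &sdma_ops };

	/* Callers read a constant instead of calling back into the driver. */
	printf("frame=%u dwords, ib=%u dwords\n",
	       r.ops->emit_frame_size, r.ops->emit_ib_size);
	return 0;
}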
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8..a4a8fe01410b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9e..6cbd913fd12e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
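The HPDx_REGISTER_OFFSET values added to cikd.h are register-index deltas relative to the HPD0 instance: 0x180a - 0x1807 = 3 and 0x180d - 0x1807 = 6, so the hot-plug-detect instances sit three register indices apart. A consumer can then index a single base register, along the lines of what the DCE hunks later in this patch do; the table below is only an assumed usage sketch, not code from this file.

	/* illustrative only: read HPD sense status for instance "hpd" */
	static const u32 hpd_offsets[] = {
		HPD0_REGISTER_OFFSET, HPD1_REGISTER_OFFSET, HPD2_REGISTER_OFFSET,
		HPD3_REGISTER_OFFSET, HPD4_REGISTER_OFFSET, HPD5_REGISTER_OFFSET,
	};

	tmp = RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]);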
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e143730..ba2b66be9022 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -438,7 +438,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
pi->current_ps = *ps;
pi->current_rps = *rps;
- pi->current_rps.ps_priv = ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
pi->requested_ps = *ps;
pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
@@ -2109,9 +2111,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
if (gate) {
if (pi->caps_uvd_pg) {
- /* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
if (ret) {
DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
return;
@@ -2157,9 +2158,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
return;
}
- /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ AMD_CG_STATE_UNGATE);
if (ret) {
DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
return;
@@ -2257,6 +2257,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
+static int cz_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
@@ -2289,6 +2301,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
.vblank_too_short = NULL,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
+ .check_state_equal = cz_check_state_equal,
};
static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2309,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
if (NULL == adev->pm.funcs)
adev->pm.funcs = &cz_dpm_funcs;
}
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+};
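The new cz_check_state_equal() fills the .check_state_equal DPM callback in the most conservative way: it validates the out-parameter and always reports the current and requested power states as different, so callers that consult it treat every request as a state change. A minimal sketch of that callback contract follows; the surrounding dispatch and the stub name are assumptions for illustration, not taken from this patch.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Contract sketch: callee fills *equal; -EINVAL if no result pointer given. */
int check_state_equal_stub(const void *current_ps, const void *requested_ps,
			   bool *equal)
{
	(void)current_ps;
	(void)requested_ps;

	if (equal == NULL)
		return -EINVAL;

	/* Conservative answer: never equal, so the caller always reprograms. */
	*equal = false;
	return 0;
}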
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432..fe7cbb24da7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cz_ih_funcs;
}
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb9..14be7753221b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c..9999dc71b998 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v10_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
@@ -2115,7 +2032,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2227,9 +2144,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2577,6 +2493,9 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2593,11 +2512,6 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2623,6 +2537,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2661,9 +2576,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v10_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2679,6 +2591,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
@@ -2700,6 +2620,7 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2707,6 +2628,10 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
@@ -3151,10 +3076,6 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v10_0_hw_fini(handle);
}
@@ -3165,8 +3086,6 @@ static int dce_v10_0_resume(void *handle)
ret = dce_v10_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3554,7 +3473,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
@@ -3839,7 +3758,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
.vblank_wait = &dce_v10_0_vblank_wait,
- .is_display_hung = &dce_v10_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v10_0_hpd_sense,
@@ -3885,3 +3803,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
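The dce_v10_0.c hunks above follow one pattern: the six-way switch mapping AMDGPU_HPD_1..6 to an array index is replaced by using the hpd value itself as the index, guarded by a num_hpd bounds check. The two amdgpu_ip_block_version structs at the end share the same dce_v10_0_ip_funcs and differ only in the minor number, so DCE 10.0 and 10.1 parts register the same implementation. A compact, standalone illustration of the switch-to-lookup rewrite; the offsets and register base here are placeholders, not hardware values.

#include <stdio.h>

#define NUM_HPD 6

/* Placeholder "register offsets", standing in for hpd_offsets[] above. */
static const unsigned int hpd_offsets[NUM_HPD] = { 0, 8, 16, 24, 32, 40 };

static unsigned int hpd_reg(unsigned int base, unsigned int hpd)
{
	if (hpd >= NUM_HPD)		/* replaces the old default: branch */
		return 0;
	return base + hpd_offsets[hpd];	/* replaces the six case labels */
}

int main(void)
{
	unsigned int hpd;

	for (hpd = 0; hpd <= NUM_HPD; hpd++)
		printf("hpd %u -> reg 0x%x\n", hpd, hpd_reg(0x1807, hpd));
	return 0;
}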
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e4..7a0747789f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
void dce_v10_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1..b3d62b909f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v11_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -2096,7 +2013,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2208,9 +2125,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2593,6 +2509,9 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2609,11 +2528,6 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2639,6 +2553,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2677,9 +2592,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v11_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2695,6 +2607,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -2716,6 +2636,7 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2723,6 +2644,10 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -3215,10 +3140,6 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v11_0_hw_fini(handle);
}
@@ -3229,8 +3150,6 @@ static int dce_v11_0_resume(void *handle)
ret = dce_v11_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3611,7 +3530,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
@@ -3895,7 +3814,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.bandwidth_update = &dce_v11_0_bandwidth_update,
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
.vblank_wait = &dce_v11_0_vblank_wait,
- .is_display_hung = &dce_v11_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v11_0_hpd_sense,
@@ -3941,3 +3859,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef..0d878ca3acba 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb1399..e564442b6393 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -30,8 +30,19 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
-#include "si/si_reg.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -46,6 +57,16 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
+};
+
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -63,46 +84,37 @@ static const struct {
uint32_t hpd;
} interrupt_status_offsets[6] = { {
- .reg = DISP_INTERRUPT_STATUS,
+ .reg = mmDISP_INTERRUPT_STATUS,
.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
- .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+ .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- DC_HPD1_INT_CONTROL,
- DC_HPD2_INT_CONTROL,
- DC_HPD3_INT_CONTROL,
- DC_HPD4_INT_CONTROL,
- DC_HPD5_INT_CONTROL,
- DC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -118,7 +130,7 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
return true;
else
return false;
@@ -128,8 +140,8 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
u32 pos1, pos2;
- pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
if (pos1 != pos2)
return true;
@@ -151,7 +163,7 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return;
- if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
return;
/* depending on when we hit vblank, we may be close to active; if so,
@@ -179,7 +191,7 @@ static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
- return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+ return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
@@ -219,16 +231,16 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* flip at hsync for async, default is vsync */
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
- EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+ GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update the scanout addresses */
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
/* post the write */
- RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+ RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -236,8 +248,8 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
- *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+ *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
@@ -257,34 +269,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
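From here on, dce_v6_0.c stops using the ad-hoc EVERGREEN_*/SI_* defines from si_reg.h/sid.h and switches to the generated register headers: mm* register indices plus <REG>__<FIELD>_MASK bit masks, the same naming scheme the DCE 10/11 code in this patch already uses, and the new hpd_offsets[] table is built from per-instance register deltas just like the cikd.h offsets. A hedged sketch of the read-modify-write idiom these hunks use, assuming kernel context:

	/* enable HPD instance "hpd" (RMW idiom sketch, not a full function) */
	tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
	WREG32(mmDC_HPD1_CONTROL + hpd_offsets[hpd], tmp);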
@@ -303,58 +292,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(DC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -369,34 +315,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
- DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +334,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -454,46 +358,25 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
- return SI_DC_GPIO_HPD_A;
-}
-
-static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
-{
- DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
-
- return true;
+ return mmDC_GPIO_HPD_A;
}
static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
@@ -501,7 +384,7 @@ static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
- return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+ return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
@@ -510,25 +393,25 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
u32 crtc_enabled, tmp, frame_count;
int i, j;
- save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
- WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, 0);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
if (crtc_enabled) {
save->crtc_enabled[i] = true;
- tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
dce_v6_0_vblank_wait(adev, i);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
- WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = evergreen_get_vblank_counter(adev, i);
@@ -539,11 +422,11 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
}
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
- tmp &= ~EVERGREEN_CRTC_MASTER_EN;
- WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
} else {
@@ -560,41 +443,40 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
}
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+ WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
/* unlock regs and wait for update */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (save->crtc_enabled[i]) {
- tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 3) {
+ tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x7) != 0) {
tmp &= ~0x7;
- tmp |= 0x3;
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
- tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
- if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
- tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
- WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
+ tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
- tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
if (tmp & 1) {
tmp &= ~1;
- WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
- if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
break;
udelay(1);
}
@@ -602,19 +484,62 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
}
/* Unlock vga access */
- WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
- if (!render)
- WREG32(R_000300_VGA_RENDER_CONTROL,
- RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+ if (!render)
+ WREG32(mmVGA_RENDER_CONTROL,
+ RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+
+}
+
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ num_crtc = 6;
+ break;
+ case CHIP_OLAND:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v6_0_set_vga_render_state(adev, false);
+
+ /*Disable crtc*/
+ for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+ crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
+ CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
}
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
@@ -647,19 +572,23 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
case 6:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
else
- tmp |= FMT_TRUNCATE_EN;
+ tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
break;
case 8:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_RGB_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
else
- tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+ FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
break;
case 10:
default:
@@ -667,7 +596,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
break;
}
- WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
/**
@@ -681,7 +610,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
*/
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(MC_SHARED_CHMAP);
+ u32 tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
@@ -1178,28 +1107,28 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
}
/* select wm A */
- arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
- WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
- (LATENCY_LOW_WATERMARK(latency_watermark_a) |
- LATENCY_HIGH_WATERMARK(line_time)));
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
- tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
- WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
- (LATENCY_LOW_WATERMARK(latency_watermark_b) |
- LATENCY_HIGH_WATERMARK(line_time)));
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+ (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* restore original selection */
- WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+ WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
/* write the priority marks */
- WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
- WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+ WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+ WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
@@ -1217,7 +1146,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
- * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 * the display controllers. The partitioning is done via one of four
* preset allocations specified in bits 21:20:
* 0 - half lb
@@ -1240,14 +1169,14 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
buffer_alloc = 0;
}
- WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+ WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
- WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
- DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
- DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
break;
udelay(1);
}
@@ -1489,12 +1418,12 @@ static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
static const u32 vga_control_regs[6] =
{
- AVIVO_D1VGA_CONTROL,
- AVIVO_D2VGA_CONTROL,
- EVERGREEN_D3VGA_CONTROL,
- EVERGREEN_D4VGA_CONTROL,
- EVERGREEN_D5VGA_CONTROL,
- EVERGREEN_D6VGA_CONTROL,
+ mmD1VGA_CONTROL,
+ mmD2VGA_CONTROL,
+ mmD3VGA_CONTROL,
+ mmD4VGA_CONTROL,
+ mmD5VGA_CONTROL,
+ mmD6VGA_CONTROL,
};
static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
@@ -1514,7 +1443,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+ WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
@@ -1530,10 +1459,11 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1573,71 +1503,71 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->pixel_format) {
case DRM_FORMAT_C8:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_INDEXED));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+ fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+ GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+ fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1650,75 +1580,75 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
- fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
- fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
- fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
- fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= GRPH_NUM_BANKS(num_banks);
+ fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
+ fb_format |= GRPH_TILE_SPLIT(tile_split);
+ fb_format |= GRPH_BANK_WIDTH(bankw);
+ fb_format |= GRPH_BANK_HEIGHT(bankh);
+ fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= GRPH_PIPE_CONFIG(pipe_config);
dce_v6_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
- WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+ WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
/*
 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
* for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
* retain the full precision throughout the pipeline.
*/
- WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
- (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
- ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+ WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
+ (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
+ ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
if (bypass_lut)
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
- WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+ WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+ WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
- WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v6_0_grph_enable(crtc, true);
- WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+ WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
target_fb->height);
x &= ~3;
y &= ~1;
- WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+ WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+ WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* set pageflip to happen anywhere in vblank interval */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -1745,10 +1675,10 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
+ WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
+ INTERLEAVE_EN);
else
- WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
@@ -1761,54 +1691,52 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
- WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
- NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
- WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
- NI_GRPH_PRESCALE_BYPASS);
- WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
- NI_OVL_PRESCALE_BYPASS);
- WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
- NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+ WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
+ WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+ PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
+ WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
- WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+ WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->lut_r[i] << 20) |
(amdgpu_crtc->lut_g[i] << 10) |
(amdgpu_crtc->lut_b[i] << 0));
}
- WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
- NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
- WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
- NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
- WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
- NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
- WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- (NI_OUTPUT_CSC_GRPH_MODE(0) |
- NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ ICON_DEGAMMA_MODE(0) |
+ (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -1887,12 +1815,12 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
- cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+ cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
if (lock)
- cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
else
- cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
- WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+ cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+ WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
@@ -1900,9 +1828,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -1912,15 +1840,15 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
- EVERGREEN_CURSOR_EN |
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -1931,7 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
- int w = amdgpu_crtc->cursor_width;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
	/* avivo cursors are offset into the total surface */
x += crtc->x;
@@ -1947,13 +1876,9 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
y = 0;
}
- WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
- ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+ WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+ WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -1978,6 +1903,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2016,12 +1942,11 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v6_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2030,10 +1955,20 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
@@ -2055,6 +1990,7 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -2062,6 +1998,10 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
@@ -2117,13 +2057,13 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v6_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled)
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
@@ -2338,21 +2278,20 @@ static int dce_v6_0_early_init(void *handle)
dce_v6_0_set_display_funcs(adev);
dce_v6_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
- /* FIXME: not supported yet */
return -EINVAL;
}
@@ -2482,10 +2421,6 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v6_0_hw_fini(handle);
}
@@ -2496,8 +2431,6 @@ static int dce_v6_0_resume(void *handle)
ret = dce_v6_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -2562,14 +2495,14 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask &= ~VBLANK_INT_MASK;
- WREG32(INT_MASK + reg_block, interrupt_mask);
+ WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask |= VBLANK_INT_MASK;
- WREG32(INT_MASK + reg_block, interrupt_mask);
+ WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
break;
@@ -2588,42 +2521,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -2691,7 +2605,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -2702,7 +2616,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -2728,12 +2642,12 @@ static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
return -EINVAL;
}
- reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+ reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else
- WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0;
@@ -2756,9 +2670,9 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
return -EINVAL;
}
- if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+ if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+ WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */
@@ -2796,7 +2710,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2807,12 +2721,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
}
@@ -2833,7 +2746,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
@@ -3128,7 +3041,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
.vblank_wait = &dce_v6_0_vblank_wait,
- .is_display_hung = &dce_v6_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v6_0_hpd_sense,
@@ -3174,3 +3086,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
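
The dce_v6_0.c hunks above all apply one refactoring: the per-pin switch statements are replaced by a bounds check against adev->mode_info.num_hpd plus an index into hpd_offsets[], so each pin is reached as the HPD1 register plus a per-instance offset. The standalone sketch below models that pattern outside the kernel; the *_MOCK/*_mock names, the offset values and the fake register array are invented for illustration and are not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Placeholder base register and polarity bit, not the real SI/CIK values. */
#define DC_HPD1_INT_CONTROL_MOCK	0x1890u
#define DC_HPD_INT_POLARITY_MASK_MOCK	(1u << 8)

/* Per-pin offsets standing in for HPD0..HPD5_REGISTER_OFFSET. */
static const uint32_t hpd_offsets_mock[] = { 0x00, 0x0c, 0x18, 0x24, 0x30, 0x3c };

/* Fake register file indexed directly by the mock register numbers. */
static uint32_t regs_mock[0x2000];

static void hpd_set_polarity_mock(unsigned int hpd, unsigned int num_hpd,
				  int connected)
{
	uint32_t reg, tmp;

	/* Same guard the patch uses instead of a switch default case. */
	if (hpd >= num_hpd)
		return;

	reg = DC_HPD1_INT_CONTROL_MOCK + hpd_offsets_mock[hpd];
	tmp = regs_mock[reg];
	if (connected)
		tmp &= ~DC_HPD_INT_POLARITY_MASK_MOCK;
	else
		tmp |= DC_HPD_INT_POLARITY_MASK_MOCK;
	regs_mock[reg] = tmp;
}

int main(void)
{
	hpd_set_polarity_mock(2, 6, 0);	/* pin 3, reported disconnected */
	printf("HPD3 control: 0x%08x\n",
	       (unsigned int)regs_mock[DC_HPD1_INT_CONTROL_MOCK + hpd_offsets_mock[2]]);
	return 0;
}
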
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb6..7b546b596de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c..6ce7fb42dbef 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
+};
+
static const uint32_t dig_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- mmDC_HPD1_INT_CONTROL,
- mmDC_HPD2_INT_CONTROL,
- mmDC_HPD3_INT_CONTROL,
- mmDC_HPD4_INT_CONTROL,
- mmDC_HPD5_INT_CONTROL,
- mmDC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v8_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(mmDC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(mmDC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- WREG32(mmDC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(mmDC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- WREG32(mmDC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(mmDC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- WREG32(mmDC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(mmDC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- WREG32(mmDC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(mmDC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- WREG32(mmDC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
- (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
- DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -2030,7 +1910,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2135,9 +2015,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -2465,6 +2344,9 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+
	/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
@@ -2481,11 +2363,6 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
return 0;
}
@@ -2511,6 +2388,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2549,9 +2427,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
-
dce_v8_0_lock_cursor(crtc, true);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
@@ -2563,10 +2438,20 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height) {
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (width - 1) << 16 | (height - 1));
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+ }
+
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
@@ -2588,6 +2473,7 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2595,6 +2481,10 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->cursor_width - 1) << 16 |
+ (amdgpu_crtc->cursor_height - 1));
+
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
@@ -3033,10 +2923,6 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v8_0_hw_fini(handle);
}
@@ -3047,8 +2933,6 @@ static int dce_v8_0_resume(void *handle)
ret = dce_v8_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3204,42 +3088,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -3412,7 +3277,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3288,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
@@ -3449,7 +3313,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
@@ -3733,7 +3597,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.bandwidth_update = &dce_v8_0_bandwidth_update,
.vblank_get_counter = &dce_v8_0_vblank_get_counter,
.vblank_wait = &dce_v8_0_vblank_wait,
- .is_display_hung = &dce_v8_0_is_display_hung,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v8_0_hpd_sense,
@@ -3779,3 +3642,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49b..13b802dd946a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
void dce_v8_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532..e4a5a5ac0ff3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index);
/**
* dce_virtual_vblank_wait - vblank wait asic callback.
@@ -90,15 +95,18 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
return 0;
}
-static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
-{
- return false;
-}
-
static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -119,6 +127,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
/* no DCE */
return;
default:
@@ -195,16 +206,15 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ /* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
break;
}
@@ -264,24 +274,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
-
return true;
}
@@ -341,6 +333,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
return 0;
@@ -350,48 +343,121 @@ static int dce_virtual_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
dce_virtual_set_display_funcs(adev);
dce_virtual_set_irq_funcs(adev);
- adev->mode_info.num_crtc = 1;
adev->mode_info.num_hpd = 1;
adev->mode_info.num_dig = 1;
return 0;
}
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
{
- struct amdgpu_i2c_bus_rec ddc_bus;
- struct amdgpu_router router;
- struct amdgpu_hpd hpd;
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
- /* look up gpio for ddc, hpd */
- ddc_bus.valid = false;
- hpd.hpd = AMDGPU_HPD_NONE;
- /* needed for aux chan transactions */
- ddc_bus.hpd = hpd.hpd;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- memset(&router, 0, sizeof(router));
- router.ddc_valid = false;
- router.cd_valid = false;
- amdgpu_display_add_connector(adev,
- 0,
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
- CONNECTOR_OBJECT_ID_VIRTUAL,
- &hpd,
- &router);
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
- amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
- ATOM_DEVICE_CRT1_SUPPORT,
- 0);
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
- amdgpu_link_encoder_connector(adev->ddev);
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[17] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < 17; i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
- return true;
+ return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
}
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+ .get_modes = dce_virtual_get_modes,
+ .mode_valid = dce_virtual_mode_valid,
+ .best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+ .dpms = dce_virtual_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = dce_virtual_set_property,
+ .destroy = dce_virtual_destroy,
+ .force = dce_virtual_force,
+};
+
static int dce_virtual_sw_init(void *handle)
{
int r, i;
@@ -420,16 +486,16 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
- /* allocate crtcs */
+ /* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_virtual_crtc_init(adev, i);
if (r)
return r;
+ r = dce_virtual_connector_encoder_init(adev, i);
+ if (r)
+ return r;
}
- dce_virtual_get_connector_info(adev);
- amdgpu_print_display_setup(adev->ddev);
-
drm_kms_helper_poll_init(adev->ddev);
adev->mode_info.mode_config_initialized = true;
@@ -496,7 +562,7 @@ static int dce_virtual_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
.name = "dce_virtual",
.early_init = dce_virtual_early_init,
.late_init = NULL,
@@ -526,8 +592,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return;
}
@@ -547,10 +613,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
- /* set the active encoder to connector routing */
- amdgpu_encoder_set_active_device(encoder);
-
return true;
}
@@ -576,45 +638,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
.destroy = dce_virtual_encoder_destroy,
};
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index)
{
- struct drm_device *dev = adev->ddev;
struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
+ struct drm_connector *connector;
+
+ /* add a new encoder */
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = 1 << index;
+ drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector) {
+ kfree(encoder);
+ return -ENOMEM;
}
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
+ /* add a new connector */
+ drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_connector_register(connector);
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = 0x1;
- amdgpu_encoder->enc_priv = NULL;
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
- DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+ /* link them */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -622,7 +679,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.bandwidth_update = &dce_virtual_bandwidth_update,
.vblank_get_counter = &dce_virtual_vblank_get_counter,
.vblank_wait = &dce_virtual_vblank_wait,
- .is_display_hung = &dce_virtual_is_display_hung,
.backlight_set_level = NULL,
.backlight_get_level = NULL,
.hpd_sense = &dce_virtual_hpd_sense,
@@ -630,8 +686,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
.page_flip = &dce_virtual_page_flip,
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
- .add_encoder = &dce_virtual_encoder_add,
- .add_connector = &amdgpu_connector_add,
+ .add_encoder = NULL,
+ .add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +698,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
adev->mode_info.funcs = &dce_virtual_display_funcs;
}
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
- struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
- struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
- unsigned crtc = 0;
- drm_handle_vblank(adev->ddev, crtc);
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- if (state && !adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
- adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.vblank_timer);
- }
-
- adev->mode_info.vsync_timer_enabled = state;
- DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = 0;
- unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
- dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
- return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
- DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
- return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id)
{
unsigned long flags;
- unsigned crtc_id = 0;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
- crtc_id = 0;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +743,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
return 0;
}
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+ struct amdgpu_crtc, vblank_timer);
+ struct drm_device *ddev = amdgpu_crtc->base.dev;
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.crtcs[crtc]->vblank_timer.function =
+ dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+ }
+
+ adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+ return -EINVAL;
+
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
.set = dce_virtual_set_crtc_irq_state,
- .process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
- .set = dce_virtual_set_pageflip_irq_state,
- .process = dce_virtual_pageflip_irq,
+ .process = NULL,
};
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc..ed422012c8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..558640aee15a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -26,15 +26,18 @@
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "si/clearstate_si.h"
-#include "si/sid.h"
-
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
-#define STATIC_PER_CU_PG_ENABLE (1 << 3)
-#define DYN_PER_CU_PG_ENABLE (1 << 2)
-#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
-#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+#include "si_enums.h"
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -70,6 +73,15 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
+#define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
+#define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
+#define MICRO_TILE_MODE(x) ((x) << 0)
+#define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
+#define BANK_WIDTH(x) ((x) << 14)
+#define BANK_HEIGHT(x) ((x) << 16)
+#define MACRO_TILE_ASPECT(x) ((x) << 18)
+#define NUM_BANKS(x) ((x) << 20)
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -400,8 +412,8 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
if (adev->asic_type == CHIP_VERDE ||
- adev->asic_type == CHIP_OLAND ||
- adev->asic_type == CHIP_HAINAN) {
+ adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
@@ -414,7 +426,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 1:
+ case 1:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -434,7 +446,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 3:
+ case 3:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -444,7 +456,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 4:
+ case 4:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -454,7 +466,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 5:
+ case 5:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -464,7 +476,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 6:
+ case 6:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -474,7 +486,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 7:
+ case 7:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -484,7 +496,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 8:
+ case 8:
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -494,7 +506,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 9:
+ case 9:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -504,7 +516,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 10:
+ case 10:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -514,7 +526,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 11:
+ case 11:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -524,7 +536,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 12:
+ case 12:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -534,7 +546,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 13:
+ case 13:
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -544,7 +556,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 14:
+ case 14:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -554,7 +566,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 15:
+ case 15:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -564,7 +576,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 16:
+ case 16:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -574,7 +586,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 17:
+ case 17:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
@@ -584,7 +596,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 21:
+ case 21:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -594,7 +606,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 22:
+ case 22:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -604,7 +616,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
- case 23:
+ case 23:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -614,7 +626,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 24:
+ case 24:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -624,7 +636,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
- case 25:
+ case 25:
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -639,7 +651,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
@@ -879,7 +891,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
} else{
@@ -894,19 +906,23 @@ static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 data;
if (instance == 0xffffffff)
- data = INSTANCE_BROADCAST_WRITES;
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
else
- data = INSTANCE_INDEX(instance);
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
else if (se_num == 0xffffffff)
- data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
+ (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
else if (sh_num == 0xffffffff)
- data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
else
- data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
- WREG32(GRBM_GFX_INDEX, data);
+ data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
+ (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ WREG32(mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v6_0_create_bitmask(u32 bit_width)
@@ -920,11 +936,11 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
{
u32 data, mask;
- data = RREG32(CC_RB_BACKEND_DISABLE);
- data &= BACKEND_DISABLE_MASK;
- data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE);
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+ data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data >>= BACKEND_DISABLE_SHIFT;
+ data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
@@ -936,14 +952,23 @@ static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
- SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+ *rconf |=
+ (2 << PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT);
break;
case CHIP_VERDE:
- *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+ *rconf |=
+ (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
+ (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
+ (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT);
break;
case CHIP_OLAND:
- *rconf |= RB_YSEL;
+ *rconf |= (1 << PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT);
break;
case CHIP_HAINAN:
*rconf |= 0x0;
@@ -981,24 +1006,24 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
- raster_config_se &= ~SE_MAP_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__SE_MAP_MASK;
if (!se_mask[idx]) {
- raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ raster_config_se |= RASTER_CONFIG_SE_MAP_3 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
} else {
- raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ raster_config_se |= RASTER_CONFIG_SE_MAP_0 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
- raster_config_se &= ~PKR_MAP_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__PKR_MAP_MASK;
if (!pkr0_mask) {
- raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ raster_config_se |= RASTER_CONFIG_PKR_MAP_3 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
} else {
- raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ raster_config_se |= RASTER_CONFIG_PKR_MAP_0 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
}
}
@@ -1009,14 +1034,14 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
- raster_config_se &= ~RB_MAP_PKR0_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
- RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
} else {
raster_config_se |=
- RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
}
}
@@ -1026,14 +1051,14 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
- raster_config_se &= ~RB_MAP_PKR1_MASK;
+ raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
- RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
} else {
raster_config_se |=
- RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
}
}
}
@@ -1041,7 +1066,7 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
/* GRBM_GFX_INDEX has a different offset on SI */
gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
- WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
@@ -1063,7 +1088,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
for (j = 0; j < sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ disabled_rbs |= data << ((i * sh_per_se + j) * 2);
}
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -1105,7 +1130,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes)
- WREG32(PA_SC_RASTER_CONFIG, data);
+ WREG32(mmPA_SC_RASTER_CONFIG, data);
else
gfx_v6_0_write_harvested_raster_configs(adev, data,
adev->gfx.config.backend_enable_mask,
@@ -1124,11 +1149,11 @@ static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
{
u32 data, mask;
- data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- data &= INACTIVE_CUS_MASK;
- data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- data >>= INACTIVE_CUS_SHIFT;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
mask = gfx_v6_0_create_bitmask(cu_per_sh);
@@ -1148,7 +1173,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
- data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
mask = 1;
@@ -1156,7 +1181,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
mask <<= k;
if (active_cu & mask) {
data &= ~mask;
- WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ WREG32(mmSPI_STATIC_THREAD_MGMT_3, data);
break;
}
}
@@ -1209,7 +1234,6 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
-
case CHIP_VERDE:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
@@ -1266,18 +1290,18 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
break;
}
- WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
- WREG32(SRBM_INT_CNTL, 1);
- WREG32(SRBM_INT_ACK, 1);
+ WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
+ WREG32(mmSRBM_INT_CNTL, 1);
+ WREG32(mmSRBM_INT_ACK, 1);
- WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+ WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
- mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
- mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+ mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
- tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (adev->gfx.config.mem_row_size_in_kb > 4)
adev->gfx.config.mem_row_size_in_kb = 4;
@@ -1285,32 +1309,33 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.num_gpus = 1;
adev->gfx.config.multi_gpu_tile_size = 64;
- gb_addr_config &= ~ROW_SIZE_MASK;
+ gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
default:
- gb_addr_config |= ROW_SIZE(0);
+ gb_addr_config |= 0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
case 2:
- gb_addr_config |= ROW_SIZE(1);
+ gb_addr_config |= 1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
case 4:
- gb_addr_config |= ROW_SIZE(2);
+ gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
- WREG32(GB_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CALC, gb_addr_config);
- WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
- WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+ WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmDMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(mmDMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+
#if 0
if (adev->has_uvd) {
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
}
#endif
gfx_v6_0_tiling_mode_table_init(adev);
@@ -1325,45 +1350,48 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
- WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
- ROQ_IB2_START(0x2b)));
- WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+ WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+ WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
- sx_debug_1 = RREG32(SX_DEBUG_1);
- WREG32(SX_DEBUG_1, sx_debug_1);
+ sx_debug_1 = RREG32(mmSX_DEBUG_1);
+ WREG32(mmSX_DEBUG_1, sx_debug_1);
- WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
- WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
- SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
- SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
- SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+ WREG32(mmPA_SC_FIFO_SIZE, ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
+ (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
- WREG32(VGT_NUM_INSTANCES, 1);
- WREG32(CP_PERFMON_CNTL, 0);
- WREG32(SQ_CONFIG, 0);
- WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
- FORCE_EOV_MAX_REZ_CNT(255)));
+ WREG32(mmVGT_NUM_INSTANCES, 1);
+ WREG32(mmCP_PERFMON_CNTL, 0);
+ WREG32(mmSQ_CONFIG, 0);
+ WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
+ (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
- WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
- AUTO_INVLD_EN(ES_AND_GS_AUTO));
+ WREG32(mmVGT_CACHE_INVALIDATION,
+ (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
+ (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
- WREG32(VGT_GS_VERTEX_REUSE, 16);
- WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+ WREG32(mmVGT_GS_VERTEX_REUSE, 16);
+ WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
- WREG32(CB_PERFCOUNTER0_SELECT0, 0);
- WREG32(CB_PERFCOUNTER0_SELECT1, 0);
- WREG32(CB_PERFCOUNTER1_SELECT0, 0);
- WREG32(CB_PERFCOUNTER1_SELECT1, 0);
- WREG32(CB_PERFCOUNTER2_SELECT0, 0);
- WREG32(CB_PERFCOUNTER2_SELECT1, 0);
- WREG32(CB_PERFCOUNTER3_SELECT0, 0);
- WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER0_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER0_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER1_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER1_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER2_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER2_SELECT1, 0);
+ WREG32(mmCB_PERFCOUNTER3_SELECT0, 0);
+ WREG32(mmCB_PERFCOUNTER3_SELECT1, 0);
- hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
- WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+ hdp_host_path_cntl = RREG32(mmHDP_HOST_PATH_CNTL);
+ WREG32(mmHDP_HOST_PATH_CNTL, hdp_host_path_cntl);
- WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+ WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
+ (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
udelay(50);
}
@@ -1374,7 +1402,7 @@ static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
int i;
adev->gfx.scratch.num_reg = 7;
- adev->gfx.scratch.reg_base = SCRATCH_REG0;
+ adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
adev->gfx.scratch.free[i] = true;
adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
@@ -1430,11 +1458,18 @@ static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+ amdgpu_ring_write(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x1);
}
+static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
+
/**
* gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
*
@@ -1448,7 +1483,7 @@ static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, HDP_DEBUG0);
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x1);
}
@@ -1460,7 +1495,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
/* flush read cache over gart */
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, (mmCP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
@@ -1475,7 +1510,8 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
- DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+ ((write64bit ? 2 : 1) << CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT) |
+ ((int_sel ? 2 : 0) << CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
@@ -1522,7 +1558,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -1548,7 +1584,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -1569,7 +1605,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1578,11 +1614,13 @@ err1:
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
- if (enable)
- WREG32(CP_ME_CNTL, 0);
- else {
- WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
- WREG32(SCRATCH_UMSK, 0);
+ if (enable) {
+ WREG32(mmCP_ME_CNTL, 0);
+ } else {
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK |
+ CP_ME_CNTL__CE_HALT_MASK));
+ WREG32(mmSCRATCH_UMSK, 0);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -1616,34 +1654,33 @@ static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
fw_data = (const __le32 *)
(adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
/* CE */
fw_data = (const __le32 *)
(adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
/* ME */
fw_data = (const __be32 *)
(adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmCP_ME_RAM_WADDR, 0);
-
- WREG32(CP_PFP_UCODE_ADDR, 0);
- WREG32(CP_CE_UCODE_ADDR, 0);
- WREG32(CP_ME_RAM_WADDR, 0);
- WREG32(CP_ME_RAM_RADDR, 0);
+ WREG32(mmCP_PFP_UCODE_ADDR, 0);
+ WREG32(mmCP_CE_UCODE_ADDR, 0);
+ WREG32(mmCP_ME_RAM_WADDR, 0);
+ WREG32(mmCP_ME_RAM_RADDR, 0);
return 0;
}
@@ -1720,14 +1757,14 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
int r;
u64 rptr_addr;
- WREG32(CP_SEM_WAIT_TIMER, 0x0);
- WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
+ WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
- WREG32(CP_RB_WPTR_DELAY, 0);
+ WREG32(mmCP_RB_WPTR_DELAY, 0);
- WREG32(CP_DEBUG, 0);
- WREG32(SCRATCH_ADDR, 0);
+ WREG32(mmCP_DEBUG, 0);
+ WREG32(mmSCRATCH_ADDR, 0);
/* ring 0 - compute and gfx */
/* Set ring buffer size */
@@ -1738,24 +1775,24 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB0_CNTL, tmp);
+ WREG32(mmCP_RB0_CNTL, tmp);
/* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
- WREG32(SCRATCH_UMSK, 0);
+ WREG32(mmSCRATCH_UMSK, 0);
mdelay(1);
- WREG32(CP_RB0_CNTL, tmp);
+ WREG32(mmCP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+ WREG32(mmCP_RB0_BASE, ring->gpu_addr >> 8);
/* start the rings */
gfx_v6_0_cp_gfx_start(adev);
@@ -1779,11 +1816,11 @@ static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->gfx.gfx_ring[0])
- return RREG32(CP_RB0_WPTR);
+ return RREG32(mmCP_RB0_WPTR);
else if (ring == &adev->gfx.compute_ring[0])
- return RREG32(CP_RB1_WPTR);
+ return RREG32(mmCP_RB1_WPTR);
else if (ring == &adev->gfx.compute_ring[1])
- return RREG32(CP_RB2_WPTR);
+ return RREG32(mmCP_RB2_WPTR);
else
BUG();
}
@@ -1792,8 +1829,8 @@ static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- WREG32(CP_RB0_WPTR, ring->wptr);
- (void)RREG32(CP_RB0_WPTR);
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB0_WPTR);
}
static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
@@ -1801,11 +1838,11 @@ static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->gfx.compute_ring[0]) {
- WREG32(CP_RB1_WPTR, ring->wptr);
- (void)RREG32(CP_RB1_WPTR);
+ WREG32(mmCP_RB1_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB1_WPTR);
} else if (ring == &adev->gfx.compute_ring[1]) {
- WREG32(CP_RB2_WPTR, ring->wptr);
- (void)RREG32(CP_RB2_WPTR);
+ WREG32(mmCP_RB2_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB2_WPTR);
} else {
BUG();
}
@@ -1817,7 +1854,7 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- int r;
+ int i, r;
u64 rptr_addr;
/* ring1 - compute only */
@@ -1829,19 +1866,19 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB1_CNTL, tmp);
+ WREG32(mmCP_RB1_CNTL, tmp);
- WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB1_CNTL, tmp | CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB1_WPTR, ring->wptr);
+ WREG32(mmCP_RB1_WPTR, ring->wptr);
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
mdelay(1);
- WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ WREG32(mmCP_RB1_CNTL, tmp);
+ WREG32(mmCP_RB1_BASE, ring->gpu_addr >> 8);
ring = &adev->gfx.compute_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
@@ -1849,32 +1886,27 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB2_CNTL, tmp);
+ WREG32(mmCP_RB2_CNTL, tmp);
- WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(mmCP_RB2_CNTL, tmp | CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(CP_RB2_WPTR, ring->wptr);
+ WREG32(mmCP_RB2_WPTR, ring->wptr);
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmCP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
mdelay(1);
- WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ WREG32(mmCP_RB2_CNTL, tmp);
+ WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
- adev->gfx.compute_ring[0].ready = true;
- adev->gfx.compute_ring[1].ready = true;
+ adev->gfx.compute_ring[0].ready = false;
+ adev->gfx.compute_ring[1].ready = false;
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
- if (r) {
- adev->gfx.compute_ring[0].ready = false;
- return r;
- }
-
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
- if (r) {
- adev->gfx.compute_ring[1].ready = false;
- return r;
+ for (i = 0; i < 2; i++) {
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+ if (r)
+ return r;
+ adev->gfx.compute_ring[i].ready = true;
}
return 0;
@@ -1892,24 +1924,26 @@ static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
-{
- u32 tmp = RREG32(CP_INT_CNTL_RING0);
+{
+ u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
u32 mask;
int i;
if (enable)
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp |= (CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
else
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ tmp &= ~(CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
+ CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
+ WREG32(mmCP_INT_CNTL_RING0, tmp);
if (!enable) {
/* read a gfx register */
- tmp = RREG32(DB_DEPTH_INFO);
+ tmp = RREG32(mmDB_DEPTH_INFO);
mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+ if ((RREG32(mmRLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
break;
udelay(1);
}
@@ -1940,7 +1974,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1966,16 +2000,16 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
/* write new base address */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm_id < 8) {
- amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
} else {
- amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
}
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -1984,7 +2018,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1 << vm_id);
@@ -1992,7 +2026,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
WAIT_REG_MEM_ENGINE(0))); /* me */
- amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* ref */
amdgpu_ring_write(ring, 0); /* mask */
@@ -2071,7 +2105,6 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
if (adev->gfx.rlc.save_restore_obj == NULL) {
-
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -2166,20 +2199,12 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
- u32 tmp;
-
- tmp = RREG32(RLC_LB_CNTL);
- if (enable)
- tmp |= LOAD_BALANCE_ENABLE;
- else
- tmp &= ~LOAD_BALANCE_ENABLE;
- WREG32(RLC_LB_CNTL, tmp);
+ WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
if (!enable) {
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- WREG32(SPI_LB_CU_MASK, 0x00ff);
+ WREG32(mmSPI_LB_CU_MASK, 0x00ff);
}
-
}
static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2187,13 +2212,13 @@ static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+ if (RREG32(mmRLC_SERDES_MASTER_BUSY_0) == 0)
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+ if (RREG32(mmRLC_SERDES_MASTER_BUSY_1) == 0)
break;
udelay(1);
}
@@ -2203,20 +2228,20 @@ static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
u32 tmp;
- tmp = RREG32(RLC_CNTL);
+ tmp = RREG32(mmRLC_CNTL);
if (tmp != rlc)
- WREG32(RLC_CNTL, rlc);
+ WREG32(mmRLC_CNTL, rlc);
}
static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
{
u32 data, orig;
- orig = data = RREG32(RLC_CNTL);
+ orig = data = RREG32(mmRLC_CNTL);
- if (data & RLC_ENABLE) {
- data &= ~RLC_ENABLE;
- WREG32(RLC_CNTL, data);
+ if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
+ data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
+ WREG32(mmRLC_CNTL, data);
gfx_v6_0_wait_for_rlc_serdes(adev);
}
@@ -2226,7 +2251,7 @@ static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
{
- WREG32(RLC_CNTL, 0);
+ WREG32(mmRLC_CNTL, 0);
gfx_v6_0_enable_gui_idle_interrupt(adev, false);
gfx_v6_0_wait_for_rlc_serdes(adev);
@@ -2234,7 +2259,7 @@ static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
{
- WREG32(RLC_CNTL, RLC_ENABLE);
+ WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
@@ -2243,13 +2268,9 @@ static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(GRBM_SOFT_RESET);
-
- tmp |= SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp &= ~SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
@@ -2258,11 +2279,12 @@ static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
u32 tmp;
/* Enable LBPW only for DDR3 */
- tmp = RREG32(MC_SEQ_MISC0);
+ tmp = RREG32(mmMC_SEQ_MISC0);
if ((tmp & 0xF0000000) == 0xB0000000)
return true;
return false;
}
+
static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
{
}
@@ -2283,15 +2305,15 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
gfx_v6_0_init_pg(adev);
gfx_v6_0_init_cg(adev);
- WREG32(RLC_RL_BASE, 0);
- WREG32(RLC_RL_SIZE, 0);
- WREG32(RLC_LB_CNTL, 0);
- WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
- WREG32(RLC_LB_CNTR_INIT, 0);
- WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+ WREG32(mmRLC_RL_BASE, 0);
+ WREG32(mmRLC_RL_SIZE, 0);
+ WREG32(mmRLC_LB_CNTL, 0);
+ WREG32(mmRLC_LB_CNTR_MAX, 0xffffffff);
+ WREG32(mmRLC_LB_CNTR_INIT, 0);
+ WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
- WREG32(RLC_MC_CNTL, 0);
- WREG32(RLC_UCODE_CNTL, 0);
+ WREG32(mmRLC_MC_CNTL, 0);
+ WREG32(mmRLC_UCODE_CNTL, 0);
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
@@ -2301,10 +2323,10 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
amdgpu_ucode_print_rlc_hdr(&hdr->header);
for (i = 0; i < fw_size; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(mmRLC_UCODE_ADDR, i);
+ WREG32(mmRLC_UCODE_DATA, le32_to_cpup(fw_data++));
}
- WREG32(RLC_UCODE_ADDR, 0);
+ WREG32(mmRLC_UCODE_ADDR, 0);
gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
gfx_v6_0_rlc_start(adev);
@@ -2316,38 +2338,38 @@ static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp;
- orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+ orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
- WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+ WREG32(mmRLC_GCPM_GENERAL_3, 0x00000080);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00b000ff);
gfx_v6_0_wait_for_rlc_serdes(adev);
gfx_v6_0_update_rlc(adev, tmp);
- WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x007000ff);
- data |= CGCG_EN | CGLS_EN;
+ data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
} else {
gfx_v6_0_enable_gui_idle_interrupt(adev, false);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
- data &= ~(CGCG_EN | CGLS_EN);
+ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
}
if (orig != data)
- WREG32(RLC_CGCG_CGLS_CTRL, data);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
@@ -2357,51 +2379,51 @@ static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
u32 data, orig, tmp = 0;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
- orig = data = RREG32(CGTS_SM_CTRL_REG);
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
+ WREG32(mmCGTS_SM_CTRL_REG, data);
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
+ orig = data = RREG32(mmCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
+ WREG32(mmCP_MEM_SLP_CNTL, data);
}
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00d000ff);
gfx_v6_0_update_rlc(adev, tmp);
} else {
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data |= 0x00000003;
if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
- data = RREG32(CP_MEM_SLP_CNTL);
- if (data & CP_MEM_LS_EN) {
- data &= ~CP_MEM_LS_EN;
- WREG32(CP_MEM_SLP_CNTL, data);
+ data = RREG32(mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32(mmCP_MEM_SLP_CNTL, data);
}
- orig = data = RREG32(CGTS_SM_CTRL_REG);
- data |= LS_OVERRIDE | OVERRIDE;
+ orig = data = RREG32(mmCGTS_SM_CTRL_REG);
+ data |= CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK | CGTS_SM_CTRL_REG__OVERRIDE_MASK;
if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
+ WREG32(mmCGTS_SM_CTRL_REG, data);
tmp = gfx_v6_0_halt_rlc(adev);
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(mmRLC_SERDES_WR_CTRL, 0x00e000ff);
gfx_v6_0_update_rlc(adev, tmp);
}
@@ -2421,6 +2443,7 @@ static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
gfx_v6_0_enable_gui_idle_interrupt(adev, true);
}
*/
+
static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
bool enable)
{
@@ -2435,13 +2458,13 @@ static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
data &= ~0x8000;
else
data |= 0x8000;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
@@ -2518,26 +2541,13 @@ static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
bool enable)
{
-
- u32 tmp;
-
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
- tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
- WREG32(RLC_TTOP_D, tmp);
-
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_ENABLE;
- WREG32(RLC_PG_CNTL, tmp);
-
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp |= AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
+ WREG32(mmRLC_TTOP_D, RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10));
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, 1);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 1);
} else {
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp &= ~AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
-
- tmp = RREG32(DB_RENDER_CONTROL);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 0);
+ (void)RREG32(mmDB_RENDER_CONTROL);
}
}
@@ -2550,8 +2560,8 @@ static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
- tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
@@ -2594,12 +2604,8 @@ static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
}
}
- WREG32(RLC_PG_AO_CU_MASK, tmp);
-
- tmp = RREG32(RLC_MAX_PG_CU);
- tmp &= ~MAX_PU_CU_MASK;
- tmp |= MAX_PU_CU(active_cu_number);
- WREG32(RLC_MAX_PG_CU, tmp);
+ WREG32(mmRLC_PG_AO_CU_MASK, tmp);
+ WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
}
static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -2607,13 +2613,13 @@ static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
- data |= STATIC_PER_CU_PG_ENABLE;
+ data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
else
- data &= ~STATIC_PER_CU_PG_ENABLE;
+ data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
@@ -2621,33 +2627,28 @@ static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
{
u32 data, orig;
- orig = data = RREG32(RLC_PG_CNTL);
+ orig = data = RREG32(mmRLC_PG_CNTL);
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
- data |= DYN_PER_CU_PG_ENABLE;
+ data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
else
- data &= ~DYN_PER_CU_PG_ENABLE;
+ data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
if (orig != data)
- WREG32(RLC_PG_CNTL, data);
+ WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
u32 tmp;
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_SRC, 1);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_SRC;
- WREG32(RLC_PG_CNTL, tmp);
-
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
-
- tmp = RREG32(RLC_AUTO_PG_CTRL);
-
- tmp &= ~GRBM_REG_SGIT_MASK;
- tmp |= GRBM_REG_SGIT(0x700);
- tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
+ tmp = RREG32(mmRLC_AUTO_PG_CTRL);
+ tmp &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
+ tmp |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
+ tmp &= ~RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK;
+ WREG32(mmRLC_AUTO_PG_CTRL, tmp);
}
static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
@@ -2703,7 +2704,6 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
buffer[count++] = cpu_to_le32(0x80000000);
buffer[count++] = cpu_to_le32(0x80000000);
@@ -2723,7 +2723,7 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
}
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TAHITI:
@@ -2766,16 +2766,16 @@ static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
gfx_v6_0_enable_cp_pg(adev, true);
gfx_v6_0_enable_gds_pg(adev, true);
} else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
}
gfx_v6_0_init_ao_cu_mask(adev);
gfx_v6_0_update_gfx_pg(adev, true);
} else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
}
}
@@ -2800,50 +2800,86 @@ static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
+ if (flags & AMDGPU_HAVE_CTX_SWITCH)
+ gfx_v6_0_ring_emit_vgt_flush(ring);
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, 0x80000000);
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 6; /* gfx_v6_0_ring_emit_ib */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3; /* gfx_v6_ring_emit_cntxcntl */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
+ .read_wave_data = &gfx_v6_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
};
static int gfx_v6_0_early_init(void *handle)
@@ -2896,9 +2932,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2920,9 +2954,7 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -2998,7 +3030,7 @@ static bool gfx_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
+ if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
else
return true;
@@ -3029,14 +3061,14 @@ static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3051,27 +3083,27 @@ static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
switch (state){
case AMDGPU_IRQ_STATE_DISABLE:
if (ring == 0) {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
+ cp_int_cntl &= ~CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
break;
} else {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
- cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
+ cp_int_cntl &= ~CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
break;
}
case AMDGPU_IRQ_STATE_ENABLE:
if (ring == 0) {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
+ cp_int_cntl |= CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
break;
} else {
- cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
- cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
+ cp_int_cntl |= CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
break;
}
@@ -3092,14 +3124,14 @@ static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3117,14 +3149,14 @@ static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
- WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
break;
@@ -3164,7 +3196,7 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
break;
case 1:
case 2:
- amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id -1]);
+ amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
break;
default:
break;
@@ -3237,7 +3269,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
.late_init = NULL,
@@ -3255,10 +3287,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3311,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3336,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3407,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+};
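
The gfx_v6_0.c changes above follow a single pattern: bare register names (RLC_CNTL, CP_INT_CNTL_RING0, ...) are replaced by the generated mm* register offsets, ad-hoc bit defines by the generated <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pairs, and open-coded read/modify/write sequences by the WREG32_FIELD() helper. As a rough sketch only (the real macro lives in the amdgpu headers; the SKETCH_* names below are made up for illustration), such a field helper can be derived from that naming convention by token pasting:

    /* Illustrative sketch, not the in-tree definition. */
    #define SKETCH_FIELD_MASK(reg, field)   reg##__##field##_MASK
    #define SKETCH_FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT

    /* Read-modify-write one field of a register, in the style of WREG32_FIELD(). */
    #define SKETCH_WREG32_FIELD(reg, field, val) \
        WREG32(mm##reg, (RREG32(mm##reg) & ~SKETCH_FIELD_MASK(reg, field)) | \
               ((val) << SKETCH_FIELD_SHIFT(reg, field)))

    /* e.g. the enable_lbpw() hunk above then reads:
     *   SKETCH_WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
     */
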
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b248..ced6fc42f688 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..c4e14015ec5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
- int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+ int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2105,6 +2105,18 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+ EVENT_INDEX(4));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
+
+
/**
* gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
*
@@ -2260,6 +2272,7 @@ static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ gfx_v7_0_ring_emit_vgt_flush(ring);
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
@@ -2286,7 +2299,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -2312,7 +2325,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -2333,7 +2346,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3222,7 +3235,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -3262,7 +3275,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3404,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.save_restore_obj);
if (r) {
@@ -3435,7 +3449,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -3475,7 +3490,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -4354,44 +4370,69 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_gfx */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3; /* gfx_v7_ring_emit_cntxcntl */
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_compute */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
+ .read_wave_data = &gfx_v7_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4684,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -4670,9 +4709,7 @@ static int gfx_v7_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -5123,7 +5160,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5178,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5205,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5233,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5343,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26..2f5164cc0e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
#endif
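
With this rework, gfx_v6_0.h and gfx_v7_0.h no longer export the raw amd_ip_funcs tables (which become static) but versioned amdgpu_ip_block_version descriptors, one per GFX variant, each bundling the block type, a major/minor/rev triple and the funcs pointer. Per-ASIC setup code then registers whole descriptors instead of filling version arrays by hand. A minimal sketch of such a call site, assuming the amdgpu_ip_block_add() helper introduced alongside this series (its name and exact signature are an assumption here, not taken from the hunks above):

    /* Sketch: SI setup selects the matching GFX IP block descriptor. */
    static int sketch_si_add_gfx_block(struct amdgpu_device *adev)
    {
        /* other blocks (GMC, IH, DCE, SDMA, ...) would be added the same way */
        return amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
    }
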
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..6324f67bdb1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
+#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
@@ -167,6 +168,7 @@ static const u32 golden_settings_tonga_a11[] =
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -640,7 +642,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
- mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -798,7 +799,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -824,7 +825,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -844,7 +845,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
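
The struct fence -> struct dma_fence rename runs through every IB test in these files: the fence declaration, the timed wait and the final put all move to the dma_fence_* API with unchanged semantics. Condensed from the hunks above and below (the scratch-register check mirrors the existing tests and is only sketched here):

    long r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0) {                   /* wait timed out */
        r = -ETIMEDOUT;
    } else if (r > 0) {             /* fence signalled within the timeout */
        tmp = RREG32(scratch);      /* verify the IB's write, as the tests do */
        r = (tmp == 0xDEADBEEF) ? 0 : -EINVAL;
    }
    dma_fence_put(f);               /* drop the reference from IB submission */
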
@@ -1058,6 +1059,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ /* we also need to account for the JT (jump table) */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+ }
+
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1141,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x0000002A);
- break;
- case CHIP_POLARIS11:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_FIJI:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- buffer[count++] = cpu_to_le32(0x00000002);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_STONEY:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1261,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -1315,7 +1304,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -1383,7 +1373,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
if (adev->gfx.mec.hpd_eop_obj == NULL) {
r = amdgpu_bo_create(adev,
- adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+ adev->gfx.mec.num_queue * MEC_HPD_SIZE,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
@@ -1412,7 +1402,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+ memset(hpd, 0, adev->gfx.mec.num_queue * MEC_HPD_SIZE);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -1575,7 +1565,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1708,7 +1698,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* wait for the GPU to finish processing the IB */
- r = fence_wait(f, false);
+ r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
@@ -1729,7 +1719,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
return r;
}
@@ -2045,10 +2035,8 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2072,10 +2060,8 @@ static int gfx_v8_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ irq_type);
if (r)
return r;
}
@@ -3679,6 +3665,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
num_rb_pipes);
}
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -3905,7 +3906,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
int list_size;
unsigned int *register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
- if (register_list_format == NULL)
+ if (!register_list_format)
return -ENOMEM;
memcpy(register_list_format, adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes);
@@ -4331,7 +4332,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- u64 rb_addr, rptr_addr;
+ u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
@@ -4362,6 +4363,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
@@ -4467,267 +4471,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
return 0;
}
-struct vi_mqd {
- uint32_t header; /* ordinal0 */
- uint32_t compute_dispatch_initiator; /* ordinal1 */
- uint32_t compute_dim_x; /* ordinal2 */
- uint32_t compute_dim_y; /* ordinal3 */
- uint32_t compute_dim_z; /* ordinal4 */
- uint32_t compute_start_x; /* ordinal5 */
- uint32_t compute_start_y; /* ordinal6 */
- uint32_t compute_start_z; /* ordinal7 */
- uint32_t compute_num_thread_x; /* ordinal8 */
- uint32_t compute_num_thread_y; /* ordinal9 */
- uint32_t compute_num_thread_z; /* ordinal10 */
- uint32_t compute_pipelinestat_enable; /* ordinal11 */
- uint32_t compute_perfcount_enable; /* ordinal12 */
- uint32_t compute_pgm_lo; /* ordinal13 */
- uint32_t compute_pgm_hi; /* ordinal14 */
- uint32_t compute_tba_lo; /* ordinal15 */
- uint32_t compute_tba_hi; /* ordinal16 */
- uint32_t compute_tma_lo; /* ordinal17 */
- uint32_t compute_tma_hi; /* ordinal18 */
- uint32_t compute_pgm_rsrc1; /* ordinal19 */
- uint32_t compute_pgm_rsrc2; /* ordinal20 */
- uint32_t compute_vmid; /* ordinal21 */
- uint32_t compute_resource_limits; /* ordinal22 */
- uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */
- uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */
- uint32_t compute_tmpring_size; /* ordinal25 */
- uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */
- uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */
- uint32_t compute_restart_x; /* ordinal28 */
- uint32_t compute_restart_y; /* ordinal29 */
- uint32_t compute_restart_z; /* ordinal30 */
- uint32_t compute_thread_trace_enable; /* ordinal31 */
- uint32_t compute_misc_reserved; /* ordinal32 */
- uint32_t compute_dispatch_id; /* ordinal33 */
- uint32_t compute_threadgroup_id; /* ordinal34 */
- uint32_t compute_relaunch; /* ordinal35 */
- uint32_t compute_wave_restore_addr_lo; /* ordinal36 */
- uint32_t compute_wave_restore_addr_hi; /* ordinal37 */
- uint32_t compute_wave_restore_control; /* ordinal38 */
- uint32_t reserved9; /* ordinal39 */
- uint32_t reserved10; /* ordinal40 */
- uint32_t reserved11; /* ordinal41 */
- uint32_t reserved12; /* ordinal42 */
- uint32_t reserved13; /* ordinal43 */
- uint32_t reserved14; /* ordinal44 */
- uint32_t reserved15; /* ordinal45 */
- uint32_t reserved16; /* ordinal46 */
- uint32_t reserved17; /* ordinal47 */
- uint32_t reserved18; /* ordinal48 */
- uint32_t reserved19; /* ordinal49 */
- uint32_t reserved20; /* ordinal50 */
- uint32_t reserved21; /* ordinal51 */
- uint32_t reserved22; /* ordinal52 */
- uint32_t reserved23; /* ordinal53 */
- uint32_t reserved24; /* ordinal54 */
- uint32_t reserved25; /* ordinal55 */
- uint32_t reserved26; /* ordinal56 */
- uint32_t reserved27; /* ordinal57 */
- uint32_t reserved28; /* ordinal58 */
- uint32_t reserved29; /* ordinal59 */
- uint32_t reserved30; /* ordinal60 */
- uint32_t reserved31; /* ordinal61 */
- uint32_t reserved32; /* ordinal62 */
- uint32_t reserved33; /* ordinal63 */
- uint32_t reserved34; /* ordinal64 */
- uint32_t compute_user_data_0; /* ordinal65 */
- uint32_t compute_user_data_1; /* ordinal66 */
- uint32_t compute_user_data_2; /* ordinal67 */
- uint32_t compute_user_data_3; /* ordinal68 */
- uint32_t compute_user_data_4; /* ordinal69 */
- uint32_t compute_user_data_5; /* ordinal70 */
- uint32_t compute_user_data_6; /* ordinal71 */
- uint32_t compute_user_data_7; /* ordinal72 */
- uint32_t compute_user_data_8; /* ordinal73 */
- uint32_t compute_user_data_9; /* ordinal74 */
- uint32_t compute_user_data_10; /* ordinal75 */
- uint32_t compute_user_data_11; /* ordinal76 */
- uint32_t compute_user_data_12; /* ordinal77 */
- uint32_t compute_user_data_13; /* ordinal78 */
- uint32_t compute_user_data_14; /* ordinal79 */
- uint32_t compute_user_data_15; /* ordinal80 */
- uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */
- uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */
- uint32_t reserved35; /* ordinal83 */
- uint32_t reserved36; /* ordinal84 */
- uint32_t reserved37; /* ordinal85 */
- uint32_t cp_mqd_query_time_lo; /* ordinal86 */
- uint32_t cp_mqd_query_time_hi; /* ordinal87 */
- uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */
- uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */
- uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */
- uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */
- uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */
- uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */
- uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */
- uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */
- uint32_t reserved38; /* ordinal96 */
- uint32_t reserved39; /* ordinal97 */
- uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */
- uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */
- uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */
- uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */
- uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */
- uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */
- uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */
- uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */
- uint32_t reserved40; /* ordinal106 */
- uint32_t reserved41; /* ordinal107 */
- uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */
- uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */
- uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */
- uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */
- uint32_t reserved42; /* ordinal112 */
- uint32_t reserved43; /* ordinal113 */
- uint32_t cp_pq_exe_status_lo; /* ordinal114 */
- uint32_t cp_pq_exe_status_hi; /* ordinal115 */
- uint32_t cp_packet_id_lo; /* ordinal116 */
- uint32_t cp_packet_id_hi; /* ordinal117 */
- uint32_t cp_packet_exe_status_lo; /* ordinal118 */
- uint32_t cp_packet_exe_status_hi; /* ordinal119 */
- uint32_t gds_save_base_addr_lo; /* ordinal120 */
- uint32_t gds_save_base_addr_hi; /* ordinal121 */
- uint32_t gds_save_mask_lo; /* ordinal122 */
- uint32_t gds_save_mask_hi; /* ordinal123 */
- uint32_t ctx_save_base_addr_lo; /* ordinal124 */
- uint32_t ctx_save_base_addr_hi; /* ordinal125 */
- uint32_t reserved44; /* ordinal126 */
- uint32_t reserved45; /* ordinal127 */
- uint32_t cp_mqd_base_addr_lo; /* ordinal128 */
- uint32_t cp_mqd_base_addr_hi; /* ordinal129 */
- uint32_t cp_hqd_active; /* ordinal130 */
- uint32_t cp_hqd_vmid; /* ordinal131 */
- uint32_t cp_hqd_persistent_state; /* ordinal132 */
- uint32_t cp_hqd_pipe_priority; /* ordinal133 */
- uint32_t cp_hqd_queue_priority; /* ordinal134 */
- uint32_t cp_hqd_quantum; /* ordinal135 */
- uint32_t cp_hqd_pq_base_lo; /* ordinal136 */
- uint32_t cp_hqd_pq_base_hi; /* ordinal137 */
- uint32_t cp_hqd_pq_rptr; /* ordinal138 */
- uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */
- uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */
- uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */
- uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */
- uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */
- uint32_t cp_hqd_pq_wptr; /* ordinal144 */
- uint32_t cp_hqd_pq_control; /* ordinal145 */
- uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */
- uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */
- uint32_t cp_hqd_ib_rptr; /* ordinal148 */
- uint32_t cp_hqd_ib_control; /* ordinal149 */
- uint32_t cp_hqd_iq_timer; /* ordinal150 */
- uint32_t cp_hqd_iq_rptr; /* ordinal151 */
- uint32_t cp_hqd_dequeue_request; /* ordinal152 */
- uint32_t cp_hqd_dma_offload; /* ordinal153 */
- uint32_t cp_hqd_sema_cmd; /* ordinal154 */
- uint32_t cp_hqd_msg_type; /* ordinal155 */
- uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */
- uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */
- uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */
- uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */
- uint32_t cp_hqd_hq_status0; /* ordinal160 */
- uint32_t cp_hqd_hq_control0; /* ordinal161 */
- uint32_t cp_mqd_control; /* ordinal162 */
- uint32_t cp_hqd_hq_status1; /* ordinal163 */
- uint32_t cp_hqd_hq_control1; /* ordinal164 */
- uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */
- uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */
- uint32_t cp_hqd_eop_control; /* ordinal167 */
- uint32_t cp_hqd_eop_rptr; /* ordinal168 */
- uint32_t cp_hqd_eop_wptr; /* ordinal169 */
- uint32_t cp_hqd_eop_done_events; /* ordinal170 */
- uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */
- uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */
- uint32_t cp_hqd_ctx_save_control; /* ordinal173 */
- uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */
- uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */
- uint32_t cp_hqd_wg_state_offset; /* ordinal176 */
- uint32_t cp_hqd_ctx_save_size; /* ordinal177 */
- uint32_t cp_hqd_gds_resource_state; /* ordinal178 */
- uint32_t cp_hqd_error; /* ordinal179 */
- uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */
- uint32_t cp_hqd_eop_dones; /* ordinal181 */
- uint32_t reserved46; /* ordinal182 */
- uint32_t reserved47; /* ordinal183 */
- uint32_t reserved48; /* ordinal184 */
- uint32_t reserved49; /* ordinal185 */
- uint32_t reserved50; /* ordinal186 */
- uint32_t reserved51; /* ordinal187 */
- uint32_t reserved52; /* ordinal188 */
- uint32_t reserved53; /* ordinal189 */
- uint32_t reserved54; /* ordinal190 */
- uint32_t reserved55; /* ordinal191 */
- uint32_t iqtimer_pkt_header; /* ordinal192 */
- uint32_t iqtimer_pkt_dw0; /* ordinal193 */
- uint32_t iqtimer_pkt_dw1; /* ordinal194 */
- uint32_t iqtimer_pkt_dw2; /* ordinal195 */
- uint32_t iqtimer_pkt_dw3; /* ordinal196 */
- uint32_t iqtimer_pkt_dw4; /* ordinal197 */
- uint32_t iqtimer_pkt_dw5; /* ordinal198 */
- uint32_t iqtimer_pkt_dw6; /* ordinal199 */
- uint32_t iqtimer_pkt_dw7; /* ordinal200 */
- uint32_t iqtimer_pkt_dw8; /* ordinal201 */
- uint32_t iqtimer_pkt_dw9; /* ordinal202 */
- uint32_t iqtimer_pkt_dw10; /* ordinal203 */
- uint32_t iqtimer_pkt_dw11; /* ordinal204 */
- uint32_t iqtimer_pkt_dw12; /* ordinal205 */
- uint32_t iqtimer_pkt_dw13; /* ordinal206 */
- uint32_t iqtimer_pkt_dw14; /* ordinal207 */
- uint32_t iqtimer_pkt_dw15; /* ordinal208 */
- uint32_t iqtimer_pkt_dw16; /* ordinal209 */
- uint32_t iqtimer_pkt_dw17; /* ordinal210 */
- uint32_t iqtimer_pkt_dw18; /* ordinal211 */
- uint32_t iqtimer_pkt_dw19; /* ordinal212 */
- uint32_t iqtimer_pkt_dw20; /* ordinal213 */
- uint32_t iqtimer_pkt_dw21; /* ordinal214 */
- uint32_t iqtimer_pkt_dw22; /* ordinal215 */
- uint32_t iqtimer_pkt_dw23; /* ordinal216 */
- uint32_t iqtimer_pkt_dw24; /* ordinal217 */
- uint32_t iqtimer_pkt_dw25; /* ordinal218 */
- uint32_t iqtimer_pkt_dw26; /* ordinal219 */
- uint32_t iqtimer_pkt_dw27; /* ordinal220 */
- uint32_t iqtimer_pkt_dw28; /* ordinal221 */
- uint32_t iqtimer_pkt_dw29; /* ordinal222 */
- uint32_t iqtimer_pkt_dw30; /* ordinal223 */
- uint32_t iqtimer_pkt_dw31; /* ordinal224 */
- uint32_t reserved56; /* ordinal225 */
- uint32_t reserved57; /* ordinal226 */
- uint32_t reserved58; /* ordinal227 */
- uint32_t set_resources_header; /* ordinal228 */
- uint32_t set_resources_dw1; /* ordinal229 */
- uint32_t set_resources_dw2; /* ordinal230 */
- uint32_t set_resources_dw3; /* ordinal231 */
- uint32_t set_resources_dw4; /* ordinal232 */
- uint32_t set_resources_dw5; /* ordinal233 */
- uint32_t set_resources_dw6; /* ordinal234 */
- uint32_t set_resources_dw7; /* ordinal235 */
- uint32_t reserved59; /* ordinal236 */
- uint32_t reserved60; /* ordinal237 */
- uint32_t reserved61; /* ordinal238 */
- uint32_t reserved62; /* ordinal239 */
- uint32_t reserved63; /* ordinal240 */
- uint32_t reserved64; /* ordinal241 */
- uint32_t reserved65; /* ordinal242 */
- uint32_t reserved66; /* ordinal243 */
- uint32_t reserved67; /* ordinal244 */
- uint32_t reserved68; /* ordinal245 */
- uint32_t reserved69; /* ordinal246 */
- uint32_t reserved70; /* ordinal247 */
- uint32_t reserved71; /* ordinal248 */
- uint32_t reserved72; /* ordinal249 */
- uint32_t reserved73; /* ordinal250 */
- uint32_t reserved74; /* ordinal251 */
- uint32_t reserved75; /* ordinal252 */
- uint32_t reserved76; /* ordinal253 */
- uint32_t reserved77; /* ordinal254 */
- uint32_t reserved78; /* ordinal255 */
-
- uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
-};
-
static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
{
int i, r;
@@ -4761,34 +4504,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
u32 *buf;
struct vi_mqd *mqd;
- /* init the pipes */
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
- int me = (i < 4) ? 1 : 2;
- int pipe = (i < 4) ? i : (i - 4);
-
- eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
- eop_gpu_addr >>= 8;
-
- vi_srbm_select(adev, me, pipe, 0, 0);
-
- /* write the EOP addr */
- WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
- WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
-
- /* set the VMID assigned */
- WREG32(mmCP_HQD_VMID, 0);
-
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(mmCP_HQD_EOP_CONTROL);
- tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
- (order_base_2(MEC_HPD_SIZE / 4) - 1));
- WREG32(mmCP_HQD_EOP_CONTROL, tmp);
- }
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
- /* init the queues. Just two for now. */
+ /* init the queues. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
@@ -4840,6 +4556,22 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
ring->pipe,
ring->queue, 0);
+ eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
+ eop_gpu_addr >>= 8;
+
+ /* write the EOP addr */
+ WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
+ WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
+
+ /* set the VMID assigned */
+ WREG32(mmCP_HQD_VMID, 0);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ (order_base_2(MEC_HPD_SIZE / 4) - 1));
+ WREG32(mmCP_HQD_EOP_CONTROL, tmp);
+
/* disable wptr polling */
tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -4923,9 +4655,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo);
WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
@@ -5096,6 +4828,10 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ if (amdgpu_sriov_vf(adev)) {
+ pr_debug("For SRIOV client, shouldn't do anything.\n");
+ return 0;
+ }
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
gfx_v8_0_cp_compute_fini(adev);
@@ -5438,9 +5174,70 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32(mmSQ_IND_DATA);
+}
+
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t regno, uint32_t num, uint32_t *out)
+{
+ WREG32(mmSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (regno << SQ_IND_INDEX__INDEX__SHIFT) |
+ (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK) |
+ (SQ_IND_INDEX__AUTO_INCR_MASK));
+ while (num--)
+ *(out++) = RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
+static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t start,
+ uint32_t size, uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, 0,
+ start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
+}
+
+
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
+ .read_wave_data = &gfx_v8_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
};
static int gfx_v8_0_early_init(void *handle)
@@ -5892,29 +5689,24 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
- /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
- * Cmp_busy/GFX_Idle interrupts
- */
- gfx_v8_0_enable_gui_idle_interrupt(adev, true);
-
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
if (temp1 != data1)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
- /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
+ /* : wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* 3 - clear cgcg override */
+ /* 2 - clear cgcg override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
- /* 4 - write cmd to set CGLS */
+ /* 3 - write cmd to set CGLS */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
- /* 5 - enable cgcg */
+ /* 4 - enable cgcg */
data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5932,6 +5724,11 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+
+ /* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
+ * Cmp_busy/GFX_Idle interrupts
+ */
+ gfx_v8_0_enable_gui_idle_interrupt(adev, true);
} else {
/* disable cntx_empty_int_enable & GFX Idle interrupt */
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
@@ -6120,7 +5917,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6148,6 +5945,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+ EVENT_INDEX(4));
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+ EVENT_INDEX(0));
+}
+
+
static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6222,7 +6031,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -6240,11 +6049,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
- /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
- if (usepfp)
- amdgpu_ring_insert_nop(ring, 128);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6337,6 +6142,7 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ gfx_v8_0_ring_emit_vgt_flush(ring);
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
@@ -6360,42 +6166,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -6541,7 +6311,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
@@ -6562,10 +6332,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6579,15 +6361,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6599,8 +6389,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6753,3 +6541,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f829297..788cc3ab584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078..45a573e63d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1,4 +1,3 @@
-
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -26,7 +25,16 @@
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "si_enums.h"
static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -37,6 +45,16 @@ MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+#define MC_SEQ_MISC0__MT__DDR2 0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
+#define MC_SEQ_MISC0__MT__HBM 0x60000000
+#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
+
+
static const u32 crtc_offsets[6] =
{
SI_CRTC0_REGISTER_OFFSET,
@@ -57,14 +75,14 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
gmc_v6_0_wait_for_idle((void *)adev);
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+ blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
/* Block CPU access */
- WREG32(BIF_FB_EN, 0);
+ WREG32(mmBIF_FB_EN, 0);
/* blackout the MC */
blackout = REG_SET_FIELD(blackout,
- mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
udelay(100);
@@ -77,13 +95,13 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
u32 tmp;
/* unblackout the MC */
- tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
- tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
- WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+ WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
- tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
- tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
- WREG32(BIF_FB_EN, tmp);
+ tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
+ tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
+ WREG32(mmBIF_FB_EN, tmp);
if (adev->mode_info.num_crtc)
amdgpu_display_resume_mc_access(adev, save);
@@ -158,37 +176,37 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
new_fw_data = (const __le32 *)
(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+ running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
if (running == 0) {
/* reset the engine and set to writable */
- WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
- WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
}
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
- WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
}
/* put the engine back into the active state */
- WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
- WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+ if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+ if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
break;
udelay(1);
}
@@ -225,7 +243,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32((0xb08 + j), 0x00000000);
WREG32((0xb09 + j), 0x00000000);
}
- WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
gmc_v6_0_mc_stop(adev, &save);
@@ -233,24 +251,24 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
/* Update configuration */
- WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->mc.vram_end >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
/* XXX double check these! */
- WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
- WREG32(MC_VM_AGP_BASE, 0);
- WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
- WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(mmMC_VM_AGP_BASE, 0);
+ WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
@@ -265,16 +283,16 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
u32 tmp;
int chansize, numchan;
- tmp = RREG32(MC_ARB_RAMCFG);
- if (tmp & CHANSIZE_OVERRIDE) {
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (tmp & (1 << 11)) {
chansize = 16;
- } else if (tmp & CHANSIZE_MASK) {
+ } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
chansize = 64;
} else {
chansize = 32;
}
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
default:
numchan = 1;
@@ -309,15 +327,15 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
- adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.visible_vram_size = adev->mc.aper_size;
/* unless the user had overridden it, set the gart
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -329,9 +347,9 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
uint32_t vmid)
{
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
- WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
@@ -355,20 +373,20 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
{
u32 tmp;
- tmp = RREG32(VM_CONTEXT1_CNTL);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
- xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
@@ -383,33 +401,39 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL,
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
- ENABLE_L1_TLB |
- ENABLE_L1_FRAGMENT_PROCESSING |
- SYSTEM_ACCESS_MODE_NOT_IN_SYS |
- ENABLE_ADVANCED_DRIVER_MODEL |
- SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
+ MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+ MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
+ (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
- WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
- ENABLE_L2_FRAGMENT_PROCESSING |
- ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
- ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
- EFFECTIVE_L2_QUEUE_SIZE(7) |
- CONTEXT1_IDENTITY_ACCESS_MODE(1));
- WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
- WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- BANK_SELECT(4) |
- L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+ WREG32(mmVM_L2_CNTL,
+ VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
+ VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
+ VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+ (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+ WREG32(mmVM_L2_CNTL2,
+ VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
+ VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+ WREG32(mmVM_L2_CNTL3,
+ VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+ (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+ (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
- WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+ WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT0_CNTL2, 0);
- WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+ WREG32(mmVM_CONTEXT0_CNTL2, 0);
+ WREG32(mmVM_CONTEXT0_CNTL,
+ VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
+ (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+ VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
WREG32(0x575, 0);
WREG32(0x576, 0);
@@ -417,39 +441,41 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
- WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 16; i++) {
if (i < 8)
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
adev->gart.table_addr >> 12);
else
- WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
adev->gart.table_addr >> 12);
}
/* enable context1-15 */
- WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 4);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
- RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
- PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
- VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
- READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
- READ_PROTECTION_FAULT_ENABLE_DEFAULT |
- WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(mmVM_CONTEXT1_CNTL2, 4);
+ WREG32(mmVM_CONTEXT1_CNTL,
+ VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
+ (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+ ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
+ VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -488,19 +514,22 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}*/
/* Disable all tables */
- WREG32(VM_CONTEXT0_CNTL, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(mmVM_CONTEXT0_CNTL, 0);
+ WREG32(mmVM_CONTEXT1_CNTL, 0);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
- SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ WREG32(mmMC_VM_MX_L1_TLB_CNTL,
+ MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+ (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
- WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
- ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
- EFFECTIVE_L2_QUEUE_SIZE(7) |
- CONTEXT1_IDENTITY_ACCESS_MODE(1));
- WREG32(VM_L2_CNTL2, 0);
- WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ WREG32(mmVM_L2_CNTL,
+ VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+ (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+ (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+ WREG32(mmVM_L2_CNTL2, 0);
+ WREG32(mmVM_L2_CNTL3,
+ VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+ (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
amdgpu_gart_table_vram_unpin(adev);
}
@@ -523,7 +552,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
- u64 tmp = RREG32(MC_VM_FB_OFFSET);
+ u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
} else
@@ -540,19 +569,19 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
u32 mc_id;
- u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
- u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxPROTECTIONS);
+ u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+ u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
- mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxMEMORY_CLIENT_ID);
+ mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
- REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
- xxMEMORY_CLIENT_RW) ?
+ REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
}
@@ -655,7 +684,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
{
u32 orig, data;
- orig = data = RREG32(HDP_HOST_PATH_CNTL);
+ orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
@@ -663,7 +692,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
if (orig != data)
- WREG32(HDP_HOST_PATH_CNTL, data);
+ WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
@@ -671,7 +700,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
{
u32 orig, data;
- orig = data = RREG32(HDP_MEM_POWER_LS);
+ orig = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
@@ -679,7 +708,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
if (orig != data)
- WREG32(HDP_MEM_POWER_LS, data);
+ WREG32(mmHDP_MEM_POWER_LS, data);
}
*/
@@ -713,7 +742,7 @@ static int gmc_v6_0_early_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
- u32 tmp = RREG32(MC_SEQ_MISC0);
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
@@ -766,11 +795,6 @@ static int gmc_v6_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v6_0_mc_init(adev);
if (r)
return r;
@@ -879,7 +903,7 @@ static int gmc_v6_0_resume(void *handle)
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
@@ -895,7 +919,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+ tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK |
SRBM_STATUS__MCD_BUSY_MASK |
@@ -913,17 +937,17 @@ static int gmc_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+ SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+ SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
@@ -933,17 +957,17 @@ static int gmc_v6_0_soft_reset(void *handle)
}
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
@@ -969,20 +993,20 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp &= ~bits;
- WREG32(VM_CONTEXT0_CNTL, tmp);
- tmp = RREG32(VM_CONTEXT1_CNTL);
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp &= ~bits;
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp |= bits;
- WREG32(VM_CONTEXT0_CNTL, tmp);
- tmp = RREG32(VM_CONTEXT1_CNTL);
+ WREG32(mmVM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp |= bits;
- WREG32(VM_CONTEXT1_CNTL, tmp);
+ WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
default:
break;
@@ -997,9 +1021,9 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
- addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
- status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
@@ -1007,13 +1031,15 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ }
return 0;
}
@@ -1030,7 +1056,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1095,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4..ed2f64dec47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621..273b16fb9459 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -385,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -711,7 +711,7 @@ static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -945,11 +945,6 @@ static int gmc_v7_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
@@ -1198,13 +1193,15 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v7_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ }
return 0;
}
@@ -1235,7 +1232,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1270,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7a..ebce2966c1c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a542..0daac3a5be79 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
static const u32 stoney_mgcg_cgcg_init[] =
{
+ mmATC_MISC_CG, 0xffffffff, 0x000c0200,
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
@@ -471,7 +472,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -836,7 +837,7 @@ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -951,11 +952,6 @@ static int gmc_v8_0_sw_init(void *handle)
return r;
}
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
-
r = gmc_v8_0_mc_init(adev);
if (r)
return r;
@@ -1241,13 +1237,15 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v8_0_set_fault_enable_default(adev, false);
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
- dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+ if (printk_ratelimit()) {
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+ }
return 0;
}
@@ -1436,7 +1434,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1475,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119d..19b8a8aed204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511..ac21bb7bc0f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcb..3235f4277548 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a8..5a1bc358bcb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -2845,7 +2845,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
@@ -3243,6 +3245,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static int kv_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3273,6 +3287,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
};
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3307,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..fbe74a33899c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -775,11 +775,11 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
- 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
- .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10f..28b433729216 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..1170a64a3184 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -977,11 +977,11 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
- 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
- .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a3054..7aa223d35f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
#endif
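
Both SDMA hunks above follow the same pattern: the per-ring nop opcode, alignment mask, and worst-case frame/IB sizes move from ring fields and trivial getter callbacks into the shared, constant amdgpu_ring_funcs table, and the fence API is renamed to dma_fence. A reduced sketch of that design, using illustrative types only, not the driver's real structures:

/* Simplified model of the ring_funcs consolidation: per-engine constants
 * live in one shared const table instead of per-ring fields or trivial
 * callbacks returning fixed values. */
#include <stdint.h>
#include <stdio.h>

struct ring_funcs {
	uint32_t nop;			/* padding packet */
	uint32_t align_mask;		/* submission alignment */
	unsigned emit_frame_size;	/* worst-case dwords per frame */
	unsigned emit_ib_size;		/* dwords per indirect-buffer packet */
};

struct ring {
	const struct ring_funcs *funcs;	/* shared, read-only */
	const char *name;
};

/* one table shared by every instance of the same engine */
static const struct ring_funcs sdma_like_funcs = {
	.nop = 0x00000000,
	.align_mask = 0xf,
	.emit_frame_size = 6 + 3 + 6 + 12 + 10 + 10 + 10,
	.emit_ib_size = 7 + 6,
};

int main(void)
{
	struct ring r = { .funcs = &sdma_like_funcs, .name = "sdma0" };

	printf("%s: reserve %u dwords per frame, pad with 0x%08x\n",
	       r.name, r.funcs->emit_frame_size, (unsigned)r.funcs->nop);
	return 0;
}
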
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8..3ed8ad8725b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
+#include "dce_virtual.h"
static const u32 tahiti_golden_registers[] =
{
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
-/* {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- */
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
};
int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
case CHIP_OLAND:
- adev->ip_blocks = verde_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- adev->ip_blocks = hainan_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e5..589225080c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
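
The si.c rework above drops the static per-ASIC IP-block arrays in favour of incremental amdgpu_ip_block_add() calls, which is what makes it possible to slot in the virtual display block at runtime. A simplified standalone model of that registration pattern follows; all names here are stand-ins, not the real driver API:

/* Reduced model of IP-block registration: blocks are appended one by one,
 * so optional blocks (e.g. a virtual display instead of DCE) can be chosen
 * per device instead of baking fixed arrays per ASIC. */
#include <stdbool.h>
#include <stdio.h>

struct ip_block_version { const char *name; int major, minor; };

#define MAX_IP_BLOCKS 16

struct device {
	const struct ip_block_version *blocks[MAX_IP_BLOCKS];
	int num_blocks;
	bool virtual_display;
};

static int ip_block_add(struct device *dev, const struct ip_block_version *v)
{
	if (dev->num_blocks >= MAX_IP_BLOCKS)
		return -1;
	dev->blocks[dev->num_blocks++] = v;
	return 0;
}

static const struct ip_block_version common      = { "common", 1, 0 };
static const struct ip_block_version dce         = { "dce", 6, 0 };
static const struct ip_block_version dce_virtual = { "dce_virtual", 1, 0 };

int main(void)
{
	struct device dev = { .virtual_display = true };

	ip_block_add(&dev, &common);
	ip_block_add(&dev, dev.virtual_display ? &dce_virtual : &dce);

	for (int i = 0; i < dev.num_blocks; i++)
		printf("%d: %s v%d.%d\n", i, dev.blocks[i]->name,
		       dev.blocks[i]->major, dev.blocks[i]->minor);
	return 0;
}
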
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..3dd552ae0b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
- 6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
- 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
- .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
- .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54b..5ac1b8452fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b..6c65a1a2de79 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+ adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+ adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -3477,6 +3479,50 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (adev->asic_type == CHIP_PITCAIRN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (adev->asic_type == CHIP_VERDE) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (adev->pdev->vendor == p->chip_vendor &&
@@ -3489,22 +3535,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (adev->pdev->device == 0x6811 &&
- adev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (adev->pdev->device == 0x6665 &&
- adev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- /* Limit clocks for some HD8600 parts */
- if (adev->pdev->device == 0x6660 &&
- adev->pdev->revision == 0x83) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -7320,7 +7350,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7686,6 +7716,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605))
chip_name = "oland_k";
@@ -7777,6 +7808,8 @@ static int si_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
@@ -7957,6 +7990,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+ const struct rv7xx_pl *si_cpl2)
+{
+ return ((si_cpl1->mclk == si_cpl2->mclk) &&
+ (si_cpl1->sclk == si_cpl2->sclk) &&
+ (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+ (si_cpl1->vddc == si_cpl2->vddc) &&
+ (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct si_ps *si_cps;
+ struct si_ps *si_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ si_cps = si_get_ps(cps);
+ si_rps = si_get_ps(rps);
+
+ if (si_cps == NULL) {
+ printk("si_cps is NULL\n");
+ *equal = false;
+ return 0;
+ }
+
+ if (si_cps->performance_level_count != si_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < si_cps->performance_level_count; i++) {
+ if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+ &(si_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
@@ -7991,6 +8075,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .check_state_equal = &si_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8096,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
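
si_check_state_equal above treats two power states as equal only when every performance level matches and the UVD/VCE clocks agree, so a pending video clock change still forces a switch. A reduced standalone sketch of that comparison logic, with simplified structures rather than the amdgpu ones:

/* Reduced model of the state-equality test: compare all performance
 * levels, then use auxiliary (UVD) clocks as the tie-breaker. */
#include <stdbool.h>
#include <stdio.h>

struct level { unsigned sclk, mclk, vddc; };

struct state {
	struct level levels[4];
	int num_levels;
	unsigned vclk, dclk;	/* UVD clocks used as tie-breakers */
};

static bool levels_equal(const struct level *a, const struct level *b)
{
	return a->sclk == b->sclk && a->mclk == b->mclk && a->vddc == b->vddc;
}

static bool states_equal(const struct state *cur, const struct state *req)
{
	if (cur->num_levels != req->num_levels)
		return false;
	for (int i = 0; i < cur->num_levels; i++)
		if (!levels_equal(&cur->levels[i], &req->levels[i]))
			return false;
	return cur->vclk == req->vclk && cur->dclk == req->dclk;
}

int main(void)
{
	struct state a = {
		.levels = { { 30000, 40000, 900 } },
		.num_levels = 1,
	};
	struct state b = a;

	b.vclk = 54000;	/* same levels, but UVD now needs a clock */
	printf("equal: %d\n", states_equal(&a, &b));	/* prints 0 */
	return 0;
}
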
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
new file mode 100644
index 000000000000..fde2086246fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SI_ENUMS_H
+#define SI_ENUMS_H
+
+#define VBLANK_INT_MASK (1 << 0)
+#define DC_HPDx_INT_EN (1 << 16)
+#define VBLANK_ACK (1 << 4)
+#define VLINE_ACK (1 << 4)
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+#define VGA_VSTATUS_CNTL 0xFFFCFFFF
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define INTERLEAVE_EN (1 << 0)
+
+#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
+#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+
+#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+
+#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
+#define GRPH_FORMAT_INDEXED 0
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
+#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+
+#define CURSOR_EN (1 << 0)
+#define CURSOR_MODE(x) (((x) & 0x3) << 8)
+#define CURSOR_MONO 0
+#define CURSOR_24_1 1
+#define CURSOR_24_8_PRE_MULT 2
+#define CURSOR_24_8_UNPRE_MULT 3
+#define CURSOR_2X_MAGNIFY (1 << 16)
+#define CURSOR_FORCE_MC_ON (1 << 20)
+#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+#define CURSOR_URGENT_ALWAYS 0
+#define CURSOR_URGENT_1_8 1
+#define CURSOR_URGENT_1_4 2
+#define CURSOR_URGENT_3_8 3
+#define CURSOR_URGENT_1_2 4
+#define CURSOR_UPDATE_PENDING (1 << 0)
+#define CURSOR_UPDATE_TAKEN (1 << 1)
+#define CURSOR_UPDATE_LOCK (1 << 16)
+#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define AMDGPU_NUM_OF_VMIDS 8
+#define SI_CRTC0_REGISTER_OFFSET 0
+#define SI_CRTC1_REGISTER_OFFSET 0x300
+#define SI_CRTC2_REGISTER_OFFSET 0x2600
+#define SI_CRTC3_REGISTER_OFFSET 0x2900
+#define SI_CRTC4_REGISTER_OFFSET 0x2c00
+#define SI_CRTC5_REGISTER_OFFSET 0x2f00
+
+#define DMA0_REGISTER_OFFSET 0x000
+#define DMA1_REGISTER_OFFSET 0x200
+#define ES_AND_GS_AUTO 3
+#define RADEON_PACKET_TYPE3 3
+#define CE_PARTITION_BASE 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GFX_POWER_STATUS (1 << 1)
+#define GFX_CLOCK_STATUS (1 << 2)
+#define GFX_LS_STATUS (1 << 3)
+#define RLC_BUSY_STATUS (1 << 0)
+
+#define RLC_PUD(x) ((x) << 0)
+#define RLC_PUD_MASK (0xff << 0)
+#define RLC_PDD(x) ((x) << 8)
+#define RLC_PDD_MASK (0xff << 8)
+#define RLC_TTPD(x) ((x) << 16)
+#define RLC_TTPD_MASK (0xff << 16)
+#define RLC_MSD(x) ((x) << 24)
+#define RLC_MSD_MASK (0xff << 24)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_BASE_INDEX(x) ((x) << 0)
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_ALLOC_GDS 0x1B
+#define PACKET3_WRITE_GDS_RAM 0x1C
+#define PACKET3_ATOMIC_GDS 0x1D
+#define PACKET3_ATOMIC 0x1E
+#define PACKET3_OCCLUSION_QUERY 0x1F
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER_CONST 0x31
+#define PACKET3_INDIRECT_BUFFER 0x3F
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_DEST_BASE_0_ENA (1 << 0)
+# define PACKET3_DEST_BASE_1_ENA (1 << 1)
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_DEST_BASE_2_ENA (1 << 19)
+# define PACKET3_DEST_BASE_3_ENA (1 << 21)
+# define PACKET3_TCL1_ACTION_ENA (1 << 22)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_LOAD_CONFIG_REG 0x5F
+#define PACKET3_LOAD_CONTEXT_REG 0x60
+#define PACKET3_LOAD_SH_REG 0x61
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00002000
+#define PACKET3_SET_CONFIG_REG_END 0x00002c00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x000a000
+#define PACKET3_SET_CONTEXT_REG_END 0x000a400
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_START 0x00002c00
+#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_ME_WRITE 0x7A
+#define PACKET3_SCRATCH_RAM_WRITE 0x7D
+#define PACKET3_SCRATCH_RAM_READ 0x7E
+#define PACKET3_CE_WRITE 0x7F
+#define PACKET3_LOAD_CONST_RAM 0x80
+#define PACKET3_WRITE_CONST_RAM 0x81
+#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
+#define PACKET3_DUMP_CONST_RAM 0x83
+#define PACKET3_INCREMENT_CE_COUNTER 0x84
+#define PACKET3_INCREMENT_DE_COUNTER 0x85
+#define PACKET3_WAIT_ON_CE_COUNTER 0x86
+#define PACKET3_WAIT_ON_DE_COUNTER 0x87
+#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
+#define PACKET3_SET_CE_DE_COUNTERS 0x89
+#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
+#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
+
+#endif
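
The PACKET3() macro added above packs a PM4 type-3 header from an opcode and a dword count. A short standalone example that reuses the macro exactly as defined in this header; the WRITE_DATA payload itself is omitted, only the header encoding is shown:

/* Build a PM4 type-3 header with the PACKET3 macro from si_enums.h. */
#include <stdint.h>
#include <stdio.h>

#define RADEON_PACKET_TYPE3 3
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
			(((op) & 0xFF) << 8) |        \
			((n) & 0x3FFF) << 16)
#define PACKET3_WRITE_DATA 0x37

int main(void)
{
	/* header of a WRITE_DATA packet with a count field of 3 */
	uint32_t header = PACKET3(PACKET3_WRITE_DATA, 3);

	printf("0x%08x\n", (unsigned)header);	/* 0xc0033700 */
	return 0;
}
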
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a2360..db0f36846661 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &si_ih_funcs;
}
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369c..42e64a53e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
#ifndef __SI_IH_H__
#define __SI_IH_H__
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb449..52b71ee58793 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7..499027eee5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
#ifndef __TONGA_IH_H__
#define __TONGA_IH_H__
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..96444e4d862a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,13 +36,17 @@
#include "bif/bif_4_1_d.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-
+static int uvd_v4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -116,8 +120,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -152,9 +155,9 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;
- /* raise clocks while booting up the VCPU */
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
+ uvd_v4_2_init_cg(adev);
+ uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
goto done;
@@ -194,8 +197,6 @@ static int uvd_v4_2_hw_init(void *handle)
amdgpu_ring_commit(ring);
done:
- /* lower clocks again */
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
if (!r)
DRM_INFO("UVD initialized successfully.\n");
@@ -272,9 +273,6 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
uvd_v4_2_mc_resume(adev);
- /* disable clock gating */
- WREG32(mmUVD_CGC_GATE, 0);
-
/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -526,20 +524,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
- 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -580,8 +564,6 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
-
- uvd_v4_2_init_cg(adev);
}
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
@@ -591,7 +573,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
- data = 0xfff;
+ data |= 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
@@ -615,6 +597,8 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
{
u32 tmp, tmp2;
+ WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
+
tmp = RREG32(mmUVD_CGC_CTRL);
tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
@@ -738,7 +722,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +740,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +762,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +779,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840..8a0444bb8b95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
#endif
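
One small but real fix in the uvd_v4_2 hunk above is the change from "data = 0xfff" to "data |= 0xfff" in the MGCG path: a register read-modify-write has to OR in the bits it sets so that other fields already programmed in UVD_CGC_MEM_CTRL survive. A tiny standalone illustration, with a plain variable standing in for the register:

/* Why read-modify-write uses |= rather than plain assignment. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x00ab0000;		/* other fields already set */

	uint32_t clobbered = 0xfff;		/* old style: upper fields lost */
	uint32_t preserved = reg | 0xfff;	/* new style: upper fields kept */

	printf("clobbered=0x%08x preserved=0x%08x\n",
	       (unsigned)clobbered, (unsigned)preserved);
	return 0;
}
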
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..a79e283590fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,12 +33,17 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
-
+static int uvd_v5_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
+static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v5_0_ring_get_rptr - get read pointer
*
@@ -112,8 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -148,9 +152,6 @@ static int uvd_v5_0_hw_init(void *handle)
uint32_t tmp;
int r;
- /* raise clocks while booting up the VCPU */
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
r = uvd_v5_0_start(adev);
if (r)
goto done;
@@ -188,11 +189,7 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
-
done:
- /* lower clocks again */
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-
if (!r)
DRM_INFO("UVD initialized successfully.\n");
@@ -225,6 +222,7 @@ static int uvd_v5_0_suspend(void *handle)
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -312,8 +310,9 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
uvd_v5_0_mc_resume(adev);
- /* disable clock gating */
- WREG32(mmUVD_CGC_GATE, 0);
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_enable_mgcg(adev, true);
/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -577,20 +576,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
- 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -641,16 +626,12 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
- uint32_t data, data1, data2, suvd_flags;
+ uint32_t data1, data3, suvd_flags;
- data = RREG32(mmUVD_CGC_CTRL);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
- data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
-
- data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
- UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+ data3 = RREG32(mmUVD_CGC_GATE);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
@@ -658,6 +639,51 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
+ if (enable) {
+ data3 |= (UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__JPEG_MASK |
+ UVD_CGC_GATE__SCPU_MASK);
+ /* only in pg enabled, we can gate clock to vcpu*/
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ data3 |= UVD_CGC_GATE__VCPU_MASK;
+ data3 &= ~UVD_CGC_GATE__REGS_MASK;
+ data1 |= suvd_flags;
+ } else {
+ data3 = 0;
+ data1 = 0;
+ }
+
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_CGC_GATE, data3);
+}
+
+static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data2;
+
+ data = RREG32(mmUVD_CGC_CTRL);
+ data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+
+ data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+ UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
+
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
@@ -688,11 +714,8 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
- WREG32(mmUVD_CGC_GATE, 0);
- WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
@@ -737,6 +760,32 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
+static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data |= 0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ }
+}
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -752,17 +801,18 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
curstate = state;
if (enable) {
- /* disable HW gating and enable Sw gating */
- uvd_v5_0_set_sw_clock_gating(adev);
- } else {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
return -EBUSY;
+ uvd_v5_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v5_0_set_hw_clock_gating(adev); */
+ } else {
+ uvd_v5_0_enable_clock_gating(adev, false);
}
+ uvd_v5_0_set_sw_clock_gating(adev);
return 0;
}
@@ -789,7 +839,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +857,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +879,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +896,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de..2eaaea793ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d75656..ba0bbf7138dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -42,6 +42,10 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
+static int uvd_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
+static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v6_0_ring_get_rptr - get read pointer
@@ -116,8 +120,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -394,11 +397,11 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_enable_mgcg(adev, true);
uvd_v6_0_mc_resume(adev);
- /* disable clock gating */
- WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
-
/* disable interupt */
WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
@@ -725,31 +728,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
- 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -862,22 +840,72 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ uint32_t data1, data3;
+
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+ data3 = RREG32(mmUVD_CGC_GATE);
+
+ data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK |
+ UVD_SUVD_CGC_GATE__SRE_H264_MASK |
+ UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SIT_H264_MASK |
+ UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SCM_H264_MASK |
+ UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
+ UVD_SUVD_CGC_GATE__SDB_H264_MASK |
+ UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
+
+ if (enable) {
+ data3 |= (UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__LMI_UMC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__JPEG_MASK |
+ UVD_CGC_GATE__SCPU_MASK |
+ UVD_CGC_GATE__JPEG2_MASK);
+ /* only in pg enabled, we can gate clock to vcpu*/
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ data3 |= UVD_CGC_GATE__VCPU_MASK;
+
+ data3 &= ~UVD_CGC_GATE__REGS_MASK;
+ } else {
+ data3 = 0;
+ }
+
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_CGC_GATE, data3);
+}
+
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
- uint32_t data, data1, data2, suvd_flags;
+ uint32_t data, data2;
data = RREG32(mmUVD_CGC_CTRL);
- data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
@@ -910,11 +938,8 @@ static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
WREG32(mmUVD_CGC_CTRL, data);
- WREG32(mmUVD_CGC_GATE, 0);
- WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
@@ -961,44 +986,53 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
+ bool enable)
{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+ u32 orig, data;
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data |= 0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(mmUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ if (orig != data)
+ WREG32(mmUVD_CGC_CTRL, data);
+ }
}
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (state == AMD_CG_STATE_GATE) {
- /* disable HW gating and enable Sw gating */
- uvd_v6_0_set_sw_clock_gating(adev);
- } else {
+ if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
return -EBUSY;
-
+ uvd_v6_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v6_0_set_hw_clock_gating(adev); */
+ } else {
+ /* disable HW gating and enable SW gating */
+ uvd_v6_0_enable_clock_gating(adev, false);
}
-
+ uvd_v6_0_set_sw_clock_gating(adev);
return 0;
}
@@ -1027,7 +1061,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1082,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1105,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1133,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1156,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a2352986..d3d48c6428cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
#endif
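
The uvd_v6_0.c hunks above drop the per-ring get_emit_ib_size()/get_dma_frame_size() callbacks and fold the same dword counts into the new .emit_frame_size and .emit_ib_size fields of struct amdgpu_ring_funcs, alongside .type, .align_mask and .nop. A minimal standalone C sketch of that pattern follows; the struct and names are simplified stand-ins, not the kernel's amdgpu structures:

    #include <stdio.h>

    /* Simplified stand-in for struct amdgpu_ring_funcs: the ring code now
     * reads constant dword budgets from the funcs table instead of calling
     * back into each IP block. */
    struct ring_funcs {
    	unsigned emit_frame_size;	/* dwords reserved per frame */
    	unsigned emit_ib_size;		/* dwords reserved per indirect buffer */
    };

    static const struct ring_funcs uvd_like_funcs = {
    	.emit_frame_size =
    		2 +	/* hdp flush */
    		2 +	/* hdp invalidate */
    		10 +	/* pipeline sync */
    		14,	/* fence, no user fence */
    	.emit_ib_size = 8,
    };

    int main(void)
    {
    	unsigned num_ibs = 3;
    	/* dwords to reserve before emitting a job with num_ibs IBs */
    	unsigned ndw = uvd_like_funcs.emit_frame_size +
    		       num_ibs * uvd_like_funcs.emit_ib_size;
    	printf("reserve %u dwords\n", ndw);
    	return 0;
    }
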
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..38ed903dd6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vce.irq, 0);
if (r)
return r;
}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
return vce_v2_0_start(adev);
}
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
- .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd..4d15167654a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
#ifndef __VCE_V2_0_H__
#define __VCE_V2_0_H__
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec160..6b3293a1c7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -52,6 +52,8 @@
#define VCE_V3_0_STACK_SIZE (64 * 1024)
#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
+#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -132,7 +134,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
accessible but the firmware will throttle the clocks on the
fly as necessary.
*/
- if (gated) {
+ if (!gated) {
data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
@@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle)
if (r)
return r;
+ /* 52.8.3 required for 3 ring support */
+ if (adev->vce.fw_version < FW_52_8_3)
+ adev->vce.num_rings = 2;
+
r = amdgpu_vce_resume(adev);
if (r)
return r;
@@ -389,8 +395,7 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
return r;
}
@@ -808,28 +813,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
}
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 6 + /* vce_v3_0_emit_vm_flush */
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
@@ -850,10 +834,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,15 +853,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .emit_frame_size =
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -881,8 +878,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -910,3 +905,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81f..08b908c7de0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
#ifndef __VCE_V3_0_H__
#define __VCE_V3_0_H__
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
#endif
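
The vce_v3_0.c hunk above encodes the minimum firmware version for three-ring support as FW_52_8_3 = ((52 << 24) | (8 << 16) | (3 << 8)) and falls back to two rings when the loaded firmware is older. Below is a short standalone C sketch of that version packing and comparison; pack_fw_version() is a hypothetical helper, not a kernel function:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the FW_52_8_3 literal above: major in bits 31-24,
     * minor in bits 23-16, revision in bits 15-8. */
    static uint32_t pack_fw_version(uint32_t major, uint32_t minor, uint32_t rev)
    {
    	return (major << 24) | (minor << 16) | (rev << 8);
    }

    int main(void)
    {
    	uint32_t required = pack_fw_version(52, 8, 3);
    	uint32_t loaded   = pack_fw_version(52, 4, 0);	/* example older firmware */
    	unsigned num_rings = 3;

    	/* same shape as the check added in vce_v3_0_sw_init() */
    	if (loaded < required)
    		num_rings = 2;

    	printf("loaded 0x%08x, required 0x%08x -> %u rings\n",
    	       (unsigned)loaded, (unsigned)required, num_rings);
    	return 0;
    }
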
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f..9f771f4ffcb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -80,7 +80,9 @@
#include "dce_virtual.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
@@ -121,8 +123,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- r = RREG32(mmSMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -132,8 +134,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- WREG32(mmSMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ WREG32(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -437,12 +439,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
- WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
- WREG32(mmSMC_IND_DATA_0, 0);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+ WREG32(mmSMC_IND_DATA_11, 0);
/* set index to data for continuous read */
- WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+ dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
@@ -556,21 +558,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, false, true},
};
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
+{
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +686,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != asic_register_entry->reg_offset)
continue;
if (!asic_register_entry->untouched)
- *value = asic_register_entry->grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ asic_register_entry->grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
}
@@ -618,10 +698,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!vi_allowed_read_registers[i].untouched)
- *value = vi_allowed_read_registers[i].grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ vi_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
@@ -652,18 +731,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* vi_asic_reset - soft reset GPU
*
@@ -677,11 +744,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- vi_set_bios_scratch_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = vi_gpu_pci_config_reset(adev);
- vi_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -781,734 +848,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
- break;
-
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
@@ -1587,22 +926,25 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_UVD;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
@@ -1651,7 +993,7 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCE_MGCG;
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_UVD |
@@ -1908,7 +1250,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
@@ -1925,3 +1267,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.set_powergating_state = vi_common_set_powergating_state,
};
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ /* topaz has no DCE, UVD, VCE */
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_TONGA:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+ case CHIP_CARRIZO:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ case CHIP_STONEY:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
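
With the static per-ASIC ip_block tables removed, vi_set_ip_blocks() now assembles the IP list at runtime through amdgpu_ip_block_add(), so the virtual-display and ACP variants collapse into a single switch and each block is registered in initialization order. The standalone C sketch below illustrates the append-to-a-list pattern under simplified, assumed types; it is not the kernel's amdgpu_ip_block_add() implementation:

    #include <stdio.h>

    #define MAX_IP_BLOCKS 16

    /* Simplified stand-ins; the real structs carry type/major/minor/rev and
     * a full amd_ip_funcs table. */
    struct ip_block_version { const char *name; };

    struct device_ctx {
    	const struct ip_block_version *blocks[MAX_IP_BLOCKS];
    	int num_blocks;
    };

    /* Assumed shape of the helper: append one block version, preserving
     * registration order, which is also the init order. */
    static int ip_block_add(struct device_ctx *dev,
    			const struct ip_block_version *v)
    {
    	if (!v || dev->num_blocks >= MAX_IP_BLOCKS)
    		return -1;
    	dev->blocks[dev->num_blocks++] = v;
    	return 0;
    }

    static const struct ip_block_version common_block = { "common" };
    static const struct ip_block_version gmc_block    = { "gmc" };
    static const struct ip_block_version dce_block    = { "dce" };

    int main(void)
    {
    	struct device_ctx dev = { .num_blocks = 0 };

    	ip_block_add(&dev, &common_block);
    	ip_block_add(&dev, &gmc_block);
    	ip_block_add(&dev, &dce_block);

    	for (int i = 0; i < dev.num_blocks; i++)
    		printf("init %d: %s\n", i, dev.blocks[i]->name);
    	return 0;
    }
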
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 502094042462..575d7aed5d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
#ifndef __VI_H__
#define __VI_H__
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0..d1986276dbbd 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,29 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE,
};
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
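
The amd_shared.h hunk above moves struct amd_vce_state and the amd_vce_level enum into the shared header, presumably so power-management code on both sides can share the same VCE clock-state layout sized by AMD_MAX_VCE_LEVELS. A small hedged C sketch of indexing such a table by level; the clock numbers are placeholders, not real tables:

    #include <stdio.h>

    #define AMD_MAX_VCE_LEVELS 6

    enum amd_vce_level {
    	AMD_VCE_LEVEL_AC_ALL = 0,
    	AMD_VCE_LEVEL_DC_EE = 1,
    	AMD_VCE_LEVEL_DC_LL_LOW = 2,
    	AMD_VCE_LEVEL_DC_LL_HIGH = 3,
    	AMD_VCE_LEVEL_DC_GP_LOW = 4,
    	AMD_VCE_LEVEL_DC_GP_HIGH = 5,
    };

    /* Same field layout as the hunk above; the values used below are
     * placeholders, not real clock tables. */
    struct amd_vce_state {
    	unsigned evclk, ecclk;	/* VCE clocks */
    	unsigned sclk, mclk;	/* GPU clocks */
    	unsigned char clk_idx;
    	unsigned char pstate;
    };

    int main(void)
    {
    	struct amd_vce_state table[AMD_MAX_VCE_LEVELS] = {
    		[AMD_VCE_LEVEL_DC_LL_LOW] = { .evclk = 1, .ecclk = 1,
    					      .sclk = 1, .mclk = 1,
    					      .clk_idx = 0, .pstate = 0 },
    	};
    	enum amd_vce_level lvl = AMD_VCE_LEVEL_DC_LL_LOW;

    	printf("level %d -> clk_idx %u\n", (int)lvl, table[lvl].clk_idx);
    	return 0;
    }
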
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h
new file mode 100644
index 000000000000..7138fbf7256a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_d.h
@@ -0,0 +1,661 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BIF_3_0_D_H
+#define BIF_3_0_D_H
+
+#define ixPB0_DFT_DEBUG_CTRL_REG0 0x1300C
+#define ixPB0_DFT_JIT_INJ_REG0 0x13000
+#define ixPB0_DFT_JIT_INJ_REG1 0x13004
+#define ixPB0_DFT_JIT_INJ_REG2 0x13008
+#define ixPB0_GLB_CTRL_REG0 0x10004
+#define ixPB0_GLB_CTRL_REG1 0x10008
+#define ixPB0_GLB_CTRL_REG2 0x1000C
+#define ixPB0_GLB_CTRL_REG3 0x10010
+#define ixPB0_GLB_CTRL_REG4 0x10014
+#define ixPB0_GLB_CTRL_REG5 0x10018
+#define ixPB0_GLB_OVRD_REG0 0x10030
+#define ixPB0_GLB_OVRD_REG1 0x10034
+#define ixPB0_GLB_OVRD_REG2 0x10038
+#define ixPB0_GLB_SCI_STAT_OVRD_REG0 0x1001C
+#define ixPB0_GLB_SCI_STAT_OVRD_REG1 0x10020
+#define ixPB0_GLB_SCI_STAT_OVRD_REG2 0x10024
+#define ixPB0_GLB_SCI_STAT_OVRD_REG3 0x10028
+#define ixPB0_GLB_SCI_STAT_OVRD_REG4 0x1002C
+#define ixPB0_HW_DEBUG 0x12004
+#define ixPB0_PIF_CNTL 0x0010
+#define ixPB0_PIF_CNTL2 0x0014
+#define ixPB0_PIF_HW_DEBUG 0x0002
+#define ixPB0_PIF_PAIRING 0x0011
+#define ixPB0_PIF_PDNB_OVERRIDE_0 0x0020
+#define ixPB0_PIF_PDNB_OVERRIDE_10 0x0032
+#define ixPB0_PIF_PDNB_OVERRIDE_1 0x0021
+#define ixPB0_PIF_PDNB_OVERRIDE_11 0x0033
+#define ixPB0_PIF_PDNB_OVERRIDE_12 0x0034
+#define ixPB0_PIF_PDNB_OVERRIDE_13 0x0035
+#define ixPB0_PIF_PDNB_OVERRIDE_14 0x0036
+#define ixPB0_PIF_PDNB_OVERRIDE_15 0x0037
+#define ixPB0_PIF_PDNB_OVERRIDE_2 0x0022
+#define ixPB0_PIF_PDNB_OVERRIDE_3 0x0023
+#define ixPB0_PIF_PDNB_OVERRIDE_4 0x0024
+#define ixPB0_PIF_PDNB_OVERRIDE_5 0x0025
+#define ixPB0_PIF_PDNB_OVERRIDE_6 0x0026
+#define ixPB0_PIF_PDNB_OVERRIDE_7 0x0027
+#define ixPB0_PIF_PDNB_OVERRIDE_8 0x0030
+#define ixPB0_PIF_PDNB_OVERRIDE_9 0x0031
+#define ixPB0_PIF_PWRDOWN_0 0x0012
+#define ixPB0_PIF_PWRDOWN_1 0x0013
+#define ixPB0_PIF_PWRDOWN_2 0x0017
+#define ixPB0_PIF_PWRDOWN_3 0x0018
+#define ixPB0_PIF_SC_CTL 0x0016
+#define ixPB0_PIF_SCRATCH 0x0001
+#define ixPB0_PIF_SEQ_STATUS_0 0x0028
+#define ixPB0_PIF_SEQ_STATUS_10 0x003A
+#define ixPB0_PIF_SEQ_STATUS_1 0x0029
+#define ixPB0_PIF_SEQ_STATUS_11 0x003B
+#define ixPB0_PIF_SEQ_STATUS_12 0x003C
+#define ixPB0_PIF_SEQ_STATUS_13 0x003D
+#define ixPB0_PIF_SEQ_STATUS_14 0x003E
+#define ixPB0_PIF_SEQ_STATUS_15 0x003F
+#define ixPB0_PIF_SEQ_STATUS_2 0x002A
+#define ixPB0_PIF_SEQ_STATUS_3 0x002B
+#define ixPB0_PIF_SEQ_STATUS_4 0x002C
+#define ixPB0_PIF_SEQ_STATUS_5 0x002D
+#define ixPB0_PIF_SEQ_STATUS_6 0x002E
+#define ixPB0_PIF_SEQ_STATUS_7 0x002F
+#define ixPB0_PIF_SEQ_STATUS_8 0x0038
+#define ixPB0_PIF_SEQ_STATUS_9 0x0039
+#define ixPB0_PIF_TXPHYSTATUS 0x0015
+#define ixPB0_PLL_LC0_CTRL_REG0 0x14480
+#define ixPB0_PLL_LC0_OVRD_REG0 0x14490
+#define ixPB0_PLL_LC0_OVRD_REG1 0x14494
+#define ixPB0_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
+#define ixPB0_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
+#define ixPB0_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
+#define ixPB0_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
+#define ixPB0_PLL_RO0_CTRL_REG0 0x14440
+#define ixPB0_PLL_RO0_OVRD_REG0 0x14450
+#define ixPB0_PLL_RO0_OVRD_REG1 0x14454
+#define ixPB0_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
+#define ixPB0_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
+#define ixPB0_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
+#define ixPB0_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
+#define ixPB0_PLL_RO_GLB_CTRL_REG0 0x14000
+#define ixPB0_PLL_RO_GLB_OVRD_REG0 0x14010
+#define ixPB0_RX_GLB_CTRL_REG0 0x16000
+#define ixPB0_RX_GLB_CTRL_REG1 0x16004
+#define ixPB0_RX_GLB_CTRL_REG2 0x16008
+#define ixPB0_RX_GLB_CTRL_REG3 0x1600C
+#define ixPB0_RX_GLB_CTRL_REG4 0x16010
+#define ixPB0_RX_GLB_CTRL_REG5 0x16014
+#define ixPB0_RX_GLB_CTRL_REG6 0x16018
+#define ixPB0_RX_GLB_CTRL_REG7 0x1601C
+#define ixPB0_RX_GLB_CTRL_REG8 0x16020
+#define ixPB0_RX_GLB_OVRD_REG0 0x16030
+#define ixPB0_RX_GLB_OVRD_REG1 0x16034
+#define ixPB0_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
+#define ixPB0_RX_LANE0_CTRL_REG0 0x16440
+#define ixPB0_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
+#define ixPB0_RX_LANE10_CTRL_REG0 0x17500
+#define ixPB0_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
+#define ixPB0_RX_LANE11_CTRL_REG0 0x17600
+#define ixPB0_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
+#define ixPB0_RX_LANE12_CTRL_REG0 0x17840
+#define ixPB0_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
+#define ixPB0_RX_LANE13_CTRL_REG0 0x17880
+#define ixPB0_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
+#define ixPB0_RX_LANE14_CTRL_REG0 0x17900
+#define ixPB0_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
+#define ixPB0_RX_LANE15_CTRL_REG0 0x17A00
+#define ixPB0_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
+#define ixPB0_RX_LANE1_CTRL_REG0 0x16480
+#define ixPB0_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
+#define ixPB0_RX_LANE2_CTRL_REG0 0x16500
+#define ixPB0_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
+#define ixPB0_RX_LANE3_CTRL_REG0 0x16600
+#define ixPB0_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
+#define ixPB0_RX_LANE4_CTRL_REG0 0x16800
+#define ixPB0_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
+#define ixPB0_RX_LANE5_CTRL_REG0 0x16880
+#define ixPB0_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
+#define ixPB0_RX_LANE6_CTRL_REG0 0x16900
+#define ixPB0_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
+#define ixPB0_RX_LANE7_CTRL_REG0 0x16A00
+#define ixPB0_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
+#define ixPB0_RX_LANE8_CTRL_REG0 0x17440
+#define ixPB0_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
+#define ixPB0_RX_LANE9_CTRL_REG0 0x17480
+#define ixPB0_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
+#define ixPB0_STRAP_GLB_REG0 0x12020
+#define ixPB0_STRAP_PLL_REG0 0x12030
+#define ixPB0_STRAP_RX_REG0 0x12028
+#define ixPB0_STRAP_RX_REG1 0x1202C
+#define ixPB0_STRAP_TX_REG0 0x12024
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
+#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
+#define ixPB0_TX_GLB_CTRL_REG0 0x18000
+#define ixPB0_TX_GLB_LANE_SKEW_CTRL 0x18004
+#define ixPB0_TX_GLB_OVRD_REG0 0x18030
+#define ixPB0_TX_GLB_OVRD_REG1 0x18034
+#define ixPB0_TX_GLB_OVRD_REG2 0x18038
+#define ixPB0_TX_GLB_OVRD_REG3 0x1803C
+#define ixPB0_TX_GLB_OVRD_REG4 0x18040
+#define ixPB0_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
+#define ixPB0_TX_LANE0_CTRL_REG0 0x18440
+#define ixPB0_TX_LANE0_OVRD_REG0 0x18444
+#define ixPB0_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
+#define ixPB0_TX_LANE10_CTRL_REG0 0x19500
+#define ixPB0_TX_LANE10_OVRD_REG0 0x19504
+#define ixPB0_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
+#define ixPB0_TX_LANE11_CTRL_REG0 0x19600
+#define ixPB0_TX_LANE11_OVRD_REG0 0x19604
+#define ixPB0_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
+#define ixPB0_TX_LANE12_CTRL_REG0 0x19840
+#define ixPB0_TX_LANE12_OVRD_REG0 0x19844
+#define ixPB0_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
+#define ixPB0_TX_LANE13_CTRL_REG0 0x19880
+#define ixPB0_TX_LANE13_OVRD_REG0 0x19884
+#define ixPB0_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
+#define ixPB0_TX_LANE14_CTRL_REG0 0x19900
+#define ixPB0_TX_LANE14_OVRD_REG0 0x19904
+#define ixPB0_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
+#define ixPB0_TX_LANE15_CTRL_REG0 0x19A00
+#define ixPB0_TX_LANE15_OVRD_REG0 0x19A04
+#define ixPB0_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
+#define ixPB0_TX_LANE1_CTRL_REG0 0x18480
+#define ixPB0_TX_LANE1_OVRD_REG0 0x18484
+#define ixPB0_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
+#define ixPB0_TX_LANE2_CTRL_REG0 0x18500
+#define ixPB0_TX_LANE2_OVRD_REG0 0x18504
+#define ixPB0_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
+#define ixPB0_TX_LANE3_CTRL_REG0 0x18600
+#define ixPB0_TX_LANE3_OVRD_REG0 0x18604
+#define ixPB0_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
+#define ixPB0_TX_LANE4_CTRL_REG0 0x18840
+#define ixPB0_TX_LANE4_OVRD_REG0 0x18844
+#define ixPB0_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
+#define ixPB0_TX_LANE5_CTRL_REG0 0x18880
+#define ixPB0_TX_LANE5_OVRD_REG0 0x18884
+#define ixPB0_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
+#define ixPB0_TX_LANE6_CTRL_REG0 0x18900
+#define ixPB0_TX_LANE6_OVRD_REG0 0x18904
+#define ixPB0_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
+#define ixPB0_TX_LANE7_CTRL_REG0 0x18A00
+#define ixPB0_TX_LANE7_OVRD_REG0 0x18A04
+#define ixPB0_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
+#define ixPB0_TX_LANE8_CTRL_REG0 0x19440
+#define ixPB0_TX_LANE8_OVRD_REG0 0x19444
+#define ixPB0_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
+#define ixPB0_TX_LANE9_CTRL_REG0 0x19480
+#define ixPB0_TX_LANE9_OVRD_REG0 0x19484
+#define ixPB0_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
+#define ixPB1_DFT_DEBUG_CTRL_REG0 0x1300C
+#define ixPB1_DFT_JIT_INJ_REG0 0x13000
+#define ixPB1_DFT_JIT_INJ_REG1 0x13004
+#define ixPB1_DFT_JIT_INJ_REG2 0x13008
+#define ixPB1_GLB_CTRL_REG0 0x10004
+#define ixPB1_GLB_CTRL_REG1 0x10008
+#define ixPB1_GLB_CTRL_REG2 0x1000C
+#define ixPB1_GLB_CTRL_REG3 0x10010
+#define ixPB1_GLB_CTRL_REG4 0x10014
+#define ixPB1_GLB_CTRL_REG5 0x10018
+#define ixPB1_GLB_OVRD_REG0 0x10030
+#define ixPB1_GLB_OVRD_REG1 0x10034
+#define ixPB1_GLB_OVRD_REG2 0x10038
+#define ixPB1_GLB_SCI_STAT_OVRD_REG0 0x1001C
+#define ixPB1_GLB_SCI_STAT_OVRD_REG1 0x10020
+#define ixPB1_GLB_SCI_STAT_OVRD_REG2 0x10024
+#define ixPB1_GLB_SCI_STAT_OVRD_REG3 0x10028
+#define ixPB1_GLB_SCI_STAT_OVRD_REG4 0x1002C
+#define ixPB1_HW_DEBUG 0x12004
+#define ixPB1_PIF_CNTL 0x0010
+#define ixPB1_PIF_CNTL2 0x0014
+#define ixPB1_PIF_HW_DEBUG 0x0002
+#define ixPB1_PIF_PAIRING 0x0011
+#define ixPB1_PIF_PDNB_OVERRIDE_0 0x0020
+#define ixPB1_PIF_PDNB_OVERRIDE_10 0x0032
+#define ixPB1_PIF_PDNB_OVERRIDE_1 0x0021
+#define ixPB1_PIF_PDNB_OVERRIDE_11 0x0033
+#define ixPB1_PIF_PDNB_OVERRIDE_12 0x0034
+#define ixPB1_PIF_PDNB_OVERRIDE_13 0x0035
+#define ixPB1_PIF_PDNB_OVERRIDE_14 0x0036
+#define ixPB1_PIF_PDNB_OVERRIDE_15 0x0037
+#define ixPB1_PIF_PDNB_OVERRIDE_2 0x0022
+#define ixPB1_PIF_PDNB_OVERRIDE_3 0x0023
+#define ixPB1_PIF_PDNB_OVERRIDE_4 0x0024
+#define ixPB1_PIF_PDNB_OVERRIDE_5 0x0025
+#define ixPB1_PIF_PDNB_OVERRIDE_6 0x0026
+#define ixPB1_PIF_PDNB_OVERRIDE_7 0x0027
+#define ixPB1_PIF_PDNB_OVERRIDE_8 0x0030
+#define ixPB1_PIF_PDNB_OVERRIDE_9 0x0031
+#define ixPB1_PIF_PWRDOWN_0 0x0012
+#define ixPB1_PIF_PWRDOWN_1 0x0013
+#define ixPB1_PIF_PWRDOWN_2 0x0017
+#define ixPB1_PIF_PWRDOWN_3 0x0018
+#define ixPB1_PIF_SC_CTL 0x0016
+#define ixPB1_PIF_SCRATCH 0x0001
+#define ixPB1_PIF_SEQ_STATUS_0 0x0028
+#define ixPB1_PIF_SEQ_STATUS_10 0x003A
+#define ixPB1_PIF_SEQ_STATUS_1 0x0029
+#define ixPB1_PIF_SEQ_STATUS_11 0x003B
+#define ixPB1_PIF_SEQ_STATUS_12 0x003C
+#define ixPB1_PIF_SEQ_STATUS_13 0x003D
+#define ixPB1_PIF_SEQ_STATUS_14 0x003E
+#define ixPB1_PIF_SEQ_STATUS_15 0x003F
+#define ixPB1_PIF_SEQ_STATUS_2 0x002A
+#define ixPB1_PIF_SEQ_STATUS_3 0x002B
+#define ixPB1_PIF_SEQ_STATUS_4 0x002C
+#define ixPB1_PIF_SEQ_STATUS_5 0x002D
+#define ixPB1_PIF_SEQ_STATUS_6 0x002E
+#define ixPB1_PIF_SEQ_STATUS_7 0x002F
+#define ixPB1_PIF_SEQ_STATUS_8 0x0038
+#define ixPB1_PIF_SEQ_STATUS_9 0x0039
+#define ixPB1_PIF_TXPHYSTATUS 0x0015
+#define ixPB1_PLL_LC0_CTRL_REG0 0x14480
+#define ixPB1_PLL_LC0_OVRD_REG0 0x14490
+#define ixPB1_PLL_LC0_OVRD_REG1 0x14494
+#define ixPB1_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
+#define ixPB1_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
+#define ixPB1_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
+#define ixPB1_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
+#define ixPB1_PLL_RO0_CTRL_REG0 0x14440
+#define ixPB1_PLL_RO0_OVRD_REG0 0x14450
+#define ixPB1_PLL_RO0_OVRD_REG1 0x14454
+#define ixPB1_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
+#define ixPB1_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
+#define ixPB1_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
+#define ixPB1_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
+#define ixPB1_PLL_RO_GLB_CTRL_REG0 0x14000
+#define ixPB1_PLL_RO_GLB_OVRD_REG0 0x14010
+#define ixPB1_RX_GLB_CTRL_REG0 0x16000
+#define ixPB1_RX_GLB_CTRL_REG1 0x16004
+#define ixPB1_RX_GLB_CTRL_REG2 0x16008
+#define ixPB1_RX_GLB_CTRL_REG3 0x1600C
+#define ixPB1_RX_GLB_CTRL_REG4 0x16010
+#define ixPB1_RX_GLB_CTRL_REG5 0x16014
+#define ixPB1_RX_GLB_CTRL_REG6 0x16018
+#define ixPB1_RX_GLB_CTRL_REG7 0x1601C
+#define ixPB1_RX_GLB_CTRL_REG8 0x16020
+#define ixPB1_RX_GLB_OVRD_REG0 0x16030
+#define ixPB1_RX_GLB_OVRD_REG1 0x16034
+#define ixPB1_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
+#define ixPB1_RX_LANE0_CTRL_REG0 0x16440
+#define ixPB1_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
+#define ixPB1_RX_LANE10_CTRL_REG0 0x17500
+#define ixPB1_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
+#define ixPB1_RX_LANE11_CTRL_REG0 0x17600
+#define ixPB1_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
+#define ixPB1_RX_LANE12_CTRL_REG0 0x17840
+#define ixPB1_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
+#define ixPB1_RX_LANE13_CTRL_REG0 0x17880
+#define ixPB1_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
+#define ixPB1_RX_LANE14_CTRL_REG0 0x17900
+#define ixPB1_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
+#define ixPB1_RX_LANE15_CTRL_REG0 0x17A00
+#define ixPB1_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
+#define ixPB1_RX_LANE1_CTRL_REG0 0x16480
+#define ixPB1_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
+#define ixPB1_RX_LANE2_CTRL_REG0 0x16500
+#define ixPB1_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
+#define ixPB1_RX_LANE3_CTRL_REG0 0x16600
+#define ixPB1_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
+#define ixPB1_RX_LANE4_CTRL_REG0 0x16800
+#define ixPB1_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
+#define ixPB1_RX_LANE5_CTRL_REG0 0x16880
+#define ixPB1_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
+#define ixPB1_RX_LANE6_CTRL_REG0 0x16900
+#define ixPB1_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
+#define ixPB1_RX_LANE7_CTRL_REG0 0x16A00
+#define ixPB1_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
+#define ixPB1_RX_LANE8_CTRL_REG0 0x17440
+#define ixPB1_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
+#define ixPB1_RX_LANE9_CTRL_REG0 0x17480
+#define ixPB1_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
+#define ixPB1_STRAP_GLB_REG0 0x12020
+#define ixPB1_STRAP_PLL_REG0 0x12030
+#define ixPB1_STRAP_RX_REG0 0x12028
+#define ixPB1_STRAP_RX_REG1 0x1202C
+#define ixPB1_STRAP_TX_REG0 0x12024
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
+#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
+#define ixPB1_TX_GLB_CTRL_REG0 0x18000
+#define ixPB1_TX_GLB_LANE_SKEW_CTRL 0x18004
+#define ixPB1_TX_GLB_OVRD_REG0 0x18030
+#define ixPB1_TX_GLB_OVRD_REG1 0x18034
+#define ixPB1_TX_GLB_OVRD_REG2 0x18038
+#define ixPB1_TX_GLB_OVRD_REG3 0x1803C
+#define ixPB1_TX_GLB_OVRD_REG4 0x18040
+#define ixPB1_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
+#define ixPB1_TX_LANE0_CTRL_REG0 0x18440
+#define ixPB1_TX_LANE0_OVRD_REG0 0x18444
+#define ixPB1_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
+#define ixPB1_TX_LANE10_CTRL_REG0 0x19500
+#define ixPB1_TX_LANE10_OVRD_REG0 0x19504
+#define ixPB1_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
+#define ixPB1_TX_LANE11_CTRL_REG0 0x19600
+#define ixPB1_TX_LANE11_OVRD_REG0 0x19604
+#define ixPB1_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
+#define ixPB1_TX_LANE12_CTRL_REG0 0x19840
+#define ixPB1_TX_LANE12_OVRD_REG0 0x19844
+#define ixPB1_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
+#define ixPB1_TX_LANE13_CTRL_REG0 0x19880
+#define ixPB1_TX_LANE13_OVRD_REG0 0x19884
+#define ixPB1_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
+#define ixPB1_TX_LANE14_CTRL_REG0 0x19900
+#define ixPB1_TX_LANE14_OVRD_REG0 0x19904
+#define ixPB1_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
+#define ixPB1_TX_LANE15_CTRL_REG0 0x19A00
+#define ixPB1_TX_LANE15_OVRD_REG0 0x19A04
+#define ixPB1_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
+#define ixPB1_TX_LANE1_CTRL_REG0 0x18480
+#define ixPB1_TX_LANE1_OVRD_REG0 0x18484
+#define ixPB1_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
+#define ixPB1_TX_LANE2_CTRL_REG0 0x18500
+#define ixPB1_TX_LANE2_OVRD_REG0 0x18504
+#define ixPB1_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
+#define ixPB1_TX_LANE3_CTRL_REG0 0x18600
+#define ixPB1_TX_LANE3_OVRD_REG0 0x18604
+#define ixPB1_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
+#define ixPB1_TX_LANE4_CTRL_REG0 0x18840
+#define ixPB1_TX_LANE4_OVRD_REG0 0x18844
+#define ixPB1_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
+#define ixPB1_TX_LANE5_CTRL_REG0 0x18880
+#define ixPB1_TX_LANE5_OVRD_REG0 0x18884
+#define ixPB1_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
+#define ixPB1_TX_LANE6_CTRL_REG0 0x18900
+#define ixPB1_TX_LANE6_OVRD_REG0 0x18904
+#define ixPB1_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
+#define ixPB1_TX_LANE7_CTRL_REG0 0x18A00
+#define ixPB1_TX_LANE7_OVRD_REG0 0x18A04
+#define ixPB1_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
+#define ixPB1_TX_LANE8_CTRL_REG0 0x19440
+#define ixPB1_TX_LANE8_OVRD_REG0 0x19444
+#define ixPB1_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
+#define ixPB1_TX_LANE9_CTRL_REG0 0x19480
+#define ixPB1_TX_LANE9_OVRD_REG0 0x19484
+#define ixPB1_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
+#define ixPCIE_BUS_CNTL 0x0021
+#define ixPCIE_CFG_CNTL 0x003C
+#define ixPCIE_CI_CNTL 0x0020
+#define ixPCIE_CNTL 0x0010
+#define ixPCIE_CNTL2 0x001C
+#define ixPCIE_CONFIG_CNTL 0x0011
+#define ixPCIE_DEBUG_CNTL 0x0012
+#define ixPCIE_ERR_CNTL 0x006A
+#define ixPCIE_F0_DPA_CAP 0x00E0
+#define ixPCIE_F0_DPA_CNTL 0x00E5
+#define ixPCIE_F0_DPA_LATENCY_INDICATOR 0x00E4
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x00E7
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x00E8
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x00E9
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x00EA
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x00EB
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x00EC
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x00ED
+#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x00EE
+#define ixPCIE_FC_CPL 0x0062
+#define ixPCIE_FC_NP 0x0061
+#define ixPCIE_FC_P 0x0060
+#define ixPCIE_HW_DEBUG 0x0002
+#define ixPCIE_I2C_REG_ADDR_EXPAND 0x003A
+#define ixPCIE_I2C_REG_DATA 0x003B
+#define ixPCIE_INT_CNTL 0x001A
+#define ixPCIE_INT_STATUS 0x001B
+#define ixPCIE_LC_BEST_EQ_SETTINGS 0x00B9
+#define ixPCIE_LC_BW_CHANGE_CNTL 0x00B2
+#define ixPCIE_LC_CDR_CNTL 0x00B3
+#define ixPCIE_LC_CNTL 0x00A0
+#define ixPCIE_LC_CNTL2 0x00B1
+#define ixPCIE_LC_CNTL3 0x00B5
+#define ixPCIE_LC_CNTL4 0x00B6
+#define ixPCIE_LC_CNTL5 0x00B7
+#define ixPCIE_LC_FORCE_COEFF 0x00B8
+#define ixPCIE_LC_FORCE_EQ_REQ_COEFF 0x00BA
+#define ixPCIE_LC_LANE_CNTL 0x00B4
+#define ixPCIE_LC_LINK_WIDTH_CNTL 0x00A2
+#define ixPCIE_LC_N_FTS_CNTL 0x00A3
+#define ixPCIE_LC_SPEED_CNTL 0x00A4
+#define ixPCIE_LC_STATE0 0x00A5
+#define ixPCIE_LC_STATE10 0x0026
+#define ixPCIE_LC_STATE1 0x00A6
+#define ixPCIE_LC_STATE11 0x0027
+#define ixPCIE_LC_STATE2 0x00A7
+#define ixPCIE_LC_STATE3 0x00A8
+#define ixPCIE_LC_STATE4 0x00A9
+#define ixPCIE_LC_STATE5 0x00AA
+#define ixPCIE_LC_STATE6 0x0022
+#define ixPCIE_LC_STATE7 0x0023
+#define ixPCIE_LC_STATE8 0x0024
+#define ixPCIE_LC_STATE9 0x0025
+#define ixPCIE_LC_STATUS1 0x0028
+#define ixPCIE_LC_STATUS2 0x0029
+#define ixPCIE_LC_TRAINING_CNTL 0x00A1
+#define ixPCIE_P_BUF_STATUS 0x0041
+#define ixPCIE_P_CNTL 0x0040
+#define ixPCIE_P_DECODER_STATUS 0x0042
+#define ixPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x0093
+#define ixPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x0094
+#define ixPCIE_PERF_CNTL_MST_C_CLK 0x0087
+#define ixPCIE_PERF_CNTL_MST_R_CLK 0x0084
+#define ixPCIE_PERF_CNTL_SLV_NS_C_CLK 0x0090
+#define ixPCIE_PERF_CNTL_SLV_R_CLK 0x008A
+#define ixPCIE_PERF_CNTL_SLV_S_C_CLK 0x008D
+#define ixPCIE_PERF_CNTL_TXCLK 0x0081
+#define ixPCIE_PERF_CNTL_TXCLK2 0x0095
+#define ixPCIE_PERF_COUNT0_MST_C_CLK 0x0088
+#define ixPCIE_PERF_COUNT0_MST_R_CLK 0x0085
+#define ixPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x0091
+#define ixPCIE_PERF_COUNT0_SLV_R_CLK 0x008B
+#define ixPCIE_PERF_COUNT0_SLV_S_C_CLK 0x008E
+#define ixPCIE_PERF_COUNT0_TXCLK 0x0082
+#define ixPCIE_PERF_COUNT0_TXCLK2 0x0096
+#define ixPCIE_PERF_COUNT1_MST_C_CLK 0x0089
+#define ixPCIE_PERF_COUNT1_MST_R_CLK 0x0086
+#define ixPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x0092
+#define ixPCIE_PERF_COUNT1_SLV_R_CLK 0x008C
+#define ixPCIE_PERF_COUNT1_SLV_S_C_CLK 0x008F
+#define ixPCIE_PERF_COUNT1_TXCLK 0x0083
+#define ixPCIE_PERF_COUNT1_TXCLK2 0x0097
+#define ixPCIE_PERF_COUNT_CNTL 0x0080
+#define ixPCIEP_HW_DEBUG 0x0002
+#define ixPCIE_P_MISC_STATUS 0x0043
+#define ixPCIEP_PORT_CNTL 0x0010
+#define ixPCIE_P_PORT_LANE_STATUS 0x0050
+#define ixPCIE_PRBS_CLR 0x00C8
+#define ixPCIE_PRBS_ERRCNT_0 0x00D0
+#define ixPCIE_PRBS_ERRCNT_10 0x00DA
+#define ixPCIE_PRBS_ERRCNT_1 0x00D1
+#define ixPCIE_PRBS_ERRCNT_11 0x00DB
+#define ixPCIE_PRBS_ERRCNT_12 0x00DC
+#define ixPCIE_PRBS_ERRCNT_13 0x00DD
+#define ixPCIE_PRBS_ERRCNT_14 0x00DE
+#define ixPCIE_PRBS_ERRCNT_15 0x00DF
+#define ixPCIE_PRBS_ERRCNT_2 0x00D2
+#define ixPCIE_PRBS_ERRCNT_3 0x00D3
+#define ixPCIE_PRBS_ERRCNT_4 0x00D4
+#define ixPCIE_PRBS_ERRCNT_5 0x00D5
+#define ixPCIE_PRBS_ERRCNT_6 0x00D6
+#define ixPCIE_PRBS_ERRCNT_7 0x00D7
+#define ixPCIE_PRBS_ERRCNT_8 0x00D8
+#define ixPCIE_PRBS_ERRCNT_9 0x00D9
+#define ixPCIE_PRBS_FREERUN 0x00CB
+#define ixPCIE_PRBS_HI_BITCNT 0x00CF
+#define ixPCIE_PRBS_LO_BITCNT 0x00CE
+#define ixPCIE_PRBS_MISC 0x00CC
+#define ixPCIE_PRBS_STATUS1 0x00C9
+#define ixPCIE_PRBS_STATUS2 0x00CA
+#define ixPCIE_PRBS_USER_PATTERN 0x00CD
+#define ixPCIE_P_RCV_L0S_FTS_DET 0x0050
+#define ixPCIEP_RESERVED 0x0000
+#define ixPCIEP_SCRATCH 0x0001
+#define ixPCIEP_STRAP_LC 0x00C0
+#define ixPCIEP_STRAP_MISC 0x00C1
+#define ixPCIE_RESERVED 0x0000
+#define ixPCIE_RX_CNTL 0x0070
+#define ixPCIE_RX_CNTL2 0x001D
+#define ixPCIE_RX_CNTL3 0x0074
+#define ixPCIE_RX_CREDITS_ALLOCATED_CPL 0x0082
+#define ixPCIE_RX_CREDITS_ALLOCATED_NP 0x0081
+#define ixPCIE_RX_CREDITS_ALLOCATED_P 0x0080
+#define ixPCIE_RX_EXPECTED_SEQNUM 0x0071
+#define ixPCIE_RX_LAST_TLP0 0x0031
+#define ixPCIE_RX_LAST_TLP1 0x0032
+#define ixPCIE_RX_LAST_TLP2 0x0033
+#define ixPCIE_RX_LAST_TLP3 0x0034
+#define ixPCIE_RX_NUM_NAK 0x000E
+#define ixPCIE_RX_NUM_NAK_GENERATED 0x000F
+#define ixPCIE_RX_VENDOR_SPECIFIC 0x0072
+#define ixPCIE_SCRATCH 0x0001
+#define ixPCIE_STRAP_F0 0x00B0
+#define ixPCIE_STRAP_F1 0x00B1
+#define ixPCIE_STRAP_F2 0x00B2
+#define ixPCIE_STRAP_F3 0x00B3
+#define ixPCIE_STRAP_F4 0x00B4
+#define ixPCIE_STRAP_F5 0x00B5
+#define ixPCIE_STRAP_F6 0x00B6
+#define ixPCIE_STRAP_F7 0x00B7
+#define ixPCIE_STRAP_I2C_BD 0x00C4
+#define ixPCIE_STRAP_MISC 0x00C0
+#define ixPCIE_STRAP_MISC2 0x00C1
+#define ixPCIE_STRAP_PI 0x00C2
+#define ixPCIE_TX_ACK_LATENCY_LIMIT 0x0026
+#define ixPCIE_TX_CNTL 0x0020
+#define ixPCIE_TX_CREDITS_ADVT_CPL 0x0032
+#define ixPCIE_TX_CREDITS_ADVT_NP 0x0031
+#define ixPCIE_TX_CREDITS_ADVT_P 0x0030
+#define ixPCIE_TX_CREDITS_FCU_THRESHOLD 0x0037
+#define ixPCIE_TX_CREDITS_INIT_CPL 0x0035
+#define ixPCIE_TX_CREDITS_INIT_NP 0x0034
+#define ixPCIE_TX_CREDITS_INIT_P 0x0033
+#define ixPCIE_TX_CREDITS_STATUS 0x0036
+#define ixPCIE_TX_LAST_TLP0 0x0035
+#define ixPCIE_TX_LAST_TLP1 0x0036
+#define ixPCIE_TX_LAST_TLP2 0x0037
+#define ixPCIE_TX_LAST_TLP3 0x0038
+#define ixPCIE_TX_REPLAY 0x0025
+#define ixPCIE_TX_REQUESTER_ID 0x0021
+#define ixPCIE_TX_REQUEST_NUM_CNTL 0x0023
+#define ixPCIE_TX_SEQ 0x0024
+#define ixPCIE_TX_VENDOR_SPECIFIC 0x0022
+#define ixPCIE_WPR_CNTL 0x0030
+#define mmBACO_CNTL 0x14E5
+#define mmBF_ANA_ISO_CNTL 0x14C7
+#define mmBIF_BACO_DEBUG 0x14DF
+#define mmBIF_BACO_DEBUG_LATCH 0x14DC
+#define mmBIF_BACO_MSIC 0x14DE
+#define mmBIF_BUSNUM_CNTL1 0x1525
+#define mmBIF_BUSNUM_CNTL2 0x152B
+#define mmBIF_BUSNUM_LIST0 0x1526
+#define mmBIF_BUSNUM_LIST1 0x1527
+#define mmBIF_BUSY_DELAY_CNTR 0x1529
+#define mmBIF_CLK_PDWN_DELAY_TIMER 0x151F
+#define mmBIF_DEBUG_CNTL 0x151C
+#define mmBIF_DEBUG_MUX 0x151D
+#define mmBIF_DEBUG_OUT 0x151E
+#define mmBIF_DEVFUNCNUM_LIST0 0x14E8
+#define mmBIF_DEVFUNCNUM_LIST1 0x14E7
+#define mmBIF_FB_EN 0x1524
+#define mmBIF_FEATURES_CONTROL_MISC 0x14C2
+#define mmBIF_PERFCOUNTER0_RESULT 0x152D
+#define mmBIF_PERFCOUNTER1_RESULT 0x152E
+#define mmBIF_PERFMON_CNTL 0x152C
+#define mmBIF_PIF_TXCLK_SWITCH_TIMER 0x152F
+#define mmBIF_RESET_EN 0x1511
+#define mmBIF_SCRATCH0 0x150E
+#define mmBIF_SCRATCH1 0x150F
+#define mmBIF_SSA_DISP_LOWER 0x14D2
+#define mmBIF_SSA_DISP_UPPER 0x14D3
+#define mmBIF_SSA_GFX0_LOWER 0x14CA
+#define mmBIF_SSA_GFX0_UPPER 0x14CB
+#define mmBIF_SSA_GFX1_LOWER 0x14CC
+#define mmBIF_SSA_GFX1_UPPER 0x14CD
+#define mmBIF_SSA_GFX2_LOWER 0x14CE
+#define mmBIF_SSA_GFX2_UPPER 0x14CF
+#define mmBIF_SSA_GFX3_LOWER 0x14D0
+#define mmBIF_SSA_GFX3_UPPER 0x14D1
+#define mmBIF_SSA_MC_LOWER 0x14D4
+#define mmBIF_SSA_MC_UPPER 0x14D5
+#define mmBIF_SSA_PWR_STATUS 0x14C8
+#define mmBIF_XDMA_HI 0x14C1
+#define mmBIF_XDMA_LO 0x14C0
+#define mmBIOS_SCRATCH_0 0x05C9
+#define mmBIOS_SCRATCH_10 0x05D3
+#define mmBIOS_SCRATCH_1 0x05CA
+#define mmBIOS_SCRATCH_11 0x05D4
+#define mmBIOS_SCRATCH_12 0x05D5
+#define mmBIOS_SCRATCH_13 0x05D6
+#define mmBIOS_SCRATCH_14 0x05D7
+#define mmBIOS_SCRATCH_15 0x05D8
+#define mmBIOS_SCRATCH_2 0x05CB
+#define mmBIOS_SCRATCH_3 0x05CC
+#define mmBIOS_SCRATCH_4 0x05CD
+#define mmBIOS_SCRATCH_5 0x05CE
+#define mmBIOS_SCRATCH_6 0x05CF
+#define mmBIOS_SCRATCH_7 0x05D0
+#define mmBIOS_SCRATCH_8 0x05D1
+#define mmBIOS_SCRATCH_9 0x05D2
+#define mmBUS_CNTL 0x1508
+#define mmCAPTURE_HOST_BUSNUM 0x153C
+#define mmCLKREQB_PAD_CNTL 0x1521
+#define mmCONFIG_APER_SIZE 0x150C
+#define mmCONFIG_CNTL 0x1509
+#define mmCONFIG_F0_BASE 0x150B
+#define mmCONFIG_MEMSIZE 0x150A
+#define mmCONFIG_REG_APER_SIZE 0x150D
+#define mmHDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
+#define mmHDP_REG_COHERENCY_FLUSH_CNTL 0x1528
+#define mmHOST_BUSNUM 0x153D
+#define mmHW_DEBUG 0x1515
+#define mmIMPCTL_RESET 0x14F5
+#define mmINTERRUPT_CNTL 0x151A
+#define mmINTERRUPT_CNTL2 0x151B
+#define mmMASTER_CREDIT_CNTL 0x1516
+#define mmMM_CFGREGS_CNTL 0x1513
+#define mmMM_DATA 0x0001
+#define mmMM_INDEX 0x0000
+#define mmMM_INDEX_HI 0x0006
+#define mmNEW_REFCLKB_TIMER 0x14EA
+#define mmNEW_REFCLKB_TIMER_1 0x14E9
+#define mmPCIE_DATA 0x000D
+#define mmPCIE_INDEX 0x000C
+#define mmPEER0_FB_OFFSET_HI 0x14F3
+#define mmPEER0_FB_OFFSET_LO 0x14F2
+#define mmPEER1_FB_OFFSET_HI 0x14F1
+#define mmPEER1_FB_OFFSET_LO 0x14F0
+#define mmPEER2_FB_OFFSET_HI 0x14EF
+#define mmPEER2_FB_OFFSET_LO 0x14EE
+#define mmPEER3_FB_OFFSET_HI 0x14ED
+#define mmPEER3_FB_OFFSET_LO 0x14EC
+#define mmPEER_REG_RANGE0 0x153E
+#define mmPEER_REG_RANGE1 0x153F
+#define mmSLAVE_HANG_ERROR 0x153B
+#define mmSLAVE_HANG_PROTECTION_CNTL 0x1536
+#define mmSLAVE_REQ_CREDIT_CNTL 0x1517
+#define mmSMBCLK_PAD_CNTL 0x1523
+#define mmSMBDAT_PAD_CNTL 0x1522
+#define mmSMBUS_BACO_DUMMY 0x14C6
+
+#endif
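
The register offsets above pair with the _MASK/__SHIFT field definitions in the bif_3_0_sh_mask.h file added below: for every field, <REG>__<FIELD>_MASK selects its bits and <REG>__<FIELD>__SHIFT gives its bit position within the 32-bit register. A minimal, self-contained C sketch of the usual read-modify-write pattern built on such pairs follows; the GET_FIELD/SET_FIELD macros and the read_reg/write_reg/fake_mmio helpers are illustrative stand-ins (modelled on the style of the driver's REG_GET_FIELD/REG_SET_FIELD and RREG32/WREG32 accessors, not copied from them), while the mmBACO_CNTL and BACO_CNTL__* values are taken verbatim from this patch.

#include <stdint.h>
#include <stdio.h>

/* Defines copied verbatim from the bif_3_0 headers added in this patch. */
#define mmBACO_CNTL                   0x14E5
#define BACO_CNTL__BACO_EN_MASK       0x00000001L
#define BACO_CNTL__BACO_EN__SHIFT     0x00000000
#define BACO_CNTL__BACO_MODE_MASK     0x00000040L
#define BACO_CNTL__BACO_MODE__SHIFT   0x00000006

/* Generic field helpers in the style of the driver's REG_GET_FIELD /
 * REG_SET_FIELD macros, written out here so the example is self-contained. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Hypothetical register accessors standing in for real MMIO reads/writes. */
static uint32_t fake_mmio[0x20000];
static uint32_t read_reg(uint32_t offset)              { return fake_mmio[offset]; }
static void     write_reg(uint32_t offset, uint32_t v) { fake_mmio[offset] = v; }

int main(void)
{
	/* Read-modify-write of BACO_CNTL using the mask/shift pairs. */
	uint32_t v = read_reg(mmBACO_CNTL);

	v = SET_FIELD(v, BACO_CNTL, BACO_EN, 1);    /* set the enable bit   */
	v = SET_FIELD(v, BACO_CNTL, BACO_MODE, 0);  /* clear the mode field */
	write_reg(mmBACO_CNTL, v);

	printf("BACO_EN = %u\n",
	       (unsigned)GET_FIELD(read_reg(mmBACO_CNTL), BACO_CNTL, BACO_EN));
	return 0;
}
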
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h
new file mode 100644
index 000000000000..e94445acf3c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_3_0_sh_mask.h
@@ -0,0 +1,8127 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BIF_3_0_SH_MASK_H
+#define BIF_3_0_SH_MASK_H
+
+#define BACO_CNTL__BACO_ANA_ISO_DIS_MASK 0x00000080L
+#define BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT 0x00000007
+#define BACO_CNTL__BACO_BCLK_OFF_MASK 0x00000002L
+#define BACO_CNTL__BACO_BCLK_OFF__SHIFT 0x00000001
+#define BACO_CNTL__BACO_EN_MASK 0x00000001L
+#define BACO_CNTL__BACO_EN__SHIFT 0x00000000
+#define BACO_CNTL__BACO_HANG_PROTECTION_EN_MASK 0x00000020L
+#define BACO_CNTL__BACO_HANG_PROTECTION_EN__SHIFT 0x00000005
+#define BACO_CNTL__BACO_ISO_DIS_MASK 0x00000004L
+#define BACO_CNTL__BACO_ISO_DIS__SHIFT 0x00000002
+#define BACO_CNTL__BACO_MODE_MASK 0x00000040L
+#define BACO_CNTL__BACO_MODE__SHIFT 0x00000006
+#define BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L
+#define BACO_CNTL__BACO_POWER_OFF__SHIFT 0x00000003
+#define BACO_CNTL__BACO_RESET_EN_MASK 0x00000010L
+#define BACO_CNTL__BACO_RESET_EN__SHIFT 0x00000004
+#define BACO_CNTL__PWRGOOD_BF_MASK 0x00000200L
+#define BACO_CNTL__PWRGOOD_BF__SHIFT 0x00000009
+#define BACO_CNTL__PWRGOOD_DVO_MASK 0x00001000L
+#define BACO_CNTL__PWRGOOD_DVO__SHIFT 0x0000000c
+#define BACO_CNTL__PWRGOOD_GPIO_MASK 0x00000400L
+#define BACO_CNTL__PWRGOOD_GPIO__SHIFT 0x0000000a
+#define BACO_CNTL__PWRGOOD_MEM_MASK 0x00000800L
+#define BACO_CNTL__PWRGOOD_MEM__SHIFT 0x0000000b
+#define BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000100L
+#define BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x00000008
+#define BF_ANA_ISO_CNTL__BF_ANA_ISO_DIS_MASK_MASK 0x00000001L
+#define BF_ANA_ISO_CNTL__BF_ANA_ISO_DIS_MASK__SHIFT 0x00000000
+#define BF_ANA_ISO_CNTL__BF_VDDC_ISO_DIS_MASK_MASK 0x00000002L
+#define BF_ANA_ISO_CNTL__BF_VDDC_ISO_DIS_MASK__SHIFT 0x00000001
+#define BIF_BACO_DEBUG__BIF_BACO_SCANDUMP_FLG_MASK 0x00000001L
+#define BIF_BACO_DEBUG__BIF_BACO_SCANDUMP_FLG__SHIFT 0x00000000
+#define BIF_BACO_DEBUG_LATCH__BIF_BACO_LATCH_FLG_MASK 0x00000001L
+#define BIF_BACO_DEBUG_LATCH__BIF_BACO_LATCH_FLG__SHIFT 0x00000000
+#define BIF_BACO_MSIC__BIF_XTALIN_SEL_MASK 0x00000001L
+#define BIF_BACO_MSIC__BIF_XTALIN_SEL__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL1__ID_MASK_MASK 0x000000ffL
+#define BIF_BUSNUM_CNTL1__ID_MASK__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x00000008
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000ffL
+#define BIF_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x00000000
+#define BIF_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L
+#define BIF_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x00000011
+#define BIF_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L
+#define BIF_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST0__ID0_MASK 0x000000ffL
+#define BIF_BUSNUM_LIST0__ID0__SHIFT 0x00000000
+#define BIF_BUSNUM_LIST0__ID1_MASK 0x0000ff00L
+#define BIF_BUSNUM_LIST0__ID1__SHIFT 0x00000008
+#define BIF_BUSNUM_LIST0__ID2_MASK 0x00ff0000L
+#define BIF_BUSNUM_LIST0__ID2__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST0__ID3_MASK 0xff000000L
+#define BIF_BUSNUM_LIST0__ID3__SHIFT 0x00000018
+#define BIF_BUSNUM_LIST1__ID4_MASK 0x000000ffL
+#define BIF_BUSNUM_LIST1__ID4__SHIFT 0x00000000
+#define BIF_BUSNUM_LIST1__ID5_MASK 0x0000ff00L
+#define BIF_BUSNUM_LIST1__ID5__SHIFT 0x00000008
+#define BIF_BUSNUM_LIST1__ID6_MASK 0x00ff0000L
+#define BIF_BUSNUM_LIST1__ID6__SHIFT 0x00000010
+#define BIF_BUSNUM_LIST1__ID7_MASK 0xff000000L
+#define BIF_BUSNUM_LIST1__ID7__SHIFT 0x00000018
+#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003fL
+#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x00000000
+#define BIF_CLK_PDWN_DELAY_TIMER__TIMER_MASK 0x000003ffL
+#define BIF_CLK_PDWN_DELAY_TIMER__TIMER__SHIFT 0x00000000
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK1_MASK 0x00000010L
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK1__SHIFT 0x00000004
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK2_MASK 0x00000020L
+#define BIF_DEBUG_CNTL__DEBUG_BYTESEL_BLK2__SHIFT 0x00000005
+#define BIF_DEBUG_CNTL__DEBUG_EN_MASK 0x00000001L
+#define BIF_DEBUG_CNTL__DEBUG_EN__SHIFT 0x00000000
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK1_MASK 0x00001f00L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK1__SHIFT 0x00000008
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK2_MASK 0x001f0000L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_BLK2__SHIFT 0x00000010
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_XSP_MASK 0x01000000L
+#define BIF_DEBUG_CNTL__DEBUG_IDSEL_XSP__SHIFT 0x00000018
+#define BIF_DEBUG_CNTL__DEBUG_MULTIBLOCKEN_MASK 0x00000002L
+#define BIF_DEBUG_CNTL__DEBUG_MULTIBLOCKEN__SHIFT 0x00000001
+#define BIF_DEBUG_CNTL__DEBUG_OUT_EN_MASK 0x00000004L
+#define BIF_DEBUG_CNTL__DEBUG_OUT_EN__SHIFT 0x00000002
+#define BIF_DEBUG_CNTL__DEBUG_PAD_SEL_MASK 0x00000008L
+#define BIF_DEBUG_CNTL__DEBUG_PAD_SEL__SHIFT 0x00000003
+#define BIF_DEBUG_CNTL__DEBUG_SWAP_MASK 0x00000080L
+#define BIF_DEBUG_CNTL__DEBUG_SWAP__SHIFT 0x00000007
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_CLKSEL_MASK 0xc0000000L
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_CLKSEL__SHIFT 0x0000001e
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_EN_MASK 0x00000040L
+#define BIF_DEBUG_CNTL__DEBUG_SYNC_EN__SHIFT 0x00000006
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK1_MASK 0x0000003fL
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK1__SHIFT 0x00000000
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK2_MASK 0x00003f00L
+#define BIF_DEBUG_MUX__DEBUG_MUX_BLK2__SHIFT 0x00000008
+#define BIF_DEBUG_OUT__DEBUG_OUTPUT_MASK 0x0001ffffL
+#define BIF_DEBUG_OUT__DEBUG_OUTPUT__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID0_MASK 0x000000ffL
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID0__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID1_MASK 0x0000ff00L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID1__SHIFT 0x00000008
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID2_MASK 0x00ff0000L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID2__SHIFT 0x00000010
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID3_MASK 0xff000000L
+#define BIF_DEVFUNCNUM_LIST0__DEVFUNC_ID3__SHIFT 0x00000018
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID4_MASK 0x000000ffL
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID4__SHIFT 0x00000000
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID5_MASK 0x0000ff00L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID5__SHIFT 0x00000008
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID6_MASK 0x00ff0000L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID6__SHIFT 0x00000010
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID7_MASK 0xff000000L
+#define BIF_DEVFUNCNUM_LIST1__DEVFUNC_ID7__SHIFT 0x00000018
+#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
+#define BIF_FB_EN__FB_READ_EN__SHIFT 0x00000000
+#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
+#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x00000001
+#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L
+#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x00000003
+#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L
+#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x00000002
+#define BIF_FEATURES_CONTROL_MISC__IGNORE_BE_CHECK_GASKET_COMB_DIS_MASK 0x00000100L
+#define BIF_FEATURES_CONTROL_MISC__IGNORE_BE_CHECK_GASKET_COMB_DIS__SHIFT 0x00000008
+#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L
+#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x00000000
+#define BIF_FEATURES_CONTROL_MISC__PLL_SWITCH_IMPCTL_CAL_DONE_DIS_MASK 0x00000080L
+#define BIF_FEATURES_CONTROL_MISC__PLL_SWITCH_IMPCTL_CAL_DONE_DIS__SHIFT 0x00000007
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS_MASK 0x00000020L
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS__SHIFT 0x00000005
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS_MASK 0x00000040L
+#define BIF_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS__SHIFT 0x00000006
+#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L
+#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x00000001
+#define BIF_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS_MASK 0x00000010L
+#define BIF_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS__SHIFT 0x00000004
+#define BIF_PERFCOUNTER0_RESULT__PERFCOUNTER_RESULT_MASK 0xffffffffL
+#define BIF_PERFCOUNTER0_RESULT__PERFCOUNTER_RESULT__SHIFT 0x00000000
+#define BIF_PERFCOUNTER1_RESULT__PERFCOUNTER_RESULT_MASK 0xffffffffL
+#define BIF_PERFCOUNTER1_RESULT__PERFCOUNTER_RESULT__SHIFT 0x00000000
+#define BIF_PERFMON_CNTL__PERFCOUNTER_EN_MASK 0x00000001L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_EN__SHIFT 0x00000000
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET0_MASK 0x00000002L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET0__SHIFT 0x00000001
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET1_MASK 0x00000004L
+#define BIF_PERFMON_CNTL__PERFCOUNTER_RESET1__SHIFT 0x00000002
+#define BIF_PERFMON_CNTL__PERF_SEL0_MASK 0x00001f00L
+#define BIF_PERFMON_CNTL__PERF_SEL0__SHIFT 0x00000008
+#define BIF_PERFMON_CNTL__PERF_SEL1_MASK 0x0003e000L
+#define BIF_PERFMON_CNTL__PERF_SEL1__SHIFT 0x0000000d
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL0_ACK_TIMER_MASK 0x00000007L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL0_ACK_TIMER__SHIFT 0x00000000
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL1_ACK_TIMER_MASK 0x00000038L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL1_ACK_TIMER__SHIFT 0x00000003
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL_SWITCH_TIMER_MASK 0x000003c0L
+#define BIF_PIF_TXCLK_SWITCH_TIMER__PLL_SWITCH_TIMER__SHIFT 0x00000006
+#define BIF_RESET_EN__BIF_COR_RESET_EN_MASK 0x00400000L
+#define BIF_RESET_EN__BIF_COR_RESET_EN__SHIFT 0x00000016
+#define BIF_RESET_EN__CFG_RESET_EN_MASK 0x00000040L
+#define BIF_RESET_EN__CFG_RESET_EN__SHIFT 0x00000006
+#define BIF_RESET_EN__CFG_RESET_PULSE_WIDTH_MASK 0x0003f000L
+#define BIF_RESET_EN__CFG_RESET_PULSE_WIDTH__SHIFT 0x0000000c
+#define BIF_RESET_EN__COR_RESET_EN_MASK 0x00000008L
+#define BIF_RESET_EN__COR_RESET_EN__SHIFT 0x00000003
+#define BIF_RESET_EN__DRV_RESET_DELAY_SEL_MASK 0x000c0000L
+#define BIF_RESET_EN__DRV_RESET_DELAY_SEL__SHIFT 0x00000012
+#define BIF_RESET_EN__DRV_RESET_EN_MASK 0x00000080L
+#define BIF_RESET_EN__DRV_RESET_EN__SHIFT 0x00000007
+#define BIF_RESET_EN__FUNC0_FLR_EN_MASK 0x00800000L
+#define BIF_RESET_EN__FUNC0_FLR_EN__SHIFT 0x00000017
+#define BIF_RESET_EN__FUNC0_RESET_DELAY_SEL_MASK 0x0c000000L
+#define BIF_RESET_EN__FUNC0_RESET_DELAY_SEL__SHIFT 0x0000001a
+#define BIF_RESET_EN__FUNC1_FLR_EN_MASK 0x01000000L
+#define BIF_RESET_EN__FUNC1_FLR_EN__SHIFT 0x00000018
+#define BIF_RESET_EN__FUNC1_RESET_DELAY_SEL_MASK 0x30000000L
+#define BIF_RESET_EN__FUNC1_RESET_DELAY_SEL__SHIFT 0x0000001c
+#define BIF_RESET_EN__FUNC2_FLR_EN_MASK 0x02000000L
+#define BIF_RESET_EN__FUNC2_FLR_EN__SHIFT 0x00000019
+#define BIF_RESET_EN__FUNC2_RESET_DELAY_SEL_MASK 0xc0000000L
+#define BIF_RESET_EN__FUNC2_RESET_DELAY_SEL__SHIFT 0x0000001e
+#define BIF_RESET_EN__HOT_RESET_EN_MASK 0x00000200L
+#define BIF_RESET_EN__HOT_RESET_EN__SHIFT 0x00000009
+#define BIF_RESET_EN__LINK_DISABLE_RESET_EN_MASK 0x00000400L
+#define BIF_RESET_EN__LINK_DISABLE_RESET_EN__SHIFT 0x0000000a
+#define BIF_RESET_EN__LINK_DOWN_RESET_EN_MASK 0x00000800L
+#define BIF_RESET_EN__LINK_DOWN_RESET_EN__SHIFT 0x0000000b
+#define BIF_RESET_EN__PHY_RESET_EN_MASK 0x00000004L
+#define BIF_RESET_EN__PHY_RESET_EN__SHIFT 0x00000002
+#define BIF_RESET_EN__PIF_RSTB_EN_MASK 0x00100000L
+#define BIF_RESET_EN__PIF_RSTB_EN__SHIFT 0x00000014
+#define BIF_RESET_EN__PIF_STRAP_ALLVALID_EN_MASK 0x00200000L
+#define BIF_RESET_EN__PIF_STRAP_ALLVALID_EN__SHIFT 0x00000015
+#define BIF_RESET_EN__REG_RESET_EN_MASK 0x00000010L
+#define BIF_RESET_EN__REG_RESET_EN__SHIFT 0x00000004
+#define BIF_RESET_EN__RESET_CFGREG_ONLY_EN_MASK 0x00000100L
+#define BIF_RESET_EN__RESET_CFGREG_ONLY_EN__SHIFT 0x00000008
+#define BIF_RESET_EN__SOFT_RST_MODE_MASK 0x00000002L
+#define BIF_RESET_EN__SOFT_RST_MODE__SHIFT 0x00000001
+#define BIF_RESET_EN__STY_RESET_EN_MASK 0x00000020L
+#define BIF_RESET_EN__STY_RESET_EN__SHIFT 0x00000005
+#define BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xffffffffL
+#define BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x00000000
+#define BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xffffffffL
+#define BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x00000000
+#define BIF_SSA_DISP_LOWER__SSA_DISP_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_DISP_LOWER__SSA_DISP_LOWER__SHIFT 0x00000002
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_DISP_LOWER__SSA_DISP_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_DISP_UPPER__SSA_DISP_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_DISP_UPPER__SSA_DISP_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX0_LOWER__SSA_GFX0_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX0_UPPER__SSA_GFX0_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX0_UPPER__SSA_GFX0_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX1_LOWER__SSA_GFX1_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX1_UPPER__SSA_GFX1_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX1_UPPER__SSA_GFX1_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX2_LOWER__SSA_GFX2_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX2_UPPER__SSA_GFX2_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX2_UPPER__SSA_GFX2_UPPER__SHIFT 0x00000002
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_LOWER__SHIFT 0x00000002
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_GFX3_LOWER__SSA_GFX3_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_GFX3_UPPER__SSA_GFX3_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_GFX3_UPPER__SSA_GFX3_UPPER__SHIFT 0x00000002
+#define BIF_SSA_MC_LOWER__SSA_MC_FB_STALL_EN_MASK 0x20000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_FB_STALL_EN__SHIFT 0x0000001d
+#define BIF_SSA_MC_LOWER__SSA_MC_LOWER_MASK 0x0003fffcL
+#define BIF_SSA_MC_LOWER__SSA_MC_LOWER__SHIFT 0x00000002
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_CMP_EN_MASK 0x40000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_CMP_EN__SHIFT 0x0000001e
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_STALL_EN_MASK 0x80000000L
+#define BIF_SSA_MC_LOWER__SSA_MC_REG_STALL_EN__SHIFT 0x0000001f
+#define BIF_SSA_MC_UPPER__SSA_MC_UPPER_MASK 0x0003fffcL
+#define BIF_SSA_MC_UPPER__SSA_MC_UPPER__SHIFT 0x00000002
+#define BIF_SSA_PWR_STATUS__SSA_DISP_PWR_STATUS_MASK 0x00000002L
+#define BIF_SSA_PWR_STATUS__SSA_DISP_PWR_STATUS__SHIFT 0x00000001
+#define BIF_SSA_PWR_STATUS__SSA_GFX_PWR_STATUS_MASK 0x00000001L
+#define BIF_SSA_PWR_STATUS__SSA_GFX_PWR_STATUS__SHIFT 0x00000000
+#define BIF_SSA_PWR_STATUS__SSA_MC_PWR_STATUS_MASK 0x00000004L
+#define BIF_SSA_PWR_STATUS__SSA_MC_PWR_STATUS__SHIFT 0x00000002
+#define BIF_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x1fffffffL
+#define BIF_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x00000000
+#define BIF_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L
+#define BIF_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x0000001f
+#define BIF_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x1fffffffL
+#define BIF_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x00000000
+#define BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xffffffffL
+#define BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x00000000
+#define BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xffffffffL
+#define BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x00000000
+#define BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xffffffffL
+#define BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x00000000
+#define BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xffffffffL
+#define BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x00000000
+#define BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xffffffffL
+#define BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x00000000
+#define BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xffffffffL
+#define BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x00000000
+#define BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xffffffffL
+#define BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x00000000
+#define BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xffffffffL
+#define BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x00000000
+#define BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xffffffffL
+#define BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x00000000
+#define BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xffffffffL
+#define BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x00000000
+#define BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xffffffffL
+#define BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x00000000
+#define BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xffffffffL
+#define BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x00000000
+#define BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xffffffffL
+#define BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x00000000
+#define BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xffffffffL
+#define BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x00000000
+#define BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xffffffffL
+#define BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x00000000
+#define BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xffffffffL
+#define BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x00000000
+#define BUS_CNTL__BIF_ERR_RTR_BKPRESSURE_EN_MASK 0x00000100L
+#define BUS_CNTL__BIF_ERR_RTR_BKPRESSURE_EN__SHIFT 0x00000008
+#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x00000002L
+#define BUS_CNTL__BIOS_ROM_DIS__SHIFT 0x00000001
+#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x00000001L
+#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x00000000
+#define BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L
+#define BUS_CNTL__PMI_BM_DIS__SHIFT 0x00000004
+#define BUS_CNTL__PMI_INT_DIS_MASK 0x00000020L
+#define BUS_CNTL__PMI_INT_DIS__SHIFT 0x00000005
+#define BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L
+#define BUS_CNTL__PMI_IO_DIS__SHIFT 0x00000002
+#define BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L
+#define BUS_CNTL__PMI_MEM_DIS__SHIFT 0x00000003
+#define BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L
+#define BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x00000012
+#define BUS_CNTL__SET_AZ_TC_MASK 0x00001c00L
+#define BUS_CNTL__SET_AZ_TC__SHIFT 0x0000000a
+#define BUS_CNTL__SET_MC_TC_MASK 0x0000e000L
+#define BUS_CNTL__SET_MC_TC__SHIFT 0x0000000d
+#define BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L
+#define BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x00000007
+#define BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L
+#define BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x00000006
+#define BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L
+#define BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x00000011
+#define BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L
+#define BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x00000010
+#define CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L
+#define CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x00000000
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x00000000
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0x0000000c
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x00000002
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0x0000000b
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x00000001
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x00000009
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x00000005
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x00000006
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x00000007
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x00000008
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x00000003
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L
+#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0x0000000a
+#define CONFIG_APER_SIZE__APER_SIZE_MASK 0xffffffffL
+#define CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x00000000
+#define CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L
+#define CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x00000000
+#define CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L
+#define CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x00000002
+#define CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L
+#define CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x00000003
+#define CONFIG_CNTL__VGA_DIS_MASK 0x00000002L
+#define CONFIG_CNTL__VGA_DIS__SHIFT 0x00000001
+#define CONFIG_F0_BASE__F0_BASE_MASK 0xffffffffL
+#define CONFIG_F0_BASE__F0_BASE__SHIFT 0x00000000
+#define CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xffffffffL
+#define CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x00000000
+#define CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x000fffffL
+#define CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x00000000
+#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L
+#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x00000000
+#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L
+#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x00000000
+#define HOST_BUSNUM__HOST_ID_MASK 0x0000ffffL
+#define HOST_BUSNUM__HOST_ID__SHIFT 0x00000000
+#define HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define HW_DEBUG__HW_16_DEBUG_MASK 0x00010000L
+#define HW_DEBUG__HW_16_DEBUG__SHIFT 0x00000010
+#define HW_DEBUG__HW_17_DEBUG_MASK 0x00020000L
+#define HW_DEBUG__HW_17_DEBUG__SHIFT 0x00000011
+#define HW_DEBUG__HW_18_DEBUG_MASK 0x00040000L
+#define HW_DEBUG__HW_18_DEBUG__SHIFT 0x00000012
+#define HW_DEBUG__HW_19_DEBUG_MASK 0x00080000L
+#define HW_DEBUG__HW_19_DEBUG__SHIFT 0x00000013
+#define HW_DEBUG__HW_20_DEBUG_MASK 0x00100000L
+#define HW_DEBUG__HW_20_DEBUG__SHIFT 0x00000014
+#define HW_DEBUG__HW_21_DEBUG_MASK 0x00200000L
+#define HW_DEBUG__HW_21_DEBUG__SHIFT 0x00000015
+#define HW_DEBUG__HW_22_DEBUG_MASK 0x00400000L
+#define HW_DEBUG__HW_22_DEBUG__SHIFT 0x00000016
+#define HW_DEBUG__HW_23_DEBUG_MASK 0x00800000L
+#define HW_DEBUG__HW_23_DEBUG__SHIFT 0x00000017
+#define HW_DEBUG__HW_24_DEBUG_MASK 0x01000000L
+#define HW_DEBUG__HW_24_DEBUG__SHIFT 0x00000018
+#define HW_DEBUG__HW_25_DEBUG_MASK 0x02000000L
+#define HW_DEBUG__HW_25_DEBUG__SHIFT 0x00000019
+#define HW_DEBUG__HW_26_DEBUG_MASK 0x04000000L
+#define HW_DEBUG__HW_26_DEBUG__SHIFT 0x0000001a
+#define HW_DEBUG__HW_27_DEBUG_MASK 0x08000000L
+#define HW_DEBUG__HW_27_DEBUG__SHIFT 0x0000001b
+#define HW_DEBUG__HW_28_DEBUG_MASK 0x10000000L
+#define HW_DEBUG__HW_28_DEBUG__SHIFT 0x0000001c
+#define HW_DEBUG__HW_29_DEBUG_MASK 0x20000000L
+#define HW_DEBUG__HW_29_DEBUG__SHIFT 0x0000001d
+#define HW_DEBUG__HW_30_DEBUG_MASK 0x40000000L
+#define HW_DEBUG__HW_30_DEBUG__SHIFT 0x0000001e
+#define HW_DEBUG__HW_31_DEBUG_MASK 0x80000000L
+#define HW_DEBUG__HW_31_DEBUG__SHIFT 0x0000001f
+#define IMPCTL_RESET__IMP_SW_RESET_MASK 0x00000001L
+#define IMPCTL_RESET__IMP_SW_RESET__SHIFT 0x00000000
+#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xffffffffL
+#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x00000000
+#define INTERRUPT_CNTL__GEN_GPIO_INT_EN_MASK 0x00001e00L
+#define INTERRUPT_CNTL__GEN_GPIO_INT_EN__SHIFT 0x00000009
+#define INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L
+#define INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x00000008
+#define INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L
+#define INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x00000001
+#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L
+#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x00000000
+#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000f0L
+#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x00000004
+#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L
+#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x00000003
+#define INTERRUPT_CNTL__SELECT_INT_GPIO_OUTPUT_MASK 0x00006000L
+#define INTERRUPT_CNTL__SELECT_INT_GPIO_OUTPUT__SHIFT 0x0000000d
+#define MASTER_CREDIT_CNTL__BIF_AZ_RDRET_CREDIT_MASK 0x003f0000L
+#define MASTER_CREDIT_CNTL__BIF_AZ_RDRET_CREDIT__SHIFT 0x00000010
+#define MASTER_CREDIT_CNTL__BIF_MC_RDRET_CREDIT_MASK 0x0000003fL
+#define MASTER_CREDIT_CNTL__BIF_MC_RDRET_CREDIT__SHIFT 0x00000000
+#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L
+#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x00000000
+#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x00000008L
+#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x00000003
+#define MM_DATA__MM_DATA_MASK 0xffffffffL
+#define MM_DATA__MM_DATA__SHIFT 0x00000000
+#define MM_INDEX_HI__MM_OFFSET_HI_MASK 0xffffffffL
+#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x00000000
+#define MM_INDEX__MM_APER_MASK 0x80000000L
+#define MM_INDEX__MM_APER__SHIFT 0x0000001f
+#define MM_INDEX__MM_OFFSET_MASK 0x7fffffffL
+#define MM_INDEX__MM_OFFSET__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER_1__PHY_PLL_PDWN_TIMER_MASK 0x000003ffL
+#define NEW_REFCLKB_TIMER_1__PHY_PLL_PDWN_TIMER__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER_1__PLL0_PDNB_EN_MASK 0x00000400L
+#define NEW_REFCLKB_TIMER_1__PLL0_PDNB_EN__SHIFT 0x0000000a
+#define NEW_REFCLKB_TIMER__REFCLK_ON_MASK 0x00200000L
+#define NEW_REFCLKB_TIMER__REFCLK_ON__SHIFT 0x00000015
+#define NEW_REFCLKB_TIMER__REG_STOP_REFCLK_EN_MASK 0x00000001L
+#define NEW_REFCLKB_TIMER__REG_STOP_REFCLK_EN__SHIFT 0x00000000
+#define NEW_REFCLKB_TIMER__STOP_REFCLK_TIMER_MASK 0x001ffffeL
+#define NEW_REFCLKB_TIMER__STOP_REFCLK_TIMER__SHIFT 0x00000001
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN_MASK 0x00000001L
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN__SHIFT 0x00000000
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE_MASK 0x0000003eL
+#define PB0_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE__SHIFT 0x00000001
+#define PB0_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP_MASK 0x00000f00L
+#define PB0_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP__SHIFT 0x00000008
+#define PB0_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN_MASK 0x00800000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN__SHIFT 0x00000017
+#define PB0_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN_MASK 0x00400000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN__SHIFT 0x00000016
+#define PB0_DFT_JIT_INJ_REG0__DFT_NUM_STEPS_MASK 0x0000001fL
+#define PB0_DFT_JIT_INJ_REG0__DFT_NUM_STEPS__SHIFT 0x00000000
+#define PB0_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME_MASK 0xff000000L
+#define PB0_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME__SHIFT 0x00000018
+#define PB0_DFT_JIT_INJ_REG1__DFT_BLOCK_EN_MASK 0x00010000L
+#define PB0_DFT_JIT_INJ_REG1__DFT_BLOCK_EN__SHIFT 0x00000010
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_EN_MASK 0x00000100L
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_EN__SHIFT 0x00000008
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE_MASK 0x000000ffL
+#define PB0_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE__SHIFT 0x00000000
+#define PB0_DFT_JIT_INJ_REG2__DFT_LANE_EN_MASK 0x0000ffffL
+#define PB0_DFT_JIT_INJ_REG2__DFT_LANE_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG0__BACKUP_MASK 0x0000ffffL
+#define PB0_GLB_CTRL_REG0__BACKUP__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG0__CFG_IDLEDET_TH_MASK 0x00030000L
+#define PB0_GLB_CTRL_REG0__CFG_IDLEDET_TH__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL_MASK 0x00700000L
+#define PB0_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL__SHIFT 0x00000014
+#define PB0_GLB_CTRL_REG0__DBG_RXFEBYP_EN_MASK 0x00800000L
+#define PB0_GLB_CTRL_REG0__DBG_RXFEBYP_EN__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG0__DBG_RXPRBS_CLR_MASK 0x01000000L
+#define PB0_GLB_CTRL_REG0__DBG_RXPRBS_CLR__SHIFT 0x00000018
+#define PB0_GLB_CTRL_REG0__DBG_RXTOGGLE_EN_MASK 0x02000000L
+#define PB0_GLB_CTRL_REG0__DBG_RXTOGGLE_EN__SHIFT 0x00000019
+#define PB0_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN_MASK 0x04000000L
+#define PB0_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN__SHIFT 0x0000001a
+#define PB0_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE_MASK 0xc0000000L
+#define PB0_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE__SHIFT 0x0000001e
+#define PB0_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV_MASK 0x80000000L
+#define PB0_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV__SHIFT 0x0000001f
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN_MASK 0x00000001L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL_MASK 0x0000007eL
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL__SHIFT 0x00000001
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN_MASK 0x00000080L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN__SHIFT 0x00000007
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL_MASK 0x00003f00L
+#define PB0_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL__SHIFT 0x00000008
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN_MASK 0x00004000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN__SHIFT 0x0000000e
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL_MASK 0x003f8000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL__SHIFT 0x0000000f
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN_MASK 0x00400000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL_MASK 0x3f800000L
+#define PB0_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG1__TST_LOSPDTST_EN_MASK 0x40000000L
+#define PB0_GLB_CTRL_REG1__TST_LOSPDTST_EN__SHIFT 0x0000001e
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN_MASK 0x00000001L
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL_MASK 0x000000feL
+#define PB0_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL__SHIFT 0x00000001
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN_MASK 0x00000100L
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN__SHIFT 0x00000008
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL_MASK 0x0000fe00L
+#define PB0_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL__SHIFT 0x00000009
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN_MASK 0x00010000L
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL_MASK 0x00fe0000L
+#define PB0_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL__SHIFT 0x00000011
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN_MASK 0x01000000L
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN__SHIFT 0x00000018
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL_MASK 0xfe000000L
+#define PB0_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL__SHIFT 0x00000019
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL_MASK 0x00000060L
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL__SHIFT 0x00000005
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL_MASK 0x00000180L
+#define PB0_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL__SHIFT 0x00000007
+#define PB0_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL_MASK 0x00000600L
+#define PB0_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL__SHIFT 0x00000009
+#define PB0_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL_MASK 0x0001c000L
+#define PB0_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL__SHIFT 0x0000000e
+#define PB0_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN_MASK 0x00001000L
+#define PB0_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN__SHIFT 0x0000000c
+#define PB0_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN_MASK 0x00000800L
+#define PB0_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN__SHIFT 0x0000000b
+#define PB0_GLB_CTRL_REG3__DBG_DLL_CLK_SEL_MASK 0x001c0000L
+#define PB0_GLB_CTRL_REG3__DBG_DLL_CLK_SEL__SHIFT 0x00000012
+#define PB0_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE_MASK 0x80000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE__SHIFT 0x0000001f
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN_MASK 0x00400000L
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL_MASK 0x07800000L
+#define PB0_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL__SHIFT 0x00000017
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN_MASK 0x08000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN__SHIFT 0x0000001b
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL_MASK 0x70000000L
+#define PB0_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL__SHIFT 0x0000001c
+#define PB0_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL_MASK 0x00200000L
+#define PB0_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL__SHIFT 0x00000015
+#define PB0_GLB_CTRL_REG3__RXDBG_SEL_MASK 0x0000001fL
+#define PB0_GLB_CTRL_REG3__RXDBG_SEL__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_EXEC_MASK 0x03c00000L
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_EXEC__SHIFT 0x00000016
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_INST_MASK 0x0000ffffL
+#define PB0_GLB_CTRL_REG4__DBG_RXAPU_INST__SHIFT 0x00000000
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN_MASK 0x00040000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN__SHIFT 0x00000012
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL_MASK 0x00030000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL__SHIFT 0x00000010
+#define PB0_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL_MASK 0x04000000L
+#define PB0_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000001a
+#define PB0_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE_MASK 0x10000000L
+#define PB0_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE__SHIFT 0x0000001c
+#define PB0_GLB_CTRL_REG4__PWRGOOD_OVRD_MASK 0x08000000L
+#define PB0_GLB_CTRL_REG4__PWRGOOD_OVRD__SHIFT 0x0000001b
+#define PB0_GLB_CTRL_REG5__DBG_RXAPU_MODE_MASK 0x000000ffL
+#define PB0_GLB_CTRL_REG5__DBG_RXAPU_MODE__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL_MASK 0x0000ffffL
+#define PB0_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB0_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN_MASK 0x00008000L
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN__SHIFT 0x0000000f
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB0_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN_MASK 0x00000004L
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN__SHIFT 0x00000002
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL_MASK 0x00000008L
+#define PB0_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL__SHIFT 0x00000003
+#define PB0_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN_MASK 0x00000001L
+#define PB0_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN_MASK 0x00000002L
+#define PB0_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN__SHIFT 0x00000001
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_EN_MASK 0x00000001L
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_EN__SHIFT 0x00000000
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL_MASK 0x00000002L
+#define PB0_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT_MASK 0x00000010L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT__SHIFT 0x00000004
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE_MASK 0x00100000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG0__RXIMP_MASK 0x000f0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__RXIMP__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXNIMP_MASK 0x00000f00L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXNIMP__SHIFT 0x00000008
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXPIMP_MASK 0x0000f000L
+#define PB0_GLB_SCI_STAT_OVRD_REG0__TXPIMP__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_0_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_0__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_1_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_1__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_2_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_2__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_3_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG1__MODE_3__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_4_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_4__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_5_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_5__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_6_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_6__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_7_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG2__MODE_7__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_10_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_10__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_11_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_11__SHIFT 0x0000001c
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_8_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_8__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_9_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG3__MODE_9__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12_MASK 0x00001000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12__SHIFT 0x0000000c
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13_MASK 0x00002000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13__SHIFT 0x0000000d
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14_MASK 0x00004000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14__SHIFT 0x0000000e
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15_MASK 0x00008000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15__SHIFT 0x0000000f
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12_MASK 0x000c0000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12__SHIFT 0x00000012
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13_MASK 0x00c00000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13__SHIFT 0x00000016
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14_MASK 0x0c000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14__SHIFT 0x0000001a
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15_MASK 0xc0000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15__SHIFT 0x0000001e
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15_MASK 0x00000004L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15__SHIFT 0x00000002
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15_MASK 0x00000002L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15__SHIFT 0x00000001
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15_MASK 0x00000001L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15__SHIFT 0x00000000
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_12_MASK 0x00030000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_12__SHIFT 0x00000010
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_13_MASK 0x00300000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_13__SHIFT 0x00000014
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_14_MASK 0x03000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_14__SHIFT 0x00000018
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_15_MASK 0x30000000L
+#define PB0_GLB_SCI_STAT_OVRD_REG4__MODE_15__SHIFT 0x0000001c
+#define PB0_HW_DEBUG__PB0_HW_00_DEBUG_MASK 0x00000001L
+#define PB0_HW_DEBUG__PB0_HW_00_DEBUG__SHIFT 0x00000000
+#define PB0_HW_DEBUG__PB0_HW_01_DEBUG_MASK 0x00000002L
+#define PB0_HW_DEBUG__PB0_HW_01_DEBUG__SHIFT 0x00000001
+#define PB0_HW_DEBUG__PB0_HW_02_DEBUG_MASK 0x00000004L
+#define PB0_HW_DEBUG__PB0_HW_02_DEBUG__SHIFT 0x00000002
+#define PB0_HW_DEBUG__PB0_HW_03_DEBUG_MASK 0x00000008L
+#define PB0_HW_DEBUG__PB0_HW_03_DEBUG__SHIFT 0x00000003
+#define PB0_HW_DEBUG__PB0_HW_04_DEBUG_MASK 0x00000010L
+#define PB0_HW_DEBUG__PB0_HW_04_DEBUG__SHIFT 0x00000004
+#define PB0_HW_DEBUG__PB0_HW_05_DEBUG_MASK 0x00000020L
+#define PB0_HW_DEBUG__PB0_HW_05_DEBUG__SHIFT 0x00000005
+#define PB0_HW_DEBUG__PB0_HW_06_DEBUG_MASK 0x00000040L
+#define PB0_HW_DEBUG__PB0_HW_06_DEBUG__SHIFT 0x00000006
+#define PB0_HW_DEBUG__PB0_HW_07_DEBUG_MASK 0x00000080L
+#define PB0_HW_DEBUG__PB0_HW_07_DEBUG__SHIFT 0x00000007
+#define PB0_HW_DEBUG__PB0_HW_08_DEBUG_MASK 0x00000100L
+#define PB0_HW_DEBUG__PB0_HW_08_DEBUG__SHIFT 0x00000008
+#define PB0_HW_DEBUG__PB0_HW_09_DEBUG_MASK 0x00000200L
+#define PB0_HW_DEBUG__PB0_HW_09_DEBUG__SHIFT 0x00000009
+#define PB0_HW_DEBUG__PB0_HW_10_DEBUG_MASK 0x00000400L
+#define PB0_HW_DEBUG__PB0_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB0_HW_DEBUG__PB0_HW_11_DEBUG_MASK 0x00000800L
+#define PB0_HW_DEBUG__PB0_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB0_HW_DEBUG__PB0_HW_12_DEBUG_MASK 0x00001000L
+#define PB0_HW_DEBUG__PB0_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB0_HW_DEBUG__PB0_HW_13_DEBUG_MASK 0x00002000L
+#define PB0_HW_DEBUG__PB0_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB0_HW_DEBUG__PB0_HW_14_DEBUG_MASK 0x00004000L
+#define PB0_HW_DEBUG__PB0_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB0_HW_DEBUG__PB0_HW_15_DEBUG_MASK 0x00008000L
+#define PB0_HW_DEBUG__PB0_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB0_HW_DEBUG__PB0_HW_16_DEBUG_MASK 0x00010000L
+#define PB0_HW_DEBUG__PB0_HW_16_DEBUG__SHIFT 0x00000010
+#define PB0_HW_DEBUG__PB0_HW_17_DEBUG_MASK 0x00020000L
+#define PB0_HW_DEBUG__PB0_HW_17_DEBUG__SHIFT 0x00000011
+#define PB0_HW_DEBUG__PB0_HW_18_DEBUG_MASK 0x00040000L
+#define PB0_HW_DEBUG__PB0_HW_18_DEBUG__SHIFT 0x00000012
+#define PB0_HW_DEBUG__PB0_HW_19_DEBUG_MASK 0x00080000L
+#define PB0_HW_DEBUG__PB0_HW_19_DEBUG__SHIFT 0x00000013
+#define PB0_HW_DEBUG__PB0_HW_20_DEBUG_MASK 0x00100000L
+#define PB0_HW_DEBUG__PB0_HW_20_DEBUG__SHIFT 0x00000014
+#define PB0_HW_DEBUG__PB0_HW_21_DEBUG_MASK 0x00200000L
+#define PB0_HW_DEBUG__PB0_HW_21_DEBUG__SHIFT 0x00000015
+#define PB0_HW_DEBUG__PB0_HW_22_DEBUG_MASK 0x00400000L
+#define PB0_HW_DEBUG__PB0_HW_22_DEBUG__SHIFT 0x00000016
+#define PB0_HW_DEBUG__PB0_HW_23_DEBUG_MASK 0x00800000L
+#define PB0_HW_DEBUG__PB0_HW_23_DEBUG__SHIFT 0x00000017
+#define PB0_HW_DEBUG__PB0_HW_24_DEBUG_MASK 0x01000000L
+#define PB0_HW_DEBUG__PB0_HW_24_DEBUG__SHIFT 0x00000018
+#define PB0_HW_DEBUG__PB0_HW_25_DEBUG_MASK 0x02000000L
+#define PB0_HW_DEBUG__PB0_HW_25_DEBUG__SHIFT 0x00000019
+#define PB0_HW_DEBUG__PB0_HW_26_DEBUG_MASK 0x04000000L
+#define PB0_HW_DEBUG__PB0_HW_26_DEBUG__SHIFT 0x0000001a
+#define PB0_HW_DEBUG__PB0_HW_27_DEBUG_MASK 0x08000000L
+#define PB0_HW_DEBUG__PB0_HW_27_DEBUG__SHIFT 0x0000001b
+#define PB0_HW_DEBUG__PB0_HW_28_DEBUG_MASK 0x10000000L
+#define PB0_HW_DEBUG__PB0_HW_28_DEBUG__SHIFT 0x0000001c
+#define PB0_HW_DEBUG__PB0_HW_29_DEBUG_MASK 0x20000000L
+#define PB0_HW_DEBUG__PB0_HW_29_DEBUG__SHIFT 0x0000001d
+#define PB0_HW_DEBUG__PB0_HW_30_DEBUG_MASK 0x40000000L
+#define PB0_HW_DEBUG__PB0_HW_30_DEBUG__SHIFT 0x0000001e
+#define PB0_HW_DEBUG__PB0_HW_31_DEBUG_MASK 0x80000000L
+#define PB0_HW_DEBUG__PB0_HW_31_DEBUG__SHIFT 0x0000001f
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_EN_MASK 0x00000080L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_EN__SHIFT 0x00000007
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0_MASK 0x00000100L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0__SHIFT 0x00000008
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10_MASK 0x00040000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10__SHIFT 0x00000012
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11_MASK 0x00080000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11__SHIFT 0x00000013
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12_MASK 0x00100000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12__SHIFT 0x00000014
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13_MASK 0x00200000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13__SHIFT 0x00000015
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14_MASK 0x00400000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14__SHIFT 0x00000016
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15_MASK 0x00800000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15__SHIFT 0x00000017
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2_MASK 0x00000400L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2__SHIFT 0x0000000a
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3_MASK 0x00000800L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4_MASK 0x00001000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4__SHIFT 0x0000000c
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5_MASK 0x00002000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5__SHIFT 0x0000000d
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6_MASK 0x00004000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6__SHIFT 0x0000000e
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7_MASK 0x00008000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8_MASK 0x00010000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8__SHIFT 0x00000010
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9_MASK 0x00020000L
+#define PB0_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9__SHIFT 0x00000011
+#define PB0_PIF_CNTL2__RXDETECT_SAMPL_TIME_MASK 0x00000006L
+#define PB0_PIF_CNTL2__RXDETECT_SAMPL_TIME__SHIFT 0x00000001
+#define PB0_PIF_CNTL2__RXPHYSTATUS_DELAY_MASK 0x07000000L
+#define PB0_PIF_CNTL2__RXPHYSTATUS_DELAY__SHIFT 0x00000018
+#define PB0_PIF_CNTL__DA_FIFO_RESET_0_MASK 0x00000002L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_0__SHIFT 0x00000001
+#define PB0_PIF_CNTL__DA_FIFO_RESET_1_MASK 0x00000020L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_1__SHIFT 0x00000005
+#define PB0_PIF_CNTL__DA_FIFO_RESET_2_MASK 0x00000200L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_2__SHIFT 0x00000009
+#define PB0_PIF_CNTL__DA_FIFO_RESET_3_MASK 0x00002000L
+#define PB0_PIF_CNTL__DA_FIFO_RESET_3__SHIFT 0x0000000d
+#define PB0_PIF_CNTL__DIVINIT_MODE_MASK 0x00000100L
+#define PB0_PIF_CNTL__DIVINIT_MODE__SHIFT 0x00000008
+#define PB0_PIF_CNTL__EI_CYCLE_OFF_TIME_MASK 0x00700000L
+#define PB0_PIF_CNTL__EI_CYCLE_OFF_TIME__SHIFT 0x00000014
+#define PB0_PIF_CNTL__EI_DET_CYCLE_MODE_MASK 0x00000010L
+#define PB0_PIF_CNTL__EI_DET_CYCLE_MODE__SHIFT 0x00000004
+#define PB0_PIF_CNTL__EXIT_L0S_INIT_DIS_MASK 0x00800000L
+#define PB0_PIF_CNTL__EXIT_L0S_INIT_DIS__SHIFT 0x00000017
+#define PB0_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP_MASK 0x10000000L
+#define PB0_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP__SHIFT 0x0000001c
+#define PB0_PIF_CNTL__IGNORE_TxDataValid_EP_DIS_MASK 0x20000000L
+#define PB0_PIF_CNTL__IGNORE_TxDataValid_EP_DIS__SHIFT 0x0000001d
+#define PB0_PIF_CNTL__LS2_EXIT_TIME_MASK 0x000e0000L
+#define PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT 0x00000011
+#define PB0_PIF_CNTL__PHYCMD_CR_EN_MODE_MASK 0x00000008L
+#define PB0_PIF_CNTL__PHYCMD_CR_EN_MODE__SHIFT 0x00000003
+#define PB0_PIF_CNTL__PHY_CR_EN_MODE_MASK 0x00000004L
+#define PB0_PIF_CNTL__PHY_CR_EN_MODE__SHIFT 0x00000002
+#define PB0_PIF_CNTL__PLL_BINDING_ENABLE_MASK 0x00000400L
+#define PB0_PIF_CNTL__PLL_BINDING_ENABLE__SHIFT 0x0000000a
+#define PB0_PIF_CNTL__RXDETECT_FIFO_RESET_MODE_MASK 0x00000040L
+#define PB0_PIF_CNTL__RXDETECT_FIFO_RESET_MODE__SHIFT 0x00000006
+#define PB0_PIF_CNTL__RXDETECT_TX_PWR_MODE_MASK 0x00000080L
+#define PB0_PIF_CNTL__RXDETECT_TX_PWR_MODE__SHIFT 0x00000007
+#define PB0_PIF_CNTL__RXEN_GATER_MASK 0x0f000000L
+#define PB0_PIF_CNTL__RXEN_GATER__SHIFT 0x00000018
+#define PB0_PIF_CNTL__SC_CALIB_DONE_CNTL_MASK 0x00000800L
+#define PB0_PIF_CNTL__SC_CALIB_DONE_CNTL__SHIFT 0x0000000b
+#define PB0_PIF_CNTL__SERIAL_CFG_ENABLE_MASK 0x00000001L
+#define PB0_PIF_CNTL__SERIAL_CFG_ENABLE__SHIFT 0x00000000
+#define PB0_PIF_CNTL__TXGND_TIME_MASK 0x00010000L
+#define PB0_PIF_CNTL__TXGND_TIME__SHIFT 0x00000010
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_00_DEBUG_MASK 0x00000001L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_00_DEBUG__SHIFT 0x00000000
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_01_DEBUG_MASK 0x00000002L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_01_DEBUG__SHIFT 0x00000001
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_02_DEBUG_MASK 0x00000004L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_02_DEBUG__SHIFT 0x00000002
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_03_DEBUG_MASK 0x00000008L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_03_DEBUG__SHIFT 0x00000003
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_04_DEBUG_MASK 0x00000010L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_04_DEBUG__SHIFT 0x00000004
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_05_DEBUG_MASK 0x00000020L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_05_DEBUG__SHIFT 0x00000005
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_06_DEBUG_MASK 0x00000040L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_06_DEBUG__SHIFT 0x00000006
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_07_DEBUG_MASK 0x00000080L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_07_DEBUG__SHIFT 0x00000007
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_08_DEBUG_MASK 0x00000100L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_08_DEBUG__SHIFT 0x00000008
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_09_DEBUG_MASK 0x00000200L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_09_DEBUG__SHIFT 0x00000009
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_10_DEBUG_MASK 0x00000400L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_11_DEBUG_MASK 0x00000800L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_12_DEBUG_MASK 0x00001000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_13_DEBUG_MASK 0x00002000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_14_DEBUG_MASK 0x00004000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_15_DEBUG_MASK 0x00008000L
+#define PB0_PIF_HW_DEBUG__PB0_PIF_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB0_PIF_PAIRING__MULTI_PIF_MASK 0x02000000L
+#define PB0_PIF_PAIRING__MULTI_PIF__SHIFT 0x00000019
+#define PB0_PIF_PAIRING__X16_LANE_15_0_MASK 0x00100000L
+#define PB0_PIF_PAIRING__X16_LANE_15_0__SHIFT 0x00000014
+#define PB0_PIF_PAIRING__X2_LANE_1_0_MASK 0x00000001L
+#define PB0_PIF_PAIRING__X2_LANE_1_0__SHIFT 0x00000000
+#define PB0_PIF_PAIRING__X2_LANE_11_10_MASK 0x00000020L
+#define PB0_PIF_PAIRING__X2_LANE_11_10__SHIFT 0x00000005
+#define PB0_PIF_PAIRING__X2_LANE_13_12_MASK 0x00000040L
+#define PB0_PIF_PAIRING__X2_LANE_13_12__SHIFT 0x00000006
+#define PB0_PIF_PAIRING__X2_LANE_15_14_MASK 0x00000080L
+#define PB0_PIF_PAIRING__X2_LANE_15_14__SHIFT 0x00000007
+#define PB0_PIF_PAIRING__X2_LANE_3_2_MASK 0x00000002L
+#define PB0_PIF_PAIRING__X2_LANE_3_2__SHIFT 0x00000001
+#define PB0_PIF_PAIRING__X2_LANE_5_4_MASK 0x00000004L
+#define PB0_PIF_PAIRING__X2_LANE_5_4__SHIFT 0x00000002
+#define PB0_PIF_PAIRING__X2_LANE_7_6_MASK 0x00000008L
+#define PB0_PIF_PAIRING__X2_LANE_7_6__SHIFT 0x00000003
+#define PB0_PIF_PAIRING__X2_LANE_9_8_MASK 0x00000010L
+#define PB0_PIF_PAIRING__X2_LANE_9_8__SHIFT 0x00000004
+#define PB0_PIF_PAIRING__X4_LANE_11_8_MASK 0x00000400L
+#define PB0_PIF_PAIRING__X4_LANE_11_8__SHIFT 0x0000000a
+#define PB0_PIF_PAIRING__X4_LANE_15_12_MASK 0x00000800L
+#define PB0_PIF_PAIRING__X4_LANE_15_12__SHIFT 0x0000000b
+#define PB0_PIF_PAIRING__X4_LANE_3_0_MASK 0x00000100L
+#define PB0_PIF_PAIRING__X4_LANE_3_0__SHIFT 0x00000008
+#define PB0_PIF_PAIRING__X4_LANE_7_4_MASK 0x00000200L
+#define PB0_PIF_PAIRING__X4_LANE_7_4__SHIFT 0x00000009
+#define PB0_PIF_PAIRING__X8_LANE_15_8_MASK 0x00020000L
+#define PB0_PIF_PAIRING__X8_LANE_15_8__SHIFT 0x00000011
+#define PB0_PIF_PAIRING__X8_LANE_7_0_MASK 0x00010000L
+#define PB0_PIF_PAIRING__X8_LANE_7_0__SHIFT 0x00000010
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000b
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9_MASK 0x00000100L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9__SHIFT 0x00000008
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9_MASK 0x00000200L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9__SHIFT 0x00000009
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9_MASK 0x00000010L
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000004
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9_MASK 0x000000e0L
+#define PB0_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000005
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9_MASK 0x00004000L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9__SHIFT 0x0000000e
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9_MASK 0x00038000L
+#define PB0_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000f
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9_MASK 0x00000001L
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000000
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9_MASK 0x0000000eL
+#define PB0_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000001
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9_MASK 0x00000400L
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9__SHIFT 0x0000000a
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9_MASK 0x00003800L
+#define PB0_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000b
+#define PB0_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2__SHIFT 0x00000000
+#define PB0_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3_MASK 0x00000008L
+#define PB0_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3__SHIFT 0x00000003
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3_MASK 0x00001c00L
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3__SHIFT 0x0000000a
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3_MASK 0x00000380L
+#define PB0_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3__SHIFT 0x00000007
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3_MASK 0x10000000L
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3__SHIFT 0x0000001c
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3_MASK 0xe0000000L
+#define PB0_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3__SHIFT 0x0000001d
+#define PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK 0x07000000L
+#define PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3__SHIFT 0x00000018
+#define PB0_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3_MASK 0x00000070L
+#define PB0_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3__SHIFT 0x00000004
+#define PB0_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3_MASK 0x00010000L
+#define PB0_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3__SHIFT 0x00000010
+#define PB0_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3_MASK 0x00000007L
+#define PB0_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3__SHIFT 0x00000000
+#define PB0_PIF_SC_CTL__SC_CALIBRATION_MASK 0x00000001L
+#define PB0_PIF_SC_CTL__SC_CALIBRATION__SHIFT 0x00000000
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0_MASK 0x00000020L
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0__SHIFT 0x00000005
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S_MASK 0x00000010L
+#define PB0_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S__SHIFT 0x00000004
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0_MASK 0x00000008L
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0__SHIFT 0x00000003
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0S_MASK 0x00000004L
+#define PB0_PIF_SC_CTL__SC_EXIT_L1_TO_L0S__SHIFT 0x00000002
+#define PB0_PIF_SC_CTL__SC_LANE_0_RESUME_MASK 0x00010000L
+#define PB0_PIF_SC_CTL__SC_LANE_0_RESUME__SHIFT 0x00000010
+#define PB0_PIF_SC_CTL__SC_LANE_10_RESUME_MASK 0x04000000L
+#define PB0_PIF_SC_CTL__SC_LANE_10_RESUME__SHIFT 0x0000001a
+#define PB0_PIF_SC_CTL__SC_LANE_11_RESUME_MASK 0x08000000L
+#define PB0_PIF_SC_CTL__SC_LANE_11_RESUME__SHIFT 0x0000001b
+#define PB0_PIF_SC_CTL__SC_LANE_12_RESUME_MASK 0x10000000L
+#define PB0_PIF_SC_CTL__SC_LANE_12_RESUME__SHIFT 0x0000001c
+#define PB0_PIF_SC_CTL__SC_LANE_13_RESUME_MASK 0x20000000L
+#define PB0_PIF_SC_CTL__SC_LANE_13_RESUME__SHIFT 0x0000001d
+#define PB0_PIF_SC_CTL__SC_LANE_14_RESUME_MASK 0x40000000L
+#define PB0_PIF_SC_CTL__SC_LANE_14_RESUME__SHIFT 0x0000001e
+#define PB0_PIF_SC_CTL__SC_LANE_15_RESUME_MASK 0x80000000L
+#define PB0_PIF_SC_CTL__SC_LANE_15_RESUME__SHIFT 0x0000001f
+#define PB0_PIF_SC_CTL__SC_LANE_1_RESUME_MASK 0x00020000L
+#define PB0_PIF_SC_CTL__SC_LANE_1_RESUME__SHIFT 0x00000011
+#define PB0_PIF_SC_CTL__SC_LANE_2_RESUME_MASK 0x00040000L
+#define PB0_PIF_SC_CTL__SC_LANE_2_RESUME__SHIFT 0x00000012
+#define PB0_PIF_SC_CTL__SC_LANE_3_RESUME_MASK 0x00080000L
+#define PB0_PIF_SC_CTL__SC_LANE_3_RESUME__SHIFT 0x00000013
+#define PB0_PIF_SC_CTL__SC_LANE_4_RESUME_MASK 0x00100000L
+#define PB0_PIF_SC_CTL__SC_LANE_4_RESUME__SHIFT 0x00000014
+#define PB0_PIF_SC_CTL__SC_LANE_5_RESUME_MASK 0x00200000L
+#define PB0_PIF_SC_CTL__SC_LANE_5_RESUME__SHIFT 0x00000015
+#define PB0_PIF_SC_CTL__SC_LANE_6_RESUME_MASK 0x00400000L
+#define PB0_PIF_SC_CTL__SC_LANE_6_RESUME__SHIFT 0x00000016
+#define PB0_PIF_SC_CTL__SC_LANE_7_RESUME_MASK 0x00800000L
+#define PB0_PIF_SC_CTL__SC_LANE_7_RESUME__SHIFT 0x00000017
+#define PB0_PIF_SC_CTL__SC_LANE_8_RESUME_MASK 0x01000000L
+#define PB0_PIF_SC_CTL__SC_LANE_8_RESUME__SHIFT 0x00000018
+#define PB0_PIF_SC_CTL__SC_LANE_9_RESUME_MASK 0x02000000L
+#define PB0_PIF_SC_CTL__SC_LANE_9_RESUME__SHIFT 0x00000019
+#define PB0_PIF_SC_CTL__SC_PHASE_1_MASK 0x00000100L
+#define PB0_PIF_SC_CTL__SC_PHASE_1__SHIFT 0x00000008
+#define PB0_PIF_SC_CTL__SC_PHASE_2_MASK 0x00000200L
+#define PB0_PIF_SC_CTL__SC_PHASE_2__SHIFT 0x00000009
+#define PB0_PIF_SC_CTL__SC_PHASE_3_MASK 0x00000400L
+#define PB0_PIF_SC_CTL__SC_PHASE_3__SHIFT 0x0000000a
+#define PB0_PIF_SC_CTL__SC_PHASE_4_MASK 0x00000800L
+#define PB0_PIF_SC_CTL__SC_PHASE_4__SHIFT 0x0000000b
+#define PB0_PIF_SC_CTL__SC_PHASE_5_MASK 0x00001000L
+#define PB0_PIF_SC_CTL__SC_PHASE_5__SHIFT 0x0000000c
+#define PB0_PIF_SC_CTL__SC_PHASE_6_MASK 0x00002000L
+#define PB0_PIF_SC_CTL__SC_PHASE_6__SHIFT 0x0000000d
+#define PB0_PIF_SC_CTL__SC_PHASE_7_MASK 0x00004000L
+#define PB0_PIF_SC_CTL__SC_PHASE_7__SHIFT 0x0000000e
+#define PB0_PIF_SC_CTL__SC_PHASE_8_MASK 0x00008000L
+#define PB0_PIF_SC_CTL__SC_PHASE_8__SHIFT 0x0000000f
+#define PB0_PIF_SC_CTL__SC_RXDETECT_MASK 0x00000002L
+#define PB0_PIF_SC_CTL__SC_RXDETECT__SHIFT 0x00000001
+#define PB0_PIF_SC_CTL__SC_SPEED_CHANGE_MASK 0x00000040L
+#define PB0_PIF_SC_CTL__SC_SPEED_CHANGE__SHIFT 0x00000006
+#define PB0_PIF_SCRATCH__PIF_SCRATCH_MASK 0xffffffffL
+#define PB0_PIF_SCRATCH__PIF_SCRATCH__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_0__SEQ_PHASE_0_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_PHASE_0__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_10__SEQ_PHASE_10_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_PHASE_10__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_11__SEQ_PHASE_11_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_PHASE_11__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_12__SEQ_PHASE_12_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_PHASE_12__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_13__SEQ_PHASE_13_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_PHASE_13__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_14__SEQ_PHASE_14_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_PHASE_14__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_15__SEQ_PHASE_15_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_PHASE_15__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_1__SEQ_PHASE_1_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_PHASE_1__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_2__SEQ_PHASE_2_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_PHASE_2__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_3__SEQ_PHASE_3_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_PHASE_3__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_4__SEQ_PHASE_4_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_PHASE_4__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_5__SEQ_PHASE_5_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_PHASE_5__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_6__SEQ_PHASE_6_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_PHASE_6__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_7__SEQ_PHASE_7_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_PHASE_7__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_8__SEQ_PHASE_8_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_PHASE_8__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8__SHIFT 0x00000006
+#define PB0_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9_MASK 0x00000001L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9__SHIFT 0x00000000
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9_MASK 0x00000020L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9__SHIFT 0x00000005
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9_MASK 0x00000010L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9__SHIFT 0x00000004
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9_MASK 0x00000008L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9__SHIFT 0x00000003
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9_MASK 0x00000004L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9__SHIFT 0x00000002
+#define PB0_PIF_SEQ_STATUS_9__SEQ_PHASE_9_MASK 0x00000700L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_PHASE_9__SHIFT 0x00000008
+#define PB0_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9_MASK 0x00000002L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9__SHIFT 0x00000001
+#define PB0_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9_MASK 0x00000040L
+#define PB0_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9__SHIFT 0x00000006
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_0_MASK 0x00000001L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_0__SHIFT 0x00000000
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_10_MASK 0x00000400L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_10__SHIFT 0x0000000a
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_11_MASK 0x00000800L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_11__SHIFT 0x0000000b
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_12_MASK 0x00001000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_12__SHIFT 0x0000000c
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_13_MASK 0x00002000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_13__SHIFT 0x0000000d
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_14_MASK 0x00004000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_14__SHIFT 0x0000000e
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_15_MASK 0x00008000L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_15__SHIFT 0x0000000f
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_1_MASK 0x00000002L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_1__SHIFT 0x00000001
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_2_MASK 0x00000004L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_2__SHIFT 0x00000002
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_3_MASK 0x00000008L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_3__SHIFT 0x00000003
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_4_MASK 0x00000010L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_4__SHIFT 0x00000004
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_5_MASK 0x00000020L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_5__SHIFT 0x00000005
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_6_MASK 0x00000040L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_6__SHIFT 0x00000006
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_7_MASK 0x00000080L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_7__SHIFT 0x00000007
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_8_MASK 0x00000100L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_8__SHIFT 0x00000008
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_9_MASK 0x00000200L
+#define PB0_PIF_TXPHYSTATUS__TXPHYSTATUS_9__SHIFT 0x00000009
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0_MASK 0x00000003L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0_MASK 0x00000010L
+#define PB0_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0_MASK 0x00000007L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0_MASK 0x00000080L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000070L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0_MASK 0x00000200L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0__SHIFT 0x00000009
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0_MASK 0x00000100L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0__SHIFT 0x00000008
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0_MASK 0x00040000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0__SHIFT 0x00000012
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0_MASK 0x0003fc00L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0__SHIFT 0x0000000a
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0_MASK 0x10000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0__SHIFT 0x0000001c
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0_MASK 0x0ff80000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0__SHIFT 0x00000013
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0_MASK 0x80000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0__SHIFT 0x0000001f
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0_MASK 0x60000000L
+#define PB0_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0__SHIFT 0x0000001d
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0_MASK 0x00000008L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0_MASK 0x00000007L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0_MASK 0x00040000L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0__SHIFT 0x00000012
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0_MASK 0x0003c000L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0__SHIFT 0x0000000e
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000020L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000010L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00000080L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000040L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0_MASK 0x00000200L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0__SHIFT 0x00000009
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0_MASK 0x00000100L
+#define PB0_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0__SHIFT 0x00000008
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0_MASK 0x00000003L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0_MASK 0x000007f0L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0__SHIFT 0x00000004
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB0_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0_MASK 0x00000800L
+#define PB0_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0__SHIFT 0x0000000b
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0_MASK 0x00000100L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000008
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0_MASK 0x000000ffL
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0_MASK 0x00001000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0__SHIFT 0x0000000c
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000e00L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000009
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0_MASK 0x00004000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0__SHIFT 0x0000000e
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0_MASK 0x00002000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0_MASK 0x10000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0__SHIFT 0x0000001c
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0_MASK 0x0fff8000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0__SHIFT 0x0000000f
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0_MASK 0x80000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0__SHIFT 0x0000001f
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0_MASK 0x40000000L
+#define PB0_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0__SHIFT 0x0000001e
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0_MASK 0x00400000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000016
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0_MASK 0x00380000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000013
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0_MASK 0x00000020L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0_MASK 0x0000001fL
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0_MASK 0x00000100L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0__SHIFT 0x00000008
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0_MASK 0x000000c0L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000400L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x0000000a
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000200L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000009
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00001000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x0000000c
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000800L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x0000000b
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0_MASK 0x00004000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0__SHIFT 0x0000000e
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0_MASK 0x00002000L
+#define PB0_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE_MASK 0x00000300L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE__SHIFT 0x00000008
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR_MASK 0x00000070L
+#define PB0_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR__SHIFT 0x00000004
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000200L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000009
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000400L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000a
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000800L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000b
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00100000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000014
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00200000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000015
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00001000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x0000000c
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00002000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000d
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00004000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000e
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00400000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000016
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00800000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000017
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2_MASK 0x00000100L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000008
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000002L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000001
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000004L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x00000002
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000008L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x00000003
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00010000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000010
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00020000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000011
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00000010L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x00000004
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00000020L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x00000005
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00000040L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x00000006
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00040000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000012
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00080000L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000013
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2_MASK 0x00000080L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000007
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC_MASK 0x00000001L
+#define PB0_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1_MASK 0x000003ffL
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2_MASK 0x000ffc00L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2__SHIFT 0x0000000a
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3_MASK 0x3ff00000L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1_MASK 0x01000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2_MASK 0x02000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2__SHIFT 0x00000019
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3_MASK 0x04000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1_MASK 0x03000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2_MASK 0x0c000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3_MASK 0x30000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD_MASK 0xc0000000L
+#define PB0_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1_MASK 0x00000001L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2_MASK 0x00000002L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2__SHIFT 0x00000001
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3_MASK 0x00000004L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3__SHIFT 0x00000002
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2_MASK 0x0f000000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3_MASK 0xf0000000L
+#define PB0_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1_MASK 0x00000007L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2_MASK 0x00000038L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2__SHIFT 0x00000003
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3_MASK 0x000001c0L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3__SHIFT 0x00000006
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2_MASK 0x0f000000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3_MASK 0xf0000000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1_MASK 0x00000e00L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1__SHIFT 0x00000009
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2_MASK 0x00007000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3_MASK 0x00038000L
+#define PB0_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3__SHIFT 0x0000000f
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1_MASK 0x0000001fL
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2_MASK 0x000003e0L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2__SHIFT 0x00000005
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3_MASK 0x00007c00L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3__SHIFT 0x0000000a
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1_MASK 0x00008000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1__SHIFT 0x0000000f
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2_MASK 0x00010000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3_MASK 0x00020000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3__SHIFT 0x00000011
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1_MASK 0x00040000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1__SHIFT 0x00000012
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2_MASK 0x00080000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2__SHIFT 0x00000013
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3_MASK 0x00100000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0_MASK 0x40000000L
+#define PB0_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0__SHIFT 0x0000001e
+#define PB0_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1_MASK 0x0000f000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2_MASK 0x000f0000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2__SHIFT 0x00000010
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3_MASK 0x00f00000L
+#define PB0_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3__SHIFT 0x00000014
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x01000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2_MASK 0x04000000L
+#define PB0_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001a
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1_MASK 0x001c0000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1__SHIFT 0x00000012
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2_MASK 0x00e00000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2__SHIFT 0x00000015
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3_MASK 0x07000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3__SHIFT 0x00000018
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1_MASK 0x08000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1__SHIFT 0x0000001b
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2_MASK 0x10000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2__SHIFT 0x0000001c
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3_MASK 0x20000000L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3__SHIFT 0x0000001d
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1_MASK 0x0000000fL
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1__SHIFT 0x00000000
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2_MASK 0x000000f0L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2__SHIFT 0x00000004
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3_MASK 0x00000f00L
+#define PB0_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3__SHIFT 0x00000008
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x00001000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x0000000c
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2_MASK 0x00002000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2__SHIFT 0x0000000d
+#define PB0_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2_MASK 0x00020000L
+#define PB0_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000011
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN_MASK 0x80000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN__SHIFT 0x0000001f
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL_MASK 0x40000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL__SHIFT 0x0000001e
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN_MASK 0x00000002L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN__SHIFT 0x00000001
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL_MASK 0x00000001L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL__SHIFT 0x00000000
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN_MASK 0x00000008L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN__SHIFT 0x00000003
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL_MASK 0x00000004L
+#define PB0_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL__SHIFT 0x00000002
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN_MASK 0x20000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN__SHIFT 0x0000001d
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL_MASK 0x10000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000100L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000008
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x000000c0L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000006
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN_MASK 0x00000400L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN__SHIFT 0x0000000a
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL_MASK 0x00000200L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL__SHIFT 0x00000009
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00001000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000c
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000800L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x0000000b
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN_MASK 0x00004000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN__SHIFT 0x0000000e
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL_MASK 0x00002000L
+#define PB0_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL__SHIFT 0x0000000d
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN_MASK 0x00010000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN__SHIFT 0x00000010
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL_MASK 0x00008000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL__SHIFT 0x0000000f
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN_MASK 0x00040000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN__SHIFT 0x00000012
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL_MASK 0x00020000L
+#define PB0_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL__SHIFT 0x00000011
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN_MASK 0x00100000L
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN__SHIFT 0x00000014
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL_MASK 0x00080000L
+#define PB0_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL__SHIFT 0x00000013
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN_MASK 0x00400000L
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN__SHIFT 0x00000016
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL_MASK 0x00200000L
+#define PB0_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL__SHIFT 0x00000015
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN_MASK 0x01000000L
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN__SHIFT 0x00000018
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL_MASK 0x00800000L
+#define PB0_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL__SHIFT 0x00000017
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN_MASK 0x00000002L
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN__SHIFT 0x00000001
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL_MASK 0x00000001L
+#define PB0_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL__SHIFT 0x00000000
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3_MASK 0x00010000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3__SHIFT 0x00000010
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15_MASK 0x00080000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15__SHIFT 0x00000013
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7_MASK 0x00020000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7__SHIFT 0x00000011
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11_MASK 0x00040000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11__SHIFT 0x00000012
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3_MASK 0x00100000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3__SHIFT 0x00000014
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15_MASK 0x00800000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15__SHIFT 0x00000017
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7_MASK 0x00200000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7__SHIFT 0x00000015
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11_MASK 0x00400000L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11__SHIFT 0x00000016
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_RX_LANE0_CTRL_REG0__RX_BACKUP_0_MASK 0x000000ffL
+#define PB0_RX_LANE0_CTRL_REG0__RX_BACKUP_0__SHIFT 0x00000000
+#define PB0_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0_MASK 0x00002000L
+#define PB0_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0__SHIFT 0x0000000d
+#define PB0_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0_MASK 0x00000c00L
+#define PB0_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0__SHIFT 0x0000000a
+#define PB0_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0_MASK 0x00001000L
+#define PB0_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0__SHIFT 0x0000000c
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0_MASK 0x00000008L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0__SHIFT 0x00000003
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0_MASK 0x00000080L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0__SHIFT 0x00000007
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0_MASK 0x00000100L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0__SHIFT 0x00000008
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0_MASK 0x00000200L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0__SHIFT 0x00000009
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0_MASK 0x00000070L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0__SHIFT 0x00000004
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0_MASK 0x00000007L
+#define PB0_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0__SHIFT 0x00000000
+#define PB0_RX_LANE10_CTRL_REG0__RX_BACKUP_10_MASK 0x000000ffL
+#define PB0_RX_LANE10_CTRL_REG0__RX_BACKUP_10__SHIFT 0x00000000
+#define PB0_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10_MASK 0x00002000L
+#define PB0_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10__SHIFT 0x0000000d
+#define PB0_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10_MASK 0x00000c00L
+#define PB0_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10__SHIFT 0x0000000a
+#define PB0_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10_MASK 0x00001000L
+#define PB0_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10__SHIFT 0x0000000c
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10_MASK 0x00000008L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10__SHIFT 0x00000003
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10_MASK 0x00000080L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10__SHIFT 0x00000007
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10_MASK 0x00000100L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10__SHIFT 0x00000008
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10_MASK 0x00000200L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10__SHIFT 0x00000009
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10_MASK 0x00000070L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10__SHIFT 0x00000004
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10_MASK 0x00000007L
+#define PB0_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10__SHIFT 0x00000000
+#define PB0_RX_LANE11_CTRL_REG0__RX_BACKUP_11_MASK 0x000000ffL
+#define PB0_RX_LANE11_CTRL_REG0__RX_BACKUP_11__SHIFT 0x00000000
+#define PB0_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11_MASK 0x00002000L
+#define PB0_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11__SHIFT 0x0000000d
+#define PB0_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11_MASK 0x00000c00L
+#define PB0_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11__SHIFT 0x0000000a
+#define PB0_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11_MASK 0x00001000L
+#define PB0_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11__SHIFT 0x0000000c
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11_MASK 0x00000008L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11__SHIFT 0x00000003
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11_MASK 0x00000080L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11__SHIFT 0x00000007
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11_MASK 0x00000100L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11__SHIFT 0x00000008
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11_MASK 0x00000200L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11__SHIFT 0x00000009
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11_MASK 0x00000070L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11__SHIFT 0x00000004
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11_MASK 0x00000007L
+#define PB0_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11__SHIFT 0x00000000
+#define PB0_RX_LANE12_CTRL_REG0__RX_BACKUP_12_MASK 0x000000ffL
+#define PB0_RX_LANE12_CTRL_REG0__RX_BACKUP_12__SHIFT 0x00000000
+#define PB0_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12_MASK 0x00002000L
+#define PB0_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12__SHIFT 0x0000000d
+#define PB0_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12_MASK 0x00000c00L
+#define PB0_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12__SHIFT 0x0000000a
+#define PB0_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12_MASK 0x00001000L
+#define PB0_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12__SHIFT 0x0000000c
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12_MASK 0x00000008L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12__SHIFT 0x00000003
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12_MASK 0x00000080L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12__SHIFT 0x00000007
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12_MASK 0x00000100L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12__SHIFT 0x00000008
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12_MASK 0x00000200L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12__SHIFT 0x00000009
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12_MASK 0x00000070L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12__SHIFT 0x00000004
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12_MASK 0x00000007L
+#define PB0_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12__SHIFT 0x00000000
+#define PB0_RX_LANE13_CTRL_REG0__RX_BACKUP_13_MASK 0x000000ffL
+#define PB0_RX_LANE13_CTRL_REG0__RX_BACKUP_13__SHIFT 0x00000000
+#define PB0_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13_MASK 0x00002000L
+#define PB0_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13__SHIFT 0x0000000d
+#define PB0_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13_MASK 0x00000c00L
+#define PB0_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13__SHIFT 0x0000000a
+#define PB0_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13_MASK 0x00001000L
+#define PB0_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13__SHIFT 0x0000000c
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13_MASK 0x00000008L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13__SHIFT 0x00000003
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13_MASK 0x00000080L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13__SHIFT 0x00000007
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13_MASK 0x00000100L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13__SHIFT 0x00000008
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13_MASK 0x00000200L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13__SHIFT 0x00000009
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13_MASK 0x00000070L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13__SHIFT 0x00000004
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13_MASK 0x00000007L
+#define PB0_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13__SHIFT 0x00000000
+#define PB0_RX_LANE14_CTRL_REG0__RX_BACKUP_14_MASK 0x000000ffL
+#define PB0_RX_LANE14_CTRL_REG0__RX_BACKUP_14__SHIFT 0x00000000
+#define PB0_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14_MASK 0x00002000L
+#define PB0_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14__SHIFT 0x0000000d
+#define PB0_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14_MASK 0x00000c00L
+#define PB0_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14__SHIFT 0x0000000a
+#define PB0_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14_MASK 0x00001000L
+#define PB0_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14__SHIFT 0x0000000c
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14_MASK 0x00000008L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14__SHIFT 0x00000003
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14_MASK 0x00000080L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14__SHIFT 0x00000007
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14_MASK 0x00000100L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14__SHIFT 0x00000008
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14_MASK 0x00000200L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14__SHIFT 0x00000009
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14_MASK 0x00000070L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14__SHIFT 0x00000004
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14_MASK 0x00000007L
+#define PB0_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14__SHIFT 0x00000000
+#define PB0_RX_LANE15_CTRL_REG0__RX_BACKUP_15_MASK 0x000000ffL
+#define PB0_RX_LANE15_CTRL_REG0__RX_BACKUP_15__SHIFT 0x00000000
+#define PB0_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15_MASK 0x00002000L
+#define PB0_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15__SHIFT 0x0000000d
+#define PB0_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15_MASK 0x00000c00L
+#define PB0_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15__SHIFT 0x0000000a
+#define PB0_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15_MASK 0x00001000L
+#define PB0_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15__SHIFT 0x0000000c
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15_MASK 0x00000008L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15__SHIFT 0x00000003
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15_MASK 0x00000080L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15__SHIFT 0x00000007
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15_MASK 0x00000100L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15__SHIFT 0x00000008
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15_MASK 0x00000200L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15__SHIFT 0x00000009
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15_MASK 0x00000070L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15__SHIFT 0x00000004
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15_MASK 0x00000007L
+#define PB0_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15__SHIFT 0x00000000
+#define PB0_RX_LANE1_CTRL_REG0__RX_BACKUP_1_MASK 0x000000ffL
+#define PB0_RX_LANE1_CTRL_REG0__RX_BACKUP_1__SHIFT 0x00000000
+#define PB0_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1_MASK 0x00002000L
+#define PB0_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1__SHIFT 0x0000000d
+#define PB0_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1_MASK 0x00000c00L
+#define PB0_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1__SHIFT 0x0000000a
+#define PB0_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1_MASK 0x00001000L
+#define PB0_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1__SHIFT 0x0000000c
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1_MASK 0x00000008L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1__SHIFT 0x00000003
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1_MASK 0x00000080L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1__SHIFT 0x00000007
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1_MASK 0x00000100L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1__SHIFT 0x00000008
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1_MASK 0x00000200L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1__SHIFT 0x00000009
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1_MASK 0x00000070L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1__SHIFT 0x00000004
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1_MASK 0x00000007L
+#define PB0_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1__SHIFT 0x00000000
+#define PB0_RX_LANE2_CTRL_REG0__RX_BACKUP_2_MASK 0x000000ffL
+#define PB0_RX_LANE2_CTRL_REG0__RX_BACKUP_2__SHIFT 0x00000000
+#define PB0_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2_MASK 0x00002000L
+#define PB0_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2__SHIFT 0x0000000d
+#define PB0_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2_MASK 0x00000c00L
+#define PB0_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2__SHIFT 0x0000000a
+#define PB0_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2_MASK 0x00001000L
+#define PB0_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2__SHIFT 0x0000000c
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2_MASK 0x00000008L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2__SHIFT 0x00000003
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2_MASK 0x00000080L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2__SHIFT 0x00000007
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2_MASK 0x00000100L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2__SHIFT 0x00000008
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2_MASK 0x00000200L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2__SHIFT 0x00000009
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2_MASK 0x00000070L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2__SHIFT 0x00000004
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2_MASK 0x00000007L
+#define PB0_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2__SHIFT 0x00000000
+#define PB0_RX_LANE3_CTRL_REG0__RX_BACKUP_3_MASK 0x000000ffL
+#define PB0_RX_LANE3_CTRL_REG0__RX_BACKUP_3__SHIFT 0x00000000
+#define PB0_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3_MASK 0x00002000L
+#define PB0_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3__SHIFT 0x0000000d
+#define PB0_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3_MASK 0x00000c00L
+#define PB0_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3__SHIFT 0x0000000a
+#define PB0_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3_MASK 0x00001000L
+#define PB0_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3__SHIFT 0x0000000c
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3_MASK 0x00000008L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3__SHIFT 0x00000003
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3_MASK 0x00000080L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3__SHIFT 0x00000007
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3_MASK 0x00000100L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3__SHIFT 0x00000008
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3_MASK 0x00000200L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3__SHIFT 0x00000009
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3_MASK 0x00000070L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3__SHIFT 0x00000004
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3_MASK 0x00000007L
+#define PB0_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3__SHIFT 0x00000000
+#define PB0_RX_LANE4_CTRL_REG0__RX_BACKUP_4_MASK 0x000000ffL
+#define PB0_RX_LANE4_CTRL_REG0__RX_BACKUP_4__SHIFT 0x00000000
+#define PB0_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4_MASK 0x00002000L
+#define PB0_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4__SHIFT 0x0000000d
+#define PB0_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4_MASK 0x00000c00L
+#define PB0_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4__SHIFT 0x0000000a
+#define PB0_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4_MASK 0x00001000L
+#define PB0_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4__SHIFT 0x0000000c
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4_MASK 0x00000008L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4__SHIFT 0x00000003
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4_MASK 0x00000080L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4__SHIFT 0x00000007
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4_MASK 0x00000100L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4__SHIFT 0x00000008
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4_MASK 0x00000200L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4__SHIFT 0x00000009
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4_MASK 0x00000070L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4__SHIFT 0x00000004
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4_MASK 0x00000007L
+#define PB0_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4__SHIFT 0x00000000
+#define PB0_RX_LANE5_CTRL_REG0__RX_BACKUP_5_MASK 0x000000ffL
+#define PB0_RX_LANE5_CTRL_REG0__RX_BACKUP_5__SHIFT 0x00000000
+#define PB0_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5_MASK 0x00002000L
+#define PB0_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5__SHIFT 0x0000000d
+#define PB0_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5_MASK 0x00000c00L
+#define PB0_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5__SHIFT 0x0000000a
+#define PB0_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5_MASK 0x00001000L
+#define PB0_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5__SHIFT 0x0000000c
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5_MASK 0x00000008L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5__SHIFT 0x00000003
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5_MASK 0x00000080L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5__SHIFT 0x00000007
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5_MASK 0x00000100L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5__SHIFT 0x00000008
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5_MASK 0x00000200L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5__SHIFT 0x00000009
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5_MASK 0x00000070L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5__SHIFT 0x00000004
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5_MASK 0x00000007L
+#define PB0_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5__SHIFT 0x00000000
+#define PB0_RX_LANE6_CTRL_REG0__RX_BACKUP_6_MASK 0x000000ffL
+#define PB0_RX_LANE6_CTRL_REG0__RX_BACKUP_6__SHIFT 0x00000000
+#define PB0_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6_MASK 0x00002000L
+#define PB0_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6__SHIFT 0x0000000d
+#define PB0_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6_MASK 0x00000c00L
+#define PB0_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6__SHIFT 0x0000000a
+#define PB0_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6_MASK 0x00001000L
+#define PB0_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6__SHIFT 0x0000000c
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6_MASK 0x00000008L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6__SHIFT 0x00000003
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6_MASK 0x00000080L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6__SHIFT 0x00000007
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6_MASK 0x00000100L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6__SHIFT 0x00000008
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6_MASK 0x00000200L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6__SHIFT 0x00000009
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6_MASK 0x00000070L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6__SHIFT 0x00000004
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6_MASK 0x00000007L
+#define PB0_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6__SHIFT 0x00000000
+#define PB0_RX_LANE7_CTRL_REG0__RX_BACKUP_7_MASK 0x000000ffL
+#define PB0_RX_LANE7_CTRL_REG0__RX_BACKUP_7__SHIFT 0x00000000
+#define PB0_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7_MASK 0x00002000L
+#define PB0_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7__SHIFT 0x0000000d
+#define PB0_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7_MASK 0x00000c00L
+#define PB0_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7__SHIFT 0x0000000a
+#define PB0_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7_MASK 0x00001000L
+#define PB0_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7__SHIFT 0x0000000c
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7_MASK 0x00000008L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7__SHIFT 0x00000003
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7_MASK 0x00000080L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7__SHIFT 0x00000007
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7_MASK 0x00000100L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7__SHIFT 0x00000008
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7_MASK 0x00000200L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7__SHIFT 0x00000009
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7_MASK 0x00000070L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7__SHIFT 0x00000004
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7_MASK 0x00000007L
+#define PB0_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7__SHIFT 0x00000000
+#define PB0_RX_LANE8_CTRL_REG0__RX_BACKUP_8_MASK 0x000000ffL
+#define PB0_RX_LANE8_CTRL_REG0__RX_BACKUP_8__SHIFT 0x00000000
+#define PB0_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8_MASK 0x00002000L
+#define PB0_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8__SHIFT 0x0000000d
+#define PB0_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8_MASK 0x00000c00L
+#define PB0_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8__SHIFT 0x0000000a
+#define PB0_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8_MASK 0x00001000L
+#define PB0_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8__SHIFT 0x0000000c
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8_MASK 0x00000008L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8__SHIFT 0x00000003
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8_MASK 0x00000080L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8__SHIFT 0x00000007
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8_MASK 0x00000100L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8__SHIFT 0x00000008
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8_MASK 0x00000200L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8__SHIFT 0x00000009
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8_MASK 0x00000070L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8__SHIFT 0x00000004
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8_MASK 0x00000007L
+#define PB0_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8__SHIFT 0x00000000
+#define PB0_RX_LANE9_CTRL_REG0__RX_BACKUP_9_MASK 0x000000ffL
+#define PB0_RX_LANE9_CTRL_REG0__RX_BACKUP_9__SHIFT 0x00000000
+#define PB0_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9_MASK 0x00002000L
+#define PB0_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9__SHIFT 0x0000000d
+#define PB0_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9_MASK 0x00000c00L
+#define PB0_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9__SHIFT 0x0000000a
+#define PB0_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9_MASK 0x00001000L
+#define PB0_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9__SHIFT 0x0000000c
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9_MASK 0x00000008L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9__SHIFT 0x00000003
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9_MASK 0x00000080L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9__SHIFT 0x00000007
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9_MASK 0x00000100L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9__SHIFT 0x00000008
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9_MASK 0x00000200L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9__SHIFT 0x00000009
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9_MASK 0x00000070L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9__SHIFT 0x00000004
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9_MASK 0x00000007L
+#define PB0_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9__SHIFT 0x00000000
+#define PB0_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL_MASK 0x00008000L
+#define PB0_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67__SHIFT 0x00000003
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78_MASK 0x00004000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78__SHIFT 0x0000000e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79_MASK 0x00008000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79__SHIFT 0x0000000f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80_MASK 0x00010000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80__SHIFT 0x00000010
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81_MASK 0x00020000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81__SHIFT 0x00000011
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82_MASK 0x00040000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82__SHIFT 0x00000012
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83_MASK 0x00080000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83__SHIFT 0x00000013
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84_MASK 0x00100000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84__SHIFT 0x00000014
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85_MASK 0x00200000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85__SHIFT 0x00000015
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86_MASK 0x00400000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86__SHIFT 0x00000016
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87_MASK 0x00800000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87__SHIFT 0x00000017
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88_MASK 0x01000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88__SHIFT 0x00000018
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89_MASK 0x02000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89__SHIFT 0x00000019
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90_MASK 0x04000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90__SHIFT 0x0000001a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91_MASK 0x08000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91__SHIFT 0x0000001b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92_MASK 0x10000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92__SHIFT 0x0000001c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93_MASK 0x20000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93__SHIFT 0x0000001d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94_MASK 0x40000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94__SHIFT 0x0000001e
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95_MASK 0x80000000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95__SHIFT 0x0000001f
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100_MASK 0x00000010L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100__SHIFT 0x00000004
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101_MASK 0x00000020L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101__SHIFT 0x00000005
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102_MASK 0x00000040L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102__SHIFT 0x00000006
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103_MASK 0x00000080L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103__SHIFT 0x00000007
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104_MASK 0x00000100L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104__SHIFT 0x00000008
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105_MASK 0x00000200L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105__SHIFT 0x00000009
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106_MASK 0x00000400L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106__SHIFT 0x0000000a
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107_MASK 0x00000800L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107__SHIFT 0x0000000b
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108_MASK 0x00001000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108__SHIFT 0x0000000c
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109_MASK 0x00002000L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109__SHIFT 0x0000000d
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96_MASK 0x00000001L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96__SHIFT 0x00000000
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97_MASK 0x00000002L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97__SHIFT 0x00000001
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98_MASK 0x00000004L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98__SHIFT 0x00000002
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99_MASK 0x00000008L
+#define PB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99__SHIFT 0x00000003
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1_MASK 0x00000700L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1__SHIFT 0x00000008
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2_MASK 0x00003800L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2__SHIFT 0x0000000b
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3_MASK 0x0001c000L
+#define PB0_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3__SHIFT 0x0000000e
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER_MASK 0x00400000L
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER__SHIFT 0x00000016
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN_MASK 0x00200000L
+#define PB0_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN__SHIFT 0x00000015
+#define PB0_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING_MASK 0x00080000L
+#define PB0_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING__SHIFT 0x00000013
+#define PB0_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON_MASK 0x00800000L
+#define PB0_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON__SHIFT 0x00000017
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL_MASK 0x00000007L
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL_MASK 0x00000038L
+#define PB0_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL__SHIFT 0x00000003
+#define PB0_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF_MASK 0x01000000L
+#define PB0_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF__SHIFT 0x00000018
+#define PB0_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS_MASK 0x00100000L
+#define PB0_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS__SHIFT 0x00000014
+#define PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL_MASK 0x00060000L
+#define PB0_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL__SHIFT 0x00000011
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15_MASK 0x40000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15__SHIFT 0x0000001e
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0_MASK 0x00000001L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0__SHIFT 0x00000000
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10_MASK 0x00000400L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10__SHIFT 0x0000000a
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11_MASK 0x00000800L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11__SHIFT 0x0000000b
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12_MASK 0x00001000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12__SHIFT 0x0000000c
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13_MASK 0x00002000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13__SHIFT 0x0000000d
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14_MASK 0x00004000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14__SHIFT 0x0000000e
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15_MASK 0x00008000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15__SHIFT 0x0000000f
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1_MASK 0x00000002L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1__SHIFT 0x00000001
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2_MASK 0x00000004L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2__SHIFT 0x00000002
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3_MASK 0x00000008L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3__SHIFT 0x00000003
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4_MASK 0x00000010L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4__SHIFT 0x00000004
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5_MASK 0x00000020L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5__SHIFT 0x00000005
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6_MASK 0x00000040L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6__SHIFT 0x00000006
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7_MASK 0x00000080L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7__SHIFT 0x00000007
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8_MASK 0x00000100L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8__SHIFT 0x00000008
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9_MASK 0x00000200L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9__SHIFT 0x00000009
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1_MASK 0x00010000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1__SHIFT 0x00000010
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11_MASK 0x00200000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11__SHIFT 0x00000015
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13_MASK 0x00400000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13__SHIFT 0x00000016
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15_MASK 0x00800000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15__SHIFT 0x00000017
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3_MASK 0x00020000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3__SHIFT 0x00000011
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5_MASK 0x00040000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5__SHIFT 0x00000012
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7_MASK 0x00080000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7__SHIFT 0x00000013
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9_MASK 0x00100000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9__SHIFT 0x00000014
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3_MASK 0x01000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3__SHIFT 0x00000018
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15_MASK 0x08000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15__SHIFT 0x0000001b
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7_MASK 0x02000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7__SHIFT 0x00000019
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11_MASK 0x04000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11__SHIFT 0x0000001a
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7_MASK 0x10000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7__SHIFT 0x0000001c
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15_MASK 0x20000000L
+#define PB0_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15__SHIFT 0x0000001d
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000008L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000003
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x00000007L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL_MASK 0x000000f0L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00001e00L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN_MASK 0x00002000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN__SHIFT 0x0000000d
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL_MASK 0x0007c000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN_MASK 0x00080000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN__SHIFT 0x00000013
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL_MASK 0x01f00000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000014
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN_MASK 0x02000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL_MASK 0x3c000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL__SHIFT 0x0000001a
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN_MASK 0x40000000L
+#define PB0_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN__SHIFT 0x0000001e
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL__SHIFT 0x00000005
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN_MASK 0x00000040L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN__SHIFT 0x00000006
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00000080L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000007
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00000400L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN_MASK 0x00001000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN__SHIFT 0x0000000c
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK 0x00000800L
+#define PB0_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL__SHIFT 0x0000000b
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN_MASK 0x00004000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL_MASK 0x00002000L
+#define PB0_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL__SHIFT 0x0000000d
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN_MASK 0x02000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL_MASK 0x01ff8000L
+#define PB0_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL__SHIFT 0x0000000f
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN_MASK 0x08000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN__SHIFT 0x0000001b
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL_MASK 0x04000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL__SHIFT 0x0000001a
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN_MASK 0x20000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN__SHIFT 0x0000001d
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL_MASK 0x10000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN_MASK 0x80000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN__SHIFT 0x0000001f
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL_MASK 0x40000000L
+#define PB0_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL__SHIFT 0x0000001e
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL_MASK 0x0000f000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL__SHIFT 0x0000000c
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000f0000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000010
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL_MASK 0x01f00000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL__SHIFT 0x00000014
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL_MASK 0x3e000000L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000019
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN_MASK 0x00000800L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN__SHIFT 0x0000000b
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL_MASK 0x00000400L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN_MASK 0x00000008L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN__SHIFT 0x00000003
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL_MASK 0x00000004L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL__SHIFT 0x00000002
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN__SHIFT 0x00000005
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN_MASK 0x00000080L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN__SHIFT 0x00000007
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL_MASK 0x00000040L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL__SHIFT 0x00000006
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN_MASK 0x00000002L
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN__SHIFT 0x00000001
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL_MASK 0x00000001L
+#define PB0_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL_MASK 0x00003c00L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL__SHIFT 0x0000000a
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0003c000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x0000000e
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL_MASK 0x007c0000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL__SHIFT 0x00000012
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0f800000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000017
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL_MASK 0xf0000000L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL__SHIFT 0x0000001c
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000000f0L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL_MASK 0x00000100L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL__SHIFT 0x00000008
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL_MASK 0x00000200L
+#define PB0_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000009
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0000000fL
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000000
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL_MASK 0x00000010L
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL__SHIFT 0x00000004
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL_MASK 0x00000020L
+#define PB0_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000005
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB0_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0_MASK 0x00000001L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0__SHIFT 0x00000000
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0_MASK 0x00000002L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0__SHIFT 0x00000001
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0_MASK 0x00000004L
+#define PB0_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0__SHIFT 0x00000002
+#define PB0_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0_MASK 0x00000008L
+#define PB0_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0_MASK 0x00000002L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0__SHIFT 0x00000001
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0_MASK 0x00000001L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0__SHIFT 0x00000000
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0_MASK 0x00000008L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0_MASK 0x00000004L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0__SHIFT 0x00000002
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0_MASK 0x00000020L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0__SHIFT 0x00000005
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0_MASK 0x00000010L
+#define PB0_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0__SHIFT 0x00000004
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0_MASK 0x00000080L
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0__SHIFT 0x00000007
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0_MASK 0x00000040L
+#define PB0_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0__SHIFT 0x00000006
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0_MASK 0x0000fc00L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0__SHIFT 0x0000000a
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0_MASK 0x00000300L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0__SHIFT 0x00000008
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0_MASK 0x00000080L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0__SHIFT 0x00000007
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0_MASK 0x00000008L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0__SHIFT 0x00000003
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0_MASK 0x00000070L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0__SHIFT 0x00000004
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0_MASK 0x00000007L
+#define PB0_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0__SHIFT 0x00000000
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10_MASK 0x00000001L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10__SHIFT 0x00000000
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10_MASK 0x00000002L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10__SHIFT 0x00000001
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10_MASK 0x00000004L
+#define PB0_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10__SHIFT 0x00000002
+#define PB0_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10_MASK 0x00000008L
+#define PB0_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10_MASK 0x00000002L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10__SHIFT 0x00000001
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10_MASK 0x00000001L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10__SHIFT 0x00000000
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10_MASK 0x00000008L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10_MASK 0x00000004L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10__SHIFT 0x00000002
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10_MASK 0x00000020L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10__SHIFT 0x00000005
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10_MASK 0x00000010L
+#define PB0_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10__SHIFT 0x00000004
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10_MASK 0x00000080L
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10__SHIFT 0x00000007
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10_MASK 0x00000040L
+#define PB0_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10__SHIFT 0x00000006
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10_MASK 0x0000fc00L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10__SHIFT 0x0000000a
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10_MASK 0x00000300L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10__SHIFT 0x00000008
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10_MASK 0x00000080L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10__SHIFT 0x00000007
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10_MASK 0x00000008L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10__SHIFT 0x00000003
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10_MASK 0x00000070L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10__SHIFT 0x00000004
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10_MASK 0x00000007L
+#define PB0_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10__SHIFT 0x00000000
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11_MASK 0x00000001L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11__SHIFT 0x00000000
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11_MASK 0x00000002L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11__SHIFT 0x00000001
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11_MASK 0x00000004L
+#define PB0_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11__SHIFT 0x00000002
+#define PB0_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11_MASK 0x00000008L
+#define PB0_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11_MASK 0x00000002L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11__SHIFT 0x00000001
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11_MASK 0x00000001L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11__SHIFT 0x00000000
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11_MASK 0x00000008L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11_MASK 0x00000004L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11__SHIFT 0x00000002
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11_MASK 0x00000020L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11__SHIFT 0x00000005
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11_MASK 0x00000010L
+#define PB0_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11__SHIFT 0x00000004
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11_MASK 0x00000080L
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11__SHIFT 0x00000007
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11_MASK 0x00000040L
+#define PB0_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11__SHIFT 0x00000006
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11_MASK 0x0000fc00L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11__SHIFT 0x0000000a
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11_MASK 0x00000300L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11__SHIFT 0x00000008
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11_MASK 0x00000080L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11__SHIFT 0x00000007
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11_MASK 0x00000008L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11__SHIFT 0x00000003
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11_MASK 0x00000070L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11__SHIFT 0x00000004
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11_MASK 0x00000007L
+#define PB0_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11__SHIFT 0x00000000
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12_MASK 0x00000001L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12__SHIFT 0x00000000
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12_MASK 0x00000002L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12__SHIFT 0x00000001
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12_MASK 0x00000004L
+#define PB0_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12__SHIFT 0x00000002
+#define PB0_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12_MASK 0x00000008L
+#define PB0_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12_MASK 0x00000002L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12__SHIFT 0x00000001
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12_MASK 0x00000001L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12__SHIFT 0x00000000
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12_MASK 0x00000008L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12_MASK 0x00000004L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12__SHIFT 0x00000002
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12_MASK 0x00000020L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12__SHIFT 0x00000005
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12_MASK 0x00000010L
+#define PB0_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12__SHIFT 0x00000004
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12_MASK 0x00000080L
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12__SHIFT 0x00000007
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12_MASK 0x00000040L
+#define PB0_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12__SHIFT 0x00000006
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12_MASK 0x0000fc00L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12__SHIFT 0x0000000a
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12_MASK 0x00000300L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12__SHIFT 0x00000008
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12_MASK 0x00000080L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12__SHIFT 0x00000007
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12_MASK 0x00000008L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12__SHIFT 0x00000003
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12_MASK 0x00000070L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12__SHIFT 0x00000004
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12_MASK 0x00000007L
+#define PB0_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12__SHIFT 0x00000000
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13_MASK 0x00000001L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13__SHIFT 0x00000000
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13_MASK 0x00000002L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13__SHIFT 0x00000001
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13_MASK 0x00000004L
+#define PB0_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13__SHIFT 0x00000002
+#define PB0_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13_MASK 0x00000008L
+#define PB0_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13_MASK 0x00000002L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13__SHIFT 0x00000001
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13_MASK 0x00000001L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13__SHIFT 0x00000000
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13_MASK 0x00000008L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13_MASK 0x00000004L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13__SHIFT 0x00000002
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13_MASK 0x00000020L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13__SHIFT 0x00000005
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13_MASK 0x00000010L
+#define PB0_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13__SHIFT 0x00000004
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13_MASK 0x00000080L
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13__SHIFT 0x00000007
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13_MASK 0x00000040L
+#define PB0_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13__SHIFT 0x00000006
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13_MASK 0x0000fc00L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13__SHIFT 0x0000000a
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13_MASK 0x00000300L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13__SHIFT 0x00000008
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13_MASK 0x00000080L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13__SHIFT 0x00000007
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13_MASK 0x00000008L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13__SHIFT 0x00000003
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13_MASK 0x00000070L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13__SHIFT 0x00000004
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13_MASK 0x00000007L
+#define PB0_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13__SHIFT 0x00000000
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14_MASK 0x00000001L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14__SHIFT 0x00000000
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14_MASK 0x00000002L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14__SHIFT 0x00000001
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14_MASK 0x00000004L
+#define PB0_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14__SHIFT 0x00000002
+#define PB0_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14_MASK 0x00000008L
+#define PB0_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14_MASK 0x00000002L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14__SHIFT 0x00000001
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14_MASK 0x00000001L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14__SHIFT 0x00000000
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14_MASK 0x00000008L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14_MASK 0x00000004L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14__SHIFT 0x00000002
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14_MASK 0x00000020L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14__SHIFT 0x00000005
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14_MASK 0x00000010L
+#define PB0_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14__SHIFT 0x00000004
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14_MASK 0x00000080L
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14__SHIFT 0x00000007
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14_MASK 0x00000040L
+#define PB0_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14__SHIFT 0x00000006
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14_MASK 0x0000fc00L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14__SHIFT 0x0000000a
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14_MASK 0x00000300L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14__SHIFT 0x00000008
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14_MASK 0x00000080L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14__SHIFT 0x00000007
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14_MASK 0x00000008L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14__SHIFT 0x00000003
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14_MASK 0x00000070L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14__SHIFT 0x00000004
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14_MASK 0x00000007L
+#define PB0_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14__SHIFT 0x00000000
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15_MASK 0x00000001L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15__SHIFT 0x00000000
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15_MASK 0x00000002L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15__SHIFT 0x00000001
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15_MASK 0x00000004L
+#define PB0_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15__SHIFT 0x00000002
+#define PB0_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15_MASK 0x00000008L
+#define PB0_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15_MASK 0x00000002L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15__SHIFT 0x00000001
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15_MASK 0x00000001L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15__SHIFT 0x00000000
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15_MASK 0x00000008L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15_MASK 0x00000004L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15__SHIFT 0x00000002
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15_MASK 0x00000020L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15__SHIFT 0x00000005
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15_MASK 0x00000010L
+#define PB0_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15__SHIFT 0x00000004
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15_MASK 0x00000080L
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15__SHIFT 0x00000007
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15_MASK 0x00000040L
+#define PB0_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15__SHIFT 0x00000006
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15_MASK 0x0000fc00L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15__SHIFT 0x0000000a
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15_MASK 0x00000300L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15__SHIFT 0x00000008
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15_MASK 0x00000080L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15__SHIFT 0x00000007
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15_MASK 0x00000008L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15__SHIFT 0x00000003
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15_MASK 0x00000070L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15__SHIFT 0x00000004
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15_MASK 0x00000007L
+#define PB0_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15__SHIFT 0x00000000
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1_MASK 0x00000001L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1__SHIFT 0x00000000
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1_MASK 0x00000002L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1__SHIFT 0x00000001
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1_MASK 0x00000004L
+#define PB0_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1__SHIFT 0x00000002
+#define PB0_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1_MASK 0x00000008L
+#define PB0_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1_MASK 0x00000002L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1__SHIFT 0x00000001
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1_MASK 0x00000001L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1__SHIFT 0x00000000
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1_MASK 0x00000008L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1_MASK 0x00000004L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1__SHIFT 0x00000002
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1_MASK 0x00000020L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1__SHIFT 0x00000005
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1_MASK 0x00000010L
+#define PB0_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1__SHIFT 0x00000004
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1_MASK 0x00000080L
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1__SHIFT 0x00000007
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1_MASK 0x00000040L
+#define PB0_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1__SHIFT 0x00000006
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1_MASK 0x0000fc00L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1__SHIFT 0x0000000a
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1_MASK 0x00000300L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1__SHIFT 0x00000008
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1_MASK 0x00000080L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1__SHIFT 0x00000007
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1_MASK 0x00000008L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1__SHIFT 0x00000003
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1_MASK 0x00000070L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1__SHIFT 0x00000004
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1_MASK 0x00000007L
+#define PB0_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1__SHIFT 0x00000000
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2_MASK 0x00000001L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2__SHIFT 0x00000000
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2_MASK 0x00000002L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2__SHIFT 0x00000001
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2_MASK 0x00000004L
+#define PB0_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2__SHIFT 0x00000002
+#define PB0_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2_MASK 0x00000008L
+#define PB0_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2_MASK 0x00000002L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2__SHIFT 0x00000001
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2_MASK 0x00000001L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2__SHIFT 0x00000000
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2_MASK 0x00000008L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2_MASK 0x00000004L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2__SHIFT 0x00000002
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2_MASK 0x00000020L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2__SHIFT 0x00000005
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2_MASK 0x00000010L
+#define PB0_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2__SHIFT 0x00000004
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2_MASK 0x00000080L
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2__SHIFT 0x00000007
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2_MASK 0x00000040L
+#define PB0_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2__SHIFT 0x00000006
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2_MASK 0x0000fc00L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2__SHIFT 0x0000000a
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2_MASK 0x00000300L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2__SHIFT 0x00000008
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2_MASK 0x00000080L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2__SHIFT 0x00000007
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2_MASK 0x00000008L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2__SHIFT 0x00000003
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2_MASK 0x00000070L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2__SHIFT 0x00000004
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2_MASK 0x00000007L
+#define PB0_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2__SHIFT 0x00000000
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3_MASK 0x00000001L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3__SHIFT 0x00000000
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3_MASK 0x00000002L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3__SHIFT 0x00000001
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3_MASK 0x00000004L
+#define PB0_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3__SHIFT 0x00000002
+#define PB0_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3_MASK 0x00000008L
+#define PB0_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3_MASK 0x00000002L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3__SHIFT 0x00000001
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3_MASK 0x00000001L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3__SHIFT 0x00000000
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3_MASK 0x00000008L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3_MASK 0x00000004L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3__SHIFT 0x00000002
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3_MASK 0x00000020L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3__SHIFT 0x00000005
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3_MASK 0x00000010L
+#define PB0_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3__SHIFT 0x00000004
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3_MASK 0x00000080L
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3__SHIFT 0x00000007
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3_MASK 0x00000040L
+#define PB0_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3__SHIFT 0x00000006
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3_MASK 0x0000fc00L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3__SHIFT 0x0000000a
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3_MASK 0x00000300L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3__SHIFT 0x00000008
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3_MASK 0x00000080L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3__SHIFT 0x00000007
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3_MASK 0x00000008L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3__SHIFT 0x00000003
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3_MASK 0x00000070L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3__SHIFT 0x00000004
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3_MASK 0x00000007L
+#define PB0_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3__SHIFT 0x00000000
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4_MASK 0x00000001L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4__SHIFT 0x00000000
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4_MASK 0x00000002L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4__SHIFT 0x00000001
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4_MASK 0x00000004L
+#define PB0_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4__SHIFT 0x00000002
+#define PB0_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4_MASK 0x00000008L
+#define PB0_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4_MASK 0x00000002L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4__SHIFT 0x00000001
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4_MASK 0x00000001L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4__SHIFT 0x00000000
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4_MASK 0x00000008L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4_MASK 0x00000004L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4__SHIFT 0x00000002
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4_MASK 0x00000020L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4__SHIFT 0x00000005
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4_MASK 0x00000010L
+#define PB0_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4__SHIFT 0x00000004
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4_MASK 0x00000080L
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4__SHIFT 0x00000007
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4_MASK 0x00000040L
+#define PB0_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4__SHIFT 0x00000006
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4_MASK 0x0000fc00L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4__SHIFT 0x0000000a
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4_MASK 0x00000300L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4__SHIFT 0x00000008
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4_MASK 0x00000080L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4__SHIFT 0x00000007
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4_MASK 0x00000008L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4__SHIFT 0x00000003
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4_MASK 0x00000070L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4__SHIFT 0x00000004
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4_MASK 0x00000007L
+#define PB0_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4__SHIFT 0x00000000
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5_MASK 0x00000001L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5__SHIFT 0x00000000
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5_MASK 0x00000002L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5__SHIFT 0x00000001
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5_MASK 0x00000004L
+#define PB0_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5__SHIFT 0x00000002
+#define PB0_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5_MASK 0x00000008L
+#define PB0_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5_MASK 0x00000002L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5__SHIFT 0x00000001
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5_MASK 0x00000001L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5__SHIFT 0x00000000
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5_MASK 0x00000008L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5_MASK 0x00000004L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5__SHIFT 0x00000002
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5_MASK 0x00000020L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5__SHIFT 0x00000005
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5_MASK 0x00000010L
+#define PB0_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5__SHIFT 0x00000004
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5_MASK 0x00000080L
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5__SHIFT 0x00000007
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5_MASK 0x00000040L
+#define PB0_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5__SHIFT 0x00000006
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5_MASK 0x0000fc00L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5__SHIFT 0x0000000a
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5_MASK 0x00000300L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5__SHIFT 0x00000008
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5_MASK 0x00000080L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5__SHIFT 0x00000007
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5_MASK 0x00000008L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5__SHIFT 0x00000003
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5_MASK 0x00000070L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5__SHIFT 0x00000004
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5_MASK 0x00000007L
+#define PB0_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5__SHIFT 0x00000000
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6_MASK 0x00000001L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6__SHIFT 0x00000000
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6_MASK 0x00000002L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6__SHIFT 0x00000001
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6_MASK 0x00000004L
+#define PB0_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6__SHIFT 0x00000002
+#define PB0_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6_MASK 0x00000008L
+#define PB0_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6_MASK 0x00000002L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6__SHIFT 0x00000001
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6_MASK 0x00000001L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6__SHIFT 0x00000000
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6_MASK 0x00000008L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6_MASK 0x00000004L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6__SHIFT 0x00000002
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6_MASK 0x00000020L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6__SHIFT 0x00000005
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6_MASK 0x00000010L
+#define PB0_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6__SHIFT 0x00000004
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6_MASK 0x00000080L
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6__SHIFT 0x00000007
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6_MASK 0x00000040L
+#define PB0_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6__SHIFT 0x00000006
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6_MASK 0x0000fc00L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6__SHIFT 0x0000000a
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6_MASK 0x00000300L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6__SHIFT 0x00000008
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6_MASK 0x00000080L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6__SHIFT 0x00000007
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6_MASK 0x00000008L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6__SHIFT 0x00000003
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6_MASK 0x00000070L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6__SHIFT 0x00000004
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6_MASK 0x00000007L
+#define PB0_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6__SHIFT 0x00000000
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7_MASK 0x00000001L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7__SHIFT 0x00000000
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7_MASK 0x00000002L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7__SHIFT 0x00000001
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7_MASK 0x00000004L
+#define PB0_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7__SHIFT 0x00000002
+#define PB0_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7_MASK 0x00000008L
+#define PB0_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7_MASK 0x00000002L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7__SHIFT 0x00000001
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7_MASK 0x00000001L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7__SHIFT 0x00000000
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7_MASK 0x00000008L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7_MASK 0x00000004L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7__SHIFT 0x00000002
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7_MASK 0x00000020L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7__SHIFT 0x00000005
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7_MASK 0x00000010L
+#define PB0_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7__SHIFT 0x00000004
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7_MASK 0x00000080L
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7__SHIFT 0x00000007
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7_MASK 0x00000040L
+#define PB0_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7__SHIFT 0x00000006
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7_MASK 0x0000fc00L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7__SHIFT 0x0000000a
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7_MASK 0x00000300L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7__SHIFT 0x00000008
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7_MASK 0x00000080L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7__SHIFT 0x00000007
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7_MASK 0x00000008L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7__SHIFT 0x00000003
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7_MASK 0x00000070L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7__SHIFT 0x00000004
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7_MASK 0x00000007L
+#define PB0_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7__SHIFT 0x00000000
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8_MASK 0x00000001L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8__SHIFT 0x00000000
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8_MASK 0x00000002L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8__SHIFT 0x00000001
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8_MASK 0x00000004L
+#define PB0_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8__SHIFT 0x00000002
+#define PB0_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8_MASK 0x00000008L
+#define PB0_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8_MASK 0x00000002L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8__SHIFT 0x00000001
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8_MASK 0x00000001L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8__SHIFT 0x00000000
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8_MASK 0x00000008L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8_MASK 0x00000004L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8__SHIFT 0x00000002
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8_MASK 0x00000020L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8__SHIFT 0x00000005
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8_MASK 0x00000010L
+#define PB0_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8__SHIFT 0x00000004
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8_MASK 0x00000080L
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8__SHIFT 0x00000007
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8_MASK 0x00000040L
+#define PB0_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8__SHIFT 0x00000006
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8_MASK 0x0000fc00L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8__SHIFT 0x0000000a
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8_MASK 0x00000300L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8__SHIFT 0x00000008
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8_MASK 0x00000080L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8__SHIFT 0x00000007
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8_MASK 0x00000008L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8__SHIFT 0x00000003
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8_MASK 0x00000070L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8__SHIFT 0x00000004
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8_MASK 0x00000007L
+#define PB0_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8__SHIFT 0x00000000
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9_MASK 0x00000001L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9__SHIFT 0x00000000
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9_MASK 0x00000002L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9__SHIFT 0x00000001
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9_MASK 0x00000004L
+#define PB0_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9__SHIFT 0x00000002
+#define PB0_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9_MASK 0x00000008L
+#define PB0_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9_MASK 0x00000002L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9__SHIFT 0x00000001
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9_MASK 0x00000001L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9__SHIFT 0x00000000
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9_MASK 0x00000008L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9_MASK 0x00000004L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9__SHIFT 0x00000002
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9_MASK 0x00000020L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9__SHIFT 0x00000005
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9_MASK 0x00000010L
+#define PB0_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9__SHIFT 0x00000004
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9_MASK 0x00000080L
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9__SHIFT 0x00000007
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9_MASK 0x00000040L
+#define PB0_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9__SHIFT 0x00000006
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9_MASK 0x0000fc00L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9__SHIFT 0x0000000a
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9_MASK 0x00000300L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9__SHIFT 0x00000008
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9_MASK 0x00000080L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9__SHIFT 0x00000007
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9_MASK 0x00000008L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9__SHIFT 0x00000003
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9_MASK 0x00000070L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9__SHIFT 0x00000004
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9_MASK 0x00000007L
+#define PB0_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9__SHIFT 0x00000000
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN_MASK 0x00000001L
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_EN__SHIFT 0x00000000
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE_MASK 0x0000003eL
+#define PB1_DFT_DEBUG_CTRL_REG0__DFT_PHY_DEBUG_MODE__SHIFT 0x00000001
+#define PB1_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP_MASK 0x00000f00L
+#define PB1_DFT_JIT_INJ_REG0__DFT_CLK_PER_STEP__SHIFT 0x00000008
+#define PB1_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN_MASK 0x00800000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_DECR_SWP_EN__SHIFT 0x00000017
+#define PB1_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN_MASK 0x00400000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_INCR_SWP_EN__SHIFT 0x00000016
+#define PB1_DFT_JIT_INJ_REG0__DFT_NUM_STEPS_MASK 0x0000001fL
+#define PB1_DFT_JIT_INJ_REG0__DFT_NUM_STEPS__SHIFT 0x00000000
+#define PB1_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME_MASK 0xff000000L
+#define PB1_DFT_JIT_INJ_REG0__DFT_RECOVERY_TIME__SHIFT 0x00000018
+#define PB1_DFT_JIT_INJ_REG1__DFT_BLOCK_EN_MASK 0x00010000L
+#define PB1_DFT_JIT_INJ_REG1__DFT_BLOCK_EN__SHIFT 0x00000010
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_EN_MASK 0x00000100L
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_EN__SHIFT 0x00000008
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE_MASK 0x000000ffL
+#define PB1_DFT_JIT_INJ_REG1__DFT_BYPASS_VALUE__SHIFT 0x00000000
+#define PB1_DFT_JIT_INJ_REG2__DFT_LANE_EN_MASK 0x0000ffffL
+#define PB1_DFT_JIT_INJ_REG2__DFT_LANE_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG0__BACKUP_MASK 0x0000ffffL
+#define PB1_GLB_CTRL_REG0__BACKUP__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG0__CFG_IDLEDET_TH_MASK 0x00030000L
+#define PB1_GLB_CTRL_REG0__CFG_IDLEDET_TH__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL_MASK 0x00700000L
+#define PB1_GLB_CTRL_REG0__DBG_RX2TXBYP_SEL__SHIFT 0x00000014
+#define PB1_GLB_CTRL_REG0__DBG_RXFEBYP_EN_MASK 0x00800000L
+#define PB1_GLB_CTRL_REG0__DBG_RXFEBYP_EN__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG0__DBG_RXPRBS_CLR_MASK 0x01000000L
+#define PB1_GLB_CTRL_REG0__DBG_RXPRBS_CLR__SHIFT 0x00000018
+#define PB1_GLB_CTRL_REG0__DBG_RXTOGGLE_EN_MASK 0x02000000L
+#define PB1_GLB_CTRL_REG0__DBG_RXTOGGLE_EN__SHIFT 0x00000019
+#define PB1_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN_MASK 0x04000000L
+#define PB1_GLB_CTRL_REG0__DBG_TX2RXLBACK_EN__SHIFT 0x0000001a
+#define PB1_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE_MASK 0xc0000000L
+#define PB1_GLB_CTRL_REG0__TXCFG_CMGOOD_RANGE__SHIFT 0x0000001e
+#define PB1_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV_MASK 0x80000000L
+#define PB1_GLB_CTRL_REG1__PLL_CFG_DISPCLK_DIV__SHIFT 0x0000001f
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN_MASK 0x00000001L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL_MASK 0x0000007eL
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_FR_BYP_VAL__SHIFT 0x00000001
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN_MASK 0x00000080L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_EN__SHIFT 0x00000007
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL_MASK 0x00003f00L
+#define PB1_GLB_CTRL_REG1__RXDBG_CDR_PH_BYP_VAL__SHIFT 0x00000008
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN_MASK 0x00004000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_EN__SHIFT 0x0000000e
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL_MASK 0x003f8000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D0TH_BYP_VAL__SHIFT 0x0000000f
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN_MASK 0x00400000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_EN__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL_MASK 0x3f800000L
+#define PB1_GLB_CTRL_REG1__RXDBG_D1TH_BYP_VAL__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG1__TST_LOSPDTST_EN_MASK 0x40000000L
+#define PB1_GLB_CTRL_REG1__TST_LOSPDTST_EN__SHIFT 0x0000001e
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN_MASK 0x00000001L
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_EN__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL_MASK 0x000000feL
+#define PB1_GLB_CTRL_REG2__RXDBG_D2TH_BYP_VAL__SHIFT 0x00000001
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN_MASK 0x00000100L
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_EN__SHIFT 0x00000008
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL_MASK 0x0000fe00L
+#define PB1_GLB_CTRL_REG2__RXDBG_D3TH_BYP_VAL__SHIFT 0x00000009
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN_MASK 0x00010000L
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_EN__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL_MASK 0x00fe0000L
+#define PB1_GLB_CTRL_REG2__RXDBG_DXTH_BYP_VAL__SHIFT 0x00000011
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN_MASK 0x01000000L
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_EN__SHIFT 0x00000018
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL_MASK 0xfe000000L
+#define PB1_GLB_CTRL_REG2__RXDBG_ETH_BYP_VAL__SHIFT 0x00000019
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL_MASK 0x00000060L
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF0_SEL__SHIFT 0x00000005
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL_MASK 0x00000180L
+#define PB1_GLB_CTRL_REG3__BG_CFG_LC_REG_VREF1_SEL__SHIFT 0x00000007
+#define PB1_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL_MASK 0x00000600L
+#define PB1_GLB_CTRL_REG3__BG_CFG_RO_REG_VREF_SEL__SHIFT 0x00000009
+#define PB1_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL_MASK 0x0001c000L
+#define PB1_GLB_CTRL_REG3__BG_DBG_ANALOG_SEL__SHIFT 0x0000000e
+#define PB1_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN_MASK 0x00001000L
+#define PB1_GLB_CTRL_REG3__BG_DBG_IREFBYP_EN__SHIFT 0x0000000c
+#define PB1_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN_MASK 0x00000800L
+#define PB1_GLB_CTRL_REG3__BG_DBG_VREFBYP_EN__SHIFT 0x0000000b
+#define PB1_GLB_CTRL_REG3__DBG_DLL_CLK_SEL_MASK 0x001c0000L
+#define PB1_GLB_CTRL_REG3__DBG_DLL_CLK_SEL__SHIFT 0x00000012
+#define PB1_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE_MASK 0x80000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXLEQ_DCATTN_BYP_OVR_DISABLE__SHIFT 0x0000001f
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN_MASK 0x00400000L
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_EN__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL_MASK 0x07800000L
+#define PB1_GLB_CTRL_REG3__DBG_RXPI_OFFSET_BYP_VAL__SHIFT 0x00000017
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN_MASK 0x08000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_EN__SHIFT 0x0000001b
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL_MASK 0x70000000L
+#define PB1_GLB_CTRL_REG3__DBG_RXSWAPDX_BYP_VAL__SHIFT 0x0000001c
+#define PB1_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL_MASK 0x00200000L
+#define PB1_GLB_CTRL_REG3__PLL_DISPCLK_CMOS_SEL__SHIFT 0x00000015
+#define PB1_GLB_CTRL_REG3__RXDBG_SEL_MASK 0x0000001fL
+#define PB1_GLB_CTRL_REG3__RXDBG_SEL__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_EXEC_MASK 0x03c00000L
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_EXEC__SHIFT 0x00000016
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_INST_MASK 0x0000ffffL
+#define PB1_GLB_CTRL_REG4__DBG_RXAPU_INST__SHIFT 0x00000000
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN_MASK 0x00040000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_EN__SHIFT 0x00000012
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL_MASK 0x00030000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDFEMUX_BYP_VAL__SHIFT 0x00000010
+#define PB1_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL_MASK 0x04000000L
+#define PB1_GLB_CTRL_REG4__DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000001a
+#define PB1_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE_MASK 0x10000000L
+#define PB1_GLB_CTRL_REG4__DBG_RXRDATA_GATING_DISABLE__SHIFT 0x0000001c
+#define PB1_GLB_CTRL_REG4__PWRGOOD_OVRD_MASK 0x08000000L
+#define PB1_GLB_CTRL_REG4__PWRGOOD_OVRD__SHIFT 0x0000001b
+#define PB1_GLB_CTRL_REG5__DBG_RXAPU_MODE_MASK 0x000000ffL
+#define PB1_GLB_CTRL_REG5__DBG_RXAPU_MODE__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL_MASK 0x0000ffffL
+#define PB1_GLB_OVRD_REG0__TXPDTERM_VAL_OVRD_VAL__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB1_GLB_OVRD_REG0__TXPUTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN_MASK 0x00008000L
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_EN__SHIFT 0x0000000f
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL_MASK 0xffff0000L
+#define PB1_GLB_OVRD_REG1__RXTERM_VAL_OVRD_VAL__SHIFT 0x00000010
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN_MASK 0x00000004L
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_EN__SHIFT 0x00000002
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL_MASK 0x00000008L
+#define PB1_GLB_OVRD_REG1__TST_LOSPDTST_RST_OVRD_VAL__SHIFT 0x00000003
+#define PB1_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN_MASK 0x00000001L
+#define PB1_GLB_OVRD_REG1__TXPDTERM_VAL_OVRD_EN__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN_MASK 0x00000002L
+#define PB1_GLB_OVRD_REG1__TXPUTERM_VAL_OVRD_EN__SHIFT 0x00000001
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_EN_MASK 0x00000001L
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_EN__SHIFT 0x00000000
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL_MASK 0x00000002L
+#define PB1_GLB_OVRD_REG2__BG_PWRON_OVRD_VAL__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_ALL_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT_MASK 0x00000010L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IGNR_IMPCAL_ACTIVE_SCI_UPDT__SHIFT 0x00000004
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE_MASK 0x00100000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__IMPCAL_ACTIVE__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG0__RXIMP_MASK 0x000f0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__RXIMP__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXNIMP_MASK 0x00000f00L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXNIMP__SHIFT 0x00000008
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXPIMP_MASK 0x0000f000L
+#define PB1_GLB_SCI_STAT_OVRD_REG0__TXPIMP__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_0__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_1__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_2__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__DLL_LOCK_3__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_0__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_1__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_2__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__FREQDIV_3__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_DLL_LOCK_SCI_UPDT_L0T3__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_FREQDIV_SCI_UPDT_L0T3__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__IGNR_MODE_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_0_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_0__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_1_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_1__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_2_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_2__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_3_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG1__MODE_3__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_4__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_5__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_6__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__DLL_LOCK_7__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_4__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_5__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_6__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__FREQDIV_7__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_DLL_LOCK_SCI_UPDT_L4T7__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_FREQDIV_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__IGNR_MODE_SCI_UPDT_L4T7__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_4_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_4__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_5_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_5__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_6_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_6__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_7_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG2__MODE_7__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_10__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_11__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_8__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__DLL_LOCK_9__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_10__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_11__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_8__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__FREQDIV_9__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_DLL_LOCK_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_FREQDIV_SCI_UPDT_L8T11__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__IGNR_MODE_SCI_UPDT_L8T11__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_10_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_10__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_11_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_11__SHIFT 0x0000001c
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_8_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_8__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_9_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG3__MODE_9__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12_MASK 0x00001000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_12__SHIFT 0x0000000c
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13_MASK 0x00002000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_13__SHIFT 0x0000000d
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14_MASK 0x00004000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_14__SHIFT 0x0000000e
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15_MASK 0x00008000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__DLL_LOCK_15__SHIFT 0x0000000f
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12_MASK 0x000c0000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_12__SHIFT 0x00000012
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13_MASK 0x00c00000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_13__SHIFT 0x00000016
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14_MASK 0x0c000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_14__SHIFT 0x0000001a
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15_MASK 0xc0000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__FREQDIV_15__SHIFT 0x0000001e
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15_MASK 0x00000004L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_DLL_LOCK_SCI_UPDT_L12T15__SHIFT 0x00000002
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15_MASK 0x00000002L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_FREQDIV_SCI_UPDT_L12T15__SHIFT 0x00000001
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15_MASK 0x00000001L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__IGNR_MODE_SCI_UPDT_L12T15__SHIFT 0x00000000
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_12_MASK 0x00030000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_12__SHIFT 0x00000010
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_13_MASK 0x00300000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_13__SHIFT 0x00000014
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_14_MASK 0x03000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_14__SHIFT 0x00000018
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_15_MASK 0x30000000L
+#define PB1_GLB_SCI_STAT_OVRD_REG4__MODE_15__SHIFT 0x0000001c
+#define PB1_HW_DEBUG__PB1_HW_00_DEBUG_MASK 0x00000001L
+#define PB1_HW_DEBUG__PB1_HW_00_DEBUG__SHIFT 0x00000000
+#define PB1_HW_DEBUG__PB1_HW_01_DEBUG_MASK 0x00000002L
+#define PB1_HW_DEBUG__PB1_HW_01_DEBUG__SHIFT 0x00000001
+#define PB1_HW_DEBUG__PB1_HW_02_DEBUG_MASK 0x00000004L
+#define PB1_HW_DEBUG__PB1_HW_02_DEBUG__SHIFT 0x00000002
+#define PB1_HW_DEBUG__PB1_HW_03_DEBUG_MASK 0x00000008L
+#define PB1_HW_DEBUG__PB1_HW_03_DEBUG__SHIFT 0x00000003
+#define PB1_HW_DEBUG__PB1_HW_04_DEBUG_MASK 0x00000010L
+#define PB1_HW_DEBUG__PB1_HW_04_DEBUG__SHIFT 0x00000004
+#define PB1_HW_DEBUG__PB1_HW_05_DEBUG_MASK 0x00000020L
+#define PB1_HW_DEBUG__PB1_HW_05_DEBUG__SHIFT 0x00000005
+#define PB1_HW_DEBUG__PB1_HW_06_DEBUG_MASK 0x00000040L
+#define PB1_HW_DEBUG__PB1_HW_06_DEBUG__SHIFT 0x00000006
+#define PB1_HW_DEBUG__PB1_HW_07_DEBUG_MASK 0x00000080L
+#define PB1_HW_DEBUG__PB1_HW_07_DEBUG__SHIFT 0x00000007
+#define PB1_HW_DEBUG__PB1_HW_08_DEBUG_MASK 0x00000100L
+#define PB1_HW_DEBUG__PB1_HW_08_DEBUG__SHIFT 0x00000008
+#define PB1_HW_DEBUG__PB1_HW_09_DEBUG_MASK 0x00000200L
+#define PB1_HW_DEBUG__PB1_HW_09_DEBUG__SHIFT 0x00000009
+#define PB1_HW_DEBUG__PB1_HW_10_DEBUG_MASK 0x00000400L
+#define PB1_HW_DEBUG__PB1_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB1_HW_DEBUG__PB1_HW_11_DEBUG_MASK 0x00000800L
+#define PB1_HW_DEBUG__PB1_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB1_HW_DEBUG__PB1_HW_12_DEBUG_MASK 0x00001000L
+#define PB1_HW_DEBUG__PB1_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB1_HW_DEBUG__PB1_HW_13_DEBUG_MASK 0x00002000L
+#define PB1_HW_DEBUG__PB1_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB1_HW_DEBUG__PB1_HW_14_DEBUG_MASK 0x00004000L
+#define PB1_HW_DEBUG__PB1_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB1_HW_DEBUG__PB1_HW_15_DEBUG_MASK 0x00008000L
+#define PB1_HW_DEBUG__PB1_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB1_HW_DEBUG__PB1_HW_16_DEBUG_MASK 0x00010000L
+#define PB1_HW_DEBUG__PB1_HW_16_DEBUG__SHIFT 0x00000010
+#define PB1_HW_DEBUG__PB1_HW_17_DEBUG_MASK 0x00020000L
+#define PB1_HW_DEBUG__PB1_HW_17_DEBUG__SHIFT 0x00000011
+#define PB1_HW_DEBUG__PB1_HW_18_DEBUG_MASK 0x00040000L
+#define PB1_HW_DEBUG__PB1_HW_18_DEBUG__SHIFT 0x00000012
+#define PB1_HW_DEBUG__PB1_HW_19_DEBUG_MASK 0x00080000L
+#define PB1_HW_DEBUG__PB1_HW_19_DEBUG__SHIFT 0x00000013
+#define PB1_HW_DEBUG__PB1_HW_20_DEBUG_MASK 0x00100000L
+#define PB1_HW_DEBUG__PB1_HW_20_DEBUG__SHIFT 0x00000014
+#define PB1_HW_DEBUG__PB1_HW_21_DEBUG_MASK 0x00200000L
+#define PB1_HW_DEBUG__PB1_HW_21_DEBUG__SHIFT 0x00000015
+#define PB1_HW_DEBUG__PB1_HW_22_DEBUG_MASK 0x00400000L
+#define PB1_HW_DEBUG__PB1_HW_22_DEBUG__SHIFT 0x00000016
+#define PB1_HW_DEBUG__PB1_HW_23_DEBUG_MASK 0x00800000L
+#define PB1_HW_DEBUG__PB1_HW_23_DEBUG__SHIFT 0x00000017
+#define PB1_HW_DEBUG__PB1_HW_24_DEBUG_MASK 0x01000000L
+#define PB1_HW_DEBUG__PB1_HW_24_DEBUG__SHIFT 0x00000018
+#define PB1_HW_DEBUG__PB1_HW_25_DEBUG_MASK 0x02000000L
+#define PB1_HW_DEBUG__PB1_HW_25_DEBUG__SHIFT 0x00000019
+#define PB1_HW_DEBUG__PB1_HW_26_DEBUG_MASK 0x04000000L
+#define PB1_HW_DEBUG__PB1_HW_26_DEBUG__SHIFT 0x0000001a
+#define PB1_HW_DEBUG__PB1_HW_27_DEBUG_MASK 0x08000000L
+#define PB1_HW_DEBUG__PB1_HW_27_DEBUG__SHIFT 0x0000001b
+#define PB1_HW_DEBUG__PB1_HW_28_DEBUG_MASK 0x10000000L
+#define PB1_HW_DEBUG__PB1_HW_28_DEBUG__SHIFT 0x0000001c
+#define PB1_HW_DEBUG__PB1_HW_29_DEBUG_MASK 0x20000000L
+#define PB1_HW_DEBUG__PB1_HW_29_DEBUG__SHIFT 0x0000001d
+#define PB1_HW_DEBUG__PB1_HW_30_DEBUG_MASK 0x40000000L
+#define PB1_HW_DEBUG__PB1_HW_30_DEBUG__SHIFT 0x0000001e
+#define PB1_HW_DEBUG__PB1_HW_31_DEBUG_MASK 0x80000000L
+#define PB1_HW_DEBUG__PB1_HW_31_DEBUG__SHIFT 0x0000001f
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_EN_MASK 0x00000080L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_EN__SHIFT 0x00000007
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0_MASK 0x00000100L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_0__SHIFT 0x00000008
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10_MASK 0x00040000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_10__SHIFT 0x00000012
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11_MASK 0x00080000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_11__SHIFT 0x00000013
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12_MASK 0x00100000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_12__SHIFT 0x00000014
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13_MASK 0x00200000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_13__SHIFT 0x00000015
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14_MASK 0x00400000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_14__SHIFT 0x00000016
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15_MASK 0x00800000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_15__SHIFT 0x00000017
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2_MASK 0x00000400L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_2__SHIFT 0x0000000a
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3_MASK 0x00000800L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4_MASK 0x00001000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_4__SHIFT 0x0000000c
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5_MASK 0x00002000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_5__SHIFT 0x0000000d
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6_MASK 0x00004000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_6__SHIFT 0x0000000e
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7_MASK 0x00008000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8_MASK 0x00010000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_8__SHIFT 0x00000010
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9_MASK 0x00020000L
+#define PB1_PIF_CNTL2__RXDETECT_OVERRIDE_VAL_9__SHIFT 0x00000011
+#define PB1_PIF_CNTL2__RXDETECT_SAMPL_TIME_MASK 0x00000006L
+#define PB1_PIF_CNTL2__RXDETECT_SAMPL_TIME__SHIFT 0x00000001
+#define PB1_PIF_CNTL2__RXPHYSTATUS_DELAY_MASK 0x07000000L
+#define PB1_PIF_CNTL2__RXPHYSTATUS_DELAY__SHIFT 0x00000018
+#define PB1_PIF_CNTL__DA_FIFO_RESET_0_MASK 0x00000002L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_0__SHIFT 0x00000001
+#define PB1_PIF_CNTL__DA_FIFO_RESET_1_MASK 0x00000020L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_1__SHIFT 0x00000005
+#define PB1_PIF_CNTL__DA_FIFO_RESET_2_MASK 0x00000200L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_2__SHIFT 0x00000009
+#define PB1_PIF_CNTL__DA_FIFO_RESET_3_MASK 0x00002000L
+#define PB1_PIF_CNTL__DA_FIFO_RESET_3__SHIFT 0x0000000d
+#define PB1_PIF_CNTL__DIVINIT_MODE_MASK 0x00000100L
+#define PB1_PIF_CNTL__DIVINIT_MODE__SHIFT 0x00000008
+#define PB1_PIF_CNTL__EI_CYCLE_OFF_TIME_MASK 0x00700000L
+#define PB1_PIF_CNTL__EI_CYCLE_OFF_TIME__SHIFT 0x00000014
+#define PB1_PIF_CNTL__EI_DET_CYCLE_MODE_MASK 0x00000010L
+#define PB1_PIF_CNTL__EI_DET_CYCLE_MODE__SHIFT 0x00000004
+#define PB1_PIF_CNTL__EXIT_L0S_INIT_DIS_MASK 0x00800000L
+#define PB1_PIF_CNTL__EXIT_L0S_INIT_DIS__SHIFT 0x00000017
+#define PB1_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP_MASK 0x10000000L
+#define PB1_PIF_CNTL__EXTEND_WAIT_FOR_RAMPUP__SHIFT 0x0000001c
+#define PB1_PIF_CNTL__IGNORE_TxDataValid_EP_DIS_MASK 0x20000000L
+#define PB1_PIF_CNTL__IGNORE_TxDataValid_EP_DIS__SHIFT 0x0000001d
+#define PB1_PIF_CNTL__LS2_EXIT_TIME_MASK 0x000e0000L
+#define PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT 0x00000011
+#define PB1_PIF_CNTL__PHYCMD_CR_EN_MODE_MASK 0x00000008L
+#define PB1_PIF_CNTL__PHYCMD_CR_EN_MODE__SHIFT 0x00000003
+#define PB1_PIF_CNTL__PHY_CR_EN_MODE_MASK 0x00000004L
+#define PB1_PIF_CNTL__PHY_CR_EN_MODE__SHIFT 0x00000002
+#define PB1_PIF_CNTL__PLL_BINDING_ENABLE_MASK 0x00000400L
+#define PB1_PIF_CNTL__PLL_BINDING_ENABLE__SHIFT 0x0000000a
+#define PB1_PIF_CNTL__RXDETECT_FIFO_RESET_MODE_MASK 0x00000040L
+#define PB1_PIF_CNTL__RXDETECT_FIFO_RESET_MODE__SHIFT 0x00000006
+#define PB1_PIF_CNTL__RXDETECT_TX_PWR_MODE_MASK 0x00000080L
+#define PB1_PIF_CNTL__RXDETECT_TX_PWR_MODE__SHIFT 0x00000007
+#define PB1_PIF_CNTL__RXEN_GATER_MASK 0x0f000000L
+#define PB1_PIF_CNTL__RXEN_GATER__SHIFT 0x00000018
+#define PB1_PIF_CNTL__SC_CALIB_DONE_CNTL_MASK 0x00000800L
+#define PB1_PIF_CNTL__SC_CALIB_DONE_CNTL__SHIFT 0x0000000b
+#define PB1_PIF_CNTL__SERIAL_CFG_ENABLE_MASK 0x00000001L
+#define PB1_PIF_CNTL__SERIAL_CFG_ENABLE__SHIFT 0x00000000
+#define PB1_PIF_CNTL__TXGND_TIME_MASK 0x00010000L
+#define PB1_PIF_CNTL__TXGND_TIME__SHIFT 0x00000010
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_00_DEBUG_MASK 0x00000001L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_00_DEBUG__SHIFT 0x00000000
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_01_DEBUG_MASK 0x00000002L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_01_DEBUG__SHIFT 0x00000001
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_02_DEBUG_MASK 0x00000004L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_02_DEBUG__SHIFT 0x00000002
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_03_DEBUG_MASK 0x00000008L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_03_DEBUG__SHIFT 0x00000003
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_04_DEBUG_MASK 0x00000010L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_04_DEBUG__SHIFT 0x00000004
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_05_DEBUG_MASK 0x00000020L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_05_DEBUG__SHIFT 0x00000005
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_06_DEBUG_MASK 0x00000040L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_06_DEBUG__SHIFT 0x00000006
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_07_DEBUG_MASK 0x00000080L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_07_DEBUG__SHIFT 0x00000007
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_08_DEBUG_MASK 0x00000100L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_08_DEBUG__SHIFT 0x00000008
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_09_DEBUG_MASK 0x00000200L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_09_DEBUG__SHIFT 0x00000009
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_10_DEBUG_MASK 0x00000400L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_10_DEBUG__SHIFT 0x0000000a
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_11_DEBUG_MASK 0x00000800L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_11_DEBUG__SHIFT 0x0000000b
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_12_DEBUG_MASK 0x00001000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_12_DEBUG__SHIFT 0x0000000c
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_13_DEBUG_MASK 0x00002000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_13_DEBUG__SHIFT 0x0000000d
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_14_DEBUG_MASK 0x00004000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_14_DEBUG__SHIFT 0x0000000e
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_15_DEBUG_MASK 0x00008000L
+#define PB1_PIF_HW_DEBUG__PB1_PIF_HW_15_DEBUG__SHIFT 0x0000000f
+#define PB1_PIF_PAIRING__MULTI_PIF_MASK 0x02000000L
+#define PB1_PIF_PAIRING__MULTI_PIF__SHIFT 0x00000019
+#define PB1_PIF_PAIRING__X16_LANE_15_0_MASK 0x00100000L
+#define PB1_PIF_PAIRING__X16_LANE_15_0__SHIFT 0x00000014
+#define PB1_PIF_PAIRING__X2_LANE_1_0_MASK 0x00000001L
+#define PB1_PIF_PAIRING__X2_LANE_1_0__SHIFT 0x00000000
+#define PB1_PIF_PAIRING__X2_LANE_11_10_MASK 0x00000020L
+#define PB1_PIF_PAIRING__X2_LANE_11_10__SHIFT 0x00000005
+#define PB1_PIF_PAIRING__X2_LANE_13_12_MASK 0x00000040L
+#define PB1_PIF_PAIRING__X2_LANE_13_12__SHIFT 0x00000006
+#define PB1_PIF_PAIRING__X2_LANE_15_14_MASK 0x00000080L
+#define PB1_PIF_PAIRING__X2_LANE_15_14__SHIFT 0x00000007
+#define PB1_PIF_PAIRING__X2_LANE_3_2_MASK 0x00000002L
+#define PB1_PIF_PAIRING__X2_LANE_3_2__SHIFT 0x00000001
+#define PB1_PIF_PAIRING__X2_LANE_5_4_MASK 0x00000004L
+#define PB1_PIF_PAIRING__X2_LANE_5_4__SHIFT 0x00000002
+#define PB1_PIF_PAIRING__X2_LANE_7_6_MASK 0x00000008L
+#define PB1_PIF_PAIRING__X2_LANE_7_6__SHIFT 0x00000003
+#define PB1_PIF_PAIRING__X2_LANE_9_8_MASK 0x00000010L
+#define PB1_PIF_PAIRING__X2_LANE_9_8__SHIFT 0x00000004
+#define PB1_PIF_PAIRING__X4_LANE_11_8_MASK 0x00000400L
+#define PB1_PIF_PAIRING__X4_LANE_11_8__SHIFT 0x0000000a
+#define PB1_PIF_PAIRING__X4_LANE_15_12_MASK 0x00000800L
+#define PB1_PIF_PAIRING__X4_LANE_15_12__SHIFT 0x0000000b
+#define PB1_PIF_PAIRING__X4_LANE_3_0_MASK 0x00000100L
+#define PB1_PIF_PAIRING__X4_LANE_3_0__SHIFT 0x00000008
+#define PB1_PIF_PAIRING__X4_LANE_7_4_MASK 0x00000200L
+#define PB1_PIF_PAIRING__X4_LANE_7_4__SHIFT 0x00000009
+#define PB1_PIF_PAIRING__X8_LANE_15_8_MASK 0x00020000L
+#define PB1_PIF_PAIRING__X8_LANE_15_8__SHIFT 0x00000011
+#define PB1_PIF_PAIRING__X8_LANE_7_0_MASK 0x00010000L
+#define PB1_PIF_PAIRING__X8_LANE_7_0__SHIFT 0x00000010
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_EN_0__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXEN_OVERRIDE_VAL_0__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_0__RX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_EN_0__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_0__RXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_EN_0__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_0__TX_PDNB_OVERRIDE_VAL_0__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_EN_0__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_0__TXPWR_OVERRIDE_VAL_0__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_EN_10__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXEN_OVERRIDE_VAL_10__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_10__RX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_EN_10__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_10__RXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_EN_10__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_10__TX_PDNB_OVERRIDE_VAL_10__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_EN_10__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_10__TXPWR_OVERRIDE_VAL_10__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_EN_11__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXEN_OVERRIDE_VAL_11__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_11__RX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_EN_11__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_11__RXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_EN_11__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_11__TX_PDNB_OVERRIDE_VAL_11__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_EN_11__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_11__TXPWR_OVERRIDE_VAL_11__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_EN_12__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXEN_OVERRIDE_VAL_12__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_12__RX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_EN_12__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_12__RXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_EN_12__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_12__TX_PDNB_OVERRIDE_VAL_12__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_EN_12__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_12__TXPWR_OVERRIDE_VAL_12__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_EN_13__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXEN_OVERRIDE_VAL_13__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_13__RX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_EN_13__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_13__RXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_EN_13__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_13__TX_PDNB_OVERRIDE_VAL_13__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_EN_13__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_13__TXPWR_OVERRIDE_VAL_13__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_EN_14__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXEN_OVERRIDE_VAL_14__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_14__RX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_EN_14__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_14__RXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_EN_14__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_14__TX_PDNB_OVERRIDE_VAL_14__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_EN_14__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_14__TXPWR_OVERRIDE_VAL_14__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_EN_15__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXEN_OVERRIDE_VAL_15__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_15__RX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_EN_15__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_15__RXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_EN_15__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_15__TX_PDNB_OVERRIDE_VAL_15__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_EN_15__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_15__TXPWR_OVERRIDE_VAL_15__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_EN_1__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXEN_OVERRIDE_VAL_1__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_1__RX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_EN_1__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_1__RXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_EN_1__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_1__TX_PDNB_OVERRIDE_VAL_1__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_EN_1__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_1__TXPWR_OVERRIDE_VAL_1__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_EN_2__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXEN_OVERRIDE_VAL_2__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_2__RX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_EN_2__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_2__RXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_EN_2__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_2__TX_PDNB_OVERRIDE_VAL_2__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_EN_2__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_2__TXPWR_OVERRIDE_VAL_2__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_EN_3__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXEN_OVERRIDE_VAL_3__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_3__RX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_EN_3__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_3__RXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_EN_3__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_3__TX_PDNB_OVERRIDE_VAL_3__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_EN_3__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_3__TXPWR_OVERRIDE_VAL_3__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_EN_4__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXEN_OVERRIDE_VAL_4__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_4__RX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_EN_4__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_4__RXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_EN_4__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_4__TX_PDNB_OVERRIDE_VAL_4__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_EN_4__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_4__TXPWR_OVERRIDE_VAL_4__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_EN_5__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXEN_OVERRIDE_VAL_5__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_5__RX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_EN_5__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_5__RXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_EN_5__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_5__TX_PDNB_OVERRIDE_VAL_5__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_EN_5__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_5__TXPWR_OVERRIDE_VAL_5__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_EN_6__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXEN_OVERRIDE_VAL_6__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_6__RX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_EN_6__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_6__RXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_EN_6__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_6__TX_PDNB_OVERRIDE_VAL_6__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_EN_6__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_6__TXPWR_OVERRIDE_VAL_6__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_EN_7__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXEN_OVERRIDE_VAL_7__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_7__RX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_EN_7__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_7__RXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_EN_7__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_7__TX_PDNB_OVERRIDE_VAL_7__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_EN_7__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_7__TXPWR_OVERRIDE_VAL_7__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_EN_8__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXEN_OVERRIDE_VAL_8__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_8__RX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_EN_8__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_8__RXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_EN_8__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_8__TX_PDNB_OVERRIDE_VAL_8__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_EN_8__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_8__TXPWR_OVERRIDE_VAL_8__SHIFT 0x0000000b
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9_MASK 0x00000100L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_EN_9__SHIFT 0x00000008
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9_MASK 0x00000200L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXEN_OVERRIDE_VAL_9__SHIFT 0x00000009
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9_MASK 0x00000010L
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000004
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9_MASK 0x000000e0L
+#define PB1_PIF_PDNB_OVERRIDE_9__RX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000005
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9_MASK 0x00004000L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_EN_9__SHIFT 0x0000000e
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9_MASK 0x00038000L
+#define PB1_PIF_PDNB_OVERRIDE_9__RXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000f
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9_MASK 0x00000001L
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_EN_9__SHIFT 0x00000000
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9_MASK 0x0000000eL
+#define PB1_PIF_PDNB_OVERRIDE_9__TX_PDNB_OVERRIDE_VAL_9__SHIFT 0x00000001
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9_MASK 0x00000400L
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_EN_9__SHIFT 0x0000000a
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9_MASK 0x00003800L
+#define PB1_PIF_PDNB_OVERRIDE_9__TXPWR_OVERRIDE_VAL_9__SHIFT 0x0000000b
+#define PB1_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_0__FORCE_RXEN_IN_L0s_0__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_EN_0__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_0__PLLPWR_OVERRIDE_VAL_0__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_0__RX_POWER_STATE_IN_RXS2_0__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_0__TX2P5CLK_CLOCK_GATING_EN_0__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_0__TX_POWER_STATE_IN_TXS2_0__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_1__FORCE_RXEN_IN_L0s_1__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_EN_1__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_1__PLLPWR_OVERRIDE_VAL_1__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_1__RX_POWER_STATE_IN_RXS2_1__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_1__TX2P5CLK_CLOCK_GATING_EN_1__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_1__TX_POWER_STATE_IN_TXS2_1__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_2__FORCE_RXEN_IN_L0s_2__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_OFF_2__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_2__PLL_POWER_STATE_IN_TXS2_2__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_EN_2__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_2__PLLPWR_OVERRIDE_VAL_2__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_2__RX_POWER_STATE_IN_RXS2_2__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_2__TX2P5CLK_CLOCK_GATING_EN_2__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_2__TX_POWER_STATE_IN_TXS2_2__SHIFT 0x00000000
+#define PB1_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3_MASK 0x00000008L
+#define PB1_PIF_PWRDOWN_3__FORCE_RXEN_IN_L0s_3__SHIFT 0x00000003
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3_MASK 0x00001c00L
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_OFF_3__SHIFT 0x0000000a
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3_MASK 0x00000380L
+#define PB1_PIF_PWRDOWN_3__PLL_POWER_STATE_IN_TXS2_3__SHIFT 0x00000007
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3_MASK 0x10000000L
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_EN_3__SHIFT 0x0000001c
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3_MASK 0xe0000000L
+#define PB1_PIF_PWRDOWN_3__PLLPWR_OVERRIDE_VAL_3__SHIFT 0x0000001d
+#define PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK 0x07000000L
+#define PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3__SHIFT 0x00000018
+#define PB1_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3_MASK 0x00000070L
+#define PB1_PIF_PWRDOWN_3__RX_POWER_STATE_IN_RXS2_3__SHIFT 0x00000004
+#define PB1_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3_MASK 0x00010000L
+#define PB1_PIF_PWRDOWN_3__TX2P5CLK_CLOCK_GATING_EN_3__SHIFT 0x00000010
+#define PB1_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3_MASK 0x00000007L
+#define PB1_PIF_PWRDOWN_3__TX_POWER_STATE_IN_TXS2_3__SHIFT 0x00000000
+#define PB1_PIF_SC_CTL__SC_CALIBRATION_MASK 0x00000001L
+#define PB1_PIF_SC_CTL__SC_CALIBRATION__SHIFT 0x00000000
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0_MASK 0x00000020L
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0__SHIFT 0x00000005
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S_MASK 0x00000010L
+#define PB1_PIF_SC_CTL__SC_ENTER_L1_FROM_L0S__SHIFT 0x00000004
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0_MASK 0x00000008L
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0__SHIFT 0x00000003
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0S_MASK 0x00000004L
+#define PB1_PIF_SC_CTL__SC_EXIT_L1_TO_L0S__SHIFT 0x00000002
+#define PB1_PIF_SC_CTL__SC_LANE_0_RESUME_MASK 0x00010000L
+#define PB1_PIF_SC_CTL__SC_LANE_0_RESUME__SHIFT 0x00000010
+#define PB1_PIF_SC_CTL__SC_LANE_10_RESUME_MASK 0x04000000L
+#define PB1_PIF_SC_CTL__SC_LANE_10_RESUME__SHIFT 0x0000001a
+#define PB1_PIF_SC_CTL__SC_LANE_11_RESUME_MASK 0x08000000L
+#define PB1_PIF_SC_CTL__SC_LANE_11_RESUME__SHIFT 0x0000001b
+#define PB1_PIF_SC_CTL__SC_LANE_12_RESUME_MASK 0x10000000L
+#define PB1_PIF_SC_CTL__SC_LANE_12_RESUME__SHIFT 0x0000001c
+#define PB1_PIF_SC_CTL__SC_LANE_13_RESUME_MASK 0x20000000L
+#define PB1_PIF_SC_CTL__SC_LANE_13_RESUME__SHIFT 0x0000001d
+#define PB1_PIF_SC_CTL__SC_LANE_14_RESUME_MASK 0x40000000L
+#define PB1_PIF_SC_CTL__SC_LANE_14_RESUME__SHIFT 0x0000001e
+#define PB1_PIF_SC_CTL__SC_LANE_15_RESUME_MASK 0x80000000L
+#define PB1_PIF_SC_CTL__SC_LANE_15_RESUME__SHIFT 0x0000001f
+#define PB1_PIF_SC_CTL__SC_LANE_1_RESUME_MASK 0x00020000L
+#define PB1_PIF_SC_CTL__SC_LANE_1_RESUME__SHIFT 0x00000011
+#define PB1_PIF_SC_CTL__SC_LANE_2_RESUME_MASK 0x00040000L
+#define PB1_PIF_SC_CTL__SC_LANE_2_RESUME__SHIFT 0x00000012
+#define PB1_PIF_SC_CTL__SC_LANE_3_RESUME_MASK 0x00080000L
+#define PB1_PIF_SC_CTL__SC_LANE_3_RESUME__SHIFT 0x00000013
+#define PB1_PIF_SC_CTL__SC_LANE_4_RESUME_MASK 0x00100000L
+#define PB1_PIF_SC_CTL__SC_LANE_4_RESUME__SHIFT 0x00000014
+#define PB1_PIF_SC_CTL__SC_LANE_5_RESUME_MASK 0x00200000L
+#define PB1_PIF_SC_CTL__SC_LANE_5_RESUME__SHIFT 0x00000015
+#define PB1_PIF_SC_CTL__SC_LANE_6_RESUME_MASK 0x00400000L
+#define PB1_PIF_SC_CTL__SC_LANE_6_RESUME__SHIFT 0x00000016
+#define PB1_PIF_SC_CTL__SC_LANE_7_RESUME_MASK 0x00800000L
+#define PB1_PIF_SC_CTL__SC_LANE_7_RESUME__SHIFT 0x00000017
+#define PB1_PIF_SC_CTL__SC_LANE_8_RESUME_MASK 0x01000000L
+#define PB1_PIF_SC_CTL__SC_LANE_8_RESUME__SHIFT 0x00000018
+#define PB1_PIF_SC_CTL__SC_LANE_9_RESUME_MASK 0x02000000L
+#define PB1_PIF_SC_CTL__SC_LANE_9_RESUME__SHIFT 0x00000019
+#define PB1_PIF_SC_CTL__SC_PHASE_1_MASK 0x00000100L
+#define PB1_PIF_SC_CTL__SC_PHASE_1__SHIFT 0x00000008
+#define PB1_PIF_SC_CTL__SC_PHASE_2_MASK 0x00000200L
+#define PB1_PIF_SC_CTL__SC_PHASE_2__SHIFT 0x00000009
+#define PB1_PIF_SC_CTL__SC_PHASE_3_MASK 0x00000400L
+#define PB1_PIF_SC_CTL__SC_PHASE_3__SHIFT 0x0000000a
+#define PB1_PIF_SC_CTL__SC_PHASE_4_MASK 0x00000800L
+#define PB1_PIF_SC_CTL__SC_PHASE_4__SHIFT 0x0000000b
+#define PB1_PIF_SC_CTL__SC_PHASE_5_MASK 0x00001000L
+#define PB1_PIF_SC_CTL__SC_PHASE_5__SHIFT 0x0000000c
+#define PB1_PIF_SC_CTL__SC_PHASE_6_MASK 0x00002000L
+#define PB1_PIF_SC_CTL__SC_PHASE_6__SHIFT 0x0000000d
+#define PB1_PIF_SC_CTL__SC_PHASE_7_MASK 0x00004000L
+#define PB1_PIF_SC_CTL__SC_PHASE_7__SHIFT 0x0000000e
+#define PB1_PIF_SC_CTL__SC_PHASE_8_MASK 0x00008000L
+#define PB1_PIF_SC_CTL__SC_PHASE_8__SHIFT 0x0000000f
+#define PB1_PIF_SC_CTL__SC_RXDETECT_MASK 0x00000002L
+#define PB1_PIF_SC_CTL__SC_RXDETECT__SHIFT 0x00000001
+#define PB1_PIF_SC_CTL__SC_SPEED_CHANGE_MASK 0x00000040L
+#define PB1_PIF_SC_CTL__SC_SPEED_CHANGE__SHIFT 0x00000006
+#define PB1_PIF_SCRATCH__PIF_SCRATCH_MASK 0xffffffffL
+#define PB1_PIF_SCRATCH__PIF_SCRATCH__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_CALIBRATION_0__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0_0__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_ENTER_L1_FROM_L0S_0__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0_0__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_EXIT_L1_TO_L0S_0__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_0__SEQ_PHASE_0_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_PHASE_0__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_RXDETECT_0__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_0__SEQ_SPEED_CHANGE_0__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_CALIBRATION_10__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0_10__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_ENTER_L1_FROM_L0S_10__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0_10__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_EXIT_L1_TO_L0S_10__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_10__SEQ_PHASE_10_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_PHASE_10__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_RXDETECT_10__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_10__SEQ_SPEED_CHANGE_10__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_CALIBRATION_11__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0_11__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_ENTER_L1_FROM_L0S_11__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0_11__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_EXIT_L1_TO_L0S_11__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_11__SEQ_PHASE_11_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_PHASE_11__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_RXDETECT_11__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_11__SEQ_SPEED_CHANGE_11__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_CALIBRATION_12__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0_12__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_ENTER_L1_FROM_L0S_12__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0_12__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_EXIT_L1_TO_L0S_12__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_12__SEQ_PHASE_12_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_PHASE_12__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_RXDETECT_12__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_12__SEQ_SPEED_CHANGE_12__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_CALIBRATION_13__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0_13__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_ENTER_L1_FROM_L0S_13__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0_13__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_EXIT_L1_TO_L0S_13__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_13__SEQ_PHASE_13_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_PHASE_13__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_RXDETECT_13__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_13__SEQ_SPEED_CHANGE_13__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_CALIBRATION_14__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0_14__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_ENTER_L1_FROM_L0S_14__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0_14__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_EXIT_L1_TO_L0S_14__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_14__SEQ_PHASE_14_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_PHASE_14__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_RXDETECT_14__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_14__SEQ_SPEED_CHANGE_14__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_CALIBRATION_15__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0_15__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_ENTER_L1_FROM_L0S_15__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0_15__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_EXIT_L1_TO_L0S_15__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_15__SEQ_PHASE_15_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_PHASE_15__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_RXDETECT_15__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_15__SEQ_SPEED_CHANGE_15__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_CALIBRATION_1__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0_1__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_ENTER_L1_FROM_L0S_1__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0_1__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_EXIT_L1_TO_L0S_1__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_1__SEQ_PHASE_1_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_PHASE_1__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_RXDETECT_1__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_1__SEQ_SPEED_CHANGE_1__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_CALIBRATION_2__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0_2__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_ENTER_L1_FROM_L0S_2__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0_2__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_EXIT_L1_TO_L0S_2__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_2__SEQ_PHASE_2_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_PHASE_2__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_RXDETECT_2__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_2__SEQ_SPEED_CHANGE_2__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_CALIBRATION_3__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0_3__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_ENTER_L1_FROM_L0S_3__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0_3__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_EXIT_L1_TO_L0S_3__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_3__SEQ_PHASE_3_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_PHASE_3__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_RXDETECT_3__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_3__SEQ_SPEED_CHANGE_3__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_CALIBRATION_4__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0_4__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_ENTER_L1_FROM_L0S_4__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0_4__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_EXIT_L1_TO_L0S_4__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_4__SEQ_PHASE_4_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_PHASE_4__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_RXDETECT_4__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_4__SEQ_SPEED_CHANGE_4__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_CALIBRATION_5__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0_5__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_ENTER_L1_FROM_L0S_5__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0_5__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_EXIT_L1_TO_L0S_5__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_5__SEQ_PHASE_5_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_PHASE_5__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_RXDETECT_5__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_5__SEQ_SPEED_CHANGE_5__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_CALIBRATION_6__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0_6__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_ENTER_L1_FROM_L0S_6__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0_6__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_EXIT_L1_TO_L0S_6__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_6__SEQ_PHASE_6_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_PHASE_6__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_RXDETECT_6__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_6__SEQ_SPEED_CHANGE_6__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_CALIBRATION_7__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0_7__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_ENTER_L1_FROM_L0S_7__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0_7__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_EXIT_L1_TO_L0S_7__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_7__SEQ_PHASE_7_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_PHASE_7__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_RXDETECT_7__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_7__SEQ_SPEED_CHANGE_7__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_CALIBRATION_8__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0_8__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_ENTER_L1_FROM_L0S_8__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0_8__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_EXIT_L1_TO_L0S_8__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_8__SEQ_PHASE_8_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_PHASE_8__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_RXDETECT_8__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_8__SEQ_SPEED_CHANGE_8__SHIFT 0x00000006
+#define PB1_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9_MASK 0x00000001L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_CALIBRATION_9__SHIFT 0x00000000
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9_MASK 0x00000020L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0_9__SHIFT 0x00000005
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9_MASK 0x00000010L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_ENTER_L1_FROM_L0S_9__SHIFT 0x00000004
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9_MASK 0x00000008L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0_9__SHIFT 0x00000003
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9_MASK 0x00000004L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_EXIT_L1_TO_L0S_9__SHIFT 0x00000002
+#define PB1_PIF_SEQ_STATUS_9__SEQ_PHASE_9_MASK 0x00000700L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_PHASE_9__SHIFT 0x00000008
+#define PB1_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9_MASK 0x00000002L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_RXDETECT_9__SHIFT 0x00000001
+#define PB1_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9_MASK 0x00000040L
+#define PB1_PIF_SEQ_STATUS_9__SEQ_SPEED_CHANGE_9__SHIFT 0x00000006
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_0_MASK 0x00000001L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_0__SHIFT 0x00000000
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_10_MASK 0x00000400L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_10__SHIFT 0x0000000a
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_11_MASK 0x00000800L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_11__SHIFT 0x0000000b
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_12_MASK 0x00001000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_12__SHIFT 0x0000000c
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_13_MASK 0x00002000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_13__SHIFT 0x0000000d
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_14_MASK 0x00004000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_14__SHIFT 0x0000000e
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_15_MASK 0x00008000L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_15__SHIFT 0x0000000f
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_1_MASK 0x00000002L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_1__SHIFT 0x00000001
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_2_MASK 0x00000004L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_2__SHIFT 0x00000002
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_3_MASK 0x00000008L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_3__SHIFT 0x00000003
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_4_MASK 0x00000010L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_4__SHIFT 0x00000004
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_5_MASK 0x00000020L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_5__SHIFT 0x00000005
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_6_MASK 0x00000040L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_6__SHIFT 0x00000006
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_7_MASK 0x00000080L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_7__SHIFT 0x00000007
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_8_MASK 0x00000100L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_8__SHIFT 0x00000008
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_9_MASK 0x00000200L
+#define PB1_PIF_TXPHYSTATUS__TXPHYSTATUS_9__SHIFT 0x00000009
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0_MASK 0x00000003L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_DBG_LC_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0_MASK 0x00000010L
+#define PB1_PLL_LC0_CTRL_REG0__PLL_TST_LC_USAMPLE_EN_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0_MASK 0x00000007L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0_MASK 0x00000080L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000070L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0_MASK 0x00000200L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_EN_0__SHIFT 0x00000009
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0_MASK 0x00000100L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_CORECLK_EN_OVRD_VAL_0__SHIFT 0x00000008
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0_MASK 0x00040000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_EN_0__SHIFT 0x00000012
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0_MASK 0x0003fc00L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_FBDIV_OVRD_VAL_0__SHIFT 0x0000000a
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0_MASK 0x10000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_EN_0__SHIFT 0x0000001c
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0_MASK 0x0ff80000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_LF_CNTRL_OVRD_VAL_0__SHIFT 0x00000013
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0_MASK 0x80000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_EN_0__SHIFT 0x0000001f
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0_MASK 0x60000000L
+#define PB1_PLL_LC0_OVRD_REG0__PLL_CFG_LC_REFDIV_OVRD_VAL_0__SHIFT 0x0000001d
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0_MASK 0x00000008L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0_MASK 0x00000007L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0_MASK 0x00040000L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_EN_0__SHIFT 0x00000012
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0_MASK 0x0003c000L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_CFG_LC_VCO_TUNE_OVRD_VAL_0__SHIFT 0x0000000e
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000020L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000010L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00000080L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000040L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0_MASK 0x00000200L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_EN_0__SHIFT 0x00000009
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0_MASK 0x00000100L
+#define PB1_PLL_LC0_OVRD_REG1__PLL_LC_PWRON_OVRD_VAL_0__SHIFT 0x00000008
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC0_SCI_STAT_OVRD_REG0__PLL_LC0_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC1_SCI_STAT_OVRD_REG0__PLL_LC1_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC2_SCI_STAT_OVRD_REG0__PLL_LC2_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_LC3_SCI_STAT_OVRD_REG0__PLL_LC3_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0_MASK 0x00000003L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_ANALOG_SEL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0_MASK 0x00000004L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_EXT_RESET_EN_0__SHIFT 0x00000002
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0_MASK 0x000007f0L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_LF_CNTRL_0__SHIFT 0x00000004
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0_MASK 0x00000008L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_DBG_RO_VCTL_ADC_EN_0__SHIFT 0x00000003
+#define PB1_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0_MASK 0x00000800L
+#define PB1_PLL_RO0_CTRL_REG0__PLL_TST_RO_USAMPLE_EN_0__SHIFT 0x0000000b
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0_MASK 0x00000100L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_EN_0__SHIFT 0x00000008
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0_MASK 0x000000ffL
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_BW_CNTRL_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0_MASK 0x00001000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_EN_0__SHIFT 0x0000000c
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0_MASK 0x00000e00L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_DIV_OVRD_VAL_0__SHIFT 0x00000009
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0_MASK 0x00004000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_EN_0__SHIFT 0x0000000e
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0_MASK 0x00002000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_CORECLK_EN_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0_MASK 0x10000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_EN_0__SHIFT 0x0000001c
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0_MASK 0x0fff8000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_FBDIV_OVRD_VAL_0__SHIFT 0x0000000f
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0_MASK 0x80000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_EN_0__SHIFT 0x0000001f
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0_MASK 0x40000000L
+#define PB1_PLL_RO0_OVRD_REG0__PLL_CFG_RO_VTOI_BIAS_CNTRL_OVRD_VAL_0__SHIFT 0x0000001e
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0_MASK 0x00400000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_EN_0__SHIFT 0x00000016
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0_MASK 0x00380000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFCLK_SRC_OVRD_VAL_0__SHIFT 0x00000013
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0_MASK 0x00000020L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0_MASK 0x0000001fL
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_REFDIV_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0_MASK 0x00000100L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_EN_0__SHIFT 0x00000008
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0_MASK 0x000000c0L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_CFG_RO_VCO_MODE_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0_MASK 0x00000400L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_EN_0__SHIFT 0x0000000a
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0_MASK 0x00000200L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_LEFT_EN_OVRD_VAL_0__SHIFT 0x00000009
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0_MASK 0x00001000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_EN_0__SHIFT 0x0000000c
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0_MASK 0x00000800L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_HSCLK_RIGHT_EN_OVRD_VAL_0__SHIFT 0x0000000b
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0_MASK 0x00004000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_EN_0__SHIFT 0x0000000e
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0_MASK 0x00002000L
+#define PB1_PLL_RO0_OVRD_REG1__PLL_RO_PWRON_OVRD_VAL_0__SHIFT 0x0000000d
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO0_SCI_STAT_OVRD_REG0__PLL_RO0_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO1_SCI_STAT_OVRD_REG0__PLL_RO1_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO2_SCI_STAT_OVRD_REG0__PLL_RO2_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE_MASK 0x00000300L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_FREQMODE__SHIFT 0x00000008
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT_MASK 0x00000002L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_FREQMODE_SCI_UPDT__SHIFT 0x00000001
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT_MASK 0x00000001L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_IGNR_PLLPWR_SCI_UPDT__SHIFT 0x00000000
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR_MASK 0x00000070L
+#define PB1_PLL_RO3_SCI_STAT_OVRD_REG0__PLL_RO3_PLLPWR__SHIFT 0x00000004
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000200L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000009
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000400L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000a
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000800L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000b
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00100000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000014
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00200000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000015
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00001000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x0000000c
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00002000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x0000000d
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00004000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x0000000e
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00400000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000016
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00800000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000017
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2_MASK 0x00000100L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_LC_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000008
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0_MASK 0x00000002L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS0__SHIFT 0x00000001
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1_MASK 0x00000004L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS1__SHIFT 0x00000002
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2_MASK 0x00000008L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_EN_LUT_ENTRY_LS2__SHIFT 0x00000003
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN_MASK 0x00010000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_LEFT_EN_GATING_EN__SHIFT 0x00000010
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN_MASK 0x00020000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_LEFT_RIGHT_EN_GATING_EN__SHIFT 0x00000011
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0_MASK 0x00000010L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS0__SHIFT 0x00000004
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1_MASK 0x00000020L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS1__SHIFT 0x00000005
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2_MASK 0x00000040L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_EN_LUT_ENTRY_LS2__SHIFT 0x00000006
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN_MASK 0x00040000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_LEFT_EN_GATING_EN__SHIFT 0x00000012
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN_MASK 0x00080000L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_HSCLK_RIGHT_RIGHT_EN_GATING_EN__SHIFT 0x00000013
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2_MASK 0x00000080L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_RO_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000007
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC_MASK 0x00000001L
+#define PB1_PLL_RO_GLB_CTRL_REG0__PLL_TST_LOSPDTST_SRC__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1_MASK 0x000003ffL
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2_MASK 0x000ffc00L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN2__SHIFT 0x0000000a
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3_MASK 0x3ff00000L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_MODE_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG0__RX_CFG_ADAPT_RST_MODE__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_ADAPT_HLD_ASRT_TO_DCLK_EN__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_FR_GAIN_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PH_GAIN_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1_MASK 0x01000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN1__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2_MASK 0x02000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN2__SHIFT 0x00000019
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3_MASK 0x04000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_CDR_PI_STPSZ_GEN3__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG1__RX_CFG_LEQ_DCATTN_BYP_EN_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_CDR_TIME_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1_MASK 0x03000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN1__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2_MASK 0x0c000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN2__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3_MASK 0x30000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_CFG_LEQ_LOOP_GAIN_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD_MASK 0xc0000000L
+#define PB1_RX_GLB_CTRL_REG2__RX_DCLK_EN_ASRT_TO_ADAPT_HLD__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1_MASK 0x00000001L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2_MASK 0x00000002L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN2__SHIFT 0x00000001
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3_MASK 0x00000004L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_CDR_FR_EN_GEN3__SHIFT 0x00000002
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN1__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2_MASK 0x0f000000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN2__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3_MASK 0xf0000000L
+#define PB1_RX_GLB_CTRL_REG3__RX_CFG_DFE_TIME_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1_MASK 0x00000007L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2_MASK 0x00000038L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN2__SHIFT 0x00000003
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3_MASK 0x000001c0L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_BER_GEN3__SHIFT 0x00000006
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN1__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2_MASK 0x0f000000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN2__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3_MASK 0xf0000000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_FOM_TIME_GEN3__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1_MASK 0x00000e00L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN1__SHIFT 0x00000009
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2_MASK 0x00007000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN2__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3_MASK 0x00038000L
+#define PB1_RX_GLB_CTRL_REG4__RX_CFG_LEQ_POLE_BYP_VAL_GEN3__SHIFT 0x0000000f
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1_MASK 0x0000001fL
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2_MASK 0x000003e0L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN2__SHIFT 0x00000005
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3_MASK 0x00007c00L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_DCATTN_BYP_VAL_GEN3__SHIFT 0x0000000a
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1_MASK 0x00008000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN1__SHIFT 0x0000000f
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2_MASK 0x00010000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3_MASK 0x00020000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_POLE_BYP_EN_GEN3__SHIFT 0x00000011
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1_MASK 0x00040000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN1__SHIFT 0x00000012
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2_MASK 0x00080000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN2__SHIFT 0x00000013
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3_MASK 0x00100000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_LEQ_SHUNT_EN_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_CFG_TERM_MODE_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0_MASK 0x40000000L
+#define PB1_RX_GLB_CTRL_REG5__RX_FORCE_DLL_RST_RXPWR_LS2OFF_TO_LS0__SHIFT 0x0000001e
+#define PB1_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_AUX_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_LEQ_TIME_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1_MASK 0x0000f000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN1__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2_MASK 0x000f0000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN2__SHIFT 0x00000010
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3_MASK 0x00f00000L
+#define PB1_RX_GLB_CTRL_REG6__RX_CFG_OC_TIME_GEN3__SHIFT 0x00000014
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x01000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2_MASK 0x04000000L
+#define PB1_RX_GLB_CTRL_REG6__RX_FRONTEND_PWRON_LUT_ENTRY_LS2__SHIFT 0x0000001a
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1_MASK 0x001c0000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN1__SHIFT 0x00000012
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2_MASK 0x00e00000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN2__SHIFT 0x00000015
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3_MASK 0x07000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_CPI_SEL_GEN3__SHIFT 0x00000018
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1_MASK 0x08000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN1__SHIFT 0x0000001b
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2_MASK 0x10000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN2__SHIFT 0x0000001c
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3_MASK 0x20000000L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_DLL_FLOCK_DISABLE_GEN3__SHIFT 0x0000001d
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1_MASK 0x0000000fL
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN1__SHIFT 0x00000000
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2_MASK 0x000000f0L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN2__SHIFT 0x00000004
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3_MASK 0x00000f00L
+#define PB1_RX_GLB_CTRL_REG7__RX_CFG_TH_LOOP_GAIN_GEN3__SHIFT 0x00000008
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0_MASK 0x00001000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS0_CDR_EN_0__SHIFT 0x0000000c
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2_MASK 0x00002000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DCLK_EN_LUT_ENTRY_LS2__SHIFT 0x0000000d
+#define PB1_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2_MASK 0x00020000L
+#define PB1_RX_GLB_CTRL_REG7__RX_DLL_PWRON_LUT_ENTRY_LS2__SHIFT 0x00000011
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN_MASK 0x80000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_EN__SHIFT 0x0000001f
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL_MASK 0x40000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_FOM_OVRD_VAL__SHIFT 0x0000001e
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN_MASK 0x00000002L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_EN__SHIFT 0x00000001
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL_MASK 0x00000001L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_HLD_OVRD_VAL__SHIFT 0x00000000
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN_MASK 0x00000008L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_EN__SHIFT 0x00000003
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL_MASK 0x00000004L
+#define PB1_RX_GLB_OVRD_REG0__RX_ADAPT_RST_OVRD_VAL__SHIFT 0x00000002
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN_MASK 0x20000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_EN__SHIFT 0x0000001d
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL_MASK 0x10000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_AUX_PWRON_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000100L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000008
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x000000c0L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000006
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN_MASK 0x00000400L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_EN__SHIFT 0x0000000a
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL_MASK 0x00000200L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_DLL_FREQ_MODE_OVRD_VAL__SHIFT 0x00000009
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00001000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000c
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000800L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x0000000b
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN_MASK 0x00004000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_EN__SHIFT 0x0000000e
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL_MASK 0x00002000L
+#define PB1_RX_GLB_OVRD_REG0__RX_CFG_RCLK_DIV_OVRD_VAL__SHIFT 0x0000000d
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN_MASK 0x00010000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_EN__SHIFT 0x00000010
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL_MASK 0x00008000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DCLK_EN_OVRD_VAL__SHIFT 0x0000000f
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN_MASK 0x00040000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_EN__SHIFT 0x00000012
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL_MASK 0x00020000L
+#define PB1_RX_GLB_OVRD_REG0__RX_DLL_PWRON_OVRD_VAL__SHIFT 0x00000011
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN_MASK 0x00100000L
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_EN__SHIFT 0x00000014
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL_MASK 0x00080000L
+#define PB1_RX_GLB_OVRD_REG0__RX_FRONTEND_PWRON_OVRD_VAL__SHIFT 0x00000013
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN_MASK 0x00400000L
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_EN__SHIFT 0x00000016
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL_MASK 0x00200000L
+#define PB1_RX_GLB_OVRD_REG0__RX_IDLEDET_PWRON_OVRD_VAL__SHIFT 0x00000015
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN_MASK 0x01000000L
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_EN__SHIFT 0x00000018
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL_MASK 0x00800000L
+#define PB1_RX_GLB_OVRD_REG0__RX_TERM_EN_OVRD_VAL__SHIFT 0x00000017
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN_MASK 0x00000002L
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_EN__SHIFT 0x00000001
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL_MASK 0x00000001L
+#define PB1_RX_GLB_OVRD_REG1__RX_ADAPT_TRK_OVRD_VAL__SHIFT 0x00000000
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ELECIDLEDETEN_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_ENABLEFOM_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3_MASK 0x00010000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L0T3__SHIFT 0x00000010
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15_MASK 0x00080000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L12T15__SHIFT 0x00000013
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7_MASK 0x00020000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L4T7__SHIFT 0x00000011
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11_MASK 0x00040000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_REQUESTFOM_SCI_UPDT_L8T11__SHIFT 0x00000012
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3_MASK 0x00100000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L0T3__SHIFT 0x00000014
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15_MASK 0x00800000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L12T15__SHIFT 0x00000017
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7_MASK 0x00200000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L4T7__SHIFT 0x00000015
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11_MASK 0x00400000L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RESPONSEMODE_SCI_UPDT_L8T11__SHIFT 0x00000016
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPRESETHINT_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_RX_GLB_SCI_STAT_OVRD_REG0__IGNR_RXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_RX_LANE0_CTRL_REG0__RX_BACKUP_0_MASK 0x000000ffL
+#define PB1_RX_LANE0_CTRL_REG0__RX_BACKUP_0__SHIFT 0x00000000
+#define PB1_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0_MASK 0x00002000L
+#define PB1_RX_LANE0_CTRL_REG0__RX_CFG_OVR_PWRSF_0__SHIFT 0x0000000d
+#define PB1_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0_MASK 0x00000c00L
+#define PB1_RX_LANE0_CTRL_REG0__RX_DBG_ANALOG_SEL_0__SHIFT 0x0000000a
+#define PB1_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0_MASK 0x00001000L
+#define PB1_RX_LANE0_CTRL_REG0__RX_TST_BSCAN_EN_0__SHIFT 0x0000000c
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0_MASK 0x00000008L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_0__SHIFT 0x00000003
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0_MASK 0x00000080L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__ENABLEFOM_0__SHIFT 0x00000007
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0_MASK 0x00000100L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__REQUESTFOM_0__SHIFT 0x00000008
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0_MASK 0x00000200L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RESPONSEMODE_0__SHIFT 0x00000009
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0_MASK 0x00000070L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPRESETHINT_0__SHIFT 0x00000004
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0_MASK 0x00000007L
+#define PB1_RX_LANE0_SCI_STAT_OVRD_REG0__RXPWR_0__SHIFT 0x00000000
+#define PB1_RX_LANE10_CTRL_REG0__RX_BACKUP_10_MASK 0x000000ffL
+#define PB1_RX_LANE10_CTRL_REG0__RX_BACKUP_10__SHIFT 0x00000000
+#define PB1_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10_MASK 0x00002000L
+#define PB1_RX_LANE10_CTRL_REG0__RX_CFG_OVR_PWRSF_10__SHIFT 0x0000000d
+#define PB1_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10_MASK 0x00000c00L
+#define PB1_RX_LANE10_CTRL_REG0__RX_DBG_ANALOG_SEL_10__SHIFT 0x0000000a
+#define PB1_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10_MASK 0x00001000L
+#define PB1_RX_LANE10_CTRL_REG0__RX_TST_BSCAN_EN_10__SHIFT 0x0000000c
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10_MASK 0x00000008L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_10__SHIFT 0x00000003
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10_MASK 0x00000080L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__ENABLEFOM_10__SHIFT 0x00000007
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10_MASK 0x00000100L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__REQUESTFOM_10__SHIFT 0x00000008
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10_MASK 0x00000200L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RESPONSEMODE_10__SHIFT 0x00000009
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10_MASK 0x00000070L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPRESETHINT_10__SHIFT 0x00000004
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10_MASK 0x00000007L
+#define PB1_RX_LANE10_SCI_STAT_OVRD_REG0__RXPWR_10__SHIFT 0x00000000
+#define PB1_RX_LANE11_CTRL_REG0__RX_BACKUP_11_MASK 0x000000ffL
+#define PB1_RX_LANE11_CTRL_REG0__RX_BACKUP_11__SHIFT 0x00000000
+#define PB1_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11_MASK 0x00002000L
+#define PB1_RX_LANE11_CTRL_REG0__RX_CFG_OVR_PWRSF_11__SHIFT 0x0000000d
+#define PB1_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11_MASK 0x00000c00L
+#define PB1_RX_LANE11_CTRL_REG0__RX_DBG_ANALOG_SEL_11__SHIFT 0x0000000a
+#define PB1_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11_MASK 0x00001000L
+#define PB1_RX_LANE11_CTRL_REG0__RX_TST_BSCAN_EN_11__SHIFT 0x0000000c
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11_MASK 0x00000008L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_11__SHIFT 0x00000003
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11_MASK 0x00000080L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__ENABLEFOM_11__SHIFT 0x00000007
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11_MASK 0x00000100L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__REQUESTFOM_11__SHIFT 0x00000008
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11_MASK 0x00000200L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RESPONSEMODE_11__SHIFT 0x00000009
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11_MASK 0x00000070L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPRESETHINT_11__SHIFT 0x00000004
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11_MASK 0x00000007L
+#define PB1_RX_LANE11_SCI_STAT_OVRD_REG0__RXPWR_11__SHIFT 0x00000000
+#define PB1_RX_LANE12_CTRL_REG0__RX_BACKUP_12_MASK 0x000000ffL
+#define PB1_RX_LANE12_CTRL_REG0__RX_BACKUP_12__SHIFT 0x00000000
+#define PB1_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12_MASK 0x00002000L
+#define PB1_RX_LANE12_CTRL_REG0__RX_CFG_OVR_PWRSF_12__SHIFT 0x0000000d
+#define PB1_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12_MASK 0x00000c00L
+#define PB1_RX_LANE12_CTRL_REG0__RX_DBG_ANALOG_SEL_12__SHIFT 0x0000000a
+#define PB1_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12_MASK 0x00001000L
+#define PB1_RX_LANE12_CTRL_REG0__RX_TST_BSCAN_EN_12__SHIFT 0x0000000c
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12_MASK 0x00000008L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_12__SHIFT 0x00000003
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12_MASK 0x00000080L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__ENABLEFOM_12__SHIFT 0x00000007
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12_MASK 0x00000100L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__REQUESTFOM_12__SHIFT 0x00000008
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12_MASK 0x00000200L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RESPONSEMODE_12__SHIFT 0x00000009
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12_MASK 0x00000070L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPRESETHINT_12__SHIFT 0x00000004
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12_MASK 0x00000007L
+#define PB1_RX_LANE12_SCI_STAT_OVRD_REG0__RXPWR_12__SHIFT 0x00000000
+#define PB1_RX_LANE13_CTRL_REG0__RX_BACKUP_13_MASK 0x000000ffL
+#define PB1_RX_LANE13_CTRL_REG0__RX_BACKUP_13__SHIFT 0x00000000
+#define PB1_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13_MASK 0x00002000L
+#define PB1_RX_LANE13_CTRL_REG0__RX_CFG_OVR_PWRSF_13__SHIFT 0x0000000d
+#define PB1_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13_MASK 0x00000c00L
+#define PB1_RX_LANE13_CTRL_REG0__RX_DBG_ANALOG_SEL_13__SHIFT 0x0000000a
+#define PB1_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13_MASK 0x00001000L
+#define PB1_RX_LANE13_CTRL_REG0__RX_TST_BSCAN_EN_13__SHIFT 0x0000000c
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13_MASK 0x00000008L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_13__SHIFT 0x00000003
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13_MASK 0x00000080L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__ENABLEFOM_13__SHIFT 0x00000007
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13_MASK 0x00000100L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__REQUESTFOM_13__SHIFT 0x00000008
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13_MASK 0x00000200L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RESPONSEMODE_13__SHIFT 0x00000009
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13_MASK 0x00000070L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPRESETHINT_13__SHIFT 0x00000004
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13_MASK 0x00000007L
+#define PB1_RX_LANE13_SCI_STAT_OVRD_REG0__RXPWR_13__SHIFT 0x00000000
+#define PB1_RX_LANE14_CTRL_REG0__RX_BACKUP_14_MASK 0x000000ffL
+#define PB1_RX_LANE14_CTRL_REG0__RX_BACKUP_14__SHIFT 0x00000000
+#define PB1_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14_MASK 0x00002000L
+#define PB1_RX_LANE14_CTRL_REG0__RX_CFG_OVR_PWRSF_14__SHIFT 0x0000000d
+#define PB1_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14_MASK 0x00000c00L
+#define PB1_RX_LANE14_CTRL_REG0__RX_DBG_ANALOG_SEL_14__SHIFT 0x0000000a
+#define PB1_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14_MASK 0x00001000L
+#define PB1_RX_LANE14_CTRL_REG0__RX_TST_BSCAN_EN_14__SHIFT 0x0000000c
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14_MASK 0x00000008L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_14__SHIFT 0x00000003
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14_MASK 0x00000080L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__ENABLEFOM_14__SHIFT 0x00000007
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14_MASK 0x00000100L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__REQUESTFOM_14__SHIFT 0x00000008
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14_MASK 0x00000200L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RESPONSEMODE_14__SHIFT 0x00000009
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14_MASK 0x00000070L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPRESETHINT_14__SHIFT 0x00000004
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14_MASK 0x00000007L
+#define PB1_RX_LANE14_SCI_STAT_OVRD_REG0__RXPWR_14__SHIFT 0x00000000
+#define PB1_RX_LANE15_CTRL_REG0__RX_BACKUP_15_MASK 0x000000ffL
+#define PB1_RX_LANE15_CTRL_REG0__RX_BACKUP_15__SHIFT 0x00000000
+#define PB1_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15_MASK 0x00002000L
+#define PB1_RX_LANE15_CTRL_REG0__RX_CFG_OVR_PWRSF_15__SHIFT 0x0000000d
+#define PB1_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15_MASK 0x00000c00L
+#define PB1_RX_LANE15_CTRL_REG0__RX_DBG_ANALOG_SEL_15__SHIFT 0x0000000a
+#define PB1_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15_MASK 0x00001000L
+#define PB1_RX_LANE15_CTRL_REG0__RX_TST_BSCAN_EN_15__SHIFT 0x0000000c
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15_MASK 0x00000008L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_15__SHIFT 0x00000003
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15_MASK 0x00000080L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__ENABLEFOM_15__SHIFT 0x00000007
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15_MASK 0x00000100L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__REQUESTFOM_15__SHIFT 0x00000008
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15_MASK 0x00000200L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RESPONSEMODE_15__SHIFT 0x00000009
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15_MASK 0x00000070L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPRESETHINT_15__SHIFT 0x00000004
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15_MASK 0x00000007L
+#define PB1_RX_LANE15_SCI_STAT_OVRD_REG0__RXPWR_15__SHIFT 0x00000000
+#define PB1_RX_LANE1_CTRL_REG0__RX_BACKUP_1_MASK 0x000000ffL
+#define PB1_RX_LANE1_CTRL_REG0__RX_BACKUP_1__SHIFT 0x00000000
+#define PB1_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1_MASK 0x00002000L
+#define PB1_RX_LANE1_CTRL_REG0__RX_CFG_OVR_PWRSF_1__SHIFT 0x0000000d
+#define PB1_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1_MASK 0x00000c00L
+#define PB1_RX_LANE1_CTRL_REG0__RX_DBG_ANALOG_SEL_1__SHIFT 0x0000000a
+#define PB1_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1_MASK 0x00001000L
+#define PB1_RX_LANE1_CTRL_REG0__RX_TST_BSCAN_EN_1__SHIFT 0x0000000c
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1_MASK 0x00000008L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_1__SHIFT 0x00000003
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1_MASK 0x00000080L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__ENABLEFOM_1__SHIFT 0x00000007
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1_MASK 0x00000100L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__REQUESTFOM_1__SHIFT 0x00000008
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1_MASK 0x00000200L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RESPONSEMODE_1__SHIFT 0x00000009
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1_MASK 0x00000070L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPRESETHINT_1__SHIFT 0x00000004
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1_MASK 0x00000007L
+#define PB1_RX_LANE1_SCI_STAT_OVRD_REG0__RXPWR_1__SHIFT 0x00000000
+#define PB1_RX_LANE2_CTRL_REG0__RX_BACKUP_2_MASK 0x000000ffL
+#define PB1_RX_LANE2_CTRL_REG0__RX_BACKUP_2__SHIFT 0x00000000
+#define PB1_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2_MASK 0x00002000L
+#define PB1_RX_LANE2_CTRL_REG0__RX_CFG_OVR_PWRSF_2__SHIFT 0x0000000d
+#define PB1_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2_MASK 0x00000c00L
+#define PB1_RX_LANE2_CTRL_REG0__RX_DBG_ANALOG_SEL_2__SHIFT 0x0000000a
+#define PB1_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2_MASK 0x00001000L
+#define PB1_RX_LANE2_CTRL_REG0__RX_TST_BSCAN_EN_2__SHIFT 0x0000000c
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2_MASK 0x00000008L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_2__SHIFT 0x00000003
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2_MASK 0x00000080L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__ENABLEFOM_2__SHIFT 0x00000007
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2_MASK 0x00000100L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__REQUESTFOM_2__SHIFT 0x00000008
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2_MASK 0x00000200L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RESPONSEMODE_2__SHIFT 0x00000009
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2_MASK 0x00000070L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPRESETHINT_2__SHIFT 0x00000004
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2_MASK 0x00000007L
+#define PB1_RX_LANE2_SCI_STAT_OVRD_REG0__RXPWR_2__SHIFT 0x00000000
+#define PB1_RX_LANE3_CTRL_REG0__RX_BACKUP_3_MASK 0x000000ffL
+#define PB1_RX_LANE3_CTRL_REG0__RX_BACKUP_3__SHIFT 0x00000000
+#define PB1_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3_MASK 0x00002000L
+#define PB1_RX_LANE3_CTRL_REG0__RX_CFG_OVR_PWRSF_3__SHIFT 0x0000000d
+#define PB1_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3_MASK 0x00000c00L
+#define PB1_RX_LANE3_CTRL_REG0__RX_DBG_ANALOG_SEL_3__SHIFT 0x0000000a
+#define PB1_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3_MASK 0x00001000L
+#define PB1_RX_LANE3_CTRL_REG0__RX_TST_BSCAN_EN_3__SHIFT 0x0000000c
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3_MASK 0x00000008L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_3__SHIFT 0x00000003
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3_MASK 0x00000080L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__ENABLEFOM_3__SHIFT 0x00000007
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3_MASK 0x00000100L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__REQUESTFOM_3__SHIFT 0x00000008
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3_MASK 0x00000200L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RESPONSEMODE_3__SHIFT 0x00000009
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3_MASK 0x00000070L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPRESETHINT_3__SHIFT 0x00000004
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3_MASK 0x00000007L
+#define PB1_RX_LANE3_SCI_STAT_OVRD_REG0__RXPWR_3__SHIFT 0x00000000
+#define PB1_RX_LANE4_CTRL_REG0__RX_BACKUP_4_MASK 0x000000ffL
+#define PB1_RX_LANE4_CTRL_REG0__RX_BACKUP_4__SHIFT 0x00000000
+#define PB1_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4_MASK 0x00002000L
+#define PB1_RX_LANE4_CTRL_REG0__RX_CFG_OVR_PWRSF_4__SHIFT 0x0000000d
+#define PB1_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4_MASK 0x00000c00L
+#define PB1_RX_LANE4_CTRL_REG0__RX_DBG_ANALOG_SEL_4__SHIFT 0x0000000a
+#define PB1_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4_MASK 0x00001000L
+#define PB1_RX_LANE4_CTRL_REG0__RX_TST_BSCAN_EN_4__SHIFT 0x0000000c
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4_MASK 0x00000008L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_4__SHIFT 0x00000003
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4_MASK 0x00000080L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__ENABLEFOM_4__SHIFT 0x00000007
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4_MASK 0x00000100L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__REQUESTFOM_4__SHIFT 0x00000008
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4_MASK 0x00000200L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RESPONSEMODE_4__SHIFT 0x00000009
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4_MASK 0x00000070L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPRESETHINT_4__SHIFT 0x00000004
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4_MASK 0x00000007L
+#define PB1_RX_LANE4_SCI_STAT_OVRD_REG0__RXPWR_4__SHIFT 0x00000000
+#define PB1_RX_LANE5_CTRL_REG0__RX_BACKUP_5_MASK 0x000000ffL
+#define PB1_RX_LANE5_CTRL_REG0__RX_BACKUP_5__SHIFT 0x00000000
+#define PB1_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5_MASK 0x00002000L
+#define PB1_RX_LANE5_CTRL_REG0__RX_CFG_OVR_PWRSF_5__SHIFT 0x0000000d
+#define PB1_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5_MASK 0x00000c00L
+#define PB1_RX_LANE5_CTRL_REG0__RX_DBG_ANALOG_SEL_5__SHIFT 0x0000000a
+#define PB1_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5_MASK 0x00001000L
+#define PB1_RX_LANE5_CTRL_REG0__RX_TST_BSCAN_EN_5__SHIFT 0x0000000c
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5_MASK 0x00000008L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_5__SHIFT 0x00000003
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5_MASK 0x00000080L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__ENABLEFOM_5__SHIFT 0x00000007
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5_MASK 0x00000100L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__REQUESTFOM_5__SHIFT 0x00000008
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5_MASK 0x00000200L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RESPONSEMODE_5__SHIFT 0x00000009
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5_MASK 0x00000070L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPRESETHINT_5__SHIFT 0x00000004
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5_MASK 0x00000007L
+#define PB1_RX_LANE5_SCI_STAT_OVRD_REG0__RXPWR_5__SHIFT 0x00000000
+#define PB1_RX_LANE6_CTRL_REG0__RX_BACKUP_6_MASK 0x000000ffL
+#define PB1_RX_LANE6_CTRL_REG0__RX_BACKUP_6__SHIFT 0x00000000
+#define PB1_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6_MASK 0x00002000L
+#define PB1_RX_LANE6_CTRL_REG0__RX_CFG_OVR_PWRSF_6__SHIFT 0x0000000d
+#define PB1_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6_MASK 0x00000c00L
+#define PB1_RX_LANE6_CTRL_REG0__RX_DBG_ANALOG_SEL_6__SHIFT 0x0000000a
+#define PB1_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6_MASK 0x00001000L
+#define PB1_RX_LANE6_CTRL_REG0__RX_TST_BSCAN_EN_6__SHIFT 0x0000000c
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6_MASK 0x00000008L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_6__SHIFT 0x00000003
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6_MASK 0x00000080L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__ENABLEFOM_6__SHIFT 0x00000007
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6_MASK 0x00000100L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__REQUESTFOM_6__SHIFT 0x00000008
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6_MASK 0x00000200L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RESPONSEMODE_6__SHIFT 0x00000009
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6_MASK 0x00000070L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPRESETHINT_6__SHIFT 0x00000004
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6_MASK 0x00000007L
+#define PB1_RX_LANE6_SCI_STAT_OVRD_REG0__RXPWR_6__SHIFT 0x00000000
+#define PB1_RX_LANE7_CTRL_REG0__RX_BACKUP_7_MASK 0x000000ffL
+#define PB1_RX_LANE7_CTRL_REG0__RX_BACKUP_7__SHIFT 0x00000000
+#define PB1_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7_MASK 0x00002000L
+#define PB1_RX_LANE7_CTRL_REG0__RX_CFG_OVR_PWRSF_7__SHIFT 0x0000000d
+#define PB1_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7_MASK 0x00000c00L
+#define PB1_RX_LANE7_CTRL_REG0__RX_DBG_ANALOG_SEL_7__SHIFT 0x0000000a
+#define PB1_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7_MASK 0x00001000L
+#define PB1_RX_LANE7_CTRL_REG0__RX_TST_BSCAN_EN_7__SHIFT 0x0000000c
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7_MASK 0x00000008L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_7__SHIFT 0x00000003
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7_MASK 0x00000080L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__ENABLEFOM_7__SHIFT 0x00000007
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7_MASK 0x00000100L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__REQUESTFOM_7__SHIFT 0x00000008
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7_MASK 0x00000200L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RESPONSEMODE_7__SHIFT 0x00000009
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7_MASK 0x00000070L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPRESETHINT_7__SHIFT 0x00000004
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7_MASK 0x00000007L
+#define PB1_RX_LANE7_SCI_STAT_OVRD_REG0__RXPWR_7__SHIFT 0x00000000
+#define PB1_RX_LANE8_CTRL_REG0__RX_BACKUP_8_MASK 0x000000ffL
+#define PB1_RX_LANE8_CTRL_REG0__RX_BACKUP_8__SHIFT 0x00000000
+#define PB1_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8_MASK 0x00002000L
+#define PB1_RX_LANE8_CTRL_REG0__RX_CFG_OVR_PWRSF_8__SHIFT 0x0000000d
+#define PB1_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8_MASK 0x00000c00L
+#define PB1_RX_LANE8_CTRL_REG0__RX_DBG_ANALOG_SEL_8__SHIFT 0x0000000a
+#define PB1_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8_MASK 0x00001000L
+#define PB1_RX_LANE8_CTRL_REG0__RX_TST_BSCAN_EN_8__SHIFT 0x0000000c
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8_MASK 0x00000008L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_8__SHIFT 0x00000003
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8_MASK 0x00000080L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__ENABLEFOM_8__SHIFT 0x00000007
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8_MASK 0x00000100L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__REQUESTFOM_8__SHIFT 0x00000008
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8_MASK 0x00000200L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RESPONSEMODE_8__SHIFT 0x00000009
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8_MASK 0x00000070L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPRESETHINT_8__SHIFT 0x00000004
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8_MASK 0x00000007L
+#define PB1_RX_LANE8_SCI_STAT_OVRD_REG0__RXPWR_8__SHIFT 0x00000000
+#define PB1_RX_LANE9_CTRL_REG0__RX_BACKUP_9_MASK 0x000000ffL
+#define PB1_RX_LANE9_CTRL_REG0__RX_BACKUP_9__SHIFT 0x00000000
+#define PB1_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9_MASK 0x00002000L
+#define PB1_RX_LANE9_CTRL_REG0__RX_CFG_OVR_PWRSF_9__SHIFT 0x0000000d
+#define PB1_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9_MASK 0x00000c00L
+#define PB1_RX_LANE9_CTRL_REG0__RX_DBG_ANALOG_SEL_9__SHIFT 0x0000000a
+#define PB1_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9_MASK 0x00001000L
+#define PB1_RX_LANE9_CTRL_REG0__RX_TST_BSCAN_EN_9__SHIFT 0x0000000c
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9_MASK 0x00000008L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ELECIDLEDETEN_9__SHIFT 0x00000003
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9_MASK 0x00000080L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__ENABLEFOM_9__SHIFT 0x00000007
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9_MASK 0x00000100L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__REQUESTFOM_9__SHIFT 0x00000008
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9_MASK 0x00000200L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RESPONSEMODE_9__SHIFT 0x00000009
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9_MASK 0x00000070L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPRESETHINT_9__SHIFT 0x00000004
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9_MASK 0x00000007L
+#define PB1_RX_LANE9_SCI_STAT_OVRD_REG0__RXPWR_9__SHIFT 0x00000000
+#define PB1_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL_MASK 0x00008000L
+#define PB1_STRAP_GLB_REG0__STRAP_DBG_RXDLL_VREG_REF_SEL__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_0__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_10__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_11__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_12__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_13__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_14__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_15__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_16__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_17__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_18__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_19__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_1__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_20__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_21__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_22__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_23__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_24__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_25__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_26__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_27__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_28__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_29__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_2__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_30__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_31__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_3__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_4__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_5__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_6__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_7__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_8__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0__ACCEPT_ENTRY_9__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_32__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_33__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_34__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_35__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_36__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_37__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_38__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_39__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_40__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_41__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_42__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_43__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_44__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_45__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_46__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_47__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_48__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_49__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_50__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_51__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_52__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_53__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_54__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_55__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_56__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_57__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_58__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_59__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_60__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_61__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_62__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1__ACCEPT_ENTRY_63__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_64__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_65__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_66__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_67__SHIFT 0x00000003
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_68__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_69__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_70__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_71__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_72__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_73__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_74__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_75__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_76__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_77__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78_MASK 0x00004000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_78__SHIFT 0x0000000e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79_MASK 0x00008000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_79__SHIFT 0x0000000f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80_MASK 0x00010000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_80__SHIFT 0x00000010
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81_MASK 0x00020000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_81__SHIFT 0x00000011
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82_MASK 0x00040000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_82__SHIFT 0x00000012
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83_MASK 0x00080000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_83__SHIFT 0x00000013
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84_MASK 0x00100000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_84__SHIFT 0x00000014
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85_MASK 0x00200000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_85__SHIFT 0x00000015
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86_MASK 0x00400000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_86__SHIFT 0x00000016
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87_MASK 0x00800000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_87__SHIFT 0x00000017
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88_MASK 0x01000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_88__SHIFT 0x00000018
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89_MASK 0x02000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_89__SHIFT 0x00000019
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90_MASK 0x04000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_90__SHIFT 0x0000001a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91_MASK 0x08000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_91__SHIFT 0x0000001b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92_MASK 0x10000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_92__SHIFT 0x0000001c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93_MASK 0x20000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_93__SHIFT 0x0000001d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94_MASK 0x40000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_94__SHIFT 0x0000001e
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95_MASK 0x80000000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2__ACCEPT_ENTRY_95__SHIFT 0x0000001f
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100_MASK 0x00000010L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_100__SHIFT 0x00000004
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101_MASK 0x00000020L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_101__SHIFT 0x00000005
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102_MASK 0x00000040L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_102__SHIFT 0x00000006
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103_MASK 0x00000080L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_103__SHIFT 0x00000007
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104_MASK 0x00000100L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_104__SHIFT 0x00000008
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105_MASK 0x00000200L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_105__SHIFT 0x00000009
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106_MASK 0x00000400L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_106__SHIFT 0x0000000a
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107_MASK 0x00000800L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_107__SHIFT 0x0000000b
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108_MASK 0x00001000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_108__SHIFT 0x0000000c
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109_MASK 0x00002000L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_109__SHIFT 0x0000000d
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96_MASK 0x00000001L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_96__SHIFT 0x00000000
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97_MASK 0x00000002L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_97__SHIFT 0x00000001
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98_MASK 0x00000004L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_98__SHIFT 0x00000002
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99_MASK 0x00000008L
+#define PB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3__ACCEPT_ENTRY_99__SHIFT 0x00000003
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1_MASK 0x00000700L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN1__SHIFT 0x00000008
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2_MASK 0x00003800L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN2__SHIFT 0x0000000b
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3_MASK 0x0001c000L
+#define PB1_TX_GLB_CTRL_REG0__TX_CFG_RPTR_RST_VAL_GEN3__SHIFT 0x0000000e
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER_MASK 0x00400000L
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_DIR_VER__SHIFT 0x00000016
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN_MASK 0x00200000L
+#define PB1_TX_GLB_CTRL_REG0__TX_COEFF_ROUND_EN__SHIFT 0x00000015
+#define PB1_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING_MASK 0x00080000L
+#define PB1_TX_GLB_CTRL_REG0__TX_DATA_CLK_GATING__SHIFT 0x00000013
+#define PB1_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON_MASK 0x00800000L
+#define PB1_TX_GLB_CTRL_REG0__TX_DCLK_EN_LSX_ALWAYS_ON__SHIFT 0x00000017
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL_MASK 0x00000007L
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_ASRT_DLY_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL_MASK 0x00000038L
+#define PB1_TX_GLB_CTRL_REG0__TX_DRV_DATA_DSRT_DLY_VAL__SHIFT 0x00000003
+#define PB1_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF_MASK 0x01000000L
+#define PB1_TX_GLB_CTRL_REG0__TX_FRONTEND_PWRON_IN_OFF__SHIFT 0x00000018
+#define PB1_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS_MASK 0x00100000L
+#define PB1_TX_GLB_CTRL_REG0__TX_PRESET_TABLE_BYPASS__SHIFT 0x00000014
+#define PB1_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL_MASK 0x00060000L
+#define PB1_TX_GLB_CTRL_REG0__TX_STAGGER_CTRL__SHIFT 0x00000011
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15_MASK 0x40000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX16_EN_L0T15__SHIFT 0x0000001e
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0_MASK 0x00000001L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_0__SHIFT 0x00000000
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10_MASK 0x00000400L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_10__SHIFT 0x0000000a
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11_MASK 0x00000800L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_11__SHIFT 0x0000000b
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12_MASK 0x00001000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_12__SHIFT 0x0000000c
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13_MASK 0x00002000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_13__SHIFT 0x0000000d
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14_MASK 0x00004000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_14__SHIFT 0x0000000e
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15_MASK 0x00008000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_15__SHIFT 0x0000000f
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1_MASK 0x00000002L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_1__SHIFT 0x00000001
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2_MASK 0x00000004L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_2__SHIFT 0x00000002
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3_MASK 0x00000008L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_3__SHIFT 0x00000003
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4_MASK 0x00000010L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_4__SHIFT 0x00000004
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5_MASK 0x00000020L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_5__SHIFT 0x00000005
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6_MASK 0x00000040L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_6__SHIFT 0x00000006
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7_MASK 0x00000080L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_7__SHIFT 0x00000007
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8_MASK 0x00000100L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_8__SHIFT 0x00000008
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9_MASK 0x00000200L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX1_EN_9__SHIFT 0x00000009
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1_MASK 0x00010000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L0T1__SHIFT 0x00000010
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11_MASK 0x00200000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L10T11__SHIFT 0x00000015
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13_MASK 0x00400000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L12T13__SHIFT 0x00000016
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15_MASK 0x00800000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L14T15__SHIFT 0x00000017
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3_MASK 0x00020000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L2T3__SHIFT 0x00000011
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5_MASK 0x00040000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L4T5__SHIFT 0x00000012
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7_MASK 0x00080000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L6T7__SHIFT 0x00000013
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9_MASK 0x00100000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX2_EN_L8T9__SHIFT 0x00000014
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3_MASK 0x01000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L0T3__SHIFT 0x00000018
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15_MASK 0x08000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L12T15__SHIFT 0x0000001b
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7_MASK 0x02000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L4T7__SHIFT 0x00000019
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11_MASK 0x04000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX4_EN_L8T11__SHIFT 0x0000001a
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7_MASK 0x10000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L0T7__SHIFT 0x0000001c
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15_MASK 0x20000000L
+#define PB1_TX_GLB_LANE_SKEW_CTRL__TX_CFG_GROUPX8_EN_L8T15__SHIFT 0x0000001d
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN_MASK 0x00000008L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_EN__SHIFT 0x00000003
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL_MASK 0x00000007L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DCLK_DIV_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL_MASK 0x000000f0L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_GEN1_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_EN_OVRD_EN__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00001e00L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN_MASK 0x00002000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV0_TAP_SEL_OVRD_EN__SHIFT 0x0000000d
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL_MASK 0x0007c000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_GEN1_OVRD_VAL__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN_MASK 0x00080000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_EN_OVRD_EN__SHIFT 0x00000013
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL_MASK 0x01f00000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000014
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN_MASK 0x02000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV1_TAP_SEL_OVRD_EN__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL_MASK 0x3c000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_GEN1_OVRD_VAL__SHIFT 0x0000001a
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN_MASK 0x40000000L
+#define PB1_TX_GLB_OVRD_REG0__TX_CFG_DRV2_EN_OVRD_EN__SHIFT 0x0000001e
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRV2_TAP_SEL_OVRD_EN__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_GEN1_OVRD_VAL__SHIFT 0x00000005
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN_MASK 0x00000040L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_EN_OVRD_EN__SHIFT 0x00000006
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL_MASK 0x00000080L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_GEN1_OVRD_VAL__SHIFT 0x00000007
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_DRVX_TAP_SEL_OVRD_EN__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN_MASK 0x00000400L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_EN__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_PLLCLK_SEL_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN_MASK 0x00001000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_EN__SHIFT 0x0000000c
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL_MASK 0x00000800L
+#define PB1_TX_GLB_OVRD_REG1__TX_CFG_TCLK_DIV_OVRD_VAL__SHIFT 0x0000000b
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN_MASK 0x00004000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_EN__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL_MASK 0x00002000L
+#define PB1_TX_GLB_OVRD_REG1__TX_CMDET_EN_OVRD_VAL__SHIFT 0x0000000d
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN_MASK 0x02000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_EN__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL_MASK 0x01ff8000L
+#define PB1_TX_GLB_OVRD_REG1__TX_DATA_IN_OVRD_VAL__SHIFT 0x0000000f
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN_MASK 0x08000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_EN__SHIFT 0x0000001b
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL_MASK 0x04000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RPTR_RSTN_OVRD_VAL__SHIFT 0x0000001a
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN_MASK 0x20000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_EN__SHIFT 0x0000001d
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL_MASK 0x10000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_RXDET_EN_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN_MASK 0x80000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_EN__SHIFT 0x0000001f
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL_MASK 0x40000000L
+#define PB1_TX_GLB_OVRD_REG1__TX_WPTR_RSTN_OVRD_VAL__SHIFT 0x0000001e
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL_MASK 0x0000f000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_EN_GEN2_OVRD_VAL__SHIFT 0x0000000c
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000f0000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV0_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000010
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL_MASK 0x01f00000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_EN_GEN2_OVRD_VAL__SHIFT 0x00000014
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL_MASK 0x3e000000L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_DRV1_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000019
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN_MASK 0x00000800L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_EN__SHIFT 0x0000000b
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL_MASK 0x00000400L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX16_EN_OVRD_VAL__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN_MASK 0x00000008L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_EN__SHIFT 0x00000003
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL_MASK 0x00000004L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX1_EN_OVRD_VAL__SHIFT 0x00000002
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_EN__SHIFT 0x00000005
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX2_EN_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN_MASK 0x00000080L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_EN__SHIFT 0x00000007
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL_MASK 0x00000040L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX4_EN_OVRD_VAL__SHIFT 0x00000006
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_EN__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG2__TX_CFG_GROUPX8_EN_OVRD_VAL__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN_MASK 0x00000002L
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_EN__SHIFT 0x00000001
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL_MASK 0x00000001L
+#define PB1_TX_GLB_OVRD_REG2__TX_WRITE_EN_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL_MASK 0x00003c00L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_EN_GEN3_OVRD_VAL__SHIFT 0x0000000a
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0003c000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV0_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x0000000e
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL_MASK 0x007c0000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_EN_GEN3_OVRD_VAL__SHIFT 0x00000012
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0f800000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV1_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000017
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN2_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL_MASK 0xf0000000L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_EN_GEN3_OVRD_VAL__SHIFT 0x0000001c
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL_MASK 0x000000f0L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRV2_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL_MASK 0x00000100L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_EN_GEN2_OVRD_VAL__SHIFT 0x00000008
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL_MASK 0x00000200L
+#define PB1_TX_GLB_OVRD_REG3__TX_CFG_DRVX_TAP_SEL_GEN2_OVRD_VAL__SHIFT 0x00000009
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL_MASK 0x0000000fL
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRV2_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000000
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL_MASK 0x00000010L
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_EN_GEN3_OVRD_VAL__SHIFT 0x00000004
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL_MASK 0x00000020L
+#define PB1_TX_GLB_OVRD_REG4__TX_CFG_DRVX_TAP_SEL_GEN3_OVRD_VAL__SHIFT 0x00000005
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3_MASK 0x00000100L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L0T3__SHIFT 0x00000008
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15_MASK 0x00000800L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L12T15__SHIFT 0x0000000b
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7_MASK 0x00000200L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L4T7__SHIFT 0x00000009
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11_MASK 0x00000400L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENTID_SCI_UPDT_L8T11__SHIFT 0x0000000a
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3_MASK 0x00001000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L0T3__SHIFT 0x0000000c
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15_MASK 0x00008000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L12T15__SHIFT 0x0000000f
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7_MASK 0x00002000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L4T7__SHIFT 0x0000000d
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11_MASK 0x00004000L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_COEFFICIENT_SCI_UPDT_L8T11__SHIFT 0x0000000e
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3_MASK 0x00000010L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L0T3__SHIFT 0x00000004
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15_MASK 0x00000080L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L12T15__SHIFT 0x00000007
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7_MASK 0x00000020L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L4T7__SHIFT 0x00000005
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11_MASK 0x00000040L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_INCOHERENTCK_SCI_UPDT_L8T11__SHIFT 0x00000006
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3_MASK 0x00000001L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L0T3__SHIFT 0x00000000
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15_MASK 0x00000008L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L12T15__SHIFT 0x00000003
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7_MASK 0x00000002L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L4T7__SHIFT 0x00000001
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11_MASK 0x00000004L
+#define PB1_TX_GLB_SCI_STAT_OVRD_REG0__IGNR_TXPWR_SCI_UPDT_L8T11__SHIFT 0x00000002
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0_MASK 0x00000001L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_DISPCLK_MODE_0__SHIFT 0x00000000
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0_MASK 0x00000002L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_INV_DATA_0__SHIFT 0x00000001
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0_MASK 0x00000004L
+#define PB1_TX_LANE0_CTRL_REG0__TX_CFG_SWING_BOOST_EN_0__SHIFT 0x00000002
+#define PB1_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0_MASK 0x00000008L
+#define PB1_TX_LANE0_CTRL_REG0__TX_DBG_PRBS_EN_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0_MASK 0x00000002L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_EN_0__SHIFT 0x00000001
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0_MASK 0x00000001L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_0__SHIFT 0x00000000
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0_MASK 0x00000008L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0_MASK 0x00000004L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_0__SHIFT 0x00000002
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0_MASK 0x00000020L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_0__SHIFT 0x00000005
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0_MASK 0x00000010L
+#define PB1_TX_LANE0_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_0__SHIFT 0x00000004
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0_MASK 0x00000080L
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_0__SHIFT 0x00000007
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0_MASK 0x00000040L
+#define PB1_TX_LANE0_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_0__SHIFT 0x00000006
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0_MASK 0x0000fc00L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENT_0__SHIFT 0x0000000a
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0_MASK 0x00000300L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__COEFFICIENTID_0__SHIFT 0x00000008
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0_MASK 0x00000080L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__DEEMPH_0__SHIFT 0x00000007
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0_MASK 0x00000008L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__INCOHERENTCK_0__SHIFT 0x00000003
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0_MASK 0x00000070L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXMARG_0__SHIFT 0x00000004
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0_MASK 0x00000007L
+#define PB1_TX_LANE0_SCI_STAT_OVRD_REG0__TXPWR_0__SHIFT 0x00000000
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10_MASK 0x00000001L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_DISPCLK_MODE_10__SHIFT 0x00000000
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10_MASK 0x00000002L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_INV_DATA_10__SHIFT 0x00000001
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10_MASK 0x00000004L
+#define PB1_TX_LANE10_CTRL_REG0__TX_CFG_SWING_BOOST_EN_10__SHIFT 0x00000002
+#define PB1_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10_MASK 0x00000008L
+#define PB1_TX_LANE10_CTRL_REG0__TX_DBG_PRBS_EN_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10_MASK 0x00000002L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_EN_10__SHIFT 0x00000001
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10_MASK 0x00000001L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_10__SHIFT 0x00000000
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10_MASK 0x00000008L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10_MASK 0x00000004L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_10__SHIFT 0x00000002
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10_MASK 0x00000020L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_10__SHIFT 0x00000005
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10_MASK 0x00000010L
+#define PB1_TX_LANE10_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_10__SHIFT 0x00000004
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10_MASK 0x00000080L
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_10__SHIFT 0x00000007
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10_MASK 0x00000040L
+#define PB1_TX_LANE10_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_10__SHIFT 0x00000006
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10_MASK 0x0000fc00L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENT_10__SHIFT 0x0000000a
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10_MASK 0x00000300L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__COEFFICIENTID_10__SHIFT 0x00000008
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10_MASK 0x00000080L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__DEEMPH_10__SHIFT 0x00000007
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10_MASK 0x00000008L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__INCOHERENTCK_10__SHIFT 0x00000003
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10_MASK 0x00000070L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXMARG_10__SHIFT 0x00000004
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10_MASK 0x00000007L
+#define PB1_TX_LANE10_SCI_STAT_OVRD_REG0__TXPWR_10__SHIFT 0x00000000
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11_MASK 0x00000001L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_DISPCLK_MODE_11__SHIFT 0x00000000
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11_MASK 0x00000002L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_INV_DATA_11__SHIFT 0x00000001
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11_MASK 0x00000004L
+#define PB1_TX_LANE11_CTRL_REG0__TX_CFG_SWING_BOOST_EN_11__SHIFT 0x00000002
+#define PB1_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11_MASK 0x00000008L
+#define PB1_TX_LANE11_CTRL_REG0__TX_DBG_PRBS_EN_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11_MASK 0x00000002L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_EN_11__SHIFT 0x00000001
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11_MASK 0x00000001L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_11__SHIFT 0x00000000
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11_MASK 0x00000008L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11_MASK 0x00000004L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_11__SHIFT 0x00000002
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11_MASK 0x00000020L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_11__SHIFT 0x00000005
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11_MASK 0x00000010L
+#define PB1_TX_LANE11_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_11__SHIFT 0x00000004
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11_MASK 0x00000080L
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_11__SHIFT 0x00000007
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11_MASK 0x00000040L
+#define PB1_TX_LANE11_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_11__SHIFT 0x00000006
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11_MASK 0x0000fc00L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENT_11__SHIFT 0x0000000a
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11_MASK 0x00000300L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__COEFFICIENTID_11__SHIFT 0x00000008
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11_MASK 0x00000080L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__DEEMPH_11__SHIFT 0x00000007
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11_MASK 0x00000008L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__INCOHERENTCK_11__SHIFT 0x00000003
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11_MASK 0x00000070L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXMARG_11__SHIFT 0x00000004
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11_MASK 0x00000007L
+#define PB1_TX_LANE11_SCI_STAT_OVRD_REG0__TXPWR_11__SHIFT 0x00000000
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12_MASK 0x00000001L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_DISPCLK_MODE_12__SHIFT 0x00000000
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12_MASK 0x00000002L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_INV_DATA_12__SHIFT 0x00000001
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12_MASK 0x00000004L
+#define PB1_TX_LANE12_CTRL_REG0__TX_CFG_SWING_BOOST_EN_12__SHIFT 0x00000002
+#define PB1_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12_MASK 0x00000008L
+#define PB1_TX_LANE12_CTRL_REG0__TX_DBG_PRBS_EN_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12_MASK 0x00000002L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_EN_12__SHIFT 0x00000001
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12_MASK 0x00000001L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_12__SHIFT 0x00000000
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12_MASK 0x00000008L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12_MASK 0x00000004L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_12__SHIFT 0x00000002
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12_MASK 0x00000020L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_12__SHIFT 0x00000005
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12_MASK 0x00000010L
+#define PB1_TX_LANE12_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_12__SHIFT 0x00000004
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12_MASK 0x00000080L
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_12__SHIFT 0x00000007
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12_MASK 0x00000040L
+#define PB1_TX_LANE12_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_12__SHIFT 0x00000006
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12_MASK 0x0000fc00L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENT_12__SHIFT 0x0000000a
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12_MASK 0x00000300L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__COEFFICIENTID_12__SHIFT 0x00000008
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12_MASK 0x00000080L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__DEEMPH_12__SHIFT 0x00000007
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12_MASK 0x00000008L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__INCOHERENTCK_12__SHIFT 0x00000003
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12_MASK 0x00000070L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXMARG_12__SHIFT 0x00000004
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12_MASK 0x00000007L
+#define PB1_TX_LANE12_SCI_STAT_OVRD_REG0__TXPWR_12__SHIFT 0x00000000
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13_MASK 0x00000001L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_DISPCLK_MODE_13__SHIFT 0x00000000
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13_MASK 0x00000002L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_INV_DATA_13__SHIFT 0x00000001
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13_MASK 0x00000004L
+#define PB1_TX_LANE13_CTRL_REG0__TX_CFG_SWING_BOOST_EN_13__SHIFT 0x00000002
+#define PB1_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13_MASK 0x00000008L
+#define PB1_TX_LANE13_CTRL_REG0__TX_DBG_PRBS_EN_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13_MASK 0x00000002L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_EN_13__SHIFT 0x00000001
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13_MASK 0x00000001L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_13__SHIFT 0x00000000
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13_MASK 0x00000008L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13_MASK 0x00000004L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_13__SHIFT 0x00000002
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13_MASK 0x00000020L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_13__SHIFT 0x00000005
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13_MASK 0x00000010L
+#define PB1_TX_LANE13_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_13__SHIFT 0x00000004
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13_MASK 0x00000080L
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_13__SHIFT 0x00000007
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13_MASK 0x00000040L
+#define PB1_TX_LANE13_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_13__SHIFT 0x00000006
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13_MASK 0x0000fc00L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENT_13__SHIFT 0x0000000a
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13_MASK 0x00000300L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__COEFFICIENTID_13__SHIFT 0x00000008
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13_MASK 0x00000080L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__DEEMPH_13__SHIFT 0x00000007
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13_MASK 0x00000008L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__INCOHERENTCK_13__SHIFT 0x00000003
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13_MASK 0x00000070L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXMARG_13__SHIFT 0x00000004
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13_MASK 0x00000007L
+#define PB1_TX_LANE13_SCI_STAT_OVRD_REG0__TXPWR_13__SHIFT 0x00000000
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14_MASK 0x00000001L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_DISPCLK_MODE_14__SHIFT 0x00000000
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14_MASK 0x00000002L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_INV_DATA_14__SHIFT 0x00000001
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14_MASK 0x00000004L
+#define PB1_TX_LANE14_CTRL_REG0__TX_CFG_SWING_BOOST_EN_14__SHIFT 0x00000002
+#define PB1_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14_MASK 0x00000008L
+#define PB1_TX_LANE14_CTRL_REG0__TX_DBG_PRBS_EN_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14_MASK 0x00000002L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_EN_14__SHIFT 0x00000001
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14_MASK 0x00000001L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_14__SHIFT 0x00000000
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14_MASK 0x00000008L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14_MASK 0x00000004L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_14__SHIFT 0x00000002
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14_MASK 0x00000020L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_14__SHIFT 0x00000005
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14_MASK 0x00000010L
+#define PB1_TX_LANE14_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_14__SHIFT 0x00000004
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14_MASK 0x00000080L
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_14__SHIFT 0x00000007
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14_MASK 0x00000040L
+#define PB1_TX_LANE14_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_14__SHIFT 0x00000006
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14_MASK 0x0000fc00L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENT_14__SHIFT 0x0000000a
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14_MASK 0x00000300L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__COEFFICIENTID_14__SHIFT 0x00000008
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14_MASK 0x00000080L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__DEEMPH_14__SHIFT 0x00000007
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14_MASK 0x00000008L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__INCOHERENTCK_14__SHIFT 0x00000003
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14_MASK 0x00000070L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXMARG_14__SHIFT 0x00000004
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14_MASK 0x00000007L
+#define PB1_TX_LANE14_SCI_STAT_OVRD_REG0__TXPWR_14__SHIFT 0x00000000
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15_MASK 0x00000001L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_DISPCLK_MODE_15__SHIFT 0x00000000
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15_MASK 0x00000002L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_INV_DATA_15__SHIFT 0x00000001
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15_MASK 0x00000004L
+#define PB1_TX_LANE15_CTRL_REG0__TX_CFG_SWING_BOOST_EN_15__SHIFT 0x00000002
+#define PB1_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15_MASK 0x00000008L
+#define PB1_TX_LANE15_CTRL_REG0__TX_DBG_PRBS_EN_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15_MASK 0x00000002L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_EN_15__SHIFT 0x00000001
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15_MASK 0x00000001L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_15__SHIFT 0x00000000
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15_MASK 0x00000008L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15_MASK 0x00000004L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_15__SHIFT 0x00000002
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15_MASK 0x00000020L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_15__SHIFT 0x00000005
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15_MASK 0x00000010L
+#define PB1_TX_LANE15_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_15__SHIFT 0x00000004
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15_MASK 0x00000080L
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_15__SHIFT 0x00000007
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15_MASK 0x00000040L
+#define PB1_TX_LANE15_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_15__SHIFT 0x00000006
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15_MASK 0x0000fc00L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENT_15__SHIFT 0x0000000a
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15_MASK 0x00000300L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__COEFFICIENTID_15__SHIFT 0x00000008
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15_MASK 0x00000080L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__DEEMPH_15__SHIFT 0x00000007
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15_MASK 0x00000008L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__INCOHERENTCK_15__SHIFT 0x00000003
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15_MASK 0x00000070L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXMARG_15__SHIFT 0x00000004
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15_MASK 0x00000007L
+#define PB1_TX_LANE15_SCI_STAT_OVRD_REG0__TXPWR_15__SHIFT 0x00000000
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1_MASK 0x00000001L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_DISPCLK_MODE_1__SHIFT 0x00000000
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1_MASK 0x00000002L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_INV_DATA_1__SHIFT 0x00000001
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1_MASK 0x00000004L
+#define PB1_TX_LANE1_CTRL_REG0__TX_CFG_SWING_BOOST_EN_1__SHIFT 0x00000002
+#define PB1_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1_MASK 0x00000008L
+#define PB1_TX_LANE1_CTRL_REG0__TX_DBG_PRBS_EN_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1_MASK 0x00000002L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_EN_1__SHIFT 0x00000001
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1_MASK 0x00000001L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_1__SHIFT 0x00000000
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1_MASK 0x00000008L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1_MASK 0x00000004L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_1__SHIFT 0x00000002
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1_MASK 0x00000020L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_1__SHIFT 0x00000005
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1_MASK 0x00000010L
+#define PB1_TX_LANE1_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_1__SHIFT 0x00000004
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1_MASK 0x00000080L
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_1__SHIFT 0x00000007
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1_MASK 0x00000040L
+#define PB1_TX_LANE1_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_1__SHIFT 0x00000006
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1_MASK 0x0000fc00L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENT_1__SHIFT 0x0000000a
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1_MASK 0x00000300L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__COEFFICIENTID_1__SHIFT 0x00000008
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1_MASK 0x00000080L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__DEEMPH_1__SHIFT 0x00000007
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1_MASK 0x00000008L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__INCOHERENTCK_1__SHIFT 0x00000003
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1_MASK 0x00000070L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXMARG_1__SHIFT 0x00000004
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1_MASK 0x00000007L
+#define PB1_TX_LANE1_SCI_STAT_OVRD_REG0__TXPWR_1__SHIFT 0x00000000
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2_MASK 0x00000001L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_DISPCLK_MODE_2__SHIFT 0x00000000
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2_MASK 0x00000002L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_INV_DATA_2__SHIFT 0x00000001
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2_MASK 0x00000004L
+#define PB1_TX_LANE2_CTRL_REG0__TX_CFG_SWING_BOOST_EN_2__SHIFT 0x00000002
+#define PB1_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2_MASK 0x00000008L
+#define PB1_TX_LANE2_CTRL_REG0__TX_DBG_PRBS_EN_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2_MASK 0x00000002L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_EN_2__SHIFT 0x00000001
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2_MASK 0x00000001L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_2__SHIFT 0x00000000
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2_MASK 0x00000008L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2_MASK 0x00000004L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_2__SHIFT 0x00000002
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2_MASK 0x00000020L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_2__SHIFT 0x00000005
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2_MASK 0x00000010L
+#define PB1_TX_LANE2_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_2__SHIFT 0x00000004
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2_MASK 0x00000080L
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_2__SHIFT 0x00000007
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2_MASK 0x00000040L
+#define PB1_TX_LANE2_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_2__SHIFT 0x00000006
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2_MASK 0x0000fc00L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENT_2__SHIFT 0x0000000a
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2_MASK 0x00000300L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__COEFFICIENTID_2__SHIFT 0x00000008
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2_MASK 0x00000080L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__DEEMPH_2__SHIFT 0x00000007
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2_MASK 0x00000008L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__INCOHERENTCK_2__SHIFT 0x00000003
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2_MASK 0x00000070L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXMARG_2__SHIFT 0x00000004
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2_MASK 0x00000007L
+#define PB1_TX_LANE2_SCI_STAT_OVRD_REG0__TXPWR_2__SHIFT 0x00000000
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3_MASK 0x00000001L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_DISPCLK_MODE_3__SHIFT 0x00000000
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3_MASK 0x00000002L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_INV_DATA_3__SHIFT 0x00000001
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3_MASK 0x00000004L
+#define PB1_TX_LANE3_CTRL_REG0__TX_CFG_SWING_BOOST_EN_3__SHIFT 0x00000002
+#define PB1_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3_MASK 0x00000008L
+#define PB1_TX_LANE3_CTRL_REG0__TX_DBG_PRBS_EN_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3_MASK 0x00000002L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_EN_3__SHIFT 0x00000001
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3_MASK 0x00000001L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_3__SHIFT 0x00000000
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3_MASK 0x00000008L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3_MASK 0x00000004L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_3__SHIFT 0x00000002
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3_MASK 0x00000020L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_3__SHIFT 0x00000005
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3_MASK 0x00000010L
+#define PB1_TX_LANE3_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_3__SHIFT 0x00000004
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3_MASK 0x00000080L
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_3__SHIFT 0x00000007
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3_MASK 0x00000040L
+#define PB1_TX_LANE3_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_3__SHIFT 0x00000006
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3_MASK 0x0000fc00L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENT_3__SHIFT 0x0000000a
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3_MASK 0x00000300L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__COEFFICIENTID_3__SHIFT 0x00000008
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3_MASK 0x00000080L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__DEEMPH_3__SHIFT 0x00000007
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3_MASK 0x00000008L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__INCOHERENTCK_3__SHIFT 0x00000003
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3_MASK 0x00000070L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXMARG_3__SHIFT 0x00000004
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3_MASK 0x00000007L
+#define PB1_TX_LANE3_SCI_STAT_OVRD_REG0__TXPWR_3__SHIFT 0x00000000
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4_MASK 0x00000001L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_DISPCLK_MODE_4__SHIFT 0x00000000
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4_MASK 0x00000002L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_INV_DATA_4__SHIFT 0x00000001
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4_MASK 0x00000004L
+#define PB1_TX_LANE4_CTRL_REG0__TX_CFG_SWING_BOOST_EN_4__SHIFT 0x00000002
+#define PB1_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4_MASK 0x00000008L
+#define PB1_TX_LANE4_CTRL_REG0__TX_DBG_PRBS_EN_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4_MASK 0x00000002L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_EN_4__SHIFT 0x00000001
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4_MASK 0x00000001L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_4__SHIFT 0x00000000
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4_MASK 0x00000008L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4_MASK 0x00000004L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_4__SHIFT 0x00000002
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4_MASK 0x00000020L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_4__SHIFT 0x00000005
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4_MASK 0x00000010L
+#define PB1_TX_LANE4_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_4__SHIFT 0x00000004
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4_MASK 0x00000080L
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_4__SHIFT 0x00000007
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4_MASK 0x00000040L
+#define PB1_TX_LANE4_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_4__SHIFT 0x00000006
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4_MASK 0x0000fc00L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENT_4__SHIFT 0x0000000a
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4_MASK 0x00000300L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__COEFFICIENTID_4__SHIFT 0x00000008
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4_MASK 0x00000080L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__DEEMPH_4__SHIFT 0x00000007
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4_MASK 0x00000008L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__INCOHERENTCK_4__SHIFT 0x00000003
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4_MASK 0x00000070L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXMARG_4__SHIFT 0x00000004
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4_MASK 0x00000007L
+#define PB1_TX_LANE4_SCI_STAT_OVRD_REG0__TXPWR_4__SHIFT 0x00000000
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5_MASK 0x00000001L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_DISPCLK_MODE_5__SHIFT 0x00000000
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5_MASK 0x00000002L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_INV_DATA_5__SHIFT 0x00000001
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5_MASK 0x00000004L
+#define PB1_TX_LANE5_CTRL_REG0__TX_CFG_SWING_BOOST_EN_5__SHIFT 0x00000002
+#define PB1_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5_MASK 0x00000008L
+#define PB1_TX_LANE5_CTRL_REG0__TX_DBG_PRBS_EN_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5_MASK 0x00000002L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_EN_5__SHIFT 0x00000001
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5_MASK 0x00000001L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_5__SHIFT 0x00000000
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5_MASK 0x00000008L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5_MASK 0x00000004L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_5__SHIFT 0x00000002
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5_MASK 0x00000020L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_5__SHIFT 0x00000005
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5_MASK 0x00000010L
+#define PB1_TX_LANE5_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_5__SHIFT 0x00000004
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5_MASK 0x00000080L
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_5__SHIFT 0x00000007
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5_MASK 0x00000040L
+#define PB1_TX_LANE5_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_5__SHIFT 0x00000006
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5_MASK 0x0000fc00L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENT_5__SHIFT 0x0000000a
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5_MASK 0x00000300L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__COEFFICIENTID_5__SHIFT 0x00000008
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5_MASK 0x00000080L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__DEEMPH_5__SHIFT 0x00000007
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5_MASK 0x00000008L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__INCOHERENTCK_5__SHIFT 0x00000003
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5_MASK 0x00000070L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXMARG_5__SHIFT 0x00000004
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5_MASK 0x00000007L
+#define PB1_TX_LANE5_SCI_STAT_OVRD_REG0__TXPWR_5__SHIFT 0x00000000
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6_MASK 0x00000001L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_DISPCLK_MODE_6__SHIFT 0x00000000
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6_MASK 0x00000002L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_INV_DATA_6__SHIFT 0x00000001
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6_MASK 0x00000004L
+#define PB1_TX_LANE6_CTRL_REG0__TX_CFG_SWING_BOOST_EN_6__SHIFT 0x00000002
+#define PB1_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6_MASK 0x00000008L
+#define PB1_TX_LANE6_CTRL_REG0__TX_DBG_PRBS_EN_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6_MASK 0x00000002L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_EN_6__SHIFT 0x00000001
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6_MASK 0x00000001L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_6__SHIFT 0x00000000
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6_MASK 0x00000008L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6_MASK 0x00000004L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_6__SHIFT 0x00000002
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6_MASK 0x00000020L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_6__SHIFT 0x00000005
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6_MASK 0x00000010L
+#define PB1_TX_LANE6_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_6__SHIFT 0x00000004
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6_MASK 0x00000080L
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_6__SHIFT 0x00000007
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6_MASK 0x00000040L
+#define PB1_TX_LANE6_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_6__SHIFT 0x00000006
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6_MASK 0x0000fc00L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENT_6__SHIFT 0x0000000a
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6_MASK 0x00000300L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__COEFFICIENTID_6__SHIFT 0x00000008
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6_MASK 0x00000080L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__DEEMPH_6__SHIFT 0x00000007
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6_MASK 0x00000008L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__INCOHERENTCK_6__SHIFT 0x00000003
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6_MASK 0x00000070L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXMARG_6__SHIFT 0x00000004
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6_MASK 0x00000007L
+#define PB1_TX_LANE6_SCI_STAT_OVRD_REG0__TXPWR_6__SHIFT 0x00000000
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7_MASK 0x00000001L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_DISPCLK_MODE_7__SHIFT 0x00000000
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7_MASK 0x00000002L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_INV_DATA_7__SHIFT 0x00000001
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7_MASK 0x00000004L
+#define PB1_TX_LANE7_CTRL_REG0__TX_CFG_SWING_BOOST_EN_7__SHIFT 0x00000002
+#define PB1_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7_MASK 0x00000008L
+#define PB1_TX_LANE7_CTRL_REG0__TX_DBG_PRBS_EN_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7_MASK 0x00000002L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_EN_7__SHIFT 0x00000001
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7_MASK 0x00000001L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_7__SHIFT 0x00000000
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7_MASK 0x00000008L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7_MASK 0x00000004L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_7__SHIFT 0x00000002
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7_MASK 0x00000020L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_7__SHIFT 0x00000005
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7_MASK 0x00000010L
+#define PB1_TX_LANE7_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_7__SHIFT 0x00000004
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7_MASK 0x00000080L
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_7__SHIFT 0x00000007
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7_MASK 0x00000040L
+#define PB1_TX_LANE7_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_7__SHIFT 0x00000006
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7_MASK 0x0000fc00L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENT_7__SHIFT 0x0000000a
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7_MASK 0x00000300L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__COEFFICIENTID_7__SHIFT 0x00000008
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7_MASK 0x00000080L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__DEEMPH_7__SHIFT 0x00000007
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7_MASK 0x00000008L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__INCOHERENTCK_7__SHIFT 0x00000003
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7_MASK 0x00000070L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXMARG_7__SHIFT 0x00000004
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7_MASK 0x00000007L
+#define PB1_TX_LANE7_SCI_STAT_OVRD_REG0__TXPWR_7__SHIFT 0x00000000
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8_MASK 0x00000001L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_DISPCLK_MODE_8__SHIFT 0x00000000
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8_MASK 0x00000002L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_INV_DATA_8__SHIFT 0x00000001
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8_MASK 0x00000004L
+#define PB1_TX_LANE8_CTRL_REG0__TX_CFG_SWING_BOOST_EN_8__SHIFT 0x00000002
+#define PB1_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8_MASK 0x00000008L
+#define PB1_TX_LANE8_CTRL_REG0__TX_DBG_PRBS_EN_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8_MASK 0x00000002L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_EN_8__SHIFT 0x00000001
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8_MASK 0x00000001L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_8__SHIFT 0x00000000
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8_MASK 0x00000008L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8_MASK 0x00000004L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_8__SHIFT 0x00000002
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8_MASK 0x00000020L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_8__SHIFT 0x00000005
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8_MASK 0x00000010L
+#define PB1_TX_LANE8_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_8__SHIFT 0x00000004
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8_MASK 0x00000080L
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_8__SHIFT 0x00000007
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8_MASK 0x00000040L
+#define PB1_TX_LANE8_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_8__SHIFT 0x00000006
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8_MASK 0x0000fc00L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENT_8__SHIFT 0x0000000a
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8_MASK 0x00000300L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__COEFFICIENTID_8__SHIFT 0x00000008
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8_MASK 0x00000080L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__DEEMPH_8__SHIFT 0x00000007
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8_MASK 0x00000008L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__INCOHERENTCK_8__SHIFT 0x00000003
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8_MASK 0x00000070L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXMARG_8__SHIFT 0x00000004
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8_MASK 0x00000007L
+#define PB1_TX_LANE8_SCI_STAT_OVRD_REG0__TXPWR_8__SHIFT 0x00000000
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9_MASK 0x00000001L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_DISPCLK_MODE_9__SHIFT 0x00000000
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9_MASK 0x00000002L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_INV_DATA_9__SHIFT 0x00000001
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9_MASK 0x00000004L
+#define PB1_TX_LANE9_CTRL_REG0__TX_CFG_SWING_BOOST_EN_9__SHIFT 0x00000002
+#define PB1_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9_MASK 0x00000008L
+#define PB1_TX_LANE9_CTRL_REG0__TX_DBG_PRBS_EN_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9_MASK 0x00000002L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_EN_9__SHIFT 0x00000001
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9_MASK 0x00000001L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DCLK_EN_OVRD_VAL_9__SHIFT 0x00000000
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9_MASK 0x00000008L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_EN_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9_MASK 0x00000004L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_DATA_EN_OVRD_VAL_9__SHIFT 0x00000002
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9_MASK 0x00000020L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_EN_9__SHIFT 0x00000005
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9_MASK 0x00000010L
+#define PB1_TX_LANE9_OVRD_REG0__TX_DRV_PWRON_OVRD_VAL_9__SHIFT 0x00000004
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9_MASK 0x00000080L
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_EN_9__SHIFT 0x00000007
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9_MASK 0x00000040L
+#define PB1_TX_LANE9_OVRD_REG0__TX_FRONTEND_PWRON_OVRD_VAL_9__SHIFT 0x00000006
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9_MASK 0x0000fc00L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENT_9__SHIFT 0x0000000a
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9_MASK 0x00000300L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__COEFFICIENTID_9__SHIFT 0x00000008
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9_MASK 0x00000080L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__DEEMPH_9__SHIFT 0x00000007
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9_MASK 0x00000008L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__INCOHERENTCK_9__SHIFT 0x00000003
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9_MASK 0x00000070L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXMARG_9__SHIFT 0x00000004
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9_MASK 0x00000007L
+#define PB1_TX_LANE9_SCI_STAT_OVRD_REG0__TXPWR_9__SHIFT 0x00000000
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L
+#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x00000007
+#define PCIE_BUS_CNTL__PMI_INT_DIS_MASK 0x00000040L
+#define PCIE_BUS_CNTL__PMI_INT_DIS__SHIFT 0x00000006
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000001L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x00000000
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x00000002
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000002L
+#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x00000001
+#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA_MASK 0x00000010L
+#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA__SHIFT 0x00000004
+#define PCIE_CI_CNTL__CI_MST_IGNORE_PAGE_ALIGNED_REQUEST_MASK 0x00002000L
+#define PCIE_CI_CNTL__CI_MST_IGNORE_PAGE_ALIGNED_REQUEST__SHIFT 0x0000000d
+#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS_MASK 0x00000200L
+#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS__SHIFT 0x00000009
+#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS_MASK 0x00000008L
+#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS__SHIFT 0x00000003
+#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE_MASK 0x00000004L
+#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE__SHIFT 0x00000002
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS_MASK 0x00000400L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS__SHIFT 0x0000000a
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE_MASK 0x00000800L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE__SHIFT 0x0000000b
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR_MASK 0x00001000L
+#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR__SHIFT 0x0000000c
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS_MASK 0x00000100L
+#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS__SHIFT 0x00000008
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE_MASK 0x000000c0L
+#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE__SHIFT 0x00000006
+#define PCIE_CNTL2__MST_MEM_LS_EN_MASK 0x00040000L
+#define PCIE_CNTL2__MST_MEM_LS_EN__SHIFT 0x00000012
+#define PCIE_CNTL2__MST_MEM_SD_EN_MASK 0x00400000L
+#define PCIE_CNTL2__MST_MEM_SD_EN__SHIFT 0x00000016
+#define PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK 0x00080000L
+#define PCIE_CNTL2__REPLAY_MEM_LS_EN__SHIFT 0x00000013
+#define PCIE_CNTL2__REPLAY_MEM_SD_EN_MASK 0x00800000L
+#define PCIE_CNTL2__REPLAY_MEM_SD_EN__SHIFT 0x00000017
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING_MASK 0x1f000000L
+#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING__SHIFT 0x00000018
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN_MASK 0x00020000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN__SHIFT 0x00000011
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN_MASK 0x00200000L
+#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN__SHIFT 0x00000015
+#define PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L
+#define PCIE_CNTL2__SLV_MEM_LS_EN__SHIFT 0x00000010
+#define PCIE_CNTL2__SLV_MEM_SD_EN_MASK 0x00100000L
+#define PCIE_CNTL2__SLV_MEM_SD_EN__SHIFT 0x00000014
+#define PCIE_CNTL2__TX_ARB_MST_LIMIT_MASK 0x000007c0L
+#define PCIE_CNTL2__TX_ARB_MST_LIMIT__SHIFT 0x00000006
+#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN_MASK 0x00000001L
+#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN__SHIFT 0x00000000
+#define PCIE_CNTL2__TX_ARB_SLV_LIMIT_MASK 0x0000003eL
+#define PCIE_CNTL2__TX_ARB_SLV_LIMIT__SHIFT 0x00000001
+#define PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L
+#define PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x00000000
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL_MASK 0x0000000eL
+#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL__SHIFT 0x00000001
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE_MASK 0x00000200L
+#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE__SHIFT 0x00000009
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L
+#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x00000008
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS_MASK 0x00800000L
+#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS__SHIFT 0x00000017
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN_MASK 0x80000000L
+#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN__SHIFT 0x0000001f
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS_MASK 0x00008000L
+#define PCIE_CNTL__RX_RCB_ATS_UC_DIS__SHIFT 0x0000000f
+#define PCIE_CNTL__RX_RCB_CHANNEL_ORDERING_MASK 0x00100000L
+#define PCIE_CNTL__RX_RCB_CHANNEL_ORDERING__SHIFT 0x00000014
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE_MASK 0x00080000L
+#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE__SHIFT 0x00000013
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS_MASK 0x00020000L
+#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS__SHIFT 0x00000011
+#define PCIE_CNTL__RX_RCB_REORDER_EN_MASK 0x00010000L
+#define PCIE_CNTL__RX_RCB_REORDER_EN__SHIFT 0x00000010
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS_MASK 0x00040000L
+#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS__SHIFT 0x00000012
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS_MASK 0x00200000L
+#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS__SHIFT 0x00000015
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS_MASK 0x00400000L
+#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS__SHIFT 0x00000016
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE_MASK 0x00001c00L
+#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE__SHIFT 0x0000000a
+#define PCIE_CNTL__TX_CPL_DEBUG_MASK 0x3f000000L
+#define PCIE_CNTL__TX_CPL_DEBUG__SHIFT 0x00000018
+#define PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L
+#define PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x00000007
+#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x02000000L
+#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x00000019
+#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
+#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x00000010
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x00000014
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
+#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE__SHIFT 0x00000018
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000e0000L
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x00000011
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00e00000L
+#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x00000015
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK 0x0000000fL
+#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT 0x00000000
+#define PCIE_DATA__PCIE_DATA_MASK 0xffffffffL
+#define PCIE_DATA__PCIE_DATA__SHIFT 0x00000000
+#define PCIE_DEBUG_CNTL__DEBUG_LANE_EN_MASK 0xffff0000L
+#define PCIE_DEBUG_CNTL__DEBUG_LANE_EN__SHIFT 0x00000010
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN_MASK 0x000000ffL
+#define PCIE_DEBUG_CNTL__DEBUG_PORT_EN__SHIFT 0x00000000
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT_MASK 0x00000100L
+#define PCIE_DEBUG_CNTL__DEBUG_SELECT__SHIFT 0x00000008
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x0000000b
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x00001000L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x0000000c
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x00002000L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x0000000d
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L
+#define PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x00000008
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS_MASK 0x00008000L
+#define PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS__SHIFT 0x0000000f
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS_MASK 0x00004000L
+#define PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS__SHIFT 0x0000000e
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET_MASK 0x00010000L
+#define PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET__SHIFT 0x00000010
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L
+#define PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x00000000
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR_MASK 0x00000080L
+#define PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR__SHIFT 0x00000007
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR_MASK 0x00000020L
+#define PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR__SHIFT 0x00000005
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG_MASK 0x00000002L
+#define PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG__SHIFT 0x00000001
+#define PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR_MASK 0x00000040L
+#define PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR__SHIFT 0x00000006
+#define PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR_MASK 0x00000010L
+#define PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR__SHIFT 0x00000004
+#define PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L
+#define PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0x0000000c
+#define PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x00000008
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00ff0000L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x00000010
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xff000000L
+#define PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x00000018
+#define PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x0000001fL
+#define PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x00000000
+#define PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0x000000ffL
+#define PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0x000000ffL
+#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x00000000
+#define PCIE_FC_CPL__CPLD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_CPL__CPLD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_CPL__CPLH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_CPL__CPLH_CREDITS__SHIFT 0x00000008
+#define PCIE_FC_NP__NPD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_NP__NPD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_NP__NPH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_NP__NPH_CREDITS__SHIFT 0x00000008
+#define PCIE_FC_P__PD_CREDITS_MASK 0x000000ffL
+#define PCIE_FC_P__PD_CREDITS__SHIFT 0x00000000
+#define PCIE_FC_P__PH_CREDITS_MASK 0x0000ff00L
+#define PCIE_FC_P__PH_CREDITS__SHIFT 0x00000008
+#define PCIE_HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define PCIE_HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define PCIE_HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define PCIE_HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define PCIE_HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define PCIE_HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define PCIE_HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define PCIE_HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define PCIE_HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define PCIE_HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define PCIE_HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define PCIE_HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define PCIE_HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define PCIE_HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define PCIE_HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define PCIE_HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define PCIE_HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define PCIE_HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define PCIE_HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define PCIE_HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define PCIE_HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define PCIE_HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define PCIE_HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define PCIE_HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define PCIE_HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define PCIE_HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define PCIE_HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define PCIE_HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define PCIE_HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define PCIE_HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define PCIE_HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define PCIE_HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR_MASK 0x0001ffffL
+#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR__SHIFT 0x00000000
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA_MASK 0xffffffffL
+#define PCIE_I2C_REG_DATA__I2C_REG_DATA__SHIFT 0x00000000
+#define PCIE_INDEX__PCIE_INDEX_MASK 0x000000ffL
+#define PCIE_INDEX__PCIE_INDEX__SHIFT 0x00000000
+#define PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L
+#define PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x00000000
+#define PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L
+#define PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x00000002
+#define PCIE_INT_CNTL__LINK_BW_INT_EN_MASK 0x00000080L
+#define PCIE_INT_CNTL__LINK_BW_INT_EN__SHIFT 0x00000007
+#define PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L
+#define PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x00000004
+#define PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L
+#define PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x00000001
+#define PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L
+#define PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x00000006
+#define PCIE_INT_CNTL__QUIESCE_RCVD_INT_EN_MASK 0x00000100L
+#define PCIE_INT_CNTL__QUIESCE_RCVD_INT_EN__SHIFT 0x00000008
+#define PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L
+#define PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x00000003
+#define PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L
+#define PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x00000000
+#define PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L
+#define PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x00000002
+#define PCIE_INT_STATUS__LINK_BW_INT_STATUS_MASK 0x00000080L
+#define PCIE_INT_STATUS__LINK_BW_INT_STATUS__SHIFT 0x00000007
+#define PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L
+#define PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x00000004
+#define PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L
+#define PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x00000001
+#define PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L
+#define PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x00000006
+#define PCIE_INT_STATUS__QUIESCE_RCVD_INT_STATUS_MASK 0x00000100L
+#define PCIE_INT_STATUS__QUIESCE_RCVD_INT_STATUS__SHIFT 0x00000008
+#define PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L
+#define PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x00000003
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR_MASK 0x0000fc00L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR__SHIFT 0x0000000a
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM_MASK 0x3fc00000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM__SHIFT 0x00000016
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR_MASK 0x003f0000L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR__SHIFT 0x00000010
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR_MASK 0x000003f0L
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR__SHIFT 0x00000004
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET_MASK 0x0000000fL
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET__SHIFT 0x00000000
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN_MASK 0x00000001L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN__SHIFT 0x00000000
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG_MASK 0x00000020L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG__SHIFT 0x00000005
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE_MASK 0x00000002L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE__SHIFT 0x00000001
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE_MASK 0x00000400L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE__SHIFT 0x0000000a
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE_MASK 0x00000040L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE__SHIFT 0x00000006
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED_MASK 0x00000200L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED__SHIFT 0x00000009
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER_MASK 0x00000100L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER__SHIFT 0x00000008
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE_MASK 0x00000008L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE__SHIFT 0x00000003
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE_MASK 0x00000010L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE__SHIFT 0x00000004
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE_MASK 0x00000080L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE__SHIFT 0x00000007
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE_MASK 0x00000004L
+#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE__SHIFT 0x00000002
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE_MASK 0x03000000L
+#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE__SHIFT 0x00000018
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF_MASK 0x00000fffL
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF__SHIFT 0x00000000
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS_MASK 0x00fff000L
+#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS__SHIFT 0x0000000c
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK 0x00020000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1__SHIFT 0x00000011
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK 0x00040000L
+#define PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23__SHIFT 0x00000012
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD_MASK 0x00400000L
+#define PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD__SHIFT 0x00000016
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0_MASK 0x00100000L
+#define PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0__SHIFT 0x00000014
+#define PCIE_LC_CNTL2__LC_DEASSERT_RX_EN_IN_L0S_MASK 0x00080000L
+#define PCIE_LC_CNTL2__LC_DEASSERT_RX_EN_IN_L0S__SHIFT 0x00000013
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET_MASK 0x00010000L
+#define PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET__SHIFT 0x00000010
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS_MASK 0x04000000L
+#define PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS__SHIFT 0x0000001a
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE_MASK 0x0000c000L
+#define PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE__SHIFT 0x0000000e
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI_MASK 0x80000000L
+#define PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI__SHIFT 0x0000001f
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_MASK 0x00000800L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN_MASK 0x00001000L
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN__SHIFT 0x0000000c
+#define PCIE_LC_CNTL2__LC_ILLEGAL_STATE__SHIFT 0x0000000b
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L
+#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x0000001b
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN__SHIFT 0x0000000a
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION_MASK 0x00000080L
+#define PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION__SHIFT 0x00000007
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN_MASK 0x00000100L
+#define PCIE_LC_CNTL2__LC_MORE_TS2_EN__SHIFT 0x00000008
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE_MASK 0x10000000L
+#define PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE__SHIFT 0x0000001c
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES_MASK 0x02000000L
+#define PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES__SHIFT 0x00000019
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS__SHIFT 0x00000015
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT_MASK 0x00000040L
+#define PCIE_LC_CNTL2__LC_STATE_TIMED_OUT__SHIFT 0x00000006
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL_MASK 0x60000000L
+#define PCIE_LC_CNTL2__LC_TEST_TIMER_SEL__SHIFT 0x0000001d
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE_MASK 0x0000003fL
+#define PCIE_LC_CNTL2__LC_TIMED_OUT_STATE__SHIFT 0x00000000
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG_MASK 0x01800000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG__SHIFT 0x00000017
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE_MASK 0x00002000L
+#define PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE__SHIFT 0x0000000d
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS_MASK 0x00000200L
+#define PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS__SHIFT 0x00000009
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN_MASK 0x00040000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN__SHIFT 0x00000012
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL_MASK 0x00180000L
+#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL__SHIFT 0x00000013
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000100L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x00000008
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x000000c0L
+#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x00000006
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN_MASK 0x00010000L
+#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN__SHIFT 0x00000010
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT_MASK 0x00000200L
+#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT__SHIFT 0x00000009
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT_MASK 0x00000010L
+#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT__SHIFT 0x00000004
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK__SHIFT 0x00000017
+#define PCIE_LC_CNTL3__LC_EHP_RX_PHY_CMD_MASK 0x00003000L
+#define PCIE_LC_CNTL3__LC_EHP_RX_PHY_CMD__SHIFT 0x0000000c
+#define PCIE_LC_CNTL3__LC_EHP_TX_PHY_CMD_MASK 0x0000c000L
+#define PCIE_LC_CNTL3__LC_EHP_TX_PHY_CMD__SHIFT 0x0000000e
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN__SHIFT 0x0000000a
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN_MASK 0x00200000L
+#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN__SHIFT 0x00000015
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK 0x40000000L
+#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY__SHIFT 0x0000001e
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL_MASK 0x03000000L
+#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL__SHIFT 0x00000018
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN__SHIFT 0x00000011
+#define PCIE_LC_CNTL3__LC_N_EIE_SEL_MASK 0x80000000L
+#define PCIE_LC_CNTL3__LC_N_EIE_SEL__SHIFT 0x0000001f
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS_MASK 0x00000008L
+#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS__SHIFT 0x00000003
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE_MASK 0x00000800L
+#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE__SHIFT 0x0000000b
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN_MASK 0x00000020L
+#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN__SHIFT 0x00000005
+#define PCIE_LC_CNTL3__LC_RXPHYCMD_INACTIVE_EN_MODE_MASK 0x00400000L
+#define PCIE_LC_CNTL3__LC_RXPHYCMD_INACTIVE_EN_MODE__SHIFT 0x00000016
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL_MASK 0x00000006L
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL__SHIFT 0x00000001
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_MASK 0x00000001L
+#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS__SHIFT 0x00000000
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL_MASK 0x3c000000L
+#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL__SHIFT 0x0000001a
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN_MASK 0x02000000L
+#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN__SHIFT 0x00000019
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_MASK 0x00000010L
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_MASK 0x00010000L
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE__SHIFT 0x00000010
+#define PCIE_LC_CNTL4__LC_BYPASS_EQ__SHIFT 0x00000004
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK_MASK 0x00000400L
+#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK__SHIFT 0x0000000a
+#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_MASK 0x00000300L
+#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE__SHIFT 0x00000008
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE_MASK 0x01000000L
+#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE__SHIFT 0x00000018
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MASK 0x00000040L
+#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS__SHIFT 0x00000006
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_MASK 0x00020000L
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE__SHIFT 0x00000011
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_MASK 0x003c0000L
+#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE__SHIFT 0x00000012
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY_MASK 0x00000080L
+#define PCIE_LC_CNTL4__LC_IGNORE_PARITY__SHIFT 0x00000007
+#define PCIE_LC_CNTL4__LC_PCIE_TX_FULL_SWING_MASK 0x00800000L
+#define PCIE_LC_CNTL4__LC_PCIE_TX_FULL_SWING__SHIFT 0x00000017
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD_MASK 0x00004000L
+#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD__SHIFT 0x0000000e
+#define PCIE_LC_CNTL4__LC_REDO_EQ_MASK 0x00000020L
+#define PCIE_LC_CNTL4__LC_REDO_EQ__SHIFT 0x00000005
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK 0x00002000L
+#define PCIE_LC_CNTL4__LC_SET_QUIESCE__SHIFT 0x0000000d
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR_MASK 0x00000003L
+#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR__SHIFT 0x00000000
+#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_MASK 0x00008000L
+#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD__SHIFT 0x0000000f
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS_MASK 0x00400000L
+#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS__SHIFT 0x00000016
+#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_MASK 0x00000800L
+#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD__SHIFT 0x0000000b
+#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_MASK 0x00001000L
+#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ__SHIFT 0x0000000c
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK_MASK 0xfc000000L
+#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK__SHIFT 0x0000001a
+#define PCIE_LC_CNTL5__LC_EQ_FS_0_MASK 0x0000003fL
+#define PCIE_LC_CNTL5__LC_EQ_FS_0__SHIFT 0x00000000
+#define PCIE_LC_CNTL5__LC_EQ_FS_8_MASK 0x00000fc0L
+#define PCIE_LC_CNTL5__LC_EQ_FS_8__SHIFT 0x00000006
+#define PCIE_LC_CNTL5__LC_EQ_LF_0_MASK 0x0003f000L
+#define PCIE_LC_CNTL5__LC_EQ_LF_0__SHIFT 0x0000000c
+#define PCIE_LC_CNTL5__LC_EQ_LF_8_MASK 0x00fc0000L
+#define PCIE_LC_CNTL5__LC_EQ_LF_8__SHIFT 0x00000012
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE_MASK 0x000000f0L
+#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE__SHIFT 0x00000004
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS_MASK 0x01000000L
+#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS__SHIFT 0x00000018
+#define PCIE_LC_CNTL__LC_DELAY_COUNT_MASK 0x06000000L
+#define PCIE_LC_CNTL__LC_DELAY_COUNT__SHIFT 0x00000019
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT_MASK 0x08000000L
+#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT__SHIFT 0x0000001b
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK 0x10000000L
+#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT__SHIFT 0x0000001c
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0_MASK 0x00000002L
+#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0__SHIFT 0x00000001
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN_MASK 0x40000000L
+#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN__SHIFT 0x0000001e
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE_MASK 0x20000000L
+#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE__SHIFT 0x0000001d
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC_MASK 0x00100000L
+#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC__SHIFT 0x00000014
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE_MASK 0x80000000L
+#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE__SHIFT 0x0000001f
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN_MASK 0x00020000L
+#define PCIE_LC_CNTL__LC_INC_N_FTS_EN__SHIFT 0x00000011
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK 0x00000f00L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT 0x00000008
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK_MASK 0x00800000L
+#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK__SHIFT 0x00000017
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK 0x0000f000L
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT 0x0000000c
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23_MASK 0x000c0000L
+#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23__SHIFT 0x00000012
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK 0x00010000L
+#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT 0x00000010
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN_MASK 0x00000004L
+#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN__SHIFT 0x00000002
+#define PCIE_LC_CNTL__LC_RESET_LINK_MASK 0x00000008L
+#define PCIE_LC_CNTL__LC_RESET_LINK__SHIFT 0x00000003
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS_MASK 0x00200000L
+#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS__SHIFT 0x00000015
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23_MASK 0x00400000L
+#define PCIE_LC_CNTL__LC_WAKE_FROM_L23__SHIFT 0x00000016
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_MASK 0x00080000L
+#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN__SHIFT 0x00000013
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_MASK 0x00000001L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF__SHIFT 0x00000000
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_MASK 0x00001f80L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR__SHIFT 0x00000007
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_MASK 0x0007e000L
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR__SHIFT 0x0000000d
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_MASK 0x0000007eL
+#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR__SHIFT 0x00000001
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_MASK 0x00000001L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE__SHIFT 0x00000000
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_MASK 0x00001f80L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ__SHIFT 0x00000007
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_MASK 0x0007e000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ__SHIFT 0x0000000d
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_MASK 0x0000007eL
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ__SHIFT 0x00000001
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_MASK 0x01f80000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END__SHIFT 0x00000013
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_MASK 0x7e000000L
+#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END__SHIFT 0x00000019
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES_MASK 0x0000ffffL
+#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES__SHIFT 0x00000000
+#define PCIE_LC_LANE_CNTL__LC_LANE_DIS_MASK 0xffff0000L
+#define PCIE_LC_LANE_CNTL__LC_LANE_DIS__SHIFT 0x00000010
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB_MASK 0x00010000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB__SHIFT 0x00000010
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN_MASK 0x00080000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN__SHIFT 0x00000013
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK 0x00600000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT 0x00000015
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN_MASK 0x00040000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN__SHIFT 0x00000012
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN_MASK 0x00800000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN__SHIFT 0x00000017
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK 0x00020000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN__SHIFT 0x00000011
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK 0x00000007L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x00000004
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT 0x00000000
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK 0x00000080L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE__SHIFT 0x00000007
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK 0x00000100L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW__SHIFT 0x00000008
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK 0x00000400L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN__SHIFT 0x0000000a
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK 0x00000200L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT__SHIFT 0x00000009
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN_MASK 0x00000800L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN__SHIFT 0x0000000b
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL_MASK 0x00008000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL__SHIFT 0x0000000f
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS_MASK 0x00004000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS__SHIFT 0x0000000e
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE_MASK 0x00100000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE__SHIFT 0x00000014
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK 0x00002000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS__SHIFT 0x0000000d
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK 0x00001000L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT__SHIFT 0x0000000c
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK 0xff000000L
+#define PCIE_LC_N_FTS_CNTL__LC_N_FTS__SHIFT 0x00000018
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY_MASK 0x00000200L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY__SHIFT 0x00000009
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT_MASK 0x00ff0000L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT__SHIFT 0x00000010
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK 0x000000ffL
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK 0x00000100L
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN__SHIFT 0x00000008
+#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT 0x00000000
+#define PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN_MASK 0x00020000L
+#define PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN__SHIFT 0x00000011
+#define PCIE_LC_SPEED_CNTL__LC_AUTO_RECOVERY_DIS_MASK 0x00400000L
+#define PCIE_LC_SPEED_CNTL__LC_AUTO_RECOVERY_DIS__SHIFT 0x00000016
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE_MASK 0x04000000L
+#define PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE__SHIFT 0x0000001a
+#define PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT_MASK 0x00010000L
+#define PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT__SHIFT 0x00000010
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0x00006000L
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x0000000d
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED_MASK 0x03000000L
+#define PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED__SHIFT 0x00000018
+#define PCIE_LC_SPEED_CNTL__LC_DELAY_COEFF_UPDATE_DIS_MASK 0x80000000L
+#define PCIE_LC_SPEED_CNTL__LC_DELAY_COEFF_UPDATE_DIS__SHIFT 0x0000001f
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CHECK_EQTS_IN_RCFG_MASK 0x40000000L
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CHECK_EQTS_IN_RCFG__SHIFT 0x0000001e
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS_MASK 0x00008000L
+#define PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS__SHIFT 0x0000000f
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK 0x00000100L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE__SHIFT 0x00000008
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK 0x00000040L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE__SHIFT 0x00000006
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE_MASK 0x00000080L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE__SHIFT 0x00000007
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK 0x00000020L
+#define PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE__SHIFT 0x00000005
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L
+#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x00000000
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L
+#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x00000001
+#define PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK 0x00000200L
+#define PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE__SHIFT 0x00000009
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN_MASK 0x10000000L
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN__SHIFT 0x0000001c
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN_MASK 0x20000000L
+#define PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN__SHIFT 0x0000001d
+#define PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN_MASK 0x08000000L
+#define PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN__SHIFT 0x0000001b
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2_MASK 0x00040000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2__SHIFT 0x00000012
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3_MASK 0x00100000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3__SHIFT 0x00000014
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2_MASK 0x00080000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2__SHIFT 0x00000013
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3_MASK 0x00200000L
+#define PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3__SHIFT 0x00000015
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00001000L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x0000000c
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x00000c00L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x0000000a
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS_MASK 0x00800000L
+#define PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS__SHIFT 0x00000017
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN_MASK 0x00000004L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN__SHIFT 0x00000002
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK 0x00000018L
+#define PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT 0x00000003
+#define PCIE_LC_STATE0__LC_CURRENT_STATE_MASK 0x0000003fL
+#define PCIE_LC_STATE0__LC_CURRENT_STATE__SHIFT 0x00000000
+#define PCIE_LC_STATE0__LC_PREV_STATE1_MASK 0x00003f00L
+#define PCIE_LC_STATE0__LC_PREV_STATE1__SHIFT 0x00000008
+#define PCIE_LC_STATE0__LC_PREV_STATE2_MASK 0x003f0000L
+#define PCIE_LC_STATE0__LC_PREV_STATE2__SHIFT 0x00000010
+#define PCIE_LC_STATE0__LC_PREV_STATE3_MASK 0x3f000000L
+#define PCIE_LC_STATE0__LC_PREV_STATE3__SHIFT 0x00000018
+#define PCIE_LC_STATE10__LC_PREV_STATE40_MASK 0x0000003fL
+#define PCIE_LC_STATE10__LC_PREV_STATE40__SHIFT 0x00000000
+#define PCIE_LC_STATE10__LC_PREV_STATE41_MASK 0x00003f00L
+#define PCIE_LC_STATE10__LC_PREV_STATE41__SHIFT 0x00000008
+#define PCIE_LC_STATE10__LC_PREV_STATE42_MASK 0x003f0000L
+#define PCIE_LC_STATE10__LC_PREV_STATE42__SHIFT 0x00000010
+#define PCIE_LC_STATE10__LC_PREV_STATE43_MASK 0x3f000000L
+#define PCIE_LC_STATE10__LC_PREV_STATE43__SHIFT 0x00000018
+#define PCIE_LC_STATE11__LC_PREV_STATE44_MASK 0x0000003fL
+#define PCIE_LC_STATE11__LC_PREV_STATE44__SHIFT 0x00000000
+#define PCIE_LC_STATE11__LC_PREV_STATE45_MASK 0x00003f00L
+#define PCIE_LC_STATE11__LC_PREV_STATE45__SHIFT 0x00000008
+#define PCIE_LC_STATE11__LC_PREV_STATE46_MASK 0x003f0000L
+#define PCIE_LC_STATE11__LC_PREV_STATE46__SHIFT 0x00000010
+#define PCIE_LC_STATE11__LC_PREV_STATE47_MASK 0x3f000000L
+#define PCIE_LC_STATE11__LC_PREV_STATE47__SHIFT 0x00000018
+#define PCIE_LC_STATE1__LC_PREV_STATE4_MASK 0x0000003fL
+#define PCIE_LC_STATE1__LC_PREV_STATE4__SHIFT 0x00000000
+#define PCIE_LC_STATE1__LC_PREV_STATE5_MASK 0x00003f00L
+#define PCIE_LC_STATE1__LC_PREV_STATE5__SHIFT 0x00000008
+#define PCIE_LC_STATE1__LC_PREV_STATE6_MASK 0x003f0000L
+#define PCIE_LC_STATE1__LC_PREV_STATE6__SHIFT 0x00000010
+#define PCIE_LC_STATE1__LC_PREV_STATE7_MASK 0x3f000000L
+#define PCIE_LC_STATE1__LC_PREV_STATE7__SHIFT 0x00000018
+#define PCIE_LC_STATE2__LC_PREV_STATE10_MASK 0x003f0000L
+#define PCIE_LC_STATE2__LC_PREV_STATE10__SHIFT 0x00000010
+#define PCIE_LC_STATE2__LC_PREV_STATE11_MASK 0x3f000000L
+#define PCIE_LC_STATE2__LC_PREV_STATE11__SHIFT 0x00000018
+#define PCIE_LC_STATE2__LC_PREV_STATE8_MASK 0x0000003fL
+#define PCIE_LC_STATE2__LC_PREV_STATE8__SHIFT 0x00000000
+#define PCIE_LC_STATE2__LC_PREV_STATE9_MASK 0x00003f00L
+#define PCIE_LC_STATE2__LC_PREV_STATE9__SHIFT 0x00000008
+#define PCIE_LC_STATE3__LC_PREV_STATE12_MASK 0x0000003fL
+#define PCIE_LC_STATE3__LC_PREV_STATE12__SHIFT 0x00000000
+#define PCIE_LC_STATE3__LC_PREV_STATE13_MASK 0x00003f00L
+#define PCIE_LC_STATE3__LC_PREV_STATE13__SHIFT 0x00000008
+#define PCIE_LC_STATE3__LC_PREV_STATE14_MASK 0x003f0000L
+#define PCIE_LC_STATE3__LC_PREV_STATE14__SHIFT 0x00000010
+#define PCIE_LC_STATE3__LC_PREV_STATE15_MASK 0x3f000000L
+#define PCIE_LC_STATE3__LC_PREV_STATE15__SHIFT 0x00000018
+#define PCIE_LC_STATE4__LC_PREV_STATE16_MASK 0x0000003fL
+#define PCIE_LC_STATE4__LC_PREV_STATE16__SHIFT 0x00000000
+#define PCIE_LC_STATE4__LC_PREV_STATE17_MASK 0x00003f00L
+#define PCIE_LC_STATE4__LC_PREV_STATE17__SHIFT 0x00000008
+#define PCIE_LC_STATE4__LC_PREV_STATE18_MASK 0x003f0000L
+#define PCIE_LC_STATE4__LC_PREV_STATE18__SHIFT 0x00000010
+#define PCIE_LC_STATE4__LC_PREV_STATE19_MASK 0x3f000000L
+#define PCIE_LC_STATE4__LC_PREV_STATE19__SHIFT 0x00000018
+#define PCIE_LC_STATE5__LC_PREV_STATE20_MASK 0x0000003fL
+#define PCIE_LC_STATE5__LC_PREV_STATE20__SHIFT 0x00000000
+#define PCIE_LC_STATE5__LC_PREV_STATE21_MASK 0x00003f00L
+#define PCIE_LC_STATE5__LC_PREV_STATE21__SHIFT 0x00000008
+#define PCIE_LC_STATE5__LC_PREV_STATE22_MASK 0x003f0000L
+#define PCIE_LC_STATE5__LC_PREV_STATE22__SHIFT 0x00000010
+#define PCIE_LC_STATE5__LC_PREV_STATE23_MASK 0x3f000000L
+#define PCIE_LC_STATE5__LC_PREV_STATE23__SHIFT 0x00000018
+#define PCIE_LC_STATE6__LC_PREV_STATE24_MASK 0x0000003fL
+#define PCIE_LC_STATE6__LC_PREV_STATE24__SHIFT 0x00000000
+#define PCIE_LC_STATE6__LC_PREV_STATE25_MASK 0x00003f00L
+#define PCIE_LC_STATE6__LC_PREV_STATE25__SHIFT 0x00000008
+#define PCIE_LC_STATE6__LC_PREV_STATE26_MASK 0x003f0000L
+#define PCIE_LC_STATE6__LC_PREV_STATE26__SHIFT 0x00000010
+#define PCIE_LC_STATE6__LC_PREV_STATE27_MASK 0x3f000000L
+#define PCIE_LC_STATE6__LC_PREV_STATE27__SHIFT 0x00000018
+#define PCIE_LC_STATE7__LC_PREV_STATE28_MASK 0x0000003fL
+#define PCIE_LC_STATE7__LC_PREV_STATE28__SHIFT 0x00000000
+#define PCIE_LC_STATE7__LC_PREV_STATE29_MASK 0x00003f00L
+#define PCIE_LC_STATE7__LC_PREV_STATE29__SHIFT 0x00000008
+#define PCIE_LC_STATE7__LC_PREV_STATE30_MASK 0x003f0000L
+#define PCIE_LC_STATE7__LC_PREV_STATE30__SHIFT 0x00000010
+#define PCIE_LC_STATE7__LC_PREV_STATE31_MASK 0x3f000000L
+#define PCIE_LC_STATE7__LC_PREV_STATE31__SHIFT 0x00000018
+#define PCIE_LC_STATE8__LC_PREV_STATE32_MASK 0x0000003fL
+#define PCIE_LC_STATE8__LC_PREV_STATE32__SHIFT 0x00000000
+#define PCIE_LC_STATE8__LC_PREV_STATE33_MASK 0x00003f00L
+#define PCIE_LC_STATE8__LC_PREV_STATE33__SHIFT 0x00000008
+#define PCIE_LC_STATE8__LC_PREV_STATE34_MASK 0x003f0000L
+#define PCIE_LC_STATE8__LC_PREV_STATE34__SHIFT 0x00000010
+#define PCIE_LC_STATE8__LC_PREV_STATE35_MASK 0x3f000000L
+#define PCIE_LC_STATE8__LC_PREV_STATE35__SHIFT 0x00000018
+#define PCIE_LC_STATE9__LC_PREV_STATE36_MASK 0x0000003fL
+#define PCIE_LC_STATE9__LC_PREV_STATE36__SHIFT 0x00000000
+#define PCIE_LC_STATE9__LC_PREV_STATE37_MASK 0x00003f00L
+#define PCIE_LC_STATE9__LC_PREV_STATE37__SHIFT 0x00000008
+#define PCIE_LC_STATE9__LC_PREV_STATE38_MASK 0x003f0000L
+#define PCIE_LC_STATE9__LC_PREV_STATE38__SHIFT 0x00000010
+#define PCIE_LC_STATE9__LC_PREV_STATE39_MASK 0x3f000000L
+#define PCIE_LC_STATE9__LC_PREV_STATE39__SHIFT 0x00000018
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK 0x000000e0L
+#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT 0x00000005
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK 0x0000001cL
+#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT 0x00000002
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK 0x00000001L
+#define PCIE_LC_STATUS1__LC_REVERSE_RCVR__SHIFT 0x00000000
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK 0x00000002L
+#define PCIE_LC_STATUS1__LC_REVERSE_XMIT__SHIFT 0x00000001
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES_MASK 0x0000ffffL
+#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES__SHIFT 0x00000000
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE_MASK 0xffff0000L
+#define PCIE_LC_STATUS2__LC_TURN_ON_LANE__SHIFT 0x00000010
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL_MASK 0x10000000L
+#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL__SHIFT 0x0000001c
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL_MASK 0x00c00000L
+#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL__SHIFT 0x00000016
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF_MASK 0x00020000L
+#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF__SHIFT 0x00000011
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE_MASK 0x00000010L
+#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE__SHIFT 0x00000004
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK 0x00002000L
+#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH__SHIFT 0x0000000d
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED_MASK 0x01000000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED__SHIFT 0x00000018
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST_MASK 0x02000000L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST__SHIFT 0x00000019
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED_MASK 0x00000800L
+#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED__SHIFT 0x0000000b
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME_MASK 0xc0000000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME__SHIFT 0x0000001e
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP_MASK 0x00010000L
+#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP__SHIFT 0x00000010
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN_MASK 0x00080000L
+#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN__SHIFT 0x00000013
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN_MASK 0x00001000L
+#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN__SHIFT 0x0000000c
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN_MASK 0x00000040L
+#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN__SHIFT 0x00000006
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN_MASK 0x00000080L
+#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN__SHIFT 0x00000007
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW_MASK 0x00100000L
+#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW__SHIFT 0x00000014
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1_MASK 0x00000020L
+#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1__SHIFT 0x00000005
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE_MASK 0x00000700L
+#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE__SHIFT 0x00000008
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER_MASK 0x04000000L
+#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER__SHIFT 0x0000001a
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT_MASK 0x08000000L
+#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT__SHIFT 0x0000001b
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN_MASK 0x00200000L
+#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN__SHIFT 0x00000015
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL_MASK 0x0000000fL
+#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL__SHIFT 0x00000000
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF_MASK 0x00040000L
+#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF__SHIFT 0x00000012
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK_MASK 0x20000000L
+#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK__SHIFT 0x0000001d
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR_MASK 0x0000ffffL
+#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR__SHIFT 0x00000000
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR_MASK 0xffff0000L
+#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR__SHIFT 0x00000010
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK_MASK 0x00002000L
+#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK__SHIFT 0x0000000d
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE_MASK 0x00001000L
+#define PCIE_P_CNTL__P_BLK_LOCK_MODE__SHIFT 0x0000000c
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG_MASK 0x00000008L
+#define PCIE_P_CNTL__P_ELASTDESKEW_HW_DEBUG__SHIFT 0x00000003
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE_MASK 0x0000c000L
+#define PCIE_P_CNTL__P_ELEC_IDLE_MODE__SHIFT 0x0000000e
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR_MASK 0x00000010L
+#define PCIE_P_CNTL__P_IGNORE_CRC_ERR__SHIFT 0x00000004
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK 0x00000040L
+#define PCIE_P_CNTL__P_IGNORE_EDB_ERR__SHIFT 0x00000006
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR_MASK 0x00000080L
+#define PCIE_P_CNTL__P_IGNORE_IDL_ERR__SHIFT 0x00000007
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR_MASK 0x00000020L
+#define PCIE_P_CNTL__P_IGNORE_LEN_ERR__SHIFT 0x00000005
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR_MASK 0x00000100L
+#define PCIE_P_CNTL__P_IGNORE_TOK_ERR__SHIFT 0x00000008
+#define PCIE_P_CNTL__P_PWRDN_EN_MASK 0x00000001L
+#define PCIE_P_CNTL__P_PWRDN_EN__SHIFT 0x00000000
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG_MASK 0x00000004L
+#define PCIE_P_CNTL__P_SYMALIGN_HW_DEBUG__SHIFT 0x00000002
+#define PCIE_P_CNTL__P_SYMALIGN_MODE_MASK 0x00000002L
+#define PCIE_P_CNTL__P_SYMALIGN_MODE__SHIFT 0x00000001
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR_MASK 0x0000ffffL
+#define PCIE_P_DECODER_STATUS__P_DECODE_ERR__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK_MASK 0x00000f00L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK_MASK 0x000000f0L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK__SHIFT 0x00000004
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK_MASK 0x00f00000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x00000014
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK_MASK 0x0000f000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK__SHIFT 0x0000000c
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK_MASK 0x000f0000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2_MASK 0x0f000000L
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK_MASK 0x0000000fL
+#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK_MASK 0x00000f00L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK_MASK 0x000000f0L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK__SHIFT 0x00000004
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK_MASK 0x00f00000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x00000014
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK_MASK 0x0000f000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK__SHIFT 0x0000000c
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK_MASK 0x000f0000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2_MASK 0x0f000000L
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK_MASK 0x0000000fL
+#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER_MASK 0x00ff0000L
+#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER__SHIFT 0x00000010
+#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER_MASK 0xff000000L
+#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER__SHIFT 0x00000018
+#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL_MASK 0x000000ffL
+#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL__SHIFT 0x00000000
+#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK 0x0000ff00L
+#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT 0x00000008
+#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT0_TXCLK__COUNTER0_MASK 0xffffffffL
+#define PCIE_PERF_COUNT0_TXCLK__COUNTER0__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT1_TXCLK__COUNTER1_MASK 0xffffffffL
+#define PCIE_PERF_COUNT1_TXCLK__COUNTER1__SHIFT 0x00000000
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN_MASK 0x00000001L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN__SHIFT 0x00000000
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET_MASK 0x00000004L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET__SHIFT 0x00000002
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_MASK 0x00000002L
+#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR__SHIFT 0x00000001
+#define PCIEP_HW_DEBUG__HW_00_DEBUG_MASK 0x00000001L
+#define PCIEP_HW_DEBUG__HW_00_DEBUG__SHIFT 0x00000000
+#define PCIEP_HW_DEBUG__HW_01_DEBUG_MASK 0x00000002L
+#define PCIEP_HW_DEBUG__HW_01_DEBUG__SHIFT 0x00000001
+#define PCIEP_HW_DEBUG__HW_02_DEBUG_MASK 0x00000004L
+#define PCIEP_HW_DEBUG__HW_02_DEBUG__SHIFT 0x00000002
+#define PCIEP_HW_DEBUG__HW_03_DEBUG_MASK 0x00000008L
+#define PCIEP_HW_DEBUG__HW_03_DEBUG__SHIFT 0x00000003
+#define PCIEP_HW_DEBUG__HW_04_DEBUG_MASK 0x00000010L
+#define PCIEP_HW_DEBUG__HW_04_DEBUG__SHIFT 0x00000004
+#define PCIEP_HW_DEBUG__HW_05_DEBUG_MASK 0x00000020L
+#define PCIEP_HW_DEBUG__HW_05_DEBUG__SHIFT 0x00000005
+#define PCIEP_HW_DEBUG__HW_06_DEBUG_MASK 0x00000040L
+#define PCIEP_HW_DEBUG__HW_06_DEBUG__SHIFT 0x00000006
+#define PCIEP_HW_DEBUG__HW_07_DEBUG_MASK 0x00000080L
+#define PCIEP_HW_DEBUG__HW_07_DEBUG__SHIFT 0x00000007
+#define PCIEP_HW_DEBUG__HW_08_DEBUG_MASK 0x00000100L
+#define PCIEP_HW_DEBUG__HW_08_DEBUG__SHIFT 0x00000008
+#define PCIEP_HW_DEBUG__HW_09_DEBUG_MASK 0x00000200L
+#define PCIEP_HW_DEBUG__HW_09_DEBUG__SHIFT 0x00000009
+#define PCIEP_HW_DEBUG__HW_10_DEBUG_MASK 0x00000400L
+#define PCIEP_HW_DEBUG__HW_10_DEBUG__SHIFT 0x0000000a
+#define PCIEP_HW_DEBUG__HW_11_DEBUG_MASK 0x00000800L
+#define PCIEP_HW_DEBUG__HW_11_DEBUG__SHIFT 0x0000000b
+#define PCIEP_HW_DEBUG__HW_12_DEBUG_MASK 0x00001000L
+#define PCIEP_HW_DEBUG__HW_12_DEBUG__SHIFT 0x0000000c
+#define PCIEP_HW_DEBUG__HW_13_DEBUG_MASK 0x00002000L
+#define PCIEP_HW_DEBUG__HW_13_DEBUG__SHIFT 0x0000000d
+#define PCIEP_HW_DEBUG__HW_14_DEBUG_MASK 0x00004000L
+#define PCIEP_HW_DEBUG__HW_14_DEBUG__SHIFT 0x0000000e
+#define PCIEP_HW_DEBUG__HW_15_DEBUG_MASK 0x00008000L
+#define PCIEP_HW_DEBUG__HW_15_DEBUG__SHIFT 0x0000000f
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR_MASK 0x000000ffL
+#define PCIE_P_MISC_STATUS__P_DESKEW_ERR__SHIFT 0x00000000
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR_MASK 0xffff0000L
+#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR__SHIFT 0x00000010
+#define PCIE_PORT_DATA__PCIE_DATA_MASK 0xffffffffL
+#define PCIE_PORT_DATA__PCIE_DATA__SHIFT 0x00000000
+#define PCIE_PORT_INDEX__PCIE_INDEX_MASK 0x000000ffL
+#define PCIE_PORT_INDEX__PCIE_INDEX__SHIFT 0x00000000
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S_MASK 0x00007f00L
+#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S__SHIFT 0x00000008
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE_MASK 0x00000002L
+#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE__SHIFT 0x00000001
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN_MASK 0x00000004L
+#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN__SHIFT 0x00000002
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN_MASK 0x00000008L
+#define PCIEP_PORT_CNTL__NATIVE_PME_EN__SHIFT 0x00000003
+#define PCIEP_PORT_CNTL__PMI_BM_DIS_MASK 0x00000020L
+#define PCIEP_PORT_CNTL__PMI_BM_DIS__SHIFT 0x00000005
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN_MASK 0x00000010L
+#define PCIEP_PORT_CNTL__PWR_FAULT_EN__SHIFT 0x00000004
+#define PCIEP_PORT_CNTL__SEQNUM_DEBUG_MODE_MASK 0x00000040L
+#define PCIEP_PORT_CNTL__SEQNUM_DEBUG_MODE__SHIFT 0x00000006
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN_MASK 0x00000001L
+#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN__SHIFT 0x00000000
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH_MASK 0x0000007eL
+#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH__SHIFT 0x00000001
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL_MASK 0x00000001L
+#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL__SHIFT 0x00000000
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT_MASK 0x000f0000L
+#define PCIE_PRBS_CLR__PRBS_CHECKER_DEBUG_BUS_SELECT__SHIFT 0x00000010
+#define PCIE_PRBS_CLR__PRBS_CLR_MASK 0x0000ffffL
+#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8__SHIFT 0x00000000
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9_MASK 0xffffffffL
+#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9__SHIFT 0x00000000
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN_MASK 0x0000ffffL
+#define PCIE_PRBS_FREERUN__PRBS_FREERUN__SHIFT 0x00000000
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT_MASK 0x000000ffL
+#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT__SHIFT 0x00000000
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT_MASK 0xffffffffL
+#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT__SHIFT 0x00000000
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL_MASK 0x00000010L
+#define PCIE_PRBS_MISC__PRBS_8BIT_SEL__SHIFT 0x00000004
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK_MASK 0xffff0000L
+#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK__SHIFT 0x00000010
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM_MASK 0x00000060L
+#define PCIE_PRBS_MISC__PRBS_COMMA_NUM__SHIFT 0x00000005
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE_MASK 0x0000c000L
+#define PCIE_PRBS_MISC__PRBS_DATA_RATE__SHIFT 0x0000000e
+#define PCIE_PRBS_MISC__PRBS_EN_MASK 0x00000001L
+#define PCIE_PRBS_MISC__PRBS_EN__SHIFT 0x00000000
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT_MASK 0x00000f80L
+#define PCIE_PRBS_MISC__PRBS_LOCK_CNT__SHIFT 0x00000007
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE_MASK 0x00000006L
+#define PCIE_PRBS_MISC__PRBS_TEST_MODE__SHIFT 0x00000001
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE_MASK 0x00000008L
+#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE__SHIFT 0x00000003
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT_MASK 0x0000ffffL
+#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT__SHIFT 0x00000000
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED_MASK 0xffff0000L
+#define PCIE_PRBS_STATUS1__PRBS_LOCKED__SHIFT 0x00000010
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE_MASK 0x0000ffffL
+#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE__SHIFT 0x00000000
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN_MASK 0x3fffffffL
+#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN__SHIFT 0x00000000
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX_MASK 0x0000ff00L
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX__SHIFT 0x00000008
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN_MASK 0x000000ffL
+#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN__SHIFT 0x00000000
+#define PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xffffffffL
+#define PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x00000000
+#define PCIEP_SCRATCH__PCIEP_SCRATCH_MASK 0xffffffffL
+#define PCIEP_SCRATCH__PCIEP_SCRATCH__SHIFT 0x00000000
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS_MASK 0x00008000L
+#define PCIEP_STRAP_LC__STRAP_AUTO_RC_SPEED_NEGOTIATION_DIS__SHIFT 0x0000000f
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET_MASK 0x00000800L
+#define PCIEP_STRAP_LC__STRAP_BYPASS_RCVR_DET__SHIFT 0x0000000b
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS_MASK 0x00001000L
+#define PCIEP_STRAP_LC__STRAP_COMPLIANCE_DIS__SHIFT 0x0000000c
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE_MASK 0x00002000L
+#define PCIEP_STRAP_LC__STRAP_FORCE_COMPLIANCE__SHIFT 0x0000000d
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT_MASK 0x00000003L
+#define PCIEP_STRAP_LC__STRAP_FTS_yTSx_COUNT__SHIFT 0x00000000
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION_MASK 0x00070000L
+#define PCIEP_STRAP_LC__STRAP_LANE_NEGOTIATION__SHIFT 0x00000010
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT_MASK 0x0000000cL
+#define PCIEP_STRAP_LC__STRAP_LONG_yTSx_COUNT__SHIFT 0x00000002
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT_MASK 0x00000030L
+#define PCIEP_STRAP_LC__STRAP_MED_yTSx_COUNT__SHIFT 0x00000004
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES_MASK 0x00004000L
+#define PCIEP_STRAP_LC__STRAP_REVERSE_LC_LANES__SHIFT 0x0000000e
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT_MASK 0x000000c0L
+#define PCIEP_STRAP_LC__STRAP_SHORT_yTSx_COUNT__SHIFT 0x00000006
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL_MASK 0x00000700L
+#define PCIEP_STRAP_LC__STRAP_SKIP_INTERVAL__SHIFT 0x00000008
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN_MASK 0x00000002L
+#define PCIEP_STRAP_MISC__STRAP_E2E_PREFIX_EN__SHIFT 0x00000001
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED_MASK 0x00000004L
+#define PCIEP_STRAP_MISC__STRAP_EXTENDED_FMT_SUPPORTED__SHIFT 0x00000002
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED_MASK 0x00000018L
+#define PCIEP_STRAP_MISC__STRAP_OBFF_SUPPORTED__SHIFT 0x00000003
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES_MASK 0x00000001L
+#define PCIEP_STRAP_MISC__STRAP_REVERSE_LANES__SHIFT 0x00000000
+#define PCIE_RESERVED__PCIE_RESERVED_MASK 0xffffffffL
+#define PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x00000000
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR__SHIFT 0x00000003
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x00000000
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR_MASK 0x00000020L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR__SHIFT 0x00000005
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR__SHIFT 0x00000004
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR__SHIFT 0x00000001
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR__SHIFT 0x00000002
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR_MASK 0x00000010L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR__SHIFT 0x00000004
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR_MASK 0x00000008L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR__SHIFT 0x00000003
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR_MASK 0x00000004L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR__SHIFT 0x00000002
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR_MASK 0x00000001L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR__SHIFT 0x00000000
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR_MASK 0x00000002L
+#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR__SHIFT 0x00000001
+#define PCIE_RX_CNTL__RX_FC_INIT_FROM_REG_MASK 0x00008000L
+#define PCIE_RX_CNTL__RX_FC_INIT_FROM_REG__SHIFT 0x0000000f
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK_MASK 0x00004000L
+#define PCIE_RX_CNTL__RX_GEN_ONE_NAK__SHIFT 0x0000000e
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR_MASK 0x00001000L
+#define PCIE_RX_CNTL__RX_IGNORE_AT_ERR__SHIFT 0x0000000c
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR_MASK 0x00000002L
+#define PCIE_RX_CNTL__RX_IGNORE_BE_ERR__SHIFT 0x00000001
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR_MASK 0x00000010L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_ERR__SHIFT 0x00000004
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR_MASK 0x00000400L
+#define PCIE_RX_CNTL__RX_IGNORE_CFG_UR__SHIFT 0x0000000a
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR_MASK 0x00000020L
+#define PCIE_RX_CNTL__RX_IGNORE_CPL_ERR__SHIFT 0x00000005
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR_MASK 0x00800000L
+#define PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR__SHIFT 0x00000017
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR_MASK 0x00000008L
+#define PCIE_RX_CNTL__RX_IGNORE_CRC_ERR__SHIFT 0x00000003
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR_MASK 0x00000040L
+#define PCIE_RX_CNTL__RX_IGNORE_EP_ERR__SHIFT 0x00000006
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L
+#define PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x00000018
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR_MASK 0x00000001L
+#define PCIE_RX_CNTL__RX_IGNORE_IO_ERR__SHIFT 0x00000000
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR_MASK 0x00000800L
+#define PCIE_RX_CNTL__RX_IGNORE_IO_UR__SHIFT 0x0000000b
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR_MASK 0x00000080L
+#define PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR__SHIFT 0x00000007
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L
+#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x00000008
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L
+#define PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x00000016
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR_MASK 0x00000004L
+#define PCIE_RX_CNTL__RX_IGNORE_MSG_ERR__SHIFT 0x00000002
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L
+#define PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x00000019
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L
+#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x00000015
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L
+#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x00000009
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL_MASK 0x00002000L
+#define PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL__SHIFT 0x0000000d
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L
+#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x00000014
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK 0x00070000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE_MASK 0x00080000L
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE__SHIFT 0x00000013
+#define PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH__SHIFT 0x00000010
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD_MASK 0x00000fffL
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD__SHIFT 0x00000000
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH_MASK 0x00ff0000L
+#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH__SHIFT 0x00000010
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM_MASK 0x00000fffL
+#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2__SHIFT 0x00000000
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3_MASK 0xffffffffL
+#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3__SHIFT 0x00000000
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED_MASK 0xffffffffL
+#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED__SHIFT 0x00000000
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK_MASK 0xffffffffL
+#define PCIE_RX_NUM_NAK__RX_NUM_NAK__SHIFT 0x00000000
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA_MASK 0x00ffffffL
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA__SHIFT 0x00000000
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS_MASK 0x01000000L
+#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS__SHIFT 0x00000018
+#define PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xffffffffL
+#define PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x00000000
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F0__STRAP_F0_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F0__STRAP_F0_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F0__STRAP_F0_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F0__STRAP_F0_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F0__STRAP_F0_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F0__STRAP_F0_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F0__STRAP_F0_EN_MASK 0x00000001L
+#define PCIE_STRAP_F0__STRAP_F0_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F0__STRAP_F0_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F0__STRAP_F0_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F0__STRAP_F0_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F0__STRAP_F0_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F0__STRAP_F0_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F0__STRAP_F0_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F1__STRAP_F1_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F1__STRAP_F1_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F1__STRAP_F1_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F1__STRAP_F1_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F1__STRAP_F1_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F1__STRAP_F1_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F1__STRAP_F1_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F1__STRAP_F1_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F1__STRAP_F1_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F1__STRAP_F1_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F1__STRAP_F1_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F1__STRAP_F1_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F1__STRAP_F1_EN_MASK 0x00000001L
+#define PCIE_STRAP_F1__STRAP_F1_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F1__STRAP_F1_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F1__STRAP_F1_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F1__STRAP_F1_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F1__STRAP_F1_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F1__STRAP_F1_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F1__STRAP_F1_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F1__STRAP_F1_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F1__STRAP_F1_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F1__STRAP_F1_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F1__STRAP_F1_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F1__STRAP_F1_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F1__STRAP_F1_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F2__STRAP_F2_ACS_EN_MASK 0x00000040L
+#define PCIE_STRAP_F2__STRAP_F2_ACS_EN__SHIFT 0x00000006
+#define PCIE_STRAP_F2__STRAP_F2_AER_EN_MASK 0x00000020L
+#define PCIE_STRAP_F2__STRAP_F2_AER_EN__SHIFT 0x00000005
+#define PCIE_STRAP_F2__STRAP_F2_ATS_EN_MASK 0x00000400L
+#define PCIE_STRAP_F2__STRAP_F2_ATS_EN__SHIFT 0x0000000a
+#define PCIE_STRAP_F2__STRAP_F2_BAR_EN_MASK 0x00000080L
+#define PCIE_STRAP_F2__STRAP_F2_BAR_EN__SHIFT 0x00000007
+#define PCIE_STRAP_F2__STRAP_F2_DPA_EN_MASK 0x00000200L
+#define PCIE_STRAP_F2__STRAP_F2_DPA_EN__SHIFT 0x00000009
+#define PCIE_STRAP_F2__STRAP_F2_DSN_EN_MASK 0x00000010L
+#define PCIE_STRAP_F2__STRAP_F2_DSN_EN__SHIFT 0x00000004
+#define PCIE_STRAP_F2__STRAP_F2_EN_MASK 0x00000001L
+#define PCIE_STRAP_F2__STRAP_F2_EN__SHIFT 0x00000000
+#define PCIE_STRAP_F2__STRAP_F2_LEGACY_DEVICE_TYPE_EN_MASK 0x00000002L
+#define PCIE_STRAP_F2__STRAP_F2_LEGACY_DEVICE_TYPE_EN__SHIFT 0x00000001
+#define PCIE_STRAP_F2__STRAP_F2_MSI_EN_MASK 0x00000004L
+#define PCIE_STRAP_F2__STRAP_F2_MSI_EN__SHIFT 0x00000002
+#define PCIE_STRAP_F2__STRAP_F2_PAGE_REQ_EN_MASK 0x00000800L
+#define PCIE_STRAP_F2__STRAP_F2_PAGE_REQ_EN__SHIFT 0x0000000b
+#define PCIE_STRAP_F2__STRAP_F2_PASID_EN_MASK 0x00001000L
+#define PCIE_STRAP_F2__STRAP_F2_PASID_EN__SHIFT 0x0000000c
+#define PCIE_STRAP_F2__STRAP_F2_PWR_EN_MASK 0x00000100L
+#define PCIE_STRAP_F2__STRAP_F2_PWR_EN__SHIFT 0x00000008
+#define PCIE_STRAP_F2__STRAP_F2_VC_EN_MASK 0x00000008L
+#define PCIE_STRAP_F2__STRAP_F2_VC_EN__SHIFT 0x00000003
+#define PCIE_STRAP_F3__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F3__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F4__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F4__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F5__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F5__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F6__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F6__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_F7__RESERVED_MASK 0xffffffffL
+#define PCIE_STRAP_F7__RESERVED__SHIFT 0x00000000
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN_MASK 0x00000080L
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_DBG_I2C_EN__SHIFT 0x00000007
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR_MASK 0x0000007fL
+#define PCIE_STRAP_I2C_BD__STRAP_BIF_I2C_SLV_ADR__SHIFT 0x00000000
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE_MASK 0x00000002L
+#define PCIE_STRAP_MISC2__STRAP_GEN2_COMPLIANCE__SHIFT 0x00000001
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE_MASK 0x00000008L
+#define PCIE_STRAP_MISC2__STRAP_GEN3_COMPLIANCE__SHIFT 0x00000003
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN_MASK 0x00000004L
+#define PCIE_STRAP_MISC2__STRAP_MSTCPL_TIMEOUT_EN__SHIFT 0x00000002
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN_MASK 0x01000000L
+#define PCIE_STRAP_MISC__STRAP_CLK_PM_EN__SHIFT 0x00000018
+#define PCIE_STRAP_MISC__STRAP_ECN1P1_EN_MASK 0x02000000L
+#define PCIE_STRAP_MISC__STRAP_ECN1P1_EN__SHIFT 0x00000019
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT_MASK 0x04000000L
+#define PCIE_STRAP_MISC__STRAP_EXT_VC_COUNT__SHIFT 0x0000001a
+#define PCIE_STRAP_MISC__STRAP_FLR_EN_MASK 0x40000000L
+#define PCIE_STRAP_MISC__STRAP_FLR_EN__SHIFT 0x0000001e
+#define PCIE_STRAP_MISC__STRAP_LINK_CONFIG_MASK 0x0000000fL
+#define PCIE_STRAP_MISC__STRAP_LINK_CONFIG__SHIFT 0x00000000
+#define PCIE_STRAP_MISC__STRAP_MAX_PASID_WIDTH_MASK 0x00001f00L
+#define PCIE_STRAP_MISC__STRAP_MAX_PASID_WIDTH__SHIFT 0x00000008
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN_MASK 0x20000000L
+#define PCIE_STRAP_MISC__STRAP_MST_ADR64_EN__SHIFT 0x0000001d
+#define PCIE_STRAP_MISC__STRAP_PASID_EXE_PERMISSION_SUPPORTED_MASK 0x00002000L
+#define PCIE_STRAP_MISC__STRAP_PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x0000000d
+#define PCIE_STRAP_MISC__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED_MASK 0x00008000L
+#define PCIE_STRAP_MISC__STRAP_PASID_GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x0000000f
+#define PCIE_STRAP_MISC__STRAP_PASID_PRIV_MODE_SUPPORTED_MASK 0x00004000L
+#define PCIE_STRAP_MISC__STRAP_PASID_PRIV_MODE_SUPPORTED__SHIFT 0x0000000e
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL_MASK 0x10000000L
+#define PCIE_STRAP_MISC__STRAP_REVERSE_ALL__SHIFT 0x0000001c
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START_MASK 0x00000001L
+#define PCIE_STRAP_PI__STRAP_QUICKSIM_START__SHIFT 0x00000000
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE_MASK 0x20000000L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_MODE__SHIFT 0x0000001d
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN_MASK 0x10000000L
+#define PCIE_STRAP_PI__STRAP_TEST_TOGGLE_PATTERN__SHIFT 0x0000001c
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_MASK 0x00000fffL
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE_MASK 0x00001000L
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE__SHIFT 0x0000000c
+#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT__SHIFT 0x00000000
+#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS_MASK 0x00400000L
+#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS__SHIFT 0x00000016
+#define PCIE_TX_CNTL__TX_CPL_PASS_P_MASK 0x00100000L
+#define PCIE_TX_CNTL__TX_CPL_PASS_P__SHIFT 0x00000014
+#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS_MASK 0x00800000L
+#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS__SHIFT 0x00000017
+#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS_MASK 0x00008000L
+#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS__SHIFT 0x0000000f
+#define PCIE_TX_CNTL__TX_NP_PASS_P_MASK 0x00200000L
+#define PCIE_TX_CNTL__TX_NP_PASS_P__SHIFT 0x00000015
+#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS_MASK 0x00004000L
+#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS__SHIFT 0x0000000e
+#define PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L
+#define PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0x0000000c
+#define PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000c00L
+#define PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0x0000000a
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0_MASK 0x00000700L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0__SHIFT 0x00000008
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1_MASK 0x07000000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1__SHIFT 0x00000018
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0_MASK 0x00000070L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0__SHIFT 0x00000004
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1_MASK 0x00700000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1__SHIFT 0x00000014
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0_MASK 0x00000007L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1_MASK 0x00070000L
+#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD_MASK 0x00000fffL
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH_MASK 0x00ff0000L
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD_MASK 0x00100000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD__SHIFT 0x00000014
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH_MASK 0x00200000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH__SHIFT 0x00000015
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD_MASK 0x00040000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD__SHIFT 0x00000012
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH_MASK 0x00080000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH__SHIFT 0x00000013
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD_MASK 0x00010000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD__SHIFT 0x00000010
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH_MASK 0x00020000L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH__SHIFT 0x00000011
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD_MASK 0x00000010L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD__SHIFT 0x00000004
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH_MASK 0x00000020L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH__SHIFT 0x00000005
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD_MASK 0x00000004L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD__SHIFT 0x00000002
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH_MASK 0x00000008L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH__SHIFT 0x00000003
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD_MASK 0x00000001L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD__SHIFT 0x00000000
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH_MASK 0x00000002L
+#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH__SHIFT 0x00000001
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2__SHIFT 0x00000000
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3_MASK 0xffffffffL
+#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3__SHIFT 0x00000000
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM_MASK 0x00000007L
+#define PCIE_TX_REPLAY__TX_REPLAY_NUM__SHIFT 0x00000000
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_MASK 0xffff0000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE_MASK 0x00008000L
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE__SHIFT 0x0000000f
+#define PCIE_TX_REPLAY__TX_REPLAY_TIMER__SHIFT 0x00000010
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000ff00L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x00000008
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000f8L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x00000003
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L
+#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x00000000
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN_MASK 0x80000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN__SHIFT 0x0000001f
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_MASK 0x3f000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP__SHIFT 0x00000018
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN_MASK 0x40000000L
+#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN__SHIFT 0x0000001e
+#define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK 0x0fff0000L
+#define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x00000010
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ_MASK 0x00000fffL
+#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ__SHIFT 0x00000000
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA_MASK 0x00ffffffL
+#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA__SHIFT 0x00000000
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN_MASK 0x00000008L
+#define PCIE_WPR_CNTL__WPR_RESET_COR_EN__SHIFT 0x00000003
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN_MASK 0x00000001L
+#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN__SHIFT 0x00000000
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN_MASK 0x00000004L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN__SHIFT 0x00000002
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN_MASK 0x00000002L
+#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN__SHIFT 0x00000001
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN_MASK 0x00000040L
+#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN__SHIFT 0x00000006
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN_MASK 0x00000010L
+#define PCIE_WPR_CNTL__WPR_RESET_REG_EN__SHIFT 0x00000004
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN_MASK 0x00000020L
+#define PCIE_WPR_CNTL__WPR_RESET_STY_EN__SHIFT 0x00000005
+#define PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L
+#define PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x0000001f
+#define PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L
+#define PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x0000001f
+#define PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L
+#define PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x0000001f
+#define PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000fffffL
+#define PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x00000000
+#define PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L
+#define PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x0000001f
+#define PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000fffffL
+#define PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x00000000
+#define PEER_REG_RANGE0__END_ADDR_MASK 0xffff0000L
+#define PEER_REG_RANGE0__END_ADDR__SHIFT 0x00000010
+#define PEER_REG_RANGE0__START_ADDR_MASK 0x0000ffffL
+#define PEER_REG_RANGE0__START_ADDR__SHIFT 0x00000000
+#define PEER_REG_RANGE1__END_ADDR_MASK 0xffff0000L
+#define PEER_REG_RANGE1__END_ADDR__SHIFT 0x00000010
+#define PEER_REG_RANGE1__START_ADDR_MASK 0x0000ffffL
+#define PEER_REG_RANGE1__START_ADDR__SHIFT 0x00000000
+#define SLAVE_HANG_ERROR__AUDIO_HANG_ERROR_MASK 0x00000010L
+#define SLAVE_HANG_ERROR__AUDIO_HANG_ERROR__SHIFT 0x00000004
+#define SLAVE_HANG_ERROR__CEC_HANG_ERROR_MASK 0x00000020L
+#define SLAVE_HANG_ERROR__CEC_HANG_ERROR__SHIFT 0x00000005
+#define SLAVE_HANG_ERROR__HDP_HANG_ERROR_MASK 0x00000002L
+#define SLAVE_HANG_ERROR__HDP_HANG_ERROR__SHIFT 0x00000001
+#define SLAVE_HANG_ERROR__ROM_HANG_ERROR_MASK 0x00000008L
+#define SLAVE_HANG_ERROR__ROM_HANG_ERROR__SHIFT 0x00000003
+#define SLAVE_HANG_ERROR__SRBM_HANG_ERROR_MASK 0x00000001L
+#define SLAVE_HANG_ERROR__SRBM_HANG_ERROR__SHIFT 0x00000000
+#define SLAVE_HANG_ERROR__VGA_HANG_ERROR_MASK 0x00000004L
+#define SLAVE_HANG_ERROR__VGA_HANG_ERROR__SHIFT 0x00000002
+#define SLAVE_HANG_PROTECTION_CNTL__HANG_PROTECTION_TIMER_SEL_MASK 0x0000000eL
+#define SLAVE_HANG_PROTECTION_CNTL__HANG_PROTECTION_TIMER_SEL__SHIFT 0x00000001
+#define SLAVE_REQ_CREDIT_CNTL__BIF_AZ_REQ_CREDIT_MASK 0x00100000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_AZ_REQ_CREDIT__SHIFT 0x00000014
+#define SLAVE_REQ_CREDIT_CNTL__BIF_HDP_REQ_CREDIT_MASK 0x00007c00L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_HDP_REQ_CREDIT__SHIFT 0x0000000a
+#define SLAVE_REQ_CREDIT_CNTL__BIF_ROM_REQ_CREDIT_MASK 0x00008000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_ROM_REQ_CREDIT__SHIFT 0x0000000f
+#define SLAVE_REQ_CREDIT_CNTL__BIF_SRBM_REQ_CREDIT_MASK 0x0000001fL
+#define SLAVE_REQ_CREDIT_CNTL__BIF_SRBM_REQ_CREDIT__SHIFT 0x00000000
+#define SLAVE_REQ_CREDIT_CNTL__BIF_VGA_REQ_CREDIT_MASK 0x000001e0L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_VGA_REQ_CREDIT__SHIFT 0x00000005
+#define SLAVE_REQ_CREDIT_CNTL__BIF_XDMA_REQ_CREDIT_MASK 0x7e000000L
+#define SLAVE_REQ_CREDIT_CNTL__BIF_XDMA_REQ_CREDIT__SHIFT 0x00000019
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_A_MASK 0x00000001L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_A__SHIFT 0x00000000
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_CNTL_EN_MASK 0x00001000L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_CNTL_EN__SHIFT 0x0000000c
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_MODE_MASK 0x00000004L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_MODE__SHIFT 0x00000002
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SCHMEN_MASK 0x00000800L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SCHMEN__SHIFT 0x0000000b
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SEL_MASK 0x00000002L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SEL__SHIFT 0x00000001
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SLEWN_MASK 0x00000200L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SLEWN__SHIFT 0x00000009
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN0_MASK 0x00000020L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN0__SHIFT 0x00000005
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN1_MASK 0x00000040L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN1__SHIFT 0x00000006
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN2_MASK 0x00000080L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN2__SHIFT 0x00000007
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN3_MASK 0x00000100L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SN3__SHIFT 0x00000008
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SPARE_MASK 0x00000018L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_SPARE__SHIFT 0x00000003
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_WAKE_MASK 0x00000400L
+#define SMBCLK_PAD_CNTL__SMBCLK_PAD_WAKE__SHIFT 0x0000000a
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_A_MASK 0x00000001L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_A__SHIFT 0x00000000
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_CNTL_EN_MASK 0x00001000L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_CNTL_EN__SHIFT 0x0000000c
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_MODE_MASK 0x00000004L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_MODE__SHIFT 0x00000002
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SCHMEN_MASK 0x00000800L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SCHMEN__SHIFT 0x0000000b
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SEL_MASK 0x00000002L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SEL__SHIFT 0x00000001
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SLEWN_MASK 0x00000200L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SLEWN__SHIFT 0x00000009
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN0_MASK 0x00000020L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN0__SHIFT 0x00000005
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN1_MASK 0x00000040L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN1__SHIFT 0x00000006
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN2_MASK 0x00000080L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN2__SHIFT 0x00000007
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN3_MASK 0x00000100L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SN3__SHIFT 0x00000008
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SPARE_MASK 0x00000018L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_SPARE__SHIFT 0x00000003
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_WAKE_MASK 0x00000400L
+#define SMBDAT_PAD_CNTL__SMBDAT_PAD_WAKE__SHIFT 0x0000000a
+#define SMBUS_BACO_DUMMY__SMBUS_BACO_DUMMY_DATA_MASK 0xffffffffL
+#define SMBUS_BACO_DUMMY__SMBUS_BACO_DUMMY_DATA__SHIFT 0x00000000
+
+#endif
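
(Note, not part of the patch itself: the generated sh_mask headers above, and the dce_6_0 headers added below, describe every register field as a *_MASK / __SHIFT pair. As a minimal, self-contained sketch of how such pairs are conventionally consumed — GET_FIELD/SET_FIELD here are hypothetical helpers for illustration, not macros defined by these headers or by this patch — a field is read by masking then shifting down, and updated by clearing the masked bits and OR-ing in the shifted new value:)

    /* Illustrative only; values copied from the PCIE_TX_SEQ defines above. */
    #include <stdint.h>
    #include <stdio.h>

    #define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK   0x0fff0000L
    #define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x00000010

    /* Hypothetical helpers built on the _MASK/__SHIFT naming convention. */
    #define GET_FIELD(val, field) \
            (((val) & field##_MASK) >> field##__SHIFT)
    #define SET_FIELD(val, field, fv) \
            (((val) & ~field##_MASK) | \
             (((uint32_t)(fv) << field##__SHIFT) & field##_MASK))

    int main(void)
    {
            uint32_t reg = 0x0abc0123;  /* pretend value read from PCIE_TX_SEQ */
            uint32_t ackd = GET_FIELD(reg, PCIE_TX_SEQ__TX_ACKD_SEQ);

            /* Bump the acked sequence field without disturbing other bits. */
            reg = SET_FIELD(reg, PCIE_TX_SEQ__TX_ACKD_SEQ, ackd + 1);
            printf("ackd_seq=0x%03x new reg=0x%08x\n", ackd, reg);
            return 0;
    }

(The same pattern applies to every _MASK/__SHIFT pair in these files; the ix*/mm* defines below are register indices rather than field descriptors and are used as offsets, not masks.)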
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
index 09a7df17570d..09a7df17570d 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
index 1ddc4183a1c9..1ddc4183a1c9 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
new file mode 100644
index 000000000000..ae798f768853
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -0,0 +1,4457 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_6_0_D_H
+#define DCE_6_0_D_H
+
+#define ixATTR00 0x0000
+#define ixATTR01 0x0001
+#define ixATTR02 0x0002
+#define ixATTR03 0x0003
+#define ixATTR04 0x0004
+#define ixATTR05 0x0005
+#define ixATTR06 0x0006
+#define ixATTR07 0x0007
+#define ixATTR08 0x0008
+#define ixATTR09 0x0009
+#define ixATTR0A 0x000A
+#define ixATTR0B 0x000B
+#define ixATTR0C 0x000C
+#define ixATTR0D 0x000D
+#define ixATTR0E 0x000E
+#define ixATTR0F 0x000F
+#define ixATTR10 0x0010
+#define ixATTR11 0x0011
+#define ixATTR12 0x0012
+#define ixATTR13 0x0013
+#define ixATTR14 0x0014
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR10 0x000B
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR11 0x000C
+#define ixAUDIO_DESCRIPTOR12 0x000D
+#define ixAUDIO_DESCRIPTOR13 0x000E
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000A
+#define ixAZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZALIA_F0_CODEC_CONVERTER_PIN_DEBUG 0x0000
+#define ixAZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002A
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002B
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002C
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002D
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002E
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002F
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003A
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003B
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003C
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003D
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003E
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003F
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005A
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005B
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005C
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005D
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005E
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005F
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270D
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270E
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273E
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2F09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2F0B
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2F0A
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17FF
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1F05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1F0F
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1F0B
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1F04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1F0A
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377C
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377B
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377A
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371C
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371D
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371E
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371F
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3F09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3F0C
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3F0E
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0F02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0F04
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0F00
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378A
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378B
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378C
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378D
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378E
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378F
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZALIA_STREAM_DEBUG 0x0005
+#define ixAZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixCRT00 0x0000
+#define ixCRT01 0x0001
+#define ixCRT02 0x0002
+#define ixCRT03 0x0003
+#define ixCRT04 0x0004
+#define ixCRT05 0x0005
+#define ixCRT06 0x0006
+#define ixCRT07 0x0007
+#define ixCRT08 0x0008
+#define ixCRT09 0x0009
+#define ixCRT0A 0x000A
+#define ixCRT0B 0x000B
+#define ixCRT0C 0x000C
+#define ixCRT0D 0x000D
+#define ixCRT0E 0x000E
+#define ixCRT0F 0x000F
+#define ixCRT10 0x0010
+#define ixCRT11 0x0011
+#define ixCRT12 0x0012
+#define ixCRT13 0x0013
+#define ixCRT14 0x0014
+#define ixCRT15 0x0015
+#define ixCRT16 0x0016
+#define ixCRT17 0x0017
+#define ixCRT18 0x0018
+#define ixCRT1E 0x001E
+#define ixCRT1F 0x001F
+#define ixCRT22 0x0022
+#define ixDCIO_DEBUG10 0x0010
+#define ixDCIO_DEBUG1 0x0001
+#define ixDCIO_DEBUG11 0x0011
+#define ixDCIO_DEBUG12 0x0012
+#define ixDCIO_DEBUG13 0x0013
+#define ixDCIO_DEBUG2 0x0002
+#define ixDCIO_DEBUG3 0x0003
+#define ixDCIO_DEBUG4 0x0004
+#define ixDCIO_DEBUG5 0x0005
+#define ixDCIO_DEBUG6 0x0006
+#define ixDCIO_DEBUG7 0x0007
+#define ixDCIO_DEBUG8 0x0008
+#define ixDCIO_DEBUG9 0x0009
+#define ixDCIO_DEBUGA 0x000A
+#define ixDCIO_DEBUGB 0x000B
+#define ixDCIO_DEBUGC 0x000C
+#define ixDCIO_DEBUGD 0x000D
+#define ixDCIO_DEBUGE 0x000E
+#define ixDCIO_DEBUGF 0x000F
+#define ixDCIO_DEBUG_ID 0x0000
+#define ixDMIF_DEBUG02_CORE0 0x0002
+#define ixDMIF_DEBUG02_CORE1 0x000A
+#define ixDP_AUX1_DEBUG_A 0x0010
+#define ixDP_AUX1_DEBUG_B 0x0011
+#define ixDP_AUX1_DEBUG_C 0x0012
+#define ixDP_AUX1_DEBUG_D 0x0013
+#define ixDP_AUX1_DEBUG_E 0x0014
+#define ixDP_AUX1_DEBUG_F 0x0015
+#define ixDP_AUX1_DEBUG_G 0x0016
+#define ixDP_AUX1_DEBUG_H 0x0017
+#define ixDP_AUX1_DEBUG_I 0x0018
+#define ixDP_AUX2_DEBUG_A 0x0020
+#define ixDP_AUX2_DEBUG_B 0x0021
+#define ixDP_AUX2_DEBUG_C 0x0022
+#define ixDP_AUX2_DEBUG_D 0x0023
+#define ixDP_AUX2_DEBUG_E 0x0024
+#define ixDP_AUX2_DEBUG_F 0x0025
+#define ixDP_AUX2_DEBUG_G 0x0026
+#define ixDP_AUX2_DEBUG_H 0x0027
+#define ixDP_AUX2_DEBUG_I 0x0028
+#define ixDP_AUX3_DEBUG_A 0x0030
+#define ixDP_AUX3_DEBUG_B 0x0031
+#define ixDP_AUX3_DEBUG_C 0x0032
+#define ixDP_AUX3_DEBUG_D 0x0033
+#define ixDP_AUX3_DEBUG_E 0x0034
+#define ixDP_AUX3_DEBUG_F 0x0035
+#define ixDP_AUX3_DEBUG_G 0x0036
+#define ixDP_AUX3_DEBUG_H 0x0037
+#define ixDP_AUX3_DEBUG_I 0x0038
+#define ixDP_AUX4_DEBUG_A 0x0040
+#define ixDP_AUX4_DEBUG_B 0x0041
+#define ixDP_AUX4_DEBUG_C 0x0042
+#define ixDP_AUX4_DEBUG_D 0x0043
+#define ixDP_AUX4_DEBUG_E 0x0044
+#define ixDP_AUX4_DEBUG_F 0x0045
+#define ixDP_AUX4_DEBUG_G 0x0046
+#define ixDP_AUX4_DEBUG_H 0x0047
+#define ixDP_AUX4_DEBUG_I 0x0048
+#define ixDP_AUX5_DEBUG_A 0x0070
+#define ixDP_AUX5_DEBUG_B 0x0071
+#define ixDP_AUX5_DEBUG_C 0x0072
+#define ixDP_AUX5_DEBUG_D 0x0073
+#define ixDP_AUX5_DEBUG_E 0x0074
+#define ixDP_AUX5_DEBUG_F 0x0075
+#define ixDP_AUX5_DEBUG_G 0x0076
+#define ixDP_AUX5_DEBUG_H 0x0077
+#define ixDP_AUX5_DEBUG_I 0x0078
+#define ixDP_AUX6_DEBUG_A 0x0080
+#define ixDP_AUX6_DEBUG_B 0x0081
+#define ixDP_AUX6_DEBUG_C 0x0082
+#define ixDP_AUX6_DEBUG_D 0x0083
+#define ixDP_AUX6_DEBUG_E 0x0084
+#define ixDP_AUX6_DEBUG_F 0x0085
+#define ixDP_AUX6_DEBUG_G 0x0086
+#define ixDP_AUX6_DEBUG_H 0x0087
+#define ixDP_AUX6_DEBUG_I 0x0088
+#define ixFMT_DEBUG0 0x0001
+#define ixFMT_DEBUG1 0x0002
+#define ixFMT_DEBUG2 0x0003
+#define ixFMT_DEBUG_ID 0x0000
+#define ixGRA00 0x0000
+#define ixGRA01 0x0001
+#define ixGRA02 0x0002
+#define ixGRA03 0x0003
+#define ixGRA04 0x0004
+#define ixGRA05 0x0005
+#define ixGRA06 0x0006
+#define ixGRA07 0x0007
+#define ixGRA08 0x0008
+#define ixIDDCCIF02_DBG_DCCIF_C 0x0009
+#define ixIDDCCIF04_DBG_DCCIF_E 0x000B
+#define ixIDDCCIF05_DBG_DCCIF_F 0x000C
+#define ixMVP_DEBUG_12 0x000C
+#define ixMVP_DEBUG_13 0x000D
+#define ixMVP_DEBUG_14 0x000E
+#define ixMVP_DEBUG_15 0x000F
+#define ixMVP_DEBUG_16 0x0010
+#define ixMVP_DEBUG_17 0x0011
+#define ixSEQ00 0x0000
+#define ixSEQ01 0x0001
+#define ixSEQ02 0x0002
+#define ixSEQ03 0x0003
+#define ixSEQ04 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION10 0x000F
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000A
+#define ixSINK_DESCRIPTION6 0x000B
+#define ixSINK_DESCRIPTION7 0x000C
+#define ixSINK_DESCRIPTION8 0x000D
+#define ixSINK_DESCRIPTION9 0x000E
+#define ixVGADCC_DBG_DCCIF_C 0x007E
+#define mmABM_TEST_DEBUG_DATA 0x169F
+#define mmABM_TEST_DEBUG_INDEX 0x169E
+#define mmAFMT_60958_0 0x1C41
+#define mmAFMT_60958_1 0x1C42
+#define mmAFMT_60958_2 0x1C48
+#define mmAFMT_AUDIO_CRC_CONTROL 0x1C43
+#define mmAFMT_AUDIO_CRC_RESULT 0x1C49
+#define mmAFMT_AUDIO_DBG_DTO_CNTL 0x1C52
+#define mmAFMT_AUDIO_INFO0 0x1C3F
+#define mmAFMT_AUDIO_INFO1 0x1C40
+#define mmAFMT_AUDIO_PACKET_CONTROL 0x1C4B
+#define mmAFMT_AUDIO_PACKET_CONTROL2 0x1C17
+#define mmAFMT_AUDIO_SRC_CONTROL 0x1C4F
+#define mmAFMT_AVI_INFO0 0x1C21
+#define mmAFMT_AVI_INFO1 0x1C22
+#define mmAFMT_AVI_INFO2 0x1C23
+#define mmAFMT_AVI_INFO3 0x1C24
+#define mmAFMT_GENERIC_0 0x1C28
+#define mmAFMT_GENERIC_1 0x1C29
+#define mmAFMT_GENERIC_2 0x1C2A
+#define mmAFMT_GENERIC_3 0x1C2B
+#define mmAFMT_GENERIC_4 0x1C2C
+#define mmAFMT_GENERIC_5 0x1C2D
+#define mmAFMT_GENERIC_6 0x1C2E
+#define mmAFMT_GENERIC_7 0x1C2F
+#define mmAFMT_GENERIC_HDR 0x1C27
+#define mmAFMT_INFOFRAME_CONTROL0 0x1C4D
+#define mmAFMT_INTERRUPT_STATUS 0x1C14
+#define mmAFMT_ISRC1_0 0x1C18
+#define mmAFMT_ISRC1_1 0x1C19
+#define mmAFMT_ISRC1_2 0x1C1A
+#define mmAFMT_ISRC1_3 0x1C1B
+#define mmAFMT_ISRC1_4 0x1C1C
+#define mmAFMT_ISRC2_0 0x1C1D
+#define mmAFMT_ISRC2_1 0x1C1E
+#define mmAFMT_ISRC2_2 0x1C1F
+#define mmAFMT_ISRC2_3 0x1C20
+#define mmAFMT_MPEG_INFO0 0x1C25
+#define mmAFMT_MPEG_INFO1 0x1C26
+#define mmAFMT_RAMP_CONTROL0 0x1C44
+#define mmAFMT_RAMP_CONTROL1 0x1C45
+#define mmAFMT_RAMP_CONTROL2 0x1C46
+#define mmAFMT_RAMP_CONTROL3 0x1C47
+#define mmAFMT_STATUS 0x1C4A
+#define mmAFMT_VBI_PACKET_CONTROL 0x1C4C
+#define mmATTRDR 0x00F0
+#define mmATTRDW 0x00F0
+#define mmATTRX 0x00F0
+#define mmAUX_ARB_CONTROL 0x1882
+#define mmAUX_CONTROL 0x1880
+#define mmAUX_DPHY_RX_CONTROL0 0x188A
+#define mmAUX_DPHY_RX_CONTROL1 0x188B
+#define mmAUX_DPHY_RX_STATUS 0x188D
+#define mmAUX_DPHY_TX_CONTROL 0x1889
+#define mmAUX_DPHY_TX_REF_CONTROL 0x1888
+#define mmAUX_DPHY_TX_STATUS 0x188C
+#define mmAUX_GTC_SYNC_CONTROL 0x188E
+#define mmAUX_GTC_SYNC_DATA 0x1890
+#define mmAUX_INTERRUPT_CONTROL 0x1883
+#define mmAUX_LS_DATA 0x1887
+#define mmAUX_LS_STATUS 0x1885
+#define mmAUXN_IMPCAL 0x190C
+#define mmAUXP_IMPCAL 0x190B
+#define mmAUX_SW_CONTROL 0x1881
+#define mmAUX_SW_DATA 0x1886
+#define mmAUX_SW_STATUS 0x1884
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x17C9
+#define mmAZALIA_AUDIO_DTO 0x17BA
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x17BB
+#define mmAZALIA_BDL_DMA_CONTROL 0x17BF
+#define mmAZALIA_CONTROLLER_DEBUG 0x17CF
+#define mmAZALIA_CORB_DMA_CONTROL 0x17C1
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x17CA
+#define mmAZALIA_DATA_DMA_CONTROL 0x17BE
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x17D5
+#define mmAZALIA_F0_CODEC_DEBUG 0x17DF
+#define mmAZALIA_F0_CODEC_ENDPOINT_DATA 0x1781
+#define mmAZALIA_F0_CODEC_ENDPOINT_INDEX 0x1780
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x17DE
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x17DB
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x17DC
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x17DD
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x17D7
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x17DA
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x17D9
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x17D8
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x17D6
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x17D3
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x17D2
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x17CB
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x17CC
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x17CD
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x17C0
+#define mmAZALIA_SCLK_CONTROL 0x17BC
+#define mmAZALIA_STREAM_DATA 0x17E9
+#define mmAZALIA_STREAM_INDEX 0x17E8
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x17BD
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1781
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1780
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1787
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1786
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x178D
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x178C
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1793
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1792
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x1799
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x1798
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x179F
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x179E
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA 0x17E9
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX 0x17E8
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA 0x17ED
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX 0x17EC
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA 0x17F1
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX 0x17F0
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA 0x17F5
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX 0x17F4
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA 0x17F9
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX 0x17F8
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA 0x17FD
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX 0x17FC
+#define mmAZ_TEST_DEBUG_DATA 0x17D1
+#define mmAZ_TEST_DEBUG_INDEX 0x17D0
+#define mmBL1_PWM_ABM_CNTL 0x162E
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL 0x1628
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE 0x162F
+#define mmBL1_PWM_CURRENT_ABM_LEVEL 0x162B
+#define mmBL1_PWM_FINAL_DUTY_CYCLE 0x162C
+#define mmBL1_PWM_GRP2_REG_LOCK 0x1630
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE 0x162D
+#define mmBL1_PWM_TARGET_ABM_LEVEL 0x162A
+#define mmBL1_PWM_USER_LEVEL 0x1629
+#define mmBL_PWM_CNTL 0x191E
+#define mmBL_PWM_CNTL2 0x191F
+#define mmBL_PWM_GRP1_REG_LOCK 0x1921
+#define mmBL_PWM_PERIOD_CNTL 0x1920
+#define mmBPHYC_DAC_AUTO_CALIB_CONTROL 0x19FE
+#define mmBPHYC_DAC_MACRO_CNTL 0x19FD
+#define mmCC_DC_PIPE_DIS 0x177F
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x17D4
+#define mmCOMM_MATRIXA_TRANS_C11_C12 0x1A43
+#define mmCOMM_MATRIXA_TRANS_C13_C14 0x1A44
+#define mmCOMM_MATRIXA_TRANS_C21_C22 0x1A45
+#define mmCOMM_MATRIXA_TRANS_C23_C24 0x1A46
+#define mmCOMM_MATRIXA_TRANS_C31_C32 0x1A47
+#define mmCOMM_MATRIXA_TRANS_C33_C34 0x1A48
+#define mmCOMM_MATRIXB_TRANS_C11_C12 0x1A49
+#define mmCOMM_MATRIXB_TRANS_C13_C14 0x1A4A
+#define mmCOMM_MATRIXB_TRANS_C21_C22 0x1A4B
+#define mmCOMM_MATRIXB_TRANS_C23_C24 0x1A4C
+#define mmCOMM_MATRIXB_TRANS_C31_C32 0x1A4D
+#define mmCOMM_MATRIXB_TRANS_C33_C34 0x1A4E
+#define mmCRTC0_CRTC_3D_STRUCTURE_CONTROL 0x1B78
+#define mmCRTC0_CRTC_ALLOW_STOP_OFF_V_CNT 0x1BC3
+#define mmCRTC0_CRTC_BLACK_COLOR 0x1BA2
+#define mmCRTC0_CRTC_BLANK_CONTROL 0x1B9D
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR 0x1BA1
+#define mmCRTC0_CRTC_CONTROL 0x1B9C
+#define mmCRTC0_CRTC_COUNT_CONTROL 0x1BA9
+#define mmCRTC0_CRTC_COUNT_RESET 0x1BAA
+#define mmCRTC0_CRTC_DCFE_CLOCK_CONTROL 0x1B7C
+#define mmCRTC0_CRTC_DOUBLE_BUFFER_CONTROL 0x1BB6
+#define mmCRTC0_CRTC_DTMTEST_CNTL 0x1B92
+#define mmCRTC0_CRTC_DTMTEST_STATUS_POSITION 0x1B93
+#define mmCRTC0_CRTC_FLOW_CONTROL 0x1B99
+#define mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL 0x1B98
+#define mmCRTC0_CRTC_GSL_CONTROL 0x1B7B
+#define mmCRTC0_CRTC_GSL_VSYNC_GAP 0x1B79
+#define mmCRTC0_CRTC_GSL_WINDOW 0x1B7A
+#define mmCRTC0_CRTC_H_BLANK_EARLY_NUM 0x1B7D
+#define mmCRTC0_CRTC_H_BLANK_START_END 0x1B81
+#define mmCRTC0_CRTC_H_SYNC_A 0x1B82
+#define mmCRTC0_CRTC_H_SYNC_A_CNTL 0x1B83
+#define mmCRTC0_CRTC_H_SYNC_B 0x1B84
+#define mmCRTC0_CRTC_H_SYNC_B_CNTL 0x1B85
+#define mmCRTC0_CRTC_H_TOTAL 0x1B80
+#define mmCRTC0_CRTC_INTERLACE_CONTROL 0x1B9E
+#define mmCRTC0_CRTC_INTERLACE_STATUS 0x1B9F
+#define mmCRTC0_CRTC_INTERRUPT_CONTROL 0x1BB4
+#define mmCRTC0_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1BAB
+#define mmCRTC0_CRTC_MASTER_EN 0x1BC2
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT 0x1BBF
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1BC0
+#define mmCRTC0_CRTC_MVP_STATUS 0x1BC1
+#define mmCRTC0_CRTC_NOM_VERT_POSITION 0x1BA5
+#define mmCRTC0_CRTC_OVERSCAN_COLOR 0x1BA0
+#define mmCRTC0_CRTC_SNAPSHOT_CONTROL 0x1BB0
+#define mmCRTC0_CRTC_SNAPSHOT_FRAME 0x1BB2
+#define mmCRTC0_CRTC_SNAPSHOT_POSITION 0x1BB1
+#define mmCRTC0_CRTC_SNAPSHOT_STATUS 0x1BAF
+#define mmCRTC0_CRTC_START_LINE_CONTROL 0x1BB3
+#define mmCRTC0_CRTC_STATUS 0x1BA3
+#define mmCRTC0_CRTC_STATUS_FRAME_COUNT 0x1BA6
+#define mmCRTC0_CRTC_STATUS_HV_COUNT 0x1BA8
+#define mmCRTC0_CRTC_STATUS_POSITION 0x1BA4
+#define mmCRTC0_CRTC_STATUS_VF_COUNT 0x1BA7
+#define mmCRTC0_CRTC_STEREO_CONTROL 0x1BAE
+#define mmCRTC0_CRTC_STEREO_FORCE_NEXT_EYE 0x1B9B
+#define mmCRTC0_CRTC_STEREO_STATUS 0x1BAD
+#define mmCRTC0_CRTC_TEST_DEBUG_DATA 0x1BC7
+#define mmCRTC0_CRTC_TEST_DEBUG_INDEX 0x1BC6
+#define mmCRTC0_CRTC_TEST_PATTERN_COLOR 0x1BBC
+#define mmCRTC0_CRTC_TEST_PATTERN_CONTROL 0x1BBA
+#define mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS 0x1BBB
+#define mmCRTC0_CRTC_TRIGA_CNTL 0x1B94
+#define mmCRTC0_CRTC_TRIGA_MANUAL_TRIG 0x1B95
+#define mmCRTC0_CRTC_TRIGB_CNTL 0x1B96
+#define mmCRTC0_CRTC_TRIGB_MANUAL_TRIG 0x1B97
+#define mmCRTC0_CRTC_UPDATE_LOCK 0x1BB5
+#define mmCRTC0_CRTC_VBI_END 0x1B86
+#define mmCRTC0_CRTC_V_BLANK_START_END 0x1B8D
+#define mmCRTC0_CRTC_VERT_SYNC_CONTROL 0x1BAC
+#define mmCRTC0_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1BB7
+#define mmCRTC0_CRTC_V_SYNC_A 0x1B8E
+#define mmCRTC0_CRTC_V_SYNC_A_CNTL 0x1B8F
+#define mmCRTC0_CRTC_V_SYNC_B 0x1B90
+#define mmCRTC0_CRTC_V_SYNC_B_CNTL 0x1B91
+#define mmCRTC0_CRTC_VSYNC_NOM_INT_STATUS 0x1B8C
+#define mmCRTC0_CRTC_V_TOTAL 0x1B87
+#define mmCRTC0_CRTC_V_TOTAL_CONTROL 0x1B8A
+#define mmCRTC0_CRTC_V_TOTAL_INT_STATUS 0x1B8B
+#define mmCRTC0_CRTC_V_TOTAL_MAX 0x1B89
+#define mmCRTC0_CRTC_V_TOTAL_MIN 0x1B88
+#define mmCRTC0_CRTC_V_UPDATE_INT_STATUS 0x1BC4
+#define mmCRTC0_DCFE_DBG_SEL 0x1B7E
+#define mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL 0x1B7F
+#define mmCRTC0_MASTER_UPDATE_LOCK 0x1BBD
+#define mmCRTC0_MASTER_UPDATE_MODE 0x1BBE
+#define mmCRTC0_PIXEL_RATE_CNTL 0x0140
+#define mmCRTC1_CRTC_3D_STRUCTURE_CONTROL 0x1E78
+#define mmCRTC1_CRTC_ALLOW_STOP_OFF_V_CNT 0x1EC3
+#define mmCRTC1_CRTC_BLACK_COLOR 0x1EA2
+#define mmCRTC1_CRTC_BLANK_CONTROL 0x1E9D
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR 0x1EA1
+#define mmCRTC1_CRTC_CONTROL 0x1E9C
+#define mmCRTC1_CRTC_COUNT_CONTROL 0x1EA9
+#define mmCRTC1_CRTC_COUNT_RESET 0x1EAA
+#define mmCRTC1_CRTC_DCFE_CLOCK_CONTROL 0x1E7C
+#define mmCRTC1_CRTC_DOUBLE_BUFFER_CONTROL 0x1EB6
+#define mmCRTC1_CRTC_DTMTEST_CNTL 0x1E92
+#define mmCRTC1_CRTC_DTMTEST_STATUS_POSITION 0x1E93
+#define mmCRTC1_CRTC_FLOW_CONTROL 0x1E99
+#define mmCRTC1_CRTC_FORCE_COUNT_NOW_CNTL 0x1E98
+#define mmCRTC1_CRTC_GSL_CONTROL 0x1E7B
+#define mmCRTC1_CRTC_GSL_VSYNC_GAP 0x1E79
+#define mmCRTC1_CRTC_GSL_WINDOW 0x1E7A
+#define mmCRTC1_CRTC_H_BLANK_EARLY_NUM 0x1E7D
+#define mmCRTC1_CRTC_H_BLANK_START_END 0x1E81
+#define mmCRTC1_CRTC_H_SYNC_A 0x1E82
+#define mmCRTC1_CRTC_H_SYNC_A_CNTL 0x1E83
+#define mmCRTC1_CRTC_H_SYNC_B 0x1E84
+#define mmCRTC1_CRTC_H_SYNC_B_CNTL 0x1E85
+#define mmCRTC1_CRTC_H_TOTAL 0x1E80
+#define mmCRTC1_CRTC_INTERLACE_CONTROL 0x1E9E
+#define mmCRTC1_CRTC_INTERLACE_STATUS 0x1E9F
+#define mmCRTC1_CRTC_INTERRUPT_CONTROL 0x1EB4
+#define mmCRTC1_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1EAB
+#define mmCRTC1_CRTC_MASTER_EN 0x1EC2
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT 0x1EBF
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1EC0
+#define mmCRTC1_CRTC_MVP_STATUS 0x1EC1
+#define mmCRTC1_CRTC_NOM_VERT_POSITION 0x1EA5
+#define mmCRTC1_CRTC_OVERSCAN_COLOR 0x1EA0
+#define mmCRTC1_CRTC_SNAPSHOT_CONTROL 0x1EB0
+#define mmCRTC1_CRTC_SNAPSHOT_FRAME 0x1EB2
+#define mmCRTC1_CRTC_SNAPSHOT_POSITION 0x1EB1
+#define mmCRTC1_CRTC_SNAPSHOT_STATUS 0x1EAF
+#define mmCRTC1_CRTC_START_LINE_CONTROL 0x1EB3
+#define mmCRTC1_CRTC_STATUS 0x1EA3
+#define mmCRTC1_CRTC_STATUS_FRAME_COUNT 0x1EA6
+#define mmCRTC1_CRTC_STATUS_HV_COUNT 0x1EA8
+#define mmCRTC1_CRTC_STATUS_POSITION 0x1EA4
+#define mmCRTC1_CRTC_STATUS_VF_COUNT 0x1EA7
+#define mmCRTC1_CRTC_STEREO_CONTROL 0x1EAE
+#define mmCRTC1_CRTC_STEREO_FORCE_NEXT_EYE 0x1E9B
+#define mmCRTC1_CRTC_STEREO_STATUS 0x1EAD
+#define mmCRTC1_CRTC_TEST_DEBUG_DATA 0x1EC7
+#define mmCRTC1_CRTC_TEST_DEBUG_INDEX 0x1EC6
+#define mmCRTC1_CRTC_TEST_PATTERN_COLOR 0x1EBC
+#define mmCRTC1_CRTC_TEST_PATTERN_CONTROL 0x1EBA
+#define mmCRTC1_CRTC_TEST_PATTERN_PARAMETERS 0x1EBB
+#define mmCRTC1_CRTC_TRIGA_CNTL 0x1E94
+#define mmCRTC1_CRTC_TRIGA_MANUAL_TRIG 0x1E95
+#define mmCRTC1_CRTC_TRIGB_CNTL 0x1E96
+#define mmCRTC1_CRTC_TRIGB_MANUAL_TRIG 0x1E97
+#define mmCRTC1_CRTC_UPDATE_LOCK 0x1EB5
+#define mmCRTC1_CRTC_VBI_END 0x1E86
+#define mmCRTC1_CRTC_V_BLANK_START_END 0x1E8D
+#define mmCRTC1_CRTC_VERT_SYNC_CONTROL 0x1EAC
+#define mmCRTC1_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1EB7
+#define mmCRTC1_CRTC_V_SYNC_A 0x1E8E
+#define mmCRTC1_CRTC_V_SYNC_A_CNTL 0x1E8F
+#define mmCRTC1_CRTC_V_SYNC_B 0x1E90
+#define mmCRTC1_CRTC_V_SYNC_B_CNTL 0x1E91
+#define mmCRTC1_CRTC_VSYNC_NOM_INT_STATUS 0x1E8C
+#define mmCRTC1_CRTC_V_TOTAL 0x1E87
+#define mmCRTC1_CRTC_V_TOTAL_CONTROL 0x1E8A
+#define mmCRTC1_CRTC_V_TOTAL_INT_STATUS 0x1E8B
+#define mmCRTC1_CRTC_V_TOTAL_MAX 0x1E89
+#define mmCRTC1_CRTC_V_TOTAL_MIN 0x1E88
+#define mmCRTC1_CRTC_V_UPDATE_INT_STATUS 0x1EC4
+#define mmCRTC1_DCFE_DBG_SEL 0x1E7E
+#define mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL 0x1E7F
+#define mmCRTC1_MASTER_UPDATE_LOCK 0x1EBD
+#define mmCRTC1_MASTER_UPDATE_MODE 0x1EBE
+#define mmCRTC1_PIXEL_RATE_CNTL 0x0144
+#define mmCRTC2_CRTC_3D_STRUCTURE_CONTROL 0x4178
+#define mmCRTC2_CRTC_ALLOW_STOP_OFF_V_CNT 0x41C3
+#define mmCRTC2_CRTC_BLACK_COLOR 0x41A2
+#define mmCRTC2_CRTC_BLANK_CONTROL 0x419D
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR 0x41A1
+#define mmCRTC2_CRTC_CONTROL 0x419C
+#define mmCRTC2_CRTC_COUNT_CONTROL 0x41A9
+#define mmCRTC2_CRTC_COUNT_RESET 0x41AA
+#define mmCRTC2_CRTC_DCFE_CLOCK_CONTROL 0x417C
+#define mmCRTC2_CRTC_DOUBLE_BUFFER_CONTROL 0x41B6
+#define mmCRTC2_CRTC_DTMTEST_CNTL 0x4192
+#define mmCRTC2_CRTC_DTMTEST_STATUS_POSITION 0x4193
+#define mmCRTC2_CRTC_FLOW_CONTROL 0x4199
+#define mmCRTC2_CRTC_FORCE_COUNT_NOW_CNTL 0x4198
+#define mmCRTC2_CRTC_GSL_CONTROL 0x417B
+#define mmCRTC2_CRTC_GSL_VSYNC_GAP 0x4179
+#define mmCRTC2_CRTC_GSL_WINDOW 0x417A
+#define mmCRTC2_CRTC_H_BLANK_EARLY_NUM 0x417D
+#define mmCRTC2_CRTC_H_BLANK_START_END 0x4181
+#define mmCRTC2_CRTC_H_SYNC_A 0x4182
+#define mmCRTC2_CRTC_H_SYNC_A_CNTL 0x4183
+#define mmCRTC2_CRTC_H_SYNC_B 0x4184
+#define mmCRTC2_CRTC_H_SYNC_B_CNTL 0x4185
+#define mmCRTC2_CRTC_H_TOTAL 0x4180
+#define mmCRTC2_CRTC_INTERLACE_CONTROL 0x419E
+#define mmCRTC2_CRTC_INTERLACE_STATUS 0x419F
+#define mmCRTC2_CRTC_INTERRUPT_CONTROL 0x41B4
+#define mmCRTC2_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x41AB
+#define mmCRTC2_CRTC_MASTER_EN 0x41C2
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT 0x41BF
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x41C0
+#define mmCRTC2_CRTC_MVP_STATUS 0x41C1
+#define mmCRTC2_CRTC_NOM_VERT_POSITION 0x41A5
+#define mmCRTC2_CRTC_OVERSCAN_COLOR 0x41A0
+#define mmCRTC2_CRTC_SNAPSHOT_CONTROL 0x41B0
+#define mmCRTC2_CRTC_SNAPSHOT_FRAME 0x41B2
+#define mmCRTC2_CRTC_SNAPSHOT_POSITION 0x41B1
+#define mmCRTC2_CRTC_SNAPSHOT_STATUS 0x41AF
+#define mmCRTC2_CRTC_START_LINE_CONTROL 0x41B3
+#define mmCRTC2_CRTC_STATUS 0x41A3
+#define mmCRTC2_CRTC_STATUS_FRAME_COUNT 0x41A6
+#define mmCRTC2_CRTC_STATUS_HV_COUNT 0x41A8
+#define mmCRTC2_CRTC_STATUS_POSITION 0x41A4
+#define mmCRTC2_CRTC_STATUS_VF_COUNT 0x41A7
+#define mmCRTC2_CRTC_STEREO_CONTROL 0x41AE
+#define mmCRTC2_CRTC_STEREO_FORCE_NEXT_EYE 0x419B
+#define mmCRTC2_CRTC_STEREO_STATUS 0x41AD
+#define mmCRTC2_CRTC_TEST_DEBUG_DATA 0x41C7
+#define mmCRTC2_CRTC_TEST_DEBUG_INDEX 0x41C6
+#define mmCRTC2_CRTC_TEST_PATTERN_COLOR 0x41BC
+#define mmCRTC2_CRTC_TEST_PATTERN_CONTROL 0x41BA
+#define mmCRTC2_CRTC_TEST_PATTERN_PARAMETERS 0x41BB
+#define mmCRTC2_CRTC_TRIGA_CNTL 0x4194
+#define mmCRTC2_CRTC_TRIGA_MANUAL_TRIG 0x4195
+#define mmCRTC2_CRTC_TRIGB_CNTL 0x4196
+#define mmCRTC2_CRTC_TRIGB_MANUAL_TRIG 0x4197
+#define mmCRTC2_CRTC_UPDATE_LOCK 0x41B5
+#define mmCRTC2_CRTC_VBI_END 0x4186
+#define mmCRTC2_CRTC_V_BLANK_START_END 0x418D
+#define mmCRTC2_CRTC_VERT_SYNC_CONTROL 0x41AC
+#define mmCRTC2_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x41B7
+#define mmCRTC2_CRTC_V_SYNC_A 0x418E
+#define mmCRTC2_CRTC_V_SYNC_A_CNTL 0x418F
+#define mmCRTC2_CRTC_V_SYNC_B 0x4190
+#define mmCRTC2_CRTC_V_SYNC_B_CNTL 0x4191
+#define mmCRTC2_CRTC_VSYNC_NOM_INT_STATUS 0x418C
+#define mmCRTC2_CRTC_V_TOTAL 0x4187
+#define mmCRTC2_CRTC_V_TOTAL_CONTROL 0x418A
+#define mmCRTC2_CRTC_V_TOTAL_INT_STATUS 0x418B
+#define mmCRTC2_CRTC_V_TOTAL_MAX 0x4189
+#define mmCRTC2_CRTC_V_TOTAL_MIN 0x4188
+#define mmCRTC2_CRTC_V_UPDATE_INT_STATUS 0x41C4
+#define mmCRTC2_DCFE_DBG_SEL 0x417E
+#define mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL 0x417F
+#define mmCRTC2_MASTER_UPDATE_LOCK 0x41BD
+#define mmCRTC2_MASTER_UPDATE_MODE 0x41BE
+#define mmCRTC2_PIXEL_RATE_CNTL 0x0148
+#define mmCRTC3_CRTC_3D_STRUCTURE_CONTROL 0x4478
+#define mmCRTC3_CRTC_ALLOW_STOP_OFF_V_CNT 0x44C3
+#define mmCRTC3_CRTC_BLACK_COLOR 0x44A2
+#define mmCRTC3_CRTC_BLANK_CONTROL 0x449D
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR 0x44A1
+#define mmCRTC3_CRTC_CONTROL 0x449C
+#define mmCRTC3_CRTC_COUNT_CONTROL 0x44A9
+#define mmCRTC3_CRTC_COUNT_RESET 0x44AA
+#define mmCRTC3_CRTC_DCFE_CLOCK_CONTROL 0x447C
+#define mmCRTC3_CRTC_DOUBLE_BUFFER_CONTROL 0x44B6
+#define mmCRTC3_CRTC_DTMTEST_CNTL 0x4492
+#define mmCRTC3_CRTC_DTMTEST_STATUS_POSITION 0x4493
+#define mmCRTC3_CRTC_FLOW_CONTROL 0x4499
+#define mmCRTC3_CRTC_FORCE_COUNT_NOW_CNTL 0x4498
+#define mmCRTC3_CRTC_GSL_CONTROL 0x447B
+#define mmCRTC3_CRTC_GSL_VSYNC_GAP 0x4479
+#define mmCRTC3_CRTC_GSL_WINDOW 0x447A
+#define mmCRTC3_CRTC_H_BLANK_EARLY_NUM 0x447D
+#define mmCRTC3_CRTC_H_BLANK_START_END 0x4481
+#define mmCRTC3_CRTC_H_SYNC_A 0x4482
+#define mmCRTC3_CRTC_H_SYNC_A_CNTL 0x4483
+#define mmCRTC3_CRTC_H_SYNC_B 0x4484
+#define mmCRTC3_CRTC_H_SYNC_B_CNTL 0x4485
+#define mmCRTC3_CRTC_H_TOTAL 0x4480
+#define mmCRTC3_CRTC_INTERLACE_CONTROL 0x449E
+#define mmCRTC3_CRTC_INTERLACE_STATUS 0x449F
+#define mmCRTC3_CRTC_INTERRUPT_CONTROL 0x44B4
+#define mmCRTC3_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x44AB
+#define mmCRTC3_CRTC_MASTER_EN 0x44C2
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT 0x44BF
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x44C0
+#define mmCRTC3_CRTC_MVP_STATUS 0x44C1
+#define mmCRTC3_CRTC_NOM_VERT_POSITION 0x44A5
+#define mmCRTC3_CRTC_OVERSCAN_COLOR 0x44A0
+#define mmCRTC3_CRTC_SNAPSHOT_CONTROL 0x44B0
+#define mmCRTC3_CRTC_SNAPSHOT_FRAME 0x44B2
+#define mmCRTC3_CRTC_SNAPSHOT_POSITION 0x44B1
+#define mmCRTC3_CRTC_SNAPSHOT_STATUS 0x44AF
+#define mmCRTC3_CRTC_START_LINE_CONTROL 0x44B3
+#define mmCRTC3_CRTC_STATUS 0x44A3
+#define mmCRTC3_CRTC_STATUS_FRAME_COUNT 0x44A6
+#define mmCRTC3_CRTC_STATUS_HV_COUNT 0x44A8
+#define mmCRTC3_CRTC_STATUS_POSITION 0x44A4
+#define mmCRTC3_CRTC_STATUS_VF_COUNT 0x44A7
+#define mmCRTC3_CRTC_STEREO_CONTROL 0x44AE
+#define mmCRTC3_CRTC_STEREO_FORCE_NEXT_EYE 0x449B
+#define mmCRTC3_CRTC_STEREO_STATUS 0x44AD
+#define mmCRTC3_CRTC_TEST_DEBUG_DATA 0x44C7
+#define mmCRTC3_CRTC_TEST_DEBUG_INDEX 0x44C6
+#define mmCRTC3_CRTC_TEST_PATTERN_COLOR 0x44BC
+#define mmCRTC3_CRTC_TEST_PATTERN_CONTROL 0x44BA
+#define mmCRTC3_CRTC_TEST_PATTERN_PARAMETERS 0x44BB
+#define mmCRTC3_CRTC_TRIGA_CNTL 0x4494
+#define mmCRTC3_CRTC_TRIGA_MANUAL_TRIG 0x4495
+#define mmCRTC3_CRTC_TRIGB_CNTL 0x4496
+#define mmCRTC3_CRTC_TRIGB_MANUAL_TRIG 0x4497
+#define mmCRTC3_CRTC_UPDATE_LOCK 0x44B5
+#define mmCRTC3_CRTC_VBI_END 0x4486
+#define mmCRTC3_CRTC_V_BLANK_START_END 0x448D
+#define mmCRTC3_CRTC_VERT_SYNC_CONTROL 0x44AC
+#define mmCRTC3_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x44B7
+#define mmCRTC3_CRTC_V_SYNC_A 0x448E
+#define mmCRTC3_CRTC_V_SYNC_A_CNTL 0x448F
+#define mmCRTC3_CRTC_V_SYNC_B 0x4490
+#define mmCRTC3_CRTC_V_SYNC_B_CNTL 0x4491
+#define mmCRTC3_CRTC_VSYNC_NOM_INT_STATUS 0x448C
+#define mmCRTC3_CRTC_V_TOTAL 0x4487
+#define mmCRTC3_CRTC_V_TOTAL_CONTROL 0x448A
+#define mmCRTC3_CRTC_V_TOTAL_INT_STATUS 0x448B
+#define mmCRTC3_CRTC_V_TOTAL_MAX 0x4489
+#define mmCRTC3_CRTC_V_TOTAL_MIN 0x4488
+#define mmCRTC3_CRTC_V_UPDATE_INT_STATUS 0x44C4
+#define mmCRTC3_DCFE_DBG_SEL 0x447E
+#define mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL 0x447F
+#define mmCRTC_3D_STRUCTURE_CONTROL 0x1B78
+#define mmCRTC3_MASTER_UPDATE_LOCK 0x44BD
+#define mmCRTC3_MASTER_UPDATE_MODE 0x44BE
+#define mmCRTC3_PIXEL_RATE_CNTL 0x014C
+#define mmCRTC4_CRTC_3D_STRUCTURE_CONTROL 0x4778
+#define mmCRTC4_CRTC_ALLOW_STOP_OFF_V_CNT 0x47C3
+#define mmCRTC4_CRTC_BLACK_COLOR 0x47A2
+#define mmCRTC4_CRTC_BLANK_CONTROL 0x479D
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR 0x47A1
+#define mmCRTC4_CRTC_CONTROL 0x479C
+#define mmCRTC4_CRTC_COUNT_CONTROL 0x47A9
+#define mmCRTC4_CRTC_COUNT_RESET 0x47AA
+#define mmCRTC4_CRTC_DCFE_CLOCK_CONTROL 0x477C
+#define mmCRTC4_CRTC_DOUBLE_BUFFER_CONTROL 0x47B6
+#define mmCRTC4_CRTC_DTMTEST_CNTL 0x4792
+#define mmCRTC4_CRTC_DTMTEST_STATUS_POSITION 0x4793
+#define mmCRTC4_CRTC_FLOW_CONTROL 0x4799
+#define mmCRTC4_CRTC_FORCE_COUNT_NOW_CNTL 0x4798
+#define mmCRTC4_CRTC_GSL_CONTROL 0x477B
+#define mmCRTC4_CRTC_GSL_VSYNC_GAP 0x4779
+#define mmCRTC4_CRTC_GSL_WINDOW 0x477A
+#define mmCRTC4_CRTC_H_BLANK_EARLY_NUM 0x477D
+#define mmCRTC4_CRTC_H_BLANK_START_END 0x4781
+#define mmCRTC4_CRTC_H_SYNC_A 0x4782
+#define mmCRTC4_CRTC_H_SYNC_A_CNTL 0x4783
+#define mmCRTC4_CRTC_H_SYNC_B 0x4784
+#define mmCRTC4_CRTC_H_SYNC_B_CNTL 0x4785
+#define mmCRTC4_CRTC_H_TOTAL 0x4780
+#define mmCRTC4_CRTC_INTERLACE_CONTROL 0x479E
+#define mmCRTC4_CRTC_INTERLACE_STATUS 0x479F
+#define mmCRTC4_CRTC_INTERRUPT_CONTROL 0x47B4
+#define mmCRTC4_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x47AB
+#define mmCRTC4_CRTC_MASTER_EN 0x47C2
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT 0x47BF
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x47C0
+#define mmCRTC4_CRTC_MVP_STATUS 0x47C1
+#define mmCRTC4_CRTC_NOM_VERT_POSITION 0x47A5
+#define mmCRTC4_CRTC_OVERSCAN_COLOR 0x47A0
+#define mmCRTC4_CRTC_SNAPSHOT_CONTROL 0x47B0
+#define mmCRTC4_CRTC_SNAPSHOT_FRAME 0x47B2
+#define mmCRTC4_CRTC_SNAPSHOT_POSITION 0x47B1
+#define mmCRTC4_CRTC_SNAPSHOT_STATUS 0x47AF
+#define mmCRTC4_CRTC_START_LINE_CONTROL 0x47B3
+#define mmCRTC4_CRTC_STATUS 0x47A3
+#define mmCRTC4_CRTC_STATUS_FRAME_COUNT 0x47A6
+#define mmCRTC4_CRTC_STATUS_HV_COUNT 0x47A8
+#define mmCRTC4_CRTC_STATUS_POSITION 0x47A4
+#define mmCRTC4_CRTC_STATUS_VF_COUNT 0x47A7
+#define mmCRTC4_CRTC_STEREO_CONTROL 0x47AE
+#define mmCRTC4_CRTC_STEREO_FORCE_NEXT_EYE 0x479B
+#define mmCRTC4_CRTC_STEREO_STATUS 0x47AD
+#define mmCRTC4_CRTC_TEST_DEBUG_DATA 0x47C7
+#define mmCRTC4_CRTC_TEST_DEBUG_INDEX 0x47C6
+#define mmCRTC4_CRTC_TEST_PATTERN_COLOR 0x47BC
+#define mmCRTC4_CRTC_TEST_PATTERN_CONTROL 0x47BA
+#define mmCRTC4_CRTC_TEST_PATTERN_PARAMETERS 0x47BB
+#define mmCRTC4_CRTC_TRIGA_CNTL 0x4794
+#define mmCRTC4_CRTC_TRIGA_MANUAL_TRIG 0x4795
+#define mmCRTC4_CRTC_TRIGB_CNTL 0x4796
+#define mmCRTC4_CRTC_TRIGB_MANUAL_TRIG 0x4797
+#define mmCRTC4_CRTC_UPDATE_LOCK 0x47B5
+#define mmCRTC4_CRTC_VBI_END 0x4786
+#define mmCRTC4_CRTC_V_BLANK_START_END 0x478D
+#define mmCRTC4_CRTC_VERT_SYNC_CONTROL 0x47AC
+#define mmCRTC4_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x47B7
+#define mmCRTC4_CRTC_V_SYNC_A 0x478E
+#define mmCRTC4_CRTC_V_SYNC_A_CNTL 0x478F
+#define mmCRTC4_CRTC_V_SYNC_B 0x4790
+#define mmCRTC4_CRTC_V_SYNC_B_CNTL 0x4791
+#define mmCRTC4_CRTC_VSYNC_NOM_INT_STATUS 0x478C
+#define mmCRTC4_CRTC_V_TOTAL 0x4787
+#define mmCRTC4_CRTC_V_TOTAL_CONTROL 0x478A
+#define mmCRTC4_CRTC_V_TOTAL_INT_STATUS 0x478B
+#define mmCRTC4_CRTC_V_TOTAL_MAX 0x4789
+#define mmCRTC4_CRTC_V_TOTAL_MIN 0x4788
+#define mmCRTC4_CRTC_V_UPDATE_INT_STATUS 0x47C4
+#define mmCRTC4_DCFE_DBG_SEL 0x477E
+#define mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL 0x477F
+#define mmCRTC4_MASTER_UPDATE_LOCK 0x47BD
+#define mmCRTC4_MASTER_UPDATE_MODE 0x47BE
+#define mmCRTC4_PIXEL_RATE_CNTL 0x0150
+#define mmCRTC5_CRTC_3D_STRUCTURE_CONTROL 0x4A78
+#define mmCRTC5_CRTC_ALLOW_STOP_OFF_V_CNT 0x4AC3
+#define mmCRTC5_CRTC_BLACK_COLOR 0x4AA2
+#define mmCRTC5_CRTC_BLANK_CONTROL 0x4A9D
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR 0x4AA1
+#define mmCRTC5_CRTC_CONTROL 0x4A9C
+#define mmCRTC5_CRTC_COUNT_CONTROL 0x4AA9
+#define mmCRTC5_CRTC_COUNT_RESET 0x4AAA
+#define mmCRTC5_CRTC_DCFE_CLOCK_CONTROL 0x4A7C
+#define mmCRTC5_CRTC_DOUBLE_BUFFER_CONTROL 0x4AB6
+#define mmCRTC5_CRTC_DTMTEST_CNTL 0x4A92
+#define mmCRTC5_CRTC_DTMTEST_STATUS_POSITION 0x4A93
+#define mmCRTC5_CRTC_FLOW_CONTROL 0x4A99
+#define mmCRTC5_CRTC_FORCE_COUNT_NOW_CNTL 0x4A98
+#define mmCRTC5_CRTC_GSL_CONTROL 0x4A7B
+#define mmCRTC5_CRTC_GSL_VSYNC_GAP 0x4A79
+#define mmCRTC5_CRTC_GSL_WINDOW 0x4A7A
+#define mmCRTC5_CRTC_H_BLANK_EARLY_NUM 0x4A7D
+#define mmCRTC5_CRTC_H_BLANK_START_END 0x4A81
+#define mmCRTC5_CRTC_H_SYNC_A 0x4A82
+#define mmCRTC5_CRTC_H_SYNC_A_CNTL 0x4A83
+#define mmCRTC5_CRTC_H_SYNC_B 0x4A84
+#define mmCRTC5_CRTC_H_SYNC_B_CNTL 0x4A85
+#define mmCRTC5_CRTC_H_TOTAL 0x4A80
+#define mmCRTC5_CRTC_INTERLACE_CONTROL 0x4A9E
+#define mmCRTC5_CRTC_INTERLACE_STATUS 0x4A9F
+#define mmCRTC5_CRTC_INTERRUPT_CONTROL 0x4AB4
+#define mmCRTC5_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x4AAB
+#define mmCRTC5_CRTC_MASTER_EN 0x4AC2
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT 0x4ABF
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x4AC0
+#define mmCRTC5_CRTC_MVP_STATUS 0x4AC1
+#define mmCRTC5_CRTC_NOM_VERT_POSITION 0x4AA5
+#define mmCRTC5_CRTC_OVERSCAN_COLOR 0x4AA0
+#define mmCRTC5_CRTC_SNAPSHOT_CONTROL 0x4AB0
+#define mmCRTC5_CRTC_SNAPSHOT_FRAME 0x4AB2
+#define mmCRTC5_CRTC_SNAPSHOT_POSITION 0x4AB1
+#define mmCRTC5_CRTC_SNAPSHOT_STATUS 0x4AAF
+#define mmCRTC5_CRTC_START_LINE_CONTROL 0x4AB3
+#define mmCRTC5_CRTC_STATUS 0x4AA3
+#define mmCRTC5_CRTC_STATUS_FRAME_COUNT 0x4AA6
+#define mmCRTC5_CRTC_STATUS_HV_COUNT 0x4AA8
+#define mmCRTC5_CRTC_STATUS_POSITION 0x4AA4
+#define mmCRTC5_CRTC_STATUS_VF_COUNT 0x4AA7
+#define mmCRTC5_CRTC_STEREO_CONTROL 0x4AAE
+#define mmCRTC5_CRTC_STEREO_FORCE_NEXT_EYE 0x4A9B
+#define mmCRTC5_CRTC_STEREO_STATUS 0x4AAD
+#define mmCRTC5_CRTC_TEST_DEBUG_DATA 0x4AC7
+#define mmCRTC5_CRTC_TEST_DEBUG_INDEX 0x4AC6
+#define mmCRTC5_CRTC_TEST_PATTERN_COLOR 0x4ABC
+#define mmCRTC5_CRTC_TEST_PATTERN_CONTROL 0x4ABA
+#define mmCRTC5_CRTC_TEST_PATTERN_PARAMETERS 0x4ABB
+#define mmCRTC5_CRTC_TRIGA_CNTL 0x4A94
+#define mmCRTC5_CRTC_TRIGA_MANUAL_TRIG 0x4A95
+#define mmCRTC5_CRTC_TRIGB_CNTL 0x4A96
+#define mmCRTC5_CRTC_TRIGB_MANUAL_TRIG 0x4A97
+#define mmCRTC5_CRTC_UPDATE_LOCK 0x4AB5
+#define mmCRTC5_CRTC_VBI_END 0x4A86
+#define mmCRTC5_CRTC_V_BLANK_START_END 0x4A8D
+#define mmCRTC5_CRTC_VERT_SYNC_CONTROL 0x4AAC
+#define mmCRTC5_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x4AB7
+#define mmCRTC5_CRTC_V_SYNC_A 0x4A8E
+#define mmCRTC5_CRTC_V_SYNC_A_CNTL 0x4A8F
+#define mmCRTC5_CRTC_V_SYNC_B 0x4A90
+#define mmCRTC5_CRTC_V_SYNC_B_CNTL 0x4A91
+#define mmCRTC5_CRTC_VSYNC_NOM_INT_STATUS 0x4A8C
+#define mmCRTC5_CRTC_V_TOTAL 0x4A87
+#define mmCRTC5_CRTC_V_TOTAL_CONTROL 0x4A8A
+#define mmCRTC5_CRTC_V_TOTAL_INT_STATUS 0x4A8B
+#define mmCRTC5_CRTC_V_TOTAL_MAX 0x4A89
+#define mmCRTC5_CRTC_V_TOTAL_MIN 0x4A88
+#define mmCRTC5_CRTC_V_UPDATE_INT_STATUS 0x4AC4
+#define mmCRTC5_DCFE_DBG_SEL 0x4A7E
+#define mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL 0x4A7F
+#define mmCRTC5_MASTER_UPDATE_LOCK 0x4ABD
+#define mmCRTC5_MASTER_UPDATE_MODE 0x4ABE
+#define mmCRTC5_PIXEL_RATE_CNTL 0x0154
+#define mmCRTC8_DATA 0x00ED
+#define mmCRTC8_IDX 0x00ED
+#define mmCRTC_ALLOW_STOP_OFF_V_CNT 0x1BC3
+#define mmCRTC_BLACK_COLOR 0x1BA2
+#define mmCRTC_BLANK_CONTROL 0x1B9D
+#define mmCRTC_BLANK_DATA_COLOR 0x1BA1
+#define mmCRTC_CONTROL 0x1B9C
+#define mmCRTC_COUNT_CONTROL 0x1BA9
+#define mmCRTC_COUNT_RESET 0x1BAA
+#define mmCRTC_DCFE_CLOCK_CONTROL 0x1B7C
+#define mmCRTC_DOUBLE_BUFFER_CONTROL 0x1BB6
+#define mmCRTC_DTMTEST_CNTL 0x1B92
+#define mmCRTC_DTMTEST_STATUS_POSITION 0x1B93
+#define mmCRTC_FLOW_CONTROL 0x1B99
+#define mmCRTC_FORCE_COUNT_NOW_CNTL 0x1B98
+#define mmCRTC_GSL_CONTROL 0x1B7B
+#define mmCRTC_GSL_VSYNC_GAP 0x1B79
+#define mmCRTC_GSL_WINDOW 0x1B7A
+#define mmCRTC_H_BLANK_EARLY_NUM 0x1B7D
+#define mmCRTC_H_BLANK_START_END 0x1B81
+#define mmCRTC_H_SYNC_A 0x1B82
+#define mmCRTC_H_SYNC_A_CNTL 0x1B83
+#define mmCRTC_H_SYNC_B 0x1B84
+#define mmCRTC_H_SYNC_B_CNTL 0x1B85
+#define mmCRTC_H_TOTAL 0x1B80
+#define mmCRTC_INTERLACE_CONTROL 0x1B9E
+#define mmCRTC_INTERLACE_STATUS 0x1B9F
+#define mmCRTC_INTERRUPT_CONTROL 0x1BB4
+#define mmCRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1BAB
+#define mmCRTC_MASTER_EN 0x1BC2
+#define mmCRTC_MVP_INBAND_CNTL_INSERT 0x1BBF
+#define mmCRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1BC0
+#define mmCRTC_MVP_STATUS 0x1BC1
+#define mmCRTC_NOM_VERT_POSITION 0x1BA5
+#define mmCRTC_OVERSCAN_COLOR 0x1BA0
+#define mmCRTC_SNAPSHOT_CONTROL 0x1BB0
+#define mmCRTC_SNAPSHOT_FRAME 0x1BB2
+#define mmCRTC_SNAPSHOT_POSITION 0x1BB1
+#define mmCRTC_SNAPSHOT_STATUS 0x1BAF
+#define mmCRTC_START_LINE_CONTROL 0x1BB3
+#define mmCRTC_STATUS 0x1BA3
+#define mmCRTC_STATUS_FRAME_COUNT 0x1BA6
+#define mmCRTC_STATUS_HV_COUNT 0x1BA8
+#define mmCRTC_STATUS_POSITION 0x1BA4
+#define mmCRTC_STATUS_VF_COUNT 0x1BA7
+#define mmCRTC_STEREO_CONTROL 0x1BAE
+#define mmCRTC_STEREO_FORCE_NEXT_EYE 0x1B9B
+#define mmCRTC_STEREO_STATUS 0x1BAD
+#define mmCRTC_TEST_DEBUG_DATA 0x1BC7
+#define mmCRTC_TEST_DEBUG_INDEX 0x1BC6
+#define mmCRTC_TEST_PATTERN_COLOR 0x1BBC
+#define mmCRTC_TEST_PATTERN_CONTROL 0x1BBA
+#define mmCRTC_TEST_PATTERN_PARAMETERS 0x1BBB
+#define mmCRTC_TRIGA_CNTL 0x1B94
+#define mmCRTC_TRIGA_MANUAL_TRIG 0x1B95
+#define mmCRTC_TRIGB_CNTL 0x1B96
+#define mmCRTC_TRIGB_MANUAL_TRIG 0x1B97
+#define mmCRTC_UPDATE_LOCK 0x1BB5
+#define mmCRTC_VBI_END 0x1B86
+#define mmCRTC_V_BLANK_START_END 0x1B8D
+#define mmCRTC_VERT_SYNC_CONTROL 0x1BAC
+#define mmCRTC_VGA_PARAMETER_CAPTURE_MODE 0x1BB7
+#define mmCRTC_V_SYNC_A 0x1B8E
+#define mmCRTC_V_SYNC_A_CNTL 0x1B8F
+#define mmCRTC_V_SYNC_B 0x1B90
+#define mmCRTC_V_SYNC_B_CNTL 0x1B91
+#define mmCRTC_VSYNC_NOM_INT_STATUS 0x1B8C
+#define mmCRTC_V_TOTAL 0x1B87
+#define mmCRTC_V_TOTAL_CONTROL 0x1B8A
+#define mmCRTC_V_TOTAL_INT_STATUS 0x1B8B
+#define mmCRTC_V_TOTAL_MAX 0x1B89
+#define mmCRTC_V_TOTAL_MIN 0x1B88
+#define mmCRTC_V_UPDATE_INT_STATUS 0x1BC4
+#define mmCUR_COLOR1 0x1A6C
+#define mmCUR_COLOR2 0x1A6D
+#define mmCUR_CONTROL 0x1A66
+#define mmCUR_HOT_SPOT 0x1A6B
+#define mmCUR_POSITION 0x1A6A
+#define mmCUR_REQUEST_FILTER_CNTL 0x1A99
+#define mmCUR_SIZE 0x1A68
+#define mmCUR_SURFACE_ADDRESS 0x1A67
+#define mmCUR_SURFACE_ADDRESS_HIGH 0x1A69
+#define mmCUR_UPDATE 0x1A6E
+#define mmD1VGA_CONTROL 0x00CC
+#define mmD2VGA_CONTROL 0x00CE
+#define mmD3VGA_CONTROL 0x00F8
+#define mmD4VGA_CONTROL 0x00F9
+#define mmD5VGA_CONTROL 0x00FA
+#define mmD6VGA_CONTROL 0x00FB
+#define mmDAC_AUTODETECT_CONTROL 0x19EE
+#define mmDAC_AUTODETECT_CONTROL2 0x19EF
+#define mmDAC_AUTODETECT_CONTROL3 0x19F0
+#define mmDAC_AUTODETECT_INT_CONTROL 0x19F2
+#define mmDAC_AUTODETECT_STATUS 0x19F1
+#define mmDAC_CLK_ENABLE 0x0128
+#define mmDAC_COMPARATOR_ENABLE 0x19F7
+#define mmDAC_COMPARATOR_OUTPUT 0x19F8
+#define mmDAC_CONTROL 0x19F6
+#define mmDAC_CRC_CONTROL 0x19E7
+#define mmDAC_CRC_EN 0x19E6
+#define mmDAC_CRC_SIG_CONTROL 0x19EB
+#define mmDAC_CRC_SIG_CONTROL_MASK 0x19E9
+#define mmDAC_CRC_SIG_RGB 0x19EA
+#define mmDAC_CRC_SIG_RGB_MASK 0x19E8
+#define mmDAC_DATA 0x00F2
+#define mmDAC_DFT_CONFIG 0x19FA
+#define mmDAC_ENABLE 0x19E4
+#define mmDAC_FIFO_STATUS 0x19FB
+#define mmDAC_FORCE_DATA 0x19F4
+#define mmDAC_FORCE_OUTPUT_CNTL 0x19F3
+#define mmDAC_MACRO_CNTL_RESERVED0 0x19FC
+#define mmDAC_MACRO_CNTL_RESERVED1 0x19FD
+#define mmDAC_MACRO_CNTL_RESERVED2 0x19FE
+#define mmDAC_MACRO_CNTL_RESERVED3 0x19FF
+#define mmDAC_MASK 0x00F1
+#define mmDAC_POWERDOWN 0x19F5
+#define mmDAC_PWR_CNTL 0x19F9
+#define mmDAC_R_INDEX 0x00F1
+#define mmDAC_SOURCE_SELECT 0x19E5
+#define mmDAC_STEREOSYNC_SELECT 0x19ED
+#define mmDAC_SYNC_TRISTATE_CONTROL 0x19EC
+#define mmDAC_W_INDEX 0x00F2
+#define mmDC_ABM1_ACE_CNTL_MISC 0x1641
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0 0x163A
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1 0x163B
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2 0x163C
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3 0x163D
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4 0x163E
+#define mmDC_ABM1_ACE_THRES_12 0x163F
+#define mmDC_ABM1_ACE_THRES_34 0x1640
+#define mmDC_ABM1_BL_MASTER_LOCK 0x169C
+#define mmDC_ABM1_CNTL 0x1638
+#define mmDC_ABM1_DEBUG_MISC 0x1649
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x1656
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x1659
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x1657
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x165A
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x1658
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS 0x164A
+#define mmDC_ABM1_HG_MISC_CTRL 0x164B
+#define mmDC_ABM1_HG_RESULT_10 0x1664
+#define mmDC_ABM1_HG_RESULT_1 0x165B
+#define mmDC_ABM1_HG_RESULT_11 0x1665
+#define mmDC_ABM1_HG_RESULT_12 0x1666
+#define mmDC_ABM1_HG_RESULT_13 0x1667
+#define mmDC_ABM1_HG_RESULT_14 0x1668
+#define mmDC_ABM1_HG_RESULT_15 0x1669
+#define mmDC_ABM1_HG_RESULT_16 0x166A
+#define mmDC_ABM1_HG_RESULT_17 0x166B
+#define mmDC_ABM1_HG_RESULT_18 0x166C
+#define mmDC_ABM1_HG_RESULT_19 0x166D
+#define mmDC_ABM1_HG_RESULT_20 0x166E
+#define mmDC_ABM1_HG_RESULT_2 0x165C
+#define mmDC_ABM1_HG_RESULT_21 0x166F
+#define mmDC_ABM1_HG_RESULT_22 0x1670
+#define mmDC_ABM1_HG_RESULT_23 0x1671
+#define mmDC_ABM1_HG_RESULT_24 0x1672
+#define mmDC_ABM1_HG_RESULT_3 0x165D
+#define mmDC_ABM1_HG_RESULT_4 0x165E
+#define mmDC_ABM1_HG_RESULT_5 0x165F
+#define mmDC_ABM1_HG_RESULT_6 0x1660
+#define mmDC_ABM1_HG_RESULT_7 0x1661
+#define mmDC_ABM1_HG_RESULT_8 0x1662
+#define mmDC_ABM1_HG_RESULT_9 0x1663
+#define mmDC_ABM1_HG_SAMPLE_RATE 0x1654
+#define mmDC_ABM1_IPCSC_COEFF_SEL 0x1639
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x164E
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x1653
+#define mmDC_ABM1_LS_MIN_MAX_LUMA 0x164D
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x1651
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x1652
+#define mmDC_ABM1_LS_OVR_SCAN_BIN 0x1650
+#define mmDC_ABM1_LS_PIXEL_COUNT 0x164F
+#define mmDC_ABM1_LS_SAMPLE_RATE 0x1655
+#define mmDC_ABM1_LS_SUM_OF_LUMA 0x164C
+#define mmDC_ABM1_OVERSCAN_PIXEL_VALUE 0x169B
+#define mmDCCG_AUDIO_DTO0_MODULE 0x016D
+#define mmDCCG_AUDIO_DTO0_PHASE 0x016C
+#define mmDCCG_AUDIO_DTO1_MODULE 0x0171
+#define mmDCCG_AUDIO_DTO1_PHASE 0x0170
+#define mmDCCG_AUDIO_DTO_SOURCE 0x016B
+#define mmDCCG_CAC_STATUS 0x0137
+#define mmDCCG_GATE_DISABLE_CNTL 0x0134
+#define mmDCCG_GTC_CNTL 0x0120
+#define mmDCCG_GTC_CURRENT 0x0123
+#define mmDCCG_GTC_DTO_MODULO 0x0122
+#define mmDCCG_PERFMON_CNTL 0x0133
+#define mmDCCG_PLL0_PLL_ANALOG 0x1708
+#define mmDCCG_PLL0_PLL_CNTL 0x1707
+#define mmDCCG_PLL0_PLL_DEBUG_CNTL 0x170B
+#define mmDCCG_PLL0_PLL_DISPCLK_CURRENT_DTO_PHASE 0x170F
+#define mmDCCG_PLL0_PLL_DISPCLK_DTO_CNTL 0x170E
+#define mmDCCG_PLL0_PLL_DS_CNTL 0x1705
+#define mmDCCG_PLL0_PLL_FB_DIV 0x1701
+#define mmDCCG_PLL0_PLL_IDCLK_CNTL 0x1706
+#define mmDCCG_PLL0_PLL_POST_DIV 0x1702
+#define mmDCCG_PLL0_PLL_REF_DIV 0x1700
+#define mmDCCG_PLL0_PLL_SS_AMOUNT_DSFRAC 0x1703
+#define mmDCCG_PLL0_PLL_SS_CNTL 0x1704
+#define mmDCCG_PLL0_PLL_UNLOCK_DETECT_CNTL 0x170A
+#define mmDCCG_PLL0_PLL_UPDATE_CNTL 0x170D
+#define mmDCCG_PLL0_PLL_UPDATE_LOCK 0x170C
+#define mmDCCG_PLL0_PLL_VREG_CNTL 0x1709
+#define mmDCCG_PLL1_PLL_ANALOG 0x1718
+#define mmDCCG_PLL1_PLL_CNTL 0x1717
+#define mmDCCG_PLL1_PLL_DEBUG_CNTL 0x171B
+#define mmDCCG_PLL1_PLL_DISPCLK_CURRENT_DTO_PHASE 0x171F
+#define mmDCCG_PLL1_PLL_DISPCLK_DTO_CNTL 0x171E
+#define mmDCCG_PLL1_PLL_DS_CNTL 0x1715
+#define mmDCCG_PLL1_PLL_FB_DIV 0x1711
+#define mmDCCG_PLL1_PLL_IDCLK_CNTL 0x1716
+#define mmDCCG_PLL1_PLL_POST_DIV 0x1712
+#define mmDCCG_PLL1_PLL_REF_DIV 0x1710
+#define mmDCCG_PLL1_PLL_SS_AMOUNT_DSFRAC 0x1713
+#define mmDCCG_PLL1_PLL_SS_CNTL 0x1714
+#define mmDCCG_PLL1_PLL_UNLOCK_DETECT_CNTL 0x171A
+#define mmDCCG_PLL1_PLL_UPDATE_CNTL 0x171D
+#define mmDCCG_PLL1_PLL_UPDATE_LOCK 0x171C
+#define mmDCCG_PLL1_PLL_VREG_CNTL 0x1719
+#define mmDCCG_PLL2_PLL_ANALOG 0x1728
+#define mmDCCG_PLL2_PLL_CNTL 0x1727
+#define mmDCCG_PLL2_PLL_DEBUG_CNTL 0x172B
+#define mmDCCG_PLL2_PLL_DISPCLK_CURRENT_DTO_PHASE 0x172F
+#define mmDCCG_PLL2_PLL_DISPCLK_DTO_CNTL 0x172E
+#define mmDCCG_PLL2_PLL_DS_CNTL 0x1725
+#define mmDCCG_PLL2_PLL_FB_DIV 0x1721
+#define mmDCCG_PLL2_PLL_IDCLK_CNTL 0x1726
+#define mmDCCG_PLL2_PLL_POST_DIV 0x1722
+#define mmDCCG_PLL2_PLL_REF_DIV 0x1720
+#define mmDCCG_PLL2_PLL_SS_AMOUNT_DSFRAC 0x1723
+#define mmDCCG_PLL2_PLL_SS_CNTL 0x1724
+#define mmDCCG_PLL2_PLL_UNLOCK_DETECT_CNTL 0x172A
+#define mmDCCG_PLL2_PLL_UPDATE_CNTL 0x172D
+#define mmDCCG_PLL2_PLL_UPDATE_LOCK 0x172C
+#define mmDCCG_PLL2_PLL_VREG_CNTL 0x1729
+#define mmDCCG_SOFT_RESET 0x015F
+#define mmDCCG_TEST_CLK_SEL 0x017E
+#define mmDCCG_TEST_DEBUG_DATA 0x017D
+#define mmDCCG_TEST_DEBUG_INDEX 0x017C
+#define mmDCCG_VPCLK_CNTL 0x031F
+#define mmDCDEBUG_BUS_CLK1_SEL 0x1860
+#define mmDCDEBUG_BUS_CLK2_SEL 0x1861
+#define mmDCDEBUG_BUS_CLK3_SEL 0x1862
+#define mmDCDEBUG_BUS_CLK4_SEL 0x1863
+#define mmDCDEBUG_OUT_CNTL 0x186B
+#define mmDCDEBUG_OUT_DATA 0x186E
+#define mmDCDEBUG_OUT_PIN_OVERRIDE 0x186A
+#define mmDC_DMCU_SCRATCH 0x1618
+#define mmDC_DVODATA_CONFIG 0x1905
+#define mmDCFE0_SOFT_RESET 0x0158
+#define mmDCFE1_SOFT_RESET 0x0159
+#define mmDCFE2_SOFT_RESET 0x015A
+#define mmDCFE3_SOFT_RESET 0x015B
+#define mmDCFE4_SOFT_RESET 0x015C
+#define mmDCFE5_SOFT_RESET 0x015D
+#define mmDCFE_DBG_SEL 0x1B7E
+#define mmDCFE_MEM_LIGHT_SLEEP_CNTL 0x1B7F
+#define mmDC_GENERICA 0x1900
+#define mmDC_GENERICB 0x1901
+#define mmDC_GPIO_DDC1_A 0x194D
+#define mmDC_GPIO_DDC1_EN 0x194E
+#define mmDC_GPIO_DDC1_MASK 0x194C
+#define mmDC_GPIO_DDC1_Y 0x194F
+#define mmDC_GPIO_DDC2_A 0x1951
+#define mmDC_GPIO_DDC2_EN 0x1952
+#define mmDC_GPIO_DDC2_MASK 0x1950
+#define mmDC_GPIO_DDC2_Y 0x1953
+#define mmDC_GPIO_DDC3_A 0x1955
+#define mmDC_GPIO_DDC3_EN 0x1956
+#define mmDC_GPIO_DDC3_MASK 0x1954
+#define mmDC_GPIO_DDC3_Y 0x1957
+#define mmDC_GPIO_DDC4_A 0x1959
+#define mmDC_GPIO_DDC4_EN 0x195A
+#define mmDC_GPIO_DDC4_MASK 0x1958
+#define mmDC_GPIO_DDC4_Y 0x195B
+#define mmDC_GPIO_DDC5_A 0x195D
+#define mmDC_GPIO_DDC5_EN 0x195E
+#define mmDC_GPIO_DDC5_MASK 0x195C
+#define mmDC_GPIO_DDC5_Y 0x195F
+#define mmDC_GPIO_DDC6_A 0x1961
+#define mmDC_GPIO_DDC6_EN 0x1962
+#define mmDC_GPIO_DDC6_MASK 0x1960
+#define mmDC_GPIO_DDC6_Y 0x1963
+#define mmDC_GPIO_DDCVGA_A 0x1971
+#define mmDC_GPIO_DDCVGA_EN 0x1972
+#define mmDC_GPIO_DDCVGA_MASK 0x1970
+#define mmDC_GPIO_DDCVGA_Y 0x1973
+#define mmDC_GPIO_DEBUG 0x1904
+#define mmDC_GPIO_DVODATA_A 0x1949
+#define mmDC_GPIO_DVODATA_EN 0x194A
+#define mmDC_GPIO_DVODATA_MASK 0x1948
+#define mmDC_GPIO_DVODATA_Y 0x194B
+#define mmDC_GPIO_GENERIC_A 0x1945
+#define mmDC_GPIO_GENERIC_EN 0x1946
+#define mmDC_GPIO_GENERIC_MASK 0x1944
+#define mmDC_GPIO_GENERIC_Y 0x1947
+#define mmDC_GPIO_GENLK_A 0x1969
+#define mmDC_GPIO_GENLK_EN 0x196A
+#define mmDC_GPIO_GENLK_MASK 0x1968
+#define mmDC_GPIO_GENLK_Y 0x196B
+#define mmDC_GPIO_HPD_A 0x196D
+#define mmDC_GPIO_HPD_EN 0x196E
+#define mmDC_GPIO_HPD_MASK 0x196C
+#define mmDC_GPIO_HPD_Y 0x196F
+#define mmDC_GPIO_I2CPAD_A 0x1975
+#define mmDC_GPIO_I2CPAD_EN 0x1976
+#define mmDC_GPIO_I2CPAD_MASK 0x1974
+#define mmDC_GPIO_I2CPAD_STRENGTH 0x197A
+#define mmDC_GPIO_I2CPAD_Y 0x1977
+#define mmDC_GPIO_PAD_STRENGTH_1 0x1978
+#define mmDC_GPIO_PAD_STRENGTH_2 0x1979
+#define mmDC_GPIO_PWRSEQ_A 0x1941
+#define mmDC_GPIO_PWRSEQ_EN 0x1942
+#define mmDC_GPIO_PWRSEQ_MASK 0x1940
+#define mmDC_GPIO_PWRSEQ_Y 0x1943
+#define mmDC_GPIO_SYNCA_A 0x1965
+#define mmDC_GPIO_SYNCA_EN 0x1966
+#define mmDC_GPIO_SYNCA_MASK 0x1964
+#define mmDC_GPIO_SYNCA_Y 0x1967
+#define mmDC_GPU_TIMER_READ 0x1929
+#define mmDC_GPU_TIMER_READ_CNTL 0x192A
+#define mmDC_GPU_TIMER_START_POSITION_P_FLIP 0x1928
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE 0x1927
+#define mmDC_HPD1_CONTROL 0x1809
+#define mmDC_HPD1_FAST_TRAIN_CNTL 0x1864
+#define mmDC_HPD1_INT_CONTROL 0x1808
+#define mmDC_HPD1_INT_STATUS 0x1807
+#define mmDC_HPD1_TOGGLE_FILT_CNTL 0x18BC
+#define mmDC_HPD2_CONTROL 0x180C
+#define mmDC_HPD2_FAST_TRAIN_CNTL 0x1865
+#define mmDC_HPD2_INT_CONTROL 0x180B
+#define mmDC_HPD2_INT_STATUS 0x180A
+#define mmDC_HPD2_TOGGLE_FILT_CNTL 0x18BD
+#define mmDC_HPD3_CONTROL 0x180F
+#define mmDC_HPD3_FAST_TRAIN_CNTL 0x1866
+#define mmDC_HPD3_INT_CONTROL 0x180E
+#define mmDC_HPD3_INT_STATUS 0x180D
+#define mmDC_HPD3_TOGGLE_FILT_CNTL 0x18BE
+#define mmDC_HPD4_CONTROL 0x1812
+#define mmDC_HPD4_FAST_TRAIN_CNTL 0x1867
+#define mmDC_HPD4_INT_CONTROL 0x1811
+#define mmDC_HPD4_INT_STATUS 0x1810
+#define mmDC_HPD4_TOGGLE_FILT_CNTL 0x18FC
+#define mmDC_HPD5_CONTROL 0x1815
+#define mmDC_HPD5_FAST_TRAIN_CNTL 0x1868
+#define mmDC_HPD5_INT_CONTROL 0x1814
+#define mmDC_HPD5_INT_STATUS 0x1813
+#define mmDC_HPD5_TOGGLE_FILT_CNTL 0x18FD
+#define mmDC_HPD6_CONTROL 0x1818
+#define mmDC_HPD6_FAST_TRAIN_CNTL 0x1869
+#define mmDC_HPD6_INT_CONTROL 0x1817
+#define mmDC_HPD6_INT_STATUS 0x1816
+#define mmDC_HPD6_TOGGLE_FILT_CNTL 0x18FE
+#define mmDC_I2C_ARBITRATION 0x181A
+#define mmDC_I2C_CONTROL 0x1819
+#define mmDC_I2C_DATA 0x1833
+#define mmDC_I2C_DDC1_HW_STATUS 0x181D
+#define mmDC_I2C_DDC1_SETUP 0x1824
+#define mmDC_I2C_DDC1_SPEED 0x1823
+#define mmDC_I2C_DDC2_HW_STATUS 0x181E
+#define mmDC_I2C_DDC2_SETUP 0x1826
+#define mmDC_I2C_DDC2_SPEED 0x1825
+#define mmDC_I2C_DDC3_HW_STATUS 0x181F
+#define mmDC_I2C_DDC3_SETUP 0x1828
+#define mmDC_I2C_DDC3_SPEED 0x1827
+#define mmDC_I2C_DDC4_HW_STATUS 0x1820
+#define mmDC_I2C_DDC4_SETUP 0x182A
+#define mmDC_I2C_DDC4_SPEED 0x1829
+#define mmDC_I2C_DDC5_HW_STATUS 0x1821
+#define mmDC_I2C_DDC5_SETUP 0x182C
+#define mmDC_I2C_DDC5_SPEED 0x182B
+#define mmDC_I2C_DDC6_HW_STATUS 0x1822
+#define mmDC_I2C_DDC6_SETUP 0x182E
+#define mmDC_I2C_DDC6_SPEED 0x182D
+#define mmDC_I2C_DDCVGA_HW_STATUS 0x1855
+#define mmDC_I2C_DDCVGA_SETUP 0x1857
+#define mmDC_I2C_DDCVGA_SPEED 0x1856
+#define mmDC_I2C_EDID_DETECT_CTRL 0x186F
+#define mmDC_I2C_INTERRUPT_CONTROL 0x181B
+#define mmDC_I2C_SW_STATUS 0x181C
+#define mmDC_I2C_TRANSACTION0 0x182F
+#define mmDC_I2C_TRANSACTION1 0x1830
+#define mmDC_I2C_TRANSACTION2 0x1831
+#define mmDC_I2C_TRANSACTION3 0x1832
+#define mmDCI_CLK_CNTL 0x031E
+#define mmDCI_CLK_RAMP_CNTL 0x0324
+#define mmDCI_DEBUG_CONFIG 0x0323
+#define mmDCI_MEM_PWR_CNTL 0x0326
+#define mmDCI_MEM_PWR_STATE 0x031B
+#define mmDCI_MEM_PWR_STATE2 0x0322
+#define mmDCIO_DEBUG 0x192E
+#define mmDCIO_GSL0_CNTL 0x1924
+#define mmDCIO_GSL1_CNTL 0x1925
+#define mmDCIO_GSL2_CNTL 0x1926
+#define mmDCIO_GSL_GENLK_PAD_CNTL 0x1922
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL 0x1923
+#define mmDCIO_IMPCAL_CNTL_AB 0x190D
+#define mmDCIO_IMPCAL_CNTL_CD 0x1911
+#define mmDCIO_IMPCAL_CNTL_EF 0x1915
+#define mmDCIO_TEST_DEBUG_DATA 0x1930
+#define mmDCIO_TEST_DEBUG_INDEX 0x192F
+#define mmDCIO_UNIPHY0_UNIPHY_ANG_BIST_CNTL 0x198C
+#define mmDCIO_UNIPHY0_UNIPHY_CHANNEL_XBAR_CNTL 0x198E
+#define mmDCIO_UNIPHY0_UNIPHY_DATA_SYNCHRONIZATION 0x198A
+#define mmDCIO_UNIPHY0_UNIPHY_LINK_CNTL 0x198D
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_CONTROL1 0x1986
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_CONTROL2 0x1987
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_FBDIV 0x1985
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_SS_CNTL 0x1989
+#define mmDCIO_UNIPHY0_UNIPHY_PLL_SS_STEP_SIZE 0x1988
+#define mmDCIO_UNIPHY0_UNIPHY_POWER_CONTROL 0x1984
+#define mmDCIO_UNIPHY0_UNIPHY_REG_TEST_OUTPUT 0x198B
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL1 0x1980
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL2 0x1981
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL3 0x1982
+#define mmDCIO_UNIPHY0_UNIPHY_TX_CONTROL4 0x1983
+#define mmDCIO_UNIPHY1_UNIPHY_ANG_BIST_CNTL 0x199C
+#define mmDCIO_UNIPHY1_UNIPHY_CHANNEL_XBAR_CNTL 0x199E
+#define mmDCIO_UNIPHY1_UNIPHY_DATA_SYNCHRONIZATION 0x199A
+#define mmDCIO_UNIPHY1_UNIPHY_LINK_CNTL 0x199D
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_CONTROL1 0x1996
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_CONTROL2 0x1997
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_FBDIV 0x1995
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_SS_CNTL 0x1999
+#define mmDCIO_UNIPHY1_UNIPHY_PLL_SS_STEP_SIZE 0x1998
+#define mmDCIO_UNIPHY1_UNIPHY_POWER_CONTROL 0x1994
+#define mmDCIO_UNIPHY1_UNIPHY_REG_TEST_OUTPUT 0x199B
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL1 0x1990
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL2 0x1991
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL3 0x1992
+#define mmDCIO_UNIPHY1_UNIPHY_TX_CONTROL4 0x1993
+#define mmDCIO_UNIPHY2_UNIPHY_ANG_BIST_CNTL 0x19AC
+#define mmDCIO_UNIPHY2_UNIPHY_CHANNEL_XBAR_CNTL 0x19AE
+#define mmDCIO_UNIPHY2_UNIPHY_DATA_SYNCHRONIZATION 0x19AA
+#define mmDCIO_UNIPHY2_UNIPHY_LINK_CNTL 0x19AD
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_CONTROL1 0x19A6
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_CONTROL2 0x19A7
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_FBDIV 0x19A5
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_SS_CNTL 0x19A9
+#define mmDCIO_UNIPHY2_UNIPHY_PLL_SS_STEP_SIZE 0x19A8
+#define mmDCIO_UNIPHY2_UNIPHY_POWER_CONTROL 0x19A4
+#define mmDCIO_UNIPHY2_UNIPHY_REG_TEST_OUTPUT 0x19AB
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL1 0x19A0
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL2 0x19A1
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL3 0x19A2
+#define mmDCIO_UNIPHY2_UNIPHY_TX_CONTROL4 0x19A3
+#define mmDCIO_UNIPHY3_UNIPHY_ANG_BIST_CNTL 0x19BC
+#define mmDCIO_UNIPHY3_UNIPHY_CHANNEL_XBAR_CNTL 0x19BE
+#define mmDCIO_UNIPHY3_UNIPHY_DATA_SYNCHRONIZATION 0x19BA
+#define mmDCIO_UNIPHY3_UNIPHY_LINK_CNTL 0x19BD
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_CONTROL1 0x19B6
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_CONTROL2 0x19B7
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_FBDIV 0x19B5
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_SS_CNTL 0x19B9
+#define mmDCIO_UNIPHY3_UNIPHY_PLL_SS_STEP_SIZE 0x19B8
+#define mmDCIO_UNIPHY3_UNIPHY_POWER_CONTROL 0x19B4
+#define mmDCIO_UNIPHY3_UNIPHY_REG_TEST_OUTPUT 0x19BB
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL1 0x19B0
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL2 0x19B1
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL3 0x19B2
+#define mmDCIO_UNIPHY3_UNIPHY_TX_CONTROL4 0x19B3
+#define mmDCIO_UNIPHY4_UNIPHY_ANG_BIST_CNTL 0x19CC
+#define mmDCIO_UNIPHY4_UNIPHY_CHANNEL_XBAR_CNTL 0x19CE
+#define mmDCIO_UNIPHY4_UNIPHY_DATA_SYNCHRONIZATION 0x19CA
+#define mmDCIO_UNIPHY4_UNIPHY_LINK_CNTL 0x19CD
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_CONTROL1 0x19C6
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_CONTROL2 0x19C7
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_FBDIV 0x19C5
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_SS_CNTL 0x19C9
+#define mmDCIO_UNIPHY4_UNIPHY_PLL_SS_STEP_SIZE 0x19C8
+#define mmDCIO_UNIPHY4_UNIPHY_POWER_CONTROL 0x19C4
+#define mmDCIO_UNIPHY4_UNIPHY_REG_TEST_OUTPUT 0x19CB
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL1 0x19C0
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL2 0x19C1
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL3 0x19C2
+#define mmDCIO_UNIPHY4_UNIPHY_TX_CONTROL4 0x19C3
+#define mmDCIO_UNIPHY5_UNIPHY_ANG_BIST_CNTL 0x19DC
+#define mmDCIO_UNIPHY5_UNIPHY_CHANNEL_XBAR_CNTL 0x19DE
+#define mmDCIO_UNIPHY5_UNIPHY_DATA_SYNCHRONIZATION 0x19DA
+#define mmDCIO_UNIPHY5_UNIPHY_LINK_CNTL 0x19DD
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_CONTROL1 0x19D6
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_CONTROL2 0x19D7
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_FBDIV 0x19D5
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_SS_CNTL 0x19D9
+#define mmDCIO_UNIPHY5_UNIPHY_PLL_SS_STEP_SIZE 0x19D8
+#define mmDCIO_UNIPHY5_UNIPHY_POWER_CONTROL 0x19D4
+#define mmDCIO_UNIPHY5_UNIPHY_REG_TEST_OUTPUT 0x19DB
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL1 0x19D0
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL2 0x19D1
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL3 0x19D2
+#define mmDCIO_UNIPHY5_UNIPHY_TX_CONTROL4 0x19D3
+#define mmDCI_SOFT_RESET 0x015E
+#define mmDCI_TEST_DEBUG_DATA 0x0321
+#define mmDCI_TEST_DEBUG_INDEX 0x0320
+#define mmDC_LUT_30_COLOR 0x1A7C
+#define mmDC_LUT_AUTOFILL 0x1A7F
+#define mmDC_LUT_BLACK_OFFSET_BLUE 0x1A81
+#define mmDC_LUT_BLACK_OFFSET_GREEN 0x1A82
+#define mmDC_LUT_BLACK_OFFSET_RED 0x1A83
+#define mmDC_LUT_CONTROL 0x1A80
+#define mmDC_LUT_PWL_DATA 0x1A7B
+#define mmDC_LUT_RW_INDEX 0x1A79
+#define mmDC_LUT_RW_MODE 0x1A78
+#define mmDC_LUT_SEQ_COLOR 0x1A7A
+#define mmDC_LUT_VGA_ACCESS_ENABLE 0x1A7D
+#define mmDC_LUT_WHITE_OFFSET_BLUE 0x1A84
+#define mmDC_LUT_WHITE_OFFSET_GREEN 0x1A85
+#define mmDC_LUT_WHITE_OFFSET_RED 0x1A86
+#define mmDC_LUT_WRITE_EN_MASK 0x1A7E
+#define mmDC_MVP_LB_CONTROL 0x1ADB
+#define mmDCO_CLK_CNTL 0x192B
+#define mmDCO_CLK_RAMP_CNTL 0x192C
+#define mmDCO_LIGHT_SLEEP_DIS 0x1907
+#define mmDCO_MEM_POWER_STATE 0x1906
+#define mmDCO_SOFT_RESET 0x0167
+#define mmDCP0_COMM_MATRIXA_TRANS_C11_C12 0x1A43
+#define mmDCP0_COMM_MATRIXA_TRANS_C13_C14 0x1A44
+#define mmDCP0_COMM_MATRIXA_TRANS_C21_C22 0x1A45
+#define mmDCP0_COMM_MATRIXA_TRANS_C23_C24 0x1A46
+#define mmDCP0_COMM_MATRIXA_TRANS_C31_C32 0x1A47
+#define mmDCP0_COMM_MATRIXA_TRANS_C33_C34 0x1A48
+#define mmDCP0_COMM_MATRIXB_TRANS_C11_C12 0x1A49
+#define mmDCP0_COMM_MATRIXB_TRANS_C13_C14 0x1A4A
+#define mmDCP0_COMM_MATRIXB_TRANS_C21_C22 0x1A4B
+#define mmDCP0_COMM_MATRIXB_TRANS_C23_C24 0x1A4C
+#define mmDCP0_COMM_MATRIXB_TRANS_C31_C32 0x1A4D
+#define mmDCP0_COMM_MATRIXB_TRANS_C33_C34 0x1A4E
+#define mmDCP0_CUR_COLOR1 0x1A6C
+#define mmDCP0_CUR_COLOR2 0x1A6D
+#define mmDCP0_CUR_CONTROL 0x1A66
+#define mmDCP0_CUR_HOT_SPOT 0x1A6B
+#define mmDCP0_CUR_POSITION 0x1A6A
+#define mmDCP0_CUR_REQUEST_FILTER_CNTL 0x1A99
+#define mmDCP0_CUR_SIZE 0x1A68
+#define mmDCP0_CUR_SURFACE_ADDRESS 0x1A67
+#define mmDCP0_CUR_SURFACE_ADDRESS_HIGH 0x1A69
+#define mmDCP0_CUR_UPDATE 0x1A6E
+#define mmDCP0_DC_LUT_30_COLOR 0x1A7C
+#define mmDCP0_DC_LUT_AUTOFILL 0x1A7F
+#define mmDCP0_DC_LUT_BLACK_OFFSET_BLUE 0x1A81
+#define mmDCP0_DC_LUT_BLACK_OFFSET_GREEN 0x1A82
+#define mmDCP0_DC_LUT_BLACK_OFFSET_RED 0x1A83
+#define mmDCP0_DC_LUT_CONTROL 0x1A80
+#define mmDCP0_DC_LUT_PWL_DATA 0x1A7B
+#define mmDCP0_DC_LUT_RW_INDEX 0x1A79
+#define mmDCP0_DC_LUT_RW_MODE 0x1A78
+#define mmDCP0_DC_LUT_SEQ_COLOR 0x1A7A
+#define mmDCP0_DC_LUT_VGA_ACCESS_ENABLE 0x1A7D
+#define mmDCP0_DC_LUT_WHITE_OFFSET_BLUE 0x1A84
+#define mmDCP0_DC_LUT_WHITE_OFFSET_GREEN 0x1A85
+#define mmDCP0_DC_LUT_WHITE_OFFSET_RED 0x1A86
+#define mmDCP0_DC_LUT_WRITE_EN_MASK 0x1A7E
+#define mmDCP0_DCP_CRC_CONTROL 0x1A87
+#define mmDCP0_DCP_CRC_CURRENT 0x1A89
+#define mmDCP0_DCP_CRC_LAST 0x1A8B
+#define mmDCP0_DCP_CRC_MASK 0x1A88
+#define mmDCP0_DCP_DEBUG 0x1A8D
+#define mmDCP0_DCP_DEBUG2 0x1A98
+#define mmDCP0_DCP_FP_CONVERTED_FIELD 0x1A65
+#define mmDCP0_DCP_GSL_CONTROL 0x1A90
+#define mmDCP0_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1A91
+#define mmDCP0_DCP_RANDOM_SEEDS 0x1A61
+#define mmDCP0_DCP_SPATIAL_DITHER_CNTL 0x1A60
+#define mmDCP0_DCP_TEST_DEBUG_DATA 0x1A96
+#define mmDCP0_DCP_TEST_DEBUG_INDEX 0x1A95
+#define mmDCP0_DEGAMMA_CONTROL 0x1A58
+#define mmDCP0_DENORM_CONTROL 0x1A50
+#define mmDCP0_GAMUT_REMAP_C11_C12 0x1A5A
+#define mmDCP0_GAMUT_REMAP_C13_C14 0x1A5B
+#define mmDCP0_GAMUT_REMAP_C21_C22 0x1A5C
+#define mmDCP0_GAMUT_REMAP_C23_C24 0x1A5D
+#define mmDCP0_GAMUT_REMAP_C31_C32 0x1A5E
+#define mmDCP0_GAMUT_REMAP_C33_C34 0x1A5F
+#define mmDCP0_GAMUT_REMAP_CONTROL 0x1A59
+#define mmDCP0_GRPH_COMPRESS_PITCH 0x1A1A
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS 0x1A19
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1A1B
+#define mmDCP0_GRPH_CONTROL 0x1A01
+#define mmDCP0_GRPH_DFQ_CONTROL 0x1A14
+#define mmDCP0_GRPH_DFQ_STATUS 0x1A15
+#define mmDCP0_GRPH_ENABLE 0x1A00
+#define mmDCP0_GRPH_FLIP_CONTROL 0x1A12
+#define mmDCP0_GRPH_INTERRUPT_CONTROL 0x1A17
+#define mmDCP0_GRPH_INTERRUPT_STATUS 0x1A16
+#define mmDCP0_GRPH_LUT_10BIT_BYPASS 0x1A02
+#define mmDCP0_GRPH_PITCH 0x1A06
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS 0x1A04
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1A07
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS 0x1A05
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A08
+#define mmDCP0_GRPH_STEREOSYNC_FLIP 0x1A97
+#define mmDCP0_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1A18
+#define mmDCP0_GRPH_SURFACE_ADDRESS_INUSE 0x1A13
+#define mmDCP0_GRPH_SURFACE_OFFSET_X 0x1A09
+#define mmDCP0_GRPH_SURFACE_OFFSET_Y 0x1A0A
+#define mmDCP0_GRPH_SWAP_CNTL 0x1A03
+#define mmDCP0_GRPH_UPDATE 0x1A11
+#define mmDCP0_GRPH_X_END 0x1A0D
+#define mmDCP0_GRPH_X_START 0x1A0B
+#define mmDCP0_GRPH_Y_END 0x1A0E
+#define mmDCP0_GRPH_Y_START 0x1A0C
+#define mmDCP0_INPUT_CSC_C11_C12 0x1A36
+#define mmDCP0_INPUT_CSC_C13_C14 0x1A37
+#define mmDCP0_INPUT_CSC_C21_C22 0x1A38
+#define mmDCP0_INPUT_CSC_C23_C24 0x1A39
+#define mmDCP0_INPUT_CSC_C31_C32 0x1A3A
+#define mmDCP0_INPUT_CSC_C33_C34 0x1A3B
+#define mmDCP0_INPUT_CSC_CONTROL 0x1A35
+#define mmDCP0_INPUT_GAMMA_CONTROL 0x1A10
+#define mmDCP0_KEY_CONTROL 0x1A53
+#define mmDCP0_KEY_RANGE_ALPHA 0x1A54
+#define mmDCP0_KEY_RANGE_BLUE 0x1A57
+#define mmDCP0_KEY_RANGE_GREEN 0x1A56
+#define mmDCP0_KEY_RANGE_RED 0x1A55
+#define mmDCP0_OUTPUT_CSC_C11_C12 0x1A3D
+#define mmDCP0_OUTPUT_CSC_C13_C14 0x1A3E
+#define mmDCP0_OUTPUT_CSC_C21_C22 0x1A3F
+#define mmDCP0_OUTPUT_CSC_C23_C24 0x1A40
+#define mmDCP0_OUTPUT_CSC_C31_C32 0x1A41
+#define mmDCP0_OUTPUT_CSC_C33_C34 0x1A42
+#define mmDCP0_OUTPUT_CSC_CONTROL 0x1A3C
+#define mmDCP0_OUT_ROUND_CONTROL 0x1A51
+#define mmDCP0_OVL_CONTROL1 0x1A1D
+#define mmDCP0_OVL_CONTROL2 0x1A1E
+#define mmDCP0_OVL_DFQ_CONTROL 0x1A29
+#define mmDCP0_OVL_DFQ_STATUS 0x1A2A
+#define mmDCP0_OVL_ENABLE 0x1A1C
+#define mmDCP0_OVL_END 0x1A26
+#define mmDCP0_OVL_PITCH 0x1A21
+#define mmDCP0_OVLSCL_EDGE_PIXEL_CNTL 0x1A2C
+#define mmDCP0_OVL_SECONDARY_SURFACE_ADDRESS 0x1A92
+#define mmDCP0_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A94
+#define mmDCP0_OVL_START 0x1A25
+#define mmDCP0_OVL_STEREOSYNC_FLIP 0x1A93
+#define mmDCP0_OVL_SURFACE_ADDRESS 0x1A20
+#define mmDCP0_OVL_SURFACE_ADDRESS_HIGH 0x1A22
+#define mmDCP0_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x1A2B
+#define mmDCP0_OVL_SURFACE_ADDRESS_INUSE 0x1A28
+#define mmDCP0_OVL_SURFACE_OFFSET_X 0x1A23
+#define mmDCP0_OVL_SURFACE_OFFSET_Y 0x1A24
+#define mmDCP0_OVL_SWAP_CNTL 0x1A1F
+#define mmDCP0_OVL_UPDATE 0x1A27
+#define mmDCP0_PRESCALE_GRPH_CONTROL 0x1A2D
+#define mmDCP0_PRESCALE_OVL_CONTROL 0x1A31
+#define mmDCP0_PRESCALE_VALUES_GRPH_B 0x1A30
+#define mmDCP0_PRESCALE_VALUES_GRPH_G 0x1A2F
+#define mmDCP0_PRESCALE_VALUES_GRPH_R 0x1A2E
+#define mmDCP0_PRESCALE_VALUES_OVL_CB 0x1A32
+#define mmDCP0_PRESCALE_VALUES_OVL_CR 0x1A34
+#define mmDCP0_PRESCALE_VALUES_OVL_Y 0x1A33
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL1 0x1AA6
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL2 0x1AA7
+#define mmDCP0_REGAMMA_CNTLA_REGION_0_1 0x1AA8
+#define mmDCP0_REGAMMA_CNTLA_REGION_10_11 0x1AAD
+#define mmDCP0_REGAMMA_CNTLA_REGION_12_13 0x1AAE
+#define mmDCP0_REGAMMA_CNTLA_REGION_14_15 0x1AAF
+#define mmDCP0_REGAMMA_CNTLA_REGION_2_3 0x1AA9
+#define mmDCP0_REGAMMA_CNTLA_REGION_4_5 0x1AAA
+#define mmDCP0_REGAMMA_CNTLA_REGION_6_7 0x1AAB
+#define mmDCP0_REGAMMA_CNTLA_REGION_8_9 0x1AAC
+#define mmDCP0_REGAMMA_CNTLA_SLOPE_CNTL 0x1AA5
+#define mmDCP0_REGAMMA_CNTLA_START_CNTL 0x1AA4
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL1 0x1AB2
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL2 0x1AB3
+#define mmDCP0_REGAMMA_CNTLB_REGION_0_1 0x1AB4
+#define mmDCP0_REGAMMA_CNTLB_REGION_10_11 0x1AB9
+#define mmDCP0_REGAMMA_CNTLB_REGION_12_13 0x1ABA
+#define mmDCP0_REGAMMA_CNTLB_REGION_14_15 0x1ABB
+#define mmDCP0_REGAMMA_CNTLB_REGION_2_3 0x1AB5
+#define mmDCP0_REGAMMA_CNTLB_REGION_4_5 0x1AB6
+#define mmDCP0_REGAMMA_CNTLB_REGION_6_7 0x1AB7
+#define mmDCP0_REGAMMA_CNTLB_REGION_8_9 0x1AB8
+#define mmDCP0_REGAMMA_CNTLB_SLOPE_CNTL 0x1AB1
+#define mmDCP0_REGAMMA_CNTLB_START_CNTL 0x1AB0
+#define mmDCP0_REGAMMA_CONTROL 0x1AA0
+#define mmDCP0_REGAMMA_LUT_DATA 0x1AA2
+#define mmDCP0_REGAMMA_LUT_INDEX 0x1AA1
+#define mmDCP0_REGAMMA_LUT_WRITE_EN_MASK 0x1AA3
+#define mmDCP1_COMM_MATRIXA_TRANS_C11_C12 0x1D43
+#define mmDCP1_COMM_MATRIXA_TRANS_C13_C14 0x1D44
+#define mmDCP1_COMM_MATRIXA_TRANS_C21_C22 0x1D45
+#define mmDCP1_COMM_MATRIXA_TRANS_C23_C24 0x1D46
+#define mmDCP1_COMM_MATRIXA_TRANS_C31_C32 0x1D47
+#define mmDCP1_COMM_MATRIXA_TRANS_C33_C34 0x1D48
+#define mmDCP1_COMM_MATRIXB_TRANS_C11_C12 0x1D49
+#define mmDCP1_COMM_MATRIXB_TRANS_C13_C14 0x1D4A
+#define mmDCP1_COMM_MATRIXB_TRANS_C21_C22 0x1D4B
+#define mmDCP1_COMM_MATRIXB_TRANS_C23_C24 0x1D4C
+#define mmDCP1_COMM_MATRIXB_TRANS_C31_C32 0x1D4D
+#define mmDCP1_COMM_MATRIXB_TRANS_C33_C34 0x1D4E
+#define mmDCP1_CUR_COLOR1 0x1D6C
+#define mmDCP1_CUR_COLOR2 0x1D6D
+#define mmDCP1_CUR_CONTROL 0x1D66
+#define mmDCP1_CUR_HOT_SPOT 0x1D6B
+#define mmDCP1_CUR_POSITION 0x1D6A
+#define mmDCP1_CUR_REQUEST_FILTER_CNTL 0x1D99
+#define mmDCP1_CUR_SIZE 0x1D68
+#define mmDCP1_CUR_SURFACE_ADDRESS 0x1D67
+#define mmDCP1_CUR_SURFACE_ADDRESS_HIGH 0x1D69
+#define mmDCP1_CUR_UPDATE 0x1D6E
+#define mmDCP1_DC_LUT_30_COLOR 0x1D7C
+#define mmDCP1_DC_LUT_AUTOFILL 0x1D7F
+#define mmDCP1_DC_LUT_BLACK_OFFSET_BLUE 0x1D81
+#define mmDCP1_DC_LUT_BLACK_OFFSET_GREEN 0x1D82
+#define mmDCP1_DC_LUT_BLACK_OFFSET_RED 0x1D83
+#define mmDCP1_DC_LUT_CONTROL 0x1D80
+#define mmDCP1_DC_LUT_PWL_DATA 0x1D7B
+#define mmDCP1_DC_LUT_RW_INDEX 0x1D79
+#define mmDCP1_DC_LUT_RW_MODE 0x1D78
+#define mmDCP1_DC_LUT_SEQ_COLOR 0x1D7A
+#define mmDCP1_DC_LUT_VGA_ACCESS_ENABLE 0x1D7D
+#define mmDCP1_DC_LUT_WHITE_OFFSET_BLUE 0x1D84
+#define mmDCP1_DC_LUT_WHITE_OFFSET_GREEN 0x1D85
+#define mmDCP1_DC_LUT_WHITE_OFFSET_RED 0x1D86
+#define mmDCP1_DC_LUT_WRITE_EN_MASK 0x1D7E
+#define mmDCP1_DCP_CRC_CONTROL 0x1D87
+#define mmDCP1_DCP_CRC_CURRENT 0x1D89
+#define mmDCP1_DCP_CRC_LAST 0x1D8B
+#define mmDCP1_DCP_CRC_MASK 0x1D88
+#define mmDCP1_DCP_DEBUG 0x1D8D
+#define mmDCP1_DCP_DEBUG2 0x1D98
+#define mmDCP1_DCP_FP_CONVERTED_FIELD 0x1D65
+#define mmDCP1_DCP_GSL_CONTROL 0x1D90
+#define mmDCP1_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1D91
+#define mmDCP1_DCP_RANDOM_SEEDS 0x1D61
+#define mmDCP1_DCP_SPATIAL_DITHER_CNTL 0x1D60
+#define mmDCP1_DCP_TEST_DEBUG_DATA 0x1D96
+#define mmDCP1_DCP_TEST_DEBUG_INDEX 0x1D95
+#define mmDCP1_DEGAMMA_CONTROL 0x1D58
+#define mmDCP1_DENORM_CONTROL 0x1D50
+#define mmDCP1_GAMUT_REMAP_C11_C12 0x1D5A
+#define mmDCP1_GAMUT_REMAP_C13_C14 0x1D5B
+#define mmDCP1_GAMUT_REMAP_C21_C22 0x1D5C
+#define mmDCP1_GAMUT_REMAP_C23_C24 0x1D5D
+#define mmDCP1_GAMUT_REMAP_C31_C32 0x1D5E
+#define mmDCP1_GAMUT_REMAP_C33_C34 0x1D5F
+#define mmDCP1_GAMUT_REMAP_CONTROL 0x1D59
+#define mmDCP1_GRPH_COMPRESS_PITCH 0x1D1A
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS 0x1D19
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1D1B
+#define mmDCP1_GRPH_CONTROL 0x1D01
+#define mmDCP1_GRPH_DFQ_CONTROL 0x1D14
+#define mmDCP1_GRPH_DFQ_STATUS 0x1D15
+#define mmDCP1_GRPH_ENABLE 0x1D00
+#define mmDCP1_GRPH_FLIP_CONTROL 0x1D12
+#define mmDCP1_GRPH_INTERRUPT_CONTROL 0x1D17
+#define mmDCP1_GRPH_INTERRUPT_STATUS 0x1D16
+#define mmDCP1_GRPH_LUT_10BIT_BYPASS 0x1D02
+#define mmDCP1_GRPH_PITCH 0x1D06
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS 0x1D04
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1D07
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS 0x1D05
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1D08
+#define mmDCP1_GRPH_STEREOSYNC_FLIP 0x1D97
+#define mmDCP1_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1D18
+#define mmDCP1_GRPH_SURFACE_ADDRESS_INUSE 0x1D13
+#define mmDCP1_GRPH_SURFACE_OFFSET_X 0x1D09
+#define mmDCP1_GRPH_SURFACE_OFFSET_Y 0x1D0A
+#define mmDCP1_GRPH_SWAP_CNTL 0x1D03
+#define mmDCP1_GRPH_UPDATE 0x1D11
+#define mmDCP1_GRPH_X_END 0x1D0D
+#define mmDCP1_GRPH_X_START 0x1D0B
+#define mmDCP1_GRPH_Y_END 0x1D0E
+#define mmDCP1_GRPH_Y_START 0x1D0C
+#define mmDCP1_INPUT_CSC_C11_C12 0x1D36
+#define mmDCP1_INPUT_CSC_C13_C14 0x1D37
+#define mmDCP1_INPUT_CSC_C21_C22 0x1D38
+#define mmDCP1_INPUT_CSC_C23_C24 0x1D39
+#define mmDCP1_INPUT_CSC_C31_C32 0x1D3A
+#define mmDCP1_INPUT_CSC_C33_C34 0x1D3B
+#define mmDCP1_INPUT_CSC_CONTROL 0x1D35
+#define mmDCP1_INPUT_GAMMA_CONTROL 0x1D10
+#define mmDCP1_KEY_CONTROL 0x1D53
+#define mmDCP1_KEY_RANGE_ALPHA 0x1D54
+#define mmDCP1_KEY_RANGE_BLUE 0x1D57
+#define mmDCP1_KEY_RANGE_GREEN 0x1D56
+#define mmDCP1_KEY_RANGE_RED 0x1D55
+#define mmDCP1_OUTPUT_CSC_C11_C12 0x1D3D
+#define mmDCP1_OUTPUT_CSC_C13_C14 0x1D3E
+#define mmDCP1_OUTPUT_CSC_C21_C22 0x1D3F
+#define mmDCP1_OUTPUT_CSC_C23_C24 0x1D40
+#define mmDCP1_OUTPUT_CSC_C31_C32 0x1D41
+#define mmDCP1_OUTPUT_CSC_C33_C34 0x1D42
+#define mmDCP1_OUTPUT_CSC_CONTROL 0x1D3C
+#define mmDCP1_OUT_ROUND_CONTROL 0x1D51
+#define mmDCP1_OVL_CONTROL1 0x1D1D
+#define mmDCP1_OVL_CONTROL2 0x1D1E
+#define mmDCP1_OVL_DFQ_CONTROL 0x1D29
+#define mmDCP1_OVL_DFQ_STATUS 0x1D2A
+#define mmDCP1_OVL_ENABLE 0x1D1C
+#define mmDCP1_OVL_END 0x1D26
+#define mmDCP1_OVL_PITCH 0x1D21
+#define mmDCP1_OVLSCL_EDGE_PIXEL_CNTL 0x1D2C
+#define mmDCP1_OVL_SECONDARY_SURFACE_ADDRESS 0x1D92
+#define mmDCP1_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1D94
+#define mmDCP1_OVL_START 0x1D25
+#define mmDCP1_OVL_STEREOSYNC_FLIP 0x1D93
+#define mmDCP1_OVL_SURFACE_ADDRESS 0x1D20
+#define mmDCP1_OVL_SURFACE_ADDRESS_HIGH 0x1D22
+#define mmDCP1_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x1D2B
+#define mmDCP1_OVL_SURFACE_ADDRESS_INUSE 0x1D28
+#define mmDCP1_OVL_SURFACE_OFFSET_X 0x1D23
+#define mmDCP1_OVL_SURFACE_OFFSET_Y 0x1D24
+#define mmDCP1_OVL_SWAP_CNTL 0x1D1F
+#define mmDCP1_OVL_UPDATE 0x1D27
+#define mmDCP1_PRESCALE_GRPH_CONTROL 0x1D2D
+#define mmDCP1_PRESCALE_OVL_CONTROL 0x1D31
+#define mmDCP1_PRESCALE_VALUES_GRPH_B 0x1D30
+#define mmDCP1_PRESCALE_VALUES_GRPH_G 0x1D2F
+#define mmDCP1_PRESCALE_VALUES_GRPH_R 0x1D2E
+#define mmDCP1_PRESCALE_VALUES_OVL_CB 0x1D32
+#define mmDCP1_PRESCALE_VALUES_OVL_CR 0x1D34
+#define mmDCP1_PRESCALE_VALUES_OVL_Y 0x1D33
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL1 0x1DA6
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL2 0x1DA7
+#define mmDCP1_REGAMMA_CNTLA_REGION_0_1 0x1DA8
+#define mmDCP1_REGAMMA_CNTLA_REGION_10_11 0x1DAD
+#define mmDCP1_REGAMMA_CNTLA_REGION_12_13 0x1DAE
+#define mmDCP1_REGAMMA_CNTLA_REGION_14_15 0x1DAF
+#define mmDCP1_REGAMMA_CNTLA_REGION_2_3 0x1DA9
+#define mmDCP1_REGAMMA_CNTLA_REGION_4_5 0x1DAA
+#define mmDCP1_REGAMMA_CNTLA_REGION_6_7 0x1DAB
+#define mmDCP1_REGAMMA_CNTLA_REGION_8_9 0x1DAC
+#define mmDCP1_REGAMMA_CNTLA_SLOPE_CNTL 0x1DA5
+#define mmDCP1_REGAMMA_CNTLA_START_CNTL 0x1DA4
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL1 0x1DB2
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL2 0x1DB3
+#define mmDCP1_REGAMMA_CNTLB_REGION_0_1 0x1DB4
+#define mmDCP1_REGAMMA_CNTLB_REGION_10_11 0x1DB9
+#define mmDCP1_REGAMMA_CNTLB_REGION_12_13 0x1DBA
+#define mmDCP1_REGAMMA_CNTLB_REGION_14_15 0x1DBB
+#define mmDCP1_REGAMMA_CNTLB_REGION_2_3 0x1DB5
+#define mmDCP1_REGAMMA_CNTLB_REGION_4_5 0x1DB6
+#define mmDCP1_REGAMMA_CNTLB_REGION_6_7 0x1DB7
+#define mmDCP1_REGAMMA_CNTLB_REGION_8_9 0x1DB8
+#define mmDCP1_REGAMMA_CNTLB_SLOPE_CNTL 0x1DB1
+#define mmDCP1_REGAMMA_CNTLB_START_CNTL 0x1DB0
+#define mmDCP1_REGAMMA_CONTROL 0x1DA0
+#define mmDCP1_REGAMMA_LUT_DATA 0x1DA2
+#define mmDCP1_REGAMMA_LUT_INDEX 0x1DA1
+#define mmDCP1_REGAMMA_LUT_WRITE_EN_MASK 0x1DA3
+#define mmDCP2_COMM_MATRIXA_TRANS_C11_C12 0x4043
+#define mmDCP2_COMM_MATRIXA_TRANS_C13_C14 0x4044
+#define mmDCP2_COMM_MATRIXA_TRANS_C21_C22 0x4045
+#define mmDCP2_COMM_MATRIXA_TRANS_C23_C24 0x4046
+#define mmDCP2_COMM_MATRIXA_TRANS_C31_C32 0x4047
+#define mmDCP2_COMM_MATRIXA_TRANS_C33_C34 0x4048
+#define mmDCP2_COMM_MATRIXB_TRANS_C11_C12 0x4049
+#define mmDCP2_COMM_MATRIXB_TRANS_C13_C14 0x404A
+#define mmDCP2_COMM_MATRIXB_TRANS_C21_C22 0x404B
+#define mmDCP2_COMM_MATRIXB_TRANS_C23_C24 0x404C
+#define mmDCP2_COMM_MATRIXB_TRANS_C31_C32 0x404D
+#define mmDCP2_COMM_MATRIXB_TRANS_C33_C34 0x404E
+#define mmDCP2_CUR_COLOR1 0x406C
+#define mmDCP2_CUR_COLOR2 0x406D
+#define mmDCP2_CUR_CONTROL 0x4066
+#define mmDCP2_CUR_HOT_SPOT 0x406B
+#define mmDCP2_CUR_POSITION 0x406A
+#define mmDCP2_CUR_REQUEST_FILTER_CNTL 0x4099
+#define mmDCP2_CUR_SIZE 0x4068
+#define mmDCP2_CUR_SURFACE_ADDRESS 0x4067
+#define mmDCP2_CUR_SURFACE_ADDRESS_HIGH 0x4069
+#define mmDCP2_CUR_UPDATE 0x406E
+#define mmDCP2_DC_LUT_30_COLOR 0x407C
+#define mmDCP2_DC_LUT_AUTOFILL 0x407F
+#define mmDCP2_DC_LUT_BLACK_OFFSET_BLUE 0x4081
+#define mmDCP2_DC_LUT_BLACK_OFFSET_GREEN 0x4082
+#define mmDCP2_DC_LUT_BLACK_OFFSET_RED 0x4083
+#define mmDCP2_DC_LUT_CONTROL 0x4080
+#define mmDCP2_DC_LUT_PWL_DATA 0x407B
+#define mmDCP2_DC_LUT_RW_INDEX 0x4079
+#define mmDCP2_DC_LUT_RW_MODE 0x4078
+#define mmDCP2_DC_LUT_SEQ_COLOR 0x407A
+#define mmDCP2_DC_LUT_VGA_ACCESS_ENABLE 0x407D
+#define mmDCP2_DC_LUT_WHITE_OFFSET_BLUE 0x4084
+#define mmDCP2_DC_LUT_WHITE_OFFSET_GREEN 0x4085
+#define mmDCP2_DC_LUT_WHITE_OFFSET_RED 0x4086
+#define mmDCP2_DC_LUT_WRITE_EN_MASK 0x407E
+#define mmDCP2_DCP_CRC_CONTROL 0x4087
+#define mmDCP2_DCP_CRC_CURRENT 0x4089
+#define mmDCP2_DCP_CRC_LAST 0x408B
+#define mmDCP2_DCP_CRC_MASK 0x4088
+#define mmDCP2_DCP_DEBUG 0x408D
+#define mmDCP2_DCP_DEBUG2 0x4098
+#define mmDCP2_DCP_FP_CONVERTED_FIELD 0x4065
+#define mmDCP2_DCP_GSL_CONTROL 0x4090
+#define mmDCP2_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4091
+#define mmDCP2_DCP_RANDOM_SEEDS 0x4061
+#define mmDCP2_DCP_SPATIAL_DITHER_CNTL 0x4060
+#define mmDCP2_DCP_TEST_DEBUG_DATA 0x4096
+#define mmDCP2_DCP_TEST_DEBUG_INDEX 0x4095
+#define mmDCP2_DEGAMMA_CONTROL 0x4058
+#define mmDCP2_DENORM_CONTROL 0x4050
+#define mmDCP2_GAMUT_REMAP_C11_C12 0x405A
+#define mmDCP2_GAMUT_REMAP_C13_C14 0x405B
+#define mmDCP2_GAMUT_REMAP_C21_C22 0x405C
+#define mmDCP2_GAMUT_REMAP_C23_C24 0x405D
+#define mmDCP2_GAMUT_REMAP_C31_C32 0x405E
+#define mmDCP2_GAMUT_REMAP_C33_C34 0x405F
+#define mmDCP2_GAMUT_REMAP_CONTROL 0x4059
+#define mmDCP2_GRPH_COMPRESS_PITCH 0x401A
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS 0x4019
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x401B
+#define mmDCP2_GRPH_CONTROL 0x4001
+#define mmDCP2_GRPH_DFQ_CONTROL 0x4014
+#define mmDCP2_GRPH_DFQ_STATUS 0x4015
+#define mmDCP2_GRPH_ENABLE 0x4000
+#define mmDCP2_GRPH_FLIP_CONTROL 0x4012
+#define mmDCP2_GRPH_INTERRUPT_CONTROL 0x4017
+#define mmDCP2_GRPH_INTERRUPT_STATUS 0x4016
+#define mmDCP2_GRPH_LUT_10BIT_BYPASS 0x4002
+#define mmDCP2_GRPH_PITCH 0x4006
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS 0x4004
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4007
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS 0x4005
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4008
+#define mmDCP2_GRPH_STEREOSYNC_FLIP 0x4097
+#define mmDCP2_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4018
+#define mmDCP2_GRPH_SURFACE_ADDRESS_INUSE 0x4013
+#define mmDCP2_GRPH_SURFACE_OFFSET_X 0x4009
+#define mmDCP2_GRPH_SURFACE_OFFSET_Y 0x400A
+#define mmDCP2_GRPH_SWAP_CNTL 0x4003
+#define mmDCP2_GRPH_UPDATE 0x4011
+#define mmDCP2_GRPH_X_END 0x400D
+#define mmDCP2_GRPH_X_START 0x400B
+#define mmDCP2_GRPH_Y_END 0x400E
+#define mmDCP2_GRPH_Y_START 0x400C
+#define mmDCP2_INPUT_CSC_C11_C12 0x4036
+#define mmDCP2_INPUT_CSC_C13_C14 0x4037
+#define mmDCP2_INPUT_CSC_C21_C22 0x4038
+#define mmDCP2_INPUT_CSC_C23_C24 0x4039
+#define mmDCP2_INPUT_CSC_C31_C32 0x403A
+#define mmDCP2_INPUT_CSC_C33_C34 0x403B
+#define mmDCP2_INPUT_CSC_CONTROL 0x4035
+#define mmDCP2_INPUT_GAMMA_CONTROL 0x4010
+#define mmDCP2_KEY_CONTROL 0x4053
+#define mmDCP2_KEY_RANGE_ALPHA 0x4054
+#define mmDCP2_KEY_RANGE_BLUE 0x4057
+#define mmDCP2_KEY_RANGE_GREEN 0x4056
+#define mmDCP2_KEY_RANGE_RED 0x4055
+#define mmDCP2_OUTPUT_CSC_C11_C12 0x403D
+#define mmDCP2_OUTPUT_CSC_C13_C14 0x403E
+#define mmDCP2_OUTPUT_CSC_C21_C22 0x403F
+#define mmDCP2_OUTPUT_CSC_C23_C24 0x4040
+#define mmDCP2_OUTPUT_CSC_C31_C32 0x4041
+#define mmDCP2_OUTPUT_CSC_C33_C34 0x4042
+#define mmDCP2_OUTPUT_CSC_CONTROL 0x403C
+#define mmDCP2_OUT_ROUND_CONTROL 0x4051
+#define mmDCP2_OVL_CONTROL1 0x401D
+#define mmDCP2_OVL_CONTROL2 0x401E
+#define mmDCP2_OVL_DFQ_CONTROL 0x4029
+#define mmDCP2_OVL_DFQ_STATUS 0x402A
+#define mmDCP2_OVL_ENABLE 0x401C
+#define mmDCP2_OVL_END 0x4026
+#define mmDCP2_OVL_PITCH 0x4021
+#define mmDCP2_OVLSCL_EDGE_PIXEL_CNTL 0x402C
+#define mmDCP2_OVL_SECONDARY_SURFACE_ADDRESS 0x4092
+#define mmDCP2_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4094
+#define mmDCP2_OVL_START 0x4025
+#define mmDCP2_OVL_STEREOSYNC_FLIP 0x4093
+#define mmDCP2_OVL_SURFACE_ADDRESS 0x4020
+#define mmDCP2_OVL_SURFACE_ADDRESS_HIGH 0x4022
+#define mmDCP2_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x402B
+#define mmDCP2_OVL_SURFACE_ADDRESS_INUSE 0x4028
+#define mmDCP2_OVL_SURFACE_OFFSET_X 0x4023
+#define mmDCP2_OVL_SURFACE_OFFSET_Y 0x4024
+#define mmDCP2_OVL_SWAP_CNTL 0x401F
+#define mmDCP2_OVL_UPDATE 0x4027
+#define mmDCP2_PRESCALE_GRPH_CONTROL 0x402D
+#define mmDCP2_PRESCALE_OVL_CONTROL 0x4031
+#define mmDCP2_PRESCALE_VALUES_GRPH_B 0x4030
+#define mmDCP2_PRESCALE_VALUES_GRPH_G 0x402F
+#define mmDCP2_PRESCALE_VALUES_GRPH_R 0x402E
+#define mmDCP2_PRESCALE_VALUES_OVL_CB 0x4032
+#define mmDCP2_PRESCALE_VALUES_OVL_CR 0x4034
+#define mmDCP2_PRESCALE_VALUES_OVL_Y 0x4033
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL1 0x40A6
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL2 0x40A7
+#define mmDCP2_REGAMMA_CNTLA_REGION_0_1 0x40A8
+#define mmDCP2_REGAMMA_CNTLA_REGION_10_11 0x40AD
+#define mmDCP2_REGAMMA_CNTLA_REGION_12_13 0x40AE
+#define mmDCP2_REGAMMA_CNTLA_REGION_14_15 0x40AF
+#define mmDCP2_REGAMMA_CNTLA_REGION_2_3 0x40A9
+#define mmDCP2_REGAMMA_CNTLA_REGION_4_5 0x40AA
+#define mmDCP2_REGAMMA_CNTLA_REGION_6_7 0x40AB
+#define mmDCP2_REGAMMA_CNTLA_REGION_8_9 0x40AC
+#define mmDCP2_REGAMMA_CNTLA_SLOPE_CNTL 0x40A5
+#define mmDCP2_REGAMMA_CNTLA_START_CNTL 0x40A4
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL1 0x40B2
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL2 0x40B3
+#define mmDCP2_REGAMMA_CNTLB_REGION_0_1 0x40B4
+#define mmDCP2_REGAMMA_CNTLB_REGION_10_11 0x40B9
+#define mmDCP2_REGAMMA_CNTLB_REGION_12_13 0x40BA
+#define mmDCP2_REGAMMA_CNTLB_REGION_14_15 0x40BB
+#define mmDCP2_REGAMMA_CNTLB_REGION_2_3 0x40B5
+#define mmDCP2_REGAMMA_CNTLB_REGION_4_5 0x40B6
+#define mmDCP2_REGAMMA_CNTLB_REGION_6_7 0x40B7
+#define mmDCP2_REGAMMA_CNTLB_REGION_8_9 0x40B8
+#define mmDCP2_REGAMMA_CNTLB_SLOPE_CNTL 0x40B1
+#define mmDCP2_REGAMMA_CNTLB_START_CNTL 0x40B0
+#define mmDCP2_REGAMMA_CONTROL 0x40A0
+#define mmDCP2_REGAMMA_LUT_DATA 0x40A2
+#define mmDCP2_REGAMMA_LUT_INDEX 0x40A1
+#define mmDCP2_REGAMMA_LUT_WRITE_EN_MASK 0x40A3
+#define mmDCP3_COMM_MATRIXA_TRANS_C11_C12 0x4343
+#define mmDCP3_COMM_MATRIXA_TRANS_C13_C14 0x4344
+#define mmDCP3_COMM_MATRIXA_TRANS_C21_C22 0x4345
+#define mmDCP3_COMM_MATRIXA_TRANS_C23_C24 0x4346
+#define mmDCP3_COMM_MATRIXA_TRANS_C31_C32 0x4347
+#define mmDCP3_COMM_MATRIXA_TRANS_C33_C34 0x4348
+#define mmDCP3_COMM_MATRIXB_TRANS_C11_C12 0x4349
+#define mmDCP3_COMM_MATRIXB_TRANS_C13_C14 0x434A
+#define mmDCP3_COMM_MATRIXB_TRANS_C21_C22 0x434B
+#define mmDCP3_COMM_MATRIXB_TRANS_C23_C24 0x434C
+#define mmDCP3_COMM_MATRIXB_TRANS_C31_C32 0x434D
+#define mmDCP3_COMM_MATRIXB_TRANS_C33_C34 0x434E
+#define mmDCP3_CUR_COLOR1 0x436C
+#define mmDCP3_CUR_COLOR2 0x436D
+#define mmDCP3_CUR_CONTROL 0x4366
+#define mmDCP3_CUR_HOT_SPOT 0x436B
+#define mmDCP3_CUR_POSITION 0x436A
+#define mmDCP3_CUR_REQUEST_FILTER_CNTL 0x4399
+#define mmDCP3_CUR_SIZE 0x4368
+#define mmDCP3_CUR_SURFACE_ADDRESS 0x4367
+#define mmDCP3_CUR_SURFACE_ADDRESS_HIGH 0x4369
+#define mmDCP3_CUR_UPDATE 0x436E
+#define mmDCP3_DC_LUT_30_COLOR 0x437C
+#define mmDCP3_DC_LUT_AUTOFILL 0x437F
+#define mmDCP3_DC_LUT_BLACK_OFFSET_BLUE 0x4381
+#define mmDCP3_DC_LUT_BLACK_OFFSET_GREEN 0x4382
+#define mmDCP3_DC_LUT_BLACK_OFFSET_RED 0x4383
+#define mmDCP3_DC_LUT_CONTROL 0x4380
+#define mmDCP3_DC_LUT_PWL_DATA 0x437B
+#define mmDCP3_DC_LUT_RW_INDEX 0x4379
+#define mmDCP3_DC_LUT_RW_MODE 0x4378
+#define mmDCP3_DC_LUT_SEQ_COLOR 0x437A
+#define mmDCP3_DC_LUT_VGA_ACCESS_ENABLE 0x437D
+#define mmDCP3_DC_LUT_WHITE_OFFSET_BLUE 0x4384
+#define mmDCP3_DC_LUT_WHITE_OFFSET_GREEN 0x4385
+#define mmDCP3_DC_LUT_WHITE_OFFSET_RED 0x4386
+#define mmDCP3_DC_LUT_WRITE_EN_MASK 0x437E
+#define mmDCP3_DCP_CRC_CONTROL 0x4387
+#define mmDCP3_DCP_CRC_CURRENT 0x4389
+#define mmDCP3_DCP_CRC_LAST 0x438B
+#define mmDCP3_DCP_CRC_MASK 0x4388
+#define mmDCP3_DCP_DEBUG 0x438D
+#define mmDCP3_DCP_DEBUG2 0x4398
+#define mmDCP3_DCP_FP_CONVERTED_FIELD 0x4365
+#define mmDCP3_DCP_GSL_CONTROL 0x4390
+#define mmDCP3_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4391
+#define mmDCP3_DCP_RANDOM_SEEDS 0x4361
+#define mmDCP3_DCP_SPATIAL_DITHER_CNTL 0x4360
+#define mmDCP3_DCP_TEST_DEBUG_DATA 0x4396
+#define mmDCP3_DCP_TEST_DEBUG_INDEX 0x4395
+#define mmDCP3_DEGAMMA_CONTROL 0x4358
+#define mmDCP3_DENORM_CONTROL 0x4350
+#define mmDCP3_GAMUT_REMAP_C11_C12 0x435A
+#define mmDCP3_GAMUT_REMAP_C13_C14 0x435B
+#define mmDCP3_GAMUT_REMAP_C21_C22 0x435C
+#define mmDCP3_GAMUT_REMAP_C23_C24 0x435D
+#define mmDCP3_GAMUT_REMAP_C31_C32 0x435E
+#define mmDCP3_GAMUT_REMAP_C33_C34 0x435F
+#define mmDCP3_GAMUT_REMAP_CONTROL 0x4359
+#define mmDCP3_GRPH_COMPRESS_PITCH 0x431A
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS 0x4319
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x431B
+#define mmDCP3_GRPH_CONTROL 0x4301
+#define mmDCP3_GRPH_DFQ_CONTROL 0x4314
+#define mmDCP3_GRPH_DFQ_STATUS 0x4315
+#define mmDCP3_GRPH_ENABLE 0x4300
+#define mmDCP3_GRPH_FLIP_CONTROL 0x4312
+#define mmDCP3_GRPH_INTERRUPT_CONTROL 0x4317
+#define mmDCP3_GRPH_INTERRUPT_STATUS 0x4316
+#define mmDCP3_GRPH_LUT_10BIT_BYPASS 0x4302
+#define mmDCP3_GRPH_PITCH 0x4306
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS 0x4304
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4307
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS 0x4305
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4308
+#define mmDCP3_GRPH_STEREOSYNC_FLIP 0x4397
+#define mmDCP3_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4318
+#define mmDCP3_GRPH_SURFACE_ADDRESS_INUSE 0x4313
+#define mmDCP3_GRPH_SURFACE_OFFSET_X 0x4309
+#define mmDCP3_GRPH_SURFACE_OFFSET_Y 0x430A
+#define mmDCP3_GRPH_SWAP_CNTL 0x4303
+#define mmDCP3_GRPH_UPDATE 0x4311
+#define mmDCP3_GRPH_X_END 0x430D
+#define mmDCP3_GRPH_X_START 0x430B
+#define mmDCP3_GRPH_Y_END 0x430E
+#define mmDCP3_GRPH_Y_START 0x430C
+#define mmDCP3_INPUT_CSC_C11_C12 0x4336
+#define mmDCP3_INPUT_CSC_C13_C14 0x4337
+#define mmDCP3_INPUT_CSC_C21_C22 0x4338
+#define mmDCP3_INPUT_CSC_C23_C24 0x4339
+#define mmDCP3_INPUT_CSC_C31_C32 0x433A
+#define mmDCP3_INPUT_CSC_C33_C34 0x433B
+#define mmDCP3_INPUT_CSC_CONTROL 0x4335
+#define mmDCP3_INPUT_GAMMA_CONTROL 0x4310
+#define mmDCP3_KEY_CONTROL 0x4353
+#define mmDCP3_KEY_RANGE_ALPHA 0x4354
+#define mmDCP3_KEY_RANGE_BLUE 0x4357
+#define mmDCP3_KEY_RANGE_GREEN 0x4356
+#define mmDCP3_KEY_RANGE_RED 0x4355
+#define mmDCP3_OUTPUT_CSC_C11_C12 0x433D
+#define mmDCP3_OUTPUT_CSC_C13_C14 0x433E
+#define mmDCP3_OUTPUT_CSC_C21_C22 0x433F
+#define mmDCP3_OUTPUT_CSC_C23_C24 0x4340
+#define mmDCP3_OUTPUT_CSC_C31_C32 0x4341
+#define mmDCP3_OUTPUT_CSC_C33_C34 0x4342
+#define mmDCP3_OUTPUT_CSC_CONTROL 0x433C
+#define mmDCP3_OUT_ROUND_CONTROL 0x4351
+#define mmDCP3_OVL_CONTROL1 0x431D
+#define mmDCP3_OVL_CONTROL2 0x431E
+#define mmDCP3_OVL_DFQ_CONTROL 0x4329
+#define mmDCP3_OVL_DFQ_STATUS 0x432A
+#define mmDCP3_OVL_ENABLE 0x431C
+#define mmDCP3_OVL_END 0x4326
+#define mmDCP3_OVL_PITCH 0x4321
+#define mmDCP3_OVLSCL_EDGE_PIXEL_CNTL 0x432C
+#define mmDCP3_OVL_SECONDARY_SURFACE_ADDRESS 0x4392
+#define mmDCP3_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4394
+#define mmDCP3_OVL_START 0x4325
+#define mmDCP3_OVL_STEREOSYNC_FLIP 0x4393
+#define mmDCP3_OVL_SURFACE_ADDRESS 0x4320
+#define mmDCP3_OVL_SURFACE_ADDRESS_HIGH 0x4322
+#define mmDCP3_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x432B
+#define mmDCP3_OVL_SURFACE_ADDRESS_INUSE 0x4328
+#define mmDCP3_OVL_SURFACE_OFFSET_X 0x4323
+#define mmDCP3_OVL_SURFACE_OFFSET_Y 0x4324
+#define mmDCP3_OVL_SWAP_CNTL 0x431F
+#define mmDCP3_OVL_UPDATE 0x4327
+#define mmDCP3_PRESCALE_GRPH_CONTROL 0x432D
+#define mmDCP3_PRESCALE_OVL_CONTROL 0x4331
+#define mmDCP3_PRESCALE_VALUES_GRPH_B 0x4330
+#define mmDCP3_PRESCALE_VALUES_GRPH_G 0x432F
+#define mmDCP3_PRESCALE_VALUES_GRPH_R 0x432E
+#define mmDCP3_PRESCALE_VALUES_OVL_CB 0x4332
+#define mmDCP3_PRESCALE_VALUES_OVL_CR 0x4334
+#define mmDCP3_PRESCALE_VALUES_OVL_Y 0x4333
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL1 0x43A6
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL2 0x43A7
+#define mmDCP3_REGAMMA_CNTLA_REGION_0_1 0x43A8
+#define mmDCP3_REGAMMA_CNTLA_REGION_10_11 0x43AD
+#define mmDCP3_REGAMMA_CNTLA_REGION_12_13 0x43AE
+#define mmDCP3_REGAMMA_CNTLA_REGION_14_15 0x43AF
+#define mmDCP3_REGAMMA_CNTLA_REGION_2_3 0x43A9
+#define mmDCP3_REGAMMA_CNTLA_REGION_4_5 0x43AA
+#define mmDCP3_REGAMMA_CNTLA_REGION_6_7 0x43AB
+#define mmDCP3_REGAMMA_CNTLA_REGION_8_9 0x43AC
+#define mmDCP3_REGAMMA_CNTLA_SLOPE_CNTL 0x43A5
+#define mmDCP3_REGAMMA_CNTLA_START_CNTL 0x43A4
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL1 0x43B2
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL2 0x43B3
+#define mmDCP3_REGAMMA_CNTLB_REGION_0_1 0x43B4
+#define mmDCP3_REGAMMA_CNTLB_REGION_10_11 0x43B9
+#define mmDCP3_REGAMMA_CNTLB_REGION_12_13 0x43BA
+#define mmDCP3_REGAMMA_CNTLB_REGION_14_15 0x43BB
+#define mmDCP3_REGAMMA_CNTLB_REGION_2_3 0x43B5
+#define mmDCP3_REGAMMA_CNTLB_REGION_4_5 0x43B6
+#define mmDCP3_REGAMMA_CNTLB_REGION_6_7 0x43B7
+#define mmDCP3_REGAMMA_CNTLB_REGION_8_9 0x43B8
+#define mmDCP3_REGAMMA_CNTLB_SLOPE_CNTL 0x43B1
+#define mmDCP3_REGAMMA_CNTLB_START_CNTL 0x43B0
+#define mmDCP3_REGAMMA_CONTROL 0x43A0
+#define mmDCP3_REGAMMA_LUT_DATA 0x43A2
+#define mmDCP3_REGAMMA_LUT_INDEX 0x43A1
+#define mmDCP3_REGAMMA_LUT_WRITE_EN_MASK 0x43A3
+#define mmDCP4_COMM_MATRIXA_TRANS_C11_C12 0x4643
+#define mmDCP4_COMM_MATRIXA_TRANS_C13_C14 0x4644
+#define mmDCP4_COMM_MATRIXA_TRANS_C21_C22 0x4645
+#define mmDCP4_COMM_MATRIXA_TRANS_C23_C24 0x4646
+#define mmDCP4_COMM_MATRIXA_TRANS_C31_C32 0x4647
+#define mmDCP4_COMM_MATRIXA_TRANS_C33_C34 0x4648
+#define mmDCP4_COMM_MATRIXB_TRANS_C11_C12 0x4649
+#define mmDCP4_COMM_MATRIXB_TRANS_C13_C14 0x464A
+#define mmDCP4_COMM_MATRIXB_TRANS_C21_C22 0x464B
+#define mmDCP4_COMM_MATRIXB_TRANS_C23_C24 0x464C
+#define mmDCP4_COMM_MATRIXB_TRANS_C31_C32 0x464D
+#define mmDCP4_COMM_MATRIXB_TRANS_C33_C34 0x464E
+#define mmDCP4_CUR_COLOR1 0x466C
+#define mmDCP4_CUR_COLOR2 0x466D
+#define mmDCP4_CUR_CONTROL 0x4666
+#define mmDCP4_CUR_HOT_SPOT 0x466B
+#define mmDCP4_CUR_POSITION 0x466A
+#define mmDCP4_CUR_REQUEST_FILTER_CNTL 0x4699
+#define mmDCP4_CUR_SIZE 0x4668
+#define mmDCP4_CUR_SURFACE_ADDRESS 0x4667
+#define mmDCP4_CUR_SURFACE_ADDRESS_HIGH 0x4669
+#define mmDCP4_CUR_UPDATE 0x466E
+#define mmDCP4_DC_LUT_30_COLOR 0x467C
+#define mmDCP4_DC_LUT_AUTOFILL 0x467F
+#define mmDCP4_DC_LUT_BLACK_OFFSET_BLUE 0x4681
+#define mmDCP4_DC_LUT_BLACK_OFFSET_GREEN 0x4682
+#define mmDCP4_DC_LUT_BLACK_OFFSET_RED 0x4683
+#define mmDCP4_DC_LUT_CONTROL 0x4680
+#define mmDCP4_DC_LUT_PWL_DATA 0x467B
+#define mmDCP4_DC_LUT_RW_INDEX 0x4679
+#define mmDCP4_DC_LUT_RW_MODE 0x4678
+#define mmDCP4_DC_LUT_SEQ_COLOR 0x467A
+#define mmDCP4_DC_LUT_VGA_ACCESS_ENABLE 0x467D
+#define mmDCP4_DC_LUT_WHITE_OFFSET_BLUE 0x4684
+#define mmDCP4_DC_LUT_WHITE_OFFSET_GREEN 0x4685
+#define mmDCP4_DC_LUT_WHITE_OFFSET_RED 0x4686
+#define mmDCP4_DC_LUT_WRITE_EN_MASK 0x467E
+#define mmDCP4_DCP_CRC_CONTROL 0x4687
+#define mmDCP4_DCP_CRC_CURRENT 0x4689
+#define mmDCP4_DCP_CRC_LAST 0x468B
+#define mmDCP4_DCP_CRC_MASK 0x4688
+#define mmDCP4_DCP_DEBUG 0x468D
+#define mmDCP4_DCP_DEBUG2 0x4698
+#define mmDCP4_DCP_FP_CONVERTED_FIELD 0x4665
+#define mmDCP4_DCP_GSL_CONTROL 0x4690
+#define mmDCP4_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4691
+#define mmDCP4_DCP_RANDOM_SEEDS 0x4661
+#define mmDCP4_DCP_SPATIAL_DITHER_CNTL 0x4660
+#define mmDCP4_DCP_TEST_DEBUG_DATA 0x4696
+#define mmDCP4_DCP_TEST_DEBUG_INDEX 0x4695
+#define mmDCP4_DEGAMMA_CONTROL 0x4658
+#define mmDCP4_DENORM_CONTROL 0x4650
+#define mmDCP4_GAMUT_REMAP_C11_C12 0x465A
+#define mmDCP4_GAMUT_REMAP_C13_C14 0x465B
+#define mmDCP4_GAMUT_REMAP_C21_C22 0x465C
+#define mmDCP4_GAMUT_REMAP_C23_C24 0x465D
+#define mmDCP4_GAMUT_REMAP_C31_C32 0x465E
+#define mmDCP4_GAMUT_REMAP_C33_C34 0x465F
+#define mmDCP4_GAMUT_REMAP_CONTROL 0x4659
+#define mmDCP4_GRPH_COMPRESS_PITCH 0x461A
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS 0x4619
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x461B
+#define mmDCP4_GRPH_CONTROL 0x4601
+#define mmDCP4_GRPH_DFQ_CONTROL 0x4614
+#define mmDCP4_GRPH_DFQ_STATUS 0x4615
+#define mmDCP4_GRPH_ENABLE 0x4600
+#define mmDCP4_GRPH_FLIP_CONTROL 0x4612
+#define mmDCP4_GRPH_INTERRUPT_CONTROL 0x4617
+#define mmDCP4_GRPH_INTERRUPT_STATUS 0x4616
+#define mmDCP4_GRPH_LUT_10BIT_BYPASS 0x4602
+#define mmDCP4_GRPH_PITCH 0x4606
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS 0x4604
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4607
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS 0x4605
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4608
+#define mmDCP4_GRPH_STEREOSYNC_FLIP 0x4697
+#define mmDCP4_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4618
+#define mmDCP4_GRPH_SURFACE_ADDRESS_INUSE 0x4613
+#define mmDCP4_GRPH_SURFACE_OFFSET_X 0x4609
+#define mmDCP4_GRPH_SURFACE_OFFSET_Y 0x460A
+#define mmDCP4_GRPH_SWAP_CNTL 0x4603
+#define mmDCP4_GRPH_UPDATE 0x4611
+#define mmDCP4_GRPH_X_END 0x460D
+#define mmDCP4_GRPH_X_START 0x460B
+#define mmDCP4_GRPH_Y_END 0x460E
+#define mmDCP4_GRPH_Y_START 0x460C
+#define mmDCP4_INPUT_CSC_C11_C12 0x4636
+#define mmDCP4_INPUT_CSC_C13_C14 0x4637
+#define mmDCP4_INPUT_CSC_C21_C22 0x4638
+#define mmDCP4_INPUT_CSC_C23_C24 0x4639
+#define mmDCP4_INPUT_CSC_C31_C32 0x463A
+#define mmDCP4_INPUT_CSC_C33_C34 0x463B
+#define mmDCP4_INPUT_CSC_CONTROL 0x4635
+#define mmDCP4_INPUT_GAMMA_CONTROL 0x4610
+#define mmDCP4_KEY_CONTROL 0x4653
+#define mmDCP4_KEY_RANGE_ALPHA 0x4654
+#define mmDCP4_KEY_RANGE_BLUE 0x4657
+#define mmDCP4_KEY_RANGE_GREEN 0x4656
+#define mmDCP4_KEY_RANGE_RED 0x4655
+#define mmDCP4_OUTPUT_CSC_C11_C12 0x463D
+#define mmDCP4_OUTPUT_CSC_C13_C14 0x463E
+#define mmDCP4_OUTPUT_CSC_C21_C22 0x463F
+#define mmDCP4_OUTPUT_CSC_C23_C24 0x4640
+#define mmDCP4_OUTPUT_CSC_C31_C32 0x4641
+#define mmDCP4_OUTPUT_CSC_C33_C34 0x4642
+#define mmDCP4_OUTPUT_CSC_CONTROL 0x463C
+#define mmDCP4_OUT_ROUND_CONTROL 0x4651
+#define mmDCP4_OVL_CONTROL1 0x461D
+#define mmDCP4_OVL_CONTROL2 0x461E
+#define mmDCP4_OVL_DFQ_CONTROL 0x4629
+#define mmDCP4_OVL_DFQ_STATUS 0x462A
+#define mmDCP4_OVL_ENABLE 0x461C
+#define mmDCP4_OVL_END 0x4626
+#define mmDCP4_OVL_PITCH 0x4621
+#define mmDCP4_OVLSCL_EDGE_PIXEL_CNTL 0x462C
+#define mmDCP4_OVL_SECONDARY_SURFACE_ADDRESS 0x4692
+#define mmDCP4_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4694
+#define mmDCP4_OVL_START 0x4625
+#define mmDCP4_OVL_STEREOSYNC_FLIP 0x4693
+#define mmDCP4_OVL_SURFACE_ADDRESS 0x4620
+#define mmDCP4_OVL_SURFACE_ADDRESS_HIGH 0x4622
+#define mmDCP4_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x462B
+#define mmDCP4_OVL_SURFACE_ADDRESS_INUSE 0x4628
+#define mmDCP4_OVL_SURFACE_OFFSET_X 0x4623
+#define mmDCP4_OVL_SURFACE_OFFSET_Y 0x4624
+#define mmDCP4_OVL_SWAP_CNTL 0x461F
+#define mmDCP4_OVL_UPDATE 0x4627
+#define mmDCP4_PRESCALE_GRPH_CONTROL 0x462D
+#define mmDCP4_PRESCALE_OVL_CONTROL 0x4631
+#define mmDCP4_PRESCALE_VALUES_GRPH_B 0x4630
+#define mmDCP4_PRESCALE_VALUES_GRPH_G 0x462F
+#define mmDCP4_PRESCALE_VALUES_GRPH_R 0x462E
+#define mmDCP4_PRESCALE_VALUES_OVL_CB 0x4632
+#define mmDCP4_PRESCALE_VALUES_OVL_CR 0x4634
+#define mmDCP4_PRESCALE_VALUES_OVL_Y 0x4633
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL1 0x46A6
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL2 0x46A7
+#define mmDCP4_REGAMMA_CNTLA_REGION_0_1 0x46A8
+#define mmDCP4_REGAMMA_CNTLA_REGION_10_11 0x46AD
+#define mmDCP4_REGAMMA_CNTLA_REGION_12_13 0x46AE
+#define mmDCP4_REGAMMA_CNTLA_REGION_14_15 0x46AF
+#define mmDCP4_REGAMMA_CNTLA_REGION_2_3 0x46A9
+#define mmDCP4_REGAMMA_CNTLA_REGION_4_5 0x46AA
+#define mmDCP4_REGAMMA_CNTLA_REGION_6_7 0x46AB
+#define mmDCP4_REGAMMA_CNTLA_REGION_8_9 0x46AC
+#define mmDCP4_REGAMMA_CNTLA_SLOPE_CNTL 0x46A5
+#define mmDCP4_REGAMMA_CNTLA_START_CNTL 0x46A4
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL1 0x46B2
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL2 0x46B3
+#define mmDCP4_REGAMMA_CNTLB_REGION_0_1 0x46B4
+#define mmDCP4_REGAMMA_CNTLB_REGION_10_11 0x46B9
+#define mmDCP4_REGAMMA_CNTLB_REGION_12_13 0x46BA
+#define mmDCP4_REGAMMA_CNTLB_REGION_14_15 0x46BB
+#define mmDCP4_REGAMMA_CNTLB_REGION_2_3 0x46B5
+#define mmDCP4_REGAMMA_CNTLB_REGION_4_5 0x46B6
+#define mmDCP4_REGAMMA_CNTLB_REGION_6_7 0x46B7
+#define mmDCP4_REGAMMA_CNTLB_REGION_8_9 0x46B8
+#define mmDCP4_REGAMMA_CNTLB_SLOPE_CNTL 0x46B1
+#define mmDCP4_REGAMMA_CNTLB_START_CNTL 0x46B0
+#define mmDCP4_REGAMMA_CONTROL 0x46A0
+#define mmDCP4_REGAMMA_LUT_DATA 0x46A2
+#define mmDCP4_REGAMMA_LUT_INDEX 0x46A1
+#define mmDCP4_REGAMMA_LUT_WRITE_EN_MASK 0x46A3
+#define mmDCP5_COMM_MATRIXA_TRANS_C11_C12 0x4943
+#define mmDCP5_COMM_MATRIXA_TRANS_C13_C14 0x4944
+#define mmDCP5_COMM_MATRIXA_TRANS_C21_C22 0x4945
+#define mmDCP5_COMM_MATRIXA_TRANS_C23_C24 0x4946
+#define mmDCP5_COMM_MATRIXA_TRANS_C31_C32 0x4947
+#define mmDCP5_COMM_MATRIXA_TRANS_C33_C34 0x4948
+#define mmDCP5_COMM_MATRIXB_TRANS_C11_C12 0x4949
+#define mmDCP5_COMM_MATRIXB_TRANS_C13_C14 0x494A
+#define mmDCP5_COMM_MATRIXB_TRANS_C21_C22 0x494B
+#define mmDCP5_COMM_MATRIXB_TRANS_C23_C24 0x494C
+#define mmDCP5_COMM_MATRIXB_TRANS_C31_C32 0x494D
+#define mmDCP5_COMM_MATRIXB_TRANS_C33_C34 0x494E
+#define mmDCP5_CUR_COLOR1 0x496C
+#define mmDCP5_CUR_COLOR2 0x496D
+#define mmDCP5_CUR_CONTROL 0x4966
+#define mmDCP5_CUR_HOT_SPOT 0x496B
+#define mmDCP5_CUR_POSITION 0x496A
+#define mmDCP5_CUR_REQUEST_FILTER_CNTL 0x4999
+#define mmDCP5_CUR_SIZE 0x4968
+#define mmDCP5_CUR_SURFACE_ADDRESS 0x4967
+#define mmDCP5_CUR_SURFACE_ADDRESS_HIGH 0x4969
+#define mmDCP5_CUR_UPDATE 0x496E
+#define mmDCP5_DC_LUT_30_COLOR 0x497C
+#define mmDCP5_DC_LUT_AUTOFILL 0x497F
+#define mmDCP5_DC_LUT_BLACK_OFFSET_BLUE 0x4981
+#define mmDCP5_DC_LUT_BLACK_OFFSET_GREEN 0x4982
+#define mmDCP5_DC_LUT_BLACK_OFFSET_RED 0x4983
+#define mmDCP5_DC_LUT_CONTROL 0x4980
+#define mmDCP5_DC_LUT_PWL_DATA 0x497B
+#define mmDCP5_DC_LUT_RW_INDEX 0x4979
+#define mmDCP5_DC_LUT_RW_MODE 0x4978
+#define mmDCP5_DC_LUT_SEQ_COLOR 0x497A
+#define mmDCP5_DC_LUT_VGA_ACCESS_ENABLE 0x497D
+#define mmDCP5_DC_LUT_WHITE_OFFSET_BLUE 0x4984
+#define mmDCP5_DC_LUT_WHITE_OFFSET_GREEN 0x4985
+#define mmDCP5_DC_LUT_WHITE_OFFSET_RED 0x4986
+#define mmDCP5_DC_LUT_WRITE_EN_MASK 0x497E
+#define mmDCP5_DCP_CRC_CONTROL 0x4987
+#define mmDCP5_DCP_CRC_CURRENT 0x4989
+#define mmDCP5_DCP_CRC_LAST 0x498B
+#define mmDCP5_DCP_CRC_MASK 0x4988
+#define mmDCP5_DCP_DEBUG 0x498D
+#define mmDCP5_DCP_DEBUG2 0x4998
+#define mmDCP5_DCP_FP_CONVERTED_FIELD 0x4965
+#define mmDCP5_DCP_GSL_CONTROL 0x4990
+#define mmDCP5_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4991
+#define mmDCP5_DCP_RANDOM_SEEDS 0x4961
+#define mmDCP5_DCP_SPATIAL_DITHER_CNTL 0x4960
+#define mmDCP5_DCP_TEST_DEBUG_DATA 0x4996
+#define mmDCP5_DCP_TEST_DEBUG_INDEX 0x4995
+#define mmDCP5_DEGAMMA_CONTROL 0x4958
+#define mmDCP5_DENORM_CONTROL 0x4950
+#define mmDCP5_GAMUT_REMAP_C11_C12 0x495A
+#define mmDCP5_GAMUT_REMAP_C13_C14 0x495B
+#define mmDCP5_GAMUT_REMAP_C21_C22 0x495C
+#define mmDCP5_GAMUT_REMAP_C23_C24 0x495D
+#define mmDCP5_GAMUT_REMAP_C31_C32 0x495E
+#define mmDCP5_GAMUT_REMAP_C33_C34 0x495F
+#define mmDCP5_GAMUT_REMAP_CONTROL 0x4959
+#define mmDCP5_GRPH_COMPRESS_PITCH 0x491A
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS 0x4919
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x491B
+#define mmDCP5_GRPH_CONTROL 0x4901
+#define mmDCP5_GRPH_DFQ_CONTROL 0x4914
+#define mmDCP5_GRPH_DFQ_STATUS 0x4915
+#define mmDCP5_GRPH_ENABLE 0x4900
+#define mmDCP5_GRPH_FLIP_CONTROL 0x4912
+#define mmDCP5_GRPH_INTERRUPT_CONTROL 0x4917
+#define mmDCP5_GRPH_INTERRUPT_STATUS 0x4916
+#define mmDCP5_GRPH_LUT_10BIT_BYPASS 0x4902
+#define mmDCP5_GRPH_PITCH 0x4906
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS 0x4904
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4907
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS 0x4905
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4908
+#define mmDCP5_GRPH_STEREOSYNC_FLIP 0x4997
+#define mmDCP5_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4918
+#define mmDCP5_GRPH_SURFACE_ADDRESS_INUSE 0x4913
+#define mmDCP5_GRPH_SURFACE_OFFSET_X 0x4909
+#define mmDCP5_GRPH_SURFACE_OFFSET_Y 0x490A
+#define mmDCP5_GRPH_SWAP_CNTL 0x4903
+#define mmDCP5_GRPH_UPDATE 0x4911
+#define mmDCP5_GRPH_X_END 0x490D
+#define mmDCP5_GRPH_X_START 0x490B
+#define mmDCP5_GRPH_Y_END 0x490E
+#define mmDCP5_GRPH_Y_START 0x490C
+#define mmDCP5_INPUT_CSC_C11_C12 0x4936
+#define mmDCP5_INPUT_CSC_C13_C14 0x4937
+#define mmDCP5_INPUT_CSC_C21_C22 0x4938
+#define mmDCP5_INPUT_CSC_C23_C24 0x4939
+#define mmDCP5_INPUT_CSC_C31_C32 0x493A
+#define mmDCP5_INPUT_CSC_C33_C34 0x493B
+#define mmDCP5_INPUT_CSC_CONTROL 0x4935
+#define mmDCP5_INPUT_GAMMA_CONTROL 0x4910
+#define mmDCP5_KEY_CONTROL 0x4953
+#define mmDCP5_KEY_RANGE_ALPHA 0x4954
+#define mmDCP5_KEY_RANGE_BLUE 0x4957
+#define mmDCP5_KEY_RANGE_GREEN 0x4956
+#define mmDCP5_KEY_RANGE_RED 0x4955
+#define mmDCP5_OUTPUT_CSC_C11_C12 0x493D
+#define mmDCP5_OUTPUT_CSC_C13_C14 0x493E
+#define mmDCP5_OUTPUT_CSC_C21_C22 0x493F
+#define mmDCP5_OUTPUT_CSC_C23_C24 0x4940
+#define mmDCP5_OUTPUT_CSC_C31_C32 0x4941
+#define mmDCP5_OUTPUT_CSC_C33_C34 0x4942
+#define mmDCP5_OUTPUT_CSC_CONTROL 0x493C
+#define mmDCP5_OUT_ROUND_CONTROL 0x4951
+#define mmDCP5_OVL_CONTROL1 0x491D
+#define mmDCP5_OVL_CONTROL2 0x491E
+#define mmDCP5_OVL_DFQ_CONTROL 0x4929
+#define mmDCP5_OVL_DFQ_STATUS 0x492A
+#define mmDCP5_OVL_ENABLE 0x491C
+#define mmDCP5_OVL_END 0x4926
+#define mmDCP5_OVL_PITCH 0x4921
+#define mmDCP5_OVLSCL_EDGE_PIXEL_CNTL 0x492C
+#define mmDCP5_OVL_SECONDARY_SURFACE_ADDRESS 0x4992
+#define mmDCP5_OVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x4994
+#define mmDCP5_OVL_START 0x4925
+#define mmDCP5_OVL_STEREOSYNC_FLIP 0x4993
+#define mmDCP5_OVL_SURFACE_ADDRESS 0x4920
+#define mmDCP5_OVL_SURFACE_ADDRESS_HIGH 0x4922
+#define mmDCP5_OVL_SURFACE_ADDRESS_HIGH_INUSE 0x492B
+#define mmDCP5_OVL_SURFACE_ADDRESS_INUSE 0x4928
+#define mmDCP5_OVL_SURFACE_OFFSET_X 0x4923
+#define mmDCP5_OVL_SURFACE_OFFSET_Y 0x4924
+#define mmDCP5_OVL_SWAP_CNTL 0x491F
+#define mmDCP5_OVL_UPDATE 0x4927
+#define mmDCP5_PRESCALE_GRPH_CONTROL 0x492D
+#define mmDCP5_PRESCALE_OVL_CONTROL 0x4931
+#define mmDCP5_PRESCALE_VALUES_GRPH_B 0x4930
+#define mmDCP5_PRESCALE_VALUES_GRPH_G 0x492F
+#define mmDCP5_PRESCALE_VALUES_GRPH_R 0x492E
+#define mmDCP5_PRESCALE_VALUES_OVL_CB 0x4932
+#define mmDCP5_PRESCALE_VALUES_OVL_CR 0x4934
+#define mmDCP5_PRESCALE_VALUES_OVL_Y 0x4933
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL1 0x49A6
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL2 0x49A7
+#define mmDCP5_REGAMMA_CNTLA_REGION_0_1 0x49A8
+#define mmDCP5_REGAMMA_CNTLA_REGION_10_11 0x49AD
+#define mmDCP5_REGAMMA_CNTLA_REGION_12_13 0x49AE
+#define mmDCP5_REGAMMA_CNTLA_REGION_14_15 0x49AF
+#define mmDCP5_REGAMMA_CNTLA_REGION_2_3 0x49A9
+#define mmDCP5_REGAMMA_CNTLA_REGION_4_5 0x49AA
+#define mmDCP5_REGAMMA_CNTLA_REGION_6_7 0x49AB
+#define mmDCP5_REGAMMA_CNTLA_REGION_8_9 0x49AC
+#define mmDCP5_REGAMMA_CNTLA_SLOPE_CNTL 0x49A5
+#define mmDCP5_REGAMMA_CNTLA_START_CNTL 0x49A4
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL1 0x49B2
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL2 0x49B3
+#define mmDCP5_REGAMMA_CNTLB_REGION_0_1 0x49B4
+#define mmDCP5_REGAMMA_CNTLB_REGION_10_11 0x49B9
+#define mmDCP5_REGAMMA_CNTLB_REGION_12_13 0x49BA
+#define mmDCP5_REGAMMA_CNTLB_REGION_14_15 0x49BB
+#define mmDCP5_REGAMMA_CNTLB_REGION_2_3 0x49B5
+#define mmDCP5_REGAMMA_CNTLB_REGION_4_5 0x49B6
+#define mmDCP5_REGAMMA_CNTLB_REGION_6_7 0x49B7
+#define mmDCP5_REGAMMA_CNTLB_REGION_8_9 0x49B8
+#define mmDCP5_REGAMMA_CNTLB_SLOPE_CNTL 0x49B1
+#define mmDCP5_REGAMMA_CNTLB_START_CNTL 0x49B0
+#define mmDCP5_REGAMMA_CONTROL 0x49A0
+#define mmDCP5_REGAMMA_LUT_DATA 0x49A2
+#define mmDCP5_REGAMMA_LUT_INDEX 0x49A1
+#define mmDCP5_REGAMMA_LUT_WRITE_EN_MASK 0x49A3
+#define mmDC_PAD_EXTERN_SIG 0x1902
+#define mmDCP_CRC_CONTROL 0x1A87
+#define mmDCP_CRC_CURRENT 0x1A89
+#define mmDCP_CRC_LAST 0x1A8B
+#define mmDCP_CRC_MASK 0x1A88
+#define mmDCP_DEBUG 0x1A8D
+#define mmDCP_DEBUG2 0x1A98
+#define mmDCP_FP_CONVERTED_FIELD 0x1A65
+#define mmDC_PGCNTL_STATUS_REG 0x177E
+#define mmDC_PGFSM_CONFIG_REG 0x177C
+#define mmDC_PGFSM_WRITE_REG 0x177D
+#define mmDCP_GSL_CONTROL 0x1A90
+#define mmDCPG_TEST_DEBUG_DATA 0x177B
+#define mmDCPG_TEST_DEBUG_INDEX 0x1779
+#define mmDC_PINSTRAPS 0x1917
+#define mmDCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1A91
+#define mmDCP_RANDOM_SEEDS 0x1A61
+#define mmDCP_SPATIAL_DITHER_CNTL 0x1A60
+#define mmDCP_TEST_DEBUG_DATA 0x1A96
+#define mmDCP_TEST_DEBUG_INDEX 0x1A95
+#define mmDC_RBBMIF_RDWR_CNTL1 0x031A
+#define mmDC_RBBMIF_RDWR_CNTL2 0x031D
+#define mmDC_REF_CLK_CNTL 0x1903
+#define mmDC_XDMA_INTERFACE_CNTL 0x0327
+#define mmDEGAMMA_CONTROL 0x1A58
+#define mmDENORM_CONTROL 0x1A50
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define mmDIG0_AFMT_60958_0 0x1C41
+#define mmDIG0_AFMT_60958_1 0x1C42
+#define mmDIG0_AFMT_60958_2 0x1C48
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x1C43
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x1C49
+#define mmDIG0_AFMT_AUDIO_DBG_DTO_CNTL 0x1C52
+#define mmDIG0_AFMT_AUDIO_INFO0 0x1C3F
+#define mmDIG0_AFMT_AUDIO_INFO1 0x1C40
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x1C4B
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x1C17
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x1C4F
+#define mmDIG0_AFMT_AVI_INFO0 0x1C21
+#define mmDIG0_AFMT_AVI_INFO1 0x1C22
+#define mmDIG0_AFMT_AVI_INFO2 0x1C23
+#define mmDIG0_AFMT_AVI_INFO3 0x1C24
+#define mmDIG0_AFMT_GENERIC_0 0x1C28
+#define mmDIG0_AFMT_GENERIC_1 0x1C29
+#define mmDIG0_AFMT_GENERIC_2 0x1C2A
+#define mmDIG0_AFMT_GENERIC_3 0x1C2B
+#define mmDIG0_AFMT_GENERIC_4 0x1C2C
+#define mmDIG0_AFMT_GENERIC_5 0x1C2D
+#define mmDIG0_AFMT_GENERIC_6 0x1C2E
+#define mmDIG0_AFMT_GENERIC_7 0x1C2F
+#define mmDIG0_AFMT_GENERIC_HDR 0x1C27
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x1C4D
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x1C14
+#define mmDIG0_AFMT_ISRC1_0 0x1C18
+#define mmDIG0_AFMT_ISRC1_1 0x1C19
+#define mmDIG0_AFMT_ISRC1_2 0x1C1A
+#define mmDIG0_AFMT_ISRC1_3 0x1C1B
+#define mmDIG0_AFMT_ISRC1_4 0x1C1C
+#define mmDIG0_AFMT_ISRC2_0 0x1C1D
+#define mmDIG0_AFMT_ISRC2_1 0x1C1E
+#define mmDIG0_AFMT_ISRC2_2 0x1C1F
+#define mmDIG0_AFMT_ISRC2_3 0x1C20
+#define mmDIG0_AFMT_MPEG_INFO0 0x1C25
+#define mmDIG0_AFMT_MPEG_INFO1 0x1C26
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x1C44
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x1C45
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x1C46
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x1C47
+#define mmDIG0_AFMT_STATUS 0x1C4A
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x1C4C
+#define mmDIG0_DIG_BE_CNTL 0x1C50
+#define mmDIG0_DIG_BE_EN_CNTL 0x1C51
+#define mmDIG0_DIG_CLOCK_PATTERN 0x1C03
+#define mmDIG0_DIG_DISPCLK_SWITCH_CNTL 0x1C08
+#define mmDIG0_DIG_DISPCLK_SWITCH_STATUS 0x1C09
+#define mmDIG0_DIG_FE_CNTL 0x1C00
+#define mmDIG0_DIG_FIFO_STATUS 0x1C0A
+#define mmDIG0_DIG_LANE_ENABLE 0x1C8D
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x1C01
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x1C02
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x1C05
+#define mmDIG0_DIG_TEST_PATTERN 0x1C04
+#define mmDIG0_HDMI_ACR_32_0 0x1C37
+#define mmDIG0_HDMI_ACR_32_1 0x1C38
+#define mmDIG0_HDMI_ACR_44_0 0x1C39
+#define mmDIG0_HDMI_ACR_44_1 0x1C3A
+#define mmDIG0_HDMI_ACR_48_0 0x1C3B
+#define mmDIG0_HDMI_ACR_48_1 0x1C3C
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x1C0F
+#define mmDIG0_HDMI_ACR_STATUS_0 0x1C3D
+#define mmDIG0_HDMI_ACR_STATUS_1 0x1C3E
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x1C0E
+#define mmDIG0_HDMI_CONTROL 0x1C0C
+#define mmDIG0_HDMI_GC 0x1C16
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x1C13
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x1C30
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x1C11
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x1C12
+#define mmDIG0_HDMI_STATUS 0x1C0D
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x1C10
+#define mmDIG0_LVDS_DATA_CNTL 0x1C8C
+#define mmDIG0_TMDS_CNTL 0x1C7C
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x1C7E
+#define mmDIG0_TMDS_CONTROL_CHAR 0x1C7D
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x1C86
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x1C87
+#define mmDIG0_TMDS_CTL_BITS 0x1C83
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x1C84
+#define mmDIG0_TMDS_DEBUG 0x1C82
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x1C7F
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x1C80
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x1C81
+#define mmDIG1_AFMT_60958_0 0x1F41
+#define mmDIG1_AFMT_60958_1 0x1F42
+#define mmDIG1_AFMT_60958_2 0x1F48
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x1F43
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x1F49
+#define mmDIG1_AFMT_AUDIO_DBG_DTO_CNTL 0x1F52
+#define mmDIG1_AFMT_AUDIO_INFO0 0x1F3F
+#define mmDIG1_AFMT_AUDIO_INFO1 0x1F40
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x1F4B
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x1F17
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x1F4F
+#define mmDIG1_AFMT_AVI_INFO0 0x1F21
+#define mmDIG1_AFMT_AVI_INFO1 0x1F22
+#define mmDIG1_AFMT_AVI_INFO2 0x1F23
+#define mmDIG1_AFMT_AVI_INFO3 0x1F24
+#define mmDIG1_AFMT_GENERIC_0 0x1F28
+#define mmDIG1_AFMT_GENERIC_1 0x1F29
+#define mmDIG1_AFMT_GENERIC_2 0x1F2A
+#define mmDIG1_AFMT_GENERIC_3 0x1F2B
+#define mmDIG1_AFMT_GENERIC_4 0x1F2C
+#define mmDIG1_AFMT_GENERIC_5 0x1F2D
+#define mmDIG1_AFMT_GENERIC_6 0x1F2E
+#define mmDIG1_AFMT_GENERIC_7 0x1F2F
+#define mmDIG1_AFMT_GENERIC_HDR 0x1F27
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x1F4D
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x1F14
+#define mmDIG1_AFMT_ISRC1_0 0x1F18
+#define mmDIG1_AFMT_ISRC1_1 0x1F19
+#define mmDIG1_AFMT_ISRC1_2 0x1F1A
+#define mmDIG1_AFMT_ISRC1_3 0x1F1B
+#define mmDIG1_AFMT_ISRC1_4 0x1F1C
+#define mmDIG1_AFMT_ISRC2_0 0x1F1D
+#define mmDIG1_AFMT_ISRC2_1 0x1F1E
+#define mmDIG1_AFMT_ISRC2_2 0x1F1F
+#define mmDIG1_AFMT_ISRC2_3 0x1F20
+#define mmDIG1_AFMT_MPEG_INFO0 0x1F25
+#define mmDIG1_AFMT_MPEG_INFO1 0x1F26
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x1F44
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x1F45
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x1F46
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x1F47
+#define mmDIG1_AFMT_STATUS 0x1F4A
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x1F4C
+#define mmDIG1_DIG_BE_CNTL 0x1F50
+#define mmDIG1_DIG_BE_EN_CNTL 0x1F51
+#define mmDIG1_DIG_CLOCK_PATTERN 0x1F03
+#define mmDIG1_DIG_DISPCLK_SWITCH_CNTL 0x1F08
+#define mmDIG1_DIG_DISPCLK_SWITCH_STATUS 0x1F09
+#define mmDIG1_DIG_FE_CNTL 0x1F00
+#define mmDIG1_DIG_FIFO_STATUS 0x1F0A
+#define mmDIG1_DIG_LANE_ENABLE 0x1F8D
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x1F01
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x1F02
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x1F05
+#define mmDIG1_DIG_TEST_PATTERN 0x1F04
+#define mmDIG1_HDMI_ACR_32_0 0x1F37
+#define mmDIG1_HDMI_ACR_32_1 0x1F38
+#define mmDIG1_HDMI_ACR_44_0 0x1F39
+#define mmDIG1_HDMI_ACR_44_1 0x1F3A
+#define mmDIG1_HDMI_ACR_48_0 0x1F3B
+#define mmDIG1_HDMI_ACR_48_1 0x1F3C
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x1F0F
+#define mmDIG1_HDMI_ACR_STATUS_0 0x1F3D
+#define mmDIG1_HDMI_ACR_STATUS_1 0x1F3E
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x1F0E
+#define mmDIG1_HDMI_CONTROL 0x1F0C
+#define mmDIG1_HDMI_GC 0x1F16
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x1F13
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x1F30
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x1F11
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x1F12
+#define mmDIG1_HDMI_STATUS 0x1F0D
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x1F10
+#define mmDIG1_LVDS_DATA_CNTL 0x1F8C
+#define mmDIG1_TMDS_CNTL 0x1F7C
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x1F7E
+#define mmDIG1_TMDS_CONTROL_CHAR 0x1F7D
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x1F86
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x1F87
+#define mmDIG1_TMDS_CTL_BITS 0x1F83
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x1F84
+#define mmDIG1_TMDS_DEBUG 0x1F82
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x1F7F
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x1F80
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x1F81
+#define mmDIG2_AFMT_60958_0 0x4241
+#define mmDIG2_AFMT_60958_1 0x4242
+#define mmDIG2_AFMT_60958_2 0x4248
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL 0x4243
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT 0x4249
+#define mmDIG2_AFMT_AUDIO_DBG_DTO_CNTL 0x4252
+#define mmDIG2_AFMT_AUDIO_INFO0 0x423F
+#define mmDIG2_AFMT_AUDIO_INFO1 0x4240
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL 0x424B
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2 0x4217
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL 0x424F
+#define mmDIG2_AFMT_AVI_INFO0 0x4221
+#define mmDIG2_AFMT_AVI_INFO1 0x4222
+#define mmDIG2_AFMT_AVI_INFO2 0x4223
+#define mmDIG2_AFMT_AVI_INFO3 0x4224
+#define mmDIG2_AFMT_GENERIC_0 0x4228
+#define mmDIG2_AFMT_GENERIC_1 0x4229
+#define mmDIG2_AFMT_GENERIC_2 0x422A
+#define mmDIG2_AFMT_GENERIC_3 0x422B
+#define mmDIG2_AFMT_GENERIC_4 0x422C
+#define mmDIG2_AFMT_GENERIC_5 0x422D
+#define mmDIG2_AFMT_GENERIC_6 0x422E
+#define mmDIG2_AFMT_GENERIC_7 0x422F
+#define mmDIG2_AFMT_GENERIC_HDR 0x4227
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0 0x424D
+#define mmDIG2_AFMT_INTERRUPT_STATUS 0x4214
+#define mmDIG2_AFMT_ISRC1_0 0x4218
+#define mmDIG2_AFMT_ISRC1_1 0x4219
+#define mmDIG2_AFMT_ISRC1_2 0x421A
+#define mmDIG2_AFMT_ISRC1_3 0x421B
+#define mmDIG2_AFMT_ISRC1_4 0x421C
+#define mmDIG2_AFMT_ISRC2_0 0x421D
+#define mmDIG2_AFMT_ISRC2_1 0x421E
+#define mmDIG2_AFMT_ISRC2_2 0x421F
+#define mmDIG2_AFMT_ISRC2_3 0x4220
+#define mmDIG2_AFMT_MPEG_INFO0 0x4225
+#define mmDIG2_AFMT_MPEG_INFO1 0x4226
+#define mmDIG2_AFMT_RAMP_CONTROL0 0x4244
+#define mmDIG2_AFMT_RAMP_CONTROL1 0x4245
+#define mmDIG2_AFMT_RAMP_CONTROL2 0x4246
+#define mmDIG2_AFMT_RAMP_CONTROL3 0x4247
+#define mmDIG2_AFMT_STATUS 0x424A
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL 0x424C
+#define mmDIG2_DIG_BE_CNTL 0x4250
+#define mmDIG2_DIG_BE_EN_CNTL 0x4251
+#define mmDIG2_DIG_CLOCK_PATTERN 0x4203
+#define mmDIG2_DIG_DISPCLK_SWITCH_CNTL 0x4208
+#define mmDIG2_DIG_DISPCLK_SWITCH_STATUS 0x4209
+#define mmDIG2_DIG_FE_CNTL 0x4200
+#define mmDIG2_DIG_FIFO_STATUS 0x420A
+#define mmDIG2_DIG_LANE_ENABLE 0x428D
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL 0x4201
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT 0x4202
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED 0x4205
+#define mmDIG2_DIG_TEST_PATTERN 0x4204
+#define mmDIG2_HDMI_ACR_32_0 0x4237
+#define mmDIG2_HDMI_ACR_32_1 0x4238
+#define mmDIG2_HDMI_ACR_44_0 0x4239
+#define mmDIG2_HDMI_ACR_44_1 0x423A
+#define mmDIG2_HDMI_ACR_48_0 0x423B
+#define mmDIG2_HDMI_ACR_48_1 0x423C
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL 0x420F
+#define mmDIG2_HDMI_ACR_STATUS_0 0x423D
+#define mmDIG2_HDMI_ACR_STATUS_1 0x423E
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL 0x420E
+#define mmDIG2_HDMI_CONTROL 0x420C
+#define mmDIG2_HDMI_GC 0x4216
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x4213
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x4230
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0 0x4211
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1 0x4212
+#define mmDIG2_HDMI_STATUS 0x420D
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL 0x4210
+#define mmDIG2_LVDS_DATA_CNTL 0x428C
+#define mmDIG2_TMDS_CNTL 0x427C
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK 0x427E
+#define mmDIG2_TMDS_CONTROL_CHAR 0x427D
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL 0x4286
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL 0x4287
+#define mmDIG2_TMDS_CTL_BITS 0x4283
+#define mmDIG2_TMDS_DCBALANCER_CONTROL 0x4284
+#define mmDIG2_TMDS_DEBUG 0x4282
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL 0x427F
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x4280
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x4281
+#define mmDIG3_AFMT_60958_0 0x4541
+#define mmDIG3_AFMT_60958_1 0x4542
+#define mmDIG3_AFMT_60958_2 0x4548
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL 0x4543
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT 0x4549
+#define mmDIG3_AFMT_AUDIO_DBG_DTO_CNTL 0x4552
+#define mmDIG3_AFMT_AUDIO_INFO0 0x453F
+#define mmDIG3_AFMT_AUDIO_INFO1 0x4540
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL 0x454B
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2 0x4517
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL 0x454F
+#define mmDIG3_AFMT_AVI_INFO0 0x4521
+#define mmDIG3_AFMT_AVI_INFO1 0x4522
+#define mmDIG3_AFMT_AVI_INFO2 0x4523
+#define mmDIG3_AFMT_AVI_INFO3 0x4524
+#define mmDIG3_AFMT_GENERIC_0 0x4528
+#define mmDIG3_AFMT_GENERIC_1 0x4529
+#define mmDIG3_AFMT_GENERIC_2 0x452A
+#define mmDIG3_AFMT_GENERIC_3 0x452B
+#define mmDIG3_AFMT_GENERIC_4 0x452C
+#define mmDIG3_AFMT_GENERIC_5 0x452D
+#define mmDIG3_AFMT_GENERIC_6 0x452E
+#define mmDIG3_AFMT_GENERIC_7 0x452F
+#define mmDIG3_AFMT_GENERIC_HDR 0x4527
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0 0x454D
+#define mmDIG3_AFMT_INTERRUPT_STATUS 0x4514
+#define mmDIG3_AFMT_ISRC1_0 0x4518
+#define mmDIG3_AFMT_ISRC1_1 0x4519
+#define mmDIG3_AFMT_ISRC1_2 0x451A
+#define mmDIG3_AFMT_ISRC1_3 0x451B
+#define mmDIG3_AFMT_ISRC1_4 0x451C
+#define mmDIG3_AFMT_ISRC2_0 0x451D
+#define mmDIG3_AFMT_ISRC2_1 0x451E
+#define mmDIG3_AFMT_ISRC2_2 0x451F
+#define mmDIG3_AFMT_ISRC2_3 0x4520
+#define mmDIG3_AFMT_MPEG_INFO0 0x4525
+#define mmDIG3_AFMT_MPEG_INFO1 0x4526
+#define mmDIG3_AFMT_RAMP_CONTROL0 0x4544
+#define mmDIG3_AFMT_RAMP_CONTROL1 0x4545
+#define mmDIG3_AFMT_RAMP_CONTROL2 0x4546
+#define mmDIG3_AFMT_RAMP_CONTROL3 0x4547
+#define mmDIG3_AFMT_STATUS 0x454A
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL 0x454C
+#define mmDIG3_DIG_BE_CNTL 0x4550
+#define mmDIG3_DIG_BE_EN_CNTL 0x4551
+#define mmDIG3_DIG_CLOCK_PATTERN 0x4503
+#define mmDIG3_DIG_DISPCLK_SWITCH_CNTL 0x4508
+#define mmDIG3_DIG_DISPCLK_SWITCH_STATUS 0x4509
+#define mmDIG3_DIG_FE_CNTL 0x4500
+#define mmDIG3_DIG_FIFO_STATUS 0x450A
+#define mmDIG3_DIG_LANE_ENABLE 0x458D
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL 0x4501
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT 0x4502
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED 0x4505
+#define mmDIG3_DIG_TEST_PATTERN 0x4504
+#define mmDIG3_HDMI_ACR_32_0 0x4537
+#define mmDIG3_HDMI_ACR_32_1 0x4538
+#define mmDIG3_HDMI_ACR_44_0 0x4539
+#define mmDIG3_HDMI_ACR_44_1 0x453A
+#define mmDIG3_HDMI_ACR_48_0 0x453B
+#define mmDIG3_HDMI_ACR_48_1 0x453C
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL 0x450F
+#define mmDIG3_HDMI_ACR_STATUS_0 0x453D
+#define mmDIG3_HDMI_ACR_STATUS_1 0x453E
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL 0x450E
+#define mmDIG3_HDMI_CONTROL 0x450C
+#define mmDIG3_HDMI_GC 0x4516
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x4513
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x4530
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0 0x4511
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1 0x4512
+#define mmDIG3_HDMI_STATUS 0x450D
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL 0x4510
+#define mmDIG3_LVDS_DATA_CNTL 0x458C
+#define mmDIG3_TMDS_CNTL 0x457C
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK 0x457E
+#define mmDIG3_TMDS_CONTROL_CHAR 0x457D
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL 0x4586
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL 0x4587
+#define mmDIG3_TMDS_CTL_BITS 0x4583
+#define mmDIG3_TMDS_DCBALANCER_CONTROL 0x4584
+#define mmDIG3_TMDS_DEBUG 0x4582
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL 0x457F
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x4580
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x4581
+#define mmDIG4_AFMT_60958_0 0x4841
+#define mmDIG4_AFMT_60958_1 0x4842
+#define mmDIG4_AFMT_60958_2 0x4848
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL 0x4843
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT 0x4849
+#define mmDIG4_AFMT_AUDIO_DBG_DTO_CNTL 0x4852
+#define mmDIG4_AFMT_AUDIO_INFO0 0x483F
+#define mmDIG4_AFMT_AUDIO_INFO1 0x4840
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL 0x484B
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2 0x4817
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL 0x484F
+#define mmDIG4_AFMT_AVI_INFO0 0x4821
+#define mmDIG4_AFMT_AVI_INFO1 0x4822
+#define mmDIG4_AFMT_AVI_INFO2 0x4823
+#define mmDIG4_AFMT_AVI_INFO3 0x4824
+#define mmDIG4_AFMT_GENERIC_0 0x4828
+#define mmDIG4_AFMT_GENERIC_1 0x4829
+#define mmDIG4_AFMT_GENERIC_2 0x482A
+#define mmDIG4_AFMT_GENERIC_3 0x482B
+#define mmDIG4_AFMT_GENERIC_4 0x482C
+#define mmDIG4_AFMT_GENERIC_5 0x482D
+#define mmDIG4_AFMT_GENERIC_6 0x482E
+#define mmDIG4_AFMT_GENERIC_7 0x482F
+#define mmDIG4_AFMT_GENERIC_HDR 0x4827
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0 0x484D
+#define mmDIG4_AFMT_INTERRUPT_STATUS 0x4814
+#define mmDIG4_AFMT_ISRC1_0 0x4818
+#define mmDIG4_AFMT_ISRC1_1 0x4819
+#define mmDIG4_AFMT_ISRC1_2 0x481A
+#define mmDIG4_AFMT_ISRC1_3 0x481B
+#define mmDIG4_AFMT_ISRC1_4 0x481C
+#define mmDIG4_AFMT_ISRC2_0 0x481D
+#define mmDIG4_AFMT_ISRC2_1 0x481E
+#define mmDIG4_AFMT_ISRC2_2 0x481F
+#define mmDIG4_AFMT_ISRC2_3 0x4820
+#define mmDIG4_AFMT_MPEG_INFO0 0x4825
+#define mmDIG4_AFMT_MPEG_INFO1 0x4826
+#define mmDIG4_AFMT_RAMP_CONTROL0 0x4844
+#define mmDIG4_AFMT_RAMP_CONTROL1 0x4845
+#define mmDIG4_AFMT_RAMP_CONTROL2 0x4846
+#define mmDIG4_AFMT_RAMP_CONTROL3 0x4847
+#define mmDIG4_AFMT_STATUS 0x484A
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL 0x484C
+#define mmDIG4_DIG_BE_CNTL 0x4850
+#define mmDIG4_DIG_BE_EN_CNTL 0x4851
+#define mmDIG4_DIG_CLOCK_PATTERN 0x4803
+#define mmDIG4_DIG_DISPCLK_SWITCH_CNTL 0x4808
+#define mmDIG4_DIG_DISPCLK_SWITCH_STATUS 0x4809
+#define mmDIG4_DIG_FE_CNTL 0x4800
+#define mmDIG4_DIG_FIFO_STATUS 0x480A
+#define mmDIG4_DIG_LANE_ENABLE 0x488D
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL 0x4801
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT 0x4802
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED 0x4805
+#define mmDIG4_DIG_TEST_PATTERN 0x4804
+#define mmDIG4_HDMI_ACR_32_0 0x4837
+#define mmDIG4_HDMI_ACR_32_1 0x4838
+#define mmDIG4_HDMI_ACR_44_0 0x4839
+#define mmDIG4_HDMI_ACR_44_1 0x483A
+#define mmDIG4_HDMI_ACR_48_0 0x483B
+#define mmDIG4_HDMI_ACR_48_1 0x483C
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL 0x480F
+#define mmDIG4_HDMI_ACR_STATUS_0 0x483D
+#define mmDIG4_HDMI_ACR_STATUS_1 0x483E
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL 0x480E
+#define mmDIG4_HDMI_CONTROL 0x480C
+#define mmDIG4_HDMI_GC 0x4816
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x4813
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x4830
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0 0x4811
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1 0x4812
+#define mmDIG4_HDMI_STATUS 0x480D
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL 0x4810
+#define mmDIG4_LVDS_DATA_CNTL 0x488C
+#define mmDIG4_TMDS_CNTL 0x487C
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK 0x487E
+#define mmDIG4_TMDS_CONTROL_CHAR 0x487D
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL 0x4886
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL 0x4887
+#define mmDIG4_TMDS_CTL_BITS 0x4883
+#define mmDIG4_TMDS_DCBALANCER_CONTROL 0x4884
+#define mmDIG4_TMDS_DEBUG 0x4882
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL 0x487F
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x4880
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x4881
+#define mmDIG5_AFMT_60958_0 0x4B41
+#define mmDIG5_AFMT_60958_1 0x4B42
+#define mmDIG5_AFMT_60958_2 0x4B48
+#define mmDIG5_AFMT_AUDIO_CRC_CONTROL 0x4B43
+#define mmDIG5_AFMT_AUDIO_CRC_RESULT 0x4B49
+#define mmDIG5_AFMT_AUDIO_DBG_DTO_CNTL 0x4B52
+#define mmDIG5_AFMT_AUDIO_INFO0 0x4B3F
+#define mmDIG5_AFMT_AUDIO_INFO1 0x4B40
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL 0x4B4B
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2 0x4B17
+#define mmDIG5_AFMT_AUDIO_SRC_CONTROL 0x4B4F
+#define mmDIG5_AFMT_AVI_INFO0 0x4B21
+#define mmDIG5_AFMT_AVI_INFO1 0x4B22
+#define mmDIG5_AFMT_AVI_INFO2 0x4B23
+#define mmDIG5_AFMT_AVI_INFO3 0x4B24
+#define mmDIG5_AFMT_GENERIC_0 0x4B28
+#define mmDIG5_AFMT_GENERIC_1 0x4B29
+#define mmDIG5_AFMT_GENERIC_2 0x4B2A
+#define mmDIG5_AFMT_GENERIC_3 0x4B2B
+#define mmDIG5_AFMT_GENERIC_4 0x4B2C
+#define mmDIG5_AFMT_GENERIC_5 0x4B2D
+#define mmDIG5_AFMT_GENERIC_6 0x4B2E
+#define mmDIG5_AFMT_GENERIC_7 0x4B2F
+#define mmDIG5_AFMT_GENERIC_HDR 0x4B27
+#define mmDIG5_AFMT_INFOFRAME_CONTROL0 0x4B4D
+#define mmDIG5_AFMT_INTERRUPT_STATUS 0x4B14
+#define mmDIG5_AFMT_ISRC1_0 0x4B18
+#define mmDIG5_AFMT_ISRC1_1 0x4B19
+#define mmDIG5_AFMT_ISRC1_2 0x4B1A
+#define mmDIG5_AFMT_ISRC1_3 0x4B1B
+#define mmDIG5_AFMT_ISRC1_4 0x4B1C
+#define mmDIG5_AFMT_ISRC2_0 0x4B1D
+#define mmDIG5_AFMT_ISRC2_1 0x4B1E
+#define mmDIG5_AFMT_ISRC2_2 0x4B1F
+#define mmDIG5_AFMT_ISRC2_3 0x4B20
+#define mmDIG5_AFMT_MPEG_INFO0 0x4B25
+#define mmDIG5_AFMT_MPEG_INFO1 0x4B26
+#define mmDIG5_AFMT_RAMP_CONTROL0 0x4B44
+#define mmDIG5_AFMT_RAMP_CONTROL1 0x4B45
+#define mmDIG5_AFMT_RAMP_CONTROL2 0x4B46
+#define mmDIG5_AFMT_RAMP_CONTROL3 0x4B47
+#define mmDIG5_AFMT_STATUS 0x4B4A
+#define mmDIG5_AFMT_VBI_PACKET_CONTROL 0x4B4C
+#define mmDIG5_DIG_BE_CNTL 0x4B50
+#define mmDIG5_DIG_BE_EN_CNTL 0x4B51
+#define mmDIG5_DIG_CLOCK_PATTERN 0x4B03
+#define mmDIG5_DIG_DISPCLK_SWITCH_CNTL 0x4B08
+#define mmDIG5_DIG_DISPCLK_SWITCH_STATUS 0x4B09
+#define mmDIG5_DIG_FE_CNTL 0x4B00
+#define mmDIG5_DIG_FIFO_STATUS 0x4B0A
+#define mmDIG5_DIG_LANE_ENABLE 0x4B8D
+#define mmDIG5_DIG_OUTPUT_CRC_CNTL 0x4B01
+#define mmDIG5_DIG_OUTPUT_CRC_RESULT 0x4B02
+#define mmDIG5_DIG_RANDOM_PATTERN_SEED 0x4B05
+#define mmDIG5_DIG_TEST_PATTERN 0x4B04
+#define mmDIG5_HDMI_ACR_32_0 0x4B37
+#define mmDIG5_HDMI_ACR_32_1 0x4B38
+#define mmDIG5_HDMI_ACR_44_0 0x4B39
+#define mmDIG5_HDMI_ACR_44_1 0x4B3A
+#define mmDIG5_HDMI_ACR_48_0 0x4B3B
+#define mmDIG5_HDMI_ACR_48_1 0x4B3C
+#define mmDIG5_HDMI_ACR_PACKET_CONTROL 0x4B0F
+#define mmDIG5_HDMI_ACR_STATUS_0 0x4B3D
+#define mmDIG5_HDMI_ACR_STATUS_1 0x4B3E
+#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL 0x4B0E
+#define mmDIG5_HDMI_CONTROL 0x4B0C
+#define mmDIG5_HDMI_GC 0x4B16
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0 0x4B13
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1 0x4B30
+#define mmDIG5_HDMI_INFOFRAME_CONTROL0 0x4B11
+#define mmDIG5_HDMI_INFOFRAME_CONTROL1 0x4B12
+#define mmDIG5_HDMI_STATUS 0x4B0D
+#define mmDIG5_HDMI_VBI_PACKET_CONTROL 0x4B10
+#define mmDIG5_LVDS_DATA_CNTL 0x4B8C
+#define mmDIG5_TMDS_CNTL 0x4B7C
+#define mmDIG5_TMDS_CONTROL0_FEEDBACK 0x4B7E
+#define mmDIG5_TMDS_CONTROL_CHAR 0x4B7D
+#define mmDIG5_TMDS_CTL0_1_GEN_CNTL 0x4B86
+#define mmDIG5_TMDS_CTL2_3_GEN_CNTL 0x4B87
+#define mmDIG5_TMDS_CTL_BITS 0x4B83
+#define mmDIG5_TMDS_DCBALANCER_CONTROL 0x4B84
+#define mmDIG5_TMDS_DEBUG 0x4B82
+#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL 0x4B7F
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1 0x4B80
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3 0x4B81
+#define mmDIG_BE_CNTL 0x1C50
+#define mmDIG_BE_EN_CNTL 0x1C51
+#define mmDIG_CLOCK_PATTERN 0x1C03
+#define mmDIG_DISPCLK_SWITCH_CNTL 0x1C08
+#define mmDIG_DISPCLK_SWITCH_STATUS 0x1C09
+#define mmDIG_FE_CNTL 0x1C00
+#define mmDIG_FIFO_STATUS 0x1C0A
+#define mmDIG_LANE_ENABLE 0x1C8D
+#define mmDIG_OUTPUT_CRC_CNTL 0x1C01
+#define mmDIG_OUTPUT_CRC_RESULT 0x1C02
+#define mmDIG_RANDOM_PATTERN_SEED 0x1C05
+#define mmDIG_SOFT_RESET 0x013D
+#define mmDIG_TEST_PATTERN 0x1C04
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0135
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0131
+#define mmDISP_INTERRUPT_STATUS 0x183D
+#define mmDISP_INTERRUPT_STATUS_CONTINUE 0x183E
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2 0x183F
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3 0x1840
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4 0x1853
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5 0x1854
+#define mmDISPOUT_STEREOSYNC_SEL 0x18BF
+#define mmDISPPLL_BG_CNTL 0x013C
+#define mmDISP_TIMER_CONTROL 0x1842
+#define mmDMCU_CTRL 0x1600
+#define mmDMCU_ERAM_RD_CTRL 0x160B
+#define mmDMCU_ERAM_RD_DATA 0x160C
+#define mmDMCU_ERAM_WR_CTRL 0x1609
+#define mmDMCU_ERAM_WR_DATA 0x160A
+#define mmDMCU_EVENT_TRIGGER 0x1611
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS 0x161A
+#define mmDMCU_FW_CS_HI 0x1606
+#define mmDMCU_FW_CS_LO 0x1607
+#define mmDMCU_FW_END_ADDR 0x1604
+#define mmDMCU_FW_ISR_START_ADDR 0x1605
+#define mmDMCU_FW_START_ADDR 0x1603
+#define mmDMCU_INT_CNT 0x1619
+#define mmDMCU_INTERRUPT_STATUS 0x1614
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK 0x1615
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK 0x1616
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL 0x1617
+#define mmDMCU_IRAM_RD_CTRL 0x160F
+#define mmDMCU_IRAM_RD_DATA 0x1610
+#define mmDMCU_IRAM_WR_CTRL 0x160D
+#define mmDMCU_IRAM_WR_DATA 0x160E
+#define mmDMCU_PC_START_ADDR 0x1602
+#define mmDMCU_RAM_ACCESS_CTRL 0x1608
+#define mmDMCU_STATUS 0x1601
+#define mmDMCU_TEST_DEBUG_DATA 0x1627
+#define mmDMCU_TEST_DEBUG_INDEX 0x1626
+#define mmDMCU_UC_CLK_GATING_CNTL 0x161B
+#define mmDMCU_UC_INTERNAL_INT_STATUS 0x1612
+#define mmDMIF_ADDR_CALC 0x0300
+#define mmDMIF_ADDR_CONFIG 0x02F5
+#define mmDMIF_ARBITRATION_CONTROL 0x02F9
+#define mmDMIF_CONTROL 0x02F6
+#define mmDMIF_HW_DEBUG 0x02F8
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 0x1B30
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL2 0x1B31
+#define mmDMIF_PG0_DPG_PIPE_DPM_CONTROL 0x1B34
+#define mmDMIF_PG0_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1B36
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL 0x1B35
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1B37
+#define mmDMIF_PG0_DPG_PIPE_URGENCY_CONTROL 0x1B33
+#define mmDMIF_PG0_DPG_TEST_DEBUG_DATA 0x1B39
+#define mmDMIF_PG0_DPG_TEST_DEBUG_INDEX 0x1B38
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL1 0x1E30
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL2 0x1E31
+#define mmDMIF_PG1_DPG_PIPE_DPM_CONTROL 0x1E34
+#define mmDMIF_PG1_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1E36
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL 0x1E35
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1E37
+#define mmDMIF_PG1_DPG_PIPE_URGENCY_CONTROL 0x1E33
+#define mmDMIF_PG1_DPG_TEST_DEBUG_DATA 0x1E39
+#define mmDMIF_PG1_DPG_TEST_DEBUG_INDEX 0x1E38
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL1 0x4130
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL2 0x4131
+#define mmDMIF_PG2_DPG_PIPE_DPM_CONTROL 0x4134
+#define mmDMIF_PG2_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4136
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL 0x4135
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4137
+#define mmDMIF_PG2_DPG_PIPE_URGENCY_CONTROL 0x4133
+#define mmDMIF_PG2_DPG_TEST_DEBUG_DATA 0x4139
+#define mmDMIF_PG2_DPG_TEST_DEBUG_INDEX 0x4138
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL1 0x4430
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL2 0x4431
+#define mmDMIF_PG3_DPG_PIPE_DPM_CONTROL 0x4434
+#define mmDMIF_PG3_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4436
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL 0x4435
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4437
+#define mmDMIF_PG3_DPG_PIPE_URGENCY_CONTROL 0x4433
+#define mmDMIF_PG3_DPG_TEST_DEBUG_DATA 0x4439
+#define mmDMIF_PG3_DPG_TEST_DEBUG_INDEX 0x4438
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL1 0x4730
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL2 0x4731
+#define mmDMIF_PG4_DPG_PIPE_DPM_CONTROL 0x4734
+#define mmDMIF_PG4_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4736
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL 0x4735
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4737
+#define mmDMIF_PG4_DPG_PIPE_URGENCY_CONTROL 0x4733
+#define mmDMIF_PG4_DPG_TEST_DEBUG_DATA 0x4739
+#define mmDMIF_PG4_DPG_TEST_DEBUG_INDEX 0x4738
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL1 0x4A30
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL2 0x4A31
+#define mmDMIF_PG5_DPG_PIPE_DPM_CONTROL 0x4A34
+#define mmDMIF_PG5_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4A36
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL 0x4A35
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4A37
+#define mmDMIF_PG5_DPG_PIPE_URGENCY_CONTROL 0x4A33
+#define mmDMIF_PG5_DPG_TEST_DEBUG_DATA 0x4A39
+#define mmDMIF_PG5_DPG_TEST_DEBUG_INDEX 0x4A38
+#define mmDMIF_STATUS 0x02F7
+#define mmDMIF_STATUS2 0x0301
+#define mmDMIF_TEST_DEBUG_DATA 0x0313
+#define mmDMIF_TEST_DEBUG_INDEX 0x0312
+#define mmDOUT_DCE_VCE_CONTROL 0x18FF
+#define mmDOUT_POWER_MANAGEMENT_CNTL 0x1841
+#define mmDOUT_SCRATCH0 0x1844
+#define mmDOUT_SCRATCH1 0x1845
+#define mmDOUT_SCRATCH2 0x1846
+#define mmDOUT_SCRATCH3 0x1847
+#define mmDOUT_SCRATCH4 0x1848
+#define mmDOUT_SCRATCH5 0x1849
+#define mmDOUT_SCRATCH6 0x184A
+#define mmDOUT_SCRATCH7 0x184B
+#define mmDOUT_TEST_DEBUG_DATA 0x184E
+#define mmDOUT_TEST_DEBUG_INDEX 0x184D
+#define mmDP0_DP_CONFIG 0x1CC2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x1CD3
+#define mmDP0_DP_DPHY_CNTL 0x1CD0
+#define mmDP0_DP_DPHY_CRC_CNTL 0x1CD7
+#define mmDP0_DP_DPHY_CRC_EN 0x1CD6
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x1CC6
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x1CC7
+#define mmDP0_DP_DPHY_CRC_RESULT 0x1CD8
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x1CE9
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x1CD4
+#define mmDP0_DP_DPHY_SYM0 0x1CD2
+#define mmDP0_DP_DPHY_SYM1 0x1CE0
+#define mmDP0_DP_DPHY_SYM2 0x1CDF
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x1CD1
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x1CC8
+#define mmDP0_DP_LINK_CNTL 0x1CC0
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x1CCC
+#define mmDP0_DP_MSA_COLORIMETRY 0x1CDA
+#define mmDP0_DP_MSA_MISC 0x1CC5
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE1 0x1CEA
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE2 0x1CEB
+#define mmDP0_DP_MSE_LINK_TIMING 0x1CE8
+#define mmDP0_DP_MSE_MISC_CNTL 0x1CDB
+#define mmDP0_DP_MSE_RATE_CNTL 0x1CE1
+#define mmDP0_DP_MSE_RATE_UPDATE 0x1CE3
+#define mmDP0_DP_MSE_SAT0 0x1CE4
+#define mmDP0_DP_MSE_SAT1 0x1CE5
+#define mmDP0_DP_MSE_SAT2 0x1CE6
+#define mmDP0_DP_MSE_SAT_UPDATE 0x1CE7
+#define mmDP0_DP_PIXEL_FORMAT 0x1CC1
+#define mmDP0_DP_SEC_AUD_M 0x1CA7
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x1CA8
+#define mmDP0_DP_SEC_AUD_N 0x1CA5
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x1CA6
+#define mmDP0_DP_SEC_CNTL 0x1CA0
+#define mmDP0_DP_SEC_CNTL1 0x1CAB
+#define mmDP0_DP_SEC_FRAMING1 0x1CA1
+#define mmDP0_DP_SEC_FRAMING2 0x1CA2
+#define mmDP0_DP_SEC_FRAMING3 0x1CA3
+#define mmDP0_DP_SEC_FRAMING4 0x1CA4
+#define mmDP0_DP_SEC_PACKET_CNTL 0x1CAA
+#define mmDP0_DP_SEC_TIMESTAMP 0x1CA9
+#define mmDP0_DP_STEER_FIFO 0x1CC4
+#define mmDP0_DP_TEST_DEBUG_DATA 0x1CFD
+#define mmDP0_DP_TEST_DEBUG_INDEX 0x1CFC
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x1CCF
+#define mmDP0_DP_VID_M 0x1CCB
+#define mmDP0_DP_VID_MSA_VBID 0x1CCD
+#define mmDP0_DP_VID_N 0x1CCA
+#define mmDP0_DP_VID_STREAM_CNTL 0x1CC3
+#define mmDP0_DP_VID_TIMING 0x1CC9
+#define mmDP1_DP_CONFIG 0x1FC2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x1FD3
+#define mmDP1_DP_DPHY_CNTL 0x1FD0
+#define mmDP1_DP_DPHY_CRC_CNTL 0x1FD7
+#define mmDP1_DP_DPHY_CRC_EN 0x1FD6
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x1FC6
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x1FC7
+#define mmDP1_DP_DPHY_CRC_RESULT 0x1FD8
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x1FE9
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x1FD4
+#define mmDP1_DP_DPHY_SYM0 0x1FD2
+#define mmDP1_DP_DPHY_SYM1 0x1FE0
+#define mmDP1_DP_DPHY_SYM2 0x1FDF
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x1FD1
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x1FC8
+#define mmDP1_DP_LINK_CNTL 0x1FC0
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x1FCC
+#define mmDP1_DP_MSA_COLORIMETRY 0x1FDA
+#define mmDP1_DP_MSA_MISC 0x1FC5
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE1 0x1FEA
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE2 0x1FEB
+#define mmDP1_DP_MSE_LINK_TIMING 0x1FE8
+#define mmDP1_DP_MSE_MISC_CNTL 0x1FDB
+#define mmDP1_DP_MSE_RATE_CNTL 0x1FE1
+#define mmDP1_DP_MSE_RATE_UPDATE 0x1FE3
+#define mmDP1_DP_MSE_SAT0 0x1FE4
+#define mmDP1_DP_MSE_SAT1 0x1FE5
+#define mmDP1_DP_MSE_SAT2 0x1FE6
+#define mmDP1_DP_MSE_SAT_UPDATE 0x1FE7
+#define mmDP1_DP_PIXEL_FORMAT 0x1FC1
+#define mmDP1_DP_SEC_AUD_M 0x1FA7
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x1FA8
+#define mmDP1_DP_SEC_AUD_N 0x1FA5
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x1FA6
+#define mmDP1_DP_SEC_CNTL 0x1FA0
+#define mmDP1_DP_SEC_CNTL1 0x1FAB
+#define mmDP1_DP_SEC_FRAMING1 0x1FA1
+#define mmDP1_DP_SEC_FRAMING2 0x1FA2
+#define mmDP1_DP_SEC_FRAMING3 0x1FA3
+#define mmDP1_DP_SEC_FRAMING4 0x1FA4
+#define mmDP1_DP_SEC_PACKET_CNTL 0x1FAA
+#define mmDP1_DP_SEC_TIMESTAMP 0x1FA9
+#define mmDP1_DP_STEER_FIFO 0x1FC4
+#define mmDP1_DP_TEST_DEBUG_DATA 0x1FFD
+#define mmDP1_DP_TEST_DEBUG_INDEX 0x1FFC
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x1FCF
+#define mmDP1_DP_VID_M 0x1FCB
+#define mmDP1_DP_VID_MSA_VBID 0x1FCD
+#define mmDP1_DP_VID_N 0x1FCA
+#define mmDP1_DP_VID_STREAM_CNTL 0x1FC3
+#define mmDP1_DP_VID_TIMING 0x1FC9
+#define mmDP2_DP_CONFIG 0x42C2
+#define mmDP2_DP_DPHY_8B10B_CNTL 0x42D3
+#define mmDP2_DP_DPHY_CNTL 0x42D0
+#define mmDP2_DP_DPHY_CRC_CNTL 0x42D7
+#define mmDP2_DP_DPHY_CRC_EN 0x42D6
+#define mmDP2_DP_DPHY_CRC_MST_CNTL 0x42C6
+#define mmDP2_DP_DPHY_CRC_MST_STATUS 0x42C7
+#define mmDP2_DP_DPHY_CRC_RESULT 0x42D8
+#define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS 0x42E9
+#define mmDP2_DP_DPHY_PRBS_CNTL 0x42D4
+#define mmDP2_DP_DPHY_SYM0 0x42D2
+#define mmDP2_DP_DPHY_SYM1 0x42E0
+#define mmDP2_DP_DPHY_SYM2 0x42DF
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x42D1
+#define mmDP2_DP_HBR2_EYE_PATTERN 0x42C8
+#define mmDP2_DP_LINK_CNTL 0x42C0
+#define mmDP2_DP_LINK_FRAMING_CNTL 0x42CC
+#define mmDP2_DP_MSA_COLORIMETRY 0x42DA
+#define mmDP2_DP_MSA_MISC 0x42C5
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE1 0x42EA
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE2 0x42EB
+#define mmDP2_DP_MSE_LINK_TIMING 0x42E8
+#define mmDP2_DP_MSE_MISC_CNTL 0x42DB
+#define mmDP2_DP_MSE_RATE_CNTL 0x42E1
+#define mmDP2_DP_MSE_RATE_UPDATE 0x42E3
+#define mmDP2_DP_MSE_SAT0 0x42E4
+#define mmDP2_DP_MSE_SAT1 0x42E5
+#define mmDP2_DP_MSE_SAT2 0x42E6
+#define mmDP2_DP_MSE_SAT_UPDATE 0x42E7
+#define mmDP2_DP_PIXEL_FORMAT 0x42C1
+#define mmDP2_DP_SEC_AUD_M 0x42A7
+#define mmDP2_DP_SEC_AUD_M_READBACK 0x42A8
+#define mmDP2_DP_SEC_AUD_N 0x42A5
+#define mmDP2_DP_SEC_AUD_N_READBACK 0x42A6
+#define mmDP2_DP_SEC_CNTL 0x42A0
+#define mmDP2_DP_SEC_CNTL1 0x42AB
+#define mmDP2_DP_SEC_FRAMING1 0x42A1
+#define mmDP2_DP_SEC_FRAMING2 0x42A2
+#define mmDP2_DP_SEC_FRAMING3 0x42A3
+#define mmDP2_DP_SEC_FRAMING4 0x42A4
+#define mmDP2_DP_SEC_PACKET_CNTL 0x42AA
+#define mmDP2_DP_SEC_TIMESTAMP 0x42A9
+#define mmDP2_DP_STEER_FIFO 0x42C4
+#define mmDP2_DP_TEST_DEBUG_DATA 0x42FD
+#define mmDP2_DP_TEST_DEBUG_INDEX 0x42FC
+#define mmDP2_DP_VID_INTERRUPT_CNTL 0x42CF
+#define mmDP2_DP_VID_M 0x42CB
+#define mmDP2_DP_VID_MSA_VBID 0x42CD
+#define mmDP2_DP_VID_N 0x42CA
+#define mmDP2_DP_VID_STREAM_CNTL 0x42C3
+#define mmDP2_DP_VID_TIMING 0x42C9
+#define mmDP3_DP_CONFIG 0x45C2
+#define mmDP3_DP_DPHY_8B10B_CNTL 0x45D3
+#define mmDP3_DP_DPHY_CNTL 0x45D0
+#define mmDP3_DP_DPHY_CRC_CNTL 0x45D7
+#define mmDP3_DP_DPHY_CRC_EN 0x45D6
+#define mmDP3_DP_DPHY_CRC_MST_CNTL 0x45C6
+#define mmDP3_DP_DPHY_CRC_MST_STATUS 0x45C7
+#define mmDP3_DP_DPHY_CRC_RESULT 0x45D8
+#define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS 0x45E9
+#define mmDP3_DP_DPHY_PRBS_CNTL 0x45D4
+#define mmDP3_DP_DPHY_SYM0 0x45D2
+#define mmDP3_DP_DPHY_SYM1 0x45E0
+#define mmDP3_DP_DPHY_SYM2 0x45DF
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x45D1
+#define mmDP3_DP_HBR2_EYE_PATTERN 0x45C8
+#define mmDP3_DP_LINK_CNTL 0x45C0
+#define mmDP3_DP_LINK_FRAMING_CNTL 0x45CC
+#define mmDP3_DP_MSA_COLORIMETRY 0x45DA
+#define mmDP3_DP_MSA_MISC 0x45C5
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE1 0x45EA
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE2 0x45EB
+#define mmDP3_DP_MSE_LINK_TIMING 0x45E8
+#define mmDP3_DP_MSE_MISC_CNTL 0x45DB
+#define mmDP3_DP_MSE_RATE_CNTL 0x45E1
+#define mmDP3_DP_MSE_RATE_UPDATE 0x45E3
+#define mmDP3_DP_MSE_SAT0 0x45E4
+#define mmDP3_DP_MSE_SAT1 0x45E5
+#define mmDP3_DP_MSE_SAT2 0x45E6
+#define mmDP3_DP_MSE_SAT_UPDATE 0x45E7
+#define mmDP3_DP_PIXEL_FORMAT 0x45C1
+#define mmDP3_DP_SEC_AUD_M 0x45A7
+#define mmDP3_DP_SEC_AUD_M_READBACK 0x45A8
+#define mmDP3_DP_SEC_AUD_N 0x45A5
+#define mmDP3_DP_SEC_AUD_N_READBACK 0x45A6
+#define mmDP3_DP_SEC_CNTL 0x45A0
+#define mmDP3_DP_SEC_CNTL1 0x45AB
+#define mmDP3_DP_SEC_FRAMING1 0x45A1
+#define mmDP3_DP_SEC_FRAMING2 0x45A2
+#define mmDP3_DP_SEC_FRAMING3 0x45A3
+#define mmDP3_DP_SEC_FRAMING4 0x45A4
+#define mmDP3_DP_SEC_PACKET_CNTL 0x45AA
+#define mmDP3_DP_SEC_TIMESTAMP 0x45A9
+#define mmDP3_DP_STEER_FIFO 0x45C4
+#define mmDP3_DP_TEST_DEBUG_DATA 0x45FD
+#define mmDP3_DP_TEST_DEBUG_INDEX 0x45FC
+#define mmDP3_DP_VID_INTERRUPT_CNTL 0x45CF
+#define mmDP3_DP_VID_M 0x45CB
+#define mmDP3_DP_VID_MSA_VBID 0x45CD
+#define mmDP3_DP_VID_N 0x45CA
+#define mmDP3_DP_VID_STREAM_CNTL 0x45C3
+#define mmDP3_DP_VID_TIMING 0x45C9
+#define mmDP4_DP_CONFIG 0x48C2
+#define mmDP4_DP_DPHY_8B10B_CNTL 0x48D3
+#define mmDP4_DP_DPHY_CNTL 0x48D0
+#define mmDP4_DP_DPHY_CRC_CNTL 0x48D7
+#define mmDP4_DP_DPHY_CRC_EN 0x48D6
+#define mmDP4_DP_DPHY_CRC_MST_CNTL 0x48C6
+#define mmDP4_DP_DPHY_CRC_MST_STATUS 0x48C7
+#define mmDP4_DP_DPHY_CRC_RESULT 0x48D8
+#define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS 0x48E9
+#define mmDP4_DP_DPHY_PRBS_CNTL 0x48D4
+#define mmDP4_DP_DPHY_SYM0 0x48D2
+#define mmDP4_DP_DPHY_SYM1 0x48E0
+#define mmDP4_DP_DPHY_SYM2 0x48DF
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x48D1
+#define mmDP4_DP_HBR2_EYE_PATTERN 0x48C8
+#define mmDP4_DP_LINK_CNTL 0x48C0
+#define mmDP4_DP_LINK_FRAMING_CNTL 0x48CC
+#define mmDP4_DP_MSA_COLORIMETRY 0x48DA
+#define mmDP4_DP_MSA_MISC 0x48C5
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE1 0x48EA
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE2 0x48EB
+#define mmDP4_DP_MSE_LINK_TIMING 0x48E8
+#define mmDP4_DP_MSE_MISC_CNTL 0x48DB
+#define mmDP4_DP_MSE_RATE_CNTL 0x48E1
+#define mmDP4_DP_MSE_RATE_UPDATE 0x48E3
+#define mmDP4_DP_MSE_SAT0 0x48E4
+#define mmDP4_DP_MSE_SAT1 0x48E5
+#define mmDP4_DP_MSE_SAT2 0x48E6
+#define mmDP4_DP_MSE_SAT_UPDATE 0x48E7
+#define mmDP4_DP_PIXEL_FORMAT 0x48C1
+#define mmDP4_DP_SEC_AUD_M 0x48A7
+#define mmDP4_DP_SEC_AUD_M_READBACK 0x48A8
+#define mmDP4_DP_SEC_AUD_N 0x48A5
+#define mmDP4_DP_SEC_AUD_N_READBACK 0x48A6
+#define mmDP4_DP_SEC_CNTL 0x48A0
+#define mmDP4_DP_SEC_CNTL1 0x48AB
+#define mmDP4_DP_SEC_FRAMING1 0x48A1
+#define mmDP4_DP_SEC_FRAMING2 0x48A2
+#define mmDP4_DP_SEC_FRAMING3 0x48A3
+#define mmDP4_DP_SEC_FRAMING4 0x48A4
+#define mmDP4_DP_SEC_PACKET_CNTL 0x48AA
+#define mmDP4_DP_SEC_TIMESTAMP 0x48A9
+#define mmDP4_DP_STEER_FIFO 0x48C4
+#define mmDP4_DP_TEST_DEBUG_DATA 0x48FD
+#define mmDP4_DP_TEST_DEBUG_INDEX 0x48FC
+#define mmDP4_DP_VID_INTERRUPT_CNTL 0x48CF
+#define mmDP4_DP_VID_M 0x48CB
+#define mmDP4_DP_VID_MSA_VBID 0x48CD
+#define mmDP4_DP_VID_N 0x48CA
+#define mmDP4_DP_VID_STREAM_CNTL 0x48C3
+#define mmDP4_DP_VID_TIMING 0x48C9
+#define mmDP5_DP_CONFIG 0x4BC2
+#define mmDP5_DP_DPHY_8B10B_CNTL 0x4BD3
+#define mmDP5_DP_DPHY_CNTL 0x4BD0
+#define mmDP5_DP_DPHY_CRC_CNTL 0x4BD7
+#define mmDP5_DP_DPHY_CRC_EN 0x4BD6
+#define mmDP5_DP_DPHY_CRC_MST_CNTL 0x4BC6
+#define mmDP5_DP_DPHY_CRC_MST_STATUS 0x4BC7
+#define mmDP5_DP_DPHY_CRC_RESULT 0x4BD8
+#define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS 0x4BE9
+#define mmDP5_DP_DPHY_PRBS_CNTL 0x4BD4
+#define mmDP5_DP_DPHY_SYM0 0x4BD2
+#define mmDP5_DP_DPHY_SYM1 0x4BE0
+#define mmDP5_DP_DPHY_SYM2 0x4BDF
+#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL 0x4BD1
+#define mmDP5_DP_HBR2_EYE_PATTERN 0x4BC8
+#define mmDP5_DP_LINK_CNTL 0x4BC0
+#define mmDP5_DP_LINK_FRAMING_CNTL 0x4BCC
+#define mmDP5_DP_MSA_COLORIMETRY 0x4BDA
+#define mmDP5_DP_MSA_MISC 0x4BC5
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE1 0x4BEA
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE2 0x4BEB
+#define mmDP5_DP_MSE_LINK_TIMING 0x4BE8
+#define mmDP5_DP_MSE_MISC_CNTL 0x4BDB
+#define mmDP5_DP_MSE_RATE_CNTL 0x4BE1
+#define mmDP5_DP_MSE_RATE_UPDATE 0x4BE3
+#define mmDP5_DP_MSE_SAT0 0x4BE4
+#define mmDP5_DP_MSE_SAT1 0x4BE5
+#define mmDP5_DP_MSE_SAT2 0x4BE6
+#define mmDP5_DP_MSE_SAT_UPDATE 0x4BE7
+#define mmDP5_DP_PIXEL_FORMAT 0x4BC1
+#define mmDP5_DP_SEC_AUD_M 0x4BA7
+#define mmDP5_DP_SEC_AUD_M_READBACK 0x4BA8
+#define mmDP5_DP_SEC_AUD_N 0x4BA5
+#define mmDP5_DP_SEC_AUD_N_READBACK 0x4BA6
+#define mmDP5_DP_SEC_CNTL 0x4BA0
+#define mmDP5_DP_SEC_CNTL1 0x4BAB
+#define mmDP5_DP_SEC_FRAMING1 0x4BA1
+#define mmDP5_DP_SEC_FRAMING2 0x4BA2
+#define mmDP5_DP_SEC_FRAMING3 0x4BA3
+#define mmDP5_DP_SEC_FRAMING4 0x4BA4
+#define mmDP5_DP_SEC_PACKET_CNTL 0x4BAA
+#define mmDP5_DP_SEC_TIMESTAMP 0x4BA9
+#define mmDP5_DP_STEER_FIFO 0x4BC4
+#define mmDP5_DP_TEST_DEBUG_DATA 0x4BFD
+#define mmDP5_DP_TEST_DEBUG_INDEX 0x4BFC
+#define mmDP5_DP_VID_INTERRUPT_CNTL 0x4BCF
+#define mmDP5_DP_VID_M 0x4BCB
+#define mmDP5_DP_VID_MSA_VBID 0x4BCD
+#define mmDP5_DP_VID_N 0x4BCA
+#define mmDP5_DP_VID_STREAM_CNTL 0x4BC3
+#define mmDP5_DP_VID_TIMING 0x4BC9
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1882
+#define mmDP_AUX0_AUX_CONTROL 0x1880
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x188A
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x188B
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x188D
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1889
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1888
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x188C
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROL 0x188E
+#define mmDP_AUX0_AUX_GTC_SYNC_DATA 0x1890
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1883
+#define mmDP_AUX0_AUX_LS_DATA 0x1887
+#define mmDP_AUX0_AUX_LS_STATUS 0x1885
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1881
+#define mmDP_AUX0_AUX_SW_DATA 0x1886
+#define mmDP_AUX0_AUX_SW_STATUS 0x1884
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1896
+#define mmDP_AUX1_AUX_CONTROL 0x1894
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x189E
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x189F
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x18A1
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x189D
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x189C
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x18A0
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROL 0x18A2
+#define mmDP_AUX1_AUX_GTC_SYNC_DATA 0x18A4
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1897
+#define mmDP_AUX1_AUX_LS_DATA 0x189B
+#define mmDP_AUX1_AUX_LS_STATUS 0x1899
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1895
+#define mmDP_AUX1_AUX_SW_DATA 0x189A
+#define mmDP_AUX1_AUX_SW_STATUS 0x1898
+#define mmDP_AUX2_AUX_ARB_CONTROL 0x18AA
+#define mmDP_AUX2_AUX_CONTROL 0x18A8
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0 0x18B2
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1 0x18B3
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS 0x18B5
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL 0x18B1
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x18B0
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS 0x18B4
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROL 0x18B6
+#define mmDP_AUX2_AUX_GTC_SYNC_DATA 0x18B8
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL 0x18AB
+#define mmDP_AUX2_AUX_LS_DATA 0x18AF
+#define mmDP_AUX2_AUX_LS_STATUS 0x18AD
+#define mmDP_AUX2_AUX_SW_CONTROL 0x18A9
+#define mmDP_AUX2_AUX_SW_DATA 0x18AE
+#define mmDP_AUX2_AUX_SW_STATUS 0x18AC
+#define mmDP_AUX3_AUX_ARB_CONTROL 0x18C2
+#define mmDP_AUX3_AUX_CONTROL 0x18C0
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0 0x18CA
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1 0x18CB
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS 0x18CD
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL 0x18C9
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x18C8
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS 0x18CC
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROL 0x18CE
+#define mmDP_AUX3_AUX_GTC_SYNC_DATA 0x18D0
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL 0x18C3
+#define mmDP_AUX3_AUX_LS_DATA 0x18C7
+#define mmDP_AUX3_AUX_LS_STATUS 0x18C5
+#define mmDP_AUX3_AUX_SW_CONTROL 0x18C1
+#define mmDP_AUX3_AUX_SW_DATA 0x18C6
+#define mmDP_AUX3_AUX_SW_STATUS 0x18C4
+#define mmDP_AUX4_AUX_ARB_CONTROL 0x18D6
+#define mmDP_AUX4_AUX_CONTROL 0x18D4
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0 0x18DE
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1 0x18DF
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS 0x18E1
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL 0x18DD
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x18DC
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS 0x18E0
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROL 0x18E2
+#define mmDP_AUX4_AUX_GTC_SYNC_DATA 0x18E4
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL 0x18D7
+#define mmDP_AUX4_AUX_LS_DATA 0x18DB
+#define mmDP_AUX4_AUX_LS_STATUS 0x18D9
+#define mmDP_AUX4_AUX_SW_CONTROL 0x18D5
+#define mmDP_AUX4_AUX_SW_DATA 0x18DA
+#define mmDP_AUX4_AUX_SW_STATUS 0x18D8
+#define mmDP_AUX5_AUX_ARB_CONTROL 0x18EA
+#define mmDP_AUX5_AUX_CONTROL 0x18E8
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0 0x18F2
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1 0x18F3
+#define mmDP_AUX5_AUX_DPHY_RX_STATUS 0x18F5
+#define mmDP_AUX5_AUX_DPHY_TX_CONTROL 0x18F1
+#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL 0x18F0
+#define mmDP_AUX5_AUX_DPHY_TX_STATUS 0x18F4
+#define mmDP_AUX5_AUX_GTC_SYNC_CONTROL 0x18F6
+#define mmDP_AUX5_AUX_GTC_SYNC_DATA 0x18F8
+#define mmDP_AUX5_AUX_INTERRUPT_CONTROL 0x18EB
+#define mmDP_AUX5_AUX_LS_DATA 0x18EF
+#define mmDP_AUX5_AUX_LS_STATUS 0x18ED
+#define mmDP_AUX5_AUX_SW_CONTROL 0x18E9
+#define mmDP_AUX5_AUX_SW_DATA 0x18EE
+#define mmDP_AUX5_AUX_SW_STATUS 0x18EC
+#define mmDP_CONFIG 0x1CC2
+#define mmDP_DPHY_8B10B_CNTL 0x1CD3
+#define mmDP_DPHY_CNTL 0x1CD0
+#define mmDP_DPHY_CRC_CNTL 0x1CD7
+#define mmDP_DPHY_CRC_EN 0x1CD6
+#define mmDP_DPHY_CRC_MST_CNTL 0x1CC6
+#define mmDP_DPHY_CRC_MST_STATUS 0x1CC7
+#define mmDP_DPHY_CRC_RESULT 0x1CD8
+#define mmDP_DPHY_FAST_TRAINING 0x1CCE
+#define mmDP_DPHY_FAST_TRAINING_STATUS 0x1CE9
+#define mmDP_DPHY_PRBS_CNTL 0x1CD4
+#define mmDP_DPHY_SYM0 0x1CD2
+#define mmDP_DPHY_SYM1 0x1CE0
+#define mmDP_DPHY_SYM2 0x1CDF
+#define mmDP_DPHY_TRAINING_PATTERN_SEL 0x1CD1
+#define mmDP_DTO0_MODULO 0x0142
+#define mmDP_DTO0_PHASE 0x0141
+#define mmDP_DTO1_MODULO 0x0146
+#define mmDP_DTO1_PHASE 0x0145
+#define mmDP_DTO2_MODULO 0x014A
+#define mmDP_DTO2_PHASE 0x0149
+#define mmDP_DTO3_MODULO 0x014E
+#define mmDP_DTO3_PHASE 0x014D
+#define mmDP_DTO4_MODULO 0x0152
+#define mmDP_DTO4_PHASE 0x0151
+#define mmDP_DTO5_MODULO 0x0156
+#define mmDP_DTO5_PHASE 0x0155
+#define mmDPG_PIPE_ARBITRATION_CONTROL1 0x1B30
+#define mmDPG_PIPE_ARBITRATION_CONTROL2 0x1B31
+#define mmDPG_PIPE_DPM_CONTROL 0x1B34
+#define mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1B36
+#define mmDPG_PIPE_STUTTER_CONTROL 0x1B35
+#define mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1B37
+#define mmDPG_PIPE_URGENCY_CONTROL 0x1B33
+#define mmDPG_TEST_DEBUG_DATA 0x1B39
+#define mmDPG_TEST_DEBUG_INDEX 0x1B38
+#define mmDP_HBR2_EYE_PATTERN 0x1CC8
+#define mmDP_LINK_CNTL 0x1CC0
+#define mmDP_LINK_FRAMING_CNTL 0x1CCC
+#define mmDP_MSA_COLORIMETRY 0x1CDA
+#define mmDP_MSA_MISC 0x1CC5
+#define mmDP_MSA_V_TIMING_OVERRIDE1 0x1CEA
+#define mmDP_MSA_V_TIMING_OVERRIDE2 0x1CEB
+#define mmDP_MSE_LINK_TIMING 0x1CE8
+#define mmDP_MSE_MISC_CNTL 0x1CDB
+#define mmDP_MSE_RATE_CNTL 0x1CE1
+#define mmDP_MSE_RATE_UPDATE 0x1CE3
+#define mmDP_MSE_SAT0 0x1CE4
+#define mmDP_MSE_SAT1 0x1CE5
+#define mmDP_MSE_SAT2 0x1CE6
+#define mmDP_MSE_SAT_UPDATE 0x1CE7
+#define mmDP_PIXEL_FORMAT 0x1CC1
+#define mmDP_SEC_AUD_M 0x1CA7
+#define mmDP_SEC_AUD_M_READBACK 0x1CA8
+#define mmDP_SEC_AUD_N 0x1CA5
+#define mmDP_SEC_AUD_N_READBACK 0x1CA6
+#define mmDP_SEC_CNTL 0x1CA0
+#define mmDP_SEC_CNTL1 0x1CAB
+#define mmDP_SEC_FRAMING1 0x1CA1
+#define mmDP_SEC_FRAMING2 0x1CA2
+#define mmDP_SEC_FRAMING3 0x1CA3
+#define mmDP_SEC_FRAMING4 0x1CA4
+#define mmDP_SEC_PACKET_CNTL 0x1CAA
+#define mmDP_SEC_TIMESTAMP 0x1CA9
+#define mmDP_STEER_FIFO 0x1CC4
+#define mmDP_TEST_DEBUG_DATA 0x1CFD
+#define mmDP_TEST_DEBUG_INDEX 0x1CFC
+#define mmDP_VID_INTERRUPT_CNTL 0x1CCF
+#define mmDP_VID_M 0x1CCB
+#define mmDP_VID_MSA_VBID 0x1CCD
+#define mmDP_VID_N 0x1CCA
+#define mmDP_VID_STREAM_CNTL 0x1CC3
+#define mmDP_VID_TIMING 0x1CC9
+#define mmDVOACLKC_CNTL 0x016A
+#define mmDVOACLKC_MVP_CNTL 0x0169
+#define mmDVOACLKD_CNTL 0x0168
+#define mmDVO_CLK_ENABLE 0x0129
+#define mmDVO_CONTROL 0x185B
+#define mmDVO_CRC2_SIG_MASK 0x185D
+#define mmDVO_CRC2_SIG_RESULT 0x185E
+#define mmDVO_CRC_EN 0x185C
+#define mmDVO_ENABLE 0x1858
+#define mmDVO_FIFO_ERROR_STATUS 0x185F
+#define mmDVO_OUTPUT 0x185A
+#define mmDVO_SKEW_ADJUST 0x197D
+#define mmDVO_SOURCE_SELECT 0x1859
+#define mmDVO_STRENGTH_CONTROL 0x197B
+#define mmDVO_VREF_CONTROL 0x197C
+#define mmEXT_OVERSCAN_LEFT_RIGHT 0x1B5E
+#define mmEXT_OVERSCAN_TOP_BOTTOM 0x1B5F
+#define mmFBC_CLIENT_REGION_MASK 0x16EB
+#define mmFBC_CNTL 0x16D0
+#define mmFBC_COMP_CNTL 0x16D4
+#define mmFBC_COMP_MODE 0x16D5
+#define mmFBC_CSM_REGION_OFFSET_01 0x16E9
+#define mmFBC_CSM_REGION_OFFSET_23 0x16EA
+#define mmFBC_DEBUG0 0x16D6
+#define mmFBC_DEBUG1 0x16D7
+#define mmFBC_DEBUG2 0x16D8
+#define mmFBC_DEBUG_COMP 0x16EC
+#define mmFBC_DEBUG_CSR 0x16ED
+#define mmFBC_DEBUG_CSR_RDATA 0x16EE
+#define mmFBC_DEBUG_CSR_RDATA_HI 0x16F6
+#define mmFBC_DEBUG_CSR_WDATA 0x16EF
+#define mmFBC_DEBUG_CSR_WDATA_HI 0x16F7
+#define mmFBC_IDLE_FORCE_CLEAR_MASK 0x16D2
+#define mmFBC_IDLE_MASK 0x16D1
+#define mmFBC_IND_LUT0 0x16D9
+#define mmFBC_IND_LUT10 0x16E3
+#define mmFBC_IND_LUT1 0x16DA
+#define mmFBC_IND_LUT11 0x16E4
+#define mmFBC_IND_LUT12 0x16E5
+#define mmFBC_IND_LUT13 0x16E6
+#define mmFBC_IND_LUT14 0x16E7
+#define mmFBC_IND_LUT15 0x16E8
+#define mmFBC_IND_LUT2 0x16DB
+#define mmFBC_IND_LUT3 0x16DC
+#define mmFBC_IND_LUT4 0x16DD
+#define mmFBC_IND_LUT5 0x16DE
+#define mmFBC_IND_LUT6 0x16DF
+#define mmFBC_IND_LUT7 0x16E0
+#define mmFBC_IND_LUT8 0x16E1
+#define mmFBC_IND_LUT9 0x16E2
+#define mmFBC_MISC 0x16F0
+#define mmFBC_START_STOP_DELAY 0x16D3
+#define mmFBC_STATUS 0x16F1
+#define mmFBC_TEST_DEBUG_DATA 0x16F5
+#define mmFBC_TEST_DEBUG_INDEX 0x16F4
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1BF2
+#define mmFMT0_FMT_CLAMP_CNTL 0x1BF9
+#define mmFMT0_FMT_CONTROL 0x1BEE
+#define mmFMT0_FMT_CRC_CNTL 0x1BFA
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL 0x1BFE
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1BFC
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN 0x1BFD
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN_MASK 0x1BFB
+#define mmFMT0_FMT_DEBUG_CNTL 0x1BFF
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1BF5
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1BF4
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1BF3
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x1BED
+#define mmFMT0_FMT_FORCE_DATA_0_1 0x1BF0
+#define mmFMT0_FMT_FORCE_DATA_2_3 0x1BF1
+#define mmFMT0_FMT_FORCE_OUTPUT_CNTL 0x1BEF
+#define mmFMT0_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1BF6
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1BF7
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1BF8
+#define mmFMT0_FMT_TEST_DEBUG_DATA 0x1BEC
+#define mmFMT0_FMT_TEST_DEBUG_INDEX 0x1BEB
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x1EF2
+#define mmFMT1_FMT_CLAMP_CNTL 0x1EF9
+#define mmFMT1_FMT_CONTROL 0x1EEE
+#define mmFMT1_FMT_CRC_CNTL 0x1EFA
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL 0x1EFE
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1EFC
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN 0x1EFD
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN_MASK 0x1EFB
+#define mmFMT1_FMT_DEBUG_CNTL 0x1EFF
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x1EF5
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x1EF4
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x1EF3
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1EED
+#define mmFMT1_FMT_FORCE_DATA_0_1 0x1EF0
+#define mmFMT1_FMT_FORCE_DATA_2_3 0x1EF1
+#define mmFMT1_FMT_FORCE_OUTPUT_CNTL 0x1EEF
+#define mmFMT1_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1EF6
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1EF7
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1EF8
+#define mmFMT1_FMT_TEST_DEBUG_DATA 0x1EEC
+#define mmFMT1_FMT_TEST_DEBUG_INDEX 0x1EEB
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL 0x41F2
+#define mmFMT2_FMT_CLAMP_CNTL 0x41F9
+#define mmFMT2_FMT_CONTROL 0x41EE
+#define mmFMT2_FMT_CRC_CNTL 0x41FA
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL 0x41FE
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x41FC
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN 0x41FD
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN_MASK 0x41FB
+#define mmFMT2_FMT_DEBUG_CNTL 0x41FF
+#define mmFMT2_FMT_DITHER_RAND_B_SEED 0x41F5
+#define mmFMT2_FMT_DITHER_RAND_G_SEED 0x41F4
+#define mmFMT2_FMT_DITHER_RAND_R_SEED 0x41F3
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL 0x41ED
+#define mmFMT2_FMT_FORCE_DATA_0_1 0x41F0
+#define mmFMT2_FMT_FORCE_DATA_2_3 0x41F1
+#define mmFMT2_FMT_FORCE_OUTPUT_CNTL 0x41EF
+#define mmFMT2_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x41F6
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x41F7
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x41F8
+#define mmFMT2_FMT_TEST_DEBUG_DATA 0x41EC
+#define mmFMT2_FMT_TEST_DEBUG_INDEX 0x41EB
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL 0x44F2
+#define mmFMT3_FMT_CLAMP_CNTL 0x44F9
+#define mmFMT3_FMT_CONTROL 0x44EE
+#define mmFMT3_FMT_CRC_CNTL 0x44FA
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL 0x44FE
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x44FC
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN 0x44FD
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN_MASK 0x44FB
+#define mmFMT3_FMT_DEBUG_CNTL 0x44FF
+#define mmFMT3_FMT_DITHER_RAND_B_SEED 0x44F5
+#define mmFMT3_FMT_DITHER_RAND_G_SEED 0x44F4
+#define mmFMT3_FMT_DITHER_RAND_R_SEED 0x44F3
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL 0x44ED
+#define mmFMT3_FMT_FORCE_DATA_0_1 0x44F0
+#define mmFMT3_FMT_FORCE_DATA_2_3 0x44F1
+#define mmFMT3_FMT_FORCE_OUTPUT_CNTL 0x44EF
+#define mmFMT3_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x44F6
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x44F7
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x44F8
+#define mmFMT3_FMT_TEST_DEBUG_DATA 0x44EC
+#define mmFMT3_FMT_TEST_DEBUG_INDEX 0x44EB
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL 0x47F2
+#define mmFMT4_FMT_CLAMP_CNTL 0x47F9
+#define mmFMT4_FMT_CONTROL 0x47EE
+#define mmFMT4_FMT_CRC_CNTL 0x47FA
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL 0x47FE
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x47FC
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN 0x47FD
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN_MASK 0x47FB
+#define mmFMT4_FMT_DEBUG_CNTL 0x47FF
+#define mmFMT4_FMT_DITHER_RAND_B_SEED 0x47F5
+#define mmFMT4_FMT_DITHER_RAND_G_SEED 0x47F4
+#define mmFMT4_FMT_DITHER_RAND_R_SEED 0x47F3
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL 0x47ED
+#define mmFMT4_FMT_FORCE_DATA_0_1 0x47F0
+#define mmFMT4_FMT_FORCE_DATA_2_3 0x47F1
+#define mmFMT4_FMT_FORCE_OUTPUT_CNTL 0x47EF
+#define mmFMT4_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x47F6
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x47F7
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x47F8
+#define mmFMT4_FMT_TEST_DEBUG_DATA 0x47EC
+#define mmFMT4_FMT_TEST_DEBUG_INDEX 0x47EB
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL 0x4AF2
+#define mmFMT5_FMT_CLAMP_CNTL 0x4AF9
+#define mmFMT5_FMT_CONTROL 0x4AEE
+#define mmFMT5_FMT_CRC_CNTL 0x4AFA
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL 0x4AFE
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x4AFC
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN 0x4AFD
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN_MASK 0x4AFB
+#define mmFMT5_FMT_DEBUG_CNTL 0x4AFF
+#define mmFMT5_FMT_DITHER_RAND_B_SEED 0x4AF5
+#define mmFMT5_FMT_DITHER_RAND_G_SEED 0x4AF4
+#define mmFMT5_FMT_DITHER_RAND_R_SEED 0x4AF3
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL 0x4AED
+#define mmFMT5_FMT_FORCE_DATA_0_1 0x4AF0
+#define mmFMT5_FMT_FORCE_DATA_2_3 0x4AF1
+#define mmFMT5_FMT_FORCE_OUTPUT_CNTL 0x4AEF
+#define mmFMT5_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x4AF6
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x4AF7
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x4AF8
+#define mmFMT5_FMT_TEST_DEBUG_DATA 0x4AEC
+#define mmFMT5_FMT_TEST_DEBUG_INDEX 0x4AEB
+#define mmFMT_BIT_DEPTH_CONTROL 0x1BF2
+#define mmFMT_CLAMP_CNTL 0x1BF9
+#define mmFMT_CONTROL 0x1BEE
+#define mmFMT_CRC_CNTL 0x1BFA
+#define mmFMT_CRC_SIG_BLUE_CONTROL 0x1BFE
+#define mmFMT_CRC_SIG_BLUE_CONTROL_MASK 0x1BFC
+#define mmFMT_CRC_SIG_RED_GREEN 0x1BFD
+#define mmFMT_CRC_SIG_RED_GREEN_MASK 0x1BFB
+#define mmFMT_DEBUG_CNTL 0x1BFF
+#define mmFMT_DITHER_RAND_B_SEED 0x1BF5
+#define mmFMT_DITHER_RAND_G_SEED 0x1BF4
+#define mmFMT_DITHER_RAND_R_SEED 0x1BF3
+#define mmFMT_DYNAMIC_EXP_CNTL 0x1BED
+#define mmFMT_FORCE_DATA_0_1 0x1BF0
+#define mmFMT_FORCE_DATA_2_3 0x1BF1
+#define mmFMT_FORCE_OUTPUT_CNTL 0x1BEF
+#define mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1BF6
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1BF7
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1BF8
+#define mmFMT_TEST_DEBUG_DATA 0x1BEC
+#define mmFMT_TEST_DEBUG_INDEX 0x1BEB
+#define mmGAMUT_REMAP_C11_C12 0x1A5A
+#define mmGAMUT_REMAP_C13_C14 0x1A5B
+#define mmGAMUT_REMAP_C21_C22 0x1A5C
+#define mmGAMUT_REMAP_C23_C24 0x1A5D
+#define mmGAMUT_REMAP_C31_C32 0x1A5E
+#define mmGAMUT_REMAP_C33_C34 0x1A5F
+#define mmGAMUT_REMAP_CONTROL 0x1A59
+#define mmGENENB 0x00F0
+#define mmGENERIC_I2C_CONTROL 0x1834
+#define mmGENERIC_I2C_DATA 0x183A
+#define mmGENERIC_I2C_INTERRUPT_CONTROL 0x1835
+#define mmGENERIC_I2C_PIN_DEBUG 0x183C
+#define mmGENERIC_I2C_PIN_SELECTION 0x183B
+#define mmGENERIC_I2C_SETUP 0x1838
+#define mmGENERIC_I2C_SPEED 0x1837
+#define mmGENERIC_I2C_STATUS 0x1836
+#define mmGENERIC_I2C_TRANSACTION 0x1839
+#define mmGENFC_RD 0x00F2
+#define mmGENFC_WT 0x00EE
+#define mmGENMO_RD 0x00F3
+#define mmGENMO_WT 0x00F0
+#define mmGENS0 0x00F0
+#define mmGENS1 0x00EE
+#define mmGRPH8_DATA 0x00F3
+#define mmGRPH8_IDX 0x00F3
+#define mmGRPH_COMPRESS_PITCH 0x1A1A
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS 0x1A19
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1A1B
+#define mmGRPH_CONTROL 0x1A01
+#define mmGRPH_DFQ_CONTROL 0x1A14
+#define mmGRPH_DFQ_STATUS 0x1A15
+#define mmGRPH_ENABLE 0x1A00
+#define mmGRPH_FLIP_CONTROL 0x1A12
+#define mmGRPH_INTERRUPT_CONTROL 0x1A17
+#define mmGRPH_INTERRUPT_STATUS 0x1A16
+#define mmGRPH_LUT_10BIT_BYPASS 0x1A02
+#define mmGRPH_PITCH 0x1A06
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS 0x1A04
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1A07
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS 0x1A05
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A08
+#define mmGRPH_STEREOSYNC_FLIP 0x1A97
+#define mmGRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1A18
+#define mmGRPH_SURFACE_ADDRESS_INUSE 0x1A13
+#define mmGRPH_SURFACE_OFFSET_X 0x1A09
+#define mmGRPH_SURFACE_OFFSET_Y 0x1A0A
+#define mmGRPH_SWAP_CNTL 0x1A03
+#define mmGRPH_UPDATE 0x1A11
+#define mmGRPH_X_END 0x1A0D
+#define mmGRPH_X_START 0x1A0B
+#define mmGRPH_Y_END 0x1A0E
+#define mmGRPH_Y_START 0x1A0C
+#define mmHDMI_ACR_32_0 0x1C37
+#define mmHDMI_ACR_32_1 0x1C38
+#define mmHDMI_ACR_44_0 0x1C39
+#define mmHDMI_ACR_44_1 0x1C3A
+#define mmHDMI_ACR_48_0 0x1C3B
+#define mmHDMI_ACR_48_1 0x1C3C
+#define mmHDMI_ACR_PACKET_CONTROL 0x1C0F
+#define mmHDMI_ACR_STATUS_0 0x1C3D
+#define mmHDMI_ACR_STATUS_1 0x1C3E
+#define mmHDMI_AUDIO_PACKET_CONTROL 0x1C0E
+#define mmHDMI_CONTROL 0x1C0C
+#define mmHDMI_GC 0x1C16
+#define mmHDMI_GENERIC_PACKET_CONTROL0 0x1C13
+#define mmHDMI_GENERIC_PACKET_CONTROL1 0x1C30
+#define mmHDMI_INFOFRAME_CONTROL0 0x1C11
+#define mmHDMI_INFOFRAME_CONTROL1 0x1C12
+#define mmHDMI_STATUS 0x1C0D
+#define mmHDMI_VBI_PACKET_CONTROL 0x1C10
+#define mmINPUT_CSC_C11_C12 0x1A36
+#define mmINPUT_CSC_C13_C14 0x1A37
+#define mmINPUT_CSC_C21_C22 0x1A38
+#define mmINPUT_CSC_C23_C24 0x1A39
+#define mmINPUT_CSC_C31_C32 0x1A3A
+#define mmINPUT_CSC_C33_C34 0x1A3B
+#define mmINPUT_CSC_CONTROL 0x1A35
+#define mmINPUT_GAMMA_CONTROL 0x1A10
+#define mmKEY_CONTROL 0x1A53
+#define mmKEY_RANGE_ALPHA 0x1A54
+#define mmKEY_RANGE_BLUE 0x1A57
+#define mmKEY_RANGE_GREEN 0x1A56
+#define mmKEY_RANGE_RED 0x1A55
+#define mmLB0_DC_MVP_LB_CONTROL 0x1ADB
+#define mmLB0_LB_DEBUG 0x1AFC
+#define mmLB0_LB_DEBUG2 0x1AC9
+#define mmLB0_LB_NO_OUTSTANDING_REQ_STATUS 0x1AC8
+#define mmLB0_LB_SYNC_RESET_SEL 0x1ACA
+#define mmLB0_LB_TEST_DEBUG_DATA 0x1AFF
+#define mmLB0_LB_TEST_DEBUG_INDEX 0x1AFE
+#define mmLB0_MVP_AFR_FLIP_FIFO_CNTL 0x1AD9
+#define mmLB0_MVP_AFR_FLIP_MODE 0x1AD8
+#define mmLB0_MVP_FLIP_LINE_NUM_INSERT 0x1ADA
+#define mmLB1_DC_MVP_LB_CONTROL 0x1DDB
+#define mmLB1_LB_DEBUG 0x1DFC
+#define mmLB1_LB_DEBUG2 0x1DC9
+#define mmLB1_LB_NO_OUTSTANDING_REQ_STATUS 0x1DC8
+#define mmLB1_LB_SYNC_RESET_SEL 0x1DCA
+#define mmLB1_LB_TEST_DEBUG_DATA 0x1DFF
+#define mmLB1_LB_TEST_DEBUG_INDEX 0x1DFE
+#define mmLB1_MVP_AFR_FLIP_FIFO_CNTL 0x1DD9
+#define mmLB1_MVP_AFR_FLIP_MODE 0x1DD8
+#define mmLB1_MVP_FLIP_LINE_NUM_INSERT 0x1DDA
+#define mmLB2_DC_MVP_LB_CONTROL 0x40DB
+#define mmLB2_LB_DEBUG 0x40FC
+#define mmLB2_LB_DEBUG2 0x40C9
+#define mmLB2_LB_NO_OUTSTANDING_REQ_STATUS 0x40C8
+#define mmLB2_LB_SYNC_RESET_SEL 0x40CA
+#define mmLB2_LB_TEST_DEBUG_DATA 0x40FF
+#define mmLB2_LB_TEST_DEBUG_INDEX 0x40FE
+#define mmLB2_MVP_AFR_FLIP_FIFO_CNTL 0x40D9
+#define mmLB2_MVP_AFR_FLIP_MODE 0x40D8
+#define mmLB2_MVP_FLIP_LINE_NUM_INSERT 0x40DA
+#define mmLB3_DC_MVP_LB_CONTROL 0x43DB
+#define mmLB3_LB_DEBUG 0x43FC
+#define mmLB3_LB_DEBUG2 0x43C9
+#define mmLB3_LB_NO_OUTSTANDING_REQ_STATUS 0x43C8
+#define mmLB3_LB_SYNC_RESET_SEL 0x43CA
+#define mmLB3_LB_TEST_DEBUG_DATA 0x43FF
+#define mmLB3_LB_TEST_DEBUG_INDEX 0x43FE
+#define mmLB3_MVP_AFR_FLIP_FIFO_CNTL 0x43D9
+#define mmLB3_MVP_AFR_FLIP_MODE 0x43D8
+#define mmLB3_MVP_FLIP_LINE_NUM_INSERT 0x43DA
+#define mmLB4_DC_MVP_LB_CONTROL 0x46DB
+#define mmLB4_LB_DEBUG 0x46FC
+#define mmLB4_LB_DEBUG2 0x46C9
+#define mmLB4_LB_NO_OUTSTANDING_REQ_STATUS 0x46C8
+#define mmLB4_LB_SYNC_RESET_SEL 0x46CA
+#define mmLB4_LB_TEST_DEBUG_DATA 0x46FF
+#define mmLB4_LB_TEST_DEBUG_INDEX 0x46FE
+#define mmLB4_MVP_AFR_FLIP_FIFO_CNTL 0x46D9
+#define mmLB4_MVP_AFR_FLIP_MODE 0x46D8
+#define mmLB4_MVP_FLIP_LINE_NUM_INSERT 0x46DA
+#define mmLB5_DC_MVP_LB_CONTROL 0x49DB
+#define mmLB5_LB_DEBUG 0x49FC
+#define mmLB5_LB_DEBUG2 0x49C9
+#define mmLB5_LB_NO_OUTSTANDING_REQ_STATUS 0x49C8
+#define mmLB5_LB_SYNC_RESET_SEL 0x49CA
+#define mmLB5_LB_TEST_DEBUG_DATA 0x49FF
+#define mmLB5_LB_TEST_DEBUG_INDEX 0x49FE
+#define mmLB5_MVP_AFR_FLIP_FIFO_CNTL 0x49D9
+#define mmLB5_MVP_AFR_FLIP_MODE 0x49D8
+#define mmLB5_MVP_FLIP_LINE_NUM_INSERT 0x49DA
+#define mmLB_DEBUG 0x1AFC
+#define mmLB_DEBUG2 0x1AC9
+#define mmLB_NO_OUTSTANDING_REQ_STATUS 0x1AC8
+#define mmLB_SYNC_RESET_SEL 0x1ACA
+#define mmLB_TEST_DEBUG_DATA 0x1AFF
+#define mmLB_TEST_DEBUG_INDEX 0x1AFE
+#define mmLIGHT_SLEEP_CNTL 0x0132
+#define mmLOW_POWER_TILING_CONTROL 0x0325
+#define mmLVDS_DATA_CNTL 0x1C8C
+#define mmLVTMA_PWRSEQ_CNTL 0x1919
+#define mmLVTMA_PWRSEQ_DELAY1 0x191C
+#define mmLVTMA_PWRSEQ_DELAY2 0x191D
+#define mmLVTMA_PWRSEQ_REF_DIV 0x191B
+#define mmLVTMA_PWRSEQ_STATE 0x191A
+#define mmMASTER_COMM_CMD_REG 0x161F
+#define mmMASTER_COMM_CNTL_REG 0x1620
+#define mmMASTER_COMM_DATA_REG1 0x161C
+#define mmMASTER_COMM_DATA_REG2 0x161D
+#define mmMASTER_COMM_DATA_REG3 0x161E
+#define mmMASTER_UPDATE_LOCK 0x1BBD
+#define mmMASTER_UPDATE_MODE 0x1BBE
+#define mmMC_DC_INTERFACE_NACK_STATUS 0x031C
+#define mmMCIF_CONTROL 0x0314
+#define mmMCIF_MEM_CONTROL 0x0319
+#define mmMCIF_TEST_DEBUG_DATA 0x0317
+#define mmMCIF_TEST_DEBUG_INDEX 0x0316
+#define mmMCIF_VMID 0x0318
+#define mmMCIF_WRITE_COMBINE_CONTROL 0x0315
+#define mmMICROSECOND_TIME_BASE_DIV 0x013B
+#define mmMILLISECOND_TIME_BASE_DIV 0x0130
+#define mmMVP_AFR_FLIP_FIFO_CNTL 0x1AD9
+#define mmMVP_AFR_FLIP_MODE 0x1AD8
+#define mmMVP_BLACK_KEYER 0x1686
+#define mmMVP_CONTROL1 0x1680
+#define mmMVP_CONTROL2 0x1681
+#define mmMVP_CONTROL3 0x168A
+#define mmMVP_CRC_CNTL 0x1687
+#define mmMVP_CRC_RESULT_BLUE_GREEN 0x1688
+#define mmMVP_CRC_RESULT_RED 0x1689
+#define mmMVP_DEBUG 0x168F
+#define mmMVP_FIFO_CONTROL 0x1682
+#define mmMVP_FIFO_STATUS 0x1683
+#define mmMVP_FLIP_LINE_NUM_INSERT 0x1ADA
+#define mmMVP_INBAND_CNTL_CAP 0x1685
+#define mmMVP_RECEIVE_CNT_CNTL1 0x168B
+#define mmMVP_RECEIVE_CNT_CNTL2 0x168C
+#define mmMVP_SLAVE_STATUS 0x1684
+#define mmMVP_TEST_DEBUG_DATA 0x168E
+#define mmMVP_TEST_DEBUG_INDEX 0x168D
+#define mmOUTPUT_CSC_C11_C12 0x1A3D
+#define mmOUTPUT_CSC_C13_C14 0x1A3E
+#define mmOUTPUT_CSC_C21_C22 0x1A3F
+#define mmOUTPUT_CSC_C23_C24 0x1A40
+#define mmOUTPUT_CSC_C31_C32 0x1A41
+#define mmOUTPUT_CSC_C33_C34 0x1A42
+#define mmOUTPUT_CSC_CONTROL 0x1A3C
+#define mmOUT_ROUND_CONTROL 0x1A51
+#define mmOVL_CONTROL1 0x1A1D
+#define mmOVL_CONTROL2 0x1A1E
+#define mmOVL_DFQ_CONTROL 0x1A29
+#define mmOVL_DFQ_STATUS 0x1A2A
+#define mmOVL_ENABLE 0x1A1C
+#define mmOVL_END 0x1A26
+#define mmOVL_PITCH 0x1A21
+#define mmOVLSCL_EDGE_PIXEL_CNTL 0x1A2C
+#define mmOVL_SECONDARY_SURFACE_ADDRESS 0x1A92
+#define mmOVL_SECONDARY_SURFACE_ADDRESS_HIGH 0x1A94
+#define mmOVL_START 0x1A25
+#define mmOVL_STEREOSYNC_FLIP 0x1A93
+#define mmOVL_SURFACE_ADDRESS 0x1A20
+#define mmOVL_SURFACE_ADDRESS_HIGH 0x1A22
+#define mmOVL_SURFACE_ADDRESS_HIGH_INUSE 0x1A2B
+#define mmOVL_SURFACE_ADDRESS_INUSE 0x1A28
+#define mmOVL_SURFACE_OFFSET_X 0x1A23
+#define mmOVL_SURFACE_OFFSET_Y 0x1A24
+#define mmOVL_SWAP_CNTL 0x1A1F
+#define mmOVL_UPDATE 0x1A27
+#define mmPHY_AUX_CNTL 0x197F
+#define mmPIPE0_ARBITRATION_CONTROL3 0x02FA
+#define mmPIPE0_DMIF_BUFFER_CONTROL 0x0328
+#define mmPIPE0_MAX_REQUESTS 0x0302
+#define mmPIPE0_PG_CONFIG 0x1760
+#define mmPIPE0_PG_ENABLE 0x1761
+#define mmPIPE0_PG_STATUS 0x1762
+#define mmPIPE1_ARBITRATION_CONTROL3 0x02FB
+#define mmPIPE1_DMIF_BUFFER_CONTROL 0x0330
+#define mmPIPE1_MAX_REQUESTS 0x0303
+#define mmPIPE1_PG_CONFIG 0x1764
+#define mmPIPE1_PG_ENABLE 0x1765
+#define mmPIPE1_PG_STATUS 0x1766
+#define mmPIPE2_ARBITRATION_CONTROL3 0x02FC
+#define mmPIPE2_DMIF_BUFFER_CONTROL 0x0338
+#define mmPIPE2_MAX_REQUESTS 0x0304
+#define mmPIPE2_PG_CONFIG 0x1768
+#define mmPIPE2_PG_ENABLE 0x1769
+#define mmPIPE2_PG_STATUS 0x176A
+#define mmPIPE3_ARBITRATION_CONTROL3 0x02FD
+#define mmPIPE3_DMIF_BUFFER_CONTROL 0x0340
+#define mmPIPE3_MAX_REQUESTS 0x0305
+#define mmPIPE3_PG_CONFIG 0x176C
+#define mmPIPE3_PG_ENABLE 0x176D
+#define mmPIPE3_PG_STATUS 0x176E
+#define mmPIPE4_ARBITRATION_CONTROL3 0x02FE
+#define mmPIPE4_DMIF_BUFFER_CONTROL 0x0348
+#define mmPIPE4_MAX_REQUESTS 0x0306
+#define mmPIPE4_PG_CONFIG 0x1770
+#define mmPIPE4_PG_ENABLE 0x1771
+#define mmPIPE4_PG_STATUS 0x1772
+#define mmPIPE5_ARBITRATION_CONTROL3 0x02FF
+#define mmPIPE5_DMIF_BUFFER_CONTROL 0x0350
+#define mmPIPE5_MAX_REQUESTS 0x0307
+#define mmPIPE5_PG_CONFIG 0x1774
+#define mmPIPE5_PG_ENABLE 0x1775
+#define mmPIPE5_PG_STATUS 0x1776
+#define mmPIXCLK0_RESYNC_CNTL 0x013A
+#define mmPIXCLK1_RESYNC_CNTL 0x0138
+#define mmPIXCLK2_RESYNC_CNTL 0x0139
+#define mmPLL_ANALOG 0x1708
+#define mmPLL_CNTL 0x1707
+#define mmPLL_DEBUG_CNTL 0x170B
+#define mmPLL_DISPCLK_CURRENT_DTO_PHASE 0x170F
+#define mmPLL_DISPCLK_DTO_CNTL 0x170E
+#define mmPLL_DS_CNTL 0x1705
+#define mmPLL_FB_DIV 0x1701
+#define mmPLL_IDCLK_CNTL 0x1706
+#define mmPLL_POST_DIV 0x1702
+#define mmPLL_REF_DIV 0x1700
+#define mmPLL_SS_AMOUNT_DSFRAC 0x1703
+#define mmPLL_SS_CNTL 0x1704
+#define mmPLL_UNLOCK_DETECT_CNTL 0x170A
+#define mmPLL_UPDATE_CNTL 0x170D
+#define mmPLL_UPDATE_LOCK 0x170C
+#define mmPLL_VREG_CNTL 0x1709
+#define mmPRESCALE_GRPH_CONTROL 0x1A2D
+#define mmPRESCALE_OVL_CONTROL 0x1A31
+#define mmPRESCALE_VALUES_GRPH_B 0x1A30
+#define mmPRESCALE_VALUES_GRPH_G 0x1A2F
+#define mmPRESCALE_VALUES_GRPH_R 0x1A2E
+#define mmPRESCALE_VALUES_OVL_CB 0x1A32
+#define mmPRESCALE_VALUES_OVL_CR 0x1A34
+#define mmPRESCALE_VALUES_OVL_Y 0x1A33
+#define mmREGAMMA_CNTLA_END_CNTL1 0x1AA6
+#define mmREGAMMA_CNTLA_END_CNTL2 0x1AA7
+#define mmREGAMMA_CNTLA_REGION_0_1 0x1AA8
+#define mmREGAMMA_CNTLA_REGION_10_11 0x1AAD
+#define mmREGAMMA_CNTLA_REGION_12_13 0x1AAE
+#define mmREGAMMA_CNTLA_REGION_14_15 0x1AAF
+#define mmREGAMMA_CNTLA_REGION_2_3 0x1AA9
+#define mmREGAMMA_CNTLA_REGION_4_5 0x1AAA
+#define mmREGAMMA_CNTLA_REGION_6_7 0x1AAB
+#define mmREGAMMA_CNTLA_REGION_8_9 0x1AAC
+#define mmREGAMMA_CNTLA_SLOPE_CNTL 0x1AA5
+#define mmREGAMMA_CNTLA_START_CNTL 0x1AA4
+#define mmREGAMMA_CNTLB_END_CNTL1 0x1AB2
+#define mmREGAMMA_CNTLB_END_CNTL2 0x1AB3
+#define mmREGAMMA_CNTLB_REGION_0_1 0x1AB4
+#define mmREGAMMA_CNTLB_REGION_10_11 0x1AB9
+#define mmREGAMMA_CNTLB_REGION_12_13 0x1ABA
+#define mmREGAMMA_CNTLB_REGION_14_15 0x1ABB
+#define mmREGAMMA_CNTLB_REGION_2_3 0x1AB5
+#define mmREGAMMA_CNTLB_REGION_4_5 0x1AB6
+#define mmREGAMMA_CNTLB_REGION_6_7 0x1AB7
+#define mmREGAMMA_CNTLB_REGION_8_9 0x1AB8
+#define mmREGAMMA_CNTLB_SLOPE_CNTL 0x1AB1
+#define mmREGAMMA_CNTLB_START_CNTL 0x1AB0
+#define mmREGAMMA_CONTROL 0x1AA0
+#define mmREGAMMA_LUT_DATA 0x1AA2
+#define mmREGAMMA_LUT_INDEX 0x1AA1
+#define mmREGAMMA_LUT_WRITE_EN_MASK 0x1AA3
+#define mmSCL0_EXT_OVERSCAN_LEFT_RIGHT 0x1B5E
+#define mmSCL0_EXT_OVERSCAN_TOP_BOTTOM 0x1B5F
+#define mmSCL0_SCL_ALU_CONTROL 0x1B54
+#define mmSCL0_SCL_AUTOMATIC_MODE_CONTROL 0x1B47
+#define mmSCL0_SCL_BYPASS_CONTROL 0x1B45
+#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
+#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_CONTROL 0x1B44
+#define mmSCL0_SCL_DEBUG 0x1B6A
+#define mmSCL0_SCL_DEBUG2 0x1B69
+#define mmSCL0_SCL_F_SHARP_CONTROL 0x1B53
+#define mmSCL0_SCL_HORZ_FILTER_CONTROL 0x1B4A
+#define mmSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x1B4B
+#define mmSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x1B46
+#define mmSCL0_SCL_MODE_CHANGE_DET1 0x1B60
+#define mmSCL0_SCL_MODE_CHANGE_DET2 0x1B61
+#define mmSCL0_SCL_MODE_CHANGE_DET3 0x1B62
+#define mmSCL0_SCL_MODE_CHANGE_MASK 0x1B63
+#define mmSCL0_SCL_TAP_CONTROL 0x1B43
+#define mmSCL0_SCL_TEST_DEBUG_DATA 0x1B6C
+#define mmSCL0_SCL_TEST_DEBUG_INDEX 0x1B6B
+#define mmSCL0_SCL_UPDATE 0x1B51
+#define mmSCL0_SCL_VERT_FILTER_CONTROL 0x1B4E
+#define mmSCL0_SCL_VERT_FILTER_INIT 0x1B50
+#define mmSCL0_SCL_VERT_FILTER_INIT_BOT 0x1B57
+#define mmSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x1B4F
+#define mmSCL0_VIEWPORT_SIZE 0x1B5D
+#define mmSCL0_VIEWPORT_START 0x1B5C
+#define mmSCL1_EXT_OVERSCAN_LEFT_RIGHT 0x1E5E
+#define mmSCL1_EXT_OVERSCAN_TOP_BOTTOM 0x1E5F
+#define mmSCL1_SCL_ALU_CONTROL 0x1E54
+#define mmSCL1_SCL_AUTOMATIC_MODE_CONTROL 0x1E47
+#define mmSCL1_SCL_BYPASS_CONTROL 0x1E45
+#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
+#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
+#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_CONTROL 0x1E44
+#define mmSCL1_SCL_DEBUG 0x1E6A
+#define mmSCL1_SCL_DEBUG2 0x1E69
+#define mmSCL1_SCL_F_SHARP_CONTROL 0x1E53
+#define mmSCL1_SCL_HORZ_FILTER_CONTROL 0x1E4A
+#define mmSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x1E4B
+#define mmSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x1E46
+#define mmSCL1_SCL_MODE_CHANGE_DET1 0x1E60
+#define mmSCL1_SCL_MODE_CHANGE_DET2 0x1E61
+#define mmSCL1_SCL_MODE_CHANGE_DET3 0x1E62
+#define mmSCL1_SCL_MODE_CHANGE_MASK 0x1E63
+#define mmSCL1_SCL_TAP_CONTROL 0x1E43
+#define mmSCL1_SCL_TEST_DEBUG_DATA 0x1E6C
+#define mmSCL1_SCL_TEST_DEBUG_INDEX 0x1E6B
+#define mmSCL1_SCL_UPDATE 0x1E51
+#define mmSCL1_SCL_VERT_FILTER_CONTROL 0x1E4E
+#define mmSCL1_SCL_VERT_FILTER_INIT 0x1E50
+#define mmSCL1_SCL_VERT_FILTER_INIT_BOT 0x1E57
+#define mmSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x1E4F
+#define mmSCL1_VIEWPORT_SIZE 0x1E5D
+#define mmSCL1_VIEWPORT_START 0x1E5C
+#define mmSCL2_EXT_OVERSCAN_LEFT_RIGHT 0x415E
+#define mmSCL2_EXT_OVERSCAN_TOP_BOTTOM 0x415F
+#define mmSCL2_SCL_ALU_CONTROL 0x4154
+#define mmSCL2_SCL_AUTOMATIC_MODE_CONTROL 0x4147
+#define mmSCL2_SCL_BYPASS_CONTROL 0x4145
+#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
+#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
+#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_CONTROL 0x4144
+#define mmSCL2_SCL_DEBUG 0x416A
+#define mmSCL2_SCL_DEBUG2 0x4169
+#define mmSCL2_SCL_F_SHARP_CONTROL 0x4153
+#define mmSCL2_SCL_HORZ_FILTER_CONTROL 0x414A
+#define mmSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x414B
+#define mmSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x4146
+#define mmSCL2_SCL_MODE_CHANGE_DET1 0x4160
+#define mmSCL2_SCL_MODE_CHANGE_DET2 0x4161
+#define mmSCL2_SCL_MODE_CHANGE_DET3 0x4162
+#define mmSCL2_SCL_MODE_CHANGE_MASK 0x4163
+#define mmSCL2_SCL_TAP_CONTROL 0x4143
+#define mmSCL2_SCL_TEST_DEBUG_DATA 0x416C
+#define mmSCL2_SCL_TEST_DEBUG_INDEX 0x416B
+#define mmSCL2_SCL_UPDATE 0x4151
+#define mmSCL2_SCL_VERT_FILTER_CONTROL 0x414E
+#define mmSCL2_SCL_VERT_FILTER_INIT 0x4150
+#define mmSCL2_SCL_VERT_FILTER_INIT_BOT 0x4157
+#define mmSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x414F
+#define mmSCL2_VIEWPORT_SIZE 0x415D
+#define mmSCL2_VIEWPORT_START 0x415C
+#define mmSCL3_EXT_OVERSCAN_LEFT_RIGHT 0x445E
+#define mmSCL3_EXT_OVERSCAN_TOP_BOTTOM 0x445F
+#define mmSCL3_SCL_ALU_CONTROL 0x4454
+#define mmSCL3_SCL_AUTOMATIC_MODE_CONTROL 0x4447
+#define mmSCL3_SCL_BYPASS_CONTROL 0x4445
+#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
+#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
+#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_CONTROL 0x4444
+#define mmSCL3_SCL_DEBUG 0x446A
+#define mmSCL3_SCL_DEBUG2 0x4469
+#define mmSCL3_SCL_F_SHARP_CONTROL 0x4453
+#define mmSCL3_SCL_HORZ_FILTER_CONTROL 0x444A
+#define mmSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x444B
+#define mmSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x4446
+#define mmSCL3_SCL_MODE_CHANGE_DET1 0x4460
+#define mmSCL3_SCL_MODE_CHANGE_DET2 0x4461
+#define mmSCL3_SCL_MODE_CHANGE_DET3 0x4462
+#define mmSCL3_SCL_MODE_CHANGE_MASK 0x4463
+#define mmSCL3_SCL_TAP_CONTROL 0x4443
+#define mmSCL3_SCL_TEST_DEBUG_DATA 0x446C
+#define mmSCL3_SCL_TEST_DEBUG_INDEX 0x446B
+#define mmSCL3_SCL_UPDATE 0x4451
+#define mmSCL3_SCL_VERT_FILTER_CONTROL 0x444E
+#define mmSCL3_SCL_VERT_FILTER_INIT 0x4450
+#define mmSCL3_SCL_VERT_FILTER_INIT_BOT 0x4457
+#define mmSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x444F
+#define mmSCL3_VIEWPORT_SIZE 0x445D
+#define mmSCL3_VIEWPORT_START 0x445C
+#define mmSCL4_EXT_OVERSCAN_LEFT_RIGHT 0x475E
+#define mmSCL4_EXT_OVERSCAN_TOP_BOTTOM 0x475F
+#define mmSCL4_SCL_ALU_CONTROL 0x4754
+#define mmSCL4_SCL_AUTOMATIC_MODE_CONTROL 0x4747
+#define mmSCL4_SCL_BYPASS_CONTROL 0x4745
+#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
+#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
+#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_CONTROL 0x4744
+#define mmSCL4_SCL_DEBUG 0x476A
+#define mmSCL4_SCL_DEBUG2 0x4769
+#define mmSCL4_SCL_F_SHARP_CONTROL 0x4753
+#define mmSCL4_SCL_HORZ_FILTER_CONTROL 0x474A
+#define mmSCL4_SCL_HORZ_FILTER_SCALE_RATIO 0x474B
+#define mmSCL4_SCL_MANUAL_REPLICATE_CONTROL 0x4746
+#define mmSCL4_SCL_MODE_CHANGE_DET1 0x4760
+#define mmSCL4_SCL_MODE_CHANGE_DET2 0x4761
+#define mmSCL4_SCL_MODE_CHANGE_DET3 0x4762
+#define mmSCL4_SCL_MODE_CHANGE_MASK 0x4763
+#define mmSCL4_SCL_TAP_CONTROL 0x4743
+#define mmSCL4_SCL_TEST_DEBUG_DATA 0x476C
+#define mmSCL4_SCL_TEST_DEBUG_INDEX 0x476B
+#define mmSCL4_SCL_UPDATE 0x4751
+#define mmSCL4_SCL_VERT_FILTER_CONTROL 0x474E
+#define mmSCL4_SCL_VERT_FILTER_INIT 0x4750
+#define mmSCL4_SCL_VERT_FILTER_INIT_BOT 0x4757
+#define mmSCL4_SCL_VERT_FILTER_SCALE_RATIO 0x474F
+#define mmSCL4_VIEWPORT_SIZE 0x475D
+#define mmSCL4_VIEWPORT_START 0x475C
+#define mmSCL5_EXT_OVERSCAN_LEFT_RIGHT 0x4A5E
+#define mmSCL5_EXT_OVERSCAN_TOP_BOTTOM 0x4A5F
+#define mmSCL5_SCL_ALU_CONTROL 0x4A54
+#define mmSCL5_SCL_AUTOMATIC_MODE_CONTROL 0x4A47
+#define mmSCL5_SCL_BYPASS_CONTROL 0x4A45
+#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
+#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
+#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_CONTROL 0x4A44
+#define mmSCL5_SCL_DEBUG 0x4A6A
+#define mmSCL5_SCL_DEBUG2 0x4A69
+#define mmSCL5_SCL_F_SHARP_CONTROL 0x4A53
+#define mmSCL5_SCL_HORZ_FILTER_CONTROL 0x4A4A
+#define mmSCL5_SCL_HORZ_FILTER_SCALE_RATIO 0x4A4B
+#define mmSCL5_SCL_MANUAL_REPLICATE_CONTROL 0x4A46
+#define mmSCL5_SCL_MODE_CHANGE_DET1 0x4A60
+#define mmSCL5_SCL_MODE_CHANGE_DET2 0x4A61
+#define mmSCL5_SCL_MODE_CHANGE_DET3 0x4A62
+#define mmSCL5_SCL_MODE_CHANGE_MASK 0x4A63
+#define mmSCL5_SCL_TAP_CONTROL 0x4A43
+#define mmSCL5_SCL_TEST_DEBUG_DATA 0x4A6C
+#define mmSCL5_SCL_TEST_DEBUG_INDEX 0x4A6B
+#define mmSCL5_SCL_UPDATE 0x4A51
+#define mmSCL5_SCL_VERT_FILTER_CONTROL 0x4A4E
+#define mmSCL5_SCL_VERT_FILTER_INIT 0x4A50
+#define mmSCL5_SCL_VERT_FILTER_INIT_BOT 0x4A57
+#define mmSCL5_SCL_VERT_FILTER_SCALE_RATIO 0x4A4F
+#define mmSCL5_VIEWPORT_SIZE 0x4A5D
+#define mmSCL5_VIEWPORT_START 0x4A5C
+#define mmSCL_ALU_CONTROL 0x1B54
+#define mmSCL_AUTOMATIC_MODE_CONTROL 0x1B47
+#define mmSCL_BYPASS_CONTROL 0x1B45
+#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+#define mmSCL_COEF_RAM_SELECT 0x1B40
+#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_CONTROL 0x1B44
+#define mmSCL_DEBUG 0x1B6A
+#define mmSCL_DEBUG2 0x1B69
+#define mmSCL_F_SHARP_CONTROL 0x1B53
+#define mmSCL_HORZ_FILTER_CONTROL 0x1B4A
+#define mmSCL_HORZ_FILTER_SCALE_RATIO 0x1B4B
+#define mmSCLK_CGTT_BLK_CTRL_REG 0x0136
+#define mmSCL_MANUAL_REPLICATE_CONTROL 0x1B46
+#define mmSCL_MODE_CHANGE_DET1 0x1B60
+#define mmSCL_MODE_CHANGE_DET2 0x1B61
+#define mmSCL_MODE_CHANGE_DET3 0x1B62
+#define mmSCL_MODE_CHANGE_MASK 0x1B63
+#define mmSCL_TAP_CONTROL 0x1B43
+#define mmSCL_TEST_DEBUG_DATA 0x1B6C
+#define mmSCL_TEST_DEBUG_INDEX 0x1B6B
+#define mmSCL_UPDATE 0x1B51
+#define mmSCL_VERT_FILTER_CONTROL 0x1B4E
+#define mmSCL_VERT_FILTER_INIT 0x1B50
+#define mmSCL_VERT_FILTER_INIT_BOT 0x1B57
+#define mmSCL_VERT_FILTER_SCALE_RATIO 0x1B4F
+#define mmSEQ8_DATA 0x00F1
+#define mmSEQ8_IDX 0x00F1
+#define mmSLAVE_COMM_CMD_REG 0x1624
+#define mmSLAVE_COMM_CNTL_REG 0x1625
+#define mmSLAVE_COMM_DATA_REG1 0x1621
+#define mmSLAVE_COMM_DATA_REG2 0x1622
+#define mmSLAVE_COMM_DATA_REG3 0x1623
+#define mmSYMCLKA_CLOCK_ENABLE 0x0160
+#define mmSYMCLKB_CLOCK_ENABLE 0x0161
+#define mmSYMCLKC_CLOCK_ENABLE 0x0162
+#define mmSYMCLKD_CLOCK_ENABLE 0x0163
+#define mmSYMCLKE_CLOCK_ENABLE 0x0164
+#define mmSYMCLKF_CLOCK_ENABLE 0x0165
+#define mmTMDS_CNTL 0x1C7C
+#define mmTMDS_CONTROL0_FEEDBACK 0x1C7E
+#define mmTMDS_CONTROL_CHAR 0x1C7D
+#define mmTMDS_CTL0_1_GEN_CNTL 0x1C86
+#define mmTMDS_CTL2_3_GEN_CNTL 0x1C87
+#define mmTMDS_CTL_BITS 0x1C83
+#define mmTMDS_DCBALANCER_CONTROL 0x1C84
+#define mmTMDS_DEBUG 0x1C82
+#define mmTMDS_STEREOSYNC_CTL_SEL 0x1C7F
+#define mmTMDS_SYNC_CHAR_PATTERN_0_1 0x1C80
+#define mmTMDS_SYNC_CHAR_PATTERN_2_3 0x1C81
+#define mmUNIPHYAB_TPG_CONTROL 0x1931
+#define mmUNIPHYAB_TPG_SEED 0x1932
+#define mmUNIPHY_ANG_BIST_CNTL 0x198C
+#define mmUNIPHYCD_TPG_CONTROL 0x1933
+#define mmUNIPHYCD_TPG_SEED 0x1934
+#define mmUNIPHY_CHANNEL_XBAR_CNTL 0x198E
+#define mmUNIPHY_DATA_SYNCHRONIZATION 0x198A
+#define mmUNIPHYEF_TPG_CONTROL 0x1935
+#define mmUNIPHYEF_TPG_SEED 0x1936
+#define mmUNIPHY_IMPCAL_LINKA 0x1908
+#define mmUNIPHY_IMPCAL_LINKB 0x1909
+#define mmUNIPHY_IMPCAL_LINKC 0x190F
+#define mmUNIPHY_IMPCAL_LINKD 0x1910
+#define mmUNIPHY_IMPCAL_LINKE 0x1913
+#define mmUNIPHY_IMPCAL_LINKF 0x1914
+#define mmUNIPHY_IMPCAL_PERIOD 0x190A
+#define mmUNIPHY_IMPCAL_PSW_AB 0x190E
+#define mmUNIPHY_IMPCAL_PSW_CD 0x1912
+#define mmUNIPHY_IMPCAL_PSW_EF 0x1916
+#define mmUNIPHY_LINK_CNTL 0x198D
+#define mmUNIPHY_PLL_CONTROL1 0x1986
+#define mmUNIPHY_PLL_CONTROL2 0x1987
+#define mmUNIPHY_PLL_FBDIV 0x1985
+#define mmUNIPHY_PLL_SS_CNTL 0x1989
+#define mmUNIPHY_PLL_SS_STEP_SIZE 0x1988
+#define mmUNIPHY_POWER_CONTROL 0x1984
+#define mmUNIPHY_REG_TEST_OUTPUT 0x198B
+#define mmUNIPHY_SOFT_RESET 0x0166
+#define mmUNIPHY_TX_CONTROL1 0x1980
+#define mmUNIPHY_TX_CONTROL2 0x1981
+#define mmUNIPHY_TX_CONTROL3 0x1982
+#define mmUNIPHY_TX_CONTROL4 0x1983
+#define mmVGA25_PPLL_ANALOG 0x00E4
+#define mmVGA25_PPLL_FB_DIV 0x00DC
+#define mmVGA25_PPLL_POST_DIV 0x00E0
+#define mmVGA25_PPLL_REF_DIV 0x00D8
+#define mmVGA28_PPLL_ANALOG 0x00E5
+#define mmVGA28_PPLL_FB_DIV 0x00DD
+#define mmVGA28_PPLL_POST_DIV 0x00E1
+#define mmVGA28_PPLL_REF_DIV 0x00D9
+#define mmVGA41_PPLL_ANALOG 0x00E6
+#define mmVGA41_PPLL_FB_DIV 0x00DE
+#define mmVGA41_PPLL_POST_DIV 0x00E2
+#define mmVGA41_PPLL_REF_DIV 0x00DA
+#define mmVGA_CACHE_CONTROL 0x00CB
+#define mmVGA_DEBUG_READBACK_DATA 0x00D7
+#define mmVGA_DEBUG_READBACK_INDEX 0x00D6
+#define mmVGA_DISPBUF1_SURFACE_ADDR 0x00C6
+#define mmVGA_DISPBUF2_SURFACE_ADDR 0x00C8
+#define mmVGA_HDP_CONTROL 0x00CA
+#define mmVGA_HW_DEBUG 0x00CF
+#define mmVGA_INTERRUPT_CONTROL 0x00D1
+#define mmVGA_INTERRUPT_STATUS 0x00D3
+#define mmVGA_MAIN_CONTROL 0x00D4
+#define mmVGA_MEMORY_BASE_ADDRESS 0x00C4
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH 0x00C9
+#define mmVGA_MEM_READ_PAGE_ADDR 0x0013
+#define mmVGA_MEM_WRITE_PAGE_ADDR 0x0012
+#define mmVGA_MODE_CONTROL 0x00C2
+#define mmVGA_RENDER_CONTROL 0x00C0
+#define mmVGA_SEQUENCER_RESET_CONTROL 0x00C1
+#define mmVGA_SOURCE_SELECT 0x00FC
+#define mmVGA_STATUS 0x00D0
+#define mmVGA_STATUS_CLEAR 0x00D2
+#define mmVGA_SURFACE_PITCH_SELECT 0x00C3
+#define mmVGA_TEST_CONTROL 0x00D5
+#define mmVGA_TEST_DEBUG_DATA 0x00C7
+#define mmVGA_TEST_DEBUG_INDEX 0x00C5
+#define mmVIEWPORT_SIZE 0x1B5D
+#define mmVIEWPORT_START 0x1B5C
+#define mmXDMA_CLOCK_GATING_CNTL 0x0409
+#define mmXDMA_IF_BIF_STATUS 0x0418
+#define mmXDMA_INTERRUPT 0x0406
+#define mmXDMA_LOCAL_SURFACE_TILING1 0x03F4
+#define mmXDMA_LOCAL_SURFACE_TILING2 0x03F5
+#define mmXDMA_MC_PCIE_CLIENT_CONFIG 0x03E9
+#define mmXDMA_MEM_POWER_CNTL 0x040B
+#define mmXDMA_MSTR_CMD_URGENT_CNTL 0x03F6
+#define mmXDMA_MSTR_CNTL 0x03E0
+#define mmXDMA_MSTR_HEIGHT 0x03E3
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR 0x03F1
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH 0x03F2
+#define mmXDMA_MSTR_LOCAL_SURFACE_PITCH 0x03F3
+#define mmXDMA_MSTR_MEM_CLIENT_CONFIG 0x03EA
+#define mmXDMA_MSTR_MEM_NACK_STATUS 0x040D
+#define mmXDMA_MSTR_MEM_URGENT_CNTL 0x03F7
+#define mmXDMA_MSTR_PCIE_NACK_STATUS 0x040C
+#define mmXDMA_MSTR_READ_COMMAND 0x03E1
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS 0x03E6
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x03E7
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE 0x03E4
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x03E5
+#define mmXDMA_MSTR_STATUS 0x03E8
+#define mmXDMA_RBBMIF_RDWR_CNTL 0x040A
+#define mmXDMA_SLV_CNTL 0x03FB
+#define mmXDMA_SLV_FLIP_PENDING 0x0407
+#define mmXDMA_SLV_MEM_CLIENT_CONFIG 0x03FD
+#define mmXDMA_SLV_MEM_NACK_STATUS 0x040F
+#define mmXDMA_SLV_PCIE_NACK_STATUS 0x040E
+#define mmXDMA_SLV_READ_LATENCY_AVE 0x0405
+#define mmXDMA_SLV_READ_LATENCY_MINMAX 0x0404
+#define mmXDMA_SLV_READ_LATENCY_TIMER 0x0412
+#define mmXDMA_SLV_READ_URGENT_CNTL 0x03FF
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS 0x0402
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x0403
+#define mmXDMA_SLV_SLS_PITCH 0x03FE
+#define mmXDMA_SLV_WB_RATE_CNTL 0x0401
+#define mmXDMA_SLV_WRITE_URGENT_CNTL 0x0400
+#define mmXDMA_TEST_DEBUG_DATA 0x041D
+#define mmXDMA_TEST_DEBUG_INDEX 0x041C
+
+/* Registers that spilled out of sid.h */
+#define mmDATA_FORMAT 0x1AC0
+#define mmDESKTOP_HEIGHT 0x1AC1
+#define mmDC_LB_MEMORY_SPLIT 0x1AC3
+#define mmPRIORITY_A_CNT 0x1AC6
+#define mmPRIORITY_B_CNT 0x1AC7
+#define mmDPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+#define mmINT_MASK 0x1AD0
+#define mmVLINE_STATUS 0x1AEE
+#define mmVBLANK_STATUS 0x1AEF
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
new file mode 100644
index 000000000000..9a4d4c299d5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -0,0 +1,9836 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_6_0_SH_MASK_H
+#define DCE_6_0_SH_MASK_H
+
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x00000000
+#define AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x00000001
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000ff00L
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x00000008
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00f00000L
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000014
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x0000001c
+#define AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x00000002
+#define AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x00000003
+#define AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000c0L
+#define AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x00000006
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0f000000L
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000018
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000f0000L
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x00000010
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00f00000L
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000014
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000f0L
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000004
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000fL
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x00000000
+#define AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x00000010
+#define AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x00000012
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000f00L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000008
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000f000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x0000000c
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000f0000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000010
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00f00000L
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000014
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000f000L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0x0000000c
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x00000004
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xffff0000L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x00000010
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x00000000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x00000008
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x00000000
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xffffff00L
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x00000008
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE_MASK 0x00000100L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE__SHIFT 0x00000008
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV_MASK 0x00070000L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV__SHIFT 0x00000010
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI_MASK 0x00007000L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI__SHIFT 0x0000000c
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL_MASK 0x00000007L
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x00000008
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00ff0000L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x00000010
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0x0000000b
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1f000000L
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x00000018
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000ffL
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x00000000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0x0000000f
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x00000010
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0x0000000b
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x0000001c
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000ff00L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x00000008
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x00000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x00000001
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00ff0000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x00000010
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x00000018
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x0000001a
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x00000018
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x00000017
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x00000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0x0000000c
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0x0000000e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x0000001e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x0000001f
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0x0000000b
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x00000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A_MASK 0x00001000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A__SHIFT 0x0000000c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B_MASK 0x00000c00L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B__SHIFT 0x0000000a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C_MASK 0x00c00000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C__SHIFT 0x00000016
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC_MASK 0x70000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC__SHIFT 0x0000001c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC_MASK 0x80000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC__SHIFT 0x0000001f
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M_MASK 0x00300000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M__SHIFT 0x00000014
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_PB1_RSVD_MASK 0x00008000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_PB1_RSVD__SHIFT 0x0000000f
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q_MASK 0x0c000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q__SHIFT 0x0000001a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R_MASK 0x000f0000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R__SHIFT 0x00000010
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC_MASK 0x03000000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC__SHIFT 0x00000018
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S_MASK 0x00000300L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S__SHIFT 0x00000008
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y_MASK 0x00006000L
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y__SHIFT 0x0000000d
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN_MASK 0x00003000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN__SHIFT 0x0000000c
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PB4_RSVD_MASK 0x00000080L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PB4_RSVD__SHIFT 0x00000007
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR_MASK 0x00000f00L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR__SHIFT 0x00000008
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP_MASK 0xffff0000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP__SHIFT 0x00000010
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC_MASK 0x0000007fL
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC__SHIFT 0x00000000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ_MASK 0x0000c000L
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ__SHIFT 0x0000000e
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM_MASK 0x0000ffffL
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM__SHIFT 0x00000000
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT_MASK 0xffff0000L
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT__SHIFT 0x00000010
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT_MASK 0x0000ffffL
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT__SHIFT 0x00000000
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION_MASK 0xff000000L
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION__SHIFT 0x00000018
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000ffL
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x00000000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000ff00L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x00000008
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00ff0000L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x00000010
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xff000000L
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x00000018
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000ffL
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x00000000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000ff00L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x00000008
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00ff0000L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x00000010
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xff000000L
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x00000018
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00ff0000L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x00000010
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xff000000L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x00000018
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000ffL
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x00000000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000ff00L
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x00000008
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000ffL
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x00000000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000ff00L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x00000008
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00ff0000L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x00000010
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xff000000L
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x00000018
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000ffL
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x00000000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000ff00L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x00000008
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00ff0000L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x00000010
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xff000000L
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x00000018
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000ffL
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x00000000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000ff00L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x00000008
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00ff0000L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x00000010
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xff000000L
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x00000018
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000ffL
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x00000000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000ff00L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x00000008
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00ff0000L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x00000010
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xff000000L
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x00000018
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000ffL
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x00000000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000ff00L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x00000008
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00ff0000L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x00000010
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xff000000L
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x00000018
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000ffL
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x00000000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000ff00L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x00000008
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00ff0000L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x00000010
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xff000000L
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x00000018
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x00000006
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x00000007
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0x0000000a
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x00000006
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x00000000
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x00000007
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000ffL
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x00000000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000ff00L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x00000008
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00ff0000L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x00000010
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xff000000L
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x00000018
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000ffL
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x00000000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000ff00L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x00000008
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00ff0000L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x00000010
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xff000000L
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x00000018
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00ff0000L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x00000010
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xff000000L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x00000018
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000ffL
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x00000000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000ff00L
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x00000008
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000ffL
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x00000000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000ff00L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x00000008
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00ff0000L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x00000010
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xff000000L
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x00000018
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000ffL
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x00000000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000ff00L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x00000008
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00ff0000L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x00000010
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xff000000L
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x00000018
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000ffL
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x00000000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000ff00L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x00000008
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00ff0000L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x00000010
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xff000000L
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x00000018
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000ffL
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x00000000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000ff00L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x00000008
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00ff0000L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x00000010
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xff000000L
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x00000018
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000ffL
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x00000000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000ff00L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x00000008
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00ff0000L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x00000010
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xff000000L
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x00000018
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000ffL
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x00000000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000ff00L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x00000008
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00ff0000L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x00000010
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xff000000L
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x00000018
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0x0000000c
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000ffL
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x00000000
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x00000008
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x0000001f
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xff000000L
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x00000018
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x00000000
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00ffffffL
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x00000000
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x00000004
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x00000018
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x0000001e
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x00000008
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE_MASK 0x00000004L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE__SHIFT 0x00000002
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE_MASK 0x00000008L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE__SHIFT 0x00000003
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xc0000000L
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x0000001e
+#define ATTR00__ATTR_PAL_MASK 0x0000003fL
+#define ATTR00__ATTR_PAL__SHIFT 0x00000000
+#define ATTR01__ATTR_PAL_MASK 0x0000003fL
+#define ATTR01__ATTR_PAL__SHIFT 0x00000000
+#define ATTR02__ATTR_PAL_MASK 0x0000003fL
+#define ATTR02__ATTR_PAL__SHIFT 0x00000000
+#define ATTR03__ATTR_PAL_MASK 0x0000003fL
+#define ATTR03__ATTR_PAL__SHIFT 0x00000000
+#define ATTR04__ATTR_PAL_MASK 0x0000003fL
+#define ATTR04__ATTR_PAL__SHIFT 0x00000000
+#define ATTR05__ATTR_PAL_MASK 0x0000003fL
+#define ATTR05__ATTR_PAL__SHIFT 0x00000000
+#define ATTR06__ATTR_PAL_MASK 0x0000003fL
+#define ATTR06__ATTR_PAL__SHIFT 0x00000000
+#define ATTR07__ATTR_PAL_MASK 0x0000003fL
+#define ATTR07__ATTR_PAL__SHIFT 0x00000000
+#define ATTR08__ATTR_PAL_MASK 0x0000003fL
+#define ATTR08__ATTR_PAL__SHIFT 0x00000000
+#define ATTR09__ATTR_PAL_MASK 0x0000003fL
+#define ATTR09__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0A__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0A__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0B__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0B__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0C__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0C__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0D__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0D__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0E__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0E__ATTR_PAL__SHIFT 0x00000000
+#define ATTR0F__ATTR_PAL_MASK 0x0000003fL
+#define ATTR0F__ATTR_PAL__SHIFT 0x00000000
+#define ATTR10__ATTR_BLINK_EN_MASK 0x00000008L
+#define ATTR10__ATTR_BLINK_EN__SHIFT 0x00000003
+#define ATTR10__ATTR_CSEL_EN_MASK 0x00000080L
+#define ATTR10__ATTR_CSEL_EN__SHIFT 0x00000007
+#define ATTR10__ATTR_GRPH_MODE_MASK 0x00000001L
+#define ATTR10__ATTR_GRPH_MODE__SHIFT 0x00000000
+#define ATTR10__ATTR_LGRPH_EN_MASK 0x00000004L
+#define ATTR10__ATTR_LGRPH_EN__SHIFT 0x00000002
+#define ATTR10__ATTR_MONO_EN_MASK 0x00000002L
+#define ATTR10__ATTR_MONO_EN__SHIFT 0x00000001
+#define ATTR10__ATTR_PANTOPONLY_MASK 0x00000020L
+#define ATTR10__ATTR_PANTOPONLY__SHIFT 0x00000005
+#define ATTR10__ATTR_PCLKBY2_MASK 0x00000040L
+#define ATTR10__ATTR_PCLKBY2__SHIFT 0x00000006
+#define ATTR11__ATTR_OVSC_MASK 0x000000ffL
+#define ATTR11__ATTR_OVSC__SHIFT 0x00000000
+#define ATTR12__ATTR_MAP_EN_MASK 0x0000000fL
+#define ATTR12__ATTR_MAP_EN__SHIFT 0x00000000
+#define ATTR12__ATTR_VSMUX_MASK 0x00000030L
+#define ATTR12__ATTR_VSMUX__SHIFT 0x00000004
+#define ATTR13__ATTR_PPAN_MASK 0x0000000fL
+#define ATTR13__ATTR_PPAN__SHIFT 0x00000000
+#define ATTR14__ATTR_CSEL1_MASK 0x00000003L
+#define ATTR14__ATTR_CSEL1__SHIFT 0x00000000
+#define ATTR14__ATTR_CSEL2_MASK 0x0000000cL
+#define ATTR14__ATTR_CSEL2__SHIFT 0x00000002
+#define ATTRDR__ATTR_DATA_MASK 0x000000ffL
+#define ATTRDR__ATTR_DATA__SHIFT 0x00000000
+#define ATTRDW__ATTR_DATA_MASK 0x000000ffL
+#define ATTRDW__ATTR_DATA__SHIFT 0x00000000
+#define ATTRX__ATTR_IDX_MASK 0x0000001fL
+#define ATTRX__ATTR_IDX__SHIFT 0x00000000
+#define ATTRX__ATTR_PAL_RW_ENB_MASK 0x00000020L
+#define ATTRX__ATTR_PAL_RW_ENB__SHIFT 0x00000005
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x00000000
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
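Each AUDIO_DESCRIPTORn register above packs one short audio descriptor into a single 32-bit word: MAX_CHANNELS in bits 2:0, SUPPORTED_FREQUENCIES in bits 15:8, DESCRIPTOR_BYTE_2 in bits 23:16 and SUPPORTED_FREQUENCIES_STEREO in bits 31:24. The sketch below shows how such a word could be unpacked with the MASK/SHIFT pairs defined in this hunk; decode_audio_descriptor() and the example value are illustrative assumptions, not part of the patch, and the code assumes the defines above are in scope.

#include <stdint.h>
#include <stdio.h>

/* Sketch only (not part of the patch): unpack one AUDIO_DESCRIPTORn
 * word using the MASK/SHIFT pairs from the hunk above.  'val' is the
 * raw 32-bit register contents; the value passed in main() is made up. */
static void decode_audio_descriptor(uint32_t val)
{
        uint32_t channels = (val & AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK)
                            >> AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT;
        uint32_t freqs    = (val & AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK)
                            >> AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT;
        uint32_t byte2    = (val & AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK)
                            >> AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT;

        printf("max_channels=%u freq_bitmap=0x%02x byte2=0x%02x\n",
               channels, freqs, byte2);
}

int main(void)
{
        decode_audio_descriptor(0x00070f01);  /* arbitrary example value */
        return 0;
}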
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x00000000
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x00000019
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x00000018
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x00000018
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0x0000000a
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x00000008
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000cL
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x00000002
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x00000011
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x00000010
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x00000010
+#define AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x0000001d
+#define AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define AUX_CONTROL__AUX_EN__SHIFT 0x00000000
+#define AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x00000014
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x00000010
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x00000018
+#define AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x00000008
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0x0000000c
+#define AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x00000012
+#define AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x0000001c
+#define AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define AUX_CONTROL__SPARE_0__SHIFT 0x0000001e
+#define AUX_CONTROL__SPARE_1_MASK 0x80000000L
+#define AUX_CONTROL__SPARE_1__SHIFT 0x0000001f
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x00000011
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x00000012
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x00000013
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x0000001c
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0x0000000c
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x00000014
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x00000008
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x00000004
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN_MASK 0x07000000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN__SHIFT 0x00000018
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x00000010
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000ffL
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x00000000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001f0000L
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x00000010
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3fe00000L
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x00000015
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x00000000
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001f00L
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x00000008
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x00000007L
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x00000000
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003f00L
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x00000008
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x00000004
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01ff0000L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x00000010
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x00000000
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x00000000
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01ff0000L
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x00000010
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x00000004
+#define AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x00000000
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x00000005
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x00000004
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x00000006
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x00000001
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x00000000
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x00000002
+#define AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000ff00L
+#define AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x00000008
+#define AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001f0000L
+#define AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x00000010
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x0000001d
+#define AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x00000000
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x00000009
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0x0000000b
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1f000000L
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x00000018
+#define AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x00000001
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x00000013
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0x0000000e
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0x0000000c
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x00000008
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0x0000000a
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x00000016
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x00000017
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x00000014
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x00000012
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x00000011
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x00000007
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x00000004
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x0000001f
+#define AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x0000001e
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK_MASK 0x00000400L
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK__SHIFT 0x0000000a
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_MASK 0x00000200L
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR__SHIFT 0x00000009
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT_MASK 0x00000100L
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT__SHIFT 0x00000008
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE_MASK 0x00000001L
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE__SHIFT 0x00000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x0000001c
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_MASK 0x0f000000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE__SHIFT 0x00000018
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY_MASK 0x00f00000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY__SHIFT 0x00000014
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE_MASK 0x000f0000L
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE__SHIFT 0x00000010
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK_MASK 0x00000400L
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK__SHIFT 0x0000000a
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_MASK 0x00000200L
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR__SHIFT 0x00000009
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT_MASK 0x00000100L
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT__SHIFT 0x00000008
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE_MASK 0x00000001L
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE__SHIFT 0x00000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x0000001c
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_MASK 0x0f000000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE__SHIFT 0x00000018
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY_MASK 0x00f00000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY__SHIFT 0x00000014
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE_MASK 0x000f0000L
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE__SHIFT 0x00000010
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x00000002
+#define AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x00000000
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000f0L
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x00000004
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001f0000L
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x00000010
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x0000001f
+#define AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000ff00L
+#define AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x00000000
+#define AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x00000008
+#define AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001f0000L
+#define AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x00000010
+#define AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xc0000000L
+#define AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x0000001e
+#define AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x00000000
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x00000009
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0x0000000b
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1f000000L
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x00000018
+#define AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x00000001
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x00000013
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0x0000000e
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0x0000000c
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x00000008
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0x0000000a
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x00000016
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x00000017
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x00000014
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x00000012
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x00000011
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x00000007
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x00000004
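All of the definitions in this hunk follow the same REG__FIELD_MASK / REG__FIELD__SHIFT convention: the mask selects the field's bits within the 32-bit register value and the shift gives the field's bit offset. Below is a minimal sketch of the usual extract/insert pattern on a local register copy, using AUX_SW_CONTROL as the example; set_field(), get_field() and the chosen values are illustrative assumptions, not part of the patch, and the code assumes the defines above are in scope. Writing the result back to hardware is left to the driver's own MMIO helpers.

#include <stdint.h>
#include <stdio.h>

/* Sketch only (not part of the patch): generic field helpers for the
 * MASK/SHIFT pairs defined above.  They operate on a register value in
 * memory and never touch hardware. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t v = 0;

        /* Program a 16-byte write count and set the GO bit in a local
         * copy of AUX_SW_CONTROL, then read the field back. */
        v = set_field(v, AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK,
                      AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT, 16);
        v |= AUX_SW_CONTROL__AUX_SW_GO_MASK;

        printf("AUX_SW_CONTROL = 0x%08x, wr_bytes = %u\n", v,
               get_field(v, AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK,
                         AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT));
        return 0;
}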
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER_MASK 0xffffffffL
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER__SHIFT 0x00000000
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xffff0000L
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x00000010
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000ffffL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x00000000
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x00000008
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG_MASK 0xffffffffL
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG__SHIFT 0x00000000
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xffffffffL
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x00000000
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xffffffffL
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x00000000
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE_MASK 0x00000001L
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE__SHIFT 0x00000000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x00000010
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x00000011
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x00000004
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0x0000000e
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0x0000000f
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007f00L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x00000017
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_PIN_DEBUG__AZALIA_DEBUG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x0000001e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00fc0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x00000012
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x0000001b
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x0000001f
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x00000019
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x00000011
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x00000019
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000f000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0x0000000c
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000f0L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000f00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x0000001e
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000ffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xffff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00ff0000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xff000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000ffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x0000001c
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03ffffffL
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x00000006
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x00000018
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x00000010
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x00000007
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x00000003
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x00000005
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x00000002
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x00000004
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x00000001
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000ff00L
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x00000008
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003fL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffffL
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003cL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x00000002
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x00000002
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x00000007
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x00000003
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000006
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x00000005
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x00000007
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x00000004
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000fL
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000000
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000f0L
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0x0000000e
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0x0000000f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0x0000000f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007fL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007f00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x00000017
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00ff0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x0000001e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x0000001f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001f0000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000fffL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00ff0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000ffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000ffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000c0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000f000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0x0000000c
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000f0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000f00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x0000001e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x0000001f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000fc00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000ffL
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003fL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000f0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x00000009
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0x0000000b
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0x0000000a
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00f00000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x00000014
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x00000006
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x00000018
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x00000010
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x00000007
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x00000003
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x00000005
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x00000002
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x00000004
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x00000001
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000ff00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x00000008
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x00000000
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffffL
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003cL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x00000002
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x00000002
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x00000007
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x00000003
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000006
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x00000005
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x00000007
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x00000004
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000fL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x00000000
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000f0L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x00000004
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007f00L
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x00000008
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00ff0000L
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x00000010
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007fL
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x00000000
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x00000001
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x00000000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000ffffL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x00000000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff0000L
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x00000010
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000ffL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x00000000
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x00000008
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x00000004
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x00000000
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL_MASK 0x00000030L
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL__SHIFT 0x00000004
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xffffffffL
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x00000000
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA_MASK 0xffffffffL
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA__SHIFT 0x00000000
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000ffL
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x00000000
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x00000008
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xffffffffL
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x00000000
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xffffffffL
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x00000003
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x00000002
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xffff0000L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x00000010
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x00000000
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x00000001
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001ffffL
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x00000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000e0000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x00000011
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001f
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000018
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x00000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x00000008
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x00000010
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001ffffL
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x00000000
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x00000000
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001ffffL
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x00000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x0000001e
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_MASK 0x80000000L
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN__SHIFT 0x0000001f
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000ffffL
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x00000000
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT_MASK 0x30000000L
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT__SHIFT 0x0000001c
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000ffffL
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x00000000
+#define BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+#define BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x0000001f
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x0000001e
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL_MASK 0x000e0000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL__SHIFT 0x00000011
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001f
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000018
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x00000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x00000008
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x00000010
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000f0000L
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x00000010
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000ffffL
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x00000000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE_MASK 0x10000000L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE__SHIFT 0x0000001c
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN_MASK 0x00000004L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN__SHIFT 0x00000002
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN_MASK 0x00000002L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN__SHIFT 0x00000001
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB_MASK 0x00000001L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB__SHIFT 0x00000000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK_MASK 0x00700000L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK__SHIFT 0x00000014
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST_MASK 0x00003ff0L
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST__SHIFT 0x00000004
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR_MASK 0x0f000000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR__SHIFT 0x00000018
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT_MASK 0x003f0000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT__SHIFT 0x00000010
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON_MASK 0x10000000L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON__SHIFT 0x0000001c
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL_MASK 0x00003f00L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x00000008
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL_MASK 0x00000003L
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x00000000
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x0000007eL
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x00000001
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x00000004
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32__SHIFT 0x00000010
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33_MASK 0x0000ffffL
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33__SHIFT 0x00000000
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34_MASK 0xffff0000L
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32__SHIFT 0x00000010
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33_MASK 0x0000ffffL
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33__SHIFT 0x00000000
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34_MASK 0xffff0000L
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34__SHIFT 0x00000010
+#define CRT00__H_TOTAL_MASK 0x000000ffL
+#define CRT00__H_TOTAL__SHIFT 0x00000000
+#define CRT01__H_DISP_END_MASK 0x000000ffL
+#define CRT01__H_DISP_END__SHIFT 0x00000000
+#define CRT02__H_BLANK_START_MASK 0x000000ffL
+#define CRT02__H_BLANK_START__SHIFT 0x00000000
+#define CRT03__CR10CR11_R_DIS_B_MASK 0x00000080L
+#define CRT03__CR10CR11_R_DIS_B__SHIFT 0x00000007
+#define CRT03__H_BLANK_END_MASK 0x0000001fL
+#define CRT03__H_BLANK_END__SHIFT 0x00000000
+#define CRT03__H_DE_SKEW_MASK 0x00000060L
+#define CRT03__H_DE_SKEW__SHIFT 0x00000005
+#define CRT04__H_SYNC_START_MASK 0x000000ffL
+#define CRT04__H_SYNC_START__SHIFT 0x00000000
+#define CRT05__H_BLANK_END_B5_MASK 0x00000080L
+#define CRT05__H_BLANK_END_B5__SHIFT 0x00000007
+#define CRT05__H_SYNC_END_MASK 0x0000001fL
+#define CRT05__H_SYNC_END__SHIFT 0x00000000
+#define CRT05__H_SYNC_SKEW_MASK 0x00000060L
+#define CRT05__H_SYNC_SKEW__SHIFT 0x00000005
+#define CRT06__V_TOTAL_MASK 0x000000ffL
+#define CRT06__V_TOTAL__SHIFT 0x00000000
+#define CRT07__LINE_CMP_B8_MASK 0x00000010L
+#define CRT07__LINE_CMP_B8__SHIFT 0x00000004
+#define CRT07__V_BLANK_START_B8_MASK 0x00000008L
+#define CRT07__V_BLANK_START_B8__SHIFT 0x00000003
+#define CRT07__V_DISP_END_B8_MASK 0x00000002L
+#define CRT07__V_DISP_END_B8__SHIFT 0x00000001
+#define CRT07__V_DISP_END_B9_MASK 0x00000040L
+#define CRT07__V_DISP_END_B9__SHIFT 0x00000006
+#define CRT07__V_SYNC_START_B8_MASK 0x00000004L
+#define CRT07__V_SYNC_START_B8__SHIFT 0x00000002
+#define CRT07__V_SYNC_START_B9_MASK 0x00000080L
+#define CRT07__V_SYNC_START_B9__SHIFT 0x00000007
+#define CRT07__V_TOTAL_B8_MASK 0x00000001L
+#define CRT07__V_TOTAL_B8__SHIFT 0x00000000
+#define CRT07__V_TOTAL_B9_MASK 0x00000020L
+#define CRT07__V_TOTAL_B9__SHIFT 0x00000005
+#define CRT08__BYTE_PAN_MASK 0x00000060L
+#define CRT08__BYTE_PAN__SHIFT 0x00000005
+#define CRT08__ROW_SCAN_START_MASK 0x0000001fL
+#define CRT08__ROW_SCAN_START__SHIFT 0x00000000
+#define CRT09__DOUBLE_CHAR_HEIGHT_MASK 0x00000080L
+#define CRT09__DOUBLE_CHAR_HEIGHT__SHIFT 0x00000007
+#define CRT09__LINE_CMP_B9_MASK 0x00000040L
+#define CRT09__LINE_CMP_B9__SHIFT 0x00000006
+#define CRT09__MAX_ROW_SCAN_MASK 0x0000001fL
+#define CRT09__MAX_ROW_SCAN__SHIFT 0x00000000
+#define CRT09__V_BLANK_START_B9_MASK 0x00000020L
+#define CRT09__V_BLANK_START_B9__SHIFT 0x00000005
+#define CRT0A__CURSOR_DISABLE_MASK 0x00000020L
+#define CRT0A__CURSOR_DISABLE__SHIFT 0x00000005
+#define CRT0A__CURSOR_START_MASK 0x0000001fL
+#define CRT0A__CURSOR_START__SHIFT 0x00000000
+#define CRT0B__CURSOR_END_MASK 0x0000001fL
+#define CRT0B__CURSOR_END__SHIFT 0x00000000
+#define CRT0B__CURSOR_SKEW_MASK 0x00000060L
+#define CRT0B__CURSOR_SKEW__SHIFT 0x00000005
+#define CRT0C__DISP_START_MASK 0x000000ffL
+#define CRT0C__DISP_START__SHIFT 0x00000000
+#define CRT0D__DISP_START_MASK 0x000000ffL
+#define CRT0D__DISP_START__SHIFT 0x00000000
+#define CRT0E__CURSOR_LOC_HI_MASK 0x000000ffL
+#define CRT0E__CURSOR_LOC_HI__SHIFT 0x00000000
+#define CRT0F__CURSOR_LOC_LO_MASK 0x000000ffL
+#define CRT0F__CURSOR_LOC_LO__SHIFT 0x00000000
+#define CRT10__V_SYNC_START_MASK 0x000000ffL
+#define CRT10__V_SYNC_START__SHIFT 0x00000000
+#define CRT11__C0T7_WR_ONLY_MASK 0x00000080L
+#define CRT11__C0T7_WR_ONLY__SHIFT 0x00000007
+#define CRT11__SEL5_REFRESH_CYC_MASK 0x00000040L
+#define CRT11__SEL5_REFRESH_CYC__SHIFT 0x00000006
+#define CRT11__V_INTR_CLR_MASK 0x00000010L
+#define CRT11__V_INTR_CLR__SHIFT 0x00000004
+#define CRT11__V_INTR_EN_MASK 0x00000020L
+#define CRT11__V_INTR_EN__SHIFT 0x00000005
+#define CRT11__V_SYNC_END_MASK 0x0000000fL
+#define CRT11__V_SYNC_END__SHIFT 0x00000000
+#define CRT12__V_DISP_END_MASK 0x000000ffL
+#define CRT12__V_DISP_END__SHIFT 0x00000000
+#define CRT13__DISP_PITCH_MASK 0x000000ffL
+#define CRT13__DISP_PITCH__SHIFT 0x00000000
+#define CRT14__ADDR_CNT_BY4_MASK 0x00000020L
+#define CRT14__ADDR_CNT_BY4__SHIFT 0x00000005
+#define CRT14__DOUBLE_WORD_MASK 0x00000040L
+#define CRT14__DOUBLE_WORD__SHIFT 0x00000006
+#define CRT14__UNDRLN_LOC_MASK 0x0000001fL
+#define CRT14__UNDRLN_LOC__SHIFT 0x00000000
+#define CRT15__V_BLANK_START_MASK 0x000000ffL
+#define CRT15__V_BLANK_START__SHIFT 0x00000000
+#define CRT16__V_BLANK_END_MASK 0x000000ffL
+#define CRT16__V_BLANK_END__SHIFT 0x00000000
+#define CRT17__ADDR_CNT_BY2_MASK 0x00000008L
+#define CRT17__ADDR_CNT_BY2__SHIFT 0x00000003
+#define CRT17__BYTE_MODE_MASK 0x00000040L
+#define CRT17__BYTE_MODE__SHIFT 0x00000006
+#define CRT17__CRTC_SYNC_EN_MASK 0x00000080L
+#define CRT17__CRTC_SYNC_EN__SHIFT 0x00000007
+#define CRT17__RA0_AS_A13B_MASK 0x00000001L
+#define CRT17__RA0_AS_A13B__SHIFT 0x00000000
+#define CRT17__RA1_AS_A14B_MASK 0x00000002L
+#define CRT17__RA1_AS_A14B__SHIFT 0x00000001
+#define CRT17__VCOUNT_BY2_MASK 0x00000004L
+#define CRT17__VCOUNT_BY2__SHIFT 0x00000002
+#define CRT17__WRAP_A15TOA0_MASK 0x00000020L
+#define CRT17__WRAP_A15TOA0__SHIFT 0x00000005
+#define CRT18__LINE_CMP_MASK 0x000000ffL
+#define CRT18__LINE_CMP__SHIFT 0x00000000
+#define CRT1E__GRPH_DEC_RD1_MASK 0x00000002L
+#define CRT1E__GRPH_DEC_RD1__SHIFT 0x00000001
+#define CRT1F__GRPH_DEC_RD0_MASK 0x000000ffL
+#define CRT1F__GRPH_DEC_RD0__SHIFT 0x00000000
+#define CRT22__GRPH_LATCH_DATA_MASK 0x000000ffL
+#define CRT22__GRPH_LATCH_DATA__SHIFT 0x00000000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL_MASK 0x00000100L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL_MASK 0x00000200L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x00000004
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL_MASK 0x00000100L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL_MASK 0x00000200L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x00000004
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL_MASK 0x00000100L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL_MASK 0x00000200L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x00000004
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB_MASK 0x00000010L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB__SHIFT 0x00000004
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_MASK 0x00000001L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN__SHIFT 0x00000000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_MASK 0x000c0000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x00000011
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x00000010
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT__SHIFT 0x00000012
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0x0000000c
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x00000008
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL_MASK 0x00000100L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL_MASK 0x00000200L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x00000004
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL_MASK 0x00000100L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL_MASK 0x00000200L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE_MASK 0x00000010L
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE__SHIFT 0x00000004
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL_MASK 0x00000100L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL__SHIFT 0x00000008
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT_MASK 0x0fff0000L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT__SHIFT 0x00000010
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR_MASK 0x0000c000L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR__SHIFT 0x0000000e
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL_MASK 0x00000200L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL__SHIFT 0x00000009
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE__SHIFT 0x00000000
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE_MASK 0x00000010L
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE__SHIFT 0x00000004
+#define CRTC8_DATA__VCRTC_DATA_MASK 0x000000ffL
+#define CRTC8_DATA__VCRTC_DATA__SHIFT 0x00000000
+#define CRTC8_IDX__VCRTC_IDX_MASK 0x0000003fL
+#define CRTC8_IDX__VCRTC_IDX__SHIFT 0x00000000
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT_MASK 0x000000ffL
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT__SHIFT 0x00000000
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT_MASK 0x00010000L
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT__SHIFT 0x00000010
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x000003ffL
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x00000000
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0x000ffc00L
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0x0000000a
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000L
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x00000014
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK 0x00000100L
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN__SHIFT 0x00000008
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE_MASK 0x00010000L
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE__SHIFT 0x00000010
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE__SHIFT 0x00000000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003ffL
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x00000000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000ffc00L
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0x0000000a
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR_MASK 0x3ff00000L
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR__SHIFT 0x00000014
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x00000010
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x00000008
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x00000018
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0x0000000d
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x00700000L
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x00000014
+#define CRTC_CONTROL__CRTC_MASTER_EN_MASK 0x00000001L
+#define CRTC_CONTROL__CRTC_MASTER_EN__SHIFT 0x00000000
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000L
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x0000001d
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL_MASK 0x00001000L
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0x0000000c
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x00000010L
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x00000004
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x00000000
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x0000001eL
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT__SHIFT 0x00000001
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT_MASK 0x00000001L
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE_MASK 0x80000000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE__SHIFT 0x0000001f
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_TEST_CLK_SEL_MASK 0x1f000000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_TEST_CLK_SEL__SHIFT 0x00000018
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_DCP_GATE_DISABLE_MASK 0x00000100L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_DCP_GATE_DISABLE__SHIFT 0x00000008
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_SCL_GATE_DISABLE_MASK 0x00001000L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_G_SCL_GATE_DISABLE__SHIFT 0x0000000c
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_R_DCFE_GATE_DISABLE_MASK 0x00000010L
+#define CRTC_DCFE_CLOCK_CONTROL__CRTC_DISPCLK_R_DCFE_GATE_DISABLE__SHIFT 0x00000004
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x00000010
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY_MASK 0x00000100L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY__SHIFT 0x00000008
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING_MASK 0x00000001L
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING__SHIFT 0x00000000
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV_MASK 0x0000001eL
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV__SHIFT 0x00000001
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN_MASK 0x00000001L
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN__SHIFT 0x00000000
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY_MASK 0x00010000L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY__SHIFT 0x00000010
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS_MASK 0x01000000L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x00000018
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY_MASK 0x00000100L
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY__SHIFT 0x00000008
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK__SHIFT 0x00000004
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR__SHIFT 0x00000018
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE__SHIFT 0x00000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x00000010
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x00000008
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS__SHIFT 0x0000001c
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM_MASK 0x00001fffL
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM__SHIFT 0x00000000
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY_MASK 0x001f0000L
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY__SHIFT 0x00000010
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR__SHIFT 0x00000013
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY_MASK 0x0000ff00L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY__SHIFT 0x00000008
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT_MASK 0x000000ffL
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT__SHIFT 0x00000000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASK 0xff000000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x00000017
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE__SHIFT 0x00000011
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x00000014
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP__SHIFT 0x00000018
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x00000010
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END_MASK 0x1fff0000L
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END__SHIFT 0x00000010
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START_MASK 0x00001fffL
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START__SHIFT 0x00000000
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS_MASK 0x00010000L
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS__SHIFT 0x00000010
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_MASK 0x000003ffL
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM__SHIFT 0x00000000
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x1fff0000L
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x00000010
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x00001fffL
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x00000000
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN_MASK 0x00010000L
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN__SHIFT 0x00000010
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF_MASK 0x00020000L
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF__SHIFT 0x00000011
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL_MASK 0x00000001L
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL__SHIFT 0x00000000
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x1fff0000L
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x00000010
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x00001fffL
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x00000000
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN_MASK 0x00010000L
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN__SHIFT 0x00000010
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF_MASK 0x00020000L
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF__SHIFT 0x00000011
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL_MASK 0x00000001L
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL__SHIFT 0x00000000
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END_MASK 0x1fff0000L
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END__SHIFT 0x00000010
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START_MASK 0x00001fffL
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START__SHIFT 0x00000000
+#define CRTC_H_TOTAL__CRTC_H_TOTAL_MASK 0x00001fffL
+#define CRTC_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x00000000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE_MASK 0x00000001L
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE__SHIFT 0x00000000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x00000010
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD__SHIFT 0x00000000
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD__SHIFT 0x00000001
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x00000008
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x00000009
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x00000010
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x00000011
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x0000001e
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x0000001f
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK__SHIFT 0x00000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE__SHIFT 0x00000001
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK_MASK 0x01000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK__SHIFT 0x00000018
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE_MASK 0x04000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE__SHIFT 0x0000001a
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK_MASK 0x02000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK__SHIFT 0x00000019
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE_MASK 0x08000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE__SHIFT 0x0000001b
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK__SHIFT 0x0000001c
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE__SHIFT 0x0000001d
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK 0x00000010L
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK__SHIFT 0x00000004
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE_MASK 0x00000020L
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE__SHIFT 0x00000005
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x00000000
+#define CRTC_MASTER_EN__CRTC_MASTER_EN_MASK 0x00000001L
+#define CRTC_MASTER_EN__CRTC_MASTER_EN__SHIFT 0x00000000
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_MASK 0xffffff00L
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT__SHIFT 0x00000008
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE_MASK 0x00000003L
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE__SHIFT 0x00000000
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER_MASK 0x000000ffL
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER__SHIFT 0x00000000
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_MASK 0x00100000L
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR__SHIFT 0x00000014
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED_MASK 0x00000010L
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED__SHIFT 0x00000004
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR_MASK 0x00010000L
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR__SHIFT 0x00000010
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED_MASK 0x00000001L
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED__SHIFT 0x00000000
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM_MASK 0x00001fffL
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM__SHIFT 0x00000000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x000003ffL
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x00000000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0x000ffc00L
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0x0000000a
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000L
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x00000014
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT_MASK 0x00ffffffL
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR__SHIFT 0x00000001
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x00000002
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED__SHIFT 0x00000000
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0x000f0000L
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0x00000010
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x00000100L
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x00000008
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x00000001L
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x00000000
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP_MASK 0x00020000L
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP__SHIFT 0x00000011
+#define CRTC_STATUS__CRTC_H_BLANK_MASK 0x00010000L
+#define CRTC_STATUS__CRTC_H_BLANK__SHIFT 0x00000010
+#define CRTC_STATUS__CRTC_H_SYNC_A_MASK 0x00040000L
+#define CRTC_STATUS__CRTC_H_SYNC_A__SHIFT 0x00000012
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP_MASK 0x00000002L
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP__SHIFT 0x00000001
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE__SHIFT 0x00000005
+#define CRTC_STATUS__CRTC_V_BLANK_MASK 0x00000001L
+#define CRTC_STATUS__CRTC_V_BLANK__SHIFT 0x00000000
+#define CRTC_STATUS__CRTC_V_START_LINE_MASK 0x00000010L
+#define CRTC_STATUS__CRTC_V_START_LINE__SHIFT 0x00000004
+#define CRTC_STATUS__CRTC_V_SYNC_A_MASK 0x00000004L
+#define CRTC_STATUS__CRTC_V_SYNC_A__SHIFT 0x00000002
+#define CRTC_STATUS__CRTC_V_UPDATE_MASK 0x00000008L
+#define CRTC_STATUS__CRTC_V_UPDATE__SHIFT 0x00000003
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT_MASK 0x00ffffffL
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT_MASK 0x1fffffffL
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT_MASK 0x1fff0000L
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT__SHIFT 0x00000010
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT_MASK 0x00001fffL
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT__SHIFT 0x00000000
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT_MASK 0x1fffffffL
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT__SHIFT 0x00000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN_MASK 0x01000000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN__SHIFT 0x00000018
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00001fffL
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x00000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0x0000000f
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY_MASK 0x00010000L
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY__SHIFT 0x00000010
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE__SHIFT 0x00000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE__SHIFT 0x00000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x00000018
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT__SHIFT 0x00000008
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT__SHIFT 0x00000010
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA_MASK 0x0000ffffL
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK_MASK 0x003f0000L
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT_MASK 0xff000000L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT__SHIFT 0x00000018
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE_MASK 0x00010000L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN_MASK 0x00000001L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE_MASK 0x00000700L
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE__SHIFT 0x00000008
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES_MASK 0x0000f000L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES__SHIFT 0x0000000c
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0_MASK 0x0000000fL
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0__SHIFT 0x00000000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1_MASK 0x000000f0L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1__SHIFT 0x00000004
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET_MASK 0xffff0000L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET__SHIFT 0x00000010
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES_MASK 0x00000f00L
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES__SHIFT 0x00000008
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR_MASK 0x80000000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR__SHIFT 0x0000001f
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY_MASK 0x1f000000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY__SHIFT 0x00000018
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x00000010
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT__SHIFT 0x00000014
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS_MASK 0x00000200L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS__SHIFT 0x00000009
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED_MASK 0x00000800L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED__SHIFT 0x0000000b
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT_MASK 0x000000e0L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT__SHIFT 0x00000005
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS_MASK 0x00000400L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS__SHIFT 0x0000000a
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000100L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN__SHIFT 0x00000008
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00003000L
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x0000000c
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG__SHIFT 0x00000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR_MASK 0x80000000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR__SHIFT 0x0000001f
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY_MASK 0x1f000000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY__SHIFT 0x00000018
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x00000010
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT__SHIFT 0x00000014
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS_MASK 0x00000200L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS__SHIFT 0x00000009
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED_MASK 0x00000800L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED__SHIFT 0x0000000b
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT_MASK 0x000000e0L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT__SHIFT 0x00000005
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS_MASK 0x00000400L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS__SHIFT 0x0000000a
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000100L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN__SHIFT 0x00000008
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00003000L
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x0000000c
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT_MASK 0x0000001fL
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT__SHIFT 0x00000000
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG__SHIFT 0x00000000
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK_MASK 0x00000001L
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK__SHIFT 0x00000000
+#define CRTC_VBI_END__CRTC_VBI_H_END_MASK 0x1fff0000L
+#define CRTC_VBI_END__CRTC_VBI_H_END__SHIFT 0x00000010
+#define CRTC_VBI_END__CRTC_VBI_V_END_MASK 0x00001fffL
+#define CRTC_VBI_END__CRTC_VBI_V_END__SHIFT 0x00000000
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x1fff0000L
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x00000010
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x00001fffL
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x00000000
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE__SHIFT 0x00000010
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x00000008
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x00000000
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE_MASK 0x00000001L
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE__SHIFT 0x00000000
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL_MASK 0x00000001L
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL__SHIFT 0x00000000
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x1fff0000L
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x00000010
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x00001fffL
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x00000000
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL_MASK 0x00000001L
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL__SHIFT 0x00000000
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END_MASK 0x1fff0000L
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END__SHIFT 0x00000010
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START_MASK 0x00001fffL
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START__SHIFT 0x00000000
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR__SHIFT 0x00000004
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_MASK 0x00000001L
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM__SHIFT 0x00000000
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT_MASK 0x00000100L
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT__SHIFT 0x00000008
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC_MASK 0x00001000L
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC__SHIFT 0x0000000c
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_MASK 0xffff0000L
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK__SHIFT 0x00000010
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL_MASK 0x00000010L
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL__SHIFT 0x00000004
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL__SHIFT 0x00000000
+#define CRTC_V_TOTAL__CRTC_V_TOTAL_MASK 0x00001fffL
+#define CRTC_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x00000000
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x00000008
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x00000004
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0x0000000c
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x00000000
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING_MASK 0x00010000L
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING__SHIFT 0x00000010
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX_MASK 0x00001fffL
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX__SHIFT 0x00000000
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN_MASK 0x00001fffL
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN__SHIFT 0x00000000
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK 0x00000100L
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR__SHIFT 0x00000008
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED_MASK 0x00000001L
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED__SHIFT 0x00000000
+#define CUR_COLOR1__CUR_COLOR1_BLUE_MASK 0x000000ffL
+#define CUR_COLOR1__CUR_COLOR1_BLUE__SHIFT 0x00000000
+#define CUR_COLOR1__CUR_COLOR1_GREEN_MASK 0x0000ff00L
+#define CUR_COLOR1__CUR_COLOR1_GREEN__SHIFT 0x00000008
+#define CUR_COLOR1__CUR_COLOR1_RED_MASK 0x00ff0000L
+#define CUR_COLOR1__CUR_COLOR1_RED__SHIFT 0x00000010
+#define CUR_COLOR2__CUR_COLOR2_BLUE_MASK 0x000000ffL
+#define CUR_COLOR2__CUR_COLOR2_BLUE__SHIFT 0x00000000
+#define CUR_COLOR2__CUR_COLOR2_GREEN_MASK 0x0000ff00L
+#define CUR_COLOR2__CUR_COLOR2_GREEN__SHIFT 0x00000008
+#define CUR_COLOR2__CUR_COLOR2_RED_MASK 0x00ff0000L
+#define CUR_COLOR2__CUR_COLOR2_RED__SHIFT 0x00000010
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP_MASK 0x00000010L
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP__SHIFT 0x00000004
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00010000L
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x00000010
+#define CUR_CONTROL__CURSOR_EN_MASK 0x00000001L
+#define CUR_CONTROL__CURSOR_EN__SHIFT 0x00000000
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON_MASK 0x00100000L
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON__SHIFT 0x00000014
+#define CUR_CONTROL__CURSOR_MODE_MASK 0x00000300L
+#define CUR_CONTROL__CURSOR_MODE__SHIFT 0x00000008
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL_MASK 0x07000000L
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT 0x00000018
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x003f0000L
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x00000010
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x0000003fL
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x00000000
+#define CUR_POSITION__CURSOR_X_POSITION_MASK 0x3fff0000L
+#define CUR_POSITION__CURSOR_X_POSITION__SHIFT 0x00000010
+#define CUR_POSITION__CURSOR_Y_POSITION_MASK 0x00003fffL
+#define CUR_POSITION__CURSOR_Y_POSITION__SHIFT 0x00000000
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS_MASK 0x00000001L
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS__SHIFT 0x00000000
+#define CUR_SIZE__CURSOR_HEIGHT_MASK 0x0000003fL
+#define CUR_SIZE__CURSOR_HEIGHT__SHIFT 0x00000000
+#define CUR_SIZE__CURSOR_WIDTH_MASK 0x003f0000L
+#define CUR_SIZE__CURSOR_WIDTH__SHIFT 0x00000010
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xffffffffL
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x00000000
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK 0x00010000L
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK__SHIFT 0x00000010
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING_MASK 0x00000001L
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING__SHIFT 0x00000000
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN_MASK 0x00000002L
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN__SHIFT 0x00000001
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK 0x00000001L
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D1VGA_CONTROL__D1VGA_ROTATE_MASK 0x03000000L
+#define D1VGA_CONTROL__D1VGA_ROTATE__SHIFT 0x00000018
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK 0x00000100L
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK 0x00000001L
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D2VGA_CONTROL__D2VGA_ROTATE_MASK 0x03000000L
+#define D2VGA_CONTROL__D2VGA_ROTATE__SHIFT 0x00000018
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK 0x00000100L
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE_MASK 0x00000001L
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D3VGA_CONTROL__D3VGA_ROTATE_MASK 0x03000000L
+#define D3VGA_CONTROL__D3VGA_ROTATE__SHIFT 0x00000018
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT_MASK 0x00000100L
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE_MASK 0x00000001L
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D4VGA_CONTROL__D4VGA_ROTATE_MASK 0x03000000L
+#define D4VGA_CONTROL__D4VGA_ROTATE__SHIFT 0x00000018
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT_MASK 0x00000100L
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE_MASK 0x00000001L
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D5VGA_CONTROL__D5VGA_ROTATE_MASK 0x03000000L
+#define D5VGA_CONTROL__D5VGA_ROTATE__SHIFT 0x00000018
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT_MASK 0x00000100L
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT__SHIFT 0x00000008
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE_MASK 0x00000001L
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE__SHIFT 0x00000000
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN_MASK 0x00010000L
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN__SHIFT 0x00000010
+#define D6VGA_CONTROL__D6VGA_ROTATE_MASK 0x03000000L
+#define D6VGA_CONTROL__D6VGA_ROTATE__SHIFT 0x00000018
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT_MASK 0x00000200L
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT__SHIFT 0x00000009
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT_MASK 0x00000100L
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER_MASK 0x000000ffL
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER__SHIFT 0x00000000
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE_MASK 0x00000100L
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY_MASK 0x000000ffL
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY__SHIFT 0x00000000
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY_MASK 0x0000ff00L
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK_MASK 0x00070000L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK__SHIFT 0x00000010
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER_MASK 0x0000ff00L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER__SHIFT 0x00000008
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE_MASK 0x00000003L
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE__SHIFT 0x00000000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK_MASK 0x00000001L
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK__SHIFT 0x00000000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE_MASK 0x00010000L
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE__SHIFT 0x00000010
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE_MASK 0x03000000L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE__SHIFT 0x00000018
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT_MASK 0x00000010L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT__SHIFT 0x00000004
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE_MASK 0x00030000L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE__SHIFT 0x00000010
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE_MASK 0x00000300L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE__SHIFT 0x00000008
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS_MASK 0x00000001L
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS__SHIFT 0x00000000
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE_MASK 0x00000001L
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE__SHIFT 0x00000000
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE_MASK 0x00000010L
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE__SHIFT 0x00000004
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE_MASK 0x00040000L
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE__SHIFT 0x00000012
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN_MASK 0x00000001L
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN__SHIFT 0x00000000
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN_MASK 0x00000100L
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN__SHIFT 0x00000008
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE_MASK 0x00020000L
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE__SHIFT 0x00000011
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE_MASK 0x00010000L
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE__SHIFT 0x00000010
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE_MASK 0x00000002L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE__SHIFT 0x00000001
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN_MASK 0x00000004L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN__SHIFT 0x00000002
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_MASK 0x00000001L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED_MASK 0x00000008L
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED__SHIFT 0x00000003
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT__SHIFT 0x00000000
+#define DAC_CONTROL__DAC_DFORCE_EN_MASK 0x00000001L
+#define DAC_CONTROL__DAC_DFORCE_EN__SHIFT 0x00000000
+#define DAC_CONTROL__DAC_TV_ENABLE_MASK 0x00000100L
+#define DAC_CONTROL__DAC_TV_ENABLE__SHIFT 0x00000008
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT_MASK 0x00010000L
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT__SHIFT 0x00000010
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD_MASK 0x00000001L
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD__SHIFT 0x00000000
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKb_MASK 0x00000100L
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKb__SHIFT 0x00000008
+#define DAC_CRC_EN__DAC_CRC_CONT_EN_MASK 0x00010000L
+#define DAC_CRC_EN__DAC_CRC_CONT_EN__SHIFT 0x00000010
+#define DAC_CRC_EN__DAC_CRC_EN_MASK 0x00000001L
+#define DAC_CRC_EN__DAC_CRC_EN__SHIFT 0x00000000
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL_MASK 0x0000003fL
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL__SHIFT 0x00000000
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK_MASK 0x0000003fL
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE_MASK 0x000003ffL
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN_MASK 0x000ffc00L
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN__SHIFT 0x0000000a
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED_MASK 0x3ff00000L
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED__SHIFT 0x00000014
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK_MASK 0x000003ffL
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK__SHIFT 0x00000000
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK_MASK 0x000ffc00L
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK__SHIFT 0x0000000a
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK_MASK 0x3ff00000L
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK__SHIFT 0x00000014
+#define DAC_DATA__DAC_DATA_MASK 0x0000003fL
+#define DAC_DATA__DAC_DATA__SHIFT 0x00000000
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG_MASK 0xffffffffL
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG__SHIFT 0x00000000
+#define DAC_ENABLE__DAC_ENABLE_MASK 0x00000001L
+#define DAC_ENABLE__DAC_ENABLE__SHIFT 0x00000000
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE_MASK 0x00000002L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE__SHIFT 0x00000001
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK_MASK 0x00000020L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK__SHIFT 0x00000005
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_MASK 0x00000010L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR__SHIFT 0x00000004
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW_MASK 0x0000000cL
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW__SHIFT 0x00000002
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM_MASK 0x00000100L
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM__SHIFT 0x00000008
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED_MASK 0x20000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL_MASK 0x000f0000L
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
+#define DAC_FORCE_DATA__DAC_FORCE_DATA_MASK 0x000003ffL
+#define DAC_FORCE_DATA__DAC_FORCE_DATA__SHIFT 0x00000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN_MASK 0x00000001L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN__SHIFT 0x00000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKb_ONLY_MASK 0x01000000L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKb_ONLY__SHIFT 0x00000018
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL_MASK 0x00000700L
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL__SHIFT 0x00000008
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffffL
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED__SHIFT 0x00000000
+#define DAC_MASK__DAC_MASK_MASK 0x000000ffL
+#define DAC_MASK__DAC_MASK__SHIFT 0x00000000
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE_MASK 0x00000100L
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE__SHIFT 0x00000008
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN_MASK 0x00010000L
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN__SHIFT 0x00000010
+#define DAC_POWERDOWN__DAC_POWERDOWN_MASK 0x00000001L
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED_MASK 0x01000000L
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED__SHIFT 0x00000018
+#define DAC_POWERDOWN__DAC_POWERDOWN__SHIFT 0x00000000
+#define DAC_PWR_CNTL__DAC_BG_MODE_MASK 0x00000003L
+#define DAC_PWR_CNTL__DAC_BG_MODE__SHIFT 0x00000000
+#define DAC_PWR_CNTL__DAC_PWRCNTL_MASK 0x00030000L
+#define DAC_PWR_CNTL__DAC_PWRCNTL__SHIFT 0x00000010
+#define DAC_R_INDEX__DAC_R_INDEX_MASK 0x000000ffL
+#define DAC_R_INDEX__DAC_R_INDEX__SHIFT 0x00000000
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT_MASK 0x00000007L
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT__SHIFT 0x00000000
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT_MASK 0x00000008L
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT__SHIFT 0x00000003
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT_MASK 0x00000007L
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT__SHIFT 0x00000000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE_MASK 0x00000001L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE__SHIFT 0x00000000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE_MASK 0x00010000L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE__SHIFT 0x00000010
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE_MASK 0x00000100L
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE__SHIFT 0x00000008
+#define DAC_W_INDEX__DAC_W_INDEX_MASK 0x000000ffL
+#define DAC_W_INDEX__DAC_W_INDEX__SHIFT 0x00000000
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x00000008
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x00000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07ff0000L
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x00000010
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007fffL
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003ffL
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03ff0000L
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x00000010
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x0000001e
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001c
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x0000001d
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003ffL
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x00000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03ff0000L
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x00000010
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE_MASK 0x80000000L
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE__SHIFT 0x0000001f
+#define DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define DC_ABM1_CNTL__ABM1_EN__SHIFT 0x00000000
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT_MASK 0x00000700L
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT__SHIFT 0x00000008
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT_MASK 0x00010000L
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT__SHIFT 0x00000010
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT_MASK 0x00000001L
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT__SHIFT 0x00000000
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT_MASK 0x00000100L
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT__SHIFT 0x00000008
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xffffffffL
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x00000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x00000002
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x0000001f
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0x0000000a
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x00000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x00000010
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x00000008
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x00000001
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x00000018
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x00000009
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x00000017
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x00000018
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x0000001c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x0000001e
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x00000010
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0x0000000c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x0000001d
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x00000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x00000008
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x00000014
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x00000000
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xffffffffL
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x00000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000fL
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x00000000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000f00L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x00000008
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000f0000L
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x00000010
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03ff0000L
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x00000010
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003ffL
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x00000000
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03ff0000L
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x00000010
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003ffL
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03ff0000L
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x00000010
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003ffL
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x00000000
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN_MASK 0x00ffffffL
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN__SHIFT 0x00000000
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00ffffffL
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x00000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x0000001f
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00ff0000L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x00000010
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x00000001
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x00000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000ff00L
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x00000008
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xffffffffL
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x00000000
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE_MASK 0x3ff00000L
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE__SHIFT 0x00000014
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE_MASK 0x000ffc00L
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE__SHIFT 0x0000000a
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE_MASK 0x000003ffL
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xffffffffL
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x00000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000010L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x00000004
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xffffffffL
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x00000000
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x00000004
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE__SHIFT 0x00000005
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x00000000
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_RAMP_DIV_ID_MASK 0x07000000L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_RAMP_DIV_ID__SHIFT 0x00000018
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x00000001
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_RAMP_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_RAMP_DISABLE__SHIFT 0x00000014
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x00000006
+#define DCCG_GATE_DISABLE_CNTL__PCLK_TV_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL__PCLK_TV_GATE_DISABLE__SHIFT 0x00000010
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE__SHIFT 0x00000002
+#define DCCG_GATE_DISABLE_CNTL__SCLK_RAMP_DIV_ID_MASK 0x70000000L
+#define DCCG_GATE_DISABLE_CNTL__SCLK_RAMP_DIV_ID__SHIFT 0x0000001c
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKA_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKA_GATE_DISABLE__SHIFT 0x00000008
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKB_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKB_GATE_DISABLE__SHIFT 0x00000009
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKC_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKC_GATE_DISABLE__SHIFT 0x0000000a
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKD_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKD_GATE_DISABLE__SHIFT 0x0000000b
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKE_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKE_GATE_DISABLE__SHIFT 0x0000000c
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKF_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL__SYMCLKF_GATE_DISABLE__SHIFT 0x0000000d
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x00000000
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xffffffffL
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x00000000
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xffffffffL
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x00000000
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL__SHIFT 0x00000008
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x00000000
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x00000001
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x00000007
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x00000006
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x00000004
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x00000002
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x00000003
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x00000005
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x00000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV_MASK 0x00010000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV__SHIFT 0x00000010
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL_MASK 0x000000ffL
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL__SHIFT 0x00000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV_MASK 0x01000000L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV__SHIFT 0x00000018
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL_MASK 0x0000ff00L
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL__SHIFT 0x00000008
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCCG_TEST_DEBUG_INDEX__DCCG_DBG_SEL_MASK 0x00001000L
+#define DCCG_TEST_DEBUG_INDEX__DCCG_DBG_SEL__SHIFT 0x0000000c
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DCCG_VPCLK_CNTL__AZ_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCCG_VPCLK_CNTL__AZ_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCCG_VPCLK_CNTL__AZ_MEM_SHUTDOWN_DIS_MASK 0x04000000L
+#define DCCG_VPCLK_CNTL__AZ_MEM_SHUTDOWN_DIS__SHIFT 0x0000001a
+#define DCCG_VPCLK_CNTL__DCCG_VPCLK_POL_MASK 0x00000001L
+#define DCCG_VPCLK_CNTL__DCCG_VPCLK_POL__SHIFT 0x00000000
+#define DCCG_VPCLK_CNTL__DMCU_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCCG_VPCLK_CNTL__DMCU_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCCG_VPCLK_CNTL__DMCU_MEM_SHUTDOWN_DIS_MASK 0x00010000L
+#define DCCG_VPCLK_CNTL__DMCU_MEM_SHUTDOWN_DIS__SHIFT 0x00000010
+#define DCCG_VPCLK_CNTL__DMIF0_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DCCG_VPCLK_CNTL__DMIF0_LIGHT_SLEEP_DIS__SHIFT 0x00000008
+#define DCCG_VPCLK_CNTL__DMIF0_MEM_SHUTDOWN_DIS_MASK 0x00100000L
+#define DCCG_VPCLK_CNTL__DMIF0_MEM_SHUTDOWN_DIS__SHIFT 0x00000014
+#define DCCG_VPCLK_CNTL__DMIF1_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DCCG_VPCLK_CNTL__DMIF1_LIGHT_SLEEP_DIS__SHIFT 0x00000009
+#define DCCG_VPCLK_CNTL__DMIF1_MEM_SHUTDOWN_DIS_MASK 0x00200000L
+#define DCCG_VPCLK_CNTL__DMIF1_MEM_SHUTDOWN_DIS__SHIFT 0x00000015
+#define DCCG_VPCLK_CNTL__DMIF2_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DCCG_VPCLK_CNTL__DMIF2_LIGHT_SLEEP_DIS__SHIFT 0x0000000a
+#define DCCG_VPCLK_CNTL__DMIF2_MEM_SHUTDOWN_DIS_MASK 0x00400000L
+#define DCCG_VPCLK_CNTL__DMIF2_MEM_SHUTDOWN_DIS__SHIFT 0x00000016
+#define DCCG_VPCLK_CNTL__DMIF3_LIGHT_SLEEP_DIS_MASK 0x00000800L
+#define DCCG_VPCLK_CNTL__DMIF3_LIGHT_SLEEP_DIS__SHIFT 0x0000000b
+#define DCCG_VPCLK_CNTL__DMIF3_MEM_SHUTDOWN_DIS_MASK 0x00800000L
+#define DCCG_VPCLK_CNTL__DMIF3_MEM_SHUTDOWN_DIS__SHIFT 0x00000017
+#define DCCG_VPCLK_CNTL__DMIF4_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DCCG_VPCLK_CNTL__DMIF4_LIGHT_SLEEP_DIS__SHIFT 0x0000000c
+#define DCCG_VPCLK_CNTL__DMIF4_MEM_SHUTDOWN_DIS_MASK 0x01000000L
+#define DCCG_VPCLK_CNTL__DMIF4_MEM_SHUTDOWN_DIS__SHIFT 0x00000018
+#define DCCG_VPCLK_CNTL__DMIF5_LIGHT_SLEEP_DIS_MASK 0x00002000L
+#define DCCG_VPCLK_CNTL__DMIF5_LIGHT_SLEEP_DIS__SHIFT 0x0000000d
+#define DCCG_VPCLK_CNTL__DMIF5_MEM_SHUTDOWN_DIS_MASK 0x02000000L
+#define DCCG_VPCLK_CNTL__DMIF5_MEM_SHUTDOWN_DIS__SHIFT 0x00000019
+#define DCCG_VPCLK_CNTL__DMIF_XLR_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000020L
+#define DCCG_VPCLK_CNTL__DMIF_XLR_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000005
+#define DCCG_VPCLK_CNTL__DMIF_XLR_MEM_SHUTDOWN_MODE_FORCE_MASK 0x00040000L
+#define DCCG_VPCLK_CNTL__DMIF_XLR_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000012
+#define DCCG_VPCLK_CNTL__FBC_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DCCG_VPCLK_CNTL__FBC_LIGHT_SLEEP_DIS__SHIFT 0x0000000e
+#define DCCG_VPCLK_CNTL__FBC_MEM_SHUTDOWN_DIS_MASK 0x00080000L
+#define DCCG_VPCLK_CNTL__FBC_MEM_SHUTDOWN_DIS__SHIFT 0x00000013
+#define DCCG_VPCLK_CNTL__MCIF_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000010L
+#define DCCG_VPCLK_CNTL__MCIF_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000004
+#define DCCG_VPCLK_CNTL__MCIF_MEM_SHUTDOWN_MODE_FORCE_MASK 0x00020000L
+#define DCCG_VPCLK_CNTL__MCIF_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000011
+#define DCCG_VPCLK_CNTL__VGA_LIGHT_SLEEP_MODE_FORCE_MASK 0x00000002L
+#define DCCG_VPCLK_CNTL__VGA_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000001
+#define DCCG_VPCLK_CNTL__VIP_LIGHT_SLEEP_DIS_MASK 0x00008000L
+#define DCCG_VPCLK_CNTL__VIP_LIGHT_SLEEP_DIS__SHIFT 0x0000000f
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL__SHIFT 0x00000000
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL_MASK 0xffffffffL
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL_MASK 0x0000001fL
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_EN_MASK 0x00000020L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_EN__SHIFT 0x00000005
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_PIN_SEL_MASK 0x00000040L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_PIN_SEL__SHIFT 0x00000006
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_SEL_MASK 0x00300000L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_SEL__SHIFT 0x00000014
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_EN_MASK 0x00000080L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_EN__SHIFT 0x00000007
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA_MASK 0x000fff00L
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_TEST_DATA__SHIFT 0x00000008
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA_MASK 0xffffffffL
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA__SHIFT 0x00000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN_MASK 0x00001000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN__SHIFT 0x0000000c
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL_MASK 0x0000000fL
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL__SHIFT 0x00000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL_MASK 0x000001f0L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL__SHIFT 0x00000004
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN_MASK 0x10000000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN__SHIFT 0x0000001c
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL_MASK 0x000f0000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL__SHIFT 0x00000010
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL_MASK 0x01f00000L
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL__SHIFT 0x00000014
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH_MASK 0xffffffffL
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH__SHIFT 0x00000000
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN_MASK 0x00200000L
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN__SHIFT 0x00000015
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN_MASK 0x00100000L
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x00000014
+#define DC_DVODATA_CONFIG__VIP_MUX_EN_MASK 0x00080000L
+#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x00000013
+#define DCFE0_SOFT_RESET__CRTC0_SOFT_RESET_MASK 0x00000010L
+#define DCFE0_SOFT_RESET__CRTC0_SOFT_RESET__SHIFT 0x00000004
+#define DCFE0_SOFT_RESET__DCP0_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE0_SOFT_RESET__DCP0_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE0_SOFT_RESET__DCP0_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE0_SOFT_RESET__DCP0_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE0_SOFT_RESET__SCL0_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE0_SOFT_RESET__SCL0_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE0_SOFT_RESET__SCL0_SOFT_RESET_MASK 0x00000008L
+#define DCFE0_SOFT_RESET__SCL0_SOFT_RESET__SHIFT 0x00000003
+#define DCFE1_SOFT_RESET__CRTC1_SOFT_RESET_MASK 0x00000010L
+#define DCFE1_SOFT_RESET__CRTC1_SOFT_RESET__SHIFT 0x00000004
+#define DCFE1_SOFT_RESET__DCP1_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE1_SOFT_RESET__DCP1_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE1_SOFT_RESET__DCP1_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE1_SOFT_RESET__DCP1_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE1_SOFT_RESET__SCL1_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE1_SOFT_RESET__SCL1_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE1_SOFT_RESET__SCL1_SOFT_RESET_MASK 0x00000008L
+#define DCFE1_SOFT_RESET__SCL1_SOFT_RESET__SHIFT 0x00000003
+#define DCFE2_SOFT_RESET__CRTC2_SOFT_RESET_MASK 0x00000010L
+#define DCFE2_SOFT_RESET__CRTC2_SOFT_RESET__SHIFT 0x00000004
+#define DCFE2_SOFT_RESET__DCP2_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE2_SOFT_RESET__DCP2_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE2_SOFT_RESET__DCP2_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE2_SOFT_RESET__DCP2_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE2_SOFT_RESET__SCL2_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE2_SOFT_RESET__SCL2_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE2_SOFT_RESET__SCL2_SOFT_RESET_MASK 0x00000008L
+#define DCFE2_SOFT_RESET__SCL2_SOFT_RESET__SHIFT 0x00000003
+#define DCFE3_SOFT_RESET__CRTC3_SOFT_RESET_MASK 0x00000010L
+#define DCFE3_SOFT_RESET__CRTC3_SOFT_RESET__SHIFT 0x00000004
+#define DCFE3_SOFT_RESET__DCP3_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE3_SOFT_RESET__DCP3_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE3_SOFT_RESET__DCP3_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE3_SOFT_RESET__DCP3_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE3_SOFT_RESET__SCL3_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE3_SOFT_RESET__SCL3_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE3_SOFT_RESET__SCL3_SOFT_RESET_MASK 0x00000008L
+#define DCFE3_SOFT_RESET__SCL3_SOFT_RESET__SHIFT 0x00000003
+#define DCFE4_SOFT_RESET__CRTC4_SOFT_RESET_MASK 0x00000010L
+#define DCFE4_SOFT_RESET__CRTC4_SOFT_RESET__SHIFT 0x00000004
+#define DCFE4_SOFT_RESET__DCP4_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE4_SOFT_RESET__DCP4_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE4_SOFT_RESET__DCP4_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE4_SOFT_RESET__DCP4_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE4_SOFT_RESET__SCL4_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE4_SOFT_RESET__SCL4_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE4_SOFT_RESET__SCL4_SOFT_RESET_MASK 0x00000008L
+#define DCFE4_SOFT_RESET__SCL4_SOFT_RESET__SHIFT 0x00000003
+#define DCFE5_SOFT_RESET__CRTC5_SOFT_RESET_MASK 0x00000010L
+#define DCFE5_SOFT_RESET__CRTC5_SOFT_RESET__SHIFT 0x00000004
+#define DCFE5_SOFT_RESET__DCP5_PIXPIPE_SOFT_RESET_MASK 0x00000001L
+#define DCFE5_SOFT_RESET__DCP5_PIXPIPE_SOFT_RESET__SHIFT 0x00000000
+#define DCFE5_SOFT_RESET__DCP5_REQ_SOFT_RESET_MASK 0x00000002L
+#define DCFE5_SOFT_RESET__DCP5_REQ_SOFT_RESET__SHIFT 0x00000001
+#define DCFE5_SOFT_RESET__SCL5_ALU_SOFT_RESET_MASK 0x00000004L
+#define DCFE5_SOFT_RESET__SCL5_ALU_SOFT_RESET__SHIFT 0x00000002
+#define DCFE5_SOFT_RESET__SCL5_SOFT_RESET_MASK 0x00000008L
+#define DCFE5_SOFT_RESET__SCL5_SOFT_RESET__SHIFT 0x00000003
+#define DCFE_DBG_SEL__DCFE_DBG_SEL_MASK 0x0000000fL
+#define DCFE_DBG_SEL__DCFE_DBG_SEL__SHIFT 0x00000000
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_MEM_PWR_STATE_MASK 0x00000300L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_CURSOR_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_MEM_PWR_STATE_MASK 0x00003000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__DCP_LUT_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB1_MEM_SHUTDOWN_DIS_MASK 0x20000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB1_MEM_SHUTDOWN_DIS__SHIFT 0x0000001d
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB2_MEM_SHUTDOWN_DIS_MASK 0x40000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB2_MEM_SHUTDOWN_DIS__SHIFT 0x0000001e
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_0_MASK 0x00030000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_0__SHIFT 0x00000010
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_1_MASK 0x00c00000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_1__SHIFT 0x00000016
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_2_MASK 0x03000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__LB_MEM_PWR_STATE_2__SHIFT 0x00000018
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__OVLSCL_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__PIPE_MEM_SHUTDOWN_DIS_MASK 0x10000000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__PIPE_MEM_SHUTDOWN_DIS__SHIFT 0x0000001c
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_LIGHT_SLEEP_DIS__SHIFT 0x00000006
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_MEM_PWR_STATE_MASK 0x00300000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__REGAMMA_LUT_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCFE_MEM_LIGHT_SLEEP_CNTL__SCL_MEM_PWR_STATE__SHIFT 0x00000012
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x00000000
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000f00L
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x00000008
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x07000000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x00000018
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x00070000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x00000010
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00700000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x00000014
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x00007000L
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0x0000000c
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x00000000
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000f00L
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x00000008
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x07000000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x00000018
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL_MASK 0x00070000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x00000010
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00700000L
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x00000014
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL_MASK 0x00007000L
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL__SHIFT 0x0000000c
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK 0x00000100L
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDC6_MASK__AUX6_POL_MASK 0x00100000L
+#define DC_GPIO_DDC6_MASK__AUX6_POL__SHIFT 0x00000014
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN__SHIFT 0x00000004
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x00000016
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x00000010
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x00000014
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x00000006
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0f000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x00000018
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x00000008
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0x0000000c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xf0000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x0000001c
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x00000000
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x00000008
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_MASK 0x00010000L
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL__SHIFT 0x00000010
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG_MASK 0x00000300L
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG__SHIFT 0x00000008
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG_MASK 0x00000001L
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCLK_A_MASK 0x10000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCLK_A__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCNTL_A_MASK 0x07000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVOCNTL_A__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVODATA_A_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_A__DC_GPIO_DVODATA_A__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_A__DC_GPIO_MVP_DVOCNTL_A_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_A__DC_GPIO_MVP_DVOCNTL_A__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCLK_EN_MASK 0x10000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCLK_EN__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCNTL_EN_MASK 0x07000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVOCNTL_EN__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVODATA_EN_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_EN__DC_GPIO_DVODATA_EN__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_EN__DC_GPIO_MVP_DVOCNTL_EN_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_EN__DC_GPIO_MVP_DVOCNTL_EN__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCLK_MASK_MASK 0x10000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCLK_MASK__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCNTL_MASK_MASK 0x07000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVOCNTL_MASK__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVODATA_MASK_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_DVODATA_MASK__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_MVP_DVOCNTL_MASK_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_MASK__DC_GPIO_MVP_DVOCNTL_MASK__SHIFT 0x0000001e
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCLK_Y_MASK 0x10000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCLK_Y__SHIFT 0x0000001c
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCNTL_Y_MASK 0x07000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVOCNTL_Y__SHIFT 0x00000018
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVODATA_Y_MASK 0x00ffffffL
+#define DC_GPIO_DVODATA_Y__DC_GPIO_DVODATA_Y__SHIFT 0x00000000
+#define DC_GPIO_DVODATA_Y__DC_GPIO_MVP_DVOCNTL_Y_MASK 0xc0000000L
+#define DC_GPIO_DVODATA_Y__DC_GPIO_MVP_DVOCNTL_Y__SHIFT 0x0000001e
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x00000017
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x00000017
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x00000004L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x00000002
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x00000004
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x00000005
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x00000040L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x00000006
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000400L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0x0000000a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0x0000000c
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0x0000000d
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x00004000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0x0000000e
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x00040000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x00000012
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00400000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x00000018
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x04000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x0000001a
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x00000000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x00000008
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x00000010
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x00000014
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x00000015
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x00000016
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x00000017
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x00000000
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x00000008
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x00000010
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x00000018
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x00000000
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x00000008
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x00000010
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x00000018
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x00000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x00000003
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000004L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x00000002
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x00000008
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0x0000000b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00000400L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0x0000000a
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x00000010
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x00000013
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00040000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x00000012
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x00000018
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x0000001b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x04000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x0000001a
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x00000000
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x00000008
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x00000010
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x00000018
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x00000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x00000008
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x00000010
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x00000018
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x0000001a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x0000001c
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x00000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x00000008
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x00000010
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x00000018
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x0000001a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x0000001c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x00000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x00000040L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x00000006
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x00000008
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x00000009
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000400L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0x0000000a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x00000010
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x00000011
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x00040000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x00000012
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x00000014
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x00000015
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00400000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x00000016
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x00000018
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x00000019
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x04000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x0000001a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x0000001c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x0000001d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0x40000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x0000001e
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x00000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x00000008
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x00000010
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x00000018
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x0000001a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x0000001c
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x00000001
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x00000004L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x00000002
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x00000010L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x00000004
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x00000005
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x00000040L
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x00000006
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y_MASK 0x00000001L
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y__SHIFT 0x00000000
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y_MASK 0x00000002L
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y__SHIFT 0x00000001
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0f000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x00000018
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xf0000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x0000001c
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN_MASK 0x000f0000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN__SHIFT 0x00000010
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP_MASK 0x00f00000L
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP__SHIFT 0x00000014
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000fL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x00000000
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000f0L
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x00000004
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x00000002L
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x00000001
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00000040L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x00000006
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0x0000000c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x00004000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0x0000000e
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK__SHIFT 0x00000010
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS__SHIFT 0x00000014
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV_MASK 0x00400000L
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV__SHIFT 0x00000016
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y_MASK 0x00000001L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y__SHIFT 0x00000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y_MASK 0x00000100L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y__SHIFT 0x00000008
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y_MASK 0x00010000L
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y__SHIFT 0x00000010
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK 0x00000001L
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK 0x00000100L
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN_MASK 0x00000001L
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN_MASK 0x00000100L
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK_MASK 0x07000000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK__SHIFT 0x00000018
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK_MASK 0x00000001L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS__SHIFT 0x00000004
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV_MASK 0x00000040L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV__SHIFT 0x00000006
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK_MASK 0x70000000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK__SHIFT 0x0000001c
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK_MASK 0x00000100L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK__SHIFT 0x00000008
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS_MASK 0x00001000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS__SHIFT 0x0000000c
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV_MASK 0x00004000L
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV__SHIFT 0x0000000e
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y_MASK 0x00000001L
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y__SHIFT 0x00000000
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y_MASK 0x00000100L
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y__SHIFT 0x00000008
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000003fL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x00000000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x00000008
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0x0000000b
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001c000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0x0000000e
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000e0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x00000011
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x00000014
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x00000017
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xffffffffL
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP__SHIFT 0x00000004
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP__SHIFT 0x00000008
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP__SHIFT 0x0000000c
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP__SHIFT 0x00000010
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP__SHIFT 0x00000014
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x00000000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x00000004
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x00000008
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0x0000000c
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x00000010
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x00000014
+#define DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD1_CONTROL__DC_HPD1_EN_MASK 0x10000000L
+#define DC_HPD1_CONTROL__DC_HPD1_EN__SHIFT 0x0000001c
+#define DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD1_FAST_TRAIN_CNTL__DC_HPD1_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x00000001L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK__SHIFT 0x00000000
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK 0x00010000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN__SHIFT 0x00000010
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD1_INT_STATUS__DC_HPD1_INT_STATUS_MASK 0x00000001L
+#define DC_HPD1_INT_STATUS__DC_HPD1_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD1_INT_STATUS__DC_HPD1_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD1_INT_STATUS__DC_HPD1_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK 0x00000002L
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE__SHIFT 0x00000001
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD1_INT_STATUS__DC_HPD1_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD2_CONTROL__DC_HPD2_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD2_CONTROL__DC_HPD2_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD2_CONTROL__DC_HPD2_EN_MASK 0x10000000L
+#define DC_HPD2_CONTROL__DC_HPD2_EN__SHIFT 0x0000001c
+#define DC_HPD2_CONTROL__DC_HPD2_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD2_CONTROL__DC_HPD2_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD2_FAST_TRAIN_CNTL__DC_HPD2_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_ACK_MASK 0x00000001L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_ACK__SHIFT 0x00000000
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_EN_MASK 0x00010000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_EN__SHIFT 0x00000010
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD2_INT_CONTROL__DC_HPD2_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD2_INT_STATUS__DC_HPD2_INT_STATUS_MASK 0x00000001L
+#define DC_HPD2_INT_STATUS__DC_HPD2_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD2_INT_STATUS__DC_HPD2_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD2_INT_STATUS__DC_HPD2_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK 0x00000002L
+#define DC_HPD2_INT_STATUS__DC_HPD2_SENSE__SHIFT 0x00000001
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD2_INT_STATUS__DC_HPD2_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD2_TOGGLE_FILT_CNTL__DC_HPD2_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD3_CONTROL__DC_HPD3_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD3_CONTROL__DC_HPD3_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD3_CONTROL__DC_HPD3_EN_MASK 0x10000000L
+#define DC_HPD3_CONTROL__DC_HPD3_EN__SHIFT 0x0000001c
+#define DC_HPD3_CONTROL__DC_HPD3_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD3_CONTROL__DC_HPD3_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD3_FAST_TRAIN_CNTL__DC_HPD3_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_ACK_MASK 0x00000001L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_ACK__SHIFT 0x00000000
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_EN_MASK 0x00010000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_EN__SHIFT 0x00000010
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD3_INT_CONTROL__DC_HPD3_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD3_INT_STATUS__DC_HPD3_INT_STATUS_MASK 0x00000001L
+#define DC_HPD3_INT_STATUS__DC_HPD3_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD3_INT_STATUS__DC_HPD3_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD3_INT_STATUS__DC_HPD3_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK 0x00000002L
+#define DC_HPD3_INT_STATUS__DC_HPD3_SENSE__SHIFT 0x00000001
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD3_INT_STATUS__DC_HPD3_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD3_TOGGLE_FILT_CNTL__DC_HPD3_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD4_CONTROL__DC_HPD4_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD4_CONTROL__DC_HPD4_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD4_CONTROL__DC_HPD4_EN_MASK 0x10000000L
+#define DC_HPD4_CONTROL__DC_HPD4_EN__SHIFT 0x0000001c
+#define DC_HPD4_CONTROL__DC_HPD4_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD4_CONTROL__DC_HPD4_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD4_FAST_TRAIN_CNTL__DC_HPD4_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_ACK_MASK 0x00000001L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_ACK__SHIFT 0x00000000
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_EN_MASK 0x00010000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_EN__SHIFT 0x00000010
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD4_INT_CONTROL__DC_HPD4_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD4_INT_STATUS__DC_HPD4_INT_STATUS_MASK 0x00000001L
+#define DC_HPD4_INT_STATUS__DC_HPD4_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD4_INT_STATUS__DC_HPD4_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD4_INT_STATUS__DC_HPD4_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK 0x00000002L
+#define DC_HPD4_INT_STATUS__DC_HPD4_SENSE__SHIFT 0x00000001
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD4_INT_STATUS__DC_HPD4_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD4_TOGGLE_FILT_CNTL__DC_HPD4_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD5_CONTROL__DC_HPD5_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD5_CONTROL__DC_HPD5_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD5_CONTROL__DC_HPD5_EN_MASK 0x10000000L
+#define DC_HPD5_CONTROL__DC_HPD5_EN__SHIFT 0x0000001c
+#define DC_HPD5_CONTROL__DC_HPD5_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD5_CONTROL__DC_HPD5_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD5_FAST_TRAIN_CNTL__DC_HPD5_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_ACK_MASK 0x00000001L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_ACK__SHIFT 0x00000000
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_EN_MASK 0x00010000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_EN__SHIFT 0x00000010
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD5_INT_CONTROL__DC_HPD5_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD5_INT_STATUS__DC_HPD5_INT_STATUS_MASK 0x00000001L
+#define DC_HPD5_INT_STATUS__DC_HPD5_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD5_INT_STATUS__DC_HPD5_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD5_INT_STATUS__DC_HPD5_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK 0x00000002L
+#define DC_HPD5_INT_STATUS__DC_HPD5_SENSE__SHIFT 0x00000001
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD5_INT_STATUS__DC_HPD5_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD5_TOGGLE_FILT_CNTL__DC_HPD5_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_HPD6_CONTROL__DC_HPD6_CONNECTION_TIMER_MASK 0x00001fffL
+#define DC_HPD6_CONTROL__DC_HPD6_CONNECTION_TIMER__SHIFT 0x00000000
+#define DC_HPD6_CONTROL__DC_HPD6_EN_MASK 0x10000000L
+#define DC_HPD6_CONTROL__DC_HPD6_EN__SHIFT 0x0000001c
+#define DC_HPD6_CONTROL__DC_HPD6_RX_INT_TIMER_MASK 0x03ff0000L
+#define DC_HPD6_CONTROL__DC_HPD6_RX_INT_TIMER__SHIFT 0x00000010
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_DELAY_MASK 0x000000ffL
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_DELAY__SHIFT 0x00000000
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_AUX_TX_EN__SHIFT 0x00000018
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_DELAY_MASK 0x000ff000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_DELAY__SHIFT 0x0000000c
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+#define DC_HPD6_FAST_TRAIN_CNTL__DC_HPD6_CONNECT_FAST_TRAIN_EN__SHIFT 0x0000001c
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_ACK_MASK 0x00000001L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_ACK__SHIFT 0x00000000
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_EN_MASK 0x00010000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_EN__SHIFT 0x00000010
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK 0x00000100L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY__SHIFT 0x00000008
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_ACK_MASK 0x00100000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_ACK__SHIFT 0x00000014
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_EN_MASK 0x01000000L
+#define DC_HPD6_INT_CONTROL__DC_HPD6_RX_INT_EN__SHIFT 0x00000018
+#define DC_HPD6_INT_STATUS__DC_HPD6_INT_STATUS_MASK 0x00000001L
+#define DC_HPD6_INT_STATUS__DC_HPD6_INT_STATUS__SHIFT 0x00000000
+#define DC_HPD6_INT_STATUS__DC_HPD6_RX_INT_STATUS_MASK 0x00000100L
+#define DC_HPD6_INT_STATUS__DC_HPD6_RX_INT_STATUS__SHIFT 0x00000008
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_DELAYED_MASK 0x00000010L
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_DELAYED__SHIFT 0x00000004
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK 0x00000002L
+#define DC_HPD6_INT_STATUS__DC_HPD6_SENSE__SHIFT 0x00000001
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000ff000L
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0x0000000c
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000L
+#define DC_HPD6_INT_STATUS__DC_HPD6_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x00000018
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_CONNECT_INT_DELAY_MASK 0x000000ffL
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_CONNECT_INT_DELAY__SHIFT 0x00000000
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_DISCONNECT_INT_DELAY_MASK 0x0ff00000L
+#define DC_HPD6_TOGGLE_FILT_CNTL__DC_HPD6_DISCONNECT_INT_DELAY__SHIFT 0x00000014
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x00000008
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0x0000000c
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x00000019
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x00000018
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x00000004
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000cL
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x00000002
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x00000015
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x00000000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x00000014
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL_MASK 0x80000000L
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL__SHIFT 0x0000001f
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x00000008
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x00000000
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x00000002
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x00000001
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x00000003
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x00000014
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000ff00L
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x00000000
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x00000008
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x00ff0000L
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x00000010
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x0000001f
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0f000000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x00000018
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE_MASK 0x70000000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE__SHIFT 0x0000001c
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS__SHIFT 0x00000014
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE__SHIFT 0x00000003
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG__SHIFT 0x00000011
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN__SHIFT 0x00000007
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN__SHIFT 0x00000000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE__SHIFT 0x00000004
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE__SHIFT 0x00000005
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE__SHIFT 0x00000006
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY_MASK 0x00ff0000L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT_MASK 0xff000000L
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT__SHIFT 0x00000018
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE_MASK 0xffff0000L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE__SHIFT 0x00000010
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD__SHIFT 0x00000000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00f00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x00000014
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x0000001c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000ffffL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x00000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x00000005
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x00000004
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x00000006
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x00000009
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x00000008
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0x0000000a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0x0000000d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0x0000000c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0x0000000e
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x00000011
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x00000010
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x00000012
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x00000015
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x00000014
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x00000016
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x00000019
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x00000018
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x0000001a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x0000001c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x0000001b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x0000001d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x00000001
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x00000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x00000002
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x00000004
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x00000007
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x00000002
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x00000006
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0x0000000c
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0x0000000d
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0x0000000e
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0x0000000f
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x00000012
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x00000000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x00000008
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x00000005
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x00000008
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x00ff0000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x00000010
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x00000000
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0x0000000c
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0x0000000d
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x00000008
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL_MASK 0xf8000000L
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL__SHIFT 0x0000001b
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL_MASK 0x0000001fL
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL__SHIFT 0x00000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS_MASK 0x00008000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS__SHIFT 0x0000000f
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS_MASK 0x00010000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS__SHIFT 0x00000010
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS_MASK 0x00020000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS__SHIFT 0x00000011
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS_MASK 0x00040000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS__SHIFT 0x00000012
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS_MASK 0x00080000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS__SHIFT 0x00000013
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS_MASK 0x00100000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS__SHIFT 0x00000014
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS_MASK 0x00200000L
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS__SHIFT 0x00000015
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS_MASK 0x00000200L
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS__SHIFT 0x00000009
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS_MASK 0x00000800L
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS__SHIFT 0x0000000b
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS_MASK 0x00002000L
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS__SHIFT 0x0000000d
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS_MASK 0x00000040L
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS__SHIFT 0x00000006
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS_MASK 0x00000020L
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS__SHIFT 0x00000005
+#define DCI_CLK_CNTL__DISPCLK_R_DMCU_GATE_DIS_MASK 0x00004000L
+#define DCI_CLK_CNTL__DISPCLK_R_DMCU_GATE_DIS__SHIFT 0x0000000e
+#define DCI_CLK_CNTL__DISPCLK_R_VGA_GATE_DIS_MASK 0x00000400L
+#define DCI_CLK_CNTL__DISPCLK_R_VGA_GATE_DIS__SHIFT 0x0000000a
+#define DCI_CLK_CNTL__DISPCLK_R_VIP_GATE_DIS_MASK 0x00001000L
+#define DCI_CLK_CNTL__DISPCLK_R_VIP_GATE_DIS__SHIFT 0x0000000c
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS_MASK 0x00400000L
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS__SHIFT 0x00000016
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS_MASK 0x00800000L
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x00000017
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x00000008
+#define DCI_DEBUG_CONFIG__DCI_DBG_SEL_MASK 0x0000000fL
+#define DCI_DEBUG_CONFIG__DCI_DBG_SEL__SHIFT 0x00000000
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_PWR_STATE_MASK 0x00003000L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000040L
+#define DCI_MEM_PWR_CNTL__DMIF0_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000006
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000001
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000080L
+#define DCI_MEM_PWR_CNTL__DMIF1_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000007
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_PWR_STATE_MASK 0x00030000L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define DCI_MEM_PWR_CNTL__DMIF2_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000200L
+#define DCI_MEM_PWR_CNTL__DMIF3_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x00000009
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_PWR_STATE_MASK 0x00300000L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000400L
+#define DCI_MEM_PWR_CNTL__DMIF4_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x0000000a
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_SHUTDOWN_DIS_MASK 0x00000800L
+#define DCI_MEM_PWR_CNTL__DMIF5_ASYNC_MEM_SHUTDOWN_DIS__SHIFT 0x0000000b
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM1_PWR_STATE_MASK 0x00000003L
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM1_PWR_STATE__SHIFT 0x00000000
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM2_PWR_STATE_MASK 0x0000000cL
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM2_PWR_STATE__SHIFT 0x00000002
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM3_PWR_STATE_MASK 0x00000030L
+#define DCI_MEM_PWR_STATE2__DMCU_ERAM3_PWR_STATE__SHIFT 0x00000004
+#define DCI_MEM_PWR_STATE__AZ_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCI_MEM_PWR_STATE__AZ_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCI_MEM_PWR_STATE__DMCU_IRAM_PWR_STATE_MASK 0x30000000L
+#define DCI_MEM_PWR_STATE__DMCU_IRAM_PWR_STATE__SHIFT 0x0000001c
+#define DCI_MEM_PWR_STATE__DMCU_MEM_PWR_STATE_MASK 0x00000003L
+#define DCI_MEM_PWR_STATE__DMCU_MEM_PWR_STATE__SHIFT 0x00000000
+#define DCI_MEM_PWR_STATE__DMIF0_MEM_PWR_STATE_MASK 0x0000000cL
+#define DCI_MEM_PWR_STATE__DMIF0_MEM_PWR_STATE__SHIFT 0x00000002
+#define DCI_MEM_PWR_STATE__DMIF1_MEM_PWR_STATE_MASK 0x00000030L
+#define DCI_MEM_PWR_STATE__DMIF1_MEM_PWR_STATE__SHIFT 0x00000004
+#define DCI_MEM_PWR_STATE__DMIF2_MEM_PWR_STATE_MASK 0x000000c0L
+#define DCI_MEM_PWR_STATE__DMIF2_MEM_PWR_STATE__SHIFT 0x00000006
+#define DCI_MEM_PWR_STATE__DMIF3_MEM_PWR_STATE_MASK 0x00000300L
+#define DCI_MEM_PWR_STATE__DMIF3_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCI_MEM_PWR_STATE__DMIF4_MEM_PWR_STATE_MASK 0x00000c00L
+#define DCI_MEM_PWR_STATE__DMIF4_MEM_PWR_STATE__SHIFT 0x0000000a
+#define DCI_MEM_PWR_STATE__DMIF5_MEM_PWR_STATE_MASK 0x00003000L
+#define DCI_MEM_PWR_STATE__DMIF5_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM1_PWR_STATE_MASK 0x0c000000L
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM1_PWR_STATE__SHIFT 0x0000001a
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM_PWR_STATE_MASK 0x03000000L
+#define DCI_MEM_PWR_STATE__DMIF_XLR_MEM_PWR_STATE__SHIFT 0x00000018
+#define DCI_MEM_PWR_STATE__FBC_MEM_PWR_STATE_MASK 0x00030000L
+#define DCI_MEM_PWR_STATE__FBC_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCI_MEM_PWR_STATE__MCIF_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCI_MEM_PWR_STATE__MCIF_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCI_MEM_PWR_STATE__VGA_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCI_MEM_PWR_STATE__VGA_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCI_MEM_PWR_STATE__VIP_MEM_PWR_STATE_MASK 0x00300000L
+#define DCI_MEM_PWR_STATE__VIP_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_CLK_TRISTATE_MASK 0x00040000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_CLK_TRISTATE__SHIFT 0x00000012
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_MASK 0x00008000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_PREMUX_MASK 0x00004000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_PREMUX__SHIFT 0x0000000e
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_REG_MASK 0x00002000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0_REG__SHIFT 0x0000000d
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_A0__SHIFT 0x0000000f
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_MASK 0x00100000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_PREMUX_MASK 0x00080000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_PREMUX__SHIFT 0x00000013
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_REG_MASK 0x00010000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN_REG__SHIFT 0x00000010
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_EN__SHIFT 0x00000014
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MASK_REG_MASK 0x00400000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MASK_REG__SHIFT 0x00000016
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MUX_MASK 0x00200000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_MUX__SHIFT 0x00000015
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_MASK 0x08000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_PREMUX_MASK 0x04000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0_PREMUX__SHIFT 0x0000001a
+#define DCIO_DEBUG1__DOUT_DCIO_DVOCNTL1_SEL0__SHIFT 0x0000001b
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_ENABLE_MASK 0x00800000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_ENABLE__SHIFT 0x00000017
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_HSYNC_TRISTATE_MASK 0x00020000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_HSYNC_TRISTATE__SHIFT 0x00000011
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_RATE_SEL_MASK 0x02000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_RATE_SEL__SHIFT 0x00000019
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_VSYNC_TRISTATE_MASK 0x01000000L
+#define DCIO_DEBUG1__DOUT_DCIO_DVO_VSYNC_TRISTATE__SHIFT 0x00000018
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCLK_C_MASK 0x00001000L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCLK_C__SHIFT 0x0000000c
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_MASK 0x000000c0L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_REG_MASK 0x00000003L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0_REG__SHIFT 0x00000000
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_A0__SHIFT 0x00000006
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_MASK 0x00000c00L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_REG_MASK 0x00000030L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN_REG__SHIFT 0x00000004
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_EN__SHIFT 0x0000000a
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_MASK_REG_MASK 0x0000000cL
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_MASK_REG__SHIFT 0x00000002
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_SEL0_MASK 0x00000300L
+#define DCIO_DEBUG1__DOUT_DCIO_MVP_DVOCNTL_SEL0__SHIFT 0x00000008
+#define DCIO_DEBUG2__DCIO_DEBUG2_MASK 0xffffffffL
+#define DCIO_DEBUG2__DCIO_DEBUG2__SHIFT 0x00000000
+#define DCIO_DEBUG3__DCIO_DEBUG3_MASK 0xffffffffL
+#define DCIO_DEBUG3__DCIO_DEBUG3__SHIFT 0x00000000
+#define DCIO_DEBUG4__DCIO_DEBUG4_MASK 0xffffffffL
+#define DCIO_DEBUG4__DCIO_DEBUG4__SHIFT 0x00000000
+#define DCIO_DEBUG5__DCIO_DEBUG5_MASK 0xffffffffL
+#define DCIO_DEBUG5__DCIO_DEBUG5__SHIFT 0x00000000
+#define DCIO_DEBUG6__DCIO_DEBUG6_MASK 0xffffffffL
+#define DCIO_DEBUG6__DCIO_DEBUG6__SHIFT 0x00000000
+#define DCIO_DEBUG7__DCIO_DEBUG7_MASK 0xffffffffL
+#define DCIO_DEBUG7__DCIO_DEBUG7__SHIFT 0x00000000
+#define DCIO_DEBUG8__DCIO_DEBUG8_MASK 0xffffffffL
+#define DCIO_DEBUG8__DCIO_DEBUG8__SHIFT 0x00000000
+#define DCIO_DEBUG9__DCIO_DEBUG9_MASK 0xffffffffL
+#define DCIO_DEBUG9__DCIO_DEBUG9__SHIFT 0x00000000
+#define DCIO_DEBUGA__DCIO_DEBUGA_MASK 0xffffffffL
+#define DCIO_DEBUGA__DCIO_DEBUGA__SHIFT 0x00000000
+#define DCIO_DEBUGB__DCIO_DEBUGB_MASK 0xffffffffL
+#define DCIO_DEBUGB__DCIO_DEBUGB__SHIFT 0x00000000
+#define DCIO_DEBUGC__DCIO_DEBUGC_MASK 0xffffffffL
+#define DCIO_DEBUGC__DCIO_DEBUGC__SHIFT 0x00000000
+#define DCIO_DEBUG__DCIO_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUG__DCIO_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUGD__DCIO_DEBUGD_MASK 0xffffffffL
+#define DCIO_DEBUGD__DCIO_DEBUGD__SHIFT 0x00000000
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG_MASK 0xffffffffL
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG__SHIFT 0x00000000
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID_MASK 0xffffffffL
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID__SHIFT 0x00000000
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL_MASK 0x00070000L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL__SHIFT 0x00000010
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL_MASK 0x00000700L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL__SHIFT 0x00000008
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL_MASK 0x00000007L
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL__SHIFT 0x00000004
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x00000008
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL_MASK 0x00000003L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL__SHIFT 0x00000014
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x00000018
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL_MASK 0x00030000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL__SHIFT 0x00000010
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL__SHIFT 0x00000004
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x00000008
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL_MASK 0x00000003L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL__SHIFT 0x00000000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL__SHIFT 0x00000014
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x00000018
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL_MASK 0x00030000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL__SHIFT 0x00000010
+#define DCIO_IMPCAL_CNTL_AB__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_AB__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_AB__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE_MASK 0x0000000fL
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE__SHIFT 0x00000000
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE_MASK 0x00007000L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE__SHIFT 0x0000000c
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET_MASK 0x00000020L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET__SHIFT 0x00000005
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS_MASK 0x00000300L
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS__SHIFT 0x00000008
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET_MASK 0x00000010L
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET__SHIFT 0x00000004
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET_MASK 0x00000020L
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET__SHIFT 0x00000005
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET_MASK 0x00000040L
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET__SHIFT 0x00000006
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET_MASK 0x00000080L
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET__SHIFT 0x00000007
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET_MASK 0x00000100L
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET__SHIFT 0x00000008
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET_MASK 0x00000200L
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET__SHIFT 0x00000009
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET_MASK 0x00001000L
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET__SHIFT 0x0000000c
+#define DCI_SOFT_RESET__FBC_SOFT_RESET_MASK 0x00000008L
+#define DCI_SOFT_RESET__FBC_SOFT_RESET__SHIFT 0x00000003
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET_MASK 0x00000004L
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET__SHIFT 0x00000002
+#define DCI_SOFT_RESET__VGA_SOFT_RESET_MASK 0x00000001L
+#define DCI_SOFT_RESET__VGA_SOFT_RESET__SHIFT 0x00000000
+#define DCI_SOFT_RESET__VIP_SOFT_RESET_MASK 0x00000002L
+#define DCI_SOFT_RESET__VIP_SOFT_RESET__SHIFT 0x00000001
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE_MASK 0x000003ffL
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE__SHIFT 0x00000000
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN_MASK 0x000ffc00L
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN__SHIFT 0x0000000a
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED_MASK 0x3ff00000L
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED__SHIFT 0x00000014
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE_MASK 0x00000002L
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE__SHIFT 0x00000001
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_MASK 0x00000001L
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN__SHIFT 0x00000000
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED_MASK 0x0000ffffL
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED__SHIFT 0x00000000
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN_MASK 0x00000020L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN__SHIFT 0x00000005
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT_MASK 0x000000c0L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT__SHIFT 0x00000006
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN_MASK 0x00000010L
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN__SHIFT 0x00000004
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN_MASK 0x00002000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN__SHIFT 0x0000000d
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT_MASK 0x0000c000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT__SHIFT 0x0000000e
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN_MASK 0x00001000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN__SHIFT 0x0000000c
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN_MASK 0x00200000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN__SHIFT 0x00000015
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT_MASK 0x00c00000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT__SHIFT 0x00000016
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN_MASK 0x00100000L
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN__SHIFT 0x00000014
+#define DC_LUT_CONTROL__DC_LUT_INC_B_MASK 0x0000000fL
+#define DC_LUT_CONTROL__DC_LUT_INC_B__SHIFT 0x00000000
+#define DC_LUT_CONTROL__DC_LUT_INC_G_MASK 0x00000f00L
+#define DC_LUT_CONTROL__DC_LUT_INC_G__SHIFT 0x00000008
+#define DC_LUT_CONTROL__DC_LUT_INC_R_MASK 0x000f0000L
+#define DC_LUT_CONTROL__DC_LUT_INC_R__SHIFT 0x00000010
+#define DC_LUT_PWL_DATA__DC_LUT_BASE_MASK 0x0000ffffL
+#define DC_LUT_PWL_DATA__DC_LUT_BASE__SHIFT 0x00000000
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA_MASK 0xffff0000L
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA__SHIFT 0x00000010
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX_MASK 0x000000ffL
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX__SHIFT 0x00000000
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE_MASK 0x00000001L
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE__SHIFT 0x00000000
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR_MASK 0x0000ffffL
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR__SHIFT 0x00000000
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE_MASK 0x00000001L
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN__SHIFT 0x00000000
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED_MASK 0x0000ffffL
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED__SHIFT 0x00000000
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS_MASK 0x80000000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS__SHIFT 0x0000001f
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP_MASK 0x10000000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP__SHIFT 0x0000001c
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE_MASK 0x00001000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE__SHIFT 0x0000000c
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO_MASK 0x00010000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO__SHIFT 0x00000010
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL_MASK 0x00000100L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL__SHIFT 0x00000008
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS_MASK 0x00100000L
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS__SHIFT 0x00000014
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE_MASK 0x00000003L
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE__SHIFT 0x00000000
+#define DCO_CLK_CNTL__DCO_TEST_CLK_SEL_MASK 0x0000001fL
+#define DCO_CLK_CNTL__DCO_TEST_CLK_SEL__SHIFT 0x00000000
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS_MASK 0x00000040L
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS__SHIFT 0x00000006
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x00000100L
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x00000008
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS_MASK 0x00000200L
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS__SHIFT 0x00000009
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x01000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x00000018
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x02000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x00000019
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x04000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x0000001a
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x08000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x0000001b
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x0000001c
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000L
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x0000001d
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS_MASK 0x00000080L
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS__SHIFT 0x00000007
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS_MASK 0x00010000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS__SHIFT 0x00000010
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS_MASK 0x00020000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS__SHIFT 0x00000011
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS_MASK 0x00040000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS__SHIFT 0x00000012
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS_MASK 0x00080000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS__SHIFT 0x00000013
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS_MASK 0x00100000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS__SHIFT 0x00000014
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS_MASK 0x00200000L
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS__SHIFT 0x00000015
+#define DCO_CLK_CNTL__DISPCLK_R_ABM_GATE_DIS_MASK 0x00001000L
+#define DCO_CLK_CNTL__DISPCLK_R_ABM_GATE_DIS__SHIFT 0x0000000c
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS_MASK 0x00000020L
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS__SHIFT 0x00000005
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_ABM_RAMP_DIS_MASK 0x00000040L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_ABM_RAMP_DIS__SHIFT 0x00000006
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACA_RAMP_DIS_MASK 0x00000100L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACA_RAMP_DIS__SHIFT 0x00000008
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACB_RAMP_DIS_MASK 0x00000200L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DACB_RAMP_DIS__SHIFT 0x00000009
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGA_RAMP_DIS_MASK 0x01000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGA_RAMP_DIS__SHIFT 0x00000018
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGB_RAMP_DIS_MASK 0x02000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGB_RAMP_DIS__SHIFT 0x00000019
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGC_RAMP_DIS_MASK 0x04000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGC_RAMP_DIS__SHIFT 0x0000001a
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGD_RAMP_DIS_MASK 0x08000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGD_RAMP_DIS__SHIFT 0x0000001b
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGE_RAMP_DIS_MASK 0x10000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGE_RAMP_DIS__SHIFT 0x0000001c
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGF_RAMP_DIS_MASK 0x20000000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DIGF_RAMP_DIS__SHIFT 0x0000001d
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DVO_RAMP_DIS_MASK 0x00000080L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_DVO_RAMP_DIS__SHIFT 0x00000007
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT0_RAMP_DIS_MASK 0x00010000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT0_RAMP_DIS__SHIFT 0x00000010
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT1_RAMP_DIS_MASK 0x00020000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT1_RAMP_DIS__SHIFT 0x00000011
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT2_RAMP_DIS_MASK 0x00040000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT2_RAMP_DIS__SHIFT 0x00000012
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT3_RAMP_DIS_MASK 0x00080000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT3_RAMP_DIS__SHIFT 0x00000013
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT4_RAMP_DIS_MASK 0x00100000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT4_RAMP_DIS__SHIFT 0x00000014
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT5_RAMP_DIS_MASK 0x00200000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_G_FMT5_RAMP_DIS__SHIFT 0x00000015
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_ABM_RAMP_DIS_MASK 0x00001000L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_ABM_RAMP_DIS__SHIFT 0x0000000c
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_DCO_RAMP_DIS_MASK 0x00000020L
+#define DCO_CLK_RAMP_CNTL__DISPCLK_R_DCO_RAMP_DIS__SHIFT 0x00000005
+#define DCO_LIGHT_SLEEP_DIS__DPA_LIGHT_SLEEP_DIS_MASK 0x00000008L
+#define DCO_LIGHT_SLEEP_DIS__DPA_LIGHT_SLEEP_DIS__SHIFT 0x00000003
+#define DCO_LIGHT_SLEEP_DIS__DPA_MEM_SHUTDOWN_DIS_MASK 0x00020000L
+#define DCO_LIGHT_SLEEP_DIS__DPA_MEM_SHUTDOWN_DIS__SHIFT 0x00000011
+#define DCO_LIGHT_SLEEP_DIS__DPB_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DCO_LIGHT_SLEEP_DIS__DPB_LIGHT_SLEEP_DIS__SHIFT 0x00000004
+#define DCO_LIGHT_SLEEP_DIS__DPB_MEM_SHUTDOWN_DIS_MASK 0x00040000L
+#define DCO_LIGHT_SLEEP_DIS__DPB_MEM_SHUTDOWN_DIS__SHIFT 0x00000012
+#define DCO_LIGHT_SLEEP_DIS__DPC_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DCO_LIGHT_SLEEP_DIS__DPC_LIGHT_SLEEP_DIS__SHIFT 0x00000005
+#define DCO_LIGHT_SLEEP_DIS__DPC_MEM_SHUTDOWN_DIS_MASK 0x00080000L
+#define DCO_LIGHT_SLEEP_DIS__DPC_MEM_SHUTDOWN_DIS__SHIFT 0x00000013
+#define DCO_LIGHT_SLEEP_DIS__DPD_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DCO_LIGHT_SLEEP_DIS__DPD_LIGHT_SLEEP_DIS__SHIFT 0x00000006
+#define DCO_LIGHT_SLEEP_DIS__DPD_MEM_SHUTDOWN_DIS_MASK 0x00100000L
+#define DCO_LIGHT_SLEEP_DIS__DPD_MEM_SHUTDOWN_DIS__SHIFT 0x00000014
+#define DCO_LIGHT_SLEEP_DIS__DPE_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DCO_LIGHT_SLEEP_DIS__DPE_LIGHT_SLEEP_DIS__SHIFT 0x00000007
+#define DCO_LIGHT_SLEEP_DIS__DPE_MEM_SHUTDOWN_DIS_MASK 0x00200000L
+#define DCO_LIGHT_SLEEP_DIS__DPE_MEM_SHUTDOWN_DIS__SHIFT 0x00000015
+#define DCO_LIGHT_SLEEP_DIS__DPF_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DCO_LIGHT_SLEEP_DIS__DPF_LIGHT_SLEEP_DIS__SHIFT 0x00000008
+#define DCO_LIGHT_SLEEP_DIS__DPF_MEM_SHUTDOWN_DIS_MASK 0x00400000L
+#define DCO_LIGHT_SLEEP_DIS__DPF_MEM_SHUTDOWN_DIS__SHIFT 0x00000016
+#define DCO_LIGHT_SLEEP_DIS__HDMI0_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DCO_LIGHT_SLEEP_DIS__HDMI0_LIGHT_SLEEP_DIS__SHIFT 0x00000009
+#define DCO_LIGHT_SLEEP_DIS__HDMI1_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DCO_LIGHT_SLEEP_DIS__HDMI1_LIGHT_SLEEP_DIS__SHIFT 0x0000000a
+#define DCO_LIGHT_SLEEP_DIS__HDMI2_LIGHT_SLEEP_DIS_MASK 0x00000800L
+#define DCO_LIGHT_SLEEP_DIS__HDMI2_LIGHT_SLEEP_DIS__SHIFT 0x0000000b
+#define DCO_LIGHT_SLEEP_DIS__HDMI3_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI3_LIGHT_SLEEP_DIS__SHIFT 0x0000000c
+#define DCO_LIGHT_SLEEP_DIS__HDMI4_LIGHT_SLEEP_DIS_MASK 0x00002000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI4_LIGHT_SLEEP_DIS__SHIFT 0x0000000d
+#define DCO_LIGHT_SLEEP_DIS__HDMI5_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DCO_LIGHT_SLEEP_DIS__HDMI5_LIGHT_SLEEP_DIS__SHIFT 0x0000000e
+#define DCO_LIGHT_SLEEP_DIS__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000002L
+#define DCO_LIGHT_SLEEP_DIS__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x00000001
+#define DCO_LIGHT_SLEEP_DIS__MVP_LIGHT_SLEEP_DIS_MASK 0x00000004L
+#define DCO_LIGHT_SLEEP_DIS__MVP_LIGHT_SLEEP_DIS__SHIFT 0x00000002
+#define DCO_LIGHT_SLEEP_DIS__MVP_MEM_SHUTDOWN_DIS_MASK 0x00010000L
+#define DCO_LIGHT_SLEEP_DIS__MVP_MEM_SHUTDOWN_DIS__SHIFT 0x00000010
+#define DCO_LIGHT_SLEEP_DIS__TVOUT_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define DCO_LIGHT_SLEEP_DIS__TVOUT_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define DCO_MEM_POWER_STATE__DPA_MEM_PWR_STATE_MASK 0x000000c0L
+#define DCO_MEM_POWER_STATE__DPA_MEM_PWR_STATE__SHIFT 0x00000006
+#define DCO_MEM_POWER_STATE__DPB_MEM_PWR_STATE_MASK 0x00000300L
+#define DCO_MEM_POWER_STATE__DPB_MEM_PWR_STATE__SHIFT 0x00000008
+#define DCO_MEM_POWER_STATE__DPC_MEM_PWR_STATE_MASK 0x00000c00L
+#define DCO_MEM_POWER_STATE__DPC_MEM_PWR_STATE__SHIFT 0x0000000a
+#define DCO_MEM_POWER_STATE__DPD_MEM_PWR_STATE_MASK 0x00003000L
+#define DCO_MEM_POWER_STATE__DPD_MEM_PWR_STATE__SHIFT 0x0000000c
+#define DCO_MEM_POWER_STATE__DPE_MEM_PWR_STATE_MASK 0x0000c000L
+#define DCO_MEM_POWER_STATE__DPE_MEM_PWR_STATE__SHIFT 0x0000000e
+#define DCO_MEM_POWER_STATE__DPF_MEM_PWR_STATE_MASK 0x00030000L
+#define DCO_MEM_POWER_STATE__DPF_MEM_PWR_STATE__SHIFT 0x00000010
+#define DCO_MEM_POWER_STATE__HDMI0_MEM_PWR_STATE_MASK 0x000c0000L
+#define DCO_MEM_POWER_STATE__HDMI0_MEM_PWR_STATE__SHIFT 0x00000012
+#define DCO_MEM_POWER_STATE__HDMI1_MEM_PWR_STATE_MASK 0x00300000L
+#define DCO_MEM_POWER_STATE__HDMI1_MEM_PWR_STATE__SHIFT 0x00000014
+#define DCO_MEM_POWER_STATE__HDMI2_MEM_PWR_STATE_MASK 0x00c00000L
+#define DCO_MEM_POWER_STATE__HDMI2_MEM_PWR_STATE__SHIFT 0x00000016
+#define DCO_MEM_POWER_STATE__HDMI3_MEM_PWR_STATE_MASK 0x03000000L
+#define DCO_MEM_POWER_STATE__HDMI3_MEM_PWR_STATE__SHIFT 0x00000018
+#define DCO_MEM_POWER_STATE__HDMI4_MEM_PWR_STATE_MASK 0x0c000000L
+#define DCO_MEM_POWER_STATE__HDMI4_MEM_PWR_STATE__SHIFT 0x0000001a
+#define DCO_MEM_POWER_STATE__HDMI5_MEM_PWR_STATE_MASK 0x30000000L
+#define DCO_MEM_POWER_STATE__HDMI5_MEM_PWR_STATE__SHIFT 0x0000001c
+#define DCO_MEM_POWER_STATE__I2C_MEM_PWR_STATE_MASK 0x0000000cL
+#define DCO_MEM_POWER_STATE__I2C_MEM_PWR_STATE__SHIFT 0x00000002
+#define DCO_MEM_POWER_STATE__MVP_MEM_PWR_STATE_MASK 0x00000030L
+#define DCO_MEM_POWER_STATE__MVP_MEM_PWR_STATE__SHIFT 0x00000004
+#define DCO_MEM_POWER_STATE__TVOUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DCO_MEM_POWER_STATE__TVOUT_MEM_PWR_STATE__SHIFT 0x00000000
+#define DCO_SOFT_RESET__ABM_SOFT_RESET_MASK 0x02000000L
+#define DCO_SOFT_RESET__ABM_SOFT_RESET__SHIFT 0x00000019
+#define DCO_SOFT_RESET__DACA_CFG_IF_SOFT_RESET_MASK 0x20000000L
+#define DCO_SOFT_RESET__DACA_CFG_IF_SOFT_RESET__SHIFT 0x0000001d
+#define DCO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x00000001L
+#define DCO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x00000000
+#define DCO_SOFT_RESET__DACB_SOFT_RESET_MASK 0x00000002L
+#define DCO_SOFT_RESET__DACB_SOFT_RESET__SHIFT 0x00000001
+#define DCO_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCO_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x00000003
+#define DCO_SOFT_RESET__DVO_SOFT_RESET_MASK 0x08000000L
+#define DCO_SOFT_RESET__DVO_SOFT_RESET__SHIFT 0x0000001b
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET_MASK 0x00010000L
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET__SHIFT 0x00000010
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET_MASK 0x00020000L
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET__SHIFT 0x00000011
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET_MASK 0x00040000L
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET__SHIFT 0x00000012
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET_MASK 0x00080000L
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET__SHIFT 0x00000013
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET_MASK 0x00100000L
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET__SHIFT 0x00000014
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET_MASK 0x00200000L
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET__SHIFT 0x00000015
+#define DCO_SOFT_RESET__MVP_SOFT_RESET_MASK 0x01000000L
+#define DCO_SOFT_RESET__MVP_SOFT_RESET__SHIFT 0x00000018
+#define DCO_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCO_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x00000002
+#define DCO_SOFT_RESET__SRBM_SOFT_RESET_ENABLE_MASK 0x10000000L
+#define DCO_SOFT_RESET__SRBM_SOFT_RESET_ENABLE__SHIFT 0x0000001c
+#define DCO_SOFT_RESET__TVOUT_SOFT_RESET_MASK 0x04000000L
+#define DCO_SOFT_RESET__TVOUT_SOFT_RESET__SHIFT 0x0000001a
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL_MASK 0x0000000fL
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL__SHIFT 0x00000000
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS_MASK 0x00000030L
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS__SHIFT 0x00000004
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE_MASK 0x00000001L
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE__SHIFT 0x00000000
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL_MASK 0x00000300L
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL__SHIFT 0x00000008
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL_MASK 0x0000001cL
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL__SHIFT 0x00000002
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT_MASK 0xffffffffL
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT__SHIFT 0x00000000
+#define DCP_CRC_LAST__DCP_CRC_LAST_MASK 0xffffffffL
+#define DCP_CRC_LAST__DCP_CRC_LAST__SHIFT 0x00000000
+#define DCP_CRC_MASK__DCP_CRC_MASK_MASK 0xffffffffL
+#define DCP_CRC_MASK__DCP_CRC_MASK__SHIFT 0x00000000
+#define DCP_DEBUG2__DCP_DEBUG2_MASK 0xffffffffL
+#define DCP_DEBUG2__DCP_DEBUG2__SHIFT 0x00000000
+#define DCP_DEBUG__DCP_DEBUG_MASK 0xffffffffL
+#define DCP_DEBUG__DCP_DEBUG__SHIFT 0x00000000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA_MASK 0x0003ffffL
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA__SHIFT 0x00000000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX_MASK 0x07f00000L
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX__SHIFT 0x00000014
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG_MASK 0xffff0000L
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG__SHIFT 0x00000010
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS_MASK 0x00000004L
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS__SHIFT 0x00000002
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY_MASK 0x00000001L
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY__SHIFT 0x00000000
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE_MASK 0x00000002L
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE__SHIFT 0x00000001
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG_MASK 0xffffffffL
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG__SHIFT 0x00000000
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG_MASK 0xffffffffL
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG__SHIFT 0x00000000
+#define DCP_GSL_CONTROL__DCP_GSL0_EN_MASK 0x00000001L
+#define DCP_GSL_CONTROL__DCP_GSL0_EN__SHIFT 0x00000000
+#define DCP_GSL_CONTROL__DCP_GSL1_EN_MASK 0x00000002L
+#define DCP_GSL_CONTROL__DCP_GSL1_EN__SHIFT 0x00000001
+#define DCP_GSL_CONTROL__DCP_GSL2_EN_MASK 0x00000004L
+#define DCP_GSL_CONTROL__DCP_GSL2_EN__SHIFT 0x00000002
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_MASK 0x08000000L
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING__SHIFT 0x0000001b
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY_MASK 0xf0000000L
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY__SHIFT 0x0000001c
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY_MASK 0x0000f000L
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY__SHIFT 0x0000000c
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN_MASK 0x00010000L
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN__SHIFT 0x00000010
+#define DCP_GSL_CONTROL__DCP_GSL_MODE_MASK 0x00000300L
+#define DCP_GSL_CONTROL__DCP_GSL_MODE__SHIFT 0x00000008
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE_MASK 0x03000000L
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE__SHIFT 0x00000018
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000c000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0x0000000e
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS_MASK 0x00000400L
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS__SHIFT 0x0000000a
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x00000010
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0x0000000d
+#define DC_PINSTRAPS__DC_PINSTRAPS_VIP_DEVICE_MASK 0x00000800L
+#define DC_PINSTRAPS__DC_PINSTRAPS_VIP_DEVICE__SHIFT 0x0000000b
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP_MASK 0x0000000fL
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP__SHIFT 0x00000000
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP_MASK 0x000000f0L
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP__SHIFT 0x00000004
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED_MASK 0x00ff0000L
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED__SHIFT 0x00000010
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED_MASK 0x0000ff00L
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED__SHIFT 0x00000008
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED_MASK 0x000000ffL
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED__SHIFT 0x00000000
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE_MASK 0x00000100L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE__SHIFT 0x00000008
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE_MASK 0x00000400L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE__SHIFT 0x0000000a
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE_MASK 0x00000200L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE__SHIFT 0x00000009
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH_MASK 0x00000040L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH__SHIFT 0x00000006
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN_MASK 0x00000001L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN__SHIFT 0x00000000
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE_MASK 0x00000030L
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE__SHIFT 0x00000004
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_DELAY_MASK 0x00000007L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_DELAY__SHIFT 0x00000000
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT0_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_DELAY_MASK 0x00000070L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_DELAY__SHIFT 0x00000004
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_TIMEOUT_DIS_MASK 0x00000080L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT1_RDWR_TIMEOUT_DIS__SHIFT 0x00000007
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_DELAY_MASK 0x00000700L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_DELAY__SHIFT 0x00000008
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_TIMEOUT_DIS_MASK 0x00000800L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT2_RDWR_TIMEOUT_DIS__SHIFT 0x0000000b
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_DELAY_MASK 0x00007000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_DELAY__SHIFT 0x0000000c
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_TIMEOUT_DIS_MASK 0x00008000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT3_RDWR_TIMEOUT_DIS__SHIFT 0x0000000f
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_DELAY_MASK 0x00070000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_DELAY__SHIFT 0x00000010
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_TIMEOUT_DIS_MASK 0x00080000L
+#define DC_RBBMIF_RDWR_CNTL1__DC_RBBMIF_CLIENT4_RDWR_TIMEOUT_DIS__SHIFT 0x00000013
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_DELAY_MASK 0x00000007L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_DELAY__SHIFT 0x00000000
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT8_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_DELAY_MASK 0x00000070L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_DELAY__SHIFT 0x00000004
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_TIMEOUT_DIS_MASK 0x00000080L
+#define DC_RBBMIF_RDWR_CNTL2__DC_RBBMIF_CLIENT9_RDWR_TIMEOUT_DIS__SHIFT 0x00000007
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x00000008
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL_MASK 0x00000003L
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL__SHIFT 0x00000000
+#define DC_XDMA_INTERFACE_CNTL__DC_FLIP_PENDING_TO_DCP_MASK 0x00400000L
+#define DC_XDMA_INTERFACE_CNTL__DC_FLIP_PENDING_TO_DCP__SHIFT 0x00000016
+#define DC_XDMA_INTERFACE_CNTL__DC_XDMA_FLIP_PENDING_MASK 0x00010000L
+#define DC_XDMA_INTERFACE_CNTL__DC_XDMA_FLIP_PENDING__SHIFT 0x00000010
+#define DC_XDMA_INTERFACE_CNTL__XDMA_M_FLIP_PENDING_TO_DCP_MASK 0x00100000L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_M_FLIP_PENDING_TO_DCP__SHIFT 0x00000014
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_ENABLE_MASK 0x0000003fL
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_ENABLE__SHIFT 0x00000000
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_SEL_MASK 0x00000700L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_PIPE_SEL__SHIFT 0x00000008
+#define DC_XDMA_INTERFACE_CNTL__XDMA_S_FLIP_PENDING_TO_DCP_MASK 0x00200000L
+#define DC_XDMA_INTERFACE_CNTL__XDMA_S_FLIP_PENDING_TO_DCP__SHIFT 0x00000015
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE_MASK 0x00003000L
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
+#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
+#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
+#define DENORM_CONTROL__DENORM_MODE__SHIFT 0x00000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x00000013
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0x0000000f
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x00000011
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x00000012
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007f00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x00000008
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007fL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x00000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE__SHIFT 0x00000014
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG__SHIFT 0x00000015
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG__SHIFT 0x00000016
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER_MASK 0x7f000000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER__SHIFT 0x00000018
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00003f00L
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x00000008
+#define DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+#define DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x0000001c
+#define DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG_BE_CNTL__DIG_MODE__SHIFT 0x00000010
+#define DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x00000000
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x00000008
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003ffL
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x00000000
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT_MASK 0x00000001L
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT__SHIFT 0x00000000
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK_MASK 0x00000100L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK__SHIFT 0x00000008
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK 0x00000010L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK_MASK 0x00001000L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0x0000000c
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT__SHIFT 0x00000004
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_MASK 0x00000001L
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED__SHIFT 0x00000000
+#define DIG_FE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00010000L
+#define DIG_FE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x00000010
+#define DIG_FE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00100000L
+#define DIG_FE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x00000014
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x00000000
+#define DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG_FE_CNTL__DIG_START__SHIFT 0x0000000a
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x00000008
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x00000004
+#define DIG_FE_CNTL__DIG_SWAP_MASK 0x00040000L
+#define DIG_FE_CNTL__DIG_SWAP__SHIFT 0x00000012
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x00000018
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x00000008
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x00000000
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001f0000L
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
+#define DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+#define DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x00000008
+#define DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x00000000
+#define DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x00000001
+#define DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x00000002
+#define DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x00000003
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x00000008
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x00000000
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x00000004
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3fffffffL
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x00000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00ffffffL
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x00000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x00000018
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET_MASK 0x00000002L
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET__SHIFT 0x00000001
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET_MASK 0x00000001L
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET__SHIFT 0x00000000
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET__SHIFT 0x00000005
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET_MASK 0x00000010L
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET__SHIFT 0x00000004
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET_MASK 0x00000200L
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET__SHIFT 0x00000009
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET_MASK 0x00000100L
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET__SHIFT 0x00000008
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET_MASK 0x00002000L
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET__SHIFT 0x0000000d
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET_MASK 0x00001000L
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET__SHIFT 0x0000000c
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET_MASK 0x00020000L
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET__SHIFT 0x00000011
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET_MASK 0x00010000L
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET__SHIFT 0x00000010
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET_MASK 0x00200000L
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET__SHIFT 0x00000015
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET_MASK 0x00100000L
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET__SHIFT 0x00000014
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x00000001
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x00000004
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x00000005
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03ff0000L
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x00000010
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x00000006
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x00000000
+#define DIG_TEST_PATTERN__LVDS_EYE_PATTERN_MASK 0x00000100L
+#define DIG_TEST_PATTERN__LVDS_EYE_PATTERN__SHIFT 0x00000008
+#define DIG_TEST_PATTERN__LVDS_TEST_CLOCK_DATA_MASK 0x00000004L
+#define DIG_TEST_PATTERN__LVDS_TEST_CLOCK_DATA__SHIFT 0x00000002
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x0000001e
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x0000001c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x0000001d
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x0000001f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x00000014
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0e000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x00000019
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003fffL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x00000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000f0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x0000001e
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x0000001c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x0000001d
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x00000014
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x00000013
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_TIMER_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_TIMER_INTERRUPT__SHIFT 0x00000018
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x00000006
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x00000005
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x0000000a
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT__SHIFT 0x00000004
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT__SHIFT 0x00000007
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT__SHIFT 0x00000008
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT__SHIFT 0x00000009
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT__SHIFT 0x00000016
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT__SHIFT 0x00000017
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x00000011
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x00000012
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT__SHIFT 0x00000019
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x00000018
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT__SHIFT 0x00000015
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x0000000f
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x00000010
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x0000001f
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x0000001b
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x0000001a
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT__SHIFT 0x00000003
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT__SHIFT 0x00000002
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT__SHIFT 0x00000000
+#define DISPOUT_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL_MASK 0x00000007L
+#define DISPOUT_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL__SHIFT 0x00000000
+#define DISPOUT_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL_MASK 0x00070000L
+#define DISPOUT_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL__SHIFT 0x00000010
+#define DISPPLL_BG_CNTL__DISPPLL_BG_ADJ_MASK 0x000000f0L
+#define DISPPLL_BG_CNTL__DISPPLL_BG_ADJ__SHIFT 0x00000004
+#define DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK 0x00000001L
+#define DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT 0x00000000
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01ffffffL
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x00000000
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x00000019
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK 0x40000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MSK_MASK 0x08000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_MSK__SHIFT 0x0000001b
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_RUNNING_MASK 0x04000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_RUNNING__SHIFT 0x0000001a
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT__SHIFT 0x0000001e
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x20000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x0000001d
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_MASK 0x10000000L
+#define DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT__SHIFT 0x0000001c
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC_MASK 0x00000004L
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC__SHIFT 0x00000002
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC_MASK 0x00000008L
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC__SHIFT 0x00000003
+#define DMCU_CTRL__DMCU_ENABLE_MASK 0x00000010L
+#define DMCU_CTRL__DMCU_ENABLE__SHIFT 0x00000004
+#define DMCU_CTRL__IGNORE_PWRMGT_MASK 0x00000002L
+#define DMCU_CTRL__IGNORE_PWRMGT__SHIFT 0x00000001
+#define DMCU_CTRL__RESET_UC_MASK 0x00000001L
+#define DMCU_CTRL__RESET_UC__SHIFT 0x00000000
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT_MASK 0xffc00000L
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT__SHIFT 0x00000016
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR_MASK 0x0000ffffL
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR__SHIFT 0x00000000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE_MASK 0x000f0000L
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE__SHIFT 0x00000010
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE_MASK 0x00100000L
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE__SHIFT 0x00000014
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA_MASK 0xffffffffL
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA__SHIFT 0x00000000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK 0x0000ffffL
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT 0x00000000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE_MASK 0x000f0000L
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE__SHIFT 0x00000010
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE_MASK 0x00100000L
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE__SHIFT 0x00000014
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA_MASK 0xffffffffL
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA__SHIFT 0x00000000
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC_MASK 0x00000001L
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC__SHIFT 0x00000000
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST_MASK 0x00800000L
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST__SHIFT 0x00000017
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE_MASK 0x007f0000L
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE__SHIFT 0x00000010
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS_MASK 0x0000000cL
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS__SHIFT 0x00000002
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS_MASK 0x00000003L
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS__SHIFT 0x00000000
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI_MASK 0xffffffffL
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI__SHIFT 0x00000000
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO_MASK 0xffffffffL
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO__SHIFT 0x00000000
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT_MASK 0x00ff0000L
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT__SHIFT 0x00000010
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT_MASK 0x000000ffL
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT__SHIFT 0x00000000
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT_MASK 0x0000ff00L
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR__SHIFT 0x00000002
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED_MASK 0x00000004L
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED__SHIFT 0x00000002
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR__SHIFT 0x00000000
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED_MASK 0x00000001L
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED__SHIFT 0x00000000
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR__SHIFT 0x00000001
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED_MASK 0x00000002L
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED__SHIFT 0x00000001
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x00000012
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x00040000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000012
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED_MASK 0x00001000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x00000013
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x00080000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000013
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED_MASK 0x00002000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0x00000014
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x00100000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000014
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED_MASK 0x00004000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0x00000015
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x00200000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000015
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED_MASK 0x00008000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x00000016
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x00400000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000016
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x00000010
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED_MASK 0x00010000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x00000010
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x00000017
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x00800000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0x00000017
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x00000011
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED_MASK 0x00020000L
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0x00000011
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED_MASK 0x00000100L
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED__SHIFT 0x00000008
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED_MASK 0x00000008L
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED__SHIFT 0x00000003
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED_MASK 0x00000200L
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED__SHIFT 0x00000009
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK 0x00000400L
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED_MASK 0x00000800L
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR__SHIFT 0x00000018
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED_MASK 0x01000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED__SHIFT 0x00000018
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR__SHIFT 0x00000019
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED_MASK 0x02000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED__SHIFT 0x00000019
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED_MASK 0x04000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED_MASK 0x08000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED_MASK 0x10000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR_MASK 0x20000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED_MASK 0x20000000L
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_MASK_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_MASK__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_MASK__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_MASK_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_MASK__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_MASK_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_MASK__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_MASK_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_MASK__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_MASK_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_MASK__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_MASK_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_MASK__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_MASK_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_MASK__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_MASK_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_MASK__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_MASK_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_MASK__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_MASK_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_MASK__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_MASK_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_MASK__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK_MASK 0x00000200L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK__SHIFT 0x00000009
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK_MASK 0x00000400L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK__SHIFT 0x0000000a
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK_MASK 0x00000800L
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK__SHIFT 0x0000000b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN__SHIFT 0x00000008
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN__SHIFT 0x00000003
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN__SHIFT 0x00000018
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN__SHIFT 0x00000019
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN__SHIFT 0x0000001d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x00000004L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x00000002
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000001L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x00000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x00000002L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x00000001
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00040000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000012
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00001000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00080000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000013
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00002000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00100000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000014
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00004000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000e
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00200000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000015
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00008000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0000000f
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00400000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000016
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00010000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000010
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x00800000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x00000017
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x00020000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000011
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL_MASK 0x00000100L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL__SHIFT 0x00000008
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL_MASK 0x00000008L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL__SHIFT 0x00000003
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL_MASK 0x01000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL__SHIFT 0x00000018
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL_MASK 0x02000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL__SHIFT 0x00000019
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL_MASK 0x04000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL_MASK 0x08000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL_MASK 0x10000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL_MASK 0x20000000L
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL__SHIFT 0x0000001d
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR_MASK 0x000003ffL
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR__SHIFT 0x00000000
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA_MASK 0x000000ffL
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA__SHIFT 0x00000000
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR_MASK 0x000003ffL
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR__SHIFT 0x00000000
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA_MASK 0x000000ffL
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA__SHIFT 0x00000000
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB_MASK 0x000000ffL
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB__SHIFT 0x00000000
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB_MASK 0x0000ff00L
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB__SHIFT 0x00000008
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK 0x00000010L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN__SHIFT 0x00000004
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC_MASK 0x00000002L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC__SHIFT 0x00000001
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK 0x00000001L
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC__SHIFT 0x00000000
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN_MASK 0x00000020L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN__SHIFT 0x00000005
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC_MASK 0x00000008L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC__SHIFT 0x00000003
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC_MASK 0x00000004L
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC__SHIFT 0x00000002
+#define DMCU_RAM_ACCESS_CTRL__UC_RST_RELEASE_DELAY_CNT_MASK 0x0000ff00L
+#define DMCU_RAM_ACCESS_CTRL__UC_RST_RELEASE_DELAY_CNT__SHIFT 0x00000008
+#define DMCU_STATUS__UC_IN_RESET_MASK 0x00000001L
+#define DMCU_STATUS__UC_IN_RESET__SHIFT 0x00000000
+#define DMCU_STATUS__UC_IN_STOP_MODE_MASK 0x00000004L
+#define DMCU_STATUS__UC_IN_STOP_MODE__SHIFT 0x00000002
+#define DMCU_STATUS__UC_IN_WAIT_MODE_MASK 0x00000002L
+#define DMCU_STATUS__UC_IN_WAIT_MODE__SHIFT 0x00000001
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY_MASK 0x00000700L
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY__SHIFT 0x00000008
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY_MASK 0x00000007L
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY__SHIFT 0x00000000
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN_MASK 0x00010000L
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN__SHIFT 0x00000010
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP_MASK 0x00000008L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP__SHIFT 0x00000003
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN_MASK 0x00000001L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN__SHIFT 0x00000000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE_MASK 0x00004000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE__SHIFT 0x0000000e
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW_MASK 0x00008000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW__SHIFT 0x0000000f
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT_MASK 0x00000200L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT__SHIFT 0x00000009
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT_MASK 0x00000004L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT__SHIFT 0x00000002
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1_MASK 0x00002000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1__SHIFT 0x0000000d
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2_MASK 0x00001000L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2__SHIFT 0x0000000c
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3_MASK 0x00000800L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3__SHIFT 0x0000000b
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5_MASK 0x00000400L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5__SHIFT 0x0000000a
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1_MASK 0x00000080L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1__SHIFT 0x00000007
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2_MASK 0x00000040L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2__SHIFT 0x00000006
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3_MASK 0x00000020L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3__SHIFT 0x00000005
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4_MASK 0x00000010L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4__SHIFT 0x00000004
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW_MASK 0x00000100L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW__SHIFT 0x00000008
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN_MASK 0x00000002L
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN__SHIFT 0x00000001
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE_MASK 0x30000000L
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE__SHIFT 0x0000001c
+#define DMIF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define DMIF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define DMIF_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define DMIF_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define DMIF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define DMIF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define DMIF_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define DMIF_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define DMIF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define DMIF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define DMIF_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define DMIF_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define DMIF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define DMIF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD_MASK 0x0000ffffL
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD__SHIFT 0x00000000
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT_MASK 0xffff0000L
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT__SHIFT 0x00000010
+#define DMIF_CONTROL__DMIF_BUFF_SIZE_MASK 0x00000003L
+#define DMIF_CONTROL__DMIF_BUFF_SIZE__SHIFT 0x00000000
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN_MASK 0x60000000L
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN__SHIFT 0x0000001d
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION_MASK 0x1f000000L
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION__SHIFT 0x00000018
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT_MASK 0x00000010L
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT__SHIFT 0x00000004
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE_MASK 0x0000f000L
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE__SHIFT 0x0000000c
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK_MASK 0x00000004L
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK__SHIFT 0x00000002
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS_MASK 0x003f0000L
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS__SHIFT 0x00000010
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE_MASK 0x00000700L
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE__SHIFT 0x00000008
+#define DMIF_DEBUG02_CORE0__DB_DATA_MASK 0x0000ffffL
+#define DMIF_DEBUG02_CORE0__DB_DATA__SHIFT 0x00000000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN_MASK 0x00010000L
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN__SHIFT 0x00000010
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER_MASK 0x0ffe0000L
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER__SHIFT 0x00000011
+#define DMIF_DEBUG02_CORE1__DB_DATA_MASK 0x0000ffffL
+#define DMIF_DEBUG02_CORE1__DB_DATA__SHIFT 0x00000000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN_MASK 0x00010000L
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN__SHIFT 0x00000010
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER_MASK 0x0ffe0000L
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER__SHIFT 0x00000011
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG_MASK 0xffffffffL
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG__SHIFT 0x00000000
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS_MASK 0x00000100L
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS__SHIFT 0x00000008
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS_MASK 0x00000200L
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS__SHIFT 0x00000009
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS_MASK 0x00000001L
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS__SHIFT 0x00000000
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS_MASK 0x00000002L
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS__SHIFT 0x00000001
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS_MASK 0x00000004L
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS__SHIFT 0x00000002
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS_MASK 0x00000008L
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS__SHIFT 0x00000003
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS_MASK 0x00000010L
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS__SHIFT 0x00000004
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS_MASK 0x00000020L
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS__SHIFT 0x00000005
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE_MASK 0x00003f00L
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x00000008
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x00010000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x00000010
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT_MASK 0x00700000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT__SHIFT 0x00000014
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x00020000L
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x00000011
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE_MASK 0x0000003fL
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE__SHIFT 0x00000000
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT_MASK 0x07000000L
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT__SHIFT 0x00000018
+#define DMIF_STATUS__DMIF_UNDERFLOW_MASK 0x10000000L
+#define DMIF_STATUS__DMIF_UNDERFLOW__SHIFT 0x0000001c
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT_MASK 0x00000070L
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT__SHIFT 0x00000004
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT_MASK 0x00000007L
+#define DOUT_DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT__SHIFT 0x00000000
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x00000008
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DOUT_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x00000000
+#define DOUT_SCRATCH0__DOUT_SCRATCH0_MASK 0xffffffffL
+#define DOUT_SCRATCH0__DOUT_SCRATCH0__SHIFT 0x00000000
+#define DOUT_SCRATCH1__DOUT_SCRATCH1_MASK 0xffffffffL
+#define DOUT_SCRATCH1__DOUT_SCRATCH1__SHIFT 0x00000000
+#define DOUT_SCRATCH2__DOUT_SCRATCH2_MASK 0xffffffffL
+#define DOUT_SCRATCH2__DOUT_SCRATCH2__SHIFT 0x00000000
+#define DOUT_SCRATCH3__DOUT_SCRATCH3_MASK 0xffffffffL
+#define DOUT_SCRATCH3__DOUT_SCRATCH3__SHIFT 0x00000000
+#define DOUT_SCRATCH4__DOUT_SCRATCH4_MASK 0xffffffffL
+#define DOUT_SCRATCH4__DOUT_SCRATCH4__SHIFT 0x00000000
+#define DOUT_SCRATCH5__DOUT_SCRATCH5_MASK 0xffffffffL
+#define DOUT_SCRATCH5__DOUT_SCRATCH5__SHIFT 0x00000000
+#define DOUT_SCRATCH6__DOUT_SCRATCH6_MASK 0xffffffffL
+#define DOUT_SCRATCH6__DOUT_SCRATCH6__SHIFT 0x00000000
+#define DOUT_SCRATCH7__DOUT_SCRATCH7_MASK 0xffffffffL
+#define DOUT_SCRATCH7__DOUT_SCRATCH7__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_DATA__DOUT_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DOUT_TEST_DEBUG_DATA__DOUT_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DOUT_TEST_DEBUG_INDEX__DOUT_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_AUX1_DEBUG_A__DP_AUX1_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_A__DP_AUX1_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_B__DP_AUX1_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_B__DP_AUX1_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_C__DP_AUX1_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_C__DP_AUX1_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_D__DP_AUX1_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_D__DP_AUX1_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_E__DP_AUX1_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_E__DP_AUX1_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_F__DP_AUX1_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_F__DP_AUX1_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_G__DP_AUX1_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_G__DP_AUX1_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_H__DP_AUX1_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_H__DP_AUX1_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX1_DEBUG_I__DP_AUX1_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX1_DEBUG_I__DP_AUX1_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_A__DP_AUX2_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_A__DP_AUX2_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_B__DP_AUX2_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_B__DP_AUX2_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_C__DP_AUX2_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_C__DP_AUX2_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_D__DP_AUX2_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_D__DP_AUX2_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_E__DP_AUX2_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_E__DP_AUX2_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_F__DP_AUX2_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_F__DP_AUX2_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_G__DP_AUX2_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_G__DP_AUX2_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_H__DP_AUX2_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_H__DP_AUX2_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX2_DEBUG_I__DP_AUX2_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX2_DEBUG_I__DP_AUX2_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_A__DP_AUX3_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_A__DP_AUX3_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_B__DP_AUX3_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_B__DP_AUX3_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_C__DP_AUX3_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_C__DP_AUX3_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_D__DP_AUX3_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_D__DP_AUX3_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_E__DP_AUX3_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_E__DP_AUX3_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_F__DP_AUX3_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_F__DP_AUX3_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_G__DP_AUX3_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_G__DP_AUX3_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_H__DP_AUX3_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_H__DP_AUX3_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX3_DEBUG_I__DP_AUX3_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX3_DEBUG_I__DP_AUX3_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_A__DP_AUX4_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_A__DP_AUX4_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_B__DP_AUX4_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_B__DP_AUX4_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_C__DP_AUX4_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_C__DP_AUX4_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_D__DP_AUX4_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_D__DP_AUX4_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_E__DP_AUX4_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_E__DP_AUX4_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_F__DP_AUX4_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_F__DP_AUX4_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_G__DP_AUX4_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_G__DP_AUX4_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_H__DP_AUX4_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_H__DP_AUX4_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX4_DEBUG_I__DP_AUX4_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX4_DEBUG_I__DP_AUX4_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_A__DP_AUX5_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_A__DP_AUX5_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_B__DP_AUX5_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_B__DP_AUX5_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_C__DP_AUX5_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_C__DP_AUX5_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_D__DP_AUX5_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_D__DP_AUX5_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_E__DP_AUX5_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_E__DP_AUX5_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_F__DP_AUX5_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_F__DP_AUX5_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_G__DP_AUX5_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_G__DP_AUX5_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_H__DP_AUX5_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_H__DP_AUX5_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX5_DEBUG_I__DP_AUX5_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX5_DEBUG_I__DP_AUX5_DEBUG_I__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_A__DP_AUX6_DEBUG_A_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_A__DP_AUX6_DEBUG_A__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_B__DP_AUX6_DEBUG_B_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_B__DP_AUX6_DEBUG_B__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_C__DP_AUX6_DEBUG_C_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_C__DP_AUX6_DEBUG_C__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_D__DP_AUX6_DEBUG_D_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_D__DP_AUX6_DEBUG_D__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_E__DP_AUX6_DEBUG_E_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_E__DP_AUX6_DEBUG_E__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_F__DP_AUX6_DEBUG_F_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_F__DP_AUX6_DEBUG_F__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_G__DP_AUX6_DEBUG_G_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_G__DP_AUX6_DEBUG_G__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_H__DP_AUX6_DEBUG_H_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_H__DP_AUX6_DEBUG_H__SHIFT 0x00000000
+#define DP_AUX6_DEBUG_I__DP_AUX6_DEBUG_I_MASK 0xffffffffL
+#define DP_AUX6_DEBUG_I__DP_AUX6_DEBUG_I__SHIFT 0x00000000
+#define DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+#define DP_CONFIG__DP_UDI_LANES__SHIFT 0x00000000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x00000018
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x00000010
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x00000008
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x00000000
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x00000001
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x00000002
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x00000003
+#define DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x00000010
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x00000018
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x00000000
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00ff0000L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x00000010
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x00000004
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x00000004
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x00000000
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003fL
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x00000000
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003f00L
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x00000010
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x00000008
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x00000000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000ff00L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x00000008
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00ff0000L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x00000010
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xff000000L
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x00000018
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000ffL
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x00000000
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000fff00L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x00000008
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xfff00000L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x00000014
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x00000002
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x00000000
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x00000001
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0x0000000c
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x00000008
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x00000004
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x00000000
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x00000000
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x00000008
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x00000004
+#define DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003ffL
+#define DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x00000000
+#define DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000ffc00L
+#define DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0x0000000a
+#define DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3ff00000L
+#define DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x00000014
+#define DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003ffL
+#define DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x00000000
+#define DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000ffc00L
+#define DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0x0000000a
+#define DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3ff00000L
+#define DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x00000014
+#define DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003ffL
+#define DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x00000000
+#define DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000ffc00L
+#define DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0x0000000a
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x00000000
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xffffffffL
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x00000000
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xffffffffL
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x00000000
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xffffffffL
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x00000000
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xffffffffL
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x00000000
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xffffffffL
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x00000000
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xffffffffL
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x00000000
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xffffffffL
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x00000000
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xffffffffL
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x00000000
+#define DP_DTO4_MODULO__DP_DTO4_MODULO_MASK 0xffffffffL
+#define DP_DTO4_MODULO__DP_DTO4_MODULO__SHIFT 0x00000000
+#define DP_DTO4_PHASE__DP_DTO4_PHASE_MASK 0xffffffffL
+#define DP_DTO4_PHASE__DP_DTO4_PHASE__SHIFT 0x00000000
+#define DP_DTO5_MODULO__DP_DTO5_MODULO_MASK 0xffffffffL
+#define DP_DTO5_MODULO__DP_DTO5_MODULO__SHIFT 0x00000000
+#define DP_DTO5_PHASE__DP_DTO5_PHASE_MASK 0xffffffffL
+#define DP_DTO5_PHASE__DP_DTO5_PHASE__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000L
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x00000010
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0x0000ffffL
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0x0000ffffL
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x00000000
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000L
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x00000010
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x00000010L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x00000004
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x00000100L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x00000008
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x00003000L
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0x0000000c
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x00000400L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0x0000000a
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000200L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x00000009
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x00000100L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x00000008
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000010L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x00000004
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x00000001L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x00000000
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x00000010L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x00000004
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x00000080L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x00000007
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x00000020L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x00000005
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x00000040L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x00000006
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x00000800L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0x0000000b
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x00000400L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0x0000000a
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x00000200L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x00000009
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x00000100L
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x00000008
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x00000001L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x00000010L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x00000004
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x00000080L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x00000007
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x00000020L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x00000005
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x00000040L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x00000006
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x00000800L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0x0000000b
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x00000400L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0x0000000a
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x00000200L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x00000009
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x00000100L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x00000008
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000L
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x00000010
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0x0000ffffL
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x00000000
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x00000011
+#define DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x00000008
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x00000004
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003ffffL
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x00000000
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x00000018
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x0000001c
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE_MASK 0x00000100L
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE__SHIFT 0x00000008
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_MASK 0x000000ffL
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE__SHIFT 0x00000000
+#define DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000f8L
+#define DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x00000003
+#define DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000ff00L
+#define DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x00000008
+#define DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00ff0000L
+#define DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x00000010
+#define DP_MSA_MISC__DP_MSA_MISC4_MASK 0xff000000L
+#define DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x00000018
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN_MASK 0x00000001L
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN__SHIFT 0x00000000
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE_MASK 0x0001fff0L
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE__SHIFT 0x00000004
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE_MASK 0x1fff0000L
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE__SHIFT 0x00000010
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE_MASK 0x00001fffL
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE__SHIFT 0x00000000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003ffL
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x00000000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x00000010
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x00000000
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x00000004
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x00000008
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xfc000000L
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x0000001a
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03ffffffL
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x00000000
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x00000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003f00L
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x00000008
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3f000000L
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x00000018
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x00000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x00000010
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003f00L
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x00000008
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3f000000L
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x00000018
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x00000000
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x00000010
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003f00L
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x00000008
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3f000000L
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x00000018
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x00000000
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x00000010
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x00000008
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x00000000
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x00000018
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE_MASK 0x00000100L
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE__SHIFT 0x00000008
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000003L
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x00000000
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE_MASK 0x00010000L
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE__SHIFT 0x00000010
+#define DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00ffffffL
+#define DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x00000000
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00ffffffL
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x00000000
+#define DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00ffffffL
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x00000000
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00ffffffL
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x00000000
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x00000000
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x00000010
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0x0000000c
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x00000004
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x00000008
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE_MASK 0x01000000L
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE__SHIFT 0x00000018
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x00000014
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x00000015
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x00000016
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x00000017
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x0000001c
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x00000000
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000fffL
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x00000000
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000ffffL
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x00000000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003fffL
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x00000000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xffff0000L
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x00000010
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x0000001c
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x0000001d
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x00000018
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x00000014
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x00000010
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000eL
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x00000001
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x00000004
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003f00L
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x00000008
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x00000000
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x00000000
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x00000006
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x00000004
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x00000005
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x00000007
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0x0000000c
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x00000008
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x00000001
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x00000000
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x00000002
+#define DP_VID_M__DP_VID_M_MASK 0x00ffffffL
+#define DP_VID_M__DP_VID_M__SHIFT 0x00000000
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000fffL
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x00000000
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE_MASK 0x00010000L
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE__SHIFT 0x00000010
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x00000018
+#define DP_VID_N__DP_VID_N_MASK 0x00ffffffL
+#define DP_VID_N__DP_VID_N__SHIFT 0x00000000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x00000014
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x00000008
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x00000000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x00000010
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x00000008
+#define DP_VID_TIMING__DP_VID_N_DIV_MASK 0xff000000L
+#define DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x00000018
+#define DP_VID_TIMING__DP_VID_TIMING_MODE_MASK 0x00000001L
+#define DP_VID_TIMING__DP_VID_TIMING_MODE__SHIFT 0x00000000
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKC_CNTL__DVOACLKC_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKC_CNTL__DVOACLKC_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKC_CNTL__DVOACLKC_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKC_CNTL__DVOACLKC_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKC_CNTL__DVOACLKC_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKC_CNTL__DVOACLKC_IN_PHASE_MASK 0x00040000L
+#define DVOACLKC_CNTL__DVOACLKC_IN_PHASE__SHIFT 0x00000012
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_IN_PHASE_MASK 0x00040000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_IN_PHASE__SHIFT 0x00000012
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_MASK 0x00100000L
+#define DVOACLKC_MVP_CNTL__DVOACLKC_MVP_SKEW_PHASE_OVERRIDE__SHIFT 0x00000014
+#define DVOACLKC_MVP_CNTL__MVP_CLK_A_SRC_SEL_MASK 0x03000000L
+#define DVOACLKC_MVP_CNTL__MVP_CLK_A_SRC_SEL__SHIFT 0x00000018
+#define DVOACLKC_MVP_CNTL__MVP_CLK_B_SRC_SEL_MASK 0x30000000L
+#define DVOACLKC_MVP_CNTL__MVP_CLK_B_SRC_SEL__SHIFT 0x0000001c
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_ADJUST_EN_MASK 0x00020000L
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_ADJUST_EN__SHIFT 0x00000011
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_SKEW_CNTL_MASK 0x00001f00L
+#define DVOACLKD_CNTL__DVOACLKD_COARSE_SKEW_CNTL__SHIFT 0x00000008
+#define DVOACLKD_CNTL__DVOACLKD_FINE_ADJUST_EN_MASK 0x00010000L
+#define DVOACLKD_CNTL__DVOACLKD_FINE_ADJUST_EN__SHIFT 0x00000010
+#define DVOACLKD_CNTL__DVOACLKD_FINE_SKEW_CNTL_MASK 0x00000007L
+#define DVOACLKD_CNTL__DVOACLKD_FINE_SKEW_CNTL__SHIFT 0x00000000
+#define DVOACLKD_CNTL__DVOACLKD_IN_PHASE_MASK 0x00040000L
+#define DVOACLKD_CNTL__DVOACLKD_IN_PHASE__SHIFT 0x00000012
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE_MASK 0x00000001L
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE__SHIFT 0x00000000
+#define DVO_CONTROL__DVO_COLOR_FORMAT_MASK 0x03000000L
+#define DVO_CONTROL__DVO_COLOR_FORMAT__SHIFT 0x00000018
+#define DVO_CONTROL__DVO_CTL3_MASK 0x80000000L
+#define DVO_CONTROL__DVO_CTL3__SHIFT 0x0000001f
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN_MASK 0x00000100L
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN__SHIFT 0x00000008
+#define DVO_CONTROL__DVO_INVERT_DVOCLK_MASK 0x00040000L
+#define DVO_CONTROL__DVO_INVERT_DVOCLK__SHIFT 0x00000012
+#define DVO_CONTROL__DVO_RATE_SELECT_MASK 0x00000001L
+#define DVO_CONTROL__DVO_RATE_SELECT__SHIFT 0x00000000
+#define DVO_CONTROL__DVO_RESET_FIFO_MASK 0x00010000L
+#define DVO_CONTROL__DVO_RESET_FIFO__SHIFT 0x00000010
+#define DVO_CONTROL__DVO_SDRCLK_SEL_MASK 0x00000002L
+#define DVO_CONTROL__DVO_SDRCLK_SEL__SHIFT 0x00000001
+#define DVO_CONTROL__DVO_SYNC_PHASE_MASK 0x00020000L
+#define DVO_CONTROL__DVO_SYNC_PHASE__SHIFT 0x00000011
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK_MASK 0x07ffffffL
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK__SHIFT 0x00000000
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT_MASK 0x07ffffffL
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT__SHIFT 0x00000000
+#define DVO_CRC_EN__DVO_CRC2_EN_MASK 0x00010000L
+#define DVO_CRC_EN__DVO_CRC2_EN__SHIFT 0x00000010
+#define DVO_ENABLE__DVO_ENABLE_MASK 0x00000001L
+#define DVO_ENABLE__DVO_ENABLE__SHIFT 0x00000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000fc00L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x0000000a
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED_MASK 0x20000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED__SHIFT 0x0000001d
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK__SHIFT 0x00000008
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x0000001e
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x0000001f
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR__SHIFT 0x00000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL_MASK 0x000f0000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL__SHIFT 0x00000010
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL_MASK 0x03c00000L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL__SHIFT 0x00000016
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL_MASK 0x000000fcL
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL__SHIFT 0x00000002
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x00000001
+#define DVO_OUTPUT__DVO_CLOCK_MODE_MASK 0x00000100L
+#define DVO_OUTPUT__DVO_CLOCK_MODE__SHIFT 0x00000008
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE_MASK 0x00000003L
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE__SHIFT 0x00000000
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST_MASK 0xffffffffL
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST__SHIFT 0x00000000
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT_MASK 0x00000007L
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT__SHIFT 0x00000000
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT_MASK 0x00070000L
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT__SHIFT 0x00000010
+#define DVO_STRENGTH_CONTROL__DVOCLK_SN_MASK 0x0000f000L
+#define DVO_STRENGTH_CONTROL__DVOCLK_SN__SHIFT 0x0000000c
+#define DVO_STRENGTH_CONTROL__DVOCLK_SP_MASK 0x00000f00L
+#define DVO_STRENGTH_CONTROL__DVOCLK_SP__SHIFT 0x00000008
+#define DVO_STRENGTH_CONTROL__DVO_LSB_VMODE_MASK 0x10000000L
+#define DVO_STRENGTH_CONTROL__DVO_LSB_VMODE__SHIFT 0x0000001c
+#define DVO_STRENGTH_CONTROL__DVO_MSB_VMODE_MASK 0x20000000L
+#define DVO_STRENGTH_CONTROL__DVO_MSB_VMODE__SHIFT 0x0000001d
+#define DVO_STRENGTH_CONTROL__DVO_SN_MASK 0x000000f0L
+#define DVO_STRENGTH_CONTROL__DVO_SN__SHIFT 0x00000004
+#define DVO_STRENGTH_CONTROL__DVO_SP_MASK 0x0000000fL
+#define DVO_STRENGTH_CONTROL__DVO_SP__SHIFT 0x00000000
+#define DVO_VREF_CONTROL__DVO_VREFCAL_MASK 0x000000f0L
+#define DVO_VREF_CONTROL__DVO_VREFCAL__SHIFT 0x00000004
+#define DVO_VREF_CONTROL__DVO_VREFPON_MASK 0x00000001L
+#define DVO_VREF_CONTROL__DVO_VREFPON__SHIFT 0x00000000
+#define DVO_VREF_CONTROL__DVO_VREFSEL_MASK 0x00000002L
+#define DVO_VREF_CONTROL__DVO_VREFSEL__SHIFT 0x00000001
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x0fff0000L
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x00000010
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00000fffL
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x00000000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00000fffL
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x00000000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x0fff0000L
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x00000010
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK_MASK 0x000f0000L
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK__SHIFT 0x00000010
+#define FBC_CNTL__FBC_COHERENCY_MODE_MASK 0x00030000L
+#define FBC_CNTL__FBC_COHERENCY_MODE__SHIFT 0x00000010
+#define FBC_CNTL__FBC_EN_MASK 0x80000000L
+#define FBC_CNTL__FBC_EN__SHIFT 0x0000001f
+#define FBC_CNTL__FBC_GRPH_COMP_EN_MASK 0x00000001L
+#define FBC_CNTL__FBC_GRPH_COMP_EN__SHIFT 0x00000000
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN_MASK 0x02000000L
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN__SHIFT 0x00000019
+#define FBC_CNTL__FBC_SRC_SEL_MASK 0x0000000eL
+#define FBC_CNTL__FBC_SRC_SEL__SHIFT 0x00000001
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN_MASK 0x00010000L
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN__SHIFT 0x00000010
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN_MASK 0x00020000L
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN__SHIFT 0x00000011
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN_MASK 0x00040000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN__SHIFT 0x00000012
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN_MASK 0x00080000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN__SHIFT 0x00000013
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN_MASK 0x00100000L
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN__SHIFT 0x00000014
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION_MASK 0x0000000fL
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION__SHIFT 0x00000000
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN_MASK 0x00000100L
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN__SHIFT 0x00000008
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN_MASK 0x00000400L
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN__SHIFT 0x0000000a
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN_MASK 0x00000200L
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN__SHIFT 0x00000009
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN_MASK 0x00000800L
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN__SHIFT 0x0000000b
+#define FBC_COMP_MODE__FBC_IND_EN_MASK 0x00010000L
+#define FBC_COMP_MODE__FBC_IND_EN__SHIFT 0x00000010
+#define FBC_COMP_MODE__FBC_RLE_EN_MASK 0x00000001L
+#define FBC_COMP_MODE__FBC_RLE_EN__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0_MASK 0x000003ffL
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1_MASK 0x03ff0000L
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1__SHIFT 0x00000010
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2_MASK 0x000003ffL
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2__SHIFT 0x00000000
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3_MASK 0x03ff0000L
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3__SHIFT 0x00000010
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS_MASK 0x00010000L
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS__SHIFT 0x00000010
+#define FBC_DEBUG0__FBC_DEBUG0_MASK 0x00fe0000L
+#define FBC_DEBUG0__FBC_DEBUG0__SHIFT 0x00000011
+#define FBC_DEBUG0__FBC_DEBUG_MUX_MASK 0xff000000L
+#define FBC_DEBUG0__FBC_DEBUG_MUX__SHIFT 0x00000018
+#define FBC_DEBUG0__FBC_PERF_MUX0_MASK 0x000000ffL
+#define FBC_DEBUG0__FBC_PERF_MUX0__SHIFT 0x00000000
+#define FBC_DEBUG0__FBC_PERF_MUX1_MASK 0x0000ff00L
+#define FBC_DEBUG0__FBC_PERF_MUX1__SHIFT 0x00000008
+#define FBC_DEBUG1__FBC_DEBUG1_MASK 0xffffffffL
+#define FBC_DEBUG1__FBC_DEBUG1__SHIFT 0x00000000
+#define FBC_DEBUG2__FBC_DEBUG2_MASK 0xffffffffL
+#define FBC_DEBUG2__FBC_DEBUG2__SHIFT 0x00000000
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE_MASK 0x00000800L
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x0000000b
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS_MASK 0x000000f0L
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS__SHIFT 0x00000004
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL_MASK 0x00000300L
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL__SHIFT 0x00000008
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE_MASK 0x00000400L
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x0000000a
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE_MASK 0x00000008L
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE__SHIFT 0x00000003
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP_MASK 0x00000003L
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP__SHIFT 0x00000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR_MASK 0x000003ffL
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR__SHIFT 0x00000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN_MASK 0x80000000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN__SHIFT 0x0000001f
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA_MASK 0x00020000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA__SHIFT 0x00000011
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA_MASK 0x00010000L
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA__SHIFT 0x00000010
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA_MASK 0xffffffffL
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI_MASK 0x000000ffL
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA_MASK 0xffffffffL
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA__SHIFT 0x00000000
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI_MASK 0x000000ffL
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI__SHIFT 0x00000000
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK_MASK 0xffffffffL
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK__SHIFT 0x00000000
+#define FBC_IDLE_MASK__FBC_IDLE_MASK_MASK 0xffffffffL
+#define FBC_IDLE_MASK__FBC_IDLE_MASK__SHIFT 0x00000000
+#define FBC_IND_LUT0__FBC_IND_LUT0_MASK 0x00ffffffL
+#define FBC_IND_LUT0__FBC_IND_LUT0__SHIFT 0x00000000
+#define FBC_IND_LUT10__FBC_IND_LUT10_MASK 0x00ffffffL
+#define FBC_IND_LUT10__FBC_IND_LUT10__SHIFT 0x00000000
+#define FBC_IND_LUT11__FBC_IND_LUT11_MASK 0x00ffffffL
+#define FBC_IND_LUT11__FBC_IND_LUT11__SHIFT 0x00000000
+#define FBC_IND_LUT12__FBC_IND_LUT12_MASK 0x00ffffffL
+#define FBC_IND_LUT12__FBC_IND_LUT12__SHIFT 0x00000000
+#define FBC_IND_LUT13__FBC_IND_LUT13_MASK 0x00ffffffL
+#define FBC_IND_LUT13__FBC_IND_LUT13__SHIFT 0x00000000
+#define FBC_IND_LUT14__FBC_IND_LUT14_MASK 0x00ffffffL
+#define FBC_IND_LUT14__FBC_IND_LUT14__SHIFT 0x00000000
+#define FBC_IND_LUT15__FBC_IND_LUT15_MASK 0x00ffffffL
+#define FBC_IND_LUT15__FBC_IND_LUT15__SHIFT 0x00000000
+#define FBC_IND_LUT1__FBC_IND_LUT1_MASK 0x00ffffffL
+#define FBC_IND_LUT1__FBC_IND_LUT1__SHIFT 0x00000000
+#define FBC_IND_LUT2__FBC_IND_LUT2_MASK 0x00ffffffL
+#define FBC_IND_LUT2__FBC_IND_LUT2__SHIFT 0x00000000
+#define FBC_IND_LUT3__FBC_IND_LUT3_MASK 0x00ffffffL
+#define FBC_IND_LUT3__FBC_IND_LUT3__SHIFT 0x00000000
+#define FBC_IND_LUT4__FBC_IND_LUT4_MASK 0x00ffffffL
+#define FBC_IND_LUT4__FBC_IND_LUT4__SHIFT 0x00000000
+#define FBC_IND_LUT5__FBC_IND_LUT5_MASK 0x00ffffffL
+#define FBC_IND_LUT5__FBC_IND_LUT5__SHIFT 0x00000000
+#define FBC_IND_LUT6__FBC_IND_LUT6_MASK 0x00ffffffL
+#define FBC_IND_LUT6__FBC_IND_LUT6__SHIFT 0x00000000
+#define FBC_IND_LUT7__FBC_IND_LUT7_MASK 0x00ffffffL
+#define FBC_IND_LUT7__FBC_IND_LUT7__SHIFT 0x00000000
+#define FBC_IND_LUT8__FBC_IND_LUT8_MASK 0x00ffffffL
+#define FBC_IND_LUT8__FBC_IND_LUT8__SHIFT 0x00000000
+#define FBC_IND_LUT9__FBC_IND_LUT9_MASK 0x00ffffffL
+#define FBC_IND_LUT9__FBC_IND_LUT9__SHIFT 0x00000000
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR_MASK 0x00010000L
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR__SHIFT 0x00000010
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_MASK 0x00000003L
+#define FBC_MISC__FBC_DECOMPRESS_ERROR__SHIFT 0x00000000
+#define FBC_MISC__FBC_DIVIDE_X_MASK 0x00000300L
+#define FBC_MISC__FBC_DIVIDE_X__SHIFT 0x00000008
+#define FBC_MISC__FBC_DIVIDE_Y_MASK 0x00000400L
+#define FBC_MISC__FBC_DIVIDE_Y__SHIFT 0x0000000a
+#define FBC_MISC__FBC_ERROR_PIXEL_MASK 0x000000f0L
+#define FBC_MISC__FBC_ERROR_PIXEL__SHIFT 0x00000004
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR_MASK 0x00000008L
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR__SHIFT 0x00000003
+#define FBC_MISC__FBC_RESET_AT_DISABLE_MASK 0x00200000L
+#define FBC_MISC__FBC_RESET_AT_DISABLE__SHIFT 0x00000015
+#define FBC_MISC__FBC_RESET_AT_ENABLE_MASK 0x00100000L
+#define FBC_MISC__FBC_RESET_AT_ENABLE__SHIFT 0x00000014
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY_MASK 0x00001000L
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY__SHIFT 0x0000000c
+#define FBC_MISC__FBC_RSM_WRITE_VALUE_MASK 0x00000800L
+#define FBC_MISC__FBC_RSM_WRITE_VALUE__SHIFT 0x0000000b
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL_MASK 0xf0000000L
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL__SHIFT 0x0000001c
+#define FBC_MISC__FBC_STOP_ON_ERROR_MASK 0x00000004L
+#define FBC_MISC__FBC_STOP_ON_ERROR__SHIFT 0x00000002
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY_MASK 0x00001f00L
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY__SHIFT 0x00000008
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY_MASK 0x0000001fL
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY__SHIFT 0x00000000
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY_MASK 0x00000080L
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY__SHIFT 0x00000007
+#define FBC_STATUS__FBC_ENABLE_STATUS_MASK 0x00000001L
+#define FBC_STATUS__FBC_ENABLE_STATUS__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0c000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x0000001a
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x0000001c
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xc0000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x0000001e
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0x0000000d
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0x0000000f
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0x0000000e
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0x0000000c
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x00000008
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x00000009
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00100000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x00000014
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x00000010
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x00000015
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x00000019
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x00000018
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000010L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x00000004
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x00000000
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x00000010
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x00000000
+#define FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00010000L
+#define FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x00000010
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x00000000
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL_MASK 0x00000010L
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL__SHIFT 0x00000004
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN_MASK 0x00000010L
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN__SHIFT 0x00000004
+#define FMT_CRC_CNTL__FMT_CRC_EN_MASK 0x00000001L
+#define FMT_CRC_CNTL__FMT_CRC_EN__SHIFT 0x00000000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE_MASK 0x00100000L
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE__SHIFT 0x00000014
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT_MASK 0x01000000L
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT__SHIFT 0x00000018
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE__SHIFT 0x0000000c
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKb_MASK 0x00000100L
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKb__SHIFT 0x00000008
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00010000L
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x00000010
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE_MASK 0x0000ffffL
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE__SHIFT 0x00000000
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL_MASK 0xffff0000L
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL__SHIFT 0x00000010
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK_MASK 0x0000ffffL
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK__SHIFT 0x00000000
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK_MASK 0xffff0000L
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN_MASK 0xffff0000L
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED_MASK 0x0000ffffL
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED__SHIFT 0x00000000
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK_MASK 0xffff0000L
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK__SHIFT 0x00000010
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK_MASK 0x0000ffffL
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK__SHIFT 0x00000000
+#define FMT_DEBUG0__FMT_DEBUG0_MASK 0xffffffffL
+#define FMT_DEBUG0__FMT_DEBUG0__SHIFT 0x00000000
+#define FMT_DEBUG1__FMT_DEBUG1_MASK 0xffffffffL
+#define FMT_DEBUG1__FMT_DEBUG1__SHIFT 0x00000000
+#define FMT_DEBUG2__FMT_DEBUG2_MASK 0xffffffffL
+#define FMT_DEBUG2__FMT_DEBUG2__SHIFT 0x00000000
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT_MASK 0x00000003L
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT__SHIFT 0x00000000
+#define FMT_DEBUG_ID__FMT_DEBUG_ID_MASK 0xffffffffL
+#define FMT_DEBUG_ID__FMT_DEBUG_ID__SHIFT 0x00000000
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x00000000
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x00000000
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000ffL
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x00000000
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x00000000
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x00000004
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA0_MASK 0x0000ffffL
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA0__SHIFT 0x00000000
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA1_MASK 0xffff0000L
+#define FMT_FORCE_DATA_0_1__FMT_FORCE_DATA1__SHIFT 0x00000010
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA2_MASK 0x0000ffffL
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA2__SHIFT 0x00000000
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA3_MASK 0xffff0000L
+#define FMT_FORCE_DATA_2_3__FMT_FORCE_DATA3__SHIFT 0x00000010
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_EN_MASK 0x00000001L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_EN__SHIFT 0x00000000
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_ON_BLANKb_ONLY_MASK 0x00010000L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_ON_BLANKb_ONLY__SHIFT 0x00000010
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_COLOR_MASK 0x00000700L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_COLOR__SHIFT 0x00000008
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_SLOT_MASK 0x0000f000L
+#define FMT_FORCE_OUTPUT_CNTL__FMT_FORCE_DATA_SEL_SLOT__SHIFT 0x0000000c
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_RGB1_BGR0_MASK 0x00000010L
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_RGB1_BGR0__SHIFT 0x00000004
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_SELECT_MASK 0x00000001L
+#define FMT_TEMPORAL_DITHER_PATTERN_CONTROL__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_SELECT__SHIFT 0x00000000
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX_MASK 0xffffffffL
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX__SHIFT 0x00000000
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX_MASK 0xffffffffL
+#define FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11_MASK 0x0000ffffL
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11__SHIFT 0x00000000
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12_MASK 0xffff0000L
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12__SHIFT 0x00000010
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13_MASK 0x0000ffffL
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13__SHIFT 0x00000000
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14_MASK 0xffff0000L
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14__SHIFT 0x00000010
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21_MASK 0x0000ffffL
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21__SHIFT 0x00000000
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22_MASK 0xffff0000L
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22__SHIFT 0x00000010
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23_MASK 0x0000ffffL
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23__SHIFT 0x00000000
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24_MASK 0xffff0000L
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24__SHIFT 0x00000010
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31_MASK 0x0000ffffL
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31__SHIFT 0x00000000
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32_MASK 0xffff0000L
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32__SHIFT 0x00000010
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33_MASK 0x0000ffffL
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33__SHIFT 0x00000000
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34_MASK 0xffff0000L
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34__SHIFT 0x00000010
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT 0x00000000
+#define GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE_MASK 0x00000030L
+#define GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT 0x00000004
+#define GENENB__BLK_IO_BASE_MASK 0x000000ffL
+#define GENENB__BLK_IO_BASE__SHIFT 0x00000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL_MASK 0x80000000L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL__SHIFT 0x0000001f
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE_MASK 0x00000008L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE__SHIFT 0x00000003
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO_MASK 0x00000001L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO__SHIFT 0x00000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET_MASK 0x00000004L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET__SHIFT 0x00000002
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET_MASK 0x00000002L
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET__SHIFT 0x00000001
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_MASK 0x0000ff00L
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW_MASK 0x00000001L
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW__SHIFT 0x00000000
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA__SHIFT 0x00000008
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_MASK 0x000f0000L
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX__SHIFT 0x00000010
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE_MASK 0x80000000L
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE__SHIFT 0x0000001f
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK_MASK 0x00000002L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK__SHIFT 0x00000001
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT_MASK 0x00000001L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT__SHIFT 0x00000000
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK_MASK 0x00000004L
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK__SHIFT 0x00000002
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN_MASK 0x00000004L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN__SHIFT 0x00000002
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT_MASK 0x00000002L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT__SHIFT 0x00000001
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT_MASK 0x00000001L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT__SHIFT 0x00000000
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN_MASK 0x00000040L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN__SHIFT 0x00000006
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT_MASK 0x00000020L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT__SHIFT 0x00000005
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT_MASK 0x00000010L
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT__SHIFT 0x00000004
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL_MASK 0x0000007fL
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL__SHIFT 0x00000000
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL_MASK 0x00007f00L
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL__SHIFT 0x00000008
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN_MASK 0x00000080L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN__SHIFT 0x00000007
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN_MASK 0x00000001L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN__SHIFT 0x00000000
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL_MASK 0x00000002L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL__SHIFT 0x00000001
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY_MASK 0x0000ff00L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY__SHIFT 0x00000008
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT_MASK 0xff000000L
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT__SHIFT 0x00000018
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL__SHIFT 0x00000004
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE_MASK 0xffff0000L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE__SHIFT 0x00000010
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD_MASK 0x00000003L
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD__SHIFT 0x00000000
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED_MASK 0x00000020L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED__SHIFT 0x00000005
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE_MASK 0x00000010L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE__SHIFT 0x00000004
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK_MASK 0x00000400L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK__SHIFT 0x0000000a
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS_MASK 0x0000000fL
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS__SHIFT 0x00000000
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK_MASK 0x00000200L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK__SHIFT 0x00000009
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT_MASK 0x00000040L
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT__SHIFT 0x00000006
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ_MASK 0x00000200L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ__SHIFT 0x00000009
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT_MASK 0x000f0000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT__SHIFT 0x00000010
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW_MASK 0x00000001L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW__SHIFT 0x00000000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START_MASK 0x00001000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START__SHIFT 0x0000000c
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_MASK 0x00002000L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK_MASK 0x00000100L
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK__SHIFT 0x00000008
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP__SHIFT 0x0000000d
+#define GENFC_RD__VSYNC_SEL_R_MASK 0x00000008L
+#define GENFC_RD__VSYNC_SEL_R__SHIFT 0x00000003
+#define GENFC_WT__VSYNC_SEL_W_MASK 0x00000008L
+#define GENFC_WT__VSYNC_SEL_W__SHIFT 0x00000003
+#define GENMO_RD__GENMO_MONO_ADDRESS_B_MASK 0x00000001L
+#define GENMO_RD__GENMO_MONO_ADDRESS_B__SHIFT 0x00000000
+#define GENMO_RD__ODD_EVEN_MD_PGSEL_MASK 0x00000020L
+#define GENMO_RD__ODD_EVEN_MD_PGSEL__SHIFT 0x00000005
+#define GENMO_RD__VGA_CKSEL_MASK 0x0000000cL
+#define GENMO_RD__VGA_CKSEL__SHIFT 0x00000002
+#define GENMO_RD__VGA_HSYNC_POL_MASK 0x00000040L
+#define GENMO_RD__VGA_HSYNC_POL__SHIFT 0x00000006
+#define GENMO_RD__VGA_RAM_EN_MASK 0x00000002L
+#define GENMO_RD__VGA_RAM_EN__SHIFT 0x00000001
+#define GENMO_RD__VGA_VSYNC_POL_MASK 0x00000080L
+#define GENMO_RD__VGA_VSYNC_POL__SHIFT 0x00000007
+#define GENMO_WT__GENMO_MONO_ADDRESS_B_MASK 0x00000001L
+#define GENMO_WT__GENMO_MONO_ADDRESS_B__SHIFT 0x00000000
+#define GENMO_WT__ODD_EVEN_MD_PGSEL_MASK 0x00000020L
+#define GENMO_WT__ODD_EVEN_MD_PGSEL__SHIFT 0x00000005
+#define GENMO_WT__VGA_CKSEL_MASK 0x0000000cL
+#define GENMO_WT__VGA_CKSEL__SHIFT 0x00000002
+#define GENMO_WT__VGA_HSYNC_POL_MASK 0x00000040L
+#define GENMO_WT__VGA_HSYNC_POL__SHIFT 0x00000006
+#define GENMO_WT__VGA_RAM_EN_MASK 0x00000002L
+#define GENMO_WT__VGA_RAM_EN__SHIFT 0x00000001
+#define GENMO_WT__VGA_VSYNC_POL_MASK 0x00000080L
+#define GENMO_WT__VGA_VSYNC_POL__SHIFT 0x00000007
+#define GENS0__CRT_INTR_MASK 0x00000080L
+#define GENS0__CRT_INTR__SHIFT 0x00000007
+#define GENS0__SENSE_SWITCH_MASK 0x00000010L
+#define GENS0__SENSE_SWITCH__SHIFT 0x00000004
+#define GENS1__NO_DISPLAY_MASK 0x00000001L
+#define GENS1__NO_DISPLAY__SHIFT 0x00000000
+#define GENS1__PIXEL_READ_BACK_MASK 0x00000030L
+#define GENS1__PIXEL_READ_BACK__SHIFT 0x00000004
+#define GENS1__VGA_VSTATUS_MASK 0x00000008L
+#define GENS1__VGA_VSTATUS__SHIFT 0x00000003
+#define GRA00__GRPH_SET_RESET0_MASK 0x00000001L
+#define GRA00__GRPH_SET_RESET0__SHIFT 0x00000000
+#define GRA00__GRPH_SET_RESET1_MASK 0x00000002L
+#define GRA00__GRPH_SET_RESET1__SHIFT 0x00000001
+#define GRA00__GRPH_SET_RESET2_MASK 0x00000004L
+#define GRA00__GRPH_SET_RESET2__SHIFT 0x00000002
+#define GRA00__GRPH_SET_RESET3_MASK 0x00000008L
+#define GRA00__GRPH_SET_RESET3__SHIFT 0x00000003
+#define GRA01__GRPH_SET_RESET_ENA0_MASK 0x00000001L
+#define GRA01__GRPH_SET_RESET_ENA0__SHIFT 0x00000000
+#define GRA01__GRPH_SET_RESET_ENA1_MASK 0x00000002L
+#define GRA01__GRPH_SET_RESET_ENA1__SHIFT 0x00000001
+#define GRA01__GRPH_SET_RESET_ENA2_MASK 0x00000004L
+#define GRA01__GRPH_SET_RESET_ENA2__SHIFT 0x00000002
+#define GRA01__GRPH_SET_RESET_ENA3_MASK 0x00000008L
+#define GRA01__GRPH_SET_RESET_ENA3__SHIFT 0x00000003
+#define GRA02__GRPH_CCOMP_MASK 0x0000000fL
+#define GRA02__GRPH_CCOMP__SHIFT 0x00000000
+#define GRA03__GRPH_FN_SEL_MASK 0x00000018L
+#define GRA03__GRPH_FN_SEL__SHIFT 0x00000003
+#define GRA03__GRPH_ROTATE_MASK 0x00000007L
+#define GRA03__GRPH_ROTATE__SHIFT 0x00000000
+#define GRA04__GRPH_RMAP_MASK 0x00000003L
+#define GRA04__GRPH_RMAP__SHIFT 0x00000000
+#define GRA05__CGA_ODDEVEN_MASK 0x00000010L
+#define GRA05__CGA_ODDEVEN__SHIFT 0x00000004
+#define GRA05__GRPH_OES_MASK 0x00000020L
+#define GRA05__GRPH_OES__SHIFT 0x00000005
+#define GRA05__GRPH_PACK_MASK 0x00000040L
+#define GRA05__GRPH_PACK__SHIFT 0x00000006
+#define GRA05__GRPH_READ1_MASK 0x00000008L
+#define GRA05__GRPH_READ1__SHIFT 0x00000003
+#define GRA05__GRPH_WRITE_MODE_MASK 0x00000003L
+#define GRA05__GRPH_WRITE_MODE__SHIFT 0x00000000
+#define GRA06__GRPH_ADRSEL_MASK 0x0000000cL
+#define GRA06__GRPH_ADRSEL__SHIFT 0x00000002
+#define GRA06__GRPH_GRAPHICS_MASK 0x00000001L
+#define GRA06__GRPH_GRAPHICS__SHIFT 0x00000000
+#define GRA06__GRPH_ODDEVEN_MASK 0x00000002L
+#define GRA06__GRPH_ODDEVEN__SHIFT 0x00000001
+#define GRA07__GRPH_XCARE0_MASK 0x00000001L
+#define GRA07__GRPH_XCARE0__SHIFT 0x00000000
+#define GRA07__GRPH_XCARE1_MASK 0x00000002L
+#define GRA07__GRPH_XCARE1__SHIFT 0x00000001
+#define GRA07__GRPH_XCARE2_MASK 0x00000004L
+#define GRA07__GRPH_XCARE2__SHIFT 0x00000002
+#define GRA07__GRPH_XCARE3_MASK 0x00000008L
+#define GRA07__GRPH_XCARE3__SHIFT 0x00000003
+#define GRA08__GRPH_BMSK_MASK 0x000000ffL
+#define GRA08__GRPH_BMSK__SHIFT 0x00000000
+#define GRPH8_DATA__GRPH_DATA_MASK 0x000000ffL
+#define GRPH8_DATA__GRPH_DATA__SHIFT 0x00000000
+#define GRPH8_IDX__GRPH_IDX_MASK 0x0000000fL
+#define GRPH8_IDX__GRPH_IDX__SHIFT 0x00000000
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH_MASK 0x0001ffc0L
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH__SHIFT 0x00000006
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x00010000L
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000010
+#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0x00f00000L
+#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x00000014
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT_MASK 0x00001800L
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT 0x0000000b
+#define GRPH_CONTROL__GRPH_BANK_WIDTH_MASK 0x000000c0L
+#define GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT 0x00000006
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000L
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x0000001f
+#define GRPH_CONTROL__GRPH_DEPTH_MASK 0x00000003L
+#define GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x00000000
+#define GRPH_CONTROL__GRPH_FORMAT_MASK 0x00000700L
+#define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x00000008
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0x000c0000L
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x00000012
+#define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0x0000000cL
+#define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x00000002
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000L
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x00000018
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x00020000L
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000011
+#define GRPH_CONTROL__GRPH_TILE_SPLIT_MASK 0x0000e000L
+#define GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT 0x0000000d
+#define GRPH_CONTROL__GRPH_Z_MASK 0x00000030L
+#define GRPH_CONTROL__GRPH_Z__SHIFT 0x00000004
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES_MASK 0x00000700L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES__SHIFT 0x00000008
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET_MASK 0x00000001L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET__SHIFT 0x00000000
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE_MASK 0x00000070L
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE__SHIFT 0x00000004
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK_MASK 0x00000200L
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK__SHIFT 0x00000009
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG_MASK 0x00000100L
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG__SHIFT 0x00000008
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES_MASK 0x0000000fL
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES__SHIFT 0x00000000
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES_MASK 0x000000f0L
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x00000004
+#define GRPH_ENABLE__GRPH_ENABLE_MASK 0x00000001L
+#define GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x00000000
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK 0x00000001L
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN__SHIFT 0x00000000
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x00000001L
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x00000000
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x00000100L
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x00000008
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x00000100L
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x00000008
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x00000001L
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x00000000
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x00010000L
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x00000010
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK 0x00000100L
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN__SHIFT 0x00000008
+#define GRPH_PITCH__GRPH_PITCH_MASK 0x00007fffL
+#define GRPH_PITCH__GRPH_PITCH__SHIFT 0x00000000
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE_MASK 0x00000001L
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE__SHIFT 0x00000000
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE_MASK 0x00000001L
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE__SHIFT 0x00000000
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x00010000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x00000010
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x00020000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x00000011
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x00000001L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x00000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x00000300L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x00000008
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000L
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x0000001c
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE_MASK 0x000000ffL
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x00000000
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE_MASK 0xffffff00L
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE__SHIFT 0x00000008
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X_MASK 0x00003fffL
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X__SHIFT 0x00000000
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y_MASK 0x00003fffL
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y__SHIFT 0x00000000
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR_MASK 0x00000c00L
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR__SHIFT 0x0000000a
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x00000300L
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x00000008
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x00000003L
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x00000000
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0x000000c0L
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x00000006
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x00000030L
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x00000004
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x00000001L
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x00000000
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x00000002L
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x00000001
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000L
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x0000001c
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x00000004L
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x00000002
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x00000008L
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x00000003
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE_MASK 0x00000100L
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE__SHIFT 0x00000008
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x00010000L
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x00000010
+#define GRPH_X_END__GRPH_X_END_MASK 0x00007fffL
+#define GRPH_X_END__GRPH_X_END__SHIFT 0x00000000
+#define GRPH_X_START__GRPH_X_START_MASK 0x00003fffL
+#define GRPH_X_START__GRPH_X_START__SHIFT 0x00000000
+#define GRPH_Y_END__GRPH_Y_END_MASK 0x00007fffL
+#define GRPH_Y_END__GRPH_Y_END__SHIFT 0x00000000
+#define GRPH_Y_START__GRPH_Y_START_MASK 0x00003fffL
+#define GRPH_Y_START__GRPH_Y_START__SHIFT 0x00000000
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xfffff000L
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0x0000000c
+#define HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000fffffL
+#define HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x00000000
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xfffff000L
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0x0000000c
+#define HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000fffffL
+#define HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x00000000
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xfffff000L
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0x0000000c
+#define HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000fffffL
+#define HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x00000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x0000001f
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0x0000000c
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x00000001
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x00000010
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x00000004
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x00000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x00000008
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xfffff000L
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0x0000000c
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000fffffL
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x00000000
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x00000004
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001f0000L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x00000010
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x00000008
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x0000001c
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x00000018
+#define HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x00000008
+#define HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x00000009
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x00000000
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x00000004
+#define HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x00000004
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x00000002
+#define HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x00000000
+#define HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000f00L
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0x0000000c
+#define HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x00000008
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x00000001
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_MASK 0x003f0000L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE__SHIFT 0x00000010
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x00000000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x00000005
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_MASK 0x3f000000L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE__SHIFT 0x00000018
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x00000004
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT_MASK 0x00000002L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT__SHIFT 0x00000001
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE_MASK 0x003f0000L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE__SHIFT 0x00000010
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND_MASK 0x00000001L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND__SHIFT 0x00000000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT_MASK 0x00000020L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT__SHIFT 0x00000005
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE_MASK 0x3f000000L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE__SHIFT 0x00000018
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND_MASK 0x00000010L
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND__SHIFT 0x00000004
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x00000005
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x00000004
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK 0x00000002L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT__SHIFT 0x00000001
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x00000001L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x00000000
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x00000009
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x00000008
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003f00L
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x00000008
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK 0x0000003fL
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT 0x00000000
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003f0000L
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x00000010
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x00000000
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x00000010
+#define HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+#define HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x0000001b
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x00000014
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x00000005
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x00000004
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x00000009
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003f0000L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x00000010
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x00000008
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x00000000
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffffL
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x00000000
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E_MASK 0xffffffffL
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E__SHIFT 0x00000000
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F_MASK 0xffffffffL
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F__SHIFT 0x00000000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11_MASK 0x0000ffffL
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11__SHIFT 0x00000000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12_MASK 0xffff0000L
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12__SHIFT 0x00000010
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13_MASK 0x0000ffffL
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13__SHIFT 0x00000000
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14_MASK 0xffff0000L
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14__SHIFT 0x00000010
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21_MASK 0x0000ffffL
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21__SHIFT 0x00000000
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22_MASK 0xffff0000L
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22__SHIFT 0x00000010
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23_MASK 0x0000ffffL
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23__SHIFT 0x00000000
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24_MASK 0xffff0000L
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24__SHIFT 0x00000010
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31_MASK 0x0000ffffL
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31__SHIFT 0x00000000
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32_MASK 0xffff0000L
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32__SHIFT 0x00000010
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33_MASK 0x0000ffffL
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33__SHIFT 0x00000000
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34_MASK 0xffff0000L
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34__SHIFT 0x00000010
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE_MASK 0x00000003L
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT 0x00000000
+#define INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE_MASK 0x00000030L
+#define INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT 0x00000004
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE_MASK 0x00000003L
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT 0x00000000
+#define INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE_MASK 0x00000030L
+#define INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT 0x00000004
+#define KEY_CONTROL__GRPH_OVL_HALF_BLEND_MASK 0x10000000L
+#define KEY_CONTROL__GRPH_OVL_HALF_BLEND__SHIFT 0x0000001c
+#define KEY_CONTROL__KEY_MODE_MASK 0x00000006L
+#define KEY_CONTROL__KEY_MODE__SHIFT 0x00000001
+#define KEY_CONTROL__KEY_SELECT_MASK 0x00000001L
+#define KEY_CONTROL__KEY_SELECT__SHIFT 0x00000000
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW__SHIFT 0x00000000
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW__SHIFT 0x00000000
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW__SHIFT 0x00000000
+#define KEY_RANGE_RED__KEY_RED_HIGH_MASK 0xffff0000L
+#define KEY_RANGE_RED__KEY_RED_HIGH__SHIFT 0x00000010
+#define KEY_RANGE_RED__KEY_RED_LOW_MASK 0x0000ffffL
+#define KEY_RANGE_RED__KEY_RED_LOW__SHIFT 0x00000000
+#define LB_DEBUG2__LB_DEBUG2_MASK 0xffffffffL
+#define LB_DEBUG2__LB_DEBUG2__SHIFT 0x00000000
+#define LB_DEBUG__LB_DEBUG_MASK 0xffffffffL
+#define LB_DEBUG__LB_DEBUG__SHIFT 0x00000000
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x00000001L
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x00000000
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x00000010L
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x00000004
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x00000003L
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x00000000
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define LIGHT_SLEEP_CNTL__LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define LIGHT_SLEEP_CNTL__LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define LIGHT_SLEEP_CNTL__MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define LIGHT_SLEEP_CNTL__MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE_MASK 0x00000001L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE__SHIFT 0x00000000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE_MASK 0x00000018L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE__SHIFT 0x00000003
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS_MASK 0x00000700L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS__SHIFT 0x00000008
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES_MASK 0x000000e0L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES__SHIFT 0x00000005
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE_MASK 0x00000800L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE__SHIFT 0x0000000b
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE_MASK 0x00007000L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE__SHIFT 0x0000000c
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN_MASK 0x0fff0000L
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN__SHIFT 0x00000010
+#define LVDS_DATA_CNTL__LVDS_24BIT_ENABLE_MASK 0x00000001L
+#define LVDS_DATA_CNTL__LVDS_24BIT_ENABLE__SHIFT 0x00000000
+#define LVDS_DATA_CNTL__LVDS_24BIT_FORMAT_MASK 0x00000010L
+#define LVDS_DATA_CNTL__LVDS_24BIT_FORMAT__SHIFT 0x00000004
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_DE_MASK 0x00000100L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_DE__SHIFT 0x00000008
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_HS_MASK 0x00000400L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_HS__SHIFT 0x0000000a
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_VS_MASK 0x00000200L
+#define LVDS_DATA_CNTL__LVDS_2ND_CHAN_VS__SHIFT 0x00000009
+#define LVDS_DATA_CNTL__LVDS_2ND_LINK_CNTL_BITS_MASK 0x00007000L
+#define LVDS_DATA_CNTL__LVDS_2ND_LINK_CNTL_BITS__SHIFT 0x0000000c
+#define LVDS_DATA_CNTL__LVDS_DTMG_POL_MASK 0x00040000L
+#define LVDS_DATA_CNTL__LVDS_DTMG_POL__SHIFT 0x00000012
+#define LVDS_DATA_CNTL__LVDS_FP_POL_MASK 0x00010000L
+#define LVDS_DATA_CNTL__LVDS_FP_POL__SHIFT 0x00000010
+#define LVDS_DATA_CNTL__LVDS_LP_POL_MASK 0x00020000L
+#define LVDS_DATA_CNTL__LVDS_LP_POL__SHIFT 0x00000011
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK 0x01000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD_MASK 0x02000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD__SHIFT 0x00000019
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL_MASK 0x04000000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL__SHIFT 0x0000001a
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_MASK 0x00010000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD_MASK 0x00020000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD__SHIFT 0x00000011
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL_MASK 0x00040000L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL__SHIFT 0x00000012
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN_MASK 0x00000002L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN__SHIFT 0x00000001
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN_MASK 0x00000001L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE__SHIFT 0x00000004
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_MASK 0x00000100L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD_MASK 0x00000200L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD__SHIFT 0x00000009
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL_MASK 0x00000400L
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL__SHIFT 0x0000000a
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1_MASK 0x00ff0000L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2_MASK 0xff000000L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1_MASK 0x000000ffL
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2_MASK 0x0000ff00L
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3_MASK 0x00ff0000L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH_MASK 0x000000ffL
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3_MASK 0x0000ff00L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN__SHIFT 0x00000018
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV_MASK 0xffff0000L
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV__SHIFT 0x00000010
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV_MASK 0x00000fffL
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV__SHIFT 0x00000000
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON_MASK 0x00000008L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON__SHIFT 0x00000003
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON_MASK 0x00000002L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON__SHIFT 0x00000001
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE_MASK 0x00000010L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE__SHIFT 0x00000004
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE_MASK 0x00000f00L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE__SHIFT 0x00000008
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN__SHIFT 0x00000002
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R__SHIFT 0x00000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3__SHIFT 0x00000018
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0_MASK 0x000000ffL
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0__SHIFT 0x00000000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1_MASK 0x0000ff00L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1__SHIFT 0x00000008
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2_MASK 0x00ff0000L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2__SHIFT 0x00000010
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3_MASK 0xff000000L
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3__SHIFT 0x00000018
+#define MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK_MASK 0x00000100L
+#define MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK__SHIFT 0x00000008
+#define MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK__SHIFT 0x00000000
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00030000L
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x00000010
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_MODE_MASK 0x00000007L
+#define MASTER_UPDATE_MODE__MASTER_UPDATE_MODE__SHIFT 0x00000000
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR_MASK 0x00000010L
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR__SHIFT 0x00000004
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED_MASK 0x00000001L
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED__SHIFT 0x00000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR_MASK 0x00100000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR__SHIFT 0x00000014
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED_MASK 0x00010000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED__SHIFT 0x00000010
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR_MASK 0x10000000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR__SHIFT 0x0000001c
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED_MASK 0x01000000L
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED__SHIFT 0x00000018
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR_MASK 0x00001000L
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR__SHIFT 0x0000000c
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED_MASK 0x00000100L
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED__SHIFT 0x00000008
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE_MASK 0x00000010L
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000004
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL_MASK 0x00ff0000L
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL__SHIFT 0x00000010
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY_MASK 0x3f000000L
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY__SHIFT 0x00000018
+#define MCIF_CONTROL__MCIF_BUFF_SIZE_MASK 0x00000003L
+#define MCIF_CONTROL__MCIF_BUFF_SIZE__SHIFT 0x00000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x40000000L
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x0000001e
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x80000000L
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x0000001f
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL_MASK 0x0000f000L
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL__SHIFT 0x0000000c
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE_MASK 0x00000100L
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000008
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS_MASK 0x00000001L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS__SHIFT 0x00000000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_MASK 0x00000030L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE__SHIFT 0x00000004
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE_MASK 0x00070000L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE__SHIFT 0x00000010
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE_MASK 0x00007f00L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE__SHIFT 0x00000008
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE_MASK 0x00180000L
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE__SHIFT 0x00000013
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define MCIF_VMID__MCIF_WR_VMID_MASK 0x0000000fL
+#define MCIF_VMID__MCIF_WR_VMID__SHIFT 0x00000000
+#define MCIF_VMID__VIP_WR_VMID_MASK 0x000000f0L
+#define MCIF_VMID__VIP_WR_VMID__SHIFT 0x00000004
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT_MASK 0x000000ffL
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT__SHIFT 0x00000000
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT_MASK 0x0000ff00L
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT__SHIFT 0x00000008
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x00000014
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007fL
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x00000000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x00000011
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007f00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x00000008
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x00000010
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x00000014
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001ffffL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x00000000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES_MASK 0x0000000fL
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES__SHIFT 0x00000000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK_MASK 0x00001000L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK__SHIFT 0x0000000c
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG_MASK 0x00000100L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG__SHIFT 0x00000008
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_MASK 0x00000010L
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET__SHIFT 0x00000004
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE_MASK 0x00000003L
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE__SHIFT 0x00000000
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B_MASK 0x3ff00000L
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B__SHIFT 0x00000014
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G_MASK 0x000ffc00L
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G__SHIFT 0x0000000a
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R_MASK 0x000003ffL
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R__SHIFT 0x00000000
+#define MVP_CONTROL1__MVP_30BPP_EN_MASK 0x10000000L
+#define MVP_CONTROL1__MVP_30BPP_EN__SHIFT 0x0000001c
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE_MASK 0x00000400L
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE__SHIFT 0x0000000a
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL_MASK 0x00010000L
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL__SHIFT 0x00000010
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND_MASK 0x01000000L
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND__SHIFT 0x00000018
+#define MVP_CONTROL1__MVP_EN_MASK 0x00000001L
+#define MVP_CONTROL1__MVP_EN__SHIFT 0x00000000
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION_MASK 0x00300000L
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000014
+#define MVP_CONTROL1__MVP_MIXER_MODE_MASK 0x00000070L
+#define MVP_CONTROL1__MVP_MIXER_MODE__SHIFT 0x00000004
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK_MASK 0x00000200L
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK__SHIFT 0x00000009
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_MASK 0x00000100L
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL__SHIFT 0x00000008
+#define MVP_CONTROL1__MVP_RATE_CONTROL_MASK 0x00001000L
+#define MVP_CONTROL1__MVP_RATE_CONTROL__SHIFT 0x0000000c
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A_MASK 0x40000000L
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A__SHIFT 0x0000001e
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B_MASK 0x80000000L
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B__SHIFT 0x0000001f
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX_MASK 0x00010000L
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX__SHIFT 0x00000010
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN_MASK 0x00100000L
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN__SHIFT 0x00000014
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL_MASK 0x00000100L
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL__SHIFT 0x00000008
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL_MASK 0x00001000L
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL__SHIFT 0x0000000c
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL_MASK 0x00000001L
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL__SHIFT 0x00000000
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL_MASK 0x00000010L
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL__SHIFT 0x00000004
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR_MASK 0x10000000L
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR__SHIFT 0x0000001c
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN_MASK 0x01000000L
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN__SHIFT 0x00000018
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL_MASK 0x00000010L
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL__SHIFT 0x00000004
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE_MASK 0x00000100L
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE__SHIFT 0x00000008
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN_MASK 0x00100000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN__SHIFT 0x00000014
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP_MASK 0x10000000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP__SHIFT 0x0000001c
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE_MASK 0x00001000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE__SHIFT 0x0000000c
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO_MASK 0x00010000L
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO__SHIFT 0x00000010
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES_MASK 0x00000001L
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES__SHIFT 0x00000000
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN_MASK 0x01000000L
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN__SHIFT 0x00000018
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK_MASK 0x000000ffL
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK__SHIFT 0x00000000
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN_MASK 0x20000000L
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN__SHIFT 0x0000001d
+#define MVP_CRC_CNTL__MVP_CRC_EN_MASK 0x10000000L
+#define MVP_CRC_CNTL__MVP_CRC_EN__SHIFT 0x0000001c
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK_MASK 0x0000ff00L
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK__SHIFT 0x00000008
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK_MASK 0x00ff0000L
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK__SHIFT 0x00000010
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL_MASK 0x40000000L
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL__SHIFT 0x0000001e
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT_MASK 0x0000ffffL
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT__SHIFT 0x00000000
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT_MASK 0xffff0000L
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT__SHIFT 0x00000010
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT_MASK 0x0000ffffL
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT__SHIFT 0x00000000
+#define MVP_DEBUG_05__IDE0_MVP_GPU_CHAIN_LOCATION_MASK 0x00000006L
+#define MVP_DEBUG_05__IDE0_MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000001
+#define MVP_DEBUG_09__IDE4_CRTC2_MVP_GPU_CHAIN_LOCATION_MASK 0x00000006L
+#define MVP_DEBUG_09__IDE4_CRTC2_MVP_GPU_CHAIN_LOCATION__SHIFT 0x00000001
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H_MASK 0x00000001L
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H__SHIFT 0x00000000
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_MASK 0x01fffffeL
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A__SHIFT 0x00000001
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H_MASK 0x00000001L
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H__SHIFT 0x00000000
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_MASK 0x01fffffeL
+#define MVP_DEBUG_13__IDED_MVP_DATA_B__SHIFT 0x00000001
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B_MASK 0x04000000L
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B__SHIFT 0x0000001a
+#define MVP_DEBUG_13__IDED_START_READ_B_MASK 0x02000000L
+#define MVP_DEBUG_13__IDED_START_READ_B__SHIFT 0x00000019
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B_MASK 0x38000000L
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B__SHIFT 0x0000001b
+#define MVP_DEBUG_14__IDEE_CRC_PHASE_MASK 0x00100000L
+#define MVP_DEBUG_14__IDEE_CRC_PHASE__SHIFT 0x00000014
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A_MASK 0x00080000L
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A__SHIFT 0x00000013
+#define MVP_DEBUG_14__IDEE_READ_ADD_MASK 0x00000007L
+#define MVP_DEBUG_14__IDEE_READ_ADD__SHIFT 0x00000000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B_MASK 0x00020000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B__SHIFT 0x00000011
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_MASK 0x00010000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE__SHIFT 0x00000010
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE_MASK 0x00040000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE__SHIFT 0x00000012
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B_MASK 0x00008000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B__SHIFT 0x0000000f
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_MASK 0x00004000L
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE__SHIFT 0x0000000e
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A_MASK 0x00000800L
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A__SHIFT 0x0000000b
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B_MASK 0x00001000L
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B__SHIFT 0x0000000c
+#define MVP_DEBUG_14__IDEE_START_READ_B_MASK 0x00000400L
+#define MVP_DEBUG_14__IDEE_START_READ_B__SHIFT 0x0000000a
+#define MVP_DEBUG_14__IDEE_START_READ_MASK 0x00000200L
+#define MVP_DEBUG_14__IDEE_START_READ__SHIFT 0x00000009
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO_MASK 0x00002000L
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO__SHIFT 0x0000000d
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A_MASK 0x00000038L
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A__SHIFT 0x00000003
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B_MASK 0x000001c0L
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B__SHIFT 0x00000006
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA_MASK 0xfffffff0L
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA__SHIFT 0x00000004
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN_MASK 0x00000001L
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN__SHIFT 0x00000000
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT_MASK 0x00000008L
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT__SHIFT 0x00000003
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL_MASK 0x00000004L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL__SHIFT 0x00000002
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL_MASK 0x00000002L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL__SHIFT 0x00000001
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES_MASK 0x00000ff0L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES__SHIFT 0x00000004
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW_MASK 0x00001000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW__SHIFT 0x0000000c
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ_MASK 0x00000001L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ__SHIFT 0x00000000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW_MASK 0x00002000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW__SHIFT 0x0000000d
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR_MASK 0x00ff0000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR__SHIFT 0x00000010
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR_MASK 0xff000000L
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR__SHIFT 0x00000018
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE_MASK 0x00000002L
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE__SHIFT 0x00000001
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA_MASK 0xfffffffcL
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA__SHIFT 0x00000002
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_MASK 0x00000001L
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ__SHIFT 0x00000000
+#define MVP_DEBUG__MVP_DEBUG_BITS_MASK 0xffffff00L
+#define MVP_DEBUG__MVP_DEBUG_BITS__SHIFT 0x00000008
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP_MASK 0x00000020L
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP__SHIFT 0x00000005
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP_MASK 0x00000010L
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP__SHIFT 0x00000004
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY_MASK 0x00000080L
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY__SHIFT 0x00000007
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR_MASK 0x00000040L
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR__SHIFT 0x00000006
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN_MASK 0x00000002L
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN__SHIFT 0x00000001
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL_MASK 0x00000008L
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL__SHIFT 0x00000003
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN_MASK 0x00000001L
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN__SHIFT 0x00000000
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL_MASK 0x00000004L
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL__SHIFT 0x00000002
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT_MASK 0x00ff0000L
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT__SHIFT 0x00000010
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM_MASK 0x0000ff00L
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM__SHIFT 0x00000008
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM_MASK 0x000000ffL
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM__SHIFT 0x00000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS_MASK 0x80000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS__SHIFT 0x0000001f
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK_MASK 0x40000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK__SHIFT 0x0000001e
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL_MASK 0x000000ffL
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL__SHIFT 0x00000000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK_MASK 0x00010000L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK__SHIFT 0x00000010
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_MASK 0x00000100L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED_MASK 0x00001000L
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED__SHIFT 0x0000000c
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW__SHIFT 0x00000008
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK_MASK 0x10000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK__SHIFT 0x0000001c
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_MASK 0x00100000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED_MASK 0x01000000L
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED__SHIFT 0x00000018
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW__SHIFT 0x00000014
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE_MASK 0x40000000L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE__SHIFT 0x0000001e
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MASK 0x007fff00L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE_MASK 0x00000003L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE__SHIFT 0x00000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT__SHIFT 0x00000008
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET_MASK 0x3f000000L
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET__SHIFT 0x00000018
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL_MASK 0x00000001L
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL__SHIFT 0x00000000
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP_MASK 0xffffff00L
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP__SHIFT 0x00000008
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN_MASK 0x00000010L
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN__SHIFT 0x00000004
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN_MASK 0x80000000L
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN__SHIFT 0x0000001f
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT_MASK 0x1fff0000L
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT__SHIFT 0x00000010
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT_MASK 0x00001fffL
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT__SHIFT 0x00000000
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_MASK 0x00001fffL
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET_MASK 0x80000000L
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET__SHIFT 0x0000001f
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT__SHIFT 0x00000000
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED_MASK 0x1fff0000L
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED__SHIFT 0x00000010
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED_MASK 0x00001fffL
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11_MASK 0x0000ffffL
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11__SHIFT 0x00000000
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12_MASK 0xffff0000L
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12__SHIFT 0x00000010
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13_MASK 0x0000ffffL
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13__SHIFT 0x00000000
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14_MASK 0xffff0000L
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14__SHIFT 0x00000010
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21_MASK 0x0000ffffL
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21__SHIFT 0x00000000
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22_MASK 0xffff0000L
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22__SHIFT 0x00000010
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23_MASK 0x0000ffffL
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23__SHIFT 0x00000000
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24_MASK 0xffff0000L
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24__SHIFT 0x00000010
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31_MASK 0x0000ffffL
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31__SHIFT 0x00000000
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32_MASK 0xffff0000L
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32__SHIFT 0x00000010
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33_MASK 0x0000ffffL
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33__SHIFT 0x00000000
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34_MASK 0xffff0000L
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34__SHIFT 0x00000010
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE_MASK 0x00000007L
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT 0x00000000
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE_MASK 0x00000070L
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT 0x00000004
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE_MASK 0x0000000fL
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE__SHIFT 0x00000000
+#define OVL_CONTROL1__OVL_ADDRESS_TRANSLATION_ENABLE_MASK 0x00010000L
+#define OVL_CONTROL1__OVL_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x00000010
+#define OVL_CONTROL1__OVL_ARRAY_MODE_MASK 0x00f00000L
+#define OVL_CONTROL1__OVL_ARRAY_MODE__SHIFT 0x00000014
+#define OVL_CONTROL1__OVL_BANK_HEIGHT_MASK 0x00001800L
+#define OVL_CONTROL1__OVL_BANK_HEIGHT__SHIFT 0x0000000b
+#define OVL_CONTROL1__OVL_BANK_WIDTH_MASK 0x000000c0L
+#define OVL_CONTROL1__OVL_BANK_WIDTH__SHIFT 0x00000006
+#define OVL_CONTROL1__OVL_COLOR_EXPANSION_MODE_MASK 0x01000000L
+#define OVL_CONTROL1__OVL_COLOR_EXPANSION_MODE__SHIFT 0x00000018
+#define OVL_CONTROL1__OVL_DEPTH_MASK 0x00000003L
+#define OVL_CONTROL1__OVL_DEPTH__SHIFT 0x00000000
+#define OVL_CONTROL1__OVL_FORMAT_MASK 0x00000700L
+#define OVL_CONTROL1__OVL_FORMAT__SHIFT 0x00000008
+#define OVL_CONTROL1__OVL_MACRO_TILE_ASPECT_MASK 0x000c0000L
+#define OVL_CONTROL1__OVL_MACRO_TILE_ASPECT__SHIFT 0x00000012
+#define OVL_CONTROL1__OVL_NUM_BANKS_MASK 0x0000000cL
+#define OVL_CONTROL1__OVL_NUM_BANKS__SHIFT 0x00000002
+#define OVL_CONTROL1__OVL_PIPE_CONFIG_MASK 0x3e000000L
+#define OVL_CONTROL1__OVL_PIPE_CONFIG__SHIFT 0x00000019
+#define OVL_CONTROL1__OVL_PRIVILEGED_ACCESS_ENABLE_MASK 0x00020000L
+#define OVL_CONTROL1__OVL_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x00000011
+#define OVL_CONTROL1__OVL_TILE_SPLIT_MASK 0x0000e000L
+#define OVL_CONTROL1__OVL_TILE_SPLIT__SHIFT 0x0000000d
+#define OVL_CONTROL1__OVL_Z_MASK 0x00000030L
+#define OVL_CONTROL1__OVL_Z__SHIFT 0x00000004
+#define OVL_CONTROL2__OVL_HALF_RESOLUTION_ENABLE_MASK 0x00000001L
+#define OVL_CONTROL2__OVL_HALF_RESOLUTION_ENABLE__SHIFT 0x00000000
+#define OVL_DFQ_CONTROL__OVL_DFQ_MIN_FREE_ENTRIES_MASK 0x00000700L
+#define OVL_DFQ_CONTROL__OVL_DFQ_MIN_FREE_ENTRIES__SHIFT 0x00000008
+#define OVL_DFQ_CONTROL__OVL_DFQ_RESET_MASK 0x00000001L
+#define OVL_DFQ_CONTROL__OVL_DFQ_RESET__SHIFT 0x00000000
+#define OVL_DFQ_CONTROL__OVL_DFQ_SIZE_MASK 0x00000070L
+#define OVL_DFQ_CONTROL__OVL_DFQ_SIZE__SHIFT 0x00000004
+#define OVL_DFQ_STATUS__OVL_DFQ_NUM_ENTRIES_MASK 0x0000000fL
+#define OVL_DFQ_STATUS__OVL_DFQ_NUM_ENTRIES__SHIFT 0x00000000
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_ACK_MASK 0x00000200L
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_ACK__SHIFT 0x00000009
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_FLAG_MASK 0x00000100L
+#define OVL_DFQ_STATUS__OVL_DFQ_RESET_FLAG__SHIFT 0x00000008
+#define OVL_DFQ_STATUS__OVL_SECONDARY_DFQ_NUM_ENTRIES_MASK 0x000000f0L
+#define OVL_DFQ_STATUS__OVL_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x00000004
+#define OVL_ENABLE__OVL_ENABLE_MASK 0x00000001L
+#define OVL_ENABLE__OVL_ENABLE__SHIFT 0x00000000
+#define OVL_ENABLE__OVLSCL_EN_MASK 0x00000100L
+#define OVL_ENABLE__OVLSCL_EN__SHIFT 0x00000008
+#define OVL_END__OVL_X_END_MASK 0x7fff0000L
+#define OVL_END__OVL_X_END__SHIFT 0x00000010
+#define OVL_END__OVL_Y_END_MASK 0x00007fffL
+#define OVL_END__OVL_Y_END__SHIFT 0x00000000
+#define OVL_PITCH__OVL_PITCH_MASK 0x00007fffL
+#define OVL_PITCH__OVL_PITCH__SHIFT 0x00000000
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_BCB_MASK 0x000003ffL
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_BCB__SHIFT 0x00000000
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_GY_MASK 0x000ffc00L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_GY__SHIFT 0x0000000a
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_RCR_MASK 0x3ff00000L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_BLACK_COLOR_RCR__SHIFT 0x00000014
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_EDGE_PIXEL_SEL_MASK 0x80000000L
+#define OVLSCL_EDGE_PIXEL_CNTL__OVLSCL_EDGE_PIXEL_SEL__SHIFT 0x0000001f
+#define OVL_SECONDARY_SURFACE_ADDRESS_HIGH__OVL_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define OVL_SECONDARY_SURFACE_ADDRESS_HIGH__OVL_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_DFQ_ENABLE_MASK 0x00000001L
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_DFQ_ENABLE__SHIFT 0x00000000
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00L
+#define OVL_SECONDARY_SURFACE_ADDRESS__OVL_SECONDARY_SURFACE_ADDRESS__SHIFT 0x00000008
+#define OVL_START__OVL_X_START_MASK 0x3fff0000L
+#define OVL_START__OVL_X_START__SHIFT 0x00000010
+#define OVL_START__OVL_Y_START_MASK 0x00003fffL
+#define OVL_START__OVL_Y_START__SHIFT 0x00000000
+#define OVL_STEREOSYNC_FLIP__OVL_PRIMARY_SURFACE_PENDING_MASK 0x00010000L
+#define OVL_STEREOSYNC_FLIP__OVL_PRIMARY_SURFACE_PENDING__SHIFT 0x00000010
+#define OVL_STEREOSYNC_FLIP__OVL_SECONDARY_SURFACE_PENDING_MASK 0x00020000L
+#define OVL_STEREOSYNC_FLIP__OVL_SECONDARY_SURFACE_PENDING__SHIFT 0x00000011
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_EN_MASK 0x00000001L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_EN__SHIFT 0x00000000
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_MODE_MASK 0x00000300L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_FLIP_MODE__SHIFT 0x00000008
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000L
+#define OVL_STEREOSYNC_FLIP__OVL_STEREOSYNC_SELECT_DISABLE__SHIFT 0x0000001c
+#define OVL_SURFACE_ADDRESS_HIGH_INUSE__OVL_SURFACE_ADDRESS_HIGH_INUSE_MASK 0x000000ffL
+#define OVL_SURFACE_ADDRESS_HIGH_INUSE__OVL_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS_HIGH__OVL_SURFACE_ADDRESS_HIGH_MASK 0x000000ffL
+#define OVL_SURFACE_ADDRESS_HIGH__OVL_SURFACE_ADDRESS_HIGH__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS_INUSE__OVL_SURFACE_ADDRESS_INUSE_MASK 0xffffff00L
+#define OVL_SURFACE_ADDRESS_INUSE__OVL_SURFACE_ADDRESS_INUSE__SHIFT 0x00000008
+#define OVL_SURFACE_ADDRESS__OVL_DFQ_ENABLE_MASK 0x00000001L
+#define OVL_SURFACE_ADDRESS__OVL_DFQ_ENABLE__SHIFT 0x00000000
+#define OVL_SURFACE_ADDRESS__OVL_SURFACE_ADDRESS_MASK 0xffffff00L
+#define OVL_SURFACE_ADDRESS__OVL_SURFACE_ADDRESS__SHIFT 0x00000008
+#define OVL_SURFACE_OFFSET_X__OVL_SURFACE_OFFSET_X_MASK 0x00003fffL
+#define OVL_SURFACE_OFFSET_X__OVL_SURFACE_OFFSET_X__SHIFT 0x00000000
+#define OVL_SURFACE_OFFSET_Y__OVL_SURFACE_OFFSET_Y_MASK 0x00003fffL
+#define OVL_SURFACE_OFFSET_Y__OVL_SURFACE_OFFSET_Y__SHIFT 0x00000000
+#define OVL_SWAP_CNTL__OVL_ALPHA_CROSSBAR_MASK 0x00000c00L
+#define OVL_SWAP_CNTL__OVL_ALPHA_CROSSBAR__SHIFT 0x0000000a
+#define OVL_SWAP_CNTL__OVL_BLUE_CROSSBAR_MASK 0x00000300L
+#define OVL_SWAP_CNTL__OVL_BLUE_CROSSBAR__SHIFT 0x00000008
+#define OVL_SWAP_CNTL__OVL_ENDIAN_SWAP_MASK 0x00000003L
+#define OVL_SWAP_CNTL__OVL_ENDIAN_SWAP__SHIFT 0x00000000
+#define OVL_SWAP_CNTL__OVL_GREEN_CROSSBAR_MASK 0x000000c0L
+#define OVL_SWAP_CNTL__OVL_GREEN_CROSSBAR__SHIFT 0x00000006
+#define OVL_SWAP_CNTL__OVL_RED_CROSSBAR_MASK 0x00000030L
+#define OVL_SWAP_CNTL__OVL_RED_CROSSBAR__SHIFT 0x00000004
+#define OVL_UPDATE__OVL_DISABLE_MULTIPLE_UPDATE_MASK 0x01000000L
+#define OVL_UPDATE__OVL_DISABLE_MULTIPLE_UPDATE__SHIFT 0x00000018
+#define OVL_UPDATE__OVL_UPDATE_LOCK_MASK 0x00010000L
+#define OVL_UPDATE__OVL_UPDATE_LOCK__SHIFT 0x00000010
+#define OVL_UPDATE__OVL_UPDATE_PENDING_MASK 0x00000001L
+#define OVL_UPDATE__OVL_UPDATE_PENDING__SHIFT 0x00000000
+#define OVL_UPDATE__OVL_UPDATE_TAKEN_MASK 0x00000002L
+#define OVL_UPDATE__OVL_UPDATE_TAKEN__SHIFT 0x00000001
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x00010000L
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x00000010
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN_MASK 0x00001000L
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN__SHIFT 0x0000000c
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00004000L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x0000000e
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON_MASK 0x00000001L
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE_MASK 0x00000001L
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE__SHIFT 0x00000000
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON_MASK 0x00000001L
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE_MASK 0x00000001L
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE__SHIFT 0x00000000
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON_MASK 0x00000001L
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE_MASK 0x00000001L
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE__SHIFT 0x00000000
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON_MASK 0x00000001L
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE_MASK 0x00000001L
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE__SHIFT 0x00000000
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON_MASK 0x00000001L
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE_MASK 0x00000001L
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE__SHIFT 0x00000000
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0x0000ffffL
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x00000000
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x00000007L
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x00000000
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x00000010L
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x00000004
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS_MASK 0x000003ffL
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x00000000
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON_MASK 0x00000001L
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON__SHIFT 0x00000000
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE_MASK 0x00000001L
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE__SHIFT 0x00000000
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE_MASK 0x10000000L
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE__SHIFT 0x0000001c
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS_MASK 0xc0000000L
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS__SHIFT 0x0000001e
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA_MASK 0x00ffffffL
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA__SHIFT 0x00000000
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE_MASK 0x20000000L
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE__SHIFT 0x0000001d
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0_MASK 0x00000030L
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0__SHIFT 0x00000004
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE__SHIFT 0x00000000
+#define PIXCLK1_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL1_MASK 0x00000030L
+#define PIXCLK1_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL1__SHIFT 0x00000004
+#define PIXCLK1_RESYNC_CNTL__PIXCLK1_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK1_RESYNC_CNTL__PIXCLK1_RESYNC_ENABLE__SHIFT 0x00000000
+#define PIXCLK2_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL2_MASK 0x00000030L
+#define PIXCLK2_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL2__SHIFT 0x00000004
+#define PIXCLK2_RESYNC_CNTL__PIXCLK2_RESYNC_ENABLE_MASK 0x00000001L
+#define PIXCLK2_RESYNC_CNTL__PIXCLK2_RESYNC_ENABLE__SHIFT 0x00000000
+#define PLL_ANALOG__PLL_CAL_MODE_MASK 0x0000001fL
+#define PLL_ANALOG__PLL_CAL_MODE__SHIFT 0x00000000
+#define PLL_ANALOG__PLL_CP_MASK 0x00000f00L
+#define PLL_ANALOG__PLL_CP__SHIFT 0x00000008
+#define PLL_ANALOG__PLL_IBIAS_MASK 0xff000000L
+#define PLL_ANALOG__PLL_IBIAS__SHIFT 0x00000018
+#define PLL_ANALOG__PLL_LF_MODE_MASK 0x001ff000L
+#define PLL_ANALOG__PLL_LF_MODE__SHIFT 0x0000000c
+#define PLL_ANALOG__PLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define PLL_ANALOG__PLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define PLL_CNTL__PLL_ANTIGLITCH_RESETB_MASK 0x00000080L
+#define PLL_CNTL__PLL_ANTIGLITCH_RESETB__SHIFT 0x00000007
+#define PLL_CNTL__PLL_ANTI_GLITCH_RESET_MASK 0x00002000L
+#define PLL_CNTL__PLL_ANTI_GLITCH_RESET__SHIFT 0x0000000d
+#define PLL_CNTL__PLL_BYPASS_CAL_MASK 0x00000004L
+#define PLL_CNTL__PLL_BYPASS_CAL__SHIFT 0x00000002
+#define PLL_CNTL__PLL_CAL_BYPASS_REFDIV_MASK 0x00000400L
+#define PLL_CNTL__PLL_CAL_BYPASS_REFDIV__SHIFT 0x0000000a
+#define PLL_CNTL__PLL_CALIB_DONE_MASK 0x00100000L
+#define PLL_CNTL__PLL_CALIB_DONE__SHIFT 0x00000014
+#define PLL_CNTL__PLL_CALREF_MASK 0x00000300L
+#define PLL_CNTL__PLL_CALREF__SHIFT 0x00000008
+#define PLL_CNTL__PLL_DIG_SPARE_MASK 0xfc000000L
+#define PLL_CNTL__PLL_DIG_SPARE__SHIFT 0x0000001a
+#define PLL_CNTL__PLL_LOCKED_MASK 0x00200000L
+#define PLL_CNTL__PLL_LOCKED__SHIFT 0x00000015
+#define PLL_CNTL__PLL_LOCK_FREQ_SEL_MASK 0x00080000L
+#define PLL_CNTL__PLL_LOCK_FREQ_SEL__SHIFT 0x00000013
+#define PLL_CNTL__PLL_PCIE_REFCLK_SEL_MASK 0x00000040L
+#define PLL_CNTL__PLL_PCIE_REFCLK_SEL__SHIFT 0x00000006
+#define PLL_CNTL__PLL_POST_DIV_SRC_MASK 0x00000008L
+#define PLL_CNTL__PLL_POST_DIV_SRC__SHIFT 0x00000003
+#define PLL_CNTL__PLL_POWER_DOWN_MASK 0x00000002L
+#define PLL_CNTL__PLL_POWER_DOWN__SHIFT 0x00000001
+#define PLL_CNTL__PLL_REFCLK_SEL_MASK 0x00001800L
+#define PLL_CNTL__PLL_REFCLK_SEL__SHIFT 0x0000000b
+#define PLL_CNTL__PLL_REF_DIV_SRC_MASK 0x00070000L
+#define PLL_CNTL__PLL_REF_DIV_SRC__SHIFT 0x00000010
+#define PLL_CNTL__PLL_RESET_MASK 0x00000001L
+#define PLL_CNTL__PLL_RESET__SHIFT 0x00000000
+#define PLL_CNTL__PLL_TIMING_MODE_STATUS_MASK 0x03000000L
+#define PLL_CNTL__PLL_TIMING_MODE_STATUS__SHIFT 0x00000018
+#define PLL_CNTL__PLL_VCOREF_MASK 0x00000030L
+#define PLL_CNTL__PLL_VCOREF__SHIFT 0x00000004
+#define PLL_DEBUG_CNTL__PLL_DEBUG_CLK_SEL_MASK 0x00000f00L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_CLK_SEL__SHIFT 0x00000008
+#define PLL_DEBUG_CNTL__PLL_DEBUG_MUXOUT_SEL_MASK 0x000000f0L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_MUXOUT_SEL__SHIFT 0x00000004
+#define PLL_DEBUG_CNTL__PLL_DEBUG_SIGNALS_ENABLE_MASK 0x00000001L
+#define PLL_DEBUG_CNTL__PLL_DEBUG_SIGNALS_ENABLE__SHIFT 0x00000000
+#define PLL_DISPCLK_CURRENT_DTO_PHASE__PLL_DISPCLK_CURRENT_DTO_PHASE_MASK 0x000001ffL
+#define PLL_DISPCLK_CURRENT_DTO_PHASE__PLL_DISPCLK_CURRENT_DTO_PHASE__SHIFT 0x00000000
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_COMPL_DELAY_MASK 0xff000000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_COMPL_DELAY__SHIFT 0x00000018
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_DIS_MASK 0x00010000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_DIS__SHIFT 0x00000010
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_PHASE_MASK 0x000001ffL
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_PHASE__SHIFT 0x00000000
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_ACK_MASK 0x00400000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_ACK__SHIFT 0x00000016
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_MODE_MASK 0x00060000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_MODE__SHIFT 0x00000011
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_PENDING_MASK 0x00100000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_PENDING__SHIFT 0x00000014
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_REQ_MASK 0x00200000L
+#define PLL_DISPCLK_DTO_CNTL__PLL_DISPCLK_DTO_UPDATE_REQ__SHIFT 0x00000015
+#define PLL_DS_CNTL__PLL_DS_FRAC_MASK 0x0000ffffL
+#define PLL_DS_CNTL__PLL_DS_FRAC__SHIFT 0x00000000
+#define PLL_DS_CNTL__PLL_DS_MODE_MASK 0x00040000L
+#define PLL_DS_CNTL__PLL_DS_MODE__SHIFT 0x00000012
+#define PLL_DS_CNTL__PLL_DS_ORDER_MASK 0x00030000L
+#define PLL_DS_CNTL__PLL_DS_ORDER__SHIFT 0x00000010
+#define PLL_DS_CNTL__PLL_DS_PRBS_EN_MASK 0x00080000L
+#define PLL_DS_CNTL__PLL_DS_PRBS_EN__SHIFT 0x00000013
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define PLL_FB_DIV__PLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define PLL_FB_DIV__PLL_FB_DIV_MASK 0x0fff0000L
+#define PLL_FB_DIV__PLL_FB_DIV__SHIFT 0x00000010
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_MASK 0x000f0000L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_RESET_MASK 0x00000100L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_RESET__SHIFT 0x00000008
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_SELECT_MASK 0x00001000L
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV_SELECT__SHIFT 0x0000000c
+#define PLL_IDCLK_CNTL__PLL_DIFF_POST_DIV__SHIFT 0x00000010
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_DIFF_EN_MASK 0x00000002L
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_DIFF_EN__SHIFT 0x00000001
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_EN_MASK 0x00000001L
+#define PLL_IDCLK_CNTL__PLL_LTDP_IDCLK_EN__SHIFT 0x00000000
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_DIFF_EN_MASK 0x00000008L
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_DIFF_EN__SHIFT 0x00000003
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_EN_MASK 0x00000004L
+#define PLL_IDCLK_CNTL__PLL_TMDP_IDCLK_EN__SHIFT 0x00000002
+#define PLL_IDCLK_CNTL__PLL_UNIPHY_IDCLK_DIFF_EN_MASK 0x00000010L
+#define PLL_IDCLK_CNTL__PLL_UNIPHY_IDCLK_DIFF_EN__SHIFT 0x00000004
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DISPCLK_MASK 0x00000080L
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DISPCLK__SHIFT 0x00000007
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DPREFCLK_MASK 0x00008000L
+#define PLL_POST_DIV__PLL_POST_DIV1P5_DPREFCLK__SHIFT 0x0000000f
+#define PLL_POST_DIV__PLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define PLL_POST_DIV__PLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define PLL_POST_DIV__PLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define PLL_POST_DIV__PLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define PLL_POST_DIV__PLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define PLL_POST_DIV__PLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define PLL_REF_DIV__PLL_CALIBRATION_REF_DIV_MASK 0x0000f000L
+#define PLL_REF_DIV__PLL_CALIBRATION_REF_DIV__SHIFT 0x0000000c
+#define PLL_REF_DIV__PLL_REF_DIV_MASK 0x000003ffL
+#define PLL_REF_DIV__PLL_REF_DIV__SHIFT 0x00000000
+#define PLL_SS_AMOUNT_DSFRAC__PLL_SS_AMOUNT_DSFRAC_MASK 0x0000ffffL
+#define PLL_SS_AMOUNT_DSFRAC__PLL_SS_AMOUNT_DSFRAC__SHIFT 0x00000000
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_FBDIV_MASK 0x000000ffL
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_FBDIV__SHIFT 0x00000000
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_NFRAC_SLIP_MASK 0x00000f00L
+#define PLL_SS_CNTL__PLL_SS_AMOUNT_NFRAC_SLIP__SHIFT 0x00000008
+#define PLL_SS_CNTL__PLL_SS_EN_MASK 0x00001000L
+#define PLL_SS_CNTL__PLL_SS_EN__SHIFT 0x0000000c
+#define PLL_SS_CNTL__PLL_SS_MODE_MASK 0x00002000L
+#define PLL_SS_CNTL__PLL_SS_MODE__SHIFT 0x0000000d
+#define PLL_SS_CNTL__PLL_SS_STEP_SIZE_DSFRAC_MASK 0xffff0000L
+#define PLL_SS_CNTL__PLL_SS_STEP_SIZE_DSFRAC__SHIFT 0x00000010
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_COUNT_MASK 0x00000070L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_COUNT__SHIFT 0x00000004
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DETECT_ENABLE_MASK 0x00000001L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DETECT_ENABLE__SHIFT 0x00000000
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_RES100_SELECT_MASK 0x00000002L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_DET_RES100_SELECT__SHIFT 0x00000001
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_CLEAR_MASK 0x00000008L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_CLEAR__SHIFT 0x00000003
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_STATUS_MASK 0x00000004L
+#define PLL_UNLOCK_DETECT_CNTL__PLL_UNLOCK_STICKY_STATUS__SHIFT 0x00000002
+#define PLL_UPDATE_CNTL__PLL_AUTO_RESET_DISABLE_MASK 0x00010000L
+#define PLL_UPDATE_CNTL__PLL_AUTO_RESET_DISABLE__SHIFT 0x00000010
+#define PLL_UPDATE_CNTL__PLL_UPDATE_PENDING_MASK 0x00000001L
+#define PLL_UPDATE_CNTL__PLL_UPDATE_PENDING__SHIFT 0x00000000
+#define PLL_UPDATE_CNTL__PLL_UPDATE_POINT_MASK 0x00000100L
+#define PLL_UPDATE_CNTL__PLL_UPDATE_POINT__SHIFT 0x00000008
+#define PLL_UPDATE_LOCK__PLL_UPDATE_LOCK_MASK 0x00000001L
+#define PLL_UPDATE_LOCK__PLL_UPDATE_LOCK__SHIFT 0x00000000
+#define PLL_VREG_CNTL__PLL_VREF_SEL_MASK 0x04000000L
+#define PLL_VREG_CNTL__PLL_VREF_SEL__SHIFT 0x0000001a
+#define PLL_VREG_CNTL__PLL_VREG_BIAS_MASK 0xf0000000L
+#define PLL_VREG_CNTL__PLL_VREG_BIAS__SHIFT 0x0000001c
+#define PLL_VREG_CNTL__PLL_VREG_CNTL_MASK 0x000fffffL
+#define PLL_VREG_CNTL__PLL_VREG_CNTL__SHIFT 0x00000000
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN_MASK 0x00000008L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN__SHIFT 0x00000003
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK 0x00000010L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS__SHIFT 0x00000004
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN_MASK 0x00000004L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN__SHIFT 0x00000002
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN_MASK 0x00000002L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN__SHIFT 0x00000001
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT_MASK 0x00000001L
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT__SHIFT 0x00000000
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK 0x00000010L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS__SHIFT 0x00000004
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CB_SIGN_MASK 0x00000002L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CB_SIGN__SHIFT 0x00000001
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CR_SIGN_MASK 0x00000008L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_CR_SIGN__SHIFT 0x00000003
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_SELECT_MASK 0x00000001L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_SELECT__SHIFT 0x00000000
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_Y_SIGN_MASK 0x00000004L
+#define PRESCALE_OVL_CONTROL__OVL_PRESCALE_Y_SIGN__SHIFT 0x00000002
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B__SHIFT 0x00000010
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G__SHIFT 0x00000010
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R_MASK 0x0000ffffL
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R__SHIFT 0x00000000
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R_MASK 0xffff0000L
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_BIAS_CB_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_BIAS_CB__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_SCALE_CB_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_CB__OVL_PRESCALE_SCALE_CB__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_BIAS_CR_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_BIAS_CR__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_SCALE_CR_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_CR__OVL_PRESCALE_SCALE_CR__SHIFT 0x00000010
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_BIAS_Y_MASK 0x0000ffffL
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_BIAS_Y__SHIFT 0x00000000
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_SCALE_Y_MASK 0xffff0000L
+#define PRESCALE_VALUES_OVL_Y__OVL_PRESCALE_SCALE_Y__SHIFT 0x00000010
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END_MASK 0x0000ffffL
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END__SHIFT 0x00000000
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000L
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE__SHIFT 0x00000010
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE_MASK 0x0000ffffL
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x0003ffffL
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_MASK 0x0003ffffL
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x07f00000L
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x00000014
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START__SHIFT 0x00000000
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END_MASK 0x0000ffffL
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END__SHIFT 0x00000000
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000L
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE__SHIFT 0x00000010
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE_MASK 0x0000ffffL
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0x000001ffL
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x00000000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0x0000000c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x01ff0000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0x00000010
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x0000001c
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x0003ffffL
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x00000000
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_MASK 0x0003ffffL
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x07f00000L
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x00000014
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START__SHIFT 0x00000000
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE_MASK 0x00000007L
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT 0x00000000
+#define REGAMMA_CONTROL__OVL_REGAMMA_MODE_MASK 0x00000070L
+#define REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT 0x00000004
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA_MASK 0x0007ffffL
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA__SHIFT 0x00000000
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX_MASK 0x000001ffL
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE__SHIFT 0x00000000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK_MASK 0x00000100L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK__SHIFT 0x00000008
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG_MASK 0x00000001L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG__SHIFT 0x00000000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS_MASK 0x00010000L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS__SHIFT 0x00000010
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK_MASK 0x00001000L
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK__SHIFT 0x0000000c
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x00030000L
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x00000010
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0x00000f00L
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x00000008
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0x0000000fL
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x00000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0x0000000f
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x00003fffL
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x00000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x0000001f
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000L
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x00000010
+#define SCL_DEBUG2__SCL_DEBUG2_MASK 0xffffffffL
+#define SCL_DEBUG2__SCL_DEBUG2__SHIFT 0x00000000
+#define SCL_DEBUG__SCL_DEBUG_MASK 0xffffffffL
+#define SCL_DEBUG__SCL_DEBUG__SHIFT 0x00000000
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN_MASK 0x00000010L
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN__SHIFT 0x00000004
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR_MASK 0x00000007L
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR__SHIFT 0x00000000
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN_MASK 0x00001000L
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN__SHIFT 0x0000000c
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR_MASK 0x00000700L
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR__SHIFT 0x00000008
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST_MASK 0x00000001L
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST__SHIFT 0x00000000
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x03ffffffL
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x00000000
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000f00L
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x00000008
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000fL
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0x0fffff80L
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x00000007
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x00000010L
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x00000004
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x00000001L
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x001fffffL
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x00003fffL
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x00000000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000L
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x00000010
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x00000001L
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define SCL_UPDATE__SCL_UPDATE_LOCK_MASK 0x00010000L
+#define SCL_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x00000010
+#define SCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+#define SCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x00000000
+#define SCL_UPDATE__SCL_UPDATE_TAKEN_MASK 0x00000100L
+#define SCL_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x00000008
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST_MASK 0x00000001L
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x0000ffffL
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x00070000L
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x00000010
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x0000ffffL
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x00000000
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x00070000L
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x00000010
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x03ffffffL
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x00000000
+#define SEQ00__SEQ_RST0B_MASK 0x00000001L
+#define SEQ00__SEQ_RST0B__SHIFT 0x00000000
+#define SEQ00__SEQ_RST1B_MASK 0x00000002L
+#define SEQ00__SEQ_RST1B__SHIFT 0x00000001
+#define SEQ01__SEQ_DOT8_MASK 0x00000001L
+#define SEQ01__SEQ_DOT8__SHIFT 0x00000000
+#define SEQ01__SEQ_MAXBW_MASK 0x00000020L
+#define SEQ01__SEQ_MAXBW__SHIFT 0x00000005
+#define SEQ01__SEQ_PCLKBY2_MASK 0x00000008L
+#define SEQ01__SEQ_PCLKBY2__SHIFT 0x00000003
+#define SEQ01__SEQ_SHIFT2_MASK 0x00000004L
+#define SEQ01__SEQ_SHIFT2__SHIFT 0x00000002
+#define SEQ01__SEQ_SHIFT4_MASK 0x00000010L
+#define SEQ01__SEQ_SHIFT4__SHIFT 0x00000004
+#define SEQ02__SEQ_MAP0_EN_MASK 0x00000001L
+#define SEQ02__SEQ_MAP0_EN__SHIFT 0x00000000
+#define SEQ02__SEQ_MAP1_EN_MASK 0x00000002L
+#define SEQ02__SEQ_MAP1_EN__SHIFT 0x00000001
+#define SEQ02__SEQ_MAP2_EN_MASK 0x00000004L
+#define SEQ02__SEQ_MAP2_EN__SHIFT 0x00000002
+#define SEQ02__SEQ_MAP3_EN_MASK 0x00000008L
+#define SEQ02__SEQ_MAP3_EN__SHIFT 0x00000003
+#define SEQ03__SEQ_FONT_A0_MASK 0x00000020L
+#define SEQ03__SEQ_FONT_A0__SHIFT 0x00000005
+#define SEQ03__SEQ_FONT_A1_MASK 0x00000004L
+#define SEQ03__SEQ_FONT_A1__SHIFT 0x00000002
+#define SEQ03__SEQ_FONT_A2_MASK 0x00000008L
+#define SEQ03__SEQ_FONT_A2__SHIFT 0x00000003
+#define SEQ03__SEQ_FONT_B0_MASK 0x00000010L
+#define SEQ03__SEQ_FONT_B0__SHIFT 0x00000004
+#define SEQ03__SEQ_FONT_B1_MASK 0x00000001L
+#define SEQ03__SEQ_FONT_B1__SHIFT 0x00000000
+#define SEQ03__SEQ_FONT_B2_MASK 0x00000002L
+#define SEQ03__SEQ_FONT_B2__SHIFT 0x00000001
+#define SEQ04__SEQ_256K_MASK 0x00000002L
+#define SEQ04__SEQ_256K__SHIFT 0x00000001
+#define SEQ04__SEQ_CHAIN_MASK 0x00000008L
+#define SEQ04__SEQ_CHAIN__SHIFT 0x00000003
+#define SEQ04__SEQ_ODDEVEN_MASK 0x00000004L
+#define SEQ04__SEQ_ODDEVEN__SHIFT 0x00000002
+#define SEQ8_DATA__SEQ_DATA_MASK 0x000000ffL
+#define SEQ8_DATA__SEQ_DATA__SHIFT 0x00000000
+#define SEQ8_IDX__SEQ_IDX_MASK 0x00000007L
+#define SEQ8_IDX__SEQ_IDX__SHIFT 0x00000000
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x00000000
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000ffL
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x00000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS_MASK 0x00000100L
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS__SHIFT 0x00000008
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT_MASK 0x00000001L
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3__SHIFT 0x00000018
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0_MASK 0x000000ffL
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0__SHIFT 0x00000000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1_MASK 0x0000ff00L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1__SHIFT 0x00000008
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2_MASK 0x00ff0000L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2__SHIFT 0x00000010
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3_MASK 0xff000000L
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3__SHIFT 0x00000018
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC__SHIFT 0x00000008
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE__SHIFT 0x00000000
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN__SHIFT 0x00000004
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC_MASK 0x00000700L
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC__SHIFT 0x00000008
+#define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+#define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+#define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+#define TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+#define TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x00000000
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x00000008
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x00000000
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x00000000
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x00000001
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x00000002
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x00000003
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x0000001f
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x00000004
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x00000007
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x00000008
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000fL
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x00000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0x0000000b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0x0000000c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0x0000000a
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x00000014
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x00000017
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x00000018
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000f0000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x00000010
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x0000001b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x0000001c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x0000001a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x00000004
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x00000007
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x00000008
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000fL
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x00000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0x0000000b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0x0000000c
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0x0000000a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x00000014
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x00000017
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x00000018
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000f0000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x00000010
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x0000001b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x0000001c
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x0000001a
+#define TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x00000000
+#define TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x00000008
+#define TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x00000010
+#define TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+#define TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x00000018
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x00000000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x00000018
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x00000008
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000f0000L
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x00000010
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x00000004
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN_MASK 0x02000000L
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN__SHIFT 0x00000019
+#define TMDS_DEBUG__TMDS_DEBUG_DE_MASK 0x01000000L
+#define TMDS_DEBUG__TMDS_DEBUG_DE__SHIFT 0x00000018
+#define TMDS_DEBUG__TMDS_DEBUG_EN_MASK 0x00000001L
+#define TMDS_DEBUG__TMDS_DEBUG_EN__SHIFT 0x00000000
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN_MASK 0x00000200L
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN__SHIFT 0x00000009
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_MASK 0x00000100L
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC__SHIFT 0x00000008
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN_MASK 0x00020000L
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN__SHIFT 0x00000011
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_MASK 0x00010000L
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC__SHIFT 0x00000010
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003ffL
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03ff0000L
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x00000010
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003ffL
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x00000000
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03ff0000L
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x00000010
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_EN_MASK 0x00010000L
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_EN__SHIFT 0x00000010
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYAB_TPG_CONTROL__UNIPHYAB_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYAB_TPG_SEED__UNIPHYAB_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYAB_TPG_SEED__UNIPHYAB_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_ERROR_MASK 0x001f0000L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_ERROR__SHIFT 0x00000010
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_RESET_MASK 0x00000002L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_ANG_BIST_RESET__SHIFT 0x00000001
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_PRESETB_MASK 0x01000000L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_PRESETB__SHIFT 0x00000018
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_RX_BIAS_MASK 0x00000f00L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_RX_BIAS__SHIFT 0x00000008
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_TEST_RX_EN_MASK 0x00000001L
+#define UNIPHY_ANG_BIST_CNTL__UNIPHY_TEST_RX_EN__SHIFT 0x00000000
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_EN_MASK 0x00010000L
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_EN__SHIFT 0x00000010
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYCD_TPG_CONTROL__UNIPHYCD_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYCD_TPG_SEED__UNIPHYCD_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYCD_TPG_SEED__UNIPHYCD_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x00000000
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x00000008
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x00000010
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHY_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x00000018
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_ERROR_MASK 0x00000040L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_ERROR__SHIFT 0x00000006
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_LEVEL_MASK 0x00000030L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYN_LEVEL__SHIFT 0x00000004
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYNSEL_MASK 0x00000001L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DSYNSEL__SHIFT 0x00000000
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DUAL_LINK_PHASE_MASK 0x00010000L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_DUAL_LINK_PHASE__SHIFT 0x00000010
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_LINK_ENABLE_MASK 0x00001000L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_LINK_ENABLE__SHIFT 0x0000000c
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_SOURCE_SELECT_MASK 0x00000100L
+#define UNIPHY_DATA_SYNCHRONIZATION__UNIPHY_SOURCE_SELECT__SHIFT 0x00000008
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_STATIC_TEST_PATTERN_MASK 0x000003ffL
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_STATIC_TEST_PATTERN__SHIFT 0x00000000
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_EN_MASK 0x00010000L
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_EN__SHIFT 0x00000010
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_SEL_MASK 0x000e0000L
+#define UNIPHYEF_TPG_CONTROL__UNIPHYEF_TPG_SEL__SHIFT 0x00000011
+#define UNIPHYEF_TPG_SEED__UNIPHYEF_TPG_SEED_MASK 0x007fffffL
+#define UNIPHYEF_TPG_SEED__UNIPHYEF_TPG_SEED__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK_MASK 0x00000400L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK__SHIFT 0x0000000a
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_MASK 0x00000200L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF__SHIFT 0x00000009
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF_MASK 0x00000100L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF__SHIFT 0x00000008
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF_MASK 0x00000001L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF_MASK 0x10000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF__SHIFT 0x0000001c
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF_MASK 0x0f000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF__SHIFT 0x00000018
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF_MASK 0x40000000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF__SHIFT 0x0000001e
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF_MASK 0x00f00000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF__SHIFT 0x00000014
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF_MASK 0x000f0000L
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD_MASK 0xffffffffL
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD__SHIFT 0x00000010
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE_MASK 0x00007fffL
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE__SHIFT 0x00000000
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF_MASK 0x7fff0000L
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF__SHIFT 0x00000010
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0x0000000c
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0x0000000d
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0x0000000e
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHY_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0x0000000f
+#define UNIPHY_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHY_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x00000014
+#define UNIPHY_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x00030000L
+#define UNIPHY_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x00000010
+#define UNIPHY_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHY_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x00000008
+#define UNIPHY_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHY_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x00000000
+#define UNIPHY_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHY_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_BW_CNTL_MASK 0x00ff0000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_BW_CNTL__SHIFT 0x00000010
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLK_EN_MASK 0x00000008L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLK_EN__SHIFT 0x00000003
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLKPH_EN_MASK 0x000000f0L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_CLKPH_EN__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_ENABLE_MASK 0x00000001L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_ENABLE__SHIFT 0x00000000
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_EXT_RESET_EN_MASK 0x00000004L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_EXT_RESET_EN__SHIFT 0x00000002
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_LF_CNTL_MASK 0x00007f00L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_LF_CNTL__SHIFT 0x00000008
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_RESET_MASK 0x00000002L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_RESET__SHIFT 0x00000001
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_EN_MASK 0x02000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_EN__SHIFT 0x00000019
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_SRC_MASK 0x01000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_BYPCLK_SRC__SHIFT 0x00000018
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_VCTL_ADC_EN_MASK 0x04000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_PLL_TEST_VCTL_ADC_EN__SHIFT 0x0000001a
+#define UNIPHY_PLL_CONTROL1__UNIPHY_VCO_MODE_MASK 0x30000000L
+#define UNIPHY_PLL_CONTROL1__UNIPHY_VCO_MODE__SHIFT 0x0000001c
+#define UNIPHY_PLL_CONTROL2__UNIPHY_CLKINV_MASK 0x00002000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_CLKINV__SHIFT 0x0000000d
+#define UNIPHY_PLL_CONTROL2__UNIPHY_DPLLSEL_MASK 0x0000000cL
+#define UNIPHY_PLL_CONTROL2__UNIPHY_DPLLSEL__SHIFT 0x00000002
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_EN_MASK 0x00001000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_EN__SHIFT 0x0000000c
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_SEL_MASK 0x00000010L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IDCLK_SEL__SHIFT 0x00000004
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IPCIE_REFCLK_SEL_MASK 0x00000020L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IPCIE_REFCLK_SEL__SHIFT 0x00000005
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IXTALIN_SEL_MASK 0x00000040L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_IXTALIN_SEL__SHIFT 0x00000006
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PCIEREF_CLK_EN_MASK 0x00000800L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PCIEREF_CLK_EN__SHIFT 0x0000000b
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIVFRAC_SEL_MASK 0x00100000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIVFRAC_SEL__SHIFT 0x00000014
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIV_SEL_MASK 0xe0000000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PDIV_SEL__SHIFT 0x0000001d
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_DISPCLK_MODE_MASK 0x00000003L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_DISPCLK_MODE__SHIFT 0x00000000
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFCLK_SRC_MASK 0x00000700L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFCLK_SRC__SHIFT 0x00000008
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFDIV_MASK 0x1f000000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_REFDIV__SHIFT 0x00000018
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_TEST_FBDIV_FRAC_BYPASS_MASK 0x00080000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_TEST_FBDIV_FRAC_BYPASS__SHIFT 0x00000013
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_VTOI_BIAS_CNTL_MASK 0x00010000L
+#define UNIPHY_PLL_CONTROL2__UNIPHY_PLL_VTOI_BIAS_CNTL__SHIFT 0x00000010
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_FRACTION_MASK 0x0000fffcL
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_FRACTION__SHIFT 0x00000002
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV_MASK 0x0fff0000L
+#define UNIPHY_PLL_FBDIV__UNIPHY_PLL_FBDIV__SHIFT 0x00000010
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_DSMOD_EN_MASK 0x00001000L
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_DSMOD_EN__SHIFT 0x0000000c
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_EN_MASK 0x00002000L
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_EN__SHIFT 0x0000000d
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_STEP_NUM_MASK 0x00000fffL
+#define UNIPHY_PLL_SS_CNTL__UNIPHY_PLL_SS_STEP_NUM__SHIFT 0x00000000
+#define UNIPHY_PLL_SS_STEP_SIZE__UNIPHY_PLL_SS_STEP_SIZE_MASK 0x03ffffffL
+#define UNIPHY_PLL_SS_STEP_SIZE__UNIPHY_PLL_SS_STEP_SIZE__SHIFT 0x00000000
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ0P45_MASK 0x000f0000L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ0P45__SHIFT 0x00000010
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P00_MASK 0x00000f00L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P00__SHIFT 0x00000008
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P25_MASK 0x0000f000L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGADJ1P25__SHIFT 0x0000000c
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGPDN_MASK 0x00000001L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BGPDN__SHIFT 0x00000000
+#define UNIPHY_POWER_CONTROL__UNIPHY_BIASREF_SEL_MASK 0x00000004L
+#define UNIPHY_POWER_CONTROL__UNIPHY_BIASREF_SEL__SHIFT 0x00000002
+#define UNIPHY_POWER_CONTROL__UNIPHY_RST_LOGIC_MASK 0x00000002L
+#define UNIPHY_POWER_CONTROL__UNIPHY_RST_LOGIC__SHIFT 0x00000001
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_ERROR_MASK 0x01f00000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_ERROR__SHIFT 0x00000014
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_RESET_MASK 0x00008000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_RESET__SHIFT 0x0000000f
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_SEL_MASK 0x00010000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_DIG_BIST_SEL__SHIFT 0x00000010
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_INTRESET_MASK 0x20000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_INTRESET__SHIFT 0x0000001d
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_FREQ_LOCK_MASK 0x10000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_FREQ_LOCK__SHIFT 0x0000001c
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_VCTL_ADC_MASK 0x0e000000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_PLL_TEST_VCTL_ADC__SHIFT 0x00000019
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_CNTL_MASK 0x0000001fL
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_CNTL__SHIFT 0x00000000
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_VCTL_EN_MASK 0x00020000L
+#define UNIPHY_REG_TEST_OUTPUT__UNIPHY_TEST_VCTL_EN__SHIFT 0x00000011
+#define UNIPHY_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000001L
+#define UNIPHY_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x00000000
+#define UNIPHY_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000002L
+#define UNIPHY_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x00000001
+#define UNIPHY_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000004L
+#define UNIPHY_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0x00000002
+#define UNIPHY_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000008L
+#define UNIPHY_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0x00000003
+#define UNIPHY_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00000010L
+#define UNIPHY_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0x00000004
+#define UNIPHY_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00000020L
+#define UNIPHY_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0x00000005
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR0_MASK 0x00000007L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR0__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR1_MASK 0x00000070L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR1__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR2_MASK 0x00000700L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR2__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR3_MASK 0x00007000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR3__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR4_MASK 0x00070000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_PREMPH_STR4__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS0_MASK 0x00300000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS0__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS1_MASK 0x00c00000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS1__SHIFT 0x00000016
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS2_MASK 0x03000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS2__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS3_MASK 0x0c000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS3__SHIFT 0x0000001a
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS4_MASK 0x30000000L
+#define UNIPHY_TX_CONTROL1__UNIPHY_TX_VS4__SHIFT 0x0000001c
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH0_PC_MASK 0x00000003L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH0_PC__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH1_PC_MASK 0x00000030L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH1_PC__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH2_PC_MASK 0x00000300L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH2_PC__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH3_PC_MASK 0x00003000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH3_PC__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH4_PC_MASK 0x00030000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH4_PC__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH_SEL_MASK 0x00100000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_PREMPH_SEL__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT0_CPSEL_MASK 0x00600000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT0_CPSEL__SHIFT 0x00000015
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT1_CPSEL_MASK 0x01800000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT1_CPSEL__SHIFT 0x00000017
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT2_CPSEL_MASK 0x06000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT2_CPSEL__SHIFT 0x00000019
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT3_CPSEL_MASK 0x18000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT3_CPSEL__SHIFT 0x0000001b
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT4_CPSEL_MASK 0x60000000L
+#define UNIPHY_TX_CONTROL2__UNIPHY_RT4_CPSEL__SHIFT 0x0000001d
+#define UNIPHY_TX_CONTROL3__UNIPHY_LVDS_PULLDWN_MASK 0x80000000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_LVDS_PULLDWN__SHIFT 0x0000001f
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL0_MASK 0x00100000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL0__SHIFT 0x00000014
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL1_MASK 0x00200000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL1__SHIFT 0x00000015
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL2_MASK 0x00400000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL2__SHIFT 0x00000016
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL3_MASK 0x00800000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PESEL3__SHIFT 0x00000017
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_CLK_MASK 0x000000f0L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_CLK__SHIFT 0x00000004
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_DAT_MASK 0x00000f00L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_CS_DAT__SHIFT 0x00000008
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_CLK_MASK 0x00000003L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_CLK__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_DAT_MASK 0x0000000cL
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_PW_DAT__SHIFT 0x00000002
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_CLK_MASK 0x00007000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_CLK__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_DAT_MASK 0x00070000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_PREMPH_STR_DAT__SHIFT 0x00000010
+#define UNIPHY_TX_CONTROL3__UNIPHY_TX_VS_ADJ_MASK 0x1f000000L
+#define UNIPHY_TX_CONTROL3__UNIPHY_TX_VS_ADJ__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_CLK_MASK 0x0000001fL
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_CLK__SHIFT 0x00000000
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_DAT_MASK 0x000003e0L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_NVS_DAT__SHIFT 0x00000005
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_CLK_MASK 0x07000000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_CLK__SHIFT 0x00000018
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_DAT_MASK 0x70000000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_OP_DAT__SHIFT 0x0000001c
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_CLK_MASK 0x0001f000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_CLK__SHIFT 0x0000000c
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_DAT_MASK 0x003e0000L
+#define UNIPHY_TX_CONTROL4__UNIPHY_TX_PVS_DAT__SHIFT 0x00000011
+#define VGA25_PPLL_ANALOG__VGA25_CAL_MODE_MASK 0x0000001fL
+#define VGA25_PPLL_ANALOG__VGA25_CAL_MODE__SHIFT 0x00000000
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_CP_MASK 0x00000f00L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_CP__SHIFT 0x00000008
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_IBIAS_MASK 0xff000000L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA25_PPLL_ANALOG__VGA25_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA25_PPLL_FB_DIV__VGA25_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA25_PPLL_POST_DIV__VGA25_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA25_PPLL_REF_DIV__VGA25_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA25_PPLL_REF_DIV__VGA25_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA28_PPLL_ANALOG__VGA28_CAL_MODE_MASK 0x0000001fL
+#define VGA28_PPLL_ANALOG__VGA28_CAL_MODE__SHIFT 0x00000000
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_CP_MASK 0x00000f00L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_CP__SHIFT 0x00000008
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_IBIAS_MASK 0xff000000L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA28_PPLL_ANALOG__VGA28_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA28_PPLL_FB_DIV__VGA28_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA28_PPLL_POST_DIV__VGA28_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA28_PPLL_REF_DIV__VGA28_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA28_PPLL_REF_DIV__VGA28_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA41_PPLL_ANALOG__VGA41_CAL_MODE_MASK 0x0000001fL
+#define VGA41_PPLL_ANALOG__VGA41_CAL_MODE__SHIFT 0x00000000
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_CP_MASK 0x00000f00L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_CP__SHIFT 0x00000008
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_IBIAS_MASK 0xff000000L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_IBIAS__SHIFT 0x00000018
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_LF_MODE_MASK 0x001ff000L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_LF_MODE__SHIFT 0x0000000c
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_PFD_PULSE_SEL_MASK 0x00000060L
+#define VGA41_PPLL_ANALOG__VGA41_PPLL_PFD_PULSE_SEL__SHIFT 0x00000005
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_CNTL_MASK 0x00000030L
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_CNTL__SHIFT 0x00000004
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION_MASK 0x0000000fL
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_FRACTION__SHIFT 0x00000000
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV_MASK 0x07ff0000L
+#define VGA41_PPLL_FB_DIV__VGA41_PPLL_FB_DIV__SHIFT 0x00000010
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_DVOCLK_MASK 0x00007f00L
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_DVOCLK__SHIFT 0x00000008
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_IDCLK_MASK 0x007f0000L
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_IDCLK__SHIFT 0x00000010
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_PIXCLK_MASK 0x0000007fL
+#define VGA41_PPLL_POST_DIV__VGA41_PPLL_POST_DIV_PIXCLK__SHIFT 0x00000000
+#define VGA41_PPLL_REF_DIV__VGA41_PPLL_REF_DIV_MASK 0x000003ffL
+#define VGA41_PPLL_REF_DIV__VGA41_PPLL_REF_DIV__SHIFT 0x00000000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY_MASK 0x00100000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY__SHIFT 0x00000014
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT_MASK 0x3f000000L
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT__SHIFT 0x00000018
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE_MASK 0x00010000L
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE__SHIFT 0x00000010
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE_MASK 0x00000100L
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE__SHIFT 0x00000008
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS_MASK 0x00000001L
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS__SHIFT 0x00000000
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffffL
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x00000000
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA_MASK 0xffffffffL
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA__SHIFT 0x00000000
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX_MASK 0x000000ffL
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX__SHIFT 0x00000000
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR_MASK 0x01ffffffL
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR__SHIFT 0x00000000
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR_MASK 0x01ffffffL
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR__SHIFT 0x00000000
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK 0x00000010L
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x00000004
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN_MASK 0x00000001L
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN__SHIFT 0x00000000
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE_MASK 0x00000100L
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE__SHIFT 0x00000008
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET_MASK 0x00010000L
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET__SHIFT 0x00000010
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL_MASK 0x01000000L
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL__SHIFT 0x00000018
+#define VGA_HW_DEBUG__VGA_HW_DEBUG_MASK 0xffffffffL
+#define VGA_HW_DEBUG__VGA_HW_DEBUG__SHIFT 0x00000000
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK_MASK 0x00010000L
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK__SHIFT 0x00000010
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK_MASK 0x00000001L
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK__SHIFT 0x00000000
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK_MASK 0x01000000L
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK__SHIFT 0x00000018
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK_MASK 0x00000100L
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK__SHIFT 0x00000008
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS_MASK 0x00000004L
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS__SHIFT 0x00000002
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS_MASK 0x00000001L
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS__SHIFT 0x00000000
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS_MASK 0x00000008L
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS__SHIFT 0x00000003
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS_MASK 0x00000002L
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS__SHIFT 0x00000001
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT_MASK 0x00000003L
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT__SHIFT 0x00000000
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE_MASK 0x20000000L
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE__SHIFT 0x0000001d
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT_MASK 0x80000000L
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT__SHIFT 0x0000001f
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT_MASK 0x03000000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT__SHIFT 0x00000018
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT_MASK 0x00030000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT__SHIFT 0x00000010
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT_MASK 0x04000000L
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT__SHIFT 0x0000001a
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT_MASK 0x00000300L
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT__SHIFT 0x00000008
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE_MASK 0x08000000L
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE__SHIFT 0x0000001b
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT_MASK 0x00000018L
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT__SHIFT 0x00000003
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION_MASK 0x000000e0L
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION__SHIFT 0x00000005
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE_MASK 0x10000000L
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE__SHIFT 0x0000001c
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH_MASK 0x000000ffL
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH__SHIFT 0x00000000
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS_MASK 0xffffffffL
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS__SHIFT 0x00000000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR_MASK 0x000003ffL
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR__SHIFT 0x00000000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR_MASK 0x03ff0000L
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR__SHIFT 0x00000010
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR_MASK 0x000003ffL
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR__SHIFT 0x00000000
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR_MASK 0x03ff0000L
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR__SHIFT 0x00000010
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING_MASK 0x00000100L
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING__SHIFT 0x00000008
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR_MASK 0x00000001L
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR__SHIFT 0x00000000
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE_MASK 0x00000030L
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE__SHIFT 0x00000004
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN_MASK 0x00010000L
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN__SHIFT 0x00000010
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK 0x00000060L
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x00000005
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK 0x0000001fL
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x00000000
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT_MASK 0x00000080L
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT__SHIFT 0x00000007
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE_MASK 0x00000100L
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE__SHIFT 0x00000008
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT_MASK 0x01000000L
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT__SHIFT 0x00000018
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL_MASK 0x02000000L
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL__SHIFT 0x00000019
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK 0x00030000L
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL__SHIFT 0x00000010
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000001L
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000000
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000100L
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x00000008
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000002L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000001
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000200L
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x00000009
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000004L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000002
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000400L
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000a
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000008L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000003
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00000800L
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000b
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000010L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000004
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00001000L
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000c
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x00000020L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x00000005
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x00002000L
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x0000000d
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE_MASK 0x00010000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE__SHIFT 0x00000010
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT_MASK 0x00fc0000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT__SHIFT 0x00000012
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT_MASK 0x00020000L
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT__SHIFT 0x00000011
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A_MASK 0x00000007L
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A__SHIFT 0x00000000
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B_MASK 0x00000700L
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B__SHIFT 0x00000008
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR_MASK 0x00010000L
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR__SHIFT 0x00000010
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR_MASK 0x00000001L
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR__SHIFT 0x00000000
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR_MASK 0x01000000L
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR__SHIFT 0x00000018
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR_MASK 0x00000100L
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR__SHIFT 0x00000008
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS_MASK 0x00000004L
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS__SHIFT 0x00000002
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS_MASK 0x00000001L
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS__SHIFT 0x00000000
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS_MASK 0x00000008L
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS__SHIFT 0x00000003
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS_MASK 0x00000002L
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS__SHIFT 0x00000001
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT_MASK 0x00000300L
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT__SHIFT 0x00000008
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT_MASK 0x00000003L
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT__SHIFT 0x00000000
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE_MASK 0x00000001L
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE__SHIFT 0x00000000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT_MASK 0x01000000L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT__SHIFT 0x00000018
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE_MASK 0x00010000L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE__SHIFT 0x00000010
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START_MASK 0x00000100L
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START__SHIFT 0x00000008
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x00003fffL
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x00000000
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x3fff0000L
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x00000010
+#define VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000L
+#define VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x00000010
+#define VIEWPORT_START__VIEWPORT_Y_START_MASK 0x00003fffL
+#define VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x00000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS_MASK 0x00008000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS__SHIFT 0x0000000f
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS_MASK 0x00080000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS__SHIFT 0x00000013
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS_MASK 0x00040000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS__SHIFT 0x00000012
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS_MASK 0x00100000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS__SHIFT 0x00000014
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS_MASK 0x00010000L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS__SHIFT 0x00000010
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY_MASK 0x00000ff0L
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY__SHIFT 0x00000004
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY_MASK 0x0000000fL
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY__SHIFT 0x00000000
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR_MASK 0x00000100L
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR__SHIFT 0x00000008
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS_MASK 0x0000000fL
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS__SHIFT 0x00000000
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK_MASK 0x00000400L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK__SHIFT 0x0000000a
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK_MASK 0x00000200L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK__SHIFT 0x00000009
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT_MASK 0x00000100L
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT__SHIFT 0x00000008
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_ACK_MASK 0x00004000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_ACK__SHIFT 0x0000000e
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_MASK_MASK 0x00002000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_MASK__SHIFT 0x0000000d
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_STAT_MASK 0x00001000L
+#define XDMA_INTERRUPT__XDMA_MSTR_UNDERFLOW_STAT__SHIFT 0x0000000c
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK_MASK 0x00040000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK__SHIFT 0x00000012
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK_MASK 0x00020000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK__SHIFT 0x00000011
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT_MASK 0x00010000L
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT__SHIFT 0x00000010
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE_MASK 0x0000000fL
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE__SHIFT 0x00000000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT_MASK 0x00000c00L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT__SHIFT 0x0000000a
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH_MASK 0x00000300L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH__SHIFT 0x00000008
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT_MASK 0x00003000L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT__SHIFT 0x0000000c
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS_MASK 0x00300000L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS__SHIFT 0x00000014
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT_MASK 0x00000070L
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT__SHIFT 0x00000004
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE_MASK 0x00c00000L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE__SHIFT 0x00000016
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG_MASK 0xf8000000L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG__SHIFT 0x0000001b
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE_MASK 0x00000007L
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE__SHIFT 0x00000000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV_MASK 0x00010000L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV__SHIFT 0x00000010
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP_MASK 0x00000300L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP__SHIFT 0x00000008
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID_MASK 0x0000f000L
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID__SHIFT 0x0000000c
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_DIS__SHIFT 0x00000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_MODE_FORCE_MASK 0x00010000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_LIGHT_SLEEP_MODE_FORCE__SHIFT 0x00000010
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_POWER_STATE_MASK 0xc0000000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_POWER_STATE__SHIFT 0x0000001e
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_DIS_MASK 0x00000100L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_DIS__SHIFT 0x00000008
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_MODE_FORCE_MASK 0x01000000L
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_SHUTDOWN_MODE_FORCE__SHIFT 0x00000018
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE_MASK 0x00040000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE__SHIFT 0x00000012
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE_MASK 0x00010000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE__SHIFT 0x00000010
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY_MASK 0x00000200L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY__SHIFT 0x00000009
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET_MASK 0x00100000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET__SHIFT 0x00000014
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT_MASK 0x00003fffL
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT__SHIFT 0x00000000
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT_MASK 0x3fff0000L
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT__SHIFT 0x00000010
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_MASK 0xffffffffL
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__SHIFT 0x00000000
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH_MASK 0x00003fffL
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV_MASK 0x00010000L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV__SHIFT 0x00000010
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP_MASK 0x00000300L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP__SHIFT 0x00000008
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID_MASK 0x0000f000L
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR_MASK 0x00010000L
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR__SHIFT 0x00000010
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_MASK 0x00003000L
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG_MASK 0x000003ffL
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL_MASK 0x00000001L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT_MASK 0x000000f0L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT__SHIFT 0x00000004
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER_MASK 0xffff0000L
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER__SHIFT 0x00000010
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR_MASK 0x00010000L
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR__SHIFT 0x00000010
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_MASK 0x00003000L
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK__SHIFT 0x0000000c
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG_MASK 0x000003ffL
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG__SHIFT 0x00000000
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH_MASK 0x3fff0000L
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH__SHIFT 0x00000010
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE_MASK 0x00003fffL
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS_MASK 0xffffffffL
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH_MASK 0x000000ffL
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__SHIFT 0x00000000
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE_MASK 0xffffffffL
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE__SHIFT 0x00000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT_MASK 0x00003fffL
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT__SHIFT 0x00000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT_MASK 0x3fff0000L
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT__SHIFT 0x00000010
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY_MASK 0x00000007L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY__SHIFT 0x00000000
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS_MASK 0x00000008L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS__SHIFT 0x00000003
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY_MASK 0xffff8000L
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY__SHIFT 0x0000000f
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE_MASK 0x00000100L
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE__SHIFT 0x00000008
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE_MASK 0x00010000L
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE__SHIFT 0x00000010
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY_MASK 0x00000200L
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY__SHIFT 0x00000009
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN_MASK 0x00080000L
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN__SHIFT 0x00000013
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET_MASK 0x00100000L
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET__SHIFT 0x00000014
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING_MASK 0x00000001L
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING__SHIFT 0x00000000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV_MASK 0x00010000L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV__SHIFT 0x00000010
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP_MASK 0x00000300L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP__SHIFT 0x00000008
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID_MASK 0x0000f000L
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID__SHIFT 0x0000000c
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR_MASK 0x80000000L
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR__SHIFT 0x0000001f
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_MASK 0x00030000L
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK__SHIFT 0x00000010
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG_MASK 0x0000ffffL
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG__SHIFT 0x00000000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR_MASK 0x00010000L
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR__SHIFT 0x00000010
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_MASK 0x00003000L
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK__SHIFT 0x0000000c
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG_MASK 0x000003ffL
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC_MASK 0x000fffffL
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT_MASK 0xfff00000L
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT__SHIFT 0x00000014
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX_MASK 0xffff0000L
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX__SHIFT 0x00000010
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN_MASK 0x0000ffffL
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN__SHIFT 0x00000000
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER_MASK 0x0000ffffL
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER__SHIFT 0x00000000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL_MASK 0x00000001L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL__SHIFT 0x00000000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT_MASK 0x000000f0L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT__SHIFT 0x00000004
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER_MASK 0xffff0000L
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER__SHIFT 0x00000010
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH_MASK 0x000000ffL
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x00000000
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS_MASK 0xffffffffL
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS__SHIFT 0x00000000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH_MASK 0x00003fffL
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH__SHIFT 0x00000000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH_MASK 0x3fff0000L
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH__SHIFT 0x00000010
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD_MASK 0xffff0000L
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD__SHIFT 0x00000010
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE_MASK 0x000001ffL
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE__SHIFT 0x00000000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY_MASK 0x0000f000L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY__SHIFT 0x0000000c
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_MASK 0x00000001L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL__SHIFT 0x00000000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL_MASK 0x00000f00L
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL__SHIFT 0x00000008
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+
+#endif
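
As a quick orientation for readers of these generated headers: each register field is described by a <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pair, and callers consume them with the usual mask-and-shift pattern. The sketch below is a minimal, self-contained illustration using the VIEWPORT_SIZE width field defined above; the helper function names and the idea of operating on an already-read 32-bit register value are illustrative assumptions, not part of this patch (the driver layers REG_GET_FIELD()/REG_SET_FIELD()-style macros and its own MMIO accessors on top of definitions like these).

/*
 * Illustrative only: read and update one field of a 32-bit register value
 * using the generated <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pair.
 * The mask/shift values below are copied from the header added above.
 */
#include <stdint.h>

#define VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK   0x3fff0000L
#define VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x00000010

/* Extract the field: mask off the other bits, then shift down to bit 0. */
static inline uint32_t get_viewport_width(uint32_t viewport_size_reg)
{
	return (viewport_size_reg & VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK) >>
	       VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT;
}

/* Update the field: clear its bits, then OR in the shifted new value. */
static inline uint32_t set_viewport_width(uint32_t viewport_size_reg,
					  uint32_t width)
{
	viewport_size_reg &= ~VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK;
	viewport_size_reg |= (width << VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT) &
			     VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK;
	return viewport_size_reg;
}
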
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
new file mode 100644
index 000000000000..c75aee25619e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -0,0 +1,1784 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GFX_6_0_D_H
+#define GFX_6_0_D_H
+
+#define ixCLIPPER_DEBUG_REG00 0x0000
+#define ixCLIPPER_DEBUG_REG01 0x0001
+#define ixCLIPPER_DEBUG_REG02 0x0002
+#define ixCLIPPER_DEBUG_REG03 0x0003
+#define ixCLIPPER_DEBUG_REG04 0x0004
+#define ixCLIPPER_DEBUG_REG05 0x0005
+#define ixCLIPPER_DEBUG_REG06 0x0006
+#define ixCLIPPER_DEBUG_REG07 0x0007
+#define ixCLIPPER_DEBUG_REG08 0x0008
+#define ixCLIPPER_DEBUG_REG09 0x0009
+#define ixCLIPPER_DEBUG_REG10 0x000A
+#define ixCLIPPER_DEBUG_REG11 0x000B
+#define ixCLIPPER_DEBUG_REG12 0x000C
+#define ixCLIPPER_DEBUG_REG13 0x000D
+#define ixCLIPPER_DEBUG_REG14 0x000E
+#define ixCLIPPER_DEBUG_REG15 0x000F
+#define ixCLIPPER_DEBUG_REG16 0x0010
+#define ixCLIPPER_DEBUG_REG17 0x0011
+#define ixCLIPPER_DEBUG_REG18 0x0012
+#define ixCLIPPER_DEBUG_REG19 0x0013
+#define ixGDS_DEBUG_REG0 0x0000
+#define ixGDS_DEBUG_REG1 0x0001
+#define ixGDS_DEBUG_REG2 0x0002
+#define ixGDS_DEBUG_REG3 0x0003
+#define ixGDS_DEBUG_REG4 0x0004
+#define ixGDS_DEBUG_REG5 0x0005
+#define ixGDS_DEBUG_REG6 0x0006
+#define ixIA_DEBUG_REG0 0x0000
+#define ixIA_DEBUG_REG1 0x0001
+#define ixIA_DEBUG_REG2 0x0002
+#define ixIA_DEBUG_REG3 0x0003
+#define ixIA_DEBUG_REG4 0x0004
+#define ixIA_DEBUG_REG5 0x0005
+#define ixIA_DEBUG_REG6 0x0006
+#define ixIA_DEBUG_REG7 0x0007
+#define ixIA_DEBUG_REG8 0x0008
+#define ixIA_DEBUG_REG9 0x0009
+#define ixPA_SC_DEBUG_REG0 0x0000
+#define ixPA_SC_DEBUG_REG1 0x0001
+#define ixSETUP_DEBUG_REG0 0x0018
+#define ixSETUP_DEBUG_REG1 0x0019
+#define ixSETUP_DEBUG_REG2 0x001A
+#define ixSETUP_DEBUG_REG3 0x001B
+#define ixSETUP_DEBUG_REG4 0x001C
+#define ixSETUP_DEBUG_REG5 0x001D
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_INTERRUPT_WORD_AUTO 0x20C0
+#define ixSQ_INTERRUPT_WORD_CMN 0x20C0
+#define ixSQ_INTERRUPT_WORD_WAVE 0x20C0
+#define ixSQ_WAVE_EXEC_HI 0x027F
+#define ixSQ_WAVE_EXEC_LO 0x027E
+#define ixSQ_WAVE_GPR_ALLOC 0x0015
+#define ixSQ_WAVE_HW_ID 0x0014
+#define ixSQ_WAVE_IB_DBG0 0x001C
+#define ixSQ_WAVE_IB_STS 0x0017
+#define ixSQ_WAVE_INST_DW0 0x001A
+#define ixSQ_WAVE_INST_DW1 0x001B
+#define ixSQ_WAVE_LDS_ALLOC 0x0016
+#define ixSQ_WAVE_M0 0x027C
+#define ixSQ_WAVE_MODE 0x0011
+#define ixSQ_WAVE_PC_HI 0x0019
+#define ixSQ_WAVE_PC_LO 0x0018
+#define ixSQ_WAVE_STATUS 0x0012
+#define ixSQ_WAVE_TBA_HI 0x026D
+#define ixSQ_WAVE_TBA_LO 0x026C
+#define ixSQ_WAVE_TMA_HI 0x026F
+#define ixSQ_WAVE_TMA_LO 0x026E
+#define ixSQ_WAVE_TRAPSTS 0x0013
+#define ixSQ_WAVE_TTMP0 0x0270
+#define ixSQ_WAVE_TTMP10 0x027A
+#define ixSQ_WAVE_TTMP1 0x0271
+#define ixSQ_WAVE_TTMP11 0x027B
+#define ixSQ_WAVE_TTMP2 0x0272
+#define ixSQ_WAVE_TTMP3 0x0273
+#define ixSQ_WAVE_TTMP4 0x0274
+#define ixSQ_WAVE_TTMP5 0x0275
+#define ixSQ_WAVE_TTMP6 0x0276
+#define ixSQ_WAVE_TTMP7 0x0277
+#define ixSQ_WAVE_TTMP8 0x0278
+#define ixSQ_WAVE_TTMP9 0x0279
+#define ixSXIFCCG_DEBUG_REG0 0x0014
+#define ixSXIFCCG_DEBUG_REG1 0x0015
+#define ixSXIFCCG_DEBUG_REG2 0x0016
+#define ixSXIFCCG_DEBUG_REG3 0x0017
+#define ixVGT_DEBUG_REG0 0x0000
+#define ixVGT_DEBUG_REG10 0x000A
+#define ixVGT_DEBUG_REG1 0x0001
+#define ixVGT_DEBUG_REG11 0x000B
+#define ixVGT_DEBUG_REG12 0x000C
+#define ixVGT_DEBUG_REG13 0x000D
+#define ixVGT_DEBUG_REG14 0x000E
+#define ixVGT_DEBUG_REG15 0x000F
+#define ixVGT_DEBUG_REG16 0x0010
+#define ixVGT_DEBUG_REG17 0x0011
+#define ixVGT_DEBUG_REG18 0x0012
+#define ixVGT_DEBUG_REG19 0x0013
+#define ixVGT_DEBUG_REG20 0x0014
+#define ixVGT_DEBUG_REG2 0x0002
+#define ixVGT_DEBUG_REG21 0x0015
+#define ixVGT_DEBUG_REG22 0x0016
+#define ixVGT_DEBUG_REG23 0x0017
+#define ixVGT_DEBUG_REG24 0x0018
+#define ixVGT_DEBUG_REG25 0x0019
+#define ixVGT_DEBUG_REG26 0x001A
+#define ixVGT_DEBUG_REG27 0x001B
+#define ixVGT_DEBUG_REG28 0x001C
+#define ixVGT_DEBUG_REG29 0x001D
+#define ixVGT_DEBUG_REG30 0x001E
+#define ixVGT_DEBUG_REG3 0x0003
+#define ixVGT_DEBUG_REG31 0x001F
+#define ixVGT_DEBUG_REG32 0x0020
+#define ixVGT_DEBUG_REG33 0x0021
+#define ixVGT_DEBUG_REG34 0x0022
+#define ixVGT_DEBUG_REG35 0x0023
+#define ixVGT_DEBUG_REG36 0x0024
+#define ixVGT_DEBUG_REG4 0x0004
+#define ixVGT_DEBUG_REG5 0x0005
+#define ixVGT_DEBUG_REG6 0x0006
+#define ixVGT_DEBUG_REG7 0x0007
+#define ixVGT_DEBUG_REG8 0x0008
+#define ixVGT_DEBUG_REG9 0x0009
+#define mmBCI_DEBUG_READ 0x24E3
+#define mmCB_BLEND0_CONTROL 0xA1E0
+#define mmCB_BLEND1_CONTROL 0xA1E1
+#define mmCB_BLEND2_CONTROL 0xA1E2
+#define mmCB_BLEND3_CONTROL 0xA1E3
+#define mmCB_BLEND4_CONTROL 0xA1E4
+#define mmCB_BLEND5_CONTROL 0xA1E5
+#define mmCB_BLEND6_CONTROL 0xA1E6
+#define mmCB_BLEND7_CONTROL 0xA1E7
+#define mmCB_BLEND_ALPHA 0xA108
+#define mmCB_BLEND_BLUE 0xA107
+#define mmCB_BLEND_GREEN 0xA106
+#define mmCB_BLEND_RED 0xA105
+#define mmCB_CGTT_SCLK_CTRL 0x2698
+#define mmCB_COLOR0_ATTRIB 0xA31D
+#define mmCB_COLOR0_BASE 0xA318
+#define mmCB_COLOR0_CLEAR_WORD0 0xA323
+#define mmCB_COLOR0_CLEAR_WORD1 0xA324
+#define mmCB_COLOR0_CMASK 0xA31F
+#define mmCB_COLOR0_CMASK_SLICE 0xA320
+#define mmCB_COLOR0_FMASK 0xA321
+#define mmCB_COLOR0_FMASK_SLICE 0xA322
+#define mmCB_COLOR0_INFO 0xA31C
+#define mmCB_COLOR0_PITCH 0xA319
+#define mmCB_COLOR0_SLICE 0xA31A
+#define mmCB_COLOR0_VIEW 0xA31B
+#define mmCB_COLOR1_ATTRIB 0xA32C
+#define mmCB_COLOR1_BASE 0xA327
+#define mmCB_COLOR1_CLEAR_WORD0 0xA332
+#define mmCB_COLOR1_CLEAR_WORD1 0xA333
+#define mmCB_COLOR1_CMASK 0xA32E
+#define mmCB_COLOR1_CMASK_SLICE 0xA32F
+#define mmCB_COLOR1_FMASK 0xA330
+#define mmCB_COLOR1_FMASK_SLICE 0xA331
+#define mmCB_COLOR1_INFO 0xA32B
+#define mmCB_COLOR1_PITCH 0xA328
+#define mmCB_COLOR1_SLICE 0xA329
+#define mmCB_COLOR1_VIEW 0xA32A
+#define mmCB_COLOR2_ATTRIB 0xA33B
+#define mmCB_COLOR2_BASE 0xA336
+#define mmCB_COLOR2_CLEAR_WORD0 0xA341
+#define mmCB_COLOR2_CLEAR_WORD1 0xA342
+#define mmCB_COLOR2_CMASK 0xA33D
+#define mmCB_COLOR2_CMASK_SLICE 0xA33E
+#define mmCB_COLOR2_FMASK 0xA33F
+#define mmCB_COLOR2_FMASK_SLICE 0xA340
+#define mmCB_COLOR2_INFO 0xA33A
+#define mmCB_COLOR2_PITCH 0xA337
+#define mmCB_COLOR2_SLICE 0xA338
+#define mmCB_COLOR2_VIEW 0xA339
+#define mmCB_COLOR3_ATTRIB 0xA34A
+#define mmCB_COLOR3_BASE 0xA345
+#define mmCB_COLOR3_CLEAR_WORD0 0xA350
+#define mmCB_COLOR3_CLEAR_WORD1 0xA351
+#define mmCB_COLOR3_CMASK 0xA34C
+#define mmCB_COLOR3_CMASK_SLICE 0xA34D
+#define mmCB_COLOR3_FMASK 0xA34E
+#define mmCB_COLOR3_FMASK_SLICE 0xA34F
+#define mmCB_COLOR3_INFO 0xA349
+#define mmCB_COLOR3_PITCH 0xA346
+#define mmCB_COLOR3_SLICE 0xA347
+#define mmCB_COLOR3_VIEW 0xA348
+#define mmCB_COLOR4_ATTRIB 0xA359
+#define mmCB_COLOR4_BASE 0xA354
+#define mmCB_COLOR4_CLEAR_WORD0 0xA35F
+#define mmCB_COLOR4_CLEAR_WORD1 0xA360
+#define mmCB_COLOR4_CMASK 0xA35B
+#define mmCB_COLOR4_CMASK_SLICE 0xA35C
+#define mmCB_COLOR4_FMASK 0xA35D
+#define mmCB_COLOR4_FMASK_SLICE 0xA35E
+#define mmCB_COLOR4_INFO 0xA358
+#define mmCB_COLOR4_PITCH 0xA355
+#define mmCB_COLOR4_SLICE 0xA356
+#define mmCB_COLOR4_VIEW 0xA357
+#define mmCB_COLOR5_ATTRIB 0xA368
+#define mmCB_COLOR5_BASE 0xA363
+#define mmCB_COLOR5_CLEAR_WORD0 0xA36E
+#define mmCB_COLOR5_CLEAR_WORD1 0xA36F
+#define mmCB_COLOR5_CMASK 0xA36A
+#define mmCB_COLOR5_CMASK_SLICE 0xA36B
+#define mmCB_COLOR5_FMASK 0xA36C
+#define mmCB_COLOR5_FMASK_SLICE 0xA36D
+#define mmCB_COLOR5_INFO 0xA367
+#define mmCB_COLOR5_PITCH 0xA364
+#define mmCB_COLOR5_SLICE 0xA365
+#define mmCB_COLOR5_VIEW 0xA366
+#define mmCB_COLOR6_ATTRIB 0xA377
+#define mmCB_COLOR6_BASE 0xA372
+#define mmCB_COLOR6_CLEAR_WORD0 0xA37D
+#define mmCB_COLOR6_CLEAR_WORD1 0xA37E
+#define mmCB_COLOR6_CMASK 0xA379
+#define mmCB_COLOR6_CMASK_SLICE 0xA37A
+#define mmCB_COLOR6_FMASK 0xA37B
+#define mmCB_COLOR6_FMASK_SLICE 0xA37C
+#define mmCB_COLOR6_INFO 0xA376
+#define mmCB_COLOR6_PITCH 0xA373
+#define mmCB_COLOR6_SLICE 0xA374
+#define mmCB_COLOR6_VIEW 0xA375
+#define mmCB_COLOR7_ATTRIB 0xA386
+#define mmCB_COLOR7_BASE 0xA381
+#define mmCB_COLOR7_CLEAR_WORD0 0xA38C
+#define mmCB_COLOR7_CLEAR_WORD1 0xA38D
+#define mmCB_COLOR7_CMASK 0xA388
+#define mmCB_COLOR7_CMASK_SLICE 0xA389
+#define mmCB_COLOR7_FMASK 0xA38A
+#define mmCB_COLOR7_FMASK_SLICE 0xA38B
+#define mmCB_COLOR7_INFO 0xA385
+#define mmCB_COLOR7_PITCH 0xA382
+#define mmCB_COLOR7_SLICE 0xA383
+#define mmCB_COLOR7_VIEW 0xA384
+#define mmCB_COLOR_CONTROL 0xA202
+#define mmCB_DEBUG_BUS_10 0x26A2
+#define mmCB_DEBUG_BUS_1 0x2699
+#define mmCB_DEBUG_BUS_11 0x26A3
+#define mmCB_DEBUG_BUS_12 0x26A4
+#define mmCB_DEBUG_BUS_13 0x26A5
+#define mmCB_DEBUG_BUS_14 0x26A6
+#define mmCB_DEBUG_BUS_15 0x26A7
+#define mmCB_DEBUG_BUS_16 0x26A8
+#define mmCB_DEBUG_BUS_17 0x26A9
+#define mmCB_DEBUG_BUS_18 0x26AA
+#define mmCB_DEBUG_BUS_2 0x269A
+#define mmCB_DEBUG_BUS_3 0x269B
+#define mmCB_DEBUG_BUS_4 0x269C
+#define mmCB_DEBUG_BUS_5 0x269D
+#define mmCB_DEBUG_BUS_6 0x269E
+#define mmCB_DEBUG_BUS_7 0x269F
+#define mmCB_DEBUG_BUS_8 0x26A0
+#define mmCB_DEBUG_BUS_9 0x26A1
+#define mmCB_HW_CONTROL 0x2684
+#define mmCB_HW_CONTROL_1 0x2685
+#define mmCB_HW_CONTROL_2 0x2686
+#define mmCB_PERFCOUNTER0_HI 0x2691
+#define mmCB_PERFCOUNTER0_LO 0x2690
+#define mmCB_PERFCOUNTER0_SELECT1 0x2689
+#define mmCB_PERFCOUNTER1_HI 0x2693
+#define mmCB_PERFCOUNTER1_LO 0x2692
+#define mmCB_PERFCOUNTER2_HI 0x2695
+#define mmCB_PERFCOUNTER2_LO 0x2694
+#define mmCB_PERFCOUNTER3_HI 0x2697
+#define mmCB_PERFCOUNTER3_LO 0x2696
+#define mmCB_SHADER_MASK 0xA08F
+#define mmCB_TARGET_MASK 0xA08E
+#define mmCC_GC_SHADER_ARRAY_CONFIG 0x226F
+#define mmCC_RB_BACKEND_DISABLE 0x263D
+#define mmCC_RB_DAISY_CHAIN 0x2641
+#define mmCC_RB_REDUNDANCY 0x263C
+#define mmCC_SQC_BANK_DISABLE 0x2307
+#define mmCGTS_RD_CTRL_REG 0x2455
+#define mmCGTS_RD_REG 0x2456
+#define mmCGTS_SM_CTRL_REG 0x2454
+#define mmCGTS_TCC_DISABLE 0x2452
+#define mmCGTS_USER_TCC_DISABLE 0x2453
+#define mmCGTT_BCI_CLK_CTRL 0x24A9
+#define mmCGTT_CP_CLK_CTRL 0x3059
+#define mmCGTT_GDS_CLK_CTRL 0x25DD
+#define mmCGTT_IA_CLK_CTRL 0x2261
+#define mmCGTT_PA_CLK_CTRL 0x2286
+#define mmCGTT_PC_CLK_CTRL 0x24A8
+#define mmCGTT_RLC_CLK_CTRL 0x30E0
+#define mmCGTT_SC_CLK_CTRL 0x22CA
+#define mmCGTT_SPI_CLK_CTRL 0x2451
+#define mmCGTT_SQ_CLK_CTRL 0x2362
+#define mmCGTT_SQG_CLK_CTRL 0x2363
+#define mmCGTT_SX_CLK_CTRL0 0x240C
+#define mmCGTT_SX_CLK_CTRL1 0x240D
+#define mmCGTT_SX_CLK_CTRL2 0x240E
+#define mmCGTT_SX_CLK_CTRL3 0x240F
+#define mmCGTT_SX_CLK_CTRL4 0x2410
+#define mmCGTT_TCI_CLK_CTRL 0x2B60
+#define mmCGTT_TCP_CLK_CTRL 0x2B15
+#define mmCGTT_VGT_CLK_CTRL 0x225F
+#define mmCOHER_DEST_BASE_0 0xA092
+#define mmCOHER_DEST_BASE_1 0xA093
+#define mmCOHER_DEST_BASE_2 0xA07E
+#define mmCOHER_DEST_BASE_3 0xA07F
+#define mmCOMPUTE_DIM_X 0x2E01
+#define mmCOMPUTE_DIM_Y 0x2E02
+#define mmCOMPUTE_DIM_Z 0x2E03
+#define mmCOMPUTE_DISPATCH_INITIATOR 0x2E00
+#define mmCOMPUTE_NUM_THREAD_X 0x2E07
+#define mmCOMPUTE_NUM_THREAD_Y 0x2E08
+#define mmCOMPUTE_NUM_THREAD_Z 0x2E09
+#define mmCOMPUTE_PGM_HI 0x2E0D
+#define mmCOMPUTE_PGM_LO 0x2E0C
+#define mmCOMPUTE_PGM_RSRC1 0x2E12
+#define mmCOMPUTE_PGM_RSRC2 0x2E13
+#define mmCOMPUTE_RESOURCE_LIMITS 0x2E15
+#define mmCOMPUTE_START_X 0x2E04
+#define mmCOMPUTE_START_Y 0x2E05
+#define mmCOMPUTE_START_Z 0x2E06
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0 0x2E16
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1 0x2E17
+#define mmCOMPUTE_TBA_HI 0x2E0F
+#define mmCOMPUTE_TBA_LO 0x2E0E
+#define mmCOMPUTE_TMA_HI 0x2E11
+#define mmCOMPUTE_TMA_LO 0x2E10
+#define mmCOMPUTE_TMPRING_SIZE 0x2E18
+#define mmCOMPUTE_USER_DATA_0 0x2E40
+#define mmCOMPUTE_USER_DATA_10 0x2E4A
+#define mmCOMPUTE_USER_DATA_1 0x2E41
+#define mmCOMPUTE_USER_DATA_11 0x2E4B
+#define mmCOMPUTE_USER_DATA_12 0x2E4C
+#define mmCOMPUTE_USER_DATA_13 0x2E4D
+#define mmCOMPUTE_USER_DATA_14 0x2E4E
+#define mmCOMPUTE_USER_DATA_15 0x2E4F
+#define mmCOMPUTE_USER_DATA_2 0x2E42
+#define mmCOMPUTE_USER_DATA_3 0x2E43
+#define mmCOMPUTE_USER_DATA_4 0x2E44
+#define mmCOMPUTE_USER_DATA_5 0x2E45
+#define mmCOMPUTE_USER_DATA_6 0x2E46
+#define mmCOMPUTE_USER_DATA_7 0x2E47
+#define mmCOMPUTE_USER_DATA_8 0x2E48
+#define mmCOMPUTE_USER_DATA_9 0x2E49
+#define mmCOMPUTE_VMID 0x2E14
+#define mmCP_APPEND_ADDR_HI 0x2159
+#define mmCP_APPEND_ADDR_LO 0x2158
+#define mmCP_APPEND_DATA 0x215A
+#define mmCP_APPEND_LAST_CS_FENCE 0x215B
+#define mmCP_APPEND_LAST_PS_FENCE 0x215C
+#define mmCP_ATOMIC_PREOP_HI 0x215E
+#define mmCP_ATOMIC_PREOP_LO 0x215D
+#define mmCP_BUSY_STAT 0x219F
+#define mmCP_CE_HEADER_DUMP 0x21A4
+#define mmCP_CE_IB1_BASE_HI 0x21C7
+#define mmCP_CE_IB1_BASE_LO 0x21C6
+#define mmCP_CE_IB1_BUFSZ 0x21C8
+#define mmCP_CE_IB2_BASE_HI 0x21CA
+#define mmCP_CE_IB2_BASE_LO 0x21C9
+#define mmCP_CE_IB2_BUFSZ 0x21CB
+#define mmCP_CE_INIT_BASE_HI 0x21C4
+#define mmCP_CE_INIT_BASE_LO 0x21C3
+#define mmCP_CE_INIT_BUFSZ 0x21C5
+#define mmCP_CEQ1_AVAIL 0x21E6
+#define mmCP_CEQ2_AVAIL 0x21E7
+#define mmCP_CE_ROQ_IB1_STAT 0x21E9
+#define mmCP_CE_ROQ_IB2_STAT 0x21EA
+#define mmCP_CE_ROQ_RB_STAT 0x21E8
+#define mmCP_CE_UCODE_ADDR 0x305A
+#define mmCP_CE_UCODE_DATA 0x305B
+#define mmCP_CMD_DATA 0x21DF
+#define mmCP_CMD_INDEX 0x21DE
+#define mmCP_CNTX_STAT 0x21B8
+#define mmCP_COHER_BASE 0x217E
+#define mmCP_COHER_CNTL 0x217C
+#define mmCP_COHER_SIZE 0x217D
+#define mmCP_COHER_START_DELAY 0x217B
+#define mmCP_COHER_STATUS 0x217F
+#define mmCP_CSF_CNTL 0x21B5
+#define mmCP_CSF_STAT 0x21B4
+#define mmCP_DMA_CNTL 0x218A
+#define mmCP_DMA_ME_COMMAND 0x2184
+#define mmCP_DMA_ME_DST_ADDR 0x2182
+#define mmCP_DMA_ME_DST_ADDR_HI 0x2183
+#define mmCP_DMA_ME_SRC_ADDR 0x2180
+#define mmCP_DMA_ME_SRC_ADDR_HI 0x2181
+#define mmCP_DMA_PFP_COMMAND 0x2189
+#define mmCP_DMA_PFP_DST_ADDR 0x2187
+#define mmCP_DMA_PFP_DST_ADDR_HI 0x2188
+#define mmCP_DMA_PFP_SRC_ADDR 0x2185
+#define mmCP_DMA_PFP_SRC_ADDR_HI 0x2186
+#define mmCP_DMA_READ_TAGS 0x218B
+#define mmCP_ECC_FIRSTOCCURRENCE 0x307A
+#define mmCP_ECC_FIRSTOCCURRENCE_RING0 0x307B
+#define mmCP_ECC_FIRSTOCCURRENCE_RING1 0x307C
+#define mmCP_ECC_FIRSTOCCURRENCE_RING2 0x307D
+#define mmCP_EOP_DONE_ADDR_HI 0x2101
+#define mmCP_EOP_DONE_ADDR_LO 0x2100
+#define mmCP_EOP_DONE_DATA_HI 0x2103
+#define mmCP_EOP_DONE_DATA_LO 0x2102
+#define mmCP_EOP_LAST_FENCE_HI 0x2105
+#define mmCP_EOP_LAST_FENCE_LO 0x2104
+#define mmCP_GDS_ATOMIC0_PREOP_HI 0x2160
+#define mmCP_GDS_ATOMIC0_PREOP_LO 0x215F
+#define mmCP_GDS_ATOMIC1_PREOP_HI 0x2162
+#define mmCP_GDS_ATOMIC1_PREOP_LO 0x2161
+#define mmCP_GRBM_FREE_COUNT 0x21A3
+#define mmCP_IB1_BASE_HI 0x21CD
+#define mmCP_IB1_BASE_LO 0x21CC
+#define mmCP_IB1_BUFSZ 0x21CE
+#define mmCP_IB1_OFFSET 0x2192
+#define mmCP_IB1_PREAMBLE_BEGIN 0x2194
+#define mmCP_IB1_PREAMBLE_END 0x2195
+#define mmCP_IB2_BASE_HI 0x21D0
+#define mmCP_IB2_BASE_LO 0x21CF
+#define mmCP_IB2_BUFSZ 0x21D1
+#define mmCP_IB2_OFFSET 0x2193
+#define mmCP_IB2_PREAMBLE_BEGIN 0x2196
+#define mmCP_IB2_PREAMBLE_END 0x2197
+#define mmCP_INT_CNTL 0x3049
+#define mmCP_INT_CNTL_RING0 0x306A
+#define mmCP_INT_CNTL_RING1 0x306B
+#define mmCP_INT_CNTL_RING2 0x306C
+#define mmCP_INT_STAT_DEBUG 0x21F7
+#define mmCP_INT_STATUS 0x304A
+#define mmCP_INT_STATUS_RING0 0x306D
+#define mmCP_INT_STATUS_RING1 0x306E
+#define mmCP_INT_STATUS_RING2 0x306F
+#define mmCP_MC_PACK_DELAY_CNT 0x21A7
+#define mmCP_ME_CNTL 0x21B6
+#define mmCP_ME_HEADER_DUMP 0x21A1
+#define mmCP_ME_MC_RADDR_HI 0x216E
+#define mmCP_ME_MC_RADDR_LO 0x216D
+#define mmCP_ME_MC_WADDR_HI 0x216A
+#define mmCP_ME_MC_WADDR_LO 0x2169
+#define mmCP_ME_MC_WDATA_HI 0x216C
+#define mmCP_ME_MC_WDATA_LO 0x216B
+#define mmCP_MEM_SLP_CNTL 0x3079
+#define mmCP_ME_PREEMPTION 0x21B9
+#define mmCP_MEQ_AVAIL 0x21DD
+#define mmCP_MEQ_STAT 0x21E5
+#define mmCP_MEQ_THRESHOLDS 0x21D9
+#define mmCP_ME_RAM_DATA 0x3058
+#define mmCP_ME_RAM_RADDR 0x3056
+#define mmCP_ME_RAM_WADDR 0x3057
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI 0x210B
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO 0x210A
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI 0x210F
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO 0x210E
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI 0x2113
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO 0x2112
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI 0x2117
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO 0x2116
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI 0x2109
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO 0x2108
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI 0x210D
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO 0x210C
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI 0x2111
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO 0x2110
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI 0x2115
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO 0x2114
+#define mmCP_PA_CINVOC_COUNT_HI 0x2129
+#define mmCP_PA_CINVOC_COUNT_LO 0x2128
+#define mmCP_PA_CPRIM_COUNT_HI 0x212B
+#define mmCP_PA_CPRIM_COUNT_LO 0x212A
+#define mmCP_PERFMON_CNTL 0x21FF
+#define mmCP_PERFMON_CNTX_CNTL 0xA0D8
+#define mmCP_PFP_HEADER_DUMP 0x21A2
+#define mmCP_PFP_IB_CONTROL 0x218D
+#define mmCP_PFP_LOAD_CONTROL 0x218E
+#define mmCP_PFP_UCODE_ADDR 0x3054
+#define mmCP_PFP_UCODE_DATA 0x3055
+#define mmCP_PIPE_STATS_ADDR_HI 0x2119
+#define mmCP_PIPE_STATS_ADDR_LO 0x2118
+#define mmCP_PWR_CNTL 0x3078
+#define mmCP_QUEUE_THRESHOLDS 0x21D8
+#define mmCP_RB0_BASE 0x3040
+#define mmCP_RB0_CNTL 0x3041
+#define mmCP_RB0_RPTR 0x21C0
+#define mmCP_RB0_RPTR_ADDR 0x3043
+#define mmCP_RB0_RPTR_ADDR_HI 0x3044
+#define mmCP_RB0_WPTR 0x3045
+#define mmCP_RB1_BASE 0x3060
+#define mmCP_RB1_CNTL 0x3061
+#define mmCP_RB1_RPTR 0x21BF
+#define mmCP_RB1_RPTR_ADDR 0x3062
+#define mmCP_RB1_RPTR_ADDR_HI 0x3063
+#define mmCP_RB1_WPTR 0x3064
+#define mmCP_RB2_BASE 0x3065
+#define mmCP_RB2_CNTL 0x3066
+#define mmCP_RB2_RPTR 0x21BE
+#define mmCP_RB2_RPTR_ADDR 0x3067
+#define mmCP_RB2_RPTR_ADDR_HI 0x3068
+#define mmCP_RB2_WPTR 0x3069
+#define mmCP_RB_BASE 0x3040
+#define mmCP_RB_CNTL 0x3041
+#define mmCP_RB_OFFSET 0x2191
+#define mmCP_RB_RPTR 0x21C0
+#define mmCP_RB_RPTR_ADDR 0x3043
+#define mmCP_RB_RPTR_ADDR_HI 0x3044
+#define mmCP_RB_RPTR_WR 0x3042
+#define mmCP_RB_VMID 0x3051
+#define mmCP_RB_WPTR 0x3045
+#define mmCP_RB_WPTR_DELAY 0x21C1
+#define mmCP_RB_WPTR_POLL_ADDR_HI 0x3047
+#define mmCP_RB_WPTR_POLL_ADDR_LO 0x3046
+#define mmCP_RB_WPTR_POLL_CNTL 0x21C2
+#define mmCP_RING0_PRIORITY 0x304D
+#define mmCP_RING1_PRIORITY 0x304E
+#define mmCP_RING2_PRIORITY 0x304F
+#define mmCP_RINGID 0xA0D9
+#define mmCP_RING_PRIORITY_CNTS 0x304C
+#define mmCP_ROQ1_THRESHOLDS 0x21D5
+#define mmCP_ROQ2_AVAIL 0x21DC
+#define mmCP_ROQ2_THRESHOLDS 0x21D6
+#define mmCP_ROQ_AVAIL 0x21DA
+#define mmCP_ROQ_IB1_STAT 0x21E1
+#define mmCP_ROQ_IB2_STAT 0x21E2
+#define mmCP_ROQ_RB_STAT 0x21E0
+#define mmCP_SC_PSINVOC_COUNT0_HI 0x212D
+#define mmCP_SC_PSINVOC_COUNT0_LO 0x212C
+#define mmCP_SC_PSINVOC_COUNT1_HI 0x212F
+#define mmCP_SC_PSINVOC_COUNT1_LO 0x212E
+#define mmCP_SCRATCH_DATA 0x2190
+#define mmCP_SCRATCH_INDEX 0x218F
+#define mmCP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
+#define mmCP_SEM_WAIT_TIMER 0x216F
+#define mmCP_SIG_SEM_ADDR_HI 0x2171
+#define mmCP_SIG_SEM_ADDR_LO 0x2170
+#define mmCP_STALLED_STAT1 0x219D
+#define mmCP_STALLED_STAT2 0x219E
+#define mmCP_STALLED_STAT3 0x219C
+#define mmCP_STAT 0x21A0
+#define mmCP_ST_BASE_HI 0x21D3
+#define mmCP_ST_BASE_LO 0x21D2
+#define mmCP_ST_BUFSZ 0x21D4
+#define mmCP_STQ_AVAIL 0x21DB
+#define mmCP_STQ_STAT 0x21E3
+#define mmCP_STQ_THRESHOLDS 0x21D7
+#define mmCP_STREAM_OUT_ADDR_HI 0x2107
+#define mmCP_STREAM_OUT_ADDR_LO 0x2106
+#define mmCP_STRMOUT_CNTL 0x213F
+#define mmCP_VGT_CSINVOC_COUNT_HI 0x2131
+#define mmCP_VGT_CSINVOC_COUNT_LO 0x2130
+#define mmCP_VGT_DSINVOC_COUNT_HI 0x2127
+#define mmCP_VGT_DSINVOC_COUNT_LO 0x2126
+#define mmCP_VGT_GSINVOC_COUNT_HI 0x2123
+#define mmCP_VGT_GSINVOC_COUNT_LO 0x2122
+#define mmCP_VGT_GSPRIM_COUNT_HI 0x211F
+#define mmCP_VGT_GSPRIM_COUNT_LO 0x211E
+#define mmCP_VGT_HSINVOC_COUNT_HI 0x2125
+#define mmCP_VGT_HSINVOC_COUNT_LO 0x2124
+#define mmCP_VGT_IAPRIM_COUNT_HI 0x211D
+#define mmCP_VGT_IAPRIM_COUNT_LO 0x211C
+#define mmCP_VGT_IAVERT_COUNT_HI 0x211B
+#define mmCP_VGT_IAVERT_COUNT_LO 0x211A
+#define mmCP_VGT_VSINVOC_COUNT_HI 0x2121
+#define mmCP_VGT_VSINVOC_COUNT_LO 0x2120
+#define mmCP_VMID 0xA0DA
+#define mmCP_WAIT_REG_MEM_TIMEOUT 0x2174
+#define mmCP_WAIT_SEM_ADDR_HI 0x2176
+#define mmCP_WAIT_SEM_ADDR_LO 0x2175
+#define mmCS_COPY_STATE 0xA1F3
+#define mmDB_ALPHA_TO_MASK 0xA2DC
+#define mmDB_CGTT_CLK_CTRL_0 0x261A
+#define mmDB_COUNT_CONTROL 0xA001
+#define mmDB_CREDIT_LIMIT 0x2614
+#define mmDB_DEBUG 0x260C
+#define mmDB_DEBUG2 0x260D
+#define mmDB_DEBUG3 0x260E
+#define mmDB_DEBUG4 0x260F
+#define mmDB_DEPTH_BOUNDS_MAX 0xA009
+#define mmDB_DEPTH_BOUNDS_MIN 0xA008
+#define mmDB_DEPTH_CLEAR 0xA00B
+#define mmDB_DEPTH_CONTROL 0xA200
+#define mmDB_DEPTH_INFO 0xA00F
+#define mmDB_DEPTH_SIZE 0xA016
+#define mmDB_DEPTH_SLICE 0xA017
+#define mmDB_DEPTH_VIEW 0xA002
+#define mmDB_EQAA 0xA201
+#define mmDB_FIFO_DEPTH1 0x2618
+#define mmDB_FIFO_DEPTH2 0x2619
+#define mmDB_FREE_CACHELINES 0x2617
+#define mmDB_HTILE_DATA_BASE 0xA005
+#define mmDB_HTILE_SURFACE 0xA2AF
+#define mmDB_PERFCOUNTER0_HI 0x2602
+#define mmDB_PERFCOUNTER0_LO 0x2601
+#define mmDB_PERFCOUNTER0_SELECT 0x2600
+#define mmDB_PERFCOUNTER1_HI 0x2605
+#define mmDB_PERFCOUNTER1_LO 0x2604
+#define mmDB_PERFCOUNTER1_SELECT 0x2603
+#define mmDB_PERFCOUNTER2_HI 0x2608
+#define mmDB_PERFCOUNTER2_LO 0x2607
+#define mmDB_PERFCOUNTER2_SELECT 0x2606
+#define mmDB_PERFCOUNTER3_HI 0x260B
+#define mmDB_PERFCOUNTER3_LO 0x260A
+#define mmDB_PERFCOUNTER3_SELECT 0x2609
+#define mmDB_PRELOAD_CONTROL 0xA2B2
+#define mmDB_READ_DEBUG_0 0x2620
+#define mmDB_READ_DEBUG_1 0x2621
+#define mmDB_READ_DEBUG_2 0x2622
+#define mmDB_READ_DEBUG_3 0x2623
+#define mmDB_READ_DEBUG_4 0x2624
+#define mmDB_READ_DEBUG_5 0x2625
+#define mmDB_READ_DEBUG_6 0x2626
+#define mmDB_READ_DEBUG_7 0x2627
+#define mmDB_READ_DEBUG_8 0x2628
+#define mmDB_READ_DEBUG_9 0x2629
+#define mmDB_READ_DEBUG_A 0x262A
+#define mmDB_READ_DEBUG_B 0x262B
+#define mmDB_READ_DEBUG_C 0x262C
+#define mmDB_READ_DEBUG_D 0x262D
+#define mmDB_READ_DEBUG_E 0x262E
+#define mmDB_READ_DEBUG_F 0x262F
+#define mmDB_RENDER_CONTROL 0xA000
+#define mmDB_RENDER_OVERRIDE 0xA003
+#define mmDB_RENDER_OVERRIDE2 0xA004
+#define mmDB_SHADER_CONTROL 0xA203
+#define mmDB_SRESULTS_COMPARE_STATE0 0xA2B0
+#define mmDB_SRESULTS_COMPARE_STATE1 0xA2B1
+#define mmDB_STENCIL_CLEAR 0xA00A
+#define mmDB_STENCIL_CONTROL 0xA10B
+#define mmDB_STENCIL_INFO 0xA011
+#define mmDB_STENCIL_READ_BASE 0xA013
+#define mmDB_STENCILREFMASK 0xA10C
+#define mmDB_STENCILREFMASK_BF 0xA10D
+#define mmDB_STENCIL_WRITE_BASE 0xA015
+#define mmDB_SUBTILE_CONTROL 0x2616
+#define mmDB_WATERMARKS 0x2615
+#define mmDB_Z_INFO 0xA010
+#define mmDB_ZPASS_COUNT_HI 0x261D
+#define mmDB_ZPASS_COUNT_LOW 0x261C
+#define mmDB_Z_READ_BASE 0xA012
+#define mmDB_Z_WRITE_BASE 0xA014
+#define mmDEBUG_DATA 0x203D
+#define mmDEBUG_INDEX 0x203C
+#define mmGB_ADDR_CONFIG 0x263E
+#define mmGB_BACKEND_MAP 0x263F
+#define mmGB_EDC_MODE 0x307E
+#define mmGB_GPU_ID 0x2640
+#define mmGB_TILE_MODE0 0x2644
+#define mmGB_TILE_MODE10 0x264E
+#define mmGB_TILE_MODE1 0x2645
+#define mmGB_TILE_MODE11 0x264F
+#define mmGB_TILE_MODE12 0x2650
+#define mmGB_TILE_MODE13 0x2651
+#define mmGB_TILE_MODE14 0x2652
+#define mmGB_TILE_MODE15 0x2653
+#define mmGB_TILE_MODE16 0x2654
+#define mmGB_TILE_MODE17 0x2655
+#define mmGB_TILE_MODE18 0x2656
+#define mmGB_TILE_MODE19 0x2657
+#define mmGB_TILE_MODE20 0x2658
+#define mmGB_TILE_MODE2 0x2646
+#define mmGB_TILE_MODE21 0x2659
+#define mmGB_TILE_MODE22 0x265A
+#define mmGB_TILE_MODE23 0x265B
+#define mmGB_TILE_MODE24 0x265C
+#define mmGB_TILE_MODE25 0x265D
+#define mmGB_TILE_MODE26 0x265E
+#define mmGB_TILE_MODE27 0x265F
+#define mmGB_TILE_MODE28 0x2660
+#define mmGB_TILE_MODE29 0x2661
+#define mmGB_TILE_MODE30 0x2662
+#define mmGB_TILE_MODE3 0x2647
+#define mmGB_TILE_MODE31 0x2663
+#define mmGB_TILE_MODE4 0x2648
+#define mmGB_TILE_MODE5 0x2649
+#define mmGB_TILE_MODE6 0x264A
+#define mmGB_TILE_MODE7 0x264B
+#define mmGB_TILE_MODE8 0x264C
+#define mmGB_TILE_MODE9 0x264D
+#define mmGC_PRIV_MODE 0x3048
+#define mmGC_USER_RB_BACKEND_DISABLE 0x26DF
+#define mmGC_USER_SHADER_ARRAY_CONFIG 0x2270
+#define mmGDS_ATOM_BASE 0x25CE
+#define mmGDS_ATOM_CNTL 0x25CC
+#define mmGDS_ATOM_COMPLETE 0x25CD
+#define mmGDS_ATOM_DST 0x25D2
+#define mmGDS_ATOM_OFFSET0 0x25D0
+#define mmGDS_ATOM_OFFSET1 0x25D1
+#define mmGDS_ATOM_OP 0x25D3
+#define mmGDS_ATOM_READ0 0x25D8
+#define mmGDS_ATOM_READ0_U 0x25D9
+#define mmGDS_ATOM_READ1 0x25DA
+#define mmGDS_ATOM_READ1_U 0x25DB
+#define mmGDS_ATOM_SIZE 0x25CF
+#define mmGDS_ATOM_SRC0 0x25D4
+#define mmGDS_ATOM_SRC0_U 0x25D5
+#define mmGDS_ATOM_SRC1 0x25D6
+#define mmGDS_ATOM_SRC1_U 0x25D7
+#define mmGDS_CNTL_STATUS 0x25C1
+#define mmGDS_CONFIG 0x25C0
+#define mmGDS_DEBUG_CNTL 0x25DE
+#define mmGDS_DEBUG_DATA 0x25DF
+#define mmGDS_ENHANCE 0x25DC
+#define mmGDS_GRBM_SECDED_CNT 0x25E3
+#define mmGDS_GWS_RESOURCE 0x25E1
+#define mmGDS_GWS_RESOURCE_CNTL 0x25E0
+#define mmGDS_OA_DED 0x25E4
+#define mmGDS_PERFCOUNTER0_HI 0x25E7
+#define mmGDS_PERFCOUNTER0_LO 0x25E6
+#define mmGDS_PERFCOUNTER0_SELECT 0x25E5
+#define mmGDS_PERFCOUNTER1_HI 0x25EA
+#define mmGDS_PERFCOUNTER1_LO 0x25E9
+#define mmGDS_PERFCOUNTER1_SELECT 0x25E8
+#define mmGDS_PERFCOUNTER2_HI 0x25ED
+#define mmGDS_PERFCOUNTER2_LO 0x25EC
+#define mmGDS_PERFCOUNTER2_SELECT 0x25EB
+#define mmGDS_PERFCOUNTER3_HI 0x25F0
+#define mmGDS_PERFCOUNTER3_LO 0x25EF
+#define mmGDS_PERFCOUNTER3_SELECT 0x25EE
+#define mmGDS_RD_ADDR 0x25C2
+#define mmGDS_RD_BURST_ADDR 0x25C4
+#define mmGDS_RD_BURST_COUNT 0x25C5
+#define mmGDS_RD_BURST_DATA 0x25C6
+#define mmGDS_RD_DATA 0x25C3
+#define mmGDS_SECDED_CNT 0x25E2
+#define mmGDS_WR_ADDR 0x25C7
+#define mmGDS_WR_BURST_ADDR 0x25C9
+#define mmGDS_WR_BURST_DATA 0x25CA
+#define mmGDS_WR_DATA 0x25C8
+#define mmGDS_WRITE_COMPLETE 0x25CB
+#define mmGFX_COPY_STATE 0xA1F4
+#define mmGRBM_CAM_DATA 0x3001
+#define mmGRBM_CAM_INDEX 0x3000
+#define mmGRBM_CNTL 0x2000
+#define mmGRBM_DEBUG 0x2014
+#define mmGRBM_DEBUG_CNTL 0x2009
+#define mmGRBM_DEBUG_DATA 0x200A
+#define mmGRBM_DEBUG_SNAPSHOT 0x2015
+#define mmGRBM_GFX_CLKEN_CNTL 0x200C
+#define mmGRBM_GFX_INDEX 0x200B
+#define mmGRBM_INT_CNTL 0x2018
+#define mmGRBM_NOWHERE 0x203F
+#define mmGRBM_PERFCOUNTER0_HI 0x201F
+#define mmGRBM_PERFCOUNTER0_LO 0x201E
+#define mmGRBM_PERFCOUNTER0_SELECT 0x201C
+#define mmGRBM_PERFCOUNTER1_HI 0x2021
+#define mmGRBM_PERFCOUNTER1_LO 0x2020
+#define mmGRBM_PERFCOUNTER1_SELECT 0x201D
+#define mmGRBM_PWR_CNTL 0x2003
+#define mmGRBM_READ_ERROR 0x2016
+#define mmGRBM_SCRATCH_REG0 0x2040
+#define mmGRBM_SCRATCH_REG1 0x2041
+#define mmGRBM_SCRATCH_REG2 0x2042
+#define mmGRBM_SCRATCH_REG3 0x2043
+#define mmGRBM_SCRATCH_REG4 0x2044
+#define mmGRBM_SCRATCH_REG5 0x2045
+#define mmGRBM_SCRATCH_REG6 0x2046
+#define mmGRBM_SCRATCH_REG7 0x2047
+#define mmGRBM_SE0_PERFCOUNTER_HI 0x202B
+#define mmGRBM_SE0_PERFCOUNTER_LO 0x202A
+#define mmGRBM_SE0_PERFCOUNTER_SELECT 0x2026
+#define mmGRBM_SE1_PERFCOUNTER_HI 0x202D
+#define mmGRBM_SE1_PERFCOUNTER_LO 0x202C
+#define mmGRBM_SE1_PERFCOUNTER_SELECT 0x2027
+#define mmGRBM_SKEW_CNTL 0x2001
+#define mmGRBM_SOFT_RESET 0x2008
+#define mmGRBM_STATUS 0x2004
+#define mmGRBM_STATUS2 0x2002
+#define mmGRBM_STATUS_SE0 0x2005
+#define mmGRBM_STATUS_SE1 0x2006
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x200D
+#define mmIA_CNTL_STATUS 0x2237
+#define mmIA_DEBUG_CNTL 0x223A
+#define mmIA_DEBUG_DATA 0x223B
+#define mmIA_ENHANCE 0xA29C
+#define mmIA_MULTI_VGT_PARAM 0xA2AA
+#define mmIA_PERFCOUNTER0_HI 0x2225
+#define mmIA_PERFCOUNTER0_LO 0x2224
+#define mmIA_PERFCOUNTER0_SELECT 0x2220
+#define mmIA_PERFCOUNTER1_HI 0x2227
+#define mmIA_PERFCOUNTER1_LO 0x2226
+#define mmIA_PERFCOUNTER1_SELECT 0x2221
+#define mmIA_PERFCOUNTER2_HI 0x2229
+#define mmIA_PERFCOUNTER2_LO 0x2228
+#define mmIA_PERFCOUNTER2_SELECT 0x2222
+#define mmIA_PERFCOUNTER3_HI 0x222B
+#define mmIA_PERFCOUNTER3_LO 0x222A
+#define mmIA_PERFCOUNTER3_SELECT 0x2223
+#define mmIA_VMID_OVERRIDE 0x2260
+#define mmPA_CL_CLIP_CNTL 0xA204
+#define mmPA_CL_CNTL_STATUS 0x2284
+#define mmPA_CL_ENHANCE 0x2285
+#define mmPA_CL_GB_HORZ_CLIP_ADJ 0xA2FC
+#define mmPA_CL_GB_HORZ_DISC_ADJ 0xA2FD
+#define mmPA_CL_GB_VERT_CLIP_ADJ 0xA2FA
+#define mmPA_CL_GB_VERT_DISC_ADJ 0xA2FB
+#define mmPA_CL_NANINF_CNTL 0xA208
+#define mmPA_CL_POINT_CULL_RAD 0xA1F8
+#define mmPA_CL_POINT_SIZE 0xA1F7
+#define mmPA_CL_POINT_X_RAD 0xA1F5
+#define mmPA_CL_POINT_Y_RAD 0xA1F6
+#define mmPA_CL_UCP_0_W 0xA172
+#define mmPA_CL_UCP_0_X 0xA16F
+#define mmPA_CL_UCP_0_Y 0xA170
+#define mmPA_CL_UCP_0_Z 0xA171
+#define mmPA_CL_UCP_1_W 0xA176
+#define mmPA_CL_UCP_1_X 0xA173
+#define mmPA_CL_UCP_1_Y 0xA174
+#define mmPA_CL_UCP_1_Z 0xA175
+#define mmPA_CL_UCP_2_W 0xA17A
+#define mmPA_CL_UCP_2_X 0xA177
+#define mmPA_CL_UCP_2_Y 0xA178
+#define mmPA_CL_UCP_2_Z 0xA179
+#define mmPA_CL_UCP_3_W 0xA17E
+#define mmPA_CL_UCP_3_X 0xA17B
+#define mmPA_CL_UCP_3_Y 0xA17C
+#define mmPA_CL_UCP_3_Z 0xA17D
+#define mmPA_CL_UCP_4_W 0xA182
+#define mmPA_CL_UCP_4_X 0xA17F
+#define mmPA_CL_UCP_4_Y 0xA180
+#define mmPA_CL_UCP_4_Z 0xA181
+#define mmPA_CL_UCP_5_W 0xA186
+#define mmPA_CL_UCP_5_X 0xA183
+#define mmPA_CL_UCP_5_Y 0xA184
+#define mmPA_CL_UCP_5_Z 0xA185
+#define mmPA_CL_VPORT_XOFFSET 0xA110
+#define mmPA_CL_VPORT_XOFFSET_10 0xA14C
+#define mmPA_CL_VPORT_XOFFSET_1 0xA116
+#define mmPA_CL_VPORT_XOFFSET_11 0xA152
+#define mmPA_CL_VPORT_XOFFSET_12 0xA158
+#define mmPA_CL_VPORT_XOFFSET_13 0xA15E
+#define mmPA_CL_VPORT_XOFFSET_14 0xA164
+#define mmPA_CL_VPORT_XOFFSET_15 0xA16A
+#define mmPA_CL_VPORT_XOFFSET_2 0xA11C
+#define mmPA_CL_VPORT_XOFFSET_3 0xA122
+#define mmPA_CL_VPORT_XOFFSET_4 0xA128
+#define mmPA_CL_VPORT_XOFFSET_5 0xA12E
+#define mmPA_CL_VPORT_XOFFSET_6 0xA134
+#define mmPA_CL_VPORT_XOFFSET_7 0xA13A
+#define mmPA_CL_VPORT_XOFFSET_8 0xA140
+#define mmPA_CL_VPORT_XOFFSET_9 0xA146
+#define mmPA_CL_VPORT_XSCALE 0xA10F
+#define mmPA_CL_VPORT_XSCALE_10 0xA14B
+#define mmPA_CL_VPORT_XSCALE_1 0xA115
+#define mmPA_CL_VPORT_XSCALE_11 0xA151
+#define mmPA_CL_VPORT_XSCALE_12 0xA157
+#define mmPA_CL_VPORT_XSCALE_13 0xA15D
+#define mmPA_CL_VPORT_XSCALE_14 0xA163
+#define mmPA_CL_VPORT_XSCALE_15 0xA169
+#define mmPA_CL_VPORT_XSCALE_2 0xA11B
+#define mmPA_CL_VPORT_XSCALE_3 0xA121
+#define mmPA_CL_VPORT_XSCALE_4 0xA127
+#define mmPA_CL_VPORT_XSCALE_5 0xA12D
+#define mmPA_CL_VPORT_XSCALE_6 0xA133
+#define mmPA_CL_VPORT_XSCALE_7 0xA139
+#define mmPA_CL_VPORT_XSCALE_8 0xA13F
+#define mmPA_CL_VPORT_XSCALE_9 0xA145
+#define mmPA_CL_VPORT_YOFFSET 0xA112
+#define mmPA_CL_VPORT_YOFFSET_10 0xA14E
+#define mmPA_CL_VPORT_YOFFSET_1 0xA118
+#define mmPA_CL_VPORT_YOFFSET_11 0xA154
+#define mmPA_CL_VPORT_YOFFSET_12 0xA15A
+#define mmPA_CL_VPORT_YOFFSET_13 0xA160
+#define mmPA_CL_VPORT_YOFFSET_14 0xA166
+#define mmPA_CL_VPORT_YOFFSET_15 0xA16C
+#define mmPA_CL_VPORT_YOFFSET_2 0xA11E
+#define mmPA_CL_VPORT_YOFFSET_3 0xA124
+#define mmPA_CL_VPORT_YOFFSET_4 0xA12A
+#define mmPA_CL_VPORT_YOFFSET_5 0xA130
+#define mmPA_CL_VPORT_YOFFSET_6 0xA136
+#define mmPA_CL_VPORT_YOFFSET_7 0xA13C
+#define mmPA_CL_VPORT_YOFFSET_8 0xA142
+#define mmPA_CL_VPORT_YOFFSET_9 0xA148
+#define mmPA_CL_VPORT_YSCALE 0xA111
+#define mmPA_CL_VPORT_YSCALE_10 0xA14D
+#define mmPA_CL_VPORT_YSCALE_1 0xA117
+#define mmPA_CL_VPORT_YSCALE_11 0xA153
+#define mmPA_CL_VPORT_YSCALE_12 0xA159
+#define mmPA_CL_VPORT_YSCALE_13 0xA15F
+#define mmPA_CL_VPORT_YSCALE_14 0xA165
+#define mmPA_CL_VPORT_YSCALE_15 0xA16B
+#define mmPA_CL_VPORT_YSCALE_2 0xA11D
+#define mmPA_CL_VPORT_YSCALE_3 0xA123
+#define mmPA_CL_VPORT_YSCALE_4 0xA129
+#define mmPA_CL_VPORT_YSCALE_5 0xA12F
+#define mmPA_CL_VPORT_YSCALE_6 0xA135
+#define mmPA_CL_VPORT_YSCALE_7 0xA13B
+#define mmPA_CL_VPORT_YSCALE_8 0xA141
+#define mmPA_CL_VPORT_YSCALE_9 0xA147
+#define mmPA_CL_VPORT_ZOFFSET 0xA114
+#define mmPA_CL_VPORT_ZOFFSET_10 0xA150
+#define mmPA_CL_VPORT_ZOFFSET_1 0xA11A
+#define mmPA_CL_VPORT_ZOFFSET_11 0xA156
+#define mmPA_CL_VPORT_ZOFFSET_12 0xA15C
+#define mmPA_CL_VPORT_ZOFFSET_13 0xA162
+#define mmPA_CL_VPORT_ZOFFSET_14 0xA168
+#define mmPA_CL_VPORT_ZOFFSET_15 0xA16E
+#define mmPA_CL_VPORT_ZOFFSET_2 0xA120
+#define mmPA_CL_VPORT_ZOFFSET_3 0xA126
+#define mmPA_CL_VPORT_ZOFFSET_4 0xA12C
+#define mmPA_CL_VPORT_ZOFFSET_5 0xA132
+#define mmPA_CL_VPORT_ZOFFSET_6 0xA138
+#define mmPA_CL_VPORT_ZOFFSET_7 0xA13E
+#define mmPA_CL_VPORT_ZOFFSET_8 0xA144
+#define mmPA_CL_VPORT_ZOFFSET_9 0xA14A
+#define mmPA_CL_VPORT_ZSCALE 0xA113
+#define mmPA_CL_VPORT_ZSCALE_10 0xA14F
+#define mmPA_CL_VPORT_ZSCALE_1 0xA119
+#define mmPA_CL_VPORT_ZSCALE_11 0xA155
+#define mmPA_CL_VPORT_ZSCALE_12 0xA15B
+#define mmPA_CL_VPORT_ZSCALE_13 0xA161
+#define mmPA_CL_VPORT_ZSCALE_14 0xA167
+#define mmPA_CL_VPORT_ZSCALE_15 0xA16D
+#define mmPA_CL_VPORT_ZSCALE_2 0xA11F
+#define mmPA_CL_VPORT_ZSCALE_3 0xA125
+#define mmPA_CL_VPORT_ZSCALE_4 0xA12B
+#define mmPA_CL_VPORT_ZSCALE_5 0xA131
+#define mmPA_CL_VPORT_ZSCALE_6 0xA137
+#define mmPA_CL_VPORT_ZSCALE_7 0xA13D
+#define mmPA_CL_VPORT_ZSCALE_8 0xA143
+#define mmPA_CL_VPORT_ZSCALE_9 0xA149
+#define mmPA_CL_VS_OUT_CNTL 0xA207
+#define mmPA_CL_VTE_CNTL 0xA206
+#define mmPA_SC_AA_CONFIG 0xA2F8
+#define mmPA_SC_AA_MASK_X0Y0_X1Y0 0xA30E
+#define mmPA_SC_AA_MASK_X0Y1_X1Y1 0xA30F
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0xA2FE
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0xA2FF
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0xA300
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0xA301
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0xA306
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0xA307
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0xA308
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0xA309
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0xA302
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0xA303
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0xA304
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0xA305
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0xA30A
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0xA30B
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0xA30C
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0xA30D
+#define mmPA_SC_CENTROID_PRIORITY_0 0xA2F5
+#define mmPA_SC_CENTROID_PRIORITY_1 0xA2F6
+#define mmPA_SC_CLIPRECT_0_BR 0xA085
+#define mmPA_SC_CLIPRECT_0_TL 0xA084
+#define mmPA_SC_CLIPRECT_1_BR 0xA087
+#define mmPA_SC_CLIPRECT_1_TL 0xA086
+#define mmPA_SC_CLIPRECT_2_BR 0xA089
+#define mmPA_SC_CLIPRECT_2_TL 0xA088
+#define mmPA_SC_CLIPRECT_3_BR 0xA08B
+#define mmPA_SC_CLIPRECT_3_TL 0xA08A
+#define mmPA_SC_CLIPRECT_RULE 0xA083
+#define mmPA_SC_DEBUG_CNTL 0x22F6
+#define mmPA_SC_DEBUG_DATA 0x22F7
+#define mmPA_SC_EDGERULE 0xA08C
+#define mmPA_SC_ENHANCE 0x22FC
+#define mmPA_SC_FIFO_DEPTH_CNTL 0x2295
+#define mmPA_SC_FIFO_SIZE 0x22F3
+#define mmPA_SC_FORCE_EOV_MAX_CNTS 0x22C9
+#define mmPA_SC_GENERIC_SCISSOR_BR 0xA091
+#define mmPA_SC_GENERIC_SCISSOR_TL 0xA090
+#define mmPA_SC_IF_FIFO_SIZE 0x22F5
+#define mmPA_SC_LINE_CNTL 0xA2F7
+#define mmPA_SC_LINE_STIPPLE 0xA283
+#define mmPA_SC_LINE_STIPPLE_STATE 0x22C4
+#define mmPA_SC_MODE_CNTL_0 0xA292
+#define mmPA_SC_MODE_CNTL_1 0xA293
+#define mmPA_SC_PERFCOUNTER0_HI 0x22A9
+#define mmPA_SC_PERFCOUNTER0_LO 0x22A8
+#define mmPA_SC_PERFCOUNTER0_SELECT 0x22A0
+#define mmPA_SC_PERFCOUNTER1_HI 0x22AB
+#define mmPA_SC_PERFCOUNTER1_LO 0x22AA
+#define mmPA_SC_PERFCOUNTER1_SELECT 0x22A1
+#define mmPA_SC_PERFCOUNTER2_HI 0x22AD
+#define mmPA_SC_PERFCOUNTER2_LO 0x22AC
+#define mmPA_SC_PERFCOUNTER2_SELECT 0x22A2
+#define mmPA_SC_PERFCOUNTER3_HI 0x22AF
+#define mmPA_SC_PERFCOUNTER3_LO 0x22AE
+#define mmPA_SC_PERFCOUNTER3_SELECT 0x22A3
+#define mmPA_SC_PERFCOUNTER4_HI 0x22B1
+#define mmPA_SC_PERFCOUNTER4_LO 0x22B0
+#define mmPA_SC_PERFCOUNTER4_SELECT 0x22A4
+#define mmPA_SC_PERFCOUNTER5_HI 0x22B3
+#define mmPA_SC_PERFCOUNTER5_LO 0x22B2
+#define mmPA_SC_PERFCOUNTER5_SELECT 0x22A5
+#define mmPA_SC_PERFCOUNTER6_HI 0x22B5
+#define mmPA_SC_PERFCOUNTER6_LO 0x22B4
+#define mmPA_SC_PERFCOUNTER6_SELECT 0x22A6
+#define mmPA_SC_PERFCOUNTER7_HI 0x22B7
+#define mmPA_SC_PERFCOUNTER7_LO 0x22B6
+#define mmPA_SC_PERFCOUNTER7_SELECT 0x22A7
+#define mmPA_SC_RASTER_CONFIG 0xA0D4
+#define mmPA_SC_SCREEN_SCISSOR_BR 0xA00D
+#define mmPA_SC_SCREEN_SCISSOR_TL 0xA00C
+#define mmPA_SC_VPORT_SCISSOR_0_BR 0xA095
+#define mmPA_SC_VPORT_SCISSOR_0_TL 0xA094
+#define mmPA_SC_VPORT_SCISSOR_10_BR 0xA0A9
+#define mmPA_SC_VPORT_SCISSOR_10_TL 0xA0A8
+#define mmPA_SC_VPORT_SCISSOR_11_BR 0xA0AB
+#define mmPA_SC_VPORT_SCISSOR_11_TL 0xA0AA
+#define mmPA_SC_VPORT_SCISSOR_12_BR 0xA0AD
+#define mmPA_SC_VPORT_SCISSOR_12_TL 0xA0AC
+#define mmPA_SC_VPORT_SCISSOR_13_BR 0xA0AF
+#define mmPA_SC_VPORT_SCISSOR_13_TL 0xA0AE
+#define mmPA_SC_VPORT_SCISSOR_14_BR 0xA0B1
+#define mmPA_SC_VPORT_SCISSOR_14_TL 0xA0B0
+#define mmPA_SC_VPORT_SCISSOR_15_BR 0xA0B3
+#define mmPA_SC_VPORT_SCISSOR_15_TL 0xA0B2
+#define mmPA_SC_VPORT_SCISSOR_1_BR 0xA097
+#define mmPA_SC_VPORT_SCISSOR_1_TL 0xA096
+#define mmPA_SC_VPORT_SCISSOR_2_BR 0xA099
+#define mmPA_SC_VPORT_SCISSOR_2_TL 0xA098
+#define mmPA_SC_VPORT_SCISSOR_3_BR 0xA09B
+#define mmPA_SC_VPORT_SCISSOR_3_TL 0xA09A
+#define mmPA_SC_VPORT_SCISSOR_4_BR 0xA09D
+#define mmPA_SC_VPORT_SCISSOR_4_TL 0xA09C
+#define mmPA_SC_VPORT_SCISSOR_5_BR 0xA09F
+#define mmPA_SC_VPORT_SCISSOR_5_TL 0xA09E
+#define mmPA_SC_VPORT_SCISSOR_6_BR 0xA0A1
+#define mmPA_SC_VPORT_SCISSOR_6_TL 0xA0A0
+#define mmPA_SC_VPORT_SCISSOR_7_BR 0xA0A3
+#define mmPA_SC_VPORT_SCISSOR_7_TL 0xA0A2
+#define mmPA_SC_VPORT_SCISSOR_8_BR 0xA0A5
+#define mmPA_SC_VPORT_SCISSOR_8_TL 0xA0A4
+#define mmPA_SC_VPORT_SCISSOR_9_BR 0xA0A7
+#define mmPA_SC_VPORT_SCISSOR_9_TL 0xA0A6
+#define mmPA_SC_VPORT_ZMAX_0 0xA0B5
+#define mmPA_SC_VPORT_ZMAX_10 0xA0C9
+#define mmPA_SC_VPORT_ZMAX_1 0xA0B7
+#define mmPA_SC_VPORT_ZMAX_11 0xA0CB
+#define mmPA_SC_VPORT_ZMAX_12 0xA0CD
+#define mmPA_SC_VPORT_ZMAX_13 0xA0CF
+#define mmPA_SC_VPORT_ZMAX_14 0xA0D1
+#define mmPA_SC_VPORT_ZMAX_15 0xA0D3
+#define mmPA_SC_VPORT_ZMAX_2 0xA0B9
+#define mmPA_SC_VPORT_ZMAX_3 0xA0BB
+#define mmPA_SC_VPORT_ZMAX_4 0xA0BD
+#define mmPA_SC_VPORT_ZMAX_5 0xA0BF
+#define mmPA_SC_VPORT_ZMAX_6 0xA0C1
+#define mmPA_SC_VPORT_ZMAX_7 0xA0C3
+#define mmPA_SC_VPORT_ZMAX_8 0xA0C5
+#define mmPA_SC_VPORT_ZMAX_9 0xA0C7
+#define mmPA_SC_VPORT_ZMIN_0 0xA0B4
+#define mmPA_SC_VPORT_ZMIN_10 0xA0C8
+#define mmPA_SC_VPORT_ZMIN_1 0xA0B6
+#define mmPA_SC_VPORT_ZMIN_11 0xA0CA
+#define mmPA_SC_VPORT_ZMIN_12 0xA0CC
+#define mmPA_SC_VPORT_ZMIN_13 0xA0CE
+#define mmPA_SC_VPORT_ZMIN_14 0xA0D0
+#define mmPA_SC_VPORT_ZMIN_15 0xA0D2
+#define mmPA_SC_VPORT_ZMIN_2 0xA0B8
+#define mmPA_SC_VPORT_ZMIN_3 0xA0BA
+#define mmPA_SC_VPORT_ZMIN_4 0xA0BC
+#define mmPA_SC_VPORT_ZMIN_5 0xA0BE
+#define mmPA_SC_VPORT_ZMIN_6 0xA0C0
+#define mmPA_SC_VPORT_ZMIN_7 0xA0C2
+#define mmPA_SC_VPORT_ZMIN_8 0xA0C4
+#define mmPA_SC_VPORT_ZMIN_9 0xA0C6
+#define mmPA_SC_WINDOW_OFFSET 0xA080
+#define mmPA_SC_WINDOW_SCISSOR_BR 0xA082
+#define mmPA_SC_WINDOW_SCISSOR_TL 0xA081
+#define mmPA_SU_CNTL_STATUS 0x2294
+#define mmPA_SU_DEBUG_CNTL 0x2280
+#define mmPA_SU_DEBUG_DATA 0x2281
+#define mmPA_SU_HARDWARE_SCREEN_OFFSET 0xA08D
+#define mmPA_SU_LINE_CNTL 0xA282
+#define mmPA_SU_LINE_STIPPLE_CNTL 0xA209
+#define mmPA_SU_LINE_STIPPLE_SCALE 0xA20A
+#define mmPA_SU_LINE_STIPPLE_VALUE 0x2298
+#define mmPA_SU_PERFCOUNTER0_HI 0x228D
+#define mmPA_SU_PERFCOUNTER0_LO 0x228C
+#define mmPA_SU_PERFCOUNTER0_SELECT 0x2288
+#define mmPA_SU_PERFCOUNTER1_HI 0x228F
+#define mmPA_SU_PERFCOUNTER1_LO 0x228E
+#define mmPA_SU_PERFCOUNTER1_SELECT 0x2289
+#define mmPA_SU_PERFCOUNTER2_HI 0x2291
+#define mmPA_SU_PERFCOUNTER2_LO 0x2290
+#define mmPA_SU_PERFCOUNTER2_SELECT 0x228A
+#define mmPA_SU_PERFCOUNTER3_HI 0x2293
+#define mmPA_SU_PERFCOUNTER3_LO 0x2292
+#define mmPA_SU_PERFCOUNTER3_SELECT 0x228B
+#define mmPA_SU_POINT_MINMAX 0xA281
+#define mmPA_SU_POINT_SIZE 0xA280
+#define mmPA_SU_POLY_OFFSET_BACK_OFFSET 0xA2E3
+#define mmPA_SU_POLY_OFFSET_BACK_SCALE 0xA2E2
+#define mmPA_SU_POLY_OFFSET_CLAMP 0xA2DF
+#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL 0xA2DE
+#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET 0xA2E1
+#define mmPA_SU_POLY_OFFSET_FRONT_SCALE 0xA2E0
+#define mmPA_SU_PRIM_FILTER_CNTL 0xA20B
+#define mmPA_SU_SC_MODE_CNTL 0xA205
+#define mmPA_SU_VTX_CNTL 0xA2F9
+#define mmRAS_BCI_SIGNATURE0 0x339E
+#define mmRAS_BCI_SIGNATURE1 0x339F
+#define mmRAS_CB_SIGNATURE0 0x339D
+#define mmRAS_DB_SIGNATURE0 0x338B
+#define mmRAS_IA_SIGNATURE0 0x3397
+#define mmRAS_IA_SIGNATURE1 0x3398
+#define mmRAS_PA_SIGNATURE0 0x338C
+#define mmRAS_SC_SIGNATURE0 0x338F
+#define mmRAS_SC_SIGNATURE1 0x3390
+#define mmRAS_SC_SIGNATURE2 0x3391
+#define mmRAS_SC_SIGNATURE3 0x3392
+#define mmRAS_SC_SIGNATURE4 0x3393
+#define mmRAS_SC_SIGNATURE5 0x3394
+#define mmRAS_SC_SIGNATURE6 0x3395
+#define mmRAS_SC_SIGNATURE7 0x3396
+#define mmRAS_SIGNATURE_CONTROL 0x3380
+#define mmRAS_SIGNATURE_MASK 0x3381
+#define mmRAS_SPI_SIGNATURE0 0x3399
+#define mmRAS_SPI_SIGNATURE1 0x339A
+#define mmRAS_SQ_SIGNATURE0 0x338E
+#define mmRAS_SX_SIGNATURE0 0x3382
+#define mmRAS_SX_SIGNATURE1 0x3383
+#define mmRAS_SX_SIGNATURE2 0x3384
+#define mmRAS_SX_SIGNATURE3 0x3385
+#define mmRAS_TA_SIGNATURE0 0x339B
+#define mmRAS_TD_SIGNATURE0 0x339C
+#define mmRAS_VGT_SIGNATURE0 0x338D
+#define mmRLC_AUTO_PG_CTRL 0x310D
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
+#define mmRLC_CGCG_CGLS_CTRL 0x3101
+#define mmRLC_CGCG_RAMP_CTRL 0x3102
+#define mmRLC_CGTT_MGCG_OVERRIDE 0x3100
+#define mmRLC_CNTL 0x30C0
+#define mmRLC_CU_STATUS 0x3106
+#define mmRLC_DEBUG 0x30CA
+#define mmRLC_DEBUG_SELECT 0x30C9
+#define mmRLC_DRIVER_CPDMA_STATUS 0x30C7
+#define mmRLC_DYN_PG_REQUEST 0x3104
+#define mmRLC_DYN_PG_STATUS 0x3103
+#define mmRLC_GPU_CLOCK_32 0x30D5
+#define mmRLC_GPU_CLOCK_32_RES_SEL 0x30D4
+#define mmRLC_GPU_CLOCK_COUNT_LSB 0x30CE
+#define mmRLC_GPU_CLOCK_COUNT_MSB 0x30CF
+#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK 0x3108
+#define mmRLC_LB_CNTL 0x30C3
+#define mmRLC_LB_CNTR_INIT 0x30C6
+#define mmRLC_LB_CNTR_MAX 0x30C5
+#define mmRLC_LB_INIT_CU_MASK 0x3107
+#define mmRLC_LB_PARAMS 0x3109
+#define mmRLC_LOAD_BALANCE_CNTR 0x30F6
+#define mmRLC_MAX_PG_CU 0x310C
+#define mmRLC_MC_CNTL 0x30D1
+#define mmRLC_MEM_SLP_CNTL 0x30D8
+#define mmRLC_PERFCOUNTER0_HI 0x30DC
+#define mmRLC_PERFCOUNTER0_LO 0x30DB
+#define mmRLC_PERFCOUNTER0_SELECT 0x30DA
+#define mmRLC_PERFCOUNTER1_HI 0x30DF
+#define mmRLC_PERFCOUNTER1_LO 0x30DE
+#define mmRLC_PERFCOUNTER1_SELECT 0x30DD
+#define mmRLC_PERFMON_CNTL 0x30D9
+#define mmRLC_PG_ALWAYS_ON_CU_MASK 0x310B
+#define mmRLC_PG_CNTL 0x30D7
+#define mmRLC_SAVE_AND_RESTORE_BASE 0x30C4
+#define mmRLC_SERDES_RD_DATA_0 0x3112
+#define mmRLC_SERDES_RD_DATA_1 0x3113
+#define mmRLC_SERDES_RD_DATA_2 0x3114
+#define mmRLC_SERDES_RD_MASTER_INDEX 0x3111
+#define mmRLC_SERDES_WR_CTRL 0x3117
+#define mmRLC_SERDES_WR_DATA 0x3118
+#define mmRLC_SMU_GRBM_REG_SAVE_CTRL 0x310E
+#define mmRLC_SMU_PG_CTRL 0x310F
+#define mmRLC_SMU_PG_WAKE_UP_CTRL 0x3110
+#define mmRLC_SOFT_RESET_GPU 0x30D6
+#define mmRLC_STAT 0x30D3
+#define mmRLC_THREAD1_DELAY 0x310A
+#define mmRLC_UCODE_CNTL 0x30D2
+#define mmSCRATCH_ADDR 0x2151
+#define mmSCRATCH_REG0 0x2140
+#define mmSCRATCH_REG1 0x2141
+#define mmSCRATCH_REG2 0x2142
+#define mmSCRATCH_REG3 0x2143
+#define mmSCRATCH_REG4 0x2144
+#define mmSCRATCH_REG5 0x2145
+#define mmSCRATCH_REG6 0x2146
+#define mmSCRATCH_REG7 0x2147
+#define mmSCRATCH_UMSK 0x2150
+#define mmSPI_ARB_CYCLES_0 0x243D
+#define mmSPI_ARB_CYCLES_1 0x243E
+#define mmSPI_ARB_PRIORITY 0x243C
+#define mmSPI_BARYC_CNTL 0xA1B8
+#define mmSPI_CONFIG_CNTL 0x2440
+#define mmSPI_CONFIG_CNTL_1 0x244F
+#define mmSPI_DEBUG_BUSY 0x2450
+#define mmSPI_DEBUG_CNTL 0x2441
+#define mmSPI_DEBUG_READ 0x2442
+#define mmSPI_GDS_CREDITS 0x24D8
+#define mmSPI_INTERP_CONTROL_0 0xA1B5
+#define mmSPI_LB_CTR_CTRL 0x24D4
+#define mmSPI_LB_CU_MASK 0x24D5
+#define mmSPI_LB_DATA_REG 0x24D6
+#define mmSPI_PERFCOUNTER0_HI 0x2447
+#define mmSPI_PERFCOUNTER0_LO 0x2448
+#define mmSPI_PERFCOUNTER0_SELECT 0x2443
+#define mmSPI_PERFCOUNTER1_HI 0x2449
+#define mmSPI_PERFCOUNTER1_LO 0x244A
+#define mmSPI_PERFCOUNTER1_SELECT 0x2444
+#define mmSPI_PERFCOUNTER2_HI 0x244B
+#define mmSPI_PERFCOUNTER2_LO 0x244C
+#define mmSPI_PERFCOUNTER2_SELECT 0x2445
+#define mmSPI_PERFCOUNTER3_HI 0x244D
+#define mmSPI_PERFCOUNTER3_LO 0x244E
+#define mmSPI_PERFCOUNTER3_SELECT 0x2446
+#define mmSPI_PERFCOUNTER_BINS 0x243F
+#define mmSPI_PG_ENABLE_STATIC_CU_MASK 0x24D7
+#define mmSPI_PS_IN_CONTROL 0xA1B6
+#define mmSPI_PS_INPUT_ADDR 0xA1B4
+#define mmSPI_PS_INPUT_CNTL_0 0xA191
+#define mmSPI_PS_INPUT_CNTL_10 0xA19B
+#define mmSPI_PS_INPUT_CNTL_1 0xA192
+#define mmSPI_PS_INPUT_CNTL_11 0xA19C
+#define mmSPI_PS_INPUT_CNTL_12 0xA19D
+#define mmSPI_PS_INPUT_CNTL_13 0xA19E
+#define mmSPI_PS_INPUT_CNTL_14 0xA19F
+#define mmSPI_PS_INPUT_CNTL_15 0xA1A0
+#define mmSPI_PS_INPUT_CNTL_16 0xA1A1
+#define mmSPI_PS_INPUT_CNTL_17 0xA1A2
+#define mmSPI_PS_INPUT_CNTL_18 0xA1A3
+#define mmSPI_PS_INPUT_CNTL_19 0xA1A4
+#define mmSPI_PS_INPUT_CNTL_20 0xA1A5
+#define mmSPI_PS_INPUT_CNTL_2 0xA193
+#define mmSPI_PS_INPUT_CNTL_21 0xA1A6
+#define mmSPI_PS_INPUT_CNTL_22 0xA1A7
+#define mmSPI_PS_INPUT_CNTL_23 0xA1A8
+#define mmSPI_PS_INPUT_CNTL_24 0xA1A9
+#define mmSPI_PS_INPUT_CNTL_25 0xA1AA
+#define mmSPI_PS_INPUT_CNTL_26 0xA1AB
+#define mmSPI_PS_INPUT_CNTL_27 0xA1AC
+#define mmSPI_PS_INPUT_CNTL_28 0xA1AD
+#define mmSPI_PS_INPUT_CNTL_29 0xA1AE
+#define mmSPI_PS_INPUT_CNTL_30 0xA1AF
+#define mmSPI_PS_INPUT_CNTL_3 0xA194
+#define mmSPI_PS_INPUT_CNTL_31 0xA1B0
+#define mmSPI_PS_INPUT_CNTL_4 0xA195
+#define mmSPI_PS_INPUT_CNTL_5 0xA196
+#define mmSPI_PS_INPUT_CNTL_6 0xA197
+#define mmSPI_PS_INPUT_CNTL_7 0xA198
+#define mmSPI_PS_INPUT_CNTL_8 0xA199
+#define mmSPI_PS_INPUT_CNTL_9 0xA19A
+#define mmSPI_PS_INPUT_ENA 0xA1B3
+#define mmSPI_PS_MAX_WAVE_ID 0x243B
+#define mmSPI_SHADER_COL_FORMAT 0xA1C5
+#define mmSPI_SHADER_PGM_HI_ES 0x2CC9
+#define mmSPI_SHADER_PGM_HI_GS 0x2C89
+#define mmSPI_SHADER_PGM_HI_HS 0x2D09
+#define mmSPI_SHADER_PGM_HI_LS 0x2D49
+#define mmSPI_SHADER_PGM_HI_PS 0x2C09
+#define mmSPI_SHADER_PGM_HI_VS 0x2C49
+#define mmSPI_SHADER_PGM_LO_ES 0x2CC8
+#define mmSPI_SHADER_PGM_LO_GS 0x2C88
+#define mmSPI_SHADER_PGM_LO_HS 0x2D08
+#define mmSPI_SHADER_PGM_LO_LS 0x2D48
+#define mmSPI_SHADER_PGM_LO_PS 0x2C08
+#define mmSPI_SHADER_PGM_LO_VS 0x2C48
+#define mmSPI_SHADER_PGM_RSRC1_ES 0x2CCA
+#define mmSPI_SHADER_PGM_RSRC1_GS 0x2C8A
+#define mmSPI_SHADER_PGM_RSRC1_HS 0x2D0A
+#define mmSPI_SHADER_PGM_RSRC1_LS 0x2D4A
+#define mmSPI_SHADER_PGM_RSRC1_PS 0x2C0A
+#define mmSPI_SHADER_PGM_RSRC1_VS 0x2C4A
+#define mmSPI_SHADER_PGM_RSRC2_ES 0x2CCB
+#define mmSPI_SHADER_PGM_RSRC2_GS 0x2C8B
+#define mmSPI_SHADER_PGM_RSRC2_HS 0x2D0B
+#define mmSPI_SHADER_PGM_RSRC2_LS 0x2D4B
+#define mmSPI_SHADER_PGM_RSRC2_PS 0x2C0B
+#define mmSPI_SHADER_PGM_RSRC2_VS 0x2C4B
+#define mmSPI_SHADER_POS_FORMAT 0xA1C3
+#define mmSPI_SHADER_TBA_HI_ES 0x2CC1
+#define mmSPI_SHADER_TBA_HI_GS 0x2C81
+#define mmSPI_SHADER_TBA_HI_HS 0x2D01
+#define mmSPI_SHADER_TBA_HI_LS 0x2D41
+#define mmSPI_SHADER_TBA_HI_PS 0x2C01
+#define mmSPI_SHADER_TBA_HI_VS 0x2C41
+#define mmSPI_SHADER_TBA_LO_ES 0x2CC0
+#define mmSPI_SHADER_TBA_LO_GS 0x2C80
+#define mmSPI_SHADER_TBA_LO_HS 0x2D00
+#define mmSPI_SHADER_TBA_LO_LS 0x2D40
+#define mmSPI_SHADER_TBA_LO_PS 0x2C00
+#define mmSPI_SHADER_TBA_LO_VS 0x2C40
+#define mmSPI_SHADER_TMA_HI_ES 0x2CC3
+#define mmSPI_SHADER_TMA_HI_GS 0x2C83
+#define mmSPI_SHADER_TMA_HI_HS 0x2D03
+#define mmSPI_SHADER_TMA_HI_LS 0x2D43
+#define mmSPI_SHADER_TMA_HI_PS 0x2C03
+#define mmSPI_SHADER_TMA_HI_VS 0x2C43
+#define mmSPI_SHADER_TMA_LO_ES 0x2CC2
+#define mmSPI_SHADER_TMA_LO_GS 0x2C82
+#define mmSPI_SHADER_TMA_LO_HS 0x2D02
+#define mmSPI_SHADER_TMA_LO_LS 0x2D42
+#define mmSPI_SHADER_TMA_LO_PS 0x2C02
+#define mmSPI_SHADER_TMA_LO_VS 0x2C42
+#define mmSPI_SHADER_USER_DATA_ES_0 0x2CCC
+#define mmSPI_SHADER_USER_DATA_ES_10 0x2CD6
+#define mmSPI_SHADER_USER_DATA_ES_1 0x2CCD
+#define mmSPI_SHADER_USER_DATA_ES_11 0x2CD7
+#define mmSPI_SHADER_USER_DATA_ES_12 0x2CD8
+#define mmSPI_SHADER_USER_DATA_ES_13 0x2CD9
+#define mmSPI_SHADER_USER_DATA_ES_14 0x2CDA
+#define mmSPI_SHADER_USER_DATA_ES_15 0x2CDB
+#define mmSPI_SHADER_USER_DATA_ES_2 0x2CCE
+#define mmSPI_SHADER_USER_DATA_ES_3 0x2CCF
+#define mmSPI_SHADER_USER_DATA_ES_4 0x2CD0
+#define mmSPI_SHADER_USER_DATA_ES_5 0x2CD1
+#define mmSPI_SHADER_USER_DATA_ES_6 0x2CD2
+#define mmSPI_SHADER_USER_DATA_ES_7 0x2CD3
+#define mmSPI_SHADER_USER_DATA_ES_8 0x2CD4
+#define mmSPI_SHADER_USER_DATA_ES_9 0x2CD5
+#define mmSPI_SHADER_USER_DATA_GS_0 0x2C8C
+#define mmSPI_SHADER_USER_DATA_GS_10 0x2C96
+#define mmSPI_SHADER_USER_DATA_GS_1 0x2C8D
+#define mmSPI_SHADER_USER_DATA_GS_11 0x2C97
+#define mmSPI_SHADER_USER_DATA_GS_12 0x2C98
+#define mmSPI_SHADER_USER_DATA_GS_13 0x2C99
+#define mmSPI_SHADER_USER_DATA_GS_14 0x2C9A
+#define mmSPI_SHADER_USER_DATA_GS_15 0x2C9B
+#define mmSPI_SHADER_USER_DATA_GS_2 0x2C8E
+#define mmSPI_SHADER_USER_DATA_GS_3 0x2C8F
+#define mmSPI_SHADER_USER_DATA_GS_4 0x2C90
+#define mmSPI_SHADER_USER_DATA_GS_5 0x2C91
+#define mmSPI_SHADER_USER_DATA_GS_6 0x2C92
+#define mmSPI_SHADER_USER_DATA_GS_7 0x2C93
+#define mmSPI_SHADER_USER_DATA_GS_8 0x2C94
+#define mmSPI_SHADER_USER_DATA_GS_9 0x2C95
+#define mmSPI_SHADER_USER_DATA_HS_0 0x2D0C
+#define mmSPI_SHADER_USER_DATA_HS_10 0x2D16
+#define mmSPI_SHADER_USER_DATA_HS_1 0x2D0D
+#define mmSPI_SHADER_USER_DATA_HS_11 0x2D17
+#define mmSPI_SHADER_USER_DATA_HS_12 0x2D18
+#define mmSPI_SHADER_USER_DATA_HS_13 0x2D19
+#define mmSPI_SHADER_USER_DATA_HS_14 0x2D1A
+#define mmSPI_SHADER_USER_DATA_HS_15 0x2D1B
+#define mmSPI_SHADER_USER_DATA_HS_2 0x2D0E
+#define mmSPI_SHADER_USER_DATA_HS_3 0x2D0F
+#define mmSPI_SHADER_USER_DATA_HS_4 0x2D10
+#define mmSPI_SHADER_USER_DATA_HS_5 0x2D11
+#define mmSPI_SHADER_USER_DATA_HS_6 0x2D12
+#define mmSPI_SHADER_USER_DATA_HS_7 0x2D13
+#define mmSPI_SHADER_USER_DATA_HS_8 0x2D14
+#define mmSPI_SHADER_USER_DATA_HS_9 0x2D15
+#define mmSPI_SHADER_USER_DATA_LS_0 0x2D4C
+#define mmSPI_SHADER_USER_DATA_LS_10 0x2D56
+#define mmSPI_SHADER_USER_DATA_LS_1 0x2D4D
+#define mmSPI_SHADER_USER_DATA_LS_11 0x2D57
+#define mmSPI_SHADER_USER_DATA_LS_12 0x2D58
+#define mmSPI_SHADER_USER_DATA_LS_13 0x2D59
+#define mmSPI_SHADER_USER_DATA_LS_14 0x2D5A
+#define mmSPI_SHADER_USER_DATA_LS_15 0x2D5B
+#define mmSPI_SHADER_USER_DATA_LS_2 0x2D4E
+#define mmSPI_SHADER_USER_DATA_LS_3 0x2D4F
+#define mmSPI_SHADER_USER_DATA_LS_4 0x2D50
+#define mmSPI_SHADER_USER_DATA_LS_5 0x2D51
+#define mmSPI_SHADER_USER_DATA_LS_6 0x2D52
+#define mmSPI_SHADER_USER_DATA_LS_7 0x2D53
+#define mmSPI_SHADER_USER_DATA_LS_8 0x2D54
+#define mmSPI_SHADER_USER_DATA_LS_9 0x2D55
+#define mmSPI_SHADER_USER_DATA_PS_0 0x2C0C
+#define mmSPI_SHADER_USER_DATA_PS_10 0x2C16
+#define mmSPI_SHADER_USER_DATA_PS_1 0x2C0D
+#define mmSPI_SHADER_USER_DATA_PS_11 0x2C17
+#define mmSPI_SHADER_USER_DATA_PS_12 0x2C18
+#define mmSPI_SHADER_USER_DATA_PS_13 0x2C19
+#define mmSPI_SHADER_USER_DATA_PS_14 0x2C1A
+#define mmSPI_SHADER_USER_DATA_PS_15 0x2C1B
+#define mmSPI_SHADER_USER_DATA_PS_2 0x2C0E
+#define mmSPI_SHADER_USER_DATA_PS_3 0x2C0F
+#define mmSPI_SHADER_USER_DATA_PS_4 0x2C10
+#define mmSPI_SHADER_USER_DATA_PS_5 0x2C11
+#define mmSPI_SHADER_USER_DATA_PS_6 0x2C12
+#define mmSPI_SHADER_USER_DATA_PS_7 0x2C13
+#define mmSPI_SHADER_USER_DATA_PS_8 0x2C14
+#define mmSPI_SHADER_USER_DATA_PS_9 0x2C15
+#define mmSPI_SHADER_USER_DATA_VS_0 0x2C4C
+#define mmSPI_SHADER_USER_DATA_VS_10 0x2C56
+#define mmSPI_SHADER_USER_DATA_VS_1 0x2C4D
+#define mmSPI_SHADER_USER_DATA_VS_11 0x2C57
+#define mmSPI_SHADER_USER_DATA_VS_12 0x2C58
+#define mmSPI_SHADER_USER_DATA_VS_13 0x2C59
+#define mmSPI_SHADER_USER_DATA_VS_14 0x2C5A
+#define mmSPI_SHADER_USER_DATA_VS_15 0x2C5B
+#define mmSPI_SHADER_USER_DATA_VS_2 0x2C4E
+#define mmSPI_SHADER_USER_DATA_VS_3 0x2C4F
+#define mmSPI_SHADER_USER_DATA_VS_4 0x2C50
+#define mmSPI_SHADER_USER_DATA_VS_5 0x2C51
+#define mmSPI_SHADER_USER_DATA_VS_6 0x2C52
+#define mmSPI_SHADER_USER_DATA_VS_7 0x2C53
+#define mmSPI_SHADER_USER_DATA_VS_8 0x2C54
+#define mmSPI_SHADER_USER_DATA_VS_9 0x2C55
+#define mmSPI_SHADER_Z_FORMAT 0xA1C4
+#define mmSPI_SLAVE_DEBUG_BUSY 0x24D3
+#define mmSPI_SX_EXPORT_BUFFER_SIZES 0x24D9
+#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES 0x24DA
+#define mmSPI_TMPRING_SIZE 0xA1BA
+#define mmSPI_VS_OUT_CONFIG 0xA1B1
+#define mmSQ_ALU_CLK_CTRL 0x2360
+#define mmSQ_BUF_RSRC_WORD0 0x23C0
+#define mmSQ_BUF_RSRC_WORD1 0x23C1
+#define mmSQ_BUF_RSRC_WORD2 0x23C2
+#define mmSQ_BUF_RSRC_WORD3 0x23C3
+#define mmSQC_CACHES 0x2302
+#define mmSQC_CONFIG 0x2301
+#define mmSQ_CONFIG 0x2300
+#define mmSQC_SECDED_CNT 0x23A0
+#define mmSQ_DEBUG_STS_GLOBAL 0x2309
+#define mmSQ_DED_CNT 0x23A2
+#define mmSQ_DED_INFO 0x23A3
+#define mmSQ_DS_0 0x237F
+#define mmSQ_DS_1 0x237F
+#define mmSQ_EXP_0 0x237F
+#define mmSQ_EXP_1 0x237F
+#define mmSQ_FIFO_SIZES 0x2305
+#define mmSQ_IMG_RSRC_WORD0 0x23C4
+#define mmSQ_IMG_RSRC_WORD1 0x23C5
+#define mmSQ_IMG_RSRC_WORD2 0x23C6
+#define mmSQ_IMG_RSRC_WORD3 0x23C7
+#define mmSQ_IMG_RSRC_WORD4 0x23C8
+#define mmSQ_IMG_RSRC_WORD5 0x23C9
+#define mmSQ_IMG_RSRC_WORD6 0x23CA
+#define mmSQ_IMG_RSRC_WORD7 0x23CB
+#define mmSQ_IMG_SAMP_WORD0 0x23CC
+#define mmSQ_IMG_SAMP_WORD1 0x23CD
+#define mmSQ_IMG_SAMP_WORD2 0x23CE
+#define mmSQ_IMG_SAMP_WORD3 0x23CF
+#define mmSQ_IND_CMD 0x237A
+#define mmSQ_IND_DATA 0x2379
+#define mmSQ_IND_INDEX 0x2378
+#define mmSQ_INST 0x237F
+#define mmSQ_LB_CTR_CTRL 0x2398
+#define mmSQ_LB_DATA_ALU_CYCLES 0x2399
+#define mmSQ_LB_DATA_ALU_STALLS 0x239B
+#define mmSQ_LB_DATA_TEX_CYCLES 0x239A
+#define mmSQ_LB_DATA_TEX_STALLS 0x239C
+#define mmSQ_MIMG_0 0x237F
+#define mmSQ_MIMG_1 0x237F
+#define mmSQ_MTBUF_0 0x237F
+#define mmSQ_MTBUF_1 0x237F
+#define mmSQ_MUBUF_0 0x237F
+#define mmSQ_MUBUF_1 0x237F
+#define mmSQ_PERFCOUNTER0_HI 0x2321
+#define mmSQ_PERFCOUNTER0_LO 0x2320
+#define mmSQ_PERFCOUNTER0_SELECT 0x2340
+#define mmSQ_PERFCOUNTER10_HI 0x2335
+#define mmSQ_PERFCOUNTER10_LO 0x2334
+#define mmSQ_PERFCOUNTER10_SELECT 0x234A
+#define mmSQ_PERFCOUNTER11_HI 0x2337
+#define mmSQ_PERFCOUNTER11_LO 0x2336
+#define mmSQ_PERFCOUNTER11_SELECT 0x234B
+#define mmSQ_PERFCOUNTER12_HI 0x2339
+#define mmSQ_PERFCOUNTER12_LO 0x2338
+#define mmSQ_PERFCOUNTER12_SELECT 0x234C
+#define mmSQ_PERFCOUNTER13_HI 0x233B
+#define mmSQ_PERFCOUNTER13_LO 0x233A
+#define mmSQ_PERFCOUNTER13_SELECT 0x234D
+#define mmSQ_PERFCOUNTER14_HI 0x233D
+#define mmSQ_PERFCOUNTER14_LO 0x233C
+#define mmSQ_PERFCOUNTER14_SELECT 0x234E
+#define mmSQ_PERFCOUNTER15_HI 0x233F
+#define mmSQ_PERFCOUNTER15_LO 0x233E
+#define mmSQ_PERFCOUNTER15_SELECT 0x234F
+#define mmSQ_PERFCOUNTER1_HI 0x2323
+#define mmSQ_PERFCOUNTER1_LO 0x2322
+#define mmSQ_PERFCOUNTER1_SELECT 0x2341
+#define mmSQ_PERFCOUNTER2_HI 0x2325
+#define mmSQ_PERFCOUNTER2_LO 0x2324
+#define mmSQ_PERFCOUNTER2_SELECT 0x2342
+#define mmSQ_PERFCOUNTER3_HI 0x2327
+#define mmSQ_PERFCOUNTER3_LO 0x2326
+#define mmSQ_PERFCOUNTER3_SELECT 0x2343
+#define mmSQ_PERFCOUNTER4_HI 0x2329
+#define mmSQ_PERFCOUNTER4_LO 0x2328
+#define mmSQ_PERFCOUNTER4_SELECT 0x2344
+#define mmSQ_PERFCOUNTER5_HI 0x232B
+#define mmSQ_PERFCOUNTER5_LO 0x232A
+#define mmSQ_PERFCOUNTER5_SELECT 0x2345
+#define mmSQ_PERFCOUNTER6_HI 0x232D
+#define mmSQ_PERFCOUNTER6_LO 0x232C
+#define mmSQ_PERFCOUNTER6_SELECT 0x2346
+#define mmSQ_PERFCOUNTER7_HI 0x232F
+#define mmSQ_PERFCOUNTER7_LO 0x232E
+#define mmSQ_PERFCOUNTER7_SELECT 0x2347
+#define mmSQ_PERFCOUNTER8_HI 0x2331
+#define mmSQ_PERFCOUNTER8_LO 0x2330
+#define mmSQ_PERFCOUNTER8_SELECT 0x2348
+#define mmSQ_PERFCOUNTER9_HI 0x2333
+#define mmSQ_PERFCOUNTER9_LO 0x2332
+#define mmSQ_PERFCOUNTER9_SELECT 0x2349
+#define mmSQ_PERFCOUNTER_CTRL 0x2306
+#define mmSQ_POWER_THROTTLE 0x2396
+#define mmSQ_POWER_THROTTLE2 0x2397
+#define mmSQ_RANDOM_WAVE_PRI 0x2303
+#define mmSQ_REG_CREDITS 0x2304
+#define mmSQ_SEC_CNT 0x23A1
+#define mmSQ_SMRD 0x237F
+#define mmSQ_SOP1 0x237F
+#define mmSQ_SOP2 0x237F
+#define mmSQ_SOPC 0x237F
+#define mmSQ_SOPK 0x237F
+#define mmSQ_SOPP 0x237F
+#define mmSQ_TEX_CLK_CTRL 0x2361
+#define mmSQ_THREAD_TRACE_BASE 0x2380
+#define mmSQ_THREAD_TRACE_CNTR 0x2390
+#define mmSQ_THREAD_TRACE_CTRL 0x238F
+#define mmSQ_THREAD_TRACE_HIWATER 0x2392
+#define mmSQ_THREAD_TRACE_MASK 0x2382
+#define mmSQ_THREAD_TRACE_MODE 0x238E
+#define mmSQ_THREAD_TRACE_PERF_MASK 0x2384
+#define mmSQ_THREAD_TRACE_SIZE 0x2381
+#define mmSQ_THREAD_TRACE_STATUS 0x238D
+#define mmSQ_THREAD_TRACE_TOKEN_MASK 0x2383
+#define mmSQ_THREAD_TRACE_USERDATA_0 0x2388
+#define mmSQ_THREAD_TRACE_USERDATA_1 0x2389
+#define mmSQ_THREAD_TRACE_USERDATA_2 0x238A
+#define mmSQ_THREAD_TRACE_USERDATA_3 0x238B
+#define mmSQ_THREAD_TRACE_WORD_CMN 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_EVENT 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_ISSUE 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_MISC 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIME 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2 0x23B1
+#define mmSQ_THREAD_TRACE_WORD_WAVE 0x23B0
+#define mmSQ_THREAD_TRACE_WORD_WAVE_START 0x23B0
+#define mmSQ_THREAD_TRACE_WPTR 0x238C
+#define mmSQ_TIME_HI 0x237C
+#define mmSQ_TIME_LO 0x237D
+#define mmSQ_VINTRP 0x237F
+#define mmSQ_VOP1 0x237F
+#define mmSQ_VOP2 0x237F
+#define mmSQ_VOP3_0 0x237F
+#define mmSQ_VOP3_0_SDST_ENC 0x237F
+#define mmSQ_VOP3_1 0x237F
+#define mmSQ_VOPC 0x237F
+#define mmSX_DEBUG_1 0x2418
+#define mmSX_DEBUG_BUSY 0x2414
+#define mmSX_DEBUG_BUSY_2 0x2415
+#define mmSX_DEBUG_BUSY_3 0x2416
+#define mmSX_DEBUG_BUSY_4 0x2417
+#define mmSX_PERFCOUNTER0_HI 0x2421
+#define mmSX_PERFCOUNTER0_LO 0x2420
+#define mmSX_PERFCOUNTER0_SELECT 0x241C
+#define mmSX_PERFCOUNTER1_HI 0x2423
+#define mmSX_PERFCOUNTER1_LO 0x2422
+#define mmSX_PERFCOUNTER1_SELECT 0x241D
+#define mmSX_PERFCOUNTER2_HI 0x2425
+#define mmSX_PERFCOUNTER2_LO 0x2424
+#define mmSX_PERFCOUNTER2_SELECT 0x241E
+#define mmSX_PERFCOUNTER3_HI 0x2427
+#define mmSX_PERFCOUNTER3_LO 0x2426
+#define mmSX_PERFCOUNTER3_SELECT 0x241F
+#define mmTA_BC_BASE_ADDR 0xA020
+#define mmTA_CGTT_CTRL 0x2544
+#define mmTA_CNTL 0x2541
+#define mmTA_CNTL_AUX 0x2542
+#define mmTA_CS_BC_BASE_ADDR 0x2543
+#define mmTA_DEBUG_DATA 0x254D
+#define mmTA_DEBUG_INDEX 0x254C
+#define mmTA_PERFCOUNTER0_HI 0x2556
+#define mmTA_PERFCOUNTER0_LO 0x2555
+#define mmTA_PERFCOUNTER0_SELECT 0x2554
+#define mmTA_PERFCOUNTER1_HI 0x2562
+#define mmTA_PERFCOUNTER1_LO 0x2561
+#define mmTA_PERFCOUNTER1_SELECT 0x2560
+#define mmTA_SCRATCH 0x2564
+#define mmTA_STATUS 0x2548
+#define mmTCA_CGTT_SCLK_CTRL 0x2BC1
+#define mmTCA_CTRL 0x2BC0
+#define mmTCA_PERFCOUNTER0_HI 0x2BD2
+#define mmTCA_PERFCOUNTER0_LO 0x2BD1
+#define mmTCA_PERFCOUNTER0_SELECT 0x2BD0
+#define mmTCA_PERFCOUNTER1_HI 0x2BD5
+#define mmTCA_PERFCOUNTER1_LO 0x2BD4
+#define mmTCA_PERFCOUNTER1_SELECT 0x2BD3
+#define mmTCA_PERFCOUNTER2_HI 0x2BD8
+#define mmTCA_PERFCOUNTER2_LO 0x2BD7
+#define mmTCA_PERFCOUNTER2_SELECT 0x2BD6
+#define mmTCA_PERFCOUNTER3_HI 0x2BDB
+#define mmTCA_PERFCOUNTER3_LO 0x2BDA
+#define mmTCA_PERFCOUNTER3_SELECT 0x2BD9
+#define mmTCC_CGTT_SCLK_CTRL 0x2B81
+#define mmTCC_CTRL 0x2B80
+#define mmTCC_EDC_COUNTER 0x2B82
+#define mmTCC_PERFCOUNTER0_HI 0x2B92
+#define mmTCC_PERFCOUNTER0_LO 0x2B91
+#define mmTCC_PERFCOUNTER0_SELECT 0x2B90
+#define mmTCC_PERFCOUNTER1_HI 0x2B95
+#define mmTCC_PERFCOUNTER1_LO 0x2B94
+#define mmTCC_PERFCOUNTER1_SELECT 0x2B93
+#define mmTCC_PERFCOUNTER2_HI 0x2B98
+#define mmTCC_PERFCOUNTER2_LO 0x2B97
+#define mmTCC_PERFCOUNTER2_SELECT 0x2B96
+#define mmTCC_PERFCOUNTER3_HI 0x2B9B
+#define mmTCC_PERFCOUNTER3_LO 0x2B9A
+#define mmTCC_PERFCOUNTER3_SELECT 0x2B99
+#define mmTCI_CNTL_1 0x2B62
+#define mmTCI_CNTL_2 0x2B63
+#define mmTCI_STATUS 0x2B61
+#define mmTCP_ADDR_CONFIG 0x2B05
+#define mmTCP_BUFFER_ADDR_HASH_CNTL 0x2B16
+#define mmTCP_CHAN_STEER_HI 0x2B04
+#define mmTCP_CHAN_STEER_LO 0x2B03
+#define mmTCP_CNTL 0x2B02
+#define mmTCP_CREDIT 0x2B06
+#define mmTCP_EDC_COUNTER 0x2B17
+#define mmTCP_INVALIDATE 0x2B00
+#define mmTCP_PERFCOUNTER0_HI 0x2B0A
+#define mmTCP_PERFCOUNTER0_LO 0x2B0B
+#define mmTCP_PERFCOUNTER0_SELECT 0x2B09
+#define mmTCP_PERFCOUNTER1_HI 0x2B0D
+#define mmTCP_PERFCOUNTER1_LO 0x2B0E
+#define mmTCP_PERFCOUNTER1_SELECT 0x2B0C
+#define mmTCP_PERFCOUNTER2_HI 0x2B10
+#define mmTCP_PERFCOUNTER2_LO 0x2B11
+#define mmTCP_PERFCOUNTER2_SELECT 0x2B0F
+#define mmTCP_PERFCOUNTER3_HI 0x2B13
+#define mmTCP_PERFCOUNTER3_LO 0x2B14
+#define mmTCP_PERFCOUNTER3_SELECT 0x2B12
+#define mmTCP_STATUS 0x2B01
+#define mmTD_CGTT_CTRL 0x2527
+#define mmTD_CNTL 0x2525
+#define mmTD_DEBUG_DATA 0x2529
+#define mmTD_DEBUG_INDEX 0x2528
+#define mmTD_PERFCOUNTER0_HI 0x252E
+#define mmTD_PERFCOUNTER0_LO 0x252D
+#define mmTD_PERFCOUNTER0_SELECT 0x252C
+#define mmTD_SCRATCH 0x2530
+#define mmTD_STATUS 0x2526
+#define mmUSER_SQC_BANK_DISABLE 0x2308
+#define mmVGT_CACHE_INVALIDATION 0x2231
+#define mmVGT_CNTL_STATUS 0x223C
+#define mmVGT_DEBUG_CNTL 0x2238
+#define mmVGT_DEBUG_DATA 0x2239
+#define mmVGT_DMA_BASE 0xA1FA
+#define mmVGT_DMA_BASE_HI 0xA1F9
+#define mmVGT_DMA_DATA_FIFO_DEPTH 0x222D
+#define mmVGT_DMA_INDEX_TYPE 0xA29F
+#define mmVGT_DMA_MAX_SIZE 0xA29E
+#define mmVGT_DMA_NUM_INSTANCES 0xA2A2
+#define mmVGT_DMA_REQ_FIFO_DEPTH 0x222E
+#define mmVGT_DMA_SIZE 0xA29D
+#define mmVGT_DRAW_INIT_FIFO_DEPTH 0x222F
+#define mmVGT_DRAW_INITIATOR 0xA1FC
+#define mmVGT_ENHANCE 0xA294
+#define mmVGT_ESGS_RING_ITEMSIZE 0xA2AB
+#define mmVGT_ESGS_RING_SIZE 0x2232
+#define mmVGT_ES_PER_GS 0xA296
+#define mmVGT_EVENT_ADDRESS_REG 0xA1FE
+#define mmVGT_EVENT_INITIATOR 0xA2A4
+#define mmVGT_FIFO_DEPTHS 0x2234
+#define mmVGT_GROUP_DECR 0xA28B
+#define mmVGT_GROUP_FIRST_DECR 0xA28A
+#define mmVGT_GROUP_PRIM_TYPE 0xA289
+#define mmVGT_GROUP_VECT_0_CNTL 0xA28C
+#define mmVGT_GROUP_VECT_0_FMT_CNTL 0xA28E
+#define mmVGT_GROUP_VECT_1_CNTL 0xA28D
+#define mmVGT_GROUP_VECT_1_FMT_CNTL 0xA28F
+#define mmVGT_GS_INSTANCE_CNT 0xA2E4
+#define mmVGT_GS_MAX_VERT_OUT 0xA2CE
+#define mmVGT_GS_MODE 0xA290
+#define mmVGT_GS_OUT_PRIM_TYPE 0xA29B
+#define mmVGT_GS_PER_ES 0xA295
+#define mmVGT_GS_PER_VS 0xA297
+#define mmVGT_GS_VERTEX_REUSE 0x2235
+#define mmVGT_GS_VERT_ITEMSIZE 0xA2D7
+#define mmVGT_GS_VERT_ITEMSIZE_1 0xA2D8
+#define mmVGT_GS_VERT_ITEMSIZE_2 0xA2D9
+#define mmVGT_GS_VERT_ITEMSIZE_3 0xA2DA
+#define mmVGT_GSVS_RING_ITEMSIZE 0xA2AC
+#define mmVGT_GSVS_RING_OFFSET_1 0xA298
+#define mmVGT_GSVS_RING_OFFSET_2 0xA299
+#define mmVGT_GSVS_RING_OFFSET_3 0xA29A
+#define mmVGT_GSVS_RING_SIZE 0x2233
+#define mmVGT_HOS_CNTL 0xA285
+#define mmVGT_HOS_MAX_TESS_LEVEL 0xA286
+#define mmVGT_HOS_MIN_TESS_LEVEL 0xA287
+#define mmVGT_HOS_REUSE_DEPTH 0xA288
+#define mmVGT_HS_OFFCHIP_PARAM 0x226C
+#define mmVGT_IMMED_DATA 0xA1FD
+#define mmVGT_INDEX_TYPE 0x2257
+#define mmVGT_INDX_OFFSET 0xA102
+#define mmVGT_INSTANCE_STEP_RATE_0 0xA2A8
+#define mmVGT_INSTANCE_STEP_RATE_1 0xA2A9
+#define mmVGT_LAST_COPY_STATE 0x2230
+#define mmVGT_LS_HS_CONFIG 0xA2D6
+#define mmVGT_MAX_VTX_INDX 0xA100
+#define mmVGT_MC_LAT_CNTL 0x2236
+#define mmVGT_MIN_VTX_INDX 0xA101
+#define mmVGT_MULTI_PRIM_IB_RESET_EN 0xA2A5
+#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0xA103
+#define mmVGT_NUM_INDICES 0x225C
+#define mmVGT_NUM_INSTANCES 0x225D
+#define mmVGT_OUT_DEALLOC_CNTL 0xA317
+#define mmVGT_OUTPUT_PATH_CNTL 0xA284
+#define mmVGT_PERFCOUNTER0_HI 0x224D
+#define mmVGT_PERFCOUNTER0_LO 0x224C
+#define mmVGT_PERFCOUNTER0_SELECT 0x2248
+#define mmVGT_PERFCOUNTER1_HI 0x224F
+#define mmVGT_PERFCOUNTER1_LO 0x224E
+#define mmVGT_PERFCOUNTER1_SELECT 0x2249
+#define mmVGT_PERFCOUNTER2_HI 0x2251
+#define mmVGT_PERFCOUNTER2_LO 0x2250
+#define mmVGT_PERFCOUNTER2_SELECT 0x224A
+#define mmVGT_PERFCOUNTER3_HI 0x2253
+#define mmVGT_PERFCOUNTER3_LO 0x2252
+#define mmVGT_PERFCOUNTER3_SELECT 0x224B
+#define mmVGT_PERFCOUNTER_SEID_MASK 0x2247
+#define mmVGT_PRIMITIVEID_EN 0xA2A1
+#define mmVGT_PRIMITIVEID_RESET 0xA2A3
+#define mmVGT_PRIMITIVE_TYPE 0x2256
+#define mmVGT_REUSE_OFF 0xA2AD
+#define mmVGT_SHADER_STAGES_EN 0xA2D5
+#define mmVGT_STRMOUT_BUFFER_CONFIG 0xA2E6
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0 0x2258
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1 0x2259
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2 0x225A
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3 0x225B
+#define mmVGT_STRMOUT_BUFFER_OFFSET_0 0xA2B7
+#define mmVGT_STRMOUT_BUFFER_OFFSET_1 0xA2BB
+#define mmVGT_STRMOUT_BUFFER_OFFSET_2 0xA2BF
+#define mmVGT_STRMOUT_BUFFER_OFFSET_3 0xA2C3
+#define mmVGT_STRMOUT_BUFFER_SIZE_0 0xA2B4
+#define mmVGT_STRMOUT_BUFFER_SIZE_1 0xA2B8
+#define mmVGT_STRMOUT_BUFFER_SIZE_2 0xA2BC
+#define mmVGT_STRMOUT_BUFFER_SIZE_3 0xA2C0
+#define mmVGT_STRMOUT_CONFIG 0xA2E5
+#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0xA2CB
+#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0xA2CA
+#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0xA2CC
+#define mmVGT_STRMOUT_VTX_STRIDE_0 0xA2B5
+#define mmVGT_STRMOUT_VTX_STRIDE_1 0xA2B9
+#define mmVGT_STRMOUT_VTX_STRIDE_2 0xA2BD
+#define mmVGT_STRMOUT_VTX_STRIDE_3 0xA2C1
+#define mmVGT_SYS_CONFIG 0x2263
+#define mmVGT_TF_MEMORY_BASE 0x226E
+#define mmVGT_TF_PARAM 0xA2DB
+#define mmVGT_TF_RING_SIZE 0x2262
+#define mmVGT_VERTEX_REUSE_BLOCK_CNTL 0xA316
+#define mmVGT_VTX_CNT_EN 0xA2AE
+#define mmVGT_VTX_VECT_EJECT_REG 0x222C
+
+/* manually added from old sid.h */
+#define mmCB_PERFCOUNTER0_SELECT0 0x2688
+#define mmCB_PERFCOUNTER1_SELECT0 0x268A
+#define mmCB_PERFCOUNTER1_SELECT1 0x268B
+#define mmCB_PERFCOUNTER2_SELECT0 0x268C
+#define mmCB_PERFCOUNTER2_SELECT1 0x268D
+#define mmCB_PERFCOUNTER3_SELECT0 0x268E
+#define mmCB_PERFCOUNTER3_SELECT1 0x268F
+#define mmCP_COHER_CNTL2 0x217A
+#define mmCP_DEBUG 0x307F
+#define mmRLC_SERDES_MASTER_BUSY_0 0x3119
+#define mmRLC_SERDES_MASTER_BUSY_1 0x311A
+#define mmRLC_RL_BASE 0x30C1
+#define mmRLC_RL_SIZE 0x30C2
+#define mmRLC_UCODE_ADDR 0x30CB
+#define mmRLC_UCODE_DATA 0x30CC
+#define mmRLC_GCPM_GENERAL_3 0x311E
+#define mmRLC_SERDES_WR_MASTER_MASK_0 0x3115
+#define mmRLC_SERDES_WR_MASTER_MASK_1 0x3116
+#define mmRLC_TTOP_D 0x3105
+#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
+#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h
new file mode 100644
index 000000000000..b5e634749665
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_sh_mask.h
@@ -0,0 +1,12821 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GFX_6_0_SH_MASK_H
+#define GFX_6_0_SH_MASK_H
+
+#define BCI_DEBUG_READ__DATA_MASK 0x00ffffffL
+#define BCI_DEBUG_READ__DATA__SHIFT 0x00000000
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00e00000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x00000015
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1f000000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x00000018
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001f0000L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x00000010
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000e0L
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x00000005
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001f00L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x00000008
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001fL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x00000000
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x0000001f
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x0000001e
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x0000001d
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xffffffffL
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x00000000
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xffffffffL
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x00000000
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xffffffffL
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x00000000
+#define CB_BLEND_RED__BLEND_RED_MASK 0xffffffffL
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x00000000
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR0_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR0_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR0_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR0_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR0_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR0_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR0_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR0_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR0_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR0_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR0_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR0_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR0_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR0_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR1_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR1_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR1_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR1_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR1_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR1_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR1_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR1_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR1_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR1_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR1_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR1_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR1_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR1_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR2_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR2_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR2_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR2_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR2_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR2_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR2_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR2_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR2_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR2_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR2_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR2_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR2_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR2_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR3_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR3_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR3_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR3_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR3_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR3_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR3_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR3_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR3_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR3_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR3_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR3_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR3_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR3_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR4_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR4_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR4_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR4_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR4_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR4_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR4_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR4_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR4_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR4_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR4_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR4_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR4_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR4_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR5_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR5_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR5_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR5_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR5_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR5_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR5_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR5_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR5_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR5_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR5_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR5_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR5_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR5_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR6_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR6_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR6_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR6_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR6_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR6_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR6_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR6_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR6_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR6_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR6_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR6_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR6_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR6_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT_MASK 0x00000c00L
+#define CB_COLOR7_ATTRIB__FMASK_BANK_HEIGHT__SHIFT 0x0000000a
+#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX_MASK 0x000003e0L
+#define CB_COLOR7_ATTRIB__FMASK_TILE_MODE_INDEX__SHIFT 0x00000005
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x00000011
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0000000f
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0x0000000c
+#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX_MASK 0x0000001fL
+#define CB_COLOR7_ATTRIB__TILE_MODE_INDEX__SHIFT 0x00000000
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xffffffffL
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x00000000
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xffffffffL
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x00000000
+#define CB_COLOR7_CMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_CMASK_SLICE__TILE_MAX_MASK 0x00003fffL
+#define CB_COLOR7_CMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_FMASK__BASE_256B_MASK 0xffffffffL
+#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x00000000
+#define CB_COLOR7_FMASK_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR7_FMASK_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x00000010
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0x0000000f
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x00000017
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x00000014
+#define CB_COLOR7_INFO__CMASK_IS_LINEAR_MASK 0x00080000L
+#define CB_COLOR7_INFO__CMASK_IS_LINEAR__SHIFT 0x00000013
+#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0x0000000e
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0x0000000b
+#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x00000000
+#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0x0000000d
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x0000001a
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007cL
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x00000002
+#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x00000007
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x00000008
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x00000012
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x00000011
+#define CB_COLOR7_PITCH__FMASK_TILE_MAX_MASK 0x7ff00000L
+#define CB_COLOR7_PITCH__FMASK_TILE_MAX__SHIFT 0x00000014
+#define CB_COLOR7_PITCH__TILE_MAX_MASK 0x000007ffL
+#define CB_COLOR7_PITCH__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_SLICE__TILE_MAX_MASK 0x003fffffL
+#define CB_COLOR7_SLICE__TILE_MAX__SHIFT 0x00000000
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007ffL
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x00000000
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x00000003
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x00000004
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00ff0000L
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x00000010
+#define CB_DEBUG_BUS_13__AC_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_13__AC_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_13__CACHE_CTRL_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_13__CACHE_CTRL_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_13__CRW_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_13__CRW_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_13__EVICT_PENDING_MASK 0x00000200L
+#define CB_DEBUG_BUS_13__EVICT_PENDING__SHIFT 0x00000009
+#define CB_DEBUG_BUS_13__FC_RD_PENDING_MASK 0x00000100L
+#define CB_DEBUG_BUS_13__FC_RD_PENDING__SHIFT 0x00000008
+#define CB_DEBUG_BUS_13__FC_WR_PENDING_MASK 0x00000080L
+#define CB_DEBUG_BUS_13__FC_WR_PENDING__SHIFT 0x00000007
+#define CB_DEBUG_BUS_13__LAST_RD_ARB_WINNER_MASK 0x00000400L
+#define CB_DEBUG_BUS_13__LAST_RD_ARB_WINNER__SHIFT 0x0000000a
+#define CB_DEBUG_BUS_13__MC_WR_PENDING_MASK 0x00000040L
+#define CB_DEBUG_BUS_13__MC_WR_PENDING__SHIFT 0x00000006
+#define CB_DEBUG_BUS_13__MU_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_13__MU_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_13__MU_STATE_MASK 0x0007f800L
+#define CB_DEBUG_BUS_13__MU_STATE__SHIFT 0x0000000b
+#define CB_DEBUG_BUS_13__TILE_INTFC_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_13__TILE_INTFC_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_13__TQ_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_13__TQ_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_14__ADDR_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_14__ADDR_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_14__CACHE_CTL_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_14__CACHE_CTL_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_14__CLEAR_BUSY_MASK 0x00000100L
+#define CB_DEBUG_BUS_14__CLEAR_BUSY__SHIFT 0x00000008
+#define CB_DEBUG_BUS_14__FOP_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_14__FOP_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_14__LAT_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_14__LAT_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_14__MERGE_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_14__MERGE_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_14__QUAD_BUSY_MASK 0x00000040L
+#define CB_DEBUG_BUS_14__QUAD_BUSY__SHIFT 0x00000006
+#define CB_DEBUG_BUS_14__TILE_BUSY_MASK 0x00000080L
+#define CB_DEBUG_BUS_14__TILE_BUSY__SHIFT 0x00000007
+#define CB_DEBUG_BUS_14__TILE_RETIREMENT_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_14__TILE_RETIREMENT_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_15__CS_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_15__CS_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_15__DS_BUSY_MASK 0x00000040L
+#define CB_DEBUG_BUS_15__DS_BUSY__SHIFT 0x00000006
+#define CB_DEBUG_BUS_15__IB_BUSY_MASK 0x00000100L
+#define CB_DEBUG_BUS_15__IB_BUSY__SHIFT 0x00000008
+#define CB_DEBUG_BUS_15__RB_BUSY_MASK 0x00000020L
+#define CB_DEBUG_BUS_15__RB_BUSY__SHIFT 0x00000005
+#define CB_DEBUG_BUS_15__SF_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_15__SF_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_15__SURF_SYNC_START_MASK 0x00000004L
+#define CB_DEBUG_BUS_15__SURF_SYNC_START__SHIFT 0x00000002
+#define CB_DEBUG_BUS_15__SURF_SYNC_STATE_MASK 0x00000003L
+#define CB_DEBUG_BUS_15__SURF_SYNC_STATE__SHIFT 0x00000000
+#define CB_DEBUG_BUS_15__TB_BUSY_MASK 0x00000080L
+#define CB_DEBUG_BUS_15__TB_BUSY__SHIFT 0x00000007
+#define CB_DEBUG_BUS_16__CC_WRREQ_FIFO_EMPTY_MASK 0x00100000L
+#define CB_DEBUG_BUS_16__CC_WRREQ_FIFO_EMPTY__SHIFT 0x00000014
+#define CB_DEBUG_BUS_16__CM_WRREQ_FIFO_EMPTY_MASK 0x00400000L
+#define CB_DEBUG_BUS_16__CM_WRREQ_FIFO_EMPTY__SHIFT 0x00000016
+#define CB_DEBUG_BUS_16__FC_WRREQ_FIFO_EMPTY_MASK 0x00200000L
+#define CB_DEBUG_BUS_16__FC_WRREQ_FIFO_EMPTY__SHIFT 0x00000015
+#define CB_DEBUG_BUS_16__LAST_RD_GRANT_VEC_MASK 0x000003c0L
+#define CB_DEBUG_BUS_16__LAST_RD_GRANT_VEC__SHIFT 0x00000006
+#define CB_DEBUG_BUS_16__LAST_WR_GRANT_VEC_MASK 0x000f0000L
+#define CB_DEBUG_BUS_16__LAST_WR_GRANT_VEC__SHIFT 0x00000010
+#define CB_DEBUG_BUS_16__MC_RDREQ_CREDITS_MASK 0x0000003fL
+#define CB_DEBUG_BUS_16__MC_RDREQ_CREDITS__SHIFT 0x00000000
+#define CB_DEBUG_BUS_16__MC_WRREQ_CREDITS_MASK 0x0000fc00L
+#define CB_DEBUG_BUS_16__MC_WRREQ_CREDITS__SHIFT 0x0000000a
+#define CB_DEBUG_BUS_17__BB_BUSY_MASK 0x00000008L
+#define CB_DEBUG_BUS_17__BB_BUSY__SHIFT 0x00000003
+#define CB_DEBUG_BUS_17__CC_BUSY_MASK 0x00000004L
+#define CB_DEBUG_BUS_17__CC_BUSY__SHIFT 0x00000002
+#define CB_DEBUG_BUS_17__CM_BUSY_MASK 0x00000001L
+#define CB_DEBUG_BUS_17__CM_BUSY__SHIFT 0x00000000
+#define CB_DEBUG_BUS_17__CORE_SCLK_VLD_MASK 0x00000020L
+#define CB_DEBUG_BUS_17__CORE_SCLK_VLD__SHIFT 0x00000005
+#define CB_DEBUG_BUS_17__FC_BUSY_MASK 0x00000002L
+#define CB_DEBUG_BUS_17__FC_BUSY__SHIFT 0x00000001
+#define CB_DEBUG_BUS_17__MA_BUSY_MASK 0x00000010L
+#define CB_DEBUG_BUS_17__MA_BUSY__SHIFT 0x00000004
+#define CB_DEBUG_BUS_17__REG_SCLK0_VLD_MASK 0x00000080L
+#define CB_DEBUG_BUS_17__REG_SCLK0_VLD__SHIFT 0x00000007
+#define CB_DEBUG_BUS_17__REG_SCLK1_VLD_MASK 0x00000040L
+#define CB_DEBUG_BUS_17__REG_SCLK1_VLD__SHIFT 0x00000006
+#define CB_DEBUG_BUS_18__NOT_USED_MASK 0x00ffffffL
+#define CB_DEBUG_BUS_18__NOT_USED__SHIFT 0x00000000
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001f800L
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0x0000000b
+#define CB_HW_CONTROL_1__CHICKEN_BITS_MASK 0xfc000000L
+#define CB_HW_CONTROL_1__CHICKEN_BITS__SHIFT 0x0000001a
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001fL
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x00000000
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03fe0000L
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x00000011
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007e0L
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x00000005
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000ffL
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x00000000
+#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xff800000L
+#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x00000017
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007f8000L
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0x0000000f
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007f00L
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x00000008
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x00000000
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x00000010
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000f000L
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0x0000000c
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000fL
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x00000000
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x00000019
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x0000001a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x00000018
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x00000015
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x0000001b
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x0000001e
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x00000016
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x00000012
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x0000001f
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x00000017
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003c0L
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x00000006
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x00000014
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x00000013
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x0000001d
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x0000001c
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001ffL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007fc00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000fL
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x00000000
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000f0L
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x00000004
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000f00L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x00000008
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000f000L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0x0000000c
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000f0000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x00000010
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00f00000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x00000014
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0f000000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x00000018
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xf0000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x0000001c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000fL
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x00000000
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000f0L
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x00000004
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000f00L
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x00000008
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000f000L
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0x0000000c
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000f0000L
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x00000010
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00f00000L
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x00000014
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0f000000L
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x00000018
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xf0000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x0000001c
+#define CC_GC_SHADER_ARRAY_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define CC_GC_SHADER_ARRAY_CONFIG__DPFP_RATE__SHIFT 0x00000001
+#define CC_GC_SHADER_ARRAY_CONFIG__HALF_LDS_MASK 0x00000010L
+#define CC_GC_SHADER_ARRAY_CONFIG__HALF_LDS__SHIFT 0x00000004
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x00000010
+#define CC_GC_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define CC_GC_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x00000003
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000fL
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x00000000
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000f0L
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x00000004
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000f00L
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x00000008
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000f000L
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0x0000000c
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000f0000L
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x00000010
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00f00000L
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x00000014
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0f000000L
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x00000018
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xf0000000L
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x0000001c
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0x0000000c
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x00000014
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000f00L
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x00000008
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000f0000L
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x00000010
+#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0x000f0000L
+#define CC_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x00000010
+#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0x00f00000L
+#define CC_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x00000014
+#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0x0f000000L
+#define CC_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x00000018
+#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000L
+#define CC_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x0000001c
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001f00L
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x00000008
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001fL
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x00000000
+#define CGTS_RD_REG__READ_DATA_MASK 0x00003fffL
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x00000000
+#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
+#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x00000010
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x00000016
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0x0000000c
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000ff0L
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x00000004
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x00000017
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xff000000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x00000018
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000fL
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x00000000
+#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
+#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x00000015
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x00000014
+#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000e0000L
+#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x00000011
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000L
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x00000010
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xffff0000L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x00000010
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x00000019
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x00000018
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x00fff000L
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0x0000000c
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x0000001e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x0000001f
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x0000001a
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x00000019
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE_MASK 0x02000000L
+#define CGTT_PC_CLK_CTRL__BACK_CLK_ON_OVERRIDE__SHIFT 0x00000019
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE_MASK 0x04000000L
+#define CGTT_PC_CLK_CTRL__FRONT_CLK_ON_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00fc0000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x00000012
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x00000018
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x0000001e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x0000001f
+#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SC_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SC_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x0000001a
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x0000001c
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x0000001b
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00fc0000L
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x00000012
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x00000018
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000fL
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x00000000
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x00fff000L
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0x0000000c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_TCP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_TCP_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_TCP_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCP_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x0000001e
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x04000000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x0000001a
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x0000001d
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0x00000019
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x0000001f
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG00__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_empty__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG00__ccgen_to_clipcc_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_fifo_empty__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_full_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG00__clipcode_fifo_full__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write_MASK 0x00000100L
+#define CLIPPER_DEBUG_REG00__clip_ga_bc_fifo_write__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full_MASK 0x00001000L
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_full__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write_MASK 0x00000800L
+#define CLIPPER_DEBUG_REG00__clip_to_ga_fifo_write__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty_MASK 0x00008000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_empty__SHIFT 0x0000000f
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full_MASK 0x00010000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_full__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG00__clip_to_outsm_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty_MASK 0x00002000L
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_empty__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full_MASK 0x00004000L
+#define CLIPPER_DEBUG_REG00__primic_to_clprim_fifo_full__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG00__su_clip_baryc_free_MASK 0x00000600L
+#define CLIPPER_DEBUG_REG00__su_clip_baryc_free__SHIFT 0x00000009
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty_MASK 0x00020000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_empty__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_full__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clipp_fifo_write__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_empty__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG00__vgt_to_clips_fifo_full__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_empty__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG00__vte_out_clip_fifo_fifo_full__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_empty__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_full__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG00__vte_out_orig_fifo_fifo_write__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG01__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid_MASK 0x00000700L
+#define CLIPPER_DEBUG_REG01__clip_extra_bc_valid__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG01__clip_ga_bc_fifo_write__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG01__clip_to_ga_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot_MASK 0x000e0000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_deallocate_slot__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_null_primitive__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate_MASK 0x0001c000L
+#define CLIPPER_DEBUG_REG01__clip_to_outsm_vertex_deallocate__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid_MASK 0x00003800L
+#define CLIPPER_DEBUG_REG01__clip_vert_vte_valid__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_fifo_fifo_empty__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_extra_bc_valid__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx_MASK 0x0c000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vertex_store_indx__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG01__vte_out_clip_rd_vte_naninf_kill__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_1__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG01__vte_positions_vte_clip_vte_naninf_kill_2__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid_MASK 0x00000007L
+#define CLIPPER_DEBUG_REG02__clip_extra_bc_valid__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_full__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG02__clip_ga_bc_fifo_write__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_extra_bc_coords__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill_MASK 0x00200000L
+#define CLIPPER_DEBUG_REG02__clip_to_clipga_vte_naninf_kill__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_full__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG02__clip_to_ga_fifo_write__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clipped_prim__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx_MASK 0x000000c0L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_clip_seq_indx__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet_MASK 0x00400000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_end_of_packet__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_fifo_empty__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_first_prim_of_slot__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_null_primitive__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0_MASK 0x000f0000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_0__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1_MASK 0x0000f000L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_1__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG02__clip_to_outsm_vertex_store_indx_2__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid_MASK 0x00000038L
+#define CLIPPER_DEBUG_REG02__clip_vert_vte_valid__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG03__clipsm0_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG04__clipsm0_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG05__clipsm1_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG06__clipsm1_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG07__clipsm2_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG08__clipsm2_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or_MASK 0x00003fffL
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_code_or__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot_MASK 0x07000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_deallocate_slot__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_end_of_packet__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id_MASK 0x000fc000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_id__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_first_prim_of_slot__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx_MASK 0x00700000L
+#define CLIPPER_DEBUG_REG09__clipsm3_clprim_to_clip_state_var_indx__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_event__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_null_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0_MASK 0x000007feL
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_param_cache_indx_0__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0_MASK 0x1f800000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_0__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1_MASK 0x007e0000L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_1__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2_MASK 0x0001f800L
+#define CLIPPER_DEBUG_REG10__clipsm3_clprim_to_clip_vertex_store_indx_2__SHIFT 0x0000000b
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_primitive__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt_MASK 0x00f00000L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event_MASK 0x00000008L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_event__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG11__clipsm0_clip_to_clipga_prim_valid__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG11__clipsm0_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive_MASK 0x00000040L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_primitive__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt_MASK 0x000f0000L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event_MASK 0x00000004L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_event__SHIFT 0x00000002
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG11__clipsm1_clip_to_clipga_prim_valid__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG11__clipsm1_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive_MASK 0x00000020L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_primitive__SHIFT 0x00000005
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt_MASK 0x0000f000L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event_MASK 0x00000002L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_event__SHIFT 0x00000001
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG11__clipsm2_clip_to_clipga_prim_valid__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG11__clipsm2_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive_MASK 0x00000010L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_primitive__SHIFT 0x00000004
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event_MASK 0x00000001L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_event__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG11__clipsm3_clip_to_clipga_prim_valid__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG11__clipsm3_inc_clip_to_clipga_clip_to_outsm_cnt__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO_MASK 0x000000ffL
+#define CLIPPER_DEBUG_REG12__ALWAYS_ZERO__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG12__clip_priority_available_clip_verts__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG12__clip_priority_available_vte_out_clip__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load_MASK 0x00c00000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_load__SHIFT 0x00000016
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out_MASK 0x000c0000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_out__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert_MASK 0x00300000L
+#define CLIPPER_DEBUG_REG12__clip_priority_seq_indx_vert__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_clip_primitive__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG12__clipsm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_clip_primitive__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG12__clipsm1_clprim_to_clip_prim_valid__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive_MASK 0x04000000L
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_clip_primitive__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG12__clipsm2_clprim_to_clip_prim_valid__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive_MASK 0x01000000L
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_clip_primitive__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid_MASK 0x02000000L
+#define CLIPPER_DEBUG_REG12__clipsm3_clprim_to_clip_prim_valid__SHIFT 0x00000019
+#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty_MASK 0x00010000L
+#define CLIPPER_DEBUG_REG13__ccgen_to_clipcc_fifo_empty__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx_MASK 0x00003000L
+#define CLIPPER_DEBUG_REG13__clipcc_vertex_store_indx__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty_MASK 0x00008000L
+#define CLIPPER_DEBUG_REG13__clipcode_fifo_fifo_empty__SHIFT 0x0000000f
+#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt_MASK 0x001e0000L
+#define CLIPPER_DEBUG_REG13__clip_priority_seq_indx_out_cnt__SHIFT 0x00000011
+#define CLIPPER_DEBUG_REG13__clprim_clip_primitive_MASK 0x00000020L
+#define CLIPPER_DEBUG_REG13__clprim_clip_primitive__SHIFT 0x00000005
+#define CLIPPER_DEBUG_REG13__clprim_cull_primitive_MASK 0x00000040L
+#define CLIPPER_DEBUG_REG13__clprim_cull_primitive__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx_MASK 0x00000007L
+#define CLIPPER_DEBUG_REG13__clprim_in_back_state_var_indx__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_advanceread__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents_MASK 0x1f000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_contents__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_full__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_fifo_write__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait_MASK 0x00800000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_clipsm_wait__SHIFT 0x00000017
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices_MASK 0x00600000L
+#define CLIPPER_DEBUG_REG13__outsm_clr_rd_orig_vertices__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG13__point_clip_candidate_MASK 0x00000008L
+#define CLIPPER_DEBUG_REG13__point_clip_candidate__SHIFT 0x00000003
+#define CLIPPER_DEBUG_REG13__prim_back_valid_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG13__prim_back_valid__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG13__prim_nan_kill_MASK 0x00000010L
+#define CLIPPER_DEBUG_REG13__prim_nan_kill__SHIFT 0x00000004
+#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid_MASK 0x00000f00L
+#define CLIPPER_DEBUG_REG13__vertval_bits_vertex_cc_next_valid__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty_MASK 0x00004000L
+#define CLIPPER_DEBUG_REG13__vte_out_orig_fifo_fifo_empty__SHIFT 0x0000000e
+#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot_MASK 0x00e00000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_deallocate_slot__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_end_of_packet__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id_MASK 0x3f000000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_id__SHIFT 0x00000018
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_event__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot_MASK 0x00100000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_first_prim_of_slot__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0_MASK 0x0003f000L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_0__SHIFT 0x0000000c
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1_MASK 0x00000fc0L
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_1__SHIFT 0x00000006
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2_MASK 0x0000003fL
+#define CLIPPER_DEBUG_REG14__clprim_in_back_vertex_store_indx_2__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG14__outputclprimtoclip_null_primitive__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG14__prim_back_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG14__prim_back_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0_MASK 0x7c000000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_0__SHIFT 0x0000001a
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1_MASK 0x03e00000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_1__SHIFT 0x00000015
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2_MASK 0x001f0000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_fifo_vertex_store_indx_2__SHIFT 0x00000010
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG15__primic_to_clprim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb_MASK 0x0000ffffL
+#define CLIPPER_DEBUG_REG15__vertval_bits_vertex_vertex_store_msb__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG16__sm0_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG16__sm0_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG16__sm0_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG16__sm0_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG16__sm0_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG16__sm0_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG16__sm0_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG16__sm0_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG16__sm0_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG16__sm0_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG16__sm0_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG16__sm0_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG16__sm0_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG17__sm1_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG17__sm1_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG17__sm1_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG17__sm1_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG17__sm1_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG17__sm1_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG17__sm1_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG17__sm1_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG17__sm1_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG17__sm1_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG17__sm1_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG17__sm1_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG17__sm1_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG18__sm2_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG18__sm2_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG18__sm2_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG18__sm2_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG18__sm2_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG18__sm2_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG18__sm2_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG18__sm2_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG18__sm2_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG18__sm2_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG18__sm2_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG18__sm2_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG18__sm2_vertex_clip_cnt__SHIFT 0x0000000d
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0_MASK 0x08000000L
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_clipga_clip_to_outsm_cnt_eq0__SHIFT 0x0000001b
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full_MASK 0x10000000L
+#define CLIPPER_DEBUG_REG19__sm3_clip_to_outsm_fifo_full__SHIFT 0x0000001c
+#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt_MASK 0x00001f00L
+#define CLIPPER_DEBUG_REG19__sm3_clip_vert_cnt__SHIFT 0x00000008
+#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid_MASK 0x80000000L
+#define CLIPPER_DEBUG_REG19__sm3_clprim_to_clip_prim_valid__SHIFT 0x0000001f
+#define CLIPPER_DEBUG_REG19__sm3_current_state_MASK 0x07f00000L
+#define CLIPPER_DEBUG_REG19__sm3_current_state__SHIFT 0x00000014
+#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq_MASK 0x20000000L
+#define CLIPPER_DEBUG_REG19__sm3_highest_priority_seq__SHIFT 0x0000001d
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0_MASK 0x00080000L
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_0__SHIFT 0x00000013
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1_MASK 0x00040000L
+#define CLIPPER_DEBUG_REG19__sm3_inv_to_clip_data_valid_1__SHIFT 0x00000012
+#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0_MASK 0x40000000L
+#define CLIPPER_DEBUG_REG19__sm3_outputcliptoclipga_0__SHIFT 0x0000001e
+#define CLIPPER_DEBUG_REG19__sm3_prim_end_state_MASK 0x0000007fL
+#define CLIPPER_DEBUG_REG19__sm3_prim_end_state__SHIFT 0x00000000
+#define CLIPPER_DEBUG_REG19__sm3_ps_expand_MASK 0x00000080L
+#define CLIPPER_DEBUG_REG19__sm3_ps_expand__SHIFT 0x00000007
+#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt_MASK 0x0003e000L
+#define CLIPPER_DEBUG_REG19__sm3_vertex_clip_cnt__SHIFT 0x0000000d
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x00000000
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xffffffffL
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x00000000
+#define COMPUTE_DIM_X__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x00000000
+#define COMPUTE_DIM_Y__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x00000000
+#define COMPUTE_DIM_Z__SIZE_MASK 0xffffffffL
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x00000000
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x00000000
+#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__DATA_ATC__SHIFT 0x0000000c
+#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL_MASK 0x00000380L
+#define COMPUTE_DISPATCH_INITIATOR__DISPATCH_CACHE_CNTL__SHIFT 0x00000007
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x00000002
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x00000003
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x00000004
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x00000006
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x00000001
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0x0000000e
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0x0000000a
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x00000005
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0x0000000b
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000ffffL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x00000000
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xffff0000L
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x00000010
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_PGM_HI__INST_ATC_MASK 0x00000100L
+#define COMPUTE_PGM_HI__INST_ATC__SHIFT 0x00000008
+#define COMPUTE_PGM_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x00000018
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x00000019
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x00000016
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x00000015
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000ff000L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0x0000000c
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x00000017
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000c00L
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0x0000000a
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x00000014
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003c0L
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x00000006
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003fL
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7f000000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0x0000000d
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x00000018
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00ff8000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0x0000000f
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x00000000
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x00000007
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x00000008
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x00000009
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0x0000000a
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0x0000000b
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x00000006
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003eL
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x00000001
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x00000018
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x00000017
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003f0000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x00000010
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x00000016
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000f000L
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0x0000000c
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x0000003fL
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x00000000
+#define COMPUTE_START_X__START_MASK 0xffffffffL
+#define COMPUTE_START_X__START__SHIFT 0x00000000
+#define COMPUTE_START_Y__START_MASK 0xffffffffL
+#define COMPUTE_START_Y__START__SHIFT 0x00000000
+#define COMPUTE_START_Z__START_MASK 0xffffffffL
+#define COMPUTE_START_Z__START__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000ffffL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xffff0000L
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x00000010
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000ffffL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x00000000
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xffff0000L
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x00000010
+#define COMPUTE_TBA_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_TBA_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_TBA_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_TBA_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_TMA_HI__DATA_MASK 0x000000ffL
+#define COMPUTE_TMA_HI__DATA__SHIFT 0x00000000
+#define COMPUTE_TMA_LO__DATA_MASK 0xffffffffL
+#define COMPUTE_TMA_LO__DATA__SHIFT 0x00000000
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01fff000L
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0x0000000c
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000fffL
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x00000000
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xffffffffL
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x00000000
+#define COMPUTE_VMID__DATA_MASK 0x0000000fL
+#define COMPUTE_VMID__DATA__SHIFT 0x00000000
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xe0000000L
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x0000001d
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00030000L
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x00000010
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x000000ffL
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x00000000
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xfffffffcL
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x00000002
+#define CP_APPEND_DATA__DATA_MASK 0xffffffffL
+#define CP_APPEND_DATA__DATA__SHIFT 0x00000000
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xffffffffL
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x00000000
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xffffffffL
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x00000000
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xffffffffL
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x00000000
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xffffffffL
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x00000000
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x00000016
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x00000006
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x00000012
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0x0000000f
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x00000011
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x00000008
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x00000007
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x00000014
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x00000015
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0x0000000a
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x00000009
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x00000000
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0x0000000c
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0x0000000d
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0x0000000e
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x00000013
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xffffffffL
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x00000000
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x000000ffL
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x00000000
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffcL
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x00000002
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000fffffL
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x00000000
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x000000ffL
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x00000000
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffcL
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x00000002
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000fffffL
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x00000000
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x000000ffL
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x00000000
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xffffffe0L
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x00000005
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000fffL
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x00000000
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07ff0000L
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x00000010
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007ffL
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x00000000
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007ffL
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x00000000
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003ffL
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x00000000
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03ff0000L
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x00000010
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003ffL
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x00000000
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03ff0000L
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x00000010
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003ffL
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x00000000
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03ff0000L
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x00000010
+#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000fffL
+#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x00000000
+#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xffffffffL
+#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x00000000
+#define CP_CMD_DATA__CMD_DATA_MASK 0xffffffffL
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x00000000
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007ffL
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x00000000
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0x0000000c
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00030000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x00000010
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0ff00000L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x00000014
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000ffL
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x00000000
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x0000001c
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x00000008
+#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xffffffffL
+#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x00000000
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000ffL
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x00000000
+#define CP_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x00000006
+#define CP_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x00000007
+#define CP_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x00000008
+#define CP_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x00000009
+#define CP_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0x0000000a
+#define CP_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0x0000000b
+#define CP_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0x0000000c
+#define CP_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0x0000000d
+#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
+#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x00000019
+#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
+#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x0000001a
+#define CP_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0x0000000e
+#define CP_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x00000000
+#define CP_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x00000001
+#define CP_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x00000013
+#define CP_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+#define CP_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x00000015
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x0000001d
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x0000001b
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x0000001c
+#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
+#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x00000017
+#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
+#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x00000016
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0x0000000f
+#define CP_COHER_CNTL__TC_VOL_ACTION_ENA_MASK 0x00010000L
+#define CP_COHER_CNTL__TC_VOL_ACTION_ENA__SHIFT 0x00000010
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x00000012
+#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xffffffffL
+#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x00000000
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000ffL
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x00000000
+#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003fL
+#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x00000000
+#define CP_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000ffL
+#define CP_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x00000000
+#define CP_COHER_STATUS__MEID_MASK 0x03000000L
+#define CP_COHER_STATUS__MEID__SHIFT 0x00000018
+#define CP_COHER_STATUS__PHASE1_STATUS_MASK 0x40000000L
+#define CP_COHER_STATUS__PHASE1_STATUS__SHIFT 0x0000001e
+#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
+#define CP_COHER_STATUS__STATUS__SHIFT 0x0000001f
+#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH_MASK 0x0000000fL
+#define CP_CSF_CNTL__FETCH_BUFFER_DEPTH__SHIFT 0x00000000
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x00003f00L
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x00000008
+#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED_MASK 0x0000000fL
+#define CP_CSF_STAT__BUFFER_SLOTS_ALLOCATED__SHIFT 0x00000000
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000f0000L
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x00000010
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x00000004
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xc0000000L
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x0000001e
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x0000001c
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x0000001d
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x001fffffL
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x00000000
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x0000001d
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x0000001b
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x00200000L
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x00000015
+#define CP_DMA_ME_COMMAND__DST_SWAP_MASK 0x03000000L
+#define CP_DMA_ME_COMMAND__DST_SWAP__SHIFT 0x00000018
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x0000001e
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x0000001c
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x0000001a
+#define CP_DMA_ME_COMMAND__SRC_SWAP_MASK 0x00c00000L
+#define CP_DMA_ME_COMMAND__SRC_SWAP__SHIFT 0x00000016
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xffffffffL
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x00000000
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xffffffffL
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x00000000
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x001fffffL
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x00000000
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x0000001d
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x0000001b
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x00200000L
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x00000015
+#define CP_DMA_PFP_COMMAND__DST_SWAP_MASK 0x03000000L
+#define CP_DMA_PFP_COMMAND__DST_SWAP__SHIFT 0x00000018
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x0000001e
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x0000001c
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x0000001a
+#define CP_DMA_PFP_COMMAND__SRC_SWAP_MASK 0x00c00000L
+#define CP_DMA_PFP_COMMAND__SRC_SWAP__SHIFT 0x00000016
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xffffffffL
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x00000000
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x000000ffL
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x00000000
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xffffffffL
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x00000000
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03ffffffL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x00000000
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x0000001c
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING0__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING0__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING0__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING0__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING0__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE_RING1__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING1__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING1__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING1__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING1__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE_RING2__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__INTERFACE__SHIFT 0x00000000
+#define CP_ECC_FIRSTOCCURRENCE_RING2__REQUEST_CLIENT_MASK 0x000000f0L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__REQUEST_CLIENT__SHIFT 0x00000004
+#define CP_ECC_FIRSTOCCURRENCE_RING2__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE_RING2__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE_RING2__VMID__SHIFT 0x00000010
+#define CP_ECC_FIRSTOCCURRENCE__RING_ID_MASK 0x00003c00L
+#define CP_ECC_FIRSTOCCURRENCE__RING_ID__SHIFT 0x0000000a
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000f0000L
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x00000010
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000ffffL
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x00000000
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xfffffffcL
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x00000002
+#define CP_EOP_DONE_ADDR_LO__ADDR_SWAP_MASK 0x00000003L
+#define CP_EOP_DONE_ADDR_LO__ADDR_SWAP__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_CNTL__CNTX_ID_MASK 0x0000ffffL
+#define CP_EOP_DONE_DATA_CNTL__CNTX_ID__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xe0000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x0000001d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x00000010
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x00000018
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xffffffffL
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x00000000
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xffffffffL
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x00000000
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xffffffffL
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x00000000
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xffffffffL
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x00000000
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xffffffffL
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x00000000
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xffffffffL
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x00000000
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xffffffffL
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x00000000
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xffffffffL
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x00000000
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003f00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x00000008
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003fL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003f0000L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x00000010
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x00000000
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x000000ffL
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x00000000
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xfffffffcL
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x00000002
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000fffffL
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x00000000
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000fffffL
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x00000000
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000fffffL
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x00000000
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000fffffL
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x00000000
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x000000ffL
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x00000000
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xfffffffcL
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x00000002
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000fffffL
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x00000000
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000fffffL
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x00000000
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000fffffL
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x00000000
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000fffffL
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x00000000
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x00000013
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x00000014
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0x0000000e
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x0000001f
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x0000001e
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x0000001d
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x00000018
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x00000016
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x00000017
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x0000001b
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x0000001a
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x00000011
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x00000013
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x00000014
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0x0000000e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x0000001f
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x0000001e
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x0000001d
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x00000018
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x00000016
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x00000017
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x0000001b
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x0000001a
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x00000011
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING0__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x00000013
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x00000014
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0x0000000e
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x0000001f
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x0000001e
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x0000001d
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x00000018
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x00000016
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x00000017
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x0000001b
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x0000001a
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x00000011
+#define CP_MC_PACK_DELAY_CNT__PACK_DELAY_CNT_MASK 0x0000001fL
+#define CP_MC_PACK_DELAY_CNT__PACK_DELAY_CNT__SHIFT 0x00000000
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x00000018
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x00000004
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x00000019
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x0000001c
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x00000008
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x0000001d
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x0000001a
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x00000006
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x0000001b
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xffffffffL
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x00000000
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x000000ffL
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x00000000
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xfffffffcL
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x00000002
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP_MASK 0x00000003L
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_SWAP__SHIFT 0x00000000
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x000000ffL
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x00000000
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xfffffffcL
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x00000002
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP_MASK 0x00000003L
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_SWAP__SHIFT 0x00000000
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xffffffffL
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x00000000
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xffffffffL
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x00000000
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x00000001
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x00000000
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00ff0000L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x00000010
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000ff00L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x00000008
+#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000L
+#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x00000018
+#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x000000fcL
+#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x00000002
+#define CP_ME_PREEMPTION__ME_CNTXSW_PREEMPTION_MASK 0x00000001L
+#define CP_ME_PREEMPTION__ME_CNTXSW_PREEMPTION__SHIFT 0x00000000
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003ffL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x00000000
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003ffL
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x00000000
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03ff0000L
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x00000010
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000ffL
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x00000000
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000ff00L
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x00000008
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xffffffffL
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x00000000
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00000fffL
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x00000000
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00000fffL
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x00000000
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xffffffffL
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x00000000
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x00000008
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000fL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000f0L
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x00000004
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x0000001f
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xffffffffL
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x00000000
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x00000001L
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x00000000
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x00000001
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x00000000
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x00000018
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x00000010
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN_MASK 0x00008000L
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN__SHIFT 0x0000000f
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00000fffL
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x00000000
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xffffffffL
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x00000000
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0xffffffffL
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x00000000
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xfffffffcL
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x00000002
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_SWAP_MASK 0x00000003L
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_SWAP__SHIFT 0x00000000
+#define CP_PWR_CNTL__GFX_CLK_HALT_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT__SHIFT 0x00000000
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003fL
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x00000000
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003f00L
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x00000008
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000ffL
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define CP_RB0_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x00000010
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB0_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB0_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB0_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB0_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000ffL
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define CP_RB1_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB1_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB1_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB1_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB1_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB2_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB2_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB2_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB2_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB2_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB2_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB2_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RB_BASE__RB_BASE_MASK 0xffffffffL
+#define CP_RB_BASE__RB_BASE__SHIFT 0x00000000
+#define CP_RB_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_RB_CNTL__BUF_SWAP__SHIFT 0x00000010
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x00000018
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x00000014
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00c00000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x00000016
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003f00L
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003fL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x0000001f
+#define CP_RB_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB_CNTL__RB_VOLATILE__SHIFT 0x0000001a
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000fffffL
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x00000000
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x000000ffL
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x00000000
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xfffffffcL
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000002
+#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP_MASK 0x00000003L
+#define CP_RB_RPTR_ADDR__RB_RPTR_SWAP__SHIFT 0x00000000
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000fffffL
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x00000000
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000fffffL
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x00000000
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000fL
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x00000000
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000f00L
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x00000008
+#define CP_RB_VMID__RB2_VMID_MASK 0x000f0000L
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x00000010
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xf0000000L
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x0000001c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0fffffffL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_HI__OBSOLETE_MASK 0x000000ffL
+#define CP_RB_WPTR_POLL_ADDR_HI__OBSOLETE__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x000000ffL
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x00000000
+#define CP_RB_WPTR_POLL_ADDR_LO__OBSOLETE_MASK 0xfffffffcL
+#define CP_RB_WPTR_POLL_ADDR_LO__OBSOLETE__SHIFT 0x00000002
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xfffffffcL
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x00000002
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xffff0000L
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x00000010
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000ffffL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x00000000
+#define CP_RB_WPTR__RB_WPTR_MASK 0x000fffffL
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x00000000
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
+#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x00000000
+#define CP_RINGID__RINGID_MASK 0x00000003L
+#define CP_RINGID__RINGID__SHIFT 0x00000000
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000ffL
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x00000000
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000ff00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x00000008
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00ff0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x00000010
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xff000000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x00000018
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00ff0000L
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x00000010
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xff000000L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x00000018
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000ffL
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x00000000
+#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000ff00L
+#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x00000008
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007ffL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x00000000
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000ff00L
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x00000008
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00ff0000L
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x00000010
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000ffL
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x00000000
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xff000000L
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x00000018
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07ff0000L
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x00000010
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007ffL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x00000000
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003ffL
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x00000000
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03ff0000L
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x00000010
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003ffL
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x00000000
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03ff0000L
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x00000010
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003ffL
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x00000000
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03ff0000L
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x00000010
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x00000000
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xffffffffL
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x00000000
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xffffffffL
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x00000000
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000ffL
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x00000000
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xffffffffL
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x00000000
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x000000ffL
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x00000000
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x00000018
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x0000001d
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x00000014
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x00000010
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x00000003
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x00000000
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0x0000000a
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0x0000000b
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x0000000d
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x0000000c
+#define CP_STALLED_STAT1__ME_WAITING_ON_MC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_MC_READ_DATA__SHIFT 0x0000000e
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0x0000000f
+#define CP_STALLED_STAT1__MIU_WAITING_ON_RDREQ_FREE_MASK 0x00010000L
+#define CP_STALLED_STAT1__MIU_WAITING_ON_RDREQ_FREE__SHIFT 0x00000010
+#define CP_STALLED_STAT1__MIU_WAITING_ON_WRREQ_FREE_MASK 0x00020000L
+#define CP_STALLED_STAT1__MIU_WAITING_ON_WRREQ_FREE__SHIFT 0x00000011
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x00000004
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x00000002
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x0000001c
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x0000001c
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x0000001b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x0000001a
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x00000017
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x00000018
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x00000019
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x0000001c
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x00000019
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x0000001a
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x0000001d
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x0000001b
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x00000015
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x00000016
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0x0000000b
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x00000010
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0x0000000c
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0x0000000d
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0x0000000e
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x00000012
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0x0000000f
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0x0000000a
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x00000009
+#define CP_STALLED_STAT2__PFP_MIU_READ_PENDING_MASK 0x00000040L
+#define CP_STALLED_STAT2__PFP_MIU_READ_PENDING__SHIFT 0x00000006
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x00000005
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x00000014
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x00000013
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x00000001
+#define CP_STALLED_STAT2__PFP_TO_MIU_WRITE_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT2__PFP_TO_MIU_WRITE_NOT_RDY_TO_RCV__SHIFT 0x00000007
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x00000002
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x00000004
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x00000008
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x00000018
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x00000011
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x00000017
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x0000001f
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x0000001e
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x00000000
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x00000006
+#define CP_STALLED_STAT3__CE_TO_MIU_WRITE_NOT_RDY_TO_RCV_MASK 0x00000100L
+#define CP_STALLED_STAT3__CE_TO_MIU_WRITE_NOT_RDY_TO_RCV__SHIFT 0x00000008
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x00000004
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x00000001
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x00000003
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x00000005
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x00000007
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0x0000000a
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0x0000000b
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x00000002
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0x0000000c
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0x0000000d
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0x0000000e
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0x0000000f
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__CE_BUSY__SHIFT 0x0000001a
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+#define CP_STAT__CP_BUSY__SHIFT 0x0000001f
+#define CP_STAT__CPC_CPG_BUSY_MASK 0x02000000L
+#define CP_STAT__CPC_CPG_BUSY__SHIFT 0x00000019
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__DC_BUSY__SHIFT 0x0000000d
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__DMA_BUSY__SHIFT 0x00000016
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x00000014
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__ME_BUSY__SHIFT 0x00000011
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__MEQ_BUSY__SHIFT 0x00000010
+#define CP_STAT__MIU_RDREQ_BUSY_MASK 0x00000080L
+#define CP_STAT__MIU_RDREQ_BUSY__SHIFT 0x00000007
+#define CP_STAT__MIU_WRREQ_BUSY_MASK 0x00000100L
+#define CP_STAT__MIU_WRREQ_BUSY__SHIFT 0x00000008
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__PFP_BUSY__SHIFT 0x0000000f
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__QUERY_BUSY__SHIFT 0x00000012
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__RCIU_BUSY__SHIFT 0x00000017
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x0000001d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x0000001e
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x0000001c
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0x0000000a
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0x0000000b
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x00000009
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0x0000000c
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x00000018
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x00000013
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x00000015
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__TCIU_BUSY__SHIFT 0x0000001b
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x000000ffL
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x00000000
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xfffffffcL
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x00000002
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000fffffL
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x00000000
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001ffL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x00000000
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003ffL
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x00000000
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000ffL
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x00000000
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000ff00L
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x00000008
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00ff0000L
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x00000010
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0xffffffffL
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x00000000
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xfffffffcL
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x00000002
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_SWAP_MASK 0x00000003L
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_SWAP__SHIFT 0x00000000
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x00000000
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x00000000
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xffffffffL
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x00000000
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xffffffffL
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x00000000
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000ffffL
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x00000000
+#define CP_VMID_PREEMPT__PREEMPT_STATUS_MASK 0xffff0000L
+#define CP_VMID_PREEMPT__PREEMPT_STATUS__SHIFT 0x00000010
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000ffffL
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x00000000
+#define CP_VMID_RESET__RESET_STATUS_MASK 0xffff0000L
+#define CP_VMID_RESET__RESET_STATUS__SHIFT 0x00000010
+#define CP_VMID__VMID_MASK 0x0000000fL
+#define CP_VMID__VMID__SHIFT 0x00000000
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xffffffffL
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x00000000
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x000000ffL
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x00000000
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x00000018
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xe0000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x0000001d
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x00000014
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x00000010
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xfffffff8L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x00000003
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x00000000
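The CP_* defines above follow the header's uniform MASK/__SHIFT pairing: each field's mask selects its bits within the 32-bit register, and the shift gives the position of its least significant bit. A minimal usage sketch follows; it is illustrative commentary only, not part of the patch, and the helper names and the "val" register value are hypothetical.

/* Illustrative only (not part of this patch): extract and repack the
 * RB_BUFSZ field of CP_RB_CNTL using the MASK/__SHIFT pair defined above.
 * "val" stands for a 32-bit register value obtained elsewhere.
 */
static inline unsigned int cp_rb_cntl_get_bufsz(unsigned int val)
{
	return (val & CP_RB_CNTL__RB_BUFSZ_MASK) >> CP_RB_CNTL__RB_BUFSZ__SHIFT;
}

static inline unsigned int cp_rb_cntl_set_bufsz(unsigned int val, unsigned int bufsz)
{
	val &= ~CP_RB_CNTL__RB_BUFSZ_MASK;
	val |= (bufsz << CP_RB_CNTL__RB_BUFSZ__SHIFT) & CP_RB_CNTL__RB_BUFSZ_MASK;
	return val;
}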
+#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x00000000
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x00000008
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000c00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0x0000000a
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0x0000000c
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000c000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0x0000000e
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x00000010
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x00000004
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000fL
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x00000000
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x00fff000L
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0x0000000c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00f00000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x00000014
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x00000001
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x00000004
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000f0000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x00000010
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0f000000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x00000018
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xf0000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x0000001c
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000f000L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0x0000000c
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000f00L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x00000008
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x00000000
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001c00L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0x0000000a
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7f000000L
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x00000018
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003e0L
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x00000005
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001fL
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x00000000
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x00000000
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003e00L
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x00000009
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x00000012
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x00000010
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x00000011
+#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ_MASK 0x00000100L
+#define DB_DEBUG2__DISABLE_PREZL_CB_STALL_REZ__SHIFT 0x00000008
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ_MASK 0x00000080L
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL_REZ__SHIFT 0x00000007
+#define DB_DEBUG2__DISABLE_PREZL_LPF_STALL__SHIFT 0x00000005
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x0000001d
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x00000013
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x80000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x0000001f
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x00000002
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x00000001
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0x0000000e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x0000001f
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x00000004
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x00000003
+#define DB_DEBUG2__ENABLE_PREZL_CB_STALL_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_PREZL_CB_STALL__SHIFT 0x00000006
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x0000001c
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0x0000000f
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00020000L
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x00000011
+#define DB_DEBUG3__DB_EXTRA_DEBUG3_MASK 0xfc000000L
+#define DB_DEBUG3__DB_EXTRA_DEBUG3__SHIFT 0x0000001a
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x00000019
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x00000018
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x00000004
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00200000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x00000015
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0x0000000e
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00010000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0x00000010
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00008000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0x0000000f
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x00000013
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0x0000000d
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x0000001b
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x00000017
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0x0000000b
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000400L
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x0000000a
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x00000007
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x00000014
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x00000003
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x00000008
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x0000001d
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x0000001c
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x0000001a
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00001000L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0x0000000c
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00400000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x00000016
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00800000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x00000017
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x00000005
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x00000006
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x00000002
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00040000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x00000012
+#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0xffffffc0L
+#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x00000006
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000020L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x00000005
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x00000001
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x00000000
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x00000002
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x00000001
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0x0000000f
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0x0000000e
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x00000006
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x00000013
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000c00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0x0000000a
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0x0000000c
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x00000008
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x00000007
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x00000010
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x00000000
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0f000000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x00000018
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x00000012
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x0000001e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x0000001f
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x00000011
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x00000017
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x00000003
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x00000002
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x0000001d
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x00000004
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x00000015
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x0000001c
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x00000016
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xffffffffL
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x00000000
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xffffffffL
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x00000000
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xffffffffL
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x00000000
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x00000007
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x00000003
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x0000001f
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x0000001e
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x00000000
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x00000014
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x00000008
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x00000001
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x00000004
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x00000002
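The DB_DEPTH_CONTROL fields above combine the same way: a register value is built by OR-ing each field, shifted into place and masked to its width. A minimal sketch (illustrative commentary, not part of the patch; the helper name and the "zfunc" input are hypothetical):

/* Illustrative only (not part of this patch): compose a DB_DEPTH_CONTROL
 * value with depth test and depth writes enabled and a caller-supplied
 * compare function. "zfunc" is assumed to be a 3-bit compare-function code.
 */
static inline unsigned int db_depth_control_build(unsigned int zfunc)
{
	unsigned int val = 0;

	val |= DB_DEPTH_CONTROL__Z_ENABLE_MASK;        /* bit 1 */
	val |= DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK;  /* bit 2 */
	val |= (zfunc << DB_DEPTH_CONTROL__ZFUNC__SHIFT) &
	       DB_DEPTH_CONTROL__ZFUNC_MASK;           /* bits 6:4 */
	return val;
}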
+#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK_MASK 0x0000000fL
+#define DB_DEPTH_INFO__ADDR5_SWIZZLE_MASK__SHIFT 0x00000000
+#define DB_DEPTH_INFO__ARRAY_MODE_MASK 0x000000f0L
+#define DB_DEPTH_INFO__ARRAY_MODE__SHIFT 0x00000004
+#define DB_DEPTH_INFO__BANK_HEIGHT_MASK 0x00018000L
+#define DB_DEPTH_INFO__BANK_HEIGHT__SHIFT 0x0000000f
+#define DB_DEPTH_INFO__BANK_WIDTH_MASK 0x00006000L
+#define DB_DEPTH_INFO__BANK_WIDTH__SHIFT 0x0000000d
+#define DB_DEPTH_INFO__MACRO_TILE_ASPECT_MASK 0x00060000L
+#define DB_DEPTH_INFO__MACRO_TILE_ASPECT__SHIFT 0x00000011
+#define DB_DEPTH_INFO__NUM_BANKS_MASK 0x00180000L
+#define DB_DEPTH_INFO__NUM_BANKS__SHIFT 0x00000013
+#define DB_DEPTH_INFO__PIPE_CONFIG_MASK 0x00001f00L
+#define DB_DEPTH_INFO__PIPE_CONFIG__SHIFT 0x00000008
+#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX_MASK 0x003ff800L
+#define DB_DEPTH_SIZE__HEIGHT_TILE_MAX__SHIFT 0x0000000b
+#define DB_DEPTH_SIZE__PITCH_TILE_MAX_MASK 0x000007ffL
+#define DB_DEPTH_SIZE__PITCH_TILE_MAX__SHIFT 0x00000000
+#define DB_DEPTH_SLICE__SLICE_TILE_MAX_MASK 0x003fffffL
+#define DB_DEPTH_SLICE__SLICE_TILE_MAX__SHIFT 0x00000000
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00ffe000L
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0x0000000d
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007ffL
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x00000000
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x00000019
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x00000018
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x00000015
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0x0000000c
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x0000001b
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x00000010
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x00000011
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x00000012
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x00000013
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x00000008
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x00000000
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x00000018
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x00000004
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x00000014
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1fe00000L
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x00000015
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000fc00L
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0x0000000a
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x0000001fL
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x00000000
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x000003e0L
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x00000005
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001f0000L
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x00000010
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000ffL
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x00000000
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007f00L
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x00000008
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01ff8000L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0x0000000f
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xfe000000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x00000019
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007fL
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x00000000
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x01e00000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x00000015
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003f80L
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x00000007
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x001fc000L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0x0000000e
+#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xfe000000L
+#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x00000019
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x00000010
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x00000001
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x00000002
+#define DB_HTILE_SURFACE__LINEAR_MASK 0x00000001L
+#define DB_HTILE_SURFACE__LINEAR__SHIFT 0x00000000
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000fc00L
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0x0000000a
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003f0L
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x00000004
+#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
+#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x00000003
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00ff0000L
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x00000010
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xff000000L
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x00000018
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000ffL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x00000000
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000ff00L
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x00000008
+#define DB_READ_DEBUG_0__BUSY_DATA0_MASK 0xffffffffL
+#define DB_READ_DEBUG_0__BUSY_DATA0__SHIFT 0x00000000
+#define DB_READ_DEBUG_1__BUSY_DATA1_MASK 0xffffffffL
+#define DB_READ_DEBUG_1__BUSY_DATA1__SHIFT 0x00000000
+#define DB_READ_DEBUG_2__BUSY_DATA2_MASK 0xffffffffL
+#define DB_READ_DEBUG_2__BUSY_DATA2__SHIFT 0x00000000
+#define DB_READ_DEBUG_3__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_3__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_4__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_4__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_5__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_5__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_6__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_6__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_7__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_7__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_8__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_8__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_9__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_9__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_A__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_A__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_B__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_B__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_C__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_C__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_D__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_D__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_E__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_E__DEBUG_DATA__SHIFT 0x00000000
+#define DB_READ_DEBUG_F__DEBUG_DATA_MASK 0xffffffffL
+#define DB_READ_DEBUG_F__DEBUG_DATA__SHIFT 0x00000000
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x00000007
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000f00L
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x00000008
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x00000000
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x00000006
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x00000002
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x00000004
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x00000001
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x00000005
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x00000003
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x00000008
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0x0000000a
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x00000007
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x00000017
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x00000009
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x00000006
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x00000005
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001c0000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x00000012
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0x0000000f
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0x0000000c
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x00000000
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001cL
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x00000002
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x00000016
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x00000015
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0x0000000b
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x00000012
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x0000001a
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x00000010
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x00000008
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x00000007
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0x0000000a
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0x0000000d
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000cL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x00000002
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x00000004
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x00000000
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0x0000000f
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x00000006
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x0000001c
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0x0000000c
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x0000001e
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x0000001b
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x00000013
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0x0000000b
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x0000001d
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x00000011
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03e00000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x00000015
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x00000009
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x0000001f
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0x0000000b
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0x0000000d
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x00000007
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0x0000000c
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x00000009
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0x0000000a
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x00000006
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x00000008
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x00000002
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x00000001
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x00000000
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x00000000
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000ff000L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0x0000000c
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000ff0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x00000018
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x00000000
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000ff000L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0x0000000c
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000ff0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x00000004
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x00000018
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000ffL
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x00000000
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000f000L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0x0000000c
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000fL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x00000000
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00f00000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x00000014
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000f00L
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x00000008
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000f0000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x00000010
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000f0L
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x00000004
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x0000001b
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x00000000
+#define DB_STENCIL_INFO__TILE_MODE_INDEX_MASK 0x00700000L
+#define DB_STENCIL_INFO__TILE_MODE_INDEX__SHIFT 0x00000014
+#define DB_STENCIL_INFO__TILE_SPLIT_MASK 0x0000e000L
+#define DB_STENCIL_INFO__TILE_SPLIT__SHIFT 0x0000000d
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x0000001d
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000ff00L
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x00000008
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xff000000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x00000018
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000ffL
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x00000000
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00ff0000L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x00000010
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000ff00L
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x00000008
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xff000000L
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x00000018
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000ffL
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x00000000
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00ff0000L
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x00000010
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x00000010
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000c0000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x00000012
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x00000000
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000cL
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x00000002
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x00000004
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000c0L
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x00000006
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x00000008
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000c00L
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0x0000000a
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0x0000000c
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000c000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0x0000000e
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x0000001e
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x0000001f
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x07f00000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x00000014
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007e0L
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x00000005
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001fL
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x00000000
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000f8000L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0x0000000f
+#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE_MASK 0x08000000L
+#define DB_WATERMARKS__EARLY_Z_PANIC_DISABLE__SHIFT 0x0000001b
+#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
+#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0x0000000b
+#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE_MASK 0x10000000L
+#define DB_WATERMARKS__LATE_Z_PANIC_DISABLE__SHIFT 0x0000001c
+#define DB_WATERMARKS__RE_Z_PANIC_DISABLE_MASK 0x20000000L
+#define DB_WATERMARKS__RE_Z_PANIC_DISABLE__SHIFT 0x0000001d
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x0000001b
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__FORMAT__SHIFT 0x00000000
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000cL
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x00000002
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x0000001c
+#define DB_Z_INFO__TILE_MODE_INDEX_MASK 0x00700000L
+#define DB_Z_INFO__TILE_MODE_INDEX__SHIFT 0x00000014
+#define DB_Z_INFO__TILE_SPLIT_MASK 0x0000e000L
+#define DB_Z_INFO__TILE_SPLIT__SHIFT 0x0000000d
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x0000001d
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x0000001f
+#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7fffffffL
+#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x00000000
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xffffffffL
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x00000000
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x00000000
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xffffffffL
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x00000000
+#define DEBUG_DATA__DEBUG_DATA_MASK 0xffffffffL
+#define DEBUG_DATA__DEBUG_DATA__SHIFT 0x00000000
+#define DEBUG_INDEX__DEBUG_INDEX_MASK 0x0003ffffL
+#define DEBUG_INDEX__DEBUG_INDEX__SHIFT 0x00000000
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xffffffffL
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x00000000
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+#define GB_EDC_MODE__BYPASS__SHIFT 0x0000001f
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x00000014
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00010000L
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0x00000010
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x0000001d
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000fL
+#define GB_GPU_ID__GPU_ID__SHIFT 0x00000000
+#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0x0000000b
+#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003cL
+#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x00000002
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01c00000L
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x00000016
+#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007c0L
+#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x00000006
+#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
+#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x00000019
+#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0x0000000b
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define GC_USER_SHADER_ARRAY_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define GC_USER_SHADER_ARRAY_CONFIG__DPFP_RATE__SHIFT 0x00000001
+#define GC_USER_SHADER_ARRAY_CONFIG__HALF_LDS_MASK 0x00000010L
+#define GC_USER_SHADER_ARRAY_CONFIG__HALF_LDS__SHIFT 0x00000004
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xffff0000L
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x00000010
+#define GC_USER_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define GC_USER_SHADER_ARRAY_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x00000003
+#define GDS_ATOM_BASE__BASE_MASK 0x0000ffffL
+#define GDS_ATOM_BASE__BASE__SHIFT 0x00000000
+#define GDS_ATOM_BASE__UNUSED_MASK 0xffff0000L
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0x00000010
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003fL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x00000000
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000100L
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x00000008
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000c0L
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x00000006
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xfffffe00L
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0x00000009
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x00000000
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xfffffffeL
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x00000001
+#define GDS_ATOM_DST__DST_MASK 0xffffffffL
+#define GDS_ATOM_DST__DST__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000ffL
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000ffL
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x00000000
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_OP__OP_MASK 0x000000ffL
+#define GDS_ATOM_OP__OP__SHIFT 0x00000000
+#define GDS_ATOM_OP__UNUSED_MASK 0xffffff00L
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x00000008
+#define GDS_ATOM_READ0__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ0__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ0_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ1__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ1__DATA__SHIFT 0x00000000
+#define GDS_ATOM_READ1_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SIZE__SIZE_MASK 0x0000ffffL
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x00000000
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xffff0000L
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x00000010
+#define GDS_ATOM_SRC0__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC1__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x00000000
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xffffffffL
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x00000000
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x00000004
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x00000003
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x00000006
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x00000005
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x00000000
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x00000001
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x00000002
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x00000001
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x00000003
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x00000005
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x00000007
+#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX_MASK 0x0000001fL
+#define GDS_DEBUG_CNTL__GDS_DEBUG_INDX__SHIFT 0x00000000
+#define GDS_DEBUG_CNTL__UNUSED_MASK 0xffffffe0L
+#define GDS_DEBUG_CNTL__UNUSED__SHIFT 0x00000005
+#define GDS_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define GDS_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define GDS_DEBUG_REG0__buff_write_MASK 0x00020000L
+#define GDS_DEBUG_REG0__buff_write__SHIFT 0x00000011
+#define GDS_DEBUG_REG0__cstate_MASK 0x0001e000L
+#define GDS_DEBUG_REG0__cstate__SHIFT 0x0000000d
+#define GDS_DEBUG_REG0__flush_request_MASK 0x00040000L
+#define GDS_DEBUG_REG0__flush_request__SHIFT 0x00000012
+#define GDS_DEBUG_REG0__last_pixel_ptr_MASK 0x00001000L
+#define GDS_DEBUG_REG0__last_pixel_ptr__SHIFT 0x0000000c
+#define GDS_DEBUG_REG0__spare1_MASK 0x00000001L
+#define GDS_DEBUG_REG0__spare1__SHIFT 0x00000000
+#define GDS_DEBUG_REG0__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG0__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG0__wbuf_fifo_empty_MASK 0x00100000L
+#define GDS_DEBUG_REG0__wbuf_fifo_empty__SHIFT 0x00000014
+#define GDS_DEBUG_REG0__wbuf_fifo_full_MASK 0x00200000L
+#define GDS_DEBUG_REG0__wbuf_fifo_full__SHIFT 0x00000015
+#define GDS_DEBUG_REG0__wr_buffer_wr_complete_MASK 0x00080000L
+#define GDS_DEBUG_REG0__wr_buffer_wr_complete__SHIFT 0x00000013
+#define GDS_DEBUG_REG0__write_buff_valid_MASK 0x00000040L
+#define GDS_DEBUG_REG0__write_buff_valid__SHIFT 0x00000006
+#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr_MASK 0x00000f80L
+#define GDS_DEBUG_REG0__wr_pixel_nxt_ptr__SHIFT 0x00000007
+#define GDS_DEBUG_REG1__addr_fifo_empty_MASK 0x00200000L
+#define GDS_DEBUG_REG1__addr_fifo_empty__SHIFT 0x00000015
+#define GDS_DEBUG_REG1__addr_fifo_full_MASK 0x00100000L
+#define GDS_DEBUG_REG1__addr_fifo_full__SHIFT 0x00000014
+#define GDS_DEBUG_REG1__awaiting_data_MASK 0x00080000L
+#define GDS_DEBUG_REG1__awaiting_data__SHIFT 0x00000013
+#define GDS_DEBUG_REG1__buffer_invalid_MASK 0x00800000L
+#define GDS_DEBUG_REG1__buffer_invalid__SHIFT 0x00000017
+#define GDS_DEBUG_REG1__buffer_loaded_MASK 0x00400000L
+#define GDS_DEBUG_REG1__buffer_loaded__SHIFT 0x00000016
+#define GDS_DEBUG_REG1__data_ready_MASK 0x00040000L
+#define GDS_DEBUG_REG1__data_ready__SHIFT 0x00000012
+#define GDS_DEBUG_REG1__pixel_addr_MASK 0x0001fffcL
+#define GDS_DEBUG_REG1__pixel_addr__SHIFT 0x00000002
+#define GDS_DEBUG_REG1__pixel_vld_MASK 0x00020000L
+#define GDS_DEBUG_REG1__pixel_vld__SHIFT 0x00000011
+#define GDS_DEBUG_REG1__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG1__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG1__tag_hit_MASK 0x00000001L
+#define GDS_DEBUG_REG1__tag_hit__SHIFT 0x00000000
+#define GDS_DEBUG_REG1__tag_miss_MASK 0x00000002L
+#define GDS_DEBUG_REG1__tag_miss__SHIFT 0x00000001
+#define GDS_DEBUG_REG2__app_sel_MASK 0x000000f0L
+#define GDS_DEBUG_REG2__app_sel__SHIFT 0x00000004
+#define GDS_DEBUG_REG2__cmd_write_MASK 0x00000008L
+#define GDS_DEBUG_REG2__cmd_write__SHIFT 0x00000003
+#define GDS_DEBUG_REG2__ds_credit_avail_MASK 0x00000002L
+#define GDS_DEBUG_REG2__ds_credit_avail__SHIFT 0x00000001
+#define GDS_DEBUG_REG2__ds_full_MASK 0x00000001L
+#define GDS_DEBUG_REG2__ds_full__SHIFT 0x00000000
+#define GDS_DEBUG_REG2__ord_idx_free_MASK 0x00000004L
+#define GDS_DEBUG_REG2__ord_idx_free__SHIFT 0x00000002
+#define GDS_DEBUG_REG2__req_MASK 0x007fff00L
+#define GDS_DEBUG_REG2__req__SHIFT 0x00000008
+#define GDS_DEBUG_REG2__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG2__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG3__pipe0_busy_num_MASK 0x00007800L
+#define GDS_DEBUG_REG3__pipe0_busy_num__SHIFT 0x0000000b
+#define GDS_DEBUG_REG3__pipe_num_busy_MASK 0x000007ffL
+#define GDS_DEBUG_REG3__pipe_num_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG3__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG3__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG4__cmd_write_MASK 0x00020000L
+#define GDS_DEBUG_REG4__cmd_write__SHIFT 0x00000011
+#define GDS_DEBUG_REG4__credit_cnt_gt0_MASK 0x00010000L
+#define GDS_DEBUG_REG4__credit_cnt_gt0__SHIFT 0x00000010
+#define GDS_DEBUG_REG4__cur_reso_barrier_MASK 0x00002000L
+#define GDS_DEBUG_REG4__cur_reso_barrier__SHIFT 0x0000000d
+#define GDS_DEBUG_REG4__cur_reso_cnt_gt0_MASK 0x00008000L
+#define GDS_DEBUG_REG4__cur_reso_cnt_gt0__SHIFT 0x0000000f
+#define GDS_DEBUG_REG4__cur_reso_fed_MASK 0x00001000L
+#define GDS_DEBUG_REG4__cur_reso_fed__SHIFT 0x0000000c
+#define GDS_DEBUG_REG4__cur_reso_flag_MASK 0x00004000L
+#define GDS_DEBUG_REG4__cur_reso_flag__SHIFT 0x0000000e
+#define GDS_DEBUG_REG4__cur_reso_head_dirty_MASK 0x00000400L
+#define GDS_DEBUG_REG4__cur_reso_head_dirty__SHIFT 0x0000000a
+#define GDS_DEBUG_REG4__cur_reso_head_flag_MASK 0x00000800L
+#define GDS_DEBUG_REG4__cur_reso_head_flag__SHIFT 0x0000000b
+#define GDS_DEBUG_REG4__cur_reso_head_valid_MASK 0x00000200L
+#define GDS_DEBUG_REG4__cur_reso_head_valid__SHIFT 0x00000009
+#define GDS_DEBUG_REG4__cur_reso_MASK 0x000001f8L
+#define GDS_DEBUG_REG4__cur_reso__SHIFT 0x00000003
+#define GDS_DEBUG_REG4__grbm_gws_reso_rd_MASK 0x00080000L
+#define GDS_DEBUG_REG4__grbm_gws_reso_rd__SHIFT 0x00000013
+#define GDS_DEBUG_REG4__grbm_gws_reso_wr_MASK 0x00040000L
+#define GDS_DEBUG_REG4__grbm_gws_reso_wr__SHIFT 0x00000012
+#define GDS_DEBUG_REG4__gws_bulkfree_MASK 0x00200000L
+#define GDS_DEBUG_REG4__gws_bulkfree__SHIFT 0x00000015
+#define GDS_DEBUG_REG4__gws_busy_MASK 0x00000001L
+#define GDS_DEBUG_REG4__gws_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG4__gws_out_stall_MASK 0x00000004L
+#define GDS_DEBUG_REG4__gws_out_stall__SHIFT 0x00000002
+#define GDS_DEBUG_REG4__gws_req_MASK 0x00000002L
+#define GDS_DEBUG_REG4__gws_req__SHIFT 0x00000001
+#define GDS_DEBUG_REG4__ram_gws_re_MASK 0x00400000L
+#define GDS_DEBUG_REG4__ram_gws_re__SHIFT 0x00000016
+#define GDS_DEBUG_REG4__ram_gws_we_MASK 0x00800000L
+#define GDS_DEBUG_REG4__ram_gws_we__SHIFT 0x00000017
+#define GDS_DEBUG_REG4__ram_read_busy_MASK 0x00100000L
+#define GDS_DEBUG_REG4__ram_read_busy__SHIFT 0x00000014
+#define GDS_DEBUG_REG4__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG4__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG5__alloc_opco_error_MASK 0x00000004L
+#define GDS_DEBUG_REG5__alloc_opco_error__SHIFT 0x00000002
+#define GDS_DEBUG_REG5__dealloc_opco_error_MASK 0x00000008L
+#define GDS_DEBUG_REG5__dealloc_opco_error__SHIFT 0x00000003
+#define GDS_DEBUG_REG5__dec_error_MASK 0x00000002L
+#define GDS_DEBUG_REG5__dec_error__SHIFT 0x00000001
+#define GDS_DEBUG_REG5__error_ds_address_MASK 0x003fff00L
+#define GDS_DEBUG_REG5__error_ds_address__SHIFT 0x00000008
+#define GDS_DEBUG_REG5__spare1_MASK 0xffc00000L
+#define GDS_DEBUG_REG5__spare1__SHIFT 0x00000016
+#define GDS_DEBUG_REG5__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG5__spare__SHIFT 0x00000018
+#define GDS_DEBUG_REG5__wrap_opco_error_MASK 0x00000010L
+#define GDS_DEBUG_REG5__wrap_opco_error__SHIFT 0x00000004
+#define GDS_DEBUG_REG5__write_dis_MASK 0x00000001L
+#define GDS_DEBUG_REG5__write_dis__SHIFT 0x00000000
+#define GDS_DEBUG_REG6__counters_busy_MASK 0x001fffe0L
+#define GDS_DEBUG_REG6__counters_busy__SHIFT 0x00000005
+#define GDS_DEBUG_REG6__counters_enabled_MASK 0x0000001eL
+#define GDS_DEBUG_REG6__counters_enabled__SHIFT 0x00000001
+#define GDS_DEBUG_REG6__oa_busy_MASK 0x00000001L
+#define GDS_DEBUG_REG6__oa_busy__SHIFT 0x00000000
+#define GDS_DEBUG_REG6__spare_MASK 0xff000000L
+#define GDS_DEBUG_REG6__spare__SHIFT 0x00000018
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x00000010
+#define GDS_ENHANCE__MISC_MASK 0x0000ffffL
+#define GDS_ENHANCE__MISC__SHIFT 0x00000000
+#define GDS_ENHANCE__UNUSED_MASK 0xffff0000L
+#define GDS_ENHANCE__UNUSED__SHIFT 0x00000010
+#define GDS_GRBM_SECDED_CNT__DED_MASK 0xffff0000L
+#define GDS_GRBM_SECDED_CNT__DED__SHIFT 0x00000010
+#define GDS_GRBM_SECDED_CNT__SEC_MASK 0x0000ffffL
+#define GDS_GRBM_SECDED_CNT__SEC__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003fL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xffffffc0L
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x00000006
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000ffffL
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xffff0000L
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x00000010
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001ffeL
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x00000001
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__DED__SHIFT 0x0000000e
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x10000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x0000001c
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x07ff0000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x00000010
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x08000000L
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x0000001b
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0x0000000f
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x00000000
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000ff00L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x00000008
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0x0000000d
+#define GDS_GWS_RESOURCE__UNUSED1_MASK 0xe0000000L
+#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x0000001d
+#define GDS_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_OA_DED__ME0_CS_DED__SHIFT 0x00000002
+#define GDS_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x00000000
+#define GDS_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x00000001
+#define GDS_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_OA_DED__ME1_PIPE0_DED__SHIFT 0x00000004
+#define GDS_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_OA_DED__ME1_PIPE1_DED__SHIFT 0x00000005
+#define GDS_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_OA_DED__ME1_PIPE2_DED__SHIFT 0x00000006
+#define GDS_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_OA_DED__ME1_PIPE3_DED__SHIFT 0x00000007
+#define GDS_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_OA_DED__ME2_PIPE0_DED__SHIFT 0x00000008
+#define GDS_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_OA_DED__ME2_PIPE1_DED__SHIFT 0x00000009
+#define GDS_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_OA_DED__ME2_PIPE2_DED__SHIFT 0x0000000a
+#define GDS_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_OA_DED__ME2_PIPE3_DED__SHIFT 0x0000000b
+#define GDS_OA_DED__UNUSED0_MASK 0x00000008L
+#define GDS_OA_DED__UNUSED0__SHIFT 0x00000003
+#define GDS_OA_DED__UNUSED1_MASK 0xfffff000L
+#define GDS_OA_DED__UNUSED1__SHIFT 0x0000000c
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define GDS_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xffffffffL
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x00000000
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xffffffffL
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x00000000
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xffffffffL
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x00000000
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xffffffffL
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x00000000
+#define GDS_RD_DATA__READ_DATA_MASK 0xffffffffL
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x00000000
+#define GDS_SECDED_CNT__DED_MASK 0xffff0000L
+#define GDS_SECDED_CNT__DED__SHIFT 0x00000010
+#define GDS_SECDED_CNT__SEC_MASK 0x0000ffffL
+#define GDS_SECDED_CNT__SEC__SHIFT 0x00000000
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xffffffffL
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x00000000
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xffffffffL
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x00000000
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xffffffffL
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x00000000
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xffffffffL
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x00000000
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xffffffffL
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x00000000
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000ffffL
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x00000000
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000L
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x00000010
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x00000000
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000ffL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x00000000
+#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX_MASK 0x0000003fL
+#define GRBM_DEBUG_CNTL__GRBM_DEBUG_INDEX__SHIFT 0x00000000
+#define GRBM_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define GRBM_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define GRBM_DEBUG__DISABLE_READ_TIMEOUT_MASK 0x00000040L
+#define GRBM_DEBUG__DISABLE_READ_TIMEOUT__SHIFT 0x00000006
+#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE_MASK 0x00001000L
+#define GRBM_DEBUG__GFX_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x0000000c
+#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE_MASK 0x00000f00L
+#define GRBM_DEBUG__HYSTERESIS_GUI_ACTIVE__SHIFT 0x00000008
+#define GRBM_DEBUG__IGNORE_FAO_MASK 0x00000020L
+#define GRBM_DEBUG__IGNORE_FAO__SHIFT 0x00000005
+#define GRBM_DEBUG__IGNORE_RDY_MASK 0x00000002L
+#define GRBM_DEBUG__IGNORE_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG_SNAPSHOT__CPF_RDY_MASK 0x00000001L
+#define GRBM_DEBUG_SNAPSHOT__CPF_RDY__SHIFT 0x00000000
+#define GRBM_DEBUG_SNAPSHOT__CPG_RDY_MASK 0x00000002L
+#define GRBM_DEBUG_SNAPSHOT__CPG_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS_MASK 0x00000080L
+#define GRBM_DEBUG__SNAPSHOT_FREE_CNTRS__SHIFT 0x00000007
+#define GRBM_DEBUG_SNAPSHOT__GDS_RDY_MASK 0x00000200L
+#define GRBM_DEBUG_SNAPSHOT__GDS_RDY__SHIFT 0x00000009
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0_MASK 0x00000040L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY0__SHIFT 0x00000006
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1_MASK 0x00004000L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE0_RDY1__SHIFT 0x0000000e
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0_MASK 0x00000080L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY0__SHIFT 0x00000007
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1_MASK 0x00008000L
+#define GRBM_DEBUG_SNAPSHOT__SE0SPI_ME0PIPE1_RDY1__SHIFT 0x0000000f
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0_MASK 0x00000100L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY0__SHIFT 0x00000008
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1_MASK 0x00010000L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE0_RDY1__SHIFT 0x00000010
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0_MASK 0x00000200L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY0__SHIFT 0x00000009
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1_MASK 0x00020000L
+#define GRBM_DEBUG_SNAPSHOT__SE1SPI_ME0PIPE1_RDY1__SHIFT 0x00000011
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0_MASK 0x00000400L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY0__SHIFT 0x0000000a
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1_MASK 0x00040000L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE0_RDY1__SHIFT 0x00000012
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0_MASK 0x00000800L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY0__SHIFT 0x0000000b
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1_MASK 0x00080000L
+#define GRBM_DEBUG_SNAPSHOT__SE2SPI_ME0PIPE1_RDY1__SHIFT 0x00000013
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0_MASK 0x00001000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY0__SHIFT 0x0000000c
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1_MASK 0x00100000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE0_RDY1__SHIFT 0x00000014
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0_MASK 0x00002000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY0__SHIFT 0x0000000d
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1_MASK 0x00200000L
+#define GRBM_DEBUG_SNAPSHOT__SE3SPI_ME0PIPE1_RDY1__SHIFT 0x00000015
+#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY_MASK 0x00000002L
+#define GRBM_DEBUG_SNAPSHOT__SRBM_RDY__SHIFT 0x00000001
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY_MASK 0x00000008L
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE0_RDY__SHIFT 0x00000003
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY_MASK 0x00000010L
+#define GRBM_DEBUG_SNAPSHOT__WD_ME0PIPE1_RDY__SHIFT 0x00000004
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x0000001e
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000ffL
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x00000000
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x0000001f
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00ff0000L
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x00000010
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x0000001d
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000ff00L
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x00000008
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x00000013
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x00000000
+#define GRBM_NOWHERE__DATA_MASK 0xffffffffL
+#define GRBM_NOWHERE__DATA__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000019
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x00000016
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x00000018
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000017
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001a
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000e
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001b
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001c
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000019
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x00000016
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x00000018
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000017
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001a
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000e
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001b
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x0000001c
+#define GRBM_PWR_CNTL__REQ_TYPE_MASK 0x0000000fL
+#define GRBM_PWR_CNTL__REQ_TYPE__SHIFT 0x00000000
+#define GRBM_PWR_CNTL__RSP_TYPE_MASK 0x000000f0L
+#define GRBM_PWR_CNTL__RSP_TYPE__SHIFT 0x00000004
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x00000013
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x00000014
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x00000015
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x00000016
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x00000017
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x00000018
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x00000019
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x0000001a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x0000001b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x0000001c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x0000001d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x0000001e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x0000001f
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x00000012
+#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SRBM__SHIFT 0x00000011
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003fffcL
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x00000002
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x0000001f
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00c00000L
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x00000016
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x00000014
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x00000000
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffffL
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000f
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x00000015
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000012
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000b
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x00000011
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0x0000000a
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x00000014
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003fL
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x00000000
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x00000010
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000f
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000d
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0x0000000c
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x00000013
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000fc0L
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x00000006
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003fL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x00000000
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x00000012
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x00000011
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x00000013
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x00000000
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x00000010
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x00000002
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x0000001c
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x00000004
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000fL
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x00000000
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x00000005
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x00000006
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x00000007
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x00000008
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x00000009
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0x0000000a
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0x0000000b
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0x0000000c
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0x0000000d
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x00000008
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00000001L
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0x00000000
+#define GRBM_STATUS2__TC_BUSY_MASK 0x00000200L
+#define GRBM_STATUS2__TC_BUSY__SHIFT 0x00000009
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0x0000000d
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x0000001c
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0x0000000c
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0x0000000f
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x00000009
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x0000001f
+#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x00000012
+#define GRBM_STATUS__IA_BUSY__SHIFT 0x00000013
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x00000007
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000fL
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x00000000
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x00000008
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
+#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x0000001f
+#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x00000002
+#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x0000001e
+#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x00000001
+#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x00000018
+#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x0000001d
+#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x0000001b
+#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x0000001a
+#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x00000019
+#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x00000017
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x00000016
+#define GRBM_STATUS__SRBM_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__SRBM_RQ_PENDING__SHIFT 0x00000005
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x00000014
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__TA_BUSY__SHIFT 0x0000000e
+#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
+#define GRBM_STATUS__VGT_BUSY__SHIFT 0x00000011
+#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x00000010
+#define GRBM_STATUS__WD_BUSY__SHIFT 0x00000015
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000ffL
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x00000000
+#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
+#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x00000004
+#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
+#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x00000000
+#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x00000001
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x00000002
+#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x00000003
+#define IA_DEBUG_CNTL__IA_DEBUG_INDX_MASK 0x0000003fL
+#define IA_DEBUG_CNTL__IA_DEBUG_INDX__SHIFT 0x00000000
+#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B_MASK 0x00000040L
+#define IA_DEBUG_CNTL__IA_DEBUG_SEL_BUS_B__SHIFT 0x00000006
+#define IA_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define IA_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define IA_DEBUG_REG0__core_clk_busy_MASK 0x04000000L
+#define IA_DEBUG_REG0__core_clk_busy__SHIFT 0x0000001a
+#define IA_DEBUG_REG0__dma_busy_MASK 0x00000040L
+#define IA_DEBUG_REG0__dma_busy__SHIFT 0x00000006
+#define IA_DEBUG_REG0__dma_grp_hp_valid_MASK 0x00001000L
+#define IA_DEBUG_REG0__dma_grp_hp_valid__SHIFT 0x0000000c
+#define IA_DEBUG_REG0__dma_grp_valid_MASK 0x00000400L
+#define IA_DEBUG_REG0__dma_grp_valid__SHIFT 0x0000000a
+#define IA_DEBUG_REG0__dma_req_busy_MASK 0x00000020L
+#define IA_DEBUG_REG0__dma_req_busy__SHIFT 0x00000005
+#define IA_DEBUG_REG0__grp_busy_MASK 0x00000100L
+#define IA_DEBUG_REG0__grp_busy__SHIFT 0x00000008
+#define IA_DEBUG_REG0__grp_dma_hp_read_MASK 0x00002000L
+#define IA_DEBUG_REG0__grp_dma_hp_read__SHIFT 0x0000000d
+#define IA_DEBUG_REG0__grp_dma_read_MASK 0x00000800L
+#define IA_DEBUG_REG0__grp_dma_read__SHIFT 0x0000000b
+#define IA_DEBUG_REG0__ia_busy_extended_MASK 0x00000001L
+#define IA_DEBUG_REG0__ia_busy_extended__SHIFT 0x00000000
+#define IA_DEBUG_REG0__ia_busy_MASK 0x00000004L
+#define IA_DEBUG_REG0__ia_busy__SHIFT 0x00000002
+#define IA_DEBUG_REG0__ia_nodma_busy_extended_MASK 0x00000002L
+#define IA_DEBUG_REG0__ia_nodma_busy_extended__SHIFT 0x00000001
+#define IA_DEBUG_REG0__ia_nodma_busy_MASK 0x00000008L
+#define IA_DEBUG_REG0__ia_nodma_busy__SHIFT 0x00000003
+#define IA_DEBUG_REG0__mc_xl8r_busy_MASK 0x00000080L
+#define IA_DEBUG_REG0__mc_xl8r_busy__SHIFT 0x00000007
+#define IA_DEBUG_REG0__reg_clk_busy_MASK 0x01000000L
+#define IA_DEBUG_REG0__reg_clk_busy__SHIFT 0x00000018
+#define IA_DEBUG_REG0__sclk_core_vld_MASK 0x20000000L
+#define IA_DEBUG_REG0__sclk_core_vld__SHIFT 0x0000001d
+#define IA_DEBUG_REG0__sclk_reg_vld_MASK 0x10000000L
+#define IA_DEBUG_REG0__sclk_reg_vld__SHIFT 0x0000001c
+#define IA_DEBUG_REG0__SPARE0_MASK 0x00000010L
+#define IA_DEBUG_REG0__SPARE0__SHIFT 0x00000004
+#define IA_DEBUG_REG0__SPARE1_MASK 0x00000200L
+#define IA_DEBUG_REG0__SPARE1__SHIFT 0x00000009
+#define IA_DEBUG_REG0__SPARE2_MASK 0x00ffc000L
+#define IA_DEBUG_REG0__SPARE2__SHIFT 0x0000000e
+#define IA_DEBUG_REG0__SPARE3_MASK 0x00100000L
+#define IA_DEBUG_REG0__SPARE3__SHIFT 0x00000014
+#define IA_DEBUG_REG0__SPARE4_MASK 0x08000000L
+#define IA_DEBUG_REG0__SPARE4__SHIFT 0x0000001b
+#define IA_DEBUG_REG0__SPARE5_MASK 0x40000000L
+#define IA_DEBUG_REG0__SPARE5__SHIFT 0x0000001e
+#define IA_DEBUG_REG0__SPARE6_MASK 0x80000000L
+#define IA_DEBUG_REG0__SPARE6__SHIFT 0x0000001f
+#define IA_DEBUG_REG1__current_data_valid_MASK 0x10000000L
+#define IA_DEBUG_REG1__current_data_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG1__discard_1st_chunk_MASK 0x00000100L
+#define IA_DEBUG_REG1__discard_1st_chunk__SHIFT 0x00000008
+#define IA_DEBUG_REG1__discard_2nd_chunk_MASK 0x00000200L
+#define IA_DEBUG_REG1__discard_2nd_chunk__SHIFT 0x00000009
+#define IA_DEBUG_REG1__dma_buf_type_q_MASK 0x00000060L
+#define IA_DEBUG_REG1__dma_buf_type_q__SHIFT 0x00000005
+#define IA_DEBUG_REG1__dma_data_fifo_empty_q_MASK 0x00004000L
+#define IA_DEBUG_REG1__dma_data_fifo_empty_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG1__dma_data_fifo_full_MASK 0x00008000L
+#define IA_DEBUG_REG1__dma_data_fifo_full__SHIFT 0x0000000f
+#define IA_DEBUG_REG1__dma_grp_valid_MASK 0x04000000L
+#define IA_DEBUG_REG1__dma_grp_valid__SHIFT 0x0000001a
+#define IA_DEBUG_REG1__dma_input_fifo_empty_MASK 0x00000001L
+#define IA_DEBUG_REG1__dma_input_fifo_empty__SHIFT 0x00000000
+#define IA_DEBUG_REG1__dma_input_fifo_full_MASK 0x00000002L
+#define IA_DEBUG_REG1__dma_input_fifo_full__SHIFT 0x00000001
+#define IA_DEBUG_REG1__dma_mask_fifo_empty_MASK 0x00002000L
+#define IA_DEBUG_REG1__dma_mask_fifo_empty__SHIFT 0x0000000d
+#define IA_DEBUG_REG1__dma_mask_fifo_we_MASK 0x40000000L
+#define IA_DEBUG_REG1__dma_mask_fifo_we__SHIFT 0x0000001e
+#define IA_DEBUG_REG1__dma_rdreq_dr_q_MASK 0x00000008L
+#define IA_DEBUG_REG1__dma_rdreq_dr_q__SHIFT 0x00000003
+#define IA_DEBUG_REG1__dma_req_fifo_empty_MASK 0x00010000L
+#define IA_DEBUG_REG1__dma_req_fifo_empty__SHIFT 0x00000010
+#define IA_DEBUG_REG1__dma_req_fifo_full_MASK 0x00020000L
+#define IA_DEBUG_REG1__dma_req_fifo_full__SHIFT 0x00000011
+#define IA_DEBUG_REG1__dma_req_path_q_MASK 0x00000080L
+#define IA_DEBUG_REG1__dma_req_path_q__SHIFT 0x00000007
+#define IA_DEBUG_REG1__dma_ret_data_we_q_MASK 0x80000000L
+#define IA_DEBUG_REG1__dma_ret_data_we_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG1__dma_skid_fifo_empty_MASK 0x01000000L
+#define IA_DEBUG_REG1__dma_skid_fifo_empty__SHIFT 0x00000018
+#define IA_DEBUG_REG1__dma_skid_fifo_full_MASK 0x02000000L
+#define IA_DEBUG_REG1__dma_skid_fifo_full__SHIFT 0x00000019
+#define IA_DEBUG_REG1__dma_tc_ret_sel_q_MASK 0x00000800L
+#define IA_DEBUG_REG1__dma_tc_ret_sel_q__SHIFT 0x0000000b
+#define IA_DEBUG_REG1__dma_zero_indices_q_MASK 0x00000010L
+#define IA_DEBUG_REG1__dma_zero_indices_q__SHIFT 0x00000004
+#define IA_DEBUG_REG1__grp_dma_read_MASK 0x08000000L
+#define IA_DEBUG_REG1__grp_dma_read__SHIFT 0x0000001b
+#define IA_DEBUG_REG1__last_rdreq_in_dma_op_MASK 0x00001000L
+#define IA_DEBUG_REG1__last_rdreq_in_dma_op__SHIFT 0x0000000c
+#define IA_DEBUG_REG1__out_of_range_r2_q_MASK 0x20000000L
+#define IA_DEBUG_REG1__out_of_range_r2_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG1__second_tc_ret_data_q_MASK 0x00000400L
+#define IA_DEBUG_REG1__second_tc_ret_data_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG1__stage2_dr_MASK 0x00040000L
+#define IA_DEBUG_REG1__stage2_dr__SHIFT 0x00000012
+#define IA_DEBUG_REG1__stage2_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG1__stage2_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG1__stage3_dr_MASK 0x00100000L
+#define IA_DEBUG_REG1__stage3_dr__SHIFT 0x00000014
+#define IA_DEBUG_REG1__stage3_rtr_MASK 0x00200000L
+#define IA_DEBUG_REG1__stage3_rtr__SHIFT 0x00000015
+#define IA_DEBUG_REG1__stage4_dr_MASK 0x00400000L
+#define IA_DEBUG_REG1__stage4_dr__SHIFT 0x00000016
+#define IA_DEBUG_REG1__stage4_rtr_MASK 0x00800000L
+#define IA_DEBUG_REG1__stage4_rtr__SHIFT 0x00000017
+#define IA_DEBUG_REG1__start_new_packet_MASK 0x00000004L
+#define IA_DEBUG_REG1__start_new_packet__SHIFT 0x00000002
+#define IA_DEBUG_REG2__hp_current_data_valid_MASK 0x10000000L
+#define IA_DEBUG_REG2__hp_current_data_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG2__hp_discard_1st_chunk_MASK 0x00000100L
+#define IA_DEBUG_REG2__hp_discard_1st_chunk__SHIFT 0x00000008
+#define IA_DEBUG_REG2__hp_discard_2nd_chunk_MASK 0x00000200L
+#define IA_DEBUG_REG2__hp_discard_2nd_chunk__SHIFT 0x00000009
+#define IA_DEBUG_REG2__hp_dma_buf_type_q_MASK 0x00000060L
+#define IA_DEBUG_REG2__hp_dma_buf_type_q__SHIFT 0x00000005
+#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q_MASK 0x00004000L
+#define IA_DEBUG_REG2__hp_dma_data_fifo_empty_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG2__hp_dma_data_fifo_full_MASK 0x00008000L
+#define IA_DEBUG_REG2__hp_dma_data_fifo_full__SHIFT 0x0000000f
+#define IA_DEBUG_REG2__hp_dma_grp_valid_MASK 0x04000000L
+#define IA_DEBUG_REG2__hp_dma_grp_valid__SHIFT 0x0000001a
+#define IA_DEBUG_REG2__hp_dma_input_fifo_empty_MASK 0x00000001L
+#define IA_DEBUG_REG2__hp_dma_input_fifo_empty__SHIFT 0x00000000
+#define IA_DEBUG_REG2__hp_dma_input_fifo_full_MASK 0x00000002L
+#define IA_DEBUG_REG2__hp_dma_input_fifo_full__SHIFT 0x00000001
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty_MASK 0x00002000L
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_empty__SHIFT 0x0000000d
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_we_MASK 0x40000000L
+#define IA_DEBUG_REG2__hp_dma_mask_fifo_we__SHIFT 0x0000001e
+#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q_MASK 0x00000008L
+#define IA_DEBUG_REG2__hp_dma_rdreq_dr_q__SHIFT 0x00000003
+#define IA_DEBUG_REG2__hp_dma_req_fifo_empty_MASK 0x00010000L
+#define IA_DEBUG_REG2__hp_dma_req_fifo_empty__SHIFT 0x00000010
+#define IA_DEBUG_REG2__hp_dma_req_fifo_full_MASK 0x00020000L
+#define IA_DEBUG_REG2__hp_dma_req_fifo_full__SHIFT 0x00000011
+#define IA_DEBUG_REG2__hp_dma_req_path_q_MASK 0x00000080L
+#define IA_DEBUG_REG2__hp_dma_req_path_q__SHIFT 0x00000007
+#define IA_DEBUG_REG2__hp_dma_ret_data_we_q_MASK 0x80000000L
+#define IA_DEBUG_REG2__hp_dma_ret_data_we_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty_MASK 0x01000000L
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_empty__SHIFT 0x00000018
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_full_MASK 0x02000000L
+#define IA_DEBUG_REG2__hp_dma_skid_fifo_full__SHIFT 0x00000019
+#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q_MASK 0x00000800L
+#define IA_DEBUG_REG2__hp_dma_tc_ret_sel_q__SHIFT 0x0000000b
+#define IA_DEBUG_REG2__hp_dma_zero_indices_q_MASK 0x00000010L
+#define IA_DEBUG_REG2__hp_dma_zero_indices_q__SHIFT 0x00000004
+#define IA_DEBUG_REG2__hp_grp_dma_read_MASK 0x08000000L
+#define IA_DEBUG_REG2__hp_grp_dma_read__SHIFT 0x0000001b
+#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op_MASK 0x00001000L
+#define IA_DEBUG_REG2__hp_last_rdreq_in_dma_op__SHIFT 0x0000000c
+#define IA_DEBUG_REG2__hp_out_of_range_r2_q_MASK 0x20000000L
+#define IA_DEBUG_REG2__hp_out_of_range_r2_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG2__hp_second_tc_ret_data_q_MASK 0x00000400L
+#define IA_DEBUG_REG2__hp_second_tc_ret_data_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG2__hp_stage2_dr_MASK 0x00040000L
+#define IA_DEBUG_REG2__hp_stage2_dr__SHIFT 0x00000012
+#define IA_DEBUG_REG2__hp_stage2_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG2__hp_stage2_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG2__hp_stage3_dr_MASK 0x00100000L
+#define IA_DEBUG_REG2__hp_stage3_dr__SHIFT 0x00000014
+#define IA_DEBUG_REG2__hp_stage3_rtr_MASK 0x00200000L
+#define IA_DEBUG_REG2__hp_stage3_rtr__SHIFT 0x00000015
+#define IA_DEBUG_REG2__hp_stage4_dr_MASK 0x00400000L
+#define IA_DEBUG_REG2__hp_stage4_dr__SHIFT 0x00000016
+#define IA_DEBUG_REG2__hp_stage4_rtr_MASK 0x00800000L
+#define IA_DEBUG_REG2__hp_stage4_rtr__SHIFT 0x00000017
+#define IA_DEBUG_REG2__hp_start_new_packet_MASK 0x00000004L
+#define IA_DEBUG_REG2__hp_start_new_packet__SHIFT 0x00000002
+#define IA_DEBUG_REG3__discard_1st_chunk_MASK 0x04000000L
+#define IA_DEBUG_REG3__discard_1st_chunk__SHIFT 0x0000001a
+#define IA_DEBUG_REG3__discard_2nd_chunk_MASK 0x08000000L
+#define IA_DEBUG_REG3__discard_2nd_chunk__SHIFT 0x0000001b
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out_MASK 0x00000008L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_eop_out__SHIFT 0x00000003
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out_MASK 0x00000004L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_null_out__SHIFT 0x00000002
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_read_MASK 0x00000002L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_read__SHIFT 0x00000001
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out_MASK 0x00000010L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_use_tc_out__SHIFT 0x00000004
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid_MASK 0x00000001L
+#define IA_DEBUG_REG3__dma_pipe0_rdreq_valid__SHIFT 0x00000000
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out_MASK 0x00000800L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_eop_out__SHIFT 0x0000000b
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out_MASK 0x00000400L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_null_out__SHIFT 0x0000000a
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_read_MASK 0x00000200L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_read__SHIFT 0x00000009
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out_MASK 0x00001000L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_use_tc_out__SHIFT 0x0000000c
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid_MASK 0x00000100L
+#define IA_DEBUG_REG3__dma_pipe1_rdreq_valid__SHIFT 0x00000008
+#define IA_DEBUG_REG3__dma_rdreq_send_out_MASK 0x00008000L
+#define IA_DEBUG_REG3__dma_rdreq_send_out__SHIFT 0x0000000f
+#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0_MASK 0x00000020L
+#define IA_DEBUG_REG3__grp_dma_draw_is_pipe0__SHIFT 0x00000005
+#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q_MASK 0x00002000L
+#define IA_DEBUG_REG3__ia_mc_rdreq_rtr_q__SHIFT 0x0000000d
+#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q_MASK 0x00040000L
+#define IA_DEBUG_REG3__ia_tc_rdreq_rtr_q__SHIFT 0x00000012
+#define IA_DEBUG_REG3__IA_TC_rdreq_send_out_MASK 0x20000000L
+#define IA_DEBUG_REG3__IA_TC_rdreq_send_out__SHIFT 0x0000001d
+#define IA_DEBUG_REG3__last_tc_req_p1_MASK 0x10000000L
+#define IA_DEBUG_REG3__last_tc_req_p1__SHIFT 0x0000001c
+#define IA_DEBUG_REG3__mc_out_rtr_MASK 0x00004000L
+#define IA_DEBUG_REG3__mc_out_rtr__SHIFT 0x0000000e
+#define IA_DEBUG_REG3__must_service_pipe0_req_MASK 0x00000040L
+#define IA_DEBUG_REG3__must_service_pipe0_req__SHIFT 0x00000006
+#define IA_DEBUG_REG3__pair0_valid_p1_MASK 0x00100000L
+#define IA_DEBUG_REG3__pair0_valid_p1__SHIFT 0x00000014
+#define IA_DEBUG_REG3__pair1_valid_p1_MASK 0x00200000L
+#define IA_DEBUG_REG3__pair1_valid_p1__SHIFT 0x00000015
+#define IA_DEBUG_REG3__pair2_valid_p1_MASK 0x00400000L
+#define IA_DEBUG_REG3__pair2_valid_p1__SHIFT 0x00000016
+#define IA_DEBUG_REG3__pair3_valid_p1_MASK 0x00800000L
+#define IA_DEBUG_REG3__pair3_valid_p1__SHIFT 0x00000017
+#define IA_DEBUG_REG3__pipe0_dr_MASK 0x00010000L
+#define IA_DEBUG_REG3__pipe0_dr__SHIFT 0x00000010
+#define IA_DEBUG_REG3__pipe0_rtr_MASK 0x00020000L
+#define IA_DEBUG_REG3__pipe0_rtr__SHIFT 0x00000011
+#define IA_DEBUG_REG3__send_pipe1_req_MASK 0x00000080L
+#define IA_DEBUG_REG3__send_pipe1_req__SHIFT 0x00000007
+#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in_MASK 0x80000000L
+#define IA_DEBUG_REG3__TAP_IA_rdret_vld_in__SHIFT 0x0000001f
+#define IA_DEBUG_REG3__TC_IA_rdret_valid_in_MASK 0x40000000L
+#define IA_DEBUG_REG3__TC_IA_rdret_valid_in__SHIFT 0x0000001e
+#define IA_DEBUG_REG3__tc_out_rtr_MASK 0x00080000L
+#define IA_DEBUG_REG3__tc_out_rtr__SHIFT 0x00000013
+#define IA_DEBUG_REG3__tc_req_count_q_MASK 0x03000000L
+#define IA_DEBUG_REG3__tc_req_count_q__SHIFT 0x00000018
+#define IA_DEBUG_REG4__current_shift_is_vect1_q_MASK 0x80000000L
+#define IA_DEBUG_REG4__current_shift_is_vect1_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG4__di_event_flag_p1_q_MASK 0x00100000L
+#define IA_DEBUG_REG4__di_event_flag_p1_q__SHIFT 0x00000014
+#define IA_DEBUG_REG4__di_first_group_of_draw_q_MASK 0x20000000L
+#define IA_DEBUG_REG4__di_first_group_of_draw_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG4__di_major_mode_p1_q_MASK 0x00010000L
+#define IA_DEBUG_REG4__di_major_mode_p1_q__SHIFT 0x00000010
+#define IA_DEBUG_REG4__di_source_select_p1_q_MASK 0x0c000000L
+#define IA_DEBUG_REG4__di_source_select_p1_q__SHIFT 0x0000001a
+#define IA_DEBUG_REG4__di_state_sel_p1_q_MASK 0x00e00000L
+#define IA_DEBUG_REG4__di_state_sel_p1_q__SHIFT 0x00000015
+#define IA_DEBUG_REG4__draw_opaq_active_q_MASK 0x02000000L
+#define IA_DEBUG_REG4__draw_opaq_active_q__SHIFT 0x00000019
+#define IA_DEBUG_REG4__draw_opaq_en_p1_q_MASK 0x01000000L
+#define IA_DEBUG_REG4__draw_opaq_en_p1_q__SHIFT 0x00000018
+#define IA_DEBUG_REG4__grp_se0_fifo_empty_MASK 0x00000040L
+#define IA_DEBUG_REG4__grp_se0_fifo_empty__SHIFT 0x00000006
+#define IA_DEBUG_REG4__grp_se0_fifo_full_MASK 0x00000080L
+#define IA_DEBUG_REG4__grp_se0_fifo_full__SHIFT 0x00000007
+#define IA_DEBUG_REG4__gs_mode_p1_q_MASK 0x000e0000L
+#define IA_DEBUG_REG4__gs_mode_p1_q__SHIFT 0x00000011
+#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q_MASK 0x00008000L
+#define IA_DEBUG_REG4__ia_se1vgt_prim_rtr_q__SHIFT 0x0000000f
+#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q_MASK 0x00004000L
+#define IA_DEBUG_REG4__ia_vgt_prim_rtr_q__SHIFT 0x0000000e
+#define IA_DEBUG_REG4__last_shift_of_draw_MASK 0x40000000L
+#define IA_DEBUG_REG4__last_shift_of_draw__SHIFT 0x0000001e
+#define IA_DEBUG_REG4__pipe0_dr_MASK 0x00000001L
+#define IA_DEBUG_REG4__pipe0_dr__SHIFT 0x00000000
+#define IA_DEBUG_REG4__pipe0_rtr_MASK 0x00000100L
+#define IA_DEBUG_REG4__pipe0_rtr__SHIFT 0x00000008
+#define IA_DEBUG_REG4__pipe1_dr_MASK 0x00000002L
+#define IA_DEBUG_REG4__pipe1_dr__SHIFT 0x00000001
+#define IA_DEBUG_REG4__pipe1_rtr_MASK 0x00000200L
+#define IA_DEBUG_REG4__pipe1_rtr__SHIFT 0x00000009
+#define IA_DEBUG_REG4__pipe2_dr_MASK 0x00000004L
+#define IA_DEBUG_REG4__pipe2_dr__SHIFT 0x00000002
+#define IA_DEBUG_REG4__pipe2_rtr_MASK 0x00000400L
+#define IA_DEBUG_REG4__pipe2_rtr__SHIFT 0x0000000a
+#define IA_DEBUG_REG4__pipe3_dr_MASK 0x00000008L
+#define IA_DEBUG_REG4__pipe3_dr__SHIFT 0x00000003
+#define IA_DEBUG_REG4__pipe3_rtr_MASK 0x00000800L
+#define IA_DEBUG_REG4__pipe3_rtr__SHIFT 0x0000000b
+#define IA_DEBUG_REG4__pipe4_dr_MASK 0x00000010L
+#define IA_DEBUG_REG4__pipe4_dr__SHIFT 0x00000004
+#define IA_DEBUG_REG4__pipe4_rtr_MASK 0x00001000L
+#define IA_DEBUG_REG4__pipe4_rtr__SHIFT 0x0000000c
+#define IA_DEBUG_REG4__pipe5_dr_MASK 0x00000020L
+#define IA_DEBUG_REG4__pipe5_dr__SHIFT 0x00000005
+#define IA_DEBUG_REG4__pipe5_rtr_MASK 0x00002000L
+#define IA_DEBUG_REG4__pipe5_rtr__SHIFT 0x0000000d
+#define IA_DEBUG_REG4__ready_to_read_di_MASK 0x10000000L
+#define IA_DEBUG_REG4__ready_to_read_di__SHIFT 0x0000001c
+#define IA_DEBUG_REG5__di_index_counter_q_15_0_MASK 0x0000ffffL
+#define IA_DEBUG_REG5__di_index_counter_q_15_0__SHIFT 0x00000000
+#define IA_DEBUG_REG5__draw_input_fifo_empty_MASK 0x80000000L
+#define IA_DEBUG_REG5__draw_input_fifo_empty__SHIFT 0x0000001f
+#define IA_DEBUG_REG5__draw_input_fifo_full_MASK 0x40000000L
+#define IA_DEBUG_REG5__draw_input_fifo_full__SHIFT 0x0000001e
+#define IA_DEBUG_REG5__instanceid_13_0_MASK 0x3fff0000L
+#define IA_DEBUG_REG5__instanceid_13_0__SHIFT 0x00000010
+#define IA_DEBUG_REG6__after_group_partial_MASK 0x00400000L
+#define IA_DEBUG_REG6__after_group_partial__SHIFT 0x00000016
+#define IA_DEBUG_REG6__current_shift_q_MASK 0x0000000fL
+#define IA_DEBUG_REG6__current_shift_q__SHIFT 0x00000000
+#define IA_DEBUG_REG6__current_stride_pre_MASK 0x000000f0L
+#define IA_DEBUG_REG6__current_stride_pre__SHIFT 0x00000004
+#define IA_DEBUG_REG6__current_stride_q_MASK 0x00001f00L
+#define IA_DEBUG_REG6__current_stride_q__SHIFT 0x00000008
+#define IA_DEBUG_REG6__curr_prim_partial_MASK 0x00008000L
+#define IA_DEBUG_REG6__curr_prim_partial__SHIFT 0x0000000f
+#define IA_DEBUG_REG6__extract_group_MASK 0x00800000L
+#define IA_DEBUG_REG6__extract_group__SHIFT 0x00000017
+#define IA_DEBUG_REG6__first_group_partial_MASK 0x00002000L
+#define IA_DEBUG_REG6__first_group_partial__SHIFT 0x0000000d
+#define IA_DEBUG_REG6__grp_shift_debug_data_MASK 0xff000000L
+#define IA_DEBUG_REG6__grp_shift_debug_data__SHIFT 0x00000018
+#define IA_DEBUG_REG6__next_group_partial_MASK 0x00200000L
+#define IA_DEBUG_REG6__next_group_partial__SHIFT 0x00000015
+#define IA_DEBUG_REG6__next_stride_q_MASK 0x001f0000L
+#define IA_DEBUG_REG6__next_stride_q__SHIFT 0x00000010
+#define IA_DEBUG_REG6__second_group_partial_MASK 0x00004000L
+#define IA_DEBUG_REG6__second_group_partial__SHIFT 0x0000000e
+#define IA_DEBUG_REG7__indx_shift_is_one_p2_q_MASK 0x02000000L
+#define IA_DEBUG_REG7__indx_shift_is_one_p2_q__SHIFT 0x00000019
+#define IA_DEBUG_REG7__indx_shift_is_two_p2_q_MASK 0x04000000L
+#define IA_DEBUG_REG7__indx_shift_is_two_p2_q__SHIFT 0x0000001a
+#define IA_DEBUG_REG7__indx_stride_is_four_p2_q_MASK 0x08000000L
+#define IA_DEBUG_REG7__indx_stride_is_four_p2_q__SHIFT 0x0000001b
+#define IA_DEBUG_REG7__last_group_of_draw_p2_q_MASK 0x00800000L
+#define IA_DEBUG_REG7__last_group_of_draw_p2_q__SHIFT 0x00000017
+#define IA_DEBUG_REG7__num_indx_in_group_p2_q_MASK 0x00700000L
+#define IA_DEBUG_REG7__num_indx_in_group_p2_q__SHIFT 0x00000014
+#define IA_DEBUG_REG7__reset_indx_state_q_MASK 0x0000000fL
+#define IA_DEBUG_REG7__reset_indx_state_q__SHIFT 0x00000000
+#define IA_DEBUG_REG7__shift_event_flag_p2_q_MASK 0x01000000L
+#define IA_DEBUG_REG7__shift_event_flag_p2_q__SHIFT 0x00000018
+#define IA_DEBUG_REG7__shift_prim0_partial_p3_q_MASK 0x80000000L
+#define IA_DEBUG_REG7__shift_prim0_partial_p3_q__SHIFT 0x0000001f
+#define IA_DEBUG_REG7__shift_prim0_reset_p3_q_MASK 0x40000000L
+#define IA_DEBUG_REG7__shift_prim0_reset_p3_q__SHIFT 0x0000001e
+#define IA_DEBUG_REG7__shift_prim1_partial_p3_q_MASK 0x20000000L
+#define IA_DEBUG_REG7__shift_prim1_partial_p3_q__SHIFT 0x0000001d
+#define IA_DEBUG_REG7__shift_prim1_reset_p3_q_MASK 0x10000000L
+#define IA_DEBUG_REG7__shift_prim1_reset_p3_q__SHIFT 0x0000001c
+#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q_MASK 0x0000f000L
+#define IA_DEBUG_REG7__shift_vect0_reset_match_p2_q__SHIFT 0x0000000c
+#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q_MASK 0x000f0000L
+#define IA_DEBUG_REG7__shift_vect1_reset_match_p2_q__SHIFT 0x00000010
+#define IA_DEBUG_REG7__shift_vect1_valid_p2_q_MASK 0x00000f00L
+#define IA_DEBUG_REG7__shift_vect1_valid_p2_q__SHIFT 0x00000008
+#define IA_DEBUG_REG7__shift_vect_valid_p2_q_MASK 0x000000f0L
+#define IA_DEBUG_REG7__shift_vect_valid_p2_q__SHIFT 0x00000004
+#define IA_DEBUG_REG8__di_prim_type_p1_q_MASK 0x0000001fL
+#define IA_DEBUG_REG8__di_prim_type_p1_q__SHIFT 0x00000000
+#define IA_DEBUG_REG8__grp_components_valid_MASK 0xf0000000L
+#define IA_DEBUG_REG8__grp_components_valid__SHIFT 0x0000001c
+#define IA_DEBUG_REG8__grp_continued_MASK 0x00000800L
+#define IA_DEBUG_REG8__grp_continued__SHIFT 0x0000000b
+#define IA_DEBUG_REG8__grp_eopg_MASK 0x04000000L
+#define IA_DEBUG_REG8__grp_eopg__SHIFT 0x0000001a
+#define IA_DEBUG_REG8__grp_eop_MASK 0x02000000L
+#define IA_DEBUG_REG8__grp_eop__SHIFT 0x00000019
+#define IA_DEBUG_REG8__grp_event_flag_MASK 0x08000000L
+#define IA_DEBUG_REG8__grp_event_flag__SHIFT 0x0000001b
+#define IA_DEBUG_REG8__grp_null_primitive_MASK 0x01000000L
+#define IA_DEBUG_REG8__grp_null_primitive__SHIFT 0x00000018
+#define IA_DEBUG_REG8__grp_output_path_MASK 0x00e00000L
+#define IA_DEBUG_REG8__grp_output_path__SHIFT 0x00000015
+#define IA_DEBUG_REG8__grp_state_sel_MASK 0x00007000L
+#define IA_DEBUG_REG8__grp_state_sel__SHIFT 0x0000000c
+#define IA_DEBUG_REG8__grp_sub_prim_type_MASK 0x001f8000L
+#define IA_DEBUG_REG8__grp_sub_prim_type__SHIFT 0x0000000f
+#define IA_DEBUG_REG8__last_group_of_inst_p5_q_MASK 0x00000100L
+#define IA_DEBUG_REG8__last_group_of_inst_p5_q__SHIFT 0x00000008
+#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q_MASK 0x00000400L
+#define IA_DEBUG_REG8__shift_prim0_null_flag_p5_q__SHIFT 0x0000000a
+#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q_MASK 0x00000200L
+#define IA_DEBUG_REG8__shift_prim1_null_flag_p5_q__SHIFT 0x00000009
+#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q_MASK 0x00000080L
+#define IA_DEBUG_REG8__shift_vect_end_of_packet_p5_q__SHIFT 0x00000007
+#define IA_DEBUG_REG8__two_cycle_xfer_p1_q_MASK 0x00000020L
+#define IA_DEBUG_REG8__two_cycle_xfer_p1_q__SHIFT 0x00000005
+#define IA_DEBUG_REG8__two_prim_input_p1_q_MASK 0x00000040L
+#define IA_DEBUG_REG8__two_prim_input_p1_q__SHIFT 0x00000006
+#define IA_DEBUG_REG9__eopg_between_prims_p6_MASK 0x00000400L
+#define IA_DEBUG_REG9__eopg_between_prims_p6__SHIFT 0x0000000a
+#define IA_DEBUG_REG9__eopg_on_last_prim_p6_MASK 0x00000200L
+#define IA_DEBUG_REG9__eopg_on_last_prim_p6__SHIFT 0x00000009
+#define IA_DEBUG_REG9__gfx_se_switch_p6_MASK 0x00000002L
+#define IA_DEBUG_REG9__gfx_se_switch_p6__SHIFT 0x00000001
+#define IA_DEBUG_REG9__grp_se1_fifo_empty_MASK 0x00040000L
+#define IA_DEBUG_REG9__grp_se1_fifo_empty__SHIFT 0x00000012
+#define IA_DEBUG_REG9__grp_se1_fifo_full_MASK 0x00080000L
+#define IA_DEBUG_REG9__grp_se1_fifo_full__SHIFT 0x00000013
+#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6_MASK 0x00000008L
+#define IA_DEBUG_REG9__null_eoi_xfer_prim0_p6__SHIFT 0x00000003
+#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6_MASK 0x00000004L
+#define IA_DEBUG_REG9__null_eoi_xfer_prim1_p6__SHIFT 0x00000002
+#define IA_DEBUG_REG9__prim0_eoi_p6_MASK 0x00000020L
+#define IA_DEBUG_REG9__prim0_eoi_p6__SHIFT 0x00000005
+#define IA_DEBUG_REG9__prim0_valid_eopg_p6_MASK 0x00000080L
+#define IA_DEBUG_REG9__prim0_valid_eopg_p6__SHIFT 0x00000007
+#define IA_DEBUG_REG9__prim1_eoi_p6_MASK 0x00000010L
+#define IA_DEBUG_REG9__prim1_eoi_p6__SHIFT 0x00000004
+#define IA_DEBUG_REG9__prim1_to_other_se_p6_MASK 0x00000100L
+#define IA_DEBUG_REG9__prim1_to_other_se_p6__SHIFT 0x00000008
+#define IA_DEBUG_REG9__prim1_valid_eopg_p6_MASK 0x00000040L
+#define IA_DEBUG_REG9__prim1_valid_eopg_p6__SHIFT 0x00000006
+#define IA_DEBUG_REG9__prim1_xfer_p6_MASK 0x00020000L
+#define IA_DEBUG_REG9__prim1_xfer_p6__SHIFT 0x00000011
+#define IA_DEBUG_REG9__prim_count_eq_group_size_p6_MASK 0x00000800L
+#define IA_DEBUG_REG9__prim_count_eq_group_size_p6__SHIFT 0x0000000b
+#define IA_DEBUG_REG9__prim_counter_q_MASK 0xfffc0000L
+#define IA_DEBUG_REG9__prim_counter_q__SHIFT 0x00000012
+#define IA_DEBUG_REG9__prim_count_gt_group_size_p6_MASK 0x00001000L
+#define IA_DEBUG_REG9__prim_count_gt_group_size_p6__SHIFT 0x0000000c
+#define IA_DEBUG_REG9__send_to_se1_p6_MASK 0x00000001L
+#define IA_DEBUG_REG9__send_to_se1_p6__SHIFT 0x00000000
+#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q_MASK 0x00010000L
+#define IA_DEBUG_REG9__shift_vect_end_of_packet_p5_q__SHIFT 0x00000010
+#define IA_DEBUG_REG9__SPARE0_MASK 0x00004000L
+#define IA_DEBUG_REG9__SPARE0__SHIFT 0x0000000e
+#define IA_DEBUG_REG9__SPARE1_MASK 0x00008000L
+#define IA_DEBUG_REG9__SPARE1__SHIFT 0x0000000f
+#define IA_DEBUG_REG9__two_prim_output_p5_q_MASK 0x00002000L
+#define IA_DEBUG_REG9__two_prim_output_p5_q__SHIFT 0x0000000d
+#define IA_ENHANCE__MISC_MASK 0xffffffffL
+#define IA_ENHANCE__MISC__SHIFT 0x00000000
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x00000012
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x00000010
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000ffffL
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x00000000
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x00000013
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x00000011
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x00000014
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define IA_VMID_OVERRIDE__ENABLE_MASK 0x00000001L
+#define IA_VMID_OVERRIDE__ENABLE__SHIFT 0x00000000
+#define IA_VMID_OVERRIDE__VMID_MASK 0x0000001eL
+#define IA_VMID_OVERRIDE__VMID__SHIFT 0x00000001
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x00000012
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x00000010
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x00000014
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x00000013
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x00000018
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x00000016
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000c000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0x0000000e
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0x0000000d
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x00000011
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x00000000
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x00000001
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x00000002
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x00000003
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x00000004
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x00000005
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x00000019
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x00000015
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x0000001b
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x0000001a
+#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000L
+#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x0000001f
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x00000003
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x00000000
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x0000001f
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x0000001e
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x0000001d
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x0000001c
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x00000001
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x00000004
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x00000005
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0x0000000e
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0x0000000d
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0x0000000c
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x00000009
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x00000008
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0x0000000b
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0x0000000a
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x00000003
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x00000014
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x00000002
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x00000006
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x00000007
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x00000000
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x00000004
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x00000001
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x00000005
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xffffffffL
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xffffffffL
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xffffffffL
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x00000000
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x00000000
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x00000001
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x00000002
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x00000003
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x00000004
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x00000005
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x00000006
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x00000007
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x00000008
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x00000009
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0x0000000a
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0x0000000b
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0x0000000c
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0x0000000d
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0x0000000e
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0x0000000f
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x00000011
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x00000019
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x00000014
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x00000010
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x00000012
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x00000013
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x00000016
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x00000017
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x00000018
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x00000015
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0x0000000b
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x00000001
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x00000000
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x00000003
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x00000002
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x00000005
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x00000004
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0x0000000a
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x00000008
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x00000009
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x00000004
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x00000018
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001e000L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0x0000000d
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x00000014
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000ffffL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xffff0000L
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x00000010
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000ffffL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x00000000
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xffff0000L
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x0000001c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000fL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x00000000
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000f0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x00000004
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000f00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x00000008
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000f000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0x0000000c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000f0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x00000010
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00f00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x00000014
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0f000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x00000018
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xf0000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000fL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x00000000
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000f0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x00000004
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000f00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x00000008
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000f000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0x0000000c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000f0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x00000010
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00f00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x00000014
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0f000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x00000018
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xf0000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000f00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x00000008
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000f000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0x0000000c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000f0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x00000010
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00f00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x00000014
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0f000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x00000018
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xf0000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x0000001c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000fL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x00000000
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000f0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x00000004
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000ffffL
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x00000000
+#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX_MASK 0x0000003fL
+#define PA_SC_DEBUG_CNTL__SC_DEBUG_INDX__SHIFT 0x00000000
+#define PA_SC_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define PA_SC_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG0__REG0_FIELD0_MASK 0x00000003L
+#define PA_SC_DEBUG_REG0__REG0_FIELD0__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG0__REG0_FIELD1_MASK 0x0000000cL
+#define PA_SC_DEBUG_REG0__REG0_FIELD1__SHIFT 0x00000002
+#define PA_SC_DEBUG_REG1__REG1_FIELD0_MASK 0x00000003L
+#define PA_SC_DEBUG_REG1__REG1_FIELD0__SHIFT 0x00000000
+#define PA_SC_DEBUG_REG1__REG1_FIELD1_MASK 0x0000000cL
+#define PA_SC_DEBUG_REG1__REG1_FIELD1__SHIFT 0x00000002
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xf0000000L
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x0000001c
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003f000L
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0x0000000c
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00fc0000L
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x00000012
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0f000000L
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x00000018
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000f0L
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x00000004
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000f00L
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x00000008
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000fL
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x00000000
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x00000002
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x00000009
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00004000L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0x0000000e
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00200000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x00000015
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00800000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x00000017
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x00000012
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0x00000010
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x00000016
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x00000013
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0x0000000d
+#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE_MASK 0x000000c0L
+#define PA_SC_ENHANCE__DISABLE_PW_BUBBLE_COLLAPSE__SHIFT 0x00000006
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x00000001
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x00000005
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x0000000a
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x0000000b
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00001000L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0x0000000c
+#define PA_SC_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+#define PA_SC_ENHANCE__ECO_SPARE0__SHIFT 0x0000001f
+#define PA_SC_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_SC_ENHANCE__ECO_SPARE1__SHIFT 0x0000001e
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x00000003
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x00000004
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00008000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0x0000000f
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x01000000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x00000018
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0x00000011
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00100000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x00000014
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x00000000
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000100L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x00000008
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000000ffL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x00000000
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007fc0L
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x00000006
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xff800000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x00000017
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003fL
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x00000000
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001f8000L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0x0000000f
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000ffffL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x00000000
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xffff0000L
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00fc0000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x00000012
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000fc0L
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x00000006
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003fL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x00000000
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003f000L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0x0000000c
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0x0000000c
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x00000009
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0x0000000a
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0x0000000b
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x0000001d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000ffffL
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x00000000
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x0000001c
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00ff0000L
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x00000010
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000ff00L
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x00000008
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000fL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x00000002
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x00000003
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x00000001
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x00000019
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x0000001a
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x00000013
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00f00000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x00000014
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0x0000000f
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0x0000000e
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x00000018
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x00000012
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x00000011
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x0000001b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x0000001c
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x00000010
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x00000007
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x00000009
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0x0000000a
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x00000008
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x00000002
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x00000001
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x00000003
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x00000004
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x00000000
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0x0000000b
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0x0000000c
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0x0000000d
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001ffL
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000000cL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x00000002
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x00000004
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x00000008
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000c000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0x0000000e
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000c00L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0x0000000a
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0x0000000c
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x00000000
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000cL
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x00000002
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x00000004
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x00000006
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x00000007
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x00000010
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000c0000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x00000012
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x00000014
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x00000018
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x0c000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x0000001a
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x0000001c
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000ffffL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xffff0000L
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000ffffL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xffff0000L
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xffffffffL
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x00000000
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000ffffL
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x00000000
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xffff0000L
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007fffL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x00000000
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7fff0000L
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007fffL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x00000000
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7fff0000L
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x00000010
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x0000001f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x0000001f
+#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX_MASK 0x0000001fL
+#define PA_SU_DEBUG_CNTL__SU_DEBUG_INDX__SHIFT 0x00000000
+#define PA_SU_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define PA_SU_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001ffL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x00000000
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01ff0000L
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x00000010
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000ffffL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x00000004
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x00000002
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x00000003
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xffffffffL
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x00000000
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00ffffffL
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000ffffL
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xffff0000L
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x00000010
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000ffffL
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x00000000
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000ffffL
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x00000000
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xffff0000L
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x00000010
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x00000008
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000ffL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x00000000
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xffffffffL
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x00000000
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x00000005
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x00000001
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x00000006
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x00000002
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000ff00L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x00000008
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x00000007
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x00000003
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x00000004
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x00000000
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x0000001e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x0000001f
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x00000001
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x00000000
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x00000002
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x00000015
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x00000014
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x00000008
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000e0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x00000005
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x00000003
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0x0000000c
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0x0000000b
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0x0000000d
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x00000013
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x00000010
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x00000000
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x00000003
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x00000001
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x00000000
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xffffffffL
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x00000000
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x00000000
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xffffffffL
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x00000000
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x00000000
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xffffffffL
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x00000000
+#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xffffffffL
+#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x00000000
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x00000001
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x00000000
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x00000002
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007fff8L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x00000003
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xfff80000L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x00000013
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x00000000
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xfffffffeL
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x00000001
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x0000001b
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x00000000
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x0000ff00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x00000008
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x0000001c
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x00000001
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000fcL
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x00000002
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x0000001d
+#define RLC_CGCG_CGLS_CTRL__SPARE_MASK 0x80000000L
+#define RLC_CGCG_CGLS_CTRL__SPARE__SHIFT 0x0000001f
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000fL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x00000000
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000f0L
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x00000004
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0fff0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x00000010
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xf0000000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x0000001c
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000f00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x00000008
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000f000L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0x0000000c
+#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE_MASK 0xffffffffL
+#define RLC_CGTT_MGCG_OVERRIDE__OVERRIDE__SHIFT 0x00000000
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x00000001
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x00000002
+#define RLC_CNTL__RESERVED_MASK 0xffffff00L
+#define RLC_CNTL__RESERVED__SHIFT 0x00000008
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x00000000
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x00000003
+#define RLC_CNTL__SOFT_RESET_DEBUG_MODE_MASK 0x00000010L
+#define RLC_CNTL__SOFT_RESET_DEBUG_MODE__SHIFT 0x00000004
+#define RLC_CU_STATUS__WORK_PENDING_MASK 0xffffffffL
+#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x00000000
+#define RLC_DEBUG__DATA_MASK 0xffffffffL
+#define RLC_DEBUG__DATA__SHIFT 0x00000000
+#define RLC_DEBUG_SELECT__RESERVED_MASK 0xffff8000L
+#define RLC_DEBUG_SELECT__RESERVED__SHIFT 0x0000000f
+#define RLC_DEBUG_SELECT__SELECT_MASK 0x000000ffL
+#define RLC_DEBUG_SELECT__SELECT__SHIFT 0x00000000
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_ACK_MASK 0x00000010L
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_ACK__SHIFT 0x00000004
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_REQUEST_MASK 0x00000001L
+#define RLC_DRIVER_CPDMA_STATUS__DRIVER_REQUEST__SHIFT 0x00000000
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED1_MASK 0x0000000eL
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED1__SHIFT 0x00000001
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED_MASK 0xffffffe0L
+#define RLC_DRIVER_CPDMA_STATUS__RESERVED__SHIFT 0x00000005
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xffffffffL
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x00000000
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffffL
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x00000000
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001ffL
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x00000000
+#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xfffffe00L
+#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x00000009
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xffffffc0L
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x00000006
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003fL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x00000000
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xffffffffL
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x00000000
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xffffffffL
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x00000000
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000ff0L
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x00000004
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x00000001
+#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
+#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x00000003
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x00000002
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x00000000
+#define RLC_LB_CNTL__RESERVED_MASK 0xfffffff0L
+#define RLC_LB_CNTL__RESERVED__SHIFT 0x00000004
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xffffffffL
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x00000000
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xffffffffL
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x00000000
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xffffffffL
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x00000000
+#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000feL
+#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x00000001
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xffff0000L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x00000010
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000ff00L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x00000008
+#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
+#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x00000000
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xffffffffL
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x00000000
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000ffL
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x00000000
+#define RLC_MAX_PG_CU__SPARE_MASK 0xffffff00L
+#define RLC_MAX_PG_CU__SPARE__SHIFT 0x00000008
+#define RLC_MC_CNTL__RDNFO_STALL_MASK 0x10000000L
+#define RLC_MC_CNTL__RDNFO_STALL__SHIFT 0x0000001c
+#define RLC_MC_CNTL__RDNFO_URG_MASK 0x00f00000L
+#define RLC_MC_CNTL__RDNFO_URG__SHIFT 0x00000014
+#define RLC_MC_CNTL__RDREQ_PRIV_MASK 0x08000000L
+#define RLC_MC_CNTL__RDREQ_PRIV__SHIFT 0x0000001b
+#define RLC_MC_CNTL__RDREQ_SWAP_MASK 0x03000000L
+#define RLC_MC_CNTL__RDREQ_SWAP__SHIFT 0x00000018
+#define RLC_MC_CNTL__RDREQ_TRAN_MASK 0x04000000L
+#define RLC_MC_CNTL__RDREQ_TRAN__SHIFT 0x0000001a
+#define RLC_MC_CNTL__RESERVED_B_MASK 0x000fe000L
+#define RLC_MC_CNTL__RESERVED_B__SHIFT 0x0000000d
+#define RLC_MC_CNTL__RESERVED_MASK 0xe0000000L
+#define RLC_MC_CNTL__RESERVED__SHIFT 0x0000001d
+#define RLC_MC_CNTL__WRNFO_STALL_MASK 0x00000010L
+#define RLC_MC_CNTL__WRNFO_STALL__SHIFT 0x00000004
+#define RLC_MC_CNTL__WRNFO_URG_MASK 0x000001e0L
+#define RLC_MC_CNTL__WRNFO_URG__SHIFT 0x00000005
+#define RLC_MC_CNTL__WRREQ_DW_IMASK_MASK 0x00001e00L
+#define RLC_MC_CNTL__WRREQ_DW_IMASK__SHIFT 0x00000009
+#define RLC_MC_CNTL__WRREQ_PRIV_MASK 0x00000008L
+#define RLC_MC_CNTL__WRREQ_PRIV__SHIFT 0x00000003
+#define RLC_MC_CNTL__WRREQ_SWAP_MASK 0x00000003L
+#define RLC_MC_CNTL__WRREQ_SWAP__SHIFT 0x00000000
+#define RLC_MC_CNTL__WRREQ_TRAN_MASK 0x00000004L
+#define RLC_MC_CNTL__WRREQ_TRAN__SHIFT 0x00000002
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xff000000L
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x00000018
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x000000fcL
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x00000002
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x00000001
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x00000000
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00ff0000L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x00000010
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000ff00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x00000008
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xffffffffL
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x00000000
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x00000010
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x00000002
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x00000000
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x00000001
+#define RLC_PG_CNTL__PG_ERROR_STATUS_MASK 0xff000000L
+#define RLC_PG_CNTL__PG_ERROR_STATUS__SHIFT 0x00000018
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00f80000L
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x00000013
+#define RLC_PG_CNTL__RESERVED_MASK 0xfffffff0L
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x00000004
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x00000012
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x00000011
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x00000003
+#define RLC_SAVE_AND_RESTORE_BASE__BASE_MASK 0xffffffffL
+#define RLC_SAVE_AND_RESTORE_BASE__BASE__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xffffffffL
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x00000000
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000fL
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x00000000
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x0000c000L
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x0000000e
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x00003800L
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0x0000000b
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001c0L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x00000006
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000200L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x00000009
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00000400L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0x0000000a
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x00000004
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xffffc000L
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x0000000e
+#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000ffL
+#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x00000000
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK 0x00100000L
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0__SHIFT 0x00000014
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_1_MASK 0x00200000L
+#define RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_1__SHIFT 0x00000015
+#define RLC_SERDES_WR_CTRL__CGLS_DISABLE_MASK 0x00020000L
+#define RLC_SERDES_WR_CTRL__CGLS_DISABLE__SHIFT 0x00000011
+#define RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK 0x00010000L
+#define RLC_SERDES_WR_CTRL__CGLS_ENABLE__SHIFT 0x00000010
+#define RLC_SERDES_WR_CTRL__CGLS_OFF_MASK 0x00080000L
+#define RLC_SERDES_WR_CTRL__CGLS_OFF__SHIFT 0x00000013
+#define RLC_SERDES_WR_CTRL__CGLS_ON_MASK 0x00040000L
+#define RLC_SERDES_WR_CTRL__CGLS_ON__SHIFT 0x00000012
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK 0x00400000L
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0__SHIFT 0x00000016
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK 0x00800000L
+#define RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1__SHIFT 0x00000017
+#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
+#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0x0000000a
+#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
+#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0x0000000b
+#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
+#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x00000008
+#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
+#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x00000009
+#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
+#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0x0000000d
+#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xf0000000L
+#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x0000001c
+#define RLC_SERDES_WR_CTRL__RESERVED_1_MASK 0x0000c000L
+#define RLC_SERDES_WR_CTRL__RESERVED_1__SHIFT 0x0000000e
+#define RLC_SERDES_WR_CTRL__RESERVED_2_MASK 0x0f000000L
+#define RLC_SERDES_WR_CTRL__RESERVED_2__SHIFT 0x00000018
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0x0000000c
+#define RLC_SERDES_WR_DATA__DATA_MASK 0xffffffffL
+#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x00000000
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x00000000
+#define RLC_SMU_PG_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_PG_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_PG_CTRL__START_PG_MASK 0x00000001L
+#define RLC_SMU_PG_CTRL__START_PG__SHIFT 0x00000000
+#define RLC_SMU_PG_WAKE_UP_CTRL__SPARE_MASK 0xfffffffeL
+#define RLC_SMU_PG_WAKE_UP_CTRL__SPARE__SHIFT 0x00000001
+#define RLC_SMU_PG_WAKE_UP_CTRL__START_PG_WAKE_UP_MASK 0x00000001L
+#define RLC_SMU_PG_WAKE_UP_CTRL__START_PG_WAKE_UP__SHIFT 0x00000000
+#define RLC_SOFT_RESET_GPU__RESERVED_MASK 0xfffffffeL
+#define RLC_SOFT_RESET_GPU__RESERVED__SHIFT 0x00000001
+#define RLC_SOFT_RESET_GPU__SOFT_RESET_GPU_MASK 0x00000001L
+#define RLC_SOFT_RESET_GPU__SOFT_RESET_GPU__SHIFT 0x00000000
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xffffffffL
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x00000000
+#define RLC_STAT__RESERVED_MASK 0xfffffff0L
+#define RLC_STAT__RESERVED__SHIFT 0x00000004
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_BUSY__SHIFT 0x00000000
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x00000001
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x00000002
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000ffL
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x00000000
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000ff00L
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x00000008
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00ff0000L
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x00000010
+#define RLC_THREAD1_DELAY__SPARE_MASK 0xff000000L
+#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x00000018
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xffffffffL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x00000000
+#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xffffffffL
+#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x00000000
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xffffffffL
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x00000000
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xffffffffL
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x00000000
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xffffffffL
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x00000000
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xffffffffL
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x00000000
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xffffffffL
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x00000000
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xffffffffL
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x00000000
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xffffffffL
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x00000000
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xffffffffL
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x00000000
+#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
+#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x00000010
+#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000ffL
+#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x00000000
+#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld_MASK 0x80000000L
+#define SETUP_DEBUG_REG0__cl_dyn_sclk_vld__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG0__event_gated_MASK 0x10000000L
+#define SETUP_DEBUG_REG0__event_gated__SHIFT 0x0000001c
+#define SETUP_DEBUG_REG0__event_id_gated_MASK 0x0fc00000L
+#define SETUP_DEBUG_REG0__event_id_gated__SHIFT 0x00000016
+#define SETUP_DEBUG_REG0__geom_busy_MASK 0x00200000L
+#define SETUP_DEBUG_REG0__geom_busy__SHIFT 0x00000015
+#define SETUP_DEBUG_REG0__geom_enable_MASK 0x00008000L
+#define SETUP_DEBUG_REG0__geom_enable__SHIFT 0x0000000f
+#define SETUP_DEBUG_REG0__ge_stallb_MASK 0x00004000L
+#define SETUP_DEBUG_REG0__ge_stallb__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG0__pfifo_busy_MASK 0x00080000L
+#define SETUP_DEBUG_REG0__pfifo_busy__SHIFT 0x00000013
+#define SETUP_DEBUG_REG0__pmode_prim_gated_MASK 0x20000000L
+#define SETUP_DEBUG_REG0__pmode_prim_gated__SHIFT 0x0000001d
+#define SETUP_DEBUG_REG0__pmode_state_MASK 0x00003f00L
+#define SETUP_DEBUG_REG0__pmode_state__SHIFT 0x00000008
+#define SETUP_DEBUG_REG0__su_baryc_cntl_state_MASK 0x00000003L
+#define SETUP_DEBUG_REG0__su_baryc_cntl_state__SHIFT 0x00000000
+#define SETUP_DEBUG_REG0__su_clip_baryc_free_MASK 0x00030000L
+#define SETUP_DEBUG_REG0__su_clip_baryc_free__SHIFT 0x00000010
+#define SETUP_DEBUG_REG0__su_clip_rtr_MASK 0x00040000L
+#define SETUP_DEBUG_REG0__su_clip_rtr__SHIFT 0x00000012
+#define SETUP_DEBUG_REG0__su_cntl_busy_MASK 0x00100000L
+#define SETUP_DEBUG_REG0__su_cntl_busy__SHIFT 0x00000014
+#define SETUP_DEBUG_REG0__su_cntl_state_MASK 0x0000003cL
+#define SETUP_DEBUG_REG0__su_cntl_state__SHIFT 0x00000002
+#define SETUP_DEBUG_REG0__su_dyn_sclk_vld_MASK 0x40000000L
+#define SETUP_DEBUG_REG0__su_dyn_sclk_vld__SHIFT 0x0000001e
+#define SETUP_DEBUG_REG1__x_sort0_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG1__x_sort0_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG1__y_sort0_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG1__y_sort0_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG2__x_sort1_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG2__x_sort1_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG2__y_sort1_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG2__y_sort1_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG3__x_sort2_gated_23_8_MASK 0xffff0000L
+#define SETUP_DEBUG_REG3__x_sort2_gated_23_8__SHIFT 0x00000010
+#define SETUP_DEBUG_REG3__y_sort2_gated_23_8_MASK 0x0000ffffL
+#define SETUP_DEBUG_REG3__y_sort2_gated_23_8__SHIFT 0x00000000
+#define SETUP_DEBUG_REG4__attr_indx_sort0_gated_MASK 0x00003fffL
+#define SETUP_DEBUG_REG4__attr_indx_sort0_gated__SHIFT 0x00000000
+#define SETUP_DEBUG_REG4__backfacing_gated_MASK 0x00008000L
+#define SETUP_DEBUG_REG4__backfacing_gated__SHIFT 0x0000000f
+#define SETUP_DEBUG_REG4__clipped_gated_MASK 0x00080000L
+#define SETUP_DEBUG_REG4__clipped_gated__SHIFT 0x00000013
+#define SETUP_DEBUG_REG4__dealloc_slot_gated_MASK 0x00700000L
+#define SETUP_DEBUG_REG4__dealloc_slot_gated__SHIFT 0x00000014
+#define SETUP_DEBUG_REG4__diamond_rule_gated_MASK 0x03000000L
+#define SETUP_DEBUG_REG4__diamond_rule_gated__SHIFT 0x00000018
+#define SETUP_DEBUG_REG4__eop_gated_MASK 0x80000000L
+#define SETUP_DEBUG_REG4__eop_gated__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG4__fpov_gated_MASK 0x60000000L
+#define SETUP_DEBUG_REG4__fpov_gated__SHIFT 0x0000001d
+#define SETUP_DEBUG_REG4__null_prim_gated_MASK 0x00004000L
+#define SETUP_DEBUG_REG4__null_prim_gated__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG4__st_indx_gated_MASK 0x00070000L
+#define SETUP_DEBUG_REG4__st_indx_gated__SHIFT 0x00000010
+#define SETUP_DEBUG_REG4__type_gated_MASK 0x1c000000L
+#define SETUP_DEBUG_REG4__type_gated__SHIFT 0x0000001a
+#define SETUP_DEBUG_REG4__xmajor_gated_MASK 0x00800000L
+#define SETUP_DEBUG_REG4__xmajor_gated__SHIFT 0x00000017
+#define SETUP_DEBUG_REG5__attr_indx_sort1_gated_MASK 0x0fffc000L
+#define SETUP_DEBUG_REG5__attr_indx_sort1_gated__SHIFT 0x0000000e
+#define SETUP_DEBUG_REG5__attr_indx_sort2_gated_MASK 0x00003fffL
+#define SETUP_DEBUG_REG5__attr_indx_sort2_gated__SHIFT 0x00000000
+#define SETUP_DEBUG_REG5__pa_reg_sclk_vld_MASK 0x80000000L
+#define SETUP_DEBUG_REG5__pa_reg_sclk_vld__SHIFT 0x0000001f
+#define SETUP_DEBUG_REG5__provoking_vtx_gated_MASK 0x30000000L
+#define SETUP_DEBUG_REG5__provoking_vtx_gated__SHIFT 0x0000001c
+#define SETUP_DEBUG_REG5__valid_prim_gated_MASK 0x40000000L
+#define SETUP_DEBUG_REG5__valid_prim_gated__SHIFT 0x0000001e
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000ffffL
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x00000000
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xffff0000L
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x00000010
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000ffffL
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x00000000
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xffff0000L
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x00000010
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x00000000
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x00000003
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001c0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x00000006
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000e00L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x00000009
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0x0000000c
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000c000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0x0000000e
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x00000010
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000c0000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x00000012
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x00000018
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x00000008
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0x0000000c
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x00000000
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x00000004
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x00000010
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x00000014
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x00000008
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x00000004
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003c00L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0x0000000a
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x00000009
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x00000006
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xffff0000L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x00000010
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x00000007
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000fL
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x00000000
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x00000019
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x00000018
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00e00000L
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x00000015
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001fffffL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x00000000
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x0000001a
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x0000001b
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x00000007
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x00000008
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0x00000009
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0x0000000b
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0x0000000c
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0x0000000d
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0x0000000e
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0x0000000f
+#define SPI_DEBUG_BUSY__CSG_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CSG_BUSY__SHIFT 0x00000007
+#define SPI_DEBUG_BUSY__ES_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__ES_BUSY__SHIFT 0x00000002
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x0000000f
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x00000010
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x00000003
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x00000001
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x0000000a
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x0000000b
+#define SPI_DEBUG_BUSY__LS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__LS_BUSY__SHIFT 0x00000000
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x00000014
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x00000005
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x00000006
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY__SHIFT 0x0000000c
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY__SHIFT 0x0000000d
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x00000011
+#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__VS_BUSY__SHIFT 0x00000004
+#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE_MASK 0x00000001L
+#define SPI_DEBUG_CNTL__DEBUG_GRBM_OVERRIDE__SHIFT 0x00000000
+#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL_MASK 0x000003e0L
+#define SPI_DEBUG_CNTL__DEBUG_GROUP_SEL__SHIFT 0x00000005
+#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL_MASK 0x0e000000L
+#define SPI_DEBUG_CNTL__DEBUG_PIPE_SEL__SHIFT 0x00000019
+#define SPI_DEBUG_CNTL__DEBUG_REG_EN_MASK 0x80000000L
+#define SPI_DEBUG_CNTL__DEBUG_REG_EN__SHIFT 0x0000001f
+#define SPI_DEBUG_CNTL__DEBUG_SH_SEL_MASK 0x00010000L
+#define SPI_DEBUG_CNTL__DEBUG_SH_SEL__SHIFT 0x00000010
+#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL_MASK 0x0000fc00L
+#define SPI_DEBUG_CNTL__DEBUG_SIMD_SEL__SHIFT 0x0000000a
+#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL_MASK 0x0000001eL
+#define SPI_DEBUG_CNTL__DEBUG_THREAD_TYPE_SEL__SHIFT 0x00000001
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0_MASK 0x00020000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_0__SHIFT 0x00000011
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1_MASK 0x00040000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_1__SHIFT 0x00000012
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2_MASK 0x00080000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_2__SHIFT 0x00000013
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3_MASK 0x00100000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_3__SHIFT 0x00000014
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4_MASK 0x00200000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_4__SHIFT 0x00000015
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5_MASK 0x00400000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_5__SHIFT 0x00000016
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6_MASK 0x00800000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_6__SHIFT 0x00000017
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7_MASK 0x01000000L
+#define SPI_DEBUG_CNTL__SPI_ECO_SPARE_7__SHIFT 0x00000018
+#define SPI_DEBUG_READ__DATA_MASK 0x00ffffffL
+#define SPI_DEBUG_READ__DATA__SHIFT 0x00000000
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000ff00L
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x00000008
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000ffL
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x00000000
+#define SPI_GDS_CREDITS__UNUSED_MASK 0xffff0000L
+#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x00000010
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x00000000
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x00000001
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0x0000000b
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001cL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x00000002
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000e0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x00000005
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x00000008
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0x0000000e
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x00000000
+#define SPI_LB_CU_MASK__CU_MASK_MASK 0x0000ffffL
+#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x00000000
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xffffffffL
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000f0L
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x00000004
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000fL
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x00000000
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000f000L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0x0000000c
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000f00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x00000008
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00f00000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x00000014
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000f0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x00000010
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xf0000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x0000001c
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0f000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x00000018
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0x0000ffffL
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x00000000
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0x0000000e
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003fL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x00000000
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x00000006
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0x0000000d
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0x0000000c
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x00000005
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x00000006
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x00000004
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x00000007
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x00000001
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x00000002
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x00000003
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x00000000
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0x0000000f
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0x0000000b
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x00000008
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x00000009
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0x0000000a
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0x0000000e
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001e000L
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0x0000000d
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x00000008
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x00000012
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0x0000000a
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003fL
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x00000000
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x00000011
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0x0000000d
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0x0000000c
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x00000005
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x00000006
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x00000004
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x00000007
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x00000001
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x00000002
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x00000003
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x00000000
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0x0000000f
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0x0000000b
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x00000008
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x00000009
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0x0000000a
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0x0000000e
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000fffL
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x00000000
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000f0L
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x00000004
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000f00L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x00000008
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000f000L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0x0000000c
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000f0000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x00000010
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00f00000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x00000014
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0f000000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x00000018
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xf0000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x0000001c
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL_MASK 0x38000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CACHE_CTL__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CDBG_USER__SHIFT 0x0000001e
+#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_ES__CU_GROUP_ENABLE__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_ES__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_ES__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_ES__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_ES__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_ES__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_ES__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_ES__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_ES__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_ES__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_ES__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_ES__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_ES__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL_MASK 0x0e000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CACHE_CTL__SHIFT 0x00000019
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x0000001c
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL_MASK 0x07000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CACHE_CTL__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL_MASK 0x1c000000L
+#define SPI_SHADER_PGM_RSRC1_LS__CACHE_CTL__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC1_LS__CDBG_USER__SHIFT 0x0000001d
+#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_LS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_LS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_LS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_LS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_LS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_LS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_LS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_LS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_LS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_LS__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_LS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_LS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL_MASK 0x0e000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CACHE_CTL__SHIFT 0x00000019
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x0000001c
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL_MASK 0x38000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CACHE_CTL__SHIFT 0x0000001b
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER__SHIFT 0x0000001e
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x0000001a
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE__SHIFT 0x00000016
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x00000015
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000ff000L
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x00000017
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000c00L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003c0L
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x00000018
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003fL
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN_MASK 0x00007f00L
+#define SPI_SHADER_PGM_RSRC2_ES__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN_MASK 0x0001ff00L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES_GS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES_GS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN_MASK 0x0001ff00L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__EXCP_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE_MASK 0x1ff00000L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__LDS_SIZE__SHIFT 0x00000014
+#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_ES_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_ES_VS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x00003f80L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000fe00L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x00000009
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_ES__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_ES__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN_MASK 0x007f0000L
+#define SPI_SHADER_PGM_RSRC2_LS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_HS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_HS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN_MASK 0x01ff0000L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE_MASK 0x0000ff80L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__LDS_SIZE__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_LS_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_LS_VS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x007f0000L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x00000010
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000ff00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x000fe000L
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0x0000000d
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x00000007
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x00000000
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x00000008
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x00000009
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0x0000000a
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0x0000000b
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0x0000000c
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x00000006
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003eL
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x00000001
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000f0L
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x00000004
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000f00L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x00000008
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000f000L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0x0000000c
+#define SPI_SHADER_TBA_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TBA_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TBA_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TBA_LO_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_ES__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_GS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_HS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_LS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_PS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_HI_VS__MEM_BASE_MASK 0x000000ffL
+#define SPI_SHADER_TMA_HI_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_ES__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_ES__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_GS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_GS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_HS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_HS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_LS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_LS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_PS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_PS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_TMA_LO_VS__MEM_BASE_MASK 0xffffffffL
+#define SPI_SHADER_TMA_LO_VS__MEM_BASE__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x00000000
+#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xffffffffL
+#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x00000000
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000fL
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x00000000
+#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY_MASK 0x00000004L
+#define SPI_SLAVE_DEBUG_BUSY__ES_VTX_BUSY__SHIFT 0x00000002
+#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY_MASK 0x00200000L
+#define SPI_SLAVE_DEBUG_BUSY__EVENT_CNTL_BUSY__SHIFT 0x00000015
+#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY_MASK 0x00000008L
+#define SPI_SLAVE_DEBUG_BUSY__GS_VTX_BUSY__SHIFT 0x00000003
+#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY_MASK 0x00000002L
+#define SPI_SLAVE_DEBUG_BUSY__HS_VTX_BUSY__SHIFT 0x00000001
+#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY_MASK 0x00000001L
+#define SPI_SLAVE_DEBUG_BUSY__LS_VTX_BUSY__SHIFT 0x00000000
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY_MASK 0x00000200L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC00_BUSY__SHIFT 0x00000009
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY_MASK 0x00000400L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC01_BUSY__SHIFT 0x0000000a
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY_MASK 0x00000800L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC02_BUSY__SHIFT 0x0000000b
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY_MASK 0x00001000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC03_BUSY__SHIFT 0x0000000c
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY_MASK 0x00002000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC10_BUSY__SHIFT 0x0000000d
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY_MASK 0x00004000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC11_BUSY__SHIFT 0x0000000e
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY_MASK 0x00008000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC12_BUSY__SHIFT 0x0000000f
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY_MASK 0x00010000L
+#define SPI_SLAVE_DEBUG_BUSY__SGPR_WC13_BUSY__SHIFT 0x00000010
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY_MASK 0x00000020L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC00_BUSY__SHIFT 0x00000005
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY_MASK 0x00000040L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC01_BUSY__SHIFT 0x00000006
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY_MASK 0x00000080L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC10_BUSY__SHIFT 0x00000007
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY_MASK 0x00000100L
+#define SPI_SLAVE_DEBUG_BUSY__VGPR_WC11_BUSY__SHIFT 0x00000008
+#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY_MASK 0x00000010L
+#define SPI_SLAVE_DEBUG_BUSY__VS_VTX_BUSY__SHIFT 0x00000004
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY_MASK 0x00020000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER0_BUSY__SHIFT 0x00000011
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY_MASK 0x00040000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVEBUFFER1_BUSY__SHIFT 0x00000012
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY_MASK 0x00080000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC0_BUSY__SHIFT 0x00000013
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY_MASK 0x00100000L
+#define SPI_SLAVE_DEBUG_BUSY__WAVE_WC1_BUSY__SHIFT 0x00000014
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000ffffL
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x00000000
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xffff0000L
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x00000010
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000ffffL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x00000000
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xffff0000L
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x00000010
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01fff000L
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0x0000000c
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000fffL
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x00000000
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003eL
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x00000001
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x00000006
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000ffffL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x00000000
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000L
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x00000010
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffffL
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000ffffL
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x0000001e
+#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3fff0000L
+#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x00000010
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x0000001f
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xffffffffL
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x00000017
+#define SQ_BUF_RSRC_WORD3__ATC_MASK 0x01000000L
+#define SQ_BUF_RSRC_WORD3__ATC__SHIFT 0x00000018
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0x0000000f
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000e00L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x00000009
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x00000000
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x00000003
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001c0L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x00000006
+#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE_MASK 0x00180000L
+#define SQ_BUF_RSRC_WORD3__ELEMENT_SIZE__SHIFT 0x00000013
+#define SQ_BUF_RSRC_WORD3__HASH_ENABLE_MASK 0x02000000L
+#define SQ_BUF_RSRC_WORD3__HASH_ENABLE__SHIFT 0x00000019
+#define SQ_BUF_RSRC_WORD3__HEAP_MASK 0x04000000L
+#define SQ_BUF_RSRC_WORD3__HEAP__SHIFT 0x0000001a
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x00000015
+#define SQ_BUF_RSRC_WORD3__MTYPE_MASK 0x38000000L
+#define SQ_BUF_RSRC_WORD3__MTYPE__SHIFT 0x0000001b
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0x0000000c
+#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xc0000000L
+#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x0000001e
+#define SQC_CACHES__DATA_INVALIDATE_MASK 0x00000002L
+#define SQC_CACHES__DATA_INVALIDATE__SHIFT 0x00000001
+#define SQC_CACHES__INST_INVALIDATE_MASK 0x00000001L
+#define SQC_CACHES__INST_INVALIDATE__SHIFT 0x00000000
+#define SQC_CACHES__INVALIDATE_VOLATILE_MASK 0x00000004L
+#define SQC_CACHES__INVALIDATE_VOLATILE__SHIFT 0x00000002
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000cL
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x00000002
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x00000007
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x00000008
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x00000006
+#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
+#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x00000009
+#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
+#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0x0000000a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x00000000
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x00000004
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0x0000000b
+#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
+#define SQ_CONFIG__DEBUG_EN__SHIFT 0x00000008
+#define SQ_CONFIG__DISABLE_IB_DEP_CHECK_MASK 0x00000400L
+#define SQ_CONFIG__DISABLE_IB_DEP_CHECK__SHIFT 0x0000000a
+#define SQ_CONFIG__DISABLE_SCA_BYPASS_MASK 0x00000200L
+#define SQ_CONFIG__DISABLE_SCA_BYPASS__SHIFT 0x00000009
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0x0000000f
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0x0000000d
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0x0000000e
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0x0000000c
+#define SQ_CONFIG__ENABLE_SOFT_CLAUSE_MASK 0x00000800L
+#define SQ_CONFIG__ENABLE_SOFT_CLAUSE__SHIFT 0x0000000b
+#define SQ_CONFIG__UNUSED_MASK 0x000000ffL
+#define SQ_CONFIG__UNUSED__SHIFT 0x00000000
+#define SQC_SECDED_CNT__DATA_DED_MASK 0xff000000L
+#define SQC_SECDED_CNT__DATA_DED__SHIFT 0x00000018
+#define SQC_SECDED_CNT__DATA_SEC_MASK 0x00ff0000L
+#define SQC_SECDED_CNT__DATA_SEC__SHIFT 0x00000010
+#define SQC_SECDED_CNT__INST_DED_MASK 0x0000ff00L
+#define SQC_SECDED_CNT__INST_DED__SHIFT 0x00000008
+#define SQC_SECDED_CNT__INST_SEC_MASK 0x000000ffL
+#define SQC_SECDED_CNT__INST_SEC__SHIFT 0x00000000
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000ffL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0x000000ffL
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1_MASK 0x0000ff00L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX1__SHIFT 0x00000008
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST_MASK 0xff000000L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_HOST__SHIFT 0x00000018
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED_MASK 0x00ff0000L
+#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_IMMED__SHIFT 0x00000010
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD_MASK 0x0000000fL
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_CMD__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG_MASK 0x000000f0L
+#define SQ_DEBUG_STS_GLOBAL3__FIFO_LEVEL_HOST_REG__SHIFT 0x00000004
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x00000000
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_MSG_BUSY__SHIFT 0x00000001
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0_MASK 0x0000fff0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH0__SHIFT 0x00000004
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1_MASK 0x0fff0000L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SH1__SHIFT 0x00000010
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x00000000
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003f0L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x00000004
+#define SQ_DED_CNT__LDS_DED_MASK 0x0000003fL
+#define SQ_DED_CNT__LDS_DED__SHIFT 0x00000000
+#define SQ_DED_CNT__SGPR_DED_MASK 0x00001f00L
+#define SQ_DED_CNT__SGPR_DED__SHIFT 0x00000008
+#define SQ_DED_CNT__VGPR_DED_MASK 0x01ff0000L
+#define SQ_DED_CNT__VGPR_DED__SHIFT 0x00000010
+#define SQ_DED_INFO__SIMD_ID_MASK 0x00000030L
+#define SQ_DED_INFO__SIMD_ID__SHIFT 0x00000004
+#define SQ_DED_INFO__SOURCE_MASK 0x000001c0L
+#define SQ_DED_INFO__SOURCE__SHIFT 0x00000006
+#define SQ_DED_INFO__VM_ID_MASK 0x00001e00L
+#define SQ_DED_INFO__VM_ID__SHIFT 0x00000009
+#define SQ_DED_INFO__WAVE_ID_MASK 0x0000000fL
+#define SQ_DED_INFO__WAVE_ID__SHIFT 0x00000000
+#define SQ_DS_0__ENCODING_MASK 0xfc000000L
+#define SQ_DS_0__ENCODING__SHIFT 0x0000001a
+#define SQ_DS_0__GDS_MASK 0x00020000L
+#define SQ_DS_0__GDS__SHIFT 0x00000011
+#define SQ_DS_0__OFFSET0_MASK 0x000000ffL
+#define SQ_DS_0__OFFSET0__SHIFT 0x00000000
+#define SQ_DS_0__OFFSET1_MASK 0x0000ff00L
+#define SQ_DS_0__OFFSET1__SHIFT 0x00000008
+#define SQ_DS_0__OP_MASK 0x03fc0000L
+#define SQ_DS_0__OP__SHIFT 0x00000012
+#define SQ_DS_1__ADDR_MASK 0x000000ffL
+#define SQ_DS_1__ADDR__SHIFT 0x00000000
+#define SQ_DS_1__DATA0_MASK 0x0000ff00L
+#define SQ_DS_1__DATA0__SHIFT 0x00000008
+#define SQ_DS_1__DATA1_MASK 0x00ff0000L
+#define SQ_DS_1__DATA1__SHIFT 0x00000010
+#define SQ_DS_1__VDST_MASK 0xff000000L
+#define SQ_DS_1__VDST__SHIFT 0x00000018
+#define SQ_EXP_0__COMPR_MASK 0x00000400L
+#define SQ_EXP_0__COMPR__SHIFT 0x0000000a
+#define SQ_EXP_0__DONE_MASK 0x00000800L
+#define SQ_EXP_0__DONE__SHIFT 0x0000000b
+#define SQ_EXP_0__ENCODING_MASK 0xfc000000L
+#define SQ_EXP_0__ENCODING__SHIFT 0x0000001a
+#define SQ_EXP_0__EN_MASK 0x0000000fL
+#define SQ_EXP_0__EN__SHIFT 0x00000000
+#define SQ_EXP_0__TGT_MASK 0x000003f0L
+#define SQ_EXP_0__TGT__SHIFT 0x00000004
+#define SQ_EXP_0__VM_MASK 0x00001000L
+#define SQ_EXP_0__VM__SHIFT 0x0000000c
+#define SQ_EXP_1__VSRC0_MASK 0x000000ffL
+#define SQ_EXP_1__VSRC0__SHIFT 0x00000000
+#define SQ_EXP_1__VSRC1_MASK 0x0000ff00L
+#define SQ_EXP_1__VSRC1__SHIFT 0x00000008
+#define SQ_EXP_1__VSRC2_MASK 0x00ff0000L
+#define SQ_EXP_1__VSRC2__SHIFT 0x00000010
+#define SQ_EXP_1__VSRC3_MASK 0xff000000L
+#define SQ_EXP_1__VSRC3__SHIFT 0x00000018
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x00000010
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000fL
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x00000000
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000f00L
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x00000008
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000c0000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x00000012
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xffffffffL
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000ffL
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03f00000L
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000fff00L
+#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x00000008
+#define SQ_IMG_RSRC_WORD1__MTYPE_MASK 0xc0000000L
+#define SQ_IMG_RSRC_WORD1__MTYPE__SHIFT 0x0000001e
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3c000000L
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x0000001a
+#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0fffc000L
+#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0x0000000e
+#define SQ_IMG_RSRC_WORD2__INTERLACED_MASK 0x80000000L
+#define SQ_IMG_RSRC_WORD2__INTERLACED__SHIFT 0x0000001f
+#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
+#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x0000001c
+#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003fffL
+#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD3__ATC_MASK 0x08000000L
+#define SQ_IMG_RSRC_WORD3__ATC__SHIFT 0x0000001b
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000f000L
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0x0000000c
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000e00L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x00000009
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x00000003
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001c0L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x00000006
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000f0000L
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x00000010
+#define SQ_IMG_RSRC_WORD3__MTYPE_MASK 0x04000000L
+#define SQ_IMG_RSRC_WORD3__MTYPE__SHIFT 0x0000001a
+#define SQ_IMG_RSRC_WORD3__POW2_PAD_MASK 0x02000000L
+#define SQ_IMG_RSRC_WORD3__POW2_PAD__SHIFT 0x00000019
+#define SQ_IMG_RSRC_WORD3__TILING_INDEX_MASK 0x01f00000L
+#define SQ_IMG_RSRC_WORD3__TILING_INDEX__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xf0000000L
+#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x0000001c
+#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001fffL
+#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x07ffe000L
+#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0x0000000d
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001fffL
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD5__LAST_ARRAY_MASK 0x03ffe000L
+#define SQ_IMG_RSRC_WORD5__LAST_ARRAY__SHIFT 0x0000000d
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000ff000L
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0x0000000c
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x00000014
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000fffL
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x00000000
+#define SQ_IMG_RSRC_WORD6__UNUNSED_MASK 0xffe00000L
+#define SQ_IMG_RSRC_WORD6__UNUNSED__SHIFT 0x00000015
+#define SQ_IMG_RSRC_WORD7__UNUNSED_MASK 0xffffffffL
+#define SQ_IMG_RSRC_WORD7__UNUNSED__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07e00000L
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x00000015
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x00000010
+#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
+#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x00000003
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001c0L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x00000006
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0x0000000c
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x0000001d
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x00000014
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0x0000000f
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000e00L
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x00000009
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x00000013
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x0000001b
+#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00fff000L
+#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0x0000000c
+#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000fffL
+#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0f000000L
+#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x00000018
+#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xf0000000L
+#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL_MASK 0x20000000L
+#define SQ_IMG_SAMP_WORD2__DISABLE_LSB_CEIL__SHIFT 0x0000001d
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x0000001e
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003fffL
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000fc000L
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0x0000000e
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0c000000L
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x0000001a
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x0000001c
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x00000014
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00c00000L
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x00000016
+#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
+#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x00000018
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000fffL
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x00000000
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xc0000000L
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x0000001e
+#define SQ_IND_DATA__DATA_MASK 0xffffffffL
+#define SQ_IND_DATA__DATA__SHIFT 0x00000000
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0x0000000c
+#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
+#define SQ_IND_INDEX__FORCE_READ__SHIFT 0x0000000d
+#define SQ_IND_INDEX__INDEX_MASK 0xffff0000L
+#define SQ_IND_INDEX__INDEX__SHIFT 0x00000010
+#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
+#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0x0000000e
+#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
+#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x00000004
+#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000fc0L
+#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x00000006
+#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
+#define SQ_IND_INDEX__UNINDEXED__SHIFT 0x0000000f
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000fL
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x00000000
+#define SQ_INST__ENCODING_MASK 0xffffffffL
+#define SQ_INST__ENCODING__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP_MASK 0x00000010L
+#define SQ_INTERRUPT_WORD_AUTO__CMD_TIMESTAMP__SHIFT 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_AUTO__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW_MASK 0x00000020L
+#define SQ_INTERRUPT_WORD_AUTO__HOST_CMD_OVERFLOW__SHIFT 0x00000005
+#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW_MASK 0x00000040L
+#define SQ_INTERRUPT_WORD_AUTO__HOST_REG_OVERFLOW__SHIFT 0x00000006
+#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW_MASK 0x00000080L
+#define SQ_INTERRUPT_WORD_AUTO__IMMED_OVERFLOW__SHIFT 0x00000007
+#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP_MASK 0x00000008L
+#define SQ_INTERRUPT_WORD_AUTO__REG_TIMESTAMP__SHIFT 0x00000003
+#define SQ_INTERRUPT_WORD_AUTO__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_AUTO__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL_MASK 0x00000004L
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_BUF_FULL__SHIFT 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE_MASK 0x00000001L
+#define SQ_INTERRUPT_WORD_AUTO__THREAD_TRACE__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_AUTO__WLT_MASK 0x00000002L
+#define SQ_INTERRUPT_WORD_AUTO__WLT__SHIFT 0x00000001
+#define SQ_INTERRUPT_WORD_CMN__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_CMN__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_CMN__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_CMN__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_WAVE__CU_ID_MASK 0x00f00000L
+#define SQ_INTERRUPT_WORD_WAVE__CU_ID__SHIFT 0x00000014
+#define SQ_INTERRUPT_WORD_WAVE__DATA_MASK 0x000000ffL
+#define SQ_INTERRUPT_WORD_WAVE__DATA__SHIFT 0x00000000
+#define SQ_INTERRUPT_WORD_WAVE__ENCODING_MASK 0x0c000000L
+#define SQ_INTERRUPT_WORD_WAVE__ENCODING__SHIFT 0x0000001a
+#define SQ_INTERRUPT_WORD_WAVE__PRIV_MASK 0x00000200L
+#define SQ_INTERRUPT_WORD_WAVE__PRIV__SHIFT 0x00000009
+#define SQ_INTERRUPT_WORD_WAVE__SE_ID_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_WAVE__SE_ID__SHIFT 0x00000019
+#define SQ_INTERRUPT_WORD_WAVE__SH_ID_MASK 0x01000000L
+#define SQ_INTERRUPT_WORD_WAVE__SH_ID__SHIFT 0x00000018
+#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID_MASK 0x000c0000L
+#define SQ_INTERRUPT_WORD_WAVE__SIMD_ID__SHIFT 0x00000012
+#define SQ_INTERRUPT_WORD_WAVE__VM_ID_MASK 0x00003c00L
+#define SQ_INTERRUPT_WORD_WAVE__VM_ID__SHIFT 0x0000000a
+#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID_MASK 0x0003c000L
+#define SQ_INTERRUPT_WORD_WAVE__WAVE_ID__SHIFT 0x0000000e
+#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x00000002
+#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x00000001
+#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
+#define SQ_LB_CTR_CTRL__START__SHIFT 0x00000000
+#define SQ_LB_DATA_ALU_CYCLES__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_ALU_CYCLES__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_ALU_STALLS__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_ALU_STALLS__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_TEX_CYCLES__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_TEX_CYCLES__DATA__SHIFT 0x00000000
+#define SQ_LB_DATA_TEX_STALLS__DATA_MASK 0xffffffffL
+#define SQ_LB_DATA_TEX_STALLS__DATA__SHIFT 0x00000000
+#define SQ_MIMG_0__DA_MASK 0x00004000L
+#define SQ_MIMG_0__DA__SHIFT 0x0000000e
+#define SQ_MIMG_0__DMASK_MASK 0x00000f00L
+#define SQ_MIMG_0__DMASK__SHIFT 0x00000008
+#define SQ_MIMG_0__ENCODING_MASK 0xfc000000L
+#define SQ_MIMG_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MIMG_0__GLC_MASK 0x00002000L
+#define SQ_MIMG_0__GLC__SHIFT 0x0000000d
+#define SQ_MIMG_0__LWE_MASK 0x00020000L
+#define SQ_MIMG_0__LWE__SHIFT 0x00000011
+#define SQ_MIMG_0__OP_MASK 0x01fc0000L
+#define SQ_MIMG_0__OP__SHIFT 0x00000012
+#define SQ_MIMG_0__R128_MASK 0x00008000L
+#define SQ_MIMG_0__R128__SHIFT 0x0000000f
+#define SQ_MIMG_0__SLC_MASK 0x02000000L
+#define SQ_MIMG_0__SLC__SHIFT 0x00000019
+#define SQ_MIMG_0__TFE_MASK 0x00010000L
+#define SQ_MIMG_0__TFE__SHIFT 0x00000010
+#define SQ_MIMG_0__UNORM_MASK 0x00001000L
+#define SQ_MIMG_0__UNORM__SHIFT 0x0000000c
+#define SQ_MIMG_1__SRSRC_MASK 0x001f0000L
+#define SQ_MIMG_1__SRSRC__SHIFT 0x00000010
+#define SQ_MIMG_1__SSAMP_MASK 0x03e00000L
+#define SQ_MIMG_1__SSAMP__SHIFT 0x00000015
+#define SQ_MIMG_1__VADDR_MASK 0x000000ffL
+#define SQ_MIMG_1__VADDR__SHIFT 0x00000000
+#define SQ_MIMG_1__VDATA_MASK 0x0000ff00L
+#define SQ_MIMG_1__VDATA__SHIFT 0x00000008
+#define SQ_MTBUF_0__ADDR64_MASK 0x00008000L
+#define SQ_MTBUF_0__ADDR64__SHIFT 0x0000000f
+#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
+#define SQ_MTBUF_0__DFMT__SHIFT 0x00000013
+#define SQ_MTBUF_0__ENCODING_MASK 0xfc000000L
+#define SQ_MTBUF_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MTBUF_0__GLC_MASK 0x00004000L
+#define SQ_MTBUF_0__GLC__SHIFT 0x0000000e
+#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MTBUF_0__IDXEN__SHIFT 0x0000000d
+#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
+#define SQ_MTBUF_0__NFMT__SHIFT 0x00000017
+#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MTBUF_0__OFFEN__SHIFT 0x0000000c
+#define SQ_MTBUF_0__OFFSET_MASK 0x00000fffL
+#define SQ_MTBUF_0__OFFSET__SHIFT 0x00000000
+#define SQ_MTBUF_0__OP_MASK 0x00070000L
+#define SQ_MTBUF_0__OP__SHIFT 0x00000010
+#define SQ_MTBUF_1__SLC_MASK 0x00400000L
+#define SQ_MTBUF_1__SLC__SHIFT 0x00000016
+#define SQ_MTBUF_1__SOFFSET_MASK 0xff000000L
+#define SQ_MTBUF_1__SOFFSET__SHIFT 0x00000018
+#define SQ_MTBUF_1__SRSRC_MASK 0x001f0000L
+#define SQ_MTBUF_1__SRSRC__SHIFT 0x00000010
+#define SQ_MTBUF_1__TFE_MASK 0x00800000L
+#define SQ_MTBUF_1__TFE__SHIFT 0x00000017
+#define SQ_MTBUF_1__VADDR_MASK 0x000000ffL
+#define SQ_MTBUF_1__VADDR__SHIFT 0x00000000
+#define SQ_MTBUF_1__VDATA_MASK 0x0000ff00L
+#define SQ_MTBUF_1__VDATA__SHIFT 0x00000008
+#define SQ_MUBUF_0__ADDR64_MASK 0x00008000L
+#define SQ_MUBUF_0__ADDR64__SHIFT 0x0000000f
+#define SQ_MUBUF_0__ENCODING_MASK 0xfc000000L
+#define SQ_MUBUF_0__ENCODING__SHIFT 0x0000001a
+#define SQ_MUBUF_0__GLC_MASK 0x00004000L
+#define SQ_MUBUF_0__GLC__SHIFT 0x0000000e
+#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MUBUF_0__IDXEN__SHIFT 0x0000000d
+#define SQ_MUBUF_0__LDS_MASK 0x00010000L
+#define SQ_MUBUF_0__LDS__SHIFT 0x00000010
+#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MUBUF_0__OFFEN__SHIFT 0x0000000c
+#define SQ_MUBUF_0__OFFSET_MASK 0x00000fffL
+#define SQ_MUBUF_0__OFFSET__SHIFT 0x00000000
+#define SQ_MUBUF_0__OP_MASK 0x01fc0000L
+#define SQ_MUBUF_0__OP__SHIFT 0x00000012
+#define SQ_MUBUF_1__SLC_MASK 0x00400000L
+#define SQ_MUBUF_1__SLC__SHIFT 0x00000016
+#define SQ_MUBUF_1__SOFFSET_MASK 0xff000000L
+#define SQ_MUBUF_1__SOFFSET__SHIFT 0x00000018
+#define SQ_MUBUF_1__SRSRC_MASK 0x001f0000L
+#define SQ_MUBUF_1__SRSRC__SHIFT 0x00000010
+#define SQ_MUBUF_1__TFE_MASK 0x00800000L
+#define SQ_MUBUF_1__TFE__SHIFT 0x00000017
+#define SQ_MUBUF_1__VADDR_MASK 0x000000ffL
+#define SQ_MUBUF_1__VADDR__SHIFT 0x00000000
+#define SQ_MUBUF_1__VDATA_MASK 0x0000ff00L
+#define SQ_MUBUF_1__VDATA__SHIFT 0x00000008
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xf0000000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001ffL
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0f000000L
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x00000018
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00f00000L
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x00000014
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000f000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0x0000000c
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000f0000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x00000010
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x00000000
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001f00L
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x00000008
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x00000006
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0x0000000d
+#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
+#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x00000003
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x00000002
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x00000004
+#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
+#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x00000005
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x00000000
+#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
+#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x00000001
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x0000001b
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003fffL
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x00000000
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03ff0000L
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x00000010
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x0000001f
+#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3fff0000L
+#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x00000010
+#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003fffL
+#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x00000000
+#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xc0000000L
+#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x0000001e
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007fL
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x00000000
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x001ffc00L
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0x0000000a
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x00000007
+#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000f00L
+#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x00000008
+#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
+#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x0000001f
+#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
+#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x0000001e
+#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
+#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x0000001c
+#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003fL
+#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x00000000
+#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
+#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x0000001d
+#define SQ_SEC_CNT__LDS_SEC_MASK 0x0000003fL
+#define SQ_SEC_CNT__LDS_SEC__SHIFT 0x00000000
+#define SQ_SEC_CNT__SGPR_SEC_MASK 0x00001f00L
+#define SQ_SEC_CNT__SGPR_SEC__SHIFT 0x00000008
+#define SQ_SEC_CNT__VGPR_SEC_MASK 0x01ff0000L
+#define SQ_SEC_CNT__VGPR_SEC__SHIFT 0x00000010
+#define SQ_SMRD__ENCODING_MASK 0xf8000000L
+#define SQ_SMRD__ENCODING__SHIFT 0x0000001b
+#define SQ_SMRD__IMM_MASK 0x00000100L
+#define SQ_SMRD__IMM__SHIFT 0x00000008
+#define SQ_SMRD__OFFSET_MASK 0x000000ffL
+#define SQ_SMRD__OFFSET__SHIFT 0x00000000
+#define SQ_SMRD__OP_MASK 0x07c00000L
+#define SQ_SMRD__OP__SHIFT 0x00000016
+#define SQ_SMRD__SBASE_MASK 0x00007e00L
+#define SQ_SMRD__SBASE__SHIFT 0x00000009
+#define SQ_SMRD__SDST_MASK 0x003f8000L
+#define SQ_SMRD__SDST__SHIFT 0x0000000f
+#define SQ_SOP1__ENCODING_MASK 0xff800000L
+#define SQ_SOP1__ENCODING__SHIFT 0x00000017
+#define SQ_SOP1__OP_MASK 0x0000ff00L
+#define SQ_SOP1__OP__SHIFT 0x00000008
+#define SQ_SOP1__SDST_MASK 0x007f0000L
+#define SQ_SOP1__SDST__SHIFT 0x00000010
+#define SQ_SOP1__SSRC0_MASK 0x000000ffL
+#define SQ_SOP1__SSRC0__SHIFT 0x00000000
+#define SQ_SOP2__ENCODING_MASK 0xc0000000L
+#define SQ_SOP2__ENCODING__SHIFT 0x0000001e
+#define SQ_SOP2__OP_MASK 0x3f800000L
+#define SQ_SOP2__OP__SHIFT 0x00000017
+#define SQ_SOP2__SDST_MASK 0x007f0000L
+#define SQ_SOP2__SDST__SHIFT 0x00000010
+#define SQ_SOP2__SSRC0_MASK 0x000000ffL
+#define SQ_SOP2__SSRC0__SHIFT 0x00000000
+#define SQ_SOP2__SSRC1_MASK 0x0000ff00L
+#define SQ_SOP2__SSRC1__SHIFT 0x00000008
+#define SQ_SOPC__ENCODING_MASK 0xff800000L
+#define SQ_SOPC__ENCODING__SHIFT 0x00000017
+#define SQ_SOPC__OP_MASK 0x007f0000L
+#define SQ_SOPC__OP__SHIFT 0x00000010
+#define SQ_SOPC__SSRC0_MASK 0x000000ffL
+#define SQ_SOPC__SSRC0__SHIFT 0x00000000
+#define SQ_SOPC__SSRC1_MASK 0x0000ff00L
+#define SQ_SOPC__SSRC1__SHIFT 0x00000008
+#define SQ_SOPK__ENCODING_MASK 0xf0000000L
+#define SQ_SOPK__ENCODING__SHIFT 0x0000001c
+#define SQ_SOPK__OP_MASK 0x0f800000L
+#define SQ_SOPK__OP__SHIFT 0x00000017
+#define SQ_SOPK__SDST_MASK 0x007f0000L
+#define SQ_SOPK__SDST__SHIFT 0x00000010
+#define SQ_SOPK__SIMM16_MASK 0x0000ffffL
+#define SQ_SOPK__SIMM16__SHIFT 0x00000000
+#define SQ_SOPP__ENCODING_MASK 0xff800000L
+#define SQ_SOPP__ENCODING__SHIFT 0x00000017
+#define SQ_SOPP__OP_MASK 0x007f0000L
+#define SQ_SOPP__OP__SHIFT 0x00000010
+#define SQ_SOPP__SIMM16_MASK 0x0000ffffL
+#define SQ_SOPP__SIMM16__SHIFT 0x00000000
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000ffffL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x00000000
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xffff0000L
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_BASE2__ATC_MASK 0x00000010L
+#define SQ_THREAD_TRACE_BASE2__ATC__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
+#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001fL
+#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MASK__RANDOM_SEED_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_MASK__RANDOM_SEED__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x00000007
+#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
+#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x00000019
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x00000017
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x0000001b
+#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001c0000L
+#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x00000012
+#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000e00L
+#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001c0L
+#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
+#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
+#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
+#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
+#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x00000003
+#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
+#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x00000015
+#define SQ_THREAD_TRACE_MODE__PRIV_MASK 0x04000000L
+#define SQ_THREAD_TRACE_MODE__PRIV__SHIFT 0x0000001a
+#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
+#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003fffffL
+#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x00070000L
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x00000007L
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
+#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x0000001f
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x00000018
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00ff0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0x0000fc00L
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x000001c0L
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0x0000f000L
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001e0L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00ffffffL
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_INST__SIZE_MASK 0x00000800L
+#define SQ_THREAD_TRACE_WORD_INST__SIZE__SHIFT 0x0000000b
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0x0000ffffL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x000001e0L
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x00000008
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000c00L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000c0000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x00000012
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x00000014
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00c00000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x00000016
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x00000018
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0c000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x0000001a
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0x000000c0L
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01fff000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0x0000000c
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xfe000000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x00000019
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000c00L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003fL
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007ffc0L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xfff80000L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x00000013
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x00000007
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x00000009
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0x0000000f
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001c00L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xffff0000L
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xffffffffL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1fc00000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x00000016
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003c0L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x00000006
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001f0000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x00000010
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x00000005
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000c000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0x0000000e
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xe0000000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x0000001d
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x00000015
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x00000004
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x0000000fL
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x00000000
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x00003c00L
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0x0000000a
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xc0000000L
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x0000001e
+#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3fffffffL
+#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x00000000
+#define SQ_TIME_HI__TIME_MASK 0xffffffffL
+#define SQ_TIME_HI__TIME__SHIFT 0x00000000
+#define SQ_TIME_LO__TIME_MASK 0xffffffffL
+#define SQ_TIME_LO__TIME__SHIFT 0x00000000
+#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
+#define SQ_VINTRP__ATTRCHAN__SHIFT 0x00000008
+#define SQ_VINTRP__ATTR_MASK 0x0000fc00L
+#define SQ_VINTRP__ATTR__SHIFT 0x0000000a
+#define SQ_VINTRP__ENCODING_MASK 0xfc000000L
+#define SQ_VINTRP__ENCODING__SHIFT 0x0000001a
+#define SQ_VINTRP__OP_MASK 0x00030000L
+#define SQ_VINTRP__OP__SHIFT 0x00000010
+#define SQ_VINTRP__VDST_MASK 0x03fc0000L
+#define SQ_VINTRP__VDST__SHIFT 0x00000012
+#define SQ_VINTRP__VSRC_MASK 0x000000ffL
+#define SQ_VINTRP__VSRC__SHIFT 0x00000000
+#define SQ_VOP1__ENCODING_MASK 0xfe000000L
+#define SQ_VOP1__ENCODING__SHIFT 0x00000019
+#define SQ_VOP1__OP_MASK 0x0001fe00L
+#define SQ_VOP1__OP__SHIFT 0x00000009
+#define SQ_VOP1__SRC0_MASK 0x000001ffL
+#define SQ_VOP1__SRC0__SHIFT 0x00000000
+#define SQ_VOP1__VDST_MASK 0x01fe0000L
+#define SQ_VOP1__VDST__SHIFT 0x00000011
+#define SQ_VOP2__ENCODING_MASK 0x80000000L
+#define SQ_VOP2__ENCODING__SHIFT 0x0000001f
+#define SQ_VOP2__OP_MASK 0x7e000000L
+#define SQ_VOP2__OP__SHIFT 0x00000019
+#define SQ_VOP2__SRC0_MASK 0x000001ffL
+#define SQ_VOP2__SRC0__SHIFT 0x00000000
+#define SQ_VOP2__VDST_MASK 0x01fe0000L
+#define SQ_VOP2__VDST__SHIFT 0x00000011
+#define SQ_VOP2__VSRC1_MASK 0x0001fe00L
+#define SQ_VOP2__VSRC1__SHIFT 0x00000009
+#define SQ_VOP3_0__ABS_MASK 0x00000700L
+#define SQ_VOP3_0__ABS__SHIFT 0x00000008
+#define SQ_VOP3_0__CLAMP_MASK 0x00000800L
+#define SQ_VOP3_0__CLAMP__SHIFT 0x0000000b
+#define SQ_VOP3_0__ENCODING_MASK 0xfc000000L
+#define SQ_VOP3_0__ENCODING__SHIFT 0x0000001a
+#define SQ_VOP3_0__OP_MASK 0x03fe0000L
+#define SQ_VOP3_0__OP__SHIFT 0x00000011
+#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xfc000000L
+#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x0000001a
+#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03fe0000L
+#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x00000011
+#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007f00L
+#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x00000008
+#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000ffL
+#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x00000000
+#define SQ_VOP3_0__VDST_MASK 0x000000ffL
+#define SQ_VOP3_0__VDST__SHIFT 0x00000000
+#define SQ_VOP3_1__NEG_MASK 0xe0000000L
+#define SQ_VOP3_1__NEG__SHIFT 0x0000001d
+#define SQ_VOP3_1__OMOD_MASK 0x18000000L
+#define SQ_VOP3_1__OMOD__SHIFT 0x0000001b
+#define SQ_VOP3_1__SRC0_MASK 0x000001ffL
+#define SQ_VOP3_1__SRC0__SHIFT 0x00000000
+#define SQ_VOP3_1__SRC1_MASK 0x0003fe00L
+#define SQ_VOP3_1__SRC1__SHIFT 0x00000009
+#define SQ_VOP3_1__SRC2_MASK 0x07fc0000L
+#define SQ_VOP3_1__SRC2__SHIFT 0x00000012
+#define SQ_VOPC__ENCODING_MASK 0xfe000000L
+#define SQ_VOPC__ENCODING__SHIFT 0x00000019
+#define SQ_VOPC__OP_MASK 0x01fe0000L
+#define SQ_VOPC__OP__SHIFT 0x00000011
+#define SQ_VOPC__SRC0_MASK 0x000001ffL
+#define SQ_VOPC__SRC0__SHIFT 0x00000000
+#define SQ_VOPC__VSRC1_MASK 0x0001fe00L
+#define SQ_VOPC__VSRC1__SHIFT 0x00000009
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xffffffffL
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x00000000
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xffffffffL
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x00000000
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x003f0000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x00000010
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0f000000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x00000018
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003fL
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x00000000
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00003f00L
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x00000008
+#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000f00L
+#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x00000008
+#define SQ_WAVE_HW_ID__ME_ID_MASK 0xc0000000L
+#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x0000001e
+#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000c0L
+#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x00000006
+#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
+#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x00000018
+#define SQ_WAVE_HW_ID__SE_ID_MASK 0x00002000L
+#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0x0000000d
+#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
+#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0x0000000c
+#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x00000004
+#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
+#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x0000001b
+#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000f0000L
+#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x00000010
+#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00f00000L
+#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x00000014
+#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000fL
+#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x00000000
+#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x00c00000L
+#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x00000016
+#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x06000000L
+#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x00000019
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x00000008
+#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
+#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x00000000
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000c00L
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0x0000000a
+#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x00070000L
+#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x00000010
+#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x01000000L
+#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x00000018
+#define SQ_WAVE_IB_DBG0__KILL_MASK 0x08000000L
+#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x0000001b
+#define SQ_WAVE_IB_DBG0__MISC_CNT_MASK 0x00380000L
+#define SQ_WAVE_IB_DBG0__MISC_CNT__SHIFT 0x00000013
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x10000000L
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x0000001c
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x00000004
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000e0L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x00000005
+#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
+#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x00000003
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x00000004
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00001f00L
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x00000008
+#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x0000e000L
+#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0x0000000d
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000fL
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x00000000
+#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xffffffffL
+#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x00000000
+#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xffffffffL
+#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x00000000
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000ffL
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x00000000
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001ff000L
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0x0000000c
+#define SQ_WAVE_M0__M0_MASK 0xffffffffL
+#define SQ_WAVE_M0__M0__SHIFT 0x00000000
+#define SQ_WAVE_MODE__CSP_MASK 0xe0000000L
+#define SQ_WAVE_MODE__CSP__SHIFT 0x0000001d
+#define SQ_WAVE_MODE__DEBUG_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__DEBUG_EN__SHIFT 0x0000000b
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x00000008
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x0007f000L
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0x0000000c
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000f0L
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x00000004
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000fL
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x00000000
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x00000009
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0x0000000a
+#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
+#define SQ_WAVE_MODE__VSKIP__SHIFT 0x0000001c
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x000000ffL
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x00000000
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xffffffffL
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x00000000
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x00000015
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x00000014
+#define SQ_WAVE_STATUS__DATA_ATC_MASK 0x00400000L
+#define SQ_WAVE_STATUS__DATA_ATC__SHIFT 0x00000016
+#define SQ_WAVE_STATUS__DISPATCH_CACHE_CTRL_MASK 0x07000000L
+#define SQ_WAVE_STATUS__DISPATCH_CACHE_CTRL__SHIFT 0x00000018
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x00000011
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x00000009
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x00000008
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__HALT__SHIFT 0x0000000d
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0x0000000c
+#define SQ_WAVE_STATUS__INST_ATC_MASK 0x00800000L
+#define SQ_WAVE_STATUS__INST_ATC__SHIFT 0x00000017
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0x0000000b
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x0000001b
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x00000013
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x00000005
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x00000000
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x00000012
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x00000001
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x00000006
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0x0000000e
+#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0x0000000f
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x00000007
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x00000010
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0x0000000a
+#define SQ_WAVE_STATUS__WAVE_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__WAVE_PRIO__SHIFT 0x00000003
+#define SQ_WAVE_TBA_HI__ADDR_HI_MASK 0x000000ffL
+#define SQ_WAVE_TBA_HI__ADDR_HI__SHIFT 0x00000000
+#define SQ_WAVE_TBA_LO__ADDR_LO_MASK 0xffffffffL
+#define SQ_WAVE_TBA_LO__ADDR_LO__SHIFT 0x00000000
+#define SQ_WAVE_TMA_HI__ADDR_HI_MASK 0x000000ffL
+#define SQ_WAVE_TMA_HI__ADDR_HI__SHIFT 0x00000000
+#define SQ_WAVE_TMA_LO__ADDR_LO_MASK 0xffffffffL
+#define SQ_WAVE_TMA_LO__ADDR_LO__SHIFT 0x00000000
+#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xe0000000L
+#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x0000001d
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003f0000L
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x00000010
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x0000007fL
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x00000000
+#define SQ_WAVE_TTMP0__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP10__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP11__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP1__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP2__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP3__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP4__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP5__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP6__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP7__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP8__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x00000000
+#define SQ_WAVE_TTMP9__DATA_MASK 0xffffffffL
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x00000000
+#define SX_DEBUG_1__DEBUG_DATA_MASK 0xffffff80L
+#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0x00000007
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007fL
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x00000019
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID_MASK 0x01000000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_READ_VALID__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID_MASK 0x00200000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_READ_VALID__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID_MASK 0x00040000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_READ_VALID__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID_MASK 0x00008000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_READ_VALID__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x00000400L
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x00000800L
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x00000080L
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x00000100L
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x00000010L
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x00000020L
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x00000002L
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x00000004L
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_2__COL_SCBD_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x00000019
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x00000018
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x00000017
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x00000016
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x00000015
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x00000014
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x00000000
+#define SX_DEBUG_BUSY_4__RESERVED_MASK 0xfe000000L
+#define SX_DEBUG_BUSY_4__RESERVED__SHIFT 0x00000019
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x80000000L
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0x0000001f
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x40000000L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x0000001e
+#define SX_DEBUG_BUSY__PA_SX_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY__PA_SX_BUSY__SHIFT 0x00000002
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x08000000L
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x0000001b
+#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL0_BUSY__SHIFT 0x00000013
+#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL1_BUSY__SHIFT 0x00000012
+#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL2_BUSY__SHIFT 0x00000011
+#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY__POS_BANK0VAL3_BUSY__SHIFT 0x00000010
+#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL0_BUSY__SHIFT 0x0000000f
+#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL1_BUSY__SHIFT 0x0000000e
+#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL2_BUSY__SHIFT 0x0000000d
+#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY__POS_BANK1VAL3_BUSY__SHIFT 0x0000000c
+#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY__POS_BANK2VAL0_BUSY__SHIFT 0x0000000b
+#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY__POS_BANK2VAL1_BUSY__SHIFT 0x0000000a
+#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY__POS_BANK2VAL2_BUSY__SHIFT 0x00000009
+#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY__POS_BANK2VAL3_BUSY__SHIFT 0x00000008
+#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY__POS_BANK3VAL0_BUSY__SHIFT 0x00000007
+#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY__POS_BANK3VAL1_BUSY__SHIFT 0x00000006
+#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY__POS_BANK3VAL2_BUSY__SHIFT 0x00000005
+#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY__POS_BANK3VAL3_BUSY__SHIFT 0x00000004
+#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS_MASK 0x00000001L
+#define SX_DEBUG_BUSY__POS_FREE_OR_VALIDS__SHIFT 0x00000000
+#define SX_DEBUG_BUSY__POS_INMUX_VALID_MASK 0x00100000L
+#define SX_DEBUG_BUSY__POS_INMUX_VALID__SHIFT 0x00000014
+#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY__POS_REQUESTER_BUSY__SHIFT 0x00000001
+#define SX_DEBUG_BUSY__POS_SCBD_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY__POS_SCBD_BUSY__SHIFT 0x00000003
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x20000000L
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x0000001d
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x10000000L
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x0000001c
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1_MASK 0x04000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ1__SHIFT 0x0000001a
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2_MASK 0x02000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ2__SHIFT 0x00000019
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3_MASK 0x01000000L
+#define SX_DEBUG_BUSY__WRCTRL0_VALIDQ3__SHIFT 0x00000018
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1_MASK 0x00800000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ1__SHIFT 0x00000017
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2_MASK 0x00400000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ2__SHIFT 0x00000016
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3_MASK 0x00200000L
+#define SX_DEBUG_BUSY__WRCTRL1_VALIDQ3__SHIFT 0x00000015
+#define SXIFCCG_DEBUG_REG0__point_address_MASK 0x000001c0L
+#define SXIFCCG_DEBUG_REG0__point_address__SHIFT 0x00000006
+#define SXIFCCG_DEBUG_REG0__position_address_MASK 0x0000003fL
+#define SXIFCCG_DEBUG_REG0__position_address__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance_MASK 0x80000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_advance__SHIFT 0x0000001f
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc_MASK 0x40000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_inc__SHIFT 0x0000001e
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel_MASK 0x0c000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_aux_sel__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci_MASK 0x03ff0000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_pci__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask_MASK 0x0000f000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_req_mask__SHIFT 0x0000000c
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id_MASK 0x30000000L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_sp_id__SHIFT 0x0000001c
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx_MASK 0x00000e00L
+#define SXIFCCG_DEBUG_REG0__sx_pending_rd_state_var_indx__SHIFT 0x00000009
+#define SXIFCCG_DEBUG_REG1__aux_sel_MASK 0x00300000L
+#define SXIFCCG_DEBUG_REG1__aux_sel__SHIFT 0x00000014
+#define SXIFCCG_DEBUG_REG1__available_positions_MASK 0x0000007fL
+#define SXIFCCG_DEBUG_REG1__available_positions__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0_MASK 0xf0000000L
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_0__SHIFT 0x0000001c
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1_MASK 0x0f000000L
+#define SXIFCCG_DEBUG_REG1__pasx_req_cnt_1__SHIFT 0x00000018
+#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp_MASK 0x000f0000L
+#define SXIFCCG_DEBUG_REG1__statevar_bits_disable_sp__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena_MASK 0x00008000L
+#define SXIFCCG_DEBUG_REG1__statevar_bits_vs_out_misc_vec_ena__SHIFT 0x0000000f
+#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents_MASK 0x00007c00L
+#define SXIFCCG_DEBUG_REG1__sx_pending_fifo_contents__SHIFT 0x0000000a
+#define SXIFCCG_DEBUG_REG1__sx_receive_indx_MASK 0x00000380L
+#define SXIFCCG_DEBUG_REG1__sx_receive_indx__SHIFT 0x00000007
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0_MASK 0x00800000L
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_0__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1_MASK 0x00400000L
+#define SXIFCCG_DEBUG_REG1__sx_to_pa_empty_1__SHIFT 0x00000016
+#define SXIFCCG_DEBUG_REG2__param_cache_base_MASK 0x0000007fL
+#define SXIFCCG_DEBUG_REG2__param_cache_base__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded_MASK 0x00008000L
+#define SXIFCCG_DEBUG_REG2__req_active_verts_loaded__SHIFT 0x0000000f
+#define SXIFCCG_DEBUG_REG2__req_active_verts_MASK 0x007f0000L
+#define SXIFCCG_DEBUG_REG2__req_active_verts__SHIFT 0x00000010
+#define SXIFCCG_DEBUG_REG2__sx_aux_MASK 0x00000180L
+#define SXIFCCG_DEBUG_REG2__sx_aux__SHIFT 0x00000007
+#define SXIFCCG_DEBUG_REG2__sx_request_indx_MASK 0x00007e00L
+#define SXIFCCG_DEBUG_REG2__sx_request_indx__SHIFT 0x00000009
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts_MASK 0xfc000000L
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_active_verts__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx_MASK 0x03800000L
+#define SXIFCCG_DEBUG_REG2__vgt_to_ccgen_state_var_indx__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO_MASK 0x000000ffL
+#define SXIFCCG_DEBUG_REG3__ALWAYS_ZERO__SHIFT 0x00000000
+#define SXIFCCG_DEBUG_REG3__available_positions_MASK 0x001fc000L
+#define SXIFCCG_DEBUG_REG3__available_positions__SHIFT 0x0000000e
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full_MASK 0x20000000L
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_fifo_full__SHIFT 0x0000001d
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write_MASK 0x80000000L
+#define SXIFCCG_DEBUG_REG3__ccgen_to_clipcc_write__SHIFT 0x0000001f
+#define SXIFCCG_DEBUG_REG3__current_state_MASK 0x00600000L
+#define SXIFCCG_DEBUG_REG3__current_state__SHIFT 0x00000015
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena_MASK 0x00002000L
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist0_vec_ena__SHIFT 0x0000000d
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena_MASK 0x00001000L
+#define SXIFCCG_DEBUG_REG3__statevar_bits_vs_out_ccdist1_vec_ena__SHIFT 0x0000000c
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty_MASK 0x02000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_empty__SHIFT 0x00000019
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full_MASK 0x04000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_full__SHIFT 0x0000001a
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write_MASK 0x40000000L
+#define SXIFCCG_DEBUG_REG3__sx0_receive_fifo_write__SHIFT 0x0000001e
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty_MASK 0x00800000L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_empty__SHIFT 0x00000017
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable_MASK 0x00000f00L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_entriesavailable__SHIFT 0x00000008
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_full_MASK 0x01000000L
+#define SXIFCCG_DEBUG_REG3__vertex_fifo_full__SHIFT 0x00000018
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty_MASK 0x08000000L
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_empty__SHIFT 0x0000001b
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full_MASK 0x10000000L
+#define SXIFCCG_DEBUG_REG3__vgt_to_ccgen_fifo_full__SHIFT 0x0000001c
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define SX_PERFCOUNTER0_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2_MASK 0x000003ffL
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT2__SHIFT 0x00000000
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3_MASK 0x000ffc00L
+#define SX_PERFCOUNTER1_SELECT1__PERFCOUNTER_SELECT3__SHIFT 0x0000000a
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER2_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1_MASK 0x000ffc00L
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT1__SHIFT 0x0000000a
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT_MASK 0x000000ffL
+#define SX_PERFCOUNTER3_SELECT__PERFCOUNTER_SELECT__SHIFT 0x00000000
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xffffffffL
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x00000000
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000ffL
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x00000000
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001f0000L
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x00000010
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x00000010
+#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000e000L
+#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0x0000000d
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xffc00000L
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x00000016
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xffffffffL
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x00000000
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000ffL
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x00000000
+#define TA_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define TA_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define TA_DEBUG_INDEX__INDEX_MASK 0x0000001fL
+#define TA_DEBUG_INDEX__INDEX__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000ffL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003fc00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TA_SCRATCH__SCRATCH_MASK 0xffffffffL
+#define TA_SCRATCH__SCRATCH__SHIFT 0x00000000
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__AL_BUSY__SHIFT 0x0000001e
+#define TA_STATUS__BUSY_MASK 0x80000000L
+#define TA_STATUS__BUSY__SHIFT 0x0000001f
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__FA_BUSY__SHIFT 0x0000001d
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x00000015
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x00000014
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x00000016
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__FG_BUSY__SHIFT 0x00000019
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0x0000000d
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0x0000000c
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0x0000000e
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__FL_BUSY__SHIFT 0x0000001b
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x00000011
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x00000010
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x00000012
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__IN_BUSY__SHIFT 0x00000018
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__LA_BUSY__SHIFT 0x0000001a
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__TA_BUSY__SHIFT 0x0000001c
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000fL
+#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define TCC_CTRL__CACHE_SIZE__SHIFT 0x00000000
+#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000f0000L
+#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x00000010
+#define TCC_CTRL__RATE_MASK 0x0000000cL
+#define TCC_CTRL__RATE__SHIFT 0x00000002
+#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000f000L
+#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0x0000000c
+#define TCC_CTRL__WB_OR_INV_ALL_VMIDS_MASK 0x00100000L
+#define TCC_CTRL__WB_OR_INV_ALL_VMIDS__SHIFT 0x00000014
+#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000f0L
+#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x00000004
+#define TCC_EDC_COUNTER__DED_COUNT_MASK 0x000f0000L
+#define TCC_EDC_COUNTER__DED_COUNT__SHIFT 0x00000010
+#define TCC_EDC_COUNTER__SEC_COUNT_MASK 0x0000000fL
+#define TCC_EDC_COUNTER__SEC_COUNT__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0f000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x00000018
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xf0000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00ff0000L
+#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x00000010
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000ffffL
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x00000000
+#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xff000000L
+#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x00000018
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x00000000
+#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001feL
+#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x00000001
+#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
+#define TCI_STATUS__TCI_BUSY__SHIFT 0x00000000
+#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001c0L
+#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x00000006
+#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
+#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x00000004
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000fL
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x00000000
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x00000009
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x00000008
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x00000018
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x00000000
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x00000010
+#define TCP_CHAN_STEER_HI__CHAN8_MASK 0x0000000fL
+#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x00000000
+#define TCP_CHAN_STEER_HI__CHAN9_MASK 0x000000f0L
+#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x00000004
+#define TCP_CHAN_STEER_HI__CHANA_MASK 0x00000f00L
+#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x00000008
+#define TCP_CHAN_STEER_HI__CHANB_MASK 0x0000f000L
+#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0x0000000c
+#define TCP_CHAN_STEER_HI__CHANC_MASK 0x000f0000L
+#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x00000010
+#define TCP_CHAN_STEER_HI__CHAND_MASK 0x00f00000L
+#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x00000014
+#define TCP_CHAN_STEER_HI__CHANE_MASK 0x0f000000L
+#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x00000018
+#define TCP_CHAN_STEER_HI__CHANF_MASK 0xf0000000L
+#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x0000001c
+#define TCP_CHAN_STEER_LO__CHAN0_MASK 0x0000000fL
+#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x00000000
+#define TCP_CHAN_STEER_LO__CHAN1_MASK 0x000000f0L
+#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x00000004
+#define TCP_CHAN_STEER_LO__CHAN2_MASK 0x00000f00L
+#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x00000008
+#define TCP_CHAN_STEER_LO__CHAN3_MASK 0x0000f000L
+#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0x0000000c
+#define TCP_CHAN_STEER_LO__CHAN4_MASK 0x000f0000L
+#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x00000010
+#define TCP_CHAN_STEER_LO__CHAN5_MASK 0x00f00000L
+#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x00000014
+#define TCP_CHAN_STEER_LO__CHAN6_MASK 0x0f000000L
+#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x00000018
+#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xf0000000L
+#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x0000001c
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x0000001c
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x00000005
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x00000004
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0fc00000L
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x00000016
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001f8000L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0x0000000f
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x00000000
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x00000001
+#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000L
+#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x0000001d
+#define TCP_CNTL__L1_SIZE_MASK 0x0000000cL
+#define TCP_CNTL__L1_SIZE__SHIFT 0x00000002
+#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000003ffL
+#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x00000000
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007f0000L
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x00000010
+#define TCP_CREDIT__TD_CREDIT_MASK 0xe0000000L
+#define TCP_CREDIT__TD_CREDIT__SHIFT 0x0000001d
+#define TCP_EDC_COUNTER__DED_COUNT_MASK 0x000f0000L
+#define TCP_EDC_COUNTER__DED_COUNT__SHIFT 0x00000010
+#define TCP_EDC_COUNTER__SEC_COUNT_MASK 0x0000000fL
+#define TCP_EDC_COUNTER__SEC_COUNT__SHIFT 0x00000000
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+#define TCP_INVALIDATE__START__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x00000000
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000ff0L
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x00000004
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000fL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x00000000
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x0000001f
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x0000001e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x0000001d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x0000001c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x0000001b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x0000001a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x00000019
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x00000018
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x00000014
+#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
+#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x00000009
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x00000013
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x00000010
+#define TD_CNTL__LD_FLOAT_MODE_MASK 0x00040000L
+#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x00000012
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0x0000000b
+#define TD_CNTL__PAD_STALL_EN_MASK 0x00000100L
+#define TD_CNTL__PAD_STALL_EN__SHIFT 0x00000008
+#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x00008000L
+#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0x0000000f
+#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
+#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x00000000
+#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x00000030L
+#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x00000004
+#define TD_DEBUG_DATA__DATA_MASK 0x00ffffffL
+#define TD_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define TD_DEBUG_INDEX__INDEX_MASK 0x0000001fL
+#define TD_DEBUG_INDEX__INDEX__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000ffL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003fc00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003fc00L
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define TD_SCRATCH__SCRATCH_MASK 0xffffffffL
+#define TD_SCRATCH__SCRATCH__SHIFT 0x00000000
+#define TD_STATUS__BUSY_MASK 0x80000000L
+#define TD_STATUS__BUSY__SHIFT 0x0000001f
+#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE_MASK 0x000f0000L
+#define USER_SQC_BANK_DISABLE__SQC0_BANK_DISABLE__SHIFT 0x00000010
+#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE_MASK 0x00f00000L
+#define USER_SQC_BANK_DISABLE__SQC1_BANK_DISABLE__SHIFT 0x00000014
+#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE_MASK 0x0f000000L
+#define USER_SQC_BANK_DISABLE__SQC2_BANK_DISABLE__SHIFT 0x00000018
+#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE_MASK 0xf0000000L
+#define USER_SQC_BANK_DISABLE__SQC3_BANK_DISABLE__SHIFT 0x0000001c
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000c0L
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x00000006
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x00000000
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0x0000000b
+#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001f0000L
+#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x00000010
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0x0000000c
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0x0000000d
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x00000009
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x00000005
+#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
+#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x00000000
+#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
+#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x00000007
+#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
+#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x00000008
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x00000002
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x00000001
+#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
+#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x00000006
+#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
+#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x00000003
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x00000009
+#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
+#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x00000004
+#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
+#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x00000005
+#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX_MASK 0x0000003fL
+#define VGT_DEBUG_CNTL__VGT_DEBUG_INDX__SHIFT 0x00000000
+#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B_MASK 0x00000040L
+#define VGT_DEBUG_CNTL__VGT_DEBUG_SEL_BUS_B__SHIFT 0x00000006
+#define VGT_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define VGT_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define VGT_DEBUG_REG0__cm_busy_MASK 0x00008000L
+#define VGT_DEBUG_REG0__cm_busy__SHIFT 0x0000000f
+#define VGT_DEBUG_REG0__combined_out_busy_MASK 0x00200000L
+#define VGT_DEBUG_REG0__combined_out_busy__SHIFT 0x00000015
+#define VGT_DEBUG_REG0__core_clk_busy_MASK 0x04000000L
+#define VGT_DEBUG_REG0__core_clk_busy__SHIFT 0x0000001a
+#define VGT_DEBUG_REG0__frmt_busy_MASK 0x00020000L
+#define VGT_DEBUG_REG0__frmt_busy__SHIFT 0x00000011
+#define VGT_DEBUG_REG0__gog_busy_MASK 0x00010000L
+#define VGT_DEBUG_REG0__gog_busy__SHIFT 0x00000010
+#define VGT_DEBUG_REG0__gs_busy_MASK 0x00001000L
+#define VGT_DEBUG_REG0__gs_busy__SHIFT 0x0000000c
+#define VGT_DEBUG_REG0__gs_clk_busy_MASK 0x08000000L
+#define VGT_DEBUG_REG0__gs_clk_busy__SHIFT 0x0000001b
+#define VGT_DEBUG_REG0__pa_interfaces_busy_MASK 0x00800000L
+#define VGT_DEBUG_REG0__pa_interfaces_busy__SHIFT 0x00000017
+#define VGT_DEBUG_REG0__pi_busy_MASK 0x00000100L
+#define VGT_DEBUG_REG0__pi_busy__SHIFT 0x00000008
+#define VGT_DEBUG_REG0__pt_pi_busy_MASK 0x00000400L
+#define VGT_DEBUG_REG0__pt_pi_busy__SHIFT 0x0000000a
+#define VGT_DEBUG_REG0__rcm_busy_MASK 0x00002000L
+#define VGT_DEBUG_REG0__rcm_busy__SHIFT 0x0000000d
+#define VGT_DEBUG_REG0__reg_clk_busy_MASK 0x01000000L
+#define VGT_DEBUG_REG0__reg_clk_busy__SHIFT 0x00000018
+#define VGT_DEBUG_REG0__sclk_core_vld_MASK 0x20000000L
+#define VGT_DEBUG_REG0__sclk_core_vld__SHIFT 0x0000001d
+#define VGT_DEBUG_REG0__sclk_gs_vld_MASK 0x40000000L
+#define VGT_DEBUG_REG0__sclk_gs_vld__SHIFT 0x0000001e
+#define VGT_DEBUG_REG0__SPARE0_MASK 0x80000000L
+#define VGT_DEBUG_REG0__SPARE0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG0__SPARE10_MASK 0x00040000L
+#define VGT_DEBUG_REG0__SPARE10__SHIFT 0x00000012
+#define VGT_DEBUG_REG0__SPARE1_MASK 0x10000000L
+#define VGT_DEBUG_REG0__SPARE1__SHIFT 0x0000001c
+#define VGT_DEBUG_REG0__SPARE2_MASK 0x02000000L
+#define VGT_DEBUG_REG0__SPARE2__SHIFT 0x00000019
+#define VGT_DEBUG_REG0__SPARE3_MASK 0x00100000L
+#define VGT_DEBUG_REG0__SPARE3__SHIFT 0x00000014
+#define VGT_DEBUG_REG0__SPARE4_MASK 0x00000080L
+#define VGT_DEBUG_REG0__SPARE4__SHIFT 0x00000007
+#define VGT_DEBUG_REG0__SPARE5_MASK 0x00000040L
+#define VGT_DEBUG_REG0__SPARE5__SHIFT 0x00000006
+#define VGT_DEBUG_REG0__SPARE6_MASK 0x00000020L
+#define VGT_DEBUG_REG0__SPARE6__SHIFT 0x00000005
+#define VGT_DEBUG_REG0__SPARE7_MASK 0x00000010L
+#define VGT_DEBUG_REG0__SPARE7__SHIFT 0x00000004
+#define VGT_DEBUG_REG0__SPARE8_MASK 0x00000008L
+#define VGT_DEBUG_REG0__SPARE8__SHIFT 0x00000003
+#define VGT_DEBUG_REG0__SPARE9_MASK 0x00000002L
+#define VGT_DEBUG_REG0__SPARE9__SHIFT 0x00000001
+#define VGT_DEBUG_REG0__spi_vs_interfaces_busy_MASK 0x00400000L
+#define VGT_DEBUG_REG0__spi_vs_interfaces_busy__SHIFT 0x00000016
+#define VGT_DEBUG_REG0__te11_pi_busy_MASK 0x00080000L
+#define VGT_DEBUG_REG0__te11_pi_busy__SHIFT 0x00000013
+#define VGT_DEBUG_REG0__te_pi_busy_MASK 0x00000800L
+#define VGT_DEBUG_REG0__te_pi_busy__SHIFT 0x0000000b
+#define VGT_DEBUG_REG0__tm_busy_MASK 0x00004000L
+#define VGT_DEBUG_REG0__tm_busy__SHIFT 0x0000000e
+#define VGT_DEBUG_REG0__vgt_busy_extended_MASK 0x00000001L
+#define VGT_DEBUG_REG0__vgt_busy_extended__SHIFT 0x00000000
+#define VGT_DEBUG_REG0__vgt_busy_MASK 0x00000004L
+#define VGT_DEBUG_REG0__vgt_busy__SHIFT 0x00000002
+#define VGT_DEBUG_REG0__vr_pi_busy_MASK 0x00000200L
+#define VGT_DEBUG_REG0__vr_pi_busy__SHIFT 0x00000009
+#define VGT_DEBUG_REG10__eopg_r2_q_MASK 0x00000020L
+#define VGT_DEBUG_REG10__eopg_r2_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG10__eotg_r2_q_MASK 0x00000040L
+#define VGT_DEBUG_REG10__eotg_r2_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0_MASK 0xff800000L
+#define VGT_DEBUG_REG10__es_rb_space_avail_r2_q_8_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0_MASK 0x007fe000L
+#define VGT_DEBUG_REG10__gs_rb_space_avail_r3_q_9_0__SHIFT 0x0000000d
+#define VGT_DEBUG_REG10__index_buffer_depth_r1_q_MASK 0x0000001fL
+#define VGT_DEBUG_REG10__index_buffer_depth_r1_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG10__onchip_gs_en_r0_q_MASK 0x00000180L
+#define VGT_DEBUG_REG10__onchip_gs_en_r0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q_MASK 0x00001000L
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq_MASK 0x00000800L
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_qq__SHIFT 0x0000000b
+#define VGT_DEBUG_REG10__rcm_mem_gsprim_re_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG10__SPARE2_MASK 0x00000600L
+#define VGT_DEBUG_REG10__SPARE2__SHIFT 0x00000009
+#define VGT_DEBUG_REG11__counters_available_r0_MASK 0x00001000L
+#define VGT_DEBUG_REG11__counters_available_r0__SHIFT 0x0000000c
+#define VGT_DEBUG_REG11__counters_avail_r0_MASK 0x00000800L
+#define VGT_DEBUG_REG11__counters_avail_r0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG11__counters_busy_r0_MASK 0x00000400L
+#define VGT_DEBUG_REG11__counters_busy_r0__SHIFT 0x0000000a
+#define VGT_DEBUG_REG11__es_r0_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG11__es_r0_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy_MASK 0x00000008L
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_busy__SHIFT 0x00000003
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full_MASK 0x08000000L
+#define VGT_DEBUG_REG11__es_rb_dealloc_fifo_full__SHIFT 0x0000001b
+#define VGT_DEBUG_REG11__es_rb_roll_over_r3_MASK 0x00000200L
+#define VGT_DEBUG_REG11__es_rb_roll_over_r3__SHIFT 0x00000009
+#define VGT_DEBUG_REG11__es_tbl_empty_MASK 0x40000000L
+#define VGT_DEBUG_REG11__es_tbl_empty__SHIFT 0x0000001e
+#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG11__gog_tm_vs_event_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG11__gs_issue_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG11__gs_issue_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG11__gs_r0_rtr_MASK 0x00080000L
+#define VGT_DEBUG_REG11__gs_r0_rtr__SHIFT 0x00000013
+#define VGT_DEBUG_REG11__hold_eswave_MASK 0x00000100L
+#define VGT_DEBUG_REG11__hold_eswave__SHIFT 0x00000008
+#define VGT_DEBUG_REG11__no_active_states_r0_MASK 0x80000000L
+#define VGT_DEBUG_REG11__no_active_states_r0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG11__send_event_q_MASK 0x20000000L
+#define VGT_DEBUG_REG11__send_event_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG11__SPARE0_MASK 0x00040000L
+#define VGT_DEBUG_REG11__SPARE0__SHIFT 0x00000012
+#define VGT_DEBUG_REG11__SPARE1_MASK 0x00000020L
+#define VGT_DEBUG_REG11__SPARE1__SHIFT 0x00000005
+#define VGT_DEBUG_REG11__spi_esthread_fifo_busy_MASK 0x00000080L
+#define VGT_DEBUG_REG11__spi_esthread_fifo_busy__SHIFT 0x00000007
+#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy_MASK 0x00000040L
+#define VGT_DEBUG_REG11__spi_gsthread_fifo_busy__SHIFT 0x00000006
+#define VGT_DEBUG_REG11__tm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG11__tm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG11__tm_noif_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG11__tm_noif_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG11__tm_out_busy_q_MASK 0x00000004L
+#define VGT_DEBUG_REG11__tm_out_busy_q__SHIFT 0x00000002
+#define VGT_DEBUG_REG11__tm_pt_event_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG11__tm_pt_event_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr_MASK 0x01000000L
+#define VGT_DEBUG_REG11__tm_rcm_es_tbl_rtr__SHIFT 0x00000018
+#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr_MASK 0x00400000L
+#define VGT_DEBUG_REG11__tm_rcm_gs_event_rtr__SHIFT 0x00000016
+#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr_MASK 0x00800000L
+#define VGT_DEBUG_REG11__tm_rcm_gs_tbl_rtr__SHIFT 0x00000017
+#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q_MASK 0x00008000L
+#define VGT_DEBUG_REG11__VGT_SPI_esthread_rtr_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q_MASK 0x00004000L
+#define VGT_DEBUG_REG11__VGT_SPI_gsthread_rtr_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy_MASK 0x00000010L
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_busy__SHIFT 0x00000004
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_full_MASK 0x10000000L
+#define VGT_DEBUG_REG11__vs_dealloc_tbl_full__SHIFT 0x0000001c
+#define VGT_DEBUG_REG11__vs_event_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG11__vs_event_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG11__vs_event_fifo_full_MASK 0x04000000L
+#define VGT_DEBUG_REG11__vs_event_fifo_full__SHIFT 0x0000001a
+#define VGT_DEBUG_REG11__vs_event_fifo_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG11__vs_event_fifo_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG12__gs_state0_r0_q_MASK 0x00000007L
+#define VGT_DEBUG_REG12__gs_state0_r0_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG12__gs_state1_r0_q_MASK 0x00000038L
+#define VGT_DEBUG_REG12__gs_state1_r0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG12__gs_state2_r0_q_MASK 0x000001c0L
+#define VGT_DEBUG_REG12__gs_state2_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG12__gs_state3_r0_q_MASK 0x00000e00L
+#define VGT_DEBUG_REG12__gs_state3_r0_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG12__gs_state4_r0_q_MASK 0x00007000L
+#define VGT_DEBUG_REG12__gs_state4_r0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG12__gs_state5_r0_q_MASK 0x00038000L
+#define VGT_DEBUG_REG12__gs_state5_r0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG12__gs_state6_r0_q_MASK 0x001c0000L
+#define VGT_DEBUG_REG12__gs_state6_r0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG12__gs_state7_r0_q_MASK 0x00e00000L
+#define VGT_DEBUG_REG12__gs_state7_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG12__gs_state8_r0_q_MASK 0x07000000L
+#define VGT_DEBUG_REG12__gs_state8_r0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG12__gs_state9_r0_q_MASK 0x38000000L
+#define VGT_DEBUG_REG12__gs_state9_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG12__hold_eswave_eop_MASK 0x40000000L
+#define VGT_DEBUG_REG12__hold_eswave_eop__SHIFT 0x0000001e
+#define VGT_DEBUG_REG12__SPARE0_MASK 0x80000000L
+#define VGT_DEBUG_REG12__SPARE0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG13__active_cm_sm_r0_q_MASK 0xf8000000L
+#define VGT_DEBUG_REG13__active_cm_sm_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG13__es_tbl_full_MASK 0x01000000L
+#define VGT_DEBUG_REG13__es_tbl_full__SHIFT 0x00000018
+#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0_MASK 0x00800000L
+#define VGT_DEBUG_REG13__gsfetch_done_cnt_q_not_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0_MASK 0x00400000L
+#define VGT_DEBUG_REG13__gsfetch_done_fifo_cnt_q_not_0__SHIFT 0x00000016
+#define VGT_DEBUG_REG13__gs_state10_r0_q_MASK 0x00000007L
+#define VGT_DEBUG_REG13__gs_state10_r0_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG13__gs_state11_r0_q_MASK 0x00000038L
+#define VGT_DEBUG_REG13__gs_state11_r0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG13__gs_state12_r0_q_MASK 0x000001c0L
+#define VGT_DEBUG_REG13__gs_state12_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG13__gs_state13_r0_q_MASK 0x00000e00L
+#define VGT_DEBUG_REG13__gs_state13_r0_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG13__gs_state14_r0_q_MASK 0x00007000L
+#define VGT_DEBUG_REG13__gs_state14_r0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG13__gs_state15_r0_q_MASK 0x00038000L
+#define VGT_DEBUG_REG13__gs_state15_r0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0_MASK 0x003c0000L
+#define VGT_DEBUG_REG13__gs_tbl_wrptr_r0_q_3_0__SHIFT 0x00000012
+#define VGT_DEBUG_REG13__SPARE0_MASK 0x04000000L
+#define VGT_DEBUG_REG13__SPARE0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG13__SPARE1_MASK 0x02000000L
+#define VGT_DEBUG_REG13__SPARE1__SHIFT 0x00000019
+#define VGT_DEBUG_REG14__es_flush_cnt_busy_q_MASK 0x00000400L
+#define VGT_DEBUG_REG14__es_flush_cnt_busy_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG14__gsfetch_done_fifo_full_MASK 0x00000010L
+#define VGT_DEBUG_REG14__gsfetch_done_fifo_full__SHIFT 0x00000004
+#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0_MASK 0x20000000L
+#define VGT_DEBUG_REG14__gsfetch_done_se1_cnt_q_not_0__SHIFT 0x0000001d
+#define VGT_DEBUG_REG14__gs_rb_space_avail_r0_MASK 0x00000020L
+#define VGT_DEBUG_REG14__gs_rb_space_avail_r0__SHIFT 0x00000005
+#define VGT_DEBUG_REG14__gs_tbl_full_r0_MASK 0x00000800L
+#define VGT_DEBUG_REG14__gs_tbl_full_r0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy_MASK 0x08000000L
+#define VGT_DEBUG_REG14__se1spi_esthread_fifo_busy__SHIFT 0x0000001b
+#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy_MASK 0x00200000L
+#define VGT_DEBUG_REG14__se1spi_gsthread_fifo_busy__SHIFT 0x00000015
+#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0_MASK 0x04000000L
+#define VGT_DEBUG_REG14__smx1_es_done_cnt_r0_q_not_0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0_MASK 0x00000040L
+#define VGT_DEBUG_REG14__smx_es_done_cnt_r0_q_not_0__SHIFT 0x00000006
+#define VGT_DEBUG_REG14__SPARE0_MASK 0x40000000L
+#define VGT_DEBUG_REG14__SPARE0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG14__SPARE1_MASK 0x10000000L
+#define VGT_DEBUG_REG14__SPARE1__SHIFT 0x0000001c
+#define VGT_DEBUG_REG14__SPARE2_MASK 0x001ff000L
+#define VGT_DEBUG_REG14__SPARE2__SHIFT 0x0000000c
+#define VGT_DEBUG_REG14__SPARE3_MASK 0x0000000fL
+#define VGT_DEBUG_REG14__SPARE3__SHIFT 0x00000000
+#define VGT_DEBUG_REG14__SPARE8_MASK 0x00000180L
+#define VGT_DEBUG_REG14__SPARE8__SHIFT 0x00000007
+#define VGT_DEBUG_REG14__SPARE_MASK 0x01c00000L
+#define VGT_DEBUG_REG14__SPARE__SHIFT 0x00000016
+#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q_MASK 0x80000000L
+#define VGT_DEBUG_REG14__VGT_SE1SPI_esthread_rtr_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q_MASK 0x02000000L
+#define VGT_DEBUG_REG14__VGT_SE1SPI_gsthread_rtr_q__SHIFT 0x00000019
+#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0_MASK 0x00000200L
+#define VGT_DEBUG_REG14__vs_done_cnt_q_not_0__SHIFT 0x00000009
+#define VGT_DEBUG_REG15__active_sm_q_MASK 0x000003e0L
+#define VGT_DEBUG_REG15__active_sm_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG15__cm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG15__cm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q_MASK 0x000f8000L
+#define VGT_DEBUG_REG15__cntr_tbl_wrptr_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG15__counters_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG15__counters_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG15__counters_full_MASK 0x00000010L
+#define VGT_DEBUG_REG15__counters_full__SHIFT 0x00000004
+#define VGT_DEBUG_REG15__entry_rdptr_q_MASK 0x00007c00L
+#define VGT_DEBUG_REG15__entry_rdptr_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG15__gs_done_array_q_not_0_MASK 0x10000000L
+#define VGT_DEBUG_REG15__gs_done_array_q_not_0__SHIFT 0x0000001c
+#define VGT_DEBUG_REG15__output_fifo_empty_MASK 0x00000004L
+#define VGT_DEBUG_REG15__output_fifo_empty__SHIFT 0x00000002
+#define VGT_DEBUG_REG15__output_fifo_full_MASK 0x00000008L
+#define VGT_DEBUG_REG15__output_fifo_full__SHIFT 0x00000003
+#define VGT_DEBUG_REG15__SPARE25_MASK 0x03f00000L
+#define VGT_DEBUG_REG15__SPARE25__SHIFT 0x00000014
+#define VGT_DEBUG_REG15__SPARE31_MASK 0xe0000000L
+#define VGT_DEBUG_REG15__SPARE31__SHIFT 0x0000001d
+#define VGT_DEBUG_REG15__st_cut_mode_q_MASK 0x0c000000L
+#define VGT_DEBUG_REG15__st_cut_mode_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG16__gog_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG16__gog_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG16__gog_out_prim_state_sel_MASK 0x0e000000L
+#define VGT_DEBUG_REG16__gog_out_prim_state_sel__SHIFT 0x00000019
+#define VGT_DEBUG_REG16__gog_state_q_MASK 0x0000000eL
+#define VGT_DEBUG_REG16__gog_state_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG16__gog_tm_vs_event_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG16__indx_valid_r0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG16__indx_valid_r0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG16__indx_valid_r1_q_MASK 0x00020000L
+#define VGT_DEBUG_REG16__indx_valid_r1_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG16__indx_valid_r2_q_MASK 0x00002000L
+#define VGT_DEBUG_REG16__indx_valid_r2_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG16__multiple_streams_en_r1_q_MASK 0x10000000L
+#define VGT_DEBUG_REG16__multiple_streams_en_r1_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG16__new_vs_thread_r2_MASK 0x80000000L
+#define VGT_DEBUG_REG16__new_vs_thread_r2__SHIFT 0x0000001f
+#define VGT_DEBUG_REG16__num_gs_r2_q_not_0_MASK 0x40000000L
+#define VGT_DEBUG_REG16__num_gs_r2_q_not_0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG16__prim_valid_r0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG16__prim_valid_r0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG16__prim_valid_r1_q_MASK 0x00010000L
+#define VGT_DEBUG_REG16__prim_valid_r1_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG16__prim_valid_r2_q_MASK 0x00004000L
+#define VGT_DEBUG_REG16__prim_valid_r2_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG16__r0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG16__r0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG16__r1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG16__r1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG16__r1_upstream_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG16__r1_upstream_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG16__r2_indx_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG16__r2_indx_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG16__r2_prim_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG16__r2_prim_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG16__r2_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG16__r2_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG16__r2_vs_tbl_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG16__r2_vs_tbl_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG16__r3_force_vs_tbl_we_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG16__send_event_q_MASK 0x00400000L
+#define VGT_DEBUG_REG16__send_event_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG16__SPARE24_MASK 0x01800000L
+#define VGT_DEBUG_REG16__SPARE24__SHIFT 0x00000017
+#define VGT_DEBUG_REG16__valid_r0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG16__valid_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG16__valid_r1_q_MASK 0x00040000L
+#define VGT_DEBUG_REG16__valid_r1_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG16__valid_r2_q_MASK 0x00008000L
+#define VGT_DEBUG_REG16__valid_r2_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q_MASK 0x01000000L
+#define VGT_DEBUG_REG16__vert_seen_since_sopg_r2_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0_MASK 0x20000000L
+#define VGT_DEBUG_REG16__vs_vert_count_r2_q_not_0__SHIFT 0x0000001d
+#define VGT_DEBUG_REG17__gog_out_indx_13_0_MASK 0xfffc0000L
+#define VGT_DEBUG_REG17__gog_out_indx_13_0__SHIFT 0x00000012
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0_MASK 0x0003f000L
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx0_5_0__SHIFT 0x0000000c
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0_MASK 0x00000fc0L
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx1_5_0__SHIFT 0x00000006
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0_MASK 0x0000003fL
+#define VGT_DEBUG_REG17__gog_out_prim_rel_indx2_5_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG18__components_valid_r0_q_MASK 0xe0000000L
+#define VGT_DEBUG_REG18__components_valid_r0_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d_MASK 0x00800000L
+#define VGT_DEBUG_REG18__eject_vtx_vect_r1_d__SHIFT 0x00000017
+#define VGT_DEBUG_REG18__eop_r0_q_MASK 0x00400000L
+#define VGT_DEBUG_REG18__eop_r0_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG18__grp_vr_valid_MASK 0x00000001L
+#define VGT_DEBUG_REG18__grp_vr_valid__SHIFT 0x00000000
+#define VGT_DEBUG_REG18__gs_scenario_a_r0_q_MASK 0x08000000L
+#define VGT_DEBUG_REG18__gs_scenario_a_r0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG18__gs_scenario_b_r0_q_MASK 0x10000000L
+#define VGT_DEBUG_REG18__gs_scenario_b_r0_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG18__indices_to_send_q_MASK 0x00000700L
+#define VGT_DEBUG_REG18__indices_to_send_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG18__indx0_hit_d_MASK 0x00040000L
+#define VGT_DEBUG_REG18__indx0_hit_d__SHIFT 0x00000012
+#define VGT_DEBUG_REG18__indx0_new_d_MASK 0x00002000L
+#define VGT_DEBUG_REG18__indx0_new_d__SHIFT 0x0000000d
+#define VGT_DEBUG_REG18__indx1_hit_d_MASK 0x00020000L
+#define VGT_DEBUG_REG18__indx1_hit_d__SHIFT 0x00000011
+#define VGT_DEBUG_REG18__indx1_new_d_MASK 0x00004000L
+#define VGT_DEBUG_REG18__indx1_new_d__SHIFT 0x0000000e
+#define VGT_DEBUG_REG18__indx2_hit_d_MASK 0x00010000L
+#define VGT_DEBUG_REG18__indx2_hit_d__SHIFT 0x00000010
+#define VGT_DEBUG_REG18__indx2_new_d_MASK 0x00008000L
+#define VGT_DEBUG_REG18__indx2_new_d__SHIFT 0x0000000f
+#define VGT_DEBUG_REG18__last_group_of_instance_r0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG18__last_group_of_instance_r0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG18__last_indx_of_prim_MASK 0x00001000L
+#define VGT_DEBUG_REG18__last_indx_of_prim__SHIFT 0x0000000c
+#define VGT_DEBUG_REG18__null_primitive_r0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG18__null_primitive_r0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG18__out_vr_indx_read_MASK 0x00000040L
+#define VGT_DEBUG_REG18__out_vr_indx_read__SHIFT 0x00000006
+#define VGT_DEBUG_REG18__out_vr_prim_read_MASK 0x00000080L
+#define VGT_DEBUG_REG18__out_vr_prim_read__SHIFT 0x00000007
+#define VGT_DEBUG_REG18__pipe0_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG18__pipe0_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG18__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG18__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG18__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG18__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG18__pipe1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG18__pipe1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG18__st_vertex_reuse_off_r0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG18__sub_prim_type_r0_q_MASK 0x07000000L
+#define VGT_DEBUG_REG18__sub_prim_type_r0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG18__valid_indices_MASK 0x00000800L
+#define VGT_DEBUG_REG18__valid_indices__SHIFT 0x0000000b
+#define VGT_DEBUG_REG18__vr_grp_read_MASK 0x00000008L
+#define VGT_DEBUG_REG18__vr_grp_read__SHIFT 0x00000003
+#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect_MASK 0x00080000L
+#define VGT_DEBUG_REG19__buffered_prim_eject_vtx_vect__SHIFT 0x00000013
+#define VGT_DEBUG_REG19__buffered_prim_eop_MASK 0x00040000L
+#define VGT_DEBUG_REG19__buffered_prim_eop__SHIFT 0x00000012
+#define VGT_DEBUG_REG19__buffered_prim_event_MASK 0x00010000L
+#define VGT_DEBUG_REG19__buffered_prim_event__SHIFT 0x00000010
+#define VGT_DEBUG_REG19__buffered_prim_null_primitive_MASK 0x00020000L
+#define VGT_DEBUG_REG19__buffered_prim_null_primitive__SHIFT 0x00000011
+#define VGT_DEBUG_REG19__buffered_prim_type_event_MASK 0x03f00000L
+#define VGT_DEBUG_REG19__buffered_prim_type_event__SHIFT 0x00000014
+#define VGT_DEBUG_REG19__filter_event_MASK 0x80000000L
+#define VGT_DEBUG_REG19__filter_event__SHIFT 0x0000001f
+#define VGT_DEBUG_REG19__hold_prim_MASK 0x00000800L
+#define VGT_DEBUG_REG19__hold_prim__SHIFT 0x0000000b
+#define VGT_DEBUG_REG19__new_packet_q_MASK 0x00008000L
+#define VGT_DEBUG_REG19__new_packet_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG19__null_terminate_vtx_vector_MASK 0x40000000L
+#define VGT_DEBUG_REG19__null_terminate_vtx_vector__SHIFT 0x0000001e
+#define VGT_DEBUG_REG19__num_new_unique_rel_indx_MASK 0x30000000L
+#define VGT_DEBUG_REG19__num_new_unique_rel_indx__SHIFT 0x0000001c
+#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q_MASK 0x00000020L
+#define VGT_DEBUG_REG19__pa_clipp_fifo_busy_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q_MASK 0x00000010L
+#define VGT_DEBUG_REG19__pa_clips_fifo_busy_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q_MASK 0x00000400L
+#define VGT_DEBUG_REG19__pa_clipv_fifo_busy_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG19__prim_buffer_empty_MASK 0x00000004L
+#define VGT_DEBUG_REG19__prim_buffer_empty__SHIFT 0x00000002
+#define VGT_DEBUG_REG19__prim_buffer_full_MASK 0x00000008L
+#define VGT_DEBUG_REG19__prim_buffer_full__SHIFT 0x00000003
+#define VGT_DEBUG_REG19__separate_out_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG19__separate_out_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG19__separate_out_indx_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG19__separate_out_indx_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q_MASK 0x00000100L
+#define VGT_DEBUG_REG19__spi_vsthread_fifo_busy_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q_MASK 0x00000200L
+#define VGT_DEBUG_REG19__spi_vsvert_fifo_busy_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q_MASK 0x00000080L
+#define VGT_DEBUG_REG19__VGT_PA_clipp_rtr_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q_MASK 0x00000040L
+#define VGT_DEBUG_REG19__VGT_PA_clips_rtr_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q_MASK 0x00004000L
+#define VGT_DEBUG_REG19__VGT_PA_clipv_rtr_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q_MASK 0x08000000L
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vsvert_rtr_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q_MASK 0x04000000L
+#define VGT_DEBUG_REG19__VGT_SE1SPI_vswave_rtr_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q_MASK 0x00001000L
+#define VGT_DEBUG_REG19__VGT_SPI_vsthread_rtr_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q_MASK 0x00002000L
+#define VGT_DEBUG_REG19__VGT_SPI_vsvert_rtr_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG1__gog_out_indx_valid_MASK 0x10000000L
+#define VGT_DEBUG_REG1__gog_out_indx_valid__SHIFT 0x0000001c
+#define VGT_DEBUG_REG1__gog_out_prim_valid_MASK 0x40000000L
+#define VGT_DEBUG_REG1__gog_out_prim_valid__SHIFT 0x0000001e
+#define VGT_DEBUG_REG1__gs_pi_read_MASK 0x08000000L
+#define VGT_DEBUG_REG1__gs_pi_read__SHIFT 0x0000001b
+#define VGT_DEBUG_REG1__out_indx_read_MASK 0x20000000L
+#define VGT_DEBUG_REG1__out_indx_read__SHIFT 0x0000001d
+#define VGT_DEBUG_REG1__out_prim_read_MASK 0x80000000L
+#define VGT_DEBUG_REG1__out_prim_read__SHIFT 0x0000001f
+#define VGT_DEBUG_REG1__pi_gs_valid_MASK 0x04000000L
+#define VGT_DEBUG_REG1__pi_gs_valid__SHIFT 0x0000001a
+#define VGT_DEBUG_REG1__pi_pt_valid_MASK 0x00001000L
+#define VGT_DEBUG_REG1__pi_pt_valid__SHIFT 0x0000000c
+#define VGT_DEBUG_REG1__pi_te_valid_MASK 0x00004000L
+#define VGT_DEBUG_REG1__pi_te_valid__SHIFT 0x0000000e
+#define VGT_DEBUG_REG1__pi_vr_valid_MASK 0x00000400L
+#define VGT_DEBUG_REG1__pi_vr_valid__SHIFT 0x0000000a
+#define VGT_DEBUG_REG1__pt_out_indx_valid_MASK 0x00100000L
+#define VGT_DEBUG_REG1__pt_out_indx_valid__SHIFT 0x00000014
+#define VGT_DEBUG_REG1__pt_out_prim_valid_MASK 0x00400000L
+#define VGT_DEBUG_REG1__pt_out_prim_valid__SHIFT 0x00000016
+#define VGT_DEBUG_REG1__pt_pi_read_MASK 0x00002000L
+#define VGT_DEBUG_REG1__pt_pi_read__SHIFT 0x0000000d
+#define VGT_DEBUG_REG1__SPARE0_MASK 0x00000200L
+#define VGT_DEBUG_REG1__SPARE0__SHIFT 0x00000009
+#define VGT_DEBUG_REG1__SPARE10_MASK 0x00200000L
+#define VGT_DEBUG_REG1__SPARE10__SHIFT 0x00000015
+#define VGT_DEBUG_REG1__SPARE11_MASK 0x00080000L
+#define VGT_DEBUG_REG1__SPARE11__SHIFT 0x00000013
+#define VGT_DEBUG_REG1__SPARE12_MASK 0x00020000L
+#define VGT_DEBUG_REG1__SPARE12__SHIFT 0x00000011
+#define VGT_DEBUG_REG1__SPARE1_MASK 0x00000100L
+#define VGT_DEBUG_REG1__SPARE1__SHIFT 0x00000008
+#define VGT_DEBUG_REG1__SPARE23_MASK 0x00800000L
+#define VGT_DEBUG_REG1__SPARE23__SHIFT 0x00000017
+#define VGT_DEBUG_REG1__SPARE25_MASK 0x02000000L
+#define VGT_DEBUG_REG1__SPARE25__SHIFT 0x00000019
+#define VGT_DEBUG_REG1__SPARE2_MASK 0x00000080L
+#define VGT_DEBUG_REG1__SPARE2__SHIFT 0x00000007
+#define VGT_DEBUG_REG1__SPARE3_MASK 0x00000040L
+#define VGT_DEBUG_REG1__SPARE3__SHIFT 0x00000006
+#define VGT_DEBUG_REG1__SPARE4_MASK 0x00000020L
+#define VGT_DEBUG_REG1__SPARE4__SHIFT 0x00000005
+#define VGT_DEBUG_REG1__SPARE5_MASK 0x00000010L
+#define VGT_DEBUG_REG1__SPARE5__SHIFT 0x00000004
+#define VGT_DEBUG_REG1__SPARE6_MASK 0x00000008L
+#define VGT_DEBUG_REG1__SPARE6__SHIFT 0x00000003
+#define VGT_DEBUG_REG1__SPARE7_MASK 0x00000004L
+#define VGT_DEBUG_REG1__SPARE7__SHIFT 0x00000002
+#define VGT_DEBUG_REG1__SPARE8_MASK 0x00000002L
+#define VGT_DEBUG_REG1__SPARE8__SHIFT 0x00000001
+#define VGT_DEBUG_REG1__SPARE9_MASK 0x00000001L
+#define VGT_DEBUG_REG1__SPARE9__SHIFT 0x00000000
+#define VGT_DEBUG_REG1__te_grp_read_MASK 0x00008000L
+#define VGT_DEBUG_REG1__te_grp_read__SHIFT 0x0000000f
+#define VGT_DEBUG_REG1__te_out_data_valid_MASK 0x01000000L
+#define VGT_DEBUG_REG1__te_out_data_valid__SHIFT 0x00000018
+#define VGT_DEBUG_REG1__vr_out_indx_valid_MASK 0x00010000L
+#define VGT_DEBUG_REG1__vr_out_indx_valid__SHIFT 0x00000010
+#define VGT_DEBUG_REG1__vr_out_prim_valid_MASK 0x00040000L
+#define VGT_DEBUG_REG1__vr_out_prim_valid__SHIFT 0x00000012
+#define VGT_DEBUG_REG1__vr_pi_read_MASK 0x00000800L
+#define VGT_DEBUG_REG1__vr_pi_read__SHIFT 0x0000000b
+#define VGT_DEBUG_REG20__alloc_counter_q_MASK 0x003c0000L
+#define VGT_DEBUG_REG20__alloc_counter_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG20__curr_dealloc_distance_q_MASK 0x1fc00000L
+#define VGT_DEBUG_REG20__curr_dealloc_distance_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0_MASK 0x40000000L
+#define VGT_DEBUG_REG20__curr_slot_in_vtx_vect_q_not_0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0_MASK 0x00010000L
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexcount_not_0__SHIFT 0x00000010
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex_MASK 0x0000ffffL
+#define VGT_DEBUG_REG20__dbg_VGT_SPI_vsthread_sovertexindex__SHIFT 0x00000000
+#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0_MASK 0x80000000L
+#define VGT_DEBUG_REG20__int_vtx_counter_q_not_0__SHIFT 0x0000001f
+#define VGT_DEBUG_REG20__new_allocate_q_MASK 0x20000000L
+#define VGT_DEBUG_REG20__new_allocate_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG20__SPARE17_MASK 0x00020000L
+#define VGT_DEBUG_REG20__SPARE17__SHIFT 0x00000011
+#define VGT_DEBUG_REG21__buff_full_p1_MASK 0x01000000L
+#define VGT_DEBUG_REG21__buff_full_p1__SHIFT 0x00000018
+#define VGT_DEBUG_REG21__eopg_p0_q_MASK 0x40000000L
+#define VGT_DEBUG_REG21__eopg_p0_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG21__eotg_r2_q_MASK 0x04000000L
+#define VGT_DEBUG_REG21__eotg_r2_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG21__full_state_p1_q_MASK 0x00008000L
+#define VGT_DEBUG_REG21__full_state_p1_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG21__indx_count_q_not_0_MASK 0x00002000L
+#define VGT_DEBUG_REG21__indx_count_q_not_0__SHIFT 0x0000000d
+#define VGT_DEBUG_REG21__indx_side_fifo_empty_MASK 0x00000002L
+#define VGT_DEBUG_REG21__indx_side_fifo_empty__SHIFT 0x00000001
+#define VGT_DEBUG_REG21__indx_side_fifo_full_MASK 0x00000080L
+#define VGT_DEBUG_REG21__indx_side_fifo_full__SHIFT 0x00000007
+#define VGT_DEBUG_REG21__indx_side_indx_valid_MASK 0x00010000L
+#define VGT_DEBUG_REG21__indx_side_indx_valid__SHIFT 0x00000010
+#define VGT_DEBUG_REG21__interfaces_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG21__interfaces_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG21__is_event_p0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG21__is_event_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG21__lshs_dealloc_p1_MASK 0x00200000L
+#define VGT_DEBUG_REG21__lshs_dealloc_p1__SHIFT 0x00000015
+#define VGT_DEBUG_REG21__null_r2_q_MASK 0x08000000L
+#define VGT_DEBUG_REG21__null_r2_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG21__out_indx_fifo_empty_MASK 0x00000001L
+#define VGT_DEBUG_REG21__out_indx_fifo_empty__SHIFT 0x00000000
+#define VGT_DEBUG_REG21__out_indx_fifo_full_MASK 0x00000040L
+#define VGT_DEBUG_REG21__out_indx_fifo_full__SHIFT 0x00000006
+#define VGT_DEBUG_REG21__p0_dr_MASK 0x10000000L
+#define VGT_DEBUG_REG21__p0_dr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG21__p0_nobp_MASK 0x80000000L
+#define VGT_DEBUG_REG21__p0_nobp__SHIFT 0x0000001f
+#define VGT_DEBUG_REG21__p0_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG21__p0_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG21__pipe0_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG21__pipe0_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG21__pipe0_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG21__pipe0_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG21__pipe1_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG21__pipe1_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG21__pipe1_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG21__pipe1_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG21__pipe2_dr_MASK 0x00000010L
+#define VGT_DEBUG_REG21__pipe2_dr__SHIFT 0x00000004
+#define VGT_DEBUG_REG21__pipe2_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG21__pipe2_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG21__stateid_p0_q_MASK 0x000e0000L
+#define VGT_DEBUG_REG21__stateid_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG21__stream_id_r2_q_MASK 0x00400000L
+#define VGT_DEBUG_REG21__stream_id_r2_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG21__strmout_valid_p1_MASK 0x02000000L
+#define VGT_DEBUG_REG21__strmout_valid_p1__SHIFT 0x00000019
+#define VGT_DEBUG_REG21__vsthread_buff_empty_MASK 0x00000020L
+#define VGT_DEBUG_REG21__vsthread_buff_empty__SHIFT 0x00000005
+#define VGT_DEBUG_REG21__vsthread_buff_full_MASK 0x00000800L
+#define VGT_DEBUG_REG21__vsthread_buff_full__SHIFT 0x0000000b
+#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0_MASK 0x00800000L
+#define VGT_DEBUG_REG21__vtx_vect_counter_q_not_0__SHIFT 0x00000017
+#define VGT_DEBUG_REG21__wait_for_external_eopg_q_MASK 0x00004000L
+#define VGT_DEBUG_REG21__wait_for_external_eopg_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG22__cm_state16_MASK 0x00000003L
+#define VGT_DEBUG_REG22__cm_state16__SHIFT 0x00000000
+#define VGT_DEBUG_REG22__cm_state17_MASK 0x0000000cL
+#define VGT_DEBUG_REG22__cm_state17__SHIFT 0x00000002
+#define VGT_DEBUG_REG22__cm_state18_MASK 0x00000030L
+#define VGT_DEBUG_REG22__cm_state18__SHIFT 0x00000004
+#define VGT_DEBUG_REG22__cm_state19_MASK 0x000000c0L
+#define VGT_DEBUG_REG22__cm_state19__SHIFT 0x00000006
+#define VGT_DEBUG_REG22__cm_state20_MASK 0x00000300L
+#define VGT_DEBUG_REG22__cm_state20__SHIFT 0x00000008
+#define VGT_DEBUG_REG22__cm_state21_MASK 0x00000c00L
+#define VGT_DEBUG_REG22__cm_state21__SHIFT 0x0000000a
+#define VGT_DEBUG_REG22__cm_state22_MASK 0x00003000L
+#define VGT_DEBUG_REG22__cm_state22__SHIFT 0x0000000c
+#define VGT_DEBUG_REG22__cm_state23_MASK 0x0000c000L
+#define VGT_DEBUG_REG22__cm_state23__SHIFT 0x0000000e
+#define VGT_DEBUG_REG22__cm_state24_MASK 0x00030000L
+#define VGT_DEBUG_REG22__cm_state24__SHIFT 0x00000010
+#define VGT_DEBUG_REG22__cm_state25_MASK 0x000c0000L
+#define VGT_DEBUG_REG22__cm_state25__SHIFT 0x00000012
+#define VGT_DEBUG_REG22__cm_state26_MASK 0x00300000L
+#define VGT_DEBUG_REG22__cm_state26__SHIFT 0x00000014
+#define VGT_DEBUG_REG22__cm_state27_MASK 0x00c00000L
+#define VGT_DEBUG_REG22__cm_state27__SHIFT 0x00000016
+#define VGT_DEBUG_REG22__cm_state28_MASK 0x03000000L
+#define VGT_DEBUG_REG22__cm_state28__SHIFT 0x00000018
+#define VGT_DEBUG_REG22__cm_state29_MASK 0x0c000000L
+#define VGT_DEBUG_REG22__cm_state29__SHIFT 0x0000001a
+#define VGT_DEBUG_REG22__cm_state30_MASK 0x30000000L
+#define VGT_DEBUG_REG22__cm_state30__SHIFT 0x0000001c
+#define VGT_DEBUG_REG22__cm_state31_MASK 0xc0000000L
+#define VGT_DEBUG_REG22__cm_state31__SHIFT 0x0000001e
+#define VGT_DEBUG_REG23__frmt_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG23__frmt_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG23__new_verts_r2_q_MASK 0x00018000L
+#define VGT_DEBUG_REG23__new_verts_r2_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG23__prim_dr_r2_q_MASK 0x00001000L
+#define VGT_DEBUG_REG23__prim_dr_r2_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG23__prim_fifo_empty_MASK 0x00000200L
+#define VGT_DEBUG_REG23__prim_fifo_empty__SHIFT 0x00000009
+#define VGT_DEBUG_REG23__prim_fifo_full_MASK 0x00000400L
+#define VGT_DEBUG_REG23__prim_fifo_full__SHIFT 0x0000000a
+#define VGT_DEBUG_REG23__prim_r2_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG23__prim_r2_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG23__prim_r3_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG23__prim_r3_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG23__prim_state_sel_r2_q_MASK 0x00e00000L
+#define VGT_DEBUG_REG23__prim_state_sel_r2_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG23__rcm_frmt_prim_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr_MASK 0x00000002L
+#define VGT_DEBUG_REG23__rcm_frmt_vert_rtr__SHIFT 0x00000001
+#define VGT_DEBUG_REG23__SPARE_MASK 0xff000000L
+#define VGT_DEBUG_REG23__SPARE__SHIFT 0x00000018
+#define VGT_DEBUG_REG23__vert_dr_r0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG23__vert_dr_r0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG23__vert_dr_r1_q_MASK 0x00002000L
+#define VGT_DEBUG_REG23__vert_dr_r1_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG23__vert_dr_r2_q_MASK 0x00000800L
+#define VGT_DEBUG_REG23__vert_dr_r2_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG23__vert_r0_rtr_MASK 0x00000100L
+#define VGT_DEBUG_REG23__vert_r0_rtr__SHIFT 0x00000008
+#define VGT_DEBUG_REG23__vert_r1_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG23__vert_r1_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG23__vert_r2_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG23__vert_r2_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG23__vert_r3_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG23__vert_r3_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG23__verts_sent_r2_q_MASK 0x001e0000L
+#define VGT_DEBUG_REG23__verts_sent_r2_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0_MASK 0x00ffffffL
+#define VGT_DEBUG_REG24__avail_es_rb_space_r0_q_23_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG24__dependent_st_cut_mode_q_MASK 0x03000000L
+#define VGT_DEBUG_REG24__dependent_st_cut_mode_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG24__SPARE31_MASK 0xfc000000L
+#define VGT_DEBUG_REG24__SPARE31__SHIFT 0x0000001a
+#define VGT_DEBUG_REG25__active_sm_r0_q_MASK 0x3c000000L
+#define VGT_DEBUG_REG25__active_sm_r0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q_MASK 0x80000000L
+#define VGT_DEBUG_REG25__add_gs_rb_space_r0_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q_MASK 0x40000000L
+#define VGT_DEBUG_REG25__add_gs_rb_space_r1_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0_MASK 0x03ffffffL
+#define VGT_DEBUG_REG25__avail_gs_rb_space_r0_q_25_0__SHIFT 0x00000000
+#define VGT_DEBUG_REG26__cm_state0_MASK 0x00000003L
+#define VGT_DEBUG_REG26__cm_state0__SHIFT 0x00000000
+#define VGT_DEBUG_REG26__cm_state10_MASK 0x00300000L
+#define VGT_DEBUG_REG26__cm_state10__SHIFT 0x00000014
+#define VGT_DEBUG_REG26__cm_state11_MASK 0x00c00000L
+#define VGT_DEBUG_REG26__cm_state11__SHIFT 0x00000016
+#define VGT_DEBUG_REG26__cm_state12_MASK 0x03000000L
+#define VGT_DEBUG_REG26__cm_state12__SHIFT 0x00000018
+#define VGT_DEBUG_REG26__cm_state13_MASK 0x0c000000L
+#define VGT_DEBUG_REG26__cm_state13__SHIFT 0x0000001a
+#define VGT_DEBUG_REG26__cm_state14_MASK 0x30000000L
+#define VGT_DEBUG_REG26__cm_state14__SHIFT 0x0000001c
+#define VGT_DEBUG_REG26__cm_state15_MASK 0xc0000000L
+#define VGT_DEBUG_REG26__cm_state15__SHIFT 0x0000001e
+#define VGT_DEBUG_REG26__cm_state1_MASK 0x0000000cL
+#define VGT_DEBUG_REG26__cm_state1__SHIFT 0x00000002
+#define VGT_DEBUG_REG26__cm_state2_MASK 0x00000030L
+#define VGT_DEBUG_REG26__cm_state2__SHIFT 0x00000004
+#define VGT_DEBUG_REG26__cm_state3_MASK 0x000000c0L
+#define VGT_DEBUG_REG26__cm_state3__SHIFT 0x00000006
+#define VGT_DEBUG_REG26__cm_state4_MASK 0x00000300L
+#define VGT_DEBUG_REG26__cm_state4__SHIFT 0x00000008
+#define VGT_DEBUG_REG26__cm_state5_MASK 0x00000c00L
+#define VGT_DEBUG_REG26__cm_state5__SHIFT 0x0000000a
+#define VGT_DEBUG_REG26__cm_state6_MASK 0x00003000L
+#define VGT_DEBUG_REG26__cm_state6__SHIFT 0x0000000c
+#define VGT_DEBUG_REG26__cm_state7_MASK 0x0000c000L
+#define VGT_DEBUG_REG26__cm_state7__SHIFT 0x0000000e
+#define VGT_DEBUG_REG26__cm_state8_MASK 0x00030000L
+#define VGT_DEBUG_REG26__cm_state8__SHIFT 0x00000010
+#define VGT_DEBUG_REG26__cm_state9_MASK 0x000c0000L
+#define VGT_DEBUG_REG26__cm_state9__SHIFT 0x00000012
+#define VGT_DEBUG_REG27__eop_p1_q_MASK 0x00000800L
+#define VGT_DEBUG_REG27__eop_p1_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG27__event_flag_p1_q_MASK 0x00000400L
+#define VGT_DEBUG_REG27__event_flag_p1_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q_MASK 0x00080000L
+#define VGT_DEBUG_REG27__first_vsprim_of_gsprim_p0_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG27__gsc0_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG27__gsc0_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG27__gsc0_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG27__gsc0_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG27__gsc_2cycle_output_MASK 0x00010000L
+#define VGT_DEBUG_REG27__gsc_2cycle_output__SHIFT 0x00000010
+#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG27__gsc_2nd_cycle_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG27__gsc_eop_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG27__gsc_eop_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG27__gsc_indx_count_p0_q_MASK 0x7ff00000L
+#define VGT_DEBUG_REG27__gsc_indx_count_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG27__gsc_null_primitive_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q_MASK 0x00003000L
+#define VGT_DEBUG_REG27__gs_out_prim_type_p0_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG27__indices_to_send_p0_q_MASK 0x00000300L
+#define VGT_DEBUG_REG27__indices_to_send_p0_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q_MASK 0x00000080L
+#define VGT_DEBUG_REG27__last_indx_of_prim_p1_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG27__last_indx_of_vsprim_MASK 0x00040000L
+#define VGT_DEBUG_REG27__last_indx_of_vsprim__SHIFT 0x00000012
+#define VGT_DEBUG_REG27__last_vsprim_of_gsprim_MASK 0x80000000L
+#define VGT_DEBUG_REG27__last_vsprim_of_gsprim__SHIFT 0x0000001f
+#define VGT_DEBUG_REG27__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG27__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG27__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG27__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG27__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG27__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG27__pipe1_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG27__pipe1_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG27__tm_pt_event_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG27__tm_pt_event_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG28__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG28__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG28__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG28__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG28__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG28__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG28__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG28__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG28__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG28__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG28__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG28__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG28__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG28__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG28__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG28__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG28__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG28__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG28__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG28__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG28__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG28__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG28__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG28__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG28__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG28__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG28__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG28__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG28__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG28__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG28__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG28__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG28__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG28__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG28__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG28__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG28__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG28__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG28__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG28__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG28__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG28__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG28__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG28__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG28__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG28__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG28__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG28__use_stored_inner_q_ring2_MASK 0x80000000L
+#define VGT_DEBUG_REG28__use_stored_inner_q_ring2__SHIFT 0x0000001f
+#define VGT_DEBUG_REG29__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG29__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG29__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG29__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG29__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG29__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG29__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG29__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG29__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG29__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG29__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG29__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG29__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG29__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG29__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG29__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG29__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG29__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG29__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG29__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG29__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG29__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG29__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG29__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG29__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG29__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG29__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG29__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG29__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG29__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG29__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG29__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG29__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG29__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG29__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG29__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG29__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG29__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG29__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG29__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG29__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG29__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG29__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG29__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG29__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG29__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG29__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG29__use_stored_inner_q_ring3_MASK 0x80000000L
+#define VGT_DEBUG_REG29__use_stored_inner_q_ring3__SHIFT 0x0000001f
+#define VGT_DEBUG_REG2__grpModBusy_MASK 0x00000080L
+#define VGT_DEBUG_REG2__grpModBusy__SHIFT 0x00000007
+#define VGT_DEBUG_REG2__hs_grp_busy_MASK 0x00000001L
+#define VGT_DEBUG_REG2__hs_grp_busy__SHIFT 0x00000000
+#define VGT_DEBUG_REG2__hsInputFifoEmpty_MASK 0x00001000L
+#define VGT_DEBUG_REG2__hsInputFifoEmpty__SHIFT 0x0000000c
+#define VGT_DEBUG_REG2__hsInputFifoFull_MASK 0x00040000L
+#define VGT_DEBUG_REG2__hsInputFifoFull__SHIFT 0x00000012
+#define VGT_DEBUG_REG2__hs_noif_busy_MASK 0x00000002L
+#define VGT_DEBUG_REG2__hs_noif_busy__SHIFT 0x00000001
+#define VGT_DEBUG_REG2__hs_te11_tess_input_rts_MASK 0x00000040L
+#define VGT_DEBUG_REG2__hs_te11_tess_input_rts__SHIFT 0x00000006
+#define VGT_DEBUG_REG2__hsTifFifoEmpty_MASK 0x00002000L
+#define VGT_DEBUG_REG2__hsTifFifoEmpty__SHIFT 0x0000000d
+#define VGT_DEBUG_REG2__hsTifFifoFull_MASK 0x00080000L
+#define VGT_DEBUG_REG2__hsTifFifoFull__SHIFT 0x00000013
+#define VGT_DEBUG_REG2__hsVertFifoEmpty_MASK 0x00000400L
+#define VGT_DEBUG_REG2__hsVertFifoEmpty__SHIFT 0x0000000a
+#define VGT_DEBUG_REG2__hsVertFifoFull_MASK 0x00010000L
+#define VGT_DEBUG_REG2__hsVertFifoFull__SHIFT 0x00000010
+#define VGT_DEBUG_REG2__hsWaveFifoEmpty_MASK 0x00000800L
+#define VGT_DEBUG_REG2__hsWaveFifoEmpty__SHIFT 0x0000000b
+#define VGT_DEBUG_REG2__hsWaveFifoFull_MASK 0x00020000L
+#define VGT_DEBUG_REG2__hsWaveFifoFull__SHIFT 0x00000011
+#define VGT_DEBUG_REG2__lsFwaveFlag_MASK 0x08000000L
+#define VGT_DEBUG_REG2__lsFwaveFlag__SHIFT 0x0000001b
+#define VGT_DEBUG_REG2__ls_sh_id_MASK 0x04000000L
+#define VGT_DEBUG_REG2__ls_sh_id__SHIFT 0x0000001a
+#define VGT_DEBUG_REG2__lsVertFifoEmpty_MASK 0x00000100L
+#define VGT_DEBUG_REG2__lsVertFifoEmpty__SHIFT 0x00000008
+#define VGT_DEBUG_REG2__lsVertFifoFull_MASK 0x00004000L
+#define VGT_DEBUG_REG2__lsVertFifoFull__SHIFT 0x0000000e
+#define VGT_DEBUG_REG2__lsVertIfBusy_0_MASK 0x00000008L
+#define VGT_DEBUG_REG2__lsVertIfBusy_0__SHIFT 0x00000003
+#define VGT_DEBUG_REG2__lsWaveFifoEmpty_MASK 0x00000200L
+#define VGT_DEBUG_REG2__lsWaveFifoEmpty__SHIFT 0x00000009
+#define VGT_DEBUG_REG2__lsWaveFifoFull_MASK 0x00008000L
+#define VGT_DEBUG_REG2__lsWaveFifoFull__SHIFT 0x0000000f
+#define VGT_DEBUG_REG2__lsWaveIfBusy_0_MASK 0x00000020L
+#define VGT_DEBUG_REG2__lsWaveIfBusy_0__SHIFT 0x00000005
+#define VGT_DEBUG_REG2__lsWaveSendFlush_MASK 0x10000000L
+#define VGT_DEBUG_REG2__lsWaveSendFlush__SHIFT 0x0000001c
+#define VGT_DEBUG_REG2__p0_dr_MASK 0x00400000L
+#define VGT_DEBUG_REG2__p0_dr__SHIFT 0x00000016
+#define VGT_DEBUG_REG2__p0_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG2__p0_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG2__p0_rts_MASK 0x01000000L
+#define VGT_DEBUG_REG2__p0_rts__SHIFT 0x00000018
+#define VGT_DEBUG_REG2__p1_dr_MASK 0x00800000L
+#define VGT_DEBUG_REG2__p1_dr__SHIFT 0x00000017
+#define VGT_DEBUG_REG2__p1_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG2__p1_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG2__p1_rts_MASK 0x02000000L
+#define VGT_DEBUG_REG2__p1_rts__SHIFT 0x00000019
+#define VGT_DEBUG_REG2__SPARE_MASK 0xffffffffL
+#define VGT_DEBUG_REG2__SPARE__SHIFT 0x00000000
+#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG2__te11_hs_tess_input_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG2__tfmmIsBusy_MASK 0x00000004L
+#define VGT_DEBUG_REG2__tfmmIsBusy__SHIFT 0x00000002
+#define VGT_DEBUG_REG30__dynamic_hs_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG30__dynamic_hs_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG30__event_or_null_p0_q_MASK 0x00000008L
+#define VGT_DEBUG_REG30__event_or_null_p0_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG30__first_data_chunk_invalid_p0_q_MASK 0x08000000L
+#define VGT_DEBUG_REG30__first_data_chunk_invalid_p0_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG30__first_data_ret_of_req_p0_q_MASK 0x04000000L
+#define VGT_DEBUG_REG30__first_data_ret_of_req_p0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG30__first_fetch_of_tg_p0_q_MASK 0x02000000L
+#define VGT_DEBUG_REG30__first_fetch_of_tg_p0_q__SHIFT 0x00000019
+#define VGT_DEBUG_REG30__last_tf_of_tg_MASK 0x00080000L
+#define VGT_DEBUG_REG30__last_tf_of_tg__SHIFT 0x00000013
+#define VGT_DEBUG_REG30__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG30__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG30__pipe0_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG30__pipe0_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG30__pipe0_tf_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG30__pipe0_tf_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG30__pipe1_rtr_MASK 0x00000020L
+#define VGT_DEBUG_REG30__pipe1_rtr__SHIFT 0x00000005
+#define VGT_DEBUG_REG30__pipe1_tf_rtr_MASK 0x00000040L
+#define VGT_DEBUG_REG30__pipe1_tf_rtr__SHIFT 0x00000006
+#define VGT_DEBUG_REG30__pipe2_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG30__pipe2_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG30__pipe2_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG30__pipe2_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG30__pipe4_dr_MASK 0x40000000L
+#define VGT_DEBUG_REG30__pipe4_dr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG30__pipe4_rtr_MASK 0x80000000L
+#define VGT_DEBUG_REG30__pipe4_rtr__SHIFT 0x0000001f
+#define VGT_DEBUG_REG30__tf_fetch_state_q_MASK 0x00070000L
+#define VGT_DEBUG_REG30__tf_fetch_state_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG30__tf_pointer_p0_q_MASK 0x00f00000L
+#define VGT_DEBUG_REG30__tf_pointer_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG30__tf_xfer_count_p2_q_MASK 0x30000000L
+#define VGT_DEBUG_REG30__tf_xfer_count_p2_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG30__ttp_patch_fifo_empty_MASK 0x00000200L
+#define VGT_DEBUG_REG30__ttp_patch_fifo_empty__SHIFT 0x00000009
+#define VGT_DEBUG_REG30__ttp_patch_fifo_full_MASK 0x00000100L
+#define VGT_DEBUG_REG30__ttp_patch_fifo_full__SHIFT 0x00000008
+#define VGT_DEBUG_REG30__ttp_tf0_fifo_empty_MASK 0x00000400L
+#define VGT_DEBUG_REG30__ttp_tf0_fifo_empty__SHIFT 0x0000000a
+#define VGT_DEBUG_REG30__ttp_tf1_fifo_empty_MASK 0x00000800L
+#define VGT_DEBUG_REG30__ttp_tf1_fifo_empty__SHIFT 0x0000000b
+#define VGT_DEBUG_REG30__ttp_tf2_fifo_empty_MASK 0x00001000L
+#define VGT_DEBUG_REG30__ttp_tf2_fifo_empty__SHIFT 0x0000000c
+#define VGT_DEBUG_REG30__ttp_tf3_fifo_empty_MASK 0x00002000L
+#define VGT_DEBUG_REG30__ttp_tf3_fifo_empty__SHIFT 0x0000000d
+#define VGT_DEBUG_REG30__ttp_tf4_fifo_empty_MASK 0x00004000L
+#define VGT_DEBUG_REG30__ttp_tf4_fifo_empty__SHIFT 0x0000000e
+#define VGT_DEBUG_REG30__ttp_tf5_fifo_empty_MASK 0x00008000L
+#define VGT_DEBUG_REG30__ttp_tf5_fifo_empty__SHIFT 0x0000000f
+#define VGT_DEBUG_REG31__inner_ring_done_q_MASK 0x80000000L
+#define VGT_DEBUG_REG31__inner_ring_done_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG31__outer_ring_done_q_MASK 0x40000000L
+#define VGT_DEBUG_REG31__outer_ring_done_q__SHIFT 0x0000001e
+#define VGT_DEBUG_REG31__pg_con_inner_point1_rts_MASK 0x00400000L
+#define VGT_DEBUG_REG31__pg_con_inner_point1_rts__SHIFT 0x00000016
+#define VGT_DEBUG_REG31__pg_con_inner_point2_rts_MASK 0x00800000L
+#define VGT_DEBUG_REG31__pg_con_inner_point2_rts__SHIFT 0x00000017
+#define VGT_DEBUG_REG31__pg_con_outer_point1_rts_MASK 0x00100000L
+#define VGT_DEBUG_REG31__pg_con_outer_point1_rts__SHIFT 0x00000014
+#define VGT_DEBUG_REG31__pg_con_outer_point2_rts_MASK 0x00200000L
+#define VGT_DEBUG_REG31__pg_con_outer_point2_rts__SHIFT 0x00000015
+#define VGT_DEBUG_REG31__pg_edge_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG31__pg_edge_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG31__pg_edge_fifo_full_MASK 0x10000000L
+#define VGT_DEBUG_REG31__pg_edge_fifo_full__SHIFT 0x0000001c
+#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty_MASK 0x04000000L
+#define VGT_DEBUG_REG31__pg_inner3_perp_fifo_empty__SHIFT 0x0000001a
+#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full_MASK 0x20000000L
+#define VGT_DEBUG_REG31__pg_inner_perp_fifo_full__SHIFT 0x0000001d
+#define VGT_DEBUG_REG31__pg_patch_fifo_empty_MASK 0x01000000L
+#define VGT_DEBUG_REG31__pg_patch_fifo_empty__SHIFT 0x00000018
+#define VGT_DEBUG_REG31__pg_patch_fifo_full_MASK 0x08000000L
+#define VGT_DEBUG_REG31__pg_patch_fifo_full__SHIFT 0x0000001b
+#define VGT_DEBUG_REG31__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG31__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG31__pipe0_rtr_MASK 0x00000002L
+#define VGT_DEBUG_REG31__pipe0_rtr__SHIFT 0x00000001
+#define VGT_DEBUG_REG31__pipe1_inner_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG31__pipe1_inner_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG31__pipe1_outer_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG31__pipe1_outer_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG31__pipe2_inner_dr_MASK 0x00000020L
+#define VGT_DEBUG_REG31__pipe2_inner_dr__SHIFT 0x00000005
+#define VGT_DEBUG_REG31__pipe2_inner_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG31__pipe2_inner_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG31__pipe2_outer_dr_MASK 0x00000010L
+#define VGT_DEBUG_REG31__pipe2_outer_dr__SHIFT 0x00000004
+#define VGT_DEBUG_REG31__pipe2_outer_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG31__pipe2_outer_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG31__pipe3_inner_dr_MASK 0x00000080L
+#define VGT_DEBUG_REG31__pipe3_inner_dr__SHIFT 0x00000007
+#define VGT_DEBUG_REG31__pipe3_inner_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG31__pipe3_inner_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG31__pipe3_outer_dr_MASK 0x00000040L
+#define VGT_DEBUG_REG31__pipe3_outer_dr__SHIFT 0x00000006
+#define VGT_DEBUG_REG31__pipe3_outer_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG31__pipe3_outer_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG31__pipe4_inner_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG31__pipe4_inner_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG31__pipe4_inner_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG31__pipe4_inner_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG31__pipe4_outer_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG31__pipe4_outer_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG31__pipe4_outer_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG31__pipe4_outer_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG31__pipe5_inner_dr_MASK 0x00000800L
+#define VGT_DEBUG_REG31__pipe5_inner_dr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG31__pipe5_inner_rtr_MASK 0x00080000L
+#define VGT_DEBUG_REG31__pipe5_inner_rtr__SHIFT 0x00000013
+#define VGT_DEBUG_REG31__pipe5_outer_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG31__pipe5_outer_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG31__pipe5_outer_rtr_MASK 0x00040000L
+#define VGT_DEBUG_REG31__pipe5_outer_rtr__SHIFT 0x00000012
+#define VGT_DEBUG_REG32__event_flag_p5_q_MASK 0x00000100L
+#define VGT_DEBUG_REG32__event_flag_p5_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG32__event_null_special_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG32__event_null_special_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG32__fifos_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG32__fifos_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG32__first_point_of_edge_p5_q_MASK 0x00000400L
+#define VGT_DEBUG_REG32__first_point_of_edge_p5_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG32__first_point_of_patch_p5_q_MASK 0x00000200L
+#define VGT_DEBUG_REG32__first_point_of_patch_p5_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG32__first_ring_of_patch_MASK 0x00000001L
+#define VGT_DEBUG_REG32__first_ring_of_patch__SHIFT 0x00000000
+#define VGT_DEBUG_REG32__inner2_fifos_rtr_MASK 0x01000000L
+#define VGT_DEBUG_REG32__inner2_fifos_rtr__SHIFT 0x00000018
+#define VGT_DEBUG_REG32__inner_fifos_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG32__inner_fifos_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG32__last_edge_of_inner_ring_MASK 0x00000010L
+#define VGT_DEBUG_REG32__last_edge_of_inner_ring__SHIFT 0x00000004
+#define VGT_DEBUG_REG32__last_edge_of_outer_ring_MASK 0x00000004L
+#define VGT_DEBUG_REG32__last_edge_of_outer_ring__SHIFT 0x00000002
+#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG32__last_patch_of_tg_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q_MASK 0x00000800L
+#define VGT_DEBUG_REG32__last_patch_of_tg_p5_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG32__last_point_of_inner_edge_MASK 0x00000020L
+#define VGT_DEBUG_REG32__last_point_of_inner_edge__SHIFT 0x00000005
+#define VGT_DEBUG_REG32__last_point_of_outer_edge_MASK 0x00000008L
+#define VGT_DEBUG_REG32__last_point_of_outer_edge__SHIFT 0x00000003
+#define VGT_DEBUG_REG32__last_ring_of_patch_MASK 0x00000002L
+#define VGT_DEBUG_REG32__last_ring_of_patch__SHIFT 0x00000001
+#define VGT_DEBUG_REG32__outer_fifos_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG32__outer_fifos_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG32__pg_edge_fifo2_full_MASK 0x00020000L
+#define VGT_DEBUG_REG32__pg_edge_fifo2_full__SHIFT 0x00000011
+#define VGT_DEBUG_REG32__pg_edge_fifo3_full_MASK 0x00010000L
+#define VGT_DEBUG_REG32__pg_edge_fifo3_full__SHIFT 0x00000010
+#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full_MASK 0x00100000L
+#define VGT_DEBUG_REG32__pg_inner2_point_fifo_full__SHIFT 0x00000014
+#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full_MASK 0x00040000L
+#define VGT_DEBUG_REG32__pg_inner3_point_fifo_full__SHIFT 0x00000012
+#define VGT_DEBUG_REG32__pg_inner_point_fifo_full_MASK 0x00400000L
+#define VGT_DEBUG_REG32__pg_inner_point_fifo_full__SHIFT 0x00000016
+#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full_MASK 0x00200000L
+#define VGT_DEBUG_REG32__pg_outer2_point_fifo_full__SHIFT 0x00000015
+#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full_MASK 0x00080000L
+#define VGT_DEBUG_REG32__pg_outer3_point_fifo_full__SHIFT 0x00000013
+#define VGT_DEBUG_REG32__pg_outer_point_fifo_full_MASK 0x00800000L
+#define VGT_DEBUG_REG32__pg_outer_point_fifo_full__SHIFT 0x00000017
+#define VGT_DEBUG_REG32__pipe5_inner2_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG32__pipe5_inner2_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG32__pipe5_inner3_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG32__pipe5_inner3_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG32__SPARE_MASK 0x80000000L
+#define VGT_DEBUG_REG32__SPARE__SHIFT 0x0000001f
+#define VGT_DEBUG_REG32__tess_topology_p5_q_MASK 0x00003000L
+#define VGT_DEBUG_REG32__tess_topology_p5_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG33__con_prim_fifo_empty_MASK 0x00040000L
+#define VGT_DEBUG_REG33__con_prim_fifo_empty__SHIFT 0x00000012
+#define VGT_DEBUG_REG33__con_prim_fifo_full_MASK 0x00010000L
+#define VGT_DEBUG_REG33__con_prim_fifo_full__SHIFT 0x00000010
+#define VGT_DEBUG_REG33__con_ring1_busy_MASK 0x80000000L
+#define VGT_DEBUG_REG33__con_ring1_busy__SHIFT 0x0000001f
+#define VGT_DEBUG_REG33__con_ring2_busy_MASK 0x40000000L
+#define VGT_DEBUG_REG33__con_ring2_busy__SHIFT 0x0000001e
+#define VGT_DEBUG_REG33__con_ring3_busy_MASK 0x20000000L
+#define VGT_DEBUG_REG33__con_ring3_busy__SHIFT 0x0000001d
+#define VGT_DEBUG_REG33__con_vert_fifo_empty_MASK 0x00080000L
+#define VGT_DEBUG_REG33__con_vert_fifo_empty__SHIFT 0x00000013
+#define VGT_DEBUG_REG33__con_vert_fifo_full_MASK 0x00020000L
+#define VGT_DEBUG_REG33__con_vert_fifo_full__SHIFT 0x00000011
+#define VGT_DEBUG_REG33__first_prim_of_patch_q_MASK 0x00008000L
+#define VGT_DEBUG_REG33__first_prim_of_patch_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q_MASK 0x00100000L
+#define VGT_DEBUG_REG33__last_patch_of_tg_p0_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG33__pipe0_patch_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG33__pipe0_patch_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG33__pipe0_patch_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG33__pipe0_patch_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG33__pipe1_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG33__pipe1_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG33__pipe1_patch_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG33__pipe1_patch_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG33__pipe2_dr_MASK 0x00000008L
+#define VGT_DEBUG_REG33__pipe2_dr__SHIFT 0x00000003
+#define VGT_DEBUG_REG33__pipe2_rtr_MASK 0x00000080L
+#define VGT_DEBUG_REG33__pipe2_rtr__SHIFT 0x00000007
+#define VGT_DEBUG_REG33__pipe3_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG33__pipe3_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG33__pipe3_rtr_MASK 0x00000200L
+#define VGT_DEBUG_REG33__pipe3_rtr__SHIFT 0x00000009
+#define VGT_DEBUG_REG33__ring1_in_sync_q_MASK 0x00000800L
+#define VGT_DEBUG_REG33__ring1_in_sync_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG33__ring1_pipe1_dr_MASK 0x00000040L
+#define VGT_DEBUG_REG33__ring1_pipe1_dr__SHIFT 0x00000006
+#define VGT_DEBUG_REG33__ring1_valid_p2_MASK 0x00800000L
+#define VGT_DEBUG_REG33__ring1_valid_p2__SHIFT 0x00000017
+#define VGT_DEBUG_REG33__ring2_in_sync_q_MASK 0x00000400L
+#define VGT_DEBUG_REG33__ring2_in_sync_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG33__ring2_pipe1_dr_MASK 0x00000020L
+#define VGT_DEBUG_REG33__ring2_pipe1_dr__SHIFT 0x00000005
+#define VGT_DEBUG_REG33__ring2_valid_p2_MASK 0x00400000L
+#define VGT_DEBUG_REG33__ring2_valid_p2__SHIFT 0x00000016
+#define VGT_DEBUG_REG33__ring3_in_sync_q_MASK 0x00002000L
+#define VGT_DEBUG_REG33__ring3_in_sync_q__SHIFT 0x0000000d
+#define VGT_DEBUG_REG33__ring3_pipe1_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG33__ring3_pipe1_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG33__ring3_valid_p2_MASK 0x00200000L
+#define VGT_DEBUG_REG33__ring3_valid_p2__SHIFT 0x00000015
+#define VGT_DEBUG_REG33__te11_out_vert_gs_en_MASK 0x10000000L
+#define VGT_DEBUG_REG33__te11_out_vert_gs_en__SHIFT 0x0000001c
+#define VGT_DEBUG_REG33__tess_topology_p0_q_MASK 0x0c000000L
+#define VGT_DEBUG_REG33__tess_topology_p0_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG33__tess_type_p0_q_MASK 0x03000000L
+#define VGT_DEBUG_REG33__tess_type_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG33__tm_te11_event_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG33__tm_te11_event_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG34__advance_inner_point_p1_MASK 0x00800000L
+#define VGT_DEBUG_REG34__advance_inner_point_p1__SHIFT 0x00000017
+#define VGT_DEBUG_REG34__advance_outer_point_p1_MASK 0x00400000L
+#define VGT_DEBUG_REG34__advance_outer_point_p1__SHIFT 0x00000016
+#define VGT_DEBUG_REG34__con_state_q_MASK 0x0000000fL
+#define VGT_DEBUG_REG34__con_state_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q_MASK 0x00010000L
+#define VGT_DEBUG_REG34__first_ring_of_patch_p0_q__SHIFT 0x00000010
+#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q_MASK 0x00040000L
+#define VGT_DEBUG_REG34__last_edge_of_outer_ring_p0_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1_MASK 0x00100000L
+#define VGT_DEBUG_REG34__last_point_of_inner_ring_p1__SHIFT 0x00000014
+#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1_MASK 0x00080000L
+#define VGT_DEBUG_REG34__last_point_of_outer_ring_p1__SHIFT 0x00000013
+#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q_MASK 0x00020000L
+#define VGT_DEBUG_REG34__last_ring_of_patch_p0_q__SHIFT 0x00000011
+#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q_MASK 0x01000000L
+#define VGT_DEBUG_REG34__next_ring_is_rect_p0_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q_MASK 0x00200000L
+#define VGT_DEBUG_REG34__outer_edge_tf_eq_one_p0_q__SHIFT 0x00000015
+#define VGT_DEBUG_REG34__outer_parity_p0_q_MASK 0x00004000L
+#define VGT_DEBUG_REG34__outer_parity_p0_q__SHIFT 0x0000000e
+#define VGT_DEBUG_REG34__parallel_parity_p0_q_MASK 0x00008000L
+#define VGT_DEBUG_REG34__parallel_parity_p0_q__SHIFT 0x0000000f
+#define VGT_DEBUG_REG34__pipe0_edge_dr_MASK 0x00000200L
+#define VGT_DEBUG_REG34__pipe0_edge_dr__SHIFT 0x00000009
+#define VGT_DEBUG_REG34__pipe0_edge_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG34__pipe0_edge_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG34__pipe0_patch_dr_MASK 0x00000100L
+#define VGT_DEBUG_REG34__pipe0_patch_dr__SHIFT 0x00000008
+#define VGT_DEBUG_REG34__pipe0_patch_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG34__pipe0_patch_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG34__pipe1_dr_MASK 0x00000400L
+#define VGT_DEBUG_REG34__pipe1_dr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG34__pipe1_edge_rtr_MASK 0x40000000L
+#define VGT_DEBUG_REG34__pipe1_edge_rtr__SHIFT 0x0000001e
+#define VGT_DEBUG_REG34__pipe1_inner1_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG34__pipe1_inner1_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG34__pipe1_inner2_rtr_MASK 0x10000000L
+#define VGT_DEBUG_REG34__pipe1_inner2_rtr__SHIFT 0x0000001c
+#define VGT_DEBUG_REG34__pipe1_outer1_rtr_MASK 0x02000000L
+#define VGT_DEBUG_REG34__pipe1_outer1_rtr__SHIFT 0x00000019
+#define VGT_DEBUG_REG34__pipe1_outer2_rtr_MASK 0x04000000L
+#define VGT_DEBUG_REG34__pipe1_outer2_rtr__SHIFT 0x0000001a
+#define VGT_DEBUG_REG34__pipe1_patch_rtr_MASK 0x20000000L
+#define VGT_DEBUG_REG34__pipe1_patch_rtr__SHIFT 0x0000001d
+#define VGT_DEBUG_REG34__pipe1_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG34__pipe1_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG34__process_tri_1st_2nd_half_p0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q_MASK 0x00000080L
+#define VGT_DEBUG_REG34__process_tri_center_poly_p0_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG34__process_tri_middle_p0_q_MASK 0x00000020L
+#define VGT_DEBUG_REG34__process_tri_middle_p0_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG34__second_cycle_q_MASK 0x00000010L
+#define VGT_DEBUG_REG34__second_cycle_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG34__use_stored_inner_q_ring1_MASK 0x80000000L
+#define VGT_DEBUG_REG34__use_stored_inner_q_ring1__SHIFT 0x0000001f
+#define VGT_DEBUG_REG35__event_flag_p1_q_MASK 0x00040000L
+#define VGT_DEBUG_REG35__event_flag_p1_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG35__first_req_of_tg_p1_q_MASK 0x10000000L
+#define VGT_DEBUG_REG35__first_req_of_tg_p1_q__SHIFT 0x0000001c
+#define VGT_DEBUG_REG35__last_req_of_tg_p2_MASK 0x00000800L
+#define VGT_DEBUG_REG35__last_req_of_tg_p2__SHIFT 0x0000000b
+#define VGT_DEBUG_REG35__null_flag_p1_q_MASK 0x00080000L
+#define VGT_DEBUG_REG35__null_flag_p1_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG35__pipe0_dr_MASK 0x00000001L
+#define VGT_DEBUG_REG35__pipe0_dr__SHIFT 0x00000000
+#define VGT_DEBUG_REG35__pipe0_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG35__pipe0_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG35__pipe1_dr_MASK 0x00000002L
+#define VGT_DEBUG_REG35__pipe1_dr__SHIFT 0x00000001
+#define VGT_DEBUG_REG35__pipe1_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG35__pipe1_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG35__second_tf_ret_data_q_MASK 0x08000000L
+#define VGT_DEBUG_REG35__second_tf_ret_data_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG35__spi_vgt_hs_done_cnt_q_MASK 0x0003f000L
+#define VGT_DEBUG_REG35__spi_vgt_hs_done_cnt_q__SHIFT 0x0000000c
+#define VGT_DEBUG_REG35__TC_VGT_rdret_data_in_MASK 0x80000000L
+#define VGT_DEBUG_REG35__TC_VGT_rdret_data_in__SHIFT 0x0000001f
+#define VGT_DEBUG_REG35__tf_data_fifo_busy_q_MASK 0x00000040L
+#define VGT_DEBUG_REG35__tf_data_fifo_busy_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG35__tf_data_fifo_cnt_q_MASK 0x07f00000L
+#define VGT_DEBUG_REG35__tf_data_fifo_cnt_q__SHIFT 0x00000014
+#define VGT_DEBUG_REG35__tf_data_fifo_rtr_q_MASK 0x00000080L
+#define VGT_DEBUG_REG35__tf_data_fifo_rtr_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_empty_MASK 0x00000010L
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_empty__SHIFT 0x00000004
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_full_MASK 0x00000020L
+#define VGT_DEBUG_REG35__tfreq_tg_fifo_full__SHIFT 0x00000005
+#define VGT_DEBUG_REG35__tf_skid_fifo_empty_MASK 0x00000100L
+#define VGT_DEBUG_REG35__tf_skid_fifo_empty__SHIFT 0x00000008
+#define VGT_DEBUG_REG35__tf_skid_fifo_full_MASK 0x00000200L
+#define VGT_DEBUG_REG35__tf_skid_fifo_full__SHIFT 0x00000009
+#define VGT_DEBUG_REG35__VGT_TC_rdnfo_stall_out_MASK 0x40000000L
+#define VGT_DEBUG_REG35__VGT_TC_rdnfo_stall_out__SHIFT 0x0000001e
+#define VGT_DEBUG_REG35__vgt_tc_rdreq_rtr_q_MASK 0x00000400L
+#define VGT_DEBUG_REG35__vgt_tc_rdreq_rtr_q__SHIFT 0x0000000a
+#define VGT_DEBUG_REG35__VGT_TC_rdreq_send_out_MASK 0x20000000L
+#define VGT_DEBUG_REG35__VGT_TC_rdreq_send_out__SHIFT 0x0000001d
+#define VGT_DEBUG_REG3__hsWaveRelInd_MASK 0xfc000000L
+#define VGT_DEBUG_REG3__hsWaveRelInd__SHIFT 0x0000001a
+#define VGT_DEBUG_REG3__lsPatchCnt_MASK 0x03fc0000L
+#define VGT_DEBUG_REG3__lsPatchCnt__SHIFT 0x00000012
+#define VGT_DEBUG_REG3__lsTgRelInd_MASK 0x00000fffL
+#define VGT_DEBUG_REG3__lsTgRelInd__SHIFT 0x00000000
+#define VGT_DEBUG_REG3__lsWaveRelInd_MASK 0x0003f000L
+#define VGT_DEBUG_REG3__lsWaveRelInd__SHIFT 0x0000000c
+#define VGT_DEBUG_REG4__hsCpCnt_MASK 0x1f000000L
+#define VGT_DEBUG_REG4__hsCpCnt__SHIFT 0x00000018
+#define VGT_DEBUG_REG4__hsFwaveFlag_MASK 0x40000000L
+#define VGT_DEBUG_REG4__hsFwaveFlag__SHIFT 0x0000001e
+#define VGT_DEBUG_REG4__hsPatchCnt_MASK 0x000000ffL
+#define VGT_DEBUG_REG4__hsPatchCnt__SHIFT 0x00000000
+#define VGT_DEBUG_REG4__hsPrimId_15_0_MASK 0x00ffff00L
+#define VGT_DEBUG_REG4__hsPrimId_15_0__SHIFT 0x00000008
+#define VGT_DEBUG_REG4__hsWaveSendFlush_MASK 0x20000000L
+#define VGT_DEBUG_REG4__hsWaveSendFlush__SHIFT 0x0000001d
+#define VGT_DEBUG_REG4__SPARE_MASK 0xffffffffL
+#define VGT_DEBUG_REG4__SPARE__SHIFT 0x00000000
+#define VGT_DEBUG_REG5__hsVertCreditCnt_0_MASK 0x0000f800L
+#define VGT_DEBUG_REG5__hsVertCreditCnt_0__SHIFT 0x0000000b
+#define VGT_DEBUG_REG5__hsWaveCreditCnt_0_MASK 0x000000f8L
+#define VGT_DEBUG_REG5__hsWaveCreditCnt_0__SHIFT 0x00000003
+#define VGT_DEBUG_REG5__lsVertCreditCnt_0_MASK 0xf8000000L
+#define VGT_DEBUG_REG5__lsVertCreditCnt_0__SHIFT 0x0000001b
+#define VGT_DEBUG_REG5__lsWaveCreditCnt_0_MASK 0x00f80000L
+#define VGT_DEBUG_REG5__lsWaveCreditCnt_0__SHIFT 0x00000013
+#define VGT_DEBUG_REG5__SPARE1_MASK 0x07000000L
+#define VGT_DEBUG_REG5__SPARE1__SHIFT 0x00000018
+#define VGT_DEBUG_REG5__SPARE2_MASK 0x00070000L
+#define VGT_DEBUG_REG5__SPARE2__SHIFT 0x00000010
+#define VGT_DEBUG_REG5__SPARE3_MASK 0x00000700L
+#define VGT_DEBUG_REG5__SPARE3__SHIFT 0x00000008
+#define VGT_DEBUG_REG5__SPARE4_MASK 0x00000007L
+#define VGT_DEBUG_REG5__SPARE4__SHIFT 0x00000000
+#define VGT_DEBUG_REG6__debug_BASE_MASK 0x0000ffffL
+#define VGT_DEBUG_REG6__debug_BASE__SHIFT 0x00000000
+#define VGT_DEBUG_REG6__debug_SIZE_MASK 0xffff0000L
+#define VGT_DEBUG_REG6__debug_SIZE__SHIFT 0x00000010
+#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty_MASK 0x00000001L
+#define VGT_DEBUG_REG7__debug_tfmmFifoEmpty__SHIFT 0x00000000
+#define VGT_DEBUG_REG7__debug_tfmmFifoFull_MASK 0x00000002L
+#define VGT_DEBUG_REG7__debug_tfmmFifoFull__SHIFT 0x00000001
+#define VGT_DEBUG_REG7__hs_pipe0_dr_MASK 0x00000004L
+#define VGT_DEBUG_REG7__hs_pipe0_dr__SHIFT 0x00000002
+#define VGT_DEBUG_REG7__hs_pipe0_rtr_MASK 0x00000008L
+#define VGT_DEBUG_REG7__hs_pipe0_rtr__SHIFT 0x00000003
+#define VGT_DEBUG_REG7__hs_pipe1_rtr_MASK 0x00000010L
+#define VGT_DEBUG_REG7__hs_pipe1_rtr__SHIFT 0x00000004
+#define VGT_DEBUG_REG7__SPARE_MASK 0x0000ffe0L
+#define VGT_DEBUG_REG7__SPARE__SHIFT 0x00000005
+#define VGT_DEBUG_REG7__TF_addr_MASK 0xffff0000L
+#define VGT_DEBUG_REG7__TF_addr__SHIFT 0x00000010
+#define VGT_DEBUG_REG8__es_gs_rtr_MASK 0x00004000L
+#define VGT_DEBUG_REG8__es_gs_rtr__SHIFT 0x0000000e
+#define VGT_DEBUG_REG8__gs_event_fifo_empty_MASK 0x02000000L
+#define VGT_DEBUG_REG8__gs_event_fifo_empty__SHIFT 0x00000019
+#define VGT_DEBUG_REG8__gs_event_fifo_rtr_MASK 0x00008000L
+#define VGT_DEBUG_REG8__gs_event_fifo_rtr__SHIFT 0x0000000f
+#define VGT_DEBUG_REG8__gsprim_buff_empty_q_MASK 0x04000000L
+#define VGT_DEBUG_REG8__gsprim_buff_empty_q__SHIFT 0x0000001a
+#define VGT_DEBUG_REG8__gsprim_buff_full_q_MASK 0x08000000L
+#define VGT_DEBUG_REG8__gsprim_buff_full_q__SHIFT 0x0000001b
+#define VGT_DEBUG_REG8__gs_tbl_r3_rtr_MASK 0x00020000L
+#define VGT_DEBUG_REG8__gs_tbl_r3_rtr__SHIFT 0x00000011
+#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q_MASK 0x00000020L
+#define VGT_DEBUG_REG8__gs_tbl_valid_r3_q__SHIFT 0x00000005
+#define VGT_DEBUG_REG8__hold_for_es_flush_MASK 0x01000000L
+#define VGT_DEBUG_REG8__hold_for_es_flush__SHIFT 0x00000018
+#define VGT_DEBUG_REG8__prim_skid_fifo_empty_MASK 0x00040000L
+#define VGT_DEBUG_REG8__prim_skid_fifo_empty__SHIFT 0x00000012
+#define VGT_DEBUG_REG8__r0_rtr_MASK 0x00000400L
+#define VGT_DEBUG_REG8__r0_rtr__SHIFT 0x0000000a
+#define VGT_DEBUG_REG8__r1_inst_rtr_MASK 0x00000004L
+#define VGT_DEBUG_REG8__r1_inst_rtr__SHIFT 0x00000002
+#define VGT_DEBUG_REG8__r1_rtr_MASK 0x00000800L
+#define VGT_DEBUG_REG8__r1_rtr__SHIFT 0x0000000b
+#define VGT_DEBUG_REG8__r2_indx_rtr_MASK 0x00001000L
+#define VGT_DEBUG_REG8__r2_indx_rtr__SHIFT 0x0000000c
+#define VGT_DEBUG_REG8__r2_no_bp_rtr_MASK 0x00800000L
+#define VGT_DEBUG_REG8__r2_no_bp_rtr__SHIFT 0x00000017
+#define VGT_DEBUG_REG8__r2_rtr_MASK 0x00002000L
+#define VGT_DEBUG_REG8__r2_rtr__SHIFT 0x0000000d
+#define VGT_DEBUG_REG8__rcm_busy_q_MASK 0x00000001L
+#define VGT_DEBUG_REG8__rcm_busy_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG8__rcm_noif_busy_q_MASK 0x00000002L
+#define VGT_DEBUG_REG8__rcm_noif_busy_q__SHIFT 0x00000001
+#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q_MASK 0x00000010L
+#define VGT_DEBUG_REG8__spi_esvert_fifo_busy_q__SHIFT 0x00000004
+#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q_MASK 0x00000008L
+#define VGT_DEBUG_REG8__spi_gsprim_fifo_busy_q__SHIFT 0x00000003
+#define VGT_DEBUG_REG8__te_prim_fifo_empty_MASK 0x10000000L
+#define VGT_DEBUG_REG8__te_prim_fifo_empty__SHIFT 0x0000001c
+#define VGT_DEBUG_REG8__te_prim_fifo_full_MASK 0x20000000L
+#define VGT_DEBUG_REG8__te_prim_fifo_full__SHIFT 0x0000001d
+#define VGT_DEBUG_REG8__te_vert_fifo_empty_MASK 0x40000000L
+#define VGT_DEBUG_REG8__te_vert_fifo_empty__SHIFT 0x0000001e
+#define VGT_DEBUG_REG8__te_vert_fifo_full_MASK 0x80000000L
+#define VGT_DEBUG_REG8__te_vert_fifo_full__SHIFT 0x0000001f
+#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr_MASK 0x00200000L
+#define VGT_DEBUG_REG8__tm_rcm_es_tbl_rtr__SHIFT 0x00000015
+#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr_MASK 0x00010000L
+#define VGT_DEBUG_REG8__tm_rcm_gs_event_rtr__SHIFT 0x00000010
+#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr_MASK 0x00100000L
+#define VGT_DEBUG_REG8__tm_rcm_gs_tbl_rtr__SHIFT 0x00000014
+#define VGT_DEBUG_REG8__valid_r0_q_MASK 0x00000040L
+#define VGT_DEBUG_REG8__valid_r0_q__SHIFT 0x00000006
+#define VGT_DEBUG_REG8__valid_r1_q_MASK 0x00000080L
+#define VGT_DEBUG_REG8__valid_r1_q__SHIFT 0x00000007
+#define VGT_DEBUG_REG8__valid_r2_MASK 0x00000100L
+#define VGT_DEBUG_REG8__valid_r2_q_MASK 0x00000200L
+#define VGT_DEBUG_REG8__valid_r2_q__SHIFT 0x00000009
+#define VGT_DEBUG_REG8__valid_r2__SHIFT 0x00000008
+#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q_MASK 0x00400000L
+#define VGT_DEBUG_REG8__VGT_SPI_esvert_rtr_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q_MASK 0x00080000L
+#define VGT_DEBUG_REG8__VGT_SPI_gsprim_rtr_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG9__eop_indx_r3_MASK 0x00000010L
+#define VGT_DEBUG_REG9__eop_indx_r3__SHIFT 0x00000004
+#define VGT_DEBUG_REG9__eop_prim_r3_MASK 0x00000020L
+#define VGT_DEBUG_REG9__eop_prim_r3__SHIFT 0x00000005
+#define VGT_DEBUG_REG9__es_eov_r3_MASK 0x00000040L
+#define VGT_DEBUG_REG9__es_eov_r3__SHIFT 0x00000006
+#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0_MASK 0x02000000L
+#define VGT_DEBUG_REG9__es_per_gs_vert_cnt_r3_q_not_0__SHIFT 0x00000019
+#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0_MASK 0x00000080L
+#define VGT_DEBUG_REG9__es_tbl_state_r3_q_0__SHIFT 0x00000007
+#define VGT_DEBUG_REG9__gs_eov_r3_MASK 0x00000008L
+#define VGT_DEBUG_REG9__gs_eov_r3__SHIFT 0x00000003
+#define VGT_DEBUG_REG9__gs_instancing_state_q_MASK 0x01000000L
+#define VGT_DEBUG_REG9__gs_instancing_state_q__SHIFT 0x00000018
+#define VGT_DEBUG_REG9__gs_pending_state_r3_q_MASK 0x00400000L
+#define VGT_DEBUG_REG9__gs_pending_state_r3_q__SHIFT 0x00000016
+#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0_MASK 0x04000000L
+#define VGT_DEBUG_REG9__gs_prim_per_es_ctr_r3_q_not_0__SHIFT 0x0000001a
+#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q_MASK 0x00040000L
+#define VGT_DEBUG_REG9__gs_tbl_eop_r3_q__SHIFT 0x00000012
+#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0_MASK 0x00000400L
+#define VGT_DEBUG_REG9__gs_tbl_num_es_per_gs_r3_q_not_0__SHIFT 0x0000000a
+#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q_MASK 0x0003f800L
+#define VGT_DEBUG_REG9__gs_tbl_prim_cnt_r3_q__SHIFT 0x0000000b
+#define VGT_DEBUG_REG9__gs_tbl_state_r3_q_MASK 0x00380000L
+#define VGT_DEBUG_REG9__gs_tbl_state_r3_q__SHIFT 0x00000013
+#define VGT_DEBUG_REG9__indices_to_send_r2_q_MASK 0x00000003L
+#define VGT_DEBUG_REG9__indices_to_send_r2_q__SHIFT 0x00000000
+#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q_MASK 0x00800000L
+#define VGT_DEBUG_REG9__invalidate_rb_roll_over_q__SHIFT 0x00000017
+#define VGT_DEBUG_REG9__off_chip_hs_r2_q_MASK 0x80000000L
+#define VGT_DEBUG_REG9__off_chip_hs_r2_q__SHIFT 0x0000001f
+#define VGT_DEBUG_REG9__pending_es_flush_r3_MASK 0x00000200L
+#define VGT_DEBUG_REG9__pending_es_flush_r3__SHIFT 0x00000009
+#define VGT_DEBUG_REG9__pending_es_send_r3_q_MASK 0x00000100L
+#define VGT_DEBUG_REG9__pending_es_send_r3_q__SHIFT 0x00000008
+#define VGT_DEBUG_REG9__pre_r0_rtr_MASK 0x08000000L
+#define VGT_DEBUG_REG9__pre_r0_rtr__SHIFT 0x0000001b
+#define VGT_DEBUG_REG9__SPARE0_MASK 0x40000000L
+#define VGT_DEBUG_REG9__SPARE0__SHIFT 0x0000001e
+#define VGT_DEBUG_REG9__valid_indices_r3_MASK 0x00000004L
+#define VGT_DEBUG_REG9__valid_indices_r3__SHIFT 0x00000002
+#define VGT_DEBUG_REG9__valid_pre_r0_q_MASK 0x20000000L
+#define VGT_DEBUG_REG9__valid_pre_r0_q__SHIFT 0x0000001d
+#define VGT_DEBUG_REG9__valid_r3_q_MASK 0x10000000L
+#define VGT_DEBUG_REG9__valid_r3_q__SHIFT 0x0000001c
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xffffffffL
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x00000000
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x000000ffL
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x00000000
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001ffL
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DMA_INDEX_TYPE__ATC_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__ATC__SHIFT 0x00000008
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x00000004
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x00000000
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x00000009
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x000000c0L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x00000006
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0x0000000a
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000cL
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x00000002
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xffffffffL
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x00000000
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffffL
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x00000000
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003fL
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xffffffffL
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x00000000
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003fL
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x00000000
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000cL
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x00000002
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x00000005
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x00000000
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x00000004
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x00000006
+#define VGT_ENHANCE__MISC_MASK 0xffffffffL
+#define VGT_ENHANCE__MISC__SHIFT 0x00000000
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_ESGS_RING_SIZE__MEM_SIZE_MASK 0xffffffffL
+#define VGT_ESGS_RING_SIZE__MEM_SIZE__SHIFT 0x00000000
+#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007ffL
+#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x00000000
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0fffffffL
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x00000000
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07fc0000L
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0x00000012
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003fL
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x00000000
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x0000001b
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003fff00L
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x00000008
+#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
+#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x00000007
+#define VGT_FIFO_DEPTHS__RESERVED_1_MASK 0xffc00000L
+#define VGT_FIFO_DEPTHS__RESERVED_1__SHIFT 0x00000016
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007fL
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x00000000
+#define VGT_GROUP_DECR__DECR_MASK 0x0000000fL
+#define VGT_GROUP_DECR__DECR__SHIFT 0x00000000
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000fL
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x00000000
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x00000010
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001fL
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x00000000
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0x0000000e
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0x0000000f
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x00000003
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x00000000
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x00000001
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x00000002
+#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00ff0000L
+#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x00000010
+#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000ff00L
+#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x00000008
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0f000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x00000018
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xf0000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x0000001c
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000fL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x00000000
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000f0L
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x00000004
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000f00L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x00000008
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000f000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0x0000000c
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000f0000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x00000010
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00f00000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x00000014
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x00000003
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x00000000
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x00000001
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x00000002
+#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00ff0000L
+#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x00000010
+#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000ff00L
+#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x00000008
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0f000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x00000018
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xf0000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x0000001c
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000fL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x00000000
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000f0L
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x00000004
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000f00L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x00000008
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000f000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0x0000000c
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000f0000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x00000010
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00f00000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x00000014
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001fcL
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x00000002
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x00000000
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007ffL
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x00000000
+#define VGT_GS_MODE__COMPUTE_MODE_MASK 0x00004000L
+#define VGT_GS_MODE__COMPUTE_MODE__SHIFT 0x0000000e
+#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
+#define VGT_GS_MODE__CUT_MODE__SHIFT 0x00000004
+#define VGT_GS_MODE__ELEMENT_INFO_EN_MASK 0x00010000L
+#define VGT_GS_MODE__ELEMENT_INFO_EN__SHIFT 0x00000010
+#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
+#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0x0000000d
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x00000013
+#define VGT_GS_MODE__FAST_COMPUTE_MODE_MASK 0x00008000L
+#define VGT_GS_MODE__FAST_COMPUTE_MODE__SHIFT 0x0000000f
+#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
+#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0x0000000b
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x00000014
+#define VGT_GS_MODE__MODE_MASK 0x00000007L
+#define VGT_GS_MODE__MODE__SHIFT 0x00000000
+#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
+#define VGT_GS_MODE__ONCHIP__SHIFT 0x00000015
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x00000011
+#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
+#define VGT_GS_MODE__RESERVED_0__SHIFT 0x00000003
+#define VGT_GS_MODE__RESERVED_1_MASK 0x000007c0L
+#define VGT_GS_MODE__RESERVED_1__SHIFT 0x00000006
+#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
+#define VGT_GS_MODE__RESERVED_2__SHIFT 0x0000000c
+#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
+#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x00000012
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003f00L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x00000008
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003f0000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x00000010
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0fc00000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x00000016
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003fL
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x00000000
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x0000001f
+#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007ffL
+#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x00000000
+#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000fL
+#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x00000000
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001fL
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007fffL
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007fffL
+#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x00000000
+#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xffffffffL
+#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x00000000
+#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
+#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x00000000
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xffffffffL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x00000000
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xffffffffL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x00000000
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000ffL
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x00000000
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x0000007fL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x00000000
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x00000009
+#define VGT_IMMED_DATA__DATA_MASK 0xffffffffL
+#define VGT_IMMED_DATA__DATA__SHIFT 0x00000000
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x00000000
+#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xffffffffL
+#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x00000000
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xffffffffL
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x00000000
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xffffffffL
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x00000000
+#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
+#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x00000010
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x00000000
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003f00L
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x00000008
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000fc000L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0x0000000e
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000ffL
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x00000000
+#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xffffffffL
+#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x00000000
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x00000003L
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x00000000
+#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xffffffffL
+#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x00000000
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x00000000
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xffffffffL
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x00000000
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xffffffffL
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x00000000
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xffffffffL
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x00000000
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007fL
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x00000000
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xf0000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0f000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x00000018
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003ffL
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x00000000
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00f00000L
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x00000014
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0f000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x00000018
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000ffc00L
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0x0000000a
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xffffffffL
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xffffffffL
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x00000000
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xf0000000L
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x0000001c
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000ffL
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x00000000
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000ffL
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x00000000
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x00000001
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x00000000
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xffffffffL
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x00000000
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003fL
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x00000000
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x00000000
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x00000100L
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x00000008
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x00000003
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x00000005
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x00000002
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x00000000
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000c0L
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x00000006
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000fL
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000f0L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x00000004
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000f00L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x00000008
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000f000L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0x0000000c
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000f00L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x00000008
+#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x00000004
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x00000000
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x00000001
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x00000002
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x00000003
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x0000001f
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xffffffffL
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x00000000
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xffffffffL
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x00000000
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001ffL
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x00000000
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003ffL
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x00000000
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x00000007
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x00000000
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007eL
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x00000001
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xffffffffL
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x00000000
+#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
+#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x00000009
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0x0000000e
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x00003c00L
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0x0000000a
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001cL
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x00000002
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00018000L
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0x0000000f
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x00000008
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000e0L
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x00000005
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__TYPE__SHIFT 0x00000000
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000ffffL
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x00000000
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000ffL
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x00000000
+#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
+#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x00000000
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x000003ffL
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x00000000
+#define WD_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define WD_DEBUG_DATA__DATA__SHIFT 0x00000000
+
+#endif
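Each VGT register field in the header above is described by a *_MASK/*__SHIFT pair: the mask selects the field's bits and the shift gives its starting bit position, so a field is read by masking and shifting down, and written by the reverse. A minimal sketch of that pattern, assuming the header above is included (the helper names and the reg_val parameter are illustrative, not part of the header):

static inline unsigned int vgt_gs_mode_get_mode(unsigned int reg_val)
{
	/* isolate bits 2:0 (MODE) and shift them down to bit 0 */
	return (reg_val & VGT_GS_MODE__MODE_MASK) >> VGT_GS_MODE__MODE__SHIFT;
}

static inline unsigned int vgt_gs_mode_set_mode(unsigned int reg_val, unsigned int mode)
{
	/* clear the old MODE bits, then merge in the new value */
	reg_val &= ~VGT_GS_MODE__MODE_MASK;
	reg_val |= (mode << VGT_GS_MODE__MODE__SHIFT) & VGT_GS_MODE__MODE_MASK;
	return reg_val;
}
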
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h
new file mode 100644
index 000000000000..dc4e5b93801d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_d.h
@@ -0,0 +1,1274 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GMC_6_0_D_H
+#define GMC_6_0_D_H
+
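+/*
+ * Register offsets in this file use two prefixes, following the usual
+ * asic_reg naming: "mm" values are direct MMIO register offsets in dword
+ * units, while "ix" values index registers in an indirect register space
+ * (see the access-pattern note after the last ix* define below).
+ */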
+#define ixMC_IO_DEBUG_ACMD_CLKSEL_D0 0x00CE
+#define ixMC_IO_DEBUG_ACMD_CLKSEL_D1 0x00DE
+#define ixMC_IO_DEBUG_ACMD_MISC_D0 0x00AE
+#define ixMC_IO_DEBUG_ACMD_MISC_D1 0x00BE
+#define ixMC_IO_DEBUG_ACMD_OFSCAL_D0 0x00EE
+#define ixMC_IO_DEBUG_ACMD_OFSCAL_D1 0x00FE
+#define ixMC_IO_DEBUG_ACMD_RXPHASE_D0 0x010E
+#define ixMC_IO_DEBUG_ACMD_RXPHASE_D1 0x011E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PD_D0 0x018E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PD_D1 0x019E
+#define ixMC_IO_DEBUG_ACMD_TXBST_PU_D0 0x01AE
+#define ixMC_IO_DEBUG_ACMD_TXBST_PU_D1 0x01BE
+#define ixMC_IO_DEBUG_ACMD_TXPHASE_D0 0x012E
+#define ixMC_IO_DEBUG_ACMD_TXPHASE_D1 0x013E
+#define ixMC_IO_DEBUG_ACMD_TXSLF_D0 0x016E
+#define ixMC_IO_DEBUG_ACMD_TXSLF_D1 0x017E
+#define ixMC_IO_DEBUG_ADDRH_CLKSEL_D0 0x00CD
+#define ixMC_IO_DEBUG_ADDRH_CLKSEL_D1 0x00DD
+#define ixMC_IO_DEBUG_ADDRH_MISC_D0 0x00AD
+#define ixMC_IO_DEBUG_ADDRH_MISC_D1 0x00BD
+#define ixMC_IO_DEBUG_ADDRH_RXPHASE_D0 0x010D
+#define ixMC_IO_DEBUG_ADDRH_RXPHASE_D1 0x011D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PD_D0 0x018D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PD_D1 0x019D
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PU_D0 0x01AD
+#define ixMC_IO_DEBUG_ADDRH_TXBST_PU_D1 0x01BD
+#define ixMC_IO_DEBUG_ADDRH_TXPHASE_D0 0x012D
+#define ixMC_IO_DEBUG_ADDRH_TXPHASE_D1 0x013D
+#define ixMC_IO_DEBUG_ADDRH_TXSLF_D0 0x016D
+#define ixMC_IO_DEBUG_ADDRH_TXSLF_D1 0x017D
+#define ixMC_IO_DEBUG_ADDRL_CLKSEL_D0 0x00CC
+#define ixMC_IO_DEBUG_ADDRL_CLKSEL_D1 0x00DC
+#define ixMC_IO_DEBUG_ADDRL_MISC_D0 0x00AC
+#define ixMC_IO_DEBUG_ADDRL_MISC_D1 0x00BC
+#define ixMC_IO_DEBUG_ADDRL_RXPHASE_D0 0x010C
+#define ixMC_IO_DEBUG_ADDRL_RXPHASE_D1 0x011C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PD_D0 0x018C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PD_D1 0x019C
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PU_D0 0x01AC
+#define ixMC_IO_DEBUG_ADDRL_TXBST_PU_D1 0x01BC
+#define ixMC_IO_DEBUG_ADDRL_TXPHASE_D0 0x012C
+#define ixMC_IO_DEBUG_ADDRL_TXPHASE_D1 0x013C
+#define ixMC_IO_DEBUG_ADDRL_TXSLF_D0 0x016C
+#define ixMC_IO_DEBUG_ADDRL_TXSLF_D1 0x017C
+#define ixMC_IO_DEBUG_CK_CLKSEL_D0 0x00CB
+#define ixMC_IO_DEBUG_CK_CLKSEL_D1 0x00DB
+#define ixMC_IO_DEBUG_CK_MISC_D0 0x00AB
+#define ixMC_IO_DEBUG_CK_MISC_D1 0x00BB
+#define ixMC_IO_DEBUG_CK_RXPHASE_D0 0x010B
+#define ixMC_IO_DEBUG_CK_RXPHASE_D1 0x011B
+#define ixMC_IO_DEBUG_CK_TXBST_PD_D0 0x018B
+#define ixMC_IO_DEBUG_CK_TXBST_PD_D1 0x019B
+#define ixMC_IO_DEBUG_CK_TXBST_PU_D0 0x01AB
+#define ixMC_IO_DEBUG_CK_TXBST_PU_D1 0x01BB
+#define ixMC_IO_DEBUG_CK_TXPHASE_D0 0x012B
+#define ixMC_IO_DEBUG_CK_TXPHASE_D1 0x013B
+#define ixMC_IO_DEBUG_CK_TXSLF_D0 0x016B
+#define ixMC_IO_DEBUG_CK_TXSLF_D1 0x017B
+#define ixMC_IO_DEBUG_CMD_CLKSEL_D0 0x00CF
+#define ixMC_IO_DEBUG_CMD_CLKSEL_D1 0x00DF
+#define ixMC_IO_DEBUG_CMD_MISC_D0 0x00AF
+#define ixMC_IO_DEBUG_CMD_MISC_D1 0x00BF
+#define ixMC_IO_DEBUG_CMD_OFSCAL_D0 0x00EF
+#define ixMC_IO_DEBUG_CMD_OFSCAL_D1 0x00FF
+#define ixMC_IO_DEBUG_CMD_RX_EQ_D0 0x01CF
+#define ixMC_IO_DEBUG_CMD_RX_EQ_D1 0x01DF
+#define ixMC_IO_DEBUG_CMD_RXPHASE_D0 0x010F
+#define ixMC_IO_DEBUG_CMD_RXPHASE_D1 0x011F
+#define ixMC_IO_DEBUG_CMD_TXBST_PD_D0 0x018F
+#define ixMC_IO_DEBUG_CMD_TXBST_PD_D1 0x019F
+#define ixMC_IO_DEBUG_CMD_TXBST_PU_D0 0x01AF
+#define ixMC_IO_DEBUG_CMD_TXBST_PU_D1 0x01BF
+#define ixMC_IO_DEBUG_CMD_TXPHASE_D0 0x012F
+#define ixMC_IO_DEBUG_CMD_TXPHASE_D1 0x013F
+#define ixMC_IO_DEBUG_CMD_TXSLF_D0 0x016F
+#define ixMC_IO_DEBUG_CMD_TXSLF_D1 0x017F
+#define ixMC_IO_DEBUG_DBI_CDR_PHSIZE_D0 0x014F
+#define ixMC_IO_DEBUG_DBI_CDR_PHSIZE_D1 0x015F
+#define ixMC_IO_DEBUG_DBI_CLKSEL_D0 0x00C8
+#define ixMC_IO_DEBUG_DBI_CLKSEL_D1 0x00D8
+#define ixMC_IO_DEBUG_DBI_MISC_D0 0x00A8
+#define ixMC_IO_DEBUG_DBI_MISC_D1 0x00B8
+#define ixMC_IO_DEBUG_DBI_OFSCAL_D0 0x00E8
+#define ixMC_IO_DEBUG_DBI_OFSCAL_D1 0x00F8
+#define ixMC_IO_DEBUG_DBI_RX_EQ_D0 0x01C8
+#define ixMC_IO_DEBUG_DBI_RX_EQ_D1 0x01D8
+#define ixMC_IO_DEBUG_DBI_RXPHASE_D0 0x0108
+#define ixMC_IO_DEBUG_DBI_RXPHASE_D1 0x0118
+#define ixMC_IO_DEBUG_DBI_RX_VREF_CAL_D0 0x0148
+#define ixMC_IO_DEBUG_DBI_RX_VREF_CAL_D1 0x0158
+#define ixMC_IO_DEBUG_DBI_TXBST_PD_D0 0x0188
+#define ixMC_IO_DEBUG_DBI_TXBST_PD_D1 0x0198
+#define ixMC_IO_DEBUG_DBI_TXBST_PU_D0 0x01A8
+#define ixMC_IO_DEBUG_DBI_TXBST_PU_D1 0x01B8
+#define ixMC_IO_DEBUG_DBI_TXPHASE_D0 0x0128
+#define ixMC_IO_DEBUG_DBI_TXPHASE_D1 0x0138
+#define ixMC_IO_DEBUG_DBI_TXSLF_D0 0x0168
+#define ixMC_IO_DEBUG_DBI_TXSLF_D1 0x0178
+#define ixMC_IO_DEBUG_DQ0_RX_DYN_PM_D0 0x01CD
+#define ixMC_IO_DEBUG_DQ0_RX_DYN_PM_D1 0x01DD
+#define ixMC_IO_DEBUG_DQ0_RX_EQ_PM_D0 0x01CB
+#define ixMC_IO_DEBUG_DQ0_RX_EQ_PM_D1 0x01DB
+#define ixMC_IO_DEBUG_DQ1_RX_DYN_PM_D0 0x01CE
+#define ixMC_IO_DEBUG_DQ1_RX_DYN_PM_D1 0x01DE
+#define ixMC_IO_DEBUG_DQ1_RX_EQ_PM_D0 0x01CC
+#define ixMC_IO_DEBUG_DQ1_RX_EQ_PM_D1 0x01DC
+#define ixMC_IO_DEBUG_DQB0_CDR_PHSIZE_D0 0x014B
+#define ixMC_IO_DEBUG_DQB0_CDR_PHSIZE_D1 0x015B
+#define ixMC_IO_DEBUG_DQB0H_CLKSEL_D0 0x00C1
+#define ixMC_IO_DEBUG_DQB0H_CLKSEL_D1 0x00D1
+#define ixMC_IO_DEBUG_DQB0H_MISC_D0 0x00A1
+#define ixMC_IO_DEBUG_DQB0H_MISC_D1 0x00B1
+#define ixMC_IO_DEBUG_DQB0H_OFSCAL_D0 0x00E1
+#define ixMC_IO_DEBUG_DQB0H_OFSCAL_D1 0x00F1
+#define ixMC_IO_DEBUG_DQB0H_RX_EQ_D0 0x01C1
+#define ixMC_IO_DEBUG_DQB0H_RX_EQ_D1 0x01D1
+#define ixMC_IO_DEBUG_DQB0H_RXPHASE_D0 0x0101
+#define ixMC_IO_DEBUG_DQB0H_RXPHASE_D1 0x0111
+#define ixMC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0 0x0141
+#define ixMC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1 0x0151
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PD_D0 0x0181
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PD_D1 0x0191
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PU_D0 0x01A1
+#define ixMC_IO_DEBUG_DQB0H_TXBST_PU_D1 0x01B1
+#define ixMC_IO_DEBUG_DQB0H_TXPHASE_D0 0x0121
+#define ixMC_IO_DEBUG_DQB0H_TXPHASE_D1 0x0131
+#define ixMC_IO_DEBUG_DQB0H_TXSLF_D0 0x0161
+#define ixMC_IO_DEBUG_DQB0H_TXSLF_D1 0x0171
+#define ixMC_IO_DEBUG_DQB0L_CLKSEL_D0 0x00C0
+#define ixMC_IO_DEBUG_DQB0L_CLKSEL_D1 0x00D0
+#define ixMC_IO_DEBUG_DQB0L_MISC_D0 0x00A0
+#define ixMC_IO_DEBUG_DQB0L_MISC_D1 0x00B0
+#define ixMC_IO_DEBUG_DQB0L_OFSCAL_D0 0x00E0
+#define ixMC_IO_DEBUG_DQB0L_OFSCAL_D1 0x00F0
+#define ixMC_IO_DEBUG_DQB0L_RX_EQ_D0 0x01C0
+#define ixMC_IO_DEBUG_DQB0L_RX_EQ_D1 0x01D0
+#define ixMC_IO_DEBUG_DQB0L_RXPHASE_D0 0x0100
+#define ixMC_IO_DEBUG_DQB0L_RXPHASE_D1 0x0110
+#define ixMC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0 0x0140
+#define ixMC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1 0x0150
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PD_D0 0x0180
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PD_D1 0x0190
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PU_D0 0x01A0
+#define ixMC_IO_DEBUG_DQB0L_TXBST_PU_D1 0x01B0
+#define ixMC_IO_DEBUG_DQB0L_TXPHASE_D0 0x0120
+#define ixMC_IO_DEBUG_DQB0L_TXPHASE_D1 0x0130
+#define ixMC_IO_DEBUG_DQB0L_TXSLF_D0 0x0160
+#define ixMC_IO_DEBUG_DQB0L_TXSLF_D1 0x0170
+#define ixMC_IO_DEBUG_DQB1_CDR_PHSIZE_D0 0x014C
+#define ixMC_IO_DEBUG_DQB1_CDR_PHSIZE_D1 0x015C
+#define ixMC_IO_DEBUG_DQB1H_CLKSEL_D0 0x00C3
+#define ixMC_IO_DEBUG_DQB1H_CLKSEL_D1 0x00D3
+#define ixMC_IO_DEBUG_DQB1H_MISC_D0 0x00A3
+#define ixMC_IO_DEBUG_DQB1H_MISC_D1 0x00B3
+#define ixMC_IO_DEBUG_DQB1H_OFSCAL_D0 0x00E3
+#define ixMC_IO_DEBUG_DQB1H_OFSCAL_D1 0x00F3
+#define ixMC_IO_DEBUG_DQB1H_RX_EQ_D0 0x01C3
+#define ixMC_IO_DEBUG_DQB1H_RX_EQ_D1 0x01D3
+#define ixMC_IO_DEBUG_DQB1H_RXPHASE_D0 0x0103
+#define ixMC_IO_DEBUG_DQB1H_RXPHASE_D1 0x0113
+#define ixMC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0 0x0143
+#define ixMC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1 0x0153
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PD_D0 0x0183
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PD_D1 0x0193
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PU_D0 0x01A3
+#define ixMC_IO_DEBUG_DQB1H_TXBST_PU_D1 0x01B3
+#define ixMC_IO_DEBUG_DQB1H_TXPHASE_D0 0x0123
+#define ixMC_IO_DEBUG_DQB1H_TXPHASE_D1 0x0133
+#define ixMC_IO_DEBUG_DQB1H_TXSLF_D0 0x0163
+#define ixMC_IO_DEBUG_DQB1H_TXSLF_D1 0x0173
+#define ixMC_IO_DEBUG_DQB1L_CLKSEL_D0 0x00C2
+#define ixMC_IO_DEBUG_DQB1L_CLKSEL_D1 0x00D2
+#define ixMC_IO_DEBUG_DQB1L_MISC_D0 0x00A2
+#define ixMC_IO_DEBUG_DQB1L_MISC_D1 0x00B2
+#define ixMC_IO_DEBUG_DQB1L_OFSCAL_D0 0x00E2
+#define ixMC_IO_DEBUG_DQB1L_OFSCAL_D1 0x00F2
+#define ixMC_IO_DEBUG_DQB1L_RX_EQ_D0 0x01C2
+#define ixMC_IO_DEBUG_DQB1L_RX_EQ_D1 0x01D2
+#define ixMC_IO_DEBUG_DQB1L_RXPHASE_D0 0x0102
+#define ixMC_IO_DEBUG_DQB1L_RXPHASE_D1 0x0112
+#define ixMC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0 0x0142
+#define ixMC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1 0x0152
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PD_D0 0x0182
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PD_D1 0x0192
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PU_D0 0x01A2
+#define ixMC_IO_DEBUG_DQB1L_TXBST_PU_D1 0x01B2
+#define ixMC_IO_DEBUG_DQB1L_TXPHASE_D0 0x0122
+#define ixMC_IO_DEBUG_DQB1L_TXPHASE_D1 0x0132
+#define ixMC_IO_DEBUG_DQB1L_TXSLF_D0 0x0162
+#define ixMC_IO_DEBUG_DQB1L_TXSLF_D1 0x0172
+#define ixMC_IO_DEBUG_DQB2_CDR_PHSIZE_D0 0x014D
+#define ixMC_IO_DEBUG_DQB2_CDR_PHSIZE_D1 0x015D
+#define ixMC_IO_DEBUG_DQB2H_CLKSEL_D0 0x00C5
+#define ixMC_IO_DEBUG_DQB2H_CLKSEL_D1 0x00D5
+#define ixMC_IO_DEBUG_DQB2H_MISC_D0 0x00A5
+#define ixMC_IO_DEBUG_DQB2H_MISC_D1 0x00B5
+#define ixMC_IO_DEBUG_DQB2H_OFSCAL_D0 0x00E5
+#define ixMC_IO_DEBUG_DQB2H_OFSCAL_D1 0x00F5
+#define ixMC_IO_DEBUG_DQB2H_RX_EQ_D0 0x01C5
+#define ixMC_IO_DEBUG_DQB2H_RX_EQ_D1 0x01D5
+#define ixMC_IO_DEBUG_DQB2H_RXPHASE_D0 0x0105
+#define ixMC_IO_DEBUG_DQB2H_RXPHASE_D1 0x0115
+#define ixMC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0 0x0145
+#define ixMC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1 0x0155
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PD_D0 0x0185
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PD_D1 0x0195
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PU_D0 0x01A5
+#define ixMC_IO_DEBUG_DQB2H_TXBST_PU_D1 0x01B5
+#define ixMC_IO_DEBUG_DQB2H_TXPHASE_D0 0x0125
+#define ixMC_IO_DEBUG_DQB2H_TXPHASE_D1 0x0135
+#define ixMC_IO_DEBUG_DQB2H_TXSLF_D0 0x0165
+#define ixMC_IO_DEBUG_DQB2H_TXSLF_D1 0x0175
+#define ixMC_IO_DEBUG_DQB2L_CLKSEL_D0 0x00C4
+#define ixMC_IO_DEBUG_DQB2L_CLKSEL_D1 0x00D4
+#define ixMC_IO_DEBUG_DQB2L_MISC_D0 0x00A4
+#define ixMC_IO_DEBUG_DQB2L_MISC_D1 0x00B4
+#define ixMC_IO_DEBUG_DQB2L_OFSCAL_D0 0x00E4
+#define ixMC_IO_DEBUG_DQB2L_OFSCAL_D1 0x00F4
+#define ixMC_IO_DEBUG_DQB2L_RX_EQ_D0 0x01C4
+#define ixMC_IO_DEBUG_DQB2L_RX_EQ_D1 0x01D4
+#define ixMC_IO_DEBUG_DQB2L_RXPHASE_D0 0x0104
+#define ixMC_IO_DEBUG_DQB2L_RXPHASE_D1 0x0114
+#define ixMC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0 0x0144
+#define ixMC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1 0x0154
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PD_D0 0x0184
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PD_D1 0x0194
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PU_D0 0x01A4
+#define ixMC_IO_DEBUG_DQB2L_TXBST_PU_D1 0x01B4
+#define ixMC_IO_DEBUG_DQB2L_TXPHASE_D0 0x0124
+#define ixMC_IO_DEBUG_DQB2L_TXPHASE_D1 0x0134
+#define ixMC_IO_DEBUG_DQB2L_TXSLF_D0 0x0164
+#define ixMC_IO_DEBUG_DQB2L_TXSLF_D1 0x0174
+#define ixMC_IO_DEBUG_DQB3_CDR_PHSIZE_D0 0x014E
+#define ixMC_IO_DEBUG_DQB3_CDR_PHSIZE_D1 0x015E
+#define ixMC_IO_DEBUG_DQB3H_CLKSEL_D0 0x00C7
+#define ixMC_IO_DEBUG_DQB3H_CLKSEL_D1 0x00D7
+#define ixMC_IO_DEBUG_DQB3H_MISC_D0 0x00A7
+#define ixMC_IO_DEBUG_DQB3H_MISC_D1 0x00B7
+#define ixMC_IO_DEBUG_DQB3H_OFSCAL_D0 0x00E7
+#define ixMC_IO_DEBUG_DQB3H_OFSCAL_D1 0x00F7
+#define ixMC_IO_DEBUG_DQB3H_RX_EQ_D0 0x01C7
+#define ixMC_IO_DEBUG_DQB3H_RX_EQ_D1 0x01D7
+#define ixMC_IO_DEBUG_DQB3H_RXPHASE_D0 0x0107
+#define ixMC_IO_DEBUG_DQB3H_RXPHASE_D1 0x0117
+#define ixMC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0 0x0147
+#define ixMC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1 0x0157
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PD_D0 0x0187
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PD_D1 0x0197
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PU_D0 0x01A7
+#define ixMC_IO_DEBUG_DQB3H_TXBST_PU_D1 0x01B7
+#define ixMC_IO_DEBUG_DQB3H_TXPHASE_D0 0x0127
+#define ixMC_IO_DEBUG_DQB3H_TXPHASE_D1 0x0137
+#define ixMC_IO_DEBUG_DQB3H_TXSLF_D0 0x0167
+#define ixMC_IO_DEBUG_DQB3H_TXSLF_D1 0x0177
+#define ixMC_IO_DEBUG_DQB3L_CLKSEL_D0 0x00C6
+#define ixMC_IO_DEBUG_DQB3L_CLKSEL_D1 0x00D6
+#define ixMC_IO_DEBUG_DQB3L_MISC_D0 0x00A6
+#define ixMC_IO_DEBUG_DQB3L_MISC_D1 0x00B6
+#define ixMC_IO_DEBUG_DQB3L_OFSCAL_D0 0x00E6
+#define ixMC_IO_DEBUG_DQB3L_OFSCAL_D1 0x00F6
+#define ixMC_IO_DEBUG_DQB3L_RX_EQ_D0 0x01C6
+#define ixMC_IO_DEBUG_DQB3L_RX_EQ_D1 0x01D6
+#define ixMC_IO_DEBUG_DQB3L_RXPHASE_D0 0x0106
+#define ixMC_IO_DEBUG_DQB3L_RXPHASE_D1 0x0116
+#define ixMC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0 0x0146
+#define ixMC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1 0x0156
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PD_D0 0x0186
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PD_D1 0x0196
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PU_D0 0x01A6
+#define ixMC_IO_DEBUG_DQB3L_TXBST_PU_D1 0x01B6
+#define ixMC_IO_DEBUG_DQB3L_TXPHASE_D0 0x0126
+#define ixMC_IO_DEBUG_DQB3L_TXPHASE_D1 0x0136
+#define ixMC_IO_DEBUG_DQB3L_TXSLF_D0 0x0166
+#define ixMC_IO_DEBUG_DQB3L_TXSLF_D1 0x0176
+#define ixMC_IO_DEBUG_EDC_CDR_PHSIZE_D0 0x00ED
+#define ixMC_IO_DEBUG_EDC_CDR_PHSIZE_D1 0x00FD
+#define ixMC_IO_DEBUG_EDC_CLKSEL_D0 0x00C9
+#define ixMC_IO_DEBUG_EDC_CLKSEL_D1 0x00D9
+#define ixMC_IO_DEBUG_EDC_MISC_D0 0x00A9
+#define ixMC_IO_DEBUG_EDC_MISC_D1 0x00B9
+#define ixMC_IO_DEBUG_EDC_OFSCAL_D0 0x00E9
+#define ixMC_IO_DEBUG_EDC_OFSCAL_D1 0x00F9
+#define ixMC_IO_DEBUG_EDC_RX_DYN_PM_D0 0x00EC
+#define ixMC_IO_DEBUG_EDC_RX_DYN_PM_D1 0x00FC
+#define ixMC_IO_DEBUG_EDC_RX_EQ_D0 0x01C9
+#define ixMC_IO_DEBUG_EDC_RX_EQ_D1 0x01D9
+#define ixMC_IO_DEBUG_EDC_RX_EQ_PM_D0 0x00EB
+#define ixMC_IO_DEBUG_EDC_RX_EQ_PM_D1 0x00FB
+#define ixMC_IO_DEBUG_EDC_RXPHASE_D0 0x0109
+#define ixMC_IO_DEBUG_EDC_RXPHASE_D1 0x0119
+#define ixMC_IO_DEBUG_EDC_RX_VREF_CAL_D0 0x0149
+#define ixMC_IO_DEBUG_EDC_RX_VREF_CAL_D1 0x0159
+#define ixMC_IO_DEBUG_EDC_TXBST_PD_D0 0x0189
+#define ixMC_IO_DEBUG_EDC_TXBST_PD_D1 0x0199
+#define ixMC_IO_DEBUG_EDC_TXBST_PU_D0 0x01A9
+#define ixMC_IO_DEBUG_EDC_TXBST_PU_D1 0x01B9
+#define ixMC_IO_DEBUG_EDC_TXPHASE_D0 0x0129
+#define ixMC_IO_DEBUG_EDC_TXPHASE_D1 0x0139
+#define ixMC_IO_DEBUG_EDC_TXSLF_D0 0x0169
+#define ixMC_IO_DEBUG_EDC_TXSLF_D1 0x0179
+#define ixMC_IO_DEBUG_UP_0 0x0000
+#define ixMC_IO_DEBUG_UP_100 0x0064
+#define ixMC_IO_DEBUG_UP_10 0x000A
+#define ixMC_IO_DEBUG_UP_101 0x0065
+#define ixMC_IO_DEBUG_UP_102 0x0066
+#define ixMC_IO_DEBUG_UP_103 0x0067
+#define ixMC_IO_DEBUG_UP_104 0x0068
+#define ixMC_IO_DEBUG_UP_105 0x0069
+#define ixMC_IO_DEBUG_UP_106 0x006A
+#define ixMC_IO_DEBUG_UP_107 0x006B
+#define ixMC_IO_DEBUG_UP_108 0x006C
+#define ixMC_IO_DEBUG_UP_109 0x006D
+#define ixMC_IO_DEBUG_UP_1 0x0001
+#define ixMC_IO_DEBUG_UP_110 0x006E
+#define ixMC_IO_DEBUG_UP_11 0x000B
+#define ixMC_IO_DEBUG_UP_111 0x006F
+#define ixMC_IO_DEBUG_UP_112 0x0070
+#define ixMC_IO_DEBUG_UP_113 0x0071
+#define ixMC_IO_DEBUG_UP_114 0x0072
+#define ixMC_IO_DEBUG_UP_115 0x0073
+#define ixMC_IO_DEBUG_UP_116 0x0074
+#define ixMC_IO_DEBUG_UP_117 0x0075
+#define ixMC_IO_DEBUG_UP_118 0x0076
+#define ixMC_IO_DEBUG_UP_119 0x0077
+#define ixMC_IO_DEBUG_UP_120 0x0078
+#define ixMC_IO_DEBUG_UP_12 0x000C
+#define ixMC_IO_DEBUG_UP_121 0x0079
+#define ixMC_IO_DEBUG_UP_122 0x007A
+#define ixMC_IO_DEBUG_UP_123 0x007B
+#define ixMC_IO_DEBUG_UP_124 0x007C
+#define ixMC_IO_DEBUG_UP_125 0x007D
+#define ixMC_IO_DEBUG_UP_126 0x007E
+#define ixMC_IO_DEBUG_UP_127 0x007F
+#define ixMC_IO_DEBUG_UP_128 0x0080
+#define ixMC_IO_DEBUG_UP_129 0x0081
+#define ixMC_IO_DEBUG_UP_130 0x0082
+#define ixMC_IO_DEBUG_UP_13 0x000D
+#define ixMC_IO_DEBUG_UP_131 0x0083
+#define ixMC_IO_DEBUG_UP_132 0x0084
+#define ixMC_IO_DEBUG_UP_133 0x0085
+#define ixMC_IO_DEBUG_UP_134 0x0086
+#define ixMC_IO_DEBUG_UP_135 0x0087
+#define ixMC_IO_DEBUG_UP_136 0x0088
+#define ixMC_IO_DEBUG_UP_137 0x0089
+#define ixMC_IO_DEBUG_UP_138 0x008A
+#define ixMC_IO_DEBUG_UP_139 0x008B
+#define ixMC_IO_DEBUG_UP_140 0x008C
+#define ixMC_IO_DEBUG_UP_14 0x000E
+#define ixMC_IO_DEBUG_UP_141 0x008D
+#define ixMC_IO_DEBUG_UP_142 0x008E
+#define ixMC_IO_DEBUG_UP_143 0x008F
+#define ixMC_IO_DEBUG_UP_144 0x0090
+#define ixMC_IO_DEBUG_UP_145 0x0091
+#define ixMC_IO_DEBUG_UP_146 0x0092
+#define ixMC_IO_DEBUG_UP_147 0x0093
+#define ixMC_IO_DEBUG_UP_148 0x0094
+#define ixMC_IO_DEBUG_UP_149 0x0095
+#define ixMC_IO_DEBUG_UP_150 0x0096
+#define ixMC_IO_DEBUG_UP_15 0x000F
+#define ixMC_IO_DEBUG_UP_151 0x0097
+#define ixMC_IO_DEBUG_UP_152 0x0098
+#define ixMC_IO_DEBUG_UP_153 0x0099
+#define ixMC_IO_DEBUG_UP_154 0x009A
+#define ixMC_IO_DEBUG_UP_155 0x009B
+#define ixMC_IO_DEBUG_UP_156 0x009C
+#define ixMC_IO_DEBUG_UP_157 0x009D
+#define ixMC_IO_DEBUG_UP_158 0x009E
+#define ixMC_IO_DEBUG_UP_159 0x009F
+#define ixMC_IO_DEBUG_UP_16 0x0010
+#define ixMC_IO_DEBUG_UP_17 0x0011
+#define ixMC_IO_DEBUG_UP_18 0x0012
+#define ixMC_IO_DEBUG_UP_19 0x0013
+#define ixMC_IO_DEBUG_UP_20 0x0014
+#define ixMC_IO_DEBUG_UP_2 0x0002
+#define ixMC_IO_DEBUG_UP_21 0x0015
+#define ixMC_IO_DEBUG_UP_22 0x0016
+#define ixMC_IO_DEBUG_UP_23 0x0017
+#define ixMC_IO_DEBUG_UP_24 0x0018
+#define ixMC_IO_DEBUG_UP_25 0x0019
+#define ixMC_IO_DEBUG_UP_26 0x001A
+#define ixMC_IO_DEBUG_UP_27 0x001B
+#define ixMC_IO_DEBUG_UP_28 0x001C
+#define ixMC_IO_DEBUG_UP_29 0x001D
+#define ixMC_IO_DEBUG_UP_30 0x001E
+#define ixMC_IO_DEBUG_UP_3 0x0003
+#define ixMC_IO_DEBUG_UP_31 0x001F
+#define ixMC_IO_DEBUG_UP_32 0x0020
+#define ixMC_IO_DEBUG_UP_33 0x0021
+#define ixMC_IO_DEBUG_UP_34 0x0022
+#define ixMC_IO_DEBUG_UP_35 0x0023
+#define ixMC_IO_DEBUG_UP_36 0x0024
+#define ixMC_IO_DEBUG_UP_37 0x0025
+#define ixMC_IO_DEBUG_UP_38 0x0026
+#define ixMC_IO_DEBUG_UP_39 0x0027
+#define ixMC_IO_DEBUG_UP_40 0x0028
+#define ixMC_IO_DEBUG_UP_4 0x0004
+#define ixMC_IO_DEBUG_UP_41 0x0029
+#define ixMC_IO_DEBUG_UP_42 0x002A
+#define ixMC_IO_DEBUG_UP_43 0x002B
+#define ixMC_IO_DEBUG_UP_44 0x002C
+#define ixMC_IO_DEBUG_UP_45 0x002D
+#define ixMC_IO_DEBUG_UP_46 0x002E
+#define ixMC_IO_DEBUG_UP_47 0x002F
+#define ixMC_IO_DEBUG_UP_48 0x0030
+#define ixMC_IO_DEBUG_UP_49 0x0031
+#define ixMC_IO_DEBUG_UP_50 0x0032
+#define ixMC_IO_DEBUG_UP_5 0x0005
+#define ixMC_IO_DEBUG_UP_51 0x0033
+#define ixMC_IO_DEBUG_UP_52 0x0034
+#define ixMC_IO_DEBUG_UP_53 0x0035
+#define ixMC_IO_DEBUG_UP_54 0x0036
+#define ixMC_IO_DEBUG_UP_55 0x0037
+#define ixMC_IO_DEBUG_UP_56 0x0038
+#define ixMC_IO_DEBUG_UP_57 0x0039
+#define ixMC_IO_DEBUG_UP_58 0x003A
+#define ixMC_IO_DEBUG_UP_59 0x003B
+#define ixMC_IO_DEBUG_UP_60 0x003C
+#define ixMC_IO_DEBUG_UP_6 0x0006
+#define ixMC_IO_DEBUG_UP_61 0x003D
+#define ixMC_IO_DEBUG_UP_62 0x003E
+#define ixMC_IO_DEBUG_UP_63 0x003F
+#define ixMC_IO_DEBUG_UP_64 0x0040
+#define ixMC_IO_DEBUG_UP_65 0x0041
+#define ixMC_IO_DEBUG_UP_66 0x0042
+#define ixMC_IO_DEBUG_UP_67 0x0043
+#define ixMC_IO_DEBUG_UP_68 0x0044
+#define ixMC_IO_DEBUG_UP_69 0x0045
+#define ixMC_IO_DEBUG_UP_70 0x0046
+#define ixMC_IO_DEBUG_UP_7 0x0007
+#define ixMC_IO_DEBUG_UP_71 0x0047
+#define ixMC_IO_DEBUG_UP_72 0x0048
+#define ixMC_IO_DEBUG_UP_73 0x0049
+#define ixMC_IO_DEBUG_UP_74 0x004A
+#define ixMC_IO_DEBUG_UP_75 0x004B
+#define ixMC_IO_DEBUG_UP_76 0x004C
+#define ixMC_IO_DEBUG_UP_77 0x004D
+#define ixMC_IO_DEBUG_UP_78 0x004E
+#define ixMC_IO_DEBUG_UP_79 0x004F
+#define ixMC_IO_DEBUG_UP_80 0x0050
+#define ixMC_IO_DEBUG_UP_8 0x0008
+#define ixMC_IO_DEBUG_UP_81 0x0051
+#define ixMC_IO_DEBUG_UP_82 0x0052
+#define ixMC_IO_DEBUG_UP_83 0x0053
+#define ixMC_IO_DEBUG_UP_84 0x0054
+#define ixMC_IO_DEBUG_UP_85 0x0055
+#define ixMC_IO_DEBUG_UP_86 0x0056
+#define ixMC_IO_DEBUG_UP_87 0x0057
+#define ixMC_IO_DEBUG_UP_88 0x0058
+#define ixMC_IO_DEBUG_UP_89 0x0059
+#define ixMC_IO_DEBUG_UP_90 0x005A
+#define ixMC_IO_DEBUG_UP_9 0x0009
+#define ixMC_IO_DEBUG_UP_91 0x005B
+#define ixMC_IO_DEBUG_UP_92 0x005C
+#define ixMC_IO_DEBUG_UP_93 0x005D
+#define ixMC_IO_DEBUG_UP_94 0x005E
+#define ixMC_IO_DEBUG_UP_95 0x005F
+#define ixMC_IO_DEBUG_UP_96 0x0060
+#define ixMC_IO_DEBUG_UP_97 0x0061
+#define ixMC_IO_DEBUG_UP_98 0x0062
+#define ixMC_IO_DEBUG_UP_99 0x0063
+#define ixMC_IO_DEBUG_WCDR_CDR_PHSIZE_D0 0x01EA
+#define ixMC_IO_DEBUG_WCDR_CDR_PHSIZE_D1 0x01FA
+#define ixMC_IO_DEBUG_WCDR_CLKSEL_D0 0x01E1
+#define ixMC_IO_DEBUG_WCDR_CLKSEL_D1 0x01F1
+#define ixMC_IO_DEBUG_WCDR_MISC_D0 0x01E0
+#define ixMC_IO_DEBUG_WCDR_MISC_D1 0x01F0
+#define ixMC_IO_DEBUG_WCDR_OFSCAL_D0 0x01E2
+#define ixMC_IO_DEBUG_WCDR_OFSCAL_D1 0x01F2
+#define ixMC_IO_DEBUG_WCDR_RX_DYN_PM_D0 0x01EC
+#define ixMC_IO_DEBUG_WCDR_RX_DYN_PM_D1 0x01FC
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_D0 0x01E9
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_D1 0x01F9
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_PM_D0 0x01EB
+#define ixMC_IO_DEBUG_WCDR_RX_EQ_PM_D1 0x01FB
+#define ixMC_IO_DEBUG_WCDR_RXPHASE_D0 0x01E3
+#define ixMC_IO_DEBUG_WCDR_RXPHASE_D1 0x01F3
+#define ixMC_IO_DEBUG_WCDR_RX_VREF_CAL_D0 0x01E5
+#define ixMC_IO_DEBUG_WCDR_RX_VREF_CAL_D1 0x01F5
+#define ixMC_IO_DEBUG_WCDR_TXBST_PD_D0 0x01E7
+#define ixMC_IO_DEBUG_WCDR_TXBST_PD_D1 0x01F7
+#define ixMC_IO_DEBUG_WCDR_TXBST_PU_D0 0x01E8
+#define ixMC_IO_DEBUG_WCDR_TXBST_PU_D1 0x01F8
+#define ixMC_IO_DEBUG_WCDR_TXPHASE_D0 0x01E4
+#define ixMC_IO_DEBUG_WCDR_TXPHASE_D1 0x01F4
+#define ixMC_IO_DEBUG_WCDR_TXSLF_D0 0x01E6
+#define ixMC_IO_DEBUG_WCDR_TXSLF_D1 0x01F6
+#define ixMC_IO_DEBUG_WCK_CLKSEL_D0 0x00CA
+#define ixMC_IO_DEBUG_WCK_CLKSEL_D1 0x00DA
+#define ixMC_IO_DEBUG_WCK_MISC_D0 0x00AA
+#define ixMC_IO_DEBUG_WCK_MISC_D1 0x00BA
+#define ixMC_IO_DEBUG_WCK_OFSCAL_D0 0x00EA
+#define ixMC_IO_DEBUG_WCK_OFSCAL_D1 0x00FA
+#define ixMC_IO_DEBUG_WCK_RX_EQ_D0 0x01CA
+#define ixMC_IO_DEBUG_WCK_RX_EQ_D1 0x01DA
+#define ixMC_IO_DEBUG_WCK_RXPHASE_D0 0x010A
+#define ixMC_IO_DEBUG_WCK_RXPHASE_D1 0x011A
+#define ixMC_IO_DEBUG_WCK_RX_VREF_CAL_D0 0x014A
+#define ixMC_IO_DEBUG_WCK_RX_VREF_CAL_D1 0x015A
+#define ixMC_IO_DEBUG_WCK_TXBST_PD_D0 0x018A
+#define ixMC_IO_DEBUG_WCK_TXBST_PD_D1 0x019A
+#define ixMC_IO_DEBUG_WCK_TXBST_PU_D0 0x01AA
+#define ixMC_IO_DEBUG_WCK_TXBST_PU_D1 0x01BA
+#define ixMC_IO_DEBUG_WCK_TXPHASE_D0 0x012A
+#define ixMC_IO_DEBUG_WCK_TXPHASE_D1 0x013A
+#define ixMC_IO_DEBUG_WCK_TXSLF_D0 0x016A
+#define ixMC_IO_DEBUG_WCK_TXSLF_D1 0x017A
+#define ixMC_TSM_DEBUG_BCNT0 0x0003
+#define ixMC_TSM_DEBUG_BCNT10 0x000D
+#define ixMC_TSM_DEBUG_BCNT1 0x0004
+#define ixMC_TSM_DEBUG_BCNT2 0x0005
+#define ixMC_TSM_DEBUG_BCNT3 0x0006
+#define ixMC_TSM_DEBUG_BCNT4 0x0007
+#define ixMC_TSM_DEBUG_BCNT5 0x0008
+#define ixMC_TSM_DEBUG_BCNT6 0x0009
+#define ixMC_TSM_DEBUG_BCNT7 0x000A
+#define ixMC_TSM_DEBUG_BCNT8 0x000B
+#define ixMC_TSM_DEBUG_BCNT9 0x000C
+#define ixMC_TSM_DEBUG_BKPT 0x0013
+#define ixMC_TSM_DEBUG_FLAG 0x0001
+#define ixMC_TSM_DEBUG_GCNT 0x0000
+#define ixMC_TSM_DEBUG_MISC 0x0002
+#define ixMC_TSM_DEBUG_ST01 0x0010
+#define ixMC_TSM_DEBUG_ST23 0x0011
+#define ixMC_TSM_DEBUG_ST45 0x0012
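+/*
+ * The ix* offsets above are not mapped directly into MMIO space; such
+ * registers are typically reached through an index/data register pair,
+ * roughly as sketched here (the MC_SEQ_IO_DEBUG_INDEX/DATA pair and the
+ * WREG32/RREG32 helpers are assumptions for illustration, not defined in
+ * this file):
+ *
+ *	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_0);
+ *	val = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
+ */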
+#define mmATC_ATS_CNTL 0x0CC9
+#define mmATC_ATS_DEBUG 0x0CCA
+#define mmATC_ATS_DEFAULT_PAGE_CNTL 0x0CD1
+#define mmATC_ATS_DEFAULT_PAGE_LOW 0x0CD0
+#define mmATC_ATS_FAULT_CNTL 0x0CCD
+#define mmATC_ATS_FAULT_DEBUG 0x0CCB
+#define mmATC_ATS_FAULT_STATUS_ADDR 0x0CCF
+#define mmATC_ATS_FAULT_STATUS_INFO 0x0CCE
+#define mmATC_ATS_STATUS 0x0CCC
+#define mmATC_L1_ADDRESS_OFFSET 0x0CDD
+#define mmATC_L1_CNTL 0x0CDC
+#define mmATC_L1RD_DEBUG_TLB 0x0CDE
+#define mmATC_L1RD_STATUS 0x0CE0
+#define mmATC_L1WR_DEBUG_TLB 0x0CDF
+#define mmATC_L1WR_STATUS 0x0CE1
+#define mmATC_L2_CNTL 0x0CD5
+#define mmATC_L2_DEBUG 0x0CD7
+#define mmATC_MISC_CG 0x0CD4
+#define mmATC_VM_APERTURE0_CNTL 0x0CC4
+#define mmATC_VM_APERTURE0_CNTL2 0x0CC6
+#define mmATC_VM_APERTURE0_HIGH_ADDR 0x0CC2
+#define mmATC_VM_APERTURE0_LOW_ADDR 0x0CC0
+#define mmATC_VM_APERTURE1_CNTL 0x0CC5
+#define mmATC_VM_APERTURE1_CNTL2 0x0CC7
+#define mmATC_VM_APERTURE1_HIGH_ADDR 0x0CC3
+#define mmATC_VM_APERTURE1_LOW_ADDR 0x0CC1
+#define mmATC_VMID0_PASID_MAPPING 0x0CE7
+#define mmATC_VMID10_PASID_MAPPING 0x0CF1
+#define mmATC_VMID11_PASID_MAPPING 0x0CF2
+#define mmATC_VMID12_PASID_MAPPING 0x0CF3
+#define mmATC_VMID13_PASID_MAPPING 0x0CF4
+#define mmATC_VMID14_PASID_MAPPING 0x0CF5
+#define mmATC_VMID15_PASID_MAPPING 0x0CF6
+#define mmATC_VMID1_PASID_MAPPING 0x0CE8
+#define mmATC_VMID2_PASID_MAPPING 0x0CE9
+#define mmATC_VMID3_PASID_MAPPING 0x0CEA
+#define mmATC_VMID4_PASID_MAPPING 0x0CEB
+#define mmATC_VMID5_PASID_MAPPING 0x0CEC
+#define mmATC_VMID6_PASID_MAPPING 0x0CED
+#define mmATC_VMID7_PASID_MAPPING 0x0CEE
+#define mmATC_VMID8_PASID_MAPPING 0x0CEF
+#define mmATC_VMID9_PASID_MAPPING 0x0CF0
+#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x0CE6
+#define mmCC_MC_MAX_CHANNEL 0x096E
+#define mmDLL_CNTL 0x0AE9
+#define mmGMCON_DEBUG 0x0D5F
+#define mmGMCON_MISC 0x0D43
+#define mmGMCON_MISC2 0x0D44
+#define mmGMCON_MISC3 0x0D51
+#define mmGMCON_PERF_MON_CNTL0 0x0D4A
+#define mmGMCON_PERF_MON_CNTL1 0x0D4B
+#define mmGMCON_PERF_MON_RSLT0 0x0D4C
+#define mmGMCON_PERF_MON_RSLT1 0x0D4D
+#define mmGMCON_PGFSM_CONFIG 0x0D4E
+#define mmGMCON_PGFSM_READ 0x0D50
+#define mmGMCON_PGFSM_WRITE 0x0D4F
+#define mmGMCON_RENG_EXECUTE 0x0D42
+#define mmGMCON_RENG_RAM_DATA 0x0D41
+#define mmGMCON_RENG_RAM_INDEX 0x0D40
+#define mmGMCON_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0D48
+#define mmGMCON_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0D49
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE0 0x0D45
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE1 0x0D46
+#define mmGMCON_STCTRL_REGISTER_SAVE_RANGE2 0x0D47
+#define mmMC_ARB_ADDR_HASH 0x09DC
+#define mmMC_ARB_AGE_RD 0x09E9
+#define mmMC_ARB_AGE_WR 0x09EA
+#define mmMC_ARB_BANKMAP 0x09D7
+#define mmMC_ARB_BURST_TIME 0x0A02
+#define mmMC_ARB_CAC_CNTL 0x09D4
+#define mmMC_ARB_CG 0x09FA
+#define mmMC_ARB_DRAM_TIMING 0x09DD
+#define mmMC_ARB_DRAM_TIMING_1 0x09FC
+#define mmMC_ARB_DRAM_TIMING2 0x09DE
+#define mmMC_ARB_DRAM_TIMING2_1 0x09FF
+#define mmMC_ARB_FED_CNTL 0x09C1
+#define mmMC_ARB_GDEC_RD_CNTL 0x09EE
+#define mmMC_ARB_GDEC_WR_CNTL 0x09EF
+#define mmMC_ARB_GECC2 0x09C9
+#define mmMC_ARB_GECC2_CLI 0x09CA
+#define mmMC_ARB_GECC2_DEBUG 0x09C4
+#define mmMC_ARB_GECC2_DEBUG2 0x09C5
+#define mmMC_ARB_GECC2_MISC 0x09C3
+#define mmMC_ARB_GECC2_STATUS 0x09C2
+#define mmMC_ARB_LAZY0_RD 0x09E5
+#define mmMC_ARB_LAZY0_WR 0x09E6
+#define mmMC_ARB_LAZY1_RD 0x09E7
+#define mmMC_ARB_LAZY1_WR 0x09E8
+#define mmMC_ARB_LM_RD 0x09F0
+#define mmMC_ARB_LM_WR 0x09F1
+#define mmMC_ARB_MINCLKS 0x09DA
+#define mmMC_ARB_MISC 0x09D6
+#define mmMC_ARB_MISC2 0x09D5
+#define mmMC_ARB_PM_CNTL 0x09ED
+#define mmMC_ARB_POP 0x09D9
+#define mmMC_ARB_RAMCFG 0x09D8
+#define mmMC_ARB_REMREQ 0x09F2
+#define mmMC_ARB_REPLAY 0x09F3
+#define mmMC_ARB_RET_CREDITS_RD 0x09F4
+#define mmMC_ARB_RET_CREDITS_WR 0x09F5
+#define mmMC_ARB_RFSH_CNTL 0x09EB
+#define mmMC_ARB_RFSH_RATE 0x09EC
+#define mmMC_ARB_RTT_CNTL0 0x09D0
+#define mmMC_ARB_RTT_CNTL1 0x09D1
+#define mmMC_ARB_RTT_CNTL2 0x09D2
+#define mmMC_ARB_RTT_DATA 0x09CF
+#define mmMC_ARB_RTT_DEBUG 0x09D3
+#define mmMC_ARB_SQM_CNTL 0x09DB
+#define mmMC_ARB_TM_CNTL_RD 0x09E3
+#define mmMC_ARB_TM_CNTL_WR 0x09E4
+#define mmMC_ARB_WCDR 0x09FB
+#define mmMC_ARB_WCDR_2 0x09CE
+#define mmMC_ARB_WTM_CNTL_RD 0x09DF
+#define mmMC_ARB_WTM_CNTL_WR 0x09E0
+#define mmMC_ARB_WTM_GRPWT_RD 0x09E1
+#define mmMC_ARB_WTM_GRPWT_WR 0x09E2
+#define mmMC_BIST_AUTO_CNTL 0x0A06
+#define mmMC_BIST_CMD_CNTL 0x0A8E
+#define mmMC_BIST_CMP_CNTL 0x0A8D
+#define mmMC_BIST_CMP_CNTL_2 0x0AB6
+#define mmMC_BIST_CNTL 0x0A05
+#define mmMC_BIST_DATA_MASK 0x0A12
+#define mmMC_BIST_DATA_WORD0 0x0A0A
+#define mmMC_BIST_DATA_WORD1 0x0A0B
+#define mmMC_BIST_DATA_WORD2 0x0A0C
+#define mmMC_BIST_DATA_WORD3 0x0A0D
+#define mmMC_BIST_DATA_WORD4 0x0A0E
+#define mmMC_BIST_DATA_WORD5 0x0A0F
+#define mmMC_BIST_DATA_WORD6 0x0A10
+#define mmMC_BIST_DATA_WORD7 0x0A11
+#define mmMC_BIST_DIR_CNTL 0x0A07
+#define mmMC_BIST_EADDR 0x0A09
+#define mmMC_BIST_MISMATCH_ADDR 0x0A13
+#define mmMC_BIST_RDATA_EDC 0x0A1D
+#define mmMC_BIST_RDATA_MASK 0x0A1C
+#define mmMC_BIST_RDATA_WORD0 0x0A14
+#define mmMC_BIST_RDATA_WORD1 0x0A15
+#define mmMC_BIST_RDATA_WORD2 0x0A16
+#define mmMC_BIST_RDATA_WORD3 0x0A17
+#define mmMC_BIST_RDATA_WORD4 0x0A18
+#define mmMC_BIST_RDATA_WORD5 0x0A19
+#define mmMC_BIST_RDATA_WORD6 0x0A1A
+#define mmMC_BIST_RDATA_WORD7 0x0A1B
+#define mmMC_BIST_SADDR 0x0A08
+#define mmMC_CG_CONFIG 0x096F
+#define mmMC_CG_CONFIG_MCD 0x0829
+#define mmMC_CG_DATAPORT 0x0A21
+#define mmMC_CITF_CNTL 0x0970
+#define mmMC_CITF_CREDITS_ARB_RD 0x0972
+#define mmMC_CITF_CREDITS_ARB_WR 0x0973
+#define mmMC_CITF_CREDITS_VM 0x0971
+#define mmMC_CITF_CREDITS_XBAR 0x0989
+#define mmMC_CITF_DAGB_CNTL 0x0974
+#define mmMC_CITF_DAGB_DLY 0x0977
+#define mmMC_CITF_INT_CREDITS 0x0975
+#define mmMC_CITF_INT_CREDITS_WR 0x097D
+#define mmMC_CITF_MISC_RD_CG 0x0992
+#define mmMC_CITF_MISC_VM_CG 0x0994
+#define mmMC_CITF_MISC_WR_CG 0x0993
+#define mmMC_CITF_PERF_MON_CNTL2 0x098E
+#define mmMC_CITF_PERF_MON_RSLT2 0x0991
+#define mmMC_CITF_REMREQ 0x097A
+#define mmMC_CITF_RET_MODE 0x0976
+#define mmMC_CITF_WTM_RD_CNTL 0x097F
+#define mmMC_CITF_WTM_WR_CNTL 0x0980
+#define mmMC_CITF_XTRA_ENABLE 0x096D
+#define mmMC_CONFIG 0x0800
+#define mmMC_CONFIG_MCD 0x0828
+#define mmMC_HUB_MISC_DBG 0x0831
+#define mmMC_HUB_MISC_FRAMING 0x0834
+#define mmMC_HUB_MISC_HUB_CG 0x082E
+#define mmMC_HUB_MISC_IDLE_STATUS 0x0847
+#define mmMC_HUB_MISC_OVERRIDE 0x0833
+#define mmMC_HUB_MISC_POWER 0x082D
+#define mmMC_HUB_MISC_SIP_CG 0x0830
+#define mmMC_HUB_MISC_STATUS 0x0832
+#define mmMC_HUB_MISC_VM_CG 0x082F
+#define mmMC_HUB_RDREQ_CNTL 0x083B
+#define mmMC_HUB_RDREQ_CREDITS 0x0844
+#define mmMC_HUB_RDREQ_CREDITS2 0x0845
+#define mmMC_HUB_RDREQ_DMIF 0x0863
+#define mmMC_HUB_RDREQ_DMIF_LIMIT 0x0848
+#define mmMC_HUB_RDREQ_GBL0 0x0856
+#define mmMC_HUB_RDREQ_GBL1 0x0857
+#define mmMC_HUB_RDREQ_HDP 0x085B
+#define mmMC_HUB_RDREQ_MCDW 0x0851
+#define mmMC_HUB_RDREQ_MCDX 0x0852
+#define mmMC_HUB_RDREQ_MCDY 0x0853
+#define mmMC_HUB_RDREQ_MCDZ 0x0854
+#define mmMC_HUB_RDREQ_MCIF 0x0864
+#define mmMC_HUB_RDREQ_RLC 0x085D
+#define mmMC_HUB_RDREQ_SEM 0x085E
+#define mmMC_HUB_RDREQ_SIP 0x0855
+#define mmMC_HUB_RDREQ_SMU 0x0858
+#define mmMC_HUB_RDREQ_STATUS 0x0839
+#define mmMC_HUB_RDREQ_UMC 0x0860
+#define mmMC_HUB_RDREQ_UVD 0x0861
+#define mmMC_HUB_RDREQ_VCE 0x085F
+#define mmMC_HUB_RDREQ_VCEU 0x0866
+#define mmMC_HUB_RDREQ_VMC 0x0865
+#define mmMC_HUB_RDREQ_WTM_CNTL 0x083D
+#define mmMC_HUB_RDREQ_XDMAM 0x0882
+#define mmMC_HUB_SHARED_DAGB_DLY 0x0846
+#define mmMC_HUB_WDP_BP 0x0837
+#define mmMC_HUB_WDP_CNTL 0x0835
+#define mmMC_HUB_WDP_CREDITS 0x083F
+#define mmMC_HUB_WDP_ERR 0x0836
+#define mmMC_HUB_WDP_GBL0 0x0841
+#define mmMC_HUB_WDP_GBL1 0x0842
+#define mmMC_HUB_WDP_HDP 0x0879
+#define mmMC_HUB_WDP_IH 0x0872
+#define mmMC_HUB_WDP_MCDW 0x0867
+#define mmMC_HUB_WDP_MCDX 0x0868
+#define mmMC_HUB_WDP_MCDY 0x0869
+#define mmMC_HUB_WDP_MCDZ 0x086A
+#define mmMC_HUB_WDP_MCIF 0x086F
+#define mmMC_HUB_WDP_MGPU 0x0843
+#define mmMC_HUB_WDP_MGPU2 0x0840
+#define mmMC_HUB_WDP_RLC 0x0873
+#define mmMC_HUB_WDP_SEM 0x0874
+#define mmMC_HUB_WDP_SH0 0x086E
+#define mmMC_HUB_WDP_SH1 0x0876
+#define mmMC_HUB_WDP_SIP 0x086B
+#define mmMC_HUB_WDP_SMU 0x0875
+#define mmMC_HUB_WDP_STATUS 0x0838
+#define mmMC_HUB_WDP_UMC 0x0877
+#define mmMC_HUB_WDP_UVD 0x0878
+#define mmMC_HUB_WDP_VCE 0x0870
+#define mmMC_HUB_WDP_VCEU 0x087F
+#define mmMC_HUB_WDP_WTM_CNTL 0x083E
+#define mmMC_HUB_WDP_XDMA 0x0881
+#define mmMC_HUB_WDP_XDMAM 0x0880
+#define mmMC_HUB_WDP_XDP 0x0871
+#define mmMC_HUB_WRRET_CNTL 0x083C
+#define mmMC_HUB_WRRET_MCDW 0x087B
+#define mmMC_HUB_WRRET_MCDX 0x087C
+#define mmMC_HUB_WRRET_MCDY 0x087D
+#define mmMC_HUB_WRRET_MCDZ 0x087E
+#define mmMC_HUB_WRRET_STATUS 0x083A
+#define mmMC_IMP_CNTL 0x0A36
+#define mmMC_IMP_DEBUG 0x0A37
+#define mmMC_IMP_DQ_STATUS 0x0ABC
+#define mmMC_IMP_STATUS 0x0A38
+#define mmMC_IO_APHY_STR_CNTL_D0 0x0A97
+#define mmMC_IO_APHY_STR_CNTL_D1 0x0A98
+#define mmMC_IO_CDRCNTL1_D0 0x0ADD
+#define mmMC_IO_CDRCNTL1_D1 0x0ADE
+#define mmMC_IO_CDRCNTL2_D0 0x0AE4
+#define mmMC_IO_CDRCNTL2_D1 0x0AE5
+#define mmMC_IO_CDRCNTL_D0 0x0A55
+#define mmMC_IO_CDRCNTL_D1 0x0A56
+#define mmMC_IO_DPHY_STR_CNTL_D0 0x0A4E
+#define mmMC_IO_DPHY_STR_CNTL_D1 0x0A54
+#define mmMC_IO_PAD_CNTL 0x0A73
+#define mmMC_IO_PAD_CNTL_D0 0x0A74
+#define mmMC_IO_PAD_CNTL_D1 0x0A75
+#define mmMC_IO_RXCNTL1_DPHY0_D0 0x0ADF
+#define mmMC_IO_RXCNTL1_DPHY0_D1 0x0AE1
+#define mmMC_IO_RXCNTL1_DPHY1_D0 0x0AE0
+#define mmMC_IO_RXCNTL1_DPHY1_D1 0x0AE2
+#define mmMC_IO_RXCNTL_DPHY0_D0 0x0A4C
+#define mmMC_IO_RXCNTL_DPHY0_D1 0x0A52
+#define mmMC_IO_RXCNTL_DPHY1_D0 0x0A4D
+#define mmMC_IO_RXCNTL_DPHY1_D1 0x0A53
+#define mmMC_IO_TXCNTL_APHY_D0 0x0A4B
+#define mmMC_IO_TXCNTL_APHY_D1 0x0A51
+#define mmMC_IO_TXCNTL_DPHY0_D0 0x0A49
+#define mmMC_IO_TXCNTL_DPHY0_D1 0x0A4F
+#define mmMC_IO_TXCNTL_DPHY1_D0 0x0A4A
+#define mmMC_IO_TXCNTL_DPHY1_D1 0x0A50
+#define mmMCLK_PWRMGT_CNTL 0x0AE8
+#define mmMC_MEM_POWER_LS 0x082A
+#define mmMC_NPL_STATUS 0x0A76
+#define mmMC_PHY_TIMING_2 0x0ACE
+#define mmMC_PHY_TIMING_D0 0x0ACC
+#define mmMC_PHY_TIMING_D1 0x0ACD
+#define mmMC_PMG_AUTO_CFG 0x0A35
+#define mmMC_PMG_AUTO_CMD 0x0A34
+#define mmMC_PMG_CFG 0x0A84
+#define mmMC_PMG_CMD_EMRS 0x0A83
+#define mmMC_PMG_CMD_MRS 0x0AAB
+#define mmMC_PMG_CMD_MRS1 0x0AD1
+#define mmMC_PMG_CMD_MRS2 0x0AD7
+#define mmMC_RD_CB 0x0981
+#define mmMC_RD_DB 0x0982
+#define mmMC_RD_GRP_EXT 0x0978
+#define mmMC_RD_GRP_GFX 0x0803
+#define mmMC_RD_GRP_LCL 0x098A
+#define mmMC_RD_GRP_OTH 0x0807
+#define mmMC_RD_GRP_SYS 0x0805
+#define mmMC_RD_HUB 0x0985
+#define mmMC_RD_TC0 0x0983
+#define mmMC_RD_TC1 0x0984
+#define mmMC_RPB_ARB_CNTL 0x0951
+#define mmMC_RPB_BIF_CNTL 0x0952
+#define mmMC_RPB_CID_QUEUE_EX 0x095A
+#define mmMC_RPB_CID_QUEUE_EX_DATA 0x095B
+#define mmMC_RPB_CID_QUEUE_RD 0x0957
+#define mmMC_RPB_CID_QUEUE_WR 0x0956
+#define mmMC_RPB_CONF 0x094D
+#define mmMC_RPB_DBG1 0x094F
+#define mmMC_RPB_EFF_CNTL 0x0950
+#define mmMC_RPB_IF_CONF 0x094E
+#define mmMC_RPB_PERF_COUNTER_CNTL 0x0958
+#define mmMC_RPB_PERF_COUNTER_STATUS 0x0959
+#define mmMC_RPB_RD_SWITCH_CNTL 0x0955
+#define mmMC_RPB_WR_COMBINE_CNTL 0x0954
+#define mmMC_RPB_WR_SWITCH_CNTL 0x0953
+#define mmMC_SEQ_BIT_REMAP_B0_D0 0x0AA3
+#define mmMC_SEQ_BIT_REMAP_B0_D1 0x0AA7
+#define mmMC_SEQ_BIT_REMAP_B1_D0 0x0AA4
+#define mmMC_SEQ_BIT_REMAP_B1_D1 0x0AA8
+#define mmMC_SEQ_BIT_REMAP_B2_D0 0x0AA5
+#define mmMC_SEQ_BIT_REMAP_B2_D1 0x0AA9
+#define mmMC_SEQ_BIT_REMAP_B3_D0 0x0AA6
+#define mmMC_SEQ_BIT_REMAP_B3_D1 0x0AAA
+#define mmMC_SEQ_BYTE_REMAP_D0 0x0A93
+#define mmMC_SEQ_BYTE_REMAP_D1 0x0A94
+#define mmMC_SEQ_CAS_TIMING 0x0A29
+#define mmMC_SEQ_CAS_TIMING_LP 0x0A9C
+#define mmMC_SEQ_CG 0x0A9A
+#define mmMC_SEQ_CMD 0x0A31
+#define mmMC_SEQ_CNTL 0x0A25
+#define mmMC_SEQ_CNTL_2 0x0AD4
+#define mmMC_SEQ_DRAM 0x0A26
+#define mmMC_SEQ_DRAM_2 0x0A27
+#define mmMC_SEQ_DRAM_ERROR_INSERTION 0x0ACB
+#define mmMC_SEQ_FIFO_CTL 0x0A57
+#define mmMC_SEQ_IO_DEBUG_DATA 0x0A92
+#define mmMC_SEQ_IO_DEBUG_INDEX 0x0A91
+#define mmMC_SEQ_IO_RDBI 0x0AB4
+#define mmMC_SEQ_IO_REDC 0x0AB5
+#define mmMC_SEQ_IO_RESERVE_D0 0x0AB7
+#define mmMC_SEQ_IO_RESERVE_D1 0x0AB8
+#define mmMC_SEQ_IO_RWORD0 0x0AAC
+#define mmMC_SEQ_IO_RWORD1 0x0AAD
+#define mmMC_SEQ_IO_RWORD2 0x0AAE
+#define mmMC_SEQ_IO_RWORD3 0x0AAF
+#define mmMC_SEQ_IO_RWORD4 0x0AB0
+#define mmMC_SEQ_IO_RWORD5 0x0AB1
+#define mmMC_SEQ_IO_RWORD6 0x0AB2
+#define mmMC_SEQ_IO_RWORD7 0x0AB3
+#define mmMC_SEQ_MISC0 0x0A80
+#define mmMC_SEQ_MISC1 0x0A81
+#define mmMC_SEQ_MISC3 0x0A8B
+#define mmMC_SEQ_MISC4 0x0A8C
+#define mmMC_SEQ_MISC5 0x0A95
+#define mmMC_SEQ_MISC6 0x0A96
+#define mmMC_SEQ_MISC7 0x0A99
+#define mmMC_SEQ_MISC8 0x0A5F
+#define mmMC_SEQ_MISC9 0x0AE7
+#define mmMC_SEQ_MISC_TIMING 0x0A2A
+#define mmMC_SEQ_MISC_TIMING2 0x0A2B
+#define mmMC_SEQ_MISC_TIMING2_LP 0x0A9E
+#define mmMC_SEQ_MISC_TIMING_LP 0x0A9D
+#define mmMC_SEQ_MPLL_OVERRIDE 0x0A22
+#define mmMC_SEQ_PERF_CNTL 0x0A77
+#define mmMC_SEQ_PERF_CNTL_1 0x0AFD
+#define mmMC_SEQ_PERF_SEQ_CNT_A_I0 0x0A79
+#define mmMC_SEQ_PERF_SEQ_CNT_A_I1 0x0A7A
+#define mmMC_SEQ_PERF_SEQ_CNT_B_I0 0x0A7B
+#define mmMC_SEQ_PERF_SEQ_CNT_B_I1 0x0A7C
+#define mmMC_SEQ_PERF_SEQ_CNT_C_I0 0x0AD9
+#define mmMC_SEQ_PERF_SEQ_CNT_C_I1 0x0ADA
+#define mmMC_SEQ_PERF_SEQ_CNT_D_I0 0x0ADB
+#define mmMC_SEQ_PERF_SEQ_CNT_D_I1 0x0ADC
+#define mmMC_SEQ_PERF_SEQ_CTL 0x0A78
+#define mmMC_SEQ_PMG_CMD_EMRS_LP 0x0AA1
+#define mmMC_SEQ_PMG_CMD_MRS1_LP 0x0AD2
+#define mmMC_SEQ_PMG_CMD_MRS2_LP 0x0AD8
+#define mmMC_SEQ_PMG_CMD_MRS_LP 0x0AA2
+#define mmMC_SEQ_PMG_PG_HWCNTL 0x0AB9
+#define mmMC_SEQ_PMG_PG_SWCNTL_0 0x0ABA
+#define mmMC_SEQ_PMG_PG_SWCNTL_1 0x0ABB
+#define mmMC_SEQ_PMG_TIMING 0x0A2C
+#define mmMC_SEQ_PMG_TIMING_LP 0x0AD3
+#define mmMC_SEQ_RAS_TIMING 0x0A28
+#define mmMC_SEQ_RAS_TIMING_LP 0x0A9B
+#define mmMC_SEQ_RD_CTL_D0 0x0A2D
+#define mmMC_SEQ_RD_CTL_D0_LP 0x0AC7
+#define mmMC_SEQ_RD_CTL_D1 0x0A2E
+#define mmMC_SEQ_RD_CTL_D1_LP 0x0AC8
+#define mmMC_SEQ_RESERVE_0_S 0x0A1E
+#define mmMC_SEQ_RESERVE_1_S 0x0A1F
+#define mmMC_SEQ_RESERVE_M 0x0A82
+#define mmMC_SEQ_RXFRAMING_BYTE0_D0 0x0A67
+#define mmMC_SEQ_RXFRAMING_BYTE0_D1 0x0A6D
+#define mmMC_SEQ_RXFRAMING_BYTE1_D0 0x0A68
+#define mmMC_SEQ_RXFRAMING_BYTE1_D1 0x0A6E
+#define mmMC_SEQ_RXFRAMING_BYTE2_D0 0x0A69
+#define mmMC_SEQ_RXFRAMING_BYTE2_D1 0x0A6F
+#define mmMC_SEQ_RXFRAMING_BYTE3_D0 0x0A6A
+#define mmMC_SEQ_RXFRAMING_BYTE3_D1 0x0A70
+#define mmMC_SEQ_RXFRAMING_DBI_D0 0x0A6B
+#define mmMC_SEQ_RXFRAMING_DBI_D1 0x0A71
+#define mmMC_SEQ_RXFRAMING_EDC_D0 0x0A6C
+#define mmMC_SEQ_RXFRAMING_EDC_D1 0x0A72
+#define mmMC_SEQ_STATUS_M 0x0A7D
+#define mmMC_SEQ_STATUS_S 0x0A20
+#define mmMC_SEQ_SUP_CNTL 0x0A32
+#define mmMC_SEQ_SUP_DEC_STAT 0x0A88
+#define mmMC_SEQ_SUP_GP0_STAT 0x0A8F
+#define mmMC_SEQ_SUP_GP1_STAT 0x0A90
+#define mmMC_SEQ_SUP_GP2_STAT 0x0A85
+#define mmMC_SEQ_SUP_GP3_STAT 0x0A86
+#define mmMC_SEQ_SUP_IR_STAT 0x0A87
+#define mmMC_SEQ_SUP_PGM 0x0A33
+#define mmMC_SEQ_SUP_PGM_STAT 0x0A89
+#define mmMC_SEQ_SUP_R_PGM 0x0A8A
+#define mmMC_SEQ_TCG_CNTL 0x0ABD
+#define mmMC_SEQ_TIMER_RD 0x0ACA
+#define mmMC_SEQ_TIMER_WR 0x0AC9
+#define mmMC_SEQ_TRAIN_CAPTURE 0x0A3E
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD 0x0A3B
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD2 0x0AFE
+#define mmMC_SEQ_TRAIN_EDC_THRESHOLD3 0x0AFF
+#define mmMC_SEQ_TRAIN_TIMING 0x0A40
+#define mmMC_SEQ_TRAIN_WAKEUP_CLEAR 0x0A3F
+#define mmMC_SEQ_TRAIN_WAKEUP_CNTL 0x0A3A
+#define mmMC_SEQ_TRAIN_WAKEUP_EDGE 0x0A3C
+#define mmMC_SEQ_TRAIN_WAKEUP_MASK 0x0A3D
+#define mmMC_SEQ_TSM_BCNT 0x0AC2
+#define mmMC_SEQ_TSM_CTRL 0x0ABE
+#define mmMC_SEQ_TSM_DBI 0x0AC6
+#define mmMC_SEQ_TSM_DEBUG_DATA 0x0AD0
+#define mmMC_SEQ_TSM_DEBUG_INDEX 0x0ACF
+#define mmMC_SEQ_TSM_EDC 0x0AC5
+#define mmMC_SEQ_TSM_FLAG 0x0AC3
+#define mmMC_SEQ_TSM_GCNT 0x0ABF
+#define mmMC_SEQ_TSM_MISC 0x0AE6
+#define mmMC_SEQ_TSM_NCNT 0x0AC1
+#define mmMC_SEQ_TSM_OCNT 0x0AC0
+#define mmMC_SEQ_TSM_UPDATE 0x0AC4
+#define mmMC_SEQ_TSM_WCDR 0x0AE3
+#define mmMC_SEQ_TXFRAMING_BYTE0_D0 0x0A58
+#define mmMC_SEQ_TXFRAMING_BYTE0_D1 0x0A60
+#define mmMC_SEQ_TXFRAMING_BYTE1_D0 0x0A59
+#define mmMC_SEQ_TXFRAMING_BYTE1_D1 0x0A61
+#define mmMC_SEQ_TXFRAMING_BYTE2_D0 0x0A5A
+#define mmMC_SEQ_TXFRAMING_BYTE2_D1 0x0A62
+#define mmMC_SEQ_TXFRAMING_BYTE3_D0 0x0A5B
+#define mmMC_SEQ_TXFRAMING_BYTE3_D1 0x0A63
+#define mmMC_SEQ_TXFRAMING_DBI_D0 0x0A5C
+#define mmMC_SEQ_TXFRAMING_DBI_D1 0x0A64
+#define mmMC_SEQ_TXFRAMING_EDC_D0 0x0A5D
+#define mmMC_SEQ_TXFRAMING_EDC_D1 0x0A65
+#define mmMC_SEQ_TXFRAMING_FCK_D0 0x0A5E
+#define mmMC_SEQ_TXFRAMING_FCK_D1 0x0A66
+#define mmMC_SEQ_VENDOR_ID_I0 0x0A7E
+#define mmMC_SEQ_VENDOR_ID_I1 0x0A7F
+#define mmMC_SEQ_WCDR_CTRL 0x0A39
+#define mmMC_SEQ_WR_CTL_2 0x0AD5
+#define mmMC_SEQ_WR_CTL_2_LP 0x0AD6
+#define mmMC_SEQ_WR_CTL_D0 0x0A2F
+#define mmMC_SEQ_WR_CTL_D0_LP 0x0A9F
+#define mmMC_SEQ_WR_CTL_D1 0x0A30
+#define mmMC_SEQ_WR_CTL_D1_LP 0x0AA0
+#define mmMC_SHARED_BLACKOUT_CNTL 0x082B
+#define mmMC_SHARED_CHMAP 0x0801
+#define mmMC_SHARED_CHREMAP 0x0802
+#define mmMC_TRAIN_EDCCDR_R_D0 0x0A41
+#define mmMC_TRAIN_EDCCDR_R_D1 0x0A42
+#define mmMC_TRAIN_EDC_STATUS_D0 0x0A45
+#define mmMC_TRAIN_EDC_STATUS_D1 0x0A48
+#define mmMC_TRAIN_PRBSERR_0_D0 0x0A43
+#define mmMC_TRAIN_PRBSERR_0_D1 0x0A46
+#define mmMC_TRAIN_PRBSERR_1_D0 0x0A44
+#define mmMC_TRAIN_PRBSERR_1_D1 0x0A47
+#define mmMC_TRAIN_PRBSERR_2_D0 0x0AFB
+#define mmMC_TRAIN_PRBSERR_2_D1 0x0AFC
+#define mmMC_VM_AGP_BASE 0x080C
+#define mmMC_VM_AGP_BOT 0x080B
+#define mmMC_VM_AGP_TOP 0x080A
+#define mmMC_VM_DC_WRITE_CNTL 0x0810
+#define mmMC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR 0x0815
+#define mmMC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR 0x0811
+#define mmMC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR 0x0816
+#define mmMC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR 0x0812
+#define mmMC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR 0x0817
+#define mmMC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR 0x0813
+#define mmMC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR 0x0818
+#define mmMC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR 0x0814
+#define mmMC_VM_FB_LOCATION 0x0809
+#define mmMC_VM_FB_OFFSET 0x081A
+#define mmMC_VM_MB_L1_TLB0_DEBUG 0x0891
+#define mmMC_VM_MB_L1_TLB0_STATUS 0x0895
+#define mmMC_VM_MB_L1_TLB1_STATUS 0x0896
+#define mmMC_VM_MB_L1_TLB2_DEBUG 0x0893
+#define mmMC_VM_MB_L1_TLB2_STATUS 0x0897
+#define mmMC_VM_MB_L1_TLB3_DEBUG 0x08A5
+#define mmMC_VM_MB_L1_TLB3_STATUS 0x08A6
+#define mmMC_VM_MB_L2ARBITER_L2_CREDITS 0x08A1
+#define mmMC_VM_MD_L1_TLB0_DEBUG 0x0998
+#define mmMC_VM_MD_L1_TLB0_STATUS 0x099B
+#define mmMC_VM_MD_L1_TLB1_DEBUG 0x0999
+#define mmMC_VM_MD_L1_TLB1_STATUS 0x099C
+#define mmMC_VM_MD_L1_TLB2_DEBUG 0x099A
+#define mmMC_VM_MD_L1_TLB2_STATUS 0x099D
+#define mmMC_VM_MD_L1_TLB3_DEBUG 0x09A7
+#define mmMC_VM_MD_L1_TLB3_STATUS 0x09A8
+#define mmMC_VM_MD_L2ARBITER_L2_CREDITS 0x09A4
+#define mmMC_VM_MX_L1_TLB_CNTL 0x0819
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x080F
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x080E
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x080D
+#define mmMC_WR_CB 0x0986
+#define mmMC_WR_DB 0x0987
+#define mmMC_WR_GRP_EXT 0x0979
+#define mmMC_WR_GRP_GFX 0x0804
+#define mmMC_WR_GRP_LCL 0x098B
+#define mmMC_WR_GRP_OTH 0x0808
+#define mmMC_WR_GRP_SYS 0x0806
+#define mmMC_WR_HUB 0x0988
+#define mmMC_WR_TC0 0x097B
+#define mmMC_WR_TC1 0x097C
+#define mmMC_XBAR_ADDR_DEC 0x0C80
+#define mmMC_XBAR_ARB 0x0C8D
+#define mmMC_XBAR_ARB_MAX_BURST 0x0C8E
+#define mmMC_XBAR_CHTRIREMAP 0x0C8B
+#define mmMC_XBAR_PERF_MON_CNTL0 0x0C8F
+#define mmMC_XBAR_PERF_MON_CNTL1 0x0C90
+#define mmMC_XBAR_PERF_MON_CNTL2 0x0C91
+#define mmMC_XBAR_PERF_MON_MAX_THSH 0x0C96
+#define mmMC_XBAR_PERF_MON_RSLT0 0x0C92
+#define mmMC_XBAR_PERF_MON_RSLT1 0x0C93
+#define mmMC_XBAR_PERF_MON_RSLT2 0x0C94
+#define mmMC_XBAR_PERF_MON_RSLT3 0x0C95
+#define mmMC_XBAR_RDREQ_CREDIT 0x0C83
+#define mmMC_XBAR_RDREQ_PRI_CREDIT 0x0C84
+#define mmMC_XBAR_RDRET_CREDIT1 0x0C87
+#define mmMC_XBAR_RDRET_CREDIT2 0x0C88
+#define mmMC_XBAR_RDRET_PRI_CREDIT1 0x0C89
+#define mmMC_XBAR_RDRET_PRI_CREDIT2 0x0C8A
+#define mmMC_XBAR_REMOTE 0x0C81
+#define mmMC_XBAR_SPARE0 0x0C97
+#define mmMC_XBAR_SPARE1 0x0C98
+#define mmMC_XBAR_TWOCHAN 0x0C8C
+#define mmMC_XBAR_WRREQ_CREDIT 0x0C82
+#define mmMC_XBAR_WRRET_CREDIT1 0x0C85
+#define mmMC_XBAR_WRRET_CREDIT2 0x0C86
+#define mmMC_XPB_CLG_CFG0 0x08E9
+#define mmMC_XPB_CLG_CFG10 0x08F3
+#define mmMC_XPB_CLG_CFG1 0x08EA
+#define mmMC_XPB_CLG_CFG11 0x08F4
+#define mmMC_XPB_CLG_CFG12 0x08F5
+#define mmMC_XPB_CLG_CFG13 0x08F6
+#define mmMC_XPB_CLG_CFG14 0x08F7
+#define mmMC_XPB_CLG_CFG15 0x08F8
+#define mmMC_XPB_CLG_CFG16 0x08F9
+#define mmMC_XPB_CLG_CFG17 0x08FA
+#define mmMC_XPB_CLG_CFG18 0x08FB
+#define mmMC_XPB_CLG_CFG19 0x08FC
+#define mmMC_XPB_CLG_CFG20 0x0928
+#define mmMC_XPB_CLG_CFG2 0x08EB
+#define mmMC_XPB_CLG_CFG21 0x0929
+#define mmMC_XPB_CLG_CFG22 0x092A
+#define mmMC_XPB_CLG_CFG23 0x092B
+#define mmMC_XPB_CLG_CFG24 0x092C
+#define mmMC_XPB_CLG_CFG25 0x092D
+#define mmMC_XPB_CLG_CFG26 0x092E
+#define mmMC_XPB_CLG_CFG27 0x092F
+#define mmMC_XPB_CLG_CFG28 0x0930
+#define mmMC_XPB_CLG_CFG29 0x0931
+#define mmMC_XPB_CLG_CFG30 0x0932
+#define mmMC_XPB_CLG_CFG3 0x08EC
+#define mmMC_XPB_CLG_CFG31 0x0933
+#define mmMC_XPB_CLG_CFG32 0x0936
+#define mmMC_XPB_CLG_CFG33 0x0937
+#define mmMC_XPB_CLG_CFG34 0x0938
+#define mmMC_XPB_CLG_CFG35 0x0939
+#define mmMC_XPB_CLG_CFG36 0x093A
+#define mmMC_XPB_CLG_CFG4 0x08ED
+#define mmMC_XPB_CLG_CFG5 0x08EE
+#define mmMC_XPB_CLG_CFG6 0x08EF
+#define mmMC_XPB_CLG_CFG7 0x08F0
+#define mmMC_XPB_CLG_CFG8 0x08F1
+#define mmMC_XPB_CLG_CFG9 0x08F2
+#define mmMC_XPB_CLG_EXTRA 0x08FD
+#define mmMC_XPB_CLG_EXTRA_RD 0x0935
+#define mmMC_XPB_CLK_GAT 0x091E
+#define mmMC_XPB_INTF_CFG 0x091F
+#define mmMC_XPB_INTF_CFG2 0x0934
+#define mmMC_XPB_INTF_STS 0x0920
+#define mmMC_XPB_LB_ADDR 0x08FE
+#define mmMC_XPB_MAP_INVERT_FLUSH_NUM_LSB 0x0923
+#define mmMC_XPB_MISC_CFG 0x0927
+#define mmMC_XPB_P2P_BAR0 0x0904
+#define mmMC_XPB_P2P_BAR1 0x0905
+#define mmMC_XPB_P2P_BAR2 0x0906
+#define mmMC_XPB_P2P_BAR3 0x0907
+#define mmMC_XPB_P2P_BAR4 0x0908
+#define mmMC_XPB_P2P_BAR5 0x0909
+#define mmMC_XPB_P2P_BAR6 0x090A
+#define mmMC_XPB_P2P_BAR7 0x090B
+#define mmMC_XPB_P2P_BAR_CFG 0x0903
+#define mmMC_XPB_P2P_BAR_DEBUG 0x090D
+#define mmMC_XPB_P2P_BAR_DELTA_ABOVE 0x090E
+#define mmMC_XPB_P2P_BAR_DELTA_BELOW 0x090F
+#define mmMC_XPB_P2P_BAR_SETUP 0x090C
+#define mmMC_XPB_PEER_SYS_BAR0 0x0910
+#define mmMC_XPB_PEER_SYS_BAR1 0x0911
+#define mmMC_XPB_PEER_SYS_BAR2 0x0912
+#define mmMC_XPB_PEER_SYS_BAR3 0x0913
+#define mmMC_XPB_PEER_SYS_BAR4 0x0914
+#define mmMC_XPB_PEER_SYS_BAR5 0x0915
+#define mmMC_XPB_PEER_SYS_BAR6 0x0916
+#define mmMC_XPB_PEER_SYS_BAR7 0x0917
+#define mmMC_XPB_PEER_SYS_BAR8 0x0918
+#define mmMC_XPB_PEER_SYS_BAR9 0x0919
+#define mmMC_XPB_PERF_KNOBS 0x0924
+#define mmMC_XPB_PIPE_STS 0x0921
+#define mmMC_XPB_RTR_DEST_MAP0 0x08DB
+#define mmMC_XPB_RTR_DEST_MAP1 0x08DC
+#define mmMC_XPB_RTR_DEST_MAP2 0x08DD
+#define mmMC_XPB_RTR_DEST_MAP3 0x08DE
+#define mmMC_XPB_RTR_DEST_MAP4 0x08DF
+#define mmMC_XPB_RTR_DEST_MAP5 0x08E0
+#define mmMC_XPB_RTR_DEST_MAP6 0x08E1
+#define mmMC_XPB_RTR_DEST_MAP7 0x08E2
+#define mmMC_XPB_RTR_DEST_MAP8 0x08E3
+#define mmMC_XPB_RTR_DEST_MAP9 0x08E4
+#define mmMC_XPB_RTR_SRC_APRTR0 0x08CD
+#define mmMC_XPB_RTR_SRC_APRTR1 0x08CE
+#define mmMC_XPB_RTR_SRC_APRTR2 0x08CF
+#define mmMC_XPB_RTR_SRC_APRTR3 0x08D0
+#define mmMC_XPB_RTR_SRC_APRTR4 0x08D1
+#define mmMC_XPB_RTR_SRC_APRTR5 0x08D2
+#define mmMC_XPB_RTR_SRC_APRTR6 0x08D3
+#define mmMC_XPB_RTR_SRC_APRTR7 0x08D4
+#define mmMC_XPB_RTR_SRC_APRTR8 0x08D5
+#define mmMC_XPB_RTR_SRC_APRTR9 0x08D6
+#define mmMC_XPB_STICKY 0x0925
+#define mmMC_XPB_STICKY_W1C 0x0926
+#define mmMC_XPB_SUB_CTRL 0x0922
+#define mmMC_XPB_UNC_THRESH_HST 0x08FF
+#define mmMC_XPB_UNC_THRESH_SID 0x0900
+#define mmMC_XPB_WCB_CFG 0x0902
+#define mmMC_XPB_WCB_STS 0x0901
+#define mmMC_XPB_XDMA_PEER_SYS_BAR0 0x091A
+#define mmMC_XPB_XDMA_PEER_SYS_BAR1 0x091B
+#define mmMC_XPB_XDMA_PEER_SYS_BAR2 0x091C
+#define mmMC_XPB_XDMA_PEER_SYS_BAR3 0x091D
+#define mmMC_XPB_XDMA_RTR_DEST_MAP0 0x08E5
+#define mmMC_XPB_XDMA_RTR_DEST_MAP1 0x08E6
+#define mmMC_XPB_XDMA_RTR_DEST_MAP2 0x08E7
+#define mmMC_XPB_XDMA_RTR_DEST_MAP3 0x08E8
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR0 0x08D7
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR1 0x08D8
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR2 0x08D9
+#define mmMC_XPB_XDMA_RTR_SRC_APRTR3 0x08DA
+#define mmMPLL_AD_FUNC_CNTL 0x0AF0
+#define mmMPLL_AD_STATUS 0x0AF6
+#define mmMPLL_CNTL_MODE 0x0AEC
+#define mmMPLL_CONTROL 0x0AF5
+#define mmMPLL_DQ_0_0_STATUS 0x0AF7
+#define mmMPLL_DQ_0_1_STATUS 0x0AF8
+#define mmMPLL_DQ_1_0_STATUS 0x0AF9
+#define mmMPLL_DQ_1_1_STATUS 0x0AFA
+#define mmMPLL_DQ_FUNC_CNTL 0x0AF1
+#define mmMPLL_FUNC_CNTL 0x0AED
+#define mmMPLL_FUNC_CNTL_1 0x0AEE
+#define mmMPLL_FUNC_CNTL_2 0x0AEF
+#define mmMPLL_SEQ_UCODE_1 0x0AEA
+#define mmMPLL_SEQ_UCODE_2 0x0AEB
+#define mmMPLL_SS1 0x0AF3
+#define mmMPLL_SS2 0x0AF4
+#define mmMPLL_TIME 0x0AF2
+#define mmVM_CONTEXT0_CNTL 0x0504
+#define mmVM_CONTEXT0_CNTL2 0x050C
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x054F
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR 0x055F
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR 0x0557
+#define mmVM_CONTEXT0_PROTECTION_FAULT_ADDR 0x053E
+#define mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x0546
+#define mmVM_CONTEXT0_PROTECTION_FAULT_STATUS 0x0536
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x0510
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x0511
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x0512
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x0513
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x0514
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x0515
+#define mmVM_CONTEXT1_CNTL 0x0505
+#define mmVM_CONTEXT1_CNTL2 0x050D
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x0550
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR 0x0560
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR 0x0558
+#define mmVM_CONTEXT1_PROTECTION_FAULT_ADDR 0x053F
+#define mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x0547
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS 0x0537
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x0551
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x0552
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x0553
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x0554
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x0555
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x0556
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x050E
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x050F
+#define mmVM_CONTEXTS_DISABLE 0x0535
+#define mmVM_DEBUG 0x056F
+#define mmVM_DUMMY_PAGE_FAULT_ADDR 0x0507
+#define mmVM_DUMMY_PAGE_FAULT_CNTL 0x0506
+#define mmVM_FAULT_CLIENT_ID 0x054E
+#define mmVM_INVALIDATE_REQUEST 0x051E
+#define mmVM_INVALIDATE_RESPONSE 0x051F
+#define mmVM_L2_BANK_SELECT_MASKA 0x0572
+#define mmVM_L2_BANK_SELECT_MASKB 0x0573
+#define mmVM_L2_CG 0x0570
+#define mmVM_L2_CNTL 0x0500
+#define mmVM_L2_CNTL2 0x0501
+#define mmVM_L2_CNTL3 0x0502
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR 0x0576
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR 0x0575
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET 0x0577
+#define mmVM_L2_STATUS 0x0503
+#define mmVM_PRT_APERTURE0_HIGH_ADDR 0x0530
+#define mmVM_PRT_APERTURE0_LOW_ADDR 0x052C
+#define mmVM_PRT_APERTURE1_HIGH_ADDR 0x0531
+#define mmVM_PRT_APERTURE1_LOW_ADDR 0x052D
+#define mmVM_PRT_APERTURE2_HIGH_ADDR 0x0532
+#define mmVM_PRT_APERTURE2_LOW_ADDR 0x052E
+#define mmVM_PRT_APERTURE3_HIGH_ADDR 0x0533
+#define mmVM_PRT_APERTURE3_LOW_ADDR 0x052F
+#define mmVM_PRT_CNTL 0x0534
+
+#endif
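The dword offsets in gmc_6_0_d.h above are meant to be paired with the per-field MASK/SHIFT defines from the companion gmc_6_0_sh_mask.h header added below. As a minimal, hypothetical sketch (not part of this patch) of how such definitions are typically consumed, the following read-modify-write uses values taken from these two headers; mmio_read32()/mmio_write32() are assumed stand-ins for the driver's real MMIO accessors, not actual amdgpu functions:

/*
 * Hypothetical example only: combine a register dword offset with the
 * field MASK/SHIFT defines to update a single field in place.
 */
#include <stdint.h>

#define mmMC_ARB_DRAM_TIMING                      0x09DD
#define MC_ARB_DRAM_TIMING__ACTRD_MASK            0x000000ffL
#define MC_ARB_DRAM_TIMING__ACTRD__SHIFT          0x00000000

uint32_t mmio_read32(uint32_t dword_offset);            /* assumed accessor */
void mmio_write32(uint32_t dword_offset, uint32_t val); /* assumed accessor */

static void set_actrd(uint32_t actrd)
{
	/* Read the register, replace only the ACTRD field, write it back. */
	uint32_t val = mmio_read32(mmMC_ARB_DRAM_TIMING);

	val &= ~MC_ARB_DRAM_TIMING__ACTRD_MASK;
	val |= (actrd << MC_ARB_DRAM_TIMING__ACTRD__SHIFT) &
	       MC_ARB_DRAM_TIMING__ACTRD_MASK;
	mmio_write32(mmMC_ARB_DRAM_TIMING, val);
}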
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h
new file mode 100644
index 000000000000..0f6c6c8d089b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gmc/gmc_6_0_sh_mask.h
@@ -0,0 +1,11895 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GMC_6_0_SH_MASK_H
+#define GMC_6_0_SH_MASK_H
+
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB_MASK 0x00003f00L
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB__SHIFT 0x00000008
+#define ATC_ATS_CNTL__DEBUG_ECO_MASK 0x000f0000L
+#define ATC_ATS_CNTL__DEBUG_ECO__SHIFT 0x00000010
+#define ATC_ATS_CNTL__DISABLE_ATC_MASK 0x00000001L
+#define ATC_ATS_CNTL__DISABLE_ATC__SHIFT 0x00000000
+#define ATC_ATS_CNTL__DISABLE_PASID_MASK 0x00000004L
+#define ATC_ATS_CNTL__DISABLE_PASID__SHIFT 0x00000002
+#define ATC_ATS_CNTL__DISABLE_PRI_MASK 0x00000002L
+#define ATC_ATS_CNTL__DISABLE_PRI__SHIFT 0x00000001
+#define ATC_ATS_DEBUG__ADDRESS_TRANSLATION_REQUEST_WRITE_PERMS_MASK 0x00000004L
+#define ATC_ATS_DEBUG__ADDRESS_TRANSLATION_REQUEST_WRITE_PERMS__SHIFT 0x00000002
+#define ATC_ATS_DEBUG__DISALLOW_ERR_TO_DONE_MASK 0x00004000L
+#define ATC_ATS_DEBUG__DISALLOW_ERR_TO_DONE__SHIFT 0x0000000e
+#define ATC_ATS_DEBUG__EXE_BIT_MASK 0x00000080L
+#define ATC_ATS_DEBUG__EXE_BIT__SHIFT 0x00000007
+#define ATC_ATS_DEBUG__IDENT_RETURN_MASK 0x00000002L
+#define ATC_ATS_DEBUG__IDENT_RETURN__SHIFT 0x00000001
+#define ATC_ATS_DEBUG__IGNORE_FED_MASK 0x00008000L
+#define ATC_ATS_DEBUG__IGNORE_FED__SHIFT 0x0000000f
+#define ATC_ATS_DEBUG__INVALIDATE_ALL_MASK 0x00000001L
+#define ATC_ATS_DEBUG__INVALIDATE_ALL__SHIFT 0x00000000
+#define ATC_ATS_DEBUG__INVALIDATION_REQUESTS_DISALLOWED_WHEN_ATC_IS_DISABLED_MASK 0x00010000L
+#define ATC_ATS_DEBUG__INVALIDATION_REQUESTS_DISALLOWED_WHEN_ATC_IS_DISABLED__SHIFT 0x00000010
+#define ATC_ATS_DEBUG__NUM_REQUESTS_AT_ERR_MASK 0x00003c00L
+#define ATC_ATS_DEBUG__NUM_REQUESTS_AT_ERR__SHIFT 0x0000000a
+#define ATC_ATS_DEBUG__PAGE_REQUEST_PERMS_MASK 0x00000100L
+#define ATC_ATS_DEBUG__PAGE_REQUEST_PERMS__SHIFT 0x00000008
+#define ATC_ATS_DEBUG__PAGE_REQUESTS_USE_RELAXED_ORDERING_MASK 0x00000020L
+#define ATC_ATS_DEBUG__PAGE_REQUESTS_USE_RELAXED_ORDERING__SHIFT 0x00000005
+#define ATC_ATS_DEBUG__PRIV_BIT_MASK 0x00000040L
+#define ATC_ATS_DEBUG__PRIV_BIT__SHIFT 0x00000006
+#define ATC_ATS_DEBUG__UNTRANSLATED_ONLY_REQUESTS_CARRY_SIZE_MASK 0x00000200L
+#define ATC_ATS_DEBUG__UNTRANSLATED_ONLY_REQUESTS_CARRY_SIZE__SHIFT 0x00000009
+#define ATC_ATS_DEFAULT_PAGE_CNTL__DEFAULT_PAGE_HIGH_MASK 0x0000003cL
+#define ATC_ATS_DEFAULT_PAGE_CNTL__DEFAULT_PAGE_HIGH__SHIFT 0x00000002
+#define ATC_ATS_DEFAULT_PAGE_CNTL__SEND_DEFAULT_PAGE_MASK 0x00000001L
+#define ATC_ATS_DEFAULT_PAGE_CNTL__SEND_DEFAULT_PAGE__SHIFT 0x00000000
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE_MASK 0xffffffffL
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE__SHIFT 0x00000000
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE_MASK 0x03f00000L
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE__SHIFT 0x00000014
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE_MASK 0x0000fc00L
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE__SHIFT 0x0000000a
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG_MASK 0x0000003fL
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG__SHIFT 0x00000000
+#define ATC_ATS_FAULT_DEBUG__ALLOW_SUBSEQUENT_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000100L
+#define ATC_ATS_FAULT_DEBUG__ALLOW_SUBSEQUENT_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000008
+#define ATC_ATS_FAULT_DEBUG__CLEAR_FAULT_STATUS_ADDR_MASK 0x00010000L
+#define ATC_ATS_FAULT_DEBUG__CLEAR_FAULT_STATUS_ADDR__SHIFT 0x00000010
+#define ATC_ATS_FAULT_DEBUG__CREDITS_ATS_IH_MASK 0x0000001fL
+#define ATC_ATS_FAULT_DEBUG__CREDITS_ATS_IH__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR_MASK 0xffffffffL
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2_MASK 0x00010000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2__SHIFT 0x00000010
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO_MASK 0x00008000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO__SHIFT 0x0000000f
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE_MASK 0x0000003fL
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE__SHIFT 0x00000000
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION_MASK 0x00020000L
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION__SHIFT 0x00000011
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH_MASK 0x0f000000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH__SHIFT 0x00000018
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST_MASK 0x00040000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST__SHIFT 0x00000012
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS_MASK 0x00f80000L
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS__SHIFT 0x00000013
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_MASK 0x00007c00L
+#define ATC_ATS_FAULT_STATUS_INFO__VMID__SHIFT 0x0000000a
+#define ATC_ATS_STATUS__BUSY_MASK 0x00000001L
+#define ATC_ATS_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_ATS_STATUS__CRASHED_MASK 0x00000002L
+#define ATC_ATS_STATUS__CRASHED__SHIFT 0x00000001
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK 0x00000004L
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000002
+#define ATC_L1_ADDRESS_OFFSET__LOGICAL_ADDRESS_MASK 0xffffffffL
+#define ATC_L1_ADDRESS_OFFSET__LOGICAL_ADDRESS__SHIFT 0x00000000
+#define ATC_L1_CNTL__DONT_NEED_ATS_BEHAVIOR_MASK 0x00000003L
+#define ATC_L1_CNTL__DONT_NEED_ATS_BEHAVIOR__SHIFT 0x00000000
+#define ATC_L1_CNTL__NEED_ATS_BEHAVIOR_MASK 0x00000004L
+#define ATC_L1_CNTL__NEED_ATS_BEHAVIOR__SHIFT 0x00000002
+#define ATC_L1_CNTL__NEED_ATS_SNOOP_DEFAULT_MASK 0x00000010L
+#define ATC_L1_CNTL__NEED_ATS_SNOOP_DEFAULT__SHIFT 0x00000004
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_L2_MASK 0x0003f000L
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_L2__SHIFT 0x0000000c
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_RPB_MASK 0x0ff00000L
+#define ATC_L1RD_DEBUG_TLB__CREDITS_L1_RPB__SHIFT 0x00000014
+#define ATC_L1RD_DEBUG_TLB__DEBUG_ECO_MASK 0x30000000L
+#define ATC_L1RD_DEBUG_TLB__DEBUG_ECO__SHIFT 0x0000001c
+#define ATC_L1RD_DEBUG_TLB__DISABLE_FRAGMENTS_MASK 0x00000001L
+#define ATC_L1RD_DEBUG_TLB__DISABLE_FRAGMENTS__SHIFT 0x00000000
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_CAM_SIZE_MASK 0x000000f0L
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_CAM_SIZE__SHIFT 0x00000004
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE_MASK 0x00000700L
+#define ATC_L1RD_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE__SHIFT 0x00000008
+#define ATC_L1RD_DEBUG_TLB__INVALIDATE_ALL_MASK 0x40000000L
+#define ATC_L1RD_DEBUG_TLB__INVALIDATE_ALL__SHIFT 0x0000001e
+#define ATC_L1RD_STATUS__BAD_NEED_ATS_MASK 0x00000100L
+#define ATC_L1RD_STATUS__BAD_NEED_ATS__SHIFT 0x00000008
+#define ATC_L1RD_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L1RD_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_L1RD_STATUS__DEADLOCK_DETECTION_MASK 0x00000002L
+#define ATC_L1RD_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000001
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_L2_MASK 0x0003f000L
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_L2__SHIFT 0x0000000c
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_RPB_MASK 0x0ff00000L
+#define ATC_L1WR_DEBUG_TLB__CREDITS_L1_RPB__SHIFT 0x00000014
+#define ATC_L1WR_DEBUG_TLB__DEBUG_ECO_MASK 0x30000000L
+#define ATC_L1WR_DEBUG_TLB__DEBUG_ECO__SHIFT 0x0000001c
+#define ATC_L1WR_DEBUG_TLB__DISABLE_FRAGMENTS_MASK 0x00000001L
+#define ATC_L1WR_DEBUG_TLB__DISABLE_FRAGMENTS__SHIFT 0x00000000
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_CAM_SIZE_MASK 0x000000f0L
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_CAM_SIZE__SHIFT 0x00000004
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE_MASK 0x00000700L
+#define ATC_L1WR_DEBUG_TLB__EFFECTIVE_WORK_QUEUE_SIZE__SHIFT 0x00000008
+#define ATC_L1WR_DEBUG_TLB__INVALIDATE_ALL_MASK 0x40000000L
+#define ATC_L1WR_DEBUG_TLB__INVALIDATE_ALL__SHIFT 0x0000001e
+#define ATC_L1WR_STATUS__BAD_NEED_ATS_MASK 0x00000100L
+#define ATC_L1WR_STATUS__BAD_NEED_ATS__SHIFT 0x00000008
+#define ATC_L1WR_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L1WR_STATUS__BUSY__SHIFT 0x00000000
+#define ATC_L1WR_STATUS__DEADLOCK_DETECTION_MASK 0x00000002L
+#define ATC_L1WR_STATUS__DEADLOCK_DETECTION__SHIFT 0x00000001
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x00000000
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000400L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x0000000a
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000030L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x00000004
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000800L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x0000000b
+#define ATC_L2_DEBUG__CREDITS_L2_ATS_MASK 0x0000003fL
+#define ATC_L2_DEBUG__CREDITS_L2_ATS__SHIFT 0x00000000
+#define ATC_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_MISC_CG__ENABLE__SHIFT 0x00000012
+#define ATC_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define ATC_MISC_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define ATC_MISC_CG__OFFDLY_MASK 0x00000fc0L
+#define ATC_MISC_CG__OFFDLY__SHIFT 0x00000006
+#define ATC_VM_APERTURE0_CNTL2__VMIDS_USING_RANGE_MASK 0x0000ffffL
+#define ATC_VM_APERTURE0_CNTL2__VMIDS_USING_RANGE__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_CNTL__ATS_ACCESS_MODE_MASK 0x00000003L
+#define ATC_VM_APERTURE0_CNTL__ATS_ACCESS_MODE__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_HIGH_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE0_HIGH_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE0_LOW_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE0_LOW_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_CNTL2__VMIDS_USING_RANGE_MASK 0x0000ffffL
+#define ATC_VM_APERTURE1_CNTL2__VMIDS_USING_RANGE__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_CNTL__ATS_ACCESS_MODE_MASK 0x00000003L
+#define ATC_VM_APERTURE1_CNTL__ATS_ACCESS_MODE__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_HIGH_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE1_HIGH_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VM_APERTURE1_LOW_ADDR__VIRTUAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define ATC_VM_APERTURE1_LOW_ADDR__VIRTUAL_PAGE_NUMBER__SHIFT 0x00000000
+#define ATC_VMID0_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID0_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID0_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID0_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID10_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID10_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID10_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID10_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID11_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID11_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID11_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID11_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID12_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID12_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID12_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID12_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID13_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID13_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID13_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID13_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID14_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID14_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID14_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID14_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID15_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID15_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID15_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID15_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID1_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID1_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID1_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID1_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID2_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID2_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID2_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID2_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID3_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID3_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID3_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID3_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID4_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID4_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID4_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID4_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID5_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID5_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID5_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID5_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID6_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID6_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID6_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID6_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID7_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID7_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID7_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID7_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID8_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID8_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID8_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID8_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID9_PASID_MAPPING__PASID_MASK 0x0000ffffL
+#define ATC_VMID9_PASID_MAPPING__PASID__SHIFT 0x00000000
+#define ATC_VMID9_PASID_MAPPING__VALID_MASK 0x80000000L
+#define ATC_VMID9_PASID_MAPPING__VALID__SHIFT 0x0000001f
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED_MASK 0x00000001L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED__SHIFT 0x00000000
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED_MASK 0x00000400L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED__SHIFT 0x0000000a
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED_MASK 0x00000800L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED__SHIFT 0x0000000b
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED_MASK 0x00001000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED__SHIFT 0x0000000c
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED_MASK 0x00002000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED__SHIFT 0x0000000d
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED_MASK 0x00004000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED__SHIFT 0x0000000e
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED_MASK 0x00008000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED__SHIFT 0x0000000f
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED_MASK 0x00000002L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED__SHIFT 0x00000001
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED_MASK 0x00000004L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED__SHIFT 0x00000002
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED_MASK 0x00000008L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED__SHIFT 0x00000003
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED_MASK 0x00000010L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED__SHIFT 0x00000004
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED_MASK 0x00000020L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED__SHIFT 0x00000005
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED_MASK 0x00000040L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED__SHIFT 0x00000006
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED_MASK 0x00000080L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED__SHIFT 0x00000007
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED_MASK 0x00000100L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED__SHIFT 0x00000008
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED_MASK 0x00000200L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED__SHIFT 0x00000009
+#define CC_MC_MAX_CHANNEL__NOOFCHAN_MASK 0x0000001eL
+#define CC_MC_MAX_CHANNEL__NOOFCHAN__SHIFT 0x00000001
+#define DLL_CNTL__DLL_LOCK_TIME_MASK 0x003ff000L
+#define DLL_CNTL__DLL_LOCK_TIME__SHIFT 0x0000000c
+#define DLL_CNTL__DLL_RESET_TIME_MASK 0x000003ffL
+#define DLL_CNTL__DLL_RESET_TIME__SHIFT 0x00000000
+#define DLL_CNTL__MRDCK0_BYPASS_MASK 0x01000000L
+#define DLL_CNTL__MRDCK0_BYPASS__SHIFT 0x00000018
+#define DLL_CNTL__MRDCK1_BYPASS_MASK 0x02000000L
+#define DLL_CNTL__MRDCK1_BYPASS__SHIFT 0x00000019
+#define DLL_CNTL__PWR2_MODE_MASK 0x04000000L
+#define DLL_CNTL__PWR2_MODE__SHIFT 0x0000001a
+#define GMCON_DEBUG__GFX_CLEAR_MASK 0x00000002L
+#define GMCON_DEBUG__GFX_CLEAR__SHIFT 0x00000001
+#define GMCON_DEBUG__GFX_STALL_MASK 0x00000001L
+#define GMCON_DEBUG__GFX_STALL__SHIFT 0x00000000
+#define GMCON_DEBUG__MISC_FLAGS_MASK 0x3ffffffcL
+#define GMCON_DEBUG__MISC_FLAGS__SHIFT 0x00000002
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE0_MASK 0x00000007L
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE0__SHIFT 0x00000000
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE1_MASK 0x00000038L
+#define GMCON_MISC2__RENG_MEM_POWER_CTRL_OVERRIDE1__SHIFT 0x00000003
+#define GMCON_MISC2__RENG_SR_HOLD_THRESHOLD_MASK 0x0000fc00L
+#define GMCON_MISC2__RENG_SR_HOLD_THRESHOLD__SHIFT 0x0000000a
+#define GMCON_MISC2__STCTRL_EXTEND_GMC_OFFLINE_MASK 0x20000000L
+#define GMCON_MISC2__STCTRL_EXTEND_GMC_OFFLINE__SHIFT 0x0000001d
+#define GMCON_MISC2__STCTRL_IGNORE_ARB_BUSY_MASK 0x10000000L
+#define GMCON_MISC2__STCTRL_IGNORE_ARB_BUSY__SHIFT 0x0000001c
+#define GMCON_MISC2__STCTRL_LPT_TARGET_MASK 0x0fff0000L
+#define GMCON_MISC2__STCTRL_LPT_TARGET__SHIFT 0x00000010
+#define GMCON_MISC3__RENG_DISABLE_MCC_MASK 0x0000003fL
+#define GMCON_MISC3__RENG_DISABLE_MCC__SHIFT 0x00000000
+#define GMCON_MISC3__RENG_DISABLE_MCD_MASK 0x00000fc0L
+#define GMCON_MISC3__RENG_DISABLE_MCD__SHIFT 0x00000006
+#define GMCON_MISC3__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00fff000L
+#define GMCON_MISC3__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x0000000c
+#define GMCON_MISC__ALLOW_DEEP_SLEEP_MODE_MASK 0x30000000L
+#define GMCON_MISC__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x0000001c
+#define GMCON_MISC__CRITICAL_REGS_LOCK_MASK 0x08000000L
+#define GMCON_MISC__CRITICAL_REGS_LOCK__SHIFT 0x0000001b
+#define GMCON_MISC__RENG_EXECUTE_NOW_MODE_MASK 0x00000400L
+#define GMCON_MISC__RENG_EXECUTE_NOW_MODE__SHIFT 0x0000000a
+#define GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00000800L
+#define GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x0000000b
+#define GMCON_MISC__RENG_SRBM_CREDITS_MCD_MASK 0x0000f000L
+#define GMCON_MISC__RENG_SRBM_CREDITS_MCD__SHIFT 0x0000000c
+#define GMCON_MISC__STCTRL_DISABLE_ALLOW_SR_MASK 0x02000000L
+#define GMCON_MISC__STCTRL_DISABLE_ALLOW_SR__SHIFT 0x00000019
+#define GMCON_MISC__STCTRL_DISABLE_GMC_OFFLINE_MASK 0x04000000L
+#define GMCON_MISC__STCTRL_DISABLE_GMC_OFFLINE__SHIFT 0x0000001a
+#define GMCON_MISC__STCTRL_FORCE_ALLOW_SR_MASK 0x40000000L
+#define GMCON_MISC__STCTRL_FORCE_ALLOW_SR__SHIFT 0x0000001e
+#define GMCON_MISC__STCTRL_GMC_IDLE_THRESHOLD_MASK 0x00060000L
+#define GMCON_MISC__STCTRL_GMC_IDLE_THRESHOLD__SHIFT 0x00000011
+#define GMCON_MISC__STCTRL_IGNORE_ALLOW_STOP_MASK 0x00400000L
+#define GMCON_MISC__STCTRL_IGNORE_ALLOW_STOP__SHIFT 0x00000016
+#define GMCON_MISC__STCTRL_IGNORE_PRE_SR_MASK 0x00200000L
+#define GMCON_MISC__STCTRL_IGNORE_PRE_SR__SHIFT 0x00000015
+#define GMCON_MISC__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x01000000L
+#define GMCON_MISC__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x00000018
+#define GMCON_MISC__STCTRL_IGNORE_SR_COMMIT_MASK 0x00800000L
+#define GMCON_MISC__STCTRL_IGNORE_SR_COMMIT__SHIFT 0x00000017
+#define GMCON_MISC__STCTRL_SRBM_IDLE_THRESHOLD_MASK 0x00180000L
+#define GMCON_MISC__STCTRL_SRBM_IDLE_THRESHOLD__SHIFT 0x00000013
+#define GMCON_MISC__STCTRL_STUTTER_EN_MASK 0x00010000L
+#define GMCON_MISC__STCTRL_STUTTER_EN__SHIFT 0x00000010
+#define GMCON_PERF_MON_CNTL0__ALLOW_WRAP_MASK 0x10000000L
+#define GMCON_PERF_MON_CNTL0__ALLOW_WRAP__SHIFT 0x0000001c
+#define GMCON_PERF_MON_CNTL0__START_MODE_MASK 0x03000000L
+#define GMCON_PERF_MON_CNTL0__START_MODE__SHIFT 0x00000018
+#define GMCON_PERF_MON_CNTL0__START_THRESH_MASK 0x00000fffL
+#define GMCON_PERF_MON_CNTL0__START_THRESH__SHIFT 0x00000000
+#define GMCON_PERF_MON_CNTL0__STOP_MODE_MASK 0x0c000000L
+#define GMCON_PERF_MON_CNTL0__STOP_MODE__SHIFT 0x0000001a
+#define GMCON_PERF_MON_CNTL0__STOP_THRESH_MASK 0x00fff000L
+#define GMCON_PERF_MON_CNTL0__STOP_THRESH__SHIFT 0x0000000c
+#define GMCON_PERF_MON_CNTL1__MON0_ID_MASK 0x00fc0000L
+#define GMCON_PERF_MON_CNTL1__MON0_ID__SHIFT 0x00000012
+#define GMCON_PERF_MON_CNTL1__MON1_ID_MASK 0x3f000000L
+#define GMCON_PERF_MON_CNTL1__MON1_ID__SHIFT 0x00000018
+#define GMCON_PERF_MON_CNTL1__START_TRIG_ID_MASK 0x00000fc0L
+#define GMCON_PERF_MON_CNTL1__START_TRIG_ID__SHIFT 0x00000006
+#define GMCON_PERF_MON_CNTL1__STOP_TRIG_ID_MASK 0x0003f000L
+#define GMCON_PERF_MON_CNTL1__STOP_TRIG_ID__SHIFT 0x0000000c
+#define GMCON_PERF_MON_CNTL1__THRESH_CNTR_ID_MASK 0x0000003fL
+#define GMCON_PERF_MON_CNTL1__THRESH_CNTR_ID__SHIFT 0x00000000
+#define GMCON_PERF_MON_RSLT0__COUNT_MASK 0xffffffffL
+#define GMCON_PERF_MON_RSLT0__COUNT__SHIFT 0x00000000
+#define GMCON_PERF_MON_RSLT1__COUNT_MASK 0xffffffffL
+#define GMCON_PERF_MON_RSLT1__COUNT__SHIFT 0x00000000
+#define GMCON_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000ffL
+#define GMCON_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x00000000
+#define GMCON_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define GMCON_PGFSM_CONFIG__P1_SELECT__SHIFT 0x0000000a
+#define GMCON_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define GMCON_PGFSM_CONFIG__P2_SELECT__SHIFT 0x0000000b
+#define GMCON_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define GMCON_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x00000008
+#define GMCON_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define GMCON_PGFSM_CONFIG__POWER_UP__SHIFT 0x00000009
+#define GMCON_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define GMCON_PGFSM_CONFIG__READ__SHIFT 0x0000000d
+#define GMCON_PGFSM_CONFIG__REG_ADDR_MASK 0xf0000000L
+#define GMCON_PGFSM_CONFIG__REG_ADDR__SHIFT 0x0000001c
+#define GMCON_PGFSM_CONFIG__RSRVD_MASK 0x07ffc000L
+#define GMCON_PGFSM_CONFIG__RSRVD__SHIFT 0x0000000e
+#define GMCON_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define GMCON_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x0000001b
+#define GMCON_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define GMCON_PGFSM_CONFIG__WRITE__SHIFT 0x0000000c
+#define GMCON_PGFSM_READ__PGFSM_SELECT_MASK 0x0f000000L
+#define GMCON_PGFSM_READ__PGFSM_SELECT__SHIFT 0x00000018
+#define GMCON_PGFSM_READ__READ_VALUE_MASK 0x00ffffffL
+#define GMCON_PGFSM_READ__READ_VALUE__SHIFT 0x00000000
+#define GMCON_PGFSM_READ__SERDES_MASTER_BUSY_MASK 0x10000000L
+#define GMCON_PGFSM_READ__SERDES_MASTER_BUSY__SHIFT 0x0000001c
+#define GMCON_PGFSM_WRITE__WRITE_VALUE_MASK 0xffffffffL
+#define GMCON_PGFSM_WRITE__WRITE_VALUE__SHIFT 0x00000000
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_DSP_END_PTR_MASK 0x003ff000L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_DSP_END_PTR__SHIFT 0x0000000c
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0xffc00000L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0x00000016
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000002L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x00000001
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000ffcL
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x00000002
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK 0x00000001L
+#define GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x00000000
+#define GMCON_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xffffffffL
+#define GMCON_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x00000000
+#define GMCON_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003ffL
+#define GMCON_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE0_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE0__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT0_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT0__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE1_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE1__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT1_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT1__SHIFT 0x00000010
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE2_MASK 0x0000ffffL
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE2__SHIFT 0x00000000
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT2_MASK 0xffff0000L
+#define GMCON_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT2__SHIFT 0x00000010
+#define MC_ARB_ADDR_HASH__BANK_XOR_ENABLE_MASK 0x0000000fL
+#define MC_ARB_ADDR_HASH__BANK_XOR_ENABLE__SHIFT 0x00000000
+#define MC_ARB_ADDR_HASH__COL_XOR_MASK 0x00000ff0L
+#define MC_ARB_ADDR_HASH__COL_XOR__SHIFT 0x00000004
+#define MC_ARB_ADDR_HASH__ROW_XOR_MASK 0x0ffff000L
+#define MC_ARB_ADDR_HASH__ROW_XOR__SHIFT 0x0000000c
+#define MC_ARB_AGE_RD__DIVIDE_GROUP0_MASK 0x01000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP0__SHIFT 0x00000018
+#define MC_ARB_AGE_RD__DIVIDE_GROUP1_MASK 0x02000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP1__SHIFT 0x00000019
+#define MC_ARB_AGE_RD__DIVIDE_GROUP2_MASK 0x04000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP2__SHIFT 0x0000001a
+#define MC_ARB_AGE_RD__DIVIDE_GROUP3_MASK 0x08000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP3__SHIFT 0x0000001b
+#define MC_ARB_AGE_RD__DIVIDE_GROUP4_MASK 0x10000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP4__SHIFT 0x0000001c
+#define MC_ARB_AGE_RD__DIVIDE_GROUP5_MASK 0x20000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP5__SHIFT 0x0000001d
+#define MC_ARB_AGE_RD__DIVIDE_GROUP6_MASK 0x40000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP6__SHIFT 0x0000001e
+#define MC_ARB_AGE_RD__DIVIDE_GROUP7_MASK 0x80000000L
+#define MC_ARB_AGE_RD__DIVIDE_GROUP7__SHIFT 0x0000001f
+#define MC_ARB_AGE_RD__ENABLE_GROUP0_MASK 0x00010000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP0__SHIFT 0x00000010
+#define MC_ARB_AGE_RD__ENABLE_GROUP1_MASK 0x00020000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP1__SHIFT 0x00000011
+#define MC_ARB_AGE_RD__ENABLE_GROUP2_MASK 0x00040000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP2__SHIFT 0x00000012
+#define MC_ARB_AGE_RD__ENABLE_GROUP3_MASK 0x00080000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP3__SHIFT 0x00000013
+#define MC_ARB_AGE_RD__ENABLE_GROUP4_MASK 0x00100000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP4__SHIFT 0x00000014
+#define MC_ARB_AGE_RD__ENABLE_GROUP5_MASK 0x00200000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP5__SHIFT 0x00000015
+#define MC_ARB_AGE_RD__ENABLE_GROUP6_MASK 0x00400000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP6__SHIFT 0x00000016
+#define MC_ARB_AGE_RD__ENABLE_GROUP7_MASK 0x00800000L
+#define MC_ARB_AGE_RD__ENABLE_GROUP7__SHIFT 0x00000017
+#define MC_ARB_AGE_RD__RATE_GROUP0_MASK 0x00000003L
+#define MC_ARB_AGE_RD__RATE_GROUP0__SHIFT 0x00000000
+#define MC_ARB_AGE_RD__RATE_GROUP1_MASK 0x0000000cL
+#define MC_ARB_AGE_RD__RATE_GROUP1__SHIFT 0x00000002
+#define MC_ARB_AGE_RD__RATE_GROUP2_MASK 0x00000030L
+#define MC_ARB_AGE_RD__RATE_GROUP2__SHIFT 0x00000004
+#define MC_ARB_AGE_RD__RATE_GROUP3_MASK 0x000000c0L
+#define MC_ARB_AGE_RD__RATE_GROUP3__SHIFT 0x00000006
+#define MC_ARB_AGE_RD__RATE_GROUP4_MASK 0x00000300L
+#define MC_ARB_AGE_RD__RATE_GROUP4__SHIFT 0x00000008
+#define MC_ARB_AGE_RD__RATE_GROUP5_MASK 0x00000c00L
+#define MC_ARB_AGE_RD__RATE_GROUP5__SHIFT 0x0000000a
+#define MC_ARB_AGE_RD__RATE_GROUP6_MASK 0x00003000L
+#define MC_ARB_AGE_RD__RATE_GROUP6__SHIFT 0x0000000c
+#define MC_ARB_AGE_RD__RATE_GROUP7_MASK 0x0000c000L
+#define MC_ARB_AGE_RD__RATE_GROUP7__SHIFT 0x0000000e
+#define MC_ARB_AGE_WR__DIVIDE_GROUP0_MASK 0x01000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP0__SHIFT 0x00000018
+#define MC_ARB_AGE_WR__DIVIDE_GROUP1_MASK 0x02000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP1__SHIFT 0x00000019
+#define MC_ARB_AGE_WR__DIVIDE_GROUP2_MASK 0x04000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP2__SHIFT 0x0000001a
+#define MC_ARB_AGE_WR__DIVIDE_GROUP3_MASK 0x08000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP3__SHIFT 0x0000001b
+#define MC_ARB_AGE_WR__DIVIDE_GROUP4_MASK 0x10000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP4__SHIFT 0x0000001c
+#define MC_ARB_AGE_WR__DIVIDE_GROUP5_MASK 0x20000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP5__SHIFT 0x0000001d
+#define MC_ARB_AGE_WR__DIVIDE_GROUP6_MASK 0x40000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP6__SHIFT 0x0000001e
+#define MC_ARB_AGE_WR__DIVIDE_GROUP7_MASK 0x80000000L
+#define MC_ARB_AGE_WR__DIVIDE_GROUP7__SHIFT 0x0000001f
+#define MC_ARB_AGE_WR__ENABLE_GROUP0_MASK 0x00010000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP0__SHIFT 0x00000010
+#define MC_ARB_AGE_WR__ENABLE_GROUP1_MASK 0x00020000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP1__SHIFT 0x00000011
+#define MC_ARB_AGE_WR__ENABLE_GROUP2_MASK 0x00040000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP2__SHIFT 0x00000012
+#define MC_ARB_AGE_WR__ENABLE_GROUP3_MASK 0x00080000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP3__SHIFT 0x00000013
+#define MC_ARB_AGE_WR__ENABLE_GROUP4_MASK 0x00100000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP4__SHIFT 0x00000014
+#define MC_ARB_AGE_WR__ENABLE_GROUP5_MASK 0x00200000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP5__SHIFT 0x00000015
+#define MC_ARB_AGE_WR__ENABLE_GROUP6_MASK 0x00400000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP6__SHIFT 0x00000016
+#define MC_ARB_AGE_WR__ENABLE_GROUP7_MASK 0x00800000L
+#define MC_ARB_AGE_WR__ENABLE_GROUP7__SHIFT 0x00000017
+#define MC_ARB_AGE_WR__RATE_GROUP0_MASK 0x00000003L
+#define MC_ARB_AGE_WR__RATE_GROUP0__SHIFT 0x00000000
+#define MC_ARB_AGE_WR__RATE_GROUP1_MASK 0x0000000cL
+#define MC_ARB_AGE_WR__RATE_GROUP1__SHIFT 0x00000002
+#define MC_ARB_AGE_WR__RATE_GROUP2_MASK 0x00000030L
+#define MC_ARB_AGE_WR__RATE_GROUP2__SHIFT 0x00000004
+#define MC_ARB_AGE_WR__RATE_GROUP3_MASK 0x000000c0L
+#define MC_ARB_AGE_WR__RATE_GROUP3__SHIFT 0x00000006
+#define MC_ARB_AGE_WR__RATE_GROUP4_MASK 0x00000300L
+#define MC_ARB_AGE_WR__RATE_GROUP4__SHIFT 0x00000008
+#define MC_ARB_AGE_WR__RATE_GROUP5_MASK 0x00000c00L
+#define MC_ARB_AGE_WR__RATE_GROUP5__SHIFT 0x0000000a
+#define MC_ARB_AGE_WR__RATE_GROUP6_MASK 0x00003000L
+#define MC_ARB_AGE_WR__RATE_GROUP6__SHIFT 0x0000000c
+#define MC_ARB_AGE_WR__RATE_GROUP7_MASK 0x0000c000L
+#define MC_ARB_AGE_WR__RATE_GROUP7__SHIFT 0x0000000e
+#define MC_ARB_BANKMAP__BANK0_MASK 0x0000000fL
+#define MC_ARB_BANKMAP__BANK0__SHIFT 0x00000000
+#define MC_ARB_BANKMAP__BANK1_MASK 0x000000f0L
+#define MC_ARB_BANKMAP__BANK1__SHIFT 0x00000004
+#define MC_ARB_BANKMAP__BANK2_MASK 0x00000f00L
+#define MC_ARB_BANKMAP__BANK2__SHIFT 0x00000008
+#define MC_ARB_BANKMAP__BANK3_MASK 0x0000f000L
+#define MC_ARB_BANKMAP__BANK3__SHIFT 0x0000000c
+#define MC_ARB_BANKMAP__RANK_MASK 0x000f0000L
+#define MC_ARB_BANKMAP__RANK__SHIFT 0x00000010
+#define MC_ARB_BURST_TIME__STATE0_MASK 0x0000001fL
+#define MC_ARB_BURST_TIME__STATE0__SHIFT 0x00000000
+#define MC_ARB_BURST_TIME__STATE1_MASK 0x000003e0L
+#define MC_ARB_BURST_TIME__STATE1__SHIFT 0x00000005
+#define MC_ARB_BURST_TIME__STATE2_MASK 0x00007c00L
+#define MC_ARB_BURST_TIME__STATE2__SHIFT 0x0000000a
+#define MC_ARB_BURST_TIME__STATE3_MASK 0x000f8000L
+#define MC_ARB_BURST_TIME__STATE3__SHIFT 0x0000000f
+#define MC_ARB_CAC_CNTL__ALLOW_OVERFLOW_MASK 0x00002000L
+#define MC_ARB_CAC_CNTL__ALLOW_OVERFLOW__SHIFT 0x0000000d
+#define MC_ARB_CAC_CNTL__ENABLE_MASK 0x00000001L
+#define MC_ARB_CAC_CNTL__ENABLE__SHIFT 0x00000000
+#define MC_ARB_CAC_CNTL__READ_WEIGHT_MASK 0x0000007eL
+#define MC_ARB_CAC_CNTL__READ_WEIGHT__SHIFT 0x00000001
+#define MC_ARB_CAC_CNTL__WRITE_WEIGHT_MASK 0x00001f80L
+#define MC_ARB_CAC_CNTL__WRITE_WEIGHT__SHIFT 0x00000007
+#define MC_ARB_CG__CG_ARB_REQ_MASK 0x000000ffL
+#define MC_ARB_CG__CG_ARB_REQ__SHIFT 0x00000000
+#define MC_ARB_CG__CG_ARB_RESP_MASK 0x0000ff00L
+#define MC_ARB_CG__CG_ARB_RESP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING_1__ACTRD_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING_1__ACTRD__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING_1__ACTWR_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING_1__ACTWR__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING_1__RASMACTRD_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING_1__RASMACTRD__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING_1__RASMACTWR_MASK 0xff000000L
+#define MC_ARB_DRAM_TIMING_1__RASMACTWR__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2_1__BUS_TURN_MASK 0x1f000000L
+#define MC_ARB_DRAM_TIMING2_1__BUS_TURN__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2_1__RAS2RAS_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING2_1__RAS2RAS__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING2_1__RP_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING2_1__RP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING2_1__WRPLUSRP_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING2_1__WRPLUSRP__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING2__BUS_TURN_MASK 0x1f000000L
+#define MC_ARB_DRAM_TIMING2__BUS_TURN__SHIFT 0x00000018
+#define MC_ARB_DRAM_TIMING2__RAS2RAS_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING2__RAS2RAS__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING2__RP_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING2__RP__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING2__WRPLUSRP_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING2__WRPLUSRP__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING__ACTRD_MASK 0x000000ffL
+#define MC_ARB_DRAM_TIMING__ACTRD__SHIFT 0x00000000
+#define MC_ARB_DRAM_TIMING__ACTWR_MASK 0x0000ff00L
+#define MC_ARB_DRAM_TIMING__ACTWR__SHIFT 0x00000008
+#define MC_ARB_DRAM_TIMING__RASMACTRD_MASK 0x00ff0000L
+#define MC_ARB_DRAM_TIMING__RASMACTRD__SHIFT 0x00000010
+#define MC_ARB_DRAM_TIMING__RASMACTWR_MASK 0xff000000L
+#define MC_ARB_DRAM_TIMING__RASMACTWR__SHIFT 0x00000018
+#define MC_ARB_FED_CNTL__KEEP_POISON_IN_PAGE_MASK 0x00000010L
+#define MC_ARB_FED_CNTL__KEEP_POISON_IN_PAGE__SHIFT 0x00000004
+#define MC_ARB_FED_CNTL__MODE_MASK 0x00000003L
+#define MC_ARB_FED_CNTL__MODE__SHIFT 0x00000000
+#define MC_ARB_FED_CNTL__WR_ERR_MASK 0x0000000cL
+#define MC_ARB_FED_CNTL__WR_ERR__SHIFT 0x00000002
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT0_MASK 0x0000000fL
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT0__SHIFT 0x00000000
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT1_MASK 0x000000f0L
+#define MC_ARB_GDEC_RD_CNTL__PAGEBIT1__SHIFT 0x00000004
+#define MC_ARB_GDEC_RD_CNTL__REM_DEFAULT_GRP_MASK 0x00003c00L
+#define MC_ARB_GDEC_RD_CNTL__REM_DEFAULT_GRP__SHIFT 0x0000000a
+#define MC_ARB_GDEC_RD_CNTL__USE_RANK_MASK 0x00000100L
+#define MC_ARB_GDEC_RD_CNTL__USE_RANK__SHIFT 0x00000008
+#define MC_ARB_GDEC_RD_CNTL__USE_RSNO_MASK 0x00000200L
+#define MC_ARB_GDEC_RD_CNTL__USE_RSNO__SHIFT 0x00000009
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT0_MASK 0x0000000fL
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT0__SHIFT 0x00000000
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT1_MASK 0x000000f0L
+#define MC_ARB_GDEC_WR_CNTL__PAGEBIT1__SHIFT 0x00000004
+#define MC_ARB_GDEC_WR_CNTL__REM_DEFAULT_GRP_MASK 0x00003c00L
+#define MC_ARB_GDEC_WR_CNTL__REM_DEFAULT_GRP__SHIFT 0x0000000a
+#define MC_ARB_GDEC_WR_CNTL__USE_RANK_MASK 0x00000100L
+#define MC_ARB_GDEC_WR_CNTL__USE_RANK__SHIFT 0x00000008
+#define MC_ARB_GDEC_WR_CNTL__USE_RSNO_MASK 0x00000200L
+#define MC_ARB_GDEC_WR_CNTL__USE_RSNO__SHIFT 0x00000009
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI0_MASK 0x000000ffL
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI0__SHIFT 0x00000000
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI1_MASK 0x0000ff00L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI1__SHIFT 0x00000008
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI2_MASK 0x00ff0000L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI2__SHIFT 0x00000010
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI3_MASK 0xff000000L
+#define MC_ARB_GECC2_CLI__NO_GECC_CLI3__SHIFT 0x00000018
+#define MC_ARB_GECC2__CLOSE_BANK_RMW_MASK 0x00004000L
+#define MC_ARB_GECC2__CLOSE_BANK_RMW__SHIFT 0x0000000e
+#define MC_ARB_GECC2__COLFIFO_WATER_MASK 0x001f8000L
+#define MC_ARB_GECC2__COLFIFO_WATER__SHIFT 0x0000000f
+#define MC_ARB_GECC2_DEBUG2__ERR0_START_MASK 0x0000ff00L
+#define MC_ARB_GECC2_DEBUG2__ERR0_START__SHIFT 0x00000008
+#define MC_ARB_GECC2_DEBUG2__ERR1_START_MASK 0x00ff0000L
+#define MC_ARB_GECC2_DEBUG2__ERR1_START__SHIFT 0x00000010
+#define MC_ARB_GECC2_DEBUG2__ERR2_START_MASK 0xff000000L
+#define MC_ARB_GECC2_DEBUG2__ERR2_START__SHIFT 0x00000018
+#define MC_ARB_GECC2_DEBUG2__PERIOD_MASK 0x000000ffL
+#define MC_ARB_GECC2_DEBUG2__PERIOD__SHIFT 0x00000000
+#define MC_ARB_GECC2_DEBUG__DATA_FIELD_MASK 0x00000018L
+#define MC_ARB_GECC2_DEBUG__DATA_FIELD__SHIFT 0x00000003
+#define MC_ARB_GECC2_DEBUG__DIRECTION_MASK 0x00000004L
+#define MC_ARB_GECC2_DEBUG__DIRECTION__SHIFT 0x00000002
+#define MC_ARB_GECC2_DEBUG__NUM_ERR_BITS_MASK 0x00000003L
+#define MC_ARB_GECC2_DEBUG__NUM_ERR_BITS__SHIFT 0x00000000
+#define MC_ARB_GECC2_DEBUG__SW_INJECTION_MASK 0x00000020L
+#define MC_ARB_GECC2_DEBUG__SW_INJECTION__SHIFT 0x00000005
+#define MC_ARB_GECC2__ECC_MODE_MASK 0x00000006L
+#define MC_ARB_GECC2__ECC_MODE__SHIFT 0x00000001
+#define MC_ARB_GECC2__ENABLE_MASK 0x00000001L
+#define MC_ARB_GECC2__ENABLE__SHIFT 0x00000000
+#define MC_ARB_GECC2__EXOR_BANK_SEL_MASK 0x00000060L
+#define MC_ARB_GECC2__EXOR_BANK_SEL__SHIFT 0x00000005
+#define MC_ARB_GECC2_MISC__STREAK_BREAK_MASK 0x0000000fL
+#define MC_ARB_GECC2_MISC__STREAK_BREAK__SHIFT 0x00000000
+#define MC_ARB_GECC2__NO_GECC_CLI_MASK 0x00000780L
+#define MC_ARB_GECC2__NO_GECC_CLI__SHIFT 0x00000007
+#define MC_ARB_GECC2__PAGE_BIT0_MASK 0x00000018L
+#define MC_ARB_GECC2__PAGE_BIT0__SHIFT 0x00000003
+#define MC_ARB_GECC2__READ_ERR_MASK 0x00003800L
+#define MC_ARB_GECC2__READ_ERR__SHIFT 0x0000000b
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR0_MASK 0x00000100L
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR0__SHIFT 0x00000008
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR1_MASK 0x00001000L
+#define MC_ARB_GECC2_STATUS__CORR_CLEAR1__SHIFT 0x0000000c
+#define MC_ARB_GECC2_STATUS__CORR_STS0_MASK 0x00000001L
+#define MC_ARB_GECC2_STATUS__CORR_STS0__SHIFT 0x00000000
+#define MC_ARB_GECC2_STATUS__CORR_STS1_MASK 0x00000010L
+#define MC_ARB_GECC2_STATUS__CORR_STS1__SHIFT 0x00000004
+#define MC_ARB_GECC2_STATUS__FED_CLEAR0_MASK 0x00000400L
+#define MC_ARB_GECC2_STATUS__FED_CLEAR0__SHIFT 0x0000000a
+#define MC_ARB_GECC2_STATUS__FED_CLEAR1_MASK 0x00004000L
+#define MC_ARB_GECC2_STATUS__FED_CLEAR1__SHIFT 0x0000000e
+#define MC_ARB_GECC2_STATUS__FED_STS0_MASK 0x00000004L
+#define MC_ARB_GECC2_STATUS__FED_STS0__SHIFT 0x00000002
+#define MC_ARB_GECC2_STATUS__FED_STS1_MASK 0x00000040L
+#define MC_ARB_GECC2_STATUS__FED_STS1__SHIFT 0x00000006
+#define MC_ARB_GECC2_STATUS__RSVD0_MASK 0x00000008L
+#define MC_ARB_GECC2_STATUS__RSVD0__SHIFT 0x00000003
+#define MC_ARB_GECC2_STATUS__RSVD1_MASK 0x00000080L
+#define MC_ARB_GECC2_STATUS__RSVD1__SHIFT 0x00000007
+#define MC_ARB_GECC2_STATUS__RSVD2_MASK 0x00000800L
+#define MC_ARB_GECC2_STATUS__RSVD2__SHIFT 0x0000000b
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR0_MASK 0x00000200L
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR0__SHIFT 0x00000009
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR1_MASK 0x00002000L
+#define MC_ARB_GECC2_STATUS__UNCORR_CLEAR1__SHIFT 0x0000000d
+#define MC_ARB_GECC2_STATUS__UNCORR_STS0_MASK 0x00000002L
+#define MC_ARB_GECC2_STATUS__UNCORR_STS0__SHIFT 0x00000001
+#define MC_ARB_GECC2_STATUS__UNCORR_STS1_MASK 0x00000020L
+#define MC_ARB_GECC2_STATUS__UNCORR_STS1__SHIFT 0x00000005
+#define MC_ARB_LAZY0_RD__GROUP0_MASK 0x000000ffL
+#define MC_ARB_LAZY0_RD__GROUP0__SHIFT 0x00000000
+#define MC_ARB_LAZY0_RD__GROUP1_MASK 0x0000ff00L
+#define MC_ARB_LAZY0_RD__GROUP1__SHIFT 0x00000008
+#define MC_ARB_LAZY0_RD__GROUP2_MASK 0x00ff0000L
+#define MC_ARB_LAZY0_RD__GROUP2__SHIFT 0x00000010
+#define MC_ARB_LAZY0_RD__GROUP3_MASK 0xff000000L
+#define MC_ARB_LAZY0_RD__GROUP3__SHIFT 0x00000018
+#define MC_ARB_LAZY0_WR__GROUP0_MASK 0x000000ffL
+#define MC_ARB_LAZY0_WR__GROUP0__SHIFT 0x00000000
+#define MC_ARB_LAZY0_WR__GROUP1_MASK 0x0000ff00L
+#define MC_ARB_LAZY0_WR__GROUP1__SHIFT 0x00000008
+#define MC_ARB_LAZY0_WR__GROUP2_MASK 0x00ff0000L
+#define MC_ARB_LAZY0_WR__GROUP2__SHIFT 0x00000010
+#define MC_ARB_LAZY0_WR__GROUP3_MASK 0xff000000L
+#define MC_ARB_LAZY0_WR__GROUP3__SHIFT 0x00000018
+#define MC_ARB_LAZY1_RD__GROUP4_MASK 0x000000ffL
+#define MC_ARB_LAZY1_RD__GROUP4__SHIFT 0x00000000
+#define MC_ARB_LAZY1_RD__GROUP5_MASK 0x0000ff00L
+#define MC_ARB_LAZY1_RD__GROUP5__SHIFT 0x00000008
+#define MC_ARB_LAZY1_RD__GROUP6_MASK 0x00ff0000L
+#define MC_ARB_LAZY1_RD__GROUP6__SHIFT 0x00000010
+#define MC_ARB_LAZY1_RD__GROUP7_MASK 0xff000000L
+#define MC_ARB_LAZY1_RD__GROUP7__SHIFT 0x00000018
+#define MC_ARB_LAZY1_WR__GROUP4_MASK 0x000000ffL
+#define MC_ARB_LAZY1_WR__GROUP4__SHIFT 0x00000000
+#define MC_ARB_LAZY1_WR__GROUP5_MASK 0x0000ff00L
+#define MC_ARB_LAZY1_WR__GROUP5__SHIFT 0x00000008
+#define MC_ARB_LAZY1_WR__GROUP6_MASK 0x00ff0000L
+#define MC_ARB_LAZY1_WR__GROUP6__SHIFT 0x00000010
+#define MC_ARB_LAZY1_WR__GROUP7_MASK 0xff000000L
+#define MC_ARB_LAZY1_WR__GROUP7__SHIFT 0x00000018
+#define MC_ARB_LM_RD__BANKGROUP_CONFIG_MASK 0x00e00000L
+#define MC_ARB_LM_RD__BANKGROUP_CONFIG__SHIFT 0x00000015
+#define MC_ARB_LM_RD__ENABLE_TWO_LIST_MASK 0x00040000L
+#define MC_ARB_LM_RD__ENABLE_TWO_LIST__SHIFT 0x00000012
+#define MC_ARB_LM_RD__POPIDLE_RST_TWOLIST_MASK 0x00080000L
+#define MC_ARB_LM_RD__POPIDLE_RST_TWOLIST__SHIFT 0x00000013
+#define MC_ARB_LM_RD__SKID1_RST_TWOLIST_MASK 0x00100000L
+#define MC_ARB_LM_RD__SKID1_RST_TWOLIST__SHIFT 0x00000014
+#define MC_ARB_LM_RD__STREAK_BREAK_MASK 0x00010000L
+#define MC_ARB_LM_RD__STREAK_BREAK__SHIFT 0x00000010
+#define MC_ARB_LM_RD__STREAK_LIMIT_MASK 0x000000ffL
+#define MC_ARB_LM_RD__STREAK_LIMIT__SHIFT 0x00000000
+#define MC_ARB_LM_RD__STREAK_LIMIT_UBER_MASK 0x0000ff00L
+#define MC_ARB_LM_RD__STREAK_LIMIT_UBER__SHIFT 0x00000008
+#define MC_ARB_LM_RD__STREAK_UBER_MASK 0x00020000L
+#define MC_ARB_LM_RD__STREAK_UBER__SHIFT 0x00000011
+#define MC_ARB_LM_WR__BANKGROUP_CONFIG_MASK 0x00e00000L
+#define MC_ARB_LM_WR__BANKGROUP_CONFIG__SHIFT 0x00000015
+#define MC_ARB_LM_WR__ENABLE_TWO_LIST_MASK 0x00040000L
+#define MC_ARB_LM_WR__ENABLE_TWO_LIST__SHIFT 0x00000012
+#define MC_ARB_LM_WR__POPIDLE_RST_TWOLIST_MASK 0x00080000L
+#define MC_ARB_LM_WR__POPIDLE_RST_TWOLIST__SHIFT 0x00000013
+#define MC_ARB_LM_WR__SKID1_RST_TWOLIST_MASK 0x00100000L
+#define MC_ARB_LM_WR__SKID1_RST_TWOLIST__SHIFT 0x00000014
+#define MC_ARB_LM_WR__STREAK_BREAK_MASK 0x00010000L
+#define MC_ARB_LM_WR__STREAK_BREAK__SHIFT 0x00000010
+#define MC_ARB_LM_WR__STREAK_LIMIT_MASK 0x000000ffL
+#define MC_ARB_LM_WR__STREAK_LIMIT__SHIFT 0x00000000
+#define MC_ARB_LM_WR__STREAK_LIMIT_UBER_MASK 0x0000ff00L
+#define MC_ARB_LM_WR__STREAK_LIMIT_UBER__SHIFT 0x00000008
+#define MC_ARB_LM_WR__STREAK_UBER_MASK 0x00020000L
+#define MC_ARB_LM_WR__STREAK_UBER__SHIFT 0x00000011
+#define MC_ARB_MINCLKS__ARB_RW_SWITCH_MASK 0x00010000L
+#define MC_ARB_MINCLKS__ARB_RW_SWITCH__SHIFT 0x00000010
+#define MC_ARB_MINCLKS__READ_CLKS_MASK 0x000000ffL
+#define MC_ARB_MINCLKS__READ_CLKS__SHIFT 0x00000000
+#define MC_ARB_MINCLKS__WRITE_CLKS_MASK 0x0000ff00L
+#define MC_ARB_MINCLKS__WRITE_CLKS__SHIFT 0x00000008
+#define MC_ARB_MISC2__ARB_DEBUG29_MASK 0x20000000L
+#define MC_ARB_MISC2__ARB_DEBUG29__SHIFT 0x0000001d
+#define MC_ARB_MISC2__GECC_MASK 0x00040000L
+#define MC_ARB_MISC2__GECC_RST_MASK 0x00080000L
+#define MC_ARB_MISC2__GECC_RST__SHIFT 0x00000013
+#define MC_ARB_MISC2__GECC__SHIFT 0x00000012
+#define MC_ARB_MISC2__GECC_STATUS_MASK 0x00100000L
+#define MC_ARB_MISC2__GECC_STATUS__SHIFT 0x00000014
+#define MC_ARB_MISC2__POP_IDLE_REPLAY_MASK 0x00000800L
+#define MC_ARB_MISC2__POP_IDLE_REPLAY__SHIFT 0x0000000b
+#define MC_ARB_MISC2__RDRET_NO_BP_MASK 0x00002000L
+#define MC_ARB_MISC2__RDRET_NO_BP__SHIFT 0x0000000d
+#define MC_ARB_MISC2__RDRET_NO_REORDERING_MASK 0x00001000L
+#define MC_ARB_MISC2__RDRET_NO_REORDERING__SHIFT 0x0000000c
+#define MC_ARB_MISC2__RDRET_SEQ_SKID_MASK 0x0003c000L
+#define MC_ARB_MISC2__RDRET_SEQ_SKID__SHIFT 0x0000000e
+#define MC_ARB_MISC2__REPLAY_DEBUG_MASK 0x10000000L
+#define MC_ARB_MISC2__REPLAY_DEBUG__SHIFT 0x0000001c
+#define MC_ARB_MISC2__SEQ_RDY_POP_IDLE_MASK 0x40000000L
+#define MC_ARB_MISC2__SEQ_RDY_POP_IDLE__SHIFT 0x0000001e
+#define MC_ARB_MISC2__TAGFIFO_THRESHOLD_MASK 0x01e00000L
+#define MC_ARB_MISC2__TAGFIFO_THRESHOLD__SHIFT 0x00000015
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT4_MASK 0x00000040L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT4__SHIFT 0x00000006
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT5_MASK 0x00000080L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT5__SHIFT 0x00000007
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT6_MASK 0x00000100L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT6__SHIFT 0x00000008
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT7_MASK 0x00000200L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT7__SHIFT 0x00000009
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT8_MASK 0x00000400L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_COLBIT8__SHIFT 0x0000000a
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_ENABLE_MASK 0x00000020L
+#define MC_ARB_MISC2__TCCDL4_BANKBIT3_XOR_ENABLE__SHIFT 0x00000005
+#define MC_ARB_MISC2__TCCDL4_REPLAY_EOB_MASK 0x80000000L
+#define MC_ARB_MISC2__TCCDL4_REPLAY_EOB__SHIFT 0x0000001f
+#define MC_ARB_MISC2__WCDR_REPLAY_MASKCNT_MASK 0x0e000000L
+#define MC_ARB_MISC2__WCDR_REPLAY_MASKCNT__SHIFT 0x00000019
+#define MC_ARB_MISC__CALI_ENABLE_MASK 0x00100000L
+#define MC_ARB_MISC__CALI_ENABLE__SHIFT 0x00000014
+#define MC_ARB_MISC__CALI_RATES_MASK 0x00600000L
+#define MC_ARB_MISC__CALI_RATES__SHIFT 0x00000015
+#define MC_ARB_MISC__CHAN_COUPLE_MASK 0x000007f8L
+#define MC_ARB_MISC__CHAN_COUPLE__SHIFT 0x00000003
+#define MC_ARB_MISC__DISPURG_NOSW2WR_MASK 0x01000000L
+#define MC_ARB_MISC__DISPURG_NOSW2WR__SHIFT 0x00000018
+#define MC_ARB_MISC__DISPURG_STALL_MASK 0x02000000L
+#define MC_ARB_MISC__DISPURG_STALL__SHIFT 0x00000019
+#define MC_ARB_MISC__DISPURG_THROTTLE_MASK 0x3c000000L
+#define MC_ARB_MISC__DISPURG_THROTTLE__SHIFT 0x0000001a
+#define MC_ARB_MISC__DISPURGVLD_NOWRT_MASK 0x00800000L
+#define MC_ARB_MISC__DISPURGVLD_NOWRT__SHIFT 0x00000017
+#define MC_ARB_MISC__HARSHNESS_MASK 0x0007f800L
+#define MC_ARB_MISC__HARSHNESS__SHIFT 0x0000000b
+#define MC_ARB_MISC__IDLE_RFSH_MASK 0x00000002L
+#define MC_ARB_MISC__IDLE_RFSH__SHIFT 0x00000001
+#define MC_ARB_MISC__SMART_RDWR_SW_MASK 0x00080000L
+#define MC_ARB_MISC__SMART_RDWR_SW__SHIFT 0x00000013
+#define MC_ARB_MISC__STICKY_RFSH_MASK 0x00000001L
+#define MC_ARB_MISC__STICKY_RFSH__SHIFT 0x00000000
+#define MC_ARB_MISC__STUTTER_RFSH_MASK 0x00000004L
+#define MC_ARB_MISC__STUTTER_RFSH__SHIFT 0x00000002
+#define MC_ARB_PM_CNTL__BLKOUT_ON_D1_MASK 0x00000020L
+#define MC_ARB_PM_CNTL__BLKOUT_ON_D1__SHIFT 0x00000005
+#define MC_ARB_PM_CNTL__IDLE_CNT_MASK 0x00f00000L
+#define MC_ARB_PM_CNTL__IDLE_CNT__SHIFT 0x00000014
+#define MC_ARB_PM_CNTL__IDLE_ON_D1_MASK 0x00000040L
+#define MC_ARB_PM_CNTL__IDLE_ON_D1__SHIFT 0x00000006
+#define MC_ARB_PM_CNTL__IDLE_ON_D2_MASK 0x00040000L
+#define MC_ARB_PM_CNTL__IDLE_ON_D2__SHIFT 0x00000012
+#define MC_ARB_PM_CNTL__IDLE_ON_D3_MASK 0x00080000L
+#define MC_ARB_PM_CNTL__IDLE_ON_D3__SHIFT 0x00000013
+#define MC_ARB_PM_CNTL__OVERRIDE_CGSTATE_MASK 0x00000003L
+#define MC_ARB_PM_CNTL__OVERRIDE_CGSTATE__SHIFT 0x00000000
+#define MC_ARB_PM_CNTL__OVRR_CGRFSH_MASK 0x00000004L
+#define MC_ARB_PM_CNTL__OVRR_CGRFSH__SHIFT 0x00000002
+#define MC_ARB_PM_CNTL__OVRR_CGSQM_MASK 0x00000008L
+#define MC_ARB_PM_CNTL__OVRR_CGSQM__SHIFT 0x00000003
+#define MC_ARB_PM_CNTL__OVRR_PM_MASK 0x00000080L
+#define MC_ARB_PM_CNTL__OVRR_PM__SHIFT 0x00000007
+#define MC_ARB_PM_CNTL__OVRR_PM_STATE_MASK 0x00000300L
+#define MC_ARB_PM_CNTL__OVRR_PM_STATE__SHIFT 0x00000008
+#define MC_ARB_PM_CNTL__OVRR_RD_MASK 0x00000400L
+#define MC_ARB_PM_CNTL__OVRR_RD__SHIFT 0x0000000a
+#define MC_ARB_PM_CNTL__OVRR_RD_STATE_MASK 0x00000800L
+#define MC_ARB_PM_CNTL__OVRR_RD_STATE__SHIFT 0x0000000b
+#define MC_ARB_PM_CNTL__OVRR_RFSH_MASK 0x00004000L
+#define MC_ARB_PM_CNTL__OVRR_RFSH__SHIFT 0x0000000e
+#define MC_ARB_PM_CNTL__OVRR_RFSH_STATE_MASK 0x00008000L
+#define MC_ARB_PM_CNTL__OVRR_RFSH_STATE__SHIFT 0x0000000f
+#define MC_ARB_PM_CNTL__OVRR_WR_MASK 0x00001000L
+#define MC_ARB_PM_CNTL__OVRR_WR__SHIFT 0x0000000c
+#define MC_ARB_PM_CNTL__OVRR_WR_STATE_MASK 0x00002000L
+#define MC_ARB_PM_CNTL__OVRR_WR_STATE__SHIFT 0x0000000d
+#define MC_ARB_PM_CNTL__SRFSH_ON_D1_MASK 0x00000010L
+#define MC_ARB_PM_CNTL__SRFSH_ON_D1__SHIFT 0x00000004
+#define MC_ARB_POP__ALLOW_EOB_BY_WRRET_STALL_MASK 0x00080000L
+#define MC_ARB_POP__ALLOW_EOB_BY_WRRET_STALL__SHIFT 0x00000013
+#define MC_ARB_POP__ENABLE_ARB_MASK 0x00000001L
+#define MC_ARB_POP__ENABLE_ARB__SHIFT 0x00000000
+#define MC_ARB_POP__ENABLE_TWO_PAGE_MASK 0x00040000L
+#define MC_ARB_POP__ENABLE_TWO_PAGE__SHIFT 0x00000012
+#define MC_ARB_POP__POP_DEPTH_MASK 0x0000003cL
+#define MC_ARB_POP__POP_DEPTH__SHIFT 0x00000002
+#define MC_ARB_POP__QUICK_STOP_MASK 0x00020000L
+#define MC_ARB_POP__QUICK_STOP__SHIFT 0x00000011
+#define MC_ARB_POP__SKID_DEPTH_MASK 0x00007000L
+#define MC_ARB_POP__SKID_DEPTH__SHIFT 0x0000000c
+#define MC_ARB_POP__SPEC_OPEN_MASK 0x00000002L
+#define MC_ARB_POP__SPEC_OPEN__SHIFT 0x00000001
+#define MC_ARB_POP__WAIT_AFTER_RFSH_MASK 0x00018000L
+#define MC_ARB_POP__WAIT_AFTER_RFSH__SHIFT 0x0000000f
+#define MC_ARB_POP__WRDATAINDEX_DEPTH_MASK 0x00000fc0L
+#define MC_ARB_POP__WRDATAINDEX_DEPTH__SHIFT 0x00000006
+#define MC_ARB_RAMCFG__CHANSIZE_MASK 0x00000100L
+#define MC_ARB_RAMCFG__CHANSIZE__SHIFT 0x00000008
+#define MC_ARB_RAMCFG__NOOFBANK_MASK 0x00000003L
+#define MC_ARB_RAMCFG__NOOFBANK__SHIFT 0x00000000
+#define MC_ARB_RAMCFG__NOOFCOLS_MASK 0x000000c0L
+#define MC_ARB_RAMCFG__NOOFCOLS__SHIFT 0x00000006
+#define MC_ARB_RAMCFG__NOOFGROUPS_MASK 0x00001000L
+#define MC_ARB_RAMCFG__NOOFGROUPS__SHIFT 0x0000000c
+#define MC_ARB_RAMCFG__NOOFRANKS_MASK 0x00000004L
+#define MC_ARB_RAMCFG__NOOFRANKS__SHIFT 0x00000002
+#define MC_ARB_RAMCFG__NOOFROWS_MASK 0x00000038L
+#define MC_ARB_RAMCFG__NOOFROWS__SHIFT 0x00000003
+#define MC_ARB_REMREQ__RD_WATER_MASK 0x000000ffL
+#define MC_ARB_REMREQ__RD_WATER__SHIFT 0x00000000
+#define MC_ARB_REMREQ__WR_LAZY_TIMER_MASK 0x00f00000L
+#define MC_ARB_REMREQ__WR_LAZY_TIMER__SHIFT 0x00000014
+#define MC_ARB_REMREQ__WR_MAXBURST_SIZE_MASK 0x000f0000L
+#define MC_ARB_REMREQ__WR_MAXBURST_SIZE__SHIFT 0x00000010
+#define MC_ARB_REMREQ__WR_WATER_MASK 0x0000ff00L
+#define MC_ARB_REMREQ__WR_WATER__SHIFT 0x00000008
+#define MC_ARB_REPLAY__BOS_ENABLE_WAIT_CYC_MASK 0x00000080L
+#define MC_ARB_REPLAY__BOS_ENABLE_WAIT_CYC__SHIFT 0x00000007
+#define MC_ARB_REPLAY__BOS_WAIT_CYC_MASK 0x00007f00L
+#define MC_ARB_REPLAY__BOS_WAIT_CYC__SHIFT 0x00000008
+#define MC_ARB_REPLAY__BREAK_ON_STALL_MASK 0x00000040L
+#define MC_ARB_REPLAY__BREAK_ON_STALL__SHIFT 0x00000006
+#define MC_ARB_REPLAY__ENABLE_RD_MASK 0x00000001L
+#define MC_ARB_REPLAY__ENABLE_RD__SHIFT 0x00000000
+#define MC_ARB_REPLAY__ENABLE_WR_MASK 0x00000002L
+#define MC_ARB_REPLAY__ENABLE_WR__SHIFT 0x00000001
+#define MC_ARB_REPLAY__IGNORE_WR_CDC_MASK 0x00000020L
+#define MC_ARB_REPLAY__IGNORE_WR_CDC__SHIFT 0x00000005
+#define MC_ARB_REPLAY__RAW_ENABLE_MASK 0x00000010L
+#define MC_ARB_REPLAY__RAW_ENABLE__SHIFT 0x00000004
+#define MC_ARB_REPLAY__WAW_ENABLE_MASK 0x00000008L
+#define MC_ARB_REPLAY__WAW_ENABLE__SHIFT 0x00000003
+#define MC_ARB_REPLAY__WRACK_MODE_MASK 0x00000004L
+#define MC_ARB_REPLAY__WRACK_MODE__SHIFT 0x00000002
+#define MC_ARB_RET_CREDITS_RD__DISP_MASK 0x00ff0000L
+#define MC_ARB_RET_CREDITS_RD__DISP__SHIFT 0x00000010
+#define MC_ARB_RET_CREDITS_RD__HUB_MASK 0x0000ff00L
+#define MC_ARB_RET_CREDITS_RD__HUB__SHIFT 0x00000008
+#define MC_ARB_RET_CREDITS_RD__LCL_MASK 0x000000ffL
+#define MC_ARB_RET_CREDITS_RD__LCL__SHIFT 0x00000000
+#define MC_ARB_RET_CREDITS_RD__RETURN_CREDIT_MASK 0xff000000L
+#define MC_ARB_RET_CREDITS_RD__RETURN_CREDIT__SHIFT 0x00000018
+#define MC_ARB_RET_CREDITS_WR__HUB_MASK 0x0000ff00L
+#define MC_ARB_RET_CREDITS_WR__HUB__SHIFT 0x00000008
+#define MC_ARB_RET_CREDITS_WR__LCL_MASK 0x000000ffL
+#define MC_ARB_RET_CREDITS_WR__LCL__SHIFT 0x00000000
+#define MC_ARB_RET_CREDITS_WR__RETURN_CREDIT_MASK 0x00ff0000L
+#define MC_ARB_RET_CREDITS_WR__RETURN_CREDIT__SHIFT 0x00000010
+#define MC_ARB_RET_CREDITS_WR__WRRET_SEQ_SKID_MASK 0x0f000000L
+#define MC_ARB_RET_CREDITS_WR__WRRET_SEQ_SKID__SHIFT 0x00000018
+#define MC_ARB_RFSH_CNTL__ACCUM_MASK 0x00000800L
+#define MC_ARB_RFSH_CNTL__ACCUM__SHIFT 0x0000000b
+#define MC_ARB_RFSH_CNTL__ENABLE_MASK 0x00000001L
+#define MC_ARB_RFSH_CNTL__ENABLE__SHIFT 0x00000000
+#define MC_ARB_RFSH_CNTL__URG0_MASK 0x0000003eL
+#define MC_ARB_RFSH_CNTL__URG0__SHIFT 0x00000001
+#define MC_ARB_RFSH_CNTL__URG1_MASK 0x000007c0L
+#define MC_ARB_RFSH_CNTL__URG1__SHIFT 0x00000006
+#define MC_ARB_RFSH_RATE__POWERMODE0_MASK 0x000000ffL
+#define MC_ARB_RFSH_RATE__POWERMODE0__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL0__BREAK_ON_HARSH_MASK 0x00000100L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_HARSH__SHIFT 0x00000008
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTRD_MASK 0x00000200L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTRD__SHIFT 0x00000009
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTWR_MASK 0x00000400L
+#define MC_ARB_RTT_CNTL0__BREAK_ON_URGENTWR__SHIFT 0x0000000a
+#define MC_ARB_RTT_CNTL0__DATA_CNTL_MASK 0x01000000L
+#define MC_ARB_RTT_CNTL0__DATA_CNTL__SHIFT 0x00000018
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_0_MASK 0x00008000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_0__SHIFT 0x0000000f
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_1_MASK 0x00010000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_1__SHIFT 0x00000010
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_2_MASK 0x00020000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_2__SHIFT 0x00000011
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_3_MASK 0x00040000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_3__SHIFT 0x00000012
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_4_MASK 0x00080000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_4__SHIFT 0x00000013
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_5_MASK 0x00100000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_5__SHIFT 0x00000014
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_6_MASK 0x00200000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_6__SHIFT 0x00000015
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_7_MASK 0x00400000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_7__SHIFT 0x00000016
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_8_MASK 0x00800000L
+#define MC_ARB_RTT_CNTL0__DEBUG_RSV_8__SHIFT 0x00000017
+#define MC_ARB_RTT_CNTL0__ENABLE_MASK 0x00000001L
+#define MC_ARB_RTT_CNTL0__ENABLE__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL0__FLUSH_ON_ENTER_MASK 0x00000010L
+#define MC_ARB_RTT_CNTL0__FLUSH_ON_ENTER__SHIFT 0x00000004
+#define MC_ARB_RTT_CNTL0__HARSH_START_MASK 0x00000020L
+#define MC_ARB_RTT_CNTL0__HARSH_START__SHIFT 0x00000005
+#define MC_ARB_RTT_CNTL0__NEIGHBOR_BIT_MASK 0x02000000L
+#define MC_ARB_RTT_CNTL0__NEIGHBOR_BIT__SHIFT 0x00000019
+#define MC_ARB_RTT_CNTL0__START_IDLE_MASK 0x00000002L
+#define MC_ARB_RTT_CNTL0__START_IDLE__SHIFT 0x00000001
+#define MC_ARB_RTT_CNTL0__START_R2W_MASK 0x0000000cL
+#define MC_ARB_RTT_CNTL0__START_R2W_RFSH_MASK 0x00004000L
+#define MC_ARB_RTT_CNTL0__START_R2W_RFSH__SHIFT 0x0000000e
+#define MC_ARB_RTT_CNTL0__START_R2W__SHIFT 0x00000002
+#define MC_ARB_RTT_CNTL0__TPS_HARSH_PRIORITY_MASK 0x00000040L
+#define MC_ARB_RTT_CNTL0__TPS_HARSH_PRIORITY__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL0__TRAIN_PERIOD_MASK 0x00003800L
+#define MC_ARB_RTT_CNTL0__TRAIN_PERIOD__SHIFT 0x0000000b
+#define MC_ARB_RTT_CNTL0__TWRT_HARSH_PRIORITY_MASK 0x00000080L
+#define MC_ARB_RTT_CNTL0__TWRT_HARSH_PRIORITY__SHIFT 0x00000007
+#define MC_ARB_RTT_CNTL1__WINDOW_DEC_THRESHOLD_MASK 0x000fe000L
+#define MC_ARB_RTT_CNTL1__WINDOW_DEC_THRESHOLD__SHIFT 0x0000000d
+#define MC_ARB_RTT_CNTL1__WINDOW_INC_THRESHOLD_MASK 0x00001fc0L
+#define MC_ARB_RTT_CNTL1__WINDOW_INC_THRESHOLD__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MASK 0x0000001fL
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MAX_MASK 0x01f00000L
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MAX__SHIFT 0x00000014
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MIN_MASK 0x3e000000L
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE_MIN__SHIFT 0x00000019
+#define MC_ARB_RTT_CNTL1__WINDOW_SIZE__SHIFT 0x00000000
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_COUNT_MASK 0xc0000000L
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_COUNT__SHIFT 0x0000001e
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE_MASK 0x00000020L
+#define MC_ARB_RTT_CNTL1__WINDOW_UPDATE__SHIFT 0x00000005
+#define MC_ARB_RTT_CNTL2__FILTER_CNTL_MASK 0x00002000L
+#define MC_ARB_RTT_CNTL2__FILTER_CNTL__SHIFT 0x0000000d
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_SIZE_MASK 0x00001000L
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_SIZE__SHIFT 0x0000000c
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_THRESHOLD_MASK 0x00000fc0L
+#define MC_ARB_RTT_CNTL2__PHASE_ADJUST_THRESHOLD__SHIFT 0x00000006
+#define MC_ARB_RTT_CNTL2__SAMPLE_CNT_MASK 0x0000003fL
+#define MC_ARB_RTT_CNTL2__SAMPLE_CNT__SHIFT 0x00000000
+#define MC_ARB_RTT_DATA__PATTERN_MASK 0x000000ffL
+#define MC_ARB_RTT_DATA__PATTERN__SHIFT 0x00000000
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH0_MASK 0x00000003L
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH0__SHIFT 0x00000000
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH1_MASK 0x0000000cL
+#define MC_ARB_RTT_DEBUG__DEBUG_BYTE_CH1__SHIFT 0x00000002
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH0_MASK 0x00000ff0L
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH0__SHIFT 0x00000004
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH1_MASK 0x01fe0000L
+#define MC_ARB_RTT_DEBUG__SHIFTED_PHASE_CH1__SHIFT 0x00000011
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH0_MASK 0x0001f000L
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH0__SHIFT 0x0000000c
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH1_MASK 0x3e000000L
+#define MC_ARB_RTT_DEBUG__WINDOW_SIZE_CH1__SHIFT 0x00000019
+#define MC_ARB_SQM_CNTL__DYN_SQM_ENABLE_MASK 0x00000100L
+#define MC_ARB_SQM_CNTL__DYN_SQM_ENABLE__SHIFT 0x00000008
+#define MC_ARB_SQM_CNTL__MIN_PENAL_MASK 0x000000ffL
+#define MC_ARB_SQM_CNTL__MIN_PENAL__SHIFT 0x00000000
+#define MC_ARB_SQM_CNTL__RATIO_DEBUG_MASK 0xff000000L
+#define MC_ARB_SQM_CNTL__RATIO_DEBUG__SHIFT 0x00000018
+#define MC_ARB_SQM_CNTL__RATIO_MASK 0x00ff0000L
+#define MC_ARB_SQM_CNTL__RATIO__SHIFT 0x00000010
+#define MC_ARB_SQM_CNTL__SQM_RESERVE_MASK 0x0000fe00L
+#define MC_ARB_SQM_CNTL__SQM_RESERVE__SHIFT 0x00000009
+#define MC_ARB_TM_CNTL_RD__BANK_SELECT_MASK 0x00000006L
+#define MC_ARB_TM_CNTL_RD__BANK_SELECT__SHIFT 0x00000001
+#define MC_ARB_TM_CNTL_RD__GROUPBY_RANK_MASK 0x00000001L
+#define MC_ARB_TM_CNTL_RD__GROUPBY_RANK__SHIFT 0x00000000
+#define MC_ARB_TM_CNTL_RD__MATCH_BANK_MASK 0x00000010L
+#define MC_ARB_TM_CNTL_RD__MATCH_BANK__SHIFT 0x00000004
+#define MC_ARB_TM_CNTL_RD__MATCH_RANK_MASK 0x00000008L
+#define MC_ARB_TM_CNTL_RD__MATCH_RANK__SHIFT 0x00000003
+#define MC_ARB_TM_CNTL_WR__BANK_SELECT_MASK 0x00000006L
+#define MC_ARB_TM_CNTL_WR__BANK_SELECT__SHIFT 0x00000001
+#define MC_ARB_TM_CNTL_WR__GROUPBY_RANK_MASK 0x00000001L
+#define MC_ARB_TM_CNTL_WR__GROUPBY_RANK__SHIFT 0x00000000
+#define MC_ARB_TM_CNTL_WR__MATCH_BANK_MASK 0x00000010L
+#define MC_ARB_TM_CNTL_WR__MATCH_BANK__SHIFT 0x00000004
+#define MC_ARB_TM_CNTL_WR__MATCH_RANK_MASK 0x00000008L
+#define MC_ARB_TM_CNTL_WR__MATCH_RANK__SHIFT 0x00000003
+#define MC_ARB_WCDR_2__DEBUG_0_MASK 0x00000200L
+#define MC_ARB_WCDR_2__DEBUG_0__SHIFT 0x00000009
+#define MC_ARB_WCDR_2__DEBUG_1_MASK 0x00000400L
+#define MC_ARB_WCDR_2__DEBUG_1__SHIFT 0x0000000a
+#define MC_ARB_WCDR_2__DEBUG_2_MASK 0x00000800L
+#define MC_ARB_WCDR_2__DEBUG_2__SHIFT 0x0000000b
+#define MC_ARB_WCDR_2__DEBUG_3_MASK 0x00001000L
+#define MC_ARB_WCDR_2__DEBUG_3__SHIFT 0x0000000c
+#define MC_ARB_WCDR_2__DEBUG_4_MASK 0x00002000L
+#define MC_ARB_WCDR_2__DEBUG_4__SHIFT 0x0000000d
+#define MC_ARB_WCDR_2__DEBUG_5_MASK 0x00004000L
+#define MC_ARB_WCDR_2__DEBUG_5__SHIFT 0x0000000e
+#define MC_ARB_WCDR_2__WPRE_INC_STEP_MASK 0x0000000fL
+#define MC_ARB_WCDR_2__WPRE_INC_STEP__SHIFT 0x00000000
+#define MC_ARB_WCDR_2__WPRE_MIN_THRESHOLD_MASK 0x000001f0L
+#define MC_ARB_WCDR_2__WPRE_MIN_THRESHOLD__SHIFT 0x00000004
+#define MC_ARB_WCDR__IDLE_BURST_MASK 0x00001f80L
+#define MC_ARB_WCDR__IDLE_BURST_MODE_MASK 0x00002000L
+#define MC_ARB_WCDR__IDLE_BURST_MODE__SHIFT 0x0000000d
+#define MC_ARB_WCDR__IDLE_BURST__SHIFT 0x00000007
+#define MC_ARB_WCDR__IDLE_DEGLITCH_ENABLE_MASK 0x00010000L
+#define MC_ARB_WCDR__IDLE_DEGLITCH_ENABLE__SHIFT 0x00000010
+#define MC_ARB_WCDR__IDLE_ENABLE_MASK 0x00000001L
+#define MC_ARB_WCDR__IDLE_ENABLE__SHIFT 0x00000000
+#define MC_ARB_WCDR__IDLE_PERIOD_MASK 0x0000007cL
+#define MC_ARB_WCDR__IDLE_PERIOD__SHIFT 0x00000002
+#define MC_ARB_WCDR__IDLE_WAKEUP_MASK 0x0000c000L
+#define MC_ARB_WCDR__IDLE_WAKEUP__SHIFT 0x0000000e
+#define MC_ARB_WCDR__SEQ_IDLE_MASK 0x00000002L
+#define MC_ARB_WCDR__SEQ_IDLE__SHIFT 0x00000001
+#define MC_ARB_WCDR__WPRE_ENABLE_MASK 0x00020000L
+#define MC_ARB_WCDR__WPRE_ENABLE__SHIFT 0x00000011
+#define MC_ARB_WCDR__WPRE_INC_READ_MASK 0x02000000L
+#define MC_ARB_WCDR__WPRE_INC_READ__SHIFT 0x00000019
+#define MC_ARB_WCDR__WPRE_INC_SEQIDLE_MASK 0x08000000L
+#define MC_ARB_WCDR__WPRE_INC_SEQIDLE__SHIFT 0x0000001b
+#define MC_ARB_WCDR__WPRE_INC_SKIDIDLE_MASK 0x04000000L
+#define MC_ARB_WCDR__WPRE_INC_SKIDIDLE__SHIFT 0x0000001a
+#define MC_ARB_WCDR__WPRE_MAX_BURST_MASK 0x01c00000L
+#define MC_ARB_WCDR__WPRE_MAX_BURST__SHIFT 0x00000016
+#define MC_ARB_WCDR__WPRE_THRESHOLD_MASK 0x003c0000L
+#define MC_ARB_WCDR__WPRE_THRESHOLD__SHIFT 0x00000012
+#define MC_ARB_WCDR__WPRE_TWOPAGE_MASK 0x10000000L
+#define MC_ARB_WCDR__WPRE_TWOPAGE__SHIFT 0x0000001c
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP0_MASK 0x00000008L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP0__SHIFT 0x00000003
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP1_MASK 0x00000010L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP1__SHIFT 0x00000004
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP2_MASK 0x00000020L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP2__SHIFT 0x00000005
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP3_MASK 0x00000040L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP4_MASK 0x00000080L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP4__SHIFT 0x00000007
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP5_MASK 0x00000100L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP5__SHIFT 0x00000008
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP6_MASK 0x00000200L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP6__SHIFT 0x00000009
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP7_MASK 0x00000400L
+#define MC_ARB_WTM_CNTL_RD__ALLOW_STUTTER_GRP7__SHIFT 0x0000000a
+#define MC_ARB_WTM_CNTL_RD__HARSH_PRI_MASK 0x00000004L
+#define MC_ARB_WTM_CNTL_RD__HARSH_PRI__SHIFT 0x00000002
+#define MC_ARB_WTM_CNTL_RD__WTMODE_MASK 0x00000003L
+#define MC_ARB_WTM_CNTL_RD__WTMODE__SHIFT 0x00000000
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP0_MASK 0x00000008L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP0__SHIFT 0x00000003
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP1_MASK 0x00000010L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP1__SHIFT 0x00000004
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP2_MASK 0x00000020L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP2__SHIFT 0x00000005
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP3_MASK 0x00000040L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP4_MASK 0x00000080L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP4__SHIFT 0x00000007
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP5_MASK 0x00000100L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP5__SHIFT 0x00000008
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP6_MASK 0x00000200L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP6__SHIFT 0x00000009
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP7_MASK 0x00000400L
+#define MC_ARB_WTM_CNTL_WR__ALLOW_STUTTER_GRP7__SHIFT 0x0000000a
+#define MC_ARB_WTM_CNTL_WR__HARSH_PRI_MASK 0x00000004L
+#define MC_ARB_WTM_CNTL_WR__HARSH_PRI__SHIFT 0x00000002
+#define MC_ARB_WTM_CNTL_WR__WTMODE_MASK 0x00000003L
+#define MC_ARB_WTM_CNTL_WR__WTMODE__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_RD__GRP0_MASK 0x00000003L
+#define MC_ARB_WTM_GRPWT_RD__GRP0__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_RD__GRP1_MASK 0x0000000cL
+#define MC_ARB_WTM_GRPWT_RD__GRP1__SHIFT 0x00000002
+#define MC_ARB_WTM_GRPWT_RD__GRP2_MASK 0x00000030L
+#define MC_ARB_WTM_GRPWT_RD__GRP2__SHIFT 0x00000004
+#define MC_ARB_WTM_GRPWT_RD__GRP3_MASK 0x000000c0L
+#define MC_ARB_WTM_GRPWT_RD__GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_GRPWT_RD__GRP4_MASK 0x00000300L
+#define MC_ARB_WTM_GRPWT_RD__GRP4__SHIFT 0x00000008
+#define MC_ARB_WTM_GRPWT_RD__GRP5_MASK 0x00000c00L
+#define MC_ARB_WTM_GRPWT_RD__GRP5__SHIFT 0x0000000a
+#define MC_ARB_WTM_GRPWT_RD__GRP6_MASK 0x00003000L
+#define MC_ARB_WTM_GRPWT_RD__GRP6__SHIFT 0x0000000c
+#define MC_ARB_WTM_GRPWT_RD__GRP7_MASK 0x0000c000L
+#define MC_ARB_WTM_GRPWT_RD__GRP7__SHIFT 0x0000000e
+#define MC_ARB_WTM_GRPWT_RD__GRP_EXT_MASK 0x00ff0000L
+#define MC_ARB_WTM_GRPWT_RD__GRP_EXT__SHIFT 0x00000010
+#define MC_ARB_WTM_GRPWT_WR__GRP0_MASK 0x00000003L
+#define MC_ARB_WTM_GRPWT_WR__GRP0__SHIFT 0x00000000
+#define MC_ARB_WTM_GRPWT_WR__GRP1_MASK 0x0000000cL
+#define MC_ARB_WTM_GRPWT_WR__GRP1__SHIFT 0x00000002
+#define MC_ARB_WTM_GRPWT_WR__GRP2_MASK 0x00000030L
+#define MC_ARB_WTM_GRPWT_WR__GRP2__SHIFT 0x00000004
+#define MC_ARB_WTM_GRPWT_WR__GRP3_MASK 0x000000c0L
+#define MC_ARB_WTM_GRPWT_WR__GRP3__SHIFT 0x00000006
+#define MC_ARB_WTM_GRPWT_WR__GRP4_MASK 0x00000300L
+#define MC_ARB_WTM_GRPWT_WR__GRP4__SHIFT 0x00000008
+#define MC_ARB_WTM_GRPWT_WR__GRP5_MASK 0x00000c00L
+#define MC_ARB_WTM_GRPWT_WR__GRP5__SHIFT 0x0000000a
+#define MC_ARB_WTM_GRPWT_WR__GRP6_MASK 0x00003000L
+#define MC_ARB_WTM_GRPWT_WR__GRP6__SHIFT 0x0000000c
+#define MC_ARB_WTM_GRPWT_WR__GRP7_MASK 0x0000c000L
+#define MC_ARB_WTM_GRPWT_WR__GRP7__SHIFT 0x0000000e
+#define MC_ARB_WTM_GRPWT_WR__GRP_EXT_MASK 0x00ff0000L
+#define MC_ARB_WTM_GRPWT_WR__GRP_EXT__SHIFT 0x00000010
+#define MC_BIST_AUTO_CNTL__ADR_GEN_MASK 0x000000f0L
+#define MC_BIST_AUTO_CNTL__ADR_GEN__SHIFT 0x00000004
+#define MC_BIST_AUTO_CNTL__ADR_RESET_MASK 0x02000000L
+#define MC_BIST_AUTO_CNTL__ADR_RESET__SHIFT 0x00000019
+#define MC_BIST_AUTO_CNTL__LFSR_KEY_MASK 0x00ffff00L
+#define MC_BIST_AUTO_CNTL__LFSR_KEY__SHIFT 0x00000008
+#define MC_BIST_AUTO_CNTL__LFSR_RESET_MASK 0x01000000L
+#define MC_BIST_AUTO_CNTL__LFSR_RESET__SHIFT 0x00000018
+#define MC_BIST_AUTO_CNTL__MOP_MASK 0x00000003L
+#define MC_BIST_AUTO_CNTL__MOP__SHIFT 0x00000000
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_LOOP_MASK 0x00000004L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_LOOP__SHIFT 0x00000002
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_MASK 0x00000002L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE__SHIFT 0x00000001
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_U_MASK 0x00010000L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_MODE_U__SHIFT 0x00000010
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_RUN_MASK 0x00020000L
+#define MC_BIST_CMD_CNTL__CMD_ISSUE_RUN__SHIFT 0x00000011
+#define MC_BIST_CMD_CNTL__DONE_MASK 0x80000000L
+#define MC_BIST_CMD_CNTL__DONE__SHIFT 0x0000001f
+#define MC_BIST_CMD_CNTL__ENABLE_D0_MASK 0x10000000L
+#define MC_BIST_CMD_CNTL__ENABLE_D0__SHIFT 0x0000001c
+#define MC_BIST_CMD_CNTL__ENABLE_D1_MASK 0x20000000L
+#define MC_BIST_CMD_CNTL__ENABLE_D1__SHIFT 0x0000001d
+#define MC_BIST_CMD_CNTL__LOOP_CNT_MAX_MASK 0x0000fff0L
+#define MC_BIST_CMD_CNTL__LOOP_CNT_MAX__SHIFT 0x00000004
+#define MC_BIST_CMD_CNTL__LOOP_CNT_RD_MASK 0x0ffc0000L
+#define MC_BIST_CMD_CNTL__LOOP_CNT_RD__SHIFT 0x00000012
+#define MC_BIST_CMD_CNTL__LOOP_END_CONDITION_MASK 0x00000008L
+#define MC_BIST_CMD_CNTL__LOOP_END_CONDITION__SHIFT 0x00000003
+#define MC_BIST_CMD_CNTL__RESET_MASK 0x00000001L
+#define MC_BIST_CMD_CNTL__RESET__SHIFT 0x00000000
+#define MC_BIST_CMD_CNTL__STATUS_CH_MASK 0x40000000L
+#define MC_BIST_CMD_CNTL__STATUS_CH__SHIFT 0x0000001e
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_MASK 0x0000001fL
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_RST_MASK 0x00000100L
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT_RST__SHIFT 0x00000008
+#define MC_BIST_CMP_CNTL_2__DATA_STORE_CNT__SHIFT 0x00000000
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_MASK 0x0001f000L
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_RST_MASK 0x00100000L
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT_RST__SHIFT 0x00000014
+#define MC_BIST_CMP_CNTL_2__EDC_STORE_CNT__SHIFT 0x0000000c
+#define MC_BIST_CMP_CNTL__CMP_MASK 0x00030000L
+#define MC_BIST_CMP_CNTL__CMP_MASK_BIT_MASK 0x00000ff0L
+#define MC_BIST_CMP_CNTL__CMP_MASK_BIT__SHIFT 0x00000004
+#define MC_BIST_CMP_CNTL__CMP_MASK_BYTE_MASK 0x0000000fL
+#define MC_BIST_CMP_CNTL__CMP_MASK_BYTE__SHIFT 0x00000000
+#define MC_BIST_CMP_CNTL__CMP__SHIFT 0x00000010
+#define MC_BIST_CMP_CNTL__DATA_STORE_MODE_MASK 0x00300000L
+#define MC_BIST_CMP_CNTL__DATA_STORE_MODE__SHIFT 0x00000014
+#define MC_BIST_CMP_CNTL__DATA_STORE_SEL_MASK 0x00002000L
+#define MC_BIST_CMP_CNTL__DATA_STORE_SEL__SHIFT 0x0000000d
+#define MC_BIST_CMP_CNTL__DAT_MODE_MASK 0x00040000L
+#define MC_BIST_CMP_CNTL__DAT_MODE__SHIFT 0x00000012
+#define MC_BIST_CMP_CNTL__EDC_STORE_MODE_MASK 0x00080000L
+#define MC_BIST_CMP_CNTL__EDC_STORE_MODE__SHIFT 0x00000013
+#define MC_BIST_CMP_CNTL__EDC_STORE_SEL_MASK 0x00004000L
+#define MC_BIST_CMP_CNTL__EDC_STORE_SEL__SHIFT 0x0000000e
+#define MC_BIST_CMP_CNTL__ENABLE_CMD_FIFO_MASK 0x00008000L
+#define MC_BIST_CMP_CNTL__ENABLE_CMD_FIFO__SHIFT 0x0000000f
+#define MC_BIST_CMP_CNTL__LOAD_RTEDC_MASK 0x00001000L
+#define MC_BIST_CMP_CNTL__LOAD_RTEDC__SHIFT 0x0000000c
+#define MC_BIST_CMP_CNTL__MISMATCH_CNT_MASK 0xffc00000L
+#define MC_BIST_CMP_CNTL__MISMATCH_CNT__SHIFT 0x00000016
+#define MC_BIST_CNTL__ADR_MODE_MASK 0x00000020L
+#define MC_BIST_CNTL__ADR_MODE__SHIFT 0x00000005
+#define MC_BIST_CNTL__DAT_MODE_MASK 0x00000040L
+#define MC_BIST_CNTL__DAT_MODE__SHIFT 0x00000006
+#define MC_BIST_CNTL__DONE_MASK 0x40000000L
+#define MC_BIST_CNTL__DONE__SHIFT 0x0000001e
+#define MC_BIST_CNTL__ENABLE_D0_MASK 0x00001000L
+#define MC_BIST_CNTL__ENABLE_D0__SHIFT 0x0000000c
+#define MC_BIST_CNTL__ENABLE_D1_MASK 0x00002000L
+#define MC_BIST_CNTL__ENABLE_D1__SHIFT 0x0000000d
+#define MC_BIST_CNTL__LOAD_RTDATA_CH_MASK 0x00004000L
+#define MC_BIST_CNTL__LOAD_RTDATA_CH__SHIFT 0x0000000e
+#define MC_BIST_CNTL__LOAD_RTDATA_MASK 0x80000000L
+#define MC_BIST_CNTL__LOAD_RTDATA__SHIFT 0x0000001f
+#define MC_BIST_CNTL__LOOP_CNT_MASK 0x0fff0000L
+#define MC_BIST_CNTL__LOOP_CNT__SHIFT 0x00000010
+#define MC_BIST_CNTL__LOOP_MASK 0x00000c00L
+#define MC_BIST_CNTL__LOOP__SHIFT 0x0000000a
+#define MC_BIST_CNTL__MOP_MODE_MASK 0x00000010L
+#define MC_BIST_CNTL__MOP_MODE__SHIFT 0x00000004
+#define MC_BIST_CNTL__PTR_RST_D0_MASK 0x00000004L
+#define MC_BIST_CNTL__PTR_RST_D0__SHIFT 0x00000002
+#define MC_BIST_CNTL__PTR_RST_D1_MASK 0x00000008L
+#define MC_BIST_CNTL__PTR_RST_D1__SHIFT 0x00000003
+#define MC_BIST_CNTL__RESET_MASK 0x00000001L
+#define MC_BIST_CNTL__RESET__SHIFT 0x00000000
+#define MC_BIST_CNTL__RUN_MASK 0x00000002L
+#define MC_BIST_CNTL__RUN__SHIFT 0x00000001
+#define MC_BIST_DATA_MASK__MASK_MASK 0xffffffffL
+#define MC_BIST_DATA_MASK__MASK__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD0__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD0__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD1__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD1__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD2__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD2__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD3__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD3__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD4__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD4__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD5__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD5__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD6__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD6__DATA__SHIFT 0x00000000
+#define MC_BIST_DATA_WORD7__DATA_MASK 0xffffffffL
+#define MC_BIST_DATA_WORD7__DATA__SHIFT 0x00000000
+#define MC_BIST_DIR_CNTL__CMD_RTR_D0_MASK 0x00000040L
+#define MC_BIST_DIR_CNTL__CMD_RTR_D0__SHIFT 0x00000006
+#define MC_BIST_DIR_CNTL__CMD_RTR_D1_MASK 0x00000100L
+#define MC_BIST_DIR_CNTL__CMD_RTR_D1__SHIFT 0x00000008
+#define MC_BIST_DIR_CNTL__DATA_LOAD_MASK 0x00000020L
+#define MC_BIST_DIR_CNTL__DATA_LOAD__SHIFT 0x00000005
+#define MC_BIST_DIR_CNTL__DAT_RTR_D0_MASK 0x00000080L
+#define MC_BIST_DIR_CNTL__DAT_RTR_D0__SHIFT 0x00000007
+#define MC_BIST_DIR_CNTL__DAT_RTR_D1_MASK 0x00000200L
+#define MC_BIST_DIR_CNTL__DAT_RTR_D1__SHIFT 0x00000009
+#define MC_BIST_DIR_CNTL__EOB_MASK 0x00000008L
+#define MC_BIST_DIR_CNTL__EOB__SHIFT 0x00000003
+#define MC_BIST_DIR_CNTL__MOP3_MASK 0x00000400L
+#define MC_BIST_DIR_CNTL__MOP3__SHIFT 0x0000000a
+#define MC_BIST_DIR_CNTL__MOP_LOAD_MASK 0x00000010L
+#define MC_BIST_DIR_CNTL__MOP_LOAD__SHIFT 0x00000004
+#define MC_BIST_DIR_CNTL__MOP_MASK 0x00000007L
+#define MC_BIST_DIR_CNTL__MOP__SHIFT 0x00000000
+#define MC_BIST_EADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_EADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_EADDR__COLH_MASK 0x20000000L
+#define MC_BIST_EADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_EADDR__COL_MASK 0x000003ffL
+#define MC_BIST_EADDR__COL__SHIFT 0x00000000
+#define MC_BIST_EADDR__RANK_MASK 0x10000000L
+#define MC_BIST_EADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_EADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_EADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_EADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_EADDR__ROW__SHIFT 0x0000000a
+#define MC_BIST_MISMATCH_ADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_MISMATCH_ADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_MISMATCH_ADDR__COLH_MASK 0x20000000L
+#define MC_BIST_MISMATCH_ADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_MISMATCH_ADDR__COL_MASK 0x000003ffL
+#define MC_BIST_MISMATCH_ADDR__COL__SHIFT 0x00000000
+#define MC_BIST_MISMATCH_ADDR__RANK_MASK 0x10000000L
+#define MC_BIST_MISMATCH_ADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_MISMATCH_ADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_MISMATCH_ADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_MISMATCH_ADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_MISMATCH_ADDR__ROW__SHIFT 0x0000000a
+#define MC_BIST_RDATA_EDC__EDC_MASK 0xffffffffL
+#define MC_BIST_RDATA_EDC__EDC__SHIFT 0x00000000
+#define MC_BIST_RDATA_MASK__MASK_MASK 0xffffffffL
+#define MC_BIST_RDATA_MASK__MASK__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD0__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD0__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD1__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD1__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD2__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD2__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD3__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD3__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD4__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD4__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD5__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD5__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD6__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD6__RDATA__SHIFT 0x00000000
+#define MC_BIST_RDATA_WORD7__RDATA_MASK 0xffffffffL
+#define MC_BIST_RDATA_WORD7__RDATA__SHIFT 0x00000000
+#define MC_BIST_SADDR__BANK_MASK 0x0f000000L
+#define MC_BIST_SADDR__BANK__SHIFT 0x00000018
+#define MC_BIST_SADDR__COLH_MASK 0x20000000L
+#define MC_BIST_SADDR__COLH__SHIFT 0x0000001d
+#define MC_BIST_SADDR__COL_MASK 0x000003ffL
+#define MC_BIST_SADDR__COL__SHIFT 0x00000000
+#define MC_BIST_SADDR__RANK_MASK 0x10000000L
+#define MC_BIST_SADDR__RANK__SHIFT 0x0000001c
+#define MC_BIST_SADDR__ROWH_MASK 0xc0000000L
+#define MC_BIST_SADDR__ROWH__SHIFT 0x0000001e
+#define MC_BIST_SADDR__ROW_MASK 0x00fffc00L
+#define MC_BIST_SADDR__ROW__SHIFT 0x0000000a
+#define MC_CG_CONFIG__INDEX_MASK 0x003fffc0L
+#define MC_CG_CONFIG__INDEX__SHIFT 0x00000006
+#define MC_CG_CONFIG_MCD__INDEX_MASK 0x1fffe000L
+#define MC_CG_CONFIG_MCD__INDEX__SHIFT 0x0000000d
+#define MC_CG_CONFIG_MCD__MCD0_WR_ENABLE_MASK 0x00000001L
+#define MC_CG_CONFIG_MCD__MCD0_WR_ENABLE__SHIFT 0x00000000
+#define MC_CG_CONFIG_MCD__MCD1_WR_ENABLE_MASK 0x00000002L
+#define MC_CG_CONFIG_MCD__MCD1_WR_ENABLE__SHIFT 0x00000001
+#define MC_CG_CONFIG_MCD__MCD2_WR_ENABLE_MASK 0x00000004L
+#define MC_CG_CONFIG_MCD__MCD2_WR_ENABLE__SHIFT 0x00000002
+#define MC_CG_CONFIG_MCD__MCD3_WR_ENABLE_MASK 0x00000008L
+#define MC_CG_CONFIG_MCD__MCD3_WR_ENABLE__SHIFT 0x00000003
+#define MC_CG_CONFIG_MCD__MCD4_WR_ENABLE_MASK 0x00000010L
+#define MC_CG_CONFIG_MCD__MCD4_WR_ENABLE__SHIFT 0x00000004
+#define MC_CG_CONFIG_MCD__MCD5_WR_ENABLE_MASK 0x00000020L
+#define MC_CG_CONFIG_MCD__MCD5_WR_ENABLE__SHIFT 0x00000005
+#define MC_CG_CONFIG_MCD__MC_RD_ENABLE_MASK 0x00000700L
+#define MC_CG_CONFIG_MCD__MC_RD_ENABLE__SHIFT 0x00000008
+#define MC_CG_CONFIG__MCDW_WR_ENABLE_MASK 0x00000001L
+#define MC_CG_CONFIG__MCDW_WR_ENABLE__SHIFT 0x00000000
+#define MC_CG_CONFIG__MCDX_WR_ENABLE_MASK 0x00000002L
+#define MC_CG_CONFIG__MCDX_WR_ENABLE__SHIFT 0x00000001
+#define MC_CG_CONFIG__MCDY_WR_ENABLE_MASK 0x00000004L
+#define MC_CG_CONFIG__MCDY_WR_ENABLE__SHIFT 0x00000002
+#define MC_CG_CONFIG__MCDZ_WR_ENABLE_MASK 0x00000008L
+#define MC_CG_CONFIG__MCDZ_WR_ENABLE__SHIFT 0x00000003
+#define MC_CG_CONFIG__MC_RD_ENABLE_MASK 0x00000030L
+#define MC_CG_CONFIG__MC_RD_ENABLE__SHIFT 0x00000004
+#define MC_CG_DATAPORT__DATA_FIELD_MASK 0xffffffffL
+#define MC_CG_DATAPORT__DATA_FIELD__SHIFT 0x00000000
+#define MC_CITF_CNTL__EXEMPTPM_MASK 0x00000008L
+#define MC_CITF_CNTL__EXEMPTPM__SHIFT 0x00000003
+#define MC_CITF_CNTL__GFX_IDLE_OVERRIDE_MASK 0x00000030L
+#define MC_CITF_CNTL__GFX_IDLE_OVERRIDE__SHIFT 0x00000004
+#define MC_CITF_CNTL__IGNOREPM_MASK 0x00000004L
+#define MC_CITF_CNTL__IGNOREPM__SHIFT 0x00000002
+#define MC_CITF_CNTL__MCD_SRBM_MASK_ENABLE_MASK 0x00000040L
+#define MC_CITF_CNTL__MCD_SRBM_MASK_ENABLE__SHIFT 0x00000006
+#define MC_CITF_CREDITS_ARB_RD__HUB_PRI_MASK 0x02000000L
+#define MC_CITF_CREDITS_ARB_RD__HUB_PRI__SHIFT 0x00000019
+#define MC_CITF_CREDITS_ARB_RD__LCL_PRI_MASK 0x01000000L
+#define MC_CITF_CREDITS_ARB_RD__LCL_PRI__SHIFT 0x00000018
+#define MC_CITF_CREDITS_ARB_RD__READ_HUB_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_ARB_RD__READ_HUB__SHIFT 0x00000008
+#define MC_CITF_CREDITS_ARB_RD__READ_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_ARB_RD__READ_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_ARB_RD__READ_PRI_MASK 0x00ff0000L
+#define MC_CITF_CREDITS_ARB_RD__READ_PRI__SHIFT 0x00000010
+#define MC_CITF_CREDITS_ARB_WR__HUB_PRI_MASK 0x00010000L
+#define MC_CITF_CREDITS_ARB_WR__HUB_PRI__SHIFT 0x00000010
+#define MC_CITF_CREDITS_ARB_WR__LCL_PRI_MASK 0x00020000L
+#define MC_CITF_CREDITS_ARB_WR__LCL_PRI__SHIFT 0x00000011
+#define MC_CITF_CREDITS_ARB_WR__WRITE_HUB_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_ARB_WR__WRITE_HUB__SHIFT 0x00000008
+#define MC_CITF_CREDITS_ARB_WR__WRITE_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_ARB_WR__WRITE_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_VM__READ_ALL_MASK 0x0000003fL
+#define MC_CITF_CREDITS_VM__READ_ALL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_VM__WRITE_ALL_MASK 0x00000fc0L
+#define MC_CITF_CREDITS_VM__WRITE_ALL__SHIFT 0x00000006
+#define MC_CITF_CREDITS_XBAR__READ_LCL_MASK 0x000000ffL
+#define MC_CITF_CREDITS_XBAR__READ_LCL__SHIFT 0x00000000
+#define MC_CITF_CREDITS_XBAR__WRITE_LCL_MASK 0x0000ff00L
+#define MC_CITF_CREDITS_XBAR__WRITE_LCL__SHIFT 0x00000008
+#define MC_CITF_DAGB_CNTL__CENTER_RD_MAX_BURST_MASK 0x0000001eL
+#define MC_CITF_DAGB_CNTL__CENTER_RD_MAX_BURST__SHIFT 0x00000001
+#define MC_CITF_DAGB_CNTL__CENTER_WR_MAX_BURST_MASK 0x000003c0L
+#define MC_CITF_DAGB_CNTL__CENTER_WR_MAX_BURST__SHIFT 0x00000006
+#define MC_CITF_DAGB_CNTL__DISABLE_SELF_INIT_MASK 0x00000020L
+#define MC_CITF_DAGB_CNTL__DISABLE_SELF_INIT__SHIFT 0x00000005
+#define MC_CITF_DAGB_CNTL__JUMP_AHEAD_MASK 0x00000001L
+#define MC_CITF_DAGB_CNTL__JUMP_AHEAD__SHIFT 0x00000000
+#define MC_CITF_DAGB_DLY__CLI_MASK 0x001f0000L
+#define MC_CITF_DAGB_DLY__CLI__SHIFT 0x00000010
+#define MC_CITF_DAGB_DLY__DLY_MASK 0x0000001fL
+#define MC_CITF_DAGB_DLY__DLY__SHIFT 0x00000000
+#define MC_CITF_DAGB_DLY__POS_MASK 0x1f000000L
+#define MC_CITF_DAGB_DLY__POS__SHIFT 0x00000018
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_HP_MASK 0x00fc0000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_HP__SHIFT 0x00000012
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_LP_MASK 0x0003f000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_HUB_LP__SHIFT 0x0000000c
+#define MC_CITF_INT_CREDITS__CNTR_RD_LCL_MASK 0x3f000000L
+#define MC_CITF_INT_CREDITS__CNTR_RD_LCL__SHIFT 0x00000018
+#define MC_CITF_INT_CREDITS__REMRDRET_MASK 0x0000003fL
+#define MC_CITF_INT_CREDITS__REMRDRET__SHIFT 0x00000000
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_HUB_MASK 0x0000003fL
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_HUB__SHIFT 0x00000000
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_LCL_MASK 0x00000fc0L
+#define MC_CITF_INT_CREDITS_WR__CNTR_WR_LCL__SHIFT 0x00000006
+#define MC_CITF_MISC_RD_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_RD_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_RD_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_RD_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_RD_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_RD_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_RD_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_RD_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_RD_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_MISC_VM_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_VM_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_VM_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_VM_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_VM_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_VM_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_VM_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_VM_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_VM_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_MISC_WR_CG__ENABLE_MASK 0x00040000L
+#define MC_CITF_MISC_WR_CG__ENABLE__SHIFT 0x00000012
+#define MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_CITF_MISC_WR_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_CITF_MISC_WR_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_CITF_MISC_WR_CG__OFFDLY__SHIFT 0x00000006
+#define MC_CITF_MISC_WR_CG__ONDLY_MASK 0x0000003fL
+#define MC_CITF_MISC_WR_CG__ONDLY__SHIFT 0x00000000
+#define MC_CITF_MISC_WR_CG__RDYDLY_MASK 0x0003f000L
+#define MC_CITF_MISC_WR_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_CITF_PERF_MON_CNTL2__CID_MASK 0x000001ffL
+#define MC_CITF_PERF_MON_CNTL2__CID__SHIFT 0x00000000
+#define MC_CITF_PERF_MON_RSLT2__CB_RD_BUSY_MASK 0x00000040L
+#define MC_CITF_PERF_MON_RSLT2__CB_RD_BUSY__SHIFT 0x00000006
+#define MC_CITF_PERF_MON_RSLT2__CB_WR_BUSY_MASK 0x00001000L
+#define MC_CITF_PERF_MON_RSLT2__CB_WR_BUSY__SHIFT 0x0000000c
+#define MC_CITF_PERF_MON_RSLT2__DB_RD_BUSY_MASK 0x00000080L
+#define MC_CITF_PERF_MON_RSLT2__DB_RD_BUSY__SHIFT 0x00000007
+#define MC_CITF_PERF_MON_RSLT2__DB_WR_BUSY_MASK 0x00002000L
+#define MC_CITF_PERF_MON_RSLT2__DB_WR_BUSY__SHIFT 0x0000000d
+#define MC_CITF_PERF_MON_RSLT2__SX_WR_BUSY_MASK 0x00004000L
+#define MC_CITF_PERF_MON_RSLT2__SX_WR_BUSY__SHIFT 0x0000000e
+#define MC_CITF_PERF_MON_RSLT2__TC0_RD_BUSY_MASK 0x00000100L
+#define MC_CITF_PERF_MON_RSLT2__TC0_RD_BUSY__SHIFT 0x00000008
+#define MC_CITF_PERF_MON_RSLT2__TC0_WR_BUSY_MASK 0x00010000L
+#define MC_CITF_PERF_MON_RSLT2__TC0_WR_BUSY__SHIFT 0x00000010
+#define MC_CITF_PERF_MON_RSLT2__TC1_RD_BUSY_MASK 0x00000400L
+#define MC_CITF_PERF_MON_RSLT2__TC1_RD_BUSY__SHIFT 0x0000000a
+#define MC_CITF_PERF_MON_RSLT2__TC1_WR_BUSY_MASK 0x00020000L
+#define MC_CITF_PERF_MON_RSLT2__TC1_WR_BUSY__SHIFT 0x00000011
+#define MC_CITF_PERF_MON_RSLT2__TC2_RD_BUSY_MASK 0x00008000L
+#define MC_CITF_PERF_MON_RSLT2__TC2_RD_BUSY__SHIFT 0x0000000f
+#define MC_CITF_PERF_MON_RSLT2__TC2_WR_BUSY_MASK 0x00040000L
+#define MC_CITF_PERF_MON_RSLT2__TC2_WR_BUSY__SHIFT 0x00000012
+#define MC_CITF_PERF_MON_RSLT2__VC0_RD_BUSY_MASK 0x00000200L
+#define MC_CITF_PERF_MON_RSLT2__VC0_RD_BUSY__SHIFT 0x00000009
+#define MC_CITF_PERF_MON_RSLT2__VC1_RD_BUSY_MASK 0x00000800L
+#define MC_CITF_PERF_MON_RSLT2__VC1_RD_BUSY__SHIFT 0x0000000b
+#define MC_CITF_REMREQ__CREDITS_ENABLE_MASK 0x00004000L
+#define MC_CITF_REMREQ__CREDITS_ENABLE__SHIFT 0x0000000e
+#define MC_CITF_REMREQ__READ_CREDITS_MASK 0x0000007fL
+#define MC_CITF_REMREQ__READ_CREDITS__SHIFT 0x00000000
+#define MC_CITF_REMREQ__WRITE_CREDITS_MASK 0x00003f80L
+#define MC_CITF_REMREQ__WRITE_CREDITS__SHIFT 0x00000007
+#define MC_CITF_RET_MODE__INORDER_RD_MASK 0x00000001L
+#define MC_CITF_RET_MODE__INORDER_RD__SHIFT 0x00000000
+#define MC_CITF_RET_MODE__INORDER_WR_MASK 0x00000002L
+#define MC_CITF_RET_MODE__INORDER_WR__SHIFT 0x00000001
+#define MC_CITF_RET_MODE__LCLPRI_RD_MASK 0x00000010L
+#define MC_CITF_RET_MODE__LCLPRI_RD__SHIFT 0x00000004
+#define MC_CITF_RET_MODE__LCLPRI_WR_MASK 0x00000020L
+#define MC_CITF_RET_MODE__LCLPRI_WR__SHIFT 0x00000005
+#define MC_CITF_RET_MODE__REMPRI_RD_MASK 0x00000004L
+#define MC_CITF_RET_MODE__REMPRI_RD__SHIFT 0x00000002
+#define MC_CITF_RET_MODE__REMPRI_WR_MASK 0x00000008L
+#define MC_CITF_RET_MODE__REMPRI_WR__SHIFT 0x00000003
+#define MC_CITF_WTM_RD_CNTL__DISABLE_REMOTE_MASK 0x01000000L
+#define MC_CITF_WTM_RD_CNTL__DISABLE_REMOTE__SHIFT 0x00000018
+#define MC_CITF_WTM_RD_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_CITF_WTM_RD_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_CITF_WTM_RD_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_CITF_WTM_RD_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_CITF_WTM_RD_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_CITF_WTM_RD_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_CITF_WTM_RD_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_CITF_WTM_RD_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_CITF_WTM_RD_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_CITF_WTM_RD_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_CITF_WTM_RD_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_CITF_WTM_RD_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_CITF_WTM_RD_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_CITF_WTM_RD_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_CITF_WTM_RD_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_CITF_WTM_RD_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_CITF_WTM_WR_CNTL__DISABLE_REMOTE_MASK 0x01000000L
+#define MC_CITF_WTM_WR_CNTL__DISABLE_REMOTE__SHIFT 0x00000018
+#define MC_CITF_WTM_WR_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_CITF_WTM_WR_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_CITF_WTM_WR_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_CITF_WTM_WR_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_CITF_WTM_WR_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_CITF_WTM_WR_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_CITF_WTM_WR_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_CITF_WTM_WR_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_CITF_WTM_WR_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_CITF_WTM_WR_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_CITF_WTM_WR_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_CITF_WTM_WR_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_CITF_WTM_WR_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_CITF_WTM_WR_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_CITF_WTM_WR_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_CITF_WTM_WR_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_CITF_XTRA_ENABLE__ARB_DBG_MASK 0x00000f00L
+#define MC_CITF_XTRA_ENABLE__ARB_DBG__SHIFT 0x00000008
+#define MC_CITF_XTRA_ENABLE__CB1_RD_MASK 0x00000001L
+#define MC_CITF_XTRA_ENABLE__CB1_RD__SHIFT 0x00000000
+#define MC_CITF_XTRA_ENABLE__CB1_WR_MASK 0x00000002L
+#define MC_CITF_XTRA_ENABLE__CB1_WR__SHIFT 0x00000001
+#define MC_CITF_XTRA_ENABLE__DB1_RD_MASK 0x00000004L
+#define MC_CITF_XTRA_ENABLE__DB1_RD__SHIFT 0x00000002
+#define MC_CITF_XTRA_ENABLE__DB1_WR_MASK 0x00000008L
+#define MC_CITF_XTRA_ENABLE__DB1_WR__SHIFT 0x00000003
+#define MC_CITF_XTRA_ENABLE__TC2_RD_MASK 0x00000010L
+#define MC_CITF_XTRA_ENABLE__TC2_RD__SHIFT 0x00000004
+#define MC_CITF_XTRA_ENABLE__TC2_WR_MASK 0x00001000L
+#define MC_CITF_XTRA_ENABLE__TC2_WR__SHIFT 0x0000000c
+#define MC_CONFIG__MCC_INDEX_MODE_ENABLE_MASK 0x80000000L
+#define MC_CONFIG__MCC_INDEX_MODE_ENABLE__SHIFT 0x0000001f
+#define MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK 0x00000001L
+#define MC_CONFIG_MCD__MCD0_WR_ENABLE__SHIFT 0x00000000
+#define MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK 0x00000002L
+#define MC_CONFIG_MCD__MCD1_WR_ENABLE__SHIFT 0x00000001
+#define MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK 0x00000004L
+#define MC_CONFIG_MCD__MCD2_WR_ENABLE__SHIFT 0x00000002
+#define MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK 0x00000008L
+#define MC_CONFIG_MCD__MCD3_WR_ENABLE__SHIFT 0x00000003
+#define MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK 0x00000010L
+#define MC_CONFIG_MCD__MCD4_WR_ENABLE__SHIFT 0x00000004
+#define MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK 0x00000020L
+#define MC_CONFIG_MCD__MCD5_WR_ENABLE__SHIFT 0x00000005
+#define MC_CONFIG_MCD__MCD_INDEX_MODE_ENABLE_MASK 0x80000000L
+#define MC_CONFIG_MCD__MCD_INDEX_MODE_ENABLE__SHIFT 0x0000001f
+#define MC_CONFIG_MCD__MC_RD_ENABLE_MASK 0x00000700L
+#define MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT 0x00000008
+#define MC_CONFIG__MCDW_WR_ENABLE_MASK 0x00000001L
+#define MC_CONFIG__MCDW_WR_ENABLE__SHIFT 0x00000000
+#define MC_CONFIG__MCDX_WR_ENABLE_MASK 0x00000002L
+#define MC_CONFIG__MCDX_WR_ENABLE__SHIFT 0x00000001
+#define MC_CONFIG__MCDY_WR_ENABLE_MASK 0x00000004L
+#define MC_CONFIG__MCDY_WR_ENABLE__SHIFT 0x00000002
+#define MC_CONFIG__MCDZ_WR_ENABLE_MASK 0x00000008L
+#define MC_CONFIG__MCDZ_WR_ENABLE__SHIFT 0x00000003
+#define MC_CONFIG__MC_RD_ENABLE_MASK 0x00000030L
+#define MC_CONFIG__MC_RD_ENABLE__SHIFT 0x00000004
+#define MC_HUB_MISC_DBG__SELECT0_MASK 0x0000000fL
+#define MC_HUB_MISC_DBG__SELECT0__SHIFT 0x00000000
+#define MC_HUB_MISC_DBG__SELECT1_MASK 0x000000f0L
+#define MC_HUB_MISC_DBG__SELECT1__SHIFT 0x00000004
+#define MC_HUB_MISC_FRAMING__BITS_MASK 0xffffffffL
+#define MC_HUB_MISC_FRAMING__BITS__SHIFT 0x00000000
+#define MC_HUB_MISC_HUB_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_HUB_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_HUB_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_HUB_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_HUB_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_HUB_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_HUB_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_HUB_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_READ_MASK 0x00000001L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_READ__SHIFT 0x00000000
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_WRITE_MASK 0x00000002L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_CP_WRITE__SHIFT 0x00000001
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_READ_MASK 0x00000400L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_READ__SHIFT 0x0000000a
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_WRITE_MASK 0x00000800L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_DISP_WRITE__SHIFT 0x0000000b
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_READ_MASK 0x00000004L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_READ__SHIFT 0x00000002
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_WRITE_MASK 0x00000008L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_GFX_WRITE__SHIFT 0x00000003
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_READ_MASK 0x00010000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_READ__SHIFT 0x00000010
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_WRITE_MASK 0x00020000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_HDP_WRITE__SHIFT 0x00000011
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_READ_MASK 0x00040000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_READ__SHIFT 0x00000012
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_WRITE_MASK 0x00080000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_OTH_WRITE__SHIFT 0x00000013
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_READ_MASK 0x00000040L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_READ__SHIFT 0x00000006
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_WRITE_MASK 0x00000080L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_RLC_WRITE__SHIFT 0x00000007
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_READ_MASK 0x00004000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_READ__SHIFT 0x0000000e
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_WRITE_MASK 0x00008000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_SMU_WRITE__SHIFT 0x0000000f
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_READ_MASK 0x00001000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_READ__SHIFT 0x0000000c
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_WRITE_MASK 0x00002000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_UVD_WRITE__SHIFT 0x0000000d
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_READ_MASK 0x01000000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_READ__SHIFT 0x00000018
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_WRITE_MASK 0x02000000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VCE_WRITE__SHIFT 0x00000019
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_READ_MASK 0x00100000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_READ__SHIFT 0x00000014
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_WRITE_MASK 0x00200000L
+#define MC_HUB_MISC_IDLE_STATUS__OUTSTANDING_VMC_WRITE__SHIFT 0x00000015
+#define MC_HUB_MISC_OVERRIDE__IDLE_MASK 0x00000003L
+#define MC_HUB_MISC_OVERRIDE__IDLE__SHIFT 0x00000000
+#define MC_HUB_MISC_POWER__PM_BLACKOUT_CNTL_MASK 0x00000018L
+#define MC_HUB_MISC_POWER__PM_BLACKOUT_CNTL__SHIFT 0x00000003
+#define MC_HUB_MISC_POWER__SRBM_GATE_OVERRIDE_MASK 0x00000004L
+#define MC_HUB_MISC_POWER__SRBM_GATE_OVERRIDE__SHIFT 0x00000002
+#define MC_HUB_MISC_SIP_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_SIP_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_SIP_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_SIP_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_SIP_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_SIP_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_SIP_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_SIP_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_MISC_STATUS__GFX_BUSY_MASK 0x00002000L
+#define MC_HUB_MISC_STATUS__GFX_BUSY__SHIFT 0x0000000d
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDREQ_MASK 0x00000004L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDREQ__SHIFT 0x00000002
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDRET_MASK 0x00000008L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_RDRET__SHIFT 0x00000003
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRREQ_MASK 0x00000010L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRREQ__SHIFT 0x00000004
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRRET_MASK 0x00000020L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_HUB_WRRET__SHIFT 0x00000005
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_READ_MASK 0x00000100L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_READ__SHIFT 0x00000008
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_WRITE_MASK 0x00000200L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_MCD_WRITE__SHIFT 0x00000009
+#define MC_HUB_MISC_STATUS__OUTSTANDING_READ_MASK 0x00000001L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_READ__SHIFT 0x00000000
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_READ_MASK 0x00000040L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_READ__SHIFT 0x00000006
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_WRITE_MASK 0x00000080L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_RPB_WRITE__SHIFT 0x00000007
+#define MC_HUB_MISC_STATUS__OUTSTANDING_WRITE_MASK 0x00000002L
+#define MC_HUB_MISC_STATUS__OUTSTANDING_WRITE__SHIFT 0x00000001
+#define MC_HUB_MISC_STATUS__READ_DEADLOCK_WARNING_MASK 0x00001000L
+#define MC_HUB_MISC_STATUS__READ_DEADLOCK_WARNING__SHIFT 0x0000000c
+#define MC_HUB_MISC_STATUS__RPB_BUSY_MASK 0x00000400L
+#define MC_HUB_MISC_STATUS__RPB_BUSY__SHIFT 0x0000000a
+#define MC_HUB_MISC_STATUS__WRITE_DEADLOCK_WARNING_MASK 0x00000800L
+#define MC_HUB_MISC_STATUS__WRITE_DEADLOCK_WARNING__SHIFT 0x0000000b
+#define MC_HUB_MISC_VM_CG__ENABLE_MASK 0x00040000L
+#define MC_HUB_MISC_VM_CG__ENABLE__SHIFT 0x00000012
+#define MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_HUB_MISC_VM_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_HUB_MISC_VM_CG__OFFDLY_MASK 0x00000fc0L
+#define MC_HUB_MISC_VM_CG__OFFDLY__SHIFT 0x00000006
+#define MC_HUB_MISC_VM_CG__ONDLY_MASK 0x0000003fL
+#define MC_HUB_MISC_VM_CG__ONDLY__SHIFT 0x00000000
+#define MC_HUB_MISC_VM_CG__RDYDLY_MASK 0x0003f000L
+#define MC_HUB_MISC_VM_CG__RDYDLY__SHIFT 0x0000000c
+#define MC_HUB_RDREQ_CNTL__BREAK_HDP_DEADLOCK_MASK 0x00000200L
+#define MC_HUB_RDREQ_CNTL__BREAK_HDP_DEADLOCK__SHIFT 0x00000009
+#define MC_HUB_RDREQ_CNTL__DEBUG_REG_MASK 0x0001fc00L
+#define MC_HUB_RDREQ_CNTL__DEBUG_REG__SHIFT 0x0000000a
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL0_MASK 0x00020000L
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL0__SHIFT 0x00000011
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL1_MASK 0x00040000L
+#define MC_HUB_RDREQ_CNTL__DISABLE_SELF_INIT_GBL1__SHIFT 0x00000012
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL0_MASK 0x00000004L
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL0__SHIFT 0x00000002
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL1_MASK 0x00000008L
+#define MC_HUB_RDREQ_CNTL__JUMPAHEAD_GBL1__SHIFT 0x00000003
+#define MC_HUB_RDREQ_CNTL__MCDW_STALL_MODE_MASK 0x00000020L
+#define MC_HUB_RDREQ_CNTL__MCDW_STALL_MODE__SHIFT 0x00000005
+#define MC_HUB_RDREQ_CNTL__MCDX_STALL_MODE_MASK 0x00000040L
+#define MC_HUB_RDREQ_CNTL__MCDX_STALL_MODE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_CNTL__MCDY_STALL_MODE_MASK 0x00000080L
+#define MC_HUB_RDREQ_CNTL__MCDY_STALL_MODE__SHIFT 0x00000007
+#define MC_HUB_RDREQ_CNTL__MCDZ_STALL_MODE_MASK 0x00000100L
+#define MC_HUB_RDREQ_CNTL__MCDZ_STALL_MODE__SHIFT 0x00000008
+#define MC_HUB_RDREQ_CNTL__OVERRIDE_STALL_ENABLE_MASK 0x00000010L
+#define MC_HUB_RDREQ_CNTL__OVERRIDE_STALL_ENABLE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_CNTL__PWRXPRESS_MODE_MASK 0x00080000L
+#define MC_HUB_RDREQ_CNTL__PWRXPRESS_MODE__SHIFT 0x00000013
+#define MC_HUB_RDREQ_CNTL__REMOTE_BLACKOUT_MASK 0x00000001L
+#define MC_HUB_RDREQ_CNTL__REMOTE_BLACKOUT__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS2__STOR1_PRI_MASK 0x000000ffL
+#define MC_HUB_RDREQ_CREDITS2__STOR1_PRI__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS__STOR0_MASK 0x00ff0000L
+#define MC_HUB_RDREQ_CREDITS__STOR0__SHIFT 0x00000010
+#define MC_HUB_RDREQ_CREDITS__STOR1_MASK 0xff000000L
+#define MC_HUB_RDREQ_CREDITS__STOR1__SHIFT 0x00000018
+#define MC_HUB_RDREQ_CREDITS__VM0_MASK 0x000000ffL
+#define MC_HUB_RDREQ_CREDITS__VM0__SHIFT 0x00000000
+#define MC_HUB_RDREQ_CREDITS__VM1_MASK 0x0000ff00L
+#define MC_HUB_RDREQ_CREDITS__VM1__SHIFT 0x00000008
+#define MC_HUB_RDREQ_DMIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_DMIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_DMIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_DMIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_DMIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_DMIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK 0x00000003L
+#define MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_DMIF_LIMIT__LIMIT_COUNT_MASK 0x0000007cL
+#define MC_HUB_RDREQ_DMIF_LIMIT__LIMIT_COUNT__SHIFT 0x00000002
+#define MC_HUB_RDREQ_DMIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_DMIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_DMIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_DMIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_DMIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_DMIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_DMIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_GBL0__STALL_THRESHOLD_MASK 0x000000ffL
+#define MC_HUB_RDREQ_GBL0__STALL_THRESHOLD__SHIFT 0x00000000
+#define MC_HUB_RDREQ_GBL1__STALL_THRESHOLD_MASK 0x000000ffL
+#define MC_HUB_RDREQ_GBL1__STALL_THRESHOLD__SHIFT 0x00000000
+#define MC_HUB_RDREQ_HDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_HDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_HDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_HDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_HDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_HDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_HDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_HDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_HDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_HDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_HDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_HDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_HDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_MCDW__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDW__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDW__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDW__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDW__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDW__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDW__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDW__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDW__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDW__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDW__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDW__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDW__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDW__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDW__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDW__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDX__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDX__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDX__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDX__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDX__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDX__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDX__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDX__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDX__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDX__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDX__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDX__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDX__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDX__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDX__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDX__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDY__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDY__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDY__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDY__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDY__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDY__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDY__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDY__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDY__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDY__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDY__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDY__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDY__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDY__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDY__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDY__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCDZ__ASK_CREDITS_MASK 0x0003f800L
+#define MC_HUB_RDREQ_MCDZ__ASK_CREDITS__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCDZ__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_RDREQ_MCDZ__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCDZ__BUS_MASK 0x00000004L
+#define MC_HUB_RDREQ_MCDZ__BUS__SHIFT 0x00000002
+#define MC_HUB_RDREQ_MCDZ__DISPLAY_CREDITS_MASK 0x01fc0000L
+#define MC_HUB_RDREQ_MCDZ__DISPLAY_CREDITS__SHIFT 0x00000012
+#define MC_HUB_RDREQ_MCDZ__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCDZ__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCDZ__LAZY_TIMER_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCDZ__LAZY_TIMER__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCDZ__MAXBURST_MASK 0x00000078L
+#define MC_HUB_RDREQ_MCDZ__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCDZ__STALL_THRESHOLD_MASK 0xfe000000L
+#define MC_HUB_RDREQ_MCDZ__STALL_THRESHOLD__SHIFT 0x00000019
+#define MC_HUB_RDREQ_MCIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_MCIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_MCIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_MCIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_MCIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_MCIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_MCIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_MCIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_MCIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_MCIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_MCIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_MCIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_MCIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_RLC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_RLC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_RLC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_RLC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_RLC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_RLC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_RLC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_RLC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_RLC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_RLC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_RLC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_RLC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_RLC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_SEM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_SEM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_SEM__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_SEM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SEM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_SEM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_SEM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_SEM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SEM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_SEM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_SEM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_SEM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_SEM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_SIP__ASK_CREDITS_MASK 0x0000007fL
+#define MC_HUB_RDREQ_SIP__ASK_CREDITS__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SIP__DISPLAY_CREDITS_MASK 0x00007f00L
+#define MC_HUB_RDREQ_SIP__DISPLAY_CREDITS__SHIFT 0x00000008
+#define MC_HUB_RDREQ_SIP__DUMMY_MASK 0x00000080L
+#define MC_HUB_RDREQ_SIP__DUMMY__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SMU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_SMU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_SMU__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_SMU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_SMU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_SMU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_SMU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_SMU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_SMU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_SMU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_SMU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_SMU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_SMU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_STATUS__GBL0_BYPASS_STOR_FULL_MASK 0x00000080L
+#define MC_HUB_RDREQ_STATUS__GBL0_BYPASS_STOR_FULL__SHIFT 0x00000007
+#define MC_HUB_RDREQ_STATUS__GBL0_STOR_FULL_MASK 0x00000040L
+#define MC_HUB_RDREQ_STATUS__GBL0_STOR_FULL__SHIFT 0x00000006
+#define MC_HUB_RDREQ_STATUS__GBL0_VM_FULL_MASK 0x00000020L
+#define MC_HUB_RDREQ_STATUS__GBL0_VM_FULL__SHIFT 0x00000005
+#define MC_HUB_RDREQ_STATUS__GBL1_BYPASS_STOR_FULL_MASK 0x00000400L
+#define MC_HUB_RDREQ_STATUS__GBL1_BYPASS_STOR_FULL__SHIFT 0x0000000a
+#define MC_HUB_RDREQ_STATUS__GBL1_STOR_FULL_MASK 0x00000200L
+#define MC_HUB_RDREQ_STATUS__GBL1_STOR_FULL__SHIFT 0x00000009
+#define MC_HUB_RDREQ_STATUS__GBL1_VM_FULL_MASK 0x00000100L
+#define MC_HUB_RDREQ_STATUS__GBL1_VM_FULL__SHIFT 0x00000008
+#define MC_HUB_RDREQ_STATUS__MCDW_RD_AVAIL_MASK 0x00000002L
+#define MC_HUB_RDREQ_STATUS__MCDW_RD_AVAIL__SHIFT 0x00000001
+#define MC_HUB_RDREQ_STATUS__MCDX_RD_AVAIL_MASK 0x00000004L
+#define MC_HUB_RDREQ_STATUS__MCDX_RD_AVAIL__SHIFT 0x00000002
+#define MC_HUB_RDREQ_STATUS__MCDY_RD_AVAIL_MASK 0x00000008L
+#define MC_HUB_RDREQ_STATUS__MCDY_RD_AVAIL__SHIFT 0x00000003
+#define MC_HUB_RDREQ_STATUS__MCDZ_RD_AVAIL_MASK 0x00000010L
+#define MC_HUB_RDREQ_STATUS__MCDZ_RD_AVAIL__SHIFT 0x00000004
+#define MC_HUB_RDREQ_STATUS__PWRXPRESS_ERR_MASK 0x00000800L
+#define MC_HUB_RDREQ_STATUS__PWRXPRESS_ERR__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_STATUS__SIP_AVAIL_MASK 0x00000001L
+#define MC_HUB_RDREQ_STATUS__SIP_AVAIL__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_UMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_UMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_UMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_UMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_UMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_UMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_UMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_UMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_UMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_UMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_UMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_UVD__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_UVD__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_UVD__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_UVD__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_UVD__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_UVD__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_UVD__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_UVD__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_UVD__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_UVD__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_UVD__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_UVD__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_UVD__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_UVD__VM_BYPASS_MASK 0x00010000L
+#define MC_HUB_RDREQ_UVD__VM_BYPASS__SHIFT 0x00000010
+#define MC_HUB_RDREQ_VCE__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VCE__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VCE__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VCE__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VCE__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VCE__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VCE__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VCE__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VCE__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VCE__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VCE__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VCE__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VCE__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_VCEU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VCEU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VCEU__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VCEU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VCEU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VCEU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VCEU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VCEU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VCEU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VCEU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VCEU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VCEU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VCEU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_VMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_VMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_VMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_VMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_VMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_VMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_VMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_VMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_VMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_VMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_VMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_VMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_VMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_HUB_RDREQ_WTM_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_HUB_RDREQ_XDMAM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_RDREQ_XDMAM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_RDREQ_XDMAM__ENABLE_MASK 0x00000001L
+#define MC_HUB_RDREQ_XDMAM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_RDREQ_XDMAM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_RDREQ_XDMAM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_RDREQ_XDMAM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_RDREQ_XDMAM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_RDREQ_XDMAM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_RDREQ_XDMAM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_RDREQ_XDMAM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_RDREQ_XDMAM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_RDREQ_XDMAM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_SHARED_DAGB_DLY__CLI_MASK 0x001f0000L
+#define MC_HUB_SHARED_DAGB_DLY__CLI__SHIFT 0x00000010
+#define MC_HUB_SHARED_DAGB_DLY__DLY_MASK 0x0000003fL
+#define MC_HUB_SHARED_DAGB_DLY__DLY__SHIFT 0x00000000
+#define MC_HUB_SHARED_DAGB_DLY__POS_MASK 0x1f000000L
+#define MC_HUB_SHARED_DAGB_DLY__POS__SHIFT 0x00000018
+#define MC_HUB_WDP_BP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_BP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_BP__RDRET_MASK 0x0003fffeL
+#define MC_HUB_WDP_BP__RDRET__SHIFT 0x00000001
+#define MC_HUB_WDP_BP__WRREQ_MASK 0x3ffc0000L
+#define MC_HUB_WDP_BP__WRREQ__SHIFT 0x00000012
+#define MC_HUB_WDP_CNTL__DEBUG_REG_MASK 0x00001fe0L
+#define MC_HUB_WDP_CNTL__DEBUG_REG__SHIFT 0x00000005
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL0_MASK 0x00002000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL0__SHIFT 0x0000000d
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL1_MASK 0x00004000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_GBL1__SHIFT 0x0000000e
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_INTERNAL_MASK 0x00008000L
+#define MC_HUB_WDP_CNTL__DISABLE_SELF_INIT_INTERNAL__SHIFT 0x0000000f
+#define MC_HUB_WDP_CNTL__DISP_WAIT_EOP_MASK 0x00040000L
+#define MC_HUB_WDP_CNTL__DISP_WAIT_EOP__SHIFT 0x00000012
+#define MC_HUB_WDP_CNTL__FAIR_CH_SW_MASK 0x00010000L
+#define MC_HUB_WDP_CNTL__FAIR_CH_SW__SHIFT 0x00000010
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL0_MASK 0x00000002L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL0__SHIFT 0x00000001
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL1_MASK 0x00000004L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_GBL1__SHIFT 0x00000002
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_INTERNAL_MASK 0x00000008L
+#define MC_HUB_WDP_CNTL__JUMPAHEAD_INTERNAL__SHIFT 0x00000003
+#define MC_HUB_WDP_CNTL__LCLWRREQ_BYPASS_MASK 0x00020000L
+#define MC_HUB_WDP_CNTL__LCLWRREQ_BYPASS__SHIFT 0x00000011
+#define MC_HUB_WDP_CNTL__MCD_WAIT_EOP_MASK 0x00080000L
+#define MC_HUB_WDP_CNTL__MCD_WAIT_EOP__SHIFT 0x00000013
+#define MC_HUB_WDP_CNTL__OVERRIDE_STALL_ENABLE_MASK 0x00000010L
+#define MC_HUB_WDP_CNTL__OVERRIDE_STALL_ENABLE__SHIFT 0x00000004
+#define MC_HUB_WDP_CNTL__SIP_WAIT_EOP_MASK 0x00100000L
+#define MC_HUB_WDP_CNTL__SIP_WAIT_EOP__SHIFT 0x00000014
+#define MC_HUB_WDP_CREDITS__STOR0_MASK 0x00ff0000L
+#define MC_HUB_WDP_CREDITS__STOR0__SHIFT 0x00000010
+#define MC_HUB_WDP_CREDITS__STOR1_MASK 0xff000000L
+#define MC_HUB_WDP_CREDITS__STOR1__SHIFT 0x00000018
+#define MC_HUB_WDP_CREDITS__VM0_MASK 0x000000ffL
+#define MC_HUB_WDP_CREDITS__VM0__SHIFT 0x00000000
+#define MC_HUB_WDP_CREDITS__VM1_MASK 0x0000ff00L
+#define MC_HUB_WDP_CREDITS__VM1__SHIFT 0x00000008
+#define MC_HUB_WDP_ERR__MGPU1_TARG_SYS_MASK 0x00000001L
+#define MC_HUB_WDP_ERR__MGPU1_TARG_SYS__SHIFT 0x00000000
+#define MC_HUB_WDP_ERR__MGPU2_TARG_SYS_MASK 0x00000002L
+#define MC_HUB_WDP_ERR__MGPU2_TARG_SYS__SHIFT 0x00000001
+#define MC_HUB_WDP_GBL0__LAZY_TIMER_MASK 0x000000f0L
+#define MC_HUB_WDP_GBL0__LAZY_TIMER__SHIFT 0x00000004
+#define MC_HUB_WDP_GBL0__MAXBURST_MASK 0x0000000fL
+#define MC_HUB_WDP_GBL0__MAXBURST__SHIFT 0x00000000
+#define MC_HUB_WDP_GBL0__STALL_MODE_MASK 0x00010000L
+#define MC_HUB_WDP_GBL0__STALL_MODE__SHIFT 0x00000010
+#define MC_HUB_WDP_GBL0__STALL_THRESHOLD_MASK 0x0000ff00L
+#define MC_HUB_WDP_GBL0__STALL_THRESHOLD__SHIFT 0x00000008
+#define MC_HUB_WDP_GBL1__LAZY_TIMER_MASK 0x000000f0L
+#define MC_HUB_WDP_GBL1__LAZY_TIMER__SHIFT 0x00000004
+#define MC_HUB_WDP_GBL1__MAXBURST_MASK 0x0000000fL
+#define MC_HUB_WDP_GBL1__MAXBURST__SHIFT 0x00000000
+#define MC_HUB_WDP_GBL1__STALL_MODE_MASK 0x00010000L
+#define MC_HUB_WDP_GBL1__STALL_MODE__SHIFT 0x00000010
+#define MC_HUB_WDP_GBL1__STALL_THRESHOLD_MASK 0x0000ff00L
+#define MC_HUB_WDP_GBL1__STALL_THRESHOLD__SHIFT 0x00000008
+#define MC_HUB_WDP_HDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_HDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_HDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_HDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_HDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_HDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_HDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_HDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_HDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_HDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_HDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_HDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_HDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_IH__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_IH__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_IH__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_IH__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_IH__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_IH__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_IH__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_IH__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_IH__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_IH__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_IH__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_IH__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_IH__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_IH__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDW__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDW__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDW__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDW__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDW__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDW__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDW__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDW__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDW__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDW__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDW__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDW__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDW__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDW__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDX__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDX__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDX__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDX__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDX__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDX__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDX__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDX__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDX__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDX__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDX__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDX__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDX__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDX__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDY__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDY__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDY__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDY__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDY__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDY__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDY__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDY__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDY__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDY__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDY__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDY__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDY__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDY__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_MASK 0x00001f80L
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS__SHIFT 0x00000007
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_W_MASK 0x7f000000L
+#define MC_HUB_WDP_MCDZ__ASK_CREDITS_W__SHIFT 0x00000018
+#define MC_HUB_WDP_MCDZ__BLACKOUT_EXEMPT_MASK 0x00000002L
+#define MC_HUB_WDP_MCDZ__BLACKOUT_EXEMPT__SHIFT 0x00000001
+#define MC_HUB_WDP_MCDZ__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCDZ__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCDZ__LAZY_TIMER_MASK 0x0001e000L
+#define MC_HUB_WDP_MCDZ__LAZY_TIMER__SHIFT 0x0000000d
+#define MC_HUB_WDP_MCDZ__MAXBURST_MASK 0x00000078L
+#define MC_HUB_WDP_MCDZ__MAXBURST__SHIFT 0x00000003
+#define MC_HUB_WDP_MCDZ__STALL_MODE_MASK 0x00000004L
+#define MC_HUB_WDP_MCDZ__STALL_MODE__SHIFT 0x00000002
+#define MC_HUB_WDP_MCDZ__STALL_THRESHOLD_MASK 0x00fe0000L
+#define MC_HUB_WDP_MCDZ__STALL_THRESHOLD__SHIFT 0x00000011
+#define MC_HUB_WDP_MCIF__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_MCIF__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_MCIF__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_MCIF__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_MCIF__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_MCIF__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_MCIF__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_MCIF__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_MCIF__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_MCIF__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_MCIF__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_MCIF__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_MCIF__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_MGPU2__CID2_MASK 0x000000ffL
+#define MC_HUB_WDP_MGPU2__CID2__SHIFT 0x00000000
+#define MC_HUB_WDP_MGPU__CID_MASK 0x0000ff00L
+#define MC_HUB_WDP_MGPU__CID__SHIFT 0x00000008
+#define MC_HUB_WDP_MGPU__ENABLE_MASK 0x00800000L
+#define MC_HUB_WDP_MGPU__ENABLE__SHIFT 0x00000017
+#define MC_HUB_WDP_MGPU__MGPU_PRIORITY_TIME_MASK 0x007f0000L
+#define MC_HUB_WDP_MGPU__MGPU_PRIORITY_TIME__SHIFT 0x00000010
+#define MC_HUB_WDP_MGPU__OTH_PRIORITY_TIME_MASK 0x7f000000L
+#define MC_HUB_WDP_MGPU__OTH_PRIORITY_TIME__SHIFT 0x00000018
+#define MC_HUB_WDP_MGPU__STOR_MASK 0x000000ffL
+#define MC_HUB_WDP_MGPU__STOR__SHIFT 0x00000000
+#define MC_HUB_WDP_RLC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_RLC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_RLC__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_RLC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_RLC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_RLC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_RLC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_RLC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_RLC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_RLC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_RLC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_RLC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_RLC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SEM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SEM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SEM__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SEM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SEM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SEM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SEM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SEM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SEM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SEM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SEM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SEM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SEM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SH0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SH0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SH0__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SH0__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SH0__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SH0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SH0__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SH0__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SH0__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SH0__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SH0__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SH0__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SH0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SH1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SH1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SH1__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SH1__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SH1__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SH1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SH1__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SH1__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SH1__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SH1__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SH1__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SH1__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SH1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_SIP__ASK_CREDITS_MASK 0x000001fcL
+#define MC_HUB_WDP_SIP__ASK_CREDITS__SHIFT 0x00000002
+#define MC_HUB_WDP_SIP__STALL_MODE_MASK 0x00000003L
+#define MC_HUB_WDP_SIP__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WDP_SMU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_SMU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_SMU__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_SMU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_SMU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_SMU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_SMU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_SMU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_SMU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_SMU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_SMU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_SMU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_SMU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_STATUS__GBL0_BYPASS_STOR_FULL_MASK 0x00000080L
+#define MC_HUB_WDP_STATUS__GBL0_BYPASS_STOR_FULL__SHIFT 0x00000007
+#define MC_HUB_WDP_STATUS__GBL0_STOR_FULL_MASK 0x00000040L
+#define MC_HUB_WDP_STATUS__GBL0_STOR_FULL__SHIFT 0x00000006
+#define MC_HUB_WDP_STATUS__GBL0_VM_FULL_MASK 0x00000020L
+#define MC_HUB_WDP_STATUS__GBL0_VM_FULL__SHIFT 0x00000005
+#define MC_HUB_WDP_STATUS__GBL1_BYPASS_STOR_FULL_MASK 0x00000400L
+#define MC_HUB_WDP_STATUS__GBL1_BYPASS_STOR_FULL__SHIFT 0x0000000a
+#define MC_HUB_WDP_STATUS__GBL1_STOR_FULL_MASK 0x00000200L
+#define MC_HUB_WDP_STATUS__GBL1_STOR_FULL__SHIFT 0x00000009
+#define MC_HUB_WDP_STATUS__GBL1_VM_FULL_MASK 0x00000100L
+#define MC_HUB_WDP_STATUS__GBL1_VM_FULL__SHIFT 0x00000008
+#define MC_HUB_WDP_STATUS__MCDW_RD_AVAIL_MASK 0x00000002L
+#define MC_HUB_WDP_STATUS__MCDW_RD_AVAIL__SHIFT 0x00000001
+#define MC_HUB_WDP_STATUS__MCDW_WR_AVAIL_MASK 0x00000800L
+#define MC_HUB_WDP_STATUS__MCDW_WR_AVAIL__SHIFT 0x0000000b
+#define MC_HUB_WDP_STATUS__MCDX_RD_AVAIL_MASK 0x00000004L
+#define MC_HUB_WDP_STATUS__MCDX_RD_AVAIL__SHIFT 0x00000002
+#define MC_HUB_WDP_STATUS__MCDX_WR_AVAIL_MASK 0x00001000L
+#define MC_HUB_WDP_STATUS__MCDX_WR_AVAIL__SHIFT 0x0000000c
+#define MC_HUB_WDP_STATUS__MCDY_RD_AVAIL_MASK 0x00000008L
+#define MC_HUB_WDP_STATUS__MCDY_RD_AVAIL__SHIFT 0x00000003
+#define MC_HUB_WDP_STATUS__MCDY_WR_AVAIL_MASK 0x00002000L
+#define MC_HUB_WDP_STATUS__MCDY_WR_AVAIL__SHIFT 0x0000000d
+#define MC_HUB_WDP_STATUS__MCDZ_RD_AVAIL_MASK 0x00000010L
+#define MC_HUB_WDP_STATUS__MCDZ_RD_AVAIL__SHIFT 0x00000004
+#define MC_HUB_WDP_STATUS__MCDZ_WR_AVAIL_MASK 0x00004000L
+#define MC_HUB_WDP_STATUS__MCDZ_WR_AVAIL__SHIFT 0x0000000e
+#define MC_HUB_WDP_STATUS__SIP_AVAIL_MASK 0x00000001L
+#define MC_HUB_WDP_STATUS__SIP_AVAIL__SHIFT 0x00000000
+#define MC_HUB_WDP_UMC__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_UMC__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_UMC__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_UMC__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_UMC__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_UMC__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_UMC__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_UMC__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_UMC__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_UMC__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_UMC__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_UMC__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_UMC__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_UVD__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_UVD__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_UVD__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_UVD__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_UVD__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_UVD__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_UVD__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_UVD__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_UVD__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_UVD__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_UVD__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_UVD__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_UVD__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_UVD__VM_BYPASS_MASK 0x00010000L
+#define MC_HUB_WDP_UVD__VM_BYPASS__SHIFT 0x00000010
+#define MC_HUB_WDP_VCE__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_VCE__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_VCE__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_VCE__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_VCE__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_VCE__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_VCE__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_VCE__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_VCE__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_VCE__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_VCE__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_VCE__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_VCE__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_VCEU__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_VCEU__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_VCEU__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_VCEU__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_VCEU__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_VCEU__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_VCEU__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_VCEU__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_VCEU__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_VCEU__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_VCEU__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_VCEU__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_VCEU__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_WTM_CNTL__GROUP0_DECREMENT_MASK 0x00000007L
+#define MC_HUB_WDP_WTM_CNTL__GROUP0_DECREMENT__SHIFT 0x00000000
+#define MC_HUB_WDP_WTM_CNTL__GROUP1_DECREMENT_MASK 0x00000038L
+#define MC_HUB_WDP_WTM_CNTL__GROUP1_DECREMENT__SHIFT 0x00000003
+#define MC_HUB_WDP_WTM_CNTL__GROUP2_DECREMENT_MASK 0x000001c0L
+#define MC_HUB_WDP_WTM_CNTL__GROUP2_DECREMENT__SHIFT 0x00000006
+#define MC_HUB_WDP_WTM_CNTL__GROUP3_DECREMENT_MASK 0x00000e00L
+#define MC_HUB_WDP_WTM_CNTL__GROUP3_DECREMENT__SHIFT 0x00000009
+#define MC_HUB_WDP_WTM_CNTL__GROUP4_DECREMENT_MASK 0x00007000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP4_DECREMENT__SHIFT 0x0000000c
+#define MC_HUB_WDP_WTM_CNTL__GROUP5_DECREMENT_MASK 0x00038000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP5_DECREMENT__SHIFT 0x0000000f
+#define MC_HUB_WDP_WTM_CNTL__GROUP6_DECREMENT_MASK 0x001c0000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP6_DECREMENT__SHIFT 0x00000012
+#define MC_HUB_WDP_WTM_CNTL__GROUP7_DECREMENT_MASK 0x00e00000L
+#define MC_HUB_WDP_WTM_CNTL__GROUP7_DECREMENT__SHIFT 0x00000015
+#define MC_HUB_WDP_XDMA__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDMA__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDMA__BYPASS_AVAIL_OVERRIDE_MASK 0x00010000L
+#define MC_HUB_WDP_XDMA__BYPASS_AVAIL_OVERRIDE__SHIFT 0x00000010
+#define MC_HUB_WDP_XDMA__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDMA__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDMA__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDMA__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDMA__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDMA__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDMAM__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDMAM__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDMAM__BYPASS_AVAIL_OVERRIDE_MASK 0x00010000L
+#define MC_HUB_WDP_XDMAM__BYPASS_AVAIL_OVERRIDE__SHIFT 0x00000010
+#define MC_HUB_WDP_XDMAM__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDMAM__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDMAM__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDMAM__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDMAM__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDMAM__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDMAM__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDMAM__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDMAM__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDMAM__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDMAM__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_XDMA__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDMA__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDMA__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDMA__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDMA__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WDP_XDP__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_HUB_WDP_XDP__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_HUB_WDP_XDP__ENABLE_MASK 0x00000001L
+#define MC_HUB_WDP_XDP__ENABLE__SHIFT 0x00000000
+#define MC_HUB_WDP_XDP__LAZY_TIMER_MASK 0x00007800L
+#define MC_HUB_WDP_XDP__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_HUB_WDP_XDP__MAXBURST_MASK 0x00000780L
+#define MC_HUB_WDP_XDP__MAXBURST__SHIFT 0x00000007
+#define MC_HUB_WDP_XDP__PRESCALE_MASK 0x00000006L
+#define MC_HUB_WDP_XDP__PRESCALE__SHIFT 0x00000001
+#define MC_HUB_WDP_XDP__STALL_MODE_MASK 0x00000030L
+#define MC_HUB_WDP_XDP__STALL_MODE__SHIFT 0x00000004
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_HUB_WDP_XDP__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_HUB_WRRET_CNTL__BP_ENABLE_MASK 0x00200000L
+#define MC_HUB_WRRET_CNTL__BP_ENABLE__SHIFT 0x00000015
+#define MC_HUB_WRRET_CNTL__BP_MASK 0x001ffffeL
+#define MC_HUB_WRRET_CNTL__BP__SHIFT 0x00000001
+#define MC_HUB_WRRET_CNTL__DEBUG_REG_MASK 0x3fc00000L
+#define MC_HUB_WRRET_CNTL__DEBUG_REG__SHIFT 0x00000016
+#define MC_HUB_WRRET_CNTL__DISABLE_SELF_INIT_MASK 0x40000000L
+#define MC_HUB_WRRET_CNTL__DISABLE_SELF_INIT__SHIFT 0x0000001e
+#define MC_HUB_WRRET_CNTL__FAIR_CH_SW_MASK 0x80000000L
+#define MC_HUB_WRRET_CNTL__FAIR_CH_SW__SHIFT 0x0000001f
+#define MC_HUB_WRRET_CNTL__JUMPAHEAD_MASK 0x00000001L
+#define MC_HUB_WRRET_CNTL__JUMPAHEAD__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDW__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDW__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDW__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDW__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDX__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDX__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDX__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDX__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDY__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDY__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDY__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDY__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_MCDZ__CREDIT_COUNT_MASK 0x000000feL
+#define MC_HUB_WRRET_MCDZ__CREDIT_COUNT__SHIFT 0x00000001
+#define MC_HUB_WRRET_MCDZ__STALL_MODE_MASK 0x00000001L
+#define MC_HUB_WRRET_MCDZ__STALL_MODE__SHIFT 0x00000000
+#define MC_HUB_WRRET_STATUS__MCDW_AVAIL_MASK 0x00000001L
+#define MC_HUB_WRRET_STATUS__MCDW_AVAIL__SHIFT 0x00000000
+#define MC_HUB_WRRET_STATUS__MCDX_AVAIL_MASK 0x00000002L
+#define MC_HUB_WRRET_STATUS__MCDX_AVAIL__SHIFT 0x00000001
+#define MC_HUB_WRRET_STATUS__MCDY_AVAIL_MASK 0x00000004L
+#define MC_HUB_WRRET_STATUS__MCDY_AVAIL__SHIFT 0x00000002
+#define MC_HUB_WRRET_STATUS__MCDZ_AVAIL_MASK 0x00000008L
+#define MC_HUB_WRRET_STATUS__MCDZ_AVAIL__SHIFT 0x00000003
+#define MC_IMP_CNTL__CAL_PWRON_MASK 0x80000000L
+#define MC_IMP_CNTL__CAL_PWRON__SHIFT 0x0000001f
+#define MC_IMP_CNTL__CAL_VREF_MASK 0x007f0000L
+#define MC_IMP_CNTL__CAL_VREFMODE_MASK 0x00000040L
+#define MC_IMP_CNTL__CAL_VREFMODE__SHIFT 0x00000006
+#define MC_IMP_CNTL__CAL_VREF_SEL_MASK 0x00000020L
+#define MC_IMP_CNTL__CAL_VREF_SEL__SHIFT 0x00000005
+#define MC_IMP_CNTL__CAL_VREF__SHIFT 0x00000010
+#define MC_IMP_CNTL__CAL_WHEN_IDLE_MASK 0x20000000L
+#define MC_IMP_CNTL__CAL_WHEN_IDLE__SHIFT 0x0000001d
+#define MC_IMP_CNTL__CAL_WHEN_REFRESH_MASK 0x40000000L
+#define MC_IMP_CNTL__CAL_WHEN_REFRESH__SHIFT 0x0000001e
+#define MC_IMP_CNTL__CLEAR_TIMEOUT_ERR_MASK 0x00000200L
+#define MC_IMP_CNTL__CLEAR_TIMEOUT_ERR__SHIFT 0x00000009
+#define MC_IMP_CNTL__MEM_IO_SAMPLE_CNT_MASK 0x0000e000L
+#define MC_IMP_CNTL__MEM_IO_SAMPLE_CNT__SHIFT 0x0000000d
+#define MC_IMP_CNTL__MEM_IO_UPDATE_RATE_MASK 0x0000001fL
+#define MC_IMP_CNTL__MEM_IO_UPDATE_RATE__SHIFT 0x00000000
+#define MC_IMP_CNTL__TIMEOUT_ERR_MASK 0x00000100L
+#define MC_IMP_CNTL__TIMEOUT_ERR__SHIFT 0x00000008
+#define MC_IMP_DEBUG__DEBUG_CAL_DONE_MASK 0x80000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_DONE__SHIFT 0x0000001f
+#define MC_IMP_DEBUG__DEBUG_CAL_EN_MASK 0x10000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_EN__SHIFT 0x0000001c
+#define MC_IMP_DEBUG__DEBUG_CAL_INTR_MASK 0x40000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_INTR__SHIFT 0x0000001e
+#define MC_IMP_DEBUG__DEBUG_CAL_START_MASK 0x20000000L
+#define MC_IMP_DEBUG__DEBUG_CAL_START__SHIFT 0x0000001d
+#define MC_IMP_DEBUG__PMVCAL_RESERVED_MASK 0x0fff0000L
+#define MC_IMP_DEBUG__PMVCAL_RESERVED__SHIFT 0x00000010
+#define MC_IMP_DEBUG__TIMEOUT_CNTR_MASK 0x0000ff00L
+#define MC_IMP_DEBUG__TIMEOUT_CNTR__SHIFT 0x00000008
+#define MC_IMP_DEBUG__TSTARTUP_CNTR_MASK 0x000000ffL
+#define MC_IMP_DEBUG__TSTARTUP_CNTR__SHIFT 0x00000000
+#define MC_IMP_DQ_STATUS__CH0_DQ_NSTR_MASK 0x0000ff00L
+#define MC_IMP_DQ_STATUS__CH0_DQ_NSTR__SHIFT 0x00000008
+#define MC_IMP_DQ_STATUS__CH0_DQ_PSTR_MASK 0x000000ffL
+#define MC_IMP_DQ_STATUS__CH0_DQ_PSTR__SHIFT 0x00000000
+#define MC_IMP_DQ_STATUS__CH1_DQ_NSTR_MASK 0xff000000L
+#define MC_IMP_DQ_STATUS__CH1_DQ_NSTR__SHIFT 0x00000018
+#define MC_IMP_DQ_STATUS__CH1_DQ_PSTR_MASK 0x00ff0000L
+#define MC_IMP_DQ_STATUS__CH1_DQ_PSTR__SHIFT 0x00000010
+#define MC_IMP_STATUS__NSTR_ACCUM_VAL_MASK 0xff000000L
+#define MC_IMP_STATUS__NSTR_ACCUM_VAL__SHIFT 0x00000018
+#define MC_IMP_STATUS__NSTR_CAL_MASK 0x00ff0000L
+#define MC_IMP_STATUS__NSTR_CAL__SHIFT 0x00000010
+#define MC_IMP_STATUS__PSTR_ACCUM_VAL_MASK 0x0000ff00L
+#define MC_IMP_STATUS__PSTR_ACCUM_VAL__SHIFT 0x00000008
+#define MC_IMP_STATUS__PSTR_CAL_MASK 0x000000ffL
+#define MC_IMP_STATUS__PSTR_CAL__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D0__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_APHY_STR_CNTL_D0__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_A_STR_MASK 0x10000000L
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_A_STR__SHIFT 0x0000001c
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_D_RD_STR_MASK 0x20000000L
+#define MC_IO_APHY_STR_CNTL_D0__LOAD_D_RD_STR__SHIFT 0x0000001d
+#define MC_IO_APHY_STR_CNTL_D0__NSTR_OFF_A_MASK 0x00000fc0L
+#define MC_IO_APHY_STR_CNTL_D0__NSTR_OFF_A__SHIFT 0x00000006
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_A_MASK 0x0000003fL
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_A__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_D_RD_MASK 0x0003f000L
+#define MC_IO_APHY_STR_CNTL_D0__PSTR_OFF_D_RD__SHIFT 0x0000000c
+#define MC_IO_APHY_STR_CNTL_D0__USE_A_CAL_MASK 0x01000000L
+#define MC_IO_APHY_STR_CNTL_D0__USE_A_CAL__SHIFT 0x00000018
+#define MC_IO_APHY_STR_CNTL_D0__USE_D_RD_CAL_MASK 0x02000000L
+#define MC_IO_APHY_STR_CNTL_D0__USE_D_RD_CAL__SHIFT 0x00000019
+#define MC_IO_APHY_STR_CNTL_D1__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_APHY_STR_CNTL_D1__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_A_STR_MASK 0x10000000L
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_A_STR__SHIFT 0x0000001c
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_D_RD_STR_MASK 0x20000000L
+#define MC_IO_APHY_STR_CNTL_D1__LOAD_D_RD_STR__SHIFT 0x0000001d
+#define MC_IO_APHY_STR_CNTL_D1__NSTR_OFF_A_MASK 0x00000fc0L
+#define MC_IO_APHY_STR_CNTL_D1__NSTR_OFF_A__SHIFT 0x00000006
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_A_MASK 0x0000003fL
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_A__SHIFT 0x00000000
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_D_RD_MASK 0x0003f000L
+#define MC_IO_APHY_STR_CNTL_D1__PSTR_OFF_D_RD__SHIFT 0x0000000c
+#define MC_IO_APHY_STR_CNTL_D1__USE_A_CAL_MASK 0x01000000L
+#define MC_IO_APHY_STR_CNTL_D1__USE_A_CAL__SHIFT 0x00000018
+#define MC_IO_APHY_STR_CNTL_D1__USE_D_RD_CAL_MASK 0x02000000L
+#define MC_IO_APHY_STR_CNTL_D1__USE_D_RD_CAL__SHIFT 0x00000019
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B0_MASK 0x000000ffL
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B1_MASK 0x0000ff00L
+#define MC_IO_CDRCNTL1_D0__DQ_RXPHASE_B1__SHIFT 0x00000008
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B0_MASK 0x00ff0000L
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B0__SHIFT 0x00000010
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B1_MASK 0xff000000L
+#define MC_IO_CDRCNTL1_D0__WCDR_TXPHASE_B1__SHIFT 0x00000018
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B0_MASK 0x000000ffL
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B1_MASK 0x0000ff00L
+#define MC_IO_CDRCNTL1_D1__DQ_RXPHASE_B1__SHIFT 0x00000008
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B0_MASK 0x00ff0000L
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B0__SHIFT 0x00000010
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B1_MASK 0xff000000L
+#define MC_IO_CDRCNTL1_D1__WCDR_TXPHASE_B1__SHIFT 0x00000018
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL0_MASK 0x00000001L
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL1_MASK 0x00000002L
+#define MC_IO_CDRCNTL2_D0__CDR_FB_SEL1__SHIFT 0x00000001
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR0_MASK 0x00000004L
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR0__SHIFT 0x00000002
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR1_MASK 0x00000008L
+#define MC_IO_CDRCNTL2_D0__EDC_RXEN_OVR1__SHIFT 0x00000003
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS0_MASK 0x00000010L
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS0__SHIFT 0x00000004
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS1_MASK 0x00000020L
+#define MC_IO_CDRCNTL2_D0__TXCDRBYPASS1__SHIFT 0x00000005
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR0_MASK 0x00000040L
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR0__SHIFT 0x00000006
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR1_MASK 0x00000080L
+#define MC_IO_CDRCNTL2_D0__WCK_RXEN_OVR1__SHIFT 0x00000007
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL0_MASK 0x00000001L
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL0__SHIFT 0x00000000
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL1_MASK 0x00000002L
+#define MC_IO_CDRCNTL2_D1__CDR_FB_SEL1__SHIFT 0x00000001
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR0_MASK 0x00000004L
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR0__SHIFT 0x00000002
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR1_MASK 0x00000008L
+#define MC_IO_CDRCNTL2_D1__EDC_RXEN_OVR1__SHIFT 0x00000003
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS0_MASK 0x00000010L
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS0__SHIFT 0x00000004
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS1_MASK 0x00000020L
+#define MC_IO_CDRCNTL2_D1__TXCDRBYPASS1__SHIFT 0x00000005
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR0_MASK 0x00000040L
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR0__SHIFT 0x00000006
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR1_MASK 0x00000080L
+#define MC_IO_CDRCNTL2_D1__WCK_RXEN_OVR1__SHIFT 0x00000007
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B0_MASK 0x00400000L
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B0__SHIFT 0x00000016
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B1_MASK 0x00800000L
+#define MC_IO_CDRCNTL_D0__DQRXCDREN_B1__SHIFT 0x00000017
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B0_MASK 0x10000000L
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B0__SHIFT 0x0000001c
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B1_MASK 0x20000000L
+#define MC_IO_CDRCNTL_D0__DQRXSEL_B1__SHIFT 0x0000001d
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B0_MASK 0x00100000L
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B0__SHIFT 0x00000014
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B1_MASK 0x00200000L
+#define MC_IO_CDRCNTL_D0__DQTXCDREN_B1__SHIFT 0x00000015
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B0_MASK 0x40000000L
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B0__SHIFT 0x0000001e
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B1_MASK 0x80000000L
+#define MC_IO_CDRCNTL_D0__DQTXSEL_B1__SHIFT 0x0000001f
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B01_MASK 0x00000400L
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B01__SHIFT 0x0000000a
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B23_MASK 0x00000800L
+#define MC_IO_CDRCNTL_D0__RXCDRBYPASS_B23__SHIFT 0x0000000b
+#define MC_IO_CDRCNTL_D0__RXCDREN_B01_MASK 0x00000100L
+#define MC_IO_CDRCNTL_D0__RXCDREN_B01__SHIFT 0x00000008
+#define MC_IO_CDRCNTL_D0__RXCDREN_B23_MASK 0x00000200L
+#define MC_IO_CDRCNTL_D0__RXCDREN_B23__SHIFT 0x00000009
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B01_MASK 0x0000f000L
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B01__SHIFT 0x0000000c
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B23_MASK 0x000f0000L
+#define MC_IO_CDRCNTL_D0__RXPHASE1_B23__SHIFT 0x00000010
+#define MC_IO_CDRCNTL_D0__RXPHASE_B01_MASK 0x0000000fL
+#define MC_IO_CDRCNTL_D0__RXPHASE_B01__SHIFT 0x00000000
+#define MC_IO_CDRCNTL_D0__RXPHASE_B23_MASK 0x000000f0L
+#define MC_IO_CDRCNTL_D0__RXPHASE_B23__SHIFT 0x00000004
+#define MC_IO_CDRCNTL_D0__WCDREDC_B0_MASK 0x04000000L
+#define MC_IO_CDRCNTL_D0__WCDREDC_B0__SHIFT 0x0000001a
+#define MC_IO_CDRCNTL_D0__WCDREDC_B1_MASK 0x08000000L
+#define MC_IO_CDRCNTL_D0__WCDREDC_B1__SHIFT 0x0000001b
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B0_MASK 0x01000000L
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B0__SHIFT 0x00000018
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B1_MASK 0x02000000L
+#define MC_IO_CDRCNTL_D0__WCDRRXCDREN_B1__SHIFT 0x00000019
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B0_MASK 0x00400000L
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B0__SHIFT 0x00000016
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B1_MASK 0x00800000L
+#define MC_IO_CDRCNTL_D1__DQRXCDREN_B1__SHIFT 0x00000017
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B0_MASK 0x10000000L
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B0__SHIFT 0x0000001c
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B1_MASK 0x20000000L
+#define MC_IO_CDRCNTL_D1__DQRXSEL_B1__SHIFT 0x0000001d
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B0_MASK 0x00100000L
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B0__SHIFT 0x00000014
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B1_MASK 0x00200000L
+#define MC_IO_CDRCNTL_D1__DQTXCDREN_B1__SHIFT 0x00000015
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B0_MASK 0x40000000L
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B0__SHIFT 0x0000001e
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B1_MASK 0x80000000L
+#define MC_IO_CDRCNTL_D1__DQTXSEL_B1__SHIFT 0x0000001f
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B01_MASK 0x00000400L
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B01__SHIFT 0x0000000a
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B23_MASK 0x00000800L
+#define MC_IO_CDRCNTL_D1__RXCDRBYPASS_B23__SHIFT 0x0000000b
+#define MC_IO_CDRCNTL_D1__RXCDREN_B01_MASK 0x00000100L
+#define MC_IO_CDRCNTL_D1__RXCDREN_B01__SHIFT 0x00000008
+#define MC_IO_CDRCNTL_D1__RXCDREN_B23_MASK 0x00000200L
+#define MC_IO_CDRCNTL_D1__RXCDREN_B23__SHIFT 0x00000009
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B01_MASK 0x0000f000L
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B01__SHIFT 0x0000000c
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B23_MASK 0x000f0000L
+#define MC_IO_CDRCNTL_D1__RXPHASE1_B23__SHIFT 0x00000010
+#define MC_IO_CDRCNTL_D1__RXPHASE_B01_MASK 0x0000000fL
+#define MC_IO_CDRCNTL_D1__RXPHASE_B01__SHIFT 0x00000000
+#define MC_IO_CDRCNTL_D1__RXPHASE_B23_MASK 0x000000f0L
+#define MC_IO_CDRCNTL_D1__RXPHASE_B23__SHIFT 0x00000004
+#define MC_IO_CDRCNTL_D1__WCDREDC_B0_MASK 0x04000000L
+#define MC_IO_CDRCNTL_D1__WCDREDC_B0__SHIFT 0x0000001a
+#define MC_IO_CDRCNTL_D1__WCDREDC_B1_MASK 0x08000000L
+#define MC_IO_CDRCNTL_D1__WCDREDC_B1__SHIFT 0x0000001b
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B0_MASK 0x01000000L
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B0__SHIFT 0x00000018
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B1_MASK 0x02000000L
+#define MC_IO_CDRCNTL_D1__WCDRRXCDREN_B1__SHIFT 0x00000019
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ACMD_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRH_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_ADDRL_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CK_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_CMD_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DBI_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ0_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQ1_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB0L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB1L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB2L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3H_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_DQB3L_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_EDC_TXSLF_D1__VALUE3__SHIFT 0x00000018
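+
+/*
+ * Note (usage sketch only, not part of the generated register header):
+ * every MC_IO_DEBUG_* register above packs four byte-wide VALUE0..VALUE3
+ * fields into one 32-bit word, each described by a _MASK/_SHIFT pair.
+ * The helper names below are made up purely for illustration of the
+ * generic (reg & MASK) >> SHIFT idiom these definitions are meant for.
+ *
+ * #include <linux/types.h>
+ *
+ * static inline u32 mc_io_debug_get_value2(u32 regval)
+ * {
+ *	return (regval & MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2_MASK) >>
+ *	       MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2__SHIFT;
+ * }
+ *
+ * static inline u32 mc_io_debug_set_value2(u32 regval, u32 val)
+ * {
+ *	regval &= ~MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2_MASK;
+ *	regval |= (val << MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2__SHIFT) &
+ *		  MC_IO_DEBUG_EDC_TXSLF_D1__VALUE2_MASK;
+ *	return regval;
+ * }
+ */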
+#define MC_IO_DEBUG_UP_0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_100__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_100__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_100__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_100__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_100__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_100__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_100__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_100__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_101__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_101__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_101__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_101__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_101__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_101__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_101__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_101__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_102__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_102__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_102__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_102__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_102__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_102__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_102__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_102__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_103__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_103__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_103__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_103__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_103__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_103__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_103__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_103__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_104__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_104__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_104__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_104__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_104__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_104__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_104__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_104__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_105__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_105__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_105__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_105__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_105__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_105__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_105__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_105__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_106__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_106__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_106__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_106__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_106__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_106__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_106__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_106__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_107__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_107__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_107__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_107__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_107__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_107__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_107__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_107__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_108__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_108__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_108__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_108__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_108__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_108__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_108__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_108__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_109__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_109__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_109__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_109__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_109__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_109__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_109__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_109__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_10__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_10__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_10__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_10__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_10__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_10__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_10__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_10__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_110__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_110__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_110__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_110__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_110__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_110__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_110__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_110__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_111__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_111__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_111__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_111__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_111__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_111__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_111__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_111__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_112__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_112__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_112__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_112__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_112__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_112__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_112__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_112__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_113__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_113__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_113__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_113__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_113__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_113__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_113__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_113__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_114__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_114__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_114__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_114__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_114__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_114__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_114__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_114__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_115__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_115__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_115__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_115__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_115__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_115__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_115__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_115__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_116__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_116__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_116__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_116__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_116__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_116__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_116__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_116__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_117__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_117__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_117__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_117__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_117__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_117__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_117__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_117__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_118__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_118__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_118__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_118__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_118__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_118__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_118__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_118__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_119__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_119__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_119__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_119__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_119__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_119__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_119__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_119__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_11__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_11__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_11__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_11__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_11__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_11__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_11__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_11__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_120__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_120__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_120__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_120__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_120__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_120__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_120__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_120__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_121__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_121__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_121__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_121__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_121__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_121__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_121__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_121__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_122__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_122__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_122__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_122__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_122__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_122__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_122__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_122__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_123__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_123__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_123__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_123__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_123__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_123__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_123__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_123__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_124__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_124__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_124__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_124__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_124__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_124__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_124__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_124__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_125__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_125__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_125__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_125__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_125__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_125__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_125__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_125__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_126__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_126__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_126__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_126__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_126__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_126__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_126__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_126__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_127__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_127__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_127__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_127__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_127__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_127__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_127__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_127__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_128__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_128__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_128__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_128__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_128__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_128__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_128__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_128__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_129__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_129__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_129__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_129__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_129__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_129__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_129__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_129__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_12__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_12__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_12__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_12__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_12__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_12__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_12__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_12__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_130__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_130__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_130__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_130__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_130__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_130__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_130__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_130__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_131__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_131__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_131__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_131__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_131__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_131__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_131__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_131__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_132__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_132__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_132__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_132__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_132__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_132__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_132__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_132__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_133__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_133__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_133__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_133__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_133__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_133__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_133__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_133__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_134__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_134__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_134__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_134__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_134__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_134__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_134__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_134__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_135__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_135__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_135__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_135__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_135__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_135__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_135__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_135__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_136__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_136__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_136__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_136__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_136__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_136__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_136__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_136__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_137__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_137__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_137__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_137__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_137__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_137__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_137__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_137__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_138__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_138__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_138__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_138__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_138__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_138__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_138__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_138__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_139__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_139__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_139__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_139__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_139__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_139__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_139__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_139__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_13__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_13__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_13__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_13__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_13__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_13__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_13__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_13__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_140__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_140__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_140__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_140__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_140__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_140__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_140__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_140__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_141__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_141__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_141__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_141__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_141__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_141__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_141__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_141__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_142__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_142__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_142__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_142__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_142__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_142__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_142__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_142__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_143__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_143__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_143__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_143__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_143__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_143__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_143__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_143__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_144__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_144__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_144__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_144__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_144__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_144__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_144__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_144__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_145__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_145__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_145__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_145__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_145__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_145__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_145__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_145__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_146__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_146__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_146__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_146__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_146__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_146__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_146__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_146__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_147__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_147__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_147__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_147__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_147__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_147__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_147__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_147__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_148__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_148__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_148__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_148__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_148__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_148__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_148__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_148__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_149__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_149__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_149__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_149__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_149__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_149__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_149__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_149__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_14__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_14__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_14__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_14__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_14__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_14__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_14__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_14__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_150__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_150__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_150__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_150__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_150__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_150__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_150__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_150__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_151__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_151__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_151__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_151__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_151__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_151__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_151__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_151__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_152__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_152__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_152__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_152__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_152__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_152__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_152__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_152__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_153__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_153__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_153__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_153__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_153__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_153__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_153__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_153__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_154__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_154__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_154__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_154__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_154__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_154__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_154__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_154__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_155__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_155__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_155__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_155__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_155__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_155__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_155__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_155__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_156__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_156__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_156__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_156__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_156__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_156__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_156__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_156__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_157__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_157__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_157__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_157__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_157__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_157__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_157__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_157__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_158__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_158__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_158__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_158__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_158__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_158__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_158__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_158__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_159__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_159__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_159__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_159__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_159__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_159__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_159__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_159__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_15__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_15__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_15__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_15__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_15__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_15__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_15__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_15__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_16__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_16__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_16__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_16__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_16__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_16__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_16__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_16__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_17__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_17__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_17__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_17__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_17__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_17__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_17__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_17__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_18__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_18__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_18__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_18__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_18__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_18__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_18__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_18__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_19__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_19__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_19__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_19__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_19__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_19__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_19__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_19__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_20__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_20__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_20__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_20__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_20__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_20__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_20__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_20__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_21__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_21__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_21__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_21__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_21__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_21__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_21__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_21__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_22__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_22__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_22__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_22__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_22__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_22__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_22__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_22__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_23__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_23__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_23__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_23__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_23__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_23__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_23__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_23__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_24__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_24__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_24__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_24__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_24__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_24__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_24__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_24__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_25__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_25__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_25__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_25__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_25__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_25__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_25__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_25__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_26__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_26__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_26__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_26__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_26__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_26__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_26__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_26__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_27__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_27__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_27__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_27__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_27__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_27__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_27__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_27__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_28__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_28__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_28__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_28__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_28__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_28__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_28__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_28__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_29__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_29__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_29__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_29__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_29__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_29__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_29__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_29__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_2__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_2__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_2__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_2__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_2__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_2__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_2__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_2__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_30__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_30__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_30__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_30__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_30__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_30__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_30__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_30__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_31__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_31__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_31__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_31__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_31__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_31__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_31__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_31__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_32__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_32__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_32__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_32__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_32__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_32__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_32__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_32__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_33__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_33__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_33__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_33__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_33__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_33__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_33__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_33__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_34__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_34__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_34__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_34__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_34__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_34__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_34__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_34__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_35__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_35__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_35__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_35__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_35__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_35__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_35__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_35__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_36__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_36__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_36__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_36__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_36__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_36__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_36__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_36__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_37__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_37__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_37__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_37__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_37__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_37__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_37__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_37__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_38__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_38__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_38__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_38__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_38__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_38__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_38__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_38__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_39__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_39__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_39__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_39__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_39__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_39__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_39__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_39__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_3__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_3__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_3__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_3__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_3__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_3__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_3__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_3__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_40__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_40__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_40__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_40__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_40__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_40__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_40__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_40__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_41__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_41__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_41__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_41__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_41__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_41__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_41__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_41__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_42__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_42__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_42__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_42__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_42__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_42__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_42__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_42__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_43__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_43__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_43__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_43__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_43__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_43__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_43__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_43__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_44__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_44__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_44__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_44__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_44__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_44__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_44__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_44__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_45__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_45__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_45__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_45__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_45__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_45__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_45__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_45__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_46__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_46__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_46__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_46__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_46__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_46__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_46__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_46__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_47__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_47__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_47__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_47__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_47__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_47__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_47__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_47__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_48__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_48__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_48__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_48__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_48__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_48__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_48__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_48__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_49__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_49__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_49__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_49__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_49__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_49__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_49__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_49__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_4__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_4__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_4__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_4__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_4__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_4__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_4__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_4__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_50__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_50__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_50__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_50__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_50__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_50__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_50__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_50__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_51__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_51__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_51__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_51__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_51__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_51__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_51__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_51__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_52__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_52__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_52__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_52__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_52__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_52__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_52__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_52__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_53__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_53__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_53__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_53__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_53__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_53__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_53__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_53__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_54__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_54__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_54__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_54__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_54__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_54__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_54__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_54__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_55__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_55__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_55__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_55__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_55__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_55__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_55__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_55__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_56__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_56__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_56__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_56__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_56__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_56__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_56__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_56__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_57__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_57__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_57__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_57__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_57__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_57__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_57__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_57__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_58__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_58__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_58__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_58__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_58__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_58__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_58__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_58__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_59__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_59__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_59__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_59__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_59__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_59__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_59__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_59__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_5__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_5__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_5__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_5__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_5__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_5__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_5__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_5__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_60__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_60__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_60__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_60__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_60__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_60__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_60__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_60__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_61__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_61__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_61__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_61__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_61__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_61__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_61__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_61__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_62__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_62__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_62__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_62__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_62__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_62__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_62__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_62__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_63__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_63__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_63__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_63__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_63__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_63__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_63__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_63__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_64__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_64__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_64__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_64__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_64__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_64__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_64__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_64__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_65__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_65__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_65__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_65__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_65__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_65__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_65__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_65__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_66__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_66__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_66__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_66__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_66__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_66__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_66__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_66__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_67__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_67__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_67__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_67__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_67__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_67__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_67__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_67__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_68__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_68__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_68__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_68__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_68__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_68__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_68__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_68__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_69__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_69__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_69__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_69__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_69__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_69__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_69__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_69__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_6__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_6__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_6__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_6__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_6__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_6__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_6__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_6__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_70__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_70__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_70__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_70__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_70__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_70__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_70__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_70__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_71__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_71__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_71__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_71__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_71__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_71__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_71__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_71__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_72__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_72__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_72__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_72__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_72__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_72__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_72__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_72__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_73__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_73__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_73__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_73__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_73__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_73__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_73__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_73__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_74__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_74__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_74__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_74__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_74__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_74__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_74__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_74__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_75__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_75__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_75__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_75__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_75__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_75__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_75__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_75__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_76__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_76__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_76__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_76__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_76__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_76__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_76__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_76__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_77__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_77__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_77__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_77__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_77__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_77__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_77__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_77__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_78__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_78__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_78__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_78__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_78__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_78__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_78__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_78__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_79__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_79__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_79__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_79__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_79__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_79__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_79__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_79__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_7__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_7__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_7__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_7__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_7__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_7__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_7__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_7__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_80__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_80__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_80__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_80__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_80__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_80__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_80__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_80__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_81__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_81__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_81__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_81__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_81__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_81__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_81__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_81__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_82__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_82__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_82__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_82__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_82__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_82__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_82__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_82__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_83__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_83__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_83__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_83__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_83__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_83__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_83__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_83__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_84__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_84__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_84__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_84__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_84__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_84__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_84__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_84__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_85__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_85__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_85__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_85__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_85__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_85__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_85__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_85__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_86__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_86__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_86__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_86__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_86__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_86__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_86__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_86__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_87__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_87__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_87__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_87__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_87__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_87__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_87__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_87__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_88__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_88__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_88__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_88__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_88__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_88__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_88__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_88__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_89__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_89__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_89__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_89__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_89__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_89__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_89__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_89__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_8__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_8__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_8__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_8__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_8__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_8__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_8__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_8__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_90__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_90__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_90__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_90__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_90__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_90__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_90__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_90__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_91__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_91__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_91__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_91__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_91__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_91__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_91__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_91__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_92__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_92__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_92__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_92__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_92__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_92__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_92__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_92__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_93__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_93__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_93__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_93__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_93__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_93__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_93__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_93__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_94__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_94__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_94__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_94__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_94__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_94__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_94__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_94__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_95__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_95__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_95__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_95__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_95__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_95__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_95__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_95__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_96__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_96__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_96__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_96__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_96__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_96__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_96__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_96__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_97__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_97__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_97__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_97__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_97__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_97__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_97__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_97__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_98__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_98__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_98__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_98__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_98__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_98__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_98__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_98__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_99__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_99__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_99__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_99__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_99__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_99__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_99__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_99__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_UP_9__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_UP_9__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_UP_9__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_UP_9__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_UP_9__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_UP_9__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_UP_9__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_UP_9__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CDR_PHSIZE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_DYN_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_EQ_PM_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCDR_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_CLKSEL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_MISC_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_MISC_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_OFSCAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_EQ_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_RX_VREF_CAL_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PD_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXBST_PU_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXPHASE_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXSLF_D0__VALUE3__SHIFT 0x00000018
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE0_MASK 0x000000ffL
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE0__SHIFT 0x00000000
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE1_MASK 0x0000ff00L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE1__SHIFT 0x00000008
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE2_MASK 0x00ff0000L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE2__SHIFT 0x00000010
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE3_MASK 0xff000000L
+#define MC_IO_DEBUG_WCK_TXSLF_D1__VALUE3__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D0__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_DPHY_STR_CNTL_D0__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_D_STR_MASK 0x10000000L
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_D_STR__SHIFT 0x0000001c
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_S_STR_MASK 0x20000000L
+#define MC_IO_DPHY_STR_CNTL_D0__LOAD_S_STR__SHIFT 0x0000001d
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_D_MASK 0x00000fc0L
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_D__SHIFT 0x00000006
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_S_MASK 0x00fc0000L
+#define MC_IO_DPHY_STR_CNTL_D0__NSTR_OFF_S__SHIFT 0x00000012
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_D_MASK 0x0000003fL
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_D__SHIFT 0x00000000
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_S_MASK 0x0003f000L
+#define MC_IO_DPHY_STR_CNTL_D0__PSTR_OFF_S__SHIFT 0x0000000c
+#define MC_IO_DPHY_STR_CNTL_D0__USE_D_CAL_MASK 0x01000000L
+#define MC_IO_DPHY_STR_CNTL_D0__USE_D_CAL__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D0__USE_S_CAL_MASK 0x02000000L
+#define MC_IO_DPHY_STR_CNTL_D0__USE_S_CAL__SHIFT 0x00000019
+#define MC_IO_DPHY_STR_CNTL_D1__CAL_SEL_MASK 0x0c000000L
+#define MC_IO_DPHY_STR_CNTL_D1__CAL_SEL__SHIFT 0x0000001a
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_D_STR_MASK 0x10000000L
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_D_STR__SHIFT 0x0000001c
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_S_STR_MASK 0x20000000L
+#define MC_IO_DPHY_STR_CNTL_D1__LOAD_S_STR__SHIFT 0x0000001d
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_D_MASK 0x00000fc0L
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_D__SHIFT 0x00000006
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_S_MASK 0x00fc0000L
+#define MC_IO_DPHY_STR_CNTL_D1__NSTR_OFF_S__SHIFT 0x00000012
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_D_MASK 0x0000003fL
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_D__SHIFT 0x00000000
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_S_MASK 0x0003f000L
+#define MC_IO_DPHY_STR_CNTL_D1__PSTR_OFF_S__SHIFT 0x0000000c
+#define MC_IO_DPHY_STR_CNTL_D1__USE_D_CAL_MASK 0x01000000L
+#define MC_IO_DPHY_STR_CNTL_D1__USE_D_CAL__SHIFT 0x00000018
+#define MC_IO_DPHY_STR_CNTL_D1__USE_S_CAL_MASK 0x02000000L
+#define MC_IO_DPHY_STR_CNTL_D1__USE_S_CAL__SHIFT 0x00000019
+#define MC_IO_PAD_CNTL__ATBEN_MASK 0x3f000000L
+#define MC_IO_PAD_CNTL__ATBEN__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL__ATBSEL_D0_MASK 0x80000000L
+#define MC_IO_PAD_CNTL__ATBSEL_D0__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL__ATBSEL_D1_MASK 0x40000000L
+#define MC_IO_PAD_CNTL__ATBSEL_D1__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL__ATBSEL_MASK 0x00f00000L
+#define MC_IO_PAD_CNTL__ATBSEL__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D0__CK_AUTO_EN_MASK 0x00100000L
+#define MC_IO_PAD_CNTL_D0__CK_AUTO_EN__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_N_MASK 0x00c00000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_N__SHIFT 0x00000016
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_P_MASK 0x03000000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_P__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_SEL_MASK 0x00200000L
+#define MC_IO_PAD_CNTL_D0__CK_DELAY_SEL__SHIFT 0x00000015
+#define MC_IO_PAD_CNTL_D0__DELAY_ADR_SYNC_MASK 0x00000010L
+#define MC_IO_PAD_CNTL_D0__DELAY_ADR_SYNC__SHIFT 0x00000004
+#define MC_IO_PAD_CNTL_D0__DELAY_CLK_SYNC_MASK 0x00000004L
+#define MC_IO_PAD_CNTL_D0__DELAY_CLK_SYNC__SHIFT 0x00000002
+#define MC_IO_PAD_CNTL_D0__DELAY_CMD_SYNC_MASK 0x00000008L
+#define MC_IO_PAD_CNTL_D0__DELAY_CMD_SYNC__SHIFT 0x00000003
+#define MC_IO_PAD_CNTL_D0__DIFF_STR_MASK 0x20000000L
+#define MC_IO_PAD_CNTL_D0__DIFF_STR__SHIFT 0x0000001d
+#define MC_IO_PAD_CNTL_D0__DISABLE_ADR_MASK 0x00002000L
+#define MC_IO_PAD_CNTL_D0__DISABLE_ADR__SHIFT 0x0000000d
+#define MC_IO_PAD_CNTL_D0__DISABLE_CMD_MASK 0x00001000L
+#define MC_IO_PAD_CNTL_D0__DISABLE_CMD__SHIFT 0x0000000c
+#define MC_IO_PAD_CNTL_D0__EN_RD_STR_DLY_MASK 0x00000800L
+#define MC_IO_PAD_CNTL_D0__EN_RD_STR_DLY__SHIFT 0x0000000b
+#define MC_IO_PAD_CNTL_D0__FORCE_EN_RD_STR_MASK 0x00000400L
+#define MC_IO_PAD_CNTL_D0__FORCE_EN_RD_STR__SHIFT 0x0000000a
+#define MC_IO_PAD_CNTL_D0__GDDR_PWRON_MASK 0x40000000L
+#define MC_IO_PAD_CNTL_D0__GDDR_PWRON__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_ADR_MASK 0x00000200L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_ADR__SHIFT 0x00000009
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CLK_MASK 0x00000080L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CLK__SHIFT 0x00000007
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CMD_MASK 0x00000100L
+#define MC_IO_PAD_CNTL_D0__MEM_FALL_OUT_CMD__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CKE_MASK 0x08000000L
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CKE__SHIFT 0x0000001b
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CLK_MASK 0x80000000L
+#define MC_IO_PAD_CNTL_D0__TXPWROFF_CLK__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL_D0__UNI_STR_MASK 0x10000000L
+#define MC_IO_PAD_CNTL_D0__UNI_STR__SHIFT 0x0000001c
+#define MC_IO_PAD_CNTL_D0__VREFI_EN_MASK 0x00004000L
+#define MC_IO_PAD_CNTL_D0__VREFI_EN__SHIFT 0x0000000e
+#define MC_IO_PAD_CNTL_D0__VREFI_SEL_MASK 0x000f8000L
+#define MC_IO_PAD_CNTL_D0__VREFI_SEL__SHIFT 0x0000000f
+#define MC_IO_PAD_CNTL_D1__CK_AUTO_EN_MASK 0x00100000L
+#define MC_IO_PAD_CNTL_D1__CK_AUTO_EN__SHIFT 0x00000014
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_N_MASK 0x00c00000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_N__SHIFT 0x00000016
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_P_MASK 0x03000000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_P__SHIFT 0x00000018
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_SEL_MASK 0x00200000L
+#define MC_IO_PAD_CNTL_D1__CK_DELAY_SEL__SHIFT 0x00000015
+#define MC_IO_PAD_CNTL_D1__DELAY_ADR_SYNC_MASK 0x00000010L
+#define MC_IO_PAD_CNTL_D1__DELAY_ADR_SYNC__SHIFT 0x00000004
+#define MC_IO_PAD_CNTL_D1__DELAY_CLK_SYNC_MASK 0x00000004L
+#define MC_IO_PAD_CNTL_D1__DELAY_CLK_SYNC__SHIFT 0x00000002
+#define MC_IO_PAD_CNTL_D1__DELAY_CMD_SYNC_MASK 0x00000008L
+#define MC_IO_PAD_CNTL_D1__DELAY_CMD_SYNC__SHIFT 0x00000003
+#define MC_IO_PAD_CNTL_D1__DELAY_DATA_SYNC_MASK 0x00000001L
+#define MC_IO_PAD_CNTL_D1__DELAY_DATA_SYNC__SHIFT 0x00000000
+#define MC_IO_PAD_CNTL_D1__DELAY_STR_SYNC_MASK 0x00000002L
+#define MC_IO_PAD_CNTL_D1__DELAY_STR_SYNC__SHIFT 0x00000001
+#define MC_IO_PAD_CNTL_D1__DIFF_STR_MASK 0x20000000L
+#define MC_IO_PAD_CNTL_D1__DIFF_STR__SHIFT 0x0000001d
+#define MC_IO_PAD_CNTL_D1__DISABLE_ADR_MASK 0x00002000L
+#define MC_IO_PAD_CNTL_D1__DISABLE_ADR__SHIFT 0x0000000d
+#define MC_IO_PAD_CNTL_D1__DISABLE_CMD_MASK 0x00001000L
+#define MC_IO_PAD_CNTL_D1__DISABLE_CMD__SHIFT 0x0000000c
+#define MC_IO_PAD_CNTL_D1__EN_RD_STR_DLY_MASK 0x00000800L
+#define MC_IO_PAD_CNTL_D1__EN_RD_STR_DLY__SHIFT 0x0000000b
+#define MC_IO_PAD_CNTL_D1__FORCE_EN_RD_STR_MASK 0x00000400L
+#define MC_IO_PAD_CNTL_D1__FORCE_EN_RD_STR__SHIFT 0x0000000a
+#define MC_IO_PAD_CNTL_D1__GDDR_PWRON_MASK 0x40000000L
+#define MC_IO_PAD_CNTL_D1__GDDR_PWRON__SHIFT 0x0000001e
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_ADR_MASK 0x00000200L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_ADR__SHIFT 0x00000009
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CLK_MASK 0x00000080L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CLK__SHIFT 0x00000007
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CMD_MASK 0x00000100L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_CMD__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_DATA_MASK 0x00000020L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_DATA__SHIFT 0x00000005
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_STR_MASK 0x00000040L
+#define MC_IO_PAD_CNTL_D1__MEM_FALL_OUT_STR__SHIFT 0x00000006
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CKE_MASK 0x08000000L
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CKE__SHIFT 0x0000001b
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CLK_MASK 0x80000000L
+#define MC_IO_PAD_CNTL_D1__TXPWROFF_CLK__SHIFT 0x0000001f
+#define MC_IO_PAD_CNTL_D1__UNI_STR_MASK 0x10000000L
+#define MC_IO_PAD_CNTL_D1__UNI_STR__SHIFT 0x0000001c
+#define MC_IO_PAD_CNTL_D1__VREFI_EN_MASK 0x00004000L
+#define MC_IO_PAD_CNTL_D1__VREFI_EN__SHIFT 0x0000000e
+#define MC_IO_PAD_CNTL_D1__VREFI_SEL_MASK 0x000f8000L
+#define MC_IO_PAD_CNTL_D1__VREFI_SEL__SHIFT 0x0000000f
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MAX_MASK 0x0000ff00L
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MAX__SHIFT 0x00000008
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MIN_MASK 0x000000ffL
+#define MC_IO_PAD_CNTL__MEM_IO_IMP_MIN__SHIFT 0x00000000
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D0_MASK 0x00040000L
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D0__SHIFT 0x00000012
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D1_MASK 0x00080000L
+#define MC_IO_PAD_CNTL__OVL_YCLKON_D1__SHIFT 0x00000013
+#define MC_IO_PAD_CNTL__RXPHASE_GRAY_MASK 0x00020000L
+#define MC_IO_PAD_CNTL__RXPHASE_GRAY__SHIFT 0x00000011
+#define MC_IO_PAD_CNTL__TXPHASE_GRAY_MASK 0x00010000L
+#define MC_IO_PAD_CNTL__TXPHASE_GRAY__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D0__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY0_D0__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY0_D0__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY0_D0__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY0_D0__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY0_D1__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY0_D1__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY0_D1__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY0_D1__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY0_D1__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY1_D0__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY1_D0__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY1_D0__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY1_D0__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY1_D0__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL1_DPHY1_D1__DLL_RSV_MASK 0xf0000000L
+#define MC_IO_RXCNTL1_DPHY1_D1__DLL_RSV__SHIFT 0x0000001c
+#define MC_IO_RXCNTL1_DPHY1_D1__PMD_LOOPBACK_MASK 0x0e000000L
+#define MC_IO_RXCNTL1_DPHY1_D1__PMD_LOOPBACK__SHIFT 0x00000019
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL1_MSB_MASK 0x0000000fL
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL1_MSB__SHIFT 0x00000000
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL2_MSB_MASK 0x000000f0L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL2_MSB__SHIFT 0x00000004
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL3_MASK 0x0000ff00L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFCAL3__SHIFT 0x00000008
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFPDNB_1_MASK 0x00040000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFPDNB_1__SHIFT 0x00000012
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL2_MASK 0x00010000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL2__SHIFT 0x00000010
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL3_MASK 0x00020000L
+#define MC_IO_RXCNTL1_DPHY1_D1__VREFSEL3__SHIFT 0x00000011
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY0_D0__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY0_D0__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY0_D0__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY0_D0__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY0_D0__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY0_D0__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY0_D0__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY0_D0__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY0_D0__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY0_D0__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY0_D0__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY0_D0__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY0_D0__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY0_D0__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY0_D0__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY0_D0__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY0_D0__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY0_D0__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY0_D1__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY0_D1__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY0_D1__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY0_D1__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY0_D1__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY0_D1__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY0_D1__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY0_D1__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY0_D1__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY0_D1__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY0_D1__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY0_D1__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY0_D1__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY0_D1__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY0_D1__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY0_D1__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY0_D1__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY0_D1__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY1_D0__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY1_D0__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY1_D0__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY1_D0__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY1_D0__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY1_D0__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY1_D0__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY1_D0__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY1_D0__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY1_D0__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY1_D0__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY1_D0__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY1_D0__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY1_D0__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY1_D0__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY1_D0__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY1_D0__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY1_D0__VREFSEL__SHIFT 0x00000010
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B0_MASK 0x00700000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B0__SHIFT 0x00000014
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B1_MASK 0x07000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_B1__SHIFT 0x00000018
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_M_MASK 0x10000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_ADJ_M__SHIFT 0x0000001c
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_BW_CTRL_MASK 0xc0000000L
+#define MC_IO_RXCNTL_DPHY1_D1__DLL_BW_CTRL__SHIFT 0x0000001e
+#define MC_IO_RXCNTL_DPHY1_D1__RCVSEL_MASK 0x00000004L
+#define MC_IO_RXCNTL_DPHY1_D1__RCVSEL__SHIFT 0x00000002
+#define MC_IO_RXCNTL_DPHY1_D1__REFCLK_PWRON_MASK 0x20000000L
+#define MC_IO_RXCNTL_DPHY1_D1__REFCLK_PWRON__SHIFT 0x0000001d
+#define MC_IO_RXCNTL_DPHY1_D1__RXBIASSEL_MASK 0x00000003L
+#define MC_IO_RXCNTL_DPHY1_D1__RXBIASSEL__SHIFT 0x00000000
+#define MC_IO_RXCNTL_DPHY1_D1__RXDPWRON_DLY_MASK 0x00000030L
+#define MC_IO_RXCNTL_DPHY1_D1__RXDPWRON_DLY__SHIFT 0x00000004
+#define MC_IO_RXCNTL_DPHY1_D1__RXLP_MASK 0x00000080L
+#define MC_IO_RXCNTL_DPHY1_D1__RXLP__SHIFT 0x00000007
+#define MC_IO_RXCNTL_DPHY1_D1__RXPDNB_MASK 0x00000040L
+#define MC_IO_RXCNTL_DPHY1_D1__RXPDNB__SHIFT 0x00000006
+#define MC_IO_RXCNTL_DPHY1_D1__RX_PEAKSEL_MASK 0x000c0000L
+#define MC_IO_RXCNTL_DPHY1_D1__RX_PEAKSEL__SHIFT 0x00000012
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_MASK 0x00000f00L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL__SHIFT 0x00000008
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_STR_MASK 0x0000f000L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFCAL_STR__SHIFT 0x0000000c
+#define MC_IO_RXCNTL_DPHY1_D1__VREFPDNB_MASK 0x00000008L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFPDNB__SHIFT 0x00000003
+#define MC_IO_RXCNTL_DPHY1_D1__VREFSEL_MASK 0x00010000L
+#define MC_IO_RXCNTL_DPHY1_D1__VREFSEL__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_APHY_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_APHY_D0__CKE_BIT_MASK 0x40000000L
+#define MC_IO_TXCNTL_APHY_D0__CKE_BIT__SHIFT 0x0000001e
+#define MC_IO_TXCNTL_APHY_D0__CKE_SEL_MASK 0x80000000L
+#define MC_IO_TXCNTL_APHY_D0__CKE_SEL__SHIFT 0x0000001f
+#define MC_IO_TXCNTL_APHY_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_APHY_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_APHY_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_APHY_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_APHY_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_APHY_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_APHY_D0__NDRV_MASK 0x00700000L
+#define MC_IO_TXCNTL_APHY_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_APHY_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_APHY_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D0__PMA_LOOPBACK_MASK 0x0000e000L
+#define MC_IO_TXCNTL_APHY_D0__PMA_LOOPBACK__SHIFT 0x0000000d
+#define MC_IO_TXCNTL_APHY_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_APHY_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_APHY_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_APHY_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_APHY_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_APHY_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_APHY_D0__TXBPASS_SEL_MASK 0x00001000L
+#define MC_IO_TXCNTL_APHY_D0__TXBPASS_SEL__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_DATA_MASK 0x38000000L
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_DATA__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_APHY_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_APHY_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_APHY_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_APHY_D0__TXRESET_MASK 0x02000000L
+#define MC_IO_TXCNTL_APHY_D0__TXRESET__SHIFT 0x00000019
+#define MC_IO_TXCNTL_APHY_D0__YCLKON_MASK 0x00800000L
+#define MC_IO_TXCNTL_APHY_D0__YCLKON__SHIFT 0x00000017
+#define MC_IO_TXCNTL_APHY_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_APHY_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_APHY_D1__CKE_BIT_MASK 0x40000000L
+#define MC_IO_TXCNTL_APHY_D1__CKE_BIT__SHIFT 0x0000001e
+#define MC_IO_TXCNTL_APHY_D1__CKE_SEL_MASK 0x80000000L
+#define MC_IO_TXCNTL_APHY_D1__CKE_SEL__SHIFT 0x0000001f
+#define MC_IO_TXCNTL_APHY_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_APHY_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_APHY_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_APHY_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_APHY_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_APHY_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_APHY_D1__NDRV_MASK 0x00700000L
+#define MC_IO_TXCNTL_APHY_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_APHY_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_APHY_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_APHY_D1__PMA_LOOPBACK_MASK 0x0000e000L
+#define MC_IO_TXCNTL_APHY_D1__PMA_LOOPBACK__SHIFT 0x0000000d
+#define MC_IO_TXCNTL_APHY_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_APHY_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_APHY_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_APHY_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_APHY_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_APHY_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_APHY_D1__TXBPASS_SEL_MASK 0x00001000L
+#define MC_IO_TXCNTL_APHY_D1__TXBPASS_SEL__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_DATA_MASK 0x38000000L
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_DATA__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_APHY_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_APHY_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_APHY_D1__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_APHY_D1__TXRESET_MASK 0x02000000L
+#define MC_IO_TXCNTL_APHY_D1__TXRESET__SHIFT 0x00000019
+#define MC_IO_TXCNTL_APHY_D1__YCLKON_MASK 0x00800000L
+#define MC_IO_TXCNTL_APHY_D1__YCLKON__SHIFT 0x00000017
+#define MC_IO_TXCNTL_DPHY0_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY0_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY0_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY0_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY0_D0__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY0_D0__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY0_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY0_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY0_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY0_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY0_D0__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY0_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY0_D0__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY0_D0__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY0_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY0_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY0_D0__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY0_D0__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY0_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY0_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY0_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY0_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY0_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY0_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY0_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY0_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY0_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY0_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY0_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY0_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY0_D1__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY0_D1__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY0_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY0_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY0_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY0_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY0_D1__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY0_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY0_D1__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY0_D1__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY0_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY0_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY0_D1__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY0_D1__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY0_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY0_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY0_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY0_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY0_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY0_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY0_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY0_D1__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY1_D0__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY1_D0__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY1_D0__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY1_D0__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY1_D0__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY1_D0__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY1_D0__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY1_D0__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY1_D0__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY1_D0__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY1_D0__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY1_D0__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY1_D0__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY1_D0__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY1_D0__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY1_D0__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY1_D0__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY1_D0__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY1_D0__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY1_D0__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY1_D0__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY1_D0__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY1_D0__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY1_D0__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY1_D0__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY1_D0__TXPD__SHIFT 0x00000007
+#define MC_IO_TXCNTL_DPHY1_D1__BIASSEL_MASK 0x00000003L
+#define MC_IO_TXCNTL_DPHY1_D1__BIASSEL__SHIFT 0x00000000
+#define MC_IO_TXCNTL_DPHY1_D1__DRVDUTY_MASK 0x0000000cL
+#define MC_IO_TXCNTL_DPHY1_D1__DRVDUTY__SHIFT 0x00000002
+#define MC_IO_TXCNTL_DPHY1_D1__EDCTX_CLKGATE_EN_MASK 0x02000000L
+#define MC_IO_TXCNTL_DPHY1_D1__EDCTX_CLKGATE_EN__SHIFT 0x00000019
+#define MC_IO_TXCNTL_DPHY1_D1__EMPH_MASK 0x00000040L
+#define MC_IO_TXCNTL_DPHY1_D1__EMPH__SHIFT 0x00000006
+#define MC_IO_TXCNTL_DPHY1_D1__LOWCMEN_MASK 0x00000010L
+#define MC_IO_TXCNTL_DPHY1_D1__LOWCMEN__SHIFT 0x00000004
+#define MC_IO_TXCNTL_DPHY1_D1__NDRV_MASK 0x00f00000L
+#define MC_IO_TXCNTL_DPHY1_D1__NDRV__SHIFT 0x00000014
+#define MC_IO_TXCNTL_DPHY1_D1__NTERM_MASK 0x0000f000L
+#define MC_IO_TXCNTL_DPHY1_D1__NTERM__SHIFT 0x0000000c
+#define MC_IO_TXCNTL_DPHY1_D1__PDRV_MASK 0x000f0000L
+#define MC_IO_TXCNTL_DPHY1_D1__PDRV__SHIFT 0x00000010
+#define MC_IO_TXCNTL_DPHY1_D1__PLL_LOOPBCK_MASK 0x08000000L
+#define MC_IO_TXCNTL_DPHY1_D1__PLL_LOOPBCK__SHIFT 0x0000001b
+#define MC_IO_TXCNTL_DPHY1_D1__PTERM_MASK 0x00000f00L
+#define MC_IO_TXCNTL_DPHY1_D1__PTERM__SHIFT 0x00000008
+#define MC_IO_TXCNTL_DPHY1_D1__QDR_MASK 0x00000020L
+#define MC_IO_TXCNTL_DPHY1_D1__QDR__SHIFT 0x00000005
+#define MC_IO_TXCNTL_DPHY1_D1__TSTEN_MASK 0x01000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TSTEN__SHIFT 0x00000018
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_DATA_MASK 0xf0000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_DATA__SHIFT 0x0000001c
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS_MASK 0x04000000L
+#define MC_IO_TXCNTL_DPHY1_D1__TXBYPASS__SHIFT 0x0000001a
+#define MC_IO_TXCNTL_DPHY1_D1__TXPD_MASK 0x00000080L
+#define MC_IO_TXCNTL_DPHY1_D1__TXPD__SHIFT 0x00000007
+#define MCLK_PWRMGT_CNTL__DLL_READY_MASK 0x00000040L
+#define MCLK_PWRMGT_CNTL__DLL_READY_READ_MASK 0x01000000L
+#define MCLK_PWRMGT_CNTL__DLL_READY_READ__SHIFT 0x00000018
+#define MCLK_PWRMGT_CNTL__DLL_READY__SHIFT 0x00000006
+#define MCLK_PWRMGT_CNTL__DLL_SPEED_MASK 0x0000001fL
+#define MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT 0x00000000
+#define MCLK_PWRMGT_CNTL__MC_INT_CNTL_MASK 0x00000080L
+#define MCLK_PWRMGT_CNTL__MC_INT_CNTL__SHIFT 0x00000007
+#define MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK 0x00000100L
+#define MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT 0x00000008
+#define MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK 0x00010000L
+#define MCLK_PWRMGT_CNTL__MRDCK0_RESET__SHIFT 0x00000010
+#define MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK 0x00000200L
+#define MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT 0x00000009
+#define MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK 0x00020000L
+#define MCLK_PWRMGT_CNTL__MRDCK1_RESET__SHIFT 0x00000011
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000fc0L
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x00000006
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003fL
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x00000000
+#define MC_NPL_STATUS__D0_NDELAY_MASK 0x0000000cL
+#define MC_NPL_STATUS__D0_NDELAY__SHIFT 0x00000002
+#define MC_NPL_STATUS__D0_NEARLY_MASK 0x00000020L
+#define MC_NPL_STATUS__D0_NEARLY__SHIFT 0x00000005
+#define MC_NPL_STATUS__D0_PDELAY_MASK 0x00000003L
+#define MC_NPL_STATUS__D0_PDELAY__SHIFT 0x00000000
+#define MC_NPL_STATUS__D0_PEARLY_MASK 0x00000010L
+#define MC_NPL_STATUS__D0_PEARLY__SHIFT 0x00000004
+#define MC_NPL_STATUS__D1_NDELAY_MASK 0x00000300L
+#define MC_NPL_STATUS__D1_NDELAY__SHIFT 0x00000008
+#define MC_NPL_STATUS__D1_NEARLY_MASK 0x00000800L
+#define MC_NPL_STATUS__D1_NEARLY__SHIFT 0x0000000b
+#define MC_NPL_STATUS__D1_PDELAY_MASK 0x000000c0L
+#define MC_NPL_STATUS__D1_PDELAY__SHIFT 0x00000006
+#define MC_NPL_STATUS__D1_PEARLY_MASK 0x00000400L
+#define MC_NPL_STATUS__D1_PEARLY__SHIFT 0x0000000a
+#define MC_PHY_TIMING_2__ADR_CLKEN_D0_MASK 0x00040000L
+#define MC_PHY_TIMING_2__ADR_CLKEN_D0__SHIFT 0x00000012
+#define MC_PHY_TIMING_2__ADR_CLKEN_D1_MASK 0x00080000L
+#define MC_PHY_TIMING_2__ADR_CLKEN_D1__SHIFT 0x00000013
+#define MC_PHY_TIMING_2__IND_LD_CNT_MASK 0x0000007fL
+#define MC_PHY_TIMING_2__IND_LD_CNT__SHIFT 0x00000000
+#define MC_PHY_TIMING_2__RXC0_FRC_MASK 0x00001000L
+#define MC_PHY_TIMING_2__RXC0_FRC__SHIFT 0x0000000c
+#define MC_PHY_TIMING_2__RXC0_INV_MASK 0x00000100L
+#define MC_PHY_TIMING_2__RXC0_INV__SHIFT 0x00000008
+#define MC_PHY_TIMING_2__RXC1_FRC_MASK 0x00002000L
+#define MC_PHY_TIMING_2__RXC1_FRC__SHIFT 0x0000000d
+#define MC_PHY_TIMING_2__RXC1_INV_MASK 0x00000200L
+#define MC_PHY_TIMING_2__RXC1_INV__SHIFT 0x00000009
+#define MC_PHY_TIMING_2__TXC0_FRC_MASK 0x00004000L
+#define MC_PHY_TIMING_2__TXC0_FRC__SHIFT 0x0000000e
+#define MC_PHY_TIMING_2__TXC0_INV_MASK 0x00000400L
+#define MC_PHY_TIMING_2__TXC0_INV__SHIFT 0x0000000a
+#define MC_PHY_TIMING_2__TXC1_FRC_MASK 0x00008000L
+#define MC_PHY_TIMING_2__TXC1_FRC__SHIFT 0x0000000f
+#define MC_PHY_TIMING_2__TXC1_INV_MASK 0x00000800L
+#define MC_PHY_TIMING_2__TXC1_INV__SHIFT 0x0000000b
+#define MC_PHY_TIMING_2__TX_CDREN_D0_MASK 0x00010000L
+#define MC_PHY_TIMING_2__TX_CDREN_D0__SHIFT 0x00000010
+#define MC_PHY_TIMING_2__TX_CDREN_D1_MASK 0x00020000L
+#define MC_PHY_TIMING_2__TX_CDREN_D1__SHIFT 0x00000011
+#define MC_PHY_TIMING_2__WR_DLY_MASK 0x00f00000L
+#define MC_PHY_TIMING_2__WR_DLY__SHIFT 0x00000014
+#define MC_PHY_TIMING_D0__RXC0_DLY_MASK 0x0000000fL
+#define MC_PHY_TIMING_D0__RXC0_DLY__SHIFT 0x00000000
+#define MC_PHY_TIMING_D0__RXC0_EXT_MASK 0x000000f0L
+#define MC_PHY_TIMING_D0__RXC0_EXT__SHIFT 0x00000004
+#define MC_PHY_TIMING_D0__RXC1_DLY_MASK 0x00000f00L
+#define MC_PHY_TIMING_D0__RXC1_DLY__SHIFT 0x00000008
+#define MC_PHY_TIMING_D0__RXC1_EXT_MASK 0x0000f000L
+#define MC_PHY_TIMING_D0__RXC1_EXT__SHIFT 0x0000000c
+#define MC_PHY_TIMING_D0__TXC0_DLY_MASK 0x00070000L
+#define MC_PHY_TIMING_D0__TXC0_DLY__SHIFT 0x00000010
+#define MC_PHY_TIMING_D0__TXC0_EXT_MASK 0x00f00000L
+#define MC_PHY_TIMING_D0__TXC0_EXT__SHIFT 0x00000014
+#define MC_PHY_TIMING_D0__TXC1_DLY_MASK 0x07000000L
+#define MC_PHY_TIMING_D0__TXC1_DLY__SHIFT 0x00000018
+#define MC_PHY_TIMING_D0__TXC1_EXT_MASK 0xf0000000L
+#define MC_PHY_TIMING_D0__TXC1_EXT__SHIFT 0x0000001c
+#define MC_PHY_TIMING_D1__RXC0_DLY_MASK 0x0000000fL
+#define MC_PHY_TIMING_D1__RXC0_DLY__SHIFT 0x00000000
+#define MC_PHY_TIMING_D1__RXC0_EXT_MASK 0x000000f0L
+#define MC_PHY_TIMING_D1__RXC0_EXT__SHIFT 0x00000004
+#define MC_PHY_TIMING_D1__RXC1_DLY_MASK 0x00000f00L
+#define MC_PHY_TIMING_D1__RXC1_DLY__SHIFT 0x00000008
+#define MC_PHY_TIMING_D1__RXC1_EXT_MASK 0x0000f000L
+#define MC_PHY_TIMING_D1__RXC1_EXT__SHIFT 0x0000000c
+#define MC_PHY_TIMING_D1__TXC0_DLY_MASK 0x00070000L
+#define MC_PHY_TIMING_D1__TXC0_DLY__SHIFT 0x00000010
+#define MC_PHY_TIMING_D1__TXC0_EXT_MASK 0x00f00000L
+#define MC_PHY_TIMING_D1__TXC0_EXT__SHIFT 0x00000014
+#define MC_PHY_TIMING_D1__TXC1_DLY_MASK 0x07000000L
+#define MC_PHY_TIMING_D1__TXC1_DLY__SHIFT 0x00000018
+#define MC_PHY_TIMING_D1__TXC1_EXT_MASK 0xf0000000L
+#define MC_PHY_TIMING_D1__TXC1_EXT__SHIFT 0x0000001c
+#define MC_PMG_AUTO_CFG__DLL_CNT_MASK 0xff000000L
+#define MC_PMG_AUTO_CFG__DLL_CNT__SHIFT 0x00000018
+#define MC_PMG_AUTO_CFG__EXIT_ALLOW_STOP_MASK 0x00000800L
+#define MC_PMG_AUTO_CFG__EXIT_ALLOW_STOP__SHIFT 0x0000000b
+#define MC_PMG_AUTO_CFG__MRS_WAIT_CNT_MASK 0x000f0000L
+#define MC_PMG_AUTO_CFG__MRS_WAIT_CNT__SHIFT 0x00000010
+#define MC_PMG_AUTO_CFG__PREA_SRX_MASK 0x00002000L
+#define MC_PMG_AUTO_CFG__PREA_SRX__SHIFT 0x0000000d
+#define MC_PMG_AUTO_CFG__RFS_SRX_MASK 0x00001000L
+#define MC_PMG_AUTO_CFG__RFS_SRX__SHIFT 0x0000000c
+#define MC_PMG_AUTO_CFG__RST_MRS_MASK 0x00000002L
+#define MC_PMG_AUTO_CFG__RST_MRS__SHIFT 0x00000001
+#define MC_PMG_AUTO_CFG__RXPDNB_MASK 0x00400000L
+#define MC_PMG_AUTO_CFG__RXPDNB__SHIFT 0x00000016
+#define MC_PMG_AUTO_CFG__SCDS_MODE_MASK 0x00000400L
+#define MC_PMG_AUTO_CFG__SCDS_MODE__SHIFT 0x0000000a
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_0_MASK 0x00008000L
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_0__SHIFT 0x0000000f
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_1_MASK 0x00800000L
+#define MC_PMG_AUTO_CFG__SELFREFR_COMMIT_1__SHIFT 0x00000017
+#define MC_PMG_AUTO_CFG__SS_ALWAYS_SLF_MASK 0x00000100L
+#define MC_PMG_AUTO_CFG__SS_ALWAYS_SLF__SHIFT 0x00000008
+#define MC_PMG_AUTO_CFG__SS_S_SLF_MASK 0x00000200L
+#define MC_PMG_AUTO_CFG__SS_S_SLF__SHIFT 0x00000009
+#define MC_PMG_AUTO_CFG__STUTTER_EN_MASK 0x00004000L
+#define MC_PMG_AUTO_CFG__STUTTER_EN__SHIFT 0x0000000e
+#define MC_PMG_AUTO_CFG__SYC_CLK_MASK 0x00000001L
+#define MC_PMG_AUTO_CFG__SYC_CLK__SHIFT 0x00000000
+#define MC_PMG_AUTO_CFG__TRI_MIO_MASK 0x00000004L
+#define MC_PMG_AUTO_CFG__TRI_MIO__SHIFT 0x00000002
+#define MC_PMG_AUTO_CFG__WRITE_DURING_DLOCK_MASK 0x00100000L
+#define MC_PMG_AUTO_CFG__WRITE_DURING_DLOCK__SHIFT 0x00000014
+#define MC_PMG_AUTO_CFG__XSR_TMR_MASK 0x000000f0L
+#define MC_PMG_AUTO_CFG__XSR_TMR__SHIFT 0x00000004
+#define MC_PMG_AUTO_CFG__YCLK_ON_MASK 0x00200000L
+#define MC_PMG_AUTO_CFG__YCLK_ON__SHIFT 0x00000015
+#define MC_PMG_AUTO_CMD__ADR_MASK 0x0001ffffL
+#define MC_PMG_AUTO_CMD__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_AUTO_CMD__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_AUTO_CMD__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_AUTO_CMD__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_AUTO_CMD__ADR__SHIFT 0x00000000
+#define MC_PMG_CFG__DPM_WAKE_MASK 0x00000400L
+#define MC_PMG_CFG__DPM_WAKE__SHIFT 0x0000000a
+#define MC_PMG_CFG__EARLY_ACK_ACPI_MASK 0x00400000L
+#define MC_PMG_CFG__EARLY_ACK_ACPI__SHIFT 0x00000016
+#define MC_PMG_CFG__MRS_WAIT_CNT_MASK 0x000f0000L
+#define MC_PMG_CFG__MRS_WAIT_CNT__SHIFT 0x00000010
+#define MC_PMG_CFG__PREA_SRX_MASK 0x00002000L
+#define MC_PMG_CFG__PREA_SRX__SHIFT 0x0000000d
+#define MC_PMG_CFG__RFS_SRX_MASK 0x00001000L
+#define MC_PMG_CFG__RFS_SRX__SHIFT 0x0000000c
+#define MC_PMG_CFG__RST_EMRS_MASK 0x00000004L
+#define MC_PMG_CFG__RST_EMRS__SHIFT 0x00000002
+#define MC_PMG_CFG__RST_MRS1_MASK 0x00000100L
+#define MC_PMG_CFG__RST_MRS1__SHIFT 0x00000008
+#define MC_PMG_CFG__RST_MRS2_MASK 0x00000200L
+#define MC_PMG_CFG__RST_MRS2__SHIFT 0x00000009
+#define MC_PMG_CFG__RST_MRS_MASK 0x00000002L
+#define MC_PMG_CFG__RST_MRS__SHIFT 0x00000001
+#define MC_PMG_CFG__RXPDNB_MASK 0x02000000L
+#define MC_PMG_CFG__RXPDNB__SHIFT 0x00000019
+#define MC_PMG_CFG__SYC_CLK_MASK 0x00000001L
+#define MC_PMG_CFG__SYC_CLK__SHIFT 0x00000000
+#define MC_PMG_CFG__TRI_MIO_MASK 0x00000008L
+#define MC_PMG_CFG__TRI_MIO__SHIFT 0x00000003
+#define MC_PMG_CFG__WRITE_DURING_DLOCK_MASK 0x00100000L
+#define MC_PMG_CFG__WRITE_DURING_DLOCK__SHIFT 0x00000014
+#define MC_PMG_CFG__XSR_TMR_MASK 0x000000f0L
+#define MC_PMG_CFG__XSR_TMR__SHIFT 0x00000004
+#define MC_PMG_CFG__YCLK_ON_MASK 0x00200000L
+#define MC_PMG_CFG__YCLK_ON__SHIFT 0x00000015
+#define MC_PMG_CFG__ZQCL_SEND_MASK 0x0c000000L
+#define MC_PMG_CFG__ZQCL_SEND__SHIFT 0x0000001a
+#define MC_PMG_CMD_EMRS__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_EMRS__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_EMRS__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_EMRS__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_EMRS__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_EMRS__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_EMRS__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_EMRS__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_EMRS__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_EMRS__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_EMRS__END_MASK 0x00100000L
+#define MC_PMG_CMD_EMRS__END__SHIFT 0x00000014
+#define MC_PMG_CMD_EMRS__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_EMRS__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS1__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS1__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS1__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS1__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS1__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS1__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS1__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS1__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS1__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS1__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS1__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS1__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS1__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS1__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS2__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS2__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS2__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS2__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS2__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS2__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS2__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS2__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS2__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS2__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS2__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS2__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS2__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS2__MOP__SHIFT 0x00000010
+#define MC_PMG_CMD_MRS__ADR_MASK 0x0000ffffL
+#define MC_PMG_CMD_MRS__ADR_MSB0_MASK 0x20000000L
+#define MC_PMG_CMD_MRS__ADR_MSB0__SHIFT 0x0000001d
+#define MC_PMG_CMD_MRS__ADR_MSB1_MASK 0x10000000L
+#define MC_PMG_CMD_MRS__ADR_MSB1__SHIFT 0x0000001c
+#define MC_PMG_CMD_MRS__ADR__SHIFT 0x00000000
+#define MC_PMG_CMD_MRS__BNK_MSB_MASK 0x00080000L
+#define MC_PMG_CMD_MRS__BNK_MSB__SHIFT 0x00000013
+#define MC_PMG_CMD_MRS__CSB_MASK 0x00600000L
+#define MC_PMG_CMD_MRS__CSB__SHIFT 0x00000015
+#define MC_PMG_CMD_MRS__END_MASK 0x00100000L
+#define MC_PMG_CMD_MRS__END__SHIFT 0x00000014
+#define MC_PMG_CMD_MRS__MOP_MASK 0x00070000L
+#define MC_PMG_CMD_MRS__MOP__SHIFT 0x00000010
+#define MC_RD_CB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_CB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_CB__ENABLE_MASK 0x00000001L
+#define MC_RD_CB__ENABLE__SHIFT 0x00000000
+#define MC_RD_CB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_CB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_CB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_CB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_CB__PRESCALE_MASK 0x00000006L
+#define MC_RD_CB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_CB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_CB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_CB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_CB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_CB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_CB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_DB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_DB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_DB__ENABLE_MASK 0x00000001L
+#define MC_RD_DB__ENABLE__SHIFT 0x00000000
+#define MC_RD_DB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_DB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_DB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_DB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_DB__PRESCALE_MASK 0x00000006L
+#define MC_RD_DB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_DB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_DB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_DB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_DB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_DB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_DB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_GRP_EXT__DBSTEN0_MASK 0x0000000fL
+#define MC_RD_GRP_EXT__DBSTEN0__SHIFT 0x00000000
+#define MC_RD_GRP_EXT__TC0_MASK 0x000000f0L
+#define MC_RD_GRP_EXT__TC0__SHIFT 0x00000004
+#define MC_RD_GRP_GFX__CP_MASK 0x0000000fL
+#define MC_RD_GRP_GFX__CP__SHIFT 0x00000000
+#define MC_RD_GRP_GFX__XDMAM_MASK 0x000f0000L
+#define MC_RD_GRP_GFX__XDMAM__SHIFT 0x00000010
+#define MC_RD_GRP_LCL__CB0_MASK 0x0000f000L
+#define MC_RD_GRP_LCL__CB0__SHIFT 0x0000000c
+#define MC_RD_GRP_LCL__CBCMASK0_MASK 0x000f0000L
+#define MC_RD_GRP_LCL__CBCMASK0__SHIFT 0x00000010
+#define MC_RD_GRP_LCL__CBFMASK0_MASK 0x00f00000L
+#define MC_RD_GRP_LCL__CBFMASK0__SHIFT 0x00000014
+#define MC_RD_GRP_LCL__DB0_MASK 0x0f000000L
+#define MC_RD_GRP_LCL__DB0__SHIFT 0x00000018
+#define MC_RD_GRP_LCL__DBHTILE0_MASK 0xf0000000L
+#define MC_RD_GRP_LCL__DBHTILE0__SHIFT 0x0000001c
+#define MC_RD_GRP_OTH__HDP_MASK 0x00000f00L
+#define MC_RD_GRP_OTH__HDP__SHIFT 0x00000008
+#define MC_RD_GRP_OTH__SEM_MASK 0x0000f000L
+#define MC_RD_GRP_OTH__SEM__SHIFT 0x0000000c
+#define MC_RD_GRP_OTH__UMC_MASK 0x000f0000L
+#define MC_RD_GRP_OTH__UMC__SHIFT 0x00000010
+#define MC_RD_GRP_OTH__UVD_EXT0_MASK 0x0000000fL
+#define MC_RD_GRP_OTH__UVD_EXT0__SHIFT 0x00000000
+#define MC_RD_GRP_OTH__UVD_EXT1_MASK 0x0f000000L
+#define MC_RD_GRP_OTH__UVD_EXT1__SHIFT 0x00000018
+#define MC_RD_GRP_OTH__UVD_MASK 0x00f00000L
+#define MC_RD_GRP_OTH__UVD__SHIFT 0x00000014
+#define MC_RD_GRP_SYS__DMIF_MASK 0x0000f000L
+#define MC_RD_GRP_SYS__DMIF__SHIFT 0x0000000c
+#define MC_RD_GRP_SYS__MCIF_MASK 0x000f0000L
+#define MC_RD_GRP_SYS__MCIF__SHIFT 0x00000010
+#define MC_RD_GRP_SYS__RLC_MASK 0x0000000fL
+#define MC_RD_GRP_SYS__RLC__SHIFT 0x00000000
+#define MC_RD_GRP_SYS__SMU_MASK 0x00f00000L
+#define MC_RD_GRP_SYS__SMU__SHIFT 0x00000014
+#define MC_RD_GRP_SYS__VCE_MASK 0x0f000000L
+#define MC_RD_GRP_SYS__VCE__SHIFT 0x00000018
+#define MC_RD_GRP_SYS__VCEU_MASK 0xf0000000L
+#define MC_RD_GRP_SYS__VCEU__SHIFT 0x0000001c
+#define MC_RD_GRP_SYS__VMC_MASK 0x000000f0L
+#define MC_RD_GRP_SYS__VMC__SHIFT 0x00000004
+#define MC_RD_HUB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_HUB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_HUB__ENABLE_MASK 0x00000001L
+#define MC_RD_HUB__ENABLE__SHIFT 0x00000000
+#define MC_RD_HUB__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_HUB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_HUB__MAX_BURST_MASK 0x00000780L
+#define MC_RD_HUB__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_HUB__PRESCALE_MASK 0x00000006L
+#define MC_RD_HUB__PRESCALE__SHIFT 0x00000001
+#define MC_RD_HUB__STALL_MODE_MASK 0x00000030L
+#define MC_RD_HUB__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_HUB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_HUB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_HUB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_HUB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_TC0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_TC0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_TC0__ENABLE_MASK 0x00000001L
+#define MC_RD_TC0__ENABLE__SHIFT 0x00000000
+#define MC_RD_TC0__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_TC0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_TC0__MAX_BURST_MASK 0x00000780L
+#define MC_RD_TC0__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_TC0__PRESCALE_MASK 0x00000006L
+#define MC_RD_TC0__PRESCALE__SHIFT 0x00000001
+#define MC_RD_TC0__STALL_MODE_MASK 0x00000030L
+#define MC_RD_TC0__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_TC0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_TC0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_TC0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_TC0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RD_TC1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_RD_TC1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_RD_TC1__ENABLE_MASK 0x00000001L
+#define MC_RD_TC1__ENABLE__SHIFT 0x00000000
+#define MC_RD_TC1__LAZY_TIMER_MASK 0x00007800L
+#define MC_RD_TC1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_RD_TC1__MAX_BURST_MASK 0x00000780L
+#define MC_RD_TC1__MAX_BURST__SHIFT 0x00000007
+#define MC_RD_TC1__PRESCALE_MASK 0x00000006L
+#define MC_RD_TC1__PRESCALE__SHIFT 0x00000001
+#define MC_RD_TC1__STALL_MODE_MASK 0x00000030L
+#define MC_RD_TC1__STALL_MODE__SHIFT 0x00000004
+#define MC_RD_TC1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_RD_TC1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_RD_TC1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_RD_TC1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_RPB_ARB_CNTL__ATC_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_ARB_CNTL__ATC_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_BIF_CNTL__ARB_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_BIF_CNTL__ARB_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_BIF_CNTL__XPB_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_BIF_CNTL__XPB_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_EX_DATA__READ_ENTRIES_MASK 0xffff0000L
+#define MC_RPB_CID_QUEUE_EX_DATA__READ_ENTRIES__SHIFT 0x00000010
+#define MC_RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES_MASK 0x0000ffffL
+#define MC_RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_EX__OFFSET_MASK 0x0000003eL
+#define MC_RPB_CID_QUEUE_EX__OFFSET__SHIFT 0x00000001
+#define MC_RPB_CID_QUEUE_EX__START_MASK 0x00000001L
+#define MC_RPB_CID_QUEUE_EX__START__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_RD__CLIENT_ID_MASK 0x000000ffL
+#define MC_RPB_CID_QUEUE_RD__CLIENT_ID__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_RD__READ_QUEUE_MASK 0x00000c00L
+#define MC_RPB_CID_QUEUE_RD__READ_QUEUE__SHIFT 0x0000000a
+#define MC_RPB_CID_QUEUE_RD__WRITE_QUEUE_MASK 0x00000300L
+#define MC_RPB_CID_QUEUE_RD__WRITE_QUEUE__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_WR__CLIENT_ID_MASK 0x000000ffL
+#define MC_RPB_CID_QUEUE_WR__CLIENT_ID__SHIFT 0x00000000
+#define MC_RPB_CID_QUEUE_WR__READ_QUEUE_MASK 0x00001800L
+#define MC_RPB_CID_QUEUE_WR__READ_QUEUE__SHIFT 0x0000000b
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MASK 0x00002000L
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MODE_MASK 0x00000100L
+#define MC_RPB_CID_QUEUE_WR__UPDATE_MODE__SHIFT 0x00000008
+#define MC_RPB_CID_QUEUE_WR__UPDATE__SHIFT 0x0000000d
+#define MC_RPB_CID_QUEUE_WR__WRITE_QUEUE_MASK 0x00000600L
+#define MC_RPB_CID_QUEUE_WR__WRITE_QUEUE__SHIFT 0x00000009
+#define MC_RPB_CONF__RPB_RD_PCIE_ORDER_MASK 0x00010000L
+#define MC_RPB_CONF__RPB_RD_PCIE_ORDER__SHIFT 0x00000010
+#define MC_RPB_CONF__RPB_WR_PCIE_ORDER_MASK 0x00020000L
+#define MC_RPB_CONF__RPB_WR_PCIE_ORDER__SHIFT 0x00000011
+#define MC_RPB_CONF__XPB_PCIE_ORDER_MASK 0x00008000L
+#define MC_RPB_CONF__XPB_PCIE_ORDER__SHIFT 0x0000000f
+#define MC_RPB_DBG1__DEBUG_BITS_MASK 0xfff00000L
+#define MC_RPB_DBG1__DEBUG_BITS__SHIFT 0x00000014
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_32B_MASK 0x000fff00L
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_32B__SHIFT 0x00000008
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD_MASK 0x000000ffL
+#define MC_RPB_DBG1__RPB_BIF_OUTSTANDING_RD__SHIFT 0x00000000
+#define MC_RPB_EFF_CNTL__RD_LAZY_TIMER_MASK 0x0000ff00L
+#define MC_RPB_EFF_CNTL__RD_LAZY_TIMER__SHIFT 0x00000008
+#define MC_RPB_EFF_CNTL__WR_LAZY_TIMER_MASK 0x000000ffL
+#define MC_RPB_EFF_CNTL__WR_LAZY_TIMER__SHIFT 0x00000000
+#define MC_RPB_IF_CONF__OUTSTANDING_WRRET_ASK_MASK 0x0000ff00L
+#define MC_RPB_IF_CONF__OUTSTANDING_WRRET_ASK__SHIFT 0x00000008
+#define MC_RPB_IF_CONF__RPB_BIF_CREDITS_MASK 0x000000ffL
+#define MC_RPB_IF_CONF__RPB_BIF_CREDITS__SHIFT 0x00000000
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x00000003
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L
+#define MC_RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x00000002
+#define MC_RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001e0L
+#define MC_RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x00000005
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003e00L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x00000009
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007c000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0x0000000e
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00f80000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x00000013
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1f000000L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x00000018
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L
+#define MC_RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x00000000
+#define MC_RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L
+#define MC_RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x00000004
+#define MC_RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xffffffffL
+#define MC_RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x00000000
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0xff000000L
+#define MC_RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x00000018
+#define MC_RPB_WR_COMBINE_CNTL__WC_ALIGN_MASK 0x00000080L
+#define MC_RPB_WR_COMBINE_CNTL__WC_ALIGN__SHIFT 0x00000007
+#define MC_RPB_WR_COMBINE_CNTL__WC_ENABLE_MASK 0x00000001L
+#define MC_RPB_WR_COMBINE_CNTL__WC_ENABLE__SHIFT 0x00000000
+#define MC_RPB_WR_COMBINE_CNTL__WC_FLUSH_TIMER_MASK 0x00000078L
+#define MC_RPB_WR_COMBINE_CNTL__WC_FLUSH_TIMER__SHIFT 0x00000003
+#define MC_RPB_WR_COMBINE_CNTL__WC_MAX_PACKET_SIZE_MASK 0x00000006L
+#define MC_RPB_WR_COMBINE_CNTL__WC_MAX_PACKET_SIZE__SHIFT 0x00000001
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x000000ffL
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x00000000
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x0000ff00L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x00000008
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x00ff0000L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0x00000010
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0xff000000L
+#define MC_RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x00000018
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B0_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B0_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B1_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B1_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B2_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B2_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B3_D0__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT0_MASK 0x00000007L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT0__SHIFT 0x00000000
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT1_MASK 0x00000038L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT1__SHIFT 0x00000003
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT2_MASK 0x000001c0L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT2__SHIFT 0x00000006
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT3_MASK 0x00000e00L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT3__SHIFT 0x00000009
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT4_MASK 0x00007000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT4__SHIFT 0x0000000c
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT5_MASK 0x00038000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT5__SHIFT 0x0000000f
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT6_MASK 0x001c0000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT6__SHIFT 0x00000012
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT7_MASK 0x00e00000L
+#define MC_SEQ_BIT_REMAP_B3_D1__BIT7__SHIFT 0x00000015
+#define MC_SEQ_BYTE_REMAP_D0__BYTE0_MASK 0x00000003L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE0__SHIFT 0x00000000
+#define MC_SEQ_BYTE_REMAP_D0__BYTE1_MASK 0x0000000cL
+#define MC_SEQ_BYTE_REMAP_D0__BYTE1__SHIFT 0x00000002
+#define MC_SEQ_BYTE_REMAP_D0__BYTE2_MASK 0x00000030L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE2__SHIFT 0x00000004
+#define MC_SEQ_BYTE_REMAP_D0__BYTE3_MASK 0x000000c0L
+#define MC_SEQ_BYTE_REMAP_D0__BYTE3__SHIFT 0x00000006
+#define MC_SEQ_BYTE_REMAP_D1__BYTE0_MASK 0x00000003L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE0__SHIFT 0x00000000
+#define MC_SEQ_BYTE_REMAP_D1__BYTE1_MASK 0x0000000cL
+#define MC_SEQ_BYTE_REMAP_D1__BYTE1__SHIFT 0x00000002
+#define MC_SEQ_BYTE_REMAP_D1__BYTE2_MASK 0x00000030L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE2__SHIFT 0x00000004
+#define MC_SEQ_BYTE_REMAP_D1__BYTE3_MASK 0x000000c0L
+#define MC_SEQ_BYTE_REMAP_D1__BYTE3__SHIFT 0x00000006
+#define MC_SEQ_CAS_TIMING_LP__TCCDL_MASK 0x00000e00L
+#define MC_SEQ_CAS_TIMING_LP__TCCDL__SHIFT 0x00000009
+#define MC_SEQ_CAS_TIMING_LP__TCL_MASK 0x1f000000L
+#define MC_SEQ_CAS_TIMING_LP__TCL__SHIFT 0x00000018
+#define MC_SEQ_CAS_TIMING_LP__TNOPR_MASK 0x0000000cL
+#define MC_SEQ_CAS_TIMING_LP__TNOPR__SHIFT 0x00000002
+#define MC_SEQ_CAS_TIMING_LP__TNOPW_MASK 0x00000003L
+#define MC_SEQ_CAS_TIMING_LP__TNOPW__SHIFT 0x00000000
+#define MC_SEQ_CAS_TIMING_LP__TR2R_MASK 0x0000f000L
+#define MC_SEQ_CAS_TIMING_LP__TR2R__SHIFT 0x0000000c
+#define MC_SEQ_CAS_TIMING_LP__TR2W_MASK 0x000001f0L
+#define MC_SEQ_CAS_TIMING_LP__TR2W__SHIFT 0x00000004
+#define MC_SEQ_CAS_TIMING_LP__TW2R_MASK 0x001f0000L
+#define MC_SEQ_CAS_TIMING_LP__TW2R__SHIFT 0x00000010
+#define MC_SEQ_CAS_TIMING__TCCDL_MASK 0x00000e00L
+#define MC_SEQ_CAS_TIMING__TCCDL__SHIFT 0x00000009
+#define MC_SEQ_CAS_TIMING__TCL_MASK 0x1f000000L
+#define MC_SEQ_CAS_TIMING__TCL__SHIFT 0x00000018
+#define MC_SEQ_CAS_TIMING__TNOPR_MASK 0x0000000cL
+#define MC_SEQ_CAS_TIMING__TNOPR__SHIFT 0x00000002
+#define MC_SEQ_CAS_TIMING__TNOPW_MASK 0x00000003L
+#define MC_SEQ_CAS_TIMING__TNOPW__SHIFT 0x00000000
+#define MC_SEQ_CAS_TIMING__TR2R_MASK 0x0000f000L
+#define MC_SEQ_CAS_TIMING__TR2R__SHIFT 0x0000000c
+#define MC_SEQ_CAS_TIMING__TR2W_MASK 0x000001f0L
+#define MC_SEQ_CAS_TIMING__TR2W__SHIFT 0x00000004
+#define MC_SEQ_CAS_TIMING__TW2R_MASK 0x001f0000L
+#define MC_SEQ_CAS_TIMING__TW2R__SHIFT 0x00000010
+#define MC_SEQ_CG__CG_SEQ_REQ_MASK 0x000000ffL
+#define MC_SEQ_CG__CG_SEQ_REQ__SHIFT 0x00000000
+#define MC_SEQ_CG__CG_SEQ_RESP_MASK 0x0000ff00L
+#define MC_SEQ_CG__CG_SEQ_RESP__SHIFT 0x00000008
+#define MC_SEQ_CG__SEQ_CG_REQ_MASK 0x00ff0000L
+#define MC_SEQ_CG__SEQ_CG_REQ__SHIFT 0x00000010
+#define MC_SEQ_CG__SEQ_CG_RESP_MASK 0xff000000L
+#define MC_SEQ_CG__SEQ_CG_RESP__SHIFT 0x00000018
+#define MC_SEQ_CMD__ADR_MASK 0x0000ffffL
+#define MC_SEQ_CMD__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_CMD__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_CMD__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_CMD__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_CMD__ADR__SHIFT 0x00000000
+#define MC_SEQ_CMD__CHAN0_MASK 0x01000000L
+#define MC_SEQ_CMD__CHAN0__SHIFT 0x00000018
+#define MC_SEQ_CMD__CHAN1_MASK 0x02000000L
+#define MC_SEQ_CMD__CHAN1__SHIFT 0x00000019
+#define MC_SEQ_CMD__CSB_MASK 0x00600000L
+#define MC_SEQ_CMD__CSB__SHIFT 0x00000015
+#define MC_SEQ_CMD__END_MASK 0x00100000L
+#define MC_SEQ_CMD__END__SHIFT 0x00000014
+#define MC_SEQ_CMD__MOP_MASK 0x000f0000L
+#define MC_SEQ_CMD__MOP__SHIFT 0x00000010
+#define MC_SEQ_CNTL_2__ARB_RTDAT_WMK_MSB_MASK 0x00000300L
+#define MC_SEQ_CNTL_2__ARB_RTDAT_WMK_MSB__SHIFT 0x00000008
+#define MC_SEQ_CNTL_2__DRST_NSTR_MASK 0x0000fc00L
+#define MC_SEQ_CNTL_2__DRST_NSTR__SHIFT 0x0000000a
+#define MC_SEQ_CNTL_2__DRST_PSTR_MASK 0x003f0000L
+#define MC_SEQ_CNTL_2__DRST_PSTR__SHIFT 0x00000010
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D0_MASK 0x0f000000L
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D0__SHIFT 0x00000018
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D1_MASK 0xf0000000L
+#define MC_SEQ_CNTL_2__PLL_RX_PWRON_D1__SHIFT 0x0000001c
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D0_MASK 0x00400000L
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D0__SHIFT 0x00000016
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D1_MASK 0x00800000L
+#define MC_SEQ_CNTL_2__PLL_TX_PWRON_D1__SHIFT 0x00000017
+#define MC_SEQ_CNTL__ARB_REQCMD_WMK_MASK 0x00f00000L
+#define MC_SEQ_CNTL__ARB_REQCMD_WMK__SHIFT 0x00000014
+#define MC_SEQ_CNTL__ARB_REQDAT_WMK_MASK 0x0f000000L
+#define MC_SEQ_CNTL__ARB_REQDAT_WMK__SHIFT 0x00000018
+#define MC_SEQ_CNTL__ARB_RTDAT_WMK_MASK 0xf0000000L
+#define MC_SEQ_CNTL__ARB_RTDAT_WMK__SHIFT 0x0000001c
+#define MC_SEQ_CNTL__BANKGROUP_ENB_MASK 0x00040000L
+#define MC_SEQ_CNTL__BANKGROUP_ENB__SHIFT 0x00000012
+#define MC_SEQ_CNTL__BANKGROUP_SIZE_MASK 0x00020000L
+#define MC_SEQ_CNTL__BANKGROUP_SIZE__SHIFT 0x00000011
+#define MC_SEQ_CNTL__CHANNEL_DISABLE_MASK 0x00000300L
+#define MC_SEQ_CNTL__CHANNEL_DISABLE__SHIFT 0x00000008
+#define MC_SEQ_CNTL__DAT_INV_MASK 0x00000040L
+#define MC_SEQ_CNTL__DAT_INV__SHIFT 0x00000006
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_BANK_MASK 0x0000000cL
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_BANK__SHIFT 0x00000002
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_COLS_MASK 0x00000003L
+#define MC_SEQ_CNTL__MEM_ADDR_MAP_COLS__SHIFT 0x00000000
+#define MC_SEQ_CNTL__MSK_DF1_MASK 0x00000080L
+#define MC_SEQ_CNTL__MSK_DF1__SHIFT 0x00000007
+#define MC_SEQ_CNTL__MSKOFF_DAT_TH_MASK 0x00008000L
+#define MC_SEQ_CNTL__MSKOFF_DAT_TH__SHIFT 0x0000000f
+#define MC_SEQ_CNTL__MSKOFF_DAT_TL_MASK 0x00004000L
+#define MC_SEQ_CNTL__MSKOFF_DAT_TL__SHIFT 0x0000000e
+#define MC_SEQ_CNTL__RET_HOLD_EOP_MASK 0x00010000L
+#define MC_SEQ_CNTL__RET_HOLD_EOP__SHIFT 0x00000010
+#define MC_SEQ_CNTL__RTR_OVERRIDE_MASK 0x00080000L
+#define MC_SEQ_CNTL__RTR_OVERRIDE__SHIFT 0x00000013
+#define MC_SEQ_CNTL__SAFE_MODE_MASK 0x00000030L
+#define MC_SEQ_CNTL__SAFE_MODE__SHIFT 0x00000004
+#define MC_SEQ_DRAM_2__ADBI_ACT_MASK 0x04000000L
+#define MC_SEQ_DRAM_2__ADBI_ACT__SHIFT 0x0000001a
+#define MC_SEQ_DRAM_2__ADBI_DF1_MASK 0x02000000L
+#define MC_SEQ_DRAM_2__ADBI_DF1__SHIFT 0x00000019
+#define MC_SEQ_DRAM_2__ADR_DBI_ACM_MASK 0x00000004L
+#define MC_SEQ_DRAM_2__ADR_DBI_ACM__SHIFT 0x00000002
+#define MC_SEQ_DRAM_2__ADR_DBI_MASK 0x00000002L
+#define MC_SEQ_DRAM_2__ADR_DBI__SHIFT 0x00000001
+#define MC_SEQ_DRAM_2__ADR_DDR_MASK 0x00000001L
+#define MC_SEQ_DRAM_2__ADR_DDR__SHIFT 0x00000000
+#define MC_SEQ_DRAM_2__BNK_MRS_MASK 0x00002000L
+#define MC_SEQ_DRAM_2__BNK_MRS__SHIFT 0x0000000d
+#define MC_SEQ_DRAM_2__CMD_QDR_MASK 0x00000008L
+#define MC_SEQ_DRAM_2__CMD_QDR__SHIFT 0x00000003
+#define MC_SEQ_DRAM_2__CS_BY16_MASK 0x80000000L
+#define MC_SEQ_DRAM_2__CS_BY16__SHIFT 0x0000001f
+#define MC_SEQ_DRAM_2__DAT_QDR_MASK 0x00000010L
+#define MC_SEQ_DRAM_2__DAT_QDR__SHIFT 0x00000004
+#define MC_SEQ_DRAM_2__DBI_ACT_MASK 0x10000000L
+#define MC_SEQ_DRAM_2__DBI_ACT__SHIFT 0x0000001c
+#define MC_SEQ_DRAM_2__DBI_DF1_MASK 0x08000000L
+#define MC_SEQ_DRAM_2__DBI_DF1__SHIFT 0x0000001b
+#define MC_SEQ_DRAM_2__DBI_EDC_DF1_MASK 0x20000000L
+#define MC_SEQ_DRAM_2__DBI_EDC_DF1__SHIFT 0x0000001d
+#define MC_SEQ_DRAM_2__DBI_OVR_MASK 0x00004000L
+#define MC_SEQ_DRAM_2__DBI_OVR__SHIFT 0x0000000e
+#define MC_SEQ_DRAM_2__DLL_EST_MASK 0x00001000L
+#define MC_SEQ_DRAM_2__DLL_EST__SHIFT 0x0000000c
+#define MC_SEQ_DRAM_2__DQM_EST_MASK 0x00000080L
+#define MC_SEQ_DRAM_2__DQM_EST__SHIFT 0x00000007
+#define MC_SEQ_DRAM_2__PCH_BNK_MASK 0x01000000L
+#define MC_SEQ_DRAM_2__PCH_BNK__SHIFT 0x00000018
+#define MC_SEQ_DRAM_2__PLL_CLR_MASK 0x00000800L
+#define MC_SEQ_DRAM_2__PLL_CLR__SHIFT 0x0000000b
+#define MC_SEQ_DRAM_2__PLL_CNT_MASK 0x00ff0000L
+#define MC_SEQ_DRAM_2__PLL_CNT__SHIFT 0x00000010
+#define MC_SEQ_DRAM_2__PLL_EST_MASK 0x00000400L
+#define MC_SEQ_DRAM_2__PLL_EST__SHIFT 0x0000000a
+#define MC_SEQ_DRAM_2__RDAT_EDC_MASK 0x00000040L
+#define MC_SEQ_DRAM_2__RDAT_EDC__SHIFT 0x00000006
+#define MC_SEQ_DRAM_2__RD_DQS_MASK 0x00000100L
+#define MC_SEQ_DRAM_2__RD_DQS__SHIFT 0x00000008
+#define MC_SEQ_DRAM_2__TESTCHIP_EN_MASK 0x40000000L
+#define MC_SEQ_DRAM_2__TESTCHIP_EN__SHIFT 0x0000001e
+#define MC_SEQ_DRAM_2__TRI_CLK_MASK 0x00008000L
+#define MC_SEQ_DRAM_2__TRI_CLK__SHIFT 0x0000000f
+#define MC_SEQ_DRAM_2__WDAT_EDC_MASK 0x00000020L
+#define MC_SEQ_DRAM_2__WDAT_EDC__SHIFT 0x00000005
+#define MC_SEQ_DRAM_2__WR_DQS_MASK 0x00000200L
+#define MC_SEQ_DRAM_2__WR_DQS__SHIFT 0x00000009
+#define MC_SEQ_DRAM__ADR_2CK_MASK 0x00000001L
+#define MC_SEQ_DRAM__ADR_2CK__SHIFT 0x00000000
+#define MC_SEQ_DRAM__ADR_DF1_MASK 0x00000004L
+#define MC_SEQ_DRAM__ADR_DF1__SHIFT 0x00000002
+#define MC_SEQ_DRAM__ADR_MUX_MASK 0x00000002L
+#define MC_SEQ_DRAM__ADR_MUX__SHIFT 0x00000001
+#define MC_SEQ_DRAM__AP8_MASK 0x00000008L
+#define MC_SEQ_DRAM__AP8__SHIFT 0x00000003
+#define MC_SEQ_DRAM__BO4_MASK 0x00004000L
+#define MC_SEQ_DRAM__BO4__SHIFT 0x0000000e
+#define MC_SEQ_DRAM__CKE_ACT_MASK 0x00002000L
+#define MC_SEQ_DRAM__CKE_ACT__SHIFT 0x0000000d
+#define MC_SEQ_DRAM__CKE_DYN_MASK 0x00001000L
+#define MC_SEQ_DRAM__CKE_DYN__SHIFT 0x0000000c
+#define MC_SEQ_DRAM__DAT_DF1_MASK 0x00000010L
+#define MC_SEQ_DRAM__DAT_DF1__SHIFT 0x00000004
+#define MC_SEQ_DRAM__DAT_INV_MASK 0x01000000L
+#define MC_SEQ_DRAM__DAT_INV__SHIFT 0x00000018
+#define MC_SEQ_DRAM__DLL_CLR_MASK 0x00008000L
+#define MC_SEQ_DRAM__DLL_CLR__SHIFT 0x0000000f
+#define MC_SEQ_DRAM__DLL_CNT_MASK 0x00ff0000L
+#define MC_SEQ_DRAM__DLL_CNT__SHIFT 0x00000010
+#define MC_SEQ_DRAM__DQM_ACT_MASK 0x00000080L
+#define MC_SEQ_DRAM__DQM_ACT__SHIFT 0x00000007
+#define MC_SEQ_DRAM__DQM_DF1_MASK 0x00000040L
+#define MC_SEQ_DRAM__DQM_DF1__SHIFT 0x00000006
+#define MC_SEQ_DRAM__DQS_DF1_MASK 0x00000020L
+#define MC_SEQ_DRAM__DQS_DF1__SHIFT 0x00000005
+#define MC_SEQ_DRAM_ERROR_INSERTION__RX_MASK 0xffff0000L
+#define MC_SEQ_DRAM_ERROR_INSERTION__RX__SHIFT 0x00000010
+#define MC_SEQ_DRAM_ERROR_INSERTION__TX_MASK 0x0000ffffL
+#define MC_SEQ_DRAM_ERROR_INSERTION__TX__SHIFT 0x00000000
+#define MC_SEQ_DRAM__INV_ACM_MASK 0x02000000L
+#define MC_SEQ_DRAM__INV_ACM__SHIFT 0x00000019
+#define MC_SEQ_DRAM__ODT_ACT_MASK 0x08000000L
+#define MC_SEQ_DRAM__ODT_ACT__SHIFT 0x0000001b
+#define MC_SEQ_DRAM__ODT_ENB_MASK 0x04000000L
+#define MC_SEQ_DRAM__ODT_ENB__SHIFT 0x0000001a
+#define MC_SEQ_DRAM__RST_CTL_MASK 0x10000000L
+#define MC_SEQ_DRAM__RST_CTL__SHIFT 0x0000001c
+#define MC_SEQ_DRAM__STB_CNT_MASK 0x00000f00L
+#define MC_SEQ_DRAM__STB_CNT__SHIFT 0x00000008
+#define MC_SEQ_DRAM__TRI_CKE_MASK 0x40000000L
+#define MC_SEQ_DRAM__TRI_CKE__SHIFT 0x0000001e
+#define MC_SEQ_DRAM__TRI_MIO_DYN_MASK 0x20000000L
+#define MC_SEQ_DRAM__TRI_MIO_DYN__SHIFT 0x0000001d
+#define MC_SEQ_FIFO_CTL__CG_DIS_D0_MASK 0x00000100L
+#define MC_SEQ_FIFO_CTL__CG_DIS_D0__SHIFT 0x00000008
+#define MC_SEQ_FIFO_CTL__CG_DIS_D1_MASK 0x00000200L
+#define MC_SEQ_FIFO_CTL__CG_DIS_D1__SHIFT 0x00000009
+#define MC_SEQ_FIFO_CTL__R_LD_INIT_MASK 0x00000030L
+#define MC_SEQ_FIFO_CTL__R_LD_INIT__SHIFT 0x00000004
+#define MC_SEQ_FIFO_CTL__R_SYC_SEL_MASK 0x000000c0L
+#define MC_SEQ_FIFO_CTL__R_SYC_SEL__SHIFT 0x00000006
+#define MC_SEQ_FIFO_CTL__SYC_DLY_MASK 0x00007000L
+#define MC_SEQ_FIFO_CTL__SYC_DLY__SHIFT 0x0000000c
+#define MC_SEQ_FIFO_CTL__W_ASYC_EXT_MASK 0x00030000L
+#define MC_SEQ_FIFO_CTL__W_ASYC_EXT__SHIFT 0x00000010
+#define MC_SEQ_FIFO_CTL__W_DSYC_EXT_MASK 0x000c0000L
+#define MC_SEQ_FIFO_CTL__W_DSYC_EXT__SHIFT 0x00000012
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D0_MASK 0x00000003L
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D0__SHIFT 0x00000000
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D1_MASK 0x00000c00L
+#define MC_SEQ_FIFO_CTL__W_LD_INIT_D1__SHIFT 0x0000000a
+#define MC_SEQ_FIFO_CTL__W_SYC_SEL_MASK 0x0000000cL
+#define MC_SEQ_FIFO_CTL__W_SYC_SEL__SHIFT 0x00000002
+#define MC_SEQ_IO_DEBUG_DATA__IO_DEBUG_DATA_MASK 0xffffffffL
+#define MC_SEQ_IO_DEBUG_DATA__IO_DEBUG_DATA__SHIFT 0x00000000
+#define MC_SEQ_IO_DEBUG_INDEX__IO_DEBUG_INDEX_MASK 0x000001ffL
+#define MC_SEQ_IO_DEBUG_INDEX__IO_DEBUG_INDEX__SHIFT 0x00000000
+#define MC_SEQ_IO_RDBI__MASK_MASK 0xffffffffL
+#define MC_SEQ_IO_RDBI__MASK__SHIFT 0x00000000
+#define MC_SEQ_IO_REDC__EDC_MASK 0xffffffffL
+#define MC_SEQ_IO_REDC__EDC__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D0__APHY_RSV_MASK 0xff000000L
+#define MC_SEQ_IO_RESERVE_D0__APHY_RSV__SHIFT 0x00000018
+#define MC_SEQ_IO_RESERVE_D0__DPHY0_RSV_MASK 0x00000fffL
+#define MC_SEQ_IO_RESERVE_D0__DPHY0_RSV__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D0__DPHY1_RSV_MASK 0x00fff000L
+#define MC_SEQ_IO_RESERVE_D0__DPHY1_RSV__SHIFT 0x0000000c
+#define MC_SEQ_IO_RESERVE_D1__APHY_RSV_MASK 0xff000000L
+#define MC_SEQ_IO_RESERVE_D1__APHY_RSV__SHIFT 0x00000018
+#define MC_SEQ_IO_RESERVE_D1__DPHY0_RSV_MASK 0x00000fffL
+#define MC_SEQ_IO_RESERVE_D1__DPHY0_RSV__SHIFT 0x00000000
+#define MC_SEQ_IO_RESERVE_D1__DPHY1_RSV_MASK 0x00fff000L
+#define MC_SEQ_IO_RESERVE_D1__DPHY1_RSV__SHIFT 0x0000000c
+#define MC_SEQ_IO_RWORD0__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD0__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD1__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD1__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD2__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD2__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD3__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD3__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD4__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD4__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD5__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD5__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD6__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD6__RDATA__SHIFT 0x00000000
+#define MC_SEQ_IO_RWORD7__RDATA_MASK 0xffffffffL
+#define MC_SEQ_IO_RWORD7__RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC3__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC3__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC4__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC4__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC5__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC5__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC6__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC6__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC7__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC7__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC8__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC8__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC9__VALUE_MASK 0xffffffffL
+#define MC_SEQ_MISC9__VALUE__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2__FAW_MASK 0x00001f00L
+#define MC_SEQ_MISC_TIMING2__FAW__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING2_LP__FAW_MASK 0x00001f00L
+#define MC_SEQ_MISC_TIMING2_LP__FAW__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING2_LP__PA2RDATA_MASK 0x00000007L
+#define MC_SEQ_MISC_TIMING2_LP__PA2RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2_LP__PA2WDATA_MASK 0x00000070L
+#define MC_SEQ_MISC_TIMING2_LP__PA2WDATA__SHIFT 0x00000004
+#define MC_SEQ_MISC_TIMING2_LP__TADR_MASK 0x00e00000L
+#define MC_SEQ_MISC_TIMING2_LP__TADR__SHIFT 0x00000015
+#define MC_SEQ_MISC_TIMING2_LP__TFCKTR_MASK 0x0f000000L
+#define MC_SEQ_MISC_TIMING2_LP__TFCKTR__SHIFT 0x00000018
+#define MC_SEQ_MISC_TIMING2_LP__TREDC_MASK 0x0000e000L
+#define MC_SEQ_MISC_TIMING2_LP__TREDC__SHIFT 0x0000000d
+#define MC_SEQ_MISC_TIMING2_LP__TWDATATR_MASK 0xf0000000L
+#define MC_SEQ_MISC_TIMING2_LP__TWDATATR__SHIFT 0x0000001c
+#define MC_SEQ_MISC_TIMING2_LP__TWEDC_MASK 0x001f0000L
+#define MC_SEQ_MISC_TIMING2_LP__TWEDC__SHIFT 0x00000010
+#define MC_SEQ_MISC_TIMING2__PA2RDATA_MASK 0x00000007L
+#define MC_SEQ_MISC_TIMING2__PA2RDATA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING2__PA2WDATA_MASK 0x00000070L
+#define MC_SEQ_MISC_TIMING2__PA2WDATA__SHIFT 0x00000004
+#define MC_SEQ_MISC_TIMING2__T32AW_MASK 0x01e00000L
+#define MC_SEQ_MISC_TIMING2__T32AW__SHIFT 0x00000015
+#define MC_SEQ_MISC_TIMING2__TREDC_MASK 0x0000e000L
+#define MC_SEQ_MISC_TIMING2__TREDC__SHIFT 0x0000000d
+#define MC_SEQ_MISC_TIMING2__TWDATATR_MASK 0xf0000000L
+#define MC_SEQ_MISC_TIMING2__TWDATATR__SHIFT 0x0000001c
+#define MC_SEQ_MISC_TIMING2__TWEDC_MASK 0x001f0000L
+#define MC_SEQ_MISC_TIMING2__TWEDC__SHIFT 0x00000010
+#define MC_SEQ_MISC_TIMING_LP__TRFC_MASK 0x1ff00000L
+#define MC_SEQ_MISC_TIMING_LP__TRFC__SHIFT 0x00000014
+#define MC_SEQ_MISC_TIMING_LP__TRP_MASK 0x000f8000L
+#define MC_SEQ_MISC_TIMING_LP__TRP_RDA_MASK 0x00003f00L
+#define MC_SEQ_MISC_TIMING_LP__TRP_RDA__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING_LP__TRP__SHIFT 0x0000000f
+#define MC_SEQ_MISC_TIMING_LP__TRP_WRA_MASK 0x0000003fL
+#define MC_SEQ_MISC_TIMING_LP__TRP_WRA__SHIFT 0x00000000
+#define MC_SEQ_MISC_TIMING__TRFC_MASK 0x1ff00000L
+#define MC_SEQ_MISC_TIMING__TRFC__SHIFT 0x00000014
+#define MC_SEQ_MISC_TIMING__TRP_MASK 0x000f8000L
+#define MC_SEQ_MISC_TIMING__TRP_RDA_MASK 0x00003f00L
+#define MC_SEQ_MISC_TIMING__TRP_RDA__SHIFT 0x00000008
+#define MC_SEQ_MISC_TIMING__TRP__SHIFT 0x0000000f
+#define MC_SEQ_MISC_TIMING__TRP_WRA_MASK 0x0000003fL
+#define MC_SEQ_MISC_TIMING__TRP_WRA__SHIFT 0x00000000
+#define MC_SEQ_MPLL_OVERRIDE__AD_PLL_RESET_OVERRIDE_MASK 0x00000001L
+#define MC_SEQ_MPLL_OVERRIDE__AD_PLL_RESET_OVERRIDE__SHIFT 0x00000000
+#define MC_SEQ_MPLL_OVERRIDE__ATGM_CLK_SEL_OVERRIDE_MASK 0x00000020L
+#define MC_SEQ_MPLL_OVERRIDE__ATGM_CLK_SEL_OVERRIDE__SHIFT 0x00000005
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_0_PLL_RESET_OVERRIDE_MASK 0x00000002L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_0_PLL_RESET_OVERRIDE__SHIFT 0x00000001
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_1_PLL_RESET_OVERRIDE_MASK 0x00000004L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_0_1_PLL_RESET_OVERRIDE__SHIFT 0x00000002
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_0_PLL_RESET_OVERRIDE_MASK 0x00000008L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_0_PLL_RESET_OVERRIDE__SHIFT 0x00000003
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_1_PLL_RESET_OVERRIDE_MASK 0x00000010L
+#define MC_SEQ_MPLL_OVERRIDE__DQ_1_1_PLL_RESET_OVERRIDE__SHIFT 0x00000004
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_EN_OVERRIDE_MASK 0x00000040L
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_EN_OVERRIDE__SHIFT 0x00000006
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_SEL_OVERRIDE_MASK 0x00000080L
+#define MC_SEQ_MPLL_OVERRIDE__TEST_BYPASS_CLK_SEL_OVERRIDE__SHIFT 0x00000007
+#define MC_SEQ_PERF_CNTL_1__PAUSE_MASK 0x00000001L
+#define MC_SEQ_PERF_CNTL_1__PAUSE__SHIFT 0x00000000
+#define MC_SEQ_PERF_CNTL_1__SEL_A_MSB_MASK 0x00000100L
+#define MC_SEQ_PERF_CNTL_1__SEL_A_MSB__SHIFT 0x00000008
+#define MC_SEQ_PERF_CNTL_1__SEL_B_MSB_MASK 0x00000200L
+#define MC_SEQ_PERF_CNTL_1__SEL_B_MSB__SHIFT 0x00000009
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_C_MSB_MASK 0x00000400L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_C_MSB__SHIFT 0x0000000a
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_D_MSB_MASK 0x00000800L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH0_D_MSB__SHIFT 0x0000000b
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_A_MSB_MASK 0x00001000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_A_MSB__SHIFT 0x0000000c
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_B_MSB_MASK 0x00002000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_B_MSB__SHIFT 0x0000000d
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_C_MSB_MASK 0x00004000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_C_MSB__SHIFT 0x0000000e
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_D_MSB_MASK 0x00008000L
+#define MC_SEQ_PERF_CNTL_1__SEL_CH1_D_MSB__SHIFT 0x0000000f
+#define MC_SEQ_PERF_CNTL__CNTL_MASK 0xc0000000L
+#define MC_SEQ_PERF_CNTL__CNTL__SHIFT 0x0000001e
+#define MC_SEQ_PERF_CNTL__MONITOR_PERIOD_MASK 0x3fffffffL
+#define MC_SEQ_PERF_CNTL__MONITOR_PERIOD__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_A_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_A_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_A_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_A_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_B_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_B_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_B_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_B_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_C_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_C_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_C_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_C_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_D_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_D_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CNT_D_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_PERF_SEQ_CNT_D_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CTL__SEL_A_MASK 0x0000000fL
+#define MC_SEQ_PERF_SEQ_CTL__SEL_A__SHIFT 0x00000000
+#define MC_SEQ_PERF_SEQ_CTL__SEL_B_MASK 0x000000f0L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_B__SHIFT 0x00000004
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_C_MASK 0x00000f00L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_C__SHIFT 0x00000008
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_D_MASK 0x0000f000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH0_D__SHIFT 0x0000000c
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_A_MASK 0x000f0000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_A__SHIFT 0x00000010
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_B_MASK 0x00f00000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_B__SHIFT 0x00000014
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_C_MASK 0x0f000000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_C__SHIFT 0x00000018
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_D_MASK 0xf0000000L
+#define MC_SEQ_PERF_SEQ_CTL__SEL_CH1_D__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_EMRS_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_EMRS_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_EMRS_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_EMRS_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_EMRS_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_EMRS_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS1_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS1_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS1_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS1_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS1_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS1_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS2_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS2_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS2_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS2_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS2_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS2_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MASK 0x0000ffffL
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB0_MASK 0x20000000L
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB0__SHIFT 0x0000001d
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB1_MASK 0x10000000L
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR_MSB1__SHIFT 0x0000001c
+#define MC_SEQ_PMG_CMD_MRS_LP__ADR__SHIFT 0x00000000
+#define MC_SEQ_PMG_CMD_MRS_LP__BNK_MSB_MASK 0x00080000L
+#define MC_SEQ_PMG_CMD_MRS_LP__BNK_MSB__SHIFT 0x00000013
+#define MC_SEQ_PMG_CMD_MRS_LP__CSB_MASK 0x00600000L
+#define MC_SEQ_PMG_CMD_MRS_LP__CSB__SHIFT 0x00000015
+#define MC_SEQ_PMG_CMD_MRS_LP__END_MASK 0x00100000L
+#define MC_SEQ_PMG_CMD_MRS_LP__END__SHIFT 0x00000014
+#define MC_SEQ_PMG_CMD_MRS_LP__MOP_MASK 0x00070000L
+#define MC_SEQ_PMG_CMD_MRS_LP__MOP__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_HWCNTL__ACAO_MASK 0x00040000L
+#define MC_SEQ_PMG_PG_HWCNTL__ACAO__SHIFT 0x00000012
+#define MC_SEQ_PMG_PG_HWCNTL__AC_DLY_MASK 0x00000300L
+#define MC_SEQ_PMG_PG_HWCNTL__AC_DLY__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_HWCNTL__D_DLY_MASK 0x000000c0L
+#define MC_SEQ_PMG_PG_HWCNTL__D_DLY__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_HWCNTL__G_DLY_MASK 0x00003c00L
+#define MC_SEQ_PMG_PG_HWCNTL__G_DLY__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_HWCNTL__PWRGATE_EN_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_HWCNTL__PWRGATE_EN__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_HWCNTL__RXAO_MASK 0x00020000L
+#define MC_SEQ_PMG_PG_HWCNTL__RXAO__SHIFT 0x00000011
+#define MC_SEQ_PMG_PG_HWCNTL__STAGGER_EN_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_HWCNTL__STAGGER_EN__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_HWCNTL__TPGCG_MASK 0x0000003cL
+#define MC_SEQ_PMG_PG_HWCNTL__TPGCG__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_HWCNTL__TXAO_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_HWCNTL__TXAO__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_0__GMCON_SR_COMMIT_MASK 0x80000000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__GMCON_SR_COMMIT__SHIFT 0x0000001f
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMA0_AC_ENB_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMA0_AC_ENB__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_RX_ENB_MASK 0x00000020L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_RX_ENB__SHIFT 0x00000005
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_TX_ENB_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DBI_TX_ENB__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_RX_ENB_MASK 0x00000010L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_RX_ENB__SHIFT 0x00000004
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_TX_ENB_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_DQ_TX_ENB__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_RX_ENB_MASK 0x00000040L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_RX_ENB__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_TX_ENB_MASK 0x00000004L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_EDC_TX_ENB__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_RX_ENB_MASK 0x00000080L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_RX_ENB__SHIFT 0x00000007
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_TX_ENB_MASK 0x00000008L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD0_WCLKX_TX_ENB__SHIFT 0x00000003
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_RX_ENB_MASK 0x00002000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_RX_ENB__SHIFT 0x0000000d
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_TX_ENB_MASK 0x00000200L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DBI_TX_ENB__SHIFT 0x00000009
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_RX_ENB_MASK 0x00001000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_RX_ENB__SHIFT 0x0000000c
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_TX_ENB_MASK 0x00000100L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_DQ_TX_ENB__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_RX_ENB_MASK 0x00004000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_RX_ENB__SHIFT 0x0000000e
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_TX_ENB_MASK 0x00000400L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_EDC_TX_ENB__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_RX_ENB_MASK 0x00008000L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_RX_ENB__SHIFT 0x0000000f
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_TX_ENB_MASK 0x00000800L
+#define MC_SEQ_PMG_PG_SWCNTL_0__PMD1_WCLKX_TX_ENB__SHIFT 0x0000000b
+#define MC_SEQ_PMG_PG_SWCNTL_1__GMCON_SR_COMMIT_MASK 0x80000000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__GMCON_SR_COMMIT__SHIFT 0x0000001f
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMA1_AC_ENB_MASK 0x00010000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMA1_AC_ENB__SHIFT 0x00000010
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_RX_ENB_MASK 0x00000020L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_RX_ENB__SHIFT 0x00000005
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_TX_ENB_MASK 0x00000002L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DBI_TX_ENB__SHIFT 0x00000001
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_RX_ENB_MASK 0x00000010L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_RX_ENB__SHIFT 0x00000004
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_TX_ENB_MASK 0x00000001L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_DQ_TX_ENB__SHIFT 0x00000000
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_RX_ENB_MASK 0x00000040L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_RX_ENB__SHIFT 0x00000006
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_TX_ENB_MASK 0x00000004L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_EDC_TX_ENB__SHIFT 0x00000002
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_RX_ENB_MASK 0x00000080L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_RX_ENB__SHIFT 0x00000007
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_TX_ENB_MASK 0x00000008L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD2_WCLKX_TX_ENB__SHIFT 0x00000003
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_RX_ENB_MASK 0x00002000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_RX_ENB__SHIFT 0x0000000d
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_TX_ENB_MASK 0x00000200L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DBI_TX_ENB__SHIFT 0x00000009
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_RX_ENB_MASK 0x00001000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_RX_ENB__SHIFT 0x0000000c
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_TX_ENB_MASK 0x00000100L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_DQ_TX_ENB__SHIFT 0x00000008
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_RX_ENB_MASK 0x00004000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_RX_ENB__SHIFT 0x0000000e
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_TX_ENB_MASK 0x00000400L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_EDC_TX_ENB__SHIFT 0x0000000a
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_RX_ENB_MASK 0x00008000L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_RX_ENB__SHIFT 0x0000000f
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_TX_ENB_MASK 0x00000800L
+#define MC_SEQ_PMG_PG_SWCNTL_1__PMD3_WCLKX_TX_ENB__SHIFT 0x0000000b
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_MASK 0x001c0000L
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE__SHIFT 0x00000012
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_SS_MASK 0xff000000L
+#define MC_SEQ_PMG_TIMING_LP__SEQ_IDLE_SS__SHIFT 0x00000018
+#define MC_SEQ_PMG_TIMING_LP__TCKE_MASK 0x0003f000L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MASK 0x00000f00L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MSB_MASK 0x00800000L
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE_MSB__SHIFT 0x00000017
+#define MC_SEQ_PMG_TIMING_LP__TCKE_PULSE__SHIFT 0x00000008
+#define MC_SEQ_PMG_TIMING_LP__TCKE__SHIFT 0x0000000c
+#define MC_SEQ_PMG_TIMING_LP__TCKSRE_MASK 0x00000007L
+#define MC_SEQ_PMG_TIMING_LP__TCKSRE__SHIFT 0x00000000
+#define MC_SEQ_PMG_TIMING_LP__TCKSRX_MASK 0x00000070L
+#define MC_SEQ_PMG_TIMING_LP__TCKSRX__SHIFT 0x00000004
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_MASK 0x001c0000L
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE__SHIFT 0x00000012
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_SS_MASK 0xff000000L
+#define MC_SEQ_PMG_TIMING__SEQ_IDLE_SS__SHIFT 0x00000018
+#define MC_SEQ_PMG_TIMING__TCKE_MASK 0x0003f000L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MASK 0x00000f00L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MSB_MASK 0x00800000L
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE_MSB__SHIFT 0x00000017
+#define MC_SEQ_PMG_TIMING__TCKE_PULSE__SHIFT 0x00000008
+#define MC_SEQ_PMG_TIMING__TCKE__SHIFT 0x0000000c
+#define MC_SEQ_PMG_TIMING__TCKSRE_MASK 0x00000007L
+#define MC_SEQ_PMG_TIMING__TCKSRE__SHIFT 0x00000000
+#define MC_SEQ_PMG_TIMING__TCKSRX_MASK 0x00000070L
+#define MC_SEQ_PMG_TIMING__TCKSRX__SHIFT 0x00000004
+#define MC_SEQ_RAS_TIMING_LP__TRCDRA_MASK 0x000f8000L
+#define MC_SEQ_RAS_TIMING_LP__TRCDRA__SHIFT 0x0000000f
+#define MC_SEQ_RAS_TIMING_LP__TRCDR_MASK 0x00007c00L
+#define MC_SEQ_RAS_TIMING_LP__TRCDR__SHIFT 0x0000000a
+#define MC_SEQ_RAS_TIMING_LP__TRCDWA_MASK 0x000003e0L
+#define MC_SEQ_RAS_TIMING_LP__TRCDWA__SHIFT 0x00000005
+#define MC_SEQ_RAS_TIMING_LP__TRCDW_MASK 0x0000001fL
+#define MC_SEQ_RAS_TIMING_LP__TRCDW__SHIFT 0x00000000
+#define MC_SEQ_RAS_TIMING_LP__TRC_MASK 0x7f000000L
+#define MC_SEQ_RAS_TIMING_LP__TRC__SHIFT 0x00000018
+#define MC_SEQ_RAS_TIMING_LP__TRRD_MASK 0x00f00000L
+#define MC_SEQ_RAS_TIMING_LP__TRRD__SHIFT 0x00000014
+#define MC_SEQ_RAS_TIMING__TRCDRA_MASK 0x000f8000L
+#define MC_SEQ_RAS_TIMING__TRCDRA__SHIFT 0x0000000f
+#define MC_SEQ_RAS_TIMING__TRCDR_MASK 0x00007c00L
+#define MC_SEQ_RAS_TIMING__TRCDR__SHIFT 0x0000000a
+#define MC_SEQ_RAS_TIMING__TRCDWA_MASK 0x000003e0L
+#define MC_SEQ_RAS_TIMING__TRCDWA__SHIFT 0x00000005
+#define MC_SEQ_RAS_TIMING__TRCDW_MASK 0x0000001fL
+#define MC_SEQ_RAS_TIMING__TRCDW__SHIFT 0x00000000
+#define MC_SEQ_RAS_TIMING__TRC_MASK 0x7f000000L
+#define MC_SEQ_RAS_TIMING__TRC__SHIFT 0x00000018
+#define MC_SEQ_RAS_TIMING__TRRD_MASK 0x00f00000L
+#define MC_SEQ_RAS_TIMING__TRRD__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0_LP__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D0_LP__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0_LP__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D0_LP__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D0_LP__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D0_LP__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D0_LP__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D0_LP__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D0_LP__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D0_LP__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D0_LP__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D0_LP__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D0_LP__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D0_LP__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D0_LP__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D0_LP__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D0_LP__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D0_LP__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D0__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D0__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D0__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D0__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D0__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D0__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D0__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D0__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D0__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D0__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D0__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D0__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D0__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D0__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D0__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D0__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D0__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D0__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D1_LP__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D1_LP__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D1_LP__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D1_LP__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D1_LP__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D1_LP__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D1_LP__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D1_LP__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D1_LP__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D1_LP__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D1_LP__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D1_LP__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D1_LP__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D1_LP__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D1_LP__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D1_LP__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D1_LP__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D1_LP__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RD_CTL_D1__RBS_DLY_MASK 0x01f00000L
+#define MC_SEQ_RD_CTL_D1__RBS_DLY__SHIFT 0x00000014
+#define MC_SEQ_RD_CTL_D1__RBS_WEDC_DLY_MASK 0x3e000000L
+#define MC_SEQ_RD_CTL_D1__RBS_WEDC_DLY__SHIFT 0x00000019
+#define MC_SEQ_RD_CTL_D1__RCV_DLY_MASK 0x00000007L
+#define MC_SEQ_RD_CTL_D1__RCV_DLY__SHIFT 0x00000000
+#define MC_SEQ_RD_CTL_D1__RCV_EXT_MASK 0x000000f8L
+#define MC_SEQ_RD_CTL_D1__RCV_EXT__SHIFT 0x00000003
+#define MC_SEQ_RD_CTL_D1__RST_HLD_MASK 0x0000f000L
+#define MC_SEQ_RD_CTL_D1__RST_HLD__SHIFT 0x0000000c
+#define MC_SEQ_RD_CTL_D1__RST_SEL_MASK 0x00000300L
+#define MC_SEQ_RD_CTL_D1__RST_SEL__SHIFT 0x00000008
+#define MC_SEQ_RD_CTL_D1__RXDPWRON_DLY_MASK 0x00000c00L
+#define MC_SEQ_RD_CTL_D1__RXDPWRON_DLY__SHIFT 0x0000000a
+#define MC_SEQ_RD_CTL_D1__STR_PRE_MASK 0x00010000L
+#define MC_SEQ_RD_CTL_D1__STR_PRE__SHIFT 0x00000010
+#define MC_SEQ_RD_CTL_D1__STR_PST_MASK 0x00020000L
+#define MC_SEQ_RD_CTL_D1__STR_PST__SHIFT 0x00000011
+#define MC_SEQ_RESERVE_0_S__SCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_0_S__SCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RESERVE_1_S__SCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_1_S__SCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RESERVE_M__MCLK_FIELD_MASK 0xffffffffL
+#define MC_SEQ_RESERVE_M__MCLK_FIELD__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE0_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE1_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE2_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_BYTE3_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_DBI_D0__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_DBI_D1__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_EDC_D0__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_EDC_D0__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC0_MASK 0x0000000fL
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC0__SHIFT 0x00000000
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC1_MASK 0x000000f0L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC1__SHIFT 0x00000004
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC2_MASK 0x00000f00L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC2__SHIFT 0x00000008
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC3_MASK 0x0000f000L
+#define MC_SEQ_RXFRAMING_EDC_D1__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_RXFRAMING_EDC_D1__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_STATUS_M__CMD_RDY_D0_MASK 0x00000004L
+#define MC_SEQ_STATUS_M__CMD_RDY_D0__SHIFT 0x00000002
+#define MC_SEQ_STATUS_M__CMD_RDY_D1_MASK 0x00000008L
+#define MC_SEQ_STATUS_M__CMD_RDY_D1__SHIFT 0x00000003
+#define MC_SEQ_STATUS_M__PMG_FSMSTATE_MASK 0x01f00000L
+#define MC_SEQ_STATUS_M__PMG_FSMSTATE__SHIFT 0x00000014
+#define MC_SEQ_STATUS_M__PMG_PWRSTATE_MASK 0x00010000L
+#define MC_SEQ_STATUS_M__PMG_PWRSTATE__SHIFT 0x00000010
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D0_MASK 0x00000001L
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D0__SHIFT 0x00000000
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D1_MASK 0x00000002L
+#define MC_SEQ_STATUS_M__PWRUP_COMPL_D1__SHIFT 0x00000001
+#define MC_SEQ_STATUS_M__SEQ0_ARB_CMD_FIFO_EMPTY_MASK 0x00000100L
+#define MC_SEQ_STATUS_M__SEQ0_ARB_CMD_FIFO_EMPTY__SHIFT 0x00000008
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_HYS_MASK 0x02000000L
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_HYS__SHIFT 0x00000019
+#define MC_SEQ_STATUS_M__SEQ0_BUSY_MASK 0x00004000L
+#define MC_SEQ_STATUS_M__SEQ0_BUSY__SHIFT 0x0000000e
+#define MC_SEQ_STATUS_M__SEQ0_RS_DATA_FIFO_FULL_MASK 0x00001000L
+#define MC_SEQ_STATUS_M__SEQ0_RS_DATA_FIFO_FULL__SHIFT 0x0000000c
+#define MC_SEQ_STATUS_M__SEQ1_ARB_CMD_FIFO_EMPTY_MASK 0x00000200L
+#define MC_SEQ_STATUS_M__SEQ1_ARB_CMD_FIFO_EMPTY__SHIFT 0x00000009
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_HYS_MASK 0x04000000L
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_HYS__SHIFT 0x0000001a
+#define MC_SEQ_STATUS_M__SEQ1_BUSY_MASK 0x00008000L
+#define MC_SEQ_STATUS_M__SEQ1_BUSY__SHIFT 0x0000000f
+#define MC_SEQ_STATUS_M__SEQ1_RS_DATA_FIFO_FULL_MASK 0x00002000L
+#define MC_SEQ_STATUS_M__SEQ1_RS_DATA_FIFO_FULL__SHIFT 0x0000000d
+#define MC_SEQ_STATUS_M__SLF_D0_MASK 0x00000010L
+#define MC_SEQ_STATUS_M__SLF_D0__SHIFT 0x00000004
+#define MC_SEQ_STATUS_M__SLF_D1_MASK 0x00000020L
+#define MC_SEQ_STATUS_M__SLF_D1__SHIFT 0x00000005
+#define MC_SEQ_STATUS_M__SS_SLF_D0_MASK 0x00000040L
+#define MC_SEQ_STATUS_M__SS_SLF_D0__SHIFT 0x00000006
+#define MC_SEQ_STATUS_M__SS_SLF_D1_MASK 0x00000080L
+#define MC_SEQ_STATUS_M__SS_SLF_D1__SHIFT 0x00000007
+#define MC_SEQ_STATUS_S__SEQ0_ARB_CMD_FIFO_FULL_MASK 0x00000010L
+#define MC_SEQ_STATUS_S__SEQ0_ARB_CMD_FIFO_FULL__SHIFT 0x00000004
+#define MC_SEQ_STATUS_S__SEQ0_ARB_DATA_FIFO_FULL_MASK 0x00000001L
+#define MC_SEQ_STATUS_S__SEQ0_ARB_DATA_FIFO_FULL__SHIFT 0x00000000
+#define MC_SEQ_STATUS_S__SEQ0_RS_DATA_FIFO_EMPTY_MASK 0x00000100L
+#define MC_SEQ_STATUS_S__SEQ0_RS_DATA_FIFO_EMPTY__SHIFT 0x00000008
+#define MC_SEQ_STATUS_S__SEQ1_ARB_CMD_FIFO_FULL_MASK 0x00000020L
+#define MC_SEQ_STATUS_S__SEQ1_ARB_CMD_FIFO_FULL__SHIFT 0x00000005
+#define MC_SEQ_STATUS_S__SEQ1_ARB_DATA_FIFO_FULL_MASK 0x00000002L
+#define MC_SEQ_STATUS_S__SEQ1_ARB_DATA_FIFO_FULL__SHIFT 0x00000001
+#define MC_SEQ_STATUS_S__SEQ1_RS_DATA_FIFO_EMPTY_MASK 0x00000200L
+#define MC_SEQ_STATUS_S__SEQ1_RS_DATA_FIFO_EMPTY__SHIFT 0x00000009
+#define MC_SEQ_SUP_CNTL__BKPT_CLEAR_MASK 0x00000080L
+#define MC_SEQ_SUP_CNTL__BKPT_CLEAR__SHIFT 0x00000007
+#define MC_SEQ_SUP_CNTL__FAST_WRITE_MASK 0x00000040L
+#define MC_SEQ_SUP_CNTL__FAST_WRITE__SHIFT 0x00000006
+#define MC_SEQ_SUP_CNTL__PGM_CHKSUM_MASK 0xff800000L
+#define MC_SEQ_SUP_CNTL__PGM_CHKSUM__SHIFT 0x00000017
+#define MC_SEQ_SUP_CNTL__PGM_READ_MASK 0x00000020L
+#define MC_SEQ_SUP_CNTL__PGM_READ__SHIFT 0x00000005
+#define MC_SEQ_SUP_CNTL__PGM_WRITE_MASK 0x00000010L
+#define MC_SEQ_SUP_CNTL__PGM_WRITE__SHIFT 0x00000004
+#define MC_SEQ_SUP_CNTL__RESET_PC_MASK 0x00000008L
+#define MC_SEQ_SUP_CNTL__RESET_PC__SHIFT 0x00000003
+#define MC_SEQ_SUP_CNTL__RUN_MASK 0x00000001L
+#define MC_SEQ_SUP_CNTL__RUN__SHIFT 0x00000000
+#define MC_SEQ_SUP_CNTL__SINGLE_STEP_MASK 0x00000002L
+#define MC_SEQ_SUP_CNTL__SINGLE_STEP__SHIFT 0x00000001
+#define MC_SEQ_SUP_CNTL__SW_WAKE_MASK 0x00000004L
+#define MC_SEQ_SUP_CNTL__SW_WAKE__SHIFT 0x00000002
+#define MC_SEQ_SUP_DEC_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_DEC_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP0_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP0_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP1_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP1_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP2_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP2_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_GP3_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_GP3_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_IR_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_IR_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_PGM__CNTL_MASK 0xffffffffL
+#define MC_SEQ_SUP_PGM__CNTL__SHIFT 0x00000000
+#define MC_SEQ_SUP_PGM_STAT__STATUS_MASK 0xffffffffL
+#define MC_SEQ_SUP_PGM_STAT__STATUS__SHIFT 0x00000000
+#define MC_SEQ_SUP_R_PGM__PGM_MASK 0xffffffffL
+#define MC_SEQ_SUP_R_PGM__PGM__SHIFT 0x00000000
+#define MC_SEQ_TCG_CNTL__AREF_BOTH_MASK 0x04000000L
+#define MC_SEQ_TCG_CNTL__AREF_BOTH__SHIFT 0x0000001a
+#define MC_SEQ_TCG_CNTL__AREF_LAST_MASK 0x02000000L
+#define MC_SEQ_TCG_CNTL__AREF_LAST__SHIFT 0x00000019
+#define MC_SEQ_TCG_CNTL__BURST_NUM_MASK 0x00380000L
+#define MC_SEQ_TCG_CNTL__BURST_NUM__SHIFT 0x00000013
+#define MC_SEQ_TCG_CNTL__DATA_CNT_MASK 0x0000f000L
+#define MC_SEQ_TCG_CNTL__DATA_CNT__SHIFT 0x0000000c
+#define MC_SEQ_TCG_CNTL__DONE_MASK 0x80000000L
+#define MC_SEQ_TCG_CNTL__DONE__SHIFT 0x0000001f
+#define MC_SEQ_TCG_CNTL__ENABLE_D0_MASK 0x00000002L
+#define MC_SEQ_TCG_CNTL__ENABLE_D0__SHIFT 0x00000001
+#define MC_SEQ_TCG_CNTL__ENABLE_D1_MASK 0x00000004L
+#define MC_SEQ_TCG_CNTL__ENABLE_D1__SHIFT 0x00000002
+#define MC_SEQ_TCG_CNTL__FRAME_TRAIN_MASK 0x00040000L
+#define MC_SEQ_TCG_CNTL__FRAME_TRAIN__SHIFT 0x00000012
+#define MC_SEQ_TCG_CNTL__INFINITE_CMD_MASK 0x00000080L
+#define MC_SEQ_TCG_CNTL__INFINITE_CMD__SHIFT 0x00000007
+#define MC_SEQ_TCG_CNTL__ISSUE_AREF_MASK 0x00400000L
+#define MC_SEQ_TCG_CNTL__ISSUE_AREF__SHIFT 0x00000016
+#define MC_SEQ_TCG_CNTL__LOAD_FIFO_MASK 0x00010000L
+#define MC_SEQ_TCG_CNTL__LOAD_FIFO__SHIFT 0x00000010
+#define MC_SEQ_TCG_CNTL__MOP_MASK 0x00000f00L
+#define MC_SEQ_TCG_CNTL__MOP__SHIFT 0x00000008
+#define MC_SEQ_TCG_CNTL__NFIFO_MASK 0x00000070L
+#define MC_SEQ_TCG_CNTL__NFIFO__SHIFT 0x00000004
+#define MC_SEQ_TCG_CNTL__RESET_MASK 0x00000001L
+#define MC_SEQ_TCG_CNTL__RESET__SHIFT 0x00000000
+#define MC_SEQ_TCG_CNTL__SHORT_LDFF_MASK 0x00020000L
+#define MC_SEQ_TCG_CNTL__SHORT_LDFF__SHIFT 0x00000011
+#define MC_SEQ_TCG_CNTL__START_MASK 0x00000008L
+#define MC_SEQ_TCG_CNTL__START__SHIFT 0x00000003
+#define MC_SEQ_TCG_CNTL__TXDBI_CNTL_MASK 0x00800000L
+#define MC_SEQ_TCG_CNTL__TXDBI_CNTL__SHIFT 0x00000017
+#define MC_SEQ_TCG_CNTL__VPTR_MASK_MASK 0x01000000L
+#define MC_SEQ_TCG_CNTL__VPTR_MASK__SHIFT 0x00000018
+#define MC_SEQ_TIMER_RD__COUNTER_MASK 0xffffffffL
+#define MC_SEQ_TIMER_RD__COUNTER__SHIFT 0x00000000
+#define MC_SEQ_TIMER_WR__COUNTER_MASK 0xffffffffL
+#define MC_SEQ_TIMER_WR__COUNTER__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_CAPTURE__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_CAPTURE__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_CAPTURE__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_CAPTURE__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_CAPTURE__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_CAPTURE__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_CAPTURE__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_CAPTURE__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_CAPTURE__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_CAPTURE__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_CAPTURE__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_CAPTURE__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_CAPTURE__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_CAPTURE__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_CAPTURE__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_CAPTURE__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_CAPTURE__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_CAPTURE__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_CAPTURE__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_CAPTURE__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_CAPTURE__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_CAPTURE__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_CAPTURE__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_CAPTURE__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_CAPTURE__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_CAPTURE__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_CAPTURE__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_CAPTURE__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_CAPTURE__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_CAPTURE__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_CAPTURE__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_CAPTURE__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_CAPTURE__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_CAPTURE__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_CAPTURE__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_CAPTURE__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_CAPTURE__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_CAPTURE__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_CAPTURE__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_CAPTURE__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_CAPTURE__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_CAPTURE__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_CAPTURE__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_CAPTURE__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_CAPTURE__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_EDC_THRESHOLD2__THRESHOLD_PERIOD_MASK 0xffffffffL
+#define MC_SEQ_TRAIN_EDC_THRESHOLD2__THRESHOLD_PERIOD__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_IN_PROGRESS_MASK 0x00000100L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_IN_PROGRESS__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_STATUS_MASK 0x00000001L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH0_LINK_RETRAIN_STATUS__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_IN_PROGRESS_MASK 0x00000200L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_IN_PROGRESS__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_STATUS_MASK 0x00000002L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CH1_LINK_RETRAIN_STATUS__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CLEAR_RETRAIN_STATUS_MASK 0x00000004L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__CLEAR_RETRAIN_STATUS__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_MONITOR_MASK 0x00000030L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_MONITOR__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_VBI_MASK 0x00000008L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD3__RETRAIN_VBI__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__READ_EDC_THRESHOLD_MASK 0xffff0000L
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__READ_EDC_THRESHOLD__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__WRITE_EDC_THRESHOLD_MASK 0x0000ffffL
+#define MC_SEQ_TRAIN_EDC_THRESHOLD__WRITE_EDC_THRESHOLD__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_TIMING__TARF2T_MASK 0x000003e0L
+#define MC_SEQ_TRAIN_TIMING__TARF2T__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_TIMING__TLD2LD_MASK 0x000f8000L
+#define MC_SEQ_TRAIN_TIMING__TLD2LD__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_TIMING__TT2ROW_MASK 0x00007c00L
+#define MC_SEQ_TRAIN_TIMING__TT2ROW__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_TIMING__TWT2RT_MASK 0x0000001fL
+#define MC_SEQ_TRAIN_TIMING__TWT2RT__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__CLEARALL_MASK 0x00010000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__CLEARALL__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_CLEAR__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_ADDR_TRAIN_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_ADDR_TRAIN__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_READ_TRAIN_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_READ_TRAIN__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WAKEUP_EARLY_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WAKEUP_EARLY__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WCK_TRAIN_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WCK_TRAIN__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WRITE_TRAIN_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__AUTO_REFRESH_WRITE_TRAIN__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D0_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D0__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D1_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_RD_D1__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D0_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D0__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D1_MASK 0x08000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BLOCK_ARB_WR_D1__SHIFT 0x0000001b
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_ADDR_TRAIN_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_ADDR_TRAIN__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_READ_TRAIN_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_READ_TRAIN__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WCK_TRAIN_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WCK_TRAIN__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WRITE_TRAIN_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__BOOT_UP_WRITE_TRAIN__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__DISP_ASTOP_WAKEUP_MASK 0x20000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__DISP_ASTOP_WAKEUP__SHIFT 0x0000001d
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_ADDR_TRAIN_MASK 0x00010000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_ADDR_TRAIN__SHIFT 0x00000010
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_READ_TRAIN_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_READ_TRAIN__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WCK_TRAIN_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WCK_TRAIN__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WRITE_TRAIN_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__READ_ECC_WRITE_TRAIN__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_ADDR_TRAIN_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_ADDR_TRAIN__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_READ_TRAIN_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_READ_TRAIN__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WCK_TRAIN_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WCK_TRAIN__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WRITE_TRAIN_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SELF_REFRESH_WRITE_TRAIN__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D0_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D0__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D1_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__STOP_WCK_D1__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SW_WAKEUP_MASK 0x10000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__SW_WAKEUP__SHIFT 0x0000001c
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK 0x40000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0__SHIFT 0x0000001e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK 0x80000000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1__SHIFT 0x0000001f
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_ADDR_TRAIN_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_ADDR_TRAIN__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_READ_TRAIN_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_READ_TRAIN__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WCK_TRAIN_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WCK_TRAIN__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WRITE_TRAIN_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_CNTL__WRITE_ECC_WRITE_TRAIN__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_EDGE__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP0_WAKEUP_MASK 0x00040000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP0_WAKEUP__SHIFT 0x00000012
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP1_WAKEUP_MASK 0x00080000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOP1_WAKEUP__SHIFT 0x00000013
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB0_WAKEUP_MASK 0x00200000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB0_WAKEUP__SHIFT 0x00000015
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB1_WAKEUP_MASK 0x00400000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__ALLOWSTOPB1_WAKEUP__SHIFT 0x00000016
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_ARF_WAKEUP_MASK 0x00000001L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_ARF_WAKEUP__SHIFT 0x00000000
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_CMD_FIFO_READY_WAKEUP_MASK 0x00000100L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000008
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_DATA_FIFO_READY_WAKEUP_MASK 0x00000400L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000a
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_IDLEH_WAKEUP_MASK 0x01000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_IDLEH_WAKEUP__SHIFT 0x00000018
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_REDC_WAKEUP_MASK 0x00000004L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_REDC_WAKEUP__SHIFT 0x00000002
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_WEDC_WAKEUP_MASK 0x00000010L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D0_WEDC_WAKEUP__SHIFT 0x00000004
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_ARF_WAKEUP_MASK 0x00000002L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_ARF_WAKEUP__SHIFT 0x00000001
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_CMD_FIFO_READY_WAKEUP_MASK 0x00000200L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_CMD_FIFO_READY_WAKEUP__SHIFT 0x00000009
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_DATA_FIFO_READY_WAKEUP_MASK 0x00000800L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_DATA_FIFO_READY_WAKEUP__SHIFT 0x0000000b
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_IDLEH_WAKEUP_MASK 0x02000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_IDLEH_WAKEUP__SHIFT 0x00000019
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_REDC_WAKEUP_MASK 0x00000008L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_REDC_WAKEUP__SHIFT 0x00000003
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_WEDC_WAKEUP_MASK 0x00000020L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__D1_WEDC_WAKEUP__SHIFT 0x00000005
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_LPT_WAKEUP_MASK 0x00800000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_LPT_WAKEUP__SHIFT 0x00000017
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_WAKEUP_MASK 0x00100000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__DPM_WAKEUP__SHIFT 0x00000014
+#define MC_SEQ_TRAIN_WAKEUP_MASK__MCLK_FREQ_CHANGE_WAKEUP_MASK 0x00000040L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__MCLK_FREQ_CHANGE_WAKEUP__SHIFT 0x00000006
+#define MC_SEQ_TRAIN_WAKEUP_MASK__PHY_PG_WAKEUP_MASK 0x04000000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__PHY_PG_WAKEUP__SHIFT 0x0000001a
+#define MC_SEQ_TRAIN_WAKEUP_MASK__RESERVE0_WAKEUP_MASK 0x00002000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__RESERVE0_WAKEUP__SHIFT 0x0000000d
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SCLK_SRBM_READY_WAKEUP_MASK 0x00000080L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SCLK_SRBM_READY_WAKEUP__SHIFT 0x00000007
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SOFTWARE_WAKEUP_WAKEUP_MASK 0x00001000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__SOFTWARE_WAKEUP_WAKEUP__SHIFT 0x0000000c
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TCG_DONE_WAKEUP_MASK 0x00020000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TCG_DONE_WAKEUP__SHIFT 0x00000011
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TIMER_DONE_WAKEUP_MASK 0x00008000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TIMER_DONE_WAKEUP__SHIFT 0x0000000f
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TSM_DONE_WAKEUP_MASK 0x00004000L
+#define MC_SEQ_TRAIN_WAKEUP_MASK__TSM_DONE_WAKEUP__SHIFT 0x0000000e
+#define MC_SEQ_TSM_BCNT__BCNT_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_BCNT__BCNT_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_BCNT__COMP_VALUE_MASK 0x00ff0000L
+#define MC_SEQ_TSM_BCNT__COMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_BCNT__DONE_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_BCNT__DONE_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_BCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_BCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_BCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_BCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_CTRL__CAPTURE_START_MASK 0x00000002L
+#define MC_SEQ_TSM_CTRL__CAPTURE_START__SHIFT 0x00000001
+#define MC_SEQ_TSM_CTRL__DIRECTION_MASK 0x00000020L
+#define MC_SEQ_TSM_CTRL__DIRECTION__SHIFT 0x00000005
+#define MC_SEQ_TSM_CTRL__DONE_MASK 0x00000004L
+#define MC_SEQ_TSM_CTRL__DONE__SHIFT 0x00000002
+#define MC_SEQ_TSM_CTRL__ERR_MASK 0x00000008L
+#define MC_SEQ_TSM_CTRL__ERR__SHIFT 0x00000003
+#define MC_SEQ_TSM_CTRL__INVERT_MASK 0x00000040L
+#define MC_SEQ_TSM_CTRL__INVERT__SHIFT 0x00000006
+#define MC_SEQ_TSM_CTRL__MASK_BITS_MASK 0x00000080L
+#define MC_SEQ_TSM_CTRL__MASK_BITS__SHIFT 0x00000007
+#define MC_SEQ_TSM_CTRL__POINTER_MASK 0xffff0000L
+#define MC_SEQ_TSM_CTRL__POINTER__SHIFT 0x00000010
+#define MC_SEQ_TSM_CTRL__ROT_INV_MASK 0x00000400L
+#define MC_SEQ_TSM_CTRL__ROT_INV__SHIFT 0x0000000a
+#define MC_SEQ_TSM_CTRL__START_MASK 0x00000001L
+#define MC_SEQ_TSM_CTRL__START__SHIFT 0x00000000
+#define MC_SEQ_TSM_CTRL__STEP_MASK 0x00000010L
+#define MC_SEQ_TSM_CTRL__STEP__SHIFT 0x00000004
+#define MC_SEQ_TSM_CTRL__UPDATE_LOOP_MASK 0x00000300L
+#define MC_SEQ_TSM_CTRL__UPDATE_LOOP__SHIFT 0x00000008
+#define MC_SEQ_TSM_DBI__DBI_MASK 0xffffffffL
+#define MC_SEQ_TSM_DBI__DBI__SHIFT 0x00000000
+#define MC_SEQ_TSM_DEBUG_DATA__TSM_DEBUG_DATA_MASK 0xffffffffL
+#define MC_SEQ_TSM_DEBUG_DATA__TSM_DEBUG_DATA__SHIFT 0x00000000
+#define MC_SEQ_TSM_DEBUG_INDEX__TSM_DEBUG_INDEX_MASK 0x0000001fL
+#define MC_SEQ_TSM_DEBUG_INDEX__TSM_DEBUG_INDEX__SHIFT 0x00000000
+#define MC_SEQ_TSM_EDC__EDC_MASK 0xffffffffL
+#define MC_SEQ_TSM_EDC__EDC__SHIFT 0x00000000
+#define MC_SEQ_TSM_FLAG__ERROR_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_FLAG__ERROR_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_FLAG__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_FLAG__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_FLAG__FLAG_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_FLAG__FLAG_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_FLAG__NBBL_MASK_MASK 0x000f0000L
+#define MC_SEQ_TSM_FLAG__NBBL_MASK__SHIFT 0x00000010
+#define MC_SEQ_TSM_FLAG__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_FLAG__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_GCNT__COMP_VALUE_MASK 0xffff0000L
+#define MC_SEQ_TSM_GCNT__COMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_GCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_GCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_GCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_GCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_GCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_GCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_MISC__WCDR_MASK_MASK 0x000f0000L
+#define MC_SEQ_TSM_MISC__WCDR_MASK__SHIFT 0x00000010
+#define MC_SEQ_TSM_MISC__WCDR_PTR_MASK 0x0000ffffL
+#define MC_SEQ_TSM_MISC__WCDR_PTR__SHIFT 0x00000000
+#define MC_SEQ_TSM_NCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_NCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_NCNT__NIBBLE_SKIP_MASK 0x0f000000L
+#define MC_SEQ_TSM_NCNT__NIBBLE_SKIP__SHIFT 0x00000018
+#define MC_SEQ_TSM_NCNT__RANGE_HIGH_MASK 0x00f00000L
+#define MC_SEQ_TSM_NCNT__RANGE_HIGH__SHIFT 0x00000014
+#define MC_SEQ_TSM_NCNT__RANGE_LOW_MASK 0x000f0000L
+#define MC_SEQ_TSM_NCNT__RANGE_LOW__SHIFT 0x00000010
+#define MC_SEQ_TSM_NCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_NCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_NCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_NCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_OCNT__CMP_VALUE_MASK 0xffff0000L
+#define MC_SEQ_TSM_OCNT__CMP_VALUE__SHIFT 0x00000010
+#define MC_SEQ_TSM_OCNT__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_OCNT__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_OCNT__TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_OCNT__TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_OCNT__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_OCNT__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_UPDATE__AREF_COUNT_MASK 0x00ff0000L
+#define MC_SEQ_TSM_UPDATE__AREF_COUNT__SHIFT 0x00000010
+#define MC_SEQ_TSM_UPDATE__CAPTR_TESTS_MASK 0xff000000L
+#define MC_SEQ_TSM_UPDATE__CAPTR_TESTS__SHIFT 0x00000018
+#define MC_SEQ_TSM_UPDATE__FALSE_ACT_MASK 0x000000f0L
+#define MC_SEQ_TSM_UPDATE__FALSE_ACT__SHIFT 0x00000004
+#define MC_SEQ_TSM_UPDATE__TRUE_ACT_MASK 0x0000000fL
+#define MC_SEQ_TSM_UPDATE__TRUE_ACT__SHIFT 0x00000000
+#define MC_SEQ_TSM_UPDATE__UPDT_TESTS_MASK 0x0000ff00L
+#define MC_SEQ_TSM_UPDATE__UPDT_TESTS__SHIFT 0x00000008
+#define MC_SEQ_TSM_WCDR__WCDR_MASK 0xffffffffL
+#define MC_SEQ_TSM_WCDR__WCDR__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE0_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE1_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE2_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D0__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ4_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ4__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ5_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ5__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ6_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ6__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ7_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_BYTE3_D1__DQ7__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_DBI_D0__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_DBI_D1__DBI3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_EDC_D0__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_EDC_D0__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_EDC_D1__EDC3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR0_MASK 0x000f0000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR0__SHIFT 0x00000010
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR1_MASK 0x00f00000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR1__SHIFT 0x00000014
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR2_MASK 0x0f000000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR2__SHIFT 0x00000018
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR3_MASK 0xf0000000L
+#define MC_SEQ_TXFRAMING_EDC_D1__WCDR3__SHIFT 0x0000001c
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_FCK_D0__FCK3__SHIFT 0x0000000c
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK0_MASK 0x0000000fL
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK0__SHIFT 0x00000000
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK1_MASK 0x000000f0L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK1__SHIFT 0x00000004
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK2_MASK 0x00000f00L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK2__SHIFT 0x00000008
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK3_MASK 0x0000f000L
+#define MC_SEQ_TXFRAMING_FCK_D1__FCK3__SHIFT 0x0000000c
+#define MC_SEQ_VENDOR_ID_I0__VALUE_MASK 0xffffffffL
+#define MC_SEQ_VENDOR_ID_I0__VALUE__SHIFT 0x00000000
+#define MC_SEQ_VENDOR_ID_I1__VALUE_MASK 0xffffffffL
+#define MC_SEQ_VENDOR_ID_I1__VALUE__SHIFT 0x00000000
+#define MC_SEQ_WCDR_CTRL__AREF_EN_MASK 0x00004000L
+#define MC_SEQ_WCDR_CTRL__AREF_EN__SHIFT 0x0000000e
+#define MC_SEQ_WCDR_CTRL__PRBS_EN_MASK 0x00100000L
+#define MC_SEQ_WCDR_CTRL__PRBS_EN__SHIFT 0x00000014
+#define MC_SEQ_WCDR_CTRL__PRBS_RST_MASK 0x00200000L
+#define MC_SEQ_WCDR_CTRL__PRBS_RST__SHIFT 0x00000015
+#define MC_SEQ_WCDR_CTRL__PREAMBLE_MASK 0x0f000000L
+#define MC_SEQ_WCDR_CTRL__PREAMBLE__SHIFT 0x00000018
+#define MC_SEQ_WCDR_CTRL__PRE_MASK_MASK 0xf0000000L
+#define MC_SEQ_WCDR_CTRL__PRE_MASK__SHIFT 0x0000001c
+#define MC_SEQ_WCDR_CTRL__RD_EN_MASK 0x00002000L
+#define MC_SEQ_WCDR_CTRL__RD_EN__SHIFT 0x0000000d
+#define MC_SEQ_WCDR_CTRL__TRAIN_EN_MASK 0x00008000L
+#define MC_SEQ_WCDR_CTRL__TRAIN_EN__SHIFT 0x0000000f
+#define MC_SEQ_WCDR_CTRL__TWCDRL_MASK 0x000f0000L
+#define MC_SEQ_WCDR_CTRL__TWCDRL__SHIFT 0x00000010
+#define MC_SEQ_WCDR_CTRL__WCDR_PRE_MASK 0x000000ffL
+#define MC_SEQ_WCDR_CTRL__WCDR_PRE__SHIFT 0x00000000
+#define MC_SEQ_WCDR_CTRL__WCDR_TIM_MASK 0x00000f00L
+#define MC_SEQ_WCDR_CTRL__WCDR_TIM__SHIFT 0x00000008
+#define MC_SEQ_WCDR_CTRL__WR_EN_MASK 0x00001000L
+#define MC_SEQ_WCDR_CTRL__WR_EN__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D0_MASK 0x00000001L
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D0__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D1_MASK 0x00000008L
+#define MC_SEQ_WR_CTL_2__DAT_DLY_H_D1__SHIFT 0x00000003
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D0_MASK 0x00000002L
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D0__SHIFT 0x00000001
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D1_MASK 0x00000010L
+#define MC_SEQ_WR_CTL_2__DQS_DLY_H_D1__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D0_MASK 0x00000001L
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D0__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D1_MASK 0x00000008L
+#define MC_SEQ_WR_CTL_2_LP__DAT_DLY_H_D1__SHIFT 0x00000003
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D0_MASK 0x00000002L
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D0__SHIFT 0x00000001
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D1_MASK 0x00000010L
+#define MC_SEQ_WR_CTL_2_LP__DQS_DLY_H_D1__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D0_MASK 0x00000004L
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D0__SHIFT 0x00000002
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D1_MASK 0x00000020L
+#define MC_SEQ_WR_CTL_2_LP__OEN_DLY_H_D1__SHIFT 0x00000005
+#define MC_SEQ_WR_CTL_2_LP__WCDR_EN_MASK 0x00000040L
+#define MC_SEQ_WR_CTL_2_LP__WCDR_EN__SHIFT 0x00000006
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D0_MASK 0x00000004L
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D0__SHIFT 0x00000002
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D1_MASK 0x00000020L
+#define MC_SEQ_WR_CTL_2__OEN_DLY_H_D1__SHIFT 0x00000005
+#define MC_SEQ_WR_CTL_2__WCDR_EN_MASK 0x00000040L
+#define MC_SEQ_WR_CTL_2__WCDR_EN__SHIFT 0x00000006
+#define MC_SEQ_WR_CTL_D0__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D0__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D0__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D0__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D0__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D0__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D0__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D0__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D0__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D0__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D0__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D0__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D0__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D0__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D0__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D0__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D0_LP__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D0_LP__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D0_LP__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D0_LP__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D0_LP__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D0_LP__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D0_LP__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D0_LP__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D0_LP__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D0_LP__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D0_LP__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D0_LP__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D0_LP__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D0_LP__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D0_LP__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D0_LP__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D0_LP__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D0_LP__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D0_LP__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D0_LP__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D0_LP__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D0_LP__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D0_LP__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D0_LP__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D0__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D0__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D0__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D0__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D0__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D0__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D0__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D0__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D0__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D0__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D1__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D1__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D1__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D1__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D1__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D1__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D1__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D1__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D1__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D1__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D1__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D1__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D1__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D1__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D1__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D1__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D1_LP__ADR_2Y_DLY_MASK 0x00000400L
+#define MC_SEQ_WR_CTL_D1_LP__ADR_2Y_DLY__SHIFT 0x0000000a
+#define MC_SEQ_WR_CTL_D1_LP__ADR_DLY_MASK 0x20000000L
+#define MC_SEQ_WR_CTL_D1_LP__ADR_DLY__SHIFT 0x0000001d
+#define MC_SEQ_WR_CTL_D1_LP__CMD_2Y_DLY_MASK 0x00000800L
+#define MC_SEQ_WR_CTL_D1_LP__CMD_2Y_DLY__SHIFT 0x0000000b
+#define MC_SEQ_WR_CTL_D1_LP__CMD_DLY_MASK 0x40000000L
+#define MC_SEQ_WR_CTL_D1_LP__CMD_DLY__SHIFT 0x0000001e
+#define MC_SEQ_WR_CTL_D1_LP__DAT_2Y_DLY_MASK 0x00000200L
+#define MC_SEQ_WR_CTL_D1_LP__DAT_2Y_DLY__SHIFT 0x00000009
+#define MC_SEQ_WR_CTL_D1_LP__DAT_DLY_MASK 0x0000000fL
+#define MC_SEQ_WR_CTL_D1_LP__DAT_DLY__SHIFT 0x00000000
+#define MC_SEQ_WR_CTL_D1_LP__DQS_DLY_MASK 0x000000f0L
+#define MC_SEQ_WR_CTL_D1_LP__DQS_DLY__SHIFT 0x00000004
+#define MC_SEQ_WR_CTL_D1_LP__DQS_XTR_MASK 0x00000100L
+#define MC_SEQ_WR_CTL_D1_LP__DQS_XTR__SHIFT 0x00000008
+#define MC_SEQ_WR_CTL_D1_LP__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D1_LP__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D1_LP__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D1_LP__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D1_LP__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D1_LP__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D1_LP__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D1_LP__OEN_SEL__SHIFT 0x00000014
+#define MC_SEQ_WR_CTL_D1__ODT_DLY_MASK 0x0f000000L
+#define MC_SEQ_WR_CTL_D1__ODT_DLY__SHIFT 0x00000018
+#define MC_SEQ_WR_CTL_D1__ODT_EXT_MASK 0x10000000L
+#define MC_SEQ_WR_CTL_D1__ODT_EXT__SHIFT 0x0000001c
+#define MC_SEQ_WR_CTL_D1__OEN_DLY_MASK 0x0000f000L
+#define MC_SEQ_WR_CTL_D1__OEN_DLY__SHIFT 0x0000000c
+#define MC_SEQ_WR_CTL_D1__OEN_EXT_MASK 0x000f0000L
+#define MC_SEQ_WR_CTL_D1__OEN_EXT__SHIFT 0x00000010
+#define MC_SEQ_WR_CTL_D1__OEN_SEL_MASK 0x00300000L
+#define MC_SEQ_WR_CTL_D1__OEN_SEL__SHIFT 0x00000014
+#define MC_SHARED_BLACKOUT_CNTL__BLACKOUT_MODE_MASK 0x00000007L
+#define MC_SHARED_BLACKOUT_CNTL__BLACKOUT_MODE__SHIFT 0x00000000
+#define MC_SHARED_CHMAP__CHAN0_MASK 0x0000000fL
+#define MC_SHARED_CHMAP__CHAN0__SHIFT 0x00000000
+#define MC_SHARED_CHMAP__CHAN1_MASK 0x000000f0L
+#define MC_SHARED_CHMAP__CHAN1__SHIFT 0x00000004
+#define MC_SHARED_CHMAP__CHAN2_MASK 0x00000f00L
+#define MC_SHARED_CHMAP__CHAN2__SHIFT 0x00000008
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0x0000f000L
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0x0000000c
+#define MC_SHARED_CHREMAP__CHAN0_MASK 0x00000007L
+#define MC_SHARED_CHREMAP__CHAN0__SHIFT 0x00000000
+#define MC_SHARED_CHREMAP__CHAN1_MASK 0x00000038L
+#define MC_SHARED_CHREMAP__CHAN1__SHIFT 0x00000003
+#define MC_SHARED_CHREMAP__CHAN2_MASK 0x000001c0L
+#define MC_SHARED_CHREMAP__CHAN2__SHIFT 0x00000006
+#define MC_SHARED_CHREMAP__CHAN3_MASK 0x00000e00L
+#define MC_SHARED_CHREMAP__CHAN3__SHIFT 0x00000009
+#define MC_SHARED_CHREMAP__CHAN4_MASK 0x00007000L
+#define MC_SHARED_CHREMAP__CHAN4__SHIFT 0x0000000c
+#define MC_SHARED_CHREMAP__CHAN5_MASK 0x00038000L
+#define MC_SHARED_CHREMAP__CHAN5__SHIFT 0x0000000f
+#define MC_SHARED_CHREMAP__CHAN6_MASK 0x001c0000L
+#define MC_SHARED_CHREMAP__CHAN6__SHIFT 0x00000012
+#define MC_SHARED_CHREMAP__CHAN7_MASK 0x00e00000L
+#define MC_SHARED_CHREMAP__CHAN7__SHIFT 0x00000015
+#define MC_TRAIN_EDCCDR_R_D0__EDC0_MASK 0x000000ffL
+#define MC_TRAIN_EDCCDR_R_D0__EDC0__SHIFT 0x00000000
+#define MC_TRAIN_EDCCDR_R_D0__EDC1_MASK 0x0000ff00L
+#define MC_TRAIN_EDCCDR_R_D0__EDC1__SHIFT 0x00000008
+#define MC_TRAIN_EDCCDR_R_D0__EDC2_MASK 0x00ff0000L
+#define MC_TRAIN_EDCCDR_R_D0__EDC2__SHIFT 0x00000010
+#define MC_TRAIN_EDCCDR_R_D0__EDC3_MASK 0xff000000L
+#define MC_TRAIN_EDCCDR_R_D0__EDC3__SHIFT 0x00000018
+#define MC_TRAIN_EDCCDR_R_D1__EDC0_MASK 0x000000ffL
+#define MC_TRAIN_EDCCDR_R_D1__EDC0__SHIFT 0x00000000
+#define MC_TRAIN_EDCCDR_R_D1__EDC1_MASK 0x0000ff00L
+#define MC_TRAIN_EDCCDR_R_D1__EDC1__SHIFT 0x00000008
+#define MC_TRAIN_EDCCDR_R_D1__EDC2_MASK 0x00ff0000L
+#define MC_TRAIN_EDCCDR_R_D1__EDC2__SHIFT 0x00000010
+#define MC_TRAIN_EDCCDR_R_D1__EDC3_MASK 0xff000000L
+#define MC_TRAIN_EDCCDR_R_D1__EDC3__SHIFT 0x00000018
+#define MC_TRAIN_EDC_STATUS_D0__REDC_CNT_MASK 0xffff0000L
+#define MC_TRAIN_EDC_STATUS_D0__REDC_CNT__SHIFT 0x00000010
+#define MC_TRAIN_EDC_STATUS_D0__WEDC_CNT_MASK 0x0000ffffL
+#define MC_TRAIN_EDC_STATUS_D0__WEDC_CNT__SHIFT 0x00000000
+#define MC_TRAIN_EDC_STATUS_D1__REDC_CNT_MASK 0xffff0000L
+#define MC_TRAIN_EDC_STATUS_D1__REDC_CNT__SHIFT 0x00000010
+#define MC_TRAIN_EDC_STATUS_D1__WEDC_CNT_MASK 0x0000ffffL
+#define MC_TRAIN_EDC_STATUS_D1__WEDC_CNT__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_0_D0__DQ_STATUS_MASK 0xffffffffL
+#define MC_TRAIN_PRBSERR_0_D0__DQ_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_0_D1__DQ_STATUS_MASK 0xffffffffL
+#define MC_TRAIN_PRBSERR_0_D1__DQ_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D0__DBI_STATUS_MASK 0x0000000fL
+#define MC_TRAIN_PRBSERR_1_D0__DBI_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D0__EDC_STATUS_MASK 0x000000f0L
+#define MC_TRAIN_PRBSERR_1_D0__EDC_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_1_D0__PMA_PRBSCLR_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMA_PRBSCLR__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_1_D0__PMD0_PRBSCLR_MASK 0x20000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMD0_PRBSCLR__SHIFT 0x0000001d
+#define MC_TRAIN_PRBSERR_1_D0__PMD1_PRBSCLR_MASK 0x40000000L
+#define MC_TRAIN_PRBSERR_1_D0__PMD1_PRBSCLR__SHIFT 0x0000001e
+#define MC_TRAIN_PRBSERR_1_D0__WCDR_STATUS_MASK 0x0000f000L
+#define MC_TRAIN_PRBSERR_1_D0__WCDR_STATUS__SHIFT 0x0000000c
+#define MC_TRAIN_PRBSERR_1_D0__WCK_STATUS_MASK 0x00000f00L
+#define MC_TRAIN_PRBSERR_1_D0__WCK_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_1_D1__DBI_STATUS_MASK 0x0000000fL
+#define MC_TRAIN_PRBSERR_1_D1__DBI_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_1_D1__EDC_STATUS_MASK 0x000000f0L
+#define MC_TRAIN_PRBSERR_1_D1__EDC_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_1_D1__PMA_PRBSCLR_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMA_PRBSCLR__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_1_D1__PMD0_PRBSCLR_MASK 0x20000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMD0_PRBSCLR__SHIFT 0x0000001d
+#define MC_TRAIN_PRBSERR_1_D1__PMD1_PRBSCLR_MASK 0x40000000L
+#define MC_TRAIN_PRBSERR_1_D1__PMD1_PRBSCLR__SHIFT 0x0000001e
+#define MC_TRAIN_PRBSERR_1_D1__WCDR_STATUS_MASK 0x0000f000L
+#define MC_TRAIN_PRBSERR_1_D1__WCDR_STATUS__SHIFT 0x0000000c
+#define MC_TRAIN_PRBSERR_1_D1__WCK_STATUS_MASK 0x00000f00L
+#define MC_TRAIN_PRBSERR_1_D1__WCK_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D0__ABI_STATUS_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_2_D0__ABI_STATUS__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_2_D0__ADDR_STATUS_MASK 0x03ff0000L
+#define MC_TRAIN_PRBSERR_2_D0__ADDR_STATUS__SHIFT 0x00000010
+#define MC_TRAIN_PRBSERR_2_D0__CAS_STATUS_MASK 0x00000400L
+#define MC_TRAIN_PRBSERR_2_D0__CAS_STATUS__SHIFT 0x0000000a
+#define MC_TRAIN_PRBSERR_2_D0__CKB_STATUS_MASK 0x00000002L
+#define MC_TRAIN_PRBSERR_2_D0__CKB_STATUS__SHIFT 0x00000001
+#define MC_TRAIN_PRBSERR_2_D0__CKE_STATUS_MASK 0x00000100L
+#define MC_TRAIN_PRBSERR_2_D0__CKE_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D0__CK_STATUS_MASK 0x00000001L
+#define MC_TRAIN_PRBSERR_2_D0__CK_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_2_D0__CS_STATUS_MASK 0x00000030L
+#define MC_TRAIN_PRBSERR_2_D0__CS_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_2_D0__RAS_STATUS_MASK 0x00000200L
+#define MC_TRAIN_PRBSERR_2_D0__RAS_STATUS__SHIFT 0x00000009
+#define MC_TRAIN_PRBSERR_2_D0__WE_STATUS_MASK 0x00000800L
+#define MC_TRAIN_PRBSERR_2_D0__WE_STATUS__SHIFT 0x0000000b
+#define MC_TRAIN_PRBSERR_2_D1__ABI_STATUS_MASK 0x10000000L
+#define MC_TRAIN_PRBSERR_2_D1__ABI_STATUS__SHIFT 0x0000001c
+#define MC_TRAIN_PRBSERR_2_D1__ADDR_STATUS_MASK 0x03ff0000L
+#define MC_TRAIN_PRBSERR_2_D1__ADDR_STATUS__SHIFT 0x00000010
+#define MC_TRAIN_PRBSERR_2_D1__CAS_STATUS_MASK 0x00000400L
+#define MC_TRAIN_PRBSERR_2_D1__CAS_STATUS__SHIFT 0x0000000a
+#define MC_TRAIN_PRBSERR_2_D1__CKB_STATUS_MASK 0x00000002L
+#define MC_TRAIN_PRBSERR_2_D1__CKB_STATUS__SHIFT 0x00000001
+#define MC_TRAIN_PRBSERR_2_D1__CKE_STATUS_MASK 0x00000100L
+#define MC_TRAIN_PRBSERR_2_D1__CKE_STATUS__SHIFT 0x00000008
+#define MC_TRAIN_PRBSERR_2_D1__CK_STATUS_MASK 0x00000001L
+#define MC_TRAIN_PRBSERR_2_D1__CK_STATUS__SHIFT 0x00000000
+#define MC_TRAIN_PRBSERR_2_D1__CS_STATUS_MASK 0x00000030L
+#define MC_TRAIN_PRBSERR_2_D1__CS_STATUS__SHIFT 0x00000004
+#define MC_TRAIN_PRBSERR_2_D1__RAS_STATUS_MASK 0x00000200L
+#define MC_TRAIN_PRBSERR_2_D1__RAS_STATUS__SHIFT 0x00000009
+#define MC_TRAIN_PRBSERR_2_D1__WE_STATUS_MASK 0x00000800L
+#define MC_TRAIN_PRBSERR_2_D1__WE_STATUS__SHIFT 0x0000000b
+#define MC_TSM_DEBUG_BCNT0__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT0__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT0__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT0__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT0__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT0__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT0__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT0__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT10__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT10__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT10__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT10__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT10__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT10__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT10__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT10__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT1__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT1__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT1__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT1__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT1__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT1__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT1__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT1__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT2__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT2__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT2__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT2__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT2__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT2__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT2__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT2__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT3__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT3__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT3__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT3__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT3__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT3__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT3__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT3__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT4__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT4__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT4__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT4__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT4__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT4__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT4__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT4__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT5__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT5__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT5__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT5__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT5__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT5__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT5__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT5__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT6__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT6__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT6__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT6__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT6__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT6__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT6__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT6__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT7__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT7__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT7__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT7__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT7__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT7__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT7__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT7__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT8__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT8__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT8__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT8__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT8__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT8__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT8__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT8__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BCNT9__BYTE0_MASK 0x000000ffL
+#define MC_TSM_DEBUG_BCNT9__BYTE0__SHIFT 0x00000000
+#define MC_TSM_DEBUG_BCNT9__BYTE1_MASK 0x0000ff00L
+#define MC_TSM_DEBUG_BCNT9__BYTE1__SHIFT 0x00000008
+#define MC_TSM_DEBUG_BCNT9__BYTE2_MASK 0x00ff0000L
+#define MC_TSM_DEBUG_BCNT9__BYTE2__SHIFT 0x00000010
+#define MC_TSM_DEBUG_BCNT9__BYTE3_MASK 0xff000000L
+#define MC_TSM_DEBUG_BCNT9__BYTE3__SHIFT 0x00000018
+#define MC_TSM_DEBUG_BKPT__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_BKPT__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_FLAG__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_FLAG__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_GCNT__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_GCNT__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_MISC__FLAG_MASK 0x000000ffL
+#define MC_TSM_DEBUG_MISC__FLAG__SHIFT 0x00000000
+#define MC_TSM_DEBUG_MISC__NCNT_RD_MASK 0x00000f00L
+#define MC_TSM_DEBUG_MISC__NCNT_RD__SHIFT 0x00000008
+#define MC_TSM_DEBUG_MISC__NCNT_WR_MASK 0x0000f000L
+#define MC_TSM_DEBUG_MISC__NCNT_WR__SHIFT 0x0000000c
+#define MC_TSM_DEBUG_ST01__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST01__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_ST23__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST23__DATA__SHIFT 0x00000000
+#define MC_TSM_DEBUG_ST45__DATA_MASK 0xffffffffL
+#define MC_TSM_DEBUG_ST45__DATA__SHIFT 0x00000000
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x0003ffffL
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x00000000
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x0003ffffL
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x00000000
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x0003ffffL
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_LOCAL_MASK 0x00000100L
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_LOCAL__SHIFT 0x00000008
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_SYSTEM_MASK 0x00000200L
+#define MC_VM_DC_WRITE_CNTL__DC_MEMORY_WRITE_SYSTEM__SHIFT 0x00000009
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_0_MODE_MASK 0x00000003L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_0_MODE__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_1_MODE_MASK 0x0000000cL
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_1_MODE__SHIFT 0x00000002
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_2_MODE_MASK 0x00000030L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_2_MODE__SHIFT 0x00000004
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_3_MODE_MASK 0x000000c0L
+#define MC_VM_DC_WRITE_CNTL__DC_WRITE_HIT_REGION_3_MODE__SHIFT 0x00000006
+#define MC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_0_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_0_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_1_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_1_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_2_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_2_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_3_HIGH_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR__PHYSICAL_ADDRESS_MASK 0x0fffffffL
+#define MC_VM_DC_WRITE_HIT_REGION_3_LOW_ADDR__PHYSICAL_ADDRESS__SHIFT 0x00000000
+#define MC_VM_FB_LOCATION__FB_BASE_MASK 0x0000ffffL
+#define MC_VM_FB_LOCATION__FB_BASE__SHIFT 0x00000000
+#define MC_VM_FB_LOCATION__FB_TOP_MASK 0xffff0000L
+#define MC_VM_FB_LOCATION__FB_TOP__SHIFT 0x00000010
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x0003ffffL
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB0_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB0_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB0_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB0_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB0_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB0_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB0_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB1_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB2_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB2_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB2_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB2_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB2_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB2_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB2_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MB_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MB_L1_TLB3_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB3_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MB_L1_TLB3_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MB_L1_TLB3_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MB_L1_TLB3_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MB_L1_TLB3_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MB_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MB_L1_TLB3_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MB_L2ARBITER_L2_CREDITS__L2_IF_CREDITS_MASK 0x0000003fL
+#define MC_VM_MB_L2ARBITER_L2_CREDITS__L2_IF_CREDITS__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB0_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB0_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB0_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB0_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB0_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB0_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB0_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB0_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB1_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB1_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB1_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB1_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB1_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB1_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB1_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB1_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB2_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB2_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB2_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB2_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB2_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB2_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB2_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB2_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00007000L
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_QUEUE_SIZE__SHIFT 0x0000000c
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE_MASK 0x00000e00L
+#define MC_VM_MD_L1_TLB3_DEBUG__EFFECTIVE_L1_TLB_SIZE__SHIFT 0x00000009
+#define MC_VM_MD_L1_TLB3_DEBUG__INVALIDATE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB3_DEBUG__INVALIDATE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MD_L1_TLB3_DEBUG__L1_TLB_DEBUG_MASK 0x00078000L
+#define MC_VM_MD_L1_TLB3_DEBUG__L1_TLB_DEBUG__SHIFT 0x0000000f
+#define MC_VM_MD_L1_TLB3_DEBUG__SEND_FREE_AT_RTN_MASK 0x00000100L
+#define MC_VM_MD_L1_TLB3_DEBUG__SEND_FREE_AT_RTN__SHIFT 0x00000008
+#define MC_VM_MD_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MD_L1_TLB3_STATUS__BUSY__SHIFT 0x00000000
+#define MC_VM_MD_L2ARBITER_L2_CREDITS__L2_IF_CREDITS_MASK 0x0000003fL
+#define MC_VM_MD_L2ARBITER_L2_CREDITS__L2_IF_CREDITS__SHIFT 0x00000000
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x00000007
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x00000006
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING__SHIFT 0x00000001
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x00000000
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x00000003
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x00000005
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define MC_WR_CB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_CB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_CB__ENABLE_MASK 0x00000001L
+#define MC_WR_CB__ENABLE__SHIFT 0x00000000
+#define MC_WR_CB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_CB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_CB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_CB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_CB__PRESCALE_MASK 0x00000006L
+#define MC_WR_CB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_CB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_CB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_CB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_CB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_CB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_CB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_DB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_DB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_DB__ENABLE_MASK 0x00000001L
+#define MC_WR_DB__ENABLE__SHIFT 0x00000000
+#define MC_WR_DB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_DB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_DB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_DB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_DB__PRESCALE_MASK 0x00000006L
+#define MC_WR_DB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_DB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_DB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_DB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_DB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_DB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_DB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_GRP_EXT__DBSTEN0_MASK 0x0000000fL
+#define MC_WR_GRP_EXT__DBSTEN0__SHIFT 0x00000000
+#define MC_WR_GRP_EXT__TC0_MASK 0x000000f0L
+#define MC_WR_GRP_EXT__TC0__SHIFT 0x00000004
+#define MC_WR_GRP_GFX__CP_MASK 0x0000000fL
+#define MC_WR_GRP_GFX__CP__SHIFT 0x00000000
+#define MC_WR_GRP_GFX__XDMA_MASK 0x0000f000L
+#define MC_WR_GRP_GFX__XDMAM_MASK 0x000f0000L
+#define MC_WR_GRP_GFX__XDMAM__SHIFT 0x00000010
+#define MC_WR_GRP_GFX__XDMA__SHIFT 0x0000000c
+#define MC_WR_GRP_LCL__CB0_MASK 0x0000000fL
+#define MC_WR_GRP_LCL__CB0__SHIFT 0x00000000
+#define MC_WR_GRP_LCL__CBCMASK0_MASK 0x000000f0L
+#define MC_WR_GRP_LCL__CBCMASK0__SHIFT 0x00000004
+#define MC_WR_GRP_LCL__CBFMASK0_MASK 0x00000f00L
+#define MC_WR_GRP_LCL__CBFMASK0__SHIFT 0x00000008
+#define MC_WR_GRP_LCL__CBIMMED0_MASK 0xf0000000L
+#define MC_WR_GRP_LCL__CBIMMED0__SHIFT 0x0000001c
+#define MC_WR_GRP_LCL__DB0_MASK 0x0000f000L
+#define MC_WR_GRP_LCL__DB0__SHIFT 0x0000000c
+#define MC_WR_GRP_LCL__DBHTILE0_MASK 0x000f0000L
+#define MC_WR_GRP_LCL__DBHTILE0__SHIFT 0x00000010
+#define MC_WR_GRP_LCL__SX0_MASK 0x00f00000L
+#define MC_WR_GRP_LCL__SX0__SHIFT 0x00000014
+#define MC_WR_GRP_OTH__HDP_MASK 0x00000f00L
+#define MC_WR_GRP_OTH__HDP__SHIFT 0x00000008
+#define MC_WR_GRP_OTH__SEM_MASK 0x0000f000L
+#define MC_WR_GRP_OTH__SEM__SHIFT 0x0000000c
+#define MC_WR_GRP_OTH__UMC_MASK 0x000f0000L
+#define MC_WR_GRP_OTH__UMC__SHIFT 0x00000010
+#define MC_WR_GRP_OTH__UVD_EXT0_MASK 0x0000000fL
+#define MC_WR_GRP_OTH__UVD_EXT0__SHIFT 0x00000000
+#define MC_WR_GRP_OTH__UVD_EXT1_MASK 0xf0000000L
+#define MC_WR_GRP_OTH__UVD_EXT1__SHIFT 0x0000001c
+#define MC_WR_GRP_OTH__UVD_MASK 0x00f00000L
+#define MC_WR_GRP_OTH__UVD__SHIFT 0x00000014
+#define MC_WR_GRP_OTH__XDP_MASK 0x0f000000L
+#define MC_WR_GRP_OTH__XDP__SHIFT 0x00000018
+#define MC_WR_GRP_SYS__IH_MASK 0x0000000fL
+#define MC_WR_GRP_SYS__IH__SHIFT 0x00000000
+#define MC_WR_GRP_SYS__MCIF_MASK 0x000000f0L
+#define MC_WR_GRP_SYS__MCIF__SHIFT 0x00000004
+#define MC_WR_GRP_SYS__RLC_MASK 0x00000f00L
+#define MC_WR_GRP_SYS__RLC__SHIFT 0x00000008
+#define MC_WR_GRP_SYS__SMU_MASK 0x00f00000L
+#define MC_WR_GRP_SYS__SMU__SHIFT 0x00000014
+#define MC_WR_GRP_SYS__VCE_MASK 0x0f000000L
+#define MC_WR_GRP_SYS__VCE__SHIFT 0x00000018
+#define MC_WR_GRP_SYS__VCEU_MASK 0xf0000000L
+#define MC_WR_GRP_SYS__VCEU__SHIFT 0x0000001c
+#define MC_WR_HUB__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_HUB__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_HUB__ENABLE_MASK 0x00000001L
+#define MC_WR_HUB__ENABLE__SHIFT 0x00000000
+#define MC_WR_HUB__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_HUB__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_HUB__MAX_BURST_MASK 0x00000780L
+#define MC_WR_HUB__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_HUB__PRESCALE_MASK 0x00000006L
+#define MC_WR_HUB__PRESCALE__SHIFT 0x00000001
+#define MC_WR_HUB__STALL_MODE_MASK 0x00000030L
+#define MC_WR_HUB__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_HUB__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_HUB__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_HUB__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_HUB__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_TC0__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_TC0__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_TC0__ENABLE_MASK 0x00000001L
+#define MC_WR_TC0__ENABLE__SHIFT 0x00000000
+#define MC_WR_TC0__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_TC0__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_TC0__MAX_BURST_MASK 0x00000780L
+#define MC_WR_TC0__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_TC0__PRESCALE_MASK 0x00000006L
+#define MC_WR_TC0__PRESCALE__SHIFT 0x00000001
+#define MC_WR_TC0__STALL_MODE_MASK 0x00000030L
+#define MC_WR_TC0__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_TC0__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_TC0__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_TC0__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_TC0__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_WR_TC1__BLACKOUT_EXEMPT_MASK 0x00000008L
+#define MC_WR_TC1__BLACKOUT_EXEMPT__SHIFT 0x00000003
+#define MC_WR_TC1__ENABLE_MASK 0x00000001L
+#define MC_WR_TC1__ENABLE__SHIFT 0x00000000
+#define MC_WR_TC1__LAZY_TIMER_MASK 0x00007800L
+#define MC_WR_TC1__LAZY_TIMER__SHIFT 0x0000000b
+#define MC_WR_TC1__MAX_BURST_MASK 0x00000780L
+#define MC_WR_TC1__MAX_BURST__SHIFT 0x00000007
+#define MC_WR_TC1__PRESCALE_MASK 0x00000006L
+#define MC_WR_TC1__PRESCALE__SHIFT 0x00000001
+#define MC_WR_TC1__STALL_MODE_MASK 0x00000030L
+#define MC_WR_TC1__STALL_MODE__SHIFT 0x00000004
+#define MC_WR_TC1__STALL_OVERRIDE_MASK 0x00000040L
+#define MC_WR_TC1__STALL_OVERRIDE__SHIFT 0x00000006
+#define MC_WR_TC1__STALL_OVERRIDE_WTM_MASK 0x00008000L
+#define MC_WR_TC1__STALL_OVERRIDE_WTM__SHIFT 0x0000000f
+#define MC_XBAR_ADDR_DEC__GECC_MASK 0x00000002L
+#define MC_XBAR_ADDR_DEC__GECC__SHIFT 0x00000001
+#define MC_XBAR_ADDR_DEC__NO_DIV_BY_3_MASK 0x00000001L
+#define MC_XBAR_ADDR_DEC__NO_DIV_BY_3__SHIFT 0x00000000
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_COLHI_MASK 0x00000008L
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_COLHI__SHIFT 0x00000003
+#define MC_XBAR_ADDR_DEC__RB_SPLIT_MASK 0x00000004L
+#define MC_XBAR_ADDR_DEC__RB_SPLIT__SHIFT 0x00000002
+#define MC_XBAR_ARB__BREAK_BURST_CID_CHANGE_MASK 0x00000004L
+#define MC_XBAR_ARB__BREAK_BURST_CID_CHANGE__SHIFT 0x00000002
+#define MC_XBAR_ARB__DISABLE_HUB_STALL_HIGHEST_MASK 0x00000002L
+#define MC_XBAR_ARB__DISABLE_HUB_STALL_HIGHEST__SHIFT 0x00000001
+#define MC_XBAR_ARB__HUBRD_HIGHEST_MASK 0x00000001L
+#define MC_XBAR_ARB__HUBRD_HIGHEST__SHIFT 0x00000000
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT0_MASK 0x0000000fL
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT0__SHIFT 0x00000000
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT1_MASK 0x000000f0L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT1__SHIFT 0x00000004
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT2_MASK 0x00000f00L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT2__SHIFT 0x00000008
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT3_MASK 0x0000f000L
+#define MC_XBAR_ARB_MAX_BURST__RD_PORT3__SHIFT 0x0000000c
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT0_MASK 0x000f0000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT0__SHIFT 0x00000010
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT1_MASK 0x00f00000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT1__SHIFT 0x00000014
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT2_MASK 0x0f000000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT2__SHIFT 0x00000018
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT3_MASK 0xf0000000L
+#define MC_XBAR_ARB_MAX_BURST__WR_PORT3__SHIFT 0x0000001c
+#define MC_XBAR_CHTRIREMAP__CH0_MASK 0x00000003L
+#define MC_XBAR_CHTRIREMAP__CH0__SHIFT 0x00000000
+#define MC_XBAR_CHTRIREMAP__CH1_MASK 0x0000000cL
+#define MC_XBAR_CHTRIREMAP__CH1__SHIFT 0x00000002
+#define MC_XBAR_CHTRIREMAP__CH2_MASK 0x00000030L
+#define MC_XBAR_CHTRIREMAP__CH2__SHIFT 0x00000004
+#define MC_XBAR_PERF_MON_CNTL0__ALLOW_WRAP_MASK 0x10000000L
+#define MC_XBAR_PERF_MON_CNTL0__ALLOW_WRAP__SHIFT 0x0000001c
+#define MC_XBAR_PERF_MON_CNTL0__START_MODE_MASK 0x03000000L
+#define MC_XBAR_PERF_MON_CNTL0__START_MODE__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_CNTL0__START_THRESH_MASK 0x00000fffL
+#define MC_XBAR_PERF_MON_CNTL0__START_THRESH__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL0__STOP_MODE_MASK 0x0c000000L
+#define MC_XBAR_PERF_MON_CNTL0__STOP_MODE__SHIFT 0x0000001a
+#define MC_XBAR_PERF_MON_CNTL0__STOP_THRESH_MASK 0x00fff000L
+#define MC_XBAR_PERF_MON_CNTL0__STOP_THRESH__SHIFT 0x0000000c
+#define MC_XBAR_PERF_MON_CNTL1__START_TRIG_ID_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_CNTL1__START_TRIG_ID__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_CNTL1__STOP_TRIG_ID_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_CNTL1__STOP_TRIG_ID__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_CNTL1__THRESH_CNTR_ID_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_CNTL1__THRESH_CNTR_ID__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL2__MON0_ID_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_CNTL2__MON0_ID__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_CNTL2__MON1_ID_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_CNTL2__MON1_ID__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_CNTL2__MON2_ID_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_CNTL2__MON2_ID__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_CNTL2__MON3_ID_MASK 0xff000000L
+#define MC_XBAR_PERF_MON_CNTL2__MON3_ID__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_MAX_THSH__MON0_MASK 0x000000ffL
+#define MC_XBAR_PERF_MON_MAX_THSH__MON0__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_MAX_THSH__MON1_MASK 0x0000ff00L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON1__SHIFT 0x00000008
+#define MC_XBAR_PERF_MON_MAX_THSH__MON2_MASK 0x00ff0000L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON2__SHIFT 0x00000010
+#define MC_XBAR_PERF_MON_MAX_THSH__MON3_MASK 0xff000000L
+#define MC_XBAR_PERF_MON_MAX_THSH__MON3__SHIFT 0x00000018
+#define MC_XBAR_PERF_MON_RSLT0__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT0__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT1__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT1__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT2__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT2__COUNT__SHIFT 0x00000000
+#define MC_XBAR_PERF_MON_RSLT3__COUNT_MASK 0xffffffffL
+#define MC_XBAR_PERF_MON_RSLT3__COUNT__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDREQ_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDREQ_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDREQ_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDREQ_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDREQ_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDREQ_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDREQ_PRI_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDRET_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDRET_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDRET_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDRET_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDRET_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_CREDIT2__HUB_LP_RDRET_SKID_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_CREDIT2__HUB_LP_RDRET_SKID__SHIFT 0x00000010
+#define MC_XBAR_RDRET_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_RDRET_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_RDRET_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_RDRET_PRI_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_RDRET_PRI_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XBAR_REMOTE__RDREQ_EN_GOQ_MASK 0x00000002L
+#define MC_XBAR_REMOTE__RDREQ_EN_GOQ__SHIFT 0x00000001
+#define MC_XBAR_REMOTE__WRREQ_EN_GOQ_MASK 0x00000001L
+#define MC_XBAR_REMOTE__WRREQ_EN_GOQ__SHIFT 0x00000000
+#define MC_XBAR_SPARE0__BIT_MASK 0xffffffffL
+#define MC_XBAR_SPARE0__BIT__SHIFT 0x00000000
+#define MC_XBAR_SPARE1__BIT_MASK 0xffffffffL
+#define MC_XBAR_SPARE1__BIT__SHIFT 0x00000000
+#define MC_XBAR_TWOCHAN__CH0_MASK 0x00000006L
+#define MC_XBAR_TWOCHAN__CH0__SHIFT 0x00000001
+#define MC_XBAR_TWOCHAN__CH1_MASK 0x00000018L
+#define MC_XBAR_TWOCHAN__CH1__SHIFT 0x00000003
+#define MC_XBAR_TWOCHAN__DISABLE_ONEPORT_MASK 0x00000001L
+#define MC_XBAR_TWOCHAN__DISABLE_ONEPORT__SHIFT 0x00000000
+#define MC_XBAR_WRREQ_CREDIT__OUT0_MASK 0x000000ffL
+#define MC_XBAR_WRREQ_CREDIT__OUT0__SHIFT 0x00000000
+#define MC_XBAR_WRREQ_CREDIT__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_WRREQ_CREDIT__OUT1__SHIFT 0x00000008
+#define MC_XBAR_WRREQ_CREDIT__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_WRREQ_CREDIT__OUT2__SHIFT 0x00000010
+#define MC_XBAR_WRREQ_CREDIT__OUT3_MASK 0xff000000L
+#define MC_XBAR_WRREQ_CREDIT__OUT3__SHIFT 0x00000018
+#define MC_XBAR_WRRET_CREDIT1__OUT0_MASK 0x000000ffL
+#define MC_XBAR_WRRET_CREDIT1__OUT0__SHIFT 0x00000000
+#define MC_XBAR_WRRET_CREDIT1__OUT1_MASK 0x0000ff00L
+#define MC_XBAR_WRRET_CREDIT1__OUT1__SHIFT 0x00000008
+#define MC_XBAR_WRRET_CREDIT1__OUT2_MASK 0x00ff0000L
+#define MC_XBAR_WRRET_CREDIT1__OUT2__SHIFT 0x00000010
+#define MC_XBAR_WRRET_CREDIT1__OUT3_MASK 0xff000000L
+#define MC_XBAR_WRRET_CREDIT1__OUT3__SHIFT 0x00000018
+#define MC_XBAR_WRRET_CREDIT2__OUT4_MASK 0x000000ffL
+#define MC_XBAR_WRRET_CREDIT2__OUT4__SHIFT 0x00000000
+#define MC_XBAR_WRRET_CREDIT2__OUT5_MASK 0x0000ff00L
+#define MC_XBAR_WRRET_CREDIT2__OUT5__SHIFT 0x00000008
+#define MC_XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG0__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG0__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG0__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG10__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG10__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG10__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG10__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG10__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG10__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG10__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG10__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG10__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG10__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG11__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG11__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG11__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG11__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG11__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG11__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG11__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG11__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG11__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG11__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG12__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG12__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG12__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG12__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG12__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG12__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG12__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG12__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG12__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG12__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG13__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG13__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG13__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG13__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG13__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG13__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG13__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG13__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG13__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG13__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG14__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG14__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG14__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG14__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG14__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG14__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG14__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG14__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG14__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG14__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG15__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG15__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG15__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG15__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG15__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG15__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG15__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG15__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG15__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG15__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG16__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG16__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG16__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG16__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG16__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG16__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG16__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG16__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG16__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG16__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG17__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG17__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG17__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG17__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG17__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG17__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG17__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG17__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG17__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG17__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG18__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG18__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG18__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG18__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG18__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG18__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG18__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG18__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG18__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG18__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG19__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG19__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG19__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG19__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG19__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG19__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG19__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG19__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG19__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG19__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG1__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG1__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG1__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG20__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG20__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG20__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG20__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG20__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG20__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG20__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG20__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG20__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG20__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG21__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG21__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG21__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG21__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG21__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG21__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG21__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG21__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG21__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG21__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG22__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG22__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG22__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG22__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG22__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG22__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG22__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG22__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG22__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG22__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG23__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG23__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG23__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG23__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG23__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG23__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG23__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG23__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG23__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG23__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG24__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG24__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG24__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG24__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG24__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG24__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG24__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG24__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG24__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG24__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG25__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG25__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG25__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG25__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG25__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG25__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG25__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG25__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG25__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG25__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG26__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG26__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG26__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG26__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG26__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG26__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG26__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG26__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG26__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG26__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG27__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG27__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG27__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG27__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG27__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG27__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG27__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG27__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG27__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG27__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG28__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG28__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG28__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG28__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG28__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG28__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG28__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG28__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG28__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG28__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG29__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG29__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG29__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG29__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG29__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG29__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG29__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG29__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG29__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG29__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG2__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG2__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG2__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG30__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG30__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG30__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG30__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG30__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG30__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG30__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG30__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG30__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG30__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG31__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG31__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG31__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG31__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG31__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG31__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG31__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG31__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG31__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG31__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG32__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG32__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG32__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG32__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG32__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG32__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG32__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG32__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG32__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG32__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG33__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG33__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG33__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG33__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG33__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG33__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG33__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG33__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG33__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG33__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG34__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG34__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG34__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG34__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG34__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG34__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG34__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG34__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG34__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG34__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG35__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG35__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG35__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG35__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG35__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG35__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG35__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG35__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG35__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG35__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG36__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG36__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG36__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG36__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG36__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG36__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG36__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG36__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG36__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG36__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG3__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG3__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG3__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG4__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG4__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG4__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG5__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG5__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG5__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG6__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG6__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG6__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG7__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG7__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG7__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG8__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG8__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG8__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG8__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG8__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG8__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG8__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG8__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG8__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG8__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_CFG9__HOST_FLUSH_MASK 0x00003c00L
+#define MC_XPB_CLG_CFG9__HOST_FLUSH__SHIFT 0x0000000a
+#define MC_XPB_CLG_CFG9__LB_TYPE_MASK 0x00000070L
+#define MC_XPB_CLG_CFG9__LB_TYPE__SHIFT 0x00000004
+#define MC_XPB_CLG_CFG9__P2P_BAR_MASK 0x00000380L
+#define MC_XPB_CLG_CFG9__P2P_BAR__SHIFT 0x00000007
+#define MC_XPB_CLG_CFG9__SIDE_FLUSH_MASK 0x0003c000L
+#define MC_XPB_CLG_CFG9__SIDE_FLUSH__SHIFT 0x0000000e
+#define MC_XPB_CLG_CFG9__WCB_NUM_MASK 0x0000000fL
+#define MC_XPB_CLG_CFG9__WCB_NUM__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA__CMP0_MASK 0x000000ffL
+#define MC_XPB_CLG_EXTRA__CMP0__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA__CMP1_MASK 0x01fe0000L
+#define MC_XPB_CLG_EXTRA__CMP1__SHIFT 0x00000011
+#define MC_XPB_CLG_EXTRA__MSK0_MASK 0x0000ff00L
+#define MC_XPB_CLG_EXTRA__MSK0__SHIFT 0x00000008
+#define MC_XPB_CLG_EXTRA_RD__CMP0_MASK 0x000000ffL
+#define MC_XPB_CLG_EXTRA_RD__CMP0__SHIFT 0x00000000
+#define MC_XPB_CLG_EXTRA_RD__CMP1_MASK 0x01fe0000L
+#define MC_XPB_CLG_EXTRA_RD__CMP1__SHIFT 0x00000011
+#define MC_XPB_CLG_EXTRA_RD__MSK0_MASK 0x0000ff00L
+#define MC_XPB_CLG_EXTRA_RD__MSK0__SHIFT 0x00000008
+#define MC_XPB_CLG_EXTRA_RD__VLD0_MASK 0x00010000L
+#define MC_XPB_CLG_EXTRA_RD__VLD0__SHIFT 0x00000010
+#define MC_XPB_CLG_EXTRA_RD__VLD1_MASK 0x02000000L
+#define MC_XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x00000019
+#define MC_XPB_CLG_EXTRA__VLD0_MASK 0x00010000L
+#define MC_XPB_CLG_EXTRA__VLD0__SHIFT 0x00000010
+#define MC_XPB_CLG_EXTRA__VLD1_MASK 0x02000000L
+#define MC_XPB_CLG_EXTRA__VLD1__SHIFT 0x00000019
+#define MC_XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define MC_XPB_CLK_GAT__ENABLE__SHIFT 0x00000012
+#define MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+#define MC_XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x00000013
+#define MC_XPB_CLK_GAT__OFFDLY_MASK 0x00000fc0L
+#define MC_XPB_CLK_GAT__OFFDLY__SHIFT 0x00000006
+#define MC_XPB_CLK_GAT__ONDLY_MASK 0x0000003fL
+#define MC_XPB_CLK_GAT__ONDLY__SHIFT 0x00000000
+#define MC_XPB_CLK_GAT__RDYDLY_MASK 0x0003f000L
+#define MC_XPB_CLK_GAT__RDYDLY__SHIFT 0x0000000c
+#define MC_XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_SEL_MASK 0x02000000L
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_SEL__SHIFT 0x00000019
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_VAL_MASK 0x04000000L
+#define MC_XPB_INTF_CFG__BIF_MEM_SNOOP_VAL__SHIFT 0x0000001a
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_SEL_MASK 0x00800000L
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_SEL__SHIFT 0x00000017
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_VAL_MASK 0x01000000L
+#define MC_XPB_INTF_CFG__BIF_REG_SNOOP_VAL__SHIFT 0x00000018
+#define MC_XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000ff00L
+#define MC_XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x00000008
+#define MC_XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define MC_XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x0000001e
+#define MC_XPB_INTF_CFG__XSP_ORDERING_VAL_MASK 0x80000000L
+#define MC_XPB_INTF_CFG__XSP_ORDERING_VAL__SHIFT 0x0000001f
+#define MC_XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007f0000L
+#define MC_XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x00000010
+#define MC_XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define MC_XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x0000001b
+#define MC_XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define MC_XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x0000001d
+#define MC_XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define MC_XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x00000012
+#define MC_XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define MC_XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x00000011
+#define MC_XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define MC_XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x00000010
+#define MC_XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define MC_XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0x0000000f
+#define MC_XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07f80000L
+#define MC_XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x00000013
+#define MC_XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000ffL
+#define MC_XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x00000000
+#define MC_XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007f00L
+#define MC_XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x00000008
+#define MC_XPB_LB_ADDR__CMP0_MASK 0x000003ffL
+#define MC_XPB_LB_ADDR__CMP0__SHIFT 0x00000000
+#define MC_XPB_LB_ADDR__CMP1_MASK 0x03f00000L
+#define MC_XPB_LB_ADDR__CMP1__SHIFT 0x00000014
+#define MC_XPB_LB_ADDR__MASK0_MASK 0x000ffc00L
+#define MC_XPB_LB_ADDR__MASK0__SHIFT 0x0000000a
+#define MC_XPB_LB_ADDR__MASK1_MASK 0xfc000000L
+#define MC_XPB_LB_ADDR__MASK1__SHIFT 0x0000001a
+#define MC_XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000ffffL
+#define MC_XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x00000000
+#define MC_XPB_MISC_CFG__FIELDNAME0_MASK 0x000000ffL
+#define MC_XPB_MISC_CFG__FIELDNAME0__SHIFT 0x00000000
+#define MC_XPB_MISC_CFG__FIELDNAME1_MASK 0x0000ff00L
+#define MC_XPB_MISC_CFG__FIELDNAME1__SHIFT 0x00000008
+#define MC_XPB_MISC_CFG__FIELDNAME2_MASK 0x00ff0000L
+#define MC_XPB_MISC_CFG__FIELDNAME2__SHIFT 0x00000010
+#define MC_XPB_MISC_CFG__FIELDNAME3_MASK 0x7f000000L
+#define MC_XPB_MISC_CFG__FIELDNAME3__SHIFT 0x00000018
+#define MC_XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+#define MC_XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x0000001f
+#define MC_XPB_P2P_BAR0__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR0__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR0__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR0__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR0__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR0__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR1__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR1__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR1__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR1__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR1__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR1__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR2__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR2__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR2__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR2__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR2__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR2__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR3__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR3__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR3__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR3__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR3__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR3__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR4__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR4__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR4__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR4__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR4__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR4__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR5__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR5__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR5__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR5__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR5__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR5__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR6__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR6__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR6__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR6__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR6__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR6__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR7__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR7__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000f0L
+#define MC_XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR7__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR7__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR7__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR7__VALID__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000fL
+#define MC_XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+#define MC_XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define MC_XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define MC_XPB_P2P_BAR_CFG__RD_EN__SHIFT 0x0000000b
+#define MC_XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define MC_XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0x0000000a
+#define MC_XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define MC_XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x00000004
+#define MC_XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define MC_XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x00000007
+#define MC_XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define MC_XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x00000006
+#define MC_XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define MC_XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x00000009
+#define MC_XPB_P2P_BAR_DEBUG__HOST_FLUSH_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR_DEBUG__HOST_FLUSH__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DEBUG__MEM_SYS_BAR_MASK 0x0000f000L
+#define MC_XPB_P2P_BAR_DEBUG__MEM_SYS_BAR__SHIFT 0x0000000c
+#define MC_XPB_P2P_BAR_DEBUG__SEL_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DEBUG__SEL__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0fffff00L
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0fffff00L
+#define MC_XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xffff0000L
+#define MC_XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x00000010
+#define MC_XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define MC_XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0x0000000e
+#define MC_XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000f00L
+#define MC_XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x00000008
+#define MC_XPB_P2P_BAR_SETUP__RESERVED_MASK 0x00008000L
+#define MC_XPB_P2P_BAR_SETUP__RESERVED__SHIFT 0x0000000f
+#define MC_XPB_P2P_BAR_SETUP__SEL_MASK 0x000000ffL
+#define MC_XPB_P2P_BAR_SETUP__SEL__SHIFT 0x00000000
+#define MC_XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define MC_XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0x0000000d
+#define MC_XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define MC_XPB_P2P_BAR_SETUP__VALID__SHIFT 0x0000000c
+#define MC_XPB_PEER_SYS_BAR0__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR0__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR0__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR0__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR1__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR1__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR1__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR1__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR2__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR2__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR2__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR2__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR3__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR3__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR3__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR3__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR4__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR4__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR4__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR4__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR5__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR5__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR5__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR5__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR6__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR6__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR6__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR6__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR7__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR7__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR7__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR7__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR8__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR8__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR8__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR8__VALID__SHIFT 0x00000000
+#define MC_XPB_PEER_SYS_BAR9__ADDR_MASK 0x07fffffcL
+#define MC_XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x00000002
+#define MC_XPB_PEER_SYS_BAR9__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_PEER_SYS_BAR9__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define MC_XPB_PEER_SYS_BAR9__VALID__SHIFT 0x00000000
+#define MC_XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003fL
+#define MC_XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x00000000
+#define MC_XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000fc0L
+#define MC_XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x00000006
+#define MC_XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003f000L
+#define MC_XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0x0000000c
+#define MC_XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define MC_XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x00000017
+#define MC_XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define MC_XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x00000000
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000feL
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x00000001
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define MC_XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x00000015
+#define MC_XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define MC_XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0x0000000f
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x00000011
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define MC_XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x00000013
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007f00L
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x00000008
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define MC_XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x00000016
+#define MC_XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define MC_XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x00000010
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x00000012
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define MC_XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x00000014
+#define MC_XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xff000000L
+#define MC_XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP0__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP1__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP2__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP3__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP4__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP5__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP6__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP7__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP8__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define MC_XPB_RTR_DEST_MAP9__NMR__SHIFT 0x00000000
+#define MC_XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_STICKY__BITS_MASK 0xffffffffL
+#define MC_XPB_STICKY__BITS__SHIFT 0x00000000
+#define MC_XPB_STICKY_W1C__BITS_MASK 0xffffffffL
+#define MC_XPB_STICKY_W1C__BITS__SHIFT 0x00000000
+#define MC_XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+#define MC_XPB_SUB_CTRL__RESET_CGR__SHIFT 0x00000013
+#define MC_XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define MC_XPB_SUB_CTRL__RESET_CNS__SHIFT 0x0000000a
+#define MC_XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define MC_XPB_SUB_CTRL__RESET_HOP__SHIFT 0x00000010
+#define MC_XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define MC_XPB_SUB_CTRL__RESET_HST__SHIFT 0x0000000f
+#define MC_XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define MC_XPB_SUB_CTRL__RESET_MAP__SHIFT 0x0000000d
+#define MC_XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define MC_XPB_SUB_CTRL__RESET_RET__SHIFT 0x0000000c
+#define MC_XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define MC_XPB_SUB_CTRL__RESET_RTR__SHIFT 0x0000000b
+#define MC_XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define MC_XPB_SUB_CTRL__RESET_SID__SHIFT 0x00000011
+#define MC_XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define MC_XPB_SUB_CTRL__RESET_SRB__SHIFT 0x00000012
+#define MC_XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define MC_XPB_SUB_CTRL__RESET_WCB__SHIFT 0x0000000e
+#define MC_XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define MC_XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x00000001
+#define MC_XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define MC_XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x00000008
+#define MC_XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define MC_XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x00000004
+#define MC_XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define MC_XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x00000006
+#define MC_XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define MC_XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x00000003
+#define MC_XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define MC_XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x00000002
+#define MC_XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define MC_XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x00000007
+#define MC_XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define MC_XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x00000005
+#define MC_XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define MC_XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x00000009
+#define MC_XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define MC_XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_HST__CHANGE_PREF_MASK 0x0000003fL
+#define MC_XPB_UNC_THRESH_HST__CHANGE_PREF__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_HST__STRONG_PREF_MASK 0x00000fc0L
+#define MC_XPB_UNC_THRESH_HST__STRONG_PREF__SHIFT 0x00000006
+#define MC_XPB_UNC_THRESH_HST__USE_UNFULL_MASK 0x0003f000L
+#define MC_XPB_UNC_THRESH_HST__USE_UNFULL__SHIFT 0x0000000c
+#define MC_XPB_UNC_THRESH_SID__CHANGE_PREF_MASK 0x0000003fL
+#define MC_XPB_UNC_THRESH_SID__CHANGE_PREF__SHIFT 0x00000000
+#define MC_XPB_UNC_THRESH_SID__STRONG_PREF_MASK 0x00000fc0L
+#define MC_XPB_UNC_THRESH_SID__STRONG_PREF__SHIFT 0x00000006
+#define MC_XPB_UNC_THRESH_SID__USE_UNFULL_MASK 0x0003f000L
+#define MC_XPB_UNC_THRESH_SID__USE_UNFULL__SHIFT 0x0000000c
+#define MC_XPB_WCB_CFG__HST_MAX_MASK 0x00030000L
+#define MC_XPB_WCB_CFG__HST_MAX__SHIFT 0x00000010
+#define MC_XPB_WCB_CFG__SID_MAX_MASK 0x000c0000L
+#define MC_XPB_WCB_CFG__SID_MAX__SHIFT 0x00000012
+#define MC_XPB_WCB_CFG__TIMEOUT_MASK 0x0000ffffL
+#define MC_XPB_WCB_CFG__TIMEOUT__SHIFT 0x00000000
+#define MC_XPB_WCB_STS__PBUF_VLD_MASK 0x0000ffffL
+#define MC_XPB_WCB_STS__PBUF_VLD__SHIFT 0x00000000
+#define MC_XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007f0000L
+#define MC_XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x00000010
+#define MC_XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3f800000L
+#define MC_XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x00000017
+#define MC_XPB_XDMA_PEER_SYS_BAR0__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR0__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR0__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR0__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR0__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR1__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR1__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR1__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR1__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR1__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR2__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR2__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR2__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR2__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR2__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_PEER_SYS_BAR3__ADDR_MASK 0x07fffffcL
+#define MC_XPB_XDMA_PEER_SYS_BAR3__ADDR__SHIFT 0x00000002
+#define MC_XPB_XDMA_PEER_SYS_BAR3__SIDE_OK_MASK 0x00000002L
+#define MC_XPB_XDMA_PEER_SYS_BAR3__SIDE_OK__SHIFT 0x00000001
+#define MC_XPB_XDMA_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define MC_XPB_XDMA_PEER_SYS_BAR3__VALID__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7c000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x0000001a
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000ffffeL
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x00000001
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_MASK 0x00f00000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x00000018
+#define MC_XPB_XDMA_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x00000014
+#define MC_XPB_XDMA_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__NMR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L
+#define MC_XPB_XDMA_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x00000019
+#define MC_XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x00000000
+#define MC_XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x01ffffffL
+#define MC_XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x00000000
+#define MPLL_AD_FUNC_CNTL__SPARE_MASK 0xfffffff8L
+#define MPLL_AD_FUNC_CNTL__SPARE__SHIFT 0x00000003
+#define MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK 0x00000007L
+#define MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT 0x00000000
+#define MPLL_AD_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_AD_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_AD_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_AD_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_AD_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_AD_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_AD_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_AD_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_AD_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_AD_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_AD_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_AD_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_CNTL_MODE__FAST_LOCK_CNTRL_MASK 0x00600000L
+#define MPLL_CNTL_MODE__FAST_LOCK_CNTRL__SHIFT 0x00000015
+#define MPLL_CNTL_MODE__FAST_LOCK_EN_MASK 0x00100000L
+#define MPLL_CNTL_MODE__FAST_LOCK_EN__SHIFT 0x00000014
+#define MPLL_CNTL_MODE__FORCE_TESTMODE_MASK 0x00020000L
+#define MPLL_CNTL_MODE__FORCE_TESTMODE__SHIFT 0x00000011
+#define MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK 0x80000000L
+#define MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT 0x0000001f
+#define MPLL_CNTL_MODE__INSTR_DELAY_MASK 0x000000ffL
+#define MPLL_CNTL_MODE__INSTR_DELAY__SHIFT 0x00000000
+#define MPLL_CNTL_MODE__MPLL_CHG_STATUS_MASK 0x00010000L
+#define MPLL_CNTL_MODE__MPLL_CHG_STATUS__SHIFT 0x00000010
+#define MPLL_CNTL_MODE__MPLL_CTLREQ_MASK 0x00004000L
+#define MPLL_CNTL_MODE__MPLL_CTLREQ__SHIFT 0x0000000e
+#define MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK 0x00000800L
+#define MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT 0x0000000b
+#define MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK 0x00000100L
+#define MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT 0x00000008
+#define MPLL_CNTL_MODE__QDR_MASK 0x00002000L
+#define MPLL_CNTL_MODE__QDR__SHIFT 0x0000000d
+#define MPLL_CNTL_MODE__SPARE_1_MASK 0x00001000L
+#define MPLL_CNTL_MODE__SPARE_1__SHIFT 0x0000000c
+#define MPLL_CNTL_MODE__SPARE_2_MASK 0x00800000L
+#define MPLL_CNTL_MODE__SPARE_2__SHIFT 0x00000017
+#define MPLL_CNTL_MODE__SPARE_3_MASK 0x70000000L
+#define MPLL_CNTL_MODE__SPARE_3__SHIFT 0x0000001c
+#define MPLL_CNTL_MODE__SS_DSMODE_EN_MASK 0x04000000L
+#define MPLL_CNTL_MODE__SS_DSMODE_EN__SHIFT 0x0000001a
+#define MPLL_CNTL_MODE__SS_SSEN_MASK 0x03000000L
+#define MPLL_CNTL_MODE__SS_SSEN__SHIFT 0x00000018
+#define MPLL_CNTL_MODE__VTOI_BIAS_CNTRL_MASK 0x08000000L
+#define MPLL_CNTL_MODE__VTOI_BIAS_CNTRL__SHIFT 0x0000001b
+#define MPLL_CONTROL__AD_BG_PWRON_MASK 0x00001000L
+#define MPLL_CONTROL__AD_BG_PWRON__SHIFT 0x0000000c
+#define MPLL_CONTROL__AD_PLL_PWRON_MASK 0x00002000L
+#define MPLL_CONTROL__AD_PLL_PWRON__SHIFT 0x0000000d
+#define MPLL_CONTROL__AD_PLL_RESET_MASK 0x00004000L
+#define MPLL_CONTROL__AD_PLL_RESET__SHIFT 0x0000000e
+#define MPLL_CONTROL__DQ_0_0_BG_PWRON_MASK 0x00010000L
+#define MPLL_CONTROL__DQ_0_0_BG_PWRON__SHIFT 0x00000010
+#define MPLL_CONTROL__DQ_0_0_PLL_PWRON_MASK 0x00020000L
+#define MPLL_CONTROL__DQ_0_0_PLL_PWRON__SHIFT 0x00000011
+#define MPLL_CONTROL__DQ_0_0_PLL_RESET_MASK 0x00040000L
+#define MPLL_CONTROL__DQ_0_0_PLL_RESET__SHIFT 0x00000012
+#define MPLL_CONTROL__DQ_0_1_BG_PWRON_MASK 0x00100000L
+#define MPLL_CONTROL__DQ_0_1_BG_PWRON__SHIFT 0x00000014
+#define MPLL_CONTROL__DQ_0_1_PLL_PWRON_MASK 0x00200000L
+#define MPLL_CONTROL__DQ_0_1_PLL_PWRON__SHIFT 0x00000015
+#define MPLL_CONTROL__DQ_0_1_PLL_RESET_MASK 0x00400000L
+#define MPLL_CONTROL__DQ_0_1_PLL_RESET__SHIFT 0x00000016
+#define MPLL_CONTROL__DQ_1_0_BG_PWRON_MASK 0x01000000L
+#define MPLL_CONTROL__DQ_1_0_BG_PWRON__SHIFT 0x00000018
+#define MPLL_CONTROL__DQ_1_0_PLL_PWRON_MASK 0x02000000L
+#define MPLL_CONTROL__DQ_1_0_PLL_PWRON__SHIFT 0x00000019
+#define MPLL_CONTROL__DQ_1_0_PLL_RESET_MASK 0x04000000L
+#define MPLL_CONTROL__DQ_1_0_PLL_RESET__SHIFT 0x0000001a
+#define MPLL_CONTROL__DQ_1_1_BG_PWRON_MASK 0x10000000L
+#define MPLL_CONTROL__DQ_1_1_BG_PWRON__SHIFT 0x0000001c
+#define MPLL_CONTROL__DQ_1_1_PLL_PWRON_MASK 0x20000000L
+#define MPLL_CONTROL__DQ_1_1_PLL_PWRON__SHIFT 0x0000001d
+#define MPLL_CONTROL__DQ_1_1_PLL_RESET_MASK 0x40000000L
+#define MPLL_CONTROL__DQ_1_1_PLL_RESET__SHIFT 0x0000001e
+#define MPLL_CONTROL__GDDR_PWRON_MASK 0x00000001L
+#define MPLL_CONTROL__GDDR_PWRON__SHIFT 0x00000000
+#define MPLL_CONTROL__PLL_BUF_PWRON_TX_MASK 0x00000004L
+#define MPLL_CONTROL__PLL_BUF_PWRON_TX__SHIFT 0x00000002
+#define MPLL_CONTROL__REFCLK_PWRON_MASK 0x00000002L
+#define MPLL_CONTROL__REFCLK_PWRON__SHIFT 0x00000001
+#define MPLL_CONTROL__SPARE_AD_0_MASK 0x00008000L
+#define MPLL_CONTROL__SPARE_AD_0__SHIFT 0x0000000f
+#define MPLL_CONTROL__SPARE_DQ_0_0_MASK 0x00080000L
+#define MPLL_CONTROL__SPARE_DQ_0_0__SHIFT 0x00000013
+#define MPLL_CONTROL__SPARE_DQ_0_1_MASK 0x00800000L
+#define MPLL_CONTROL__SPARE_DQ_0_1__SHIFT 0x00000017
+#define MPLL_CONTROL__SPARE_DQ_1_0_MASK 0x08000000L
+#define MPLL_CONTROL__SPARE_DQ_1_0__SHIFT 0x0000001b
+#define MPLL_CONTROL__SPARE_DQ_1_1_MASK 0x80000000L
+#define MPLL_CONTROL__SPARE_DQ_1_1__SHIFT 0x0000001f
+#define MPLL_DQ_0_0_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_0_0_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_0_0_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_0_0_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_0_0_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_0_0_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_0_0_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_0_0_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_0_0_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_0_1_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_0_1_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_0_1_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_0_1_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_0_1_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_0_1_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_0_1_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_0_1_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_0_1_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_1_0_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_1_0_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_1_0_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_1_0_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_1_0_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_1_0_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_1_0_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_1_0_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_1_0_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_1_1_STATUS__FREQ_LOCK_MASK 0x00040000L
+#define MPLL_DQ_1_1_STATUS__FREQ_LOCK__SHIFT 0x00000012
+#define MPLL_DQ_1_1_STATUS__FREQ_UNLOCK_STICKY_MASK 0x00080000L
+#define MPLL_DQ_1_1_STATUS__FREQ_UNLOCK_STICKY__SHIFT 0x00000013
+#define MPLL_DQ_1_1_STATUS__OINT_RESET_MASK 0x00020000L
+#define MPLL_DQ_1_1_STATUS__OINT_RESET__SHIFT 0x00000011
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_FRAC_MASK 0x00000070L
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_FRAC__SHIFT 0x00000004
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_INT_MASK 0x0001ff80L
+#define MPLL_DQ_1_1_STATUS__TEST_FBDIV_INT__SHIFT 0x00000007
+#define MPLL_DQ_1_1_STATUS__VCTRLADC_MASK 0x00000007L
+#define MPLL_DQ_1_1_STATUS__VCTRLADC__SHIFT 0x00000000
+#define MPLL_DQ_FUNC_CNTL__SPARE_0_MASK 0x00000008L
+#define MPLL_DQ_FUNC_CNTL__SPARE_0__SHIFT 0x00000003
+#define MPLL_DQ_FUNC_CNTL__SPARE_MASK 0xffffffe0L
+#define MPLL_DQ_FUNC_CNTL__SPARE__SHIFT 0x00000005
+#define MPLL_DQ_FUNC_CNTL__YCLK_POST_DIV_MASK 0x00000007L
+#define MPLL_DQ_FUNC_CNTL__YCLK_POST_DIV__SHIFT 0x00000000
+#define MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK 0x00000010L
+#define MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_1__CLKF_MASK 0x0fff0000L
+#define MPLL_FUNC_CNTL_1__CLKFRAC_MASK 0x0000fff0L
+#define MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_1__CLKF__SHIFT 0x00000010
+#define MPLL_FUNC_CNTL_1__SPARE_0_MASK 0x0000000cL
+#define MPLL_FUNC_CNTL_1__SPARE_0__SHIFT 0x00000002
+#define MPLL_FUNC_CNTL_1__SPARE_1_MASK 0xf0000000L
+#define MPLL_FUNC_CNTL_1__SPARE_1__SHIFT 0x0000001c
+#define MPLL_FUNC_CNTL_1__VCO_MODE_MASK 0x00000003L
+#define MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT 0x00000000
+#define MPLL_FUNC_CNTL_2__BACKUP_2_MASK 0x000e0000L
+#define MPLL_FUNC_CNTL_2__BACKUP_2__SHIFT 0x00000011
+#define MPLL_FUNC_CNTL_2__BACKUP_MASK 0xf8000000L
+#define MPLL_FUNC_CNTL_2__BACKUP__SHIFT 0x0000001b
+#define MPLL_FUNC_CNTL_2__LF_CNTRL_MASK 0x07f00000L
+#define MPLL_FUNC_CNTL_2__LF_CNTRL__SHIFT 0x00000014
+#define MPLL_FUNC_CNTL_2__MPLL_UNLOCK_CLEAR_MASK 0x00000080L
+#define MPLL_FUNC_CNTL_2__MPLL_UNLOCK_CLEAR__SHIFT 0x00000007
+#define MPLL_FUNC_CNTL_2__PFD_RESET_CNTRL_MASK 0x00003000L
+#define MPLL_FUNC_CNTL_2__PFD_RESET_CNTRL__SHIFT 0x0000000c
+#define MPLL_FUNC_CNTL_2__RESET_EN_MASK 0x00000004L
+#define MPLL_FUNC_CNTL_2__RESET_EN__SHIFT 0x00000002
+#define MPLL_FUNC_CNTL_2__RESET_TIMER_MASK 0x00000c00L
+#define MPLL_FUNC_CNTL_2__RESET_TIMER__SHIFT 0x0000000a
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_EN_MASK 0x00000008L
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_EN__SHIFT 0x00000003
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_SRC_MASK 0x00000010L
+#define MPLL_FUNC_CNTL_2__TEST_BYPCLK_SRC__SHIFT 0x00000004
+#define MPLL_FUNC_CNTL_2__TEST_BYPMCLK_MASK 0x00000040L
+#define MPLL_FUNC_CNTL_2__TEST_BYPMCLK__SHIFT 0x00000006
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_FRAC_BYPASS_MASK 0x00000020L
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_FRAC_BYPASS__SHIFT 0x00000005
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_SSC_BYPASS_MASK 0x00000200L
+#define MPLL_FUNC_CNTL_2__TEST_FBDIV_SSC_BYPASS__SHIFT 0x00000009
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_CNTRL_MASK 0x00000100L
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_CNTRL__SHIFT 0x00000008
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_EN_MASK 0x00000002L
+#define MPLL_FUNC_CNTL_2__TEST_VCTL_EN__SHIFT 0x00000001
+#define MPLL_FUNC_CNTL_2__VCTRLADC_EN_MASK 0x00000001L
+#define MPLL_FUNC_CNTL_2__VCTRLADC_EN__SHIFT 0x00000000
+#define MPLL_FUNC_CNTL__BG_100ADJ_MASK 0x00000f00L
+#define MPLL_FUNC_CNTL__BG_100ADJ__SHIFT 0x00000008
+#define MPLL_FUNC_CNTL__BG_135ADJ_MASK 0x000f0000L
+#define MPLL_FUNC_CNTL__BG_135ADJ__SHIFT 0x00000010
+#define MPLL_FUNC_CNTL__BWCTRL_MASK 0x0ff00000L
+#define MPLL_FUNC_CNTL__BWCTRL__SHIFT 0x00000014
+#define MPLL_FUNC_CNTL__REG_BIAS_MASK 0xc0000000L
+#define MPLL_FUNC_CNTL__REG_BIAS__SHIFT 0x0000001e
+#define MPLL_FUNC_CNTL__SPARE_0_MASK 0x00000020L
+#define MPLL_FUNC_CNTL__SPARE_0__SHIFT 0x00000005
+#define MPLL_SEQ_UCODE_1__INSTR0_MASK 0x0000000fL
+#define MPLL_SEQ_UCODE_1__INSTR0__SHIFT 0x00000000
+#define MPLL_SEQ_UCODE_1__INSTR1_MASK 0x000000f0L
+#define MPLL_SEQ_UCODE_1__INSTR1__SHIFT 0x00000004
+#define MPLL_SEQ_UCODE_1__INSTR2_MASK 0x00000f00L
+#define MPLL_SEQ_UCODE_1__INSTR2__SHIFT 0x00000008
+#define MPLL_SEQ_UCODE_1__INSTR3_MASK 0x0000f000L
+#define MPLL_SEQ_UCODE_1__INSTR3__SHIFT 0x0000000c
+#define MPLL_SEQ_UCODE_1__INSTR4_MASK 0x000f0000L
+#define MPLL_SEQ_UCODE_1__INSTR4__SHIFT 0x00000010
+#define MPLL_SEQ_UCODE_1__INSTR5_MASK 0x00f00000L
+#define MPLL_SEQ_UCODE_1__INSTR5__SHIFT 0x00000014
+#define MPLL_SEQ_UCODE_1__INSTR6_MASK 0x0f000000L
+#define MPLL_SEQ_UCODE_1__INSTR6__SHIFT 0x00000018
+#define MPLL_SEQ_UCODE_1__INSTR7_MASK 0xf0000000L
+#define MPLL_SEQ_UCODE_1__INSTR7__SHIFT 0x0000001c
+#define MPLL_SEQ_UCODE_2__INSTR10_MASK 0x00000f00L
+#define MPLL_SEQ_UCODE_2__INSTR10__SHIFT 0x00000008
+#define MPLL_SEQ_UCODE_2__INSTR11_MASK 0x0000f000L
+#define MPLL_SEQ_UCODE_2__INSTR11__SHIFT 0x0000000c
+#define MPLL_SEQ_UCODE_2__INSTR12_MASK 0x000f0000L
+#define MPLL_SEQ_UCODE_2__INSTR12__SHIFT 0x00000010
+#define MPLL_SEQ_UCODE_2__INSTR13_MASK 0x00f00000L
+#define MPLL_SEQ_UCODE_2__INSTR13__SHIFT 0x00000014
+#define MPLL_SEQ_UCODE_2__INSTR14_MASK 0x0f000000L
+#define MPLL_SEQ_UCODE_2__INSTR14__SHIFT 0x00000018
+#define MPLL_SEQ_UCODE_2__INSTR15_MASK 0xf0000000L
+#define MPLL_SEQ_UCODE_2__INSTR15__SHIFT 0x0000001c
+#define MPLL_SEQ_UCODE_2__INSTR8_MASK 0x0000000fL
+#define MPLL_SEQ_UCODE_2__INSTR8__SHIFT 0x00000000
+#define MPLL_SEQ_UCODE_2__INSTR9_MASK 0x000000f0L
+#define MPLL_SEQ_UCODE_2__INSTR9__SHIFT 0x00000004
+#define MPLL_SS1__CLKV_MASK 0x03ffffffL
+#define MPLL_SS1__CLKV__SHIFT 0x00000000
+#define MPLL_SS1__SPARE_MASK 0xfc000000L
+#define MPLL_SS1__SPARE__SHIFT 0x0000001a
+#define MPLL_SS2__CLKS_MASK 0x00000fffL
+#define MPLL_SS2__CLKS__SHIFT 0x00000000
+#define MPLL_SS2__SPARE_MASK 0xfffff000L
+#define MPLL_SS2__SPARE__SHIFT 0x0000000c
+#define MPLL_TIME__MPLL_LOCK_TIME_MASK 0x0000ffffL
+#define MPLL_TIME__MPLL_LOCK_TIME__SHIFT 0x00000000
+#define MPLL_TIME__MPLL_RESET_TIME_MASK 0xffff0000L
+#define MPLL_TIME__MPLL_RESET_TIME__SHIFT 0x00000010
+#define VM_CONTEXT0_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000008L
+#define VM_CONTEXT0_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000003
+#define VM_CONTEXT0_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT_MASK 0x00000002L
+#define VM_CONTEXT0_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT__SHIFT 0x00000001
+#define VM_CONTEXT0_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT_MASK 0x00000004L
+#define VM_CONTEXT0_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT__SHIFT 0x00000002
+#define VM_CONTEXT0_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE_MASK 0x00000010L
+#define VM_CONTEXT0_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE__SHIFT 0x00000004
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000007
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000040L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000006
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x00000000
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x0f000000L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x00000018
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x00000001
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000a
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000009
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000b
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000016
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000015
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00800000L
+#define VM_CONTEXT0_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000017
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000004
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000008L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000003
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000010
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000f
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000011
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000d
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000c
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000e
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000013
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000012
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000014
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT0_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID_MASK 0x000ff000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID__SHIFT 0x0000000c
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW_MASK 0x01000000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW__SHIFT 0x00000018
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__PROTECTIONS_MASK 0x000000ffL
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__PROTECTIONS__SHIFT 0x00000000
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__VMID_MASK 0x1e000000L
+#define VM_CONTEXT0_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x00000019
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000008L
+#define VM_CONTEXT1_CNTL2__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x00000003
+#define VM_CONTEXT1_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL2__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT_MASK 0x00000002L
+#define VM_CONTEXT1_CNTL2__ENABLE_CLEAR_PROTECTION_FAULT_STATUS_ADDR_WHEN_INVALIDATE_CONTEXT__SHIFT 0x00000001
+#define VM_CONTEXT1_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT_MASK 0x00000004L
+#define VM_CONTEXT1_CNTL2__ENABLE_INTERRUPT_PROCESSING_FOR_SUBSEQUENT_FAULTS_PER_CONTEXT__SHIFT 0x00000002
+#define VM_CONTEXT1_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE_MASK 0x00000010L
+#define VM_CONTEXT1_CNTL2__WAIT_FOR_IDLE_WHEN_INVALIDATE__SHIFT 0x00000004
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000007
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000040L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000006
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x00000000
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x0f000000L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x00000018
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x00000001
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000a
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000009
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000b
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000016
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000015
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00800000L
+#define VM_CONTEXT1_CNTL__PRIVILEGED_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000017
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000004
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000008L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000003
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000010
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000f
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000011
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x0000000d
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x0000000c
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x0000000e
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x00000013
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x00000012
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_SAVE__SHIFT 0x00000014
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR__LOGICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR__PHYSICAL_PAGE_ADDR__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID_MASK 0x000ff000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_ID__SHIFT 0x0000000c
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW_MASK 0x01000000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__MEMORY_CLIENT_RW__SHIFT 0x00000018
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__PROTECTIONS_MASK 0x000000ffL
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__PROTECTIONS__SHIFT 0x00000000
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__VMID_MASK 0x1e000000L
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x00000019
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR__PHYSICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x00000000
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0x0000000a
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0x0000000b
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0x0000000c
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0x0000000d
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0x0000000e
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0x0000000f
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x00000001
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x00000002
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x00000003
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x00000004
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x00000005
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x00000006
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x00000007
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x00000008
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x00000009
+#define VM_DEBUG__FLAGS_MASK 0xffffffffL
+#define VM_DEBUG__FLAGS__SHIFT 0x00000000
+#define VM_DUMMY_PAGE_FAULT_ADDR__DUMMY_PAGE_ADDR_MASK 0x0fffffffL
+#define VM_DUMMY_PAGE_FAULT_ADDR__DUMMY_PAGE_ADDR__SHIFT 0x00000000
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x00000001
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MASK_MASK 0x0000000cL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MASK__SHIFT 0x00000002
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x00000000
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK 0x000001ffL
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK_MASK 0x0003fe00L
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT_MASK__SHIFT 0x00000009
+#define VM_FAULT_CLIENT_ID__MEMORY_CLIENT__SHIFT 0x00000000
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_0_MASK 0x00000001L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_0__SHIFT 0x00000000
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_10_MASK 0x00000400L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_10__SHIFT 0x0000000a
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_11_MASK 0x00000800L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_11__SHIFT 0x0000000b
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_12_MASK 0x00001000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_12__SHIFT 0x0000000c
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_13_MASK 0x00002000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_13__SHIFT 0x0000000d
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_14_MASK 0x00004000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_14__SHIFT 0x0000000e
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_15_MASK 0x00008000L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_15__SHIFT 0x0000000f
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_1_MASK 0x00000002L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_1__SHIFT 0x00000001
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_2_MASK 0x00000004L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_2__SHIFT 0x00000002
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_3_MASK 0x00000008L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_3__SHIFT 0x00000003
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_4_MASK 0x00000010L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_4__SHIFT 0x00000004
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_5_MASK 0x00000020L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_5__SHIFT 0x00000005
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_6_MASK 0x00000040L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_6__SHIFT 0x00000006
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_7_MASK 0x00000080L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_7__SHIFT 0x00000007
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_8_MASK 0x00000100L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_8__SHIFT 0x00000008
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_9_MASK 0x00000200L
+#define VM_INVALIDATE_REQUEST__INVALIDATE_DOMAIN_9__SHIFT 0x00000009
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_0_MASK 0x00000001L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_0__SHIFT 0x00000000
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_10_MASK 0x00000400L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_10__SHIFT 0x0000000a
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_11_MASK 0x00000800L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_11__SHIFT 0x0000000b
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_12_MASK 0x00001000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_12__SHIFT 0x0000000c
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_13_MASK 0x00002000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_13__SHIFT 0x0000000d
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_14_MASK 0x00004000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_14__SHIFT 0x0000000e
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_15_MASK 0x00008000L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_15__SHIFT 0x0000000f
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_1_MASK 0x00000002L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_1__SHIFT 0x00000001
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_2_MASK 0x00000004L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_2__SHIFT 0x00000002
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_3_MASK 0x00000008L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_3__SHIFT 0x00000003
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_4_MASK 0x00000010L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_4__SHIFT 0x00000004
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_5_MASK 0x00000020L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_5__SHIFT 0x00000005
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_6_MASK 0x00000040L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_6__SHIFT 0x00000006
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_7_MASK 0x00000080L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_7__SHIFT 0x00000007
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_8_MASK 0x00000100L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_8__SHIFT 0x00000008
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_9_MASK 0x00000200L
+#define VM_INVALIDATE_RESPONSE__DOMAIN_INVALIDATED_9__SHIFT 0x00000009
+#define VM_L2_BANK_SELECT_MASKA__BANK_SELECT_MASK_MASK 0x0fffffffL
+#define VM_L2_BANK_SELECT_MASKA__BANK_SELECT_MASK__SHIFT 0x00000000
+#define VM_L2_BANK_SELECT_MASKB__BANK_SELECT_MASK_MASK 0x000000ffL
+#define VM_L2_BANK_SELECT_MASKB__BANK_SELECT_MASK__SHIFT 0x00000000
+#define VM_L2_CG__ENABLE_MASK 0x00040000L
+#define VM_L2_CG__ENABLE__SHIFT 0x00000012
+#define VM_L2_CG__MEM_LS_ENABLE_MASK 0x00080000L
+#define VM_L2_CG__MEM_LS_ENABLE__SHIFT 0x00000013
+#define VM_L2_CG__OFFDLY_MASK 0x00000fc0L
+#define VM_L2_CG__OFFDLY__SHIFT 0x00000006
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x00000016
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x00000015
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x00000000
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0c000000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x0000001a
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x00000001
+#define VM_L2_CNTL2__L2_CACHE_BIGK_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__L2_CACHE_BIGK_VMID_MODE__SHIFT 0x00000017
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003fL
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x00000000
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00e00000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x00000015
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x0000001c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x00000014
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0f000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x00000018
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x0000001d
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000f8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x0000000f
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000c0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x00000006
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001f00L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x00000008
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x00000013
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0x0000000f
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x00000000
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x00000001
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x0000000a
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x00000009
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03e00000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x00000015
+#define VM_L2_CNTL__L2_CACHE_4K_SWAP_TAG_INDEX_LSBS_MASK 0x0c000000L
+#define VM_L2_CNTL__L2_CACHE_4K_SWAP_TAG_INDEX_LSBS__SHIFT 0x0000001a
+#define VM_L2_CNTL__L2_CACHE_BIGK_SWAP_TAG_INDEX_LSBS_MASK 0x70000000L
+#define VM_L2_CNTL__L2_CACHE_BIGK_SWAP_TAG_INDEX_LSBS__SHIFT 0x0000001c
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x00000004
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000cL
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x00000002
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0x0000000c
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x00000008
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x00000012
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET__PHYSICAL_PAGE_OFFSET_MASK 0x0fffffffL
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET__PHYSICAL_PAGE_OFFSET__SHIFT 0x00000000
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001fffeL
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x00000001
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x00000000
+#define VM_PRT_APERTURE0_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE0_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE0_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE0_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE1_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE1_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE1_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE1_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE2_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE2_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE2_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE2_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE3_HIGH_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE3_HIGH_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_APERTURE3_LOW_ADDR__LOGICAL_PAGE_NUMBER_MASK 0x0fffffffL
+#define VM_PRT_APERTURE3_LOW_ADDR__LOGICAL_PAGE_NUMBER__SHIFT 0x00000000
+#define VM_PRT_CNTL__L1_TLB_STORE_INVALID_ENTRIES_MASK 0x00000008L
+#define VM_PRT_CNTL__L1_TLB_STORE_INVALID_ENTRIES__SHIFT 0x00000003
+#define VM_PRT_CNTL__L2_CACHE_STORE_INVALID_ENTRIES_MASK 0x00000004L
+#define VM_PRT_CNTL__L2_CACHE_STORE_INVALID_ENTRIES__SHIFT 0x00000002
+
+#endif
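
The *_MASK/*__SHIFT pairs in the header above are intended to be used together for read-modify-write updates of individual register fields. Below is a minimal illustrative sketch, not part of the patch: it relies only on the field macros defined above, while the helper name, the depth parameter, and the linux/types.h include for u32 are assumptions for the example.

#include <linux/types.h>

/*
 * Sketch: compose a VM_CONTEXT0_CNTL value using the mask/shift macros
 * defined in the header above.  The requested page-table depth is written
 * into the PAGE_TABLE_DEPTH field and the context is marked enabled.
 */
static inline u32 example_vm_context0_cntl(u32 cntl, u32 depth)
{
	/* Clear the field, then insert the new value under its mask. */
	cntl &= ~VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK;
	cntl |= (depth << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
		VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK;

	/* Single-bit fields can be set directly from their mask. */
	cntl |= VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK;

	return cntl;
}

The same pattern applies to every field in this header; only the register the result is written back to differs.
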
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
new file mode 100644
index 000000000000..edc8a793a95d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -0,0 +1,275 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef OSS_1_0_D_H
+#define OSS_1_0_D_H
+
+#define ixCLIENT0_BM 0x0220
+#define ixCLIENT0_CD0 0x0210
+#define ixCLIENT0_CD1 0x0214
+#define ixCLIENT0_CD2 0x0218
+#define ixCLIENT0_CD3 0x021C
+#define ixCLIENT0_CK0 0x0200
+#define ixCLIENT0_CK1 0x0204
+#define ixCLIENT0_CK2 0x0208
+#define ixCLIENT0_CK3 0x020C
+#define ixCLIENT0_K0 0x01F0
+#define ixCLIENT0_K1 0x01F4
+#define ixCLIENT0_K2 0x01F8
+#define ixCLIENT0_K3 0x01FC
+#define ixCLIENT0_OFFSET 0x0224
+#define ixCLIENT0_OFFSET_HI 0x0290
+#define ixCLIENT0_STATUS 0x0228
+#define ixCLIENT1_BM 0x025C
+#define ixCLIENT1_CD0 0x024C
+#define ixCLIENT1_CD1 0x0250
+#define ixCLIENT1_CD2 0x0254
+#define ixCLIENT1_CD3 0x0258
+#define ixCLIENT1_CK0 0x023C
+#define ixCLIENT1_CK1 0x0240
+#define ixCLIENT1_CK2 0x0244
+#define ixCLIENT1_CK3 0x0248
+#define ixCLIENT1_K0 0x022C
+#define ixCLIENT1_K1 0x0230
+#define ixCLIENT1_K2 0x0234
+#define ixCLIENT1_K3 0x0238
+#define ixCLIENT1_OFFSET 0x0260
+#define ixCLIENT1_OFFSET_HI 0x0294
+#define ixCLIENT1_PORT_STATUS 0x0264
+#define ixCLIENT2_BM 0x01E4
+#define ixCLIENT2_CD0 0x01D4
+#define ixCLIENT2_CD1 0x01D8
+#define ixCLIENT2_CD2 0x01DC
+#define ixCLIENT2_CD3 0x01E0
+#define ixCLIENT2_CK0 0x01C4
+#define ixCLIENT2_CK1 0x01C8
+#define ixCLIENT2_CK2 0x01CC
+#define ixCLIENT2_CK3 0x01D0
+#define ixCLIENT2_K0 0x01B4
+#define ixCLIENT2_K1 0x01B8
+#define ixCLIENT2_K2 0x01BC
+#define ixCLIENT2_K3 0x01C0
+#define ixCLIENT2_OFFSET 0x01E8
+#define ixCLIENT2_OFFSET_HI 0x0298
+#define ixCLIENT2_STATUS 0x01EC
+#define ixCLIENT3_BM 0x02D4
+#define ixCLIENT3_CD0 0x02C4
+#define ixCLIENT3_CD1 0x02C8
+#define ixCLIENT3_CD2 0x02CC
+#define ixCLIENT3_CD3 0x02D0
+#define ixCLIENT3_CK0 0x02B4
+#define ixCLIENT3_CK1 0x02B8
+#define ixCLIENT3_CK2 0x02BC
+#define ixCLIENT3_CK3 0x02C0
+#define ixCLIENT3_K0 0x02A4
+#define ixCLIENT3_K1 0x02A8
+#define ixCLIENT3_K2 0x02AC
+#define ixCLIENT3_K3 0x02B0
+#define ixCLIENT3_OFFSET 0x02D8
+#define ixCLIENT3_OFFSET_HI 0x02A0
+#define ixCLIENT3_STATUS 0x02DC
+#define ixDH_TEST 0x0000
+#define ixEXP0 0x0034
+#define ixEXP1 0x0038
+#define ixEXP2 0x003C
+#define ixEXP3 0x0040
+#define ixEXP4 0x0044
+#define ixEXP5 0x0048
+#define ixEXP6 0x004C
+#define ixEXP7 0x0050
+#define ixHFS_SEED0 0x0278
+#define ixHFS_SEED1 0x027C
+#define ixHFS_SEED2 0x0280
+#define ixHFS_SEED3 0x0284
+#define ixKEFUSE0 0x0268
+#define ixKEFUSE1 0x026C
+#define ixKEFUSE2 0x0270
+#define ixKEFUSE3 0x0274
+#define ixKHFS0 0x0004
+#define ixKHFS1 0x0008
+#define ixKHFS2 0x000C
+#define ixKHFS3 0x0010
+#define ixKSESSION0 0x0014
+#define ixKSESSION1 0x0018
+#define ixKSESSION2 0x001C
+#define ixKSESSION3 0x0020
+#define ixKSIG0 0x0024
+#define ixKSIG1 0x0028
+#define ixKSIG2 0x002C
+#define ixKSIG3 0x0030
+#define ixLX0 0x0054
+#define ixLX1 0x0058
+#define ixLX2 0x005C
+#define ixLX3 0x0060
+#define ixRINGOSC_MASK 0x0288
+#define ixSPU_PORT_STATUS 0x029C
+#define mmCC_DRM_ID_STRAPS 0x1559
+#define mmCC_SYS_RB_BACKEND_DISABLE 0x03A0
+#define mmCC_SYS_RB_REDUNDANCY 0x039F
+#define mmCGTT_DRM_CLK_CTRL0 0x1579
+#define mmCP_CONFIG 0x0F92
+#define mmDC_TEST_DEBUG_DATA 0x157D
+#define mmDC_TEST_DEBUG_INDEX 0x157C
+#define mmGC_USER_SYS_RB_BACKEND_DISABLE 0x03A1
+#define mmHDP_ADDR_CONFIG 0x0BD2
+#define mmHDP_DEBUG0 0x0BCC
+#define mmHDP_DEBUG1 0x0BCD
+#define mmHDP_HOST_PATH_CNTL 0x0B00
+#define mmHDP_LAST_SURFACE_HIT 0x0BCE
+#define mmHDP_MEMIO_ADDR 0x0BF7
+#define mmHDP_MEMIO_CNTL 0x0BF6
+#define mmHDP_MEMIO_RD_DATA 0x0BFA
+#define mmHDP_MEMIO_STATUS 0x0BF8
+#define mmHDP_MEMIO_WR_DATA 0x0BF9
+#define mmHDP_MEM_POWER_LS 0x0BD4
+#define mmHDP_MISC_CNTL 0x0BD3
+#define mmHDP_NONSURFACE_BASE 0x0B01
+#define mmHDP_NONSURFACE_INFO 0x0B02
+#define mmHDP_NONSURFACE_PREFETCH 0x0BD5
+#define mmHDP_NONSURFACE_SIZE 0x0B03
+#define mmHDP_NONSURF_FLAGS 0x0BC9
+#define mmHDP_NONSURF_FLAGS_CLR 0x0BCA
+#define mmHDP_OUTSTANDING_REQ 0x0BD1
+#define mmHDP_SC_MULTI_CHIP_CNTL 0x0BD0
+#define mmHDP_SW_SEMAPHORE 0x0BCB
+#define mmHDP_TILING_CONFIG 0x0BCF
+#define mmHDP_XDP_BARS_ADDR_39_36 0x0C44
+#define mmHDP_XDP_BUSY_STS 0x0C3E
+#define mmHDP_XDP_CGTT_BLK_CTRL 0x0C33
+#define mmHDP_XDP_CHKN 0x0C40
+#define mmHDP_XDP_D2H_BAR_UPDATE 0x0C02
+#define mmHDP_XDP_D2H_FLUSH 0x0C01
+#define mmHDP_XDP_D2H_RSVD_10 0x0C0A
+#define mmHDP_XDP_D2H_RSVD_11 0x0C0B
+#define mmHDP_XDP_D2H_RSVD_12 0x0C0C
+#define mmHDP_XDP_D2H_RSVD_13 0x0C0D
+#define mmHDP_XDP_D2H_RSVD_14 0x0C0E
+#define mmHDP_XDP_D2H_RSVD_15 0x0C0F
+#define mmHDP_XDP_D2H_RSVD_16 0x0C10
+#define mmHDP_XDP_D2H_RSVD_17 0x0C11
+#define mmHDP_XDP_D2H_RSVD_18 0x0C12
+#define mmHDP_XDP_D2H_RSVD_19 0x0C13
+#define mmHDP_XDP_D2H_RSVD_20 0x0C14
+#define mmHDP_XDP_D2H_RSVD_21 0x0C15
+#define mmHDP_XDP_D2H_RSVD_22 0x0C16
+#define mmHDP_XDP_D2H_RSVD_23 0x0C17
+#define mmHDP_XDP_D2H_RSVD_24 0x0C18
+#define mmHDP_XDP_D2H_RSVD_25 0x0C19
+#define mmHDP_XDP_D2H_RSVD_26 0x0C1A
+#define mmHDP_XDP_D2H_RSVD_27 0x0C1B
+#define mmHDP_XDP_D2H_RSVD_28 0x0C1C
+#define mmHDP_XDP_D2H_RSVD_29 0x0C1D
+#define mmHDP_XDP_D2H_RSVD_30 0x0C1E
+#define mmHDP_XDP_D2H_RSVD_3 0x0C03
+#define mmHDP_XDP_D2H_RSVD_31 0x0C1F
+#define mmHDP_XDP_D2H_RSVD_32 0x0C20
+#define mmHDP_XDP_D2H_RSVD_33 0x0C21
+#define mmHDP_XDP_D2H_RSVD_34 0x0C22
+#define mmHDP_XDP_D2H_RSVD_4 0x0C04
+#define mmHDP_XDP_D2H_RSVD_5 0x0C05
+#define mmHDP_XDP_D2H_RSVD_6 0x0C06
+#define mmHDP_XDP_D2H_RSVD_7 0x0C07
+#define mmHDP_XDP_D2H_RSVD_8 0x0C08
+#define mmHDP_XDP_D2H_RSVD_9 0x0C09
+#define mmHDP_XDP_DBG_ADDR 0x0C41
+#define mmHDP_XDP_DBG_DATA 0x0C42
+#define mmHDP_XDP_DBG_MASK 0x0C43
+#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0C00
+#define mmHDP_XDP_DIRECT2HDP_LAST 0x0C23
+#define mmHDP_XDP_FLUSH_ARMED_STS 0x0C3C
+#define mmHDP_XDP_FLUSH_CNTR0_STS 0x0C3D
+#define mmHDP_XDP_HDP_IPH_CFG 0x0C31
+#define mmHDP_XDP_HDP_MBX_MC_CFG 0x0C2D
+#define mmHDP_XDP_HDP_MC_CFG 0x0C2E
+#define mmHDP_XDP_HST_CFG 0x0C2F
+#define mmHDP_XDP_P2P_BAR0 0x0C34
+#define mmHDP_XDP_P2P_BAR1 0x0C35
+#define mmHDP_XDP_P2P_BAR2 0x0C36
+#define mmHDP_XDP_P2P_BAR3 0x0C37
+#define mmHDP_XDP_P2P_BAR4 0x0C38
+#define mmHDP_XDP_P2P_BAR5 0x0C39
+#define mmHDP_XDP_P2P_BAR6 0x0C3A
+#define mmHDP_XDP_P2P_BAR7 0x0C3B
+#define mmHDP_XDP_P2P_BAR_CFG 0x0C24
+#define mmHDP_XDP_P2P_MBX_ADDR0 0x0C26
+#define mmHDP_XDP_P2P_MBX_ADDR1 0x0C27
+#define mmHDP_XDP_P2P_MBX_ADDR2 0x0C28
+#define mmHDP_XDP_P2P_MBX_ADDR3 0x0C29
+#define mmHDP_XDP_P2P_MBX_ADDR4 0x0C2A
+#define mmHDP_XDP_P2P_MBX_ADDR5 0x0C2B
+#define mmHDP_XDP_P2P_MBX_ADDR6 0x0C2C
+#define mmHDP_XDP_P2P_MBX_OFFSET 0x0C25
+#define mmHDP_XDP_SID_CFG 0x0C30
+#define mmHDP_XDP_SRBM_CFG 0x0C32
+#define mmHDP_XDP_STICKY 0x0C3F
+#define mmIH_ADVFAULT_CNTL 0x0F8C
+#define mmIH_CNTL 0x0F86
+#define mmIH_LEVEL_STATUS 0x0F87
+#define mmIH_PERFCOUNTER0_RESULT 0x0F8A
+#define mmIH_PERFCOUNTER1_RESULT 0x0F8B
+#define mmIH_PERFMON_CNTL 0x0F89
+#define mmIH_RB_BASE 0x0F81
+#define mmIH_RB_CNTL 0x0F80
+#define mmIH_RB_RPTR 0x0F82
+#define mmIH_RB_WPTR 0x0F83
+#define mmIH_RB_WPTR_ADDR_HI 0x0F84
+#define mmIH_RB_WPTR_ADDR_LO 0x0F85
+#define mmIH_STATUS 0x0F88
+#define mmSEM_MAILBOX 0x0F9B
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
+#define mmSEM_MAILBOX_CONTROL 0x0F9C
+#define mmSEM_MCIF_CONFIG 0x0F90
+#define mmSRBM_CAM_DATA 0x0397
+#define mmSRBM_CAM_INDEX 0x0396
+#define mmSRBM_CHIP_REVISION 0x039B
+#define mmSRBM_CNTL 0x0390
+#define mmSRBM_DEBUG 0x03A4
+#define mmSRBM_DEBUG_CNTL 0x0399
+#define mmSRBM_DEBUG_DATA 0x039A
+#define mmSRBM_DEBUG_SNAPSHOT 0x03A5
+#define mmSRBM_GFX_CNTL 0x0391
+#define mmSRBM_INT_ACK 0x03AA
+#define mmSRBM_INT_CNTL 0x03A8
+#define mmSRBM_INT_STATUS 0x03A9
+#define mmSRBM_MC_CLKEN_CNTL 0x03B3
+#define mmSRBM_PERFCOUNTER0_HI 0x0704
+#define mmSRBM_PERFCOUNTER0_LO 0x0703
+#define mmSRBM_PERFCOUNTER0_SELECT 0x0701
+#define mmSRBM_PERFCOUNTER1_HI 0x0706
+#define mmSRBM_PERFCOUNTER1_LO 0x0705
+#define mmSRBM_PERFCOUNTER1_SELECT 0x0702
+#define mmSRBM_PERFMON_CNTL 0x0700
+#define mmSRBM_READ_ERROR 0x03A6
+#define mmSRBM_SOFT_RESET 0x0398
+#define mmSRBM_STATUS 0x0394
+#define mmSRBM_STATUS2 0x0393
+#define mmSRBM_SYS_CLKEN_CNTL 0x03B4
+#define mmSRBM_UVD_CLKEN_CNTL 0x03B6
+#define mmSRBM_VCE_CLKEN_CNTL 0x03B5
+#define mmUVD_CONFIG 0x0F98
+#define mmVCE_CONFIG 0x0F94
+#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
+
+/* from the old sid.h */
+#define mmDMA_TILING_CONFIG 0x342E
+
+#endif
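
In oss_1_0_d.h above, the ix* entries name indexed (indirect) registers while the mm* entries are memory-mapped dword register indices. A minimal sketch of how such mm* offsets are typically consumed follows; the mmio_base pointer, the accessor names, and the ioremap() done elsewhere are assumptions for the example and not part of the patch. The shift by 2 reflects dword indexing into a byte-addressed MMIO aperture.

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *mmio_base;	/* assumed to be ioremap()ed during probe */

/* Sketch: raw 32-bit MMIO accessors that scale a dword index by 4. */
static u32 example_rreg32(u32 reg)
{
	return readl(mmio_base + (reg << 2));
}

static void example_wreg32(u32 reg, u32 val)
{
	writel(val, mmio_base + (reg << 2));
}

static void example_quiesce_ih_ring(void)
{
	/* Clear the IH ring-buffer control register via its mm offset. */
	example_wreg32(mmIH_RB_CNTL, 0);
	(void)example_rreg32(mmIH_RB_CNTL);	/* read back to post the write */
}
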
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
new file mode 100644
index 000000000000..1c540fe136cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -0,0 +1,1079 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef OSS_1_0_SH_MASK_H
+#define OSS_1_0_SH_MASK_H
+
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000L
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x0000001c
+#define CC_DRM_ID_STRAPS__DEVICE_ID_MASK 0x000ffff0L
+#define CC_DRM_ID_STRAPS__DEVICE_ID__SHIFT 0x00000004
+#define CC_DRM_ID_STRAPS__MAJOR_REV_ID_MASK 0x00f00000L
+#define CC_DRM_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x00000014
+#define CC_DRM_ID_STRAPS__MINOR_REV_ID_MASK 0x0f000000L
+#define CC_DRM_ID_STRAPS__MINOR_REV_ID__SHIFT 0x00000018
+#define CC_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define CC_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define CLIENT0_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT0_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT0_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT0_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT0_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT0_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT0_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT0_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT0_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT0_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT1_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT1_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT1_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT1_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT1_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT1_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT1_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT1_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT1_PORT_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT1_PORT_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT2_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT2_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT2_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT2_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT2_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT2_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT2_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT2_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT2_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT2_STATUS__RESERVED__SHIFT 0x00000000
+#define CLIENT3_BM__RESERVED_MASK 0xffffffffL
+#define CLIENT3_BM__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CD3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CD3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_CK3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_CK3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K0__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K0__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K1__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K1__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K2__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K2__RESERVED__SHIFT 0x00000000
+#define CLIENT3_K3__RESERVED_MASK 0xffffffffL
+#define CLIENT3_K3__RESERVED__SHIFT 0x00000000
+#define CLIENT3_OFFSET_HI__RESERVED_MASK 0xffffffffL
+#define CLIENT3_OFFSET_HI__RESERVED__SHIFT 0x00000000
+#define CLIENT3_OFFSET__RESERVED_MASK 0xffffffffL
+#define CLIENT3_OFFSET__RESERVED__SHIFT 0x00000000
+#define CLIENT3_STATUS__RESERVED_MASK 0xffffffffL
+#define CLIENT3_STATUS__RESERVED__SHIFT 0x00000000
+#define CP_CONFIG__CP_RDREQ_URG_MASK 0x00000f00L
+#define CP_CONFIG__CP_RDREQ_URG__SHIFT 0x00000008
+#define CP_CONFIG__CP_REQ_TRAN_MASK 0x00010000L
+#define CP_CONFIG__CP_REQ_TRAN__SHIFT 0x00000010
+#define DC_TEST_DEBUG_DATA__DC_TEST_DEBUG_DATA_MASK 0xffffffffL
+#define DC_TEST_DEBUG_DATA__DC_TEST_DEBUG_DATA__SHIFT 0x00000000
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_INDEX_MASK 0x000000ffL
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_INDEX__SHIFT 0x00000000
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+#define DC_TEST_DEBUG_INDEX__DC_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+#define DH_TEST__DH_TEST_MASK 0x00000001L
+#define DH_TEST__DH_TEST__SHIFT 0x00000000
+#define EXP0__RESERVED_MASK 0xffffffffL
+#define EXP0__RESERVED__SHIFT 0x00000000
+#define EXP1__RESERVED_MASK 0xffffffffL
+#define EXP1__RESERVED__SHIFT 0x00000000
+#define EXP2__RESERVED_MASK 0xffffffffL
+#define EXP2__RESERVED__SHIFT 0x00000000
+#define EXP3__RESERVED_MASK 0xffffffffL
+#define EXP3__RESERVED__SHIFT 0x00000000
+#define EXP4__RESERVED_MASK 0xffffffffL
+#define EXP4__RESERVED__SHIFT 0x00000000
+#define EXP5__RESERVED_MASK 0xffffffffL
+#define EXP5__RESERVED__SHIFT 0x00000000
+#define EXP6__RESERVED_MASK 0xffffffffL
+#define EXP6__RESERVED__SHIFT 0x00000000
+#define EXP7__RESERVED_MASK 0xffffffffL
+#define EXP7__RESERVED__SHIFT 0x00000000
+#define GC_USER_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00ff0000L
+#define GC_USER_SYS_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x00000010
+#define HDP_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define HDP_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define HDP_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define HDP_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define HDP_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define HDP_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define HDP_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define HDP_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define HDP_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HDP_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define HDP_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define HDP_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define HDP_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define HDP_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define HDP_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define HDP_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define HDP_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define HDP_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x00000000
+#define HDP_DEBUG1__HDP_DEBUG__SHIFT 0x00000000
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x0000001d
+#define HDP_HOST_PATH_CNTL__BIF_RDRET_CREDIT_MASK 0x00000007L
+#define HDP_HOST_PATH_CNTL__BIF_RDRET_CREDIT__SHIFT 0x00000000
+#define HDP_HOST_PATH_CNTL__CACHE_INVALIDATE_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__CACHE_INVALIDATE__SHIFT 0x00000016
+#define HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK 0x00800000L
+#define HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS__SHIFT 0x00000017
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS_MASK 0x80000000L
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS__SHIFT 0x0000001f
+#define HDP_HOST_PATH_CNTL__MC_WRREQ_CREDIT_MASK 0x000001f8L
+#define HDP_HOST_PATH_CNTL__MC_WRREQ_CREDIT__SHIFT 0x00000003
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0x0000000b
+#define HDP_HOST_PATH_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0f000000L
+#define HDP_HOST_PATH_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x00000018
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x00000015
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x00000013
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS_MASK 0x40000000L
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS__SHIFT 0x0000001e
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x00000009
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x0000003fL
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x00000000
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xffffffffL
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x00000000
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003f00L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x00000008
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003cL
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x00000002
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0x0000000f
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0x0000000e
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x00000001
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x00000007
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x00000000
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x00000006
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xffffffffL
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x00000000
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x00000003
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x00000001
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x00000002
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x00000000
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xffffffffL
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x00000000
+#define HDP_MEM_POWER_LS__LS_ENABLE_MASK 0x00000001L
+#define HDP_MEM_POWER_LS__LS_ENABLE__SHIFT 0x00000000
+#define HDP_MEM_POWER_LS__LS_HOLD_MASK 0x00001f80L
+#define HDP_MEM_POWER_LS__LS_HOLD__SHIFT 0x00000007
+#define HDP_MEM_POWER_LS__LS_SETUP_MASK 0x0000007eL
+#define HDP_MEM_POWER_LS__LS_SETUP__SHIFT 0x00000001
+#define HDP_MISC_CNTL__ADDRLIB_LINEAR_BYPASS_MASK 0x00100000L
+#define HDP_MISC_CNTL__ADDRLIB_LINEAR_BYPASS__SHIFT 0x00000014
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x00000015
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK 0x00000001L
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE__SHIFT 0x00000000
+#define HDP_MISC_CNTL__HDP_BIF_RDRET_CREDIT_MASK 0x00000780L
+#define HDP_MISC_CNTL__HDP_BIF_RDRET_CREDIT__SHIFT 0x00000007
+#define HDP_MISC_CNTL__MC_RDREQ_CREDIT_MASK 0x0007e000L
+#define HDP_MISC_CNTL__MC_RDREQ_CREDIT__SHIFT 0x0000000d
+#define HDP_MISC_CNTL__MULTIPLE_READS_MASK 0x00000040L
+#define HDP_MISC_CNTL__MULTIPLE_READS__SHIFT 0x00000006
+#define HDP_MISC_CNTL__NO_SPLIT_ARRAY_LINEAR_MASK 0x00001000L
+#define HDP_MISC_CNTL__NO_SPLIT_ARRAY_LINEAR__SHIFT 0x0000000c
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x00000005
+#define HDP_MISC_CNTL__READ_CACHE_INVALIDATE_MASK 0x00080000L
+#define HDP_MISC_CNTL__READ_CACHE_INVALIDATE__SHIFT 0x00000013
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0x0000000b
+#define HDP_MISC_CNTL__VM_ID_MASK 0x0000001eL
+#define HDP_MISC_CNTL__VM_ID__SHIFT 0x00000001
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_MASK 0xffffffffL
+#define HDP_NONSURFACE_BASE__NONSURF_BASE__SHIFT 0x00000000
+#define HDP_NONSURFACE_INFO__NONSURF_ADDR_TYPE_MASK 0x00000001L
+#define HDP_NONSURFACE_INFO__NONSURF_ADDR_TYPE__SHIFT 0x00000000
+#define HDP_NONSURFACE_INFO__NONSURF_ARRAY_MODE_MASK 0x0000001eL
+#define HDP_NONSURFACE_INFO__NONSURF_ARRAY_MODE__SHIFT 0x00000001
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_HEIGHT_MASK 0x03000000L
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_HEIGHT__SHIFT 0x00000018
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_WIDTH_MASK 0x00c00000L
+#define HDP_NONSURFACE_INFO__NONSURF_BANK_WIDTH__SHIFT 0x00000016
+#define HDP_NONSURFACE_INFO__NONSURF_ENDIAN_MASK 0x00000060L
+#define HDP_NONSURFACE_INFO__NONSURF_ENDIAN__SHIFT 0x00000005
+#define HDP_NONSURFACE_INFO__NONSURF_MACRO_TILE_ASPECT_MASK 0x0c000000L
+#define HDP_NONSURFACE_INFO__NONSURF_MACRO_TILE_ASPECT__SHIFT 0x0000001a
+#define HDP_NONSURFACE_INFO__NONSURF_MICRO_TILE_MODE_MASK 0x30000000L
+#define HDP_NONSURFACE_INFO__NONSURF_MICRO_TILE_MODE__SHIFT 0x0000001c
+#define HDP_NONSURFACE_INFO__NONSURF_NUM_BANKS_MASK 0x00300000L
+#define HDP_NONSURFACE_INFO__NONSURF_NUM_BANKS__SHIFT 0x00000014
+#define HDP_NONSURFACE_INFO__NONSURF_PIXEL_SIZE_MASK 0x00000380L
+#define HDP_NONSURFACE_INFO__NONSURF_PIXEL_SIZE__SHIFT 0x00000007
+#define HDP_NONSURFACE_INFO__NONSURF_PRIV_MASK 0x00008000L
+#define HDP_NONSURFACE_INFO__NONSURF_PRIV__SHIFT 0x0000000f
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_NUM_MASK 0x00001c00L
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_NUM__SHIFT 0x0000000a
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_SIZE_MASK 0x00006000L
+#define HDP_NONSURFACE_INFO__NONSURF_SAMPLE_SIZE__SHIFT 0x0000000d
+#define HDP_NONSURFACE_INFO__NONSURF_SLICE_TILE_MAX_MSB_MASK 0x40000000L
+#define HDP_NONSURFACE_INFO__NONSURF_SLICE_TILE_MAX_MSB__SHIFT 0x0000001e
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_COMPACT_MASK 0x00010000L
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_COMPACT__SHIFT 0x00000010
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_SPLIT_MASK 0x000e0000L
+#define HDP_NONSURFACE_INFO__NONSURF_TILE_SPLIT__SHIFT 0x00000011
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PIPE_CONFIG_MASK 0xf8000000L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PIPE_CONFIG__SHIFT 0x0000001b
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_DIR_MASK 0x00000038L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_DIR__SHIFT 0x00000003
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_MAX_Z_MASK 0x000ffe00L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_MAX_Z__SHIFT 0x00000009
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_NUM_MASK 0x000001c0L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_NUM__SHIFT 0x00000006
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_PRI_MASK 0x00000007L
+#define HDP_NONSURFACE_PREFETCH__NONSURF_PREFETCH_PRI__SHIFT 0x00000000
+#define HDP_NONSURFACE_SIZE__NONSURF_PITCH_TILE_MAX_MASK 0x000007ffL
+#define HDP_NONSURFACE_SIZE__NONSURF_PITCH_TILE_MAX__SHIFT 0x00000000
+#define HDP_NONSURFACE_SIZE__NONSURF_SLICE_TILE_MAX_MASK 0xfffff800L
+#define HDP_NONSURFACE_SIZE__NONSURF_SLICE_TILE_MAX__SHIFT 0x0000000b
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x00000001
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x00000000
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x00000001
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x00000000
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000ff00L
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x00000008
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000ffL
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x00000000
+#define HDP_SC_MULTI_CHIP_CNTL__LOG2_NUM_CHIPS_MASK 0x00000007L
+#define HDP_SC_MULTI_CHIP_CNTL__LOG2_NUM_CHIPS__SHIFT 0x00000000
+#define HDP_SC_MULTI_CHIP_CNTL__MULTI_CHIP_TILE_SIZE_MASK 0x00000018L
+#define HDP_SC_MULTI_CHIP_CNTL__MULTI_CHIP_TILE_SIZE__SHIFT 0x00000003
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xffffffffL
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x00000000
+#define HDP_TILING_CONFIG__BANK_SWAPS_MASK 0x00003800L
+#define HDP_TILING_CONFIG__BANK_SWAPS__SHIFT 0x0000000b
+#define HDP_TILING_CONFIG__BANK_TILING_MASK 0x00000030L
+#define HDP_TILING_CONFIG__BANK_TILING__SHIFT 0x00000004
+#define HDP_TILING_CONFIG__GROUP_SIZE_MASK 0x000000c0L
+#define HDP_TILING_CONFIG__GROUP_SIZE__SHIFT 0x00000006
+#define HDP_TILING_CONFIG__PIPE_TILING_MASK 0x0000000eL
+#define HDP_TILING_CONFIG__PIPE_TILING__SHIFT 0x00000001
+#define HDP_TILING_CONFIG__ROW_TILING_MASK 0x00000700L
+#define HDP_TILING_CONFIG__ROW_TILING__SHIFT 0x00000008
+#define HDP_TILING_CONFIG__SAMPLE_SPLIT_MASK 0x0000c000L
+#define HDP_TILING_CONFIG__SAMPLE_SPLIT__SHIFT 0x0000000e
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000fL
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x00000000
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000f0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x00000004
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000f00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x00000008
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000f000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0x0000000c
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000f0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x00000010
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00f00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x00000014
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0f000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x00000018
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xf0000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x0000001c
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x0003ffffL
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x00000000
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_0_ON_DELAY_MASK 0x0000000fL
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_0_ON_DELAY__SHIFT 0x00000000
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_1_OFF_DELAY_MASK 0x00000ff0L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_1_OFF_DELAY__SHIFT 0x00000004
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_2_RSVD_MASK 0x3ffff000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_2_RSVD__SHIFT 0x0000000c
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_3_SOFT_CORE_OVERRIDE_MASK 0x40000000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_3_SOFT_CORE_OVERRIDE__SHIFT 0x0000001e
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_4_SOFT_REG_OVERRIDE_MASK 0x80000000L
+#define HDP_XDP_CGTT_BLK_CTRL__CGTT_BLK_CTRL_4_SOFT_REG_OVERRIDE__SHIFT 0x0000001f
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000ffL
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x00000000
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000ff00L
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x00000008
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00ff0000L
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x00000010
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xff000000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x00000018
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000ffffL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x00000000
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x00000014
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000f0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x00000010
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x00000012
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000fL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x00000000
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x00000008
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000f0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x00000004
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x00000013
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x00000014
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x00000010
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_SIDE_MASK 0x00020000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_SIDE__SHIFT 0x00000011
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000f800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0x0000000b
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_DBG_ADDR__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_ADDR__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_ADDR__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_ADDR__STS__SHIFT 0x00000000
+#define HDP_XDP_DBG_DATA__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_DATA__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_DATA__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_DATA__STS__SHIFT 0x00000000
+#define HDP_XDP_DBG_MASK__CTRL_MASK 0xffff0000L
+#define HDP_XDP_DBG_MASK__CTRL__SHIFT 0x00000010
+#define HDP_XDP_DBG_MASK__STS_MASK 0x0000ffffL
+#define HDP_XDP_DBG_MASK__STS__SHIFT 0x00000000
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xffffffffL
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x00000000
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xffffffffL
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x00000000
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03ffffffL
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x00000000
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0x0000000c
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0x0000000d
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003fL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x00000000
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000fc0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x00000006
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_PRIV_MASK 0x00000001L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_PRIV__SHIFT 0x00000000
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000006L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x00000001
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_TRAN_MASK 0x00000008L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_TRAN__SHIFT 0x00000003
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x000000f0L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x00000004
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_PRIV_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_PRIV__SHIFT 0x00000000
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_SWAP_MASK 0x00000006L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_SWAP__SHIFT 0x00000001
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_TRAN_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_TRAN__SHIFT 0x00000003
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_VMID_MASK 0x07800000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_WRREQ_VMID__SHIFT 0x00000017
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_MC_STALL_ON_BUF_FULL_MASK_MASK 0x00700000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_MC_STALL_ON_BUF_FULL_MASK__SHIFT 0x00000014
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_PRIV_MASK 0x00000010L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_PRIV__SHIFT 0x00000004
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_SWAP_MASK 0x00000060L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_SWAP__SHIFT 0x00000005
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_TRAN_MASK 0x00000080L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_TRAN__SHIFT 0x00000007
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_VMID_MASK 0x78000000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_SID_TAP_WRREQ_VMID__SHIFT 0x0000001b
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000fc000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0x0000000e
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XL8R_WRREQ_CRD_OVERRIDE_MASK 0x00003f00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XL8R_WRREQ_CRD_OVERRIDE__SHIFT 0x00000008
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x00000000
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x00000001
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000ffffL
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000f0000L
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x00000010
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x00000014
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000fL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x00000000
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x00000004
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x01e00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x00000015
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_MASK 0x001ffffeL
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR__SHIFT 0x00000001
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x00000000
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x00003fffL
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x00000000
+#define HDP_XDP_SID_CFG__SID_CFG_FLNUM_MSB_SEL_MASK 0x00000018L
+#define HDP_XDP_SID_CFG__SID_CFG_FLNUM_MSB_SEL__SHIFT 0x00000003
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_EN__SHIFT 0x00000000
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_SID_CFG__SID_CFG_WR_COMBINE_TIMER__SHIFT 0x00000001
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_ENABLE_COUNT_MASK 0x0000003fL
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_ENABLE_COUNT__SHIFT 0x00000000
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_GATING_DIS_MASK 0x00000040L
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_REG_CLK_GATING_DIS__SHIFT 0x00000006
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_WAKE_DYN_CLK_MASK 0x00000080L
+#define HDP_XDP_SRBM_CFG__SRBM_CFG_WAKE_DYN_CLK__SHIFT 0x00000007
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000ffffL
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x00000000
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xffff0000L
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x00000010
+#define HFS_SEED0__RESERVED_MASK 0xffffffffL
+#define HFS_SEED0__RESERVED__SHIFT 0x00000000
+#define HFS_SEED1__RESERVED_MASK 0xffffffffL
+#define HFS_SEED1__RESERVED__SHIFT 0x00000000
+#define HFS_SEED2__RESERVED_MASK 0xffffffffL
+#define HFS_SEED2__RESERVED__SHIFT 0x00000000
+#define HFS_SEED3__RESERVED_MASK 0xffffffffL
+#define HFS_SEED3__RESERVED__SHIFT 0x00000000
+#define IH_ADVFAULT_CNTL__NUM_FAULTS_DROPPED_MASK 0x0000ff00L
+#define IH_ADVFAULT_CNTL__NUM_FAULTS_DROPPED__SHIFT 0x00000008
+#define IH_ADVFAULT_CNTL__WAIT_TIMER_MASK 0x3fff0000L
+#define IH_ADVFAULT_CNTL__WAIT_TIMER__SHIFT 0x00000010
+#define IH_ADVFAULT_CNTL__WATERMARK_ENABLE_MASK 0x00000008L
+#define IH_ADVFAULT_CNTL__WATERMARK_ENABLE__SHIFT 0x00000003
+#define IH_ADVFAULT_CNTL__WATERMARK_MASK 0x00000007L
+#define IH_ADVFAULT_CNTL__WATERMARK_REACHED_MASK 0x00000010L
+#define IH_ADVFAULT_CNTL__WATERMARK_REACHED__SHIFT 0x00000004
+#define IH_ADVFAULT_CNTL__WATERMARK__SHIFT 0x00000000
+#define IH_CNTL__CLIENT_FIFO_HIGHWATER_MASK 0x00000300L
+#define IH_CNTL__CLIENT_FIFO_HIGHWATER__SHIFT 0x00000008
+#define IH_CNTL__ENABLE_INTR_MASK 0x00000001L
+#define IH_CNTL__ENABLE_INTR__SHIFT 0x00000000
+#define IH_CNTL__MC_FIFO_HIGHWATER_MASK 0x00007c00L
+#define IH_CNTL__MC_FIFO_HIGHWATER__SHIFT 0x0000000a
+#define IH_CNTL__MC_SWAP_MASK 0x00000006L
+#define IH_CNTL__MC_SWAP__SHIFT 0x00000001
+#define IH_CNTL__MC_TRAN_MASK 0x00000008L
+#define IH_CNTL__MC_TRAN__SHIFT 0x00000003
+#define IH_CNTL__MC_VMID_MASK 0x1e000000L
+#define IH_CNTL__MC_VMID__SHIFT 0x00000019
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01f00000L
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x00000014
+#define IH_CNTL__MC_WRREQ_CREDIT_MASK 0x000f8000L
+#define IH_CNTL__MC_WRREQ_CREDIT__SHIFT 0x0000000f
+#define IH_CNTL__RPTR_REARM_MASK 0x00000010L
+#define IH_CNTL__RPTR_REARM__SHIFT 0x00000004
+#define IH_LEVEL_STATUS__BIF_STATUS_MASK 0x00000010L
+#define IH_LEVEL_STATUS__BIF_STATUS__SHIFT 0x00000004
+#define IH_LEVEL_STATUS__DC_STATUS_MASK 0x00000001L
+#define IH_LEVEL_STATUS__DC_STATUS__SHIFT 0x00000000
+#define IH_LEVEL_STATUS__ROM_STATUS_MASK 0x00000004L
+#define IH_LEVEL_STATUS__ROM_STATUS__SHIFT 0x00000002
+#define IH_LEVEL_STATUS__SRBM_STATUS_MASK 0x00000008L
+#define IH_LEVEL_STATUS__SRBM_STATUS__SHIFT 0x00000003
+#define IH_LEVEL_STATUS__XDMA_STATUS_MASK 0x00000020L
+#define IH_LEVEL_STATUS__XDMA_STATUS__SHIFT 0x00000005
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xffffffffL
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x00000000
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xffffffffL
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x00000000
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x00000001
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00000200L
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x00000009
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x00000000
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00000100L
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x00000008
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000000fcL
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x00000002
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0000fc00L
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x0000000a
+#define IH_RB_BASE__ADDR_MASK 0xffffffffL
+#define IH_RB_BASE__ADDR__SHIFT 0x00000000
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000040L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x00000006
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x00000007
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003eL
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x0000001f
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x00000010
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x00000008
+#define IH_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003e00L
+#define IH_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x00000009
+#define IH_RB_RPTR__OFFSET_MASK 0x0003fffcL
+#define IH_RB_RPTR__OFFSET__SHIFT 0x00000002
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000ffL
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x00000000
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xfffffffcL
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x00000002
+#define IH_RB_WPTR__OFFSET_MASK 0x0003fffcL
+#define IH_RB_WPTR__OFFSET__SHIFT 0x00000002
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x00000000
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0x0000000a
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__IDLE__SHIFT 0x00000000
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x00000001
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x00000008
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x00000009
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x00000006
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x00000007
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x00000004
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL__SHIFT 0x00000003
+#define IH_STATUS__RB_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_IDLE__SHIFT 0x00000002
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x00000005
+#define KEFUSE0__RESERVED_MASK 0xffffffffL
+#define KEFUSE0__RESERVED__SHIFT 0x00000000
+#define KEFUSE1__RESERVED_MASK 0xffffffffL
+#define KEFUSE1__RESERVED__SHIFT 0x00000000
+#define KEFUSE2__RESERVED_MASK 0xffffffffL
+#define KEFUSE2__RESERVED__SHIFT 0x00000000
+#define KEFUSE3__RESERVED_MASK 0xffffffffL
+#define KEFUSE3__RESERVED__SHIFT 0x00000000
+#define KHFS0__RESERVED_MASK 0xffffffffL
+#define KHFS0__RESERVED__SHIFT 0x00000000
+#define KHFS1__RESERVED_MASK 0xffffffffL
+#define KHFS1__RESERVED__SHIFT 0x00000000
+#define KHFS2__RESERVED_MASK 0xffffffffL
+#define KHFS2__RESERVED__SHIFT 0x00000000
+#define KHFS3__RESERVED_MASK 0xffffffffL
+#define KHFS3__RESERVED__SHIFT 0x00000000
+#define KSESSION0__RESERVED_MASK 0xffffffffL
+#define KSESSION0__RESERVED__SHIFT 0x00000000
+#define KSESSION1__RESERVED_MASK 0xffffffffL
+#define KSESSION1__RESERVED__SHIFT 0x00000000
+#define KSESSION2__RESERVED_MASK 0xffffffffL
+#define KSESSION2__RESERVED__SHIFT 0x00000000
+#define KSESSION3__RESERVED_MASK 0xffffffffL
+#define KSESSION3__RESERVED__SHIFT 0x00000000
+#define KSIG0__RESERVED_MASK 0xffffffffL
+#define KSIG0__RESERVED__SHIFT 0x00000000
+#define KSIG1__RESERVED_MASK 0xffffffffL
+#define KSIG1__RESERVED__SHIFT 0x00000000
+#define KSIG2__RESERVED_MASK 0xffffffffL
+#define KSIG2__RESERVED__SHIFT 0x00000000
+#define KSIG3__RESERVED_MASK 0xffffffffL
+#define KSIG3__RESERVED__SHIFT 0x00000000
+#define LX0__RESERVED_MASK 0xffffffffL
+#define LX0__RESERVED__SHIFT 0x00000000
+#define LX1__RESERVED_MASK 0xffffffffL
+#define LX1__RESERVED__SHIFT 0x00000000
+#define LX2__RESERVED_MASK 0xffffffffL
+#define LX2__RESERVED__SHIFT 0x00000000
+#define LX3__RESERVED_MASK 0xffffffffL
+#define LX3__RESERVED__SHIFT 0x00000000
+#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
+#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x00000003
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001c0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x00000006
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000e00L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x00000009
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0x0000000f
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00e00000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x00000015
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000ff00L
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x00000008
+#define SEM_MAILBOX_CONTROL__SIDEPORT_ENABLE_MASK 0x000000ffL
+#define SEM_MAILBOX_CONTROL__SIDEPORT_ENABLE__SHIFT 0x00000000
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000ff00L
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x00000008
+#define SEM_MAILBOX__SIDEPORT_MASK 0x000000ffL
+#define SEM_MAILBOX__SIDEPORT__SHIFT 0x00000000
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x00000000
+#define SPU_PORT_STATUS__RESERVED_MASK 0xffffffffL
+#define SPU_PORT_STATUS__RESERVED__SHIFT 0x00000000
+#define SRBM_CAM_DATA__CAM_ADDR_MASK 0x0000ffffL
+#define SRBM_CAM_DATA__CAM_ADDR__SHIFT 0x00000000
+#define SRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xffff0000L
+#define SRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x00000010
+#define SRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+#define SRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x00000000
+#define SRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000ffL
+#define SRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x00000000
+#define SRBM_CNTL__COMBINE_SYSTEM_MC_MASK 0x00020000L
+#define SRBM_CNTL__COMBINE_SYSTEM_MC__SHIFT 0x00000011
+#define SRBM_CNTL__PWR_REQUEST_HALT_MASK 0x00010000L
+#define SRBM_CNTL__PWR_REQUEST_HALT__SHIFT 0x00000010
+#define SRBM_CNTL__READ_TIMEOUT_MASK 0x000003ffL
+#define SRBM_CNTL__READ_TIMEOUT__SHIFT 0x00000000
+#define SRBM_DEBUG_CNTL__SRBM_DEBUG_INDEX_MASK 0x0000003fL
+#define SRBM_DEBUG_CNTL__SRBM_DEBUG_INDEX__SHIFT 0x00000000
+#define SRBM_DEBUG_DATA__DATA_MASK 0xffffffffL
+#define SRBM_DEBUG_DATA__DATA__SHIFT 0x00000000
+#define SRBM_DEBUG__DISABLE_READ_TIMEOUT_MASK 0x00000002L
+#define SRBM_DEBUG__DISABLE_READ_TIMEOUT__SHIFT 0x00000001
+#define SRBM_DEBUG__IGNORE_RDY_MASK 0x00000001L
+#define SRBM_DEBUG__IGNORE_RDY__SHIFT 0x00000000
+#define SRBM_DEBUG__MC_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000100L
+#define SRBM_DEBUG__MC_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000008
+#define SRBM_DEBUG_SNAPSHOT__BIF_RDY_MASK 0x00000080L
+#define SRBM_DEBUG_SNAPSHOT__BIF_RDY__SHIFT 0x00000007
+#define SRBM_DEBUG_SNAPSHOT__DC_RDY_MASK 0x00000040L
+#define SRBM_DEBUG_SNAPSHOT__DC_RDY__SHIFT 0x00000006
+#define SRBM_DEBUG__SNAPSHOT_FREE_CNTRS_MASK 0x00000004L
+#define SRBM_DEBUG__SNAPSHOT_FREE_CNTRS__SHIFT 0x00000002
+#define SRBM_DEBUG_SNAPSHOT__GRBM_RDY_MASK 0x00000020L
+#define SRBM_DEBUG_SNAPSHOT__GRBM_RDY__SHIFT 0x00000005
+#define SRBM_DEBUG_SNAPSHOT__MCB_RDY_MASK 0x00000001L
+#define SRBM_DEBUG_SNAPSHOT__MCB_RDY__SHIFT 0x00000000
+#define SRBM_DEBUG_SNAPSHOT__MCC0_RDY_MASK 0x10000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC0_RDY__SHIFT 0x0000001c
+#define SRBM_DEBUG_SNAPSHOT__MCC1_RDY_MASK 0x08000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC1_RDY__SHIFT 0x0000001b
+#define SRBM_DEBUG_SNAPSHOT__MCC2_RDY_MASK 0x04000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC2_RDY__SHIFT 0x0000001a
+#define SRBM_DEBUG_SNAPSHOT__MCC3_RDY_MASK 0x02000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC3_RDY__SHIFT 0x00000019
+#define SRBM_DEBUG_SNAPSHOT__MCC4_RDY_MASK 0x01000000L
+#define SRBM_DEBUG_SNAPSHOT__MCC4_RDY__SHIFT 0x00000018
+#define SRBM_DEBUG_SNAPSHOT__MCC5_RDY_MASK 0x00800000L
+#define SRBM_DEBUG_SNAPSHOT__MCC5_RDY__SHIFT 0x00000017
+#define SRBM_DEBUG_SNAPSHOT__MCC6_RDY_MASK 0x00400000L
+#define SRBM_DEBUG_SNAPSHOT__MCC6_RDY__SHIFT 0x00000016
+#define SRBM_DEBUG_SNAPSHOT__MCC7_RDY_MASK 0x00200000L
+#define SRBM_DEBUG_SNAPSHOT__MCC7_RDY__SHIFT 0x00000015
+#define SRBM_DEBUG_SNAPSHOT__MCD0_RDY_MASK 0x00100000L
+#define SRBM_DEBUG_SNAPSHOT__MCD0_RDY__SHIFT 0x00000014
+#define SRBM_DEBUG_SNAPSHOT__MCD1_RDY_MASK 0x00080000L
+#define SRBM_DEBUG_SNAPSHOT__MCD1_RDY__SHIFT 0x00000013
+#define SRBM_DEBUG_SNAPSHOT__MCD2_RDY_MASK 0x00040000L
+#define SRBM_DEBUG_SNAPSHOT__MCD2_RDY__SHIFT 0x00000012
+#define SRBM_DEBUG_SNAPSHOT__MCD3_RDY_MASK 0x00020000L
+#define SRBM_DEBUG_SNAPSHOT__MCD3_RDY__SHIFT 0x00000011
+#define SRBM_DEBUG_SNAPSHOT__MCD4_RDY_MASK 0x00010000L
+#define SRBM_DEBUG_SNAPSHOT__MCD4_RDY__SHIFT 0x00000010
+#define SRBM_DEBUG_SNAPSHOT__MCD5_RDY_MASK 0x00008000L
+#define SRBM_DEBUG_SNAPSHOT__MCD5_RDY__SHIFT 0x0000000f
+#define SRBM_DEBUG_SNAPSHOT__MCD6_RDY_MASK 0x00004000L
+#define SRBM_DEBUG_SNAPSHOT__MCD6_RDY__SHIFT 0x0000000e
+#define SRBM_DEBUG_SNAPSHOT__MCD7_RDY_MASK 0x00002000L
+#define SRBM_DEBUG_SNAPSHOT__MCD7_RDY__SHIFT 0x0000000d
+#define SRBM_DEBUG_SNAPSHOT__ORB_RDY_MASK 0x00001000L
+#define SRBM_DEBUG_SNAPSHOT__ORB_RDY__SHIFT 0x0000000c
+#define SRBM_DEBUG_SNAPSHOT__REGBB_RDY_MASK 0x00000800L
+#define SRBM_DEBUG_SNAPSHOT__REGBB_RDY__SHIFT 0x0000000b
+#define SRBM_DEBUG_SNAPSHOT__UVD_RDY_MASK 0x00000200L
+#define SRBM_DEBUG_SNAPSHOT__UVD_RDY__SHIFT 0x00000009
+#define SRBM_DEBUG_SNAPSHOT__VCE_RDY_MASK 0x20000000L
+#define SRBM_DEBUG_SNAPSHOT__VCE_RDY__SHIFT 0x0000001d
+#define SRBM_DEBUG_SNAPSHOT__XDMA_RDY_MASK 0x00000100L
+#define SRBM_DEBUG_SNAPSHOT__XDMA_RDY__SHIFT 0x00000008
+#define SRBM_DEBUG_SNAPSHOT__XSP_RDY_MASK 0x00000400L
+#define SRBM_DEBUG_SNAPSHOT__XSP_RDY__SHIFT 0x0000000a
+#define SRBM_DEBUG__SYS_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000010L
+#define SRBM_DEBUG__SYS_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000004
+#define SRBM_DEBUG__UVD_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000040L
+#define SRBM_DEBUG__UVD_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000006
+#define SRBM_DEBUG__VCE_CLOCK_DOMAIN_OVERRIDE_MASK 0x00000020L
+#define SRBM_DEBUG__VCE_CLOCK_DOMAIN_OVERRIDE__SHIFT 0x00000005
+#define SRBM_GFX_CNTL__VMID_MASK 0x000000f0L
+#define SRBM_GFX_CNTL__VMID__SHIFT 0x00000004
+#define SRBM_INT_ACK__RDERR_INT_ACK_MASK 0x00000001L
+#define SRBM_INT_ACK__RDERR_INT_ACK__SHIFT 0x00000000
+#define SRBM_INT_CNTL__RDERR_INT_MASK_MASK 0x00000001L
+#define SRBM_INT_CNTL__RDERR_INT_MASK__SHIFT 0x00000000
+#define SRBM_INT_STATUS__RDERR_INT_STAT_MASK 0x00000001L
+#define SRBM_INT_STATUS__RDERR_INT_STAT__SHIFT 0x00000000
+#define SRBM_MC_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_MC_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_MC_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_MC_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_HI__PERF_COUNT0_HI_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER0_HI__PERF_COUNT0_HI__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_LO__PERF_COUNT0_LO_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER0_LO__PERF_COUNT0_LO__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003fL
+#define SRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_HI__PERF_COUNT1_HI_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER1_HI__PERF_COUNT1_HI__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_LO__PERF_COUNT1_LO_MASK 0xffffffffL
+#define SRBM_PERFCOUNTER1_LO__PERF_COUNT1_LO__SHIFT 0x00000000
+#define SRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003fL
+#define SRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x00000000
+#define SRBM_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define SRBM_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x00000008
+#define SRBM_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+#define SRBM_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0x0000000a
+#define SRBM_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000fL
+#define SRBM_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x00000000
+#define SRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003fffcL
+#define SRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x00000002
+#define SRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+#define SRBM_READ_ERROR__READ_ERROR__SHIFT 0x0000001f
+#define SRBM_READ_ERROR__READ_REQUESTER_GRBM_MASK 0x02000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_GRBM__SHIFT 0x00000019
+#define SRBM_READ_ERROR__READ_REQUESTER_HI_MASK 0x01000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_HI__SHIFT 0x00000018
+#define SRBM_READ_ERROR__READ_REQUESTER_SMU_MASK 0x04000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_SMU__SHIFT 0x0000001a
+#define SRBM_READ_ERROR__READ_REQUESTER_TST_MASK 0x00400000L
+#define SRBM_READ_ERROR__READ_REQUESTER_TST__SHIFT 0x00000016
+#define SRBM_READ_ERROR__READ_REQUESTER_UVD_MASK 0x20000000L
+#define SRBM_READ_ERROR__READ_REQUESTER_UVD__SHIFT 0x0000001d
+#define SRBM_READ_ERROR__READ_REQUESTER_VCE_MASK 0x00100000L
+#define SRBM_READ_ERROR__READ_REQUESTER_VCE__SHIFT 0x00000014
+#define SRBM_SOFT_RESET__SOFT_RESET_BIF_MASK 0x00000002L
+#define SRBM_SOFT_RESET__SOFT_RESET_BIF__SHIFT 0x00000001
+#define SRBM_SOFT_RESET__SOFT_RESET_DC_MASK 0x00000020L
+#define SRBM_SOFT_RESET__SOFT_RESET_DC__SHIFT 0x00000005
+#define SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK 0x00000100L
+#define SRBM_SOFT_RESET__SOFT_RESET_GRBM__SHIFT 0x00000008
+#define SRBM_SOFT_RESET__SOFT_RESET_HDP_MASK 0x00000200L
+#define SRBM_SOFT_RESET__SOFT_RESET_HDP__SHIFT 0x00000009
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK 0x00000400L
+#define SRBM_SOFT_RESET__SOFT_RESET_IH__SHIFT 0x0000000a
+#define SRBM_SOFT_RESET__SOFT_RESET_MC_MASK 0x00000800L
+#define SRBM_SOFT_RESET__SOFT_RESET_MC__SHIFT 0x0000000b
+#define SRBM_SOFT_RESET__SOFT_RESET_ORB_MASK 0x00800000L
+#define SRBM_SOFT_RESET__SOFT_RESET_ORB__SHIFT 0x00000017
+#define SRBM_SOFT_RESET__SOFT_RESET_REGBB_MASK 0x00400000L
+#define SRBM_SOFT_RESET__SOFT_RESET_REGBB__SHIFT 0x00000016
+#define SRBM_SOFT_RESET__SOFT_RESET_ROM_MASK 0x00004000L
+#define SRBM_SOFT_RESET__SOFT_RESET_ROM__SHIFT 0x0000000e
+#define SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK 0x00008000L
+#define SRBM_SOFT_RESET__SOFT_RESET_SEM__SHIFT 0x0000000f
+#define SRBM_SOFT_RESET__SOFT_RESET_TST_MASK 0x00200000L
+#define SRBM_SOFT_RESET__SOFT_RESET_TST__SHIFT 0x00000015
+#define SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK 0x00040000L
+#define SRBM_SOFT_RESET__SOFT_RESET_UVD__SHIFT 0x00000012
+#define SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK 0x01000000L
+#define SRBM_SOFT_RESET__SOFT_RESET_VCE__SHIFT 0x00000018
+#define SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK 0x00020000L
+#define SRBM_SOFT_RESET__SOFT_RESET_VMC__SHIFT 0x00000011
+#define SRBM_SOFT_RESET__SOFT_RESET_XDMA_MASK 0x02000000L
+#define SRBM_SOFT_RESET__SOFT_RESET_XDMA__SHIFT 0x00000019
+#define SRBM_SOFT_RESET__SOFT_RESET_XSP_MASK 0x00080000L
+#define SRBM_SOFT_RESET__SOFT_RESET_XSP__SHIFT 0x00000013
+#define SRBM_STATUS2__TST_RQ_PENDING_MASK 0x00000002L
+#define SRBM_STATUS2__TST_RQ_PENDING__SHIFT 0x00000001
+#define SRBM_STATUS2__VCE_BUSY_MASK 0x00000080L
+#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
+#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
+#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
+#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
+#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
+#define SRBM_STATUS2__XSP_BUSY__SHIFT 0x00000004
+#define SRBM_STATUS__BIF_BUSY_MASK 0x20000000L
+#define SRBM_STATUS__BIF_BUSY__SHIFT 0x0000001d
+#define SRBM_STATUS__GRBM_RQ_PENDING_MASK 0x00000020L
+#define SRBM_STATUS__GRBM_RQ_PENDING__SHIFT 0x00000005
+#define SRBM_STATUS__HI_RQ_PENDING_MASK 0x00000040L
+#define SRBM_STATUS__HI_RQ_PENDING__SHIFT 0x00000006
+#define SRBM_STATUS__IH_BUSY_MASK 0x00020000L
+#define SRBM_STATUS__IH_BUSY__SHIFT 0x00000011
+#define SRBM_STATUS__IO_EXTERN_SIGNAL_MASK 0x00000080L
+#define SRBM_STATUS__IO_EXTERN_SIGNAL__SHIFT 0x00000007
+#define SRBM_STATUS__MCB_BUSY_MASK 0x00000200L
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x00000009
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x00000400L
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0x0000000a
+#define SRBM_STATUS__MCC_BUSY_MASK 0x00000800L
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0x0000000b
+#define SRBM_STATUS__MCD_BUSY_MASK 0x00001000L
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0x0000000c
+#define SRBM_STATUS__SEM_BUSY_MASK 0x00004000L
+#define SRBM_STATUS__SEM_BUSY__SHIFT 0x0000000e
+#define SRBM_STATUS__SMU_RQ_PENDING_MASK 0x00000010L
+#define SRBM_STATUS__SMU_RQ_PENDING__SHIFT 0x00000004
+#define SRBM_STATUS__UVD_BUSY_MASK 0x00080000L
+#define SRBM_STATUS__UVD_BUSY__SHIFT 0x00000013
+#define SRBM_STATUS__UVD_RQ_PENDING_MASK 0x00000002L
+#define SRBM_STATUS__UVD_RQ_PENDING__SHIFT 0x00000001
+#define SRBM_STATUS__VMC_BUSY_MASK 0x00000100L
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x00000008
+#define SRBM_SYS_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_SYS_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_SYS_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_SYS_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_UVD_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_UVD_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_UVD_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_UVD_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define SRBM_VCE_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001f00L
+#define SRBM_VCE_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x00000008
+#define SRBM_VCE_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000fL
+#define SRBM_VCE_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x00000000
+#define UVD_CONFIG__UVD_RDREQ_URG_MASK 0x00000f00L
+#define UVD_CONFIG__UVD_RDREQ_URG__SHIFT 0x00000008
+#define UVD_CONFIG__UVD_REQ_TRAN_MASK 0x00010000L
+#define UVD_CONFIG__UVD_REQ_TRAN__SHIFT 0x00000010
+#define VCE_CONFIG__VCE_RDREQ_URG_MASK 0x00000f00L
+#define VCE_CONFIG__VCE_RDREQ_URG__SHIFT 0x00000008
+#define VCE_CONFIG__VCE_REQ_TRAN_MASK 0x00010000L
+#define VCE_CONFIG__VCE_REQ_TRAN__SHIFT 0x00000010
+#define XDMA_MSTR_CNTL__XDMA_MSTR_LAT_TEST_EN_MASK 0x00080000L
+#define XDMA_MSTR_CNTL__XDMA_MSTR_LAT_TEST_EN__SHIFT 0x00000013
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_ENABLE_MASK 0x80000000L
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_ENABLE__SHIFT 0x0000001f
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT_MASK 0x0000ffffL
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_COUNT__SHIFT 0x00000000
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_THRESHOLD_MASK 0x3fff0000L
+#define XDMA_MSTR_MEM_OVERFLOW_CNTL__XDMA_MSTR_OVERFLOW_THRESHOLD__SHIFT 0x00000010
+
+#endif
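
For readers skimming the field definitions above: every register field is described by a _MASK/__SHIFT pair, and consumers combine the two to read or update individual bits of a register word. Below is a minimal sketch of that pattern, assuming the definitions above are in scope; the get_field()/set_field() helpers and the enable_ring() example are illustrative only and are not part of this patch.

#include <stdint.h>

/* Extract a field: mask off its bits, then shift them down to bit 0. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its old bits, then OR in the shifted new value. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: read RB_SIZE out of an IH_RB_CNTL value and set RB_ENABLE. */
static uint32_t enable_ring(uint32_t ih_rb_cntl, uint32_t *rb_size)
{
	*rb_size = get_field(ih_rb_cntl, IH_RB_CNTL__RB_SIZE_MASK,
			     IH_RB_CNTL__RB_SIZE__SHIFT);
	return set_field(ih_rb_cntl, IH_RB_CNTL__RB_ENABLE_MASK,
			 IH_RB_CNTL__RB_ENABLE__SHIFT, 1);
}
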
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
new file mode 100644
index 000000000000..6b10be61efc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -0,0 +1,148 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_6_0_D_H
+#define SMU_6_0_D_H
+
+#define ixLCAC_MC0_CNTL 0x011C
+#define ixLCAC_MC0_OVR_SEL 0x011D
+#define ixLCAC_MC0_OVR_VAL 0x011E
+#define ixLCAC_MC1_CNTL 0x011F
+#define ixLCAC_MC1_OVR_SEL 0x0120
+#define ixLCAC_MC1_OVR_VAL 0x0121
+#define ixLCAC_MC2_CNTL 0x0122
+#define ixLCAC_MC2_OVR_SEL 0x0123
+#define ixLCAC_MC2_OVR_VAL 0x0124
+#define ixLCAC_MC3_CNTL 0x0125
+#define ixLCAC_MC3_OVR_SEL 0x0126
+#define ixLCAC_MC3_OVR_VAL 0x0127
+#define ixLCAC_MC4_CNTL 0x0128
+#define ixLCAC_MC4_OVR_SEL 0x0129
+#define ixLCAC_MC4_OVR_VAL 0x012A
+#define ixLCAC_MC5_CNTL 0x012B
+#define ixLCAC_MC5_OVR_SEL 0x012C
+#define ixLCAC_MC5_OVR_VAL 0x012D
+#define ixSMC_PC_C 0x80000370
+#define ixTHM_TMON0_DEBUG 0x03F0
+#define ixTHM_TMON0_INT_DATA 0x0380
+#define ixTHM_TMON0_RDIL0_DATA 0x0300
+#define ixTHM_TMON0_RDIL10_DATA 0x030A
+#define ixTHM_TMON0_RDIL11_DATA 0x030B
+#define ixTHM_TMON0_RDIL12_DATA 0x030C
+#define ixTHM_TMON0_RDIL13_DATA 0x030D
+#define ixTHM_TMON0_RDIL14_DATA 0x030E
+#define ixTHM_TMON0_RDIL15_DATA 0x030F
+#define ixTHM_TMON0_RDIL1_DATA 0x0301
+#define ixTHM_TMON0_RDIL2_DATA 0x0302
+#define ixTHM_TMON0_RDIL3_DATA 0x0303
+#define ixTHM_TMON0_RDIL4_DATA 0x0304
+#define ixTHM_TMON0_RDIL5_DATA 0x0305
+#define ixTHM_TMON0_RDIL6_DATA 0x0306
+#define ixTHM_TMON0_RDIL7_DATA 0x0307
+#define ixTHM_TMON0_RDIL8_DATA 0x0308
+#define ixTHM_TMON0_RDIL9_DATA 0x0309
+#define ixTHM_TMON0_RDIR0_DATA 0x0310
+#define ixTHM_TMON0_RDIR10_DATA 0x031A
+#define ixTHM_TMON0_RDIR11_DATA 0x031B
+#define ixTHM_TMON0_RDIR12_DATA 0x031C
+#define ixTHM_TMON0_RDIR13_DATA 0x031D
+#define ixTHM_TMON0_RDIR14_DATA 0x031E
+#define ixTHM_TMON0_RDIR15_DATA 0x031F
+#define ixTHM_TMON0_RDIR1_DATA 0x0311
+#define ixTHM_TMON0_RDIR2_DATA 0x0312
+#define ixTHM_TMON0_RDIR3_DATA 0x0313
+#define ixTHM_TMON0_RDIR4_DATA 0x0314
+#define ixTHM_TMON0_RDIR5_DATA 0x0315
+#define ixTHM_TMON0_RDIR6_DATA 0x0316
+#define ixTHM_TMON0_RDIR7_DATA 0x0317
+#define ixTHM_TMON0_RDIR8_DATA 0x0318
+#define ixTHM_TMON0_RDIR9_DATA 0x0319
+#define ixTHM_TMON1_DEBUG 0x03F1
+#define ixTHM_TMON1_INT_DATA 0x0381
+#define ixTHM_TMON1_RDIL0_DATA 0x0320
+#define ixTHM_TMON1_RDIL10_DATA 0x032A
+#define ixTHM_TMON1_RDIL11_DATA 0x032B
+#define ixTHM_TMON1_RDIL12_DATA 0x032C
+#define ixTHM_TMON1_RDIL13_DATA 0x032D
+#define ixTHM_TMON1_RDIL14_DATA 0x032E
+#define ixTHM_TMON1_RDIL15_DATA 0x032F
+#define ixTHM_TMON1_RDIL1_DATA 0x0321
+#define ixTHM_TMON1_RDIL2_DATA 0x0322
+#define ixTHM_TMON1_RDIL3_DATA 0x0323
+#define ixTHM_TMON1_RDIL4_DATA 0x0324
+#define ixTHM_TMON1_RDIL5_DATA 0x0325
+#define ixTHM_TMON1_RDIL6_DATA 0x0326
+#define ixTHM_TMON1_RDIL7_DATA 0x0327
+#define ixTHM_TMON1_RDIL8_DATA 0x0328
+#define ixTHM_TMON1_RDIL9_DATA 0x0329
+#define ixTHM_TMON1_RDIR0_DATA 0x0330
+#define ixTHM_TMON1_RDIR10_DATA 0x033A
+#define ixTHM_TMON1_RDIR11_DATA 0x033B
+#define ixTHM_TMON1_RDIR12_DATA 0x033C
+#define ixTHM_TMON1_RDIR13_DATA 0x033D
+#define ixTHM_TMON1_RDIR14_DATA 0x033E
+#define ixTHM_TMON1_RDIR15_DATA 0x033F
+#define ixTHM_TMON1_RDIR1_DATA 0x0331
+#define ixTHM_TMON1_RDIR2_DATA 0x0332
+#define ixTHM_TMON1_RDIR3_DATA 0x0333
+#define ixTHM_TMON1_RDIR4_DATA 0x0334
+#define ixTHM_TMON1_RDIR5_DATA 0x0335
+#define ixTHM_TMON1_RDIR6_DATA 0x0336
+#define ixTHM_TMON1_RDIR7_DATA 0x0337
+#define ixTHM_TMON1_RDIR8_DATA 0x0338
+#define ixTHM_TMON1_RDIR9_DATA 0x0339
+#define mmGPIOPAD_A 0x05E7
+#define mmGPIOPAD_EN 0x05E8
+#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
+#define mmGPIOPAD_INT_EN 0x05EE
+#define mmGPIOPAD_INT_POLARITY 0x05F0
+#define mmGPIOPAD_INT_STAT 0x05EC
+#define mmGPIOPAD_INT_STAT_AK 0x05ED
+#define mmGPIOPAD_INT_STAT_EN 0x05EB
+#define mmGPIOPAD_INT_TYPE 0x05EF
+#define mmGPIOPAD_MASK 0x05E6
+#define mmGPIOPAD_PD_EN 0x05F4
+#define mmGPIOPAD_PINSTRAPS 0x05EA
+#define mmGPIOPAD_PU_EN 0x05F3
+#define mmGPIOPAD_RCVR_SEL 0x05F2
+#define mmGPIOPAD_STRENGTH 0x05E5
+#define mmGPIOPAD_SW_INT_STAT 0x05E4
+#define mmGPIOPAD_Y 0x05E9
+#define mmSMC_IND_ACCESS_CNTL 0x008A
+#define mmSMC_IND_DATA_0 0x0081
+#define mmSMC_IND_DATA 0x0081
+#define mmSMC_IND_DATA_1 0x0083
+#define mmSMC_IND_DATA_2 0x0085
+#define mmSMC_IND_DATA_3 0x0087
+#define mmSMC_IND_INDEX_0 0x0080
+#define mmSMC_IND_INDEX 0x0080
+#define mmSMC_IND_INDEX_1 0x0082
+#define mmSMC_IND_INDEX_2 0x0084
+#define mmSMC_IND_INDEX_3 0x0086
+#define mmSMC_MESSAGE_0 0x008B
+#define mmSMC_MESSAGE_1 0x008D
+#define mmSMC_MESSAGE_2 0x008F
+#define mmSMC_RESP_0 0x008C
+#define mmSMC_RESP_1 0x008E
+#define mmSMC_RESP_2 0x0090
+
+#endif
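
A note on the two prefixes in this file: the mm* offsets are ordinary MMIO registers, while the ix* offsets (the LCAC and THM_TMON blocks) sit behind the SMC indirect-access window and are reached through the SMC_IND_INDEX/SMC_IND_DATA pair defined above in this file. The sketch below shows that indirect read under stated assumptions: the mmio_base aperture and the mmio_read32()/mmio_write32() helpers stand in for whatever accessors the surrounding driver provides (they are not part of this patch), and real code would also serialize the index/data sequence with a lock.

#include <stdint.h>

/* Assumed MMIO aperture, indexed by dword offset; a real driver maps this
 * from a PCI BAR and uses its own accessors. */
static volatile uint32_t *mmio_base;

static uint32_t mmio_read32(uint32_t offset)
{
	return mmio_base[offset];
}

static void mmio_write32(uint32_t offset, uint32_t value)
{
	mmio_base[offset] = value;
}

/* Indirect SMC read: post the target index, then fetch the data word. */
static uint32_t smc_ind_read(uint32_t ix_offset)
{
	mmio_write32(mmSMC_IND_INDEX_0, ix_offset);
	return mmio_read32(mmSMC_IND_DATA_0);
}

/* e.g. sample the first TMON0 RDIL data slot. */
static uint32_t read_tmon0_rdil0(void)
{
	return smc_ind_read(ixTHM_TMON0_RDIL0_DATA);
}
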
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
new file mode 100644
index 000000000000..7d3925b7266e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -0,0 +1,715 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_6_0_SH_MASK_H
+#define SMU_6_0_SH_MASK_H
+
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
+#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x00000000
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x00000020L
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x00000005
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x00000040L
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x00000006
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x0000001fL
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x00000000
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffffL
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x00000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x0000001f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffffL
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x00000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0x0000000a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0x0000000b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0x0000000c
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0x0000000d
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0x0000000e
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0x0000000f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x00000010
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x00000011
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x00000012
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x00000013
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x00000001
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x00000014
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x00000015
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x00000016
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x00000017
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x00000018
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x00000019
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x0000001a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x0000001b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x0000001c
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x00000002
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x00000003
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x00000004
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x00000005
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x00000006
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x00000007
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x00000008
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x00000009
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffffL
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x0000001f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffffL
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x00000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x0000001f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffffL
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x00000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x0000001f
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffffL
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x00000000
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffffL
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x00000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x00000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0x0000000a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0x0000000b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0x0000000c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0x0000000d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0x0000000e
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0x0000000f
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x00000010
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x00000011
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x00000012
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x00000013
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x00000001
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x00000014
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x00000015
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x00000016
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x00000017
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x00000018
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x00000019
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x0000001a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x0000001b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x0000001c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x0000001d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x00000002
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x0000001e
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x00000003
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x00000004
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x00000005
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x00000006
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x00000007
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x00000008
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x00000009
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffffL
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x00000000
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffffL
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x00000000
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0x0000000fL
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x00000000
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0x000000f0L
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x00000004
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x00000001L
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x00000000
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x00000001L
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x00000000
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x00000001L
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x00000000
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x00000001L
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x00000000
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x00000000
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x00000001L
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x00000000
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x0001fffeL
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x00000001
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffffL
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x00000008
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x00010000L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x00000010
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x01000000L
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x00000018
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffffL
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x00000000
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffffL
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x00000000
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x00000000
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x00000000
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffffffffL
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x00000000
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffffL
+#define SMC_PC_C__smc_pc_c__SHIFT 0x00000000
+#define SMC_RESP_0__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x00000000
+#define SMC_RESP_1__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
+#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x00000004
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x00000008L
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x00000000
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0x00000c00L
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0x0000000a
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x00000004L
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x00000005
+#define THM_TMON0_INT_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_INT_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_INT_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x0000001fL
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x00000000
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x00000005
+#define THM_TMON1_INT_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_INT_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_INT_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x00000000
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0x00fff000L
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0x0000000c
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x00000800L
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0x0000000b
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x000007ffL
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x00000000
+
+#endif
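
Editorial note, not part of the patch: every field in the sh_mask header above comes as a REG__FIELD_MASK / REG__FIELD__SHIFT pair, and a field is decoded as (value & MASK) >> SHIFT. The self-contained sketch below demonstrates that convention; the GET_FIELD helper, the include path, and the raw register value are assumptions made up for illustration, not anything defined by this patch.

/*
 * Illustrative sketch only. GET_FIELD is an assumed convenience macro
 * built on the MASK/SHIFT naming convention of the header above; the
 * raw value is fabricated for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

#include "asic_reg/smu/smu_6_0_sh_mask.h"   /* assumed include path */

#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	/* Pretend raw readout of THM_TMON0_INT_DATA (made-up value). */
	uint32_t raw = 0x00A5C876;

	uint32_t temp  = GET_FIELD(raw, THM_TMON0_INT_DATA, TEMP);
	uint32_t valid = GET_FIELD(raw, THM_TMON0_INT_DATA, VALID);
	uint32_t z     = GET_FIELD(raw, THM_TMON0_INT_DATA, Z);

	printf("valid=%u temp=0x%03x z=0x%03x\n",
	       (unsigned)valid, (unsigned)temp, (unsigned)z);
	return 0;
}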
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43..a9ef1562f43b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
#define mmSMU1_SMU_SMC_IND_DATA 0x83
#define mmSMU2_SMU_SMC_IND_DATA 0x85
#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define ixRCU_UC_EVENTS 0xc0000004
#define ixRCU_MISC_CTRL 0xc0000010
#define ixCC_RCU_FUSES 0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 933917479985..22dd4c2b7290 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df..eca2b851f25f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h
new file mode 100644
index 000000000000..5c0e3f3332e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_d.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef UVD_4_0_D_H
+#define UVD_4_0_D_H
+
+#define ixUVD_CGC_CTRL2 0x00C1
+#define ixUVD_CGC_MEM_CTRL 0x00C0
+#define ixUVD_LMI_ADDR_EXT2 0x00AB
+#define ixUVD_LMI_CACHE_CTRL 0x009B
+#define ixUVD_LMI_SWAP_CNTL2 0x00AA
+#define ixUVD_MIF_CURR_ADDR_CONFIG 0x0048
+#define ixUVD_MIF_RECON1_ADDR_CONFIG 0x0114
+#define ixUVD_MIF_REF_ADDR_CONFIG 0x004C
+#define mmUVD_CGC_CTRL 0x3D2C
+#define mmUVD_CGC_GATE 0x3D2A
+#define mmUVD_CGC_STATUS 0x3D2B
+#define mmUVD_CGC_UDEC_STATUS 0x3D2D
+#define mmUVD_CONTEXT_ID 0x3DBD
+#define mmUVD_CTX_DATA 0x3D29
+#define mmUVD_CTX_INDEX 0x3D28
+#define mmUVD_ENGINE_CNTL 0x3BC6
+#define mmUVD_GPCOM_VCPU_CMD 0x3BC3
+#define mmUVD_GPCOM_VCPU_DATA0 0x3BC4
+#define mmUVD_GPCOM_VCPU_DATA1 0x3BC5
+#define mmUVD_GP_SCRATCH4 0x3D38
+#define mmUVD_LMI_ADDR_EXT 0x3D65
+#define mmUVD_LMI_CTRL 0x3D66
+#define mmUVD_LMI_CTRL2 0x3D3D
+#define mmUVD_LMI_EXT40_ADDR 0x3D26
+#define mmUVD_LMI_STATUS 0x3D67
+#define mmUVD_LMI_SWAP_CNTL 0x3D6D
+#define mmUVD_MASTINT_EN 0x3D40
+#define mmUVD_MPC_CNTL 0x3D77
+#define mmUVD_MPC_SET_ALU 0x3D7E
+#define mmUVD_MPC_SET_MUX 0x3D7D
+#define mmUVD_MPC_SET_MUXA0 0x3D79
+#define mmUVD_MPC_SET_MUXA1 0x3D7A
+#define mmUVD_MPC_SET_MUXB0 0x3D7B
+#define mmUVD_MPC_SET_MUXB1 0x3D7C
+#define mmUVD_MP_SWAP_CNTL 0x3D6F
+#define mmUVD_NO_OP 0x3BFF
+#define mmUVD_PGFSM_CONFIG 0x38F8
+#define mmUVD_PGFSM_READ_TILE1 0x38FA
+#define mmUVD_PGFSM_READ_TILE2 0x38FB
+#define mmUVD_POWER_STATUS 0x38FC
+#define mmUVD_RBC_IB_BASE 0x3DA1
+#define mmUVD_RBC_IB_SIZE 0x3DA2
+#define mmUVD_RBC_IB_SIZE_UPDATE 0x3DF1
+#define mmUVD_RBC_RB_BASE 0x3DA3
+#define mmUVD_RBC_RB_CNTL 0x3DA9
+#define mmUVD_RBC_RB_RPTR 0x3DA4
+#define mmUVD_RBC_RB_RPTR_ADDR 0x3DAA
+#define mmUVD_RBC_RB_WPTR 0x3DA5
+#define mmUVD_RBC_RB_WPTR_CNTL 0x3DA6
+#define mmUVD_SEMA_ADDR_HIGH 0x3BC1
+#define mmUVD_SEMA_ADDR_LOW 0x3BC0
+#define mmUVD_SEMA_CMD 0x3BC2
+#define mmUVD_SEMA_CNTL 0x3D00
+#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x3DB3
+#define mmUVD_SEMA_TIMEOUT_STATUS 0x3DB0
+#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x3DB2
+#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x3DB1
+#define mmUVD_SOFT_RESET 0x3DA0
+#define mmUVD_STATUS 0x3DAF
+#define mmUVD_UDEC_ADDR_CONFIG 0x3BD3
+#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3BD4
+#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3BD5
+#define mmUVD_VCPU_CACHE_OFFSET0 0x3D36
+#define mmUVD_VCPU_CACHE_OFFSET1 0x3D38
+#define mmUVD_VCPU_CACHE_OFFSET2 0x3D3A
+#define mmUVD_VCPU_CACHE_SIZE0 0x3D37
+#define mmUVD_VCPU_CACHE_SIZE1 0x3D39
+#define mmUVD_VCPU_CACHE_SIZE2 0x3D3B
+#define mmUVD_VCPU_CNTL 0x3D98
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
new file mode 100644
index 000000000000..8ee3149df5b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
@@ -0,0 +1,795 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef UVD_4_0_SH_MASK_H
+#define UVD_4_0_SH_MASK_H
+
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x00000000
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x00000001
+#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001cL
+#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x00000002
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003cL
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x00000002
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007c0L
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x00000006
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x00000000
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x00000017
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x0000001a
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x00000015
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x00000016
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x0000001b
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x00000019
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x00000012
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x00000018
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x00000014
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x00000013
+#define UVD_CGC_CTRL__SCPU_MODE_MASK 0x40000000L
+#define UVD_CGC_CTRL__SCPU_MODE__SHIFT 0x0000001e
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x00000010
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0x0000000c
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0x0000000e
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0x0000000d
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x00000011
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0x0000000f
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0x0000000b
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x0000001d
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x0000001c
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__IDCT__SHIFT 0x00000007
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LBSI__SHIFT 0x0000000a
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x00000005
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x00000006
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__LRBBM__SHIFT 0x0000000b
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__MPC__SHIFT 0x00000009
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x00000002
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPRD__SHIFT 0x00000008
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__RBC__SHIFT 0x00000004
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__REGS__SHIFT 0x00000003
+#define UVD_CGC_GATE__SCPU_MASK 0x00080000L
+#define UVD_CGC_GATE__SCPU__SHIFT 0x00000013
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__SYS__SHIFT 0x00000000
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0x0000000d
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0x0000000f
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0x0000000e
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x00000010
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0x0000000c
+#define UVD_CGC_GATE__UDEC__SHIFT 0x00000001
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__VCPU__SHIFT 0x00000012
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__WCB__SHIFT 0x00000011
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0x0000000d
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x00000000
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00f00000L
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x00000014
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000f0000L
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x00000010
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0x0000000c
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x00000001
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x00000002
+#define UVD_CGC_MEM_CTRL__SCPU_LS_EN_MASK 0x00000800L
+#define UVD_CGC_MEM_CTRL__SCPU_LS_EN__SHIFT 0x0000000b
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x00000009
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x00000005
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x00000007
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x00000006
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x00000008
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x00000004
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0x0000000a
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x00000003
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0x0000000e
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0x0000000f
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x00000015
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x00000016
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0x0000000c
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0x0000000d
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x00000017
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x00000014
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x00000013
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x00000007
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x00000006
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x00000008
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x00000011
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x00000010
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x00000012
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0x0000000b
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x00000009
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0x0000000a
+#define UVD_CGC_STATUS__SCPU_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__SCPU_SCLK__SHIFT 0x0000001b
+#define UVD_CGC_STATUS__SCPU_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__SCPU_VCLK__SHIFT 0x0000001c
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x00000001
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x00000000
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x00000002
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x00000004
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x00000003
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x00000005
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x00000019
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x0000001a
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x00000018
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x00000004
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x00000003
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x00000005
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0x0000000a
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x00000009
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0x0000000b
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x00000007
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x00000006
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x00000008
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0x0000000d
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0x0000000c
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0x0000000e
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x00000001
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x00000000
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x00000002
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xffffffffL
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x00000000
+#define UVD_CTX_DATA__DATA_MASK 0xffffffffL
+#define UVD_CTX_DATA__DATA__SHIFT 0x00000000
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001ffL
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x00000000
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x00000001
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7ffffffeL
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x00000001
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x0000001f
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xffffffffL
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x00000000
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xffffffffL
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT_MASK 0x0000000fL
+#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT_MASK 0x00000f00L
+#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT__SHIFT 0x00000008
+#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT_MASK 0x0000f000L
+#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT__SHIFT 0x0000000c
+#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT_MASK 0x000000f0L
+#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT__SHIFT 0x00000004
+#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT_MASK 0x000000f0L
+#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT__SHIFT 0x00000004
+#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT_MASK 0x00000f00L
+#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT__SHIFT 0x00000008
+#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT_MASK 0x00f00000L
+#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT__SHIFT 0x00000014
+#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT_MASK 0x000f0000L
+#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT__SHIFT 0x00000010
+#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT_MASK 0x0000000fL
+#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT__SHIFT 0x00000000
+#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT_MASK 0x0f000000L
+#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT__SHIFT 0x00000018
+#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT_MASK 0xf0000000L
+#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT__SHIFT 0x0000001c
+#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT_MASK 0x0000f000L
+#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT__SHIFT 0x0000000c
+#define UVD_LMI_CACHE_CTRL__CM_EN_MASK 0x00000004L
+#define UVD_LMI_CACHE_CTRL__CM_EN__SHIFT 0x00000002
+#define UVD_LMI_CACHE_CTRL__CM_FLUSH_MASK 0x00000008L
+#define UVD_LMI_CACHE_CTRL__CM_FLUSH__SHIFT 0x00000003
+#define UVD_LMI_CACHE_CTRL__IT_EN_MASK 0x00000001L
+#define UVD_LMI_CACHE_CTRL__IT_EN__SHIFT 0x00000000
+#define UVD_LMI_CACHE_CTRL__IT_FLUSH_MASK 0x00000002L
+#define UVD_LMI_CACHE_CTRL__IT_FLUSH__SHIFT 0x00000001
+#define UVD_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000010L
+#define UVD_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000004
+#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH_MASK 0x00000020L
+#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH__SHIFT 0x00000005
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x00000002
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x00000007
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x00000003
+#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK_MASK 0x00000070L
+#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK__SHIFT 0x00000004
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x00000009
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0x0000000b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x00000000
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0x0000000f
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x00000001
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0x0000000d
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0x0000000e
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x0000000b
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x00000016
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0x0000000e
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000f8000L
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0x0000000f
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0x0000000d
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x00000017
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x00000018
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x00000014
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x00000019
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0x0000000c
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x0000001a
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009
+#define UVD_LMI_CTRL__RFU_MASK 0xf8000000L
+#define UVD_LMI_CTRL__RFU_MASK 0xfc000000L
+#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001a
+#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x00000008
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000ffL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x00000000
+#define UVD_LMI_EXT40_ADDR__ADDR_MASK 0x000000ffL
+#define UVD_LMI_EXT40_ADDR__ADDR__SHIFT 0x00000000
+#define UVD_LMI_EXT40_ADDR__INDEX_MASK 0x001f0000L
+#define UVD_LMI_EXT40_ADDR__INDEX__SHIFT 0x00000010
+#define UVD_LMI_EXT40_ADDR__WRITE_ADDR_MASK 0x80000000L
+#define UVD_LMI_EXT40_ADDR__WRITE_ADDR__SHIFT 0x0000001f
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0x0000000c
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0x0000000d
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x00000007
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x00000008
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x00000000
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0x0000000b
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x00000009
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x00000004
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0x0000000a
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x00000006
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x00000005
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x00000003
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x00000002
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x00000001
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x00000000
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000cL
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x00000002
+#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000c00L
+#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP__SHIFT 0x0000000a
+#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000c0000L
+#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x00000012
+#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000c000L
+#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0x0000000e
+#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x00000010
+#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x00000018
+#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000cL
+#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x00000002
+#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP__SHIFT 0x0000000c
+#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xc0000000L
+#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x0000001e
+#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP_MASK 0x00c00000L
+#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP__SHIFT 0x00000016
+#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x00000000
+#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP_MASK 0x00000030L
+#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP__SHIFT 0x00000004
+#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0c000000L
+#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP__SHIFT 0x0000001a
+#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x0000001c
+#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000c0L
+#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x00000006
+#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000008
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007ffff0L
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x00000004
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x00000000
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x00000002
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x00000001
+#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L
+#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x00000010
+#define UVD_MPC_CNTL__DBG_MUX_MASK 0x00000700L
+#define UVD_MPC_CNTL__DBG_MUX__SHIFT 0x00000008
+#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L
+#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x00000006
+#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x00000003
+#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L
+#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x00000012
+#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L
+#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x00000000
+#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000ff0L
+#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x00000004
+#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00fc0000L
+#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x00000012
+#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3f000000L
+#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x00000018
+#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00fc0000L
+#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x00000012
+#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3f000000L
+#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x00000018
+#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003fL
+#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x00000000
+#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000fc0L
+#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x00000006
+#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003f000L
+#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0x0000000c
+#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L
+#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x00000000
+#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L
+#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x00000003
+#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001c0L
+#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x00000006
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x00000000
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x00000014
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00c00000L
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x00000016
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x00000018
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0c000000L
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x0000001a
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x0000001c
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xc0000000L
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x0000001e
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000cL
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x00000002
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x00000004
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000c0L
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x00000006
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x00000008
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000c00L
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0x0000000a
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0x0000000c
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000c000L
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0x0000000e
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x00000010
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000c0000L
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x00000012
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK 0x000000ffL
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR__SHIFT 0x00000000
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK 0x00000400L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT__SHIFT 0x0000000a
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT_MASK 0x00000800L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT__SHIFT 0x0000000b
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK 0x00000100L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN__SHIFT 0x00000008
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK 0x00000200L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP__SHIFT 0x00000009
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ_MASK 0x00002000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ__SHIFT 0x0000000d
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR_MASK 0xf0000000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR__SHIFT 0x0000001c
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE_MASK 0x00001000L
+#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE__SHIFT 0x0000000c
+#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE_MASK 0x00ffffffL
+#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE__SHIFT 0x00000000
+#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE_MASK 0x00ffffffL
+#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE__SHIFT 0x00000000
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x00000000
+#define UVD_RBC_IB_BASE__IB_BASE_MASK 0xffffffc0L
+#define UVD_RBC_IB_BASE__IB_BASE__SHIFT 0x00000006
+#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007ffff0L
+#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x00000004
+#define UVD_RBC_RB_BASE__RB_BASE_MASK 0xffffffc0L
+#define UVD_RBC_RB_BASE__RB_BASE__SHIFT 0x00000006
+#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001f00L
+#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001fL
+#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x00000010
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x00000018
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x0000001c
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x00000014
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xffffffffL
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000000
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x00000004
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x00000004
+#define UVD_SEMA_ADDR_HIGH__ADDR_42_23_MASK 0x000fffffL
+#define UVD_SEMA_ADDR_HIGH__ADDR_42_23__SHIFT 0x00000000
+#define UVD_SEMA_ADDR_LOW__ADDR_22_3_MASK 0x000fffffL
+#define UVD_SEMA_ADDR_LOW__ADDR_22_3__SHIFT 0x00000000
+#define UVD_SEMA_CMD__MODE_MASK 0x00000040L
+#define UVD_SEMA_CMD__MODE__SHIFT 0x00000006
+#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000fL
+#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x00000000
+#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L
+#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x00000007
+#define UVD_SEMA_CMD__VMID_MASK 0x00000f00L
+#define UVD_SEMA_CMD__VMID__SHIFT 0x00000008
+#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L
+#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x00000004
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x00000001
+#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L
+#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x00000000
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x00000000
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000002
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x00000003
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x00000001
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000000
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x00000000
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001ffffeL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x00000001
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x00000000
+#define UVD_SOFT_RESET__CSM_SOFT_RESET_MASK 0x00000020L
+#define UVD_SOFT_RESET__CSM_SOFT_RESET__SHIFT 0x00000005
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x00000006
+#define UVD_SOFT_RESET__FWV_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__FWV_SOFT_RESET__SHIFT 0x00000009
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0x0000000c
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0x0000000a
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x00000001
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x00000010
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x00000002
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0x0000000d
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0x0000000f
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x00000008
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0x0000000b
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x00000000
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0x0000000e
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x00000007
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x00000004
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x00000003
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x00000000
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000feL
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x00000001
+#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
+#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
+#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
+#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
+#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x01ffffffL
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x00000000
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001fffffL
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x00000000
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x00000008
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x00000004
+#define UVD_VCPU_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_VCPU_CNTL__CABAC_MB_ACC__SHIFT 0x0000001c
+#define UVD_VCPU_CNTL__CLK_ACTIVE_MASK 0x00020000L
+#define UVD_VCPU_CNTL__CLK_ACTIVE__SHIFT 0x00000011
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x00000009
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000e000L
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0x0000000d
+#define UVD_VCPU_CNTL__ECPU_AM32_EN_MASK 0x20000000L
+#define UVD_VCPU_CNTL__ECPU_AM32_EN__SHIFT 0x0000001d
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000fL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x00000000
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x00000010
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x00000005
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x00000006
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0ff00000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x00000014
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000007
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x00000012
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0x0000000a
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0x0000000b
+#define UVD_VCPU_CNTL__WMV9_EN_MASK 0x40000000L
+#define UVD_VCPU_CNTL__WMV9_EN__SHIFT 0x0000001e
+
+#endif
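The _MASK/__SHIFT pairs above follow the usual convention of AMD's auto-generated register headers: a field is read by AND-ing the register value with the mask and shifting right, and written back with the inverse operation. A minimal sketch of that pattern, using the UVD_CGC_STATUS system-clock bit as the example (the helper names below are illustrative only, not part of the patch):

static inline uint32_t uvd_reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;	/* isolate the field and right-align it */
}

static inline uint32_t uvd_reg_set_field(uint32_t reg, uint32_t mask,
					 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);	/* replace just this field */
}

/* e.g.  sys_sclk_on = uvd_reg_get_field(status,
 *                                       UVD_CGC_STATUS__SYS_SCLK_MASK,
 *                                       UVD_CGC_STATUS__SYS_SCLK__SHIFT);  */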
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
new file mode 100644
index 000000000000..2176548e9203
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VCE_1_0_D_H
+#define VCE_1_0_D_H
+
+#define mmVCE_CLOCK_GATING_A 0x80BE
+#define mmVCE_CLOCK_GATING_B 0x80BF
+#define mmVCE_LMI_CACHE_CTRL 0x83BD
+#define mmVCE_LMI_CTRL 0x83A6
+#define mmVCE_LMI_CTRL2 0x839D
+#define mmVCE_LMI_MISC_CTRL 0x83B5
+#define mmVCE_LMI_STATUS 0x83A7
+#define mmVCE_LMI_SWAP_CNTL 0x83AD
+#define mmVCE_LMI_SWAP_CNTL1 0x83AE
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR 0x8397
+#define mmVCE_LMI_VM_CTRL 0x83A8
+#define mmVCE_RB_ARB_CTRL 0x809F
+#define mmVCE_RB_BASE_HI 0x8061
+#define mmVCE_RB_BASE_HI2 0x805C
+#define mmVCE_RB_BASE_LO 0x8060
+#define mmVCE_RB_BASE_LO2 0x805B
+#define mmVCE_RB_RPTR 0x8063
+#define mmVCE_RB_RPTR2 0x805E
+#define mmVCE_RB_SIZE 0x8062
+#define mmVCE_RB_SIZE2 0x805D
+#define mmVCE_RB_WPTR 0x8064
+#define mmVCE_RB_WPTR2 0x805F
+#define mmVCE_SOFT_RESET 0x8048
+#define mmVCE_STATUS 0x8001
+#define mmVCE_SYS_INT_ACK 0x8341
+#define mmVCE_SYS_INT_EN 0x8340
+#define mmVCE_SYS_INT_STATUS 0x8341
+#define mmVCE_UENC_CLOCK_GATING 0x816F
+#define mmVCE_UENC_DMA_DCLK_CTRL 0x8250
+#define mmVCE_UENC_REG_CLOCK_GATING 0x8170
+#define mmVCE_VCPU_CACHE_OFFSET0 0x8009
+#define mmVCE_VCPU_CACHE_OFFSET1 0x800B
+#define mmVCE_VCPU_CACHE_OFFSET2 0x800D
+#define mmVCE_VCPU_CACHE_SIZE0 0x800A
+#define mmVCE_VCPU_CACHE_SIZE1 0x800C
+#define mmVCE_VCPU_CACHE_SIZE2 0x800E
+#define mmVCE_VCPU_CNTL 0x8005
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
new file mode 100644
index 000000000000..ea5b26b11cb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VCE_1_0_SH_MASK_H
+#define VCE_1_0_SH_MASK_H
+
+#define VCE_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000001L
+#define VCE_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000000
+#define VCE_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define VCE_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
+#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
+#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP_MASK 0x00003ffcL
+#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP__SHIFT 0x00000002
+#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP_MASK 0x00000003L
+#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP__SHIFT 0x00000000
+#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000003L
+#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000000
+#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP_MASK 0x00003ffcL
+#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP__SHIFT 0x00000002
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR_MASK 0xffffffffL
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR__SHIFT 0x00000000
+#define VCE_RB_BASE_HI2__RB_BASE_HI_MASK 0xffffffffL
+#define VCE_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x00000000
+#define VCE_RB_BASE_HI__RB_BASE_HI_MASK 0xffffffffL
+#define VCE_RB_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
+#define VCE_RB_BASE_LO2__RB_BASE_LO_MASK 0xffffffc0L
+#define VCE_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x00000006
+#define VCE_RB_BASE_LO__RB_BASE_LO_MASK 0xffffffc0L
+#define VCE_RB_BASE_LO__RB_BASE_LO__SHIFT 0x00000006
+#define VCE_RB_RPTR2__RB_RPTR_MASK 0x007ffff0L
+#define VCE_RB_RPTR2__RB_RPTR__SHIFT 0x00000004
+#define VCE_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
+#define VCE_RB_RPTR__RB_RPTR__SHIFT 0x00000004
+#define VCE_RB_SIZE2__RB_SIZE_MASK 0x007ffff0L
+#define VCE_RB_SIZE2__RB_SIZE__SHIFT 0x00000004
+#define VCE_RB_SIZE__RB_SIZE_MASK 0x007ffff0L
+#define VCE_RB_SIZE__RB_SIZE__SHIFT 0x00000004
+#define VCE_RB_WPTR2__RB_WPTR_MASK 0x007ffff0L
+#define VCE_RB_WPTR2__RB_WPTR__SHIFT 0x00000004
+#define VCE_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
+#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
+#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
+#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
+#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
+#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
+#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
+#define VCE_STATUS__UENC_BUSY__SHIFT 0x00000008
+#define VCE_STATUS__VCPU_REPORT_MASK 0x000000feL
+#define VCE_STATUS__VCPU_REPORT__SHIFT 0x00000001
+#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK_MASK 0x00000008L
+#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK__SHIFT 0x00000003
+#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK 0x00000008L
+#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN__SHIFT 0x00000003
+#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK 0x00000008L
+#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT__SHIFT 0x00000003
+#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK 0x00000002L
+#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON__SHIFT 0x00000001
+#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK 0x00000004L
+#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON__SHIFT 0x00000002
+#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK 0x00000001L
+#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET0__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET0__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET1__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET1__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_OFFSET2__OFFSET_MASK 0x0fffffffL
+#define VCE_VCPU_CACHE_OFFSET2__OFFSET__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE0__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE0__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE1__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE1__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CACHE_SIZE2__SIZE_MASK 0x00ffffffL
+#define VCE_VCPU_CACHE_SIZE2__SIZE__SHIFT 0x00000000
+#define VCE_VCPU_CNTL__CLK_EN_MASK 0x00000001L
+#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
+#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
+#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012
+
+#endif
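The mmVCE_* values in vce_1_0_d.h are dword register offsets that pair with the field macros above. A short read sketch, assuming amdgpu's usual RREG32 MMIO accessor is available in the calling code (it is not part of this excerpt):

/* Read the VCE ring-buffer read pointer and extract the field. */
uint32_t v    = RREG32(mmVCE_RB_RPTR);
uint32_t rptr = (v & VCE_RB_RPTR__RB_RPTR_MASK) >> VCE_RB_RPTR__RB_RPTR__SHIFT;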
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a..e4a1697ec1d3 100755..100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
CGS_UCODE_ID_CP_MEC_JT2,
CGS_UCODE_ID_GMCON_RENG,
CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_STORAGE,
CGS_UCODE_ID_MAXIMUM,
};
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
+ cgs_is_virtualization_enabled_t is_virtualization_enabled;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
+#define cgs_is_virtualization_enabled(cgs_device) \
+ CGS_CALL(is_virtualization_enabled, cgs_device)
#endif /* _CGS_COMMON_H */
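The cgs_common.h hunks add a new callback slot plus a convenience macro; the actual implementation lives in the OS-specific CGS layer, which this excerpt does not show. A hypothetical sketch of how a backend would wire it up and how powerplay code then queries it:

/* Hypothetical backend implementation; name and body are placeholders. */
static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	return 0;	/* report bare metal in this sketch */
}

/* registered in the backend's struct cgs_ops initializer:
 *	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
 * and consumed through the macro added above:
 *	if (cgs_is_virtualization_enabled(cgs_device))
 *		return 0;	// skip hardware-manager setup
 */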
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266..c81cf1412728 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -41,7 +41,7 @@
#define PP_CHECK_HW(hwmgr) \
do { \
if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
- return -EINVAL; \
+ return 0; \
} while (0)
static int pp_early_init(void *handle)
@@ -115,6 +115,7 @@ static int pp_hw_init(void *handle)
struct pp_instance *pp_handle;
struct pp_smumgr *smumgr;
struct pp_eventmgr *eventmgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
if (handle == NULL)
@@ -122,6 +123,7 @@ static int pp_hw_init(void *handle)
pp_handle = (struct pp_instance *)handle;
smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
smumgr->smumgr_funcs->smu_init == NULL ||
@@ -141,9 +143,11 @@ static int pp_hw_init(void *handle)
return ret;
}
- hw_init_power_state_table(pp_handle->hwmgr);
- eventmgr = pp_handle->eventmgr;
+ PP_CHECK_HW(hwmgr);
+
+ hw_init_power_state_table(hwmgr);
+ eventmgr = pp_handle->eventmgr;
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
return -EINVAL;
@@ -243,7 +247,9 @@ static int pp_suspend(void *handle)
pp_handle = (struct pp_instance *)handle;
eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
+
+ if (eventmgr != NULL)
+ pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
return 0;
}
@@ -273,7 +279,8 @@ static int pp_resume(void *handle)
}
eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
+ if (eventmgr != NULL)
+ pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
return 0;
}
@@ -340,8 +347,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
return (((struct pp_instance *)handle)->hwmgr->dpm_level);
}
@@ -436,7 +442,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+ void *input, void *output)
{
int ret = 0;
struct pp_instance *pp_handle;
@@ -447,6 +454,9 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
if (pp_handle == NULL)
return -EINVAL;
+ if (pp_handle->eventmgr == NULL)
+ return 0;
+
switch (event_id) {
case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
@@ -475,7 +485,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
return ret;
}
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
@@ -581,6 +591,23 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
}
+static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
+}
+
static int pp_dpm_get_temperature(void *handle)
{
struct pp_hwmgr *hwmgr;
@@ -820,6 +847,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
+static struct amd_vce_state*
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle) {
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ }
+
+ return NULL;
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -836,6 +878,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
@@ -846,6 +889,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -864,6 +908,13 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
if (ret)
goto fail_smum;
+
+ amd_pp->pp_handle = handle;
+
+ if ((amdgpu_dpm == 0)
+ || cgs_is_virtualization_enabled(pp_init->device))
+ return 0;
+
ret = hwmgr_init(pp_init, handle);
if (ret)
goto fail_hwmgr;
@@ -872,7 +923,6 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
if (ret)
goto fail_eventmgr;
- amd_pp->pp_handle = handle;
return 0;
fail_eventmgr:
@@ -891,12 +941,13 @@ static int amd_pp_instance_fini(void *handle)
if (instance == NULL)
return -EINVAL;
- eventmgr_fini(instance->eventmgr);
-
- hwmgr_fini(instance->hwmgr);
+ if ((amdgpu_dpm != 0)
+ && !cgs_is_virtualization_enabled(instance->smu_mgr->device)) {
+ eventmgr_fini(instance->eventmgr);
+ hwmgr_fini(instance->hwmgr);
+ }
smum_fini(instance->smu_mgr);
-
kfree(handle);
return 0;
}
@@ -953,6 +1004,10 @@ int amd_powerplay_reset(void *handle)
if (ret)
return ret;
+ if ((amdgpu_dpm == 0)
+ || cgs_is_virtualization_enabled(instance->smu_mgr->device))
+ return 0;
+
hw_init_power_state_table(instance->hwmgr);
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
@@ -976,6 +1031,8 @@ int amd_powerplay_display_configuration_change(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
phm_store_dal_configuration_data(hwmgr, display_config);
return 0;
@@ -993,6 +1050,8 @@ int amd_powerplay_get_display_power_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
return phm_get_dal_power_level(hwmgr, output);
}
@@ -1010,6 +1069,8 @@ int amd_powerplay_get_current_clocks(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
@@ -1054,6 +1115,8 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
result = phm_get_clock_by_type(hwmgr, type, clocks);
return result;
@@ -1072,6 +1135,8 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ PP_CHECK_HW(hwmgr);
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
result = phm_get_max_high_clocks(hwmgr, clocks);
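Taken together, the amd_powerplay.c hunks let powerplay come up with only the SMU manager when DPM is disabled or the device is virtualized, while the relaxed PP_CHECK_HW turns the hwmgr-dependent entry points into harmless no-ops. Condensed, the new guard reads roughly as follows (names as used in the hunks above):

if ((amdgpu_dpm == 0) || cgs_is_virtualization_enabled(pp_init->device))
	return 0;	/* SMU only: hwmgr_init()/eventmgr_init() are skipped */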
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index b6f45fd01fa6..ec36c0e28388 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -154,7 +154,7 @@ int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_
int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
- /* TODO */
+ phm_disable_clock_power_gatings(eventmgr->hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 2028980f1ed4..b0c63c5f54c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -169,7 +169,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
@@ -182,7 +182,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496..4b14f259a147 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
return (struct cz_power_state *)hw_ps;
}
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct cz_power_state);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index a6abe81bc843..71822ae73a12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -35,7 +35,7 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
phm_table_function *function;
if (rt_table->function_list == NULL) {
- printk(KERN_INFO "[ powerplay ] this function not implement!\n");
+ pr_debug("[ powerplay ] this function not implement!\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 14f8c1f4da3d..c355a0f51663 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -209,6 +209,19 @@ int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
return 0;
}
+int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface)) {
+ if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
+ return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
+ }
+ return 0;
+}
+
+
int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
@@ -272,7 +285,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
- return -EINVAL;
+ return false;
return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1167205057b3..dc6700aee18f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -80,20 +80,17 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
topaz_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
tonga_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK);
+ hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
break;
case CHIP_FIJI:
fiji_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
- PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
@@ -685,20 +682,24 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
- if (amdgpu_sclk_deep_sleep_en)
+ if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
- if (amdgpu_powercontainment)
+ if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
- else
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ } else {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
-
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ }
hwmgr->feature_mask = amdgpu_pp_feature_mask;
return 0;
@@ -710,13 +711,15 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t vol;
int ret = 0;
- if (hwmgr->chip_id < CHIP_POLARIS10) {
- atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
if (*voltage >= 2000 || *voltage == 0)
*voltage = 1150;
} else {
ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
- *voltage = (uint16_t)vol/100;
+ *voltage = (uint16_t)(vol/100);
}
return ret;
}
@@ -734,9 +737,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -765,8 +765,6 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
return 0;
}
@@ -789,9 +787,6 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
return 0;
}
@@ -808,8 +803,6 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EVV);
return 0;
}
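
The hwmgr.c hunks above fold the old per-option module parameters into single bits of amdgpu_pp_feature_mask and clear per-ASIC bits such as PP_VBI_TIME_SUPPORT_MASK on chips that cannot use them. A standalone sketch of the bit-mask gating pattern; the bit values here are assumptions for illustration, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum feature_bits {                     /* illustrative values only */
        FEAT_SCLK_DEEP_SLEEP   = 0x1,
        FEAT_POWER_CONTAINMENT = 0x2,
        FEAT_VBI_TIME_SUPPORT  = 0x4,
};

int main(void)
{
        uint32_t feature_mask = FEAT_SCLK_DEEP_SLEEP | FEAT_POWER_CONTAINMENT |
                                FEAT_VBI_TIME_SUPPORT;

        /* Per-ASIC quirk: strip a feature this chip cannot support. */
        feature_mask &= ~FEAT_VBI_TIME_SUPPORT;

        /* One mask bit toggles a whole capability group; here CAC follows
         * power containment, mirroring the hunk above. */
        bool containment = feature_mask & FEAT_POWER_CONTAINMENT;
        bool cac = containment;

        printf("deep sleep=%d containment=%d cac=%d vbi=%d\n",
               !!(feature_mask & FEAT_SCLK_DEEP_SLEEP), containment, cac,
               !!(feature_mask & FEAT_VBI_TIME_SUPPORT));
        return 0;
}
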
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f846..f5e8fda964f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
+#include "pp_acpi.h"
bool acpi_atcs_functions_supported(void *device, uint32_t index)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 1126bd4f74dc..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+ (&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450..c45bd2560468 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
/**
* Private Function to get the PowerPlay Table Address.
*/
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
{
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1201,17 +1201,20 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
- const ATOM_Tonga_VCE_State_Table *vce_state_table =
- (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+ const ATOM_Tonga_VCE_State_Table *vce_state_table;
- if (vce_state_table == NULL)
+
+ if (pp_table == NULL)
return 0;
+ vce_state_table = (void *)pp_table +
+ le16_to_cpu(pp_table->usVCEStateTableOffset);
+
return vce_state_table->ucNumEntries;
}
static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
- struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+ struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1315,7 +1318,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
- if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}
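
In ppt_get_num_of_vce_state_table_entries_v1_0 above, the old code derived vce_state_table from pp_table by pointer arithmetic and only then NULL-checked the derived pointer, which can never catch a NULL pp_table; the fix validates the base pointer first. A standalone sketch of the corrected ordering, using simplified stand-in structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pp_table_hdr {                   /* simplified stand-in */
        uint16_t vce_state_table_offset;
};

struct vce_state_table {
        uint8_t num_entries;
};

static int get_num_vce_entries(const struct pp_table_hdr *pp_table)
{
        const struct vce_state_table *vce;

        if (pp_table == NULL)           /* validate the base pointer first */
                return 0;

        vce = (const void *)((const char *)pp_table +
                             pp_table->vce_state_table_offset);
        return vce->num_entries;
}

struct table_blob {
        struct pp_table_hdr hdr;
        struct vce_state_table vce;
};

int main(void)
{
        struct table_blob t;

        t.hdr.vce_state_table_offset = offsetof(struct table_blob, vce);
        t.vce.num_entries = 3;

        printf("entries=%d (NULL table -> %d)\n",
               get_num_vce_entries(&t.hdr), get_num_vce_entries(NULL));
        return 0;
}
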
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf892..a4e9cf429e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
return 0;
}
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
struct pp_hwmgr *hwmgr)
{
const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
return 0;
}
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250..a1fc4fcac1e0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -150,14 +150,20 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- smu7_update_uvd_dpm(hwmgr, false);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ smu7_update_uvd_dpm(hwmgr, false);
}
return 0;
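
The smu7_powergate_uvd hunk above makes the ungate path mirror the gate path: gating clock-gates, power-gates, updates DPM and powers the block down, while ungating powers the block up, lifts power gating, lifts clock gating, and only then reprograms DPM. A standalone sketch of that mirrored ordering (the block names are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

static void set_clock_gating(bool gate) { printf("clock gating -> %s\n", gate ? "GATE" : "UNGATE"); }
static void set_power_gating(bool gate) { printf("power gating -> %s\n", gate ? "GATE" : "UNGATE"); }
static void update_dpm(bool gated)      { printf("dpm update   -> gated=%d\n", gated); }
static void power_block(bool up)        { printf("block power  -> %s\n", up ? "UP" : "DOWN"); }

static void powergate_block(bool gate)
{
        if (gate) {
                set_clock_gating(true);
                set_power_gating(true);
                update_dpm(true);
                power_block(false);
        } else {
                power_block(true);      /* reverse order on the way back up */
                set_power_gating(false);
                set_clock_gating(false);
                update_dpm(false);      /* DPM last, once clocks are live again */
        }
}

int main(void)
{
        powergate_block(true);
        powergate_block(false);
        return 0;
}
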
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5..a74f60a575ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
return (struct smu7_power_state *)hw_ps;
}
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -993,13 +993,6 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
SWRST_COMMAND_1, RESETLC, 0x0);
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -EINVAL);
-
-
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
@@ -1153,7 +1146,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -1161,15 +1154,15 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
int result = 0;
tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is already running right now, no need to enable DPM!",
- return 0);
+ "DPM is already running",
+ );
if (smu7_voltage_control(hwmgr)) {
tmp_result = smu7_enable_voltage_control(hwmgr);
@@ -1352,6 +1345,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct cgs_system_info sys_info = {0};
+ int result;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
@@ -1426,7 +1421,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI);
- if ((hwmgr->pp_table_version != PP_TABLE_V0)
+ if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
@@ -1439,6 +1434,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_performance.min = 16;
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
}
/**
@@ -1460,19 +1467,17 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info == NULL)
- return -EINVAL;
-
- sclk_table = table_info->vdd_dep_on_sclk;
-
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
- if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+ if ((hwmgr->pp_table_version == PP_TABLE_V1)
+ && !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddgfx_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -1498,12 +1503,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
}
} else {
-
if ((hwmgr->pp_table_version == PP_TABLE_V0)
|| !phm_get_sclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
+ if (table_info == NULL)
+ return -EINVAL;
+ sclk_table = table_info->vdd_dep_on_sclk;
+
for (j = 1; j < sclk_table->count; j++) {
if (sclk_table->entries[j].clk == sclk &&
sclk_table->entries[j].cks_enable == 0) {
@@ -1864,7 +1872,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1993,8 +2001,9 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
table_info->cac_dtp_table->usTargetOperatingTemp;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
+ if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
}
return 0;
@@ -2127,15 +2136,20 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
}
static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
- struct phm_clock_and_voltage_limits *tab)
+ struct phm_clock_and_voltage_limits *tab)
{
+ uint32_t vddc, vddci;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (tab) {
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
- &data->vddc_leakage);
- smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
- &data->vddci_leakage);
+ vddc = tab->vddc;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
+ &data->vddc_leakage);
+ tab->vddc = vddc;
+ vddci = tab->vddci;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+ &data->vddci_leakage);
+ tab->vddci = vddci;
}
return 0;
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
int result;
@@ -2978,19 +2992,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
data->highest_mclk = memory_clock;
- performance_level = &(ps->performance_levels
- [ps->performance_level_count++]);
-
PP_ASSERT_WITH_CODE(
(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count <=
+ (ps->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -EINVAL);
+ "Performance levels exceeds Driver limit, Skip!",
+ return 0);
+
+ performance_level = &(ps->performance_levels
+ [ps->performance_level_count++]);
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
uint32_t num_active_displays = 0;
struct cgs_display_info info = {0};
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t num_active_displays = 0;
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return smu7_program_display_gap(hwmgr);
}
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
return 0;
}
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *pstate1,
+ const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -4225,18 +4244,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+ struct phm_clock_voltage_dependency_table *sclk_table;
int i;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_sclk_table = table_info->vdd_dep_on_sclk;
-
- for (i = 0; i < dep_sclk_table->count; i++) {
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
- clocks->count++;
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+ return -EINVAL;
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < sclk_table->count; i++) {
+ clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
+
return 0;
}
@@ -4258,17 +4285,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
(struct phm_ppt_v1_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
int i;
+ struct phm_clock_voltage_dependency_table *mclk_table;
- if (table_info == NULL)
- return -EINVAL;
-
- dep_mclk_table = table_info->vdd_dep_on_mclk;
-
- for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
- clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL)
+ return -EINVAL;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
- clocks->count++;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ for (i = 0; i < mclk_table->count; i++) {
+ clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->count++;
+ }
}
return 0;
}
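
In smu7_patch_limits_vddc above, the old code cast the addresses of tab->vddc and tab->vddci to uint32_t * and passed them to a helper that writes a 32-bit value; if those fields are narrower than 32 bits, as the replacement suggests, that write clobbers the neighbouring field. The fix copies each value through a local uint32_t and stores it back. A standalone sketch of the pitfall and the temporary-copy pattern, with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

struct limits {                 /* stand-in: two adjacent narrow fields */
        uint16_t vddc;
        uint16_t vddci;
};

/* Helper that, like the leakage patcher above, works on 32-bit values. */
static void patch_leakage(uint32_t *voltage)
{
        if (*voltage == 0xff01)         /* pretend 0xff01 is a leakage ID */
                *voltage = 1150;
}

int main(void)
{
        struct limits tab = { .vddc = 0xff01, .vddci = 900 };
        uint32_t tmp;

        /* Wrong: patch_leakage((uint32_t *)&tab.vddc) would write 4 bytes
         * over a 2-byte field and corrupt tab.vddci. */

        tmp = tab.vddc;                 /* widen through a temporary instead */
        patch_leakage(&tmp);
        tab.vddc = (uint16_t)tmp;

        printf("vddc=%u vddci=%u\n", tab.vddc, tab.vddci); /* 1150 900 */
        return 0;
}
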
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 41b634ffa5b0..26477f0f09dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -603,9 +603,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
return 0;
}
-static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
+ uint32_t target_tdp)
{
- return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index fb6c6f6106d5..29d0319b22e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
- return 0;
+ return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
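
The smu7_thermal.c hunks above make the fan-speed queries return -ENODEV instead of 0 when the board has no fan, so callers can tell "no fan present" apart from a successfully read value. A standalone sketch of the distinction, with simplified types and illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENODEV 19               /* illustrative; the kernel defines this in errno.h */

static int get_fan_speed_rpm(bool has_fan, uint32_t *speed)
{
        if (!has_fan)
                return -ENODEV; /* returning 0 here would look like "success, 0 rpm" */

        *speed = 1800;          /* pretend we read the tachometer */
        return 0;
}

int main(void)
{
        uint32_t rpm = 0;
        int ret = get_fan_speed_rpm(false, &rpm);

        if (ret)
                printf("no fan (err=%d)\n", ret);
        else
                printf("fan speed: %u rpm\n", rpm);
        return 0;
}
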
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378b..3a883e6c601a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,8 @@
#include "amd_shared.h"
#include "cgs_common.h"
+extern int amdgpu_dpm;
+
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
AMDGPU_PP_SENSOR_VDDNB,
@@ -349,6 +351,7 @@ struct amd_powerplay_funcs {
int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
int (*get_pp_table)(void *handle, char **table);
int (*set_pp_table)(void *handle, const char *buf, size_t size);
@@ -359,6 +362,7 @@ struct amd_powerplay_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
};
struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index d4495839c64c..26129972f686 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -334,6 +334,7 @@ struct phm_clocks {
uint32_t clock[MAX_NUM_CLOCKS];
};
+extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d3..6cdb7cbf515e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,8 +38,6 @@ struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
-extern int amdgpu_powercontainment;
-extern int amdgpu_sclk_deep_sleep_en;
extern unsigned amdgpu_pp_feature_mask;
#define VOLTAGE_SCALE 4
@@ -85,7 +83,9 @@ enum PP_FEATURE_MASK {
PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
PP_VBI_TIME_SUPPORT_MASK = 0x80,
PP_ULV_MASK = 0x100,
- PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+ PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
+ PP_CLOCK_STRETCH_MASK = 0x400,
+ PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800
};
enum PHM_BackEnd_Magic {
@@ -367,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
uint32_t NB;
};
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
- PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
enum PP_TABLE_VERSION {
PP_TABLE_V0 = 0,
PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
void *hardcode_pp_table;
bool need_pp_table_upload;
- struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52a..827860fffe78 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
struct pp_hw_power_state hardware;
};
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
- uint32_t evclk;
- uint32_t ecclk;
- uint32_t sclk;
- uint32_t mclk;
-};
-
enum PP_MMProfilingState {
PP_MMProfilingState_NA = 0,
PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0..8fe8ba9434ff 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
*
*/
-extern bool acpi_atcs_functions_supported(void *device,
- uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
- uint8_t perf_req,
- bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d..6aeb1d20cc3b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -1958,6 +1958,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
int res;
uint64_t tmp64;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (smu_data->smu7_data.fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2049,7 +2055,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2125,7 +2131,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2150,7 +2156,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a9..26eff56b4a99 100755..100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
{
int result = 0;
uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
return -EINVAL;
}
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -396,7 +396,8 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!(smu7_is_smc_ram_running(smumgr)
+ || cgs_is_virtualization_enabled(smumgr->device))) {
fiji_avfs_event_mgr(smumgr, false);
/* Check if SMU is running in protected mode */
@@ -443,6 +444,9 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ return 0;
+
if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420..a24971a33bfd 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2006,6 +2006,12 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
return 0;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (0 == smu7_data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
@@ -2140,7 +2146,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2163,7 +2169,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324d..5190e821200c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -1885,6 +1885,12 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
int res;
uint64_t tmp64;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (smu_data->smu7_data.fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2174,7 +2180,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2201,7 +2207,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
@@ -2214,6 +2220,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t tmp;
int result;
bool error = false;
@@ -2233,8 +2240,10 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
offsetof(SMU74_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
- if (!result)
+ if (!result) {
+ data->soft_regs_start = tmp;
smu_data->smu7_data.soft_regs_start = tmp;
+ }
error |= (0 != result);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7dae..f38a68747df0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
}
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec9..f49b5487b951 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
+ case UCODE_ID_MEC_STORAGE:
+ result = CGS_UCODE_ID_STORAGE;
+ break;
default:
break;
}
@@ -363,12 +366,16 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
&info);
if (!result) {
- entry->version = info.version;
+ entry->version = info.fw_version;
entry->id = (uint16_t)fw_type;
entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
+
+ /* digest need be excluded out */
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
}
@@ -400,8 +407,14 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
0x0);
if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+ if (!cgs_is_virtualization_enabled(smumgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SMU_DRAM_ADDR_HI,
+ smu_data->smu_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SMU_DRAM_ADDR_LO,
+ smu_data->smu_buffer.mc_addr_low);
+ }
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
@@ -452,6 +465,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
@@ -532,7 +549,6 @@ int smu7_init(struct pp_smumgr *smumgr)
smu_data = (struct smu7_smumgr *)(smumgr->backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_data->smu_buffer.data_size = 200*4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
@@ -555,6 +571,10 @@ int smu7_init(struct pp_smumgr *smumgr)
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ return 0;
+
+ smu_data->smu_buffer.data_size = 200*4096;
smu_allocate_memory(smumgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423ae..919be435b49c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48..2e1493ce1bb5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2496,6 +2496,12 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_MicrocodeFanControl))
return 0;
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
if (0 == smu_data->smu7_data.fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
@@ -2651,7 +2657,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x\n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2675,7 +2681,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac value %x\n", value);
+ printk(KERN_WARNING "can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 5f9124046b9b..eff9a232e72e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -140,7 +140,8 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
int result;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!(smu7_is_smc_ram_running(smumgr) ||
+ cgs_is_virtualization_enabled(smumgr->device))) {
/*Check if SMU is running in protected mode*/
if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..1bf83ed113b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,10 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
-
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
@@ -141,7 +138,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
@@ -221,32 +218,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+ struct dma_fence * fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -257,23 +254,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -354,7 +351,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -388,8 +386,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -410,21 +408,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +444,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -511,7 +509,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +519,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -547,7 +545,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -569,15 +567,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -618,13 +616,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
- if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
- sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!sched_fence_slab)
- return -ENOMEM;
- }
/* Each scheduler will run on a seperate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,6 +636,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
- if (atomic_dec_and_test(&sched_fence_slab_ref))
- kmem_cache_destroy(sched_fence_slab);
}
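
The scheduler hunks above are largely the mechanical fence to dma_fence rename, but they preserve one pattern worth noting: dma_fence_add_callback() returns -ENOENT when the fence has already signalled, and the scheduler then invokes the handler itself so a completed job is never missed. A standalone model of that "install callback or run it now" pattern, using a toy fence type rather than the real dma_fence API:

#include <stdbool.h>
#include <stdio.h>

#define ENOENT 2                        /* illustrative errno value */

struct toy_fence {
        bool signaled;
        void (*cb)(struct toy_fence *f);
};

/* Mirrors the contract used above: 0 = callback installed,
 * -ENOENT = fence already signalled, callback NOT installed. */
static int toy_fence_add_callback(struct toy_fence *f, void (*cb)(struct toy_fence *))
{
        if (f->signaled)
                return -ENOENT;
        f->cb = cb;
        return 0;
}

static void process_job(struct toy_fence *f)
{
        printf("job processed (signaled=%d)\n", f->signaled);
}

int main(void)
{
        struct toy_fence done = { .signaled = true };
        int r = toy_fence_add_callback(&done, process_job);

        if (r == -ENOENT)
                process_job(&done);     /* already signalled: run the handler directly */
        else if (r)
                printf("adding callback failed (%d)\n", r);
        return 0;
}
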
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..d8dc681bcda6 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,14 +25,11 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@@ -50,8 +47,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -66,10 +63,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -79,15 +76,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +100,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
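The new declarations above, together with the earlier removal from amd_sched_init()/amd_sched_fini(), move the fence slab from a reference-counted, per-scheduler allocation to a pair of explicit module-level hooks. A minimal, hedged sketch of how a caller might pair them is below; the example_* names are purely illustrative and not part of this patch, which only shows that the hooks are meant to be called exactly once per module.

	#include <linux/module.h>
	#include "gpu_scheduler.h"

	static int __init example_module_init(void)
	{
		int r = amd_sched_fence_slab_init();

		if (r)
			return r;
		/* ... remaining driver initialisation ... */
		return 0;
	}

	static void __exit example_module_exit(void)
	{
		/* ... remaining driver teardown ... */
		amd_sched_fence_slab_fini();
	}
	module_init(example_module_init);
	module_exit(example_module_exit);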
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..33f54d0a5c4f 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+ sched_fence_slab = kmem_cache_create(
+ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(sched_fence_slab);
+}
+
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
void *owner)
{
@@ -42,46 +61,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -95,22 +118,22 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
/**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
*
* @fence: fence
*
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -118,33 +141,33 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
}
/**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
*
* @f: fence
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
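Everything in these scheduler hunks is a mechanical rename: struct fence becomes struct dma_fence, fence_init()/fence_signal()/fence_put()/fence_default_wait() become their dma_fence_* counterparts, and <linux/fence.h> is replaced by <linux/dma-fence.h>. A minimal, hedged sketch built only from the renamed calls that appear in this diff (the example_* identifiers are illustrative, not part of the patch):

	#include <linux/dma-fence.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	static const char *example_driver_name(struct dma_fence *f)
	{
		return "example";
	}

	static const char *example_timeline_name(struct dma_fence *f)
	{
		return "example-timeline";
	}

	static bool example_enable_signaling(struct dma_fence *f)
	{
		return true;	/* signalled directly by the caller below */
	}

	static const struct dma_fence_ops example_fence_ops = {
		.get_driver_name = example_driver_name,
		.get_timeline_name = example_timeline_name,
		.enable_signaling = example_enable_signaling,
		.wait = dma_fence_default_wait,
	};

	static void example_fence_use(spinlock_t *lock, u64 context, unsigned int seq)
	{
		struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return;
		dma_fence_init(f, &example_fence_ops, lock, context, seq);
		dma_fence_signal(f);	/* returns 0 on first signal, as traced above */
		dma_fence_put(f);	/* drops the initial reference, freeing via RCU */
	}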
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 28e6471257d0..0b6eaa49a1db 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -65,9 +65,7 @@ static const struct file_operations arcpgu_drm_ops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b7a8b2ac4055..b69c66b4897e 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -14,170 +14,45 @@
*
*/
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder_slave.h>
-#include <drm/drm_atomic_helper.h>
#include "arcpgu.h"
-struct arcpgu_drm_connector {
- struct drm_connector connector;
- struct drm_encoder_slave *encoder_slave;
-};
-
-static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
-{
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_get_modes: cannot find slave encoder for connector\n");
- return 0;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs->get_modes == NULL)
- return 0;
-
- return sfuncs->get_modes(&slave->base, connector);
-}
-
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status = connector_status_unknown;
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
-
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_detect: cannot find slave encoder for connector\n");
- return status;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs && sfuncs->detect)
- return sfuncs->detect(&slave->base, connector);
-
- dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
- return status;
-}
-
-static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_helper_funcs
-arcpgu_drm_connector_helper_funcs = {
- .get_modes = arcpgu_drm_connector_get_modes,
-};
-
-static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = arcpgu_drm_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = arcpgu_drm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
- .dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
- .mode_set = drm_i2c_encoder_mode_set,
- .prepare = drm_i2c_encoder_prepare,
- .commit = drm_i2c_encoder_commit,
- .detect = drm_i2c_encoder_detect,
-};
-
static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
{
- struct arcpgu_drm_connector *arcpgu_connector;
- struct drm_i2c_encoder_driver *driver;
- struct drm_encoder_slave *encoder;
- struct drm_connector *connector;
- struct i2c_client *i2c_slave;
- int ret;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ int ret = 0;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_err(drm->dev, "failed to find i2c slave encoder\n");
- return -EPROBE_DEFER;
- }
-
- if (i2c_slave->dev.driver == NULL) {
- dev_err(drm->dev, "failed to find i2c slave driver\n");
+ /* Locate drm bridge from the hdmi encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
- driver =
- to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
- ret = driver->encoder_init(i2c_slave, drm, encoder);
- if (ret) {
- dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
- return ret;
- }
-
- encoder->base.possible_crtcs = 1;
- encoder->base.possible_clones = 0;
- ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ encoder->possible_crtcs = 1;
+ encoder->possible_clones = 0;
+ ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
return ret;
- drm_encoder_helper_add(&encoder->base,
- &arcpgu_drm_encoder_helper_funcs);
-
- arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
- GFP_KERNEL);
- if (!arcpgu_connector) {
- ret = -ENOMEM;
- goto error_encoder_cleanup;
- }
-
- connector = &arcpgu_connector->connector;
- drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
- ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
- dev_err(drm->dev, "failed to initialize drm connector\n");
- goto error_encoder_cleanup;
- }
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+ encoder->bridge = bridge;
- ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
- if (ret < 0) {
- dev_err(drm->dev, "could not attach connector to encoder\n");
- drm_connector_unregister(connector);
- goto error_connector_cleanup;
- }
-
- arcpgu_connector->encoder_slave = encoder;
-
- return 0;
-
-error_connector_cleanup:
- drm_connector_cleanup(connector);
+ ret = drm_bridge_attach(drm, bridge);
+ if (ret)
+ drm_encoder_cleanup(encoder);
-error_encoder_cleanup:
- drm_encoder_cleanup(&encoder->base);
return ret;
}
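The rewrite above drops the hand-rolled connector and drm_encoder_slave glue and instead resolves the HDMI encoder as a drm_bridge from its DT node, deferring probe until the bridge driver has registered. For context, a hedged sketch of the provider side that of_drm_find_bridge() depends on; the example_* names are illustrative, the header and the drm_bridge_add() return type may differ by tree, and of_node assumes CONFIG_OF:

	#include <drm/drm_crtc.h>	/* or <drm/drm_bridge.h>, depending on the tree */

	static const struct drm_bridge_funcs example_bridge_funcs = {
		/* a real bridge driver fills in attach/enable/disable/mode_set here */
	};

	static struct drm_bridge example_bridge;

	static int example_bridge_register(struct device *dev)
	{
		example_bridge.funcs = &example_bridge_funcs;
		example_bridge.of_node = dev->of_node;	/* matched by of_drm_find_bridge() */
		return drm_bridge_add(&example_bridge);
	}

Until that registration has happened, arcpgu_drm_hdmi_init() returns -EPROBE_DEFER rather than failing hard.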
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index 2bf06d71556a..bca3a678c955 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -41,12 +41,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -61,7 +55,6 @@ arcpgu_drm_connector_helper_funcs = {
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = arcpgu_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = arcpgu_drm_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 48019ae22ddb..7d4e5aa77195 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
clk_prepare_enable(hdlcd->clk);
hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+ drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_disable(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- if (!crtc->state->active)
- return;
-
+ drm_crtc_vblank_off(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
clk_disable_unprepare(hdlcd->clk);
}
@@ -223,14 +222,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
if (!plane->state->fb)
return;
- drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
@@ -238,7 +235,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
scanout_start = gem->paddr + plane->state->fb->offsets[0] +
plane->state->crtc_y * plane->state->fb->pitches[0] +
- plane->state->crtc_x * bpp / 8;
+ plane->state->crtc_x *
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418ce6be..e5f4f4a6546d 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -268,9 +268,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -337,14 +335,10 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_free;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_unload;
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
- goto err_unregister;
+ goto err_unload;
}
ret = pm_runtime_set_active(dev);
@@ -371,22 +365,29 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_fbdev;
}
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_register;
+
return 0;
+err_register:
+ if (hdlcd->fbdev) {
+ drm_fbdev_cma_fini(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ }
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
component_unbind_all(dev, drm);
-err_unregister:
- drm_dev_unregister(drm);
err_unload:
drm_irq_uninstall(drm);
of_reserved_mem_device_release(drm->dev);
err_free:
+ drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
@@ -398,6 +399,7 @@ static void hdlcd_drm_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ drm_dev_unregister(drm);
if (hdlcd->fbdev) {
drm_fbdev_cma_fini(hdlcd->fbdev);
hdlcd->fbdev = NULL;
@@ -411,7 +413,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unregister(drm);
drm_dev_unref(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -453,7 +454,8 @@ static int hdlcd_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
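This hdlcd rework and the malidp one that follows converge on the same ordering rule: drm_dev_register() becomes the very last step of bind (after components, vblank and fbdev are ready) and drm_dev_unregister() the very first step of unbind, so userspace never sees a half-initialised device. The probe path also switches to drm_of_component_match_add(), which takes its own reference on the port node, so the caller now drops its reference with of_node_put(). A minimal sketch of that shape, with illustrative names only and the driver-specific steps elided:

	static int example_bind(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);
		int ret;

		/* ... mode config, component_bind_all(), vblank, fbdev ... */

		ret = drm_dev_register(drm, 0);		/* last: device is now visible */
		if (ret)
			goto err_unwind;
		return 0;

	err_unwind:
		/* tear down in exact reverse order of the setup above */
		return ret;
	}

	static void example_unbind(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		drm_dev_unregister(drm);		/* first: hide the device */
		/* ... then fbdev, poll helpers, components, mode config ... */
		drm_dev_unref(drm);
	}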
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358b8f15..32f746e31379 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -42,6 +42,7 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
+ atomic_set(&malidp->config_valid, 0);
hwdev->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
if (hwdev->in_config_mode(hwdev))
@@ -91,8 +92,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(drm, state, 0);
malidp_atomic_commit_hw_done(state);
@@ -155,6 +155,12 @@ static int malidp_init(struct drm_device *drm)
return 0;
}
+static void malidp_fini(struct drm_device *drm)
+{
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+}
+
static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
@@ -197,9 +203,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
@@ -355,10 +359,6 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto init_fail;
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto register_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
ep = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!ep) {
@@ -377,6 +377,8 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto irq_init_fail;
+ drm->irq_enabled = true;
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
@@ -395,23 +397,31 @@ static int malidp_bind(struct device *dev)
}
drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
return 0;
+register_fail:
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
fbdev_fail:
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
irq_init_fail:
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
port_fail:
- drm_dev_unregister(drm);
-register_fail:
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
init_fail:
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
@@ -432,6 +442,7 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ drm_dev_unregister(drm);
if (malidp->fbdev) {
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
@@ -443,9 +454,7 @@ static void malidp_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- drm_dev_unregister(drm);
- malidp_de_planes_destroy(drm);
- drm_mode_config_cleanup(drm);
+ malidp_fini(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
clk_disable_unprepare(hwdev->mclk);
@@ -493,7 +502,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+ port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 271d2fb9711c..9fc8a2e405e4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -39,6 +39,9 @@ struct malidp_plane_state {
/* size of the required rotation memory if plane is rotated */
u32 rotmem_size;
+ /* internal format ID */
+ u8 format;
+ u8 n_planes;
};
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1d58c1..4bdf531f7844 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -125,6 +125,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -198,9 +199,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
- unsigned int depth;
- int bpp;
-
/* RGB888 or BGR888 can't be rotated */
if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -210,9 +208,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- drm_fb_get_bpp_depth(fmt, &depth, &bpp);
-
- return w * bpp;
+ return w * drm_format_plane_cpp(fmt, 0) * 8;
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
@@ -271,6 +267,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
{
u32 status, count = 100;
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
@@ -441,6 +438,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp500_de_formats,
.n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
.enter_config_mode = malidp500_enter_config_mode,
@@ -473,6 +471,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
@@ -506,6 +505,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
},
.input_formats = malidp550_de_formats,
.n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
.enter_config_mode = malidp550_enter_config_mode,
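The rotation-memory helper above now derives the per-pixel byte count with drm_format_plane_cpp() instead of the removed drm_fb_get_bpp_depth(). Working the comment's formula (size = rotated_width * (bpp / 8) * 8) for an assumed 1920-pixel-wide DRM_FORMAT_ARGB8888 layer: cpp = 4, so the function returns 1920 * 4 * 8 = 61440 bytes, i.e. eight lines' worth of rotated pixel data.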
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 141743e9f3a6..087e1202db3d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -88,6 +88,9 @@ struct malidp_hw_regmap {
/* list of supported input formats for each layer */
const struct malidp_input_format *input_formats;
const u8 n_input_formats;
+
+ /* pitch alignment requirement in bytes */
+ const u8 bus_align_bytes;
};
struct malidp_hw_device {
@@ -229,6 +232,12 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
+static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
+ unsigned int pitch)
+{
+ return !(pitch & (hwdev->map.bus_align_bytes - 1));
+}
+
/*
* background color components are defined as 12-bit values,
* they will be shifted right when stored on hardware that
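malidp_hw_pitch_valid() above relies on bus_align_bytes being a power of two (8 or 16 in the device tables earlier in this patch), so the alignment test reduces to a mask check. A stand-alone, hedged model of that check with example numbers:

	#include <stdbool.h>
	#include <stdio.h>

	/* Same test as malidp_hw_pitch_valid(), for a power-of-two alignment. */
	static bool pitch_valid(unsigned int pitch, unsigned int align)
	{
		return !(pitch & (align - 1));
	}

	int main(void)
	{
		/* 1920 px * 4 bytes = 7680 is 8-byte aligned; 7682 is not. */
		printf("%d %d\n", pitch_valid(7680, 8), pitch_valid(7682, 8));	/* prints: 1 0 */
		return 0;
	}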
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e5e0d6..63eec8f37cfc 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -27,6 +27,10 @@
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
#define LAYER_ROT_MASK (0xf << 8)
+#define LAYER_COMP_MASK (0x3 << 12)
+#define LAYER_COMP_PIXEL (0x3 << 12)
+#define LAYER_COMP_PLANE (0x2 << 12)
+#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
@@ -34,6 +38,14 @@
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP_LAYER_STRIDE 0x018
+/*
+ * This 4-entry look-up-table is used to determine the full 8-bit alpha value
+ * for formats with 1- or 2-bit alpha channels.
+ * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
+ * opacity for 2-bit formats.
+ */
+#define MALIDP_ALPHA_LUT 0xffaa5500
+
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
@@ -46,7 +58,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
devm_kfree(plane->dev->dev, mp);
}
-struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+static struct
+drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
struct malidp_plane_state *state, *m_state;
@@ -58,13 +71,15 @@ struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
m_state = to_malidp_plane_state(plane->state);
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->rotmem_size = m_state->rotmem_size;
+ state->format = m_state->format;
+ state->n_planes = m_state->n_planes;
}
return &state->base;
}
-void malidp_destroy_plane_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct malidp_plane_state *m_state = to_malidp_plane_state(state);
@@ -75,6 +90,7 @@ void malidp_destroy_plane_state(struct drm_plane *plane,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -86,17 +102,29 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- u8 format_id;
+ struct drm_framebuffer *fb;
+ int i;
u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
- format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- state->fb->pixel_format);
- if (format_id == MALIDP_INVALID_FORMAT_ID)
+ fb = state->fb;
+
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ fb->pixel_format);
+ if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
+ ms->n_planes = drm_format_num_planes(fb->pixel_format);
+ for (i = 0; i < ms->n_planes; i++) {
+ if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
@@ -135,17 +163,13 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_gem_cma_object *obj;
struct malidp_plane *mp;
const struct malidp_hw_regmap *map;
- u8 format_id;
+ struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u16 ptr;
- u32 format, src_w, src_h, dest_w, dest_h, val = 0;
- int num_planes, i;
+ u32 src_w, src_h, dest_w, dest_h, val;
+ int i;
mp = to_malidp_plane(plane);
-
map = &mp->hwdev->map;
- format = plane->state->fb->pixel_format;
- format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
- num_planes = drm_format_num_planes(format);
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -158,9 +182,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_h = plane->state->crtc_h;
}
- malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+ malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
- for (i = 0; i < num_planes; i++) {
+ for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
ptr = mp->layer->ptr + (i << 4);
@@ -181,9 +205,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
- /* first clear the rotation bits in the register */
- malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ /* first clear the rotation bits */
+ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
+ val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
@@ -193,11 +217,18 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ /*
+ * always enable pixel alpha blending until we have a way to change
+ * blend modes
+ */
+ val &= ~LAYER_COMP_MASK;
+ val |= LAYER_COMP_PIXEL;
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
- malidp_hw_setbits(mp->hwdev, val,
- mp->layer->base + MALIDP_LAYER_CONTROL);
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
}
static void malidp_de_plane_disable(struct drm_plane *plane,
@@ -222,6 +253,8 @@ int malidp_de_planes_init(struct drm_device *drm)
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ unsigned long flags = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 |
+ DRM_ROTATE_270 | DRM_REFLECT_X | DRM_REFLECT_Y;
u32 *formats;
int ret, i, j, n;
@@ -254,26 +287,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
- if (!drm->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y;
- drm->mode_config.rotation_property =
- drm_mode_create_rotation_property(drm, flags);
- }
- /* SMART layer can't be rotated */
- if (drm->mode_config.rotation_property && (id != DE_SMART))
- drm_object_attach_property(&plane->base.base,
- drm->mode_config.rotation_property,
- DRM_ROTATE_0);
-
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
+
+ /* Skip the features which the SMART layer doesn't have */
+ if (id == DE_SMART)
+ continue;
+
+ drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
+ malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+ plane->layer->base + MALIDP_LAYER_COMPOSE);
}
kfree(formats);
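For reference, the MALIDP_ALPHA_LUT value programmed above decodes as its comment describes: the four bytes of 0xffaa5500 are 0xff (255/255 = 100 %), 0xaa (170/255 ~ 66 %), 0x55 (85/255 ~ 33 %) and 0x00 (0 %), so 1-bit alpha formats resolve to fully opaque or fully transparent and 2-bit formats get the four evenly spaced opacity levels.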
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ffd673615772..a18f156c8b66 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,5 +1,5 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o
+ armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a51f8cbcfe26..95cb3966b2ca 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -18,6 +18,7 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_trace.h"
struct armada_frame_work {
struct armada_plane_work work;
@@ -164,19 +165,37 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
}
}
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ u32 pixel_format = fb->pixel_format;
+ int num_planes = drm_format_num_planes(pixel_format);
+ int i;
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ for (i = 0; i < num_planes; i++)
+ addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < 3; i++)
+ addrs[i] = 0;
+}
+
static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
int x, int y, struct armada_regs *regs, bool interlaced)
{
- struct armada_gem_object *obj = drm_fb_obj(fb);
unsigned pitch = fb->pitches[0];
- unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
- uint32_t addr_odd, addr_even;
+ u32 addrs[3], addr_odd, addr_even;
unsigned i = 0;
DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
pitch, x, y, fb->bits_per_pixel);
- addr_odd = addr_even = obj->dev_addr + offset;
+ armada_drm_plane_calc_addrs(addrs, fb, x, y);
+
+ addr_odd = addr_even = addrs[0];
if (interlaced) {
addr_even += pitch;
@@ -192,17 +211,18 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
}
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct armada_plane *plane)
+ struct drm_plane *plane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
if (work) {
- work->fn(dcrtc, plane, work);
+ work->fn(dcrtc, dplane, work);
drm_crtc_vblank_put(&dcrtc->crtc);
}
- wake_up(&plane->frame_wait);
+ wake_up(&dplane->frame_wait);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
@@ -307,14 +327,12 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
-
/*
* Tell the DRM core that vblank IRQs aren't going to happen for
* a while. This cleans up any pending vblank events for us.
*/
drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, plane);
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -416,10 +434,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
- if (ovl_plane) {
- struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (ovl_plane)
+ armada_drm_plane_work_run(dcrtc, ovl_plane);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -449,10 +465,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ) {
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
- armada_drm_plane_work_run(dcrtc, plane);
- }
+ if (stat & GRA_FRAME_IRQ)
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -466,6 +480,8 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+ trace_armada_drm_irq(&dcrtc->crtc, stat);
+
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
@@ -531,6 +547,35 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+ unsigned i;
+ u32 ctrl0;
+
+ i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
+
+ armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
+ armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
+
+ ctrl0 = state->ctrl0;
+ if (interlaced)
+ ctrl0 |= CFG_GRA_FTOGGLE;
+
+ armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_end(regs, i);
+ armada_drm_crtc_update_regs(dcrtc, regs);
+}
+
/* The mode_config.mutex will be held for this call */
static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
@@ -547,9 +592,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- i = armada_drm_crtc_calc_fb(dcrtc->crtc.primary->fb,
- x, y, regs, interlaced);
+ val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
+ val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
+
+ if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+
+ drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
+ drm_to_armada_plane(crtc->primary)->state.src_hw =
+ drm_to_armada_plane(crtc->primary)->state.dst_hw =
+ adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+ drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
lm = adj->crtc_htotal - adj->crtc_hsync_end;
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
@@ -625,8 +681,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
@@ -638,22 +692,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
}
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- if (interlaced)
- val |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
- LCD_SPU_DMA_CTRL0);
-
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
@@ -662,6 +700,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
+
+ armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
armada_drm_crtc_update(dcrtc);
@@ -1038,7 +1078,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* interrupt, so complete it now.
*/
if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
+ armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
return 0;
}
@@ -1172,7 +1212,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
- writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
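The new armada_drm_plane_calc_addrs() helper centralises the per-plane scanout address computation that the CRTC and (later in this patch) the overlay plane used to open-code. A hedged worked example for a single-plane XRGB8888 framebuffer (cpp = 4) with assumed values dev_addr = 0x1000000, offsets[0] = 0 and pitches[0] = 7680:
	x = 16, y = 2  ->  addrs[0] = 0x1000000 + 0 + 2 * 7680 + 16 * 4 = 0x1003c40
	addrs[1] = addrs[2] = 0 for the unused planes.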
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 04fdd22d483b..b08043e8cc3b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -41,10 +41,18 @@ struct armada_plane_work {
struct armada_plane_work *);
};
+struct armada_plane_state {
+ u32 src_hw;
+ u32 dst_hw;
+ u32 dst_yx;
+ u32 ctrl0;
+};
+
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
struct armada_plane_work *work;
+ struct armada_plane_state state;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
@@ -54,6 +62,8 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
+ int x, int y);
struct armada_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index d4f7ab0a30d4..90222e60d2d6 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -113,7 +113,7 @@ static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
struct drm_info_node *node;
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (node == NULL) {
+ if (!node) {
debugfs_remove(ent);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 3b2bb6128d40..77952d559a3c 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -53,6 +53,7 @@ struct armada_variant {
extern const struct armada_variant armada510_ops;
struct armada_private {
+ struct drm_device drm;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f608e4..07086b427c22 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -49,106 +49,6 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static int armada_drm_load(struct drm_device *dev, unsigned long flags)
-{
- struct armada_private *priv;
- struct resource *mem = NULL;
- int ret, n;
-
- for (n = 0; ; n++) {
- struct resource *r = platform_get_resource(dev->platformdev,
- IORESOURCE_MEM, n);
- if (!r)
- break;
-
- /* Resources above 64K are graphics memory */
- if (resource_size(r) > SZ_64K)
- mem = r;
- else
- return -EINVAL;
- }
-
- if (!mem)
- return -ENXIO;
-
- if (!devm_request_mem_region(dev->dev, mem->start,
- resource_size(mem), "armada-drm"))
- return -EBUSY;
-
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate private\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(dev->platformdev, dev);
- dev->dev_private = priv;
-
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
- /* Mode setting support */
- drm_mode_config_init(dev);
- dev->mode_config.min_width = 320;
- dev->mode_config.min_height = 200;
-
- /*
- * With vscale enabled, the maximum width is 1920 due to the
- * 1920 by 3 lines RAM
- */
- dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 2048;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = &armada_drm_mode_config_funcs;
- drm_mm_init(&priv->linear, mem->start, resource_size(mem));
- mutex_init(&priv->linear_lock);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret)
- goto err_kms;
-
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto err_comp;
-
- dev->irq_enabled = true;
-
- ret = armada_fbdev_init(dev);
- if (ret)
- goto err_comp;
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-
- err_comp:
- component_unbind_all(dev->dev, dev);
- err_kms:
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
-
- return ret;
-}
-
-static int armada_drm_unload(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- drm_kms_helper_poll_fini(dev);
- armada_fbdev_fini(dev);
-
- component_unbind_all(dev->dev, dev);
-
- drm_mode_config_cleanup(dev);
- drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- dev->dev_private = NULL;
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -186,16 +86,10 @@ static const struct file_operations armada_drm_fops = {
};
static struct drm_driver armada_drm_driver = {
- .load = armada_drm_load,
.lastclose = armada_drm_lastclose,
- .unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = armada_drm_debugfs_init,
- .debugfs_cleanup = armada_drm_debugfs_cleanup,
-#endif
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -218,12 +112,138 @@ static struct drm_driver armada_drm_driver = {
static int armada_drm_bind(struct device *dev)
{
- return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+ struct armada_private *priv;
+ struct resource *mem = NULL;
+ int ret, n;
+
+ for (n = 0; ; n++) {
+ struct resource *r = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, n);
+ if (!r)
+ break;
+
+ /* Resources above 64K are graphics memory */
+ if (resource_size(r) > SZ_64K)
+ mem = r;
+ else
+ return -EINVAL;
+ }
+
+ if (!mem)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
+ "armada-drm"))
+ return -EBUSY;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The drm_device structure must be at the start of
+ * armada_private for drm_dev_unref() to work correctly.
+ */
+ BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
+
+ ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
+ if (ret) {
+ dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
+ __func__, ret);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->drm.platformdev = to_platform_device(dev);
+ priv->drm.dev_private = priv;
+
+ platform_set_drvdata(priv->drm.platformdev, &priv->drm);
+
+ INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+ INIT_KFIFO(priv->fb_unref);
+
+ /* Mode setting support */
+ drm_mode_config_init(&priv->drm);
+ priv->drm.mode_config.min_width = 320;
+ priv->drm.mode_config.min_height = 200;
+
+ /*
+ * With vscale enabled, the maximum width is 1920 due to the
+ * 1920 by 3 lines RAM
+ */
+ priv->drm.mode_config.max_width = 1920;
+ priv->drm.mode_config.max_height = 2048;
+
+ priv->drm.mode_config.preferred_depth = 24;
+ priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
+ drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+ mutex_init(&priv->linear_lock);
+
+ ret = component_bind_all(dev, &priv->drm);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
+ if (ret)
+ goto err_comp;
+
+ priv->drm.irq_enabled = true;
+
+ ret = armada_fbdev_init(&priv->drm);
+ if (ret)
+ goto err_comp;
+
+ drm_kms_helper_poll_init(&priv->drm);
+
+ ret = drm_dev_register(&priv->drm, 0);
+ if (ret)
+ goto err_poll;
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_init(priv->drm.primary);
+#endif
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ armada_drm_driver.name, armada_drm_driver.major,
+ armada_drm_driver.minor, armada_drm_driver.patchlevel,
+ armada_drm_driver.date, dev_name(dev),
+ priv->drm.primary->index);
+
+ return 0;
+
+ err_poll:
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+ err_comp:
+ component_unbind_all(dev, &priv->drm);
+ err_kms:
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+ drm_dev_unref(&priv->drm);
+ return ret;
}
static void armada_drm_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct armada_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_fini(&priv->drm);
+ armada_fbdev_fini(&priv->drm);
+
+#ifdef CONFIG_DEBUG_FS
+ armada_drm_debugfs_cleanup(priv->drm.primary);
+#endif
+ drm_dev_unregister(&priv->drm);
+
+ component_unbind_all(dev, &priv->drm);
+
+ drm_mode_config_cleanup(&priv->drm);
+ drm_mm_takedown(&priv->linear);
+ flush_work(&priv->fb_unref_work);
+
+ drm_dev_unref(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
@@ -254,7 +274,7 @@ static void armada_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
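The armada conversion above follows the embedded-drm_device pattern: struct drm_device now sits at the start of armada_private, is set up with drm_dev_init(), and the BUILD_BUG_ON(offsetof(...) != 0) documents that drm_dev_unref() must still see the address of the whole allocation when it frees the device. A hedged sketch of the accessor this layout enables; the helper name is illustrative and not part of the patch:

	static inline struct armada_private *drm_to_armada_priv(struct drm_device *drm)
	{
		/* equivalent to the existing dev_private cast, but valid for any offset */
		return container_of(drm, struct armada_private, drm);
	}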
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ca73ad8614fe..c5dc06a55883 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -19,16 +19,10 @@
static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int armada_fb_create(struct drm_fb_helper *fbh,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 806791897304..a293c8be232c 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -17,12 +17,11 @@
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
- unsigned long addr = (unsigned long)vmf->virtual_address;
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
int ret;
- pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vma, addr, pfn);
+ pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
switch (ret) {
case 0:
@@ -212,7 +211,7 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return obj;
}
-struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
@@ -419,7 +418,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/* Prime support */
-struct sg_table *
+static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
@@ -594,11 +593,7 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
- DMA_TO_DEVICE);
- if (!dobj->sgt) {
- DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
- return -EINVAL;
- }
+ DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 152b4e716269..6743615232f5 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -15,6 +15,7 @@
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_trace.h"
struct armada_ovl_plane_properties {
uint32_t colorkey_yr;
@@ -32,10 +33,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
struct drm_framebuffer *old_fb;
- uint32_t src_hw;
- uint32_t dst_hw;
- uint32_t dst_yx;
- uint32_t ctrl0;
struct {
struct armada_plane_work work;
struct armada_regs regs[13];
@@ -87,6 +84,8 @@ static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
{
struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+
armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
armada_ovl_retire_fb(dplane, NULL);
}
@@ -120,6 +119,10 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
bool visible;
int ret;
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
DRM_ROTATE_0,
0, INT_MAX, true, false, &visible);
@@ -141,22 +144,22 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+ if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
- dplane->src_hw = val;
+ dplane->base.state.src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->dst_hw = val;
+ dplane->base.state.dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = dest.y1 << 16 | dest.x1;
- dplane->dst_yx = val;
+ dplane->base.state.dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
- } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
@@ -166,9 +169,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
if (plane->fb != fb) {
- struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t addr[3], pixel_format;
- int i, num_planes, hsub;
+ u32 addrs[3], pixel_format;
+ int num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -182,6 +184,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
src_y = src.y1 >> 16;
src_x = src.x1 >> 16;
+ armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
+
pixel_format = fb->pixel_format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
num_planes = drm_format_num_planes(pixel_format);
@@ -194,24 +198,17 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (src_x & (hsub - 1) && num_planes == 1)
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
- for (i = 0; i < num_planes; i++)
- addr[i] = obj->dev_addr + fb->offsets[i] +
- src_y * fb->pitches[i] +
- src_x * drm_format_plane_cpp(pixel_format, i);
- for (; i < ARRAY_SIZE(addr); i++)
- addr[i] = 0;
-
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
+ armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -223,28 +220,28 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
}
val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
- if (dplane->src_hw != val) {
- dplane->src_hw = val;
+ if (dplane->base.state.src_hw != val) {
+ dplane->base.state.src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- if (dplane->dst_hw != val) {
- dplane->dst_hw = val;
+ if (dplane->base.state.dst_hw != val) {
+ dplane->base.state.dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = dest.y1 << 16 | dest.x1;
- if (dplane->dst_yx != val) {
- dplane->dst_yx = val;
+ if (dplane->base.state.dst_yx != val) {
+ dplane->base.state.dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
- if (dplane->ctrl0 != ctrl0) {
- dplane->ctrl0 = ctrl0;
+ if (dplane->base.state.ctrl0 != ctrl0) {
+ dplane->base.state.ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
@@ -275,7 +272,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane)
armada_drm_crtc_plane_disable(dcrtc, plane);
dcrtc->plane = NULL;
- dplane->ctrl0 = 0;
+ dplane->base.state.ctrl0 = 0;
fb = xchg(&dplane->old_fb, NULL);
if (fb)
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
new file mode 100644
index 000000000000..068b336ba75f
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -0,0 +1,4 @@
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "armada_trace.h"
+#endif
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
new file mode 100644
index 000000000000..dc0cba70fd1a
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -0,0 +1,66 @@
+#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ARMADA_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM armada
+#define TRACE_INCLUDE_FILE armada_trace
+
+TRACE_EVENT(armada_drm_irq,
+ TP_PROTO(struct drm_crtc *crtc, u32 stat),
+ TP_ARGS(crtc, stat),
+ TP_STRUCT__entry(
+ __field(struct drm_crtc *, crtc)
+ __field(u32, stat)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->stat = stat;
+ ),
+ TP_printk("crtc %p stat 0x%08x",
+ __entry->crtc, __entry->stat)
+);
+
+TRACE_EVENT(armada_ovl_plane_update,
+ TP_PROTO(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h),
+ TP_ARGS(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ __field(struct drm_framebuffer *, fb)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ __entry->fb = fb;
+ ),
+ TP_printk("plane %p crtc %p fb %p",
+ __entry->plane, __entry->crtc, __entry->fb)
+);
+
+TRACE_EVENT(armada_ovl_plane_work,
+ TP_PROTO(struct drm_crtc *crtc, struct drm_plane *plane),
+ TP_ARGS(crtc, plane),
+ TP_STRUCT__entry(
+ __field(struct drm_plane *, plane)
+ __field(struct drm_crtc *, crtc)
+ ),
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("plane %p crtc %p",
+ __entry->plane, __entry->crtc)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f54afd2113a9..fd7c9eec92e4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -188,9 +188,7 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7a86e24e2687..d6f5ec64c667 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -253,7 +253,7 @@ static int astfb_create(struct drm_fb_helper *helper,
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_free_vram:
- vfree(afbdev->sysram);
+ vfree(sysram);
return ret;
}
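The one-line astfb_create() fix above frees the locally allocated sysram buffer on the error path instead of afbdev->sysram, which has not been assigned at that point. A minimal, hypothetical sketch of the same rule (free through the pointer you still hold, and only publish it on success); create_fb() and struct fbdev are made-up names:

#include <stdio.h>
#include <stdlib.h>

struct fbdev { void *sysram; };

/* Hypothetical helper: allocate a buffer, then try a follow-up step that
 * can fail before the buffer has been published in the device struct. */
static int create_fb(struct fbdev *dev, size_t size, int fail_step)
{
    void *sysram = malloc(size);
    if (!sysram)
        return -1;

    if (fail_step) {
        /* Error path: free through the local pointer; dev->sysram may
         * still be NULL (or stale) here, so freeing it would be wrong. */
        free(sysram);
        return -1;
    }

    dev->sysram = sysram;   /* only publish on success */
    return 0;
}

int main(void)
{
    struct fbdev dev = { 0 };

    printf("failing create: %d (dev.sysram=%p)\n",
           create_fb(&dev, 4096, 1), dev.sysram);
    printf("working create: %d (dev.sysram=%p)\n",
           create_fb(&dev, 4096, 0), dev.sysram);
    free(dev.sysram);
    return 0;
}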
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 5957c3e659fe..e26c98f51eb4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -839,12 +839,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-static enum drm_connector_status
-ast_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
@@ -853,7 +847,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0743e65cb240..2a1368fac1d1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_populate = ast_ttm_tt_populate,
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
.move = NULL,
.verify_access = ast_bo_verify_access,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 5f484310bee9..cbd0070265c9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
@@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else
@@ -748,9 +749,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
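The atmel-hlcdc commit path above switches from drm_atomic_state_free() to the reference-counted drm_atomic_state_get()/drm_atomic_state_put() pair: an extra reference is taken before the state is handed to the (possibly asynchronous) commit work, and the work drops it once cleanup is done. A minimal refcounting sketch of that ownership hand-off, written in plain C rather than the DRM API (struct state and the helpers are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for a reference-counted commit state (not the DRM API). */
struct state {
    int refcount;
    int id;
};

static struct state *state_alloc(int id)
{
    struct state *s = calloc(1, sizeof(*s));
    if (s) {
        s->refcount = 1;   /* caller owns the initial reference */
        s->id = id;
    }
    return s;
}

static void state_get(struct state *s) { s->refcount++; }

static void state_put(struct state *s)
{
    if (--s->refcount == 0) {
        printf("state %d freed\n", s->id);
        free(s);
    }
}

/* Stands in for the deferred commit work: consumes one reference. */
static void commit_work(struct state *s)
{
    printf("committing state %d\n", s->id);
    state_put(s);
}

int main(void)
{
    struct state *s = state_alloc(1);

    state_get(s);      /* extra reference for the queued work */
    commit_work(s);    /* work runs later and drops its reference */
    state_put(s);      /* caller drops its own reference */
    return 0;
}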
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 9d4c030672f0..246ed1e33d8a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
- (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
+ drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(state->base.rotation)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
- const struct atmel_hlcdc_layer_desc *desc,
- struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
@@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_GA_MASK);
}
- if (desc->layout.xstride && desc->layout.pstride)
- drm_object_attach_property(&plane->base.base,
- plane->base.dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ if (desc->layout.xstride && desc->layout.pstride) {
+ int ret;
+
+ ret = drm_plane_create_rotation_property(&plane->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270);
+ if (ret)
+ return ret;
+ }
if (desc->layout.csc) {
/*
@@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
+
+ return 0;
}
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
&atmel_hlcdc_layer_plane_helper_funcs);
/* Set default property values*/
- atmel_hlcdc_plane_init_properties(plane, desc, props);
+ ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
+ if (ret)
+ return ERR_PTR(ret);
return plane;
}
@@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270);
- if (!dev->mode_config.rotation_property)
- return ERR_PTR(-ENOMEM);
-
return props;
}
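Above, atmel_hlcdc_plane.c replaces the open-coded DRM_ROTATE_90 | DRM_ROTATE_270 mask tests with drm_rotation_90_or_270() and moves the rotation property onto each plane. The guarded width/height swap is simple; here is a standalone sketch with made-up rotation flags standing in for DRM_ROTATE_*:

#include <stdio.h>

/* Hypothetical rotation bit flags, mirroring the DRM_ROTATE_* idea. */
#define ROTATE_0    (1 << 0)
#define ROTATE_90   (1 << 1)
#define ROTATE_180  (1 << 2)
#define ROTATE_270  (1 << 3)

static int rotation_90_or_270(unsigned int rotation)
{
    return (rotation & (ROTATE_90 | ROTATE_270)) != 0;
}

int main(void)
{
    unsigned int rotation = ROTATE_90;
    unsigned int crtc_w = 1920, crtc_h = 1080;

    /* Swap width and height for 90/270 degree rotations. */
    if (rotation_90_or_270(rotation)) {
        unsigned int tmp = crtc_w;

        crtc_w = crtc_h;
        crtc_h = tmp;
    }

    printf("effective size: %ux%u\n", crtc_w, crtc_h);
    return 0;
}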
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 534227df23f3..15a293e65b31 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -70,9 +70,7 @@ static const struct file_operations bochs_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index e1ec498a6b6e..da790a1c302a 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -22,14 +22,10 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = bochsfb_mmap,
};
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 0b4e5d117043..d5e63eff357b 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -216,12 +216,6 @@ bochs_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
-static enum drm_connector_status bochs_connector_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
@@ -230,7 +224,6 @@ static const struct drm_connector_helper_funcs bochs_connector_connector_helper_
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = bochs_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
};
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06..099a3c688c26 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_populate = ttm_pool_populate,
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
.move = NULL,
.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..eb8688ec6f18 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -39,6 +39,15 @@ config DRM_DW_HDMI_AHB_AUDIO
Designware HDMI block. This is used in conjunction with
the i.MX6 HDMI driver.
+config DRM_DW_HDMI_I2S_AUDIO
+ tristate "Synopsis Designware I2S Audio interface"
+ depends on SND_SOC
+ depends on DRM_DW_HDMI
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the I2S Audio interface which is part of the Synopsis
+ Designware HDMI block.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
@@ -57,6 +66,13 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SIL_SII8620
+ tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
config DRM_SII902X
tristate "Silicon Image sii902x RGB/HDMI bridge"
depends on OF
@@ -74,6 +90,13 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TI_TFP410
+ tristate "TI TFP410 DVI/HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ ---help---
+ Texas Instruments TFP410 DVI/HDMI Transmitter driver
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3cf765d..2e83a7855399 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -4,9 +4,12 @@ obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index d2b0499ab7d7..2fed567f9943 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,6 +6,14 @@ config DRM_I2C_ADV7511
help
Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+config DRM_I2C_ADV7511_AUDIO
+ bool "ADV7511 HDMI Audio driver"
+ depends on DRM_I2C_ADV7511 && SND_SOC
+ select SND_SOC_HDMI_CODEC
+ help
+ Support the ADV7511 HDMI Audio interface. This is used in
+ conjunction with the ADV7511 HDMI driver.
+ conjunction with the ADV7511 HDMI driver.
+
config DRM_I2C_ADV7533
bool "ADV7533 encoder"
depends on DRM_I2C_ADV7511
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 9019327fff4c..5ba675534f6e 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,3 +1,4 @@
adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 161c923d6162..992d76ce02bb 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -309,6 +309,8 @@ struct adv7511 {
struct drm_display_mode curr_mode;
unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
unsigned int current_edid_segment;
uint8_t edid_buf[256];
@@ -334,6 +336,7 @@ struct adv7511 {
bool use_timing_gen;
enum adv7511_type type;
+ struct platform_device *audio_pdev;
};
#ifdef CONFIG_DRM_I2C_ADV7533
@@ -389,4 +392,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
}
#endif
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
+void adv7511_audio_exit(struct adv7511 *adv7511);
+#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
+static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ return 0;
+}
+static inline void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+}
+#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
+
#endif /* __DRM_I2C_ADV7511_H__ */
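The adv7511.h hunk above follows the usual pattern for optional Kconfig features: real prototypes when CONFIG_DRM_I2C_ADV7511_AUDIO is enabled, static inline no-op stubs otherwise, so the probe/remove paths can call adv7511_audio_init()/exit() unconditionally. A hypothetical standalone illustration of that pattern (FEATURE_AUDIO is a made-up build switch, not a kernel config symbol):

#include <stdio.h>

/* FEATURE_AUDIO is a made-up toggle: build with cc -DFEATURE_AUDIO demo.c */
#ifdef FEATURE_AUDIO
static int audio_init(void)  { printf("audio initialised\n"); return 0; }
static void audio_exit(void) { printf("audio shut down\n"); }
#else
/* Compiled-out stubs: callers stay free of #ifdef clutter. */
static inline int audio_init(void)  { return 0; }
static inline void audio_exit(void) { }
#endif

int main(void)
{
    if (audio_init())
        return 1;
    audio_exit();
    return 0;
}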
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
new file mode 100644
index 000000000000..cf92ebfe6ab7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -0,0 +1,213 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Copyright (c) 2016, Linaro Limited
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <sound/core.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "adv7511.h"
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+ unsigned int rate;
+ unsigned int len;
+
+ switch (hparms->sample_rate) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (hparms->sample_width) {
+ case 16:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case 18:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case 20:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case 24:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ invert_clock = fmt->bit_clk_inv;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ adv7511->f_audio = hparms->sample_rate;
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int audio_startup(struct device *dev, void *data)
+{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+
+ /* hide Audio infoframe updates */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+ /* enable N/CTS, enable Audio sample packets */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(5), BIT(5));
+ /* enable N/CTS */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(6), BIT(6));
+ /* not copyrighted */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
+ BIT(5), BIT(5));
+ /* enable audio infoframes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ BIT(3), BIT(3));
+ /* AV mute disable */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ BIT(5), 0);
+ return 0;
+}
+
+static void audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static const struct hdmi_codec_ops adv7511_codec_ops = {
+ .hw_params = adv7511_hdmi_hw_params,
+ .audio_shutdown = audio_shutdown,
+ .audio_startup = audio_startup,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &adv7511_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+};
+
+int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
+}
+
+void adv7511_audio_exit(struct adv7511 *adv7511)
+{
+ if (adv7511->audio_pdev) {
+ platform_device_unregister(adv7511->audio_pdev);
+ adv7511->audio_pdev = NULL;
+ }
+}
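The new adv7511_audio.c above programs the HDMI audio clock-regeneration registers from the TMDS clock and the audio sample rate: N takes the standard values 4096/6272/6144 for 32/44.1/48 kHz and CTS = f_TMDS * N / (128 * fs), which adv7511_calc_cts_n() expresses with f_tmds held in kHz. A standalone sketch of the same relationship, working in Hz with an example 1080p60 TMDS clock (the figures are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Standard N values from the HDMI specification for 32/44.1/48 kHz. */
static unsigned int audio_n(unsigned int fs)
{
    switch (fs) {
    case 32000: return 4096;
    case 44100: return 6272;
    case 48000: return 6144;
    default:    return 0;
    }
}

/* CTS = f_TMDS * N / (128 * fs); f_tmds given in Hz here. */
static uint64_t audio_cts(uint64_t f_tmds, unsigned int fs, unsigned int n)
{
    return f_tmds * n / (128ULL * fs);
}

int main(void)
{
    const uint64_t f_tmds = 148500000;   /* example TMDS clock (1080p60), Hz */
    const unsigned int rates[] = { 32000, 44100, 48000 };

    for (unsigned int i = 0; i < 3; i++) {
        unsigned int n = audio_n(rates[i]);

        printf("fs=%u Hz  N=%u  CTS=%llu\n", rates[i], n,
               (unsigned long long)audio_cts(f_tmds, rates[i], n));
    }
    return 0;
}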
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8ed3906dd411..8dba729f6ef9 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1037,6 +1037,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
+ adv7511_audio_init(dev, adv7511);
+
return 0;
err_unregister_cec:
@@ -1058,6 +1060,8 @@ static int adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
+ adv7511_audio_exit(adv7511);
+
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index d7f7b7ce8ebe..8b210373cfa2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x17, 0xd0 },
{ 0x24, 0x20 },
{ 0x57, 0x11 },
+ { 0x05, 0xc8 },
};
static const struct regmap_config adv7533_cec_regmap_config = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6e0447f329a2..eb9bf8786c24 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -112,7 +112,7 @@ int analogix_dp_enable_psr(struct device *dev)
struct edp_vsc_psr psr_vsc;
if (!dp->psr_support)
- return -EINVAL;
+ return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
@@ -135,7 +135,7 @@ int analogix_dp_disable_psr(struct device *dev)
struct edp_vsc_psr psr_vsc;
if (!dp->psr_support)
- return -EINVAL;
+ return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index cd37ac058675..303083ad28e3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1162,5 +1162,5 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred;
+ return num_transferred > 0 ? num_transferred : -EBUSY;
}
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232185a7..e5706981c934 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -23,6 +24,7 @@ struct dumb_vga {
struct drm_connector connector;
struct i2c_adapter *ddc;
+ struct regulator *vdd;
};
static inline struct dumb_vga *
@@ -124,8 +126,30 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return 0;
}
+static void dumb_vga_enable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+ int ret = 0;
+
+ if (vga->vdd)
+ ret = regulator_enable(vga->vdd);
+
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+}
+
+static void dumb_vga_disable(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+
+ if (vga->vdd)
+ regulator_disable(vga->vdd);
+}
+
static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
.attach = dumb_vga_attach,
+ .enable = dumb_vga_enable,
+ .disable = dumb_vga_disable,
};
static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
@@ -169,6 +193,15 @@ static int dumb_vga_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, vga);
+ vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(vga->vdd)) {
+ ret = PTR_ERR(vga->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ vga->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
if (IS_ERR(vga->ddc)) {
if (PTR_ERR(vga->ddc) == -ENODEV) {
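The dumb-vga-dac change above makes the "vdd" supply optional: probing continues when the regulator is simply absent, but -EPROBE_DEFER is still propagated so the driver retries once the supply appears. A hypothetical sketch of that optional-resource decision; get_supply() and the scenarios are stand-ins, not the regulator API:

#include <stdio.h>
#include <errno.h>

#define EPROBE_DEFER 517   /* kernel-internal code, not in userspace errno.h */

struct supply { const char *name; };

/* Stand-in for devm_regulator_get_optional(): may return NULL plus an error. */
static struct supply *get_supply(const char *name, int *err, int scenario)
{
    static struct supply vdd = { "vdd" };

    (void)name;
    switch (scenario) {
    case 0:  *err = 0;             return &vdd;  /* supply present  */
    case 1:  *err = -ENODEV;       return NULL;  /* simply absent   */
    default: *err = -EPROBE_DEFER; return NULL;  /* not ready yet   */
    }
}

static int probe(int scenario)
{
    int err;
    struct supply *vdd = get_supply("vdd", &err, scenario);

    if (!vdd) {
        if (err == -EPROBE_DEFER)
            return err;            /* retry later, do not continue */
        printf("no vdd supply (%d), continuing without it\n", err);
    } else {
        printf("using supply %s\n", vdd->name);
    }
    return 0;
}

int main(void)
{
    for (int s = 0; s < 3; s++)
        printf("probe(scenario %d) -> %d\n", s, probe(s));
    return 0;
}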
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
index 91f631beecc7..fd1f745c6073 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
@@ -11,4 +11,11 @@ struct dw_hdmi_audio_data {
u8 *eld;
};
+struct dw_hdmi_i2s_audio_data {
+ struct dw_hdmi *hdmi;
+
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
+};
+
#endif
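struct dw_hdmi_i2s_audio_data above hands the I2S audio sub-driver a pair of register accessor callbacks instead of exposing the parent's register space directly. A plain-C sketch of that indirection with made-up device and register names:

#include <stdio.h>
#include <stdint.h>

struct parent_dev { uint8_t regs[256]; };

/* Accessors published by the parent; the child never touches regs[]. */
struct child_platform_data {
    struct parent_dev *parent;
    void (*write)(struct parent_dev *p, uint8_t val, int offset);
    uint8_t (*read)(struct parent_dev *p, int offset);
};

static void parent_write(struct parent_dev *p, uint8_t val, int offset)
{
    p->regs[offset] = val;
}

static uint8_t parent_read(struct parent_dev *p, int offset)
{
    return p->regs[offset];
}

/* The child only sees the callbacks it was given; 0x10 is a made-up register. */
static void child_configure(struct child_platform_data *pdata)
{
    pdata->write(pdata->parent, 0x2F, 0x10);
    printf("reg 0x10 = 0x%02x\n", pdata->read(pdata->parent, 0x10));
}

int main(void)
{
    struct parent_dev dev = { { 0 } };
    struct child_platform_data pdata = {
        .parent = &dev, .write = parent_write, .read = parent_read,
    };

    child_configure(&pdata);
    return 0;
}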
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
new file mode 100644
index 000000000000..aaf287d2e91d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c
@@ -0,0 +1,141 @@
+/*
+ * dw-hdmi-i2s-audio.c
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/bridge/dw_hdmi.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "dw-hdmi.h"
+#include "dw-hdmi-audio.h"
+
+#define DRIVER_NAME "dw-hdmi-i2s-audio"
+
+static inline void hdmi_write(struct dw_hdmi_i2s_audio_data *audio,
+ u8 val, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ audio->write(hdmi, val, offset);
+}
+
+static inline u8 hdmi_read(struct dw_hdmi_i2s_audio_data *audio, int offset)
+{
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return audio->read(hdmi, offset);
+}
+
+static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+ u8 conf0 = 0;
+ u8 conf1 = 0;
+ u8 inputclkfs = 0;
+
+ /* only I2S is supported */
+ if ((fmt->fmt != HDMI_I2S) ||
+ (fmt->bit_clk_master | fmt->frame_clk_master)) {
+ dev_err(dev, "unsupported format/settings\n");
+ return -EINVAL;
+ }
+
+ inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
+ conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE;
+
+ switch (hparms->sample_width) {
+ case 16:
+ conf1 = HDMI_AUD_CONF1_WIDTH_16;
+ break;
+ case 24:
+ case 32:
+ conf1 = HDMI_AUD_CONF1_WIDTH_24;
+ break;
+ }
+
+ dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+
+ hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
+ hdmi_write(audio, conf0, HDMI_AUD_CONF0);
+ hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+
+ dw_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ dw_hdmi_audio_disable(hdmi);
+
+ hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+}
+
+static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+ .hw_params = dw_hdmi_i2s_hw_params,
+ .audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+};
+
+static int snd_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = pdev->dev.platform_data;
+ struct platform_device_info pdevinfo;
+ struct hdmi_codec_pdata pdata;
+ struct platform_device *platform;
+
+ pdata.ops = &dw_hdmi_i2s_ops;
+ pdata.i2s = 1;
+ pdata.max_i2s_channels = 6;
+ pdata.data = audio;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.parent = pdev->dev.parent;
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.name = HDMI_CODEC_DRV_NAME;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+
+ platform = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(platform))
+ return PTR_ERR(platform);
+
+ dev_set_drvdata(&pdev->dev, platform);
+
+ return 0;
+}
+
+static int snd_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct platform_device *platform = dev_get_drvdata(&pdev->dev);
+
+ platform_device_unregister(platform);
+
+ return 0;
+}
+
+static struct platform_driver snd_dw_hdmi_driver = {
+ .probe = snd_dw_hdmi_probe,
+ .remove = snd_dw_hdmi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(snd_dw_hdmi_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Synopsis Designware HDMI I2S ALSA SoC interface");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index ab7023e5dfde..235ce7d1583d 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1,14 +1,15 @@
/*
+ * DesignWare High-Definition Multimedia Interface (HDMI) driver
+ *
+ * Copyright (C) 2013-2015 Mentor Graphics Inc.
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Designware High-Definition Multimedia Interface (HDMI) driver
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
#include <linux/module.h>
#include <linux/irq.h>
@@ -101,6 +102,17 @@ struct hdmi_data_info {
struct hdmi_vmode video_mode;
};
+struct dw_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ struct mutex lock; /* used to serialize data transfers */
+ struct completion cmp;
+ u8 stat;
+
+ u8 slave_reg;
+ bool is_regaddr;
+};
+
struct dw_hdmi {
struct drm_connector connector;
struct drm_encoder *encoder;
@@ -111,6 +123,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
const struct dw_hdmi_plat_data *plat_data;
@@ -198,6 +211,201 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
hdmi_modb(hdmi, data << shift, mask, reg);
}
+static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
+{
+ /* Software reset */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
+
+ /* Set Standard Mode speed (determined to be 100KHz on iMX6) */
+ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV);
+
+ /* Set done, not acknowledged and arbitration interrupt polarities */
+ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT);
+ hdmi_writeb(hdmi, HDMI_I2CM_CTLINT_NAC_POL | HDMI_I2CM_CTLINT_ARB_POL,
+ HDMI_I2CM_CTLINT);
+
+ /* Clear DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_I2CM_STAT0);
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+}
+
+static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ dev_dbg(hdmi->dev, "set read register address to 0\n");
+ i2c->slave_reg = 0x00;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+
+ *buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ /* Use the first write byte as register address */
+ i2c->slave_reg = buf[0];
+ length--;
+ buf++;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ hdmi_writeb(hdmi, *buf++, HDMI_I2CM_DATAO);
+ hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS);
+ hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE,
+ HDMI_I2CM_OPERATION);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct dw_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
+ dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].addr != addr) {
+ dev_warn(hdmi->dev,
+ "unsupported transfer, changed slave address\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (msgs[i].len == 0) {
+ dev_dbg(hdmi->dev,
+ "unsupported transfer %d/%d, no data\n",
+ i + 1, num);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&i2c->lock);
+
+ /* Unmute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0);
+
+ /* Set slave device address taken from the first I2C message */
+ hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE);
+
+ /* Set slave device register address on transfer */
+ i2c->is_regaddr = false;
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = dw_hdmi_i2c_read(hdmi, msgs[i].buf, msgs[i].len);
+ else
+ ret = dw_hdmi_i2c_write(hdmi, msgs[i].buf, msgs[i].len);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute DONE and ERROR interrupts */
+ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE,
+ HDMI_IH_MUTE_I2CM_STAT0);
+
+ mutex_unlock(&i2c->lock);
+
+ return ret;
+}
+
+static u32 dw_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dw_hdmi_algorithm = {
+ .master_xfer = dw_hdmi_i2c_xfer,
+ .functionality = dw_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct dw_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &dw_hdmi_algorithm;
+ strlcpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
unsigned int n)
{
@@ -1512,16 +1720,40 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.mode_set = dw_hdmi_bridge_mode_set,
};
+static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ unsigned int stat;
+
+ stat = hdmi_readb(hdmi, HDMI_IH_I2CM_STAT0);
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmi_writeb(hdmi, stat, HDMI_IH_I2CM_STAT0);
+
+ i2c->stat = stat;
+
+ complete(&i2c->cmp);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (hdmi->i2c)
+ ret = dw_hdmi_i2c_irq(hdmi);
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
- if (intr_stat)
+ if (intr_stat) {
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ return IRQ_WAKE_THREAD;
+ }
- return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+ return ret;
}
static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -1639,10 +1871,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
- struct dw_hdmi_audio_data audio;
struct dw_hdmi *hdmi;
int ret;
u32 val = 1;
+ u8 config0;
+ u8 config1;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -1681,7 +1914,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
- hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
@@ -1693,20 +1926,22 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
}
hdmi->regs = devm_ioremap_resource(dev, iores);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto err_res;
+ }
hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
if (IS_ERR(hdmi->isfr_clk)) {
ret = PTR_ERR(hdmi->isfr_clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
- return ret;
+ goto err_res;
}
ret = clk_prepare_enable(hdmi->isfr_clk);
if (ret) {
dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- return ret;
+ goto err_res;
}
hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
@@ -1744,6 +1979,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
*/
hdmi_init_clk_regenerator(hdmi);
+ /* If DDC bus is not specified, try to register HDMI I2C bus */
+ if (!hdmi->ddc) {
+ hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc))
+ hdmi->ddc = NULL;
+ }
+
/*
* Configure registers related to HDMI interrupt
* generation before registering IRQ.
@@ -1770,7 +2012,12 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
- if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
+ config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID);
+ config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID);
+
+ if (config1 & HDMI_CONFIG1_AHB) {
+ struct dw_hdmi_audio_data audio;
+
audio.phys = iores->start;
audio.base = hdmi->regs;
audio.irq = irq;
@@ -1782,16 +2029,39 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.size_data = sizeof(audio);
pdevinfo.dma_mask = DMA_BIT_MASK(32);
hdmi->audio = platform_device_register_full(&pdevinfo);
+ } else if (config0 & HDMI_CONFIG0_I2S) {
+ struct dw_hdmi_i2s_audio_data audio;
+
+ audio.hdmi = hdmi;
+ audio.write = hdmi_writeb;
+ audio.read = hdmi_readb;
+
+ pdevinfo.name = "dw-hdmi-i2s-audio";
+ pdevinfo.data = &audio;
+ pdevinfo.size_data = sizeof(audio);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
+ if (hdmi->i2c)
+ dw_hdmi_i2c_init(hdmi);
+
dev_set_drvdata(dev, hdmi);
return 0;
err_iahb:
+ if (hdmi->i2c) {
+ i2c_del_adapter(&hdmi->i2c->adap);
+ hdmi->ddc = NULL;
+ }
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
+err_res:
+ i2c_put_adapter(hdmi->ddc);
return ret;
}
@@ -1809,13 +2079,18 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- i2c_put_adapter(hdmi->ddc);
+
+ if (hdmi->i2c)
+ i2c_del_adapter(&hdmi->i2c->adap);
+ else
+ i2c_put_adapter(hdmi->ddc);
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
MODULE_DESCRIPTION("DW HDMI transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dw-hdmi");
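Most of the dw-hdmi.c diff above adds a driver for the controller's built-in DDC I2C master, used when no external ddc-i2c-bus is specified. Its transfer logic treats the first written byte of a transaction as the slave register address and then auto-increments while moving one data byte per operation (dw_hdmi_i2c_write()/dw_hdmi_i2c_read()). Below is a hypothetical standalone simulation of just that addressing scheme; the "slave" is an in-memory array and there is no interrupt or completion handling:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Fake register-addressed slave (think of a 256-byte EDID EEPROM). */
struct fake_slave {
    uint8_t mem[256];
};

struct xfer_state {
    uint8_t slave_reg;   /* current register address            */
    int is_regaddr;      /* has the address byte been seen yet?  */
};

/* Mirrors dw_hdmi_i2c_write(): the first byte selects the register,
 * the rest are data bytes written with auto-increment. */
static void fake_i2c_write(struct fake_slave *s, struct xfer_state *x,
                           const uint8_t *buf, unsigned int len)
{
    if (!x->is_regaddr) {
        x->slave_reg = buf[0];
        buf++;
        len--;
        x->is_regaddr = 1;
    }
    while (len--)
        s->mem[x->slave_reg++] = *buf++;
}

/* Mirrors dw_hdmi_i2c_read(): reads from the current register onwards,
 * defaulting to address 0 if no address byte was written first. */
static void fake_i2c_read(struct fake_slave *s, struct xfer_state *x,
                          uint8_t *buf, unsigned int len)
{
    if (!x->is_regaddr) {
        x->slave_reg = 0;
        x->is_regaddr = 1;
    }
    while (len--)
        *buf++ = s->mem[x->slave_reg++];
}

int main(void)
{
    struct fake_slave slave;
    struct xfer_state xfer = { 0, 0 };
    const uint8_t wr[] = { 0x20, 0xaa, 0xbb, 0xcc };
    const uint8_t addr[] = { 0x20 };
    uint8_t out[3];

    memset(&slave, 0, sizeof(slave));

    /* "Write" transaction: select register 0x20, store three bytes. */
    fake_i2c_write(&slave, &xfer, wr, sizeof(wr));

    /* New transaction: select register 0x20 again, then read back. */
    xfer.is_regaddr = 0;
    fake_i2c_write(&slave, &xfer, addr, sizeof(addr));
    fake_i2c_read(&slave, &xfer, out, sizeof(out));

    printf("read back: %02x %02x %02x\n", out[0], out[1], out[2]);
    return 0;
}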
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..55135bbd0c16 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
@@ -545,6 +545,9 @@
#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
enum {
+/* CONFIG0_ID field values */
+ HDMI_CONFIG0_I2S = 0x10,
+
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
@@ -566,6 +569,10 @@ enum {
HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
HDMI_IH_PHY_STAT0_HPD = 0x1,
+/* IH_I2CM_STAT0 and IH_MUTE_I2CM_STAT0 field values */
+ HDMI_IH_I2CM_STAT0_DONE = 0x2,
+ HDMI_IH_I2CM_STAT0_ERROR = 0x1,
+
/* IH_MUTE_I2CMPHY_STAT0 field values */
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
@@ -887,6 +894,17 @@ enum {
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
+/* AUD_CONF0 field values */
+ HDMI_AUD_CONF0_SW_RESET = 0x80,
+ HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F,
+
+/* AUD_CONF1 field values */
+ HDMI_AUD_CONF1_MODE_I2S = 0x00,
+ HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02,
+ HDMI_AUD_CONF1_MODE_LEFT_J = 0x04,
+ HDMI_AUD_CONF1_WIDTH_16 = 0x10,
+ HDMI_AUD_CONF1_WIDTH_24 = 0x18,
+
/* AUD_CTS3 field values */
HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
@@ -901,6 +919,12 @@ enum {
HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
+/* HDMI_AUD_INPUTCLKFS field values */
+ HDMI_AUD_INPUTCLKFS_128FS = 0,
+ HDMI_AUD_INPUTCLKFS_256FS = 1,
+ HDMI_AUD_INPUTCLKFS_512FS = 2,
+ HDMI_AUD_INPUTCLKFS_64FS = 4,
+
/* AHB_DMA_CONF0 field values */
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
@@ -1032,6 +1056,21 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_MASK = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
+
+/* I2CM_OPERATION field values */
+ HDMI_I2CM_OPERATION_WRITE = 0x10,
+ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
+ HDMI_I2CM_OPERATION_READ = 0x1,
+
+/* I2CM_INT field values */
+ HDMI_I2CM_INT_DONE_POL = 0x8,
+ HDMI_I2CM_INT_DONE_MASK = 0x4,
+
+/* I2CM_CTLINT field values */
+ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
+ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
+ HDMI_I2CM_CTLINT_ARB_POL = 0x8,
+ HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
};
#endif /* __DW_HDMI_H__ */
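The new HDMI_CONFIG0_I2S bit above, together with the existing HDMI_CONFIG1_AHB bit, lets dw_hdmi_bind() probe the synthesised audio interface from the controller's ID registers and register the matching audio sub-device. A hypothetical sketch of that capability probe (the register values in main() are made up):

#include <stdio.h>
#include <stdint.h>

#define CONFIG0_I2S 0x10
#define CONFIG1_AHB 0x01

static const char *pick_audio_backend(uint8_t config0, uint8_t config1)
{
    if (config1 & CONFIG1_AHB)
        return "dw-hdmi-ahb-audio";
    if (config0 & CONFIG0_I2S)
        return "dw-hdmi-i2s-audio";
    return "none";
}

int main(void)
{
    /* Hypothetical ID register contents for three controller builds. */
    const uint8_t samples[][2] = { {0x00, 0x01}, {0x10, 0x00}, {0x00, 0x00} };

    for (int i = 0; i < 3; i++)
        printf("config0=0x%02x config1=0x%02x -> %s\n",
               samples[i][0], samples[i][1],
               pick_audio_backend(samples[i][0], samples[i][1]));
    return 0;
}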
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index f1a99938e924..27f98c518dde 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -239,16 +239,9 @@ static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs =
.get_modes = ptn3460_get_modes,
};
-static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ptn3460_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 6f7c2f9860d2..ac8cc5b50d9f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -477,16 +477,9 @@ static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
.get_modes = ps8622_get_modes,
};
-static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs ps8622_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ps8622_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 000000000000..b2c267df7ee7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+ CM_DISCONNECTED,
+ CM_DISCOVERY,
+ CM_MHL1,
+ CM_MHL3,
+ CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+ SINK_NONE,
+ SINK_HDMI,
+ SINK_DVI
+};
+
+enum sii8620_mt_state {
+ MT_STATE_READY,
+ MT_STATE_BUSY,
+ MT_STATE_DONE
+};
+
+struct sii8620 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct clk *clk_xtal;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_int;
+ struct regulator_bulk_data supplies[2];
+ struct mutex lock; /* context lock, protects fields below */
+ int error;
+ enum sii8620_mode mode;
+ enum sii8620_sink_type sink_type;
+ u8 cbus_status;
+ u8 stat[MHL_DST_SIZE];
+ u8 xstat[MHL_XDS_SIZE];
+ u8 devcap[MHL_DCAP_SIZE];
+ u8 xdevcap[MHL_XDC_SIZE];
+ u8 avif[19];
+ struct edid *edid;
+ unsigned int gen2_write_burst:1;
+ enum sii8620_mt_state mt_state;
+ struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+ struct list_head node;
+ u8 reg[4];
+ u8 ret;
+ sii8620_mt_msg_cb send;
+ sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+ 0x39, /* Main System */
+ 0x3d, /* TDM and HSIC */
+ 0x49, /* TMDS Receiver, MHL EDID */
+ 0x4d, /* eMSC, HDCP, HSIC */
+ 0x5d, /* MHL Spec */
+ 0x64, /* MHL CBUS */
+ 0x59, /* Hardware TPI (Transmitter Programming Interface) */
+ 0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data = addr;
+ struct i2c_msg msg[] = {
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = 1,
+ .buf = &data
+ },
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags | I2C_M_RD,
+ .len = len,
+ .buf = buf
+ },
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 2) {
+ dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+ addr, len, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+ u8 ret;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+ int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = len + 1,
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ if (len > 1) {
+ msg.buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!msg.buf) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+ memcpy(msg.buf + 1, buf, len);
+ } else {
+ msg.buf = data;
+ msg.buf[1] = *buf;
+ }
+
+ msg.buf[0] = addr;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 1) {
+ dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+ addr, len, buf, ret);
+ ctx->error = ret ?: -EIO;
+ }
+
+ if (len > 1)
+ kfree(msg.buf);
+}
+
+#define sii8620_write(ctx, addr, arr...) \
+({\
+ u8 d[] = { arr }; \
+ sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+ const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+ static const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+ val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+ sii8620_write(ctx, addr, val);
+}
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg, *n;
+
+ list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+ list_del(&msg->node);
+ kfree(msg);
+ }
+ ctx->mt_state = MT_STATE_READY;
+}
+
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+ if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+ return;
+
+ if (ctx->mt_state == MT_STATE_DONE) {
+ ctx->mt_state = MT_STATE_READY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+ node);
+ if (msg->recv)
+ msg->recv(ctx, msg);
+ list_del(&msg->node);
+ kfree(msg);
+ }
+
+ if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+ return;
+
+ ctx->mt_state = MT_STATE_BUSY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ if (msg->send)
+ msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ switch (msg->reg[0]) {
+ case MHL_WRITE_STAT:
+ case MHL_SET_INT:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_WRITE_STAT);
+ break;
+ case MHL_MSC_MSG:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_MSC_MSG);
+ break;
+ default:
+ dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+ msg->reg[0]);
+ }
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+ if (!msg)
+ ctx->error = -ENOMEM;
+ else
+ list_add_tail(&msg->node, &ctx->mt_queue);
+
+ return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = cmd;
+ msg->reg[1] = arg1;
+ msg->reg[2] = arg2;
+ msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+ REG_EDID_CTRL, ctrl,
+ REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+ );
+}
+
+/* copy src to dst and set changed bits in src */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+ while (--count >= 0) {
+ *src ^= *dst;
+ *dst++ ^= *src++;
+ }
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ static const char * const sink_str[] = {
+ [SINK_NONE] = "NONE",
+ [SINK_HDMI] = "HDMI",
+ [SINK_DVI] = "DVI"
+ };
+
+ u8 dcap[MHL_DCAP_SIZE];
+ char sink_name[20];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+ dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+ dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+ if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ return;
+
+ sii8620_fetch_edid(ctx);
+ if (!ctx->edid) {
+ dev_err(ctx->dev, "Cannot fetch EDID\n");
+ sii8620_mhl_disconnected(ctx);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(ctx->edid))
+ ctx->sink_type = SINK_HDMI;
+ else
+ ctx->sink_type = SINK_DVI;
+
+ drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+ dev_info(dev, "detected sink(type: %s): %s\n",
+ sink_str[ctx->sink_type], sink_name);
+ sii8620_set_upstream_edid(ctx);
+ sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+ MHL_XDC_SIZE);
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+ | BIT_INTR9_EDID_ERROR,
+ REG_EDID_CTRL, ctrl,
+ REG_EDID_FIFO_ADDR, 0
+ );
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ sii8620_mr_xdevcap(ctx);
+ else
+ sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+ msg->send = sii8620_mt_read_devcap_send;
+ msg->recv = sii8620_mt_read_devcap_recv;
+}
+
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+ u8 lm_ddc, ddc_cmd, int3, cbus;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+
+ sii8620_readb(ctx, REG_CBUS_STATUS);
+ lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+ ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, 0,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_HDCP2X_POLL_CS, 0x71,
+ REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+ REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+ );
+
+ for (i = 0; i < 256; ++i) {
+ u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+ if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+ break;
+ sii8620_write(ctx, REG_DDC_STATUS,
+ BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+ }
+
+ sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+#define FETCH_SIZE 16
+ for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ sii8620_write_seq(ctx,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+ REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+ );
+ sii8620_write_seq(ctx,
+ REG_DDC_SEGM, fetched >> 8,
+ REG_DDC_OFFSET, fetched & 0xff,
+ REG_DDC_DIN_CNT1, FETCH_SIZE,
+ REG_DDC_DIN_CNT2, 0,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+ do {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if (int3 & BIT_DDC_CMD_DONE)
+ break;
+
+ if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ } while (1);
+
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+ u8 ext = ((struct edid *)edid)->extensions;
+
+ if (ext) {
+ u8 *new_edid;
+
+ edid_len += ext * EDID_LENGTH;
+ new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+ if (!new_edid) {
+ kfree(edid);
+ ctx->error = -ENOMEM;
+ return;
+ }
+ edid = new_edid;
+ }
+ }
+
+ if (fetched + FETCH_SIZE == edid_len)
+ sii8620_write(ctx, REG_INTR3, int3);
+ }
+
+ sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+ kfree(ctx->edid);
+ ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+ | BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL3, 0x00,
+ REG_PKT_FILTER_0, 0xFF,
+ REG_PKT_FILTER_1, 0xFF,
+ REG_ALICE0_BW_I2C, 0x06
+ );
+
+ sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+ BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_EDID_FIFO_ADDR, 0,
+ );
+
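+ /* Load the EDID read from the sink (base block plus extensions) into
+  * the chip's EDID FIFO so it can be presented to the upstream source.
+  */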
+ sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+ (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+ REG_INTR9_MASK, 0
+ );
+}
+
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+ static const struct {
+ unsigned int rate;
+ u8 div;
+ u8 tp1;
+ } rates[] = {
+ { 19200, 0x04, 0x53 },
+ { 20000, 0x04, 0x62 },
+ { 24000, 0x05, 0x75 },
+ { 30000, 0x06, 0x92 },
+ { 38400, 0x0c, 0xbc },
+ };
+ unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+ int i;
+
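+ /* Pick the lowest supported rate that is not below the crystal clock
+  * (or the highest entry if the clock is faster than all of them);
+  * warn when the match is not exact.
+  */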
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+ if (rate <= rates[i].rate)
+ break;
+
+ if (rate != rates[i].rate)
+ dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+ rate, rates[i].rate);
+
+ sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+ sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+ usleep_range(10000, 20000);
+ return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+ clk_disable_unprepare(ctx->clk_xtal);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(5000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+ );
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+ if (ctx->mode != CM_MHL1) {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0x0,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ }
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+ u8 uninitialized_var(val);
+
+ sii8620_write_seq_static(ctx,
+ REG_TPI_INTR_EN, 0,
+ REG_HDCP2X_INTR0_MASK, 0,
+ REG_TPI_COPP_DATA2, 0,
+ REG_TPI_INTR_ST0, ~0,
+ );
+
+ switch (ctx->sink_type) {
+ case SINK_DVI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE;
+ break;
+ case SINK_HDMI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE
+ | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+ break;
+ default:
+ return;
+ }
+
+ sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+ | BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+ REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+ | BIT_VID_OVRRD_M1080P_OVRRD,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 0x1,
+ REG_MHLTX_CTL6, 0xa0,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ );
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL |
+ MHL_DST_LM_PATH_ENABLED);
+
+ sii8620_set_auto_zone(ctx);
+
+ sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+
+ sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+ if (ctx->mode < CM_MHL3)
+ sii8620_stop_video(ctx);
+
+ switch (ctx->sink_type) {
+ case SINK_HDMI:
+ sii8620_start_hdmi(ctx);
+ break;
+ case SINK_DVI:
+ default:
+ break;
+ }
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+ REG_INTR8_MASK, 0
+ );
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+ | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+ | BIT_HPD_CTRL_HPD_HIGH,
+ );
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+ REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+ | BIT_MHL_EST_INT
+ | BIT_NOT_MHL_EST_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_RGND_READY_INT,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_DP_CTL1, 0xA2,
+ REG_MHL_DP_CTL2, 0x03,
+ REG_MHL_DP_CTL3, 0x35,
+ REG_MHL_DP_CTL5, 0x02,
+ REG_MHL_DP_CTL6, 0x02,
+ REG_MHL_DP_CTL7, 0x03,
+ REG_COC_CTLC, 0xFF,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+ REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+ | BIT_COC_CALIBRATION_DONE,
+ REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+ | BIT_CBUS_CMD_ABORT,
+ REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+ | BIT_CBUS_HPD_CHG
+ | BIT_CBUS_MSC_MR_WRITE_STAT
+ | BIT_CBUS_MSC_MR_MSC_MSG
+ | BIT_CBUS_MSC_MR_WRITE_BURST
+ | BIT_CBUS_MSC_MR_SET_INT
+ | BIT_CBUS_MSC_MT_DONE_NACK
+ );
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+ if (ctx->mode == CM_MHL3)
+ sii8620_write_seq_static(ctx,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+ REG_EMSCINTRMASK1,
+ BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_HDCP2X_INTR0, 0xFF,
+ REG_INTR1, 0xFF,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+ | BIT_SYS_CTRL1_TX_CTRL_HDMI
+ );
+}
+
+#define SII8620_MHL_VERSION 0x32
+#define SII8620_SCRATCHPAD_SIZE 16
+#define SII8620_INT_STAT_SIZE 0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+ static const u8 devcap[MHL_DCAP_SIZE] = {
+ [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+ [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+ [MHL_DCAP_ADOPTER_ID_H] = 0x01,
+ [MHL_DCAP_ADOPTER_ID_L] = 0x41,
+ [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+ | MHL_DCAP_VID_LINK_PPIXEL
+ | MHL_DCAP_VID_LINK_16BPP,
+ [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+ [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+ [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+ [MHL_DCAP_BANDWIDTH] = 0x0f,
+ [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+ | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT,
+ [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+ [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+ };
+ static const u8 xdcap[MHL_XDC_SIZE] = {
+ [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+ | MHL_XDC_ECBUS_S_8BIT,
+ [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+ | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+ [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+ [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+ };
+
+ sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+ sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+ );
+
+ sii8620_peer_specific_init(ctx);
+
+ sii8620_disable_hpd(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_TMDS0_CCTRL1, 0x90,
+ REG_TMDS_CLK_EN, 0x01,
+ REG_TMDS_CH_EN, 0x11,
+ REG_BGR_BIAS, 0x87,
+ REG_ALICE0_ZONE_CTRL, 0xE8,
+ REG_ALICE0_MODE_CTRL, 0x04,
+ );
+ sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+ sii8620_write_seq_static(ctx,
+ REG_TPI_HW_OPT3, 0x76,
+ REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+ REG_TPI_DTD_B2, 79,
+ );
+ sii8620_set_dev_cap(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_TIMEOUT, 100,
+ REG_MDT_XMIT_CTRL, 0x03,
+ REG_MDT_XFIFO_STAT, 0x00,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_CBUS_LINK_CTRL_8, 0x1D,
+ );
+
+ sii8620_start_gen2_write_burst(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_BIST_CTRL, 0x00,
+ REG_COC_CTL1, 0x10,
+ REG_COC_CTL2, 0x18,
+ REG_COC_CTLF, 0x07,
+ REG_COC_CTL11, 0xF8,
+ REG_COC_CTL17, 0x61,
+ REG_COC_CTL18, 0x46,
+ REG_COC_CTL19, 0x15,
+ REG_COC_CTL1A, 0x01,
+ REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+ REG_MHL_COC_CTL4, 0x2D,
+ REG_MHL_COC_CTL5, 0xF9,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ );
+ sii8620_disable_gen2_write_burst(ctx);
+
+ /* currently MHL3 is not supported, so we force version to 0 */
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+ MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+ | MHL_DST_CONN_POW_STAT);
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+ if (ctx->mode == mode)
+ return;
+
+ ctx->mode = mode;
+
+ switch (mode) {
+ case CM_MHL1:
+ sii8620_write_seq_static(ctx,
+ REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN,
+ REG_COC_INTR_MASK, 0
+ );
+ break;
+ case CM_MHL3:
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_COC_CTL0, 0x40,
+ REG_MHL_COC_CTL1, 0x07
+ );
+ break;
+ case CM_DISCONNECTED:
+ break;
+ default:
+ dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+ break;
+ }
+
+ sii8620_set_auto_zone(ctx);
+
+ if (mode != CM_MHL1)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_DP_CTL0, 0xBC,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x39,
+ REG_MHL_DP_CTL2, 0x2A,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x08
+ );
+}
+
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+ sii8620_disable_gen2_write_burst(ctx);
+ sii8620_stop_video(ctx);
+ msleep(50);
+ sii8620_cbus_reset(ctx);
+ sii8620_set_mode(ctx, CM_DISCONNECTED);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL0, 0x40,
+ REG_CBUS3_CNVT, 0x84,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL0, 0x40,
+ REG_HRXCTRL3, 0x07,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x3F,
+ REG_MHL_DP_CTL2, 0x2F,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x03
+ );
+ sii8620_disable_hpd(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_COC_CTL1, 0x07,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_DISC_CTRL8, 0x00,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_INT_CTRL, 0x00,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ REG_DISC_CTRL1, 0x25,
+ REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+ REG_MDT_INT_1, 0xff,
+ REG_MDT_INT_1_MASK, 0x00,
+ REG_MDT_INT_0, 0xff,
+ REG_MDT_INT_0_MASK, 0x00,
+ REG_COC_INTR, 0xff,
+ REG_COC_INTR_MASK, 0x00,
+ REG_TRXINTH, 0xff,
+ REG_TRXINTMH, 0x00,
+ REG_CBUS_INT_0, 0xff,
+ REG_CBUS_INT_0_MASK, 0x00,
+ REG_CBUS_INT_1, 0xff,
+ REG_CBUS_INT_1_MASK, 0x00,
+ REG_EMSCINTR, 0xff,
+ REG_EMSCINTRMASK, 0x00,
+ REG_EMSCINTR1, 0xff,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_INTR8, 0xff,
+ REG_INTR8_MASK, 0x00,
+ REG_TPI_INTR_ST0, 0xff,
+ REG_TPI_INTR_EN, 0x00,
+ REG_HDCP2X_INTR0, 0xff,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_INTR9, 0xff,
+ REG_INTR9_MASK, 0x00,
+ REG_INTR3, 0xff,
+ REG_INTR3_MASK, 0x00,
+ REG_INTR5, 0xff,
+ REG_INTR5_MASK, 0x00,
+ REG_INTR2, 0xff,
+ REG_INTR2_MASK, 0x00,
+ );
+ memset(ctx->stat, 0, sizeof(ctx->stat));
+ memset(ctx->xstat, 0, sizeof(ctx->xstat));
+ memset(ctx->devcap, 0, sizeof(ctx->devcap));
+ memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->cbus_status = 0;
+ ctx->sink_type = SINK_NONE;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+ sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+ );
+ sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+ if (stat & VAL_CBUS_MHL_DISCON)
+ sii8620_mhl_disconnected(ctx);
+
+ if (stat & BIT_RGND_READY_INT) {
+ u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+ if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+ sii8620_mhl_discover(ctx);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_NOMHL_EST
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_NOT_MHL_EST_INT
+ );
+ }
+ }
+ if (stat & BIT_MHL_EST_INT)
+ sii8620_mhl_init(ctx);
+
+ sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+ if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+ dev_dbg(ctx->dev, "HAWB idle\n");
+
+ sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+ sii8620_set_mode(ctx, CM_MHL1);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+ }
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL
+ | MHL_DST_LM_PATH_ENABLED);
+ sii8620_mt_read_devcap(ctx, false);
+ } else {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL);
+ }
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+ u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+ sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+ sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+ sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+ if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_changed_dcap(ctx);
+
+ if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+ u8 ints[MHL_INT_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+ sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+
+ if (list_empty(&ctx->mt_queue)) {
+ dev_err(dev, "unexpected MSC MT response\n");
+ return NULL;
+ }
+
+ return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+ if (!msg)
+ return;
+
+ msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ u8 buf[2];
+
+ if (!msg)
+ return;
+
+ sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+ switch (buf[0]) {
+ case MHL_MSC_MSG_RAPK:
+ msg->ret = buf[1];
+ ctx->mt_state = MT_STATE_DONE;
+ break;
+ default:
+ dev_err(ctx->dev, "%s message type %d,%d not supported",
+ __func__, buf[0], buf[1]);
+ }
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+ if (stat & ~BIT_CBUS_HPD_CHG)
+ sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+ if (stat & BIT_CBUS_HPD_CHG) {
+ u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+ sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+ } else {
+ stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ }
+ ctx->cbus_status = cbus_stat;
+ }
+
+ if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+ sii8620_msc_mr_write_stat(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_SET_INT)
+ sii8620_msc_mr_set_int(ctx);
+
+ if (stat & BIT_CBUS_MSC_MT_DONE)
+ sii8620_msc_mt_done(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+ sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+ sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+ sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+ sii8620_write(ctx, REG_INTR9, stat);
+
+ if (stat & BIT_INTR9_DEVCAP_DONE)
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+ REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+ );
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+ BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+ sii8620_stop_video(ctx);
+
+ sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+ if (stat & BIT_INTR_SCDT_CHANGE) {
+ u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_scdt_low(ctx);
+ }
+
+ sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+ u8 vsif[11];
+
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL |
+ BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+ ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR8)
+ & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+ sii8620_write(ctx, REG_INTR8, stat);
+
+ if (stat & BIT_CEA_NEW_VSI)
+ sii8620_new_vsi(ctx);
+
+ if (stat & BIT_CEA_NEW_AVI)
+ sii8620_new_avi(ctx);
+
+ if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+ sii8620_start_video(ctx);
+}
+
+/* endian agnostic, non-volatile version of test_bit */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+ return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+ static const struct {
+ int bit;
+ void (*handler)(struct sii8620 *ctx);
+ } irq_vec[] = {
+ { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+ { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+ { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+ { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+ { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+ };
+ struct sii8620 *ctx = data;
+ u8 stats[LEN_FAST_INTR_STAT];
+ int i, ret;
+
+ mutex_lock(&ctx->lock);
+
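+ /* Read the aggregated fast interrupt status and call the handler of
+  * every source that reports a pending interrupt.
+  */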
+ sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+ for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+ if (sii8620_test_bit(irq_vec[i].bit, stats))
+ irq_vec[i].handler(ctx);
+
+ sii8620_mt_work(ctx);
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+ sii8620_mhl_disconnected(ctx);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+ u8 ver[5];
+ int ret;
+
+ ret = sii8620_hw_on(ctx);
+ if (ret) {
+ dev_err(dev, "Error powering on, %d.\n", ret);
+ return;
+ }
+ sii8620_hw_reset(ctx);
+
+ sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+ ver[3], ver[2], ver[4]);
+
+ sii8620_write(ctx, REG_DPD,
+ BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+ sii8620_xtal_set_rate(ctx);
+ sii8620_disconnect(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+ | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+ REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+ );
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool ret = false;
+ int max_clock = 74250;
+
+ mutex_lock(&ctx->lock);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ goto out;
+
+ if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+ max_clock = 300000;
+
+ ret = mode->clock <= max_clock;
+
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sii8620 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->mt_queue);
+
+ ctx->clk_xtal = devm_clk_get(dev, "xtal");
+ if (IS_ERR(ctx->clk_xtal)) {
+ dev_err(dev, "failed to get xtal clock from DT\n");
+ return PTR_ERR(ctx->clk_xtal);
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii8620_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii8620", ctx);
+ if (ret)
+ return ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "cvcc10";
+ ctx->supplies[1].supply = "iovcc18";
+ ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii8620_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii8620_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+ struct sii8620 *ctx = i2c_get_clientdata(client);
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ drm_bridge_remove(&ctx->bridge);
+ sii8620_hw_off(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+ { .compatible = "sil,sii8620" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+ { "sii8620", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+static struct i2c_driver sii8620_driver = {
+ .driver = {
+ .name = "sii8620",
+ .of_match_table = of_match_ptr(sii8620_dt_match),
+ },
+ .probe = sii8620_probe,
+ .remove = sii8620_remove,
+ .id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 000000000000..6ff616a4f6ce
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL 0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH 0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL 0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH 0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV 0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510 0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1 0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD 0x000b
+#define BIT_DPD_PWRON_PLL BIT(7)
+#define BIT_DPD_PDNTX12 BIT(6)
+#define BIT_DPD_PDNRX12 BIT(5)
+#define BIT_DPD_OSC_EN BIT(4)
+#define BIT_DPD_PWRON_HSIC BIT(3)
+#define BIT_DPD_PDIDCK_N BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL 0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL BIT(4)
+#define BIT_DCTL_TRANSCODE BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST 0x000e
+#define BIT_PWD_SRST_COC_DOC_RST BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST BIT(4)
+#define BIT_PWD_SRST_CBUS_RST BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1)
+#define BIT_PWD_SRST_SW_RST BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1 0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL 0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE 0x004a
+#define BIT_VID_MODE_M1080P BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD 0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR 0x0057
+#define REG_PAGE7_ADDR 0x0058
+#define REG_PAGE8_ADDR 0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT 0x005f
+#define LEN_FAST_INTR_STAT 7
+#define BIT_FAST_INTR_STAT_TIMR 8
+#define BIT_FAST_INTR_STAT_INT2 9
+#define BIT_FAST_INTR_STAT_DDC 10
+#define BIT_FAST_INTR_STAT_SCDT 11
+#define BIT_FAST_INTR_STAT_INFR 13
+#define BIT_FAST_INTR_STAT_EDID 14
+#define BIT_FAST_INTR_STAT_HDCP 15
+#define BIT_FAST_INTR_STAT_MSC 16
+#define BIT_FAST_INTR_STAT_MERR 17
+#define BIT_FAST_INTR_STAT_G2WB 18
+#define BIT_FAST_INTR_STAT_G2WB_ERR 19
+#define BIT_FAST_INTR_STAT_DISC 28
+#define BIT_FAST_INTR_STAT_BLOCK 30
+#define BIT_FAST_INTR_STAT_LTRN 31
+#define BIT_FAST_INTR_STAT_HDCP2 32
+#define BIT_FAST_INTR_STAT_TDM 42
+#define BIT_FAST_INTR_STAT_COC 51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1 0x006e
+#define BIT_CTRL1_GPIO_I_8 BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8 BIT(4)
+#define BIT_CTRL1_GPIO_I_7 BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7 BIT(2)
+#define BIT_CTRL1_GPIO_I_6 BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6 BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL 0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP BIT(7)
+#define BIT_INT_CTRL_INTR_OD BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE 0x0070
+#define BIT_INTR_STATE_INTR_STATE BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1 0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2 0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3 0x0073
+#define BIT_DDC_CMD_DONE BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5 0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK 0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK 0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK 0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK 0x0078
+#define BIT_INTR_SCDT_CHANGE BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL 0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1 BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0 BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL 0x007a
+#define BIT_CTRL_GPIO_I_5 BIT(7)
+#define BIT_CTRL_GPIO_OEN_5 BIT(6)
+#define BIT_CTRL_GPIO_I_4 BIT(5)
+#define BIT_CTRL_GPIO_OEN_4 BIT(4)
+#define BIT_CTRL_GPIO_I_3 BIT(3)
+#define BIT_CTRL_GPIO_OEN_3 BIT(2)
+#define BIT_CTRL_GPIO_I_2 BIT(1)
+#define BIT_CTRL_GPIO_OEN_2 BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7 0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8 0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK 0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK 0x007e
+#define BIT_CEA_NEW_VSI BIT(2)
+#define BIT_CEA_NEW_AVI BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL 0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4 0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL 0x00bb
+#define BIT_RXBIST_VGB_EN BIT(7)
+#define BIT_TXBIST_VGB_EN BIT(6)
+#define BIT_BIST_START_SEL BIT(5)
+#define BIT_BIST_START_BIT BIT(4)
+#define BIT_BIST_ALWAYS_ON BIT(3)
+#define BIT_BIST_TRANS BIT(2)
+#define BIT_BIST_RESET BIT(1)
+#define BIT_BIST_EN BIT(0)
+
+/* BIST TEST SEL, default value: 0x00 */
+#define REG_BIST_TEST_SEL 0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE 0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0 0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1 0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2 0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN 0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC 0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW BIT(2)
+#define BIT_LM_DDC_DDC_GRANT BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL 0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL BIT(6)
+#define BIT_DDC_MANUAL_DSDA BIT(5)
+#define BIT_DDC_MANUAL_DSCL BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR 0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR 0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM 0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET 0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1 0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2 0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS 0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD 0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD 0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA 0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT 0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT 0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL 0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR 0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR 0x00fc
+#define REG_PAGE2_ADDR 0x00fd
+#define REG_PAGE3_ADDR 0x00fe
+#define REG_HW_TPI_ADDR 0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST 0x0100
+#define BIT_UTSRST_FC_SRST BIT(5)
+#define BIT_UTSRST_KEEPER_SRST BIT(4)
+#define BIT_UTSRST_HTX_SRST BIT(3)
+#define BIT_UTSRST_TRX_SRST BIT(2)
+#define BIT_UTSRST_TTX_SRST BIT(1)
+#define BIT_UTSRST_HRX_SRST BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3 0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL 0x0111
+#define REG_HRXINTH 0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB 0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS 0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS 0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS 0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL 0x0136
+#define BIT_TTXINTL_TTX_INTR7 BIT(7)
+#define BIT_TTXINTL_TTX_INTR6 BIT(6)
+#define BIT_TTXINTL_TTX_INTR5 BIT(5)
+#define BIT_TTXINTL_TTX_INTR4 BIT(4)
+#define BIT_TTXINTL_TTX_INTR3 BIT(3)
+#define BIT_TTXINTL_TTX_INTR2 BIT(2)
+#define BIT_TTXINTL_TTX_INTR1 BIT(1)
+#define BIT_TTXINTL_TTX_INTR0 BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH 0x0137
+#define BIT_TTXINTH_TTX_INTR15 BIT(7)
+#define BIT_TTXINTH_TTX_INTR14 BIT(6)
+#define BIT_TTXINTH_TTX_INTR13 BIT(5)
+#define BIT_TTXINTH_TTX_INTR12 BIT(4)
+#define BIT_TTXINTH_TTX_INTR11 BIT(3)
+#define BIT_TTXINTH_TTX_INTR10 BIT(2)
+#define BIT_TTXINTH_TTX_INTR9 BIT(1)
+#define BIT_TTXINTH_TTX_INTR8 BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL 0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS 0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS 0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS 0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2 0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL 0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH 0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH 0x0166
+
+/* HSIC TX Control, default value: 0x00 */
+#define REG_HTXCTRL 0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL 0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH 0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER 0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC 0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13 0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14 0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15 0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50 0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0 0x01ec
+#define REG_FCINTR1 0x01ed
+#define REG_FCINTR2 0x01ee
+#define REG_FCINTR3 0x01ef
+#define REG_FCINTR4 0x01f0
+#define REG_FCINTR5 0x01f1
+#define REG_FCINTR6 0x01f2
+#define REG_FCINTR7 0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL 0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1 0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN 0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN 0x0212
+#define BIT_TMDS_CH_EN_CH0_EN BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS 0x0215
+#define BIT_BGR_BIAS_BGR_EN BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C 0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL 0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL 0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6 0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL 0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0 0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1 0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3 0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0 0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2 0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3 0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER 0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9 0x02e0
+#define BIT_INTR9_EDID_ERROR BIT(6)
+#define BIT_INTR9_EDID_DONE BIT(5)
+#define BIT_INTR9_DEVCAP_DONE BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK 0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START 0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL 0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR 0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA 0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON 0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA 0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT 0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA 0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW 0x02f3
+#define REG_TX_IP_BIST_INST_HIGH 0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW 0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH 0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW 0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH 0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL 0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ BIT(1)
+#define BIT_GENCTL_EMSC_EN BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT 0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL 0x031a
+#define REG_EMSCRFIFOBCNTH 0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT 0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT 0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR 0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK 0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT 0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT 0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1 0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1 0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL 0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0 0x0331
+#define BIT_MHL_DP_CTL0_DP_OE BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE 0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1 0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2 0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3 0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4 0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5 0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0 0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2 0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0 0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1 0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0 0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1 0x0343
+#define BIT_MHL_COC_CTL1_COC_EN BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3 0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4 0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5 0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0 0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6 0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7 0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1 0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL 0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS 0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_ 0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0 0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK 0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0 0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1 0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL 0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K 0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN 0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT 0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1 0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0 0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS 0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL 0x03e0
+#define BIT_M3_CTRL_H2M_SWRST BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3)
+#define BIT_M3_CTRL_M3AV_EN BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_M3AV_EN \
+ | BIT_M3_CTRL_ENC_TMDS \
+ | BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL 0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0)
+
+#define REG_M3_POSTM 0x03e2
+#define MSK_M3_POSTM_RRP_DECODE 0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL 0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN 0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0 0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0 0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0 0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0 0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0 0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0 0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2 0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED 0x01
+#define VAL_TPI_QUAN_RANGE_FULL 0x02
+#define VAL_TPI_FORMAT_RGB 0x00
+#define VAL_TPI_FORMAT_YCBCR444 0x01
+#define VAL_TPI_FORMAT_YCBCR422 0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+ (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT 0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7)
+#define BIT_TPI_INPUT_ENDITHER BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT 0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT 0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM 0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC 0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1 0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2 0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN 0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0 0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS 0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1 0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2 0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3 0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL 0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0 0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0 0x0700
+#define REG_COC_STAT_1 0x0701
+#define REG_COC_STAT_2 0x0702
+#define REG_COC_STAT_3 0x0703
+#define REG_COC_STAT_4 0x0704
+#define REG_COC_STAT_5 0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0 0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1 0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2 0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3 0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6 0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7 0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9 0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA 0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB 0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC 0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD 0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE 0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF 0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11 0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14 0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15 0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR 0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK 0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0)
+#define BIT_COC_CALIBRATION_DONE BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0 0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17 0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18 0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19 0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A 0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8 0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9 0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4 0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0 0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6 0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7 0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8 0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9 0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA 0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE 0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK 0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK 0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK 0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK 0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT 0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT 0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL 0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT 0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL 0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+
+/* MDT Transmit Write Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT 0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT 0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT 0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0 0x058c
+#define BIT_MDT_RFIFO_DATA_RDY BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2)
+#define BIT_MDT_XFIFO_EMPTY BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK 0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1 0x058e
+#define BIT_MDT_RCV_TIMEOUT BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1)
+#define BIT_MDT_RCV_SM_ERROR BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK 0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID 0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS 0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0 0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | (pup_idle << 4))
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+ | BIT_CBUS_MHL3_DISCON_INT \
+ | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 44d476ea6d2e..de9ffb49e9f6 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -908,7 +908,7 @@ static int tc_main_link_setup(struct tc_data *tc)
goto err_dpcd_read;
if (tmp[0] != tc->assr) {
- dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+ dev_dbg(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
tc->assr);
/* trying with disabled scrambler */
tc->link.scrambler_dis = 1;
@@ -1038,12 +1038,6 @@ err:
return ret;
}
-static enum drm_connector_status
-tc_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void tc_bridge_pre_enable(struct drm_bridge *bridge)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1168,7 +1162,6 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
static const struct drm_connector_funcs tc_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tc_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
new file mode 100644
index 000000000000..b054ea349952
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2016 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct tfp410 {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct i2c_adapter *ddc;
+
+ struct device *dev;
+};
+
+static inline struct tfp410 *
+drm_bridge_to_tfp410(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tfp410, bridge);
+}
+
+static inline struct tfp410 *
+drm_connector_to_tfp410(struct drm_connector *connector)
+{
+ return container_of(connector, struct tfp410, connector);
+}
+
+static int tfp410_get_modes(struct drm_connector *connector)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!dvi->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, dvi->ddc);
+ if (!edid) {
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
+ goto fallback;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ return drm_add_edid_modes(connector, edid);
+fallback:
+ /* No EDID, fallback on the XGA standard modes */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anything can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
+ .get_modes = tfp410_get_modes,
+};
+
+static enum drm_connector_status
+tfp410_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tfp410 *dvi = drm_connector_to_tfp410(connector);
+
+ if (dvi->ddc) {
+ if (drm_probe_ddc(dvi->ddc))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs tfp410_con_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = tfp410_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tfp410_attach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ dev_err(dvi->dev, "Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&dvi->connector,
+ &tfp410_con_helper_funcs);
+ ret = drm_connector_init(bridge->dev, &dvi->connector,
+ &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&dvi->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+};
+
+static int tfp410_get_connector_ddc(struct tfp410 *dvi)
+{
+ struct device_node *ep = NULL, *connector_node = NULL;
+ struct device_node *ddc_phandle = NULL;
+ int ret = 0;
+
+ /* port@1 is the connector node */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 1, -1);
+ if (!ep)
+ goto fail;
+
+ connector_node = of_graph_get_remote_port_parent(ep);
+ if (!connector_node)
+ goto fail;
+
+ ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
+ if (!ddc_phandle)
+ goto fail;
+
+ dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
+ if (dvi->ddc)
+ dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
+ else
+ ret = -EPROBE_DEFER;
+
+fail:
+ of_node_put(ep);
+ of_node_put(connector_node);
+ of_node_put(ddc_phandle);
+ return ret;
+}
+
+static int tfp410_init(struct device *dev)
+{
+ struct tfp410 *dvi;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device-tree data is missing\n");
+ return -ENXIO;
+ }
+
+ dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
+ if (!dvi)
+ return -ENOMEM;
+ dev_set_drvdata(dev, dvi);
+
+ dvi->bridge.funcs = &tfp410_bridge_funcs;
+ dvi->bridge.of_node = dev->of_node;
+ dvi->dev = dev;
+
+ ret = tfp410_get_connector_ddc(dvi);
+ if (ret)
+ goto fail;
+
+ ret = drm_bridge_add(&dvi->bridge);
+ if (ret) {
+ dev_err(dev, "drm_bridge_add() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ i2c_put_adapter(dvi->ddc);
+ return ret;
+}
+
+static int tfp410_fini(struct device *dev)
+{
+ struct tfp410 *dvi = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&dvi->bridge);
+
+ if (dvi->ddc)
+ i2c_put_adapter(dvi->ddc);
+
+ return 0;
+}
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+ return tfp410_init(&pdev->dev);
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+ return tfp410_fini(&pdev->dev);
+}
+
+static const struct of_device_id tfp410_match[] = {
+ { .compatible = "ti,tfp410" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tfp410_match);
+
+struct platform_driver tfp410_platform_driver = {
+ .probe = tfp410_probe,
+ .remove = tfp410_remove,
+ .driver = {
+ .name = "tfp410-bridge",
+ .of_match_table = tfp410_match,
+ },
+};
+
+#if IS_ENABLED(CONFIG_I2C)
+/* There is currently no i2c functionality. */
+static int tfp410_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int reg;
+
+ if (!client->dev.of_node ||
+ of_property_read_u32(client->dev.of_node, "reg", &reg)) {
+ dev_err(&client->dev,
+ "Can't get i2c reg property from device-tree\n");
+ return -ENXIO;
+ }
+
+ return tfp410_init(&client->dev);
+}
+
+static int tfp410_i2c_remove(struct i2c_client *client)
+{
+ return tfp410_fini(&client->dev);
+}
+
+static const struct i2c_device_id tfp410_i2c_ids[] = {
+ { "tfp410", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
+
+static struct i2c_driver tfp410_i2c_driver = {
+ .driver = {
+ .name = "tfp410",
+ .of_match_table = of_match_ptr(tfp410_match),
+ },
+ .id_table = tfp410_i2c_ids,
+ .probe = tfp410_i2c_probe,
+ .remove = tfp410_i2c_remove,
+};
+#endif /* IS_ENABLED(CONFIG_I2C) */
+
+static struct {
+ uint i2c:1;
+ uint platform:1;
+} tfp410_registered_driver;
+
+static int __init tfp410_module_init(void)
+{
+ int ret;
+
+#if IS_ENABLED(CONFIG_I2C)
+ ret = i2c_add_driver(&tfp410_i2c_driver);
+ if (ret)
+ pr_err("%s: registering i2c driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.i2c = 1;
+#endif
+
+ ret = platform_driver_register(&tfp410_platform_driver);
+ if (ret)
+ pr_err("%s: registering platform driver failed: %d",
+ __func__, ret);
+ else
+ tfp410_registered_driver.platform = 1;
+
+ if (tfp410_registered_driver.i2c ||
+ tfp410_registered_driver.platform)
+ return 0;
+
+ return ret;
+}
+module_init(tfp410_module_init);
+
+static void __exit tfp410_module_exit(void)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ if (tfp410_registered_driver.i2c)
+ i2c_del_driver(&tfp410_i2c_driver);
+#endif
+ if (tfp410_registered_driver.platform)
+ platform_driver_unregister(&tfp410_platform_driver);
+}
+module_exit(tfp410_module_exit);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("TI TFP410 DVI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 6c76d125995b..d893ea21a359 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -126,9 +126,7 @@ static const struct file_operations cirrus_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = cirrus_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index daecf1ad76a4..3a6309d7d8e4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
{
struct drm_device *dev = afbdev->helper.dev;
struct cirrus_device *cdev = dev->dev_private;
- u32 bpp, depth;
+ u32 bpp;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 76bcb43e7c06..2c3c0d4072ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
+ u32 bpp;
int ret;
- u32 bpp, depth;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 17c915d9a03e..9a4a27c1afd2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -498,12 +498,6 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
return NULL;
}
-static enum drm_connector_status cirrus_vga_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void cirrus_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
@@ -517,7 +511,6 @@ static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs
static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = cirrus_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = cirrus_connector_destroy,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 5e7e63ce7bce..d6da848f7c6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_populate = cirrus_ttm_tt_populate,
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
.move = NULL,
.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 23739609427d..60697482b94c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -74,6 +76,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
+ kref_init(&state->ref);
+
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
@@ -215,22 +219,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
- * drm_atomic_state_free - free all memory for an atomic state
- * @state: atomic state to deallocate
+ * __drm_atomic_state_free - free all memory for an atomic state
+ * @ref: kref of the atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, crtcs and connectors.
*/
-void drm_atomic_state_free(struct drm_atomic_state *state)
+void __drm_atomic_state_free(struct kref *ref)
{
- struct drm_device *dev;
- struct drm_mode_config *config;
-
- if (!state)
- return;
-
- dev = state->dev;
- config = &dev->mode_config;
+ struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+ struct drm_mode_config *config = &state->dev->mode_config;
drm_atomic_state_clear(state);
@@ -243,7 +241,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
}
-EXPORT_SYMBOL(drm_atomic_state_free);
+EXPORT_SYMBOL(__drm_atomic_state_free);
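For orientation, here is a minimal caller-side sketch of the reference-counted lifetime this hunk introduces; it assumes the drm_atomic_state_get()/drm_atomic_state_put() helpers added alongside this kref conversion and is not part of the patch itself:

/* Sketch only: callers now drop their reference unconditionally instead
 * of calling drm_atomic_state_free() on selected paths. */
static void example_commit_and_put(struct drm_device *dev,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;

	state = drm_atomic_state_alloc(dev);	/* refcount starts at one */
	if (!state)
		return;
	state->acquire_ctx = ctx;

	/* ... build up the state, then commit it ... */
	if (drm_atomic_commit(state))
		DRM_DEBUG_ATOMIC("commit failed\n");

	drm_atomic_state_put(state);		/* always drop our reference */
}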
/**
* drm_atomic_get_crtc_state - get crtc state
@@ -292,6 +290,23 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc, s64 __user *fence_ptr)
+{
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ s64 __user *fence_ptr;
+
+ fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -420,18 +435,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
ssize_t expected_size,
bool *replaced)
{
- struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
+ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length)
+
+ if (expected_size > 0 && expected_size != new_blob->length) {
+ drm_property_unreference_blob(new_blob);
return -EINVAL;
+ }
}
drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ drm_property_unreference_blob(new_blob);
return 0;
}
@@ -493,6 +511,16 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
+ } else if (property == config->prop_out_fence_ptr) {
+ s64 __user *fence_ptr = u64_to_user_ptr(val);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
@@ -535,6 +563,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+ else if (property == config->prop_out_fence_ptr)
+ *val = 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
@@ -606,6 +636,28 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
return 0;
}
+static void drm_atomic_crtc_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+
+ drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
+ drm_printf(p, "\tenable=%d\n", state->enable);
+ drm_printf(p, "\tactive=%d\n", state->active);
+ drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
+ drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
+ drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
+ drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
+ drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
+ drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
+ drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
+ drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
+
+ if (crtc->funcs->atomic_print_state)
+ crtc->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@@ -690,6 +742,17 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
+ } else if (property == config->prop_in_fence_fd) {
+ if (state->fence)
+ return -EINVAL;
+
+ if (U642I64(val) == -1)
+ return 0;
+
+ state->fence = sync_file_get_fence(val);
+ if (!state->fence)
+ return -EINVAL;
+
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -709,7 +772,9 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
+ if (!is_power_of_2(val & DRM_ROTATE_MASK))
+ return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -749,6 +814,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_in_fence_fd) {
+ *val = -1;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
@@ -767,7 +834,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@@ -837,9 +904,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return ret;
}
@@ -880,6 +948,39 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
return 0;
}
+static void drm_atomic_plane_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct drm_plane *plane = state->plane;
+ struct drm_rect src = drm_plane_state_src(state);
+ struct drm_rect dest = drm_plane_state_dest(state);
+
+ drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
+ if (state->fb) {
+ struct drm_framebuffer *fb = state->fb;
+ int i, n = drm_format_num_planes(fb->pixel_format);
+ struct drm_format_name_buf format_name;
+
+ drm_printf(p, "\t\tformat=%s\n",
+ drm_get_format_name(fb->pixel_format, &format_name));
+ drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
+ drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
+ drm_printf(p, "\t\tlayers:\n");
+ for (i = 0; i < n; i++) {
+ drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
+ drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
+ }
+ }
+ drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
+ drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
+ drm_printf(p, "\trotation=%x\n", state->rotation);
+
+ if (plane->funcs->atomic_print_state)
+ plane->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
@@ -986,15 +1087,53 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
* now?) atomic writes to DPMS property:
*/
return -EINVAL;
+ } else if (property == config->tv_select_subconnector_property) {
+ state->tv.subconnector = val;
+ } else if (property == config->tv_left_margin_property) {
+ state->tv.margins.left = val;
+ } else if (property == config->tv_right_margin_property) {
+ state->tv.margins.right = val;
+ } else if (property == config->tv_top_margin_property) {
+ state->tv.margins.top = val;
+ } else if (property == config->tv_bottom_margin_property) {
+ state->tv.margins.bottom = val;
+ } else if (property == config->tv_mode_property) {
+ state->tv.mode = val;
+ } else if (property == config->tv_brightness_property) {
+ state->tv.brightness = val;
+ } else if (property == config->tv_contrast_property) {
+ state->tv.contrast = val;
+ } else if (property == config->tv_flicker_reduction_property) {
+ state->tv.flicker_reduction = val;
+ } else if (property == config->tv_overscan_property) {
+ state->tv.overscan = val;
+ } else if (property == config->tv_saturation_property) {
+ state->tv.saturation = val;
+ } else if (property == config->tv_hue_property) {
+ state->tv.hue = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
return -EINVAL;
}
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
+static void drm_atomic_connector_print_state(struct drm_printer *p,
+ const struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+
+ drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+
+ if (connector->funcs->atomic_print_state)
+ connector->funcs->atomic_print_state(p, state);
+}
+
/**
* drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to set a property on
@@ -1022,6 +1161,30 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
*val = connector->dpms;
+ } else if (property == config->tv_select_subconnector_property) {
+ *val = state->tv.subconnector;
+ } else if (property == config->tv_left_margin_property) {
+ *val = state->tv.margins.left;
+ } else if (property == config->tv_right_margin_property) {
+ *val = state->tv.margins.right;
+ } else if (property == config->tv_top_margin_property) {
+ *val = state->tv.margins.top;
+ } else if (property == config->tv_bottom_margin_property) {
+ *val = state->tv.margins.bottom;
+ } else if (property == config->tv_mode_property) {
+ *val = state->tv.mode;
+ } else if (property == config->tv_brightness_property) {
+ *val = state->tv.brightness;
+ } else if (property == config->tv_contrast_property) {
+ *val = state->tv.contrast;
+ } else if (property == config->tv_flicker_reduction_property) {
+ *val = state->tv.flicker_reduction;
+ } else if (property == config->tv_overscan_property) {
+ *val = state->tv.overscan;
+ } else if (property == config->tv_saturation_property) {
+ *val = state->tv.saturation;
+ } else if (property == config->tv_hue_property) {
+ *val = state->tv.hue;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1133,22 +1296,48 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
- if (plane_state->fb)
- drm_framebuffer_unreference(plane_state->fb);
- if (fb)
- drm_framebuffer_reference(fb);
- plane_state->fb = fb;
-
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
fb->base.id, plane_state);
else
DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
plane_state);
+
+ drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to set up the plane_state fence in case it is not set yet.
+ * By using this, drivers don't need to worry whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was already
+ * set via the explicit fencing interfaces on the atomic ioctl. In that
+ * case it will just drop the reference to the fence, as it is not
+ * stored anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function simply
+ * sets it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence)
+{
+ if (plane_state->fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
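A hedged sketch of the intended driver-side usage (the surrounding prepare_fb plumbing and the way the implicit fence is obtained are assumptions, not part of this patch):

/* Sketch: hand an implicit fence (e.g. taken from the framebuffer's
 * reservation object) to the atomic core without clobbering an
 * explicit IN_FENCE_FD fence supplied by userspace. */
static void example_attach_implicit_fence(struct drm_plane_state *new_state,
					  struct dma_fence *implicit_fence)
{
	/*
	 * If userspace already set IN_FENCE_FD, new_state->fence is
	 * non-NULL and the helper just drops implicit_fence; otherwise
	 * the implicit fence is stored for the commit to wait on.
	 */
	drm_atomic_set_fence_for_plane(new_state, implicit_fence);
}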
+
+/**
* drm_atomic_set_crtc_for_connector - set crtc for connector
* @conn_state: atomic state object for the connector
* @crtc: crtc to use for the connector
@@ -1459,16 +1648,107 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
+static void drm_atomic_print_state(const struct drm_atomic_state *state)
+{
+ struct drm_printer p = drm_info_printer(state->dev->dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ DRM_DEBUG_ATOMIC("checking %p\n", state);
+
+ for_each_plane_in_state(state, plane, plane_state, i)
+ drm_atomic_plane_print_state(&p, plane_state);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ drm_atomic_crtc_print_state(&p, crtc_state);
+
+ for_each_connector_in_state(state, connector, connector_state, i)
+ drm_atomic_connector_print_state(&p, connector_state);
+}
+
+/**
+ * drm_state_dump - dump entire device atomic state
+ * @dev: the drm device
+ * @p: where to print the state to
+ *
+ * Just for debugging. Drivers might want an option to dump state
+ * to dmesg in case of error IRQs. (Hint: you probably want to
+ * rate-limit this!)
+ *
+ * The caller must hold all modeset locks (drm_modeset_lock_all()), or,
+ * if this is called from an error IRQ handler, the dump should not be
+ * enabled by default. (I.e. if you are debugging errors you might not
+ * care that this is racy. But calling this without all modeset locks
+ * held is not inherently safe.)
+ */
+void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return;
+
+ list_for_each_entry(plane, &config->plane_list, head)
+ drm_atomic_plane_print_state(p, plane->state);
+
+ list_for_each_entry(crtc, &config->crtc_list, head)
+ drm_atomic_crtc_print_state(p, crtc->state);
+
+ list_for_each_entry(connector, &config->connector_list, head)
+ drm_atomic_connector_print_state(p, connector->state);
+}
+EXPORT_SYMBOL(drm_state_dump);
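As a usage illustration (an assumption about driver wiring, not part of the patch), a driver error path could dump the whole state to dmesg with the drm_info_printer() helper from this series:

/* Sketch: dump the full atomic state from a driver error path; locking
 * and rate limiting are the caller's problem, as the kerneldoc warns. */
static void example_dump_state_on_error(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	drm_modeset_lock_all(dev);
	drm_state_dump(dev, &p);
	drm_modeset_unlock_all(dev);
}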
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_state_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_modeset_lock_all(dev);
+ drm_state_dump(dev, &p);
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+/* any use in debugfs files to dump individual planes/crtc/etc? */
+static const struct drm_info_list drm_atomic_debugfs_list[] = {
+ {"state", drm_state_info, 0},
+};
+
+int drm_atomic_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+int drm_atomic_debugfs_cleanup(struct drm_minor *minor)
+{
+ return drm_debugfs_remove_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor);
+}
+#endif
+
/*
* The big monstor ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv,
- struct fence *fence, uint64_t user_data)
+ struct drm_device *dev, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
- int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
if (!e)
@@ -1478,17 +1758,6 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- if (file_priv) {
- ret = drm_event_reserve_init(dev, file_priv, &e->base,
- &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
- }
- }
-
- e->base.fence = fence;
-
return e;
}
@@ -1593,6 +1862,203 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds and there are two DRM properties for that.
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * By contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FDâ€:
+ * Use this property to pass a fence that DRM should wait on before
+ * proceeding with the Atomic Commit request and show the framebuffer for
+ * the plane on the screen. The fence can be either a normal fence or a
+ * merged one, the sync_file framework will handle both cases and use a
+ * fence_array if a merged fence is received. Passing -1 here means no
+ * fences to wait on.
+ *
+ * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ * it will only check if the Sync File is a valid one.
+ *
+ * On the driver side the fence is stored in the @fence field of
+ * struct &drm_plane_state. Drivers which also support implicit fencing
+ * should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ * to make sure there's consistent behaviour between drivers in precedence
+ * of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTRâ€:
+ * Use this property to pass a file descriptor pointer to DRM. Once the
+ * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ * the file descriptor number of a Sync File. This Sync File contains the
+ * CRTC fence that will be signaled when all framebuffers present on the
+ * Atomic Commit request for that given CRTC are scanned out on the
+ * screen.
+ *
+ * The Atomic Commit request fails if an invalid pointer is passed. If the
+ * Atomic Commit request fails for any other reason the out fence fd
+ * returned will be -1. On an Atomic Commit with the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ * Note that out-fences don't have a special interface to drivers and are
+ * internally represented by a struct &drm_pending_vblank_event in struct
+ * &drm_crtc_state, which is also used by the nonblocking atomic commit
+ * helpers and for the DRM event handling for existing userspace.
+ */
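To make the two properties concrete, here is a hedged userspace sketch using the existing libdrm atomic API; the property IDs are assumed to have been looked up beforehand (e.g. via drmModeObjectGetProperties()), and none of this is part of the kernel patch:

/* Userspace sketch: one atomic commit that waits on an in-fence for a
 * plane and asks the kernel for an out-fence on a CRTC. */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_commit_with_fences(int drm_fd, uint32_t plane_id,
				      uint32_t in_fence_prop, int in_fence_fd,
				      uint32_t crtc_id, uint32_t out_fence_prop)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int64_t out_fence_fd = -1;
	int ret;

	if (!req)
		return -ENOMEM;

	/* IN_FENCE_FD: the commit waits on this sync_file before scanout */
	drmModeAtomicAddProperty(req, plane_id, in_fence_prop, in_fence_fd);
	/* OUT_FENCE_PTR: the kernel writes a new sync_file fd through this */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence_fd);

	ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);

	return ret ? ret : (int)out_fence_fd;
}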
+
+struct drm_out_fence_state {
+ s64 __user *out_fence_ptr;
+ struct sync_file *sync_file;
+ int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+ struct dma_fence *fence)
+{
+ fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fence_state->fd < 0)
+ return fence_state->fd;
+
+ if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+ return -EFAULT;
+
+ fence_state->sync_file = sync_file_create(fence);
+ if (!fence_state->sync_file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_mode_atomic *arg,
+ struct drm_file *file_priv,
+ struct drm_out_fence_state **fence_state,
+ unsigned int *num_fences)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ return 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ u64 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+ struct drm_pending_vblank_event *e;
+
+ e = create_vblank_event(dev, arg->user_data);
+ if (!e)
+ return -ENOMEM;
+
+ crtc_state->event = e;
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ struct drm_pending_vblank_event *e = crtc_state->event;
+
+ if (!file_priv)
+ continue;
+
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ crtc_state->event = NULL;
+ return ret;
+ }
+ }
+
+ if (fence_ptr) {
+ struct dma_fence *fence;
+ struct drm_out_fence_state *f;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = drm_crtc_create_fence(crtc);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ crtc_state->event->base.fence = fence;
+ }
+ }
+
+ return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (install_fds) {
+ for (i = 0; i < num_fences; i++)
+ fd_install(fence_state[i].fd,
+ fence_state[i].sync_file->file);
+
+ kfree(fence_state);
+ return;
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * TEST_ONLY and PAGE_FLIP_EVENT are mutually
+ * exclusive; if they weren't, this code would need to be
+ * called on success for TEST_ONLY too.
+ */
+ if (crtc_state->event)
+ drm_event_cancel_free(dev, &crtc_state->event->base);
+ }
+
+ if (!fence_state)
+ return;
+
+ for (i = 0; i < num_fences; i++) {
+ if (fence_state[i].sync_file)
+ fput(fence_state[i].sync_file->file);
+ if (fence_state[i].fd >= 0)
+ put_unused_fd(fence_state[i].fd);
+
+ /* If this fails log error to the user */
+ if (fence_state[i].out_fence_ptr &&
+ put_user(-1, fence_state[i].out_fence_ptr))
+ DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+ }
+
+ kfree(fence_state);
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1605,11 +2071,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_out_fence_state *fence_state = NULL;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j;
+ unsigned int i, j, num_fences = 0;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1724,50 +2189,30 @@ retry:
drm_mode_object_unreference(obj);
}
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_pending_vblank_event *e;
-
- e = create_vblank_event(dev, file_priv, NULL,
- arg->user_data);
- if (!e) {
- ret = -ENOMEM;
- goto out;
- }
-
- crtc_state->event = e;
- }
- }
+ ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
+ if (ret)
+ goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
* Unlike commit, check_only does not clean up state.
- * Below we call drm_atomic_state_free for it.
+ * Below we call drm_atomic_state_put for it.
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
+ if (unlikely(drm_debug & DRM_UT_STATE))
+ drm_atomic_print_state(state);
+
ret = drm_atomic_commit(state);
}
out:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
- if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- /*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
- * if they weren't, this code should be called on success
- * for TEST_ONLY too.
- */
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->event)
- continue;
-
- drm_event_cancel_free(dev, &crtc_state->event->base);
- }
- }
+ complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
@@ -1775,8 +2220,7 @@ out:
goto retry;
}
- if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f83476f996..583f47f27b36 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "drm_crtc_internal.h"
@@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
+ * See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
- * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
@@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_connector_in_state(state, connector, connector_state, i) {
/*
- * This only sets crtc->mode_changed for routing changes,
- * drivers must set crtc->mode_changed themselves when connector
- * properties need to be updated.
+ * This only sets crtc->connectors_changed for routing changes,
+ * drivers must set crtc->connectors_changed themselves when
+ * connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
connector_state);
@@ -594,10 +595,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
- ret = drm_atomic_normalize_zpos(dev, state);
- if (ret)
- return ret;
-
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -1009,14 +1006,22 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
* @state: atomic state object with old state structures
- * @pre_swap: if true, do an interruptible wait
+ * @pre_swap: If true, do an interruptible wait, and @state is the new state.
+ * Otherwise @state is the old state.
*
* For implicit sync, drivers should fish the exclusive fence out of the
* incoming fbs and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
*
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Note that @pre_swap is needed since the point where we block for fences moves
+ * around depending upon whether an atomic commit is blocking or
+ * non-blocking. For async commit all waiting needs to happen after
+ * drm_atomic_helper_swap_state() is called, but for synchronous commits we want
+ * to wait **before** we do anything that can't be easily rolled back. That is
+ * before we call drm_atomic_helper_swap_state().
+ *
+ * Returns zero if success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -1040,11 +1045,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
* still interrupt the operation. Instead of blocking until the
* timer expires, make the wait interruptible.
*/
- ret = fence_wait(plane_state->fence, pre_swap);
+ ret = dma_fence_wait(plane_state->fence, pre_swap);
if (ret)
return ret;
- fence_put(plane_state->fence);
+ dma_fence_put(plane_state->fence);
plane_state->fence = NULL;
}
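The implicit-fencing flow described above relies on the driver stashing the fence in its ->prepare_fb hook. A minimal sketch of that, with my_bo and my_fb_to_bo() standing in for driver-specific buffer lookup (not part of this patch):

/* Illustrative sketch, not part of this patch: a hypothetical ->prepare_fb
 * hook that fishes the implicit (exclusive) fence out of the buffer's
 * reservation object and stashes it in the plane state, so that
 * drm_atomic_helper_wait_for_fences() can wait on it later.
 * my_fb_to_bo() and bo->resv are placeholders for driver internals. */
static int my_plane_prepare_fb(struct drm_plane *plane,
                               struct drm_plane_state *new_state)
{
        struct my_bo *bo;
        struct dma_fence *fence;

        if (!new_state->fb)
                return 0;

        bo = my_fb_to_bo(new_state->fb);
        fence = reservation_object_get_excl_rcu(bo->resv);
        drm_atomic_set_fence_for_plane(new_state, fence);

        return 0;
}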
@@ -1150,7 +1155,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This is the default implementation for the ->atomic_commit_tail() hook of the
* &drm_mode_config_helper_funcs vtable.
@@ -1161,53 +1166,53 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* For drivers supporting runtime PM the recommended sequence is instead ::
*
- * drm_atomic_helper_commit_modeset_disables(dev, state);
+ * drm_atomic_helper_commit_modeset_disables(dev, old_state);
*
- * drm_atomic_helper_commit_modeset_enables(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, old_state);
*
- * drm_atomic_helper_commit_planes(dev, state,
+ * drm_atomic_helper_commit_planes(dev, old_state,
* DRM_PLANE_COMMIT_ACTIVE_ONLY);
*
* for committing the atomic update to hardware. See the kerneldoc entries for
* these three functions for more details.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
-static void commit_tail(struct drm_atomic_state *state)
+static void commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_mode_config_helper_funcs *funcs;
funcs = dev->mode_config.helper_private;
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_fences(dev, old_state, false);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(old_state);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(state);
+ funcs->atomic_commit_tail(old_state);
else
- drm_atomic_helper_commit_tail(state);
+ drm_atomic_helper_commit_tail(old_state);
- drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_helper_commit_cleanup_done(old_state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(old_state);
}
static void commit_work(struct work_struct *work)
@@ -1229,9 +1234,6 @@ static void commit_work(struct work_struct *work)
* function implements nonblocking commits, using
* drm_atomic_helper_setup_commit() and related functions.
*
- * Note that right now this function does not support nonblocking commits, hence
- * driver writers must implement their own version for now.
- *
* Committing the actual hardware state is done through the
* ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
* or it's default implementation drm_atomic_helper_commit_tail().
@@ -1289,6 +1291,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* make sure work items don't artificially stall on each other.
*/
+ drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@@ -1500,10 +1503,10 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function waits for all preceding commits that touch the same CRTC as
- * @state to both be committed to the hardware (as signalled by
+ * @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
* by calling drm_crtc_vblank_send_event on the event member of
* &drm_crtc_state).
@@ -1511,7 +1514,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1519,7 +1522,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
spin_lock(&crtc->commit_lock);
commit = preceeding_commit(crtc);
if (commit)
@@ -1550,7 +1553,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -1563,15 +1566,15 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (!commit)
continue;
@@ -1586,16 +1589,16 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @state: new modeset state to be committed
+ * @old_state: atomic state object with old state structures
*
- * This signals completion of the atomic update @state, including any cleanup
- * work. If used, it must be called right before calling
- * drm_atomic_state_free().
+ * This signals completion of the atomic update @old_state, including any
+ * cleanup work. If used, it must be called right before calling
+ * drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -1603,8 +1606,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
int i;
long ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- commit = state->crtcs[i].commit;
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
+ commit = old_state->crtcs[i].commit;
if (WARN_ON(!commit))
continue;
@@ -2113,18 +2116,13 @@ retry:
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2186,18 +2184,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2326,18 +2319,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2412,7 +2400,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
- if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@@ -2479,11 +2467,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
}
err = drm_atomic_commit(state);
-
free:
- if (err < 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@@ -2534,7 +2519,7 @@ retry:
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
@@ -2623,18 +2608,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2683,18 +2663,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2743,18 +2718,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2827,18 +2797,13 @@ retry:
}
ret = drm_atomic_nonblocking_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2914,19 +2879,14 @@ retry:
crtc_state->active = active;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
-
- connector->dpms = old_mode;
- drm_atomic_state_free(state);
-
+ if (ret != 0)
+ connector->dpms = old_mode;
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -3117,6 +3077,8 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
if (state->fb)
drm_framebuffer_reference(state->fb);
+
+ state->fence = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3155,6 +3117,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
@@ -3333,7 +3298,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
free:
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
}
@@ -3448,22 +3413,14 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
-
- drm_property_unreference_blob(blob);
-
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_property_unreference_blob(blob);
-
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 85172a977bf3..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
@@ -105,18 +105,12 @@
*/
/**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
*
* Since a rotation by 180° is the same as reflecting both along the x
* and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,8 +138,9 @@
* rotation. After reflection, the rotation is applied to the image sampled from
* the source rectangle, before scaling it to fit the destination rectangle.
*/
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
@@ -155,12 +150,28 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
};
+ struct drm_property *prop;
+
+ WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
+ WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+ WARN_ON(rotation & ~supported_rotations);
- return drm_property_create_bitmask(dev, 0, "rotation",
+ prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, rotation);
+
+ if (plane->state)
+ plane->state->rotation = rotation;
+
+ plane->rotation_property = prop;
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
+EXPORT_SYMBOL(drm_plane_create_rotation_property);
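With the property now created per plane, a driver supporting, say, 0°/180° rotation plus X reflection would call the new helper from its plane init code roughly like this (illustrative fragment, not part of this patch):

/* Illustrative fragment, assumed to live in a driver's plane init path. */
ret = drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
                                         DRM_ROTATE_0 |
                                         DRM_ROTATE_180 |
                                         DRM_REFLECT_X);
if (ret)
        return ret;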
/**
* drm_rotation_simplify() - Try to simplify the rotation
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d28ffdd2b929..6543ebde501a 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -41,6 +41,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “DEGAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the DEGAMMA_LUT property (the size depends on the underlying
@@ -54,6 +58,10 @@
* lookup through the gamma LUT. The data is interpreted as a struct
* &drm_color_ctm.
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * unit/pass-thru matrix should be used. This is generally the driver
+ * boot-up state too.
+ *
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
@@ -62,6 +70,10 @@
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
*
+ * Setting this to NULL (blob property value set to 0) means a
+ * linear/pass-thru gamma table should be used. This is generally the
+ * driver boot-up state too.
+ *
* “GAMMA_LUT_SIZE”:
* Unsigned range property to give the size of the lookup table to be set
* on the GAMMA_LUT property (the size depends on the underlying hardware).
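To illustrate how userspace drives these blob properties, here is a hedged libdrm sketch (not part of this patch) that programs an identity GAMMA_LUT; drm_fd, crtc_id, lut_size (from GAMMA_LUT_SIZE) and gamma_lut_prop_id are assumed to come from the usual enumeration:

/* Illustrative sketch, not part of this patch: set an identity GAMMA_LUT on
 * a CRTC through the atomic API. */
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_identity_gamma(int drm_fd, uint32_t crtc_id,
                              uint32_t gamma_lut_prop_id, uint32_t lut_size)
{
        struct drm_color_lut *lut = calloc(lut_size, sizeof(*lut));
        drmModeAtomicReqPtr req;
        uint32_t blob_id = 0;
        uint32_t i;
        int ret;

        if (!lut)
                return -1;

        for (i = 0; i < lut_size; i++) {
                uint16_t v = 0xffff * i / (lut_size - 1);

                lut[i].red = lut[i].green = lut[i].blue = v;
        }

        ret = drmModeCreatePropertyBlob(drm_fd, lut, lut_size * sizeof(*lut),
                                        &blob_id);
        free(lut);
        if (ret)
                return ret;

        req = drmModeAtomicAlloc();
        drmModeAtomicAddProperty(req, crtc_id, gamma_lut_prop_id, blob_id);
        ret = drmModeAtomicCommit(drm_fd, req, 0, NULL);
        drmModeAtomicFree(req);

        /* The kernel holds its own reference once the property is set. */
        drmModeDestroyPropertyBlob(drm_fd, blob_id);
        return ret;
}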
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb510b6c..5a4526289392 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -588,6 +588,50 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+/**
+ * DOC: standard connector properties
+ *
+ * DRM connectors have a few standardized properties:
+ *
+ * EDID:
+ * Blob property which contains the current EDID read from the sink. This
+ * is useful to parse sink identification information like vendor, model
+ * and serial. Drivers should update this property by calling
+ * drm_mode_connector_update_edid_property(), usually after having parsed
+ * the EDID using drm_add_edid_modes(). Userspace cannot change this
+ * property.
+ * DPMS:
+ * Legacy property for setting the power state of the connector. For atomic
+ * drivers this is only provided for backwards compatibility with existing
+ * drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
+ * connector is linked to. Drivers should never set this property directly,
+ * it is handled by the DRM core by calling the ->dpms() callback in
+ * &drm_connector_funcs. Atomic drivers should implement this hook using
+ * drm_atomic_helper_connector_dpms(). This is the only standard
+ * connector property that userspace can change.
+ * PATH:
+ * Connector path property to identify how this sink is physically
+ * connected. Used by DP MST. This should be set by calling
+ * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * path property the MST manager created. Userspace cannot change this
+ * property.
+ * TILE:
+ * Connector tile group property to indicate how a set of DRM connectors
+ * compose together into one logical screen. This is used both by high-res
+ * external screens (often only using a single cable, but exposing multiple
+ * DP MST sinks) and by high-res integrated panels (like dual-link DSI) which
+ * are not gen-locked. Note that for tiled panels which are gen-locked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose the
+ * tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
+ * should update this value using drm_mode_connector_set_tile_property().
+ * Userspace cannot change this property.
+ *
+ * Connectors also have one standardized atomic property:
+ *
+ * CRTC_ID:
+ * Mode object ID of the &drm_crtc this connector should be connected to.
+ */
+
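For the EDID property documented above, userspace typically reads the blob back along these lines (illustrative sketch, not part of this patch; error handling abbreviated):

/* Illustrative sketch, not part of this patch: fetch a connector's EDID
 * blob through libdrm. */
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static drmModePropertyBlobPtr get_edid_blob(int drm_fd, uint32_t connector_id)
{
        drmModeObjectPropertiesPtr props;
        drmModePropertyBlobPtr edid = NULL;
        uint32_t i;

        props = drmModeObjectGetProperties(drm_fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
                return NULL;

        for (i = 0; i < props->count_props; i++) {
                drmModePropertyPtr prop = drmModeGetProperty(drm_fd,
                                                             props->props[i]);

                if (prop && !strcmp(prop->name, "EDID"))
                        edid = drmModeGetPropertyBlob(drm_fd,
                                                      props->prop_values[i]);
                drmModeFreeProperty(prop);
        }

        drmModeFreeObjectProperties(props);
        return edid; /* caller uses edid->data / edid->length */
}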
int drm_connector_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
@@ -1121,3 +1165,107 @@ out_unlock:
return ret;
}
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique integer
+ * identifier. Tiled monitors using DisplayID v1.3 have a unique 8-byte handle;
+ * we store this in a tile group so we have a common identifier for all tiles
+ * in a monitor group. The property is called "TILE". Drivers can manage tile
+ * groups using drm_mode_create_tile_group(), drm_mode_put_tile_group() and
+ * drm_mode_get_tile_group(). But this is only needed for internal panels where
+ * the tile group information is exposed in a non-standard way.
+ */
+
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * Drop a reference to the tile group and free it once the refcount reaches zero.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+EXPORT_SYMBOL(drm_mode_put_tile_group);
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
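A driver parsing a DisplayID tile block would typically consume these helpers roughly as follows (illustrative sketch, not part of this patch; the connector->tile_group assignment mirrors what the EDID/DisplayID parsing code does):

/* Illustrative sketch, not part of this patch: look up or create the tile
 * group for an 8-byte DisplayID topology id and attach it to a connector. */
static int my_attach_tile_group(struct drm_connector *connector,
                                char topology[8])
{
        struct drm_device *dev = connector->dev;
        struct drm_tile_group *tg;

        tg = drm_mode_get_tile_group(dev, topology);
        if (!tg)
                tg = drm_mode_create_tile_group(dev, topology);
        if (IS_ERR(tg))
                return PTR_ERR(tg);

        connector->tile_group = tg;
        return 0;
}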
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf28647..e75f62cd8a65 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/dma-fence.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -40,23 +41,11 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <drm/drm_debugfs_crc.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-/*
- * Global properties
- */
-static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
- { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
- { DRM_PLANE_TYPE_PRIMARY, "Primary" },
- { DRM_PLANE_TYPE_CURSOR, "Cursor" },
-};
-
-/*
- * Optional properties
- */
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
@@ -102,8 +91,6 @@ out:
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
-DEFINE_WW_CLASS(crtc_ww_class);
-
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
@@ -116,12 +103,16 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
-static int drm_crtc_register_all(struct drm_device *dev)
+int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_for_each_crtc(crtc, dev) {
+ if (drm_debugfs_crtc_add(crtc))
+ DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
+ crtc->name);
+
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
@@ -131,16 +122,84 @@ static int drm_crtc_register_all(struct drm_device *dev)
return 0;
}
-static void drm_crtc_unregister_all(struct drm_device *dev)
+void drm_crtc_unregister_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
+ drm_debugfs_crtc_remove(crtc);
}
}
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_init(&crtc->crc.lock);
+ init_waitqueue_head(&crtc->crc.wq);
+ crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+ if (!crtc->crc.source)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ kfree(crtc->crc.source);
+#endif
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct dma_fence *fence)
+{
+ BUG_ON(fence->ops != &drm_crtc_fence_ops);
+ return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_crtc *crtc = fence_to_crtc(fence);
+
+ return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_crtc_fence_ops = {
+ .get_driver_name = drm_crtc_fence_get_driver_name,
+ .get_timeline_name = drm_crtc_fence_get_timeline_name,
+ .enable_signaling = drm_crtc_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+ crtc->fence_context, ++crtc->fence_seqno);
+
+ return fence;
+}
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -198,6 +257,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return -ENOMEM;
}
+ crtc->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&crtc->fence_lock);
+ snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+ "CRTC:%d-%s", crtc->base.id, crtc->name);
+
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
@@ -205,14 +269,22 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
- if (primary)
+ if (primary && !primary->possible_crtcs)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
- if (cursor)
+ if (cursor && !cursor->possible_crtcs)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_crtc_crc_init(crtc);
+ if (ret) {
+ drm_mode_object_unregister(dev, &crtc->base);
+ return ret;
+ }
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_out_fence_ptr, 0);
}
return 0;
@@ -236,6 +308,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
* the indices on the drm_crtc after us in the crtc_list.
*/
+ drm_crtc_crc_fini(crtc);
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -255,301 +329,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-int drm_modeset_register_all(struct drm_device *dev)
-{
- int ret;
-
- ret = drm_plane_register_all(dev);
- if (ret)
- goto err_plane;
-
- ret = drm_crtc_register_all(dev);
- if (ret)
- goto err_crtc;
-
- ret = drm_encoder_register_all(dev);
- if (ret)
- goto err_encoder;
-
- ret = drm_connector_register_all(dev);
- if (ret)
- goto err_connector;
-
- return 0;
-
-err_connector:
- drm_encoder_unregister_all(dev);
-err_encoder:
- drm_crtc_unregister_all(dev);
-err_crtc:
- drm_plane_unregister_all(dev);
-err_plane:
- return ret;
-}
-
-void drm_modeset_unregister_all(struct drm_device *dev)
-{
- drm_connector_unregister_all(dev);
- drm_encoder_unregister_all(dev);
- drm_crtc_unregister_all(dev);
- drm_plane_unregister_all(dev);
-}
-
-static int drm_mode_create_standard_properties(struct drm_device *dev)
-{
- struct drm_property *prop;
- int ret;
-
- ret = drm_connector_create_standard_properties(dev);
- if (ret)
- return ret;
-
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "type", drm_plane_type_enum_list,
- ARRAY_SIZE(drm_plane_type_enum_list));
- if (!prop)
- return -ENOMEM;
- dev->mode_config.plane_type_property = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_X", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_x = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_Y", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_W", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "SRC_H", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_src_h = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_X", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_x = prop;
-
- prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_Y", INT_MIN, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_y = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_W", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_w = prop;
-
- prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_H", 0, INT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_h = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "FB_ID", DRM_MODE_OBJECT_FB);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_fb_id = prop;
-
- prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
- "CRTC_ID", DRM_MODE_OBJECT_CRTC);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_crtc_id = prop;
-
- prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
- "ACTIVE");
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_active = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
- "MODE_ID", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.prop_mode_id = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "DEGAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.degamma_lut_size_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "CTM", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.ctm_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB,
- "GAMMA_LUT", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_property = prop;
-
- prop = drm_property_create_range(dev,
- DRM_MODE_PROP_IMMUTABLE,
- "GAMMA_LUT_SIZE", 0, UINT_MAX);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.gamma_lut_size_property = prop;
-
- return 0;
-}
-
-/**
- * drm_mode_getresources - get graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a set of configuration description structures and return
- * them to the user, including CRTC, connector and framebuffer configuration.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getresources(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0;
- uint32_t __user *fb_id;
- uint32_t __user *crtc_id;
- uint32_t __user *connector_id;
- uint32_t __user *encoder_id;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
-
- mutex_lock(&file_priv->fbs_lock);
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- mutex_unlock(&file_priv->fbs_lock);
- return -EFAULT;
- }
- copied++;
- }
- }
- card_res->count_fbs = fb_count;
- mutex_unlock(&file_priv->fbs_lock);
-
- /* mode_config.mutex protects the connector list against e.g. DP MST
- * connector hot-adding. CRTC/Plane lists are invariant. */
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_crtc(crtc, dev)
- crtc_count++;
-
- drm_for_each_connector(connector, dev)
- connector_count++;
-
- drm_for_each_encoder(encoder, dev)
- encoder_count++;
-
- card_res->max_height = dev->mode_config.max_height;
- card_res->min_height = dev->mode_config.min_height;
- card_res->max_width = dev->mode_config.max_width;
- card_res->min_width = dev->mode_config.min_width;
-
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- drm_for_each_crtc(crtc, dev) {
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- drm_for_each_encoder(encoder, dev) {
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- drm_for_each_connector(connector, dev) {
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- card_res->count_connectors = connector_count;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-}
-
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: drm device for the ioctl
@@ -695,8 +474,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
- crtc->primary->state->rotation & (DRM_ROTATE_90 |
- DRM_ROTATE_270))
+ drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,
@@ -796,9 +574,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
}
@@ -902,362 +681,3 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-
-/**
- * drm_mode_config_reset - call ->reset callbacks
- * @dev: drm device
- *
- * This functions calls all the crtc's, encoder's and connector's ->reset
- * callback. Drivers can use this in e.g. their driver load or resume code to
- * reset hardware and software state.
- */
-void drm_mode_config_reset(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_plane *plane;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
- drm_for_each_plane(plane, dev)
- if (plane->funcs->reset)
- plane->funcs->reset(plane);
-
- drm_for_each_crtc(crtc, dev)
- if (crtc->funcs->reset)
- crtc->funcs->reset(crtc);
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
- encoder->funcs->reset(encoder);
-
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev)
- if (connector->funcs->reset)
- connector->funcs->reset(connector);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_reset);
-
-/**
- * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This creates a new dumb buffer in the driver's backing storage manager (GEM,
- * TTM or something else entirely) and returns the resulting buffer handle. This
- * handle can then be wrapped up into a framebuffer modeset object.
- *
- * Note that userspace is not allowed to use such objects for render
- * acceleration - drivers must create their own private ioctls for such a use
- * case.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_create_dumb *args = data;
- u32 cpp, stride, size;
-
- if (!dev->driver->dumb_create)
- return -ENOSYS;
- if (!args->width || !args->height || !args->bpp)
- return -EINVAL;
-
- /* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
- cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
- return -EINVAL;
- stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
- return -EINVAL;
-
- /* test for wrap-around */
- size = args->height * stride;
- if (PAGE_ALIGN(size) == 0)
- return -EINVAL;
-
- /*
- * handle, pitch and size are output parameters. Zero them out to
- * prevent drivers from accidentally using uninitialized data. Since
- * not all existing userspace is clearing these fields properly we
- * cannot reject IOCTL with garbage in them.
- */
- args->handle = 0;
- args->pitch = 0;
- args->size = 0;
-
- return dev->driver->dumb_create(file_priv, dev, args);
-}
-
-/**
- * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_map_dumb *args = data;
-
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
- return -ENOSYS;
-
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
-}
-
-/**
- * drm_mode_destroy_dumb_ioctl - destroy a dumb backing strage buffer
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_destroy_dumb *args = data;
-
- if (!dev->driver->dumb_destroy)
- return -ENOSYS;
-
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
-}
-
-/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
- * @dev: DRM device
- *
- * Initialize @dev's mode_config structure, used for tracking the graphics
- * configuration of @dev.
- *
- * Since this initializes the modeset locks, no locking is possible. Which is no
- * problem, since this should happen single threaded at init time. It is the
- * driver's problem to ensure this guarantee.
- *
- */
-void drm_mode_config_init(struct drm_device *dev)
-{
- mutex_init(&dev->mode_config.mutex);
- drm_modeset_lock_init(&dev->mode_config.connection_mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- mutex_init(&dev->mode_config.fb_lock);
- mutex_init(&dev->mode_config.blob_lock);
- INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.crtc_list);
- INIT_LIST_HEAD(&dev->mode_config.connector_list);
- INIT_LIST_HEAD(&dev->mode_config.encoder_list);
- INIT_LIST_HEAD(&dev->mode_config.property_list);
- INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
- INIT_LIST_HEAD(&dev->mode_config.plane_list);
- idr_init(&dev->mode_config.crtc_idr);
- idr_init(&dev->mode_config.tile_idr);
- ida_init(&dev->mode_config.connector_ida);
-
- drm_modeset_lock_all(dev);
- drm_mode_create_standard_properties(dev);
- drm_modeset_unlock_all(dev);
-
- /* Just to be sure */
- dev->mode_config.num_fb = 0;
- dev->mode_config.num_connector = 0;
- dev->mode_config.num_crtc = 0;
- dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
- dev->mode_config.num_total_plane = 0;
-}
-EXPORT_SYMBOL(drm_mode_config_init);
-
-/**
- * drm_mode_config_cleanup - free up DRM mode_config info
- * @dev: DRM device
- *
- * Free up all the connectors and CRTCs associated with this DRM device, then
- * free up the framebuffers and associated buffer objects.
- *
- * Note that since this /should/ happen single-threaded at driver/device
- * teardown time, no locking is required. It's the driver's job to ensure that
- * this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
- */
-void drm_mode_config_cleanup(struct drm_device *dev)
-{
- struct drm_connector *connector, *ot;
- struct drm_crtc *crtc, *ct;
- struct drm_encoder *encoder, *enct;
- struct drm_framebuffer *fb, *fbt;
- struct drm_property *property, *pt;
- struct drm_property_blob *blob, *bt;
- struct drm_plane *plane, *plt;
-
- list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
- head) {
- encoder->funcs->destroy(encoder);
- }
-
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
- }
-
- list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
- head) {
- drm_property_destroy(dev, property);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
- list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
- head_global) {
- drm_property_unreference_blob(blob);
- }
-
- /*
- * Single-threaded teardown context, so it's not required to grab the
- * fb_lock to protect against concurrent fb_list access. Contrary, it
- * would actually deadlock with the drm_framebuffer_cleanup function.
- *
- * Also, if there are any framebuffers left, that's a driver leak now,
- * so politely WARN about this.
- */
- WARN_ON(!list_empty(&dev->mode_config.fb_list));
- list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_free(&fb->base.refcount);
- }
-
- ida_destroy(&dev->mode_config.connector_ida);
- idr_destroy(&dev->mode_config.tile_idr);
- idr_destroy(&dev->mode_config.crtc_idr);
- drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
-}
-EXPORT_SYMBOL(drm_mode_config_cleanup);
-
-/**
- * DOC: Tile group
- *
- * Tile groups are used to represent tiled monitors with a unique
- * integer identifier. Tiled monitors using DisplayID v1.3 have
- * a unique 8-byte handle, we store this in a tile group, so we
- * have a common identifier for all tiles in a monitor group.
- */
-static void drm_tile_group_free(struct kref *kref)
-{
- struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
- struct drm_device *dev = tg->dev;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.tile_idr, tg->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- kfree(tg);
-}
-
-/**
- * drm_mode_put_tile_group - drop a reference to a tile group.
- * @dev: DRM device
- * @tg: tile group to drop reference to.
- *
- * drop reference to tile group and free if 0.
- */
-void drm_mode_put_tile_group(struct drm_device *dev,
- struct drm_tile_group *tg)
-{
- kref_put(&tg->refcount, drm_tile_group_free);
-}
-
-/**
- * drm_mode_get_tile_group - get a reference to an existing tile group
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Use the unique bytes to get a reference to an existing tile group.
- *
- * RETURNS:
- * tile group or NULL if not found.
- */
-struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int id;
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
- if (!memcmp(tg->group_data, topology, 8)) {
- if (!kref_get_unless_zero(&tg->refcount))
- tg = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
- }
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mode_get_tile_group);
-
-/**
- * drm_mode_create_tile_group - create a tile group from a displayid description
- * @dev: DRM device
- * @topology: 8-bytes unique per monitor.
- *
- * Create a tile group for the unique monitor, and get a unique
- * identifier for the tile group.
- *
- * RETURNS:
- * new tile group or error.
- */
-struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
-{
- struct drm_tile_group *tg;
- int ret;
-
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&tg->refcount);
- memcpy(tg->group_data, topology, 8);
- tg->dev = dev;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- tg->id = ret;
- } else {
- kfree(tg);
- tg = ERR_PTR(ret);
- }
-
- mutex_unlock(&dev->mode_config.idr_mutex);
- return tg;
-}
-EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02c5365..cdf6860c9d22 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -40,10 +40,29 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
+int drm_crtc_register_all(struct drm_device *dev);
+void drm_crtc_unregister_all(struct drm_device *dev);
-void drm_fb_release(struct drm_file *file_priv);
+struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);
+
+/* IOCTLs */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+
+/* drm_mode_config.c */
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* IOCTLs */
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
-/* dumb buffer support IOCTLs */
+
+/* drm_dumb_buffers.c */
+/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
@@ -51,14 +70,6 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-/* IOCTLs */
-int drm_mode_getresources(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_setcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
/* drm_color_mgmt.c */
/* IOCTLs */
@@ -147,6 +158,8 @@ void drm_framebuffer_free(struct kref *kref);
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb);
+void drm_fb_release(struct drm_file *file_priv);
+
/* IOCTL */
int drm_mode_addfb(struct drm_device *dev,
@@ -166,9 +179,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_modeset_register_all(struct drm_device *dev);
-void drm_modeset_unregister_all(struct drm_device *dev);
-
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1205790ed960..2e3e46a53805 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_atomic.h>
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -163,6 +164,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create atomic debugfs files\n");
+ return ret;
+ }
+ }
+
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
@@ -219,6 +228,7 @@ EXPORT_SYMBOL(drm_debugfs_remove_files);
int drm_debugfs_cleanup(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ int ret;
if (!minor->debugfs_root)
return 0;
@@ -226,6 +236,14 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
if (dev->driver->debugfs_cleanup)
dev->driver->debugfs_cleanup(minor);
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ ret = drm_atomic_debugfs_cleanup(minor);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n");
+ return ret;
+ }
+ }
+
drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
debugfs_remove(minor->debugfs_root);
@@ -415,5 +433,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
-#endif /* CONFIG_DEBUG_FS */
+int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+ if (!name)
+ return -ENOMEM;
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+ if (!root)
+ return -ENOMEM;
+
+ crtc->debugfs_entry = root;
+
+ if (drm_debugfs_crtc_crc_add(crtc))
+ goto error;
+
+ return 0;
+
+error:
+ drm_debugfs_crtc_remove(crtc);
+ return -ENOMEM;
+}
+
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+ debugfs_remove_recursive(crtc->debugfs_entry);
+ crtc->debugfs_entry = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
new file mode 100644
index 000000000000..00e771fb7df2
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * Copyright © 2016 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Based on code from the i915 driver.
+ * Original author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: CRC ABI
+ *
+ * DRM device drivers can provide CRC information of each frame to userspace as
+ * it reaches a given hardware component (a "source").
+ *
+ * Userspace can control generation of CRCs in a given CRTC by writing to the
+ * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
+ * Accepted values are source names (which are driver-specific) and the "auto"
+ * keyword, which will let the driver select a default source of frame CRCs
+ * for this CRTC.
+ *
+ * Once frame CRC generation is enabled, userspace can capture them by reading
+ * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
+ * number in the first field and then a number of unsigned integer fields
+ * containing the CRC data. Fields are separated by a single space and the number
+ * of CRC fields is source-specific.
+ *
+ * Note that though in some cases the CRC is computed in a specified way and on
+ * the frame contents as supplied by userspace (eDP 1.3), in general the CRC
+ * computation is performed in an unspecified way and on frame contents that have
+ * already been processed in an equally unspecified way and thus userspace cannot
+ * rely on being able to generate matching CRC values for the frame contents that
+ * it submits. In this general case, the maximum userspace can do is to compare
+ * the reported CRCs of frames that should have the same contents.
+ */
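/*
 * A minimal userspace sketch of the ABI described above (not part of this
 * patch): select the "auto" CRC source for CRTC 0 and read back a few CRC
 * lines. Assumes debugfs is mounted at /sys/kernel/debug and that the driver
 * implements set_crc_source; the paths and buffer sizes are illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int capture_crcs(void)
{
	char line[256];
	ssize_t n;
	int ctl, data, i;

	ctl = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	if (ctl < 0)
		return -1;
	/* Let the driver pick its default CRC source for this CRTC. */
	if (write(ctl, "auto", strlen("auto")) < 0) {
		close(ctl);
		return -1;
	}
	close(ctl);

	/* Opening "data" starts CRC generation; each read returns one line. */
	data = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	if (data < 0)
		return -1;
	for (i = 0; i < 10; i++) {
		n = read(data, line, sizeof(line) - 1);
		if (n <= 0)
			break;
		line[n] = '\0';
		printf("%s", line);	/* "<frame> 0x<crc0> [0x<crc1> ...]\n" */
	}
	close(data);			/* closing the file stops generation */
	return 0;
}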
+
+static int crc_control_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+
+ seq_printf(m, "%s\n", crtc->crc.source);
+
+ return 0;
+}
+
+static int crc_control_open(struct inode *inode, struct file *file)
+{
+ struct drm_crtc *crtc = inode->i_private;
+
+ return single_open(file, crc_control_show, crtc);
+}
+
+static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_crtc *crtc = m->private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ char *source;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ source = memdup_user_nul(ubuf, len);
+ if (IS_ERR(source))
+ return PTR_ERR(source);
+
+ if (source[len - 1] == '\n')
+ source[len - 1] = '\0';
+
+ spin_lock_irq(&crc->lock);
+
+ if (crc->opened) {
+ spin_unlock_irq(&crc->lock);
+ kfree(source);
+ return -EBUSY;
+ }
+
+ kfree(crc->source);
+ crc->source = source;
+
+ spin_unlock_irq(&crc->lock);
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations drm_crtc_crc_control_fops = {
+ .owner = THIS_MODULE,
+ .open = crc_control_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crc_control_write
+};
+
+static int crtc_crc_open(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entries = NULL;
+ size_t values_cnt;
+ int ret;
+
+ if (crc->opened)
+ return -EBUSY;
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ if (WARN_ON(values_cnt == 0)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ spin_lock_irq(&crc->lock);
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ crc->opened = true;
+ spin_unlock_irq(&crc->lock);
+
+ return 0;
+
+err_disable:
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ return ret;
+}
+
+static int crtc_crc_release(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ size_t values_cnt;
+
+ spin_lock_irq(&crc->lock);
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
+ return 0;
+}
+
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+ assert_spin_locked(&crc->lock);
+ return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
+/*
+ * 1 frame field of 10 chars plus a number of CRC fields of 10 chars each, space
+ * separated, with a newline at the end and null-terminated.
+ */
+#define LINE_LEN(values_cnt) (10 + 11 * values_cnt + 1 + 1)
+#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
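/*
 * Worked example for the layout above: with values_cnt == 1 a line looks like
 * "0x00001234 0xdeadbeef\n" followed by the terminating NUL, i.e.
 * LINE_LEN(1) = 10 + 11 * 1 + 1 + 1 = 23 bytes; reads that pass a smaller
 * buffer are rejected with -EINVAL further down.
 */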
+
+static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ char buf[MAX_LINE_LEN];
+ int ret, i;
+
+ spin_lock_irq(&crc->lock);
+
+ if (!crc->source) {
+ spin_unlock_irq(&crc->lock);
+ return 0;
+ }
+
+ /* Nothing to read? */
+ while (crtc_crc_data_count(crc) == 0) {
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(crc->wq,
+ crtc_crc_data_count(crc),
+ crc->lock);
+ if (ret) {
+ spin_unlock_irq(&crc->lock);
+ return ret;
+ }
+ }
+
+ /* We know we have an entry to be read */
+ entry = &crc->entries[crc->tail];
+
+ if (count < LINE_LEN(crc->values_cnt)) {
+ spin_unlock_irq(&crc->lock);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
+ crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
+
+ spin_unlock_irq(&crc->lock);
+
+ if (entry->has_frame_counter)
+ sprintf(buf, "0x%08x", entry->frame);
+ else
+ sprintf(buf, "XXXXXXXXXX");
+
+ for (i = 0; i < crc->values_cnt; i++)
+ sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
+ sprintf(buf + 10 + crc->values_cnt * 11, "\n");
+
+ if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
+ return -EFAULT;
+
+ return LINE_LEN(crc->values_cnt);
+}
+
+static const struct file_operations drm_crtc_crc_data_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_crc_open,
+ .read = crtc_crc_read,
+ .release = crtc_crc_release,
+};
+
+/**
+ * drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
+ * @crtc: CRTC to which the frames will belong
+ *
+ * Adds files to the debugfs directory that allow userspace to control the
+ * generation of frame CRCs and to read them.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ struct dentry *crc_ent, *ent;
+
+ if (!crtc->funcs->set_crc_source)
+ return 0;
+
+ crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
+ if (!crc_ent)
+ return -ENOMEM;
+
+ ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_control_fops);
+ if (!ent)
+ goto error;
+
+ ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_data_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(crc_ent);
+
+ return -ENOMEM;
+}
+
+/**
+ * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
+ * @crtc: CRTC to which the frame belongs
+ * @has_frame: whether this entry has a frame number to go with
+ * @frame: number of the frame these CRCs are about
+ * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
+ *
+ * For each frame, the driver polls the source of CRCs for new data and calls
+ * this function to add them to the buffer from where userspace reads.
+ */
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ int head, tail;
+
+ assert_spin_locked(&crc->lock);
+
+ /* Caller may not have noticed yet that userspace has stopped reading */
+ if (!crc->opened)
+ return -EINVAL;
+
+ head = crc->head;
+ tail = crc->tail;
+
+ if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+ return -ENOBUFS;
+ }
+
+ entry = &crc->entries[head];
+ entry->frame = frame;
+ entry->has_frame_counter = has_frame;
+ memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
+
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+ crc->head = head;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
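/*
 * Illustrative driver-side sketch (not part of this patch): pushing one CRC
 * value per frame into the buffer exported above. The hardware CRC value,
 * the frame counter and the wake-up of crc->wq are assumptions about what a
 * concrete driver would do; the locking rule (crc->lock held around
 * drm_crtc_add_crc_entry()) follows from the assert in the function above.
 */
static void example_crtc_report_crc(struct drm_crtc *crtc,
				    uint32_t frame, uint32_t hw_crc)
{
	struct drm_crtc_crc *crc = &crtc->crc;
	uint32_t crcs[DRM_MAX_CRC_NR] = { hw_crc };
	unsigned long flags;

	spin_lock_irqsave(&crc->lock, flags);
	if (!drm_crtc_add_crc_entry(crtc, true, frame, crcs))
		wake_up_interruptible(&crc->wq); /* unblock readers of "data" */
	spin_unlock_irqrestore(&crc->lock, flags);
}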
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a751f6fe..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,12 +142,25 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
+static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
+ const uint8_t adaptor_id)
+{
+ return is_hdmi_adaptor(hdmi_id) &&
+ (adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_TYPE_HAS_DPCD));
+}
+
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -185,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
+ DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -202,13 +217,26 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
+ DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+ adaptor_id, ret);
if (ret == 0) {
+ if (is_lspcon_adaptor(hdmi_id, adaptor_id))
+ return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
+ /*
+ * If neither a proper type 1 ID nor a broken type 1 adaptor
+ * as described above, assume type 1, but let the user know
+ * that we may have misdetected the type.
+ */
+ if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+ adaptor_id);
+
}
if (is_hdmi_adaptor(hdmi_id))
@@ -364,3 +392,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+
+/**
+ * drm_lspcon_get_mode: Get LSPCON's current mode of operation by
+ * reading offset (0x80, 0x41)
+ * @adapter: I2C-over-aux adapter
+ * @mode: current lspcon mode of operation output variable
+ *
+ * Returns:
+ * 0 on success, with *mode set to the current mode of operation;
+ * negative error code on failure
+ */
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *mode)
+{
+ u8 data;
+ int ret = 0;
+
+ if (!mode) {
+ DRM_ERROR("NULL input\n");
+ return -EINVAL;
+ }
+
+ /* Read Status: i2c over aux */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ return -EFAULT;
+ }
+
+ if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
+ *mode = DRM_LSPCON_MODE_PCON;
+ else
+ *mode = DRM_LSPCON_MODE_LS;
+ return 0;
+}
+EXPORT_SYMBOL(drm_lspcon_get_mode);
+
+/**
+ * drm_lspcon_set_mode: Change LSPCON's mode of operation by
+ * writing offset (0x80, 0x40)
+ * @adapter: I2C-over-aux adapter
+ * @mode: required mode of operation
+ *
+ * Returns:
+ * 0 on success, negative error code on failure or timeout
+ */
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode mode)
+{
+ u8 data = 0;
+ int ret;
+ int time_out = 200;
+ enum drm_lspcon_mode current_mode;
+
+ if (mode == DRM_LSPCON_MODE_PCON)
+ data = DP_DUAL_MODE_LSPCON_MODE_PCON;
+
+ /* Change mode */
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return ret;
+ }
+
+ /*
+ * Confirm mode change by reading the status bit.
+ * Sometimes, it takes a while to change the mode,
+ * so wait and retry until time out or done.
+ */
+ do {
+ ret = drm_lspcon_get_mode(adapter, &current_mode);
+ if (ret) {
+ DRM_ERROR("can't confirm LSPCON mode change\n");
+ return ret;
+ } else {
+ if (current_mode != mode) {
+ msleep(10);
+ time_out -= 10;
+ } else {
+ DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
+ mode == DRM_LSPCON_MODE_LS ?
+ "LS" : "PCON");
+ return 0;
+ }
+ }
+ } while (time_out);
+
+ DRM_ERROR("LSPCON mode change timed out\n");
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(drm_lspcon_set_mode);
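/*
 * Illustrative use of the two helpers above (not part of this patch): a
 * driver that wants the adaptor in PCON mode after detecting an LSPCON
 * dongle. "adapter" is whatever i2c-over-aux adapter the encoder owns;
 * the function name is made up for the example.
 */
static int example_lspcon_enable_pcon(struct i2c_adapter *adapter)
{
	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode == DRM_LSPCON_MODE_PCON)
		return 0;	/* already in the desired mode */

	/* drm_lspcon_set_mode() polls until the change sticks or times out */
	return drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);
}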
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6efdba4993fc..a525751b4559 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,7 +32,10 @@
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
+
+#include <drm/drm_drv.h>
#include <drm/drmP.h>
+
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -257,10 +260,7 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
drm_debugfs_cleanup(minor);
}
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
+/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
* object with drm_minor_release().
@@ -268,10 +268,6 @@ static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
@@ -294,12 +290,6 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
return minor;
}
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_unref(minor->dev);
@@ -313,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
*
 * There is also deprecated support for initializing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -337,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
-static int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- if (!name)
- return -EINVAL;
-
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -517,12 +497,6 @@ int drm_dev_init(struct drm_device *dev,
goto err_free;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
@@ -568,6 +542,9 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -630,6 +607,9 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_CONTROL);
mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
kfree(dev);
}
@@ -667,6 +647,62 @@ void drm_dev_unref(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unref);
+static int create_compat_control_link(struct drm_device *dev)
+{
+ struct drm_minor *minor;
+ char *name;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
+ if (!minor)
+ return 0;
+
+ /*
+ * Some existing userspace out there uses the existence of the controlD*
+ * sysfs files to figure out whether it's a modeset driver. It only does
+ * readdir, hence a symlink is sufficient (and the least confusing
+ * option). Otherwise controlD* is entirely unused.
+ *
+ * Old controlD chardevs have been allocated in the range
+ * 64-127.
+ */
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
+ if (!name)
+ return -ENOMEM;
+
+ ret = sysfs_create_link(minor->kdev->kobj.parent,
+ &minor->kdev->kobj,
+ name);
+
+ kfree(name);
+
+ return ret;
+}
+
+static void remove_compat_control_link(struct drm_device *dev)
+{
+ struct drm_minor *minor;
+ char *name;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
+ if (!minor)
+ return;
+
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
+ if (!name)
+ return;
+
+ sysfs_remove_link(minor->kdev->kobj.parent, name);
+
+ kfree(name);
+}
+
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
@@ -705,6 +741,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ ret = create_compat_control_link(dev);
+ if (ret)
+ goto err_minors;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -718,6 +758,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
err_minors:
+ remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
@@ -758,12 +799,33 @@ void drm_dev_unregister(struct drm_device *dev)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
+ remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @name: unique name
+ *
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
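/*
 * Minimal sketch of the registration flow that the comment above refers to
 * (not part of this patch): a platform driver that sets a static unique name
 * before publishing the device. "example_driver" is assumed to be the
 * driver's struct drm_driver defined elsewhere, and the usual platform and
 * DRM headers are assumed to be included.
 */
static int example_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&example_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ret = drm_dev_set_unique(ddev, dev_name(&pdev->dev));
	if (ret)
		goto err_unref;

	/* hardware and modeset initialization would go here */

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unref;

	platform_set_drvdata(pdev, ddev);
	return 0;

err_unref:
	drm_dev_unref(ddev);
	return ret;
}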
+
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
new file mode 100644
index 000000000000..8ac5a1c1d811
--- /dev/null
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The KMS API doesn't standardize backing storage object creation and leaves it
+ * to driver-specific ioctls. Furthermore actually creating a buffer object even
+ * for GEM-based drivers is done through a driver-specific ioctl - GEM only has
+ * a common userspace interface for sharing and destroying objects. While not an
+ * issue for full-fledged graphics stacks that include device-specific userspace
+ * components (in libdrm for instance), this limit makes DRM-based early boot
+ * graphics unnecessarily complex.
+ *
+ * Dumb objects partly alleviate the problem by providing a standard API to
+ * create dumb buffers suitable for scanout, which can then be used to create
+ * KMS frame buffers.
+ *
+ * To support dumb objects drivers must implement the dumb_create,
+ * dumb_destroy and dumb_map_offset operations from struct &drm_driver. See
+ * there for further details.
+ *
+ * Note that dumb objects may not be used for gpu acceleration, as has been
+ * attempted on some ARM embedded platforms. Such drivers really must have
+ * a hardware-specific ioctl to allocate suitable buffer objects.
+ */
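/*
 * Userspace-side sketch of the dumb-buffer path described above (not part of
 * this patch): allocate a scanout buffer and map it. The structs and ioctl
 * numbers come from the DRM uapi headers; the include paths and the helper
 * name are assumptions, and error handling is trimmed for brevity.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *example_map_dumb_buffer(int drm_fd, uint32_t width,
				     uint32_t height, uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,		/* e.g. XRGB8888 */
	};
	struct drm_mode_map_dumb map = { 0 };

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return MAP_FAILED;

	map.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	*pitch = create.pitch;
	/* mmap itself rounds the length up to page granularity */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}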
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+ u32 cpp, stride, size;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ if (!args->width || !args->height || !args->bpp)
+ return -EINVAL;
+
+ /* overflow checks for 32bit size calculations */
+ /* NOTE: DIV_ROUND_UP() can overflow */
+ cpp = DIV_ROUND_UP(args->bpp, 8);
+ if (!cpp || cpp > 0xffffffffU / args->width)
+ return -EINVAL;
+ stride = cpp * args->width;
+ if (args->height > 0xffffffffU / stride)
+ return -EINVAL;
+
+ /* test for wrap-around */
+ size = args->height * stride;
+ if (PAGE_ALIGN(size) == 0)
+ return -EINVAL;
+
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly we
+ * cannot reject IOCTL with garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+/**
+ * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Allocate an offset in the drm device node's address space to be able to
+ * memory map a dumb buffer.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
+
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ec77bd3e1f08..336be31ff3de 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -957,13 +957,13 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240 */
+ /* 58 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240 */
+ /* 59 - 720(1440)x480i@240Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
+static void connector_bad_edid(struct drm_connector *connector,
+ u8 *edid, int num_blocks)
+{
+ int i;
+
+ if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ return;
+
+ dev_warn(connector->dev->dev,
+ "%s: EDID is invalid:\n",
+ connector->name);
+ for (i = 0; i < num_blocks; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+ char prefix[20];
+
+ if (drm_edid_is_zero(block, EDID_LENGTH))
+ sprintf(prefix, "\t[%02x] ZERO ", i);
+ else if (!drm_edid_block_valid(block, i, false, NULL))
+ sprintf(prefix, "\t[%02x] BAD ", i);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", i);
+
+ print_hex_dump(KERN_WARNING,
+ prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+ }
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1282,20 +1310,19 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data)
{
int i, j = 0, valid_extensions = 0;
- u8 *block, *new;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ u8 *edid, *new;
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (get_edid_block(data, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0, print_bad_edid,
+ if (drm_edid_block_valid(edid, 0, false,
&connector->edid_corrupt))
break;
- if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
@@ -1304,58 +1331,62 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- return (struct edid *)block;
+ valid_extensions = edid[0x7e];
+ if (valid_extensions == 0)
+ return (struct edid *)edid;
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
+ edid = new;
+
+ for (j = 1; j <= edid[0x7e]; j++) {
+ u8 *block = edid + j * EDID_LENGTH;
- for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (get_edid_block(data,
- block + (valid_extensions + 1) * EDID_LENGTH,
- j, EDID_LENGTH))
+ if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1)
- * EDID_LENGTH, j,
- print_bad_edid,
- NULL)) {
- valid_extensions++;
+ if (drm_edid_block_valid(block, j, false, NULL))
break;
- }
}
- if (i == 4 && print_bad_edid) {
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- connector->name, j);
-
- connector->bad_edid_counter++;
- }
+ if (i == 4)
+ valid_extensions--;
}
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (valid_extensions != edid[0x7e]) {
+ u8 *base;
+
+ connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+ edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+ edid[0x7e] = valid_extensions;
+
+ new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
- }
- return (struct edid *)block;
+ base = new;
+ for (i = 0; i <= edid[0x7e]; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
-carp:
- if (print_bad_edid) {
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- connector->name, j);
+ if (!drm_edid_block_valid(block, i, false, NULL))
+ continue;
+
+ memcpy(base, block, EDID_LENGTH);
+ base += EDID_LENGTH;
+ }
+
+ kfree(edid);
+ edid = new;
}
- connector->bad_edid_counter++;
+ return (struct edid *)edid;
+
+carp:
+ connector_bad_edid(connector, edid, 1);
out:
- kfree(block);
+ kfree(edid);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
@@ -2582,6 +2613,41 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock;
}
+static bool
+cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
+{
+ /*
+ * For certain VICs the spec allows the vertical
+ * front porch to vary by one or two lines.
+ *
+ * cea_modes[] stores the variant with the shortest
+ * vertical front porch. We can adjust the mode to
+ * get the other variants by simply increasing the
+ * vertical front porch length.
+ */
+ BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 ||
+ edid_cea_modes[9].vtotal != 262 ||
+ edid_cea_modes[12].vtotal != 262 ||
+ edid_cea_modes[13].vtotal != 262 ||
+ edid_cea_modes[23].vtotal != 312 ||
+ edid_cea_modes[24].vtotal != 312 ||
+ edid_cea_modes[27].vtotal != 312 ||
+ edid_cea_modes[28].vtotal != 312);
+
+ if (((vic == 8 || vic == 9 ||
+ vic == 12 || vic == 13) && mode->vtotal < 263) ||
+ ((vic == 23 || vic == 24 ||
+ vic == 27 || vic == 28) && mode->vtotal < 314)) {
+ mode->vsync_start++;
+ mode->vsync_end++;
+ mode->vtotal++;
+
+ return true;
+ }
+
+ return false;
+}
+
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
@@ -2591,19 +2657,21 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
- if (drm_mode_equal_no_clocks(to_match, cea_mode))
- return vic;
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
return 0;
@@ -2624,18 +2692,23 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
return 0;
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
- const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
+ struct drm_display_mode cea_mode = edid_cea_modes[vic];
unsigned int clock1, clock2;
/* Check both 60Hz and 59.94Hz */
- clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
+ clock1 = cea_mode.clock;
+ clock2 = cea_mode_alternate_clock(&cea_mode);
- if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
- KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
- drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
- return vic;
+ if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) &&
+ KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2))
+ continue;
+
+ do {
+ if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+ return vic;
+ } while (cea_mode_alternate_timings(vic, &cea_mode));
}
+
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
@@ -3580,32 +3653,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
EXPORT_SYMBOL(drm_av_sync_delay);
/**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder && connector->eld[0])
- return connector;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 5c067719164d..992879f15f23 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -110,11 +110,9 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
- drm_modeset_lock_all(dev);
-
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
- goto out_unlock;
+ return ret;
encoder->dev = dev;
encoder->encoder_type = encoder_type;
@@ -142,9 +140,6 @@ out_put:
if (ret)
drm_mode_object_unregister(dev, &encoder->base);
-out_unlock:
- drm_modeset_unlock_all(dev);
-
return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
@@ -164,12 +159,10 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
* the indices on the drm_encoder after us in the encoder_list.
*/
- drm_modeset_lock_all(dev);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
- drm_modeset_unlock_all(dev);
memset(encoder, 0, sizeof(*encoder));
}
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac1400c..81b3558302b5 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -176,20 +179,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
+ const struct drm_format_info *info;
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
struct drm_gem_object *obj;
- unsigned int hsub;
- unsigned int vsub;
int ret;
int i;
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info)
+ return ERR_PTR(-EINVAL);
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -200,7 +203,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
@@ -265,16 +268,51 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state attach fence to
+ *
+ * This should be put into prepare_fb hook of struct &drm_plane_helper_funcs .
+ *
+ * This function checks if the plane FB has an dma-buf attached, extracts
+ * the exclusive fence and attaches it to plane state for the atomic helper
+ * to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
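/*
 * Sketch of how a CMA-based driver would wire the helper above into its
 * plane (not part of this patch); "example_plane_atomic_update" is a stand-in
 * for the driver's real atomic_update implementation, and the modeset helper
 * and CMA helper headers are assumed to be included.
 */
static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	/* the driver's real scanout programming would go here */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb = drm_fb_cma_prepare_fb,
	.atomic_update = example_plane_atomic_update,
};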
+
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ const struct drm_format_info *info;
+ int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->pixel_format);
- for (i = 0; i < n; i++) {
+ info = drm_format_info(fb->pixel_format);
+
+ for (i = 0; i < info->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -311,14 +349,10 @@ static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
.fb_mmap = drm_fb_cma_mmap,
};
@@ -557,7 +591,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+ if (fbdev_cma->fb_helper.fbdev)
+ drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 03414bde1f15..e934b541feea 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,6 +49,7 @@ MODULE_PARM_DESC(fbdev_emulation,
"Enable legacy fbdev emulation [default=true]");
static LIST_HEAD(kernel_fb_helper_list);
+static DEFINE_MUTEX(kernel_fb_helper_lock);
/**
* DOC: fbdev helpers
@@ -97,6 +98,10 @@ static LIST_HEAD(kernel_fb_helper_list);
* mmap page writes.
*/
+#define drm_fb_helper_for_each_connector(fbh, i__) \
+ for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
+ i__ = 0; i__ < (fbh)->connector_count; i__++)
+
/**
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
* emulation helper
@@ -130,8 +135,13 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
mutex_unlock(&dev->mode_config.mutex);
return 0;
fail:
- for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
+ drm_fb_helper_for_each_connector(fb_helper, i) {
+ struct drm_fb_helper_connector *fb_helper_connector =
+ fb_helper->connector_info[i];
+
+ drm_connector_unreference(fb_helper_connector->connector);
+
+ kfree(fb_helper_connector);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
@@ -251,6 +261,9 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
@@ -304,6 +317,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ if (funcs->mode_set_base_atomic == NULL)
+ continue;
+
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -367,9 +383,7 @@ fail:
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -394,11 +408,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
- if (dev->mode_config.rotation_property) {
+ if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
- dev->mode_config.rotation_property,
+ plane->rotation_property,
DRM_ROTATE_0);
- }
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -557,7 +570,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
continue;
/* Walk the connectors & encoders on this fb turning them on/off */
- for (j = 0; j < fb_helper->connector_count; j++) {
+ drm_fb_helper_for_each_connector(fb_helper, j) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
@@ -603,6 +616,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+ struct drm_mode_set *modeset)
+{
+ int i;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_unreference(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+
+ drm_mode_destroy(helper->dev, modeset->mode);
+ modeset->mode = NULL;
+
+ /* FIXME should hold a ref? */
+ modeset->fb = NULL;
+}
+
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
@@ -612,10 +643,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->connector_info[i]);
}
kfree(helper->connector_info);
+
for (i = 0; i < helper->crtc_count; i++) {
- kfree(helper->crtc_info[i].mode_set.connectors);
- if (helper->crtc_info[i].mode_set.mode)
- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+ drm_fb_helper_modeset_release(helper, modeset);
+ kfree(modeset->connectors);
}
kfree(helper->crtc_info);
}
@@ -644,7 +677,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ /* call dirty callback only when it has been really touched */
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}
/**
@@ -821,12 +856,14 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation)
return;
+ mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
}
+ mutex_unlock(&kernel_fb_helper_lock);
drm_fb_helper_crtc_free(fb_helper);
@@ -1211,11 +1248,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
- /* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel ||
- var->xres > fb->width || var->yres > fb->height ||
- var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ /*
+ * Changes to struct fb_var_screeninfo are currently not pushed back
+ * to KMS, hence fail if different settings are requested.
+ */
+ if (var->bits_per_pixel != fb->bits_per_pixel ||
+ var->xres != fb->width || var->yres != fb->height ||
+ var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1361,16 +1401,13 @@ retry:
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
-
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -1439,7 +1476,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int ret = 0;
int crtc_count = 0;
int i;
- struct fb_info *info;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
@@ -1455,7 +1491,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
@@ -1542,8 +1578,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
- info = fb_helper->fbdev;
-
/*
* Set the fb pointer - usually drm_setup_crtcs does this for hotplug
* events, but at init time drm_setup_crtcs needs to be called before
@@ -1555,20 +1589,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (fb_helper->crtc_info[i].mode_set.num_connectors)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-
- info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
-
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
-
- if (list_empty(&kernel_fb_helper_list)) {
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
-
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
-
return 0;
}
@@ -1700,7 +1720,7 @@ static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
int count = 0;
int i;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
count += connector->funcs->fill_modes(connector, maxX, maxY);
}
@@ -1800,7 +1820,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
struct drm_connector *connector;
int i = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
@@ -1811,7 +1831,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
if (any_enabled)
return;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, false);
}
@@ -1832,7 +1852,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
return false;
count = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (enabled[i])
count++;
}
@@ -1843,7 +1863,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (!enabled[i])
continue;
fb_helper_conn = fb_helper->connector_info[i];
@@ -1869,8 +1889,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
can_clone = true;
dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
- for (i = 0; i < fb_helper->connector_count; i++) {
-
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (!enabled[i])
continue;
@@ -1901,7 +1920,7 @@ static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
int i;
int hoffset = 0, voffset = 0;
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
fb_helper_conn = fb_helper->connector_info[i];
if (!fb_helper_conn->connector->has_tile)
continue;
@@ -1929,19 +1948,20 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
- int i;
- uint64_t conn_configured = 0, mask;
+ const u64 mask = BIT_ULL(fb_helper->connector_count) - 1;
+ u64 conn_configured = 0;
int tile_pass = 0;
- mask = (1 << fb_helper->connector_count) - 1;
+ int i;
+
retry:
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
fb_helper_conn = fb_helper->connector_info[i];
- if (conn_configured & (1 << i))
+ if (conn_configured & BIT_ULL(i))
continue;
if (enabled[i] == false) {
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
continue;
}
@@ -1982,7 +2002,7 @@ retry:
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
- conn_configured |= (1 << i);
+ conn_configured |= BIT_ULL(i);
}
if ((conn_configured & mask) != mask) {
@@ -2082,21 +2102,22 @@ out:
return best_score;
}
-static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
+ u32 width, u32 height)
{
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
- struct drm_mode_set *modeset;
bool *enabled;
- int width, height;
int i;
DRM_DEBUG_KMS("\n");
+ if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
+ DRM_DEBUG_KMS("No connectors reported connected with modes\n");
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
+ /* prevent concurrent modification of connector_count by hotplug */
+ lockdep_assert_held(&fb_helper->dev->mode_config.mutex);
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
@@ -2111,7 +2132,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
goto out;
}
-
drm_enable_connectors(fb_helper, enabled);
if (!(fb_helper->funcs->initial_config &&
@@ -2136,45 +2156,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- modeset->fb = NULL;
- }
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ drm_fb_helper_modeset_release(fb_helper,
+ &fb_helper->crtc_info[i].mode_set);
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- modeset = &fb_crtc->mode_set;
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ drm_connector_reference(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
-
- /* Clear out any old modes if there are no more connected outputs. */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- if (modeset->num_connectors == 0) {
- BUG_ON(modeset->fb);
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = NULL;
- }
- }
out:
kfree(crtcs);
kfree(modes);
@@ -2227,25 +2237,38 @@ out:
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
- int count = 0;
+ struct fb_info *info;
+ int ret;
if (!drm_fbdev_emulation)
return 0;
mutex_lock(&dev->mode_config.mutex);
- count = drm_fb_helper_probe_connector_modes(fb_helper,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
+ drm_setup_crtcs(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
mutex_unlock(&dev->mode_config.mutex);
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+ if (ret)
+ return ret;
+
+ info = fb_helper->fbdev;
+ info->var.pixclock = 0;
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+
+ mutex_lock(&kernel_fb_helper_lock);
+ if (list_empty(&kernel_fb_helper_list))
+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- drm_setup_crtcs(fb_helper);
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ mutex_unlock(&kernel_fb_helper_lock);
- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ return 0;
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
@@ -2273,28 +2296,22 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- u32 max_width, max_height;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&fb_helper->dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.mutex);
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
- max_width = fb_helper->fb->width;
- max_height = fb_helper->fb->height;
+ drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height);
- drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.mutex);
- drm_modeset_lock_all(dev);
- drm_setup_crtcs(fb_helper);
- drm_modeset_unlock_all(dev);
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
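
With this rework drm_fb_helper_initial_config() now probes connector modes, sets up the CRTCs and registers the fbdev framebuffer itself, so a driver's fbdev setup and its output-poll hook reduce to roughly the sketch below (the fb_helper pointer and the 32 bpp choice are assumptions, not part of this patch):

	/* at driver load, after CRTCs/connectors are registered */
	ret = drm_fb_helper_initial_config(fb_helper, 32);
	if (ret)
		DRM_DEBUG_KMS("fbdev setup failed: %d\n", ret);

	/* later, from the driver's ->output_poll_changed() callback */
	drm_fb_helper_hotplug_event(fb_helper);
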
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faecf5225..5d96de40b63f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -51,10 +51,11 @@ DEFINE_MUTEX(drm_global_mutex);
* Drivers must define the file operations structure that forms the DRM
* userspace API entry point, even though most of those operations are
* implemented in the DRM core. The mandatory functions are drm_open(),
- * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
- * Drivers which implement private ioctls that require 32/64 bit compatibility
- * support must provided their onw .compat_ioctl() handler that processes
- * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
+ * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
+ * implement private ioctls that require 32/64 bit compatibility support must
+ * provide their own .compat_ioctl() handler that processes private ioctls and
+ * calls drm_compat_ioctl() for core ioctls.
*
* In addition drm_read() and drm_poll() provide support for DRM events. DRM
* events are a generic and extensible means to send asynchronous events to
@@ -75,9 +76,7 @@ DEFINE_MUTEX(drm_global_mutex);
* .open = drm_open,
* .release = drm_release,
* .unlocked_ioctl = drm_ioctl,
- * #ifdef CONFIG_COMPAT
- * .compat_ioctl = drm_compat_ioctl,
- * #endif
+ * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
* .llseek = no_llseek,
@@ -663,6 +662,10 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (p->fence)
+ dma_fence_put(p->fence);
+
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -692,8 +695,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- fence_signal(e->fence);
- fence_put(e->fence);
+ dma_fence_signal(e->fence);
+ dma_fence_put(e->fence);
}
if (!e->file_priv) {
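
The updated kerneldoc above means drivers no longer need an #ifdef around .compat_ioctl; a minimal file_operations table mirroring the example in that comment might look like this (example_drm_fops is a hypothetical name, not from this patch):

	#include <drm/drmP.h>

	static const struct file_operations example_drm_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl, /* NULL if CONFIG_COMPAT=n */
		.poll		= drm_poll,
		.read		= drm_read,
		.llseek		= no_llseek,
	};
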
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 29c56b4331e0..90d2cc8da8eb 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -79,17 +79,13 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
/**
- * drm_get_format_name - return a string for drm fourcc format
+ * drm_get_format_name - fill a string with a drm fourcc format's name
* @format: format to compute name of
- *
- * Note that the buffer returned by this function is owned by the caller
- * and will need to be freed using kfree().
+ * @buf: caller-supplied buffer
*/
-char *drm_get_format_name(uint32_t format)
+const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
{
- char *buf = kmalloc(32, GFP_KERNEL);
-
- snprintf(buf, 32,
+ snprintf(buf->str, sizeof(buf->str),
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
@@ -98,87 +94,109 @@ char *drm_get_format_name(uint32_t format)
format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
format);
- return buf;
+ return buf->str;
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
+ */
+const struct drm_format_info *__drm_format_info(u32 format)
+{
+ static const struct drm_format_info formats[] = {
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
*
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
+ * The caller should only pass a supported pixel format to this function.
+ * Unsupported pixel formats will generate a warning in the kernel log.
+ *
+ * Returns:
+ * The instance of struct drm_format_info that describes the pixel format, or
+ * NULL if the format is unsupported.
*/
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
+const struct drm_format_info *drm_format_info(u32 format)
{
- char *format_name;
+ const struct drm_format_info *info;
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- format_name = drm_get_format_name(format);
- DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
- kfree(format_name);
- *depth = 0;
- *bpp = 0;
- break;
- }
+ info = __drm_format_info(format);
+ WARN_ON(!info);
+ return info;
}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+EXPORT_SYMBOL(drm_format_info);
/**
* drm_format_num_planes - get the number of planes for format
@@ -189,28 +207,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
*/
int drm_format_num_planes(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->num_planes : 1;
}
EXPORT_SYMBOL(drm_format_num_planes);
@@ -224,40 +224,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
- unsigned int depth;
- int bpp;
+ const struct drm_format_info *info;
- if (plane >= drm_format_num_planes(format))
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
+ return info->cpp[plane];
}
EXPORT_SYMBOL(drm_format_plane_cpp);
@@ -271,28 +244,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->hsub : 1;
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
@@ -306,18 +261,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->vsub : 1;
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
@@ -332,13 +279,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
*/
int drm_format_plane_width(int width, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
- return width / drm_format_horz_chroma_subsampling(format);
+ return width / info->hsub;
}
EXPORT_SYMBOL(drm_format_plane_width);
@@ -353,12 +303,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
*/
int drm_format_plane_height(int height, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
- return height / drm_format_vert_chroma_subsampling(format);
+ return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
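
To illustrate the new lookup API introduced above: the caller now supplies the name buffer and gets a const format descriptor back. A minimal sketch, assuming a debug path somewhere in a driver:

	const struct drm_format_info *info;
	struct drm_format_name_buf name;

	info = drm_format_info(DRM_FORMAT_NV12);
	if (info)
		DRM_DEBUG_KMS("%s: %d planes, %dx%d chroma subsampling\n",
			      drm_get_format_name(DRM_FORMAT_NV12, &name),
			      info->num_planes, info->hsub, info->vsub);
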
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 398efd67cb93..cbf0c893f426 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -126,111 +126,34 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
- uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
- char *format_name;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 0;
- default:
- format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
- kfree(format_name);
- return -EINVAL;
- }
-}
-
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
- int ret, hsub, vsub, num_planes, i;
-
- ret = format_check(r);
- if (ret) {
- char *format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
- kfree(format_name);
- return ret;
+ const struct drm_format_info *info;
+ int i;
+
+ info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+ if (!info) {
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format,
+ &format_name));
+ return -EINVAL;
}
- hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
- num_planes = drm_format_num_planes(r->pixel_format);
-
- if (r->width == 0 || r->width % hsub) {
+ if (r->width == 0 || r->width % info->hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
- if (r->height == 0 || r->height % vsub) {
+ if (r->height == 0 || r->height % info->vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
- for (i = 0; i < num_planes; i++) {
- unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? info->hsub : 1);
+ unsigned int height = r->height / (i != 0 ? info->vsub : 1);
+ unsigned int cpp = info->cpp[i];
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@@ -254,6 +177,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return -EINVAL;
}
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ r->modifier[i] != r->modifier[0]) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
+
/* modifier specific checks: */
switch (r->modifier[i]) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -273,7 +203,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
}
}
- for (i = num_planes; i < 4; i++) {
+ for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;
@@ -751,6 +681,11 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
 * those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
+ *
+ * NOTE: This function is deprecated. For driver-private framebuffers it is not
+ * recommended to embed a framebuffer struct into the fbdev struct; instead, a
+ * framebuffer pointer is preferred and drm_framebuffer_unreference() should be
+ * called when the framebuffer is to be cleaned up.
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
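
The per-plane arithmetic used in framebuffer_check() above can serve drivers directly as well; a minimal sketch of computing the byte size of one plane from the new descriptor (example_plane_size is a hypothetical helper):

	static u64 example_plane_size(u32 format, int plane,
				      unsigned int width, unsigned int height)
	{
		const struct drm_format_info *info = drm_format_info(format);

		if (!info || plane >= info->num_planes)
			return 0;

		width = width / (plane ? info->hsub : 1);
		height = height / (plane ? info->vsub : 1);

		return (u64)width * info->cpp[plane] * height;
	}
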
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e66af289a016..db80ec860e33 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -24,9 +24,6 @@
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
-/* drm_irq.c */
-extern unsigned int drm_timestamp_monotonic;
-
/* drm_fops.c */
extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
@@ -46,12 +43,21 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
+/* drm_drv.c */
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_irq.c */
+extern unsigned int drm_timestamp_monotonic;
+
+/* IOCTLS */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modeset_ctl(struct drm_device *dev, void *data,
@@ -100,6 +106,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
+int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -119,4 +128,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
+
+static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
+static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+}
+
+static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0ad2c47f808f..fed22c2b98b6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -229,6 +229,22 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_crtc *crtc;
req->value = 0;
+
+ /* Only some caps make sense with UMS/render-only drivers. */
+ switch (req->capability) {
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ return 0;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ return 0;
+ }
+
+ /* Other caps only work with KMS drivers */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENOTSUPP;
+
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
if (dev->driver->dumb_create)
@@ -243,13 +259,6 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
- case DRM_CAP_PRIME:
- req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
- req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
- break;
- case DRM_CAP_TIMESTAMP_MONOTONIC:
- req->value = drm_timestamp_monotonic;
- break;
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
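
From userspace, the effect of this reordering is that DRM_CAP_PRIME and DRM_CAP_TIMESTAMP_MONOTONIC can be queried even on render-only drivers; a rough libdrm sketch (fd is assumed to be an already-open DRM file descriptor):

	#include <stdio.h>
	#include <xf86drm.h>

	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_PRIME, &cap) == 0 &&
	    (cap & DRM_PRIME_CAP_IMPORT))
		printf("driver can import dma-bufs\n");
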
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b969a64a1514..273625a85036 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -93,7 +93,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
- * Only to be called from drm_vblank_on().
+ * Only to be called from drm_crtc_vblank_on().
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
@@ -234,6 +234,16 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return 0;
+
+ return vblank->count;
+}
+
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
@@ -296,7 +306,7 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
* Always update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
- * between drm_vblank_on() and drm_vblank_off().
+ * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, 0);
@@ -888,31 +898,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
}
/**
- * drm_vblank_count - retrieve "cooked" vblank counter value
- * @dev: DRM device
- * @pipe: index of CRTC for which to retrieve the counter
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- *
- * This is the legacy version of drm_crtc_vblank_count().
- *
- * Returns:
- * The software vblank counter.
- */
-u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return 0;
-
- return vblank->count;
-}
-EXPORT_SYMBOL(drm_vblank_count);
-
-/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
@@ -920,8 +905,6 @@ EXPORT_SYMBOL(drm_vblank_count);
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
- * This is the native KMS version of drm_vblank_count().
- *
* Returns:
* The software vblank counter.
*/
@@ -952,8 +935,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u32 vblank_count;
unsigned int seq;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs)) {
+ *vblanktime = (struct timeval) { 0 };
return 0;
+ }
do {
seq = read_seqbegin(&vblank->seqlock);
@@ -1270,21 +1255,20 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
- * drm_vblank_off - disable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on() can restore it again.
+ * stored so that drm_crtc_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
- *
- * This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1300,7 +1284,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
- /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ /* Avoid redundant vblank disables without previous
+ * drm_crtc_vblank_on(). */
if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
vblank_disable_and_save(dev, pipe);
@@ -1331,25 +1316,6 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_off);
-
-/**
- * drm_crtc_vblank_off - disable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to shut down the vblank interrupt handling when
- * disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_vblank_on can restore it again.
- *
- * Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
- *
- * This is the native kms version of drm_vblank_off().
- */
-void drm_crtc_vblank_off(struct drm_crtc *crtc)
-{
- drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
@@ -1385,19 +1351,18 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
- * drm_vblank_on - enable vblank events on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
*
 * This function restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
+ * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
+ * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1424,49 +1389,10 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-EXPORT_SYMBOL(drm_vblank_on);
-
-/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * This functions restores the vblank interrupt state captured with
- * drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
- *
- * This is the native kms version of drm_vblank_on().
- */
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
-{
- drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
-}
EXPORT_SYMBOL(drm_crtc_vblank_on);
-/**
- * drm_vblank_pre_modeset - account for vblanks across mode sets
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Account for vblank events across mode setting events, which will likely
- * reset the hardware frame counter.
- *
- * This is done by grabbing a temporary vblank reference to ensure that the
- * vblank interrupt keeps running across the modeset sequence. With this the
- * software-side vblank frame counting will ensure that there are no jumps or
- * discontinuities.
- *
- * Unfortunately this approach is racy and also doesn't work when the vblank
- * interrupt stops running, e.g. across system suspend resume. It is therefore
- * highly recommended that drivers use the newer drm_vblank_off() and
- * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
- * using "cooked" software vblank frame counters and not relying on any hardware
- * counters.
- *
- * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
- * again.
- */
-void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1490,17 +1416,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset |= 0x2;
}
}
-EXPORT_SYMBOL(drm_vblank_pre_modeset);
-/**
- * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * This function again drops the temporary vblank reference acquired in
- * drm_vblank_pre_modeset.
- */
-void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
+ unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1523,7 +1441,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
vblank->inmodeset = 0;
}
}
-EXPORT_SYMBOL(drm_vblank_post_modeset);
/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
@@ -1556,10 +1473,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, pipe);
+ drm_legacy_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, pipe);
+ drm_legacy_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1594,11 +1511,10 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
/*
- * drm_vblank_off() might have been called after we called
- * drm_vblank_get(). drm_vblank_off() holds event_lock
- * around the vblank disable, so no need for further locking.
- * The reference from drm_vblank_get() protects against
- * vblank disable from another source.
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_vblank_get() protects against vblank disable from another source.
*/
if (!vblank->enabled) {
ret = -EINVAL;
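
With the legacy pipe-based helpers removed, drivers call the CRTC variants directly from their enable/disable paths; a minimal sketch (the example_* hook names are hypothetical):

	static void example_crtc_enable(struct drm_crtc *crtc)
	{
		/* ... power up and program the pipe ... */
		drm_crtc_vblank_on(crtc);
	}

	static void example_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc); /* also flushes pending vblank events */
		/* ... shut the pipe down ... */
	}
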
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c901f3c5b269..32d43f86a8f2 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -176,7 +176,8 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
+ master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
+ lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 11d44a1e0ab3..ca1e344f318d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -104,6 +104,68 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#ifdef CONFIG_DRM_DEBUG_MM
+#include <linux/stackdepot.h>
+
+#define STACKDEPTH 32
+#define BUFSZ 4096
+
+static noinline void save_stack(struct drm_mm_node *node)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH,
+ .skip = 1
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /* May be called under spinlock, so avoid sleeping */
+ node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+}
+
+static void show_leaks(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+ unsigned long entries[STACKDEPTH];
+ char *buf;
+
+ buf = kmalloc(BUFSZ, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = STACKDEPTH
+ };
+
+ if (!node->stack) {
+ DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
+ node->start, node->size);
+ continue;
+ }
+
+ depot_fetch_stack(node->stack, &trace);
+ snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
+ node->start, node->size, buf);
+ }
+
+ kfree(buf);
+}
+
+#undef STACKDEPTH
+#undef BUFSZ
+#else
+static void save_stack(struct drm_mm_node *node) { }
+static void show_leaks(struct drm_mm *mm) { }
+#endif
+
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)
@@ -112,19 +174,12 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
-drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
return drm_mm_interval_tree_iter_first(&mm->interval_tree,
start, last);
}
-EXPORT_SYMBOL(drm_mm_interval_first);
-
-struct drm_mm_node *
-drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
-{
- return drm_mm_interval_tree_iter_next(node, start, last);
-}
-EXPORT_SYMBOL(drm_mm_interval_next);
+EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm_node *node)
@@ -228,6 +283,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -249,6 +306,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
+ u64 adj_start, adj_end;
if (WARN_ON(node->size == 0))
return -EINVAL;
@@ -270,9 +328,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
if (!hole->hole_follows)
return -ENOSPC;
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = __drm_mm_hole_node_end(hole);
- if (hole_start > node->start || hole_end < end)
+ adj_start = hole_start = __drm_mm_hole_node_start(hole);
+ adj_end = hole_end = __drm_mm_hole_node_end(hole);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole, node->color, &adj_start, &adj_end);
+
+ if (adj_start > node->start || adj_end < end)
return -ENOSPC;
node->mm = mm;
@@ -293,6 +355,8 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->hole_follows = 1;
}
+ save_stack(node);
+
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -397,6 +461,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
+
+ save_stack(node);
}
/**
@@ -839,6 +905,7 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
+ mm->head_node.allocated = 0;
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
@@ -861,10 +928,12 @@ EXPORT_SYMBOL(drm_mm_init);
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_mm_takedown(struct drm_mm *mm)
{
- WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean during takedown.\n");
+ if (WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n"))
+ show_leaks(mm);
+
}
EXPORT_SYMBOL(drm_mm_takedown);
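
The new CONFIG_DRM_DEBUG_MM leak report only fires when nodes are still allocated at takedown; the expected usage pattern is sketched below (the 4 KiB node and 1 MiB range are assumptions):

	struct drm_mm mm;
	struct drm_mm_node node = { .start = 0, .size = 4096 };

	drm_mm_init(&mm, 0, 1 << 20);

	if (drm_mm_reserve_node(&mm, &node) == 0) {
		/* ... use [node.start, node.start + node.size) ... */
		drm_mm_remove_node(&node);
	}

	drm_mm_takedown(&mm); /* WARNs and dumps insertion stacks if nodes remain */
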
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
new file mode 100644
index 000000000000..2735a5847ffa
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_mode_config.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+
+ mutex_lock(&file_priv->fbs_lock);
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
+
+ drm_for_each_connector(connector, dev)
+ connector_count++;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ drm_for_each_crtc(crtc, dev) {
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ drm_for_each_encoder(encoder, dev) {
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ drm_for_each_connector(connector, dev) {
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_config_reset - call ->reset callbacks
+ * @dev: drm device
+ *
+ * This function calls the ->reset callback of every plane, crtc, encoder and
+ * connector. Drivers can use this in e.g. their driver load or resume code to
+ * reset hardware and software state.
+ */
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
+
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
+ { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
+ { DRM_PLANE_TYPE_PRIMARY, "Primary" },
+ { DRM_PLANE_TYPE_CURSOR, "Cursor" },
+};
+
+static int drm_mode_create_standard_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_connector_create_standard_properties(dev);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "type", drm_plane_type_enum_list,
+ ARRAY_SIZE(drm_plane_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.plane_type_property = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_X", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_x = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_Y", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_W", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "SRC_H", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_src_h = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_X", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_x = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_Y", INT_MIN, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_y = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_W", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_w = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_H", 0, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_h = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "FB_ID", DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_id = prop;
+
+ prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+ "IN_FENCE_FD", -1, INT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_in_fence_fd = prop;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "OUT_FENCE_PTR", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_out_fence_ptr = prop;
+
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "CRTC_ID", DRM_MODE_OBJECT_CRTC);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_crtc_id = prop;
+
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "ACTIVE");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_active = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "MODE_ID", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_mode_id = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks, no locking is possible here, which
+ * is no problem since this should happen single-threaded at init time. It is
+ * the driver's job to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ mutex_init(&dev->mode_config.idr_mutex);
+ mutex_init(&dev->mode_config.fb_lock);
+ mutex_init(&dev->mode_config.blob_lock);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
+ ida_init(&dev->mode_config.connector_ida);
+
+ drm_modeset_lock_all(dev);
+ drm_mode_create_standard_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+ dev->mode_config.num_overlay_plane = 0;
+ dev->mode_config.num_total_plane = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+ list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+ head_global) {
+ drm_property_unreference_blob(blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. On the contrary,
+ * doing so would deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_free(&fb->base.refcount);
+ }
+
+ ida_destroy(&dev->mode_config.connector_ida);
+ idr_destroy(&dev->mode_config.tile_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+ drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
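drm_mode_config_init() and drm_mode_config_cleanup() are meant to bracket a driver's KMS setup and teardown, both single-threaded. A minimal sketch of that pairing, assuming hypothetical my_driver_* helpers and a trivial mode_config_funcs table:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static const struct drm_mode_config_funcs my_mode_config_funcs = {
        /* .fb_create, .atomic_check, .atomic_commit would go here */
};

static int my_driver_modeset_init(struct drm_device *dev)
{
        drm_mode_config_init(dev);      /* at load time, no locking needed */

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 4096;
        dev->mode_config.max_height = 4096;
        dev->mode_config.funcs = &my_mode_config_funcs;

        /* ... create CRTCs, planes, encoders and connectors here ... */

        return 0;
}

static void my_driver_modeset_fini(struct drm_device *dev)
{
        /* destroys every remaining object plus the standard properties */
        drm_mode_config_cleanup(dev);
}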
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 53f07ac7c174..ac6a35212501 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -49,13 +49,7 @@
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
- "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
@@ -165,6 +159,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
+ u64 tmp;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
@@ -322,8 +317,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
+ tmp *= HV_FACTOR * 1000;
+ do_div(tmp, hperiod);
+ drm_mode->clock = tmp;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
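The clock calculation above now widens to 64 bits before multiplying so that htotal * HV_FACTOR * 1000 cannot overflow, and it uses do_div() because a plain 64-by-32 division is not available on 32-bit kernels. A hedged sketch of the same pattern in isolation (names hypothetical):

#include <linux/kernel.h>
#include <asm/div64.h>

/* clock_khz = htotal * factor * 1000 / hperiod, overflow-safe */
static u32 my_scale_clock(u32 htotal, u32 factor, u32 hperiod)
{
        u64 tmp = htotal;               /* widen before multiplying */

        tmp *= factor * 1000ULL;        /* 64-bit product */
        do_div(tmp, hperiod);           /* divides tmp in place, returns remainder */

        return lower_32_bits(tmp);
}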
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 1d45738f8f98..cc232ac6c950 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -38,7 +38,7 @@
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
+ * (eDP/LVDS/DSI) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
@@ -51,7 +51,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DSI)
list_move_tail(&connector->head, &panel_list);
}
@@ -70,17 +71,31 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info;
int i;
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("non-RGB pixel format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+
+ fb->depth = 0;
+ fb->bits_per_pixel = 0;
+ } else {
+ fb->depth = info->depth;
+ fb->bits_per_pixel = info->cpp[0] * 8;
+ }
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
- fb->modifier[i] = mode_cmd->modifier[i];
}
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
- &fb->bits_per_pixel);
+ fb->modifier = mode_cmd->modifier[0];
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}
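The helper now derives depth and bits-per-pixel from the format info table instead of the removed drm_fb_get_bpp_depth(). A hedged sketch of the same lookup in a driver, assuming the fourcc code comes from userspace:

#include <drm/drm_fourcc.h>

/* hypothetical: derive legacy bpp for an RGB fourcc, or reject it */
static int my_format_to_bpp(u32 pixel_format)
{
        const struct drm_format_info *info = drm_format_info(pixel_format);

        if (!info || !info->depth)
                return -EINVAL;         /* unknown or non-RGB (e.g. YUV) */

        /* info->cpp[0] is bytes per pixel of plane 0 */
        return info->cpp[0] * 8;
}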
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 61146f5b4f56..3551ae31f143 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -52,14 +52,16 @@
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*
- * On top of of these per-object locks using &ww_mutex there's also an overall
- * dev->mode_config.lock, for protecting everything else. Mostly this means
- * probe state of connectors, and preventing hotplug add/removal of connectors.
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
*
- * Finally there's a bunch of dedicated locks to protect drm core internal
- * lists and lookup data structures.
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
*/
+static DEFINE_WW_CLASS(crtc_ww_class);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: DRM device
@@ -398,6 +400,17 @@ int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
+ * drm_modeset_lock_init - initialize lock
+ * @lock: lock to init
+ */
+void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+ ww_mutex_init(&lock->mutex, &crtc_ww_class);
+ INIT_LIST_HEAD(&lock->head);
+}
+EXPORT_SYMBOL(drm_modeset_lock_init);
+
+/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
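The DOC comment at the top of this hunk only shows the acquire-context idiom in fragments. A fuller sketch of the retry loop, assuming the caller needs just one CRTC's lock; drm_modeset_backoff() blocks on the contended lock before the retry:

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static int my_lock_one_crtc(struct drm_crtc *crtc)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(&crtc->mutex, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);      /* wait, then start over */
                goto retry;
        }

        /* ... touch the CRTC state protected by this lock ... */

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        return ret;
}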
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb94264d..47848ed8ca48 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+static void drm_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
/**
* drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node
+ */
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+ of_node_get(node);
+ component_match_add_release(master, matchptr, drm_release_of,
+ compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, port);
+ drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
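drm_of_component_match_add() takes its own reference on the OF node and registers drm_release_of() so the reference is dropped when the match data is freed; callers keep their usual of_node_put(). A hedged sketch of a component master probe built on it (the my_* names and the "ports" phandle list are assumptions):

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_of.h>

static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}

static int my_master_bind(struct device *dev) { return 0; }
static void my_master_unbind(struct device *dev) { }

static const struct component_master_ops my_master_ops = {
        .bind   = my_master_bind,
        .unbind = my_master_unbind,
};

static int my_master_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;
        struct device_node *node;
        int i;

        for (i = 0; ; i++) {
                node = of_parse_phandle(pdev->dev.of_node, "ports", i);
                if (!node)
                        break;
                /* grabs its own of_node reference, released with the match */
                drm_of_component_match_add(&pdev->dev, &match, compare_of, node);
                of_node_put(node);
        }

        return component_master_add_with_match(&pdev->dev, &my_master_ops, match);
}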
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae52c6d..62b98f386fd1 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -79,7 +79,7 @@ static unsigned int drm_num_planes(struct drm_device *dev)
* Zero on success, error code on failure.
*/
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type,
@@ -137,6 +137,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
@@ -195,7 +196,7 @@ void drm_plane_unregister_all(struct drm_device *dev)
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
+ uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
bool is_primary)
@@ -220,7 +221,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_fini(&plane->mutex);
+
kfree(plane->format_types);
drm_mode_object_unregister(dev, &plane->base);
@@ -235,7 +237,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
- drm_modeset_unlock_all(dev);
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
@@ -479,9 +480,10 @@ static int __setplane_internal(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
if (ret) {
- char *format_name = drm_get_format_name(fb->pixel_format);
- DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
- kfree(format_name);
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->pixel_format,
+ &format_name));
goto out;
}
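drm_get_format_name() now writes into a caller-provided struct drm_format_name_buf instead of returning a kmalloc'd string, so there is no allocation to free or leak. A tiny usage sketch:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void my_log_format(u32 fourcc)
{
        struct drm_format_name_buf name;        /* lives on the stack */

        DRM_DEBUG_KMS("using pixel format %s\n",
                      drm_get_format_name(fourcc, &name));
}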
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7899fc1dcdb0..7a7dddf604d7 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -130,15 +130,8 @@ int drm_plane_helper_check_state(struct drm_plane_state *state,
unsigned int rotation = state->rotation;
int hscale, vscale;
- src->x1 = state->src_x;
- src->y1 = state->src_y;
- src->x2 = state->src_x + state->src_w;
- src->y2 = state->src_y + state->src_h;
-
- dst->x1 = state->crtc_x;
- dst->y1 = state->crtc_y;
- dst->x2 = state->crtc_x + state->crtc_w;
- dst->y2 = state->crtc_y + state->crtc_h;
+ *src = drm_plane_state_src(state);
+ *dst = drm_plane_state_dest(state);
if (!fb) {
state->visible = false;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index b22a94dd7b53..8d77b2462594 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -290,7 +290,8 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
*
* This wraps dma_buf_export() for use by generic GEM drivers that are using
* drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
- * a reference to the drm_device which is released by drm_gem_dmabuf_release().
+ * references to the &drm_device and to the exported &drm_gem_object (stored
+ * in exp_info->priv), both of which are released by drm_gem_dmabuf_release().
*
* Returns the new dmabuf.
*/
@@ -300,8 +301,11 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf *dma_buf;
dma_buf = dma_buf_export(exp_info);
- if (!IS_ERR(dma_buf))
- drm_dev_ref(dev);
+ if (IS_ERR(dma_buf))
+ return dma_buf;
+
+ drm_dev_ref(dev);
+ drm_gem_object_reference(exp_info->priv);
return dma_buf;
}
@@ -472,8 +476,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
*/
obj->dma_buf = dmabuf;
get_dma_buf(obj->dma_buf);
- /* Grab a new ref since the callers is now used by the dma-buf */
- drm_gem_object_reference(obj);
return dmabuf;
}
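Because drm_gem_dmabuf_export() now references the GEM object through exp_info->priv (and drm_gem_dmabuf_release() drops it again), exporters no longer take that reference by hand. A hedged sketch of a driver prime-export hook built on it; my_dmabuf_ops is only a placeholder and a real driver must fill in its map/unmap/mmap callbacks, while drivers happy with the core defaults can simply use drm_gem_prime_export():

#include <linux/dma-buf.h>
#include <drm/drmP.h>

/* placeholder ops table; the release hook drops the refs taken at export */
static const struct dma_buf_ops my_dmabuf_ops = {
        .release = drm_gem_dmabuf_release,
};

static struct dma_buf *my_gem_prime_export(struct drm_device *dev,
                                           struct drm_gem_object *obj,
                                           int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &my_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;    /* drm_gem_dmabuf_export() references this */

        return drm_gem_dmabuf_export(dev, &exp_info);
}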
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
new file mode 100644
index 000000000000..ad3caaa1f48b
--- /dev/null
+++ b/drivers/gpu/drm/drm_print.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ */
+
+#include <stdarg.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include <drm/drm_print.h>
+
+void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
+{
+ seq_printf(p->arg, "%pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_seq_file);
+
+void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_info);
+
+/**
+ * drm_printf - print to a &drm_printer stream
+ * @p: the &drm_printer
+ * @f: format string
+ */
+void drm_printf(struct drm_printer *p, const char *f, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, f);
+ vaf.fmt = f;
+ vaf.va = &args;
+ p->printfn(p, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_printf);
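drm_printf() lets one dump routine feed either a seq_file or the kernel log, depending on which struct drm_printer it is handed. A sketch of the debugfs side, assuming the drm_seq_file_printer() constructor from drm_print.h:

#include <linux/seq_file.h>
#include <drm/drm_print.h>

static void my_dump_state(struct drm_printer *p)
{
        drm_printf(p, "engine state: %s\n", "idle");
}

static int my_debugfs_show(struct seq_file *m, void *unused)
{
        struct drm_printer p = drm_seq_file_printer(m);

        my_dump_state(&p);
        return 0;
}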
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index f6b64d7d3528..ac953f037be7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -152,6 +152,14 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+static enum drm_connector_status
+drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector->funcs->detect ?
+ connector->funcs->detect(connector, force) :
+ connector_status_connected;
+}
+
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
@@ -239,7 +247,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
- connector->status = connector->funcs->detect(connector, true);
+ connector->status = drm_connector_detect(connector, true);
}
/*
@@ -384,7 +392,11 @@ static void output_poll_execute(struct work_struct *work)
if (!drm_kms_helper_poll)
goto out;
- mutex_lock(&dev->mode_config.mutex);
+ if (!mutex_trylock(&dev->mode_config.mutex)) {
+ repoll = true;
+ goto out;
+ }
+
drm_for_each_connector(connector, dev) {
/* Ignore forced connectors. */
@@ -405,7 +417,7 @@ static void output_poll_execute(struct work_struct *work)
repoll = true;
- connector->status = connector->funcs->detect(connector, false);
+ connector->status = drm_connector_detect(connector, false);
if (old_status != connector->status) {
const char *old, *new;
@@ -565,7 +577,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
+ connector->status = drm_connector_detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
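With drm_connector_detect() in place, a connector that is physically always present can simply leave ->detect unset and the probe helper reports it as connected. A hedged sketch of such a funcs table using the stock helpers of this kernel generation:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

/* no .detect hook: the probe helper now assumes connector_status_connected */
static const struct drm_connector_funcs my_panel_connector_funcs = {
        .dpms                   = drm_atomic_helper_connector_dpms,
        .fill_modes             = drm_helper_probe_single_connector_modes,
        .destroy                = drm_connector_cleanup,
        .reset                  = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
};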
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index a4d81cf4ffa0..24be69d29964 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -65,9 +65,9 @@ static bool drm_property_type_valid(struct drm_property *property)
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
@@ -125,9 +125,9 @@ EXPORT_SYMBOL(drm_property_create);
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set one of the predefined values for enumeration
* properties.
@@ -173,9 +173,9 @@ EXPORT_SYMBOL(drm_property_create_enum);
* @supported_bits: bitmask of all supported enumeration values
*
* This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Compared to plain enumeration properties userspace is allowed to set any
* or'ed together combination of the predefined property bitflag values
@@ -245,9 +245,9 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any unsigned integer value in the (min, max)
* range inclusive.
@@ -273,9 +273,9 @@ EXPORT_SYMBOL(drm_property_create_range);
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is allowed to set any signed integer value in the (min, max)
* range inclusive.
@@ -300,9 +300,9 @@ EXPORT_SYMBOL(drm_property_create_signed_range);
* @type: object type from DRM_MODE_OBJECT_* defines
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* Userspace is only allowed to set this to any property value of the given
* @type. Only useful for atomic properties, which is enforced.
@@ -338,9 +338,9 @@ EXPORT_SYMBOL(drm_property_create_object);
* @name: name of the property
*
* This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy(), which is done automatically when calling
- * drm_mode_config_cleanup().
+ * object with drm_object_attach_property(). The returned property object must
+ * be freed with drm_property_destroy(), which is done automatically when
+ * calling drm_mode_config_cleanup().
*
* This is implemented as a ranged property with only {0, 1} as valid values.
*
@@ -729,7 +729,6 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -739,8 +738,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
return -ENOENT;
if (out_resp->length == blob->length) {
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+ if (copy_to_user(u64_to_user_ptr(out_resp->data),
+ blob->data,
+ blob->length)) {
ret = -EFAULT;
goto unref;
}
@@ -757,7 +757,6 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
{
struct drm_mode_create_blob *out_resp = data;
struct drm_property_blob *blob;
- void __user *blob_ptr;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -767,8 +766,9 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
if (IS_ERR(blob))
return PTR_ERR(blob);
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+ if (copy_from_user(blob->data,
+ u64_to_user_ptr(out_resp->data),
+ out_resp->length)) {
ret = -EFAULT;
goto out_blob;
}
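All of the create helpers documented above share one lifecycle: create the property once, attach it to the objects that expose it, and let drm_mode_config_cleanup() destroy it. A small sketch for a hypothetical driver-private range property on a plane:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* hypothetical 0..255 "dither strength" knob */
static int my_create_dither_prop(struct drm_device *dev, struct drm_plane *plane)
{
        struct drm_property *prop;

        prop = drm_property_create_range(dev, 0, "dither strength", 0, 255);
        if (!prop)
                return -ENOMEM;

        /* initial value 0; destroyed for us by drm_mode_config_cleanup() */
        drm_object_attach_property(&plane->base, prop, 0);
        return 0;
}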
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 73e53a8d1b37..e6057d8cdcd5 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -281,17 +281,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
*/
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
- int w = drm_rect_width(r);
- int h = drm_rect_height(r);
-
if (fixed_point)
- DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
- w >> 16, ((w & 0xffff) * 15625) >> 10,
- h >> 16, ((h & 0xffff) * 15625) >> 10,
- r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
- r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r));
else
- DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
+ DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r));
}
EXPORT_SYMBOL(drm_rect_debug_print);
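DRM_RECT_FMT/DRM_RECT_ARG (plus the _FP fixed-point variants) live in drm_rect.h and keep the format string and its arguments in one place. A one-line usage sketch, assuming the caller already has a clip rectangle:

#include <drm/drmP.h>
#include <drm/drm_rect.h>

static void my_debug_clip(const struct drm_rect *clip)
{
        /* prints e.g. "clip: 1920x1080+0+0" */
        DRM_DEBUG_KMS("clip: " DRM_RECT_FMT "\n", DRM_RECT_ARG(clip));
}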
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index caa4e4ca616d..bd311c77c254 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
+ resource_size_t offset = vmf->address - vma->vm_start;
resource_size_t baddr = map->offset + offset;
struct drm_agp_mem *agpmem;
struct page *page;
@@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
@@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
+ offset = vmf->address - vma->vm_start;
+ /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((void *)dma->pagelist[page_nr]);
@@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
index 8c44ba9a694e..65f1ba1099bd 100644
--- a/drivers/gpu/drm/etnaviv/cmdstream.xml.h
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -8,10 +8,34 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
-
-Copyright (C) 2014
+- cmdstream.xml ( 14094 bytes, from 2016-11-11 06:55:14)
+- copyright.xml ( 1597 bytes, from 2016-10-29 07:29:22)
+- common.xml ( 23344 bytes, from 2016-11-10 15:14:07)
+
+Copyright (C) 2012-2016 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -26,6 +50,7 @@ Copyright (C) 2014
#define FE_OPCODE_STALL 0x00000009
#define FE_OPCODE_CALL 0x0000000a
#define FE_OPCODE_RETURN 0x0000000b
+#define FE_OPCODE_DRAW_INSTANCED 0x0000000c
#define FE_OPCODE_CHIP_SELECT 0x0000000d
#define PRIMITIVE_TYPE_POINTS 0x00000001
#define PRIMITIVE_TYPE_LINES 0x00000002
@@ -214,5 +239,32 @@ Copyright (C) 2014
#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
+#define VIV_FE_DRAW_INSTANCED 0x00000000
+
+#define VIV_FE_DRAW_INSTANCED_HEADER 0x00000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP_DRAW_INSTANCED 0x60000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_INDEXED 0x00100000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK 0x000f0000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT 16
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK)
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK 0x0000ffff
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_COUNT 0x00000004
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK 0xff000000
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT 24
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK)
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK 0x00ffffff
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_START 0x00000008
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__MASK 0xffffffff
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_START_INDEX(x) (((x) << VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT) & VIV_FE_DRAW_INSTANCED_START_INDEX__MASK)
+
#endif /* CMDSTREAM_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index dcfd565c88d1..2a2e5e366ab7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -143,6 +143,7 @@ static bool etnaviv_validate_load_state(struct etna_validation_state *state,
static uint8_t cmd_length[32] = {
[FE_OPCODE_DRAW_PRIMITIVES] = 4,
[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+ [FE_OPCODE_DRAW_INSTANCED] = 4,
[FE_OPCODE_NOP] = 2,
[FE_OPCODE_STALL] = 2,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index aa687669e22b..00368b14d08d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/of_platform.h>
+#include <drm/drm_of.h>
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -478,9 +479,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -505,6 +504,7 @@ static struct drm_driver etnaviv_drm_driver = {
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
.gem_prime_vmap = etnaviv_gem_prime_vmap,
.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
+ .gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
.debugfs_cleanup = etnaviv_debugfs_cleanup,
@@ -629,8 +629,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!core_node)
break;
- component_match_add(&pdev->dev, &match, compare_of,
- core_node);
+ drm_of_component_match_add(&pdev->dev, &match,
+ compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 65e057639653..c255eda40526 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -78,6 +78,8 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 2bef501d4a17..af65491a78e2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -160,7 +160,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = vmalloc(file_size);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM |
+ __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL);
if (!iter.start) {
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b842d9cc..114dddbd297b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = pages[pgoff];
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
out:
switch (ret) {
@@ -409,20 +408,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
- int ret;
-
- if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
- write))
- return -EBUSY;
- } else {
- unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+ unsigned long remain =
+ op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+ long lret;
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
- write, true, remain);
- if (ret <= 0)
- return ret == 0 ? -ETIMEDOUT : ret;
- }
+ lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (lret < 0)
+ return lret;
+ else if (lret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {
@@ -470,10 +465,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
type,
fence->ops->get_driver_name(fence),
@@ -486,7 +481,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
@@ -763,7 +758,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- flags, pvec + pinned, NULL);
+ flags, pvec + pinned, NULL, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index b93618c1aa69..62b47972a52e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -23,10 +23,12 @@
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
- BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */
+ if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
+ return NULL;
- return etnaviv_obj->sgt;
+ return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
}
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
@@ -39,6 +41,19 @@ void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* TODO msm_gem_vunmap() */
}
+int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret < 0)
+ return ret;
+
+ return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
+}
+
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f885fed..0a67124bb2a4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
*/
#include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
@@ -639,6 +639,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->memory_base = PHYS_OFFSET;
else
gpu->memory_base = dma_mask - SZ_2G + 1;
+ } else if (PHYS_OFFSET >= SZ_2G) {
+ dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
+ gpu->memory_base = PHYS_OFFSET;
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
ret = etnaviv_hw_reset(gpu);
@@ -882,7 +886,7 @@ static void recover_worker(struct work_struct *work)
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
- fence_signal(gpu->event[i].fence);
+ dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
@@ -952,55 +956,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
return container_of(fence, struct etnaviv_fence, base);
}
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
return "etnaviv";
}
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno);
}
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
.enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
@@ -1010,8 +1014,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
f->gpu = gpu;
- fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
- gpu->fence_context, ++gpu->next_fence);
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
return &f->base;
}
@@ -1021,7 +1025,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -1039,7 +1043,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
/* Wait on any existing exclusive fence which isn't our own */
fence = reservation_object_get_excl(robj);
if (fence && fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1052,7 +1056,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(robj));
if (fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1158,11 +1162,11 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!fence_is_signaled(cmdbuf->fence))
+ if (!dma_fence_is_signaled(cmdbuf->fence))
break;
list_del(&cmdbuf->node);
- fence_put(cmdbuf->fence);
+ dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1279,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned int event, i;
int ret;
@@ -1391,7 +1395,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
while ((event = ffs(intr)) != 0) {
- struct fence *fence;
+ struct dma_fence *fence;
event -= 1;
@@ -1401,7 +1405,7 @@ static irqreturn_t irq_handler(int irq, void *data)
fence = gpu->event[event].fence;
gpu->event[event].fence = NULL;
- fence_signal(fence);
+ dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1553,7 +1557,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return ret;
gpu->drm = drm;
- gpu->fence_context = fence_context_alloc(1);
+ gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
bool used;
- struct fence *fence;
+ struct dma_fence *fence;
};
struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
- struct fence *fence;
+ struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 465d344f3391..d706ca4e2f02 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -114,7 +114,7 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "GScaler"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index def78c8c1780..739180ac3da5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
spin_lock(&priv->lock);
priv->pending &= ~commit->crtcs;
@@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -262,6 +263,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+int exynos_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
@@ -345,9 +366,7 @@ static const struct file_operations exynos_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d215149e737b..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool nonblock);
+int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = exynos_atomic_check,
.atomic_commit = exynos_atomic_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4cfb39d543b4..9f35deb56170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -63,15 +63,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 147ef0d298cb..95871577015d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1433,7 +1433,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
&img_pos[EXYNOS_DRM_OPS_SRC],
&img_pos[EXYNOS_DRM_OPS_DST]);
if (ret) {
- dev_err(dev, "failed to set precalser.\n");
+ dev_err(dev, "failed to set prescaler.\n");
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f2ae72ba7d5a..57b81460fec8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -231,12 +231,12 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid flags.\n");
+ DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
- DRM_ERROR("invalid size.\n");
+ DRM_ERROR("invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
@@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff_t page_offset;
int ret;
- page_offset = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
@@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out:
switch (ret) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 52a9d269484e..bef57987759d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1610,7 +1610,7 @@ static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
&img_pos[EXYNOS_DRM_OPS_SRC],
&img_pos[EXYNOS_DRM_OPS_DST]);
if (ret) {
- dev_err(dev, "failed to set precalser.\n");
+ dev_err(dev, "failed to set prescaler.\n");
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e8fb6ef947ee..5ed8b1effe71 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -47,19 +47,6 @@
#define HOTPLUG_DEBOUNCE_MS 1100
-/* AVI header and aspect ratio */
-#define HDMI_AVI_VERSION 0x02
-#define HDMI_AVI_LENGTH 0x0d
-
-/* AUI header info */
-#define HDMI_AUI_VERSION 0x01
-#define HDMI_AUI_LENGTH 0x0a
-
-/* AVI active format aspect ratio */
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x08
-#define AVI_4_3_CENTER_RATIO 0x09
-#define AVI_16_9_CENTER_RATIO 0x0a
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -131,7 +118,6 @@ struct hdmi_context {
bool dvi_mode;
struct delayed_work hotplug_work;
struct drm_display_mode current_mode;
- u8 cea_video_id;
const struct hdmi_driver_data *drv_data;
void __iomem *regs;
@@ -681,6 +667,13 @@ static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
}
}
+static inline void hdmi_reg_write_buf(struct hdmi_context *hdata, u32 reg_id,
+ u8 *buf, int size)
+{
+ for (reg_id = hdmi_map_reg(hdata, reg_id); size; --size, reg_id += 4)
+ writel(*buf++, hdata->regs + reg_id);
+}
+
static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
u32 reg_id, u32 value, u32 mask)
{
@@ -762,93 +755,50 @@ static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
return ret;
}
-static u8 hdmi_chksum(struct hdmi_context *hdata,
- u32 start, u8 len, u32 hdr_sum)
-{
- int i;
-
- /* hdr_sum : header0 + header1 + header2
- * start : start address of packet byte1
- * len : packet bytes - 1 */
- for (i = 0; i < len; ++i)
- hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
-
- /* return 2's complement of 8 bit hdr_sum */
- return (u8)(~(hdr_sum & 0xff) + 1);
-}
-
-static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- union hdmi_infoframe *infoframe)
+static void hdmi_reg_infoframes(struct hdmi_context *hdata)
{
- u32 hdr_sum;
- u8 chksum;
- u8 ar;
+ union hdmi_infoframe frm;
+ u8 buf[25];
+ int ret;
if (hdata->dvi_mode) {
- hdmi_reg_writeb(hdata, HDMI_VSI_CON,
- HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AVI_CON,
HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
return;
}
- switch (infoframe->any.type) {
- case HDMI_INFOFRAME_TYPE_AVI:
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ &hdata->current_mode);
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
+ if (ret > 0) {
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
- infoframe->any.version);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
- hdr_sum = infoframe->any.type + infoframe->any.version +
- infoframe->any.length;
-
- /* Output format zero hardcoded ,RGB YBCR selection */
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
- AVI_ACTIVE_FORMAT_VALID |
- AVI_UNDERSCANNED_DISPLAY_VALID);
-
- /*
- * Set the aspect ratio as per the mode, mentioned in
- * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
- */
- ar = hdata->current_mode.picture_aspect_ratio;
- switch (ar) {
- case HDMI_PICTURE_ASPECT_4_3:
- ar |= AVI_4_3_CENTER_RATIO;
- break;
- case HDMI_PICTURE_ASPECT_16_9:
- ar |= AVI_16_9_CENTER_RATIO;
- break;
- case HDMI_PICTURE_ASPECT_NONE:
- default:
- ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
- break;
- }
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
+ hdmi_reg_write_buf(hdata, HDMI_AVI_HEADER0, buf, ret);
+ } else {
+ DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
+ }
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
+ &hdata->current_mode);
+ if (!ret)
+ ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
+ sizeof(buf));
+ if (ret > 0) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
+ }
- chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->any.length, hdr_sum);
- DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
- break;
- case HDMI_INFOFRAME_TYPE_AUDIO:
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
- infoframe->any.version);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
- hdr_sum = infoframe->any.type + infoframe->any.version +
- infoframe->any.length;
- chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->any.length, hdr_sum);
- DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
- break;
- default:
- break;
+ ret = hdmi_audio_infoframe_init(&frm.audio);
+ if (!ret) {
+ frm.audio.channels = 2;
+ ret = hdmi_audio_infoframe_pack(&frm.audio, buf, sizeof(buf));
+ }
+ if (ret > 0) {
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, ret);
}
}
@@ -1127,8 +1077,6 @@ static void hdmi_start(struct hdmi_context *hdata, bool start)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- union hdmi_infoframe infoframe;
-
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1164,15 +1112,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
- infoframe.any.version = HDMI_AVI_VERSION;
- infoframe.any.length = HDMI_AVI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
-
- infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
- infoframe.any.version = HDMI_AUI_VERSION;
- infoframe.any.length = HDMI_AUI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
+ hdmi_reg_infoframes(hdata);
/* enable AVI packet every vsync, fixes purple line problem */
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
@@ -1458,7 +1398,6 @@ static void hdmi_mode_set(struct drm_encoder *encoder,
"INTERLACED" : "PROGRESSIVE");
drm_mode_copy(&hdata->current_mode, m);
- hdata->cea_video_id = drm_match_cea_mode(mode);
}
static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
@@ -1907,6 +1846,8 @@ err_disable_pm_runtime:
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
err_ddc:
put_device(&hdata->ddc_adpt->dev);
@@ -1929,6 +1870,9 @@ static int hdmi_remove(struct platform_device *pdev)
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
+
put_device(&hdata->ddc_adpt->dev);
return 0;
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 169667a22bdc..a0507dc18d9e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -361,9 +361,11 @@
/* AUI bit definition */
#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+#define HDMI_AUI_CON_EVERY_VSYNC (1 << 1)
/* VSI bit definition */
#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+#define HDMI_VSI_CON_EVERY_VSYNC (1 << 1)
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index b35a292287f3..aca34f656bea 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -3,6 +3,5 @@ fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
fsl_dcu_drm_rgb.o \
fsl_dcu_drm_plane.o \
fsl_dcu_drm_crtc.o \
- fsl_dcu_drm_fbdev.o \
fsl_tcon.o
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index b2d5e188b1b8..deb57435cc89 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -25,8 +25,13 @@
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_pending_vblank_event *event = crtc->state->event;
+ regmap_write(fsl_dev->regmap,
+ DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+
if (event) {
crtc->state->event = NULL;
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ /* always disable planes on the CRTC */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
drm_crtc_vblank_off(crtc);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+ .atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
- .disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e04efbed1a54..537ca159ffe5 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -32,6 +32,9 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
+static int legacyfb_depth = 24;
+module_param(legacyfb_depth, int, 0444);
+
static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
{
if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE)
@@ -59,8 +62,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return ret;
}
@@ -87,7 +88,18 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
goto done;
dev->irq_enabled = true;
- fsl_dcu_fbdev_init(dev);
+ if (legacyfb_depth != 16 && legacyfb_depth != 24 &&
+ legacyfb_depth != 32) {
+ dev_warn(dev->dev,
+ "Invalid legacyfb_depth. Defaulting to 24bpp\n");
+ legacyfb_depth = 24;
+ }
+ fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
+ if (IS_ERR(fsl_dev->fbdev)) {
+ ret = PTR_ERR(fsl_dev->fbdev);
+ fsl_dev->fbdev = NULL;
+ goto done;
+ }
return 0;
done:
@@ -108,6 +120,7 @@ static int fsl_dcu_unload(struct drm_device *dev)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ drm_crtc_force_disable_all(dev);
drm_kms_helper_poll_fini(dev);
if (fsl_dev->fbdev)
@@ -139,8 +152,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
drm_handle_vblank(dev, 0);
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return IRQ_HANDLED;
}
@@ -180,9 +191,7 @@ static const struct file_operations fsl_dcu_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -338,11 +347,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->soc = id->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not get memory IO resource\n");
- return -ENODEV;
- }
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
@@ -352,7 +356,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->irq = platform_get_irq(pdev, 0);
if (fsl_dev->irq < 0) {
dev_err(dev, "failed to get irq\n");
- return -ENXIO;
+ return fsl_dev->irq;
}
fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
@@ -430,9 +434,9 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
{
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+ drm_put_dev(fsl_dev->drm);
clk_disable_unprepare(fsl_dev->clk);
clk_unregister(fsl_dev->pix_clk);
- drm_put_dev(fsl_dev->drm);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 3b371fe7491e..e9e9aeecf2eb 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -197,7 +197,6 @@ struct fsl_dcu_drm_device {
struct drm_atomic_state *state;
};
-void fsl_dcu_fbdev_init(struct drm_device *dev);
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
deleted file mode 100644
index 8b8b819ea704..000000000000
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2015 Freescale Semiconductor, Inc.
- *
- * Freescale DCU drm device driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "fsl_dcu_drm_drv.h"
-
-/* initialize fbdev helper */
-void fsl_dcu_fbdev_init(struct drm_device *dev)
-{
- struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
-
- fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
-}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 9e6f7d8112b3..a99f48847420 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
DCU_LAYER_POST_SKIP(0) |
DCU_LAYER_PRE_SKIP(0));
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
- regmap_write(fsl_dev->regmap,
- DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index e1dd75b18118..05a8ee106879 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -58,17 +58,10 @@ static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status
-fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.destroy = fsl_dcu_drm_connector_destroy,
- .detect = fsl_dcu_drm_connector_detect,
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea733ab5b1e0..5efdb7fbb7ee 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -387,19 +387,6 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
}
/**
- * Detect the LVDS connection.
- *
- * This always returns CONNECTOR_STATUS_CONNECTED.
- * This connector should only have
- * been set up if the LVDS was actually connected anyway.
- */
-static enum drm_connector_status cdv_intel_lvds_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-/**
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -521,7 +508,6 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = cdv_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = cdv_intel_lvds_set_property,
.destroy = cdv_intel_lvds_destroy,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e705db53..8b44fa542562 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -124,8 +124,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
- page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+ page_num = vma_pages(vma);
+ address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -185,9 +185,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
@@ -198,9 +196,7 @@ static struct fb_ops psbfb_ops = {
static struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -211,9 +207,7 @@ static struct fb_ops psbfb_roll_ops = {
static struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
@@ -236,22 +230,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
- u32 bpp, depth;
+ const struct drm_format_info *info;
int ret;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /*
+ * Reject unknown formats, YUV formats, and formats with more than
+ * 4 bytes per pixel.
+ */
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth || info->cpp[0] > 4)
+ return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (bpp) {
- case 8:
- case 16:
- case 24:
- case 32:
- break;
- default:
- return -EINVAL;
- }
+
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
@@ -298,7 +290,6 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
- * @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 6d1cb6b370b1..527c62917660 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Page relative to the VMA start - we must calculate this ourselves
because vmf->pgoff is the fake GEM offset */
- page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
- >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
if (r->stolen)
pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
else
pfn = page_to_pfn(r->pages[page_offset]);
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 8f69225ce2b4..3f4f424196b2 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
+ * @resume: on resume
*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
@@ -130,7 +131,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
@@ -321,6 +322,7 @@ out:
* @len: length (bytes) of address space required
* @name: resource name
* @backed: resource should be backed by stolen pages
+ * @align: requested alignment
*
* Ask the kernel core to find us a suitable range of addresses
* to use for a GTT mapping.
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 50eb944fb78a..ff37ea585664 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -473,6 +473,7 @@ static const struct file_operations psb_gem_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = psb_unlocked_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b74372760d7f..05d7aaf47eea 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -753,10 +753,6 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index fd7c91254841..483fdce74e39 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -500,19 +500,6 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
}
/*
- * Detect the LVDS connection.
- *
- * This always returns CONNECTOR_STATUS_CONNECTED.
- * This connector should only have
- * been set up if the LVDS was actually connected anyway.
- */
-static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
-/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
@@ -643,7 +630,6 @@ const struct drm_connector_helper_funcs
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = psb_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = psb_intel_lvds_set_property,
.destroy = psb_intel_lvds_destroy,
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
index 558c61b1b8e8..2fd2724b7a7d 100644
--- a/drivers/gpu/drm/hisilicon/Kconfig
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -2,4 +2,5 @@
# hisilicon drm device configuration.
# Please keep this list sorted alphabetically
+source "drivers/gpu/drm/hisilicon/hibmc/Kconfig"
source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
index e3f6d493c996..c8155bfb1ff1 100644
--- a/drivers/gpu/drm/hisilicon/Makefile
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -2,4 +2,5 @@
# Makefile for hisilicon drm drivers.
# Please keep this list sorted alphabetically
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc/
obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
new file mode 100644
index 000000000000..380622a0da35
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -0,0 +1,9 @@
+config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_TTM
+
+ help
+ Choose this option if you have a Hisilicon Hibmc SoC chipset.
+ If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
new file mode 100644
index 000000000000..f2e04c035673
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
+
+obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
new file mode 100644
index 000000000000..2a1386e33126
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -0,0 +1,477 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+struct hibmc_display_panel_pll {
+ unsigned long M;
+ unsigned long N;
+ unsigned long OD;
+ unsigned long POD;
+};
+
+struct hibmc_dislay_pll_config {
+ unsigned long hdisplay;
+ unsigned long vdisplay;
+ u32 pll1_config_value;
+ u32 pll2_config_value;
+};
+
+static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+ {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
+ {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
+ {1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
+ {1280, 768, CRT_PLL1_HS_80MHZ, CRT_PLL2_HS_80MHZ},
+ {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
+ {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
+ {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
+ {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
+};
+
+#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
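+/* e.g. PADDING(16, 100) = (100 + 15) & ~15 = 112, i.e. 100 rounded up to a multiple of 16 */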
+
+static int hibmc_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != state->crtc_w || src_h != state->crtc_h) {
+ DRM_DEBUG_ATOMIC("scale not support\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x < 0 || state->crtc_y < 0) {
+ DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay) {
+ DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hibmc_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ u32 reg;
+ int ret;
+ u64 gpu_addr = 0;
+ unsigned int line_l;
+ struct hibmc_drm_private *priv = plane->dev->dev_private;
+ struct hibmc_framebuffer *hibmc_fb;
+ struct hibmc_bo *bo;
+
+ if (!state->fb)
+ return;
+
+ hibmc_fb = to_hibmc_framebuffer(state->fb);
+ bo = gem_to_hibmc_bo(hibmc_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d", ret);
+ return;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ttm_bo_unreserve(&bo->bo);
+ if (ret) {
+ DRM_ERROR("failed to pin hibmc_bo: %d", ret);
+ return;
+ }
+
+ writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
+
+ reg = state->fb->width * (state->fb->bits_per_pixel / 8);
+ /* the line pad (pitch alignment) is currently 16 bytes */
+ reg = PADDING(16, reg);
+
+ line_l = state->fb->width * state->fb->bits_per_pixel / 8;
+ line_l = PADDING(16, line_l);
+ writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
+ HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
+ priv->mmio + HIBMC_CRT_FB_WIDTH);
+
+ /* SET PIXEL FORMAT */
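+ /* bits_per_pixel / 16 yields the FORMAT field value: 1 for 16/24bpp, 2 for 32bpp */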
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
+ state->fb->bits_per_pixel / 16);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static const u32 channel_formats1[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888
+};
+
+static struct drm_plane_funcs hibmc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .atomic_check = hibmc_plane_atomic_check,
+ .atomic_update = hibmc_plane_atomic_update,
+};
+
+static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("failed to alloc memory when init plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ /*
+ * Plane init.
+ * TODO: only the primary plane is supported for now;
+ * overlay planes still need to be added.
+ */
+ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
+ channel_formats1,
+ ARRAY_SIZE(channel_formats1),
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ if (ret) {
+ DRM_ERROR("failed to init plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
+ return plane;
+}
+
+static void hibmc_crtc_enable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ hibmc_set_current_gate(priv, reg);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void hibmc_crtc_disable(struct drm_crtc *crtc)
+{
+ unsigned int reg;
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_crtc_vblank_off(crtc);
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
+
+ /* Disable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg |= HIBMC_CURR_GATE_LOCALMEM(0);
+ reg |= HIBMC_CURR_GATE_DISPLAY(0);
+ hibmc_set_current_gate(priv, reg);
+}
+
+static unsigned int format_pll_reg(void)
+{
+ unsigned int pllreg = 0;
+ struct hibmc_display_panel_pll pll = {0};
+
+ /*
+ * Note that all PLLs have the same register format. Here we just use
+ * the Panel PLL parameters to work out the bit fields in the register.
+ * The returned 32-bit value can then be applied to any PLL by the
+ * calling function.
+ */
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_BYPASS, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POWER, 1);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_INPUT, 0);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POD, pll.POD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_OD, pll.OD);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_N, pll.N);
+ pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_M, pll.M);
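+ /* with the zero-initialised pll above, only the POWER bit (bit 17, 0x20000) ends up set */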
+
+ return pllreg;
+}
+
+static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+{
+ u32 val;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ val = readl(priv->mmio + CRT_PLL1_HS);
+ val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ val = CRT_PLL1_HS_INTER_BYPASS(1) | CRT_PLL1_HS_POWERON(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ writel(pll, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val = pll & ~(CRT_PLL1_HS_POWERON(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val &= ~(CRT_PLL1_HS_INTER_BYPASS(1));
+ writel(val, priv->mmio + CRT_PLL1_HS);
+
+ usleep_range(1000, 2000);
+
+ val |= CRT_PLL1_HS_OUTER_BYPASS(1);
+ writel(val, priv->mmio + CRT_PLL1_HS);
+}
+
+static void get_pll_config(unsigned long x, unsigned long y,
+ u32 *pll1, u32 *pll2)
+{
+ int i;
+ int count = ARRAY_SIZE(hibmc_pll_table);
+
+ for (i = 0; i < count; i++) {
+ if (hibmc_pll_table[i].hdisplay == x &&
+ hibmc_pll_table[i].vdisplay == y) {
+ *pll1 = hibmc_pll_table[i].pll1_config_value;
+ *pll2 = hibmc_pll_table[i].pll2_config_value;
+ return;
+ }
+ }
+
+ /* if no match is found, fall back to the default 25 MHz setting */
+ *pll1 = CRT_PLL1_HS_25MHZ;
+ *pll2 = CRT_PLL2_HS_25MHZ;
+}
+
+/*
+ * This function takes care of the extra registers and bit fields required
+ * to set up a mode on the board.
+ * A note on the Display Control register: the FPGA only supports 7
+ * predefined pixel clocks, and the clock select sits in bits 4:0 of the
+ * new register 0x802a8.
+ */
+static unsigned int display_ctrl_adjust(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ unsigned int ctrl)
+{
+ unsigned long x, y;
+ u32 pll1; /* bit[31:0] of PLL */
+ u32 pll2; /* bit[63:32] of PLL */
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ x = mode->hdisplay;
+ y = mode->vdisplay;
+
+ get_pll_config(x, y, &pll1, &pll2);
+ writel(pll2, priv->mmio + CRT_PLL2_HS);
+ set_vclock_hisilicon(dev, pll1);
+
+ /*
+ * Hisilicon has to set up the top-left and bottom-right
+ * registers as well.
+ * Note that a normal chip only uses those two registers for
+ * auto-centering mode.
+ */
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_TOP, 0) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_LEFT, 0),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_TL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) |
+ HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_RIGHT, x - 1),
+ priv->mmio + HIBMC_CRT_AUTO_CENTERING_BR);
+
+ /*
+ * Assume common fields in ctrl have been properly set before
+ * calling this function.
+ * This function only sets the extra fields in ctrl.
+ */
+
+ /* Set bit 25 of display controller: Select CRT or VGA clock */
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CRTSELECT_MASK;
+ ctrl &= ~HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK;
+
+ ctrl |= HIBMC_CRT_DISP_CTL_CRTSELECT(HIBMC_CRTSELECT_CRT);
+
+ /* clock_phase_polarity is 0 */
+ ctrl |= HIBMC_CRT_DISP_CTL_CLOCK_PHASE(0);
+
+ writel(ctrl, priv->mmio + HIBMC_CRT_DISP_CTL);
+
+ return ctrl;
+}
+
+static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ unsigned int val;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+ int width = mode->hsync_end - mode->hsync_start;
+ int height = mode->vsync_end - mode->vsync_start;
+
+ writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1),
+ priv->mmio + HIBMC_CRT_HORZ_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_WIDTH, width) |
+ HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_START, mode->hsync_start - 1),
+ priv->mmio + HIBMC_CRT_HORZ_SYNC);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1),
+ priv->mmio + HIBMC_CRT_VERT_TOTAL);
+
+ writel(HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_HEIGHT, height) |
+ HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_START, mode->vsync_start - 1),
+ priv->mmio + HIBMC_CRT_VERT_SYNC);
+
+ val = HIBMC_FIELD(HIBMC_CRT_DISP_CTL_VSYNC_PHASE, 0);
+ val |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_HSYNC_PHASE, 0);
+ val |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ val |= HIBMC_CRT_DISP_CTL_PLANE(1);
+
+ display_ctrl_adjust(dev, mode, val);
+}
+
+static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned int reg;
+ struct drm_device *dev = crtc->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+ hibmc_set_current_gate(priv, reg);
+
+ /* We can add more initialization as needed. */
+}
+
+static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static const struct drm_crtc_funcs hibmc_crtc_funcs = {
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
+ .enable = hibmc_crtc_enable,
+ .disable = hibmc_crtc_disable,
+ .mode_set_nofb = hibmc_crtc_mode_set_nofb,
+ .atomic_begin = hibmc_crtc_atomic_begin,
+ .atomic_flush = hibmc_crtc_atomic_flush,
+};
+
+int hibmc_de_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ int ret;
+
+ plane = hibmc_plane_init(priv);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
+ return PTR_ERR(plane);
+ }
+
+ crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc) {
+ DRM_ERROR("failed to alloc memory when init crtc\n");
+ return -ENOMEM;
+ }
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane,
+ NULL, &hibmc_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (ret) {
+ DRM_ERROR("failed to set gamma size: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
new file mode 100644
index 000000000000..7e2043f4348c
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -0,0 +1,456 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static const struct file_operations hibmc_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .mmap = hibmc_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+};
+
+static int hibmc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
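+ /* vblank interrupt enable is bit 2 of HIBMC_RAW_INTERRUPT_EN */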
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+
+ return 0;
+}
+
+static void hibmc_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+
+ writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
+ priv->mmio + HIBMC_RAW_INTERRUPT_EN);
+}
+
+irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv =
+ (struct hibmc_drm_private *)dev->dev_private;
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
+
+ if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) {
+ writel(HIBMC_RAW_INTERRUPT_VBLANK(1),
+ priv->mmio + HIBMC_RAW_INTERRUPT);
+ drm_handle_vblank(dev, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct drm_driver hibmc_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ .fops = &hibmc_fops,
+ .name = "hibmc",
+ .date = "20160828",
+ .desc = "hibmc drm driver",
+ .major = 1,
+ .minor = 0,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = hibmc_enable_vblank,
+ .disable_vblank = hibmc_disable_vblank,
+ .gem_free_object_unlocked = hibmc_gem_free_object,
+ .dumb_create = hibmc_dumb_create,
+ .dumb_map_offset = hibmc_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .irq_handler = hibmc_drm_interrupt,
+};
+
+static int __maybe_unused hibmc_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_kms_helper_poll_disable(drm_dev);
+ priv->suspend_state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(priv->suspend_state)) {
+ DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n",
+ PTR_ERR(priv->suspend_state));
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(priv->suspend_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hibmc_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct hibmc_drm_private *priv = drm_dev->dev_private;
+
+ drm_atomic_helper_resume(drm_dev, priv->suspend_state);
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hibmc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend,
+ hibmc_pm_resume)
+};
+
+static int hibmc_kms_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ drm_mode_config_init(priv->dev);
+ priv->mode_config_initialized = true;
+
+ priv->dev->mode_config.min_width = 0;
+ priv->dev->mode_config.min_height = 0;
+ priv->dev->mode_config.max_width = 1920;
+ priv->dev->mode_config.max_height = 1440;
+
+ priv->dev->mode_config.fb_base = priv->fb_base;
+ priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.prefer_shadow = 0;
+
+ priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+
+ ret = hibmc_de_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init de: %d\n", ret);
+ return ret;
+ }
+
+ ret = hibmc_vdac_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to init vdac: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hibmc_kms_fini(struct hibmc_drm_private *priv)
+{
+ if (priv->mode_config_initialized) {
+ drm_mode_config_cleanup(priv->dev);
+ priv->mode_config_initialized = false;
+ }
+}
+
+/*
+ * The chip can operate in one of three power modes: 0, 1 or Sleep.
+ */
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode)
+{
+ unsigned int control_value = 0;
+ void __iomem *mmio = priv->mmio;
+ unsigned int input = 1;
+
+ if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ return;
+
+ if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP)
+ input = 0;
+
+ control_value = readl(mmio + HIBMC_POWER_MODE_CTRL);
+ control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK |
+ HIBMC_PW_MODE_CTL_OSC_INPUT_MASK);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode);
+ control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input);
+ writel(control_value, mmio + HIBMC_POWER_MODE_CTRL);
+}
+
+void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
+{
+ unsigned int gate_reg;
+ unsigned int mode;
+ void __iomem *mmio = priv->mmio;
+
+ /* Get current power mode. */
+ mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) &
+ HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT;
+
+ switch (mode) {
+ case HIBMC_PW_MODE_CTL_MODE_MODE0:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+
+ case HIBMC_PW_MODE_CTL_MODE_MODE1:
+ gate_reg = HIBMC_MODE1_GATE;
+ break;
+
+ default:
+ gate_reg = HIBMC_MODE0_GATE;
+ break;
+ }
+ writel(gate, mmio + gate_reg);
+}
+
+static void hibmc_hw_config(struct hibmc_drm_private *priv)
+{
+ unsigned int reg;
+
+ /* On hardware reset, power mode 0 is default. */
+ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
+ reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
+ reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
+ reg |= HIBMC_CURR_GATE_DISPLAY(1);
+ reg |= HIBMC_CURR_GATE_LOCALMEM(1);
+
+ hibmc_set_current_gate(priv, reg);
+
+ /*
+ * Reset the memory controller. If the memory controller
+ * is not reset in the chip, the system might hang when software
+ * accesses the memory. The memory should be reset after
+ * changing the MXCLK.
+ */
+ reg = readl(priv->mmio + HIBMC_MISC_CTRL);
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0);
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+
+ reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1);
+
+ writel(reg, priv->mmio + HIBMC_MISC_CTRL);
+}
+
+static int hibmc_hw_map(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct pci_dev *pdev = dev->pdev;
+ resource_size_t addr, size, ioaddr, iosize;
+
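+ /* BAR 1 holds the register MMIO region, BAR 0 the framebuffer memory */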
+ ioaddr = pci_resource_start(pdev, 1);
+ iosize = pci_resource_len(pdev, 1);
+ priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ if (!priv->mmio) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ priv->fb_map = devm_ioremap(dev->dev, addr, size);
+ if (!priv->fb_map) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ priv->fb_base = addr;
+ priv->fb_size = size;
+
+ return 0;
+}
+
+static int hibmc_hw_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+
+ ret = hibmc_hw_map(priv);
+ if (ret)
+ return ret;
+
+ hibmc_hw_config(priv);
+
+ return 0;
+}
+
+static int hibmc_unload(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ hibmc_fbdev_fini(priv);
+
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+ if (priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
+ drm_vblank_cleanup(dev);
+
+ hibmc_kms_fini(priv);
+ hibmc_mm_fini(priv);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int hibmc_load(struct drm_device *dev)
+{
+ struct hibmc_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("no memory to allocate for hibmc_drm_private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = priv;
+ priv->dev = dev;
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_mm_init(priv);
+ if (ret)
+ goto err;
+
+ ret = hibmc_kms_init(priv);
+ if (ret)
+ goto err;
+
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank: %d\n", ret);
+ goto err;
+ }
+
+ priv->msi_enabled = 0;
+ ret = pci_enable_msi(dev->pdev);
+ if (ret) {
+ DRM_WARN("enabling MSI failed: %d\n", ret);
+ } else {
+ priv->msi_enabled = 1;
+ ret = drm_irq_install(dev, dev->pdev->irq);
+ if (ret)
+ DRM_WARN("install irq failed: %d\n", ret);
+ }
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ ret = hibmc_fbdev_init(priv);
+ if (ret) {
+ DRM_ERROR("failed to initialize fbdev: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hibmc_unload(dev);
+ DRM_ERROR("failed to initialize drm driver: %d\n", ret);
+ return ret;
+}
+
+static int hibmc_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ DRM_ERROR("failed to allocate drm_device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ DRM_ERROR("failed to enable pci device: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = hibmc_load(dev);
+ if (ret) {
+ DRM_ERROR("failed to load hibmc: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = drm_dev_register(dev, 0);
+ if (ret) {
+ DRM_ERROR("failed to register drv for userspace access: %d\n",
+ ret);
+ goto err_unload;
+ }
+ return 0;
+
+err_unload:
+ hibmc_unload(dev);
+err_disable:
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_unref(dev);
+
+ return ret;
+}
+
+static void hibmc_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
+ hibmc_unload(dev);
+ drm_dev_unref(dev);
+}
+
+static struct pci_device_id hibmc_pci_table[] = {
+ {0x19e5, 0x1711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+
+static struct pci_driver hibmc_pci_driver = {
+ .name = "hibmc-drm",
+ .id_table = hibmc_pci_table,
+ .probe = hibmc_pci_probe,
+ .remove = hibmc_pci_remove,
+ .driver.pm = &hibmc_pm_ops,
+};
+
+static int __init hibmc_init(void)
+{
+ return pci_register_driver(&hibmc_pci_driver);
+}
+
+static void __exit hibmc_exit(void)
+{
+ return pci_unregister_driver(&hibmc_pci_driver);
+}
+
+module_init(hibmc_init);
+module_exit(hibmc_exit);
+
+MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
+MODULE_AUTHOR("RongrongZou <zourongrong@huawei.com>");
+MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
new file mode 100644
index 000000000000..e195521eb41e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -0,0 +1,114 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_DRV_H
+#define HIBMC_DRM_DRV_H
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+struct hibmc_framebuffer {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj;
+};
+
+struct hibmc_fbdev {
+ struct drm_fb_helper helper;
+ struct hibmc_framebuffer *fb;
+ int size;
+};
+
+struct hibmc_drm_private {
+ /* hw */
+ void __iomem *mmio;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+ bool msi_enabled;
+
+ /* drm */
+ struct drm_device *dev;
+ bool mode_config_initialized;
+ struct drm_atomic_state *suspend_state;
+
+ /* ttm */
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+
+ /* fbdev */
+ struct hibmc_fbdev *fbdev;
+ bool mm_inited;
+};
+
+#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
+
+struct hibmc_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+static inline struct hibmc_bo *hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct hibmc_bo, bo);
+}
+
+static inline struct hibmc_bo *gem_to_hibmc_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct hibmc_bo, gem);
+}
+
+void hibmc_set_power_mode(struct hibmc_drm_private *priv,
+ unsigned int power_mode);
+void hibmc_set_current_gate(struct hibmc_drm_private *priv,
+ unsigned int gate);
+
+int hibmc_de_init(struct hibmc_drm_private *priv);
+int hibmc_vdac_init(struct hibmc_drm_private *priv);
+int hibmc_fbdev_init(struct hibmc_drm_private *priv);
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv);
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc);
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int hibmc_bo_unpin(struct hibmc_bo *bo);
+void hibmc_gem_free_object(struct drm_gem_object *obj);
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct drm_mode_config_funcs hibmc_mode_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
new file mode 100644
index 000000000000..9b0696735ba1
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -0,0 +1,267 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+static int hibmcfb_create_object(
+ struct hibmc_drm_private *priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_gem_object *gobj;
+ struct drm_device *dev = priv->dev;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = hibmc_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static struct fb_ops hibmc_drm_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct hibmc_fbdev *hi_fbdev =
+ container_of(helper, struct hibmc_fbdev, helper);
+ struct hibmc_drm_private *priv = helper->dev->dev_private;
+ struct fb_info *info;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ int ret = 0;
+ int ret1;
+ size_t size;
+ unsigned int bytes_per_pixel;
+ struct hibmc_bo *bo = NULL;
+
+ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+ sizes->surface_depth = 32;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
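+ /* e.g. a 1024x768, 32bpp console: pitch = 4096 bytes, size = 3 MiB */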
+
+ ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ bo = gem_to_hibmc_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret) {
+ DRM_ERROR("failed to reserve ttm_bo: %d\n", ret);
+ goto out_unref_gem;
+ }
+
+ ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon: %d\n", ret);
+ goto out_unreserve_ttm_bo;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon: %d\n", ret);
+ goto out_unpin_bo;
+ }
+ ttm_bo_unreserve(&bo->bo);
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ DRM_ERROR("failed to allocate fbi: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ info->par = hi_fbdev;
+
+ hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
+ if (IS_ERR(hi_fbdev->fb)) {
+ ret = PTR_ERR(info);
+ DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+ goto out_release_fbi;
+ }
+
+ priv->fbdev->size = size;
+ hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
+
+ strcpy(info->fix.id, "hibmcdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &hibmc_drm_fb_ops;
+
+ drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
+ hi_fbdev->fb->fb.depth);
+ drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+ info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base;
+ info->fix.smem_len = size;
+ return 0;
+
+out_release_fbi:
+ drm_fb_helper_release_fbi(helper);
+ ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ if (ret1) {
+ DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1);
+ goto out_unref_gem;
+ }
+ ttm_bo_kunmap(&bo->kmap);
+out_unpin_bo:
+ hibmc_bo_unpin(bo);
+out_unreserve_ttm_bo:
+ ttm_bo_unreserve(&bo->bo);
+out_unref_gem:
+ drm_gem_object_unreference_unlocked(gobj);
+
+ return ret;
+}
+
+static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
+{
+ struct hibmc_framebuffer *gfb = fbdev->fb;
+ struct drm_fb_helper *fbh = &fbdev->helper;
+
+ drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_release_fbi(fbh);
+
+ drm_fb_helper_fini(fbh);
+
+ if (gfb)
+ drm_framebuffer_unreference(&gfb->fb);
+}
+
+static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
+ .fb_probe = hibmc_drm_fb_create,
+};
+
+int hibmc_fbdev_init(struct hibmc_drm_private *priv)
+{
+ int ret;
+ struct fb_var_screeninfo *var;
+ struct fb_fix_screeninfo *fix;
+ struct hibmc_fbdev *hifbdev;
+
+ hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL);
+ if (!hifbdev) {
+ DRM_ERROR("failed to allocate hibmc_fbdev\n");
+ return -ENOMEM;
+ }
+
+ priv->fbdev = hifbdev;
+ drm_fb_helper_prepare(priv->dev, &hifbdev->helper,
+ &hibmc_fbdev_helper_funcs);
+
+ /* Just one CRTC and one connector for now */
+ ret = drm_fb_helper_init(priv->dev,
+ &hifbdev->helper, 1, 1);
+ if (ret) {
+ DRM_ERROR("failed to initialize fb helper: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper);
+ if (ret) {
+ DRM_ERROR("failed to add all connectors: %d\n", ret);
+ goto fini;
+ }
+
+ ret = drm_fb_helper_initial_config(&hifbdev->helper, 16);
+ if (ret) {
+ DRM_ERROR("failed to setup initial conn config: %d\n", ret);
+ goto fini;
+ }
+
+ var = &hifbdev->helper.fbdev->var;
+ fix = &hifbdev->helper.fbdev->fix;
+
+ DRM_DEBUG_DRIVER("Member of info->var is :\n"
+ "xres=%d\n"
+ "yres=%d\n"
+ "xres_virtual=%d\n"
+ "yres_virtual=%d\n"
+ "xoffset=%d\n"
+ "yoffset=%d\n"
+ "bits_per_pixel=%d\n"
+ "...\n", var->xres, var->yres, var->xres_virtual,
+ var->yres_virtual, var->xoffset, var->yoffset,
+ var->bits_per_pixel);
+ DRM_DEBUG_DRIVER("Member of info->fix is :\n"
+ "smem_start=%lx\n"
+ "smem_len=%d\n"
+ "type=%d\n"
+ "type_aux=%d\n"
+ "visual=%d\n"
+ "xpanstep=%d\n"
+ "ypanstep=%d\n"
+ "ywrapstep=%d\n"
+ "line_length=%d\n"
+ "accel=%d\n"
+ "capabilities=%d\n"
+ "...\n", fix->smem_start, fix->smem_len, fix->type,
+ fix->type_aux, fix->visual, fix->xpanstep,
+ fix->ypanstep, fix->ywrapstep, fix->line_length,
+ fix->accel, fix->capabilities);
+
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&hifbdev->helper);
+ return ret;
+}
+
+void hibmc_fbdev_fini(struct hibmc_drm_private *priv)
+{
+ if (!priv->fbdev)
+ return;
+
+ hibmc_fbdev_destroy(priv->fbdev);
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
new file mode 100644
index 000000000000..f7035bf3ec1f
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -0,0 +1,196 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef HIBMC_DRM_HW_H
+#define HIBMC_DRM_HW_H
+
+/* register definition */
+#define HIBMC_MISC_CTRL 0x4
+
+#define HIBMC_MSCCTL_LOCALMEM_RESET(x) ((x) << 6)
+#define HIBMC_MSCCTL_LOCALMEM_RESET_MASK 0x40
+
+#define HIBMC_CURRENT_GATE 0x000040
+#define HIBMC_CURR_GATE_DISPLAY(x) ((x) << 2)
+#define HIBMC_CURR_GATE_DISPLAY_MASK 0x4
+
+#define HIBMC_CURR_GATE_LOCALMEM(x) ((x) << 1)
+#define HIBMC_CURR_GATE_LOCALMEM_MASK 0x2
+
+#define HIBMC_MODE0_GATE 0x000044
+#define HIBMC_MODE1_GATE 0x000048
+#define HIBMC_POWER_MODE_CTRL 0x00004C
+
+#define HIBMC_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3)
+#define HIBMC_PW_MODE_CTL_OSC_INPUT_MASK 0x8
+
+#define HIBMC_PW_MODE_CTL_MODE(x) ((x) << 0)
+#define HIBMC_PW_MODE_CTL_MODE_MASK 0x03
+#define HIBMC_PW_MODE_CTL_MODE_SHIFT 0
+
+#define HIBMC_PW_MODE_CTL_MODE_MODE0 0
+#define HIBMC_PW_MODE_CTL_MODE_MODE1 1
+#define HIBMC_PW_MODE_CTL_MODE_SLEEP 2
+
+#define HIBMC_PANEL_PLL_CTRL 0x00005C
+#define HIBMC_CRT_PLL_CTRL 0x000060
+
+#define HIBMC_PLL_CTRL_BYPASS(x) ((x) << 18)
+#define HIBMC_PLL_CTRL_BYPASS_MASK 0x40000
+
+#define HIBMC_PLL_CTRL_POWER(x) ((x) << 17)
+#define HIBMC_PLL_CTRL_POWER_MASK 0x20000
+
+#define HIBMC_PLL_CTRL_INPUT(x) ((x) << 16)
+#define HIBMC_PLL_CTRL_INPUT_MASK 0x10000
+
+#define HIBMC_PLL_CTRL_POD(x) ((x) << 14)
+#define HIBMC_PLL_CTRL_POD_MASK 0xC000
+
+#define HIBMC_PLL_CTRL_OD(x) ((x) << 12)
+#define HIBMC_PLL_CTRL_OD_MASK 0x3000
+
+#define HIBMC_PLL_CTRL_N(x) ((x) << 8)
+#define HIBMC_PLL_CTRL_N_MASK 0xF00
+
+#define HIBMC_PLL_CTRL_M(x) ((x) << 0)
+#define HIBMC_PLL_CTRL_M_MASK 0xFF
+
+#define HIBMC_CRT_DISP_CTL 0x80200
+
+#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
+#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
+
+#define HIBMC_CRTSELECT_CRT 1
+
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14)
+#define HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000
+
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13)
+#define HIBMC_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000
+
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12)
+#define HIBMC_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000
+
+#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
+#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+
+#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
+#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
+
+#define HIBMC_CRT_DISP_CTL_FORMAT(x) ((x) << 0)
+#define HIBMC_CRT_DISP_CTL_FORMAT_MASK 0x03
+
+#define HIBMC_CRT_FB_ADDRESS 0x080204
+
+#define HIBMC_CRT_FB_WIDTH 0x080208
+#define HIBMC_CRT_FB_WIDTH_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000
+#define HIBMC_CRT_FB_WIDTH_OFFS(x) ((x) << 0)
+#define HIBMC_CRT_FB_WIDTH_OFFS_MASK 0x3FFF
+
+#define HIBMC_CRT_HORZ_TOTAL 0x08020C
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000
+
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF
+
+#define HIBMC_CRT_HORZ_SYNC 0x080210
+#define HIBMC_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16)
+#define HIBMC_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000
+
+#define HIBMC_CRT_HORZ_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_HORZ_SYNC_START_MASK 0xFFF
+
+#define HIBMC_CRT_VERT_TOTAL 0x080214
+#define HIBMC_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16)
+#define HIBMC_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000
+
+#define HIBMC_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0)
+#define HIBMC_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF
+
+#define HIBMC_CRT_VERT_SYNC 0x080218
+#define HIBMC_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16)
+#define HIBMC_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000
+
+#define HIBMC_CRT_VERT_SYNC_START(x) ((x) << 0)
+#define HIBMC_CRT_VERT_SYNC_START_MASK 0x7FF
+
+/* Auto Centering */
+#define HIBMC_CRT_AUTO_CENTERING_TL 0x080280
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF
+
+#define HIBMC_CRT_AUTO_CENTERING_BR 0x080284
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16)
+#define HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000
+
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0)
+#define HIBMC_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF
+
+/* register to control panel output */
+#define HIBMC_DISPLAY_CONTROL_HISILE 0x80288
+#define HIBMC_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0)
+#define HIBMC_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1)
+#define HIBMC_DISPLAY_CONTROL_FPEN(x) ((x) << 2)
+#define HIBMC_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3)
+
+#define HIBMC_RAW_INTERRUPT 0x80290
+#define HIBMC_RAW_INTERRUPT_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_VBLANK_MASK 0x4
+
+#define HIBMC_RAW_INTERRUPT_EN 0x80298
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2)
+#define HIBMC_RAW_INTERRUPT_EN_VBLANK_MASK 0x4
+
+/* register and values for PLL control */
+#define CRT_PLL1_HS 0x802a8
+#define CRT_PLL1_HS_OUTER_BYPASS(x) ((x) << 30)
+#define CRT_PLL1_HS_INTER_BYPASS(x) ((x) << 29)
+#define CRT_PLL1_HS_POWERON(x) ((x) << 24)
+
+#define CRT_PLL1_HS_25MHZ 0x23d40f02
+#define CRT_PLL1_HS_40MHZ 0x23940801
+#define CRT_PLL1_HS_65MHZ 0x23940d01
+#define CRT_PLL1_HS_78MHZ 0x23540F82
+#define CRT_PLL1_HS_74MHZ 0x23941dc2
+#define CRT_PLL1_HS_80MHZ 0x23941001
+#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_108MHZ 0x23b41b01
+#define CRT_PLL1_HS_162MHZ 0x23480681
+#define CRT_PLL1_HS_148MHZ 0x23541dc2
+#define CRT_PLL1_HS_193MHZ 0x234807c1
+
+#define CRT_PLL2_HS 0x802ac
+#define CRT_PLL2_HS_25MHZ 0x206B851E
+#define CRT_PLL2_HS_40MHZ 0x30000000
+#define CRT_PLL2_HS_65MHZ 0x40000000
+#define CRT_PLL2_HS_78MHZ 0x50E147AE
+#define CRT_PLL2_HS_74MHZ 0x602B6AE7
+#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_108MHZ 0x80000000
+#define CRT_PLL2_HS_162MHZ 0xA0000000
+#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
+#define CRT_PLL2_HS_193MHZ 0xC0872B02
+
+#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
+#endif
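
The HIBMC_FIELD() helper at the end of this header applies a field's shift macro and then masks the result, so a register word can be composed directly from the field macros defined above. A minimal illustrative sketch follows; the helper function, the mmio pointer and the timing numbers are hypothetical and assume <linux/io.h> plus this header are included:

/* Illustrative only: build the CRT horizontal-total register word from
 * the field helpers above. 1056/800 are arbitrary example timings.
 */
static void example_set_horz_total(void __iomem *mmio)
{
        u32 reg = HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, 1056) |
                  HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, 800);

        writel(reg, mmio + HIBMC_CRT_HORZ_TOTAL);
}

Each HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, 1056) expands to ((1056 << 16) & 0xFFF0000), so out-of-range values are clipped to the field rather than corrupting neighbouring bits.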
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
new file mode 100644
index 000000000000..12a18557c5fd
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -0,0 +1,140 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hibmc_drm_drv.h"
+#include "hibmc_drm_regs.h"
+
+static int hibmc_connector_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static int hibmc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+hibmc_connector_best_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+}
+
+static const struct drm_connector_helper_funcs
+ hibmc_connector_helper_funcs = {
+ .get_modes = hibmc_connector_get_modes,
+ .mode_valid = hibmc_connector_mode_valid,
+ .best_encoder = hibmc_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs hibmc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_connector *
+hibmc_connector_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
+ DRM_ERROR("failed to alloc memory when init connector\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_connector_init(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("failed to init connector: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ drm_connector_helper_add(connector,
+ &hibmc_connector_helper_funcs);
+
+ return connector;
+}
+
+static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ u32 reg;
+ struct drm_device *dev = encoder->dev;
+ struct hibmc_drm_private *priv = dev->dev_private;
+
+ reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+ reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_PANELDATE(1);
+ reg |= HIBMC_DISPLAY_CONTROL_FPEN(1);
+ reg |= HIBMC_DISPLAY_CONTROL_VBIASEN(1);
+ writel(reg, priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
+ .mode_set = hibmc_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs hibmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int hibmc_vdac_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ connector = hibmc_connector_init(priv);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("failed to create connector: %ld\n",
+ PTR_ERR(connector));
+ return PTR_ERR(connector);
+ }
+
+ encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder) {
+ DRM_ERROR("failed to alloc memory when init encoder\n");
+ return -ENOMEM;
+ }
+
+ encoder->possible_crtcs = 0x1;
+ ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init encoder: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
new file mode 100644
index 000000000000..e76abf61edae
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -0,0 +1,558 @@
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Rongrong Zou <zourongrong@huawei.com>
+ * Rongrong Zou <zourongrong@gmail.com>
+ * Jianhua Li <lijianhua@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <ttm/ttm_page_alloc.h>
+
+#include "hibmc_drm_drv.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline struct hibmc_drm_private *
+hibmc_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct hibmc_drm_private, bdev);
+}
+
+static int
+hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+
+ hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
+ hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
+ hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
+ hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
+ ret = drm_global_item_ref(&hibmc->mem_global_ref);
+ if (ret) {
+ DRM_ERROR("could not get ref on ttm global: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->bo_global_ref.mem_glob =
+ hibmc->mem_global_ref.object;
+ hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
+ hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
+ hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
+ hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
+ ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
+ if (ret) {
+ DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ return ret;
+ }
+ return 0;
+}
+
+static void
+hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
+{
+ drm_global_item_unref(&hibmc->bo_global_ref.ref);
+ drm_global_item_unref(&hibmc->mem_global_ref);
+ hibmc->mem_global_ref.release = NULL;
+}
+
+static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
+{
+ return bo->destroy == &hibmc_bo_ttm_destroy;
+}
+
+static int
+hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("unsupported memory type %u\n", type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
+{
+ u32 count = 0;
+ u32 i;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[count++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+ if (!count)
+ bo->placements[count++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = count;
+ bo->placement.num_busy_placement = count;
+ for (i = 0; i < count; i++) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+static void
+hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ if (!hibmc_ttm_bo_is_hibmc_bo(bo))
+ return;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
+ *pl = hibmcbo->placement;
+}
+
+static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct hibmc_bo *hibmcbo = hibmc_bo(bo);
+
+ return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
+ filp->private_data);
+}
+
+static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func hibmc_tt_backend_func = {
+ .destroy = &hibmc_ttm_backend_destroy,
+};
+
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ DRM_ERROR("failed to allocate ttm_tt\n");
+ return NULL;
+ }
+ tt->func = &hibmc_tt_backend_func;
+ ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+ if (ret) {
+ DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver hibmc_bo_driver = {
+ .ttm_tt_create = hibmc_ttm_tt_create,
+ .ttm_tt_populate = hibmc_ttm_tt_populate,
+ .ttm_tt_unpopulate = hibmc_ttm_tt_unpopulate,
+ .init_mem_type = hibmc_bo_init_mem_type,
+ .evict_flags = hibmc_bo_evict_flags,
+ .move = NULL,
+ .verify_access = hibmc_bo_verify_access,
+ .io_mem_reserve = &hibmc_ttm_io_mem_reserve,
+ .io_mem_free = NULL,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
+};
+
+int hibmc_mm_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+ struct drm_device *dev = hibmc->dev;
+ struct ttm_bo_device *bdev = &hibmc->bdev;
+
+ ret = hibmc_ttm_global_init(hibmc);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&hibmc->bdev,
+ hibmc->bo_global_ref.ref.object,
+ &hibmc_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("error initializing bo driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ hibmc->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ hibmc_ttm_global_release(hibmc);
+ DRM_ERROR("failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ hibmc->mm_inited = true;
+ return 0;
+}
+
+void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
+{
+ if (!hibmc->mm_inited)
+ return;
+
+ ttm_bo_device_release(&hibmc->bdev);
+ hibmc_ttm_global_release(hibmc);
+ hibmc->mm_inited = false;
+}
+
+static void hibmc_bo_unref(struct hibmc_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ *bo = NULL;
+}
+
+int hibmc_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct hibmc_bo **phibmcbo)
+{
+ struct hibmc_drm_private *hibmc = dev->dev_private;
+ struct hibmc_bo *hibmcbo;
+ size_t acc_size;
+ int ret;
+
+ hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
+ if (!hibmcbo) {
+ DRM_ERROR("failed to allocate hibmcbo\n");
+ return -ENOMEM;
+ }
+ ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
+ kfree(hibmcbo);
+ return ret;
+ }
+
+ hibmcbo->bo.bdev = &hibmc->bdev;
+
+ hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
+ sizeof(struct hibmc_bo));
+
+ ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
+ ttm_bo_type_device, &hibmcbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, hibmc_bo_ttm_destroy);
+ if (ret) {
+ hibmc_bo_unref(&hibmcbo);
+ DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
+ return ret;
+ }
+
+ *phibmcbo = hibmcbo;
+ return 0;
+}
+
+int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+ }
+
+ hibmc_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bo->bo.offset;
+ return 0;
+}
+
+int hibmc_bo_unpin(struct hibmc_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("validate failed for unpin: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct hibmc_drm_private *hibmc;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ hibmc = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &hibmc->bdev);
+}
+
+int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct hibmc_bo *hibmcbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = PAGE_ALIGN(size);
+ if (size == 0) {
+ DRM_ERROR("error: zero size\n");
+ return -EINVAL;
+ }
+
+ ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object: %d\n", ret);
+ return ret;
+ }
+ *obj = &hibmcbo->gem;
+ return 0;
+}
+
+int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
+ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
+ args->size = args->pitch * args->height;
+
+ ret = hibmc_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create GEM object: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret) {
+ DRM_ERROR("failed to unreference GEM object: %d\n", ret);
+ return ret;
+ }
+
+ args->handle = handle;
+ return 0;
+}
+
+void hibmc_gem_free_object(struct drm_gem_object *obj)
+{
+ struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);
+
+ hibmc_bo_unref(&hibmcbo);
+}
+
+static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_bo *bo;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = gem_to_hibmc_bo(obj);
+ *offset = hibmc_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+}
+
+static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
+
+ drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(hibmc_fb);
+}
+
+static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
+ .destroy = hibmc_user_framebuffer_destroy,
+};
+
+struct hibmc_framebuffer *
+hibmc_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct hibmc_framebuffer *hibmc_fb;
+ int ret;
+
+ hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
+ if (!hibmc_fb) {
+ DRM_ERROR("failed to allocate hibmc_fb\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd);
+ hibmc_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ kfree(hibmc_fb);
+ return ERR_PTR(ret);
+ }
+
+ return hibmc_fb;
+}
+
+static struct drm_framebuffer *
+hibmc_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct hibmc_framebuffer *hibmc_fb;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(hibmc_fb)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_CAST(hibmc_fb);
+ }
+ return &hibmc_fb->fb;
+}
+
+const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = hibmc_user_framebuffer_create,
+};
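
For orientation, here is a hedged sketch of how the buffer helpers in hibmc_ttm.c above are meant to be combined: create a GEM-backed hibmc_bo, reserve it, then pin it into VRAM to obtain a stable GPU offset (for example for scanout). The wrapper function is hypothetical; it assumes the four-argument ttm_bo_reserve() of this kernel generation and that the caller handles cleanup of the BO on failure:

static int example_pin_scanout_bo(struct drm_device *dev, u32 size,
                                  u64 *gpu_addr)
{
        struct hibmc_bo *bo;
        int ret;

        ret = hibmc_bo_create(dev, PAGE_ALIGN(size), PAGE_SIZE, 0, &bo);
        if (ret)
                return ret;

        /* ttm_bo_validate(), called inside hibmc_bo_pin(), requires the
         * buffer object to be reserved by the caller.
         */
        ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
        if (ret)
                return ret;

        ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
        ttm_bo_unreserve(&bo->bo);
        return ret;
}

Note that hibmc_bo_pin() reference-counts pins: a second call simply bumps pin_count and returns the existing offset, and hibmc_bo_unpin() only re-validates the placement once the count drops to zero.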
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d43d6b6..afc2b5d2d5f0 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,17 +608,16 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
- char *format_name;
+ struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
- format_name = drm_get_format_name(fb->pixel_format);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
- addr, fb->width, fb->height, fmt, format_name);
- kfree(format_name);
+ addr, fb->width, fb->height, fmt,
+ drm_get_format_name(fb->pixel_format, &format_name));
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a609c98..ebd5f4fe4c23 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "kirin_drm_drv.h"
@@ -151,9 +152,7 @@ static const struct file_operations kirin_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -260,14 +259,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
DRM_ERROR("no valid endpoint node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(endpoint);
remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
DRM_ERROR("no valid remote node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(remote);
if (!of_device_is_available(remote)) {
DRM_ERROR("not available for remote node\n");
@@ -294,7 +292,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(remote))
return PTR_ERR(remote);
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of, remote);
+ of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
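
The two kirin hunks above tighten of_node reference handling: the endpoint is released immediately after its remote parent has been resolved, and the remote is released only after drm_of_component_match_add() has taken its own reference on it. A condensed sketch of that ordering, with a hypothetical helper name and compare_of standing in for the driver's compare callback:

static int example_add_remote_component(struct device *dev,
                                        struct device_node *np,
                                        struct component_match **match)
{
        struct device_node *endpoint, *remote;

        endpoint = of_graph_get_next_endpoint(np, NULL);
        if (!endpoint)
                return -ENODEV;

        remote = of_graph_get_remote_port_parent(endpoint);
        of_node_put(endpoint);          /* endpoint no longer needed */
        if (!remote || !of_device_is_available(remote)) {
                of_node_put(remote);
                return -ENODEV;
        }

        drm_of_component_match_add(dev, match, compare_of, remote);
        of_node_put(remote);            /* the match holds its own reference */
        return 0;
}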
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d400d817..86f47e190309 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -41,12 +41,15 @@ struct tda998x_priv {
struct i2c_client *hdmi;
struct mutex mutex;
u16 rev;
+ u8 cec_addr;
u8 current_page;
- int dpms;
- bool is_hdmi_sink;
+ bool is_on;
+ bool supports_infoframes;
+ bool sink_has_audio;
u8 vip_cntrl_0;
u8 vip_cntrl_1;
u8 vip_cntrl_2;
+ unsigned long tmds_clock;
struct tda998x_audio_params audio_params;
struct platform_device *audio_pdev;
@@ -105,6 +108,8 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_PREFILT BIT(0)
+# define FEAT_POWERDOWN_CSC BIT(1)
# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
@@ -370,35 +375,46 @@ struct tda998x_priv {
static void
cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
{
- struct i2c_client *client = priv->cec;
u8 buf[] = {addr, val};
+ struct i2c_msg msg = {
+ .addr = priv->cec_addr,
+ .len = 2,
+ .buf = buf,
+ };
int ret;
- ret = i2c_master_send(client, buf, sizeof(buf));
+ ret = i2c_transfer(priv->hdmi->adapter, &msg, 1);
if (ret < 0)
- dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+ dev_err(&priv->hdmi->dev, "Error %d writing to cec:0x%x\n",
+ ret, addr);
}
static u8
cec_read(struct tda998x_priv *priv, u8 addr)
{
- struct i2c_client *client = priv->cec;
u8 val;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cec_addr,
+ .len = 1,
+ .buf = &addr,
+ }, {
+ .addr = priv->cec_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &val,
+ },
+ };
int ret;
- ret = i2c_master_send(client, &addr, sizeof(addr));
- if (ret < 0)
- goto fail;
-
- ret = i2c_master_recv(client, &val, sizeof(val));
- if (ret < 0)
- goto fail;
+ ret = i2c_transfer(priv->hdmi->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0) {
+ dev_err(&priv->hdmi->dev, "Error %d reading from cec:0x%x\n",
+ ret, addr);
+ val = 0;
+ }
return val;
-
-fail:
- dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
- return 0;
}
static int
@@ -579,9 +595,9 @@ tda998x_reset(struct tda998x_priv *priv)
* HPD assertion: it needs a delay of 100ms to avoid timing out while
* trying to read EDID data.
*
- * However, tda998x_encoder_get_modes() may be called at any moment
+ * However, tda998x_connector_get_modes() may be called at any moment
* after tda998x_connector_detect() indicates that we are connected, so
- * we need to delay probing modes in tda998x_encoder_get_modes() after
+ * we need to delay probing modes in tda998x_connector_get_modes() after
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
@@ -630,28 +646,30 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
bool handled = false;
sta = cec_read(priv, REG_CEC_INTSTATUS);
- cec = cec_read(priv, REG_CEC_RXSHPDINT);
- lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
- flag0 = reg_read(priv, REG_INT_FLAGS_0);
- flag1 = reg_read(priv, REG_INT_FLAGS_1);
- flag2 = reg_read(priv, REG_INT_FLAGS_2);
- DRM_DEBUG_DRIVER(
- "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
- sta, cec, lvl, flag0, flag1, flag2);
-
- if (cec & CEC_RXSHPDINT_HPD) {
- if (lvl & CEC_RXSHPDLEV_HPD)
- tda998x_edid_delay_start(priv);
- else
- schedule_work(&priv->detect_work);
-
- handled = true;
- }
+ if (sta & CEC_INTSTATUS_HDMI) {
+ cec = cec_read(priv, REG_CEC_RXSHPDINT);
+ lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
+ flag0 = reg_read(priv, REG_INT_FLAGS_0);
+ flag1 = reg_read(priv, REG_INT_FLAGS_1);
+ flag2 = reg_read(priv, REG_INT_FLAGS_2);
+ DRM_DEBUG_DRIVER(
+ "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
+ sta, cec, lvl, flag0, flag1, flag2);
+
+ if (cec & CEC_RXSHPDINT_HPD) {
+ if (lvl & CEC_RXSHPDLEV_HPD)
+ tda998x_edid_delay_start(priv);
+ else
+ schedule_work(&priv->detect_work);
+
+ handled = true;
+ }
- if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
- priv->wq_edid_wait = 0;
- wake_up(&priv->wq_edid);
- handled = true;
+ if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
+ priv->wq_edid_wait = 0;
+ wake_up(&priv->wq_edid);
+ handled = true;
+ }
}
return IRQ_RETVAL(handled);
@@ -700,6 +718,8 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
}
+/* Audio support */
+
static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
{
if (on) {
@@ -713,8 +733,7 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
static int
tda998x_configure_audio(struct tda998x_priv *priv,
- struct tda998x_audio_params *params,
- unsigned mode_clock)
+ struct tda998x_audio_params *params)
{
u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
u32 n;
@@ -771,7 +790,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* assume 100MHz requires larger divider.
*/
adiv = AUDIO_DIV_SERCLK_8;
- if (mode_clock > 100000)
+ if (priv->tmds_clock > 100000)
adiv++; /* AUDIO_DIV_SERCLK_16 */
/* S/PDIF asks for a larger divider */
@@ -819,58 +838,281 @@ tda998x_configure_audio(struct tda998x_priv *priv,
return tda998x_write_aif(priv, &params->cea);
}
-/* DRM encoder functions */
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ int i, ret;
+ struct tda998x_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ memcpy(audio.status, params->iec.status,
+ min(sizeof(audio.status), sizeof(params->iec.status)));
-static void tda998x_encoder_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+ daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_I2S)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_SPDIF)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (audio.config == 0) {
+ dev_err(dev, "%s: No audio configuration found\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->audio_mutex);
+ if (priv->supports_infoframes && priv->sink_has_audio)
+ ret = tda998x_configure_audio(priv, &audio);
+ else
+ ret = 0;
+
+ if (ret == 0)
+ priv->audio_params = audio;
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- priv->audio_params = p->audio_params;
+ mutex_lock(&priv->audio_mutex);
+
+ reg_write(priv, REG_ENA_AP, 0);
+
+ priv->audio_params.format = AFMT_UNUSED;
+
+ mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
- /* we only care about on or off: */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
+ mutex_lock(&priv->audio_mutex);
- if (mode == priv->dpms)
- return;
+ tda998x_audio_mute(priv, enable);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /* enable video ports, audio will be enabled later */
- reg_write(priv, REG_ENA_VP_0, 0xff);
- reg_write(priv, REG_ENA_VP_1, 0xff);
- reg_write(priv, REG_ENA_VP_2, 0xff);
- /* set muxing after enabling ports: */
- reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
- reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
- reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
- break;
- case DRM_MODE_DPMS_OFF:
- /* disable video ports */
- reg_write(priv, REG_ENA_VP_0, 0x00);
- reg_write(priv, REG_ENA_VP_1, 0x00);
- reg_write(priv, REG_ENA_VP_2, 0x00);
- break;
+ mutex_unlock(&priv->audio_mutex);
+ return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+ memcpy(buf, priv->connector.eld,
+ min(sizeof(priv->connector.eld), len));
+ mutex_unlock(&priv->audio_mutex);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = tda998x_audio_hw_params,
+ .audio_shutdown = tda998x_audio_shutdown,
+ .digital_mute = tda998x_audio_digital_mute,
+ .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 2,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+ if (priv->audio_port[i].format == AFMT_I2S &&
+ priv->audio_port[i].config != 0)
+ codec_data.i2s = 1;
+ if (priv->audio_port[i].format == AFMT_SPDIF &&
+ priv->audio_port[i].config != 0)
+ codec_data.spdif = 1;
}
- priv->dpms = mode;
+ priv->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
+/* DRM connector functions */
+
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ else
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static int tda998x_connector_fill_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ int ret;
+
+ mutex_lock(&priv->audio_mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+
+ if (connector->edid_blob_ptr) {
+ struct edid *edid = (void *)connector->edid_blob_ptr->data;
+
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ } else {
+ priv->sink_has_audio = false;
+ }
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
+
+ return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = tda998x_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = tda998x_connector_fill_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
+{
+ struct tda998x_priv *priv = data;
+ u8 offset, segptr;
+ int ret, i;
+
+ offset = (blk & 1) ? 128 : 0;
+ segptr = blk / 2;
+
+ reg_write(priv, REG_DDC_ADDR, 0xa0);
+ reg_write(priv, REG_DDC_OFFS, offset);
+ reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
+ reg_write(priv, REG_DDC_SEGM, segptr);
+
+ /* enable reading EDID: */
+ priv->wq_edid_wait = 1;
+ reg_write(priv, REG_EDID_CTRL, 0x1);
+
+ /* flag must be cleared by sw: */
+ reg_write(priv, REG_EDID_CTRL, 0x0);
+
+ /* wait for block read to complete: */
+ if (priv->hdmi->irq) {
+ i = wait_event_timeout(priv->wq_edid,
+ !priv->wq_edid_wait,
+ msecs_to_jiffies(100));
+ if (i < 0) {
+ dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
+ return i;
+ }
+ } else {
+ for (i = 100; i > 0; i--) {
+ msleep(1);
+ ret = reg_read(priv, REG_INT_FLAGS_2);
+ if (ret < 0)
+ return ret;
+ if (ret & INT_FLAGS_2_EDID_BLK_RD)
+ break;
+ }
+ }
+
+ if (i == 0) {
+ dev_err(&priv->hdmi->dev, "read edid timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
+ if (ret != length) {
+ dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
+ blk, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+ struct edid *edid;
+ int n;
+
+ /*
+ * If we get killed while waiting for the HPD timeout, return
+ * no modes found: we are not in a restartable path, so we
+ * can't handle signals gracefully.
+ */
+ if (tda998x_edid_delay_wait(priv))
+ return 0;
+
+ if (priv->rev == TDA19988)
+ reg_clear(priv, REG_TX4, TX4_PD_RAM);
+
+ edid = drm_do_get_edid(connector, read_edid_block, priv);
+
+ if (priv->rev == TDA19988)
+ reg_set(priv, REG_TX4, TX4_PD_RAM);
+
+ if (!edid) {
+ dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
+ return 0;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ kfree(edid);
+
+ return n;
}
static int tda998x_connector_mode_valid(struct drm_connector *connector,
@@ -888,6 +1130,80 @@ static int tda998x_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static int tda998x_connector_init(struct tda998x_priv *priv,
+ struct drm_device *drm)
+{
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ connector->interlace_allowed = 1;
+
+ if (priv->hdmi->irq)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_helper_add(connector, &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, connector, &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+}
+
+/* DRM encoder functions */
+
+static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ bool on;
+
+ /* we only care about on or off: */
+ on = mode == DRM_MODE_DPMS_ON;
+
+ if (on == priv->is_on)
+ return;
+
+ if (on) {
+ /* enable video ports, audio will be enabled later */
+ reg_write(priv, REG_ENA_VP_0, 0xff);
+ reg_write(priv, REG_ENA_VP_1, 0xff);
+ reg_write(priv, REG_ENA_VP_2, 0xff);
+ /* set muxing after enabling ports: */
+ reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
+
+ priv->is_on = true;
+ } else {
+ /* disable video ports */
+ reg_write(priv, REG_ENA_VP_0, 0x00);
+ reg_write(priv, REG_ENA_VP_1, 0x00);
+ reg_write(priv, REG_ENA_VP_2, 0x00);
+
+ priv->is_on = false;
+ }
+}
+
static void
tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -971,6 +1287,8 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
div = 3;
}
+ mutex_lock(&priv->audio_mutex);
+
/* mute the audio FIFO: */
reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -982,6 +1300,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* no pre-filter or interpolator: */
reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
HVF_CNTRL_0_INTPOL(0));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_PREFILT);
reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
VIP_CNTRL_4_BLC(0));
@@ -1004,6 +1323,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* set color matrix bypass flag: */
reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP |
MAT_CONTRL_MAT_SC(1));
+ reg_set(priv, REG_FEAT_POWERDOWN, FEAT_POWERDOWN_CSC);
/* set BIAS tmds value: */
reg_write(priv, REG_ANA_GENERAL, 0x09);
@@ -1064,8 +1384,22 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
/* must be last register set: */
reg_write(priv, REG_TBG_CNTRL_0, 0);
- /* Only setup the info frames if the sink is HDMI */
- if (priv->is_hdmi_sink) {
+ priv->tmds_clock = adjusted_mode->clock;
+
+ /* CEA-861B section 6 says that:
+ * CEA version 1 (CEA-861) has no support for infoframes.
+ * CEA version 2 (CEA-861A) supports version 1 AVI infoframes,
+ * and optional basic audio.
+ * CEA version 3 (CEA-861B) supports version 1 and 2 AVI infoframes,
+ * and optional digital audio, with audio infoframes.
+ *
+ * Since we only support generation of version 2 AVI infoframes,
+ * ignore CEA version 2 and below (iow, behave as if we're a
+ * CEA-861 source.)
+ */
+ priv->supports_infoframes = priv->connector.display_info.cea_rev >= 3;
+
+ if (priv->supports_infoframes) {
/* We need to turn HDMI HDCP stuff on to get audio through */
reg &= ~TBG_CNTRL_1_DWIN_DIS;
reg_write(priv, REG_TBG_CNTRL_1, reg);
@@ -1074,127 +1408,12 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
tda998x_write_avi(priv, adjusted_mode);
- if (priv->audio_params.format != AFMT_UNUSED) {
- mutex_lock(&priv->audio_mutex);
- tda998x_configure_audio(priv,
- &priv->audio_params,
- adjusted_mode->clock);
- mutex_unlock(&priv->audio_mutex);
- }
- }
-}
-
-static enum drm_connector_status
-tda998x_connector_detect(struct drm_connector *connector, bool force)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
-
- return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
-{
- struct tda998x_priv *priv = data;
- u8 offset, segptr;
- int ret, i;
-
- offset = (blk & 1) ? 128 : 0;
- segptr = blk / 2;
-
- reg_write(priv, REG_DDC_ADDR, 0xa0);
- reg_write(priv, REG_DDC_OFFS, offset);
- reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
- reg_write(priv, REG_DDC_SEGM, segptr);
-
- /* enable reading EDID: */
- priv->wq_edid_wait = 1;
- reg_write(priv, REG_EDID_CTRL, 0x1);
-
- /* flag must be cleared by sw: */
- reg_write(priv, REG_EDID_CTRL, 0x0);
-
- /* wait for block read to complete: */
- if (priv->hdmi->irq) {
- i = wait_event_timeout(priv->wq_edid,
- !priv->wq_edid_wait,
- msecs_to_jiffies(100));
- if (i < 0) {
- dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
- return i;
- }
- } else {
- for (i = 100; i > 0; i--) {
- msleep(1);
- ret = reg_read(priv, REG_INT_FLAGS_2);
- if (ret < 0)
- return ret;
- if (ret & INT_FLAGS_2_EDID_BLK_RD)
- break;
- }
- }
-
- if (i == 0) {
- dev_err(&priv->hdmi->dev, "read edid timeout\n");
- return -ETIMEDOUT;
+ if (priv->audio_params.format != AFMT_UNUSED &&
+ priv->sink_has_audio)
+ tda998x_configure_audio(priv, &priv->audio_params);
}
- ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
- if (ret != length) {
- dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
- blk, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int tda998x_connector_get_modes(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- struct edid *edid;
- int n;
-
- /*
- * If we get killed while waiting for the HPD timeout, return
- * no modes found: we are not in a restartable path, so we
- * can't handle signals gracefully.
- */
- if (tda998x_edid_delay_wait(priv))
- return 0;
-
- if (priv->rev == TDA19988)
- reg_clear(priv, REG_TX4, TX4_PD_RAM);
-
- edid = drm_do_get_edid(connector, read_edid_block, priv);
-
- if (priv->rev == TDA19988)
- reg_set(priv, REG_TX4, TX4_PD_RAM);
-
- if (!edid) {
- dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
- return 0;
- }
-
- drm_mode_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
- priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
- drm_edid_to_eld(connector, edid);
-
- kfree(edid);
-
- return n;
-}
-
-static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
- struct drm_connector *connector)
-{
- if (priv->hdmi->irq)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ mutex_unlock(&priv->audio_mutex);
}
static void tda998x_destroy(struct tda998x_priv *priv)
@@ -1215,145 +1434,6 @@ static void tda998x_destroy(struct tda998x_priv *priv)
i2c_unregister_device(priv->cec);
}
-static int tda998x_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- int i, ret;
- struct tda998x_audio_params audio = {
- .sample_width = params->sample_width,
- .sample_rate = params->sample_rate,
- .cea = params->cea,
- };
-
- if (!priv->encoder.crtc)
- return -ENODEV;
-
- memcpy(audio.status, params->iec.status,
- min(sizeof(audio.status), sizeof(params->iec.status)));
-
- switch (daifmt->fmt) {
- case HDMI_I2S:
- if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
- daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
- return -EINVAL;
- }
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_I2S)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_I2S;
- break;
- case HDMI_SPDIF:
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
- if (priv->audio_port[i].format == AFMT_SPDIF)
- audio.config = priv->audio_port[i].config;
- audio.format = AFMT_SPDIF;
- break;
- default:
- dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
- return -EINVAL;
- }
-
- if (audio.config == 0) {
- dev_err(dev, "%s: No audio configutation found\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&priv->audio_mutex);
- ret = tda998x_configure_audio(priv,
- &audio,
- priv->encoder.crtc->hwmode.clock);
-
- if (ret == 0)
- priv->audio_params = audio;
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
-static void tda998x_audio_shutdown(struct device *dev, void *data)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- reg_write(priv, REG_ENA_AP, 0);
-
- priv->audio_params.format = AFMT_UNUSED;
-
- mutex_unlock(&priv->audio_mutex);
-}
-
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
-
- mutex_lock(&priv->audio_mutex);
-
- tda998x_audio_mute(priv, enable);
-
- mutex_unlock(&priv->audio_mutex);
- return 0;
-}
-
-static int tda998x_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct tda998x_priv *priv = dev_get_drvdata(dev);
- struct drm_mode_config *config = &priv->encoder.dev->mode_config;
- struct drm_connector *connector;
- int ret = -ENODEV;
-
- mutex_lock(&config->mutex);
- list_for_each_entry(connector, &config->connector_list, head) {
- if (&priv->encoder == connector->encoder) {
- memcpy(buf, connector->eld,
- min(sizeof(connector->eld), len));
- ret = 0;
- }
- }
- mutex_unlock(&config->mutex);
-
- return ret;
-}
-
-static const struct hdmi_codec_ops audio_codec_ops = {
- .hw_params = tda998x_audio_hw_params,
- .audio_shutdown = tda998x_audio_shutdown,
- .digital_mute = tda998x_audio_digital_mute,
- .get_eld = tda998x_audio_get_eld,
-};
-
-static int tda998x_audio_codec_init(struct tda998x_priv *priv,
- struct device *dev)
-{
- struct hdmi_codec_pdata codec_data = {
- .ops = &audio_codec_ops,
- .max_i2s_channels = 2,
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
- if (priv->audio_port[i].format == AFMT_I2S &&
- priv->audio_port[i].config != 0)
- codec_data.i2s = 1;
- if (priv->audio_port[i].format == AFMT_SPDIF &&
- priv->audio_port[i].config != 0)
- codec_data.spdif = 1;
- }
-
- priv->audio_pdev = platform_device_register_data(
- dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(priv->audio_pdev);
-}
-
/* I2C driver functions */
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
@@ -1403,22 +1483,21 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- unsigned short cec_addr;
+
+ mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ priv->cec_addr = 0x34 + (client->addr & 0x03);
priv->current_page = 0xff;
priv->hdmi = client;
- /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
- cec_addr = 0x34 + (client->addr & 0x03);
- priv->cec = i2c_new_dummy(client->adapter, cec_addr);
+ priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
if (!priv->cec)
return -ENODEV;
- priv->dpms = DRM_MODE_DPMS_OFF;
-
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
@@ -1478,7 +1557,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* initialize the optional IRQ */
if (client->irq) {
- int irqf_trigger;
+ unsigned long irq_flags;
/* init read EDID waitqueue and HDP work */
init_waitqueue_head(&priv->wq_edid);
@@ -1488,11 +1567,11 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
reg_read(priv, REG_INT_FLAGS_1);
reg_read(priv, REG_INT_FLAGS_2);
- irqf_trigger =
+ irq_flags =
irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
ret = request_threaded_irq(client->irq, NULL,
- tda998x_irq_thread,
- irqf_trigger | IRQF_ONESHOT,
+ tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
dev_err(&client->dev,
@@ -1519,8 +1598,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->vip_cntrl_2 = video;
}
- mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
-
ret = tda998x_get_audio_ports(priv, np);
if (ret)
goto fail;
@@ -1567,45 +1644,25 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static struct drm_encoder *
-tda998x_connector_best_encoder(struct drm_connector *connector)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- return &priv->encoder;
-}
-
-static
-const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
- .get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
- .best_encoder = tda998x_connector_best_encoder,
-};
-
-static void tda998x_connector_destroy(struct drm_connector *connector)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
+ priv->audio_params = p->audio_params;
}
-static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = tda998x_connector_detect,
- .destroy = tda998x_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int tda998x_bind(struct device *dev, struct device *master, void *data)
{
struct tda998x_encoder_params *params = dev->platform_data;
@@ -1630,7 +1687,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
crtcs = 1 << 0;
}
- priv->connector.interlace_allowed = 1;
priv->encoder.possible_crtcs = crtcs;
ret = tda998x_create(client, priv);
@@ -1638,9 +1694,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
return ret;
if (!dev->of_node && params)
- tda998x_encoder_set_config(priv, params);
-
- tda998x_encoder_set_polling(priv, &priv->connector);
+ tda998x_set_config(priv, params);
drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1648,24 +1702,12 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_encoder;
- drm_connector_helper_add(&priv->connector,
- &tda998x_connector_helper_funcs);
- ret = drm_connector_init(drm, &priv->connector,
- &tda998x_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = tda998x_connector_init(priv, drm);
if (ret)
goto err_connector;
- ret = drm_connector_register(&priv->connector);
- if (ret)
- goto err_sysfs;
-
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
-
return 0;
-err_sysfs:
- drm_connector_cleanup(&priv->connector);
err_connector:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
@@ -1678,7 +1720,6 @@ static void tda998x_unbind(struct device *dev, struct device *master,
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_unregister(&priv->connector);
drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
tda998x_destroy(priv);
@@ -1692,6 +1733,10 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_warn(&client->dev, "adapter does not support I2C\n");
+ return -EIO;
+ }
return component_add(&client->dev, &tda998x_ops);
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index d91856779beb..ab4e6cbe1f8b 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,9 +113,7 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 0be55dc1ef4b..02504a7cfaf2 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
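Editorial note: the two i810 hunks above drop the CONFIG_COMPAT guards around .compat_ioctl. A plausible reading is that the DRM headers now provide a fallback definition of drm_compat_ioctl when compat support is compiled out; that header change is not shown in this diff, so the sketch below is only an assumption of the pattern that makes the unconditional initializer safe:

/* Hypothetical header fallback (not part of this patch): with CONFIG_COMPAT
 * disabled the symbol collapses to NULL, so file_operations initializers can
 * reference it without #ifdefs.
 */
#ifdef CONFIG_COMPAT
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define drm_compat_ioctl NULL
#endif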
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7769e469118f..5ddde7349fbd 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
+ select RELAY
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
@@ -24,28 +25,59 @@ config DRM_I915
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
+
+ This driver is used by the Intel driver in X.org 6.8 and
+ XFree86 4.4 and above. It replaces the older i830 module that
+ supported a subset of the hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
+ If "M" is selected, the module will be called i915.
+
+config DRM_I915_ALPHA_SUPPORT
+ bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
default n
help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
+ Choose this option if you have new Intel hardware and want to enable
+ the alpha quality i915 driver support for the hardware in this kernel
+ version. You can also enable the support at runtime using the module
+ parameter i915.alpha_support=1; this option changes the default for
+ that module parameter.
+
+ It is recommended to upgrade to a kernel version with proper support
+ as soon as it is available. Generally fixes for platforms with alpha
+ support are not backported to older kernels.
If in doubt, say "N".
+config DRM_I915_CAPTURE_ERROR
+ bool "Enable capturing GPU state following a hang"
+ depends on DRM_I915
+ default y
+ help
+ This option enables capturing the GPU state when a hang is detected.
+ This information is vital for triaging hangs and assists in debugging.
+ Please report any hang to
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ for triaging.
+
+ If in doubt, say "Y".
+
+config DRM_I915_COMPRESS_ERROR
+ bool "Compress GPU error state"
+ depends on DRM_I915_CAPTURE_ERROR
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option selects ZLIB_DEFLATE if it isn't already
+ selected and causes any error state captured upon a GPU hang
+ to be compressed using zlib.
+
+ If in doubt, say "Y".
+
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
@@ -60,6 +92,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics
@@ -79,6 +112,15 @@ config DRM_I915_GVT
If in doubt, say "N".
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM/VFIO support for Intel GVT-g"
+ depends on DRM_I915_GVT
+ depends on KVM
+ default n
+ help
+ Choose this option if you want to enable KVMGT support for
+ Intel GVT-g.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index cee87bfd10c4..51ba630a134b 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
select PREEMPT_COUNT
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a998c2bce70a..3dea46af9fe6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,19 +33,22 @@ i915-y += i915_cmd_parser.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
- i915_gem_fence.o \
+ i915_gem_fence_reg.o \
i915_gem_gtt.o \
+ i915_gem_internal.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_timeline.o \
i915_gem_userptr.o \
- i915_gpu_error.o \
i915_trace_points.o \
+ i915_vma.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
+ intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -102,11 +105,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
+ intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
+# Post-mortem debug and GPU hang state capture
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6ad60d..8a46a7f31d53 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,10 @@
GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
+ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+
+CFLAGS_kvmgt.o := -Wno-unused-function
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 000000000000..0d41ebc4aea6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Pei Zhang <pei.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 alloc_flag, search_flag;
+ u64 start, end, size;
+ struct drm_mm_node *node;
+ int retried = 0;
+ int ret;
+
+ if (high_gm) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ node = &vgpu->gm.high_gm_node;
+ size = vgpu_hidden_sz(vgpu);
+ start = gvt_hidden_gmadr_base(gvt);
+ end = gvt_hidden_gmadr_end(gvt);
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ node = &vgpu->gm.low_gm_node;
+ size = vgpu_aperture_sz(vgpu);
+ start = gvt_aperture_gmadr_base(gvt);
+ end = gvt_aperture_gmadr_end(gvt);
+ }
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+ node, size, 4096, 0,
+ start, end, search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+ size, 4096, 0, start, end, 0);
+ if (ret == 0 && ++retried < 3)
+ goto search_again;
+
+ gvt_err("fail to alloc %s gm space from host, retried %d\n",
+ high_gm ? "high" : "low", retried);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ ret = alloc_gm(vgpu, false);
+ if (ret)
+ return ret;
+
+ ret = alloc_gm(vgpu, true);
+ if (ret)
+ goto out_free_aperture;
+
+ gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+ vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+ gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+ vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+ return 0;
+out_free_aperture:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ drm_mm_remove_node(&vgpu->gm.high_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number is translated into the corresponding HW fence
+ * register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ i915_reg_t fence_reg_lo, fence_reg_hi;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ return;
+
+ reg = vgpu->fence.regs[fence];
+ if (WARN_ON(!reg))
+ return;
+
+ fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+ I915_WRITE(fence_reg_lo, 0);
+ POSTING_READ(fence_reg_lo);
+
+ I915_WRITE(fence_reg_hi, upper_32_bits(value));
+ I915_WRITE(fence_reg_lo, lower_32_bits(value));
+ POSTING_READ(fence_reg_lo);
+}
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ u32 i;
+
+ if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ intel_vgpu_write_fence(vgpu, i, 0);
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ int i;
+ struct list_head *pos, *q;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Request fences from host */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i = 0;
+ list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+ reg = list_entry(pos, struct drm_i915_fence_reg, link);
+ if (reg->pin_count || reg->vma)
+ continue;
+ list_del(pos);
+ vgpu->fence.regs[i] = reg;
+ intel_vgpu_write_fence(vgpu, i, 0);
+ if (++i == vgpu_fence_sz(vgpu))
+ break;
+ }
+ if (i != vgpu_fence_sz(vgpu))
+ goto out_free_fence;
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+out_free_fence:
+ /* Return the fences to the host if allocation failed */
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ if (!reg)
+ continue;
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+ gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+ gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ unsigned long request, avail, max, taken;
+ const char *item;
+
+ if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ gvt_err("Invalid vGPU creation params\n");
+ return -EINVAL;
+ }
+
+ item = "low GM space";
+ max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_low_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->low_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_aperture_sz(vgpu) = request;
+
+ item = "high GM space";
+ max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_high_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->high_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_hidden_sz(vgpu) = request;
+
+ item = "fence";
+ max = gvt_fence_sz(gvt) - HOST_FENCE;
+ taken = gvt->fence.vgpu_allocated_fence_num;
+ avail = max - taken;
+ request = param->fence_sz;
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_fence_sz(vgpu) = request;
+
+ gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+ gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+ gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ return 0;
+
+no_enough_resource:
+ gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+ gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+ vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ BYTES_TO_MB(max), BYTES_TO_MB(taken));
+ return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resource owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resource owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+ free_vgpu_gm(vgpu);
+ free_vgpu_fence(vgpu);
+ free_resource(vgpu);
+}
+
+/**
+ * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resources for a vGPU. The user
+ * specifies the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ int ret;
+
+ ret = alloc_resource(vgpu, param);
+ if (ret)
+ return ret;
+
+ ret = alloc_vgpu_gm(vgpu);
+ if (ret)
+ goto out_free_resource;
+
+ ret = alloc_vgpu_fence(vgpu);
+ if (ret)
+ goto out_free_vgpu_gm;
+
+ return 0;
+
+out_free_vgpu_gm:
+ free_vgpu_gm(vgpu);
+out_free_resource:
+ free_resource(vgpu);
+ return ret;
+}
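Editorial note: for orientation, a minimal usage sketch of the allocation/free pair introduced above, assuming a hypothetical caller that has already filled in an intel_vgpu_creation_params (this caller is illustrative only and not part of the patch):

/* Hypothetical caller sketch: allocate HW resources for a vGPU and release
 * them again on the teardown path.
 */
static int example_setup_vgpu_resources(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	int ret;

	ret = intel_vgpu_alloc_resource(vgpu, param);	/* GM space + fences */
	if (ret)
		return ret;

	/* ... vGPU is usable here ... */

	intel_vgpu_free_resource(vgpu);			/* mirror of alloc */
	return 0;
}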
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
new file mode 100644
index 000000000000..db516382a4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+enum {
+ INTEL_GVT_PCI_BAR_GTTMMIO = 0,
+ INTEL_GVT_PCI_BAR_APERTURE,
+ INTEL_GVT_PCI_BAR_PIO,
+ INTEL_GVT_PCI_BAR_MAX,
+};
+
+/**
+ * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
+ return 0;
+}
+
+static int map_aperture(struct intel_vgpu *vgpu, bool map)
+{
+ u64 first_gfn, first_mfn;
+ u64 val;
+ int ret;
+
+ if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+ else
+ val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+
+ first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
+ first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
+ first_mfn,
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
+ return 0;
+}
+
+static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+{
+ u64 start, end;
+ u64 val;
+ int ret;
+
+ if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+ else
+ start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+
+ start &= ~GENMASK(3, 0);
+ end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
+
+ ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
+ return 0;
+}
+
+static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u8 old = vgpu_cfg_space(vgpu)[offset];
+ u8 new = *(u8 *)p_data;
+ u8 changed = old ^ new;
+ int ret;
+
+ if (!(changed & PCI_COMMAND_MEMORY))
+ return 0;
+
+ if (old & PCI_COMMAND_MEMORY) {
+ ret = trap_gttmmio(vgpu, false);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, false);
+ if (ret)
+ return ret;
+ } else {
+ ret = trap_gttmmio(vgpu, true);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ return 0;
+}
+
+static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int bar_index =
+ (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ u32 new = *(u32 *)(p_data);
+ bool lo = IS_ALIGNED(offset, 8);
+ u64 size;
+ int ret = 0;
+ bool mmio_enabled =
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+
+ if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+ return -EINVAL;
+
+ if (new == 0xffffffff) {
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
+ size = vgpu->cfg_space.bar[bar_index].size;
+ if (lo) {
+ new = rounddown(new, size);
+ } else {
+ u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+ /* A 32-bit BAR returns all zeroes in the upper 32
+ * bits; a 64-bit BAR calculates the size together
+ * with the lower 32 bits and returns the
+ * corresponding value.
+ */
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ new &= (~(size-1)) >> 32;
+ else
+ new = 0;
+ }
+ /*
+ * Unmap & untrap the BAR, since the guest hasn't configured
+ * a valid GPA
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ } else {
+ /*
+ * Unmap & untrap the old BAR first, since the guest has
+ * re-configured the BAR
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ /* Track the new BAR */
+ if (mmio_enabled) {
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, true);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, true);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ /* First check if it's PCI_COMMAND */
+ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
+ if (WARN_ON(bytes > 2))
+ return -EINVAL;
+ return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+ }
+
+ switch (rounddown(offset, 4)) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+
+ case INTEL_GVT_PCI_SWSCI:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+ break;
+
+ case INTEL_GVT_PCI_OPREGION:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ default:
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ }
+ return 0;
+}
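Editorial note: the BAR-sizing branch in emulate_pci_bar_write above follows the standard PCI probing protocol (software writes all 1's, reads the value back, and the device masks off the don't-care bits). A small, hypothetical illustration of how the BAR size falls out of the read-back value for a 32-bit memory BAR; the helper name and the standalone form are assumptions for illustration, not part of the patch:

/* Illustration only: derive a 32-bit memory BAR's size from the value read
 * back after writing 0xffffffff to it.
 */
#include <stdint.h>

static uint32_t bar_size_from_readback(uint32_t readback)
{
	uint32_t base = readback & ~0xfu;	/* strip the low flag bits */

	return ~base + 1;			/* e.g. 0xfff00000 -> 1 MiB */
}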
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
new file mode 100644
index 000000000000..d26a092c70e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -0,0 +1,2831 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+#define INVALID_OP (~0U)
+
+#define OP_LEN_MI 9
+#define OP_LEN_2D 10
+#define OP_LEN_3D_MEDIA 16
+#define OP_LEN_MFX_VC 16
+#define OP_LEN_VEBOX 16
+
+#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
+
+struct sub_op_bits {
+ int hi;
+ int low;
+};
+struct decode_info {
+ char *name;
+ int op_len;
+ int nr_sub_op;
+ struct sub_op_bits *sub_op;
+};
+
+#define MAX_CMD_BUDGET 0x7fffffff
+#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
+#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
+#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
+
+#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
+#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
+#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
+
+/* Render Command Map */
+
+/* MI_* command Opcode (28:23) */
+#define OP_MI_NOOP 0x0
+#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
+#define OP_MI_USER_INTERRUPT 0x2
+#define OP_MI_WAIT_FOR_EVENT 0x3
+#define OP_MI_FLUSH 0x4
+#define OP_MI_ARB_CHECK 0x5
+#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
+#define OP_MI_REPORT_HEAD 0x7
+#define OP_MI_ARB_ON_OFF 0x8
+#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
+#define OP_MI_BATCH_BUFFER_END 0xA
+#define OP_MI_SUSPEND_FLUSH 0xB
+#define OP_MI_PREDICATE 0xC /* IVB+ */
+#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
+#define OP_MI_SET_APPID 0xE /* IVB+ */
+#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
+#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
+#define OP_MI_DISPLAY_FLIP 0x14
+#define OP_MI_SEMAPHORE_MBOX 0x16
+#define OP_MI_SET_CONTEXT 0x18
+#define OP_MI_MATH 0x1A
+#define OP_MI_URB_CLEAR 0x19
+#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
+#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
+
+#define OP_MI_STORE_DATA_IMM 0x20
+#define OP_MI_STORE_DATA_INDEX 0x21
+#define OP_MI_LOAD_REGISTER_IMM 0x22
+#define OP_MI_UPDATE_GTT 0x23
+#define OP_MI_STORE_REGISTER_MEM 0x24
+#define OP_MI_FLUSH_DW 0x26
+#define OP_MI_CLFLUSH 0x27
+#define OP_MI_REPORT_PERF_COUNT 0x28
+#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
+#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
+#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
+#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
+#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
+#define OP_MI_2E 0x2E /* BDW+ */
+#define OP_MI_2F 0x2F /* BDW+ */
+#define OP_MI_BATCH_BUFFER_START 0x31
+
+/* Bit definition for dword 0 */
+#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
+
+#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
+#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
+
+/* 2D command: Opcode (28:22) */
+#define OP_2D(x) ((2<<7) | x)
+
+#define OP_XY_SETUP_BLT OP_2D(0x1)
+#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
+#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
+#define OP_XY_PIXEL_BLT OP_2D(0x24)
+#define OP_XY_SCANLINES_BLT OP_2D(0x25)
+#define OP_XY_TEXT_BLT OP_2D(0x26)
+#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
+#define OP_XY_COLOR_BLT OP_2D(0x50)
+#define OP_XY_PAT_BLT OP_2D(0x51)
+#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
+#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
+#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
+#define OP_XY_FULL_BLT OP_2D(0x55)
+#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
+#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
+#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
+#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
+#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
+#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
+#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
+#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
+#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
+#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
+#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
+
+/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
+#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
+ ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
+
+#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
+
+#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
+#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
+#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+
+#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
+
+#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
+
+#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
+#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
+#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
+#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
+#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+
+#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
+#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
+#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
+#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
+
+#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
+#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
+#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
+#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
+#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
+#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
+#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
+#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
+#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
+#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
+#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
+#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
+#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
+#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
+#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
+#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
+#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
+#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
+#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
+#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
+#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
+#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
+#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
+#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
+#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
+#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
+#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
+#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
+#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
+#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
+#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
+#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
+#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
+#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
+#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
+
+#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
+#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
+#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
+#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
+#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
+#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
+#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
+#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
+#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
+#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
+#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
+
+#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
+#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
+#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
+#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
+#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
+#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
+#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
+#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
+#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
+#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
+#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
+#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
+#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
+#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
+#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
+#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
+#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
+#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
+#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
+#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
+
+/* VCCP Command Parser */
+
+/*
+ * The MFX and VBE cmd definitions below are from the vaapi intel driver project (BSD License):
+ * git://anongit.freedesktop.org/vaapi/intel-driver
+ * src/i965_defines.h
+ *
+ */
+
+#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
+#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
+#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
+#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
+#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
+#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
+#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
+#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
+#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
+#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
+#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
+
+#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
+
+#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
+#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
+#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
+#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
+#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
+#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
+#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
+#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
+#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
+#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
+#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
+#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
+
+#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
+#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
+#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
+#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
+#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
+
+#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
+#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
+#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
+#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
+#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
+
+#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
+#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
+#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
+
+#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
+#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
+#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
+
+#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
+#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
+#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
+
+struct parser_exec_state;
+
+typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
+
+#define GVT_CMD_HASH_BITS 7
+
+/* which DWords need address fix */
+#define ADDR_FIX_1(x1) (1 << (x1))
+#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
+#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
+#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
+#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+
+struct cmd_info {
+ char *name;
+ u32 opcode;
+
+#define F_LEN_MASK (1U<<0)
+#define F_LEN_CONST 1U
+#define F_LEN_VAR 0U
+
+/*
+ * command has its own ip advance logic
+ * e.g. MI_BATCH_START, MI_BATCH_END
+ */
+#define F_IP_ADVANCE_CUSTOM (1<<1)
+
+#define F_POST_HANDLE (1<<2)
+ u32 flag;
+
+#define R_RCS (1 << RCS)
+#define R_VCS1 (1 << VCS)
+#define R_VCS2 (1 << VCS2)
+#define R_VCS (R_VCS1 | R_VCS2)
+#define R_BCS (1 << BCS)
+#define R_VECS (1 << VECS)
+#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
+ /* rings that support this cmd: BLT/RCS/VCS/VECS */
+ uint16_t rings;
+
+ /* devices that support this cmd: SNB/IVB/HSW/... */
+ uint16_t devices;
+
+ /* which DWords are addresses that need fixing up.
+ * bit value 0 means a 32-bit non-address operand in the command;
+ * bit value 1 means an address operand, which can be 32-bit or
+ * 64-bit depending on the architecture (defined by
+ * "gmadr_bytes_in_cmd" in intel_gvt).
+ * Regardless of the address length, each address takes only
+ * one bit in the bitmap.
+ */
+ uint16_t addr_bitmap;
+
+ /* flag == F_LEN_CONST : command length
+ * flag == F_LEN_VAR : length bias bits
+ * Note: length is in DWord
+ */
+ uint8_t len;
+
+ parser_cmd_handler handler;
+};
+
+struct cmd_entry {
+ struct hlist_node hlist;
+ struct cmd_info *info;
+};
+
+enum {
+ RING_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_2ND_LEVEL,
+};
+
+enum {
+ GTT_BUFFER,
+ PPGTT_BUFFER
+};
+
+struct parser_exec_state {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+
+ int buf_type;
+
+ /* batch buffer address type */
+ int buf_addr_type;
+
+ /* graphics memory address of ring buffer start */
+ unsigned long ring_start;
+ unsigned long ring_size;
+ unsigned long ring_head;
+ unsigned long ring_tail;
+
+ /* instruction graphics memory address */
+ unsigned long ip_gma;
+
+ /* mapped va of the instr_gma */
+ void *ip_va;
+ void *rb_va;
+
+ void *ret_bb_va;
+ /* next instruction when return from batch buffer to ring buffer */
+ unsigned long ret_ip_gma_ring;
+
+ /* next instruction when return from 2nd batch buffer to batch buffer */
+ unsigned long ret_ip_gma_bb;
+
+ /* batch buffer address type (GTT or PPGTT)
+ * used when ret from 2nd level batch buffer
+ */
+ int saved_buf_addr_type;
+
+ struct cmd_info *info;
+
+ struct intel_vgpu_workload *workload;
+};
+
+#define gmadr_dw_number(s) \
+ (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
+
+static unsigned long bypass_scan_mask = 0;
+static bool bypass_batch_buffer_scan = true;
+
+/* ring ALL, type = 0 */
+static struct sub_op_bits sub_op_mi[] = {
+ {31, 29},
+ {28, 23},
+};
+
+static struct decode_info decode_info_mi = {
+ "MI",
+ OP_LEN_MI,
+ ARRAY_SIZE(sub_op_mi),
+ sub_op_mi,
+};
+
+/* ring RCS, command type 2 */
+static struct sub_op_bits sub_op_2d[] = {
+ {31, 29},
+ {28, 22},
+};
+
+static struct decode_info decode_info_2d = {
+ "2D",
+ OP_LEN_2D,
+ ARRAY_SIZE(sub_op_2d),
+ sub_op_2d,
+};
+
+/* ring RCS, command type 3 */
+static struct sub_op_bits sub_op_3d_media[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 16},
+};
+
+static struct decode_info decode_info_3d_media = {
+ "3D_Media",
+ OP_LEN_3D_MEDIA,
+ ARRAY_SIZE(sub_op_3d_media),
+ sub_op_3d_media,
+};
+
+/* ring VCS, command type 3 */
+static struct sub_op_bits sub_op_mfx_vc[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_mfx_vc = {
+ "MFX_VC",
+ OP_LEN_MFX_VC,
+ ARRAY_SIZE(sub_op_mfx_vc),
+ sub_op_mfx_vc,
+};
+
+/* ring VECS, command type 3 */
+static struct sub_op_bits sub_op_vebox[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_vebox = {
+ "VEBOX",
+ OP_LEN_VEBOX,
+ ARRAY_SIZE(sub_op_vebox),
+ sub_op_vebox,
+};
+
+static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+ [RCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_3d_media,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [BCS] = {
+ &decode_info_mi,
+ NULL,
+ &decode_info_2d,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VECS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_vebox,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS2] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+};
+
+static inline u32 get_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return INVALID_OP;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return INVALID_OP;
+
+ return cmd >> (32 - d_info->op_len);
+}
+
+static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+ unsigned int opcode, int ring_id)
+{
+ struct cmd_entry *e;
+
+ hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
+ if ((opcode == e->info->opcode) &&
+ (e->info->rings & (1 << ring_id)))
+ return e->info;
+ }
+ return NULL;
+}
+
+static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+ u32 cmd, int ring_id)
+{
+ u32 opcode;
+
+ opcode = get_opcode(cmd, ring_id);
+ if (opcode == INVALID_OP)
+ return NULL;
+
+ return find_cmd_entry(gvt, opcode, ring_id);
+}
+
+static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
+{
+ return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
+}
+
+static inline void print_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+ int i;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return;
+
+ gvt_err("opcode=0x%x %s sub_ops:",
+ cmd >> (32 - d_info->op_len), d_info->name);
+
+ for (i = 0; i < d_info->nr_sub_op; i++)
+ pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
+ d_info->sub_op[i].low));
+
+ pr_err("\n");
+}
+
+static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
+{
+ return s->ip_va + (index << 2);
+}
+
+static inline u32 cmd_val(struct parser_exec_state *s, int index)
+{
+ return *cmd_ptr(s, index);
+}
+
+static void parser_exec_state_dump(struct parser_exec_state *s)
+{
+ int cnt = 0;
+ int i;
+
+ gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
+ s->ring_id, s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
+
+ gvt_err(" %s %s ip_gma(%08lx) ",
+ s->buf_type == RING_BUFFER_INSTRUCTION ?
+ "RING_BUFFER" : "BATCH_BUFFER",
+ s->buf_addr_type == GTT_BUFFER ?
+ "GTT" : "PPGTT", s->ip_gma);
+
+ if (s->ip_va == NULL) {
+ gvt_err(" ip_va(NULL)");
+ return;
+ }
+
+ gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ print_opcode(cmd_val(s, 0), s->ring_id);
+
+ /* print the whole page to trace */
+ pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
+
+ while (cnt < 1024) {
+ pr_err("ip_va=%p: ", s->ip_va);
+ for (i = 0; i < 8; i++)
+ pr_err("%08x ", cmd_val(s, i));
+ pr_err("\n");
+
+ s->ip_va += 8 * sizeof(u32);
+ cnt += 8;
+ }
+}
+
+static inline void update_ip_va(struct parser_exec_state *s)
+{
+ unsigned long len = 0;
+
+ if (WARN_ON(s->ring_head == s->ring_tail))
+ return;
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ unsigned long ring_top = s->ring_start + s->ring_size;
+
+ if (s->ring_head > s->ring_tail) {
+ if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
+ len = (s->ip_gma - s->ring_head);
+ else if (s->ip_gma >= s->ring_start &&
+ s->ip_gma <= s->ring_tail)
+ len = (ring_top - s->ring_head) +
+ (s->ip_gma - s->ring_start);
+ } else
+ len = (s->ip_gma - s->ring_head);
+
+ s->ip_va = s->rb_va + len;
+ } else {/* shadow batch buffer */
+ s->ip_va = s->ret_bb_va;
+ }
+}
+
+static inline int ip_gma_set(struct parser_exec_state *s,
+ unsigned long ip_gma)
+{
+ WARN_ON(!IS_ALIGNED(ip_gma, 4));
+
+ s->ip_gma = ip_gma;
+ update_ip_va(s);
+ return 0;
+}
+
+static inline int ip_gma_advance(struct parser_exec_state *s,
+ unsigned int dw_len)
+{
+ s->ip_gma += (dw_len << 2);
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->ip_gma >= s->ring_start + s->ring_size)
+ s->ip_gma -= s->ring_size;
+ update_ip_va(s);
+ } else {
+ s->ip_va += (dw_len << 2);
+ }
+
+ return 0;
+}
+
+static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+{
+ if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
+ return info->len;
+ else
+ return (cmd & ((1U << info->len) - 1)) + 2;
+ return 0;
+}
+
+static inline int cmd_length(struct parser_exec_state *s)
+{
+ return get_cmd_length(s->info, cmd_val(s, 0));
+}
+
+/* do not remove this, some platforms may need clflush here */
+#define patch_value(s, addr, val) do { \
+ *addr = val; \
+} while (0)
+
+static bool is_shadowed_mmio(unsigned int offset)
+{
+ bool ret = false;
+
+ if ((offset == 0x2168) || /*BB current head register UDW */
+ (offset == 0x2140) || /*BB current header register */
+ (offset == 0x211c) || /*second BB header register UDW */
+ (offset == 0x2114)) { /*second BB header register */
+ ret = true;
+ }
+ return ret;
+}
+
+static int cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index, char *cmd)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ if (offset + 4 > gvt->device_info.mmio_size) {
+ gvt_err("%s access to (%x) outside of MMIO range\n",
+ cmd, offset);
+ return -EINVAL;
+ }
+
+ if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ gvt_err("vgpu%d: %s access to non-render register (%x)\n",
+ s->vgpu->id, cmd, offset);
+ return 0;
+ }
+
+ if (is_shadowed_mmio(offset)) {
+ gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
+ s->vgpu->id, offset);
+ return 0;
+ }
+
+ if (offset == i915_mmio_reg_offset(DERRMR) ||
+ offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
+ /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
+ patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
+ }
+
+ /* TODO: Update the global mask if this MMIO is a masked-MMIO */
+ intel_gvt_mmio_set_cmd_accessed(gvt, offset);
+ return 0;
+}
+
+#define cmd_reg(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 2))
+
+#define cmd_reg_inhibit(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 18))
+
+#define cmd_gma(s, i) \
+ (cmd_val(s, i) & GENMASK(31, 2))
+
+#define cmd_gma_hi(s, i) \
+ (cmd_val(s, i) & GENMASK(15, 0))
+
+static int cmd_handler_lri(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(gvt->dev_priv) &&
+ (s->ring_id != RCS)) {
+ if (s->ring_id == BCS &&
+ cmd_reg(s, i) ==
+ i915_mmio_reg_offset(DERRMR))
+ ret |= 0;
+ else
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ }
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ }
+ return ret;
+}
+
+static int cmd_handler_lrr(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ ret |= ((cmd_reg_inhibit(s, i) ||
+ (cmd_reg_inhibit(s, i + 1)))) ?
+ -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ }
+ return ret;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode);
+
+static int cmd_handler_lrm(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ if (IS_BROADWELL(gvt->dev_priv))
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+static int cmd_handler_srm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+struct cmd_interrupt_event {
+ int pipe_control_notify;
+ int mi_flush_dw;
+ int mi_user_interrupt;
+};
+
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
+ [RCS] = {
+ .pipe_control_notify = RCS_PIPE_CONTROL,
+ .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
+ .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
+ },
+ [BCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = BCS_MI_FLUSH_DW,
+ .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
+ },
+ [VCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
+ },
+ [VCS2] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS2_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
+ },
+ [VECS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VECS_MI_FLUSH_DW,
+ .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
+ },
+};
+
+static int cmd_handler_pipe_control(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ unsigned int post_sync;
+ int ret = 0;
+
+ post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
+
+ /* LRI post sync */
+ if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
+ ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
+ /* post sync */
+ else if (post_sync) {
+ if (post_sync == 2)
+ ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
+ else if (post_sync == 3)
+ ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
+ else if (post_sync == 1) {
+			/* check ggtt */
+ if ((cmd_val(s, 2) & (1 << 2))) {
+ gma = cmd_val(s, 2) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, 3)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 1) & (1 << 21))
+ index_mode = true;
+ ret |= cmd_address_audit(s, gma, sizeof(u64),
+ index_mode);
+ }
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
+ set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
+{
+ set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_advance_default(struct parser_exec_state *s)
+{
+ return ip_gma_advance(s, cmd_length(s));
+}
+
+static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
+{
+ int ret;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ ret = ip_gma_set(s, s->ret_ip_gma_bb);
+ s->buf_addr_type = s->saved_buf_addr_type;
+ } else {
+ s->buf_type = RING_BUFFER_INSTRUCTION;
+ s->buf_addr_type = GTT_BUFFER;
+ if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
+ s->ret_ip_gma_ring -= s->ring_size;
+ ret = ip_gma_set(s, s->ret_ip_gma_ring);
+ }
+ return ret;
+}
+
+struct mi_display_flip_command_info {
+ int pipe;
+ int plane;
+ int event;
+ i915_reg_t stride_reg;
+ i915_reg_t ctrl_reg;
+ i915_reg_t surf_reg;
+ u64 stride_val;
+ u64 tile_val;
+ u64 surf_val;
+ bool async_flip;
+};
+
+struct plane_code_mapping {
+ int pipe;
+ int plane;
+ int event;
+};
+
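+/*
+ * Decode a BDW MI_DISPLAY_FLIP: dword 0 bits 21:19 select the pipe/plane,
+ * dword 1 carries stride and tiling, dword 2 carries the surface address.
+ */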
+static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct plane_code_mapping gen8_plane_code[] = {
+ [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
+ [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
+ [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
+ [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
+ [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
+ [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
+ };
+ u32 dword0, dword1, dword2;
+ u32 v;
+
+ dword0 = cmd_val(s, 0);
+ dword1 = cmd_val(s, 1);
+ dword2 = cmd_val(s, 2);
+
+ v = (dword0 & GENMASK(21, 19)) >> 19;
+ if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ return -EINVAL;
+
+ info->pipe = gen8_plane_code[v].pipe;
+ info->plane = gen8_plane_code[v].plane;
+ info->event = gen8_plane_code[v].event;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & 0x1);
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ if (info->plane == PLANE_A) {
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+ } else if (info->plane == PLANE_B) {
+ info->ctrl_reg = SPRCTL(info->pipe);
+ info->stride_reg = SPRSTRIDE(info->pipe);
+ info->surf_reg = SPRSURF(info->pipe);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int skl_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 dword0 = cmd_val(s, 0);
+ u32 dword1 = cmd_val(s, 1);
+ u32 dword2 = cmd_val(s, 2);
+ u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+
+ switch (plane) {
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
+ info->pipe = PIPE_A;
+ info->event = PRIMARY_A_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
+ info->pipe = PIPE_B;
+ info->event = PRIMARY_B_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
+ info->pipe = PIPE_C;
+ info->event = PRIMARY_C_FLIP_DONE;
+ break;
+ default:
+ gvt_err("unknown plane code %d\n", plane);
+ return -EINVAL;
+ }
+
+	info->plane = PRIMARY_PLANE;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & GENMASK(2, 0));
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+
+ return 0;
+}
+
+static int gen8_check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 stride, tile;
+
+ if (!info->async_flip)
+ return 0;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ GENMASK(12, 10)) >> 10;
+ } else {
+ stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ GENMASK(15, 6)) >> 6;
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ }
+
+ if (stride != info->stride_val)
+ gvt_dbg_cmd("cannot change stride during async flip\n");
+
+ if (tile != info->tile_val)
+ gvt_dbg_cmd("cannot change tile during async flip\n");
+
+ return 0;
+}
+
+static int gen8_update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
+
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ info->stride_val);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ info->tile_val << 10);
+ } else {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ info->stride_val << 6);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ info->tile_val << 10);
+ }
+
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ return 0;
+}
+
+static int decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv))
+ return gen8_decode_mi_display_flip(s, info);
+ if (IS_SKYLAKE(dev_priv))
+ return skl_decode_mi_display_flip(s, info);
+
+ return -ENODEV;
+}
+
+static int check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_check_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
+{
+ struct mi_display_flip_command_info info;
+ int ret;
+ int i;
+ int len = cmd_length(s);
+
+ ret = decode_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to decode MI display flip command\n");
+ return ret;
+ }
+
+ ret = check_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("invalid MI display flip command\n");
+ return ret;
+ }
+
+ ret = update_plane_mmio_from_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to update plane mmio\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i++)
+ patch_value(s, cmd_ptr(s, i), MI_NOOP);
+ return 0;
+}
+
+static bool is_wait_for_flip_pending(u32 cmd)
+{
+ return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
+}
+
+static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
+{
+ u32 cmd = cmd_val(s, 0);
+
+ if (!is_wait_for_flip_pending(cmd))
+ return 0;
+
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
+ return 0;
+}
+
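+/*
+ * Fetch the batch buffer start graphics address from the command; with an
+ * 8-byte gmadr the high bits live in the following dword.
+ */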
+static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
+{
+ unsigned long addr;
+ unsigned long gma_high, gma_low;
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return INTEL_GVT_INVALID_ADDR;
+
+ gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 4) {
+ addr = gma_low;
+ } else {
+ gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
+ addr = (((unsigned long)gma_high) << 32) | gma_low;
+ }
+ return addr;
+}
+
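+/*
+ * Audit a guest graphics address used by a command: in index mode the value
+ * is a qword index that must stay within one GTT page; otherwise the whole
+ * [gma, gma + op_size) range must be a valid vGPU graphics address.
+ */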
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
+ int i;
+ int ret;
+
+ if (op_size > max_surface_size) {
+ gvt_err("command address audit fail name %s\n", s->info->name);
+ return -EINVAL;
+ }
+
+ if (index_mode) {
+ if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
+ (!vgpu_gmadr_is_valid(s->vgpu,
+ guest_gma + op_size - 1))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ s->info->name, guest_gma, op_size);
+
+ pr_err("cmd dump: ");
+ for (i = 0; i < cmd_length(s); i++) {
+ if (!(i % 4))
+ pr_err("\n%08x ", cmd_val(s, i));
+ else
+ pr_err("%08x ", cmd_val(s, i));
+ }
+ pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
+ vgpu->id,
+ vgpu_aperture_gmadr_base(vgpu),
+ vgpu_aperture_gmadr_end(vgpu),
+ vgpu_hidden_gmadr_base(vgpu),
+ vgpu_hidden_gmadr_end(vgpu));
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (cmd_length(s) - 3) * sizeof(u32);
+ int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
+ unsigned long gma, gma_low, gma_high;
+ int ret = 0;
+
+	/* address audit is only needed when the command uses the global GTT (bit 22) */
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return 0;
+
+ gma = cmd_val(s, 2) & GENMASK(31, 2);
+
+ if (gmadr_bytes == 8) {
+ gma_low = cmd_val(s, 1) & GENMASK(31, 2);
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma_low;
+ core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
+ }
+ ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
+ return ret;
+}
+
+static inline int unexpected_cmd(struct parser_exec_state *s)
+{
+ gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
+ s->vgpu->id, s->info->name);
+ return -EINVAL;
+}
+
+static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
+ unsigned long gma, gma_high;
+ int ret = 0;
+
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return ret;
+
+ gma = cmd_val(s, 1) & GENMASK(31, 2);
+ if (gmadr_bytes == 8) {
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma;
+ }
+ ret = cmd_address_audit(s, gma, op_size, false);
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_clflush(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_conditional_batch_buffer_end(
+ struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ int ret = 0;
+
+ /* Check post-sync and ppgtt bit */
+ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
+ gma = cmd_val(s, 1) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 0) & (1 << 21))
+ index_mode = true;
+ ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ }
+ /* Check notify bit */
+ if ((cmd_val(s, 0) & (1 << 8)))
+ set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+ s->workload->pending_events);
+ return ret;
+}
+
+static void addr_type_update_snb(struct parser_exec_state *s)
+{
+ if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
+ (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
+ s->buf_addr_type = PPGTT_BUFFER;
+ }
+}
+
+
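+/*
+ * Copy [gma, end_gma) from guest graphics memory to the host virtual address
+ * va, translating gma to gpa one GTT page at a time so the copy may cross
+ * page boundaries.
+ */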
+static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
+ unsigned long gma, unsigned long end_gma, void *va)
+{
+ unsigned long copy_len, offset;
+ unsigned long len = 0;
+ unsigned long gpa;
+
+ while (gma != end_gma) {
+ gpa = intel_vgpu_gma_to_gpa(mm, gma);
+ if (gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid gma address: %lx\n", gma);
+ return -EFAULT;
+ }
+
+ offset = gma & (GTT_PAGE_SIZE - 1);
+
+ copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
+ GTT_PAGE_SIZE - offset : end_gma - gma;
+
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+
+ len += copy_len;
+ gma += copy_len;
+ }
+ return 0;
+}
+
+
+/*
+ * Check whether a batch buffer needs to be scanned. Currently
+ * the only criterion is privilege.
+ */
+static int batch_buffer_needs_scan(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ if (bypass_batch_buffer_scan)
+ return 0;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ /* BDW decides privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8))
+ return 0;
+ }
+ return 1;
+}
+
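+/*
+ * Walk the batch buffer from its start address, decoding one command at a
+ * time, and accumulate its size until MI_BATCH_BUFFER_END (or a chained,
+ * non-second-level MI_BATCH_BUFFER_START) is met.
+ */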
+static uint32_t find_bb_size(struct parser_exec_state *s)
+{
+ unsigned long gma = 0;
+ struct cmd_info *info;
+ uint32_t bb_size = 0;
+ uint32_t cmd_len = 0;
+ bool met_bb_end = false;
+ u32 cmd;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+ do {
+ copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ if (info->opcode == OP_MI_BATCH_BUFFER_END) {
+ met_bb_end = true;
+ } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ /* chained batch buffer */
+ met_bb_end = true;
+ }
+ }
+ cmd_len = get_cmd_length(info, cmd) << 2;
+ bb_size += cmd_len;
+ gma += cmd_len;
+
+ } while (!met_bb_end);
+
+ return bb_size;
+}
+
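+/*
+ * Allocate a shadow GEM object for the guest batch buffer, copy the guest
+ * contents into it and continue scanning from the shadow copy.
+ */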
+static int perform_bb_shadow(struct parser_exec_state *s)
+{
+ struct intel_shadow_bb_entry *entry_obj;
+ unsigned long gma = 0;
+ uint32_t bb_size;
+ void *dst = NULL;
+ int ret = 0;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+
+ /* get the size of the batch buffer */
+ bb_size = find_bb_size(s);
+
+ /* allocate shadow batch buffer */
+ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
+ if (entry_obj == NULL)
+ return -ENOMEM;
+
+ entry_obj->obj =
+ i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(entry_obj->obj)) {
+ ret = PTR_ERR(entry_obj->obj);
+ goto free_entry;
+ }
+ entry_obj->len = bb_size;
+ INIT_LIST_HEAD(&entry_obj->list);
+
+ dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow batch to CPU\n");
+ goto unmap_src;
+ }
+
+ entry_obj->va = dst;
+ entry_obj->bb_start_cmd_va = s->ip_va;
+
+	/* copy batch buffer to shadow batch buffer */
+ ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + bb_size,
+ dst);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ goto unmap_src;
+ }
+
+ list_add(&entry_obj->list, &s->workload->shadow_bb);
+ /*
+ * ip_va saves the virtual address of the shadow batch buffer, while
+ * ip_gma saves the graphics address of the original batch buffer.
+	 * As the shadow batch buffer is just a copy of the original one,
+	 * it is correct to use the shadow batch buffer's va and the original
+	 * batch buffer's gma in pair. After all, we don't want to pin the shadow
+ * buffer here (too early).
+ */
+ s->ip_va = dst;
+ s->ip_gma = gma;
+
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(entry_obj->obj);
+put_obj:
+ i915_gem_object_put(entry_obj->obj);
+free_entry:
+ kfree(entry_obj);
+ return ret;
+}
+
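+/*
+ * MI_BATCH_BUFFER_START: record the return address, switch the parser into
+ * (second-level) batch buffer mode and shadow the target buffer when it
+ * needs to be scanned.
+ */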
+static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
+{
+ bool second_level;
+ int ret = 0;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ return -EINVAL;
+ }
+
+ second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
+ if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
+ gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ return -EINVAL;
+ }
+
+ s->saved_buf_addr_type = s->buf_addr_type;
+ addr_type_update_snb(s);
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ } else if (second_level) {
+ s->buf_type = BATCH_BUFFER_2ND_LEVEL;
+ s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
+ }
+
+ if (batch_buffer_needs_scan(s)) {
+ ret = perform_bb_shadow(s);
+ if (ret < 0)
+ gvt_err("invalid shadow batch buffer\n");
+ } else {
+		/* emulate a batch buffer end so the return is handled correctly */
+ ret = cmd_handler_mi_batch_buffer_end(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
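+/*
+ * Static command description table; init_cmd_table() filters it by device
+ * type and hashes the entries by opcode.
+ */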
+static struct cmd_info cmd_info[] = {
+ {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, cmd_handler_mi_user_interrupt},
+
+ {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
+ D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
+
+ {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
+ F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ cmd_handler_mi_batch_buffer_end},
+
+ {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
+
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, NULL},
+
+ {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
+
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+
+ {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+
+ {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, cmd_handler_mi_store_data_index},
+
+ {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lri},
+
+ {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
+ cmd_handler_mi_update_gtt},
+
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+
+ {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
+ cmd_handler_mi_flush_dw},
+
+ {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
+ 10, cmd_handler_mi_clflush},
+
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lrr},
+
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
+ 8, cmd_handler_mi_op_2e},
+
+ {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
+ 8, cmd_handler_mi_op_2f},
+
+ {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
+ F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
+ cmd_handler_mi_batch_buffer_start},
+
+ {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
+ F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end},
+
+ {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
+ R_RCS | R_BCS, D_ALL, 0, 2, NULL},
+
+ {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(3), 8, NULL},
+
+ {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, 0, 8, NULL},
+
+ {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
+ R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
+ OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
+ OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BLEND_STATE_POINTERS",
+ OP_3DSTATE_BLEND_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
+ OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_VS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_HS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_DS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_GS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_PS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
+ R_RCS, D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
+ R_RCS, D_ALL, 0, 1, NULL},
+
+ {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_2(2, 4), 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
+ OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
+ OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
+
+ {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
+ 1, NULL},
+
+ {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
+
+ {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ 0, 8, NULL},
+
+ {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
+ D_SKL_PLUS, 0, 8, NULL},
+
+ {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
+ NULL},
+
+ {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
+ F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
+ R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
+
+ {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 6, NULL},
+
+ {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+ {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
+ R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
+
+ {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
+ 0, 12, NULL},
+
+ {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
+ 0, 20, NULL},
+};
+
+static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
+{
+ hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
+}
+
+#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
+
+static void trace_cs_command(struct parser_exec_state *s,
+ cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
+{
+ /* This buffer is used by ftrace to store all commands copied from
+	 * guest gma space. Commands can sometimes cross page boundaries, which
+	 * should not be handled in the ftrace logic. So this is just used as a
+	 * 'bounce buffer'.
+ */
+ u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
+ int i;
+ u32 cmd_len = cmd_length(s);
+	/* The chosen value of GVT_MAX_CMD_LENGTH is based on the
+	 * following two considerations:
+	 * 1) From observation, most common ring commands are not that long.
+	 * But there are exceptions, so it indeed makes sense to observe
+	 * longer commands.
+	 * 2) From the performance and debugging point of view, dumping the
+	 * full contents of every command is not necessary.
+	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
+	 * the future for performance reasons.
+ */
+ if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
+ gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
+ cmd_len = GVT_MAX_CMD_LENGTH;
+ }
+
+ for (i = 0; i < cmd_len; i++)
+ cmd_trace_buf[i] = cmd_val(s, i);
+
+ trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
+ cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
+ cost_pre_cmd_handler, cost_cmd_handler);
+}
+
+/* call the cmd handler, and advance ip */
+static int cmd_parser_exec(struct parser_exec_state *s)
+{
+ struct cmd_info *info;
+ u32 cmd;
+ int ret = 0;
+ cycles_t t0, t1, t2;
+ struct parser_exec_state s_before_advance_custom;
+
+ t0 = get_cycles();
+
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ gvt_dbg_cmd("%s\n", info->name);
+
+ s->info = info;
+
+ t1 = get_cycles();
+
+ memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+
+ if (info->handler) {
+ ret = info->handler(s);
+ if (ret < 0) {
+ gvt_err("%s handler error\n", info->name);
+ return ret;
+ }
+ }
+ t2 = get_cycles();
+
+ trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
+
+ if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
+ ret = cmd_advance_default(s);
+ if (ret) {
+ gvt_err("%s IP advance error\n", info->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
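+/*
+ * The ring buffer is circular: when tail >= head the valid region is
+ * [head, tail], otherwise it wraps around the end of the buffer.
+ */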
+static inline bool gma_out_of_range(unsigned long gma,
+ unsigned long gma_head, unsigned int gma_tail)
+{
+ if (gma_tail >= gma_head)
+ return (gma < gma_head) || (gma > gma_tail);
+ else
+ return (gma > gma_tail) && (gma < gma_head);
+}
+
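+/* Scan commands from ring head to tail, keeping the parser inside the ring */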
+static int command_scan(struct parser_exec_state *s,
+ unsigned long rb_head, unsigned long rb_tail,
+ unsigned long rb_start, unsigned long rb_len)
+{
+
+ unsigned long gma_head, gma_tail, gma_bottom;
+ int ret = 0;
+
+ gma_head = rb_start + rb_head;
+ gma_tail = rb_start + rb_tail;
+ gma_bottom = rb_start + rb_len;
+
+ gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
+
+ while (s->ip_gma != gma_tail) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (!(s->ip_gma >= rb_start) ||
+ !(s->ip_gma < gma_bottom)) {
+ gvt_err("ip_gma %lx out of ring scope."
+ "(base:0x%lx, bottom: 0x%lx)\n",
+ s->ip_gma, rb_start,
+ gma_bottom);
+ parser_exec_state_dump(s);
+ return -EINVAL;
+ }
+ if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
+ gvt_err("ip_gma %lx out of range."
+ "base 0x%lx head 0x%lx tail 0x%lx\n",
+ s->ip_gma, rb_start,
+ rb_head, rb_tail);
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+ ret = cmd_parser_exec(s);
+ if (ret) {
+ gvt_err("cmd parser error\n");
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+
+ gvt_dbg_cmd("scan_end\n");
+
+ return ret;
+}
+
+static int scan_workload(struct intel_vgpu_workload *workload)
+{
+ unsigned long gma_head, gma_tail, gma_bottom;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
+ s.ring_start = workload->rb_start;
+ s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = workload->shadow_ring_buffer_va;
+ s.workload = workload;
+
+ if ((bypass_scan_mask & (1 << workload->ring_id)) ||
+ gma_head == gma_tail)
+ return 0;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, workload->rb_head, workload->rb_tail,
+ workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
+
+out:
+ return ret;
+}
+
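+/* Scan the shadowed indirect (workaround) context as if it were a ring buffer */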
+static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+
+ unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
+ PAGE_SIZE);
+ gma_head = wa_ctx->indirect_ctx.guest_gma;
+ gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
+ gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = wa_ctx->workload->vgpu;
+ s.ring_id = wa_ctx->workload->ring_id;
+ s.ring_start = wa_ctx->indirect_ctx.guest_gma;
+ s.ring_size = ring_size;
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = wa_ctx->indirect_ctx.shadow_va;
+ s.workload = wa_ctx->workload;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, 0, ring_tail,
+ wa_ctx->indirect_ctx.guest_gma, ring_size);
+out:
+ return ret;
+}
+
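+/*
+ * Copy the guest ring buffer contents between head and tail into the shadow
+ * ring buffer; when the region wraps (head > tail) it is copied in two chunks.
+ */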
+static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
+ unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
+ unsigned int copy_len = 0;
+ int ret;
+
+ guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ /* calculate workload ring buffer size */
+ workload->rb_len = (workload->rb_tail + guest_rb_size -
+ workload->rb_head) % guest_rb_size;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_top = workload->rb_start + guest_rb_size;
+
+ /* allocate shadow ring buffer */
+ ret = intel_ring_begin(workload->req, workload->rb_len / 4);
+ if (ret)
+ return ret;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+
+ /* head > tail --> copy head <-> top */
+ if (gma_head > gma_tail) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_top,
+ workload->shadow_ring_buffer_va);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ copy_len = gma_top - gma_head;
+ gma_head = workload->rb_start;
+ }
+
+ /* copy head or start <-> tail */
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_tail,
+ workload->shadow_ring_buffer_va + copy_len);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ ring->tail += workload->rb_len;
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+{
+ int ret;
+
+ ret = shadow_workload_ring_buffer(workload);
+ if (ret) {
+ gvt_err("fail to shadow workload ring_buffer\n");
+ return ret;
+ }
+
+ ret = scan_workload(workload);
+ if (ret) {
+ gvt_err("scan workload error\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
+ int ctx_size = wa_ctx->indirect_ctx.size;
+ unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+ void *map;
+
+ obj = i915_gem_object_create(dev,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ /* get the va of the shadow batch buffer */
+ map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(map)) {
+ gvt_err("failed to vmap shadow indirect ctx\n");
+ ret = PTR_ERR(map);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow indirect ctx to CPU\n");
+ goto unmap_src;
+ }
+
+ ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
+ wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ guest_gma, guest_gma + ctx_size,
+ map);
+ if (ret) {
+ gvt_err("fail to copy guest indirect ctx\n");
+ goto unmap_src;
+ }
+
+ wa_ctx->indirect_ctx.obj = obj;
+ wa_ctx->indirect_ctx.shadow_va = map;
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(obj);
+put_obj:
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ return ret;
+}
+
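+/*
+ * Append a batch buffer start pointing at the per-context buffer right after
+ * the shadowed indirect context (0x18800001 is an MI_BATCH_BUFFER_START
+ * header).
+ */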
+static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ unsigned char *bb_start_sva;
+
+ per_ctx_start[0] = 0x18800001;
+ per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
+
+ bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
+
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ret;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ ret = shadow_indirect_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("fail to shadow indirect ctx\n");
+ return ret;
+ }
+
+ combine_wa_ctx(wa_ctx);
+
+ ret = scan_wa_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("scan wa ctx error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+ unsigned int opcode, int rings)
+{
+ struct cmd_info *info = NULL;
+ unsigned int ring;
+
+ for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ info = find_cmd_entry(gvt, opcode, ring);
+ if (info)
+ break;
+ }
+ return info;
+}
+
+static int init_cmd_table(struct intel_gvt *gvt)
+{
+ int i;
+ struct cmd_entry *e;
+ struct cmd_info *info;
+ unsigned int gen_type;
+
+ gen_type = intel_gvt_get_device_type(gvt);
+
+ for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ if (!(cmd_info[i].devices & gen_type))
+ continue;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->info = &cmd_info[i];
+ info = find_cmd_entry_any_ring(gvt,
+ e->info->opcode, e->info->rings);
+ if (info) {
+ gvt_err("%s %s duplicated\n", e->info->name,
+ info->name);
+ return -EEXIST;
+ }
+
+ INIT_HLIST_NODE(&e->hlist);
+ add_cmd_entry(gvt, e);
+ gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
+ }
+ return 0;
+}
+
+static void clean_cmd_table(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct cmd_entry *e;
+ int i;
+
+ hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
+ kfree(e);
+
+ hash_init(gvt->cmd_table);
+}
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
+{
+ clean_cmd_table(gvt);
+}
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
+{
+ int ret;
+
+ ret = init_cmd_table(gvt);
+ if (ret) {
+ intel_gvt_clean_cmd_parser(gvt);
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
new file mode 100644
index 000000000000..bed33514103c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+#ifndef _GVT_CMD_PARSER_H_
+#define _GVT_CMD_PARSER_H_
+
+#define GVT_CMD_HASH_BITS 7
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412be665f..68cba7bd980a 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
+#define gvt_err(fmt, args...) \
+ DRM_ERROR("gvt: "fmt, ##args)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
-/*
- * Other GVT debug stuff will be introduced in the GVT device model patches.
- */
+#define gvt_dbg_irq(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+
+#define gvt_dbg_mm(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+
+#define gvt_dbg_mmio(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+
+#define gvt_dbg_dpy(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+
+#define gvt_dbg_el(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+
+#define gvt_dbg_sched(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+
+#define gvt_dbg_render(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+
+#define gvt_dbg_cmd(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 000000000000..c0c884aeb30e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int get_edp_pipe(struct intel_vgpu *vgpu)
+{
+ u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
+ int pipe = -1;
+
+ switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+ return pipe;
+}
+
+static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ return 0;
+
+ if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+ return 0;
+ return 1;
+}
+
+static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ return -EINVAL;
+
+ if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ return 1;
+
+ if (edp_pipe_is_enabled(vgpu) &&
+ get_edp_pipe(vgpu) == pipe)
+ return 1;
+ return 0;
+}
+
+/* EDID with 1024x768 as its resolution */
+static unsigned char virtual_dp_monitor_edid[] = {
+	/* Header */
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+};
+
+#define DPCD_HEADER_SIZE 0xb
+
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+ 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
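+
+	/* Clear the PCH hotplug status bits first, then report a plugged-in
+	 * monitor on every port that has a virtual monitor attached.
+	 */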
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+
+ if (IS_SKYLAKE(dev_priv))
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ SDE_PORTE_HOTPLUG_SPT);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_PORT_DP_A_HOTPLUG;
+ else
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ }
+}
+
+static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ kfree(port->edid);
+ port->edid = NULL;
+
+ kfree(port->dpcd);
+ port->dpcd = NULL;
+}
+
+static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
+ int type)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+ if (!port->edid)
+ return -ENOMEM;
+
+ port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+ if (!port->dpcd) {
+ kfree(port->edid);
+ return -ENOMEM;
+ }
+
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ EDID_SIZE);
+ port->edid->data_valid = true;
+
+ memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+ port->dpcd->data_valid = true;
+ port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+ port->type = type;
+
+ emulate_monitor_status_change(vgpu);
+ return 0;
+}
+
+/**
+ * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
+ * be turned on/off when a virtual pipe is enabled/disabled.
+ * @gvt: a GVT device
+ *
+ * This function is used to turn the vblank timer on/off according to the
+ * currently enabled/disabled virtual pipes.
+ *
+ */
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_vgpu *vgpu;
+ bool have_enabled_pipe = false;
+ int pipe, id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+
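+	/* Restart the vblank timer only if at least one vGPU still has an
+	 * enabled pipe; otherwise leave it cancelled.
+	 */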
+ for_each_active_vgpu(gvt, vgpu, id) {
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+ have_enabled_pipe =
+ pipe_is_enabled(vgpu, pipe);
+ if (have_enabled_pipe)
+ break;
+ }
+ }
+
+ if (have_enabled_pipe)
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+}
+
+static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_irq *irq = &vgpu->irq;
+ int vblank_event[] = {
+ [PIPE_A] = PIPE_A_VBLANK,
+ [PIPE_B] = PIPE_B_VBLANK,
+ [PIPE_C] = PIPE_C_VBLANK,
+ };
+ int event;
+
+ if (pipe < PIPE_A || pipe > PIPE_C)
+ return;
+
+ for_each_set_bit(event, irq->flip_done_event[pipe],
+ INTEL_GVT_EVENT_MAX) {
+ clear_bit(event, irq->flip_done_event[pipe]);
+ if (!pipe_is_enabled(vgpu, pipe))
+ continue;
+
+ vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
+
+ if (pipe_is_enabled(vgpu, pipe)) {
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
+ }
+}
+
+static void emulate_vblank(struct intel_vgpu *vgpu)
+{
+ int pipe;
+
+ for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ emulate_vblank_on_pipe(vgpu, pipe);
+}
+
+/**
+ * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
+ * @gvt: a GVT device
+ *
+ * This function is used to trigger vblank interrupts for vGPUs on the GVT device
+ *
+ */
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ for_each_active_vgpu(gvt, vgpu, id)
+ emulate_vblank(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_display - clean vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up vGPU virtual display emulation stuff
+ *
+ */
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (IS_SKYLAKE(dev_priv))
+ clean_virtual_dp_monitor(vgpu, PORT_D);
+ else
+ clean_virtual_dp_monitor(vgpu, PORT_B);
+}
+
+/**
+ * intel_vgpu_init_display - initialize vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize vGPU virtual display emulation stuff
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (IS_SKYLAKE(dev_priv))
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ else
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 000000000000..7a60cb848268
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX 20
+#define DPCD_SIZE 0x700
+
+#define intel_vgpu_port(vgpu, port) \
+ (&(vgpu->display.ports[port]))
+
+#define intel_vgpu_has_monitor_on_port(vgpu, port) \
+ (intel_vgpu_port(vgpu, port)->edid && \
+ intel_vgpu_port(vgpu, port)->edid->data_valid)
+
+#define intel_vgpu_port_is_dp(vgpu, port) \
+ ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
+
+#define INTEL_GVT_MAX_UEVENT_VARS 3
+
+/* DPCD start */
+#define DPCD_SIZE 0x700
+
+/* DPCD */
+#define DP_SET_POWER 0x600
+#define DP_SET_POWER_D0 0x1
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+
+#define AUX_BURST_SIZE 16
+
+/* DPCD addresses */
+#define DPCD_REV 0x000
+#define DPCD_MAX_LINK_RATE 0x001
+#define DPCD_MAX_LANE_COUNT 0x002
+
+#define DPCD_TRAINING_PATTERN_SET 0x102
+#define DPCD_SINK_COUNT 0x200
+#define DPCD_LANE0_1_STATUS 0x202
+#define DPCD_LANE2_3_STATUS 0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DPCD_SINK_STATUS 0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
+#define DPCD_LINK_TRAINING_DISABLED 0x00
+#define DPCD_TRAINING_PATTERN_1 0x01
+#define DPCD_TRAINING_PATTERN_2 0x02
+
+#define DPCD_CP_READY_MASK (1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE 0x11
+#define DPCD_LANES_EQ_DONE 0x22
+#define DPCD_SYMBOL_LOCKED 0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE 0x01
+
+#define DPCD_SINK_IN_SYNC 0x03
+/* DPCD end */
+
+#define SBI_RESPONSE_MASK 0x3
+#define SBI_RESPONSE_SHIFT 0x1
+#define SBI_STAT_MASK 0x1
+#define SBI_STAT_SHIFT 0x0
+#define SBI_OPCODE_SHIFT 8
+#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
+#define SBI_CMD_IORD 2
+#define SBI_CMD_IOWR 3
+#define SBI_CMD_CRRD 6
+#define SBI_CMD_CRWR 7
+#define SBI_ADDR_OFFSET_SHIFT 16
+#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
+
+struct intel_vgpu_sbi_register {
+ unsigned int offset;
+ u32 value;
+};
+
+struct intel_vgpu_sbi {
+ int number;
+ struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
+};
+
+enum intel_gvt_plane_type {
+ PRIMARY_PLANE = 0,
+ CURSOR_PLANE,
+ SPRITE_PLANE,
+ MAX_PLANE
+};
+
+struct intel_vgpu_dpcd_data {
+ bool data_valid;
+ u8 data[DPCD_SIZE];
+};
+
+enum intel_vgpu_port_type {
+ GVT_CRT = 0,
+ GVT_DP_A,
+ GVT_DP_B,
+ GVT_DP_C,
+ GVT_DP_D,
+ GVT_HDMI_B,
+ GVT_HDMI_C,
+ GVT_HDMI_D,
+ GVT_PORT_MAX
+};
+
+struct intel_vgpu_port {
+ /* per display EDID information */
+ struct intel_vgpu_edid_data *edid;
+ /* per display DPCD information */
+ struct intel_vgpu_dpcd_data *dpcd;
+ int type;
+};
+
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+
+int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 000000000000..bda85dff7b2a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GMBUS1_TOTAL_BYTES_SHIFT 16
+#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
+#define gmbus1_total_byte_count(v) (((v) >> \
+ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
+#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
+
+/* GMBUS0 bit definitions */
+#define _GMBUS_PIN_SEL_MASK (0x7)
+
+static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+ unsigned char chr = 0;
+
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ gvt_err("Driver tries to read EDID without proper sequence!\n");
+ return 0;
+ }
+ if (edid->current_edid_read >= EDID_SIZE) {
+ gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ return 0;
+ }
+
+ if (!edid->edid_available) {
+ gvt_err("Reading EDID but EDID is not available!\n");
+ return 0;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
+ struct intel_vgpu_edid_data *edid_data =
+ intel_vgpu_port(vgpu, edid->port)->edid;
+
+ chr = edid_data->edid_block[edid->current_edid_read];
+ edid->current_edid_read++;
+ } else {
+ gvt_err("No EDID available during the reading?\n");
+ }
+ return chr;
+}
+
+static inline int get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 2)
+ port = PORT_E;
+ else if (port_select == 4)
+ port = PORT_C;
+ else if (port_select == 5)
+ port = PORT_B;
+ else if (port_select == 6)
+ port = PORT_D;
+ return port;
+}
+
+static void reset_gmbus_controller(struct intel_vgpu *vgpu)
+{
+ vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ if (!vgpu->display.i2c_edid.edid_available)
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+/* GMBUS0 */
+static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int port, pin_select;
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+
+ pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (pin_select == 0)
+ return 0;
+
+ port = get_port_from_gmbus0(pin_select);
+ if (WARN_ON(port < 0))
+ return 0;
+
+ vgpu->display.i2c_edid.state = I2C_GMBUS;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
+ !intel_vgpu_port_is_dp(vgpu, port)) {
+ vgpu->display.i2c_edid.port = port;
+ vgpu->display.i2c_edid.edid_available = true;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ } else
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ return 0;
+}
+
+static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ u32 slave_addr;
+ u32 wvalue = *(u32 *)p_data;
+
+ if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
+ if (!(wvalue & GMBUS_SW_CLR_INT)) {
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
+ reset_gmbus_controller(vgpu);
+ }
+ /*
+ * TODO: "This bit is cleared to zero when an event
+ * causes the HW_RDY bit transition to occur "
+ */
+ } else {
+ /*
+ * per bspec setting this bit can cause:
+ * 1) INT status bit cleared
+ * 2) HW_RDY bit asserted
+ */
+ if (wvalue & GMBUS_SW_CLR_INT) {
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ }
+
+		/* For virtualization, we assume that the HW is always ready,
+ * so GMBUS_SW_RDY should always be cleared
+ */
+ if (wvalue & GMBUS_SW_RDY)
+ wvalue &= ~GMBUS_SW_RDY;
+
+ i2c_edid->gmbus.total_byte_count =
+ gmbus1_total_byte_count(wvalue);
+ slave_addr = gmbus1_slave_addr(wvalue);
+
+		/* vgpu gmbus only supports EDID */
+ if (slave_addr == EDID_ADDR) {
+ i2c_edid->slave_selected = true;
+ } else if (slave_addr != 0) {
+ gvt_dbg_dpy(
+ "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ " gmbus operations will be ignored.\n",
+ vgpu->id, slave_addr);
+ }
+
+ if (wvalue & GMBUS_CYCLE_INDEX)
+ i2c_edid->current_edid_read =
+ gmbus1_slave_index(wvalue);
+
+ i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+ switch (gmbus1_bus_cycle(wvalue)) {
+ case GMBUS_NOCYCLE:
+ break;
+ case GMBUS_STOP:
+ /* From spec:
+ * This can only cause a STOP to be generated
+ * if a GMBUS cycle is generated, the GMBUS is
+ * currently in a data/wait/idle phase, or it is in a
+ * WAIT phase
+ */
+ if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
+ != GMBUS_NOCYCLE) {
+ intel_vgpu_init_i2c_edid(vgpu);
+ /* After the 'stop' cycle, hw state would become
+ * 'stop phase' and then 'idle phase' after a
+ * few milliseconds. In emulation, we just set
+ * it as 'idle phase' ('stop phase' is not
+ * visible in gmbus interface)
+ */
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ }
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ case NIDX_STOP:
+ case IDX_STOP:
+ /* From hw spec the GMBUS phase
+			 * transitions like this:
+ * START (-->INDEX) -->DATA
+ */
+ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ break;
+ default:
+ gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ break;
+ }
+ /*
+ * From hw spec the WAIT state will be
+ * cleared:
+ * (1) in a new GMBUS cycle
+ * (2) by generating a stop
+ */
+ vgpu_vreg(vgpu, offset) = wvalue;
+ }
+ return 0;
+}
+
+static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int i;
+ unsigned char byte_data;
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int byte_left = i2c_edid->gmbus.total_byte_count -
+ i2c_edid->current_edid_read;
+ int byte_count = byte_left;
+ u32 reg_data = 0;
+
+	/* Data can only be received if the previous settings are correct */
+ if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (byte_left <= 0) {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+ }
+
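+		/* GMBUS3 is 32 bits wide, so at most four EDID bytes are
+		 * returned per read.
+		 */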
+ if (byte_count > 4)
+ byte_count = 4;
+ for (i = 0; i < byte_count; i++) {
+ byte_data = edid_get_byte(vgpu);
+ reg_data |= (byte_data << (i << 3));
+ }
+
+ memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+
+ if (byte_left <= 4) {
+ switch (i2c_edid->gmbus.cycle_type) {
+ case NIDX_STOP:
+ case IDX_STOP:
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ default:
+ i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+ break;
+ }
+ intel_vgpu_init_i2c_edid(vgpu);
+ }
+ /*
+ * Read GMBUS3 during send operation,
+ * return the latest written value
+ */
+ } else {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
+ vgpu->id);
+ }
+ return 0;
+}
+
+static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = vgpu_vreg(vgpu, offset);
+
+ if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
+ vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
+ memcpy(p_data, (void *)&value, bytes);
+ return 0;
+}
+
+static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+
+ if (wvalue & GMBUS_INUSE)
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
+ /* All other bits are read-only */
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
+
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate gmbus register mmio write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
+ return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
+ return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+ return 0;
+}
+
+enum {
+ AUX_CH_CTL = 0,
+ AUX_CH_DATA1,
+ AUX_CH_DATA2,
+ AUX_CH_DATA3,
+ AUX_CH_DATA4,
+ AUX_CH_DATA5
+};
+
+static inline int get_aux_ch_reg(unsigned int offset)
+{
+ int reg;
+
+ switch (offset & 0xff) {
+ case 0x10:
+ reg = AUX_CH_CTL;
+ break;
+ case 0x14:
+ reg = AUX_CH_DATA1;
+ break;
+ case 0x18:
+ reg = AUX_CH_DATA2;
+ break;
+ case 0x1c:
+ reg = AUX_CH_DATA3;
+ break;
+ case 0x20:
+ reg = AUX_CH_DATA4;
+ break;
+ case 0x24:
+ reg = AUX_CH_DATA5;
+ break;
+ default:
+ reg = -1;
+ break;
+ }
+ return reg;
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+ ((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
+/**
+ * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
+ * @vgpu: a vGPU
+ *
+ * This function is used to emulate AUX channel register write
+ *
+ */
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int msg_length, ret_msg_size;
+ int msg, addr, ctrl, op;
+ u32 value = *(u32 *)p_data;
+ int aux_data_for_write = 0;
+ int reg = get_aux_ch_reg(offset);
+
+ if (reg != AUX_CH_CTL) {
+ vgpu_vreg(vgpu, offset) = value;
+ return;
+ }
+
+ msg_length = AUX_CTL_MSG_LENGTH(value);
+ // check the msg in DATA register.
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ op = ctrl >> 4;
+ if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
+		/* This ctl write only clears some states */
+ return;
+ }
+
+	/* Always set the wanted value for VMs. */
+ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ vgpu_vreg(vgpu, offset) =
+ DP_AUX_CH_CTL_DONE |
+ ((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+ DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+ if (msg_length == 3) {
+ if (!(op & GVT_AUX_I2C_MOT)) {
+ /* stop */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else {
+ /* start or restart */
+ i2c_edid->aux_ch.i2c_over_aux_ch = true;
+ i2c_edid->aux_ch.aux_ch_mot = true;
+ if (addr == 0) {
+ /* reset the address */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else if (addr == EDID_ADDR) {
+ i2c_edid->state = I2C_AUX_CH;
+ i2c_edid->port = port_idx;
+ i2c_edid->slave_selected = true;
+ if (intel_vgpu_has_monitor_on_port(vgpu,
+ port_idx) &&
+ intel_vgpu_port_is_dp(vgpu, port_idx))
+ i2c_edid->edid_available = true;
+ }
+ }
+ } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ /* TODO
+ * We only support EDID reading from I2C_over_AUX. And
+ * we do not expect the index mode to be used. Right now
+		 * the WRITE operation is ignored. This is good enough for
+		 * the gfx driver's EDID accesses.
+ */
+ } else {
+ if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ return;
+ if (WARN_ON(msg_length != 4))
+ return;
+ if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ unsigned char val = edid_get_byte(vgpu);
+
+ aux_data_for_write = (val << 16);
+ }
+ }
+ /* write the return value in AUX_CH_DATA reg which includes:
+ * ACK of I2C_WRITE
+ * returned byte if it is READ
+ */
+ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
+ vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
+}
+
+/**
+ * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize vGPU i2c edid emulation stuff
+ *
+ */
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+
+ edid->state = I2C_NOT_SPECIFIED;
+
+ edid->port = -1;
+ edid->slave_selected = false;
+ edid->edid_available = false;
+ edid->current_edid_read = 0;
+
+ memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
+
+ edid->aux_ch.i2c_over_aux_ch = false;
+ edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 000000000000..f6dfc8b795ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE 128
+#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE 0x8
+#define GVT_AUX_NATIVE_READ 0x9
+#define GVT_AUX_I2C_WRITE 0x0
+#define GVT_AUX_I2C_READ 0x1
+#define GVT_AUX_I2C_STATUS 0x2
+#define GVT_AUX_I2C_MOT 0x4
+#define GVT_AUX_I2C_REPLY_ACK 0x0
+
+struct intel_vgpu_edid_data {
+ bool data_valid;
+ unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type {
+ GMBUS_NOCYCLE = 0x0,
+ NIDX_NS_W = 0x1,
+ IDX_NS_W = 0x3,
+ GMBUS_STOP = 0x4,
+ NIDX_STOP = 0x5,
+ IDX_STOP = 0x7
+};
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 could be related to the EDID virtualization. Another two GMBUS
+ * registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
+ * not considered here. Below describes the usage of the GMBUS registers that
+ * the EDID virtualization cares about.
+ *
+ * GMBUS0:
+ * R/W
+ * port selection. value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ * R/W Protect
+ * Command and Status.
+ * bit0 is the direction bit: 1 is read; 0 is write.
+ *	bit1 - bit7 is the 7-bit slave address.
+ * bit16 - bit24 total byte count (ignore?)
+ *
+ * GMBUS2:
+ * Most of bits are read only except bit 15 (IN_USE)
+ * Status register
+ * bit0 - bit8 current byte count
+ * bit 11: hardware ready;
+ *
+ * GMBUS3:
+ * Read/Write
+ * Data for transfer
+ */
+
+/* From hw specs, other phases like START, ADDRESS, INDEX
+ * are invisible to GMBUS MMIO interface. So no definitions
+ * in below enum types
+ */
+enum gvt_gmbus_phase {
+ GMBUS_IDLE_PHASE = 0,
+ GMBUS_DATA_PHASE,
+ GMBUS_WAIT_PHASE,
+ //GMBUS_STOP_PHASE,
+ GMBUS_MAX_PHASE
+};
+
+struct intel_vgpu_i2c_gmbus {
+ unsigned int total_byte_count; /* from GMBUS1 */
+ enum gmbus_cycle_type cycle_type;
+ enum gvt_gmbus_phase phase;
+};
+
+struct intel_vgpu_i2c_aux_ch {
+ bool i2c_over_aux_ch;
+ bool aux_ch_mot;
+};
+
+enum i2c_state {
+ I2C_NOT_SPECIFIED = 0,
+ I2C_GMBUS = 1,
+ I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct intel_vgpu_i2c_edid {
+ enum i2c_state state;
+
+ unsigned int port;
+ bool slave_selected;
+ bool edid_available;
+ unsigned int current_edid_read;
+
+ struct intel_vgpu_i2c_gmbus gmbus;
+ struct intel_vgpu_i2c_aux_ch aux_ch;
+};
+
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
+
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data);
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
new file mode 100644
index 000000000000..f32bb6f6495c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define _EL_OFFSET_STATUS 0x234
+#define _EL_OFFSET_STATUS_BUF 0x370
+#define _EL_OFFSET_STATUS_PTR 0x3A0
+
+#define execlist_ring_mmio(gvt, ring_id, offset) \
+ (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+
+#define valid_context(ctx) ((ctx)->valid)
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+static int context_switch_events[] = {
+ [RCS] = RCS_AS_CONTEXT_SWITCH,
+ [BCS] = BCS_AS_CONTEXT_SWITCH,
+ [VCS] = VCS_AS_CONTEXT_SWITCH,
+ [VCS2] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS] = VECS_AS_CONTEXT_SWITCH,
+};
+
+static int ring_id_to_context_switch_event(int ring_id)
+{
+	if (WARN_ON(ring_id < RCS || ring_id >=
+			ARRAY_SIZE(context_switch_events)))
+ return -EINVAL;
+
+ return context_switch_events[ring_id];
+}
+
+static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
+{
+ gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+
+ execlist->running_slot = execlist->pending_slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = execlist->running_context ?
+ &execlist->running_slot->ctx[0] : NULL;
+
+ gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+}
+
+static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *desc = execlist->running_context;
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ struct execlist_status_format status;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt,
+ ring_id, _EL_OFFSET_STATUS);
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
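+	/* Mirror the running slot into the status register: the slot index
+	 * (0 or 1) selects which execlist is reported active/valid and where
+	 * the current/write pointers point.
+	 */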
+ if (running) {
+ status.current_execlist_pointer = !!running->index;
+ status.execlist_write_pointer = !!!running->index;
+ status.execlist_0_active = status.execlist_0_valid =
+ !!!(running->index);
+ status.execlist_1_active = status.execlist_1_valid =
+ !!(running->index);
+ } else {
+ status.context_id = 0;
+ status.execlist_0_active = status.execlist_0_valid = 0;
+ status.execlist_1_active = status.execlist_1_valid = 0;
+ }
+
+ status.context_id = desc ? desc->context_id : 0;
+ status.execlist_queue_full = !!(pending);
+
+ vgpu_vreg(vgpu, status_reg) = status.ldw;
+ vgpu_vreg(vgpu, status_reg + 4) = status.udw;
+
+ gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
+ vgpu->id, status_reg, status.ldw, status.udw);
+}
+
+static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 write_pointer;
+ u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_BUF);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+
+ write_pointer = ctx_status_ptr.write_ptr;
+
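+	/*
+	 * The context status buffer holds six entries. 0x7 is the reset
+	 * value programmed by init_vgpu_execlist(), so start at entry 0 in
+	 * that case; otherwise advance the write pointer and wrap around.
+	 */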
+ if (write_pointer == 0x7)
+ write_pointer = 0;
+ else {
+ ++write_pointer;
+ write_pointer %= 0x6;
+ }
+
+ offset = ctx_status_buf_reg + write_pointer * 8;
+
+ vgpu_vreg(vgpu, offset) = status->ldw;
+ vgpu_vreg(vgpu, offset + 4) = status->udw;
+
+ ctx_status_ptr.write_ptr = write_pointer;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+
+ gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
+
+ if (trigger_interrupt_later)
+ return;
+
+ intel_vgpu_trigger_virtual_event(vgpu,
+ ring_id_to_context_switch_event(execlist->ring_id));
+}
+
+static int emulate_execlist_ctx_schedule_out(
+ struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format *ctx)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
+ struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
+ struct execlist_context_status_format status;
+
+ memset(&status, 0, sizeof(status));
+
+ gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
+
+ if (WARN_ON(!same_context(ctx, execlist->running_context))) {
+		gvt_err("schedule out context is not running context, "
+ "ctx id %x running ctx id %x\n",
+ ctx->context_id,
+ execlist->running_context->context_id);
+ return -EINVAL;
+ }
+
+ /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
+ if (valid_context(ctx1) && same_context(ctx0, ctx)) {
+ gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
+
+ execlist->running_context = ctx1;
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.element_switch = 1;
+ status.context_id = ctx->context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ /*
+ * ctx1 is not valid, ctx == ctx0
+ * ctx1 is valid, ctx1 == ctx
+ * --> last element is finished
+ * emulate:
+ * active-to-idle if there is *no* pending execlist
+ * context-complete if there *is* pending execlist
+ */
+ } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
+ || (valid_context(ctx1) && same_context(ctx1, ctx))) {
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.active_to_idle = 1;
+ status.context_id = ctx->context_id;
+
+ if (!pending) {
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ emulate_csb_update(execlist, &status, true);
+
+ memset(&status, 0, sizeof(status));
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ }
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
+ struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS);
+ struct execlist_status_format status;
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (status.execlist_queue_full) {
+ gvt_err("virtual execlist slots are full\n");
+ return NULL;
+ }
+
+ return &execlist->slot[status.execlist_write_pointer];
+}
+
+static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format ctx[2])
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *slot =
+ get_next_execlist_slot(execlist);
+
+ struct execlist_ctx_descriptor_format *ctx0, *ctx1;
+ struct execlist_context_status_format status;
+
+ gvt_dbg_el("emulate schedule-in\n");
+
+ if (!slot) {
+ gvt_err("no available execlist slot\n");
+ return -EINVAL;
+ }
+
+ memset(&status, 0, sizeof(status));
+ memset(slot->ctx, 0, sizeof(slot->ctx));
+
+ slot->ctx[0] = ctx[0];
+ slot->ctx[1] = ctx[1];
+
+ gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
+ slot->index, ctx[0].context_id,
+ ctx[1].context_id);
+
+ /*
+	 * no running execlist, make this write bundle the running execlist
+ * -> idle-to-active
+ */
+ if (!running) {
+ gvt_dbg_el("no current running execlist\n");
+
+ execlist->running_slot = slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = &slot->ctx[0];
+
+ gvt_dbg_el("running slot index %d running context %x\n",
+ execlist->running_slot->index,
+ execlist->running_context->context_id);
+
+ emulate_execlist_status(execlist);
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ return 0;
+ }
+
+ ctx0 = &running->ctx[0];
+ ctx1 = &running->ctx[1];
+
+ gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
+ running->index, ctx0->context_id, ctx1->context_id);
+
+ /*
+	 * already has a running execlist
+ * a. running ctx1 is valid,
+ * ctx0 is finished, and running ctx1 == new execlist ctx[0]
+ * b. running ctx1 is not valid,
+ * ctx0 == new execlist ctx[0]
+ * ----> lite-restore + preempted
+ */
+ if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
+ /* condition a */
+ (!same_context(ctx0, execlist->running_context))) ||
+ (!valid_context(ctx1) &&
+ same_context(ctx0, &slot->ctx[0]))) { /* condition b */
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ execlist->pending_slot = slot;
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.lite_restore = status.preempted = 1;
+ status.context_id = ctx[0].context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ gvt_dbg_el("emulate as pending slot\n");
+ /*
+ * otherwise
+	 * --> emulate the "pending execlist exists but no preemption" case
+ */
+ execlist->pending_slot = slot;
+ emulate_execlist_status(execlist);
+ }
+ return 0;
+}
+
+static void free_workload(struct intel_vgpu_workload *workload)
+{
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_gvt_mm_unreference(workload->shadow_mm);
+ kmem_cache_free(workload->vgpu->workloads, workload);
+}
+
+#define get_desc_from_elsp_dwords(ed, i) \
+ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
+
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
+ unsigned long add, int gmadr_bytes)
+{
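+	/* Patch the guest graphics address in the shadowed batch-buffer
+	 * start command: dword 1 carries the low address bits and, for
+	 * 8-byte addressing, dword 2 carries the high bits.
+	 */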
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return -1;
+
+ *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
+ BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 8) {
+ *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
+ add & BATCH_BUFFER_ADDR_HIGH_MASK;
+ }
+
+ return 0;
+}
+
+static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ /* pin the gem object to ggtt */
+ if (!list_empty(&workload->shadow_bb)) {
+ struct intel_shadow_bb_entry *entry_obj =
+ list_first_entry(&workload->shadow_bb,
+ struct intel_shadow_bb_entry,
+ list);
+ struct intel_shadow_bb_entry *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ struct i915_vma *vma;
+
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
+ 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
+ }
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+			/* update the relocated gma with the shadow batch buffer */
+ set_gma_to_bb_cmd(entry_obj,
+ i915_ggtt_offset(vma),
+ gmadr_bytes);
+ }
+ }
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ring_id = wa_ctx->workload->ring_id;
+ struct i915_gem_context *shadow_ctx =
+ wa_ctx->workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin indirect ctx obj\n");
+ return;
+ }
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+}
+
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+
+ intel_vgpu_pin_mm(workload->shadow_mm);
+ intel_vgpu_sync_oos_pages(workload->vgpu);
+ intel_vgpu_flush_post_shadow(workload->vgpu);
+ prepare_shadow_batch_buffer(workload);
+ prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	/* release all the shadow batch buffers */
+ if (!list_empty(&workload->shadow_bb)) {
+ struct intel_shadow_bb_entry *entry_obj =
+ list_first_entry(&workload->shadow_bb,
+ struct intel_shadow_bb_entry,
+ list);
+ struct intel_shadow_bb_entry *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ i915_gem_object_unpin_map(entry_obj->obj);
+ i915_gem_object_put(entry_obj->obj);
+ list_del(&entry_obj->list);
+ kfree(entry_obj);
+ }
+ }
+}
+
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
+static int complete_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_execlist *execlist =
+ &vgpu->execlist[workload->ring_id];
+ struct intel_vgpu_workload *next_workload;
+ struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ bool lite_restore = false;
+ int ret;
+
+ gvt_dbg_el("complete workload %p status %d\n", workload,
+ workload->status);
+
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
+ if (workload->status || vgpu->resetting)
+ goto out;
+
+ if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ struct execlist_ctx_descriptor_format *this_desc, *next_desc;
+
+ next_workload = container_of(next,
+ struct intel_vgpu_workload, list);
+ this_desc = &workload->ctx_desc;
+ next_desc = &next_workload->ctx_desc;
+
+ lite_restore = same_context(this_desc, next_desc);
+ }
+
+ if (lite_restore) {
+ gvt_dbg_el("next context == current - no schedule-out\n");
+ free_workload(workload);
+ return 0;
+ }
+
+ ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
+ if (ret)
+ goto err;
+out:
+ free_workload(workload);
+ return 0;
+err:
+ free_workload(workload);
+ return ret;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ int page_table_level;
+ u32 pdp[8];
+
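+	/* Pick the shadow page table depth from the context descriptor:
+	 * legacy 32-bit contexts use a 3-level PPGTT, legacy 64-bit contexts
+	 * a 4-level one. Advanced (SVM) contexts are rejected below.
+	 */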
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
+ page_table_level = 4;
+ } else {
+ gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
+ return 0;
+}
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+
+static int submit_context(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
+{
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return -EINVAL;
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+ if (!workload)
+ return -ENOMEM;
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->vgpu = vgpu;
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+ workload->prepare = prepare_execlist_workload;
+ workload->complete = complete_execlist_workload;
+ workload->status = -EINPROGRESS;
+ workload->emulate_schedule_in = emulate_schedule_in;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.workload = workload;
+
+ WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ }
+
+ if (emulate_schedule_in)
+ memcpy(&workload->elsp_dwords,
+ &vgpu->execlist[ring_id].elsp_dwords,
+ sizeof(workload->elsp_dwords));
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
+ emulate_schedule_in);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(vgpu->workloads, workload);
+ return ret;
+ }
+
+ queue_workload(workload);
+ return 0;
+}
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
+ unsigned long valid_desc_bitmap = 0;
+ bool emulate_schedule_in = true;
+ int ret;
+ int i;
+
+ memset(valid_desc, 0, sizeof(valid_desc));
+
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+
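+	/* Validate both ELSP descriptors before submitting any workload */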
+ for (i = 0; i < 2; i++) {
+ if (!desc[i]->valid)
+ continue;
+
+ if (!desc[i]->privilege_access) {
+ gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+		/* TODO: add more guest context checks here. */
+ set_bit(i, &valid_desc_bitmap);
+ valid_desc[i] = *desc[i];
+ }
+
+ if (!valid_desc_bitmap) {
+		gvt_err("vgpu%d: no valid desc in an elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ if (!test_bit(0, (void *)&valid_desc_bitmap) &&
+ test_bit(1, (void *)&valid_desc_bitmap)) {
+ gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ /* submit workload */
+ for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
+ ret = submit_context(vgpu, ring_id, &valid_desc[i],
+ emulate_schedule_in);
+ if (ret) {
+ gvt_err("vgpu%d: fail to schedule workload\n",
+ vgpu->id);
+ return ret;
+ }
+ emulate_schedule_in = false;
+ }
+ return 0;
+}
+
+static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 ctx_status_ptr_reg;
+
+ memset(execlist, 0, sizeof(*execlist));
+
+ execlist->vgpu = vgpu;
+ execlist->ring_id = ring_id;
+ execlist->slot[0].index = 0;
+ execlist->slot[1].index = 1;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+ ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+}
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+{
+ kmem_cache_destroy(vgpu->workloads);
+}
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* each ring has a virtual execlist engine */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ init_vgpu_execlist(vgpu, i);
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ }
+
+ vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!vgpu->workloads)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ /* free the unsubmitted workloads in the queue */
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
+ }
+
+ init_vgpu_execlist(vgpu, engine->id);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
new file mode 100644
index 000000000000..7eced40a1e30
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EXECLIST_H_
+#define _GVT_EXECLIST_H_
+
+struct execlist_ctx_descriptor_format {
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+ union {
+ u32 ldw;
+ struct {
+ u32 valid : 1;
+ u32 force_pd_restore : 1;
+ u32 force_restore : 1;
+ u32 addressing_mode : 2;
+ u32 llc_coherency : 1;
+ u32 fault_handling : 2;
+ u32 privilege_access : 1;
+ u32 reserved : 3;
+ u32 lrca : 20;
+ };
+ };
+};
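+
+/*
+ * Illustrative sketch: a context is submitted by writing two dwords to
+ * ELSP; assigning them to ldw/udw lets the bit-fields above decode the
+ * submission.  elsp_dword_high/low and ctx_gma are placeholder names, and
+ * GTT_PAGE_SHIFT is assumed to be the 4K page shift used elsewhere:
+ *
+ *	struct execlist_ctx_descriptor_format desc;
+ *
+ *	desc.udw = elsp_dword_high;
+ *	desc.ldw = elsp_dword_low;
+ *	if (desc.valid)
+ *		ctx_gma = (u64)desc.lrca << GTT_PAGE_SHIFT;
+ */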
+
+struct execlist_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 current_execlist_pointer :1;
+ u32 execlist_write_pointer :1;
+ u32 execlist_queue_full :1;
+ u32 execlist_1_valid :1;
+ u32 execlist_0_valid :1;
+ u32 last_ctx_switch_reason :9;
+ u32 current_active_elm_status :2;
+ u32 arbitration_enable :1;
+ u32 execlist_1_active :1;
+ u32 execlist_0_active :1;
+ u32 reserved :13;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_context_status_pointer_format {
+ union {
+ u32 dw;
+ struct {
+ u32 write_ptr :3;
+ u32 reserved :5;
+ u32 read_ptr :3;
+ u32 reserved2 :5;
+ u32 mask :16;
+ };
+ };
+};
+
+struct execlist_context_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 idle_to_active :1;
+ u32 preempted :1;
+ u32 element_switch :1;
+ u32 active_to_idle :1;
+ u32 context_complete :1;
+ u32 wait_on_sync_flip :1;
+ u32 wait_on_vblank :1;
+ u32 wait_on_semaphore :1;
+ u32 wait_on_scanline :1;
+ u32 reserved :2;
+ u32 semaphore_wait_mode :1;
+ u32 display_plane :3;
+ u32 lite_restore :1;
+ u32 reserved_2 :16;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_mmio_pair {
+ u32 addr;
+ u32 val;
+};
+
+/* The first 52 dwords in register state context */
+struct execlist_ring_context {
+ u32 nop1;
+ u32 lri_cmd_1;
+ struct execlist_mmio_pair ctx_ctrl;
+ struct execlist_mmio_pair ring_header;
+ struct execlist_mmio_pair ring_tail;
+ struct execlist_mmio_pair rb_start;
+ struct execlist_mmio_pair rb_ctrl;
+ struct execlist_mmio_pair bb_cur_head_UDW;
+ struct execlist_mmio_pair bb_cur_head_LDW;
+ struct execlist_mmio_pair bb_state;
+ struct execlist_mmio_pair second_bb_addr_UDW;
+ struct execlist_mmio_pair second_bb_addr_LDW;
+ struct execlist_mmio_pair second_bb_state;
+ struct execlist_mmio_pair bb_per_ctx_ptr;
+ struct execlist_mmio_pair rcs_indirect_ctx;
+ struct execlist_mmio_pair rcs_indirect_ctx_offset;
+ u32 nop2;
+ u32 nop3;
+ u32 nop4;
+ u32 lri_cmd_2;
+ struct execlist_mmio_pair ctx_timestamp;
+ struct execlist_mmio_pair pdp3_UDW;
+ struct execlist_mmio_pair pdp3_LDW;
+ struct execlist_mmio_pair pdp2_UDW;
+ struct execlist_mmio_pair pdp2_LDW;
+ struct execlist_mmio_pair pdp1_UDW;
+ struct execlist_mmio_pair pdp1_LDW;
+ struct execlist_mmio_pair pdp0_UDW;
+ struct execlist_mmio_pair pdp0_LDW;
+};
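+
+/*
+ * Illustrative note: fields in the register state context are typically
+ * located with offsetof() against this layout, e.g. through a helper such
+ * as
+ *
+ *	#define RING_CTX_OFF(x) offsetof(struct execlist_ring_context, x)
+ *
+ * so the guest's ring tail value lives at
+ * ring_context_gpa + RING_CTX_OFF(ring_tail.val).
+ */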
+
+struct intel_vgpu_elsp_dwords {
+ u32 data[4];
+ u32 index;
+};
+
+struct intel_vgpu_execlist_slot {
+ struct execlist_ctx_descriptor_format ctx[2];
+ u32 index;
+};
+
+struct intel_vgpu_execlist {
+ struct intel_vgpu_execlist_slot slot[2];
+ struct intel_vgpu_execlist_slot *running_slot;
+ struct intel_vgpu_execlist_slot *pending_slot;
+ struct execlist_ctx_descriptor_format *running_context;
+ int ring_id;
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+};
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
+#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
new file mode 100644
index 000000000000..2fae2a2ca96f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Changbin Du <changbin.du@intel.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+#define FIRMWARE_VERSION (0x0)
+
+struct gvt_firmware_header {
+ u64 magic;
+ u32 crc32; /* protect the data after this field */
+ u32 version;
+ u64 cfg_space_size;
+ u64 cfg_space_offset; /* offset in the file */
+ u64 mmio_size;
+ u64 mmio_offset; /* offset in the file */
+ unsigned char data[1];
+};
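+
+/*
+ * Illustration: the snapshot file is laid out as | header | cfg space |
+ * mmio |, so the section offsets and the total size follow directly from
+ * the header fields:
+ *
+ *	cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+ *	mmio_offset      = cfg_space_offset + cfg_space_size;
+ *	file size        = mmio_offset + mmio_size;
+ */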
+
+#define RD(offset) (readl(mmio + offset.reg))
+#define WR(v, offset) (writel(v, mmio + offset.reg))
+
+static void bdw_forcewake_get(void __iomem *mmio)
+{
+ WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
+
+ RD(ECOBUS);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
+ gvt_err("fail to wait forcewake idle\n");
+
+ WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
+ gvt_err("fail to wait forcewake ack\n");
+
+ if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
+ gvt_err("fail to wait c0 wake up\n");
+}
+
+#undef RD
+#undef WR
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t
+gvt_firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ memcpy(buf, attr->private + offset, count);
+ return count;
+}
+
+static struct bin_attribute firmware_attr = {
+ .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
+ .read = gvt_firmware_read,
+ .write = NULL,
+ .mmap = NULL,
+};
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt,
+ void __iomem *mmio)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct intel_gvt_mmio_info *e;
+ struct gvt_firmware_header *h;
+ void *firmware;
+ void *p;
+ unsigned long size;
+ int i;
+ int ret;
+
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ firmware = vmalloc(size);
+ if (!firmware)
+ return -ENOMEM;
+
+ h = firmware;
+
+ h->magic = VGT_MAGIC;
+ h->version = FIRMWARE_VERSION;
+ h->cfg_space_size = info->cfg_space_size;
+ h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+ h->mmio_size = info->mmio_size;
+ h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
+
+ p = firmware + h->cfg_space_offset;
+
+ for (i = 0; i < h->cfg_space_size; i += 4)
+ pci_read_config_dword(pdev, i, p + i);
+
+ memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+
+ p = firmware + h->mmio_offset;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ int j;
+
+ for (j = 0; j < e->length; j += 4)
+ *(u32 *)(p + e->offset + j) =
+ readl(mmio + e->offset + j);
+ }
+
+ memcpy(gvt->firmware.mmio, p, info->mmio_size);
+
+ firmware_attr.size = size;
+ firmware_attr.private = firmware;
+
+ ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ if (ret) {
+ vfree(firmware);
+ return ret;
+ }
+ return 0;
+}
+
+static void clean_firmware_sysfs(struct intel_gvt *gvt)
+{
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ device_remove_bin_file(&pdev->dev, &firmware_attr);
+ vfree(firmware_attr.private);
+}
+
+/**
+ * intel_gvt_free_firmware - free GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+void intel_gvt_free_firmware(struct intel_gvt *gvt)
+{
+ if (!gvt->firmware.firmware_loaded)
+ clean_firmware_sysfs(gvt);
+
+ kfree(gvt->firmware.cfg_space);
+ kfree(gvt->firmware.mmio);
+}
+
+static int verify_firmware(struct intel_gvt *gvt,
+ const struct firmware *fw)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct gvt_firmware_header *h;
+ unsigned long id, crc32_start;
+ const void *mem;
+ const char *item;
+ u64 file, request;
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ mem = fw->data + crc32_start;
+
+#define VERIFY(s, a, b) do { \
+ item = (s); file = (u64)(a); request = (u64)(b); \
+ if ((a) != (b)) \
+ goto invalid_firmware; \
+} while (0)
+
+ VERIFY("magic number", h->magic, VGT_MAGIC);
+ VERIFY("version", h->version, FIRMWARE_VERSION);
+ VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
+ VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
+ VERIFY("mmio size", h->mmio_size, info->mmio_size);
+
+ mem = (fw->data + h->cfg_space_offset);
+
+ id = *(u16 *)(mem + PCI_VENDOR_ID);
+ VERIFY("vender id", id, pdev->vendor);
+
+ id = *(u16 *)(mem + PCI_DEVICE_ID);
+ VERIFY("device id", id, pdev->device);
+
+ id = *(u8 *)(mem + PCI_REVISION_ID);
+ VERIFY("revision id", id, pdev->revision);
+
+#undef VERIFY
+ return 0;
+
+invalid_firmware:
+ gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
+ item, file, request);
+ return -EINVAL;
+}
+
+#define GVT_FIRMWARE_PATH "i915/gvt"
+
+/**
+ * intel_gvt_load_firmware - load GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+int intel_gvt_load_firmware(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_gvt_firmware *firmware = &gvt->firmware;
+ struct gvt_firmware_header *h;
+ const struct firmware *fw;
+ char *path;
+ void __iomem *mmio;
+ void *mem;
+ int ret;
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ return -ENOMEM;
+ }
+
+ firmware->cfg_space = mem;
+
+ mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ return -ENOMEM;
+ }
+
+ firmware->mmio = mem;
+
+ mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
+ if (!mmio) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ kfree(firmware->mmio);
+ return -EINVAL;
+ }
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
+ bdw_forcewake_get(mmio);
+
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
+ pdev->revision);
+
+ gvt_dbg_core("request hw state firmware %s...\n", path);
+
+ ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ kfree(path);
+
+ if (ret)
+ goto expose_firmware;
+
+ gvt_dbg_core("success.\n");
+
+ ret = verify_firmware(gvt, fw);
+ if (ret)
+ goto out_free_fw;
+
+ gvt_dbg_core("verified.\n");
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
+ h->cfg_space_size);
+ memcpy(firmware->mmio, fw->data + h->mmio_offset,
+ h->mmio_size);
+
+ release_firmware(fw);
+ firmware->firmware_loaded = true;
+ pci_iounmap(pdev, mmio);
+ return 0;
+
+out_free_fw:
+ release_firmware(fw);
+expose_firmware:
+ expose_firmware_sysfs(gvt, mmio);
+ pci_iounmap(pdev, mmio);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
new file mode 100644
index 000000000000..7eaaf1c9ed2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -0,0 +1,2244 @@
+/*
+ * GTT virtualization
+ *
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+static bool enable_out_of_sync = false;
+static int preallocated_oos_pages = 8192;
+
+/*
+ * validate that a gm address and its range size fall within the
+ * vGPU's visible graphics memory space
+ */
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
+{
+ if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
+ && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
+ gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
+ vgpu->id, addr, size);
+ return false;
+ }
+ return true;
+}
+
+/* translate a guest gmadr to host gmadr */
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
+{
+ if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
+ return -EACCES;
+
+ if (vgpu_gmadr_is_aperture(vgpu, g_addr))
+ *h_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (g_addr - vgpu_aperture_offset(vgpu));
+ else
+ *h_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (g_addr - vgpu_hidden_offset(vgpu));
+ return 0;
+}
+
+/* translate a host gmadr to guest gmadr */
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
+{
+ if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
+ return -EACCES;
+
+ if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
+ *g_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
+ else
+ *g_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
+ return 0;
+}
+
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index)
+{
+ u64 h_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ &h_addr);
+ if (ret)
+ return ret;
+
+ *h_index = h_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index)
+{
+ u64 g_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ &g_addr);
+ if (ret)
+ return ret;
+
+ *g_index = g_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+#define gtt_type_is_entry(type) \
+ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
+ && type != GTT_TYPE_PPGTT_PTE_ENTRY \
+ && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_type_is_pt(type) \
+ (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
+
+#define gtt_type_is_pte_pt(type) \
+ (type == GTT_TYPE_PPGTT_PTE_PT)
+
+#define gtt_type_is_root_pointer(type) \
+ (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_init_entry(e, t, p, v) do { \
+ (e)->type = t; \
+ (e)->pdev = p; \
+ memcpy(&(e)->val64, &v, sizeof(v)); \
+} while (0)
+
+/*
+ * Mappings between GTT_TYPE* enumerations.
+ * The following information can be looked up for a given type:
+ * - type of the next-level page table
+ * - type of an entry inside this level of page table
+ * - type of the entry when its PSE bit is set
+ *
+ * If the given type does not carry a piece of information - for example,
+ * an L4 root entry has no PSE bit, and a PTE page table has no next-level
+ * page table - GTT_TYPE_INVALID will be returned. This is useful when
+ * traversing a page table.
+ */
+
+struct gtt_type_table_entry {
+ int entry_type;
+ int next_pt_type;
+ int pse_entry_type;
+};
+
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+ [type] = { \
+ .entry_type = e_type, \
+ .next_pt_type = npt_type, \
+ .pse_entry_type = pse_type, \
+ }
+
+static struct gtt_type_table_entry gtt_type_table[] = {
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+};
+
+static inline int get_next_pt_type(int type)
+{
+ return gtt_type_table[type].next_pt_type;
+}
+
+static inline int get_entry_type(int type)
+{
+ return gtt_type_table[type].entry_type;
+}
+
+static inline int get_pse_type(int type)
+{
+ return gtt_type_table[type].pse_entry_type;
+}
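+
+/*
+ * Example (for illustration): walking one level down a 4-level PPGTT.
+ * A PML4 entry points at a PDP page table, a PTE page table has no next
+ * level, and a PDE entry with PSE set maps a 2M page:
+ *
+ *	get_next_pt_type(GTT_TYPE_PPGTT_PML4_ENTRY) == GTT_TYPE_PPGTT_PDP_PT
+ *	get_next_pt_type(GTT_TYPE_PPGTT_PTE_PT) == GTT_TYPE_INVALID
+ *	get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY) == GTT_TYPE_PPGTT_PTE_2M_ENTRY
+ */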
+
+static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ u64 pte;
+
+#ifdef readq
+ pte = readq(addr);
+#else
+ pte = ioread32(addr);
+ pte |= (u64)ioread32(addr + 4) << 32;
+#endif
+ return pte;
+}
+
+static void write_pte64(struct drm_i915_private *dev_priv,
+ unsigned long index, u64 pte)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ } else {
+ e->val64 = *((u64 *)pt + index);
+ }
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ } else {
+ *((u64 *)pt + index) = e->val64;
+ }
+ return e;
+}
+
+#define GTT_HAW 46
+
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+
+static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
+{
+ unsigned long pfn;
+
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
+ pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
+ pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ else
+ pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ return pfn;
+}
+
+static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
+{
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ e->val64 &= ~ADDR_1G_MASK;
+ pfn &= (ADDR_1G_MASK >> 12);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
+ e->val64 &= ~ADDR_2M_MASK;
+ pfn &= (ADDR_2M_MASK >> 12);
+ } else {
+ e->val64 &= ~ADDR_4K_MASK;
+ pfn &= (ADDR_4K_MASK >> 12);
+ }
+
+ e->val64 |= (pfn << 12);
+}
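+
+/*
+ * Example (for illustration): for a 2M entry only the pfn bits covered by
+ * ADDR_2M_MASK survive, so the stored pfn is always 2M-aligned (its low
+ * nine bits are cleared):
+ *
+ *	e.type = GTT_TYPE_PPGTT_PTE_2M_ENTRY;
+ *	e.val64 = 0;
+ *	gen8_gtt_set_pfn(&e, 0x12345);
+ *	gen8_gtt_get_pfn(&e) == 0x12200
+ */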
+
+static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
+{
+ /* Entry doesn't have PSE bit. */
+ if (get_pse_type(e->type) == GTT_TYPE_INVALID)
+ return false;
+
+ e->type = get_entry_type(e->type);
+ if (!(e->val64 & (1 << 7)))
+ return false;
+
+ e->type = get_pse_type(e->type);
+ return true;
+}
+
+static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
+{
+ /*
+ * i915 writes the PDP root pointer registers without setting the
+ * present bit, and that still works, so root pointer entries need
+ * to be treated specially.
+ */
+ if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ return (e->val64 != 0);
+ else
+ return (e->val64 & (1 << 0));
+}
+
+static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~(1 << 0);
+}
+
+/*
+ * Per-platform GMA routines.
+ */
+static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
+{
+ unsigned long x = (gma >> GTT_PAGE_SHIFT);
+
+ trace_gma_index(__func__, gma, x);
+ return x;
+}
+
+#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
+static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
+{ \
+ unsigned long x = (exp); \
+ trace_gma_index(__func__, gma, x); \
+ return x; \
+}
+
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
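+
+/*
+ * Illustration: for a 4-level PPGTT the helpers generated above decompose
+ * a graphics memory address as
+ *
+ *	pml4 index = gma[47:39]
+ *	pdp  index = gma[38:30]	(gma[31:30] for a 3-level l3 pdp)
+ *	pde  index = gma[29:21]
+ *	pte  index = gma[20:12]
+ */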
+
+static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+ .get_entry = gtt_get_entry64,
+ .set_entry = gtt_set_entry64,
+ .clear_present = gtt_entry_clear_present,
+ .test_present = gen8_gtt_test_present,
+ .test_pse = gen8_gtt_test_pse,
+ .get_pfn = gen8_gtt_get_pfn,
+ .set_pfn = gen8_gtt_set_pfn,
+};
+
+static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+ .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
+ .gma_to_pte_index = gen8_gma_to_pte_index,
+ .gma_to_pde_index = gen8_gma_to_pde_index,
+ .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
+ .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
+ .gma_to_pml4_index = gen8_gma_to_pml4_index,
+};
+
+static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
+ struct intel_gvt_gtt_entry *m)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long gfn, mfn;
+
+ *m = *p;
+
+ if (!ops->test_present(p))
+ return 0;
+
+ gfn = ops->get_pfn(p);
+
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ return -ENXIO;
+ }
+
+ ops->set_pfn(m, mfn);
+ return 0;
+}
+
+/*
+ * MM helpers.
+ */
+struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = mm->page_table_entry_type;
+
+ ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+}
+
+/*
+ * PPGTT shadow page table helpers.
+ */
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = get_entry_type(type);
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ ops->get_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ return ops->set_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+}
+
+#define ppgtt_get_guest_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_set_guest_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_get_shadow_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+#define ppgtt_set_shadow_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
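+
+/*
+ * Note: the guest variants above pass a NULL page table and guest == true,
+ * so gtt_get_entry64()/gtt_set_entry64() go through the hypervisor GPA
+ * accessors at spt->guest_page.gfn, while the shadow variants operate
+ * directly on the host-side shadow page at spt->shadow_page.vaddr.
+ */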
+
+/**
+ * intel_vgpu_init_guest_page - init a guest page data structure
+ * @vgpu: a vGPU
+ * @p: a guest page data structure
+ * @gfn: guest memory page frame number
+ * @handler: the function to be called when the target guest memory page has
+ * been modified.
+ * @data: private data associated with the tracked page
+ *
+ * This function is called when a user wants to track a guest memory page.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+{
+ INIT_HLIST_NODE(&p->node);
+
+ p->writeprotection = false;
+ p->gfn = gfn;
+ p->handler = handler;
+ p->data = data;
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+/**
+ * intel_vgpu_clean_guest_page - release the resources owned by a guest page
+ * data structure
+ * @vgpu: a vGPU
+ * @p: a tracked guest page
+ *
+ * This function is called when a user stops tracking a guest memory
+ * page.
+ */
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ if (p->writeprotection)
+ intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+}
+
+/**
+ * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * @vgpu: a vGPU
+ * @gfn: guest memory page frame number
+ *
+ * This function is called when emulation logic wants to know if a trapped GFN
+ * is a tracked guest page.
+ *
+ * Returns:
+ * Pointer to guest page data structure, NULL if failed.
+ */
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_guest_page *p;
+
+ hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
+ p, node, gfn) {
+ if (p->gfn == gfn)
+ return p;
+ }
+ return NULL;
+}
+
+static inline int init_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p, int type)
+{
+ p->vaddr = page_address(p->page);
+ p->type = type;
+
+ INIT_HLIST_NODE(&p->node);
+
+ p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
+ if (p->mfn == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ return 0;
+}
+
+static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+}
+
+static inline struct intel_vgpu_shadow_page *find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p;
+
+ hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
+ p, node, mfn) {
+ if (p->mfn == mfn)
+ return p;
+ }
+ return NULL;
+}
+
+#define guest_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
+
+#define shadow_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
+
+static void *alloc_spt(gfp_t gfp_mask)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+
+ spt = kzalloc(sizeof(*spt), gfp_mask);
+ if (!spt)
+ return NULL;
+
+ spt->shadow_page.page = alloc_page(gfp_mask);
+ if (!spt->shadow_page.page) {
+ kfree(spt);
+ return NULL;
+ }
+ return spt;
+}
+
+static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ __free_page(spt->shadow_page.page);
+ kfree(spt);
+}
+
+static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+
+ clean_shadow_page(&spt->shadow_page);
+ intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ list_del_init(&spt->post_shadow_list);
+
+ free_spt(spt);
+}
+
+static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+{
+ struct hlist_node *n;
+ struct intel_vgpu_shadow_page *sp;
+ int i;
+
+ hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
+ ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes);
+
+static int ppgtt_write_protection_handler(void *gp, u64 pa,
+ void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ if (!gpt->writeprotection)
+ return -EINVAL;
+
+ ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+ pa, p_data, bytes);
+ return ret;
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+ struct intel_vgpu *vgpu, int type, unsigned long gfn)
+{
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
+ int ret;
+
+retry:
+ spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
+ if (!spt) {
+ if (reclaim_one_mm(vgpu->gvt))
+ goto retry;
+
+ gvt_err("fail to allocate ppgtt shadow page\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spt->vgpu = vgpu;
+ spt->guest_page_type = type;
+ atomic_set(&spt->refcount, 1);
+ INIT_LIST_HEAD(&spt->post_shadow_list);
+
+ /*
+ * TODO: the guest page type may differ from the shadow page type
+ * once PSE pages are supported in the future.
+ */
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ if (ret) {
+ gvt_err("fail to initialize shadow page for spt\n");
+ goto err;
+ }
+
+ ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ gfn, ppgtt_write_protection_handler, NULL);
+ if (ret) {
+ gvt_err("fail to initialize guest page for spt\n");
+ goto err;
+ }
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
+err:
+ ppgtt_free_shadow_page(spt);
+ return ERR_PTR(ret);
+}
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+
+ if (p)
+ return shadow_page_to_ppgtt_spt(p);
+
+ gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
+ vgpu->id, mfn);
+ return NULL;
+}
+
+#define pt_entry_size_shift(spt) \
+ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
+
+#define pt_entries(spt) \
+ (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+
+#define for_each_present_guest_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_guest_entry(spt, e, i)))
+
+#define for_each_present_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_shadow_entry(spt, e, i)))
+
+static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
+
+ atomic_inc(&spt->refcount);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *e)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+ intel_gvt_gtt_type_t cur_pt_type;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
+ return -EINVAL;
+
+ if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (ops->get_pfn(e) ==
+ vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+ return 0;
+ }
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s) {
+ gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
+ vgpu->id, ops->get_pfn(e));
+ return -ENXIO;
+ }
+ return ppgtt_invalidate_shadow_page(s);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_gvt_gtt_entry e;
+ unsigned long index;
+ int ret;
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_change(spt->vgpu->id, "die", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+
+ if (atomic_dec_return(&spt->refcount) > 0)
+ return 0;
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type))
+ goto release;
+
+ for_each_present_shadow_entry(spt, &e, index) {
+ if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
+ gvt_err("GVT doesn't support pse bit for now\n");
+ return -EINVAL;
+ }
+ ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ }
+release:
+ trace_spt_change(spt->vgpu->id, "release", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_shadow_page(spt);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt->vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+ struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s = NULL;
+ struct intel_vgpu_guest_page *g;
+ int ret;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+ if (g) {
+ s = guest_page_to_ppgtt_spt(g);
+ ppgtt_get_shadow_page(s);
+ } else {
+ int type = get_next_pt_type(we->type);
+
+ s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ if (ret)
+ goto fail;
+
+ ret = ppgtt_populate_shadow_page(s);
+ if (ret)
+ goto fail;
+
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ s->shadow_page.type);
+ }
+ return s;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, s, we->val64, we->type);
+ return ERR_PTR(ret);
+}
+
+static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
+ struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+
+ se->type = ge->type;
+ se->val64 = ge->val64;
+
+ ops->set_pfn(se, s->shadow_page.mfn);
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_vgpu_ppgtt_spt *s;
+ struct intel_gvt_gtt_entry se, ge;
+ unsigned long i;
+ int ret;
+
+ trace_spt_change(spt->vgpu->id, "born", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
+ for_each_present_guest_entry(spt, &ge, i) {
+ ret = gtt_entry_p2m(vgpu, &ge, &se);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+ }
+
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ gvt_err("GVT doesn't support pse bit now\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, ge.val64, ge.type);
+ return ret;
+}
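+
+/*
+ * Note: shadowing is recursive.  For a non-leaf guest entry,
+ * ppgtt_populate_shadow_page_by_guest_entry() allocates (or reuses) the
+ * shadow page of the next level and ppgtt_generate_shadow_entry() rewrites
+ * the entry to point at that shadow page's machine frame
+ * (s->shadow_page.mfn); leaf PTEs are simply translated gfn -> mfn by
+ * gtt_entry_p2m().
+ */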
+
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+ unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry e;
+ int ret;
+
+ ppgtt_get_shadow_entry(spt, &e, index);
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ index);
+
+ if (!ops->test_present(&e))
+ return 0;
+
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ return 0;
+
+ if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ struct intel_vgpu_ppgtt_spt *s =
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ if (!s) {
+ gvt_err("fail to find guest page\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+ ret = ppgtt_invalidate_shadow_page(s);
+ if (ret)
+ goto fail;
+ }
+ ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &e, index);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_entry m;
+ struct intel_vgpu_ppgtt_spt *s;
+ int ret;
+
+ trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
+ we->val64, index);
+
+ if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &m, index);
+ ppgtt_generate_shadow_entry(&m, s, we);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ } else {
+ ret = gtt_entry_p2m(vgpu, we, &m);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+ spt, we->val64, we->type);
+ return ret;
+}
+
+static int sync_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_gvt_gtt_entry old, new, m;
+ int index;
+ int ret;
+
+ trace_oos_change(vgpu->id, "sync", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.val64 = new.val64 = 0;
+
+ for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
+ index++) {
+ ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
+ ops->get_entry(NULL, &new, index, true,
+ oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+
+ if (old.val64 == new.val64
+ && !test_and_clear_bit(index, spt->post_shadow_bitmap))
+ continue;
+
+ trace_oos_sync(vgpu->id, oos_page->id,
+ oos_page->guest_page, spt->guest_page_type,
+ new.val64, index);
+
+ ret = gtt_entry_p2m(vgpu, &new, &m);
+ if (ret)
+ return ret;
+
+ ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+
+ oos_page->guest_page->write_cnt = 0;
+ list_del_init(&spt->post_shadow_list);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+
+ trace_oos_change(vgpu->id, "detach", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ oos_page->guest_page->write_cnt = 0;
+ oos_page->guest_page->oos_page = NULL;
+ oos_page->guest_page = NULL;
+
+ list_del_init(&oos_page->vm_list);
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
+
+ return 0;
+}
+
+static int attach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
+ oos_page->mem, GTT_PAGE_SIZE);
+ if (ret)
+ return ret;
+
+ oos_page->guest_page = gpt;
+ gpt->oos_page = oos_page;
+
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
+
+ trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ return 0;
+}
+
+static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ int ret;
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ if (ret)
+ return ret;
+
+ trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_del_init(&gpt->oos_page->vm_list);
+ return sync_oos_page(vgpu, gpt->oos_page);
+}
+
+static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ int ret;
+
+ WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
+
+ if (list_empty(&gtt->oos_page_free_list_head)) {
+ oos_page = container_of(gtt->oos_page_use_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ ret = detach_oos_page(vgpu, oos_page);
+ if (ret)
+ return ret;
+ } else
+ oos_page = container_of(gtt->oos_page_free_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ return attach_oos_page(vgpu, oos_page, gpt);
+}
+
+static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+
+ if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
+ return -EINVAL;
+
+ trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
+ return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+}
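+
+/*
+ * Note: the out-of-sync path trades write-protection faults for a deferred
+ * re-sync.  A PTE page that keeps being rewritten (write_cnt >= 2) is
+ * attached to an oos page, its write protection is dropped, and
+ * sync_oos_page() later compares the cached copy in oos_page->mem against
+ * the guest page before the next workload is submitted via
+ * intel_vgpu_sync_oos_pages().
+ */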
+
+/**
+ * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to sync all the out-of-sync shadow pages for the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+ int ret;
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
+ oos_page = container_of(pos,
+ struct intel_vgpu_oos_page, vm_list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The heart of PPGTT shadow page table.
+ */
+static int ppgtt_handle_guest_write_page_table(
+ struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+
+ int ret;
+ int new_present;
+
+ new_present = ops->test_present(we);
+
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ goto fail;
+
+ if (new_present) {
+ ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
+ vgpu->id, spt, we->val64, we->type);
+ return ret;
+}
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+{
+ return enable_out_of_sync
+ && gtt_type_is_pte_pt(
+ guest_page_to_ppgtt_spt(gpt)->guest_page_type)
+ && gpt->write_cnt >= 2;
+}
+
+static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
+ unsigned long index)
+{
+ set_bit(index, spt->post_shadow_bitmap);
+ if (!list_empty(&spt->post_shadow_list))
+ return;
+
+ list_add_tail(&spt->post_shadow_list,
+ &spt->vgpu->gtt.post_shadow_list_head);
+}
+
+/**
+ * intel_vgpu_flush_post_shadow - flush the post shadow transactions
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to flush all the post-shadow transactions for a vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge;
+ unsigned long index;
+ int ret;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
+ spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
+ post_shadow_list);
+
+ for_each_set_bit(index, spt->post_shadow_bitmap,
+ GTT_ENTRY_NUM_IN_ONE_PAGE) {
+ ppgtt_get_guest_entry(spt, &ge, index);
+
+ ret = ppgtt_handle_guest_write_page_table(
+ &spt->guest_page, &ge, index);
+ if (ret)
+ return ret;
+ clear_bit(index, spt->post_shadow_bitmap);
+ }
+ list_del_init(&spt->post_shadow_list);
+ }
+ return 0;
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_entry we;
+ unsigned long index;
+ int ret;
+
+ index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
+
+ ppgtt_get_guest_entry(spt, &we, index);
+
+ ops->test_pse(&we);
+
+ if (bytes == info->gtt_entry_size) {
+ ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ if (ret)
+ return ret;
+ } else {
+ if (!test_bit(index, spt->post_shadow_bitmap)) {
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ return ret;
+ }
+
+ ppgtt_set_post_shadow(spt, index);
+ }
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ gpt->write_cnt++;
+
+ if (gpt->oos_page)
+ ops->set_entry(gpt->oos_page->mem, &we, index,
+ false, 0, vgpu);
+
+ if (can_do_out_of_sync(gpt)) {
+ if (!gpt->oos_page)
+ ppgtt_allocate_oos_page(vgpu, gpt);
+
+ ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * mm page table allocation policy for bdw+
+ * - for ggtt, only a virtual page table is allocated.
+ * - for ppgtt, dedicated virtual and shadow page tables are allocated.
+ */
+static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ void *mem;
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mm->page_table_entry_cnt = 4;
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = kzalloc(mm->has_shadow_page_table ?
+ mm->page_table_entry_size * 2
+ : mm->page_table_entry_size,
+ GFP_ATOMIC);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ if (!mm->has_shadow_page_table)
+ return 0;
+ mm->shadow_page_table = mem + mm->page_table_entry_size;
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ mm->page_table_entry_cnt =
+ (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = vzalloc(mm->page_table_entry_size);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ }
+ return 0;
+}
+
+static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
+{
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ kfree(mm->virtual_page_table);
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (mm->virtual_page_table)
+ vfree(mm->virtual_page_table);
+ }
+ mm->virtual_page_table = mm->shadow_page_table = NULL;
+}
+
+static void invalidate_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_gvt_gtt_entry se;
+ int i;
+
+ if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ return;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_shadow_root_entry(mm, &se, i);
+ if (!ops->test_present(&se))
+ continue;
+ ppgtt_invalidate_shadow_page_by_shadow_entry(
+ vgpu, &se);
+ se.val64 = 0;
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ mm->shadowed = false;
+}
+
+/**
+ * intel_vgpu_destroy_mm - destroy a mm object
+ * @mm_ref: the kref embedded in a vGPU mm object
+ *
+ * This function is used to destroy a mm object for a vGPU.
+ *
+ */
+void intel_vgpu_destroy_mm(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+
+ if (!mm->initialized)
+ goto out;
+
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+
+ if (mm->has_shadow_page_table)
+ invalidate_mm(mm);
+
+ gtt->mm_free_page_table(mm);
+out:
+ kfree(mm);
+}
+
+static int shadow_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge, se;
+ int i;
+ int ret;
+
+ if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ return 0;
+
+ mm->shadowed = true;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_guest_root_entry(mm, &ge, i);
+ if (!ops->test_present(&ge))
+ continue;
+
+ trace_gpt_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, i);
+
+ spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(spt)) {
+ gvt_err("fail to populate guest root pointer\n");
+ ret = PTR_ERR(spt);
+ goto fail;
+ }
+ ppgtt_generate_shadow_entry(&se, spt, &ge);
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ return 0;
+fail:
+ invalidate_mm(mm);
+ return ret;
+}
+
+/**
+ * intel_vgpu_create_mm - create a mm object for a vGPU
+ * @vgpu: a vGPU
+ * @mm_type: mm object type, should be PPGTT or GGTT
+ * @virtual_page_table: page table root pointers. Can be NULL if the user wants
+ * to populate the shadow later.
+ * @page_table_level: describe the page table level of the mm object
+ * @pde_base_index: pde root pointer base in GGTT MMIO.
+ *
+ * This function is used to create a mm object for a vGPU.
+ *
+ * Returns:
+ * The mm object on success, an ERR_PTR-encoded negative error code if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_mm *mm;
+ int ret;
+
+ mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mm->type = mm_type;
+
+ if (page_table_level == 1)
+ mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
+ else if (page_table_level == 3)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ else if (page_table_level == 4)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mm->page_table_level = page_table_level;
+ mm->pde_base_index = pde_base_index;
+
+ mm->vgpu = vgpu;
+ mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
+
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+ INIT_LIST_HEAD(&mm->list);
+ INIT_LIST_HEAD(&mm->lru_list);
+ list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+
+ ret = gtt->mm_alloc_page_table(mm);
+ if (ret) {
+ gvt_err("fail to allocate page table for mm\n");
+ goto fail;
+ }
+
+ mm->initialized = true;
+
+ if (virtual_page_table)
+ memcpy(mm->virtual_page_table, virtual_page_table,
+ mm->page_table_entry_size);
+
+ if (mm->has_shadow_page_table) {
+ ret = shadow_mm(mm);
+ if (ret)
+ goto fail;
+ list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ }
+ return mm;
+fail:
+ gvt_err("fail to create mm\n");
+ if (mm)
+ intel_gvt_mm_unreference(mm);
+ return ERR_PTR(ret);
+}
+
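+/*
+ * Creation sketch, mirroring intel_vgpu_init_gtt() below: the per-vGPU GGTT
+ * mm object is a single-level table whose guest view is populated later by
+ * GTT MMIO emulation, so no root pointers are passed in:
+ *
+ *	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, NULL, 1, 0);
+ *	if (IS_ERR(ggtt_mm))
+ *		return PTR_ERR(ggtt_mm);
+ */
+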
+/**
+ * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user no longer needs a vGPU mm object.
+ * It decreases the pin count so the mm object can be reclaimed when idle.
+ */
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+{
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return;
+
+ atomic_dec(&mm->pincount);
+}
+
+/**
+ * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
+ * @mm: target vGPU mm object
+ *
+ * This function is called when a user wants to use a vGPU mm object. If the
+ * mm object hasn't been shadowed yet, the shadow page table will be
+ * populated at this time.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
+{
+ int ret;
+
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return 0;
+
+ atomic_inc(&mm->pincount);
+
+ if (!mm->shadowed) {
+ ret = shadow_mm(mm);
+ if (ret)
+ return ret;
+ }
+
+ list_del_init(&mm->lru_list);
+ list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
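+/*
+ * Pin/unpin sketch (assumption: callers such as the workload scheduler,
+ * which are outside this patch, bracket use of a PPGTT mm object like this):
+ *
+ *	ret = intel_vgpu_pin_mm(mm);
+ *	if (ret)
+ *		return ret;
+ *	... use the shadow page table, e.g. via intel_vgpu_gma_to_gpa() ...
+ *	intel_vgpu_unpin_mm(mm);
+ */
+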
+static int reclaim_one_mm(struct intel_gvt *gvt)
+{
+ struct intel_vgpu_mm *mm;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+ if (atomic_read(&mm->pincount))
+ continue;
+
+ list_del_init(&mm->lru_list);
+ invalidate_mm(mm);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * GMA translation APIs.
+ */
+static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+
+ if (WARN_ON(!mm->has_shadow_page_table))
+ return -EINVAL;
+
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s)
+ return -ENXIO;
+
+ if (!guest)
+ ppgtt_get_shadow_entry(s, e, index);
+ else
+ ppgtt_get_guest_entry(s, e, index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_gma_to_gpa - translate a gma to GPA
+ * @mm: mm object, which can be a PPGTT or GGTT mm object
+ * @gma: graphics memory address in this mm object
+ *
+ * This function is used to translate a graphics memory address in a specific
+ * graphics memory space into a guest physical address.
+ *
+ * Returns:
+ * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+ unsigned long gpa = INTEL_GVT_INVALID_ADDR;
+ unsigned long gma_index[4];
+ struct intel_gvt_gtt_entry e;
+ int i, index;
+ int ret;
+
+ if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
+ return INTEL_GVT_INVALID_ADDR;
+
+ if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
+ goto err;
+
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+ return gpa;
+ }
+
+ switch (mm->page_table_level) {
+ case 4:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ index = 4;
+ break;
+ case 3:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ index = 2;
+ break;
+ case 2:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_pde_index(gma));
+ gma_index[0] = gma_ops->gma_to_pte_index(gma);
+ index = 1;
+ break;
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+
+ /* walk into the shadow page table and get gpa from guest entry */
+ for (i = 0; i < index; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == index - 1));
+ if (ret)
+ goto err;
+ }
+
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->page_table_level, gma, gpa);
+ return gpa;
+err:
+ gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ return INTEL_GVT_INVALID_ADDR;
+}
+
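+/*
+ * Translation sketch: resolving a graphics memory address through the vGPU's
+ * GGTT mm object before touching guest memory (error handling only):
+ *
+ *	unsigned long gpa;
+ *
+ *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
+ *	if (gpa == INTEL_GVT_INVALID_ADDR)
+ *		return -EFAULT;
+ */
+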
+static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ unsigned long index = off >> info->gtt_entry_size_shift;
+ struct intel_gvt_gtt_entry e;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ ggtt_get_guest_entry(ggtt_mm, &e, index);
+ memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
+ bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data will be returned to guest
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register read
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ return ret;
+}
+
+static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
+ struct intel_gvt_gtt_entry e, m;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ gma = g_gtt_index << GTT_PAGE_SHIFT;
+
+ /* the VM may configure the whole GM space when ballooning is used */
+ if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
+ "vgpu%d: found oob ggtt write, offset %x\n",
+ vgpu->id, off)) {
+ return 0;
+ }
+
+ ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
+ bytes);
+
+ if (ops->test_present(&e)) {
+ ret = gtt_entry_p2m(vgpu, &e, &m);
+ if (ret) {
+ gvt_err("vgpu%d: fail to translate guest gtt entry\n",
+ vgpu->id);
+ return ret;
+ }
+ } else {
+ m = e;
+ m.val64 = 0;
+ }
+
+ ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+}
+
+/*
+ * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data from guest write
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register write
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ return ret;
+}
+
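+/*
+ * Offset-to-entry math used by the emulation above, as a sketch with the
+ * gen8 values set in init_device_info() (gtt_entry_size == 8,
+ * gtt_entry_size_shift == 3):
+ *
+ *	index = off >> 3;		which 64-bit GTT entry
+ *	byte  = off & (8 - 1);		which bytes within that entry
+ *
+ * Two 4-byte guest writes at off and off + 4 therefore update the low and
+ * high halves of the same entry.
+ */
+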
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t type)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ int page_entry_num = GTT_PAGE_SIZE >>
+ vgpu->gvt->device_info.gtt_entry_size_shift;
+ struct page *scratch_pt;
+ unsigned long mfn;
+ int i;
+ void *p;
+
+ if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ return -EINVAL;
+
+ scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ if (!scratch_pt) {
+ gvt_err("fail to allocate scratch page\n");
+ return -ENOMEM;
+ }
+
+ p = kmap_atomic(scratch_pt);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+ kunmap_atomic(p);
+ __free_page(scratch_pt);
+ return -EFAULT;
+ }
+ gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page = scratch_pt;
+ gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+ vgpu->id, type, mfn);
+
+ /* Build the tree by filling the scratch page table with entries that
+ * point to the next-level scratch page table or scratch page. The
+ * scratch_pt[type] indicates the scratch page table/page used by a
+ * page table of 'type'.
+ * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
+ * GTT_TYPE_PPGTT_PDE_PT level page table, which means this scratch
+ * page table itself is of type GTT_TYPE_PPGTT_PTE_PT and is filled
+ * with the scratch page mfn.
+ */
+ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ struct intel_gvt_gtt_entry se;
+
+ memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+ se.type = get_entry_type(type - 1);
+ ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+ /* The entry parameters (present/writable/cache type) are set to the
+ * same values as i915's scratch page tree.
+ */
+ se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ if (type == GTT_TYPE_PPGTT_PDE_PT)
+ se.val64 |= PPAT_CACHED_INDEX;
+
+ for (i = 0; i < page_entry_num; i++)
+ ops->set_entry(p, &se, i, false, 0, vgpu);
+ }
+
+ kunmap_atomic(p);
+
+ return 0;
+}
+
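+/*
+ * Resulting layout, per the loop above (sketch): each level's scratch page
+ * table is filled with entries pointing at the scratch of the next lower
+ * level, ending at the zero-filled PTE-level scratch page:
+ *
+ *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
+ *		-> scratch_pt[PTE_PT]
+ */
+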
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ __free_page(vgpu->gtt.scratch_pt[i].page);
+ vgpu->gtt.scratch_pt[i].page = NULL;
+ vgpu->gtt.scratch_pt[i].page_mfn = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i, ret;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ ret = alloc_scratch_pages(vgpu, i);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ release_scratch_page_tree(vgpu);
+ return ret;
+}
+
+/**
+ * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize per-vGPU graphics memory virtualization
+ * components.
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_vgpu_mm *ggtt_mm;
+
+ hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->shadow_page_hash_table);
+
+ INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_list_head);
+ INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+
+ ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
+ NULL, 1, 0);
+ if (IS_ERR(ggtt_mm)) {
+ gvt_err("fail to create mm for ggtt.\n");
+ return PTR_ERR(ggtt_mm);
+ }
+
+ gtt->ggtt_mm = ggtt_mm;
+
+ return create_scratch_page_tree(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up per-vGPU graphics memory virtualization
+ * components.
+ */
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ release_scratch_page_tree(vgpu);
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+}
+
+static void clean_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+
+ WARN(!list_empty(&gtt->oos_page_use_list_head),
+ "someone is still using oos page\n");
+
+ list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
+ oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
+ list_del(&oos_page->list);
+ kfree(oos_page);
+ }
+}
+
+static int setup_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page;
+ int i;
+ int ret;
+
+ INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
+
+ for (i = 0; i < preallocated_oos_pages; i++) {
+ oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
+ if (!oos_page) {
+ gvt_err("fail to pre-allocate oos page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&oos_page->list);
+ INIT_LIST_HEAD(&oos_page->vm_list);
+ oos_page->id = i;
+ list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
+ }
+
+ gvt_dbg_mm("%d oos pages preallocated\n", i);
+
+ return 0;
+fail:
+ clean_spt_oos(gvt);
+ return ret;
+}
+
+/**
+ * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ * @root_entry: PPGTT page table root pointers
+ *
+ * This function is used to find a PPGTT mm object from the mm object pool.
+ *
+ * Returns:
+ * pointer to mm object on success, NULL if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry)
+{
+ struct list_head *pos;
+ struct intel_vgpu_mm *mm;
+ u64 *src, *dst;
+
+ list_for_each(pos, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+
+ if (mm->page_table_level != page_table_level)
+ continue;
+
+ src = root_entry;
+ dst = mm->virtual_page_table;
+
+ if (page_table_level == 3) {
+ if (src[0] == dst[0]
+ && src[1] == dst[1]
+ && src[2] == dst[2]
+ && src[3] == dst[3])
+ return mm;
+ } else {
+ if (src[0] == dst[0])
+ return mm;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to create a PPGTT mm object from a guest to GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm\n");
+ return PTR_ERR(mm);
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to destroy a PPGTT mm object in response to a guest
+ * to GVT-g notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (!mm) {
+ gvt_err("fail to find ppgtt instance.\n");
+ return -EINVAL;
+ }
+ intel_gvt_mm_unreference(mm);
+ return 0;
+}
+
+/**
+ * intel_gvt_init_gtt - initialize mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage, to initialize
+ * the mm components of a GVT device.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_gvt_init_gtt(struct intel_gvt *gvt)
+{
+ int ret;
+
+ gvt_dbg_core("init gtt\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
+ gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
+ gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
+ } else {
+ return -ENODEV;
+ }
+
+ if (enable_out_of_sync) {
+ ret = setup_spt_oos(gvt);
+ if (ret) {
+ gvt_err("fail to initialize SPT oos\n");
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_gtt - clean up mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage, to clean up the
+ * mm components of a GVT device.
+ *
+ */
+void intel_gvt_clean_gtt(struct intel_gvt *gvt)
+{
+ if (enable_out_of_sync)
+ clean_spt_oos(gvt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
new file mode 100644
index 000000000000..d250013bc37b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef _GVT_GTT_H_
+#define _GVT_GTT_H_
+
+#define GTT_PAGE_SHIFT 12
+#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
+#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+
+struct intel_vgpu_mm;
+
+#define INTEL_GVT_GTT_HASH_BITS 8
+#define INTEL_GVT_INVALID_ADDR (~0UL)
+
+struct intel_gvt_gtt_entry {
+ u64 val64;
+ int type;
+};
+
+struct intel_gvt_gtt_pte_ops {
+ struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ bool (*test_present)(struct intel_gvt_gtt_entry *e);
+ void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
+ unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
+};
+
+struct intel_gvt_gtt_gma_ops {
+ unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pde_index)(unsigned long gma);
+ unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_pml4_index)(unsigned long gma);
+};
+
+struct intel_gvt_gtt {
+ struct intel_gvt_gtt_pte_ops *pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops;
+ int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
+ void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
+ struct list_head oos_page_use_list_head;
+ struct list_head oos_page_free_list_head;
+ struct list_head mm_lru_list_head;
+};
+
+enum {
+ INTEL_GVT_MM_GGTT = 0,
+ INTEL_GVT_MM_PPGTT,
+};
+
+typedef enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
+struct intel_vgpu_mm {
+ int type;
+ bool initialized;
+ bool shadowed;
+
+ int page_table_entry_type;
+ u32 page_table_entry_size;
+ u32 page_table_entry_cnt;
+ void *virtual_page_table;
+ void *shadow_page_table;
+
+ int page_table_level;
+ bool has_shadow_page_table;
+ u32 pde_base_index;
+
+ struct list_head list;
+ struct kref ref;
+ atomic_t pincount;
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+};
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+#define ggtt_get_guest_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_set_guest_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_get_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ggtt_set_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_get_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_set_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_get_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_set_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
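+/*
+ * Expansion sketch: the guest/shadow variants above only differ in which
+ * page table array they hand to the common accessor, e.g.
+ *
+ *	ppgtt_get_shadow_root_entry(mm, &e, 0);
+ * becomes
+ *	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, &e, 0);
+ */
+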
+extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index);
+extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+
+struct intel_vgpu_guest_page;
+
+struct intel_vgpu_scratch_pt {
+ struct page *page;
+ unsigned long page_mfn;
+};
+
+
+struct intel_vgpu_gtt {
+ struct intel_vgpu_mm *ggtt_mm;
+ unsigned long active_ppgtt_mm_bitmap;
+ struct list_head mm_list_head;
+ DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ atomic_t n_write_protected_guest_page;
+ struct list_head oos_page_list_head;
+ struct list_head post_shadow_list_head;
+ struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+
+};
+
+extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+
+extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+
+extern struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+struct intel_vgpu_oos_page;
+
+struct intel_vgpu_shadow_page {
+ void *vaddr;
+ struct page *page;
+ int type;
+ struct hlist_node node;
+ unsigned long mfn;
+};
+
+struct intel_vgpu_guest_page {
+ struct hlist_node node;
+ bool writeprotection;
+ unsigned long gfn;
+ int (*handler)(void *, u64, void *, int);
+ void *data;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+};
+
+struct intel_vgpu_oos_page {
+ struct intel_vgpu_guest_page *guest_page;
+ struct list_head list;
+ struct list_head vm_list;
+ int id;
+ unsigned char mem[GTT_PAGE_SIZE];
+};
+
+#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+
+struct intel_vgpu_ppgtt_spt {
+ struct intel_vgpu_shadow_page shadow_page;
+ struct intel_vgpu_guest_page guest_page;
+ int guest_page_type;
+ atomic_t refcount;
+ struct intel_vgpu *vgpu;
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
+
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page,
+ unsigned long gfn,
+ int (*handler)(void *gp, u64, void *, int),
+ void *data);
+
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
+
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
+
+static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
+
+static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, intel_vgpu_destroy_mm);
+}
+
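+/*
+ * Reference-counting sketch, mirroring intel_vgpu_g2v_create_ppgtt_mm():
+ * reuse an existing PPGTT mm object when the guest hands over the same root
+ * pointers, otherwise create a new one:
+ *
+ *	mm = intel_vgpu_find_ppgtt_mm(vgpu, level, pdp);
+ *	if (mm)
+ *		intel_gvt_mm_reference(mm);
+ *	else
+ *		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+ *					  pdp, level, 0);
+ */
+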
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
+
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
+
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
+ unsigned long gma);
+
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f4579f5b6..398877c3d2fd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -19,12 +19,23 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#include <linux/types.h>
#include <xen/xen.h>
+#include <linux/kthread.h>
#include "i915_drv.h"
+#include "gvt.h"
struct intel_gvt_host intel_gvt_host;
@@ -33,6 +44,16 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+static const struct intel_gvt_ops intel_gvt_ops = {
+ .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
+ .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
+ .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
+ .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+ .vgpu_create = intel_gvt_create_vgpu,
+ .vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_reset = intel_gvt_reset_vgpu,
+};
+
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@@ -47,6 +68,8 @@ static const char * const supported_hypervisors[] = {
*/
int intel_gvt_init_host(void)
{
+ int ret;
+
if (intel_gvt_host.initialized)
return 0;
@@ -61,10 +84,12 @@ int intel_gvt_init_host(void)
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvm");
+ symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+#endif
}
/* Fail to load MPT modules - bail out */
@@ -72,7 +97,8 @@ int intel_gvt_init_host(void)
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
@@ -84,9 +110,67 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
- gvt->device_info.max_support_vgpus = 8;
- /* This function will grow large in GVT device model patches. */
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = 256;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ }
+ info->msi_cap_offset = pdev->msi_cap;
+}
+
+static int gvt_service_thread(void *data)
+{
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread is woken up by signal.\n"))
+ continue;
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
+ (void *)&gvt->service_request)) {
+ mutex_lock(&gvt->lock);
+ intel_gvt_emulate_vblank(gvt);
+ mutex_unlock(&gvt->lock);
+ }
+ }
+
+ return 0;
+}
+
+static void clean_service_thread(struct intel_gvt *gvt)
+{
+ kthread_stop(gvt->service_thread);
+}
+
+static int init_service_thread(struct intel_gvt *gvt)
+{
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
/**
@@ -99,14 +183,26 @@ static void init_device_info(struct intel_gvt *gvt)
*/
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt = to_gvt(dev_priv);
- if (WARN_ON(!gvt->initialized))
+ if (WARN_ON(!gvt))
return;
- /* Other de-initialization of GVT components will be introduced. */
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_opregion(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_clean_irq(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ intel_gvt_free_firmware(gvt);
+
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_clean_vgpu_types(gvt);
- gvt->initialized = false;
+ kfree(dev_priv->gvt);
+ dev_priv->gvt = NULL;
}
/**
@@ -122,7 +218,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt;
+ int ret;
+
/*
* Cannot initialize GVT device without intel_gvt_host gets
* initialized first.
@@ -130,16 +228,91 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
- if (WARN_ON(gvt->initialized))
+ if (WARN_ON(dev_priv->gvt))
return -EEXIST;
+ gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
gvt_dbg_core("init gvt device\n");
+ mutex_init(&gvt->lock);
+ gvt->dev_priv = dev_priv;
+
init_device_info(gvt);
- /*
- * Other initialization of GVT components will be introduce here.
- */
- gvt_dbg_core("gvt device creation is done\n");
- gvt->initialized = true;
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ return ret;
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_clean_irq;
+
+ ret = intel_gvt_init_opregion(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_opregion;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
+ &intel_gvt_ops);
+ if (ret) {
+ gvt_err("failed to register gvt-g host device: %d\n", ret);
+ goto out_clean_types;
+ }
+
+ gvt_dbg_core("gvt device initialization is done\n");
+ dev_priv->gvt = gvt;
return 0;
+
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_opregion:
+ intel_gvt_clean_opregion(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_clean_irq:
+ intel_gvt_clean_irq(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+ kfree(gvt);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6e519d..b1a7c8dd4b5f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_H_
@@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
+#include "mmio.h"
+#include "reg.h"
+#include "interrupt.h"
+#include "gtt.h"
+#include "display.h"
+#include "edid.h"
+#include "execlist.h"
+#include "scheduler.h"
+#include "sched_policy.h"
+#include "render.h"
+#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@@ -45,25 +65,381 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
- /* This data structure will grow bigger in GVT device model patches */
+ u32 cfg_space_size;
+ u32 mmio_size;
+ u32 mmio_bar;
+ unsigned long msi_cap_offset;
+ u32 gtt_start_offset;
+ u32 gtt_entry_size;
+ u32 gtt_entry_size_shift;
+ int gmadr_bytes_in_cmd;
+ u32 max_surface_size;
+};
+
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+ u64 aperture_sz;
+ u64 hidden_sz;
+ struct drm_mm_node low_gm_node;
+ struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+ struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ u32 base;
+ u32 size;
+};
+
+struct intel_vgpu_mmio {
+ void *vreg;
+ void *sreg;
+ bool disable_warn_untrack;
+};
+
+#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
+#define INTEL_GVT_MAX_BAR_NUM 4
+
+struct intel_vgpu_pci_bar {
+ u64 size;
+ bool tracked;
+};
+
+struct intel_vgpu_cfg_space {
+ unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+};
+
+#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
+
+#define INTEL_GVT_MAX_PIPE 4
+
+struct intel_vgpu_irq {
+ bool irq_warn_once[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ INTEL_GVT_EVENT_MAX);
+};
+
+struct intel_vgpu_opregion {
+ void *va;
+ u32 gfn[INTEL_GVT_OPREGION_PAGES];
+ struct page *pages[INTEL_GVT_OPREGION_PAGES];
+};
+
+#define vgpu_opregion(vgpu) (&(vgpu->opregion))
+
+#define INTEL_GVT_MAX_PORT 5
+
+struct intel_vgpu_display {
+ struct intel_vgpu_i2c_edid i2c_edid;
+ struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+ bool active;
+ bool resetting;
+ void *sched_data;
+
+ struct intel_vgpu_fence fence;
+ struct intel_vgpu_gm gm;
+ struct intel_vgpu_cfg_space cfg_space;
+ struct intel_vgpu_mmio mmio;
+ struct intel_vgpu_irq irq;
+ struct intel_vgpu_gtt gtt;
+ struct intel_vgpu_opregion opregion;
+ struct intel_vgpu_display display;
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ struct i915_gem_context *shadow_ctx;
+ struct notifier_block shadow_ctx_notifier_block;
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+ struct {
+ struct device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+ struct rb_root cache;
+ struct mutex cache_lock;
+ void *vfio_group;
+ struct notifier_block iommu_notifier;
+ } vdev;
+#endif
+};
+
+struct intel_gvt_gm {
+ unsigned long vgpu_allocated_low_gm_size;
+ unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+ unsigned long vgpu_allocated_fence_num;
+};
+
+#define INTEL_GVT_MMIO_HASH_BITS 9
+
+struct intel_gvt_mmio {
+ u32 *mmio_attribute;
+ DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+};
+
+struct intel_gvt_firmware {
+ void *cfg_space;
+ void *mmio;
+ bool firmware_loaded;
+};
+
+struct intel_gvt_opregion {
+ void __iomem *opregion_va;
+ u32 opregion_pa;
+};
+
+#define NR_MAX_INTEL_VGPU_TYPES 20
+struct intel_vgpu_type {
+ char name[16];
+ unsigned int max_instance;
+ unsigned int avail_instance;
+ unsigned int low_gm_size;
+ unsigned int high_gm_size;
+ unsigned int fence;
};
struct intel_gvt {
struct mutex lock;
- bool initialized;
-
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
+ struct intel_gvt_gm gm;
+ struct intel_gvt_fence fence;
+ struct intel_gvt_mmio mmio;
+ struct intel_gvt_firmware firmware;
+ struct intel_gvt_irq irq;
+ struct intel_gvt_gtt gtt;
+ struct intel_gvt_opregion opregion;
+ struct intel_gvt_workload_scheduler scheduler;
+ DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct intel_vgpu_type *types;
+ unsigned int num_types;
+
+ struct task_struct *service_thread;
+ wait_queue_head_t service_thread_wq;
+ unsigned long service_request;
+};
+
+static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
+{
+ return i915->gvt;
+}
+
+enum {
+ INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
+static inline void intel_gvt_request_service(struct intel_gvt *gvt,
+ int service)
+{
+ set_bit(service, (void *)&gvt->service_request);
+ wake_up(&gvt->service_thread_wq);
+}
+
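+/*
+ * Usage sketch (assumption: a display timer elsewhere in this series asks
+ * the service thread to emulate vblanks):
+ *
+ *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ */
+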
+void intel_gvt_free_firmware(struct intel_gvt *gvt);
+int intel_gvt_load_firmware(struct intel_gvt *gvt);
+
+/* Aperture/GM space definitions for GVT device */
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_sz(gvt) \
+ ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+ (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+ (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+ (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+ (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+
+struct intel_vgpu_creation_params {
+ __u64 handle;
+ __u64 low_gm_sz; /* in MB */
+ __u64 high_gm_sz; /* in MB */
+ __u64 fence_sz;
+ __s32 primary;
+ __u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value);
+
+/* Macros for easily accessing vGPU virtual/shadow register */
+#define vgpu_vreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+
+#define for_each_active_vgpu(gvt, vgpu, id) \
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
+ for_each_if(vgpu->active)
+
+static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
+ u32 offset, u32 val, bool low)
+{
+ u32 *pval;
+
+ /* BAR offset should be 32-bit aligned */
+ offset = rounddown(offset, 4);
+ pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+
+ if (low) {
+ /*
+ * only update bits 31..4,
+ * leave bits 3..0 unchanged.
+ */
+ *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+ } else {
+ *pval = val;
+ }
+}
+
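+/*
+ * Worked example for the low-dword case above: with a current BAR value of
+ * 0x0000000c (read-only type bits in bits 3:0), a guest write of 0xf0000000
+ * yields 0xf000000c -- the base address moves, the type bits stay.
+ */
+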
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type);
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+
+
+/* validating GM functions */
+#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
+ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
+ ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_valid(vgpu, gmadr) \
+ ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
+ (vgpu_gmadr_is_hidden(vgpu, gmadr))))
+
+#define gvt_gmadr_is_aperture(gvt, gmadr) \
+ ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
+ (gmadr <= gvt_aperture_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_hidden(gvt, gmadr) \
+ ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
+ (gmadr <= gvt_hidden_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_valid(gvt, gmadr) \
+ (gvt_gmadr_is_aperture(gvt, gmadr) || \
+ gvt_gmadr_is_hidden(gvt, gmadr))
+
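+/*
+ * Per the macros above, a graphics memory address is valid for a vGPU if it
+ * falls in either of that vGPU's two ranges:
+ *
+ *	aperture: [vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_gmadr_end(vgpu)]
+ *	hidden:   [vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_gmadr_end(vgpu)]
+ */
+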
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index);
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index);
+
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+void intel_gvt_clean_opregion(struct intel_gvt *gvt);
+int intel_gvt_init_opregion(struct intel_gvt *gvt);
+
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
+
+struct intel_gvt_ops {
+ int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
+ struct intel_vgpu_type *);
+ void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_reset)(struct intel_vgpu *);
+};
+
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
new file mode 100644
index 000000000000..522809710312
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -0,0 +1,2848 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Pei Zhang <pei.zhang@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
+/* Register contains RO bits */
+#define F_RO (1 << 0)
+/* Register contains graphics address */
+#define F_GMADR (1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK (1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS (1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED (1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED (1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN (1 << 6)
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ return D_BDW;
+ else if (IS_SKYLAKE(gvt->dev_priv))
+ return D_SKL;
+
+ return 0;
+}
+
+bool intel_gvt_match_device(struct intel_gvt *gvt,
+ unsigned long device)
+{
+ return intel_gvt_get_device_type(gvt) & device;
+}
+
+static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+}
+
+static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+}
+
+static int new_mmio_info(struct intel_gvt *gvt,
+ u32 offset, u32 flags, u32 size,
+ u32 addr_mask, u32 ro_mask, u32 device,
+ void *read, void *write)
+{
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (!intel_gvt_match_device(gvt, device))
+ return 0;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
+ for (i = start; i < end; i += 4) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
+ if (p)
+ gvt_err("dup mmio definition offset %x\n",
+ info->offset);
+ info->size = size;
+ info->length = (i + 4) < end ? 4 : (end - i);
+ info->addr_mask = addr_mask;
+ info->device = device;
+ info->read = read ? read : intel_vgpu_default_mmio_read;
+ info->write = write ? write : intel_vgpu_default_mmio_write;
+ gvt->mmio.mmio_attribute[info->offset / 4] = flags;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ }
+ return 0;
+}
+
+static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+{
+ enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+
+ reg &= ~GENMASK(11, 0);
+ for_each_engine(engine, gvt->dev_priv, id) {
+ if (engine->mmio_base == reg)
+ return id;
+ }
+ return -1;
+}
+
+#define offset_to_fence_num(offset) \
+ ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
+
+#define fence_num_to_offset(num) \
+ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
+ unsigned int fence_num, void *p_data, unsigned int bytes)
+{
+ if (fence_num >= vgpu_fence_sz(vgpu)) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence num %d access fence num %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+ memset(p_data, 0, bytes);
+ }
+ return 0;
+}
+
+static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
+ p_data, bytes);
+ if (ret)
+ return ret;
+ read_vreg(vgpu, off, p_data, bytes);
+ return 0;
+}
+
+static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int fence_num = offset_to_fence_num(off);
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
+ if (ret)
+ return ret;
+ write_vreg(vgpu, off, p_data, bytes);
+
+ intel_vgpu_write_fence(vgpu, fence_num,
+ vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+ return 0;
+}
+
+#define CALC_MODE_MASK_REG(old, new) \
+ (((new) & GENMASK(31, 16)) \
+ | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
+ | ((new) & ((new) >> 16))))
+
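+/*
+ * Worked example of the masked-write convention above: the high 16 bits of
+ * the written value select which of the low 16 bits may change.
+ *
+ *	CALC_MODE_MASK_REG(0x0000, 0x00080008) == 0x00080008  (bit 3 set)
+ *	CALC_MODE_MASK_REG(0xffff, 0x00080000) == 0x0008fff7  (bit 3 cleared,
+ *							        others kept)
+ */
+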
+static int mul_force_wake_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 old, new;
+ uint32_t ack_reg_offset;
+
+ old = vgpu_vreg(vgpu, offset);
+ new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ switch (offset) {
+ case FORCEWAKE_RENDER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
+ break;
+ case FORCEWAKE_BLITTER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+ break;
+ case FORCEWAKE_MEDIA_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
+ break;
+ default:
+ /* should not hit here */
+ gvt_err("invalid forcewake offset 0x%x\n", offset);
+ return 1;
+ }
+ } else {
+ ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
+ }
+
+ vgpu_vreg(vgpu, offset) = new;
+ vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
+ return 0;
+}
+
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * The current_vgpu will be set to NULL after the scheduler is
+ * stopped, when the reset is triggered by the current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, bitmap);
+
+ /* full GPU reset */
+ if (bitmap == 0xff) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_vgpu_clean_gtt(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ setup_vgpu_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+ intel_vgpu_init_gtt(vgpu);
+ }
+
+ vgpu->resetting = false;
+
+ return 0;
+}
+
+static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ u64 bitmap = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & GEN6_GRDOM_FULL) {
+ gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
+ bitmap = 0xff;
+ }
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ bitmap |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ bitmap |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ bitmap |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ bitmap |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ bitmap |= (1 << VCS2);
+ }
+ return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+}
+
+static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
+}
+
+static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
+}
+
+static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+
+ } else
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ ~(PP_ON | PP_SEQUENCE_POWER_DOWN
+ | PP_CYCLE_DELAY_ACTIVE);
+ return 0;
+}
+
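+/*
+ * Registration sketch (assumption: the MMIO handler table built later in
+ * this series goes through new_mmio_info(); a direct call hooking the
+ * 4-byte PCH_PP_CONTROL register would look like):
+ *
+ *	new_mmio_info(gvt, i915_mmio_reg_offset(PCH_PP_CONTROL), 0, 4,
+ *		      0, 0, D_BDW | D_SKL, NULL, pch_pp_control_mmio_write);
+ */
+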
+static int transconf_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
+ vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
+ return 0;
+}
+
+static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
+ else
+ vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
+ vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
+
+ return 0;
+}
+
+static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (1 << 17);
+ return 0;
+}
+
+static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = 3;
+ return 0;
+}
+
+static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (0x2f << 16);
+ return 0;
+}
+
+static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & PIPECONF_ENABLE)
+ vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ intel_gvt_check_vblank_emulation(vgpu->gvt);
+ return 0;
+}
+
+static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
+ vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
+ } else {
+ vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
+ if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
+ }
+ return 0;
+}
+
+static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
+ return 0;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1 0
+#define FDI_LINK_TRAIN_PATTERN2 1
+
+static int fdi_auto_training_started(struct intel_vgpu *vgpu)
+{
+ u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
+ u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+
+ if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
+ (rx_ctl & FDI_RX_ENABLE) &&
+ (rx_ctl & FDI_AUTO_TRAINING) &&
+ (tx_ctl & DP_TP_CTL_ENABLE) &&
+ (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
+ return 1;
+ else
+ return 0;
+}
+
+static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
+ enum pipe pipe, unsigned int train_pattern)
+{
+ i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+ unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+ unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+ unsigned int fdi_iir_check_bits;
+
+ fdi_rx_imr = FDI_RX_IMR(pipe);
+ fdi_tx_ctl = FDI_TX_CTL(pipe);
+ fdi_rx_ctl = FDI_RX_CTL(pipe);
+
+ if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+ fdi_iir_check_bits = FDI_RX_BIT_LOCK;
+ } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+ fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
+ } else {
+ gvt_err("Invalid train pattern %d\n", train_pattern);
+ return -EINVAL;
+ }
+
+ fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+ fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
+
+ /* If the IMR bit has been masked, skip the check */
+ if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ return 0;
+
+ if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ == fdi_tx_check_bits)
+ && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ == fdi_rx_check_bits))
+ return 1;
+ else
+ return 0;
+}
+
+#define INVALID_INDEX (~0U)
+
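+/*
+ * calc_index - map a register offset to a pipe/port index. "start" is
+ * the offset of the first instance, "next" the offset of the second
+ * (their difference gives the per-instance stride) and "end"/"i915_end"
+ * bound the valid range. Offsets outside the range yield INVALID_INDEX.
+ */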
+static unsigned int calc_index(unsigned int offset, unsigned int start,
+ unsigned int next, unsigned int end, i915_reg_t i915_end)
+{
+ unsigned int range = next - start;
+
+ if (!end)
+ end = i915_mmio_reg_offset(i915_end);
+ if (offset < start || offset > end)
+ return INVALID_INDEX;
+ offset -= start;
+ return offset / range;
+}
+
+#define FDI_RX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
+
+#define FDI_TX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
+
+#define FDI_RX_IMR_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
+
+static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ i915_reg_t fdi_rx_iir;
+ unsigned int index;
+ int ret;
+
+ if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_CTL_TO_PIPE(offset);
+ else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_TX_CTL_TO_PIPE(offset);
+ else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_IMR_TO_PIPE(offset);
+ else {
+ gvt_err("Unsupport registers %x\n", offset);
+ return -EINVAL;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ fdi_rx_iir = FDI_RX_IIR(index);
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+
+ if (offset == _FDI_RXA_CTL)
+ if (fdi_auto_training_started(vgpu))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ DP_TP_STATUS_AUTOTRAIN_DONE;
+ return 0;
+}
+
+#define DP_TP_CTL_TO_PORT(offset) \
+ calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
+
+static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ i915_reg_t status_reg;
+ unsigned int index;
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ index = DP_TP_CTL_TO_PORT(offset);
+ data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
+ if (data == 0x2) {
+ status_reg = DP_TP_STATUS(index);
+ vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ }
+ return 0;
+}
+
+static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_val;
+ u32 sticky_mask;
+
+ reg_val = *((u32 *)p_data);
+ sticky_mask = GENMASK(27, 26) | (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
+ (vgpu_vreg(vgpu, offset) & sticky_mask);
+ vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
+ return 0;
+}
+
+static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
+ vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ return 0;
+}
+
+static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & FDI_MPHY_IOSFSB_RESET_CTL)
+ vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
+ else
+ vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+ return 0;
+}
+
+#define DSPSURF_TO_PIPE(offset) \
+ calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
+
+static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ unsigned int index = DSPSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = DSPSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = PRIMARY_A_FLIP_DONE,
+ [PIPE_B] = PRIMARY_B_FLIP_DONE,
+ [PIPE_C] = PRIMARY_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+#define SPRSURF_TO_PIPE(offset) \
+ calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
+
+static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int index = SPRSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = SPRSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = SPRITE_A_FLIP_DONE,
+ [PIPE_B] = SPRITE_B_FLIP_DONE,
+ [PIPE_C] = SPRITE_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
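+/*
+ * Map an AUX channel control register offset to its AUX_CHANNEL_* event
+ * and inject the corresponding virtual interrupt into the vGPU.
+ */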
+static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
+ unsigned int reg)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum intel_gvt_event_type event;
+
+ if (reg == _DPA_AUX_CH_CTL)
+ event = AUX_CHANNEL_A;
+ else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ event = AUX_CHANNEL_B;
+ else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ event = AUX_CHANNEL_C;
+ else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ event = AUX_CHANNEL_D;
+ else {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ return 0;
+}
+
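+/*
+ * Complete an emulated AUX transaction: set DONE, clear BUSY and the
+ * receive error, flag a timeout error when the data is not valid,
+ * record the message size and, if the guest enabled the AUX interrupt,
+ * raise the matching virtual event.
+ */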
+static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
+ unsigned int reg, int len, bool data_valid)
+{
+ /* mark transaction done */
+ value |= DP_AUX_CH_CTL_DONE;
+ value &= ~DP_AUX_CH_CTL_SEND_BUSY;
+ value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+ if (data_valid)
+ value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+ else
+ value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+ /* message size */
+ value &= ~(0xf << 20);
+ value |= (len << 20);
+ vgpu_vreg(vgpu, reg) = value;
+
+ if (value & DP_AUX_CH_CTL_INTERRUPT)
+ return trigger_aux_channel_interrupt(vgpu, reg);
+ return 0;
+}
+
+static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
+ uint8_t t)
+{
+ if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+ /* training pattern 1 for CR */
+ /* set LANE0_CR_DONE, LANE1_CR_DONE */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ /* set LANE2_CR_DONE, LANE3_CR_DONE */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_TRAINING_PATTERN_2) {
+ /* training pattern 2 for EQ */
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* set INTERLANE_ALIGN_DONE */
+ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+ DPCD_INTERLANE_ALIGN_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_LINK_TRAINING_DISABLED) {
+ /* finish link training */
+ /* set sink status as synchronized */
+ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ }
+}
+
+#define _REG_HSW_DP_AUX_CH_CTL(dp) \
+ ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
+
+#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
+
+#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
+
+#define dpy_is_valid_port(port) \
+ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
+
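+/*
+ * Emulate writes to the DP AUX channel control registers. Native AUX
+ * reads and writes are served from the per-port virtual DPCD buffer
+ * (with link-training status updated on DPCD_TRAINING_PATTERN_SET
+ * writes); anything else is treated as an i2c-over-AUX transaction.
+ */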
+static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int msg, addr, ctrl, op, len;
+ int port_index = OFFSET_TO_DP_AUX_PORT(offset);
+ struct intel_vgpu_dpcd_data *dpcd = NULL;
+ struct intel_vgpu_port *port = NULL;
+ u32 data;
+
+ if (!dpy_is_valid_port(port_index)) {
+ gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ return 0;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
+ offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ /* SKL DPB/C/D aux ctl register changed */
+ return 0;
+ } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
+ /* write to the data registers */
+ return 0;
+ }
+
+ if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* just want to clear the sticky bits */
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+ }
+
+ port = &display->ports[port_index];
+ dpcd = port->dpcd;
+
+ /* read out message from DATA1 register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ len = msg & 0xff;
+ op = ctrl >> 4;
+
+ if (op == GVT_AUX_NATIVE_WRITE) {
+ int t;
+ uint8_t buf[16];
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Write request exceeds what we support.
+ * DPCD spec: When a Source Device is writing a DPCD
+ * address not supported by the Sink Device, the Sink
+ * Device shall reply with AUX NACK and "M" equal to
+ * zero.
+ */
+
+ /* NAK the write */
+ vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
+ return 0;
+ }
+
+ /*
+ * Write request format: (command + address) occupies
+ * 3 bytes, followed by (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* unpack data from vreg to buf */
+ for (t = 0; t < 4; t++) {
+ u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
+
+ buf[t * 4] = (r >> 24) & 0xff;
+ buf[t * 4 + 1] = (r >> 16) & 0xff;
+ buf[t * 4 + 2] = (r >> 8) & 0xff;
+ buf[t * 4 + 3] = r & 0xff;
+ }
+
+ /* write to virtual DPCD */
+ if (dpcd && dpcd->data_valid) {
+ for (t = 0; t <= len; t++) {
+ int p = addr + t;
+
+ dpcd->data[p] = buf[t];
+ /* check for link training */
+ if (p == DPCD_TRAINING_PATTERN_SET)
+ dp_aux_ch_ctl_link_training(dpcd,
+ buf[t]);
+ }
+ }
+
+ /* ACK the write */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ if (op == GVT_AUX_NATIVE_READ) {
+ int idx, i, ret = 0;
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Read request exceeds what we support.
+ * DPCD spec: A Sink Device receiving a Native AUX CH
+ * read request for an unsupported DPCD address must
+ * reply with an AUX ACK and read data set equal to
+ * zero instead of replying with AUX NACK.
+ */
+
+ /* ACK the read */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ vgpu_vreg(vgpu, offset + 8) = 0;
+ vgpu_vreg(vgpu, offset + 12) = 0;
+ vgpu_vreg(vgpu, offset + 16) = 0;
+ vgpu_vreg(vgpu, offset + 20) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ true);
+ return 0;
+ }
+
+ for (idx = 1; idx <= 5; idx++) {
+ /* clear the data registers */
+ vgpu_vreg(vgpu, offset + 4 * idx) = 0;
+ }
+
+ /*
+ * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* read from virtual DPCD to vreg */
+ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+ if (dpcd && dpcd->data_valid) {
+ for (i = 1; i <= (len + 1); i++) {
+ int t;
+
+ t = dpcd->data[addr + i - 1];
+ t <<= (24 - 8 * (i % 4));
+ ret |= t;
+
+ if ((i % 4 == 3) || (i == (len + 1))) {
+ vgpu_vreg(vgpu, offset +
+ (i / 4 + 1) * 4) = ret;
+ ret = 0;
+ }
+ }
+ }
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ /* i2c transaction starts */
+ intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
+
+ if (data & DP_AUX_CH_CTL_INTERRUPT)
+ trigger_aux_channel_interrupt(vgpu, offset);
+ return 0;
+}
+
+static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool vga_disable;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
+
+ gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
+ vga_disable ? "Disable" : "Enable");
+ return 0;
+}
+
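+/*
+ * The vGPU keeps a small cache of sideband (SBI) registers. The helpers
+ * below look up an entry by SBI offset and append a new one on first
+ * write, up to SBI_REG_MAX entries.
+ */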
+static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int sbi_offset)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (display->sbi.registers[i].offset == sbi_offset)
+ break;
+
+ if (i == num)
+ return 0;
+
+ return display->sbi.registers[i].value;
+}
+
+static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int offset, u32 value)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i) {
+ if (display->sbi.registers[i].offset == offset)
+ break;
+ }
+
+ if (i == num) {
+ if (num == SBI_REG_MAX) {
+ gvt_err("vgpu%d: SBI caching meets maximum limits\n",
+ vgpu->id);
+ return;
+ }
+ display->sbi.number++;
+ }
+
+ display->sbi.registers[i].offset = offset;
+ display->sbi.registers[i].value = value;
+}
+
+static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
+ sbi_offset);
+ }
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+ data |= SBI_READY;
+
+ data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data |= SBI_RESPONSE_SUCCESS;
+
+ vgpu_vreg(vgpu, offset) = data;
+
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+
+ write_virtual_sbi_register(vgpu, sbi_offset,
+ vgpu_vreg(vgpu, SBI_DATA));
+ }
+ return 0;
+}
+
+#define _vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
+
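+/*
+ * PVINFO page handlers: reads are validated against the register ranges
+ * a guest is expected to access; anything else is logged as an invalid
+ * pvinfo access.
+ */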
+static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool invalid_read = false;
+
+ read_vreg(vgpu, offset, p_data, bytes);
+
+ switch (offset) {
+ case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
+ if (offset + bytes > _vgtif_reg(vgt_id) + 4)
+ invalid_read = true;
+ break;
+ case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
+ _vgtif_reg(avail_rs.fence_num):
+ if (offset + bytes >
+ _vgtif_reg(avail_rs.fence_num) + 4)
+ invalid_read = true;
+ break;
+ case 0x78010: /* vgt_caps */
+ case 0x7881c:
+ break;
+ default:
+ invalid_read = true;
+ break;
+ }
+ if (invalid_read)
+ gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ offset, bytes, *(u32 *)p_data);
+ return 0;
+}
+
+static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
+{
+ int ret = 0;
+
+ switch (notification) {
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_EXECLIST_CONTEXT_CREATE:
+ case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
+ case 1: /* Remove this in guest driver. */
+ break;
+ default:
+ gvt_err("Invalid PV notification %d\n", notification);
+ }
+ return ret;
+}
+
+static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ char *env[3] = {NULL, NULL, NULL};
+ char vmid_str[20];
+ char display_ready_str[20];
+
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ env[0] = display_ready_str;
+
+ snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
+ env[1] = vmid_str;
+
+ return kobject_uevent_env(kobj, KOBJ_ADD, env);
+}
+
+static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ int ret;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ switch (offset) {
+ case _vgtif_reg(display_ready):
+ send_display_ready_uevent(vgpu, data ? 1 : 0);
+ break;
+ case _vgtif_reg(g2v_notify):
+ ret = handle_g2v_notification(vgpu, data);
+ break;
+ /* add xhot and yhot to the handled list to avoid error logs */
+ case 0x78830:
+ case 0x78834:
+ case _vgtif_reg(pdp[0].lo):
+ case _vgtif_reg(pdp[0].hi):
+ case _vgtif_reg(pdp[1].lo):
+ case _vgtif_reg(pdp[1].hi):
+ case _vgtif_reg(pdp[2].lo):
+ case _vgtif_reg(pdp[2].hi):
+ case _vgtif_reg(pdp[3].lo):
+ case _vgtif_reg(pdp[3].hi):
+ case _vgtif_reg(execlist_context_descriptor_lo):
+ case _vgtif_reg(execlist_context_descriptor_hi):
+ break;
+ default:
+ gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ offset, bytes, data);
+ break;
+ }
+ return 0;
+}
+
+static int pf_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 val = *(u32 *)p_data;
+
+ if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+ offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+ offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+ WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
+}
+
+static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
+ vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ else
+ vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ return 0;
+}
+
+static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
+ vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
+ return 0;
+}
+
+static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mode;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ mode = vgpu_vreg(vgpu, offset);
+
+ if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return 0;
+}
+
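+/*
+ * Gen9 TRTT handlers: reject a TRTTE configuration that uses a physical
+ * address and mirror the programmed values to hardware, since TRTTE is
+ * not saved per context.
+ */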
+static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 trtte = *(u32 *)p_data;
+
+ if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
+ WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ /* TRTTE is not per-context */
+ I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+ return 0;
+}
+
+static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 val = *(u32 *)p_data;
+
+ if (val & 1) {
+ /* unblock hw logic */
+ I915_WRITE(_MMIO(offset), val);
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
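+/*
+ * Synthesize DPLL_STATUS from the enable bits of the individual PLL
+ * control registers (the raw offsets appear to be the SKL LCPLL1/2 and
+ * WRPLL1/2 controls), reporting a PLL as locked when its control
+ * register shows it enabled.
+ */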
+static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = 0;
+
+ if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
+ v |= (1 << 0);
+
+ if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
+ v |= (1 << 8);
+
+ if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
+ v |= (1 << 16);
+
+ if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
+ v |= (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
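+/*
+ * Emulate GEN6_PCODE_MAILBOX writes. The "read memory latency" command
+ * (0x6) returns canned Skylake latency values through GEN6_PCODE_DATA,
+ * command 0x5 sets bit 0 of the data register, and the busy bit (31) is
+ * cleared before the write is completed.
+ */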
+static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ u32 cmd = value & 0xff;
+ u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+
+ switch (cmd) {
+ case 0x6:
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ break;
+ case 0x5:
+ *data0 |= 0x1;
+ break;
+ }
+
+ gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
+ vgpu->id, value, *data0);
+
+ value &= ~(1 << 31);
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
+static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ v |= (v >> 1);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
+}
+
+static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t reg = {.reg = offset};
+
+ switch (offset) {
+ case 0x4ddc:
+ vgpu_vreg(vgpu, offset) = 0x8000003c;
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ case 0x42080:
+ vgpu_vreg(vgpu, offset) = 0x8000;
+ /* WaCompressedResourceDisplayNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ /* other bits are MBZ. */
+ v &= (1 << 31) | (1 << 30);
+ v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
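+/*
+ * Collect the four ELSP dwords written by the guest; once the last
+ * dword arrives, submit the execlist for the corresponding ring.
+ */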
+static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct intel_vgpu_execlist *execlist;
+ u32 data = *(u32 *)p_data;
+ int ret = 0;
+
+ if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ return -EINVAL;
+
+ execlist = &vgpu->execlist[ring_id];
+
+ execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ if (execlist->elsp_dwords.index == 3) {
+ ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ if (ret)
+ gvt_err("failed to submit workload on ring %d\n", ring_id);
+ }
+
+ ++execlist->elsp_dwords.index;
+ execlist->elsp_dwords.index &= 0x3;
+ return ret;
+}
+
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ bool enable_execlist;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+ || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+ gvt_dbg_core("EXECLIST %s on ring %d\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ ring_id);
+
+ if (enable_execlist)
+ intel_vgpu_start_schedule(vgpu);
+ }
+ return 0;
+}
+
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int rc = 0;
+ unsigned int id = 0;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) = 0;
+
+ switch (offset) {
+ case 0x4260:
+ id = RCS;
+ break;
+ case 0x4264:
+ id = VCS;
+ break;
+ case 0x4268:
+ id = VCS2;
+ break;
+ case 0x426c:
+ id = BCS;
+ break;
+ case 0x4270:
+ id = VECS;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+ return rc;
+}
+
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ data |= RESET_CTL_READY_TO_RESET;
+ else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ data &= ~RESET_CTL_READY_TO_RESET;
+
+ vgpu_vreg(vgpu, offset) = data;
+ return 0;
+}
+
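+/*
+ * MMIO registration helpers. MMIO_F() registers one entry with
+ * new_mmio_info(); the arguments appear to be: register, size in bytes,
+ * flags, address mask, read-only mask, device set, read handler and
+ * write handler. The MMIO_RING_* variants register the same entry for
+ * each of the render, blitter, BSD and VEBOX ring bases.
+ */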
+#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
+ ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ f, s, am, rm, d, r, w); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg, d) \
+ MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_DH(reg, d, r, w) \
+ MMIO_F(reg, 4, 0, 0, 0, d, r, w)
+
+#define MMIO_DFH(reg, d, f, r, w) \
+ MMIO_F(reg, 4, f, 0, 0, d, r, w)
+
+#define MMIO_GM(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+
+#define MMIO_RO(reg, d, f, rm, r, w) \
+ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
+
+#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+} while (0)
+
+#define MMIO_RING_D(prefix, d) \
+ MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_RING_DFH(prefix, d, f, r, w) \
+ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
+
+#define MMIO_RING_GM(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+
+#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
+ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
+
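+/*
+ * Register the MMIO handlers shared by all supported platforms (mostly
+ * D_ALL entries); platform-specific tables such as Broadwell's follow.
+ */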
+static int init_generic_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+
+ MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(SDEISR, D_ALL);
+
+ MMIO_RING_D(RING_HWSTAM, D_ALL);
+
+ MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+
+#define RING_REG(base) (base + 0x28)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x134)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+ MMIO_GM(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM(CCID, D_ALL, NULL, NULL);
+ MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_D(GEN7_CXT_SIZE, D_ALL);
+
+ MMIO_RING_D(RING_TAIL, D_ALL);
+ MMIO_RING_D(RING_HEAD, D_ALL);
+ MMIO_RING_D(RING_CTL, D_ALL);
+ MMIO_RING_D(RING_ACTHD, D_ALL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+
+ /* RING MODE */
+#define RING_REG(base) (base + 0x29c)
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+#undef RING_REG
+
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_D(GAM_ECOCHK, D_ALL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x9030, D_ALL);
+ MMIO_D(0x20a0, D_ALL);
+ MMIO_D(0x2420, D_ALL);
+ MMIO_D(0x2430, D_ALL);
+ MMIO_D(0x2434, D_ALL);
+ MMIO_D(0x2438, D_ALL);
+ MMIO_D(0x243c, D_ALL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+
+ /* display */
+ MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(0x602a0, D_ALL);
+
+ MMIO_D(0x65050, D_ALL);
+ MMIO_D(0x650b4, D_ALL);
+
+ MMIO_D(0xc4040, D_ALL);
+ MMIO_D(DERRMR, D_ALL);
+
+ MMIO_D(PIPEDSL(PIPE_A), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_B), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_C), D_ALL);
+ MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
+
+ MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+
+ MMIO_D(PIPESTAT(PIPE_A), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_B), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_C), D_ALL);
+ MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(CURCNTR(PIPE_A), D_ALL);
+ MMIO_D(CURCNTR(PIPE_B), D_ALL);
+ MMIO_D(CURCNTR(PIPE_C), D_ALL);
+
+ MMIO_D(CURPOS(PIPE_A), D_ALL);
+ MMIO_D(CURPOS(PIPE_B), D_ALL);
+ MMIO_D(CURPOS(PIPE_C), D_ALL);
+
+ MMIO_D(CURBASE(PIPE_A), D_ALL);
+ MMIO_D(CURBASE(PIPE_B), D_ALL);
+ MMIO_D(CURBASE(PIPE_C), D_ALL);
+
+ MMIO_D(0x700ac, D_ALL);
+ MMIO_D(0x710ac, D_ALL);
+ MMIO_D(0x720ac, D_ALL);
+
+ MMIO_D(0x70090, D_ALL);
+ MMIO_D(0x70094, D_ALL);
+ MMIO_D(0x70098, D_ALL);
+ MMIO_D(0x7009c, D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_A), D_ALL);
+ MMIO_D(DSPADDR(PIPE_A), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(DSPPOS(PIPE_A), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_A), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_B), D_ALL);
+ MMIO_D(DSPADDR(PIPE_B), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(DSPPOS(PIPE_B), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_B), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_C), D_ALL);
+ MMIO_D(DSPADDR(PIPE_C), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(DSPPOS(PIPE_C), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_C), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_A), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(SPRPOS(PIPE_A), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_A), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_A), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_B), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(SPRPOS(PIPE_B), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_B), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_B), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_C), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(SPRPOS(PIPE_C), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_C), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_C), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
+
+ MMIO_D(WM0_PIPEA_ILK, D_ALL);
+ MMIO_D(WM0_PIPEB_ILK, D_ALL);
+ MMIO_D(WM0_PIPEC_IVB, D_ALL);
+ MMIO_D(WM1_LP_ILK, D_ALL);
+ MMIO_D(WM2_LP_ILK, D_ALL);
+ MMIO_D(WM3_LP_ILK, D_ALL);
+ MMIO_D(WM1S_LP_ILK, D_ALL);
+ MMIO_D(WM2S_LP_IVB, D_ALL);
+ MMIO_D(WM3S_LP_IVB, D_ALL);
+
+ MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
+ MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
+
+ MMIO_D(0x48268, D_ALL);
+
+ MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
+ gmbus_mmio_write);
+ MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+
+ MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+
+ MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
+
+ MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+
+ MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
+
+ MMIO_D(_FDI_RXA_MISC, D_ALL);
+ MMIO_D(_FDI_RXB_MISC, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+
+ MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
+ MMIO_D(PCH_PP_DIVISOR, D_ALL);
+ MMIO_D(PCH_PP_STATUS, D_ALL);
+ MMIO_D(PCH_LVDS, D_ALL);
+ MMIO_D(_PCH_DPLL_A, D_ALL);
+ MMIO_D(_PCH_DPLL_B, D_ALL);
+ MMIO_D(_PCH_FPA0, D_ALL);
+ MMIO_D(_PCH_FPA1, D_ALL);
+ MMIO_D(_PCH_FPB0, D_ALL);
+ MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(PCH_DREF_CONTROL, D_ALL);
+ MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
+ MMIO_D(PCH_DPLL_SEL, D_ALL);
+
+ MMIO_D(0x61208, D_ALL);
+ MMIO_D(0x6120c, D_ALL);
+ MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
+ MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
+
+ MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+
+ MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
+ PORTA_HOTPLUG_STATUS_MASK
+ | PORTB_HOTPLUG_STATUS_MASK
+ | PORTC_HOTPLUG_STATUS_MASK
+ | PORTD_HOTPLUG_STATUS_MASK,
+ NULL, NULL);
+
+ MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
+ MMIO_D(FUSE_STRAP, D_ALL);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
+
+ MMIO_D(DISP_ARB_CTL, D_ALL);
+ MMIO_D(DISP_ARB_CTL2, D_ALL);
+
+ MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
+ MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
+ MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
+
+ MMIO_D(SOUTH_CHICKEN1, D_ALL);
+ MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
+ MMIO_D(_TRANSA_CHICKEN1, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
+ MMIO_D(_TRANSA_CHICKEN2, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+
+ MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
+ MMIO_D(ILK_DPFC_CONTROL, D_ALL);
+ MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
+ MMIO_D(ILK_DPFC_STATUS, D_ALL);
+ MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
+ MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
+ MMIO_D(ILK_FBC_RT_BASE, D_ALL);
+
+ MMIO_D(IPS_CTL, D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(0x60110, D_ALL);
+ MMIO_D(0x61110, D_ALL);
+ MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+
+ MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(SPLL_CTL, D_ALL);
+ MMIO_D(_WRPLL_CTL1, D_ALL);
+ MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
+ MMIO_D(0x46508, D_ALL);
+
+ MMIO_D(0x49080, D_ALL);
+ MMIO_D(0x49180, D_ALL);
+ MMIO_D(0x49280, D_ALL);
+
+ MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
+
+ MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
+
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
+
+ MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
+ MMIO_D(SBI_ADDR, D_ALL);
+ MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
+ MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
+ MMIO_D(PIXCLK_GATE, D_ALL);
+
+ MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
+
+ MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64ec0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+
+ MMIO_D(_TRANSA_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSB_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSC_MSA_MISC, D_ALL);
+ MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+
+ MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
+ MMIO_D(FORCEWAKE_ACK, D_ALL);
+ MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
+ MMIO_D(GTFIFODBG, D_ALL);
+ MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+ MMIO_D(ECOBUS, D_ALL);
+ MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
+ MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
+ MMIO_D(GEN6_RPNSWREQ, D_ALL);
+ MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
+ MMIO_D(GEN6_RPSTAT1, D_ALL);
+ MMIO_D(GEN6_RP_CONTROL, D_ALL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP, D_ALL);
+ MMIO_D(GEN6_RP_PREV_UP, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC_SLEEP, D_ALL);
+ MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_PMINTRMSK, D_ALL);
+ MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+
+ MMIO_D(RSTDBYCTL, D_ALL);
+
+ MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
+ MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
+ MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
+
+ MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(TILECTL, D_ALL);
+
+ MMIO_D(GEN6_UCGCTL1, D_ALL);
+ MMIO_D(GEN6_UCGCTL2, D_ALL);
+
+ MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_DATA, D_ALL);
+ MMIO_D(0x13812c, D_ALL);
+ MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
+ MMIO_D(HSW_EDRAM_CAP, D_ALL);
+ MMIO_D(HSW_IDICR, D_ALL);
+ MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
+
+ MMIO_D(0x3c, D_ALL);
+ MMIO_D(0x860, D_ALL);
+ MMIO_D(ECOSKPD, D_ALL);
+ MMIO_D(0x121d0, D_ALL);
+ MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
+ MMIO_D(0x41d0, D_ALL);
+ MMIO_D(GAC_ECO_BITS, D_ALL);
+ MMIO_D(0x6200, D_ALL);
+ MMIO_D(0x6204, D_ALL);
+ MMIO_D(0x6208, D_ALL);
+ MMIO_D(0x7118, D_ALL);
+ MMIO_D(0x7180, D_ALL);
+ MMIO_D(0x7408, D_ALL);
+ MMIO_D(0x7c00, D_ALL);
+ MMIO_D(GEN6_MBCTL, D_ALL);
+ MMIO_D(0x911c, D_ALL);
+ MMIO_D(0x9120, D_ALL);
+ MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(GAB_CTL, D_ALL);
+ MMIO_D(0x48800, D_ALL);
+ MMIO_D(0xce044, D_ALL);
+ MMIO_D(0xe6500, D_ALL);
+ MMIO_D(0xe6504, D_ALL);
+ MMIO_D(0xe6600, D_ALL);
+ MMIO_D(0xe6604, D_ALL);
+ MMIO_D(0xe6700, D_ALL);
+ MMIO_D(0xe6704, D_ALL);
+ MMIO_D(0xe6800, D_ALL);
+ MMIO_D(0xe6804, D_ALL);
+ MMIO_D(PCH_GMBUS4, D_ALL);
+ MMIO_D(PCH_GMBUS5, D_ALL);
+
+ MMIO_D(0x902c, D_ALL);
+ MMIO_D(0xec008, D_ALL);
+ MMIO_D(0xec00c, D_ALL);
+ MMIO_D(0xec008 + 0x18, D_ALL);
+ MMIO_D(0xec00c + 0x18, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 3, D_ALL);
+ MMIO_D(0xec408, D_ALL);
+ MMIO_D(0xec40c, D_ALL);
+ MMIO_D(0xec408 + 0x18, D_ALL);
+ MMIO_D(0xec40c + 0x18, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 3, D_ALL);
+ MMIO_D(0xfc810, D_ALL);
+ MMIO_D(0xfc81c, D_ALL);
+ MMIO_D(0xfc828, D_ALL);
+ MMIO_D(0xfc834, D_ALL);
+ MMIO_D(0xfcc00, D_ALL);
+ MMIO_D(0xfcc0c, D_ALL);
+ MMIO_D(0xfcc18, D_ALL);
+ MMIO_D(0xfcc24, D_ALL);
+ MMIO_D(0xfd000, D_ALL);
+ MMIO_D(0xfd00c, D_ALL);
+ MMIO_D(0xfd018, D_ALL);
+ MMIO_D(0xfd024, D_ALL);
+ MMIO_D(0xfd034, D_ALL);
+
+ MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
+ MMIO_D(0x2054, D_ALL);
+ MMIO_D(0x12054, D_ALL);
+ MMIO_D(0x22054, D_ALL);
+ MMIO_D(0x1a054, D_ALL);
+
+ MMIO_D(0x44070, D_ALL);
+
+ MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(0x2b00, D_BDW_PLUS);
+ MMIO_D(0x2360, D_BDW_PLUS);
+ MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(BCS_SWCTRL, D_ALL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ return 0;
+}
+
+static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+
+ MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
+ intel_vgpu_reg_master_irq_handler);
+
+ MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(0x1c134, D_BDW_PLUS);
+
+ MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0xd0)
+ MMIO_RING_F(RING_REG, 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x230)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
+ MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x234)
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x244)
+ MMIO_RING_D(RING_REG, D_BDW_PLUS);
+ MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x370)
+ MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x3a0)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
+ MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
+ MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
+ MMIO_D(0x1c054, D_BDW_PLUS);
+
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
+
+ MMIO_D(GAMTARBMODE, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x270)
+ MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+
+ MMIO_D(WM_MISC, D_BDW);
+ MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+
+ MMIO_D(0x66c00, D_BDW_PLUS);
+ MMIO_D(0x66c04, D_BDW_PLUS);
+
+ MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
+
+ MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
+
+ MMIO_D(0xfdc, D_BDW);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
+ MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+
+ MMIO_D(0xb1f0, D_BDW);
+ MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0xb100, D_BDW);
+ MMIO_D(0xb10c, D_BDW);
+ MMIO_D(0xb110, D_BDW);
+
+ MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_D(0x83a4, D_BDW);
+ MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
+
+ MMIO_D(0x8430, D_BDW);
+
+ MMIO_D(0x110000, D_BDW_PLUS);
+
+ MMIO_D(0x48400, D_BDW_PLUS);
+
+ MMIO_D(0x6e570, D_BDW_PLUS);
+ MMIO_D(0x65f10, D_BDW_PLUS);
+
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+ MMIO_D(0x2248, D_BDW);
+
+ return 0;
+}
+
+static int init_skl_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
+ MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL);
+ MMIO_D(0x45520, D_SKL);
+ MMIO_D(0x46000, D_SKL);
+ MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL);
+ MMIO_D(0x6C048, D_SKL);
+ MMIO_D(0x6C050, D_SKL);
+ MMIO_D(0x6C044, D_SKL);
+ MMIO_D(0x6C04C, D_SKL);
+ MMIO_D(0x6C054, D_SKL);
+ MMIO_D(0x6c058, D_SKL);
+ MMIO_D(0x6c05c, D_SKL);
+ MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL);
+ MMIO_D(0x71380, D_SKL);
+ MMIO_D(0x72380, D_SKL);
+ MMIO_D(0x7039c, D_SKL);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL);
+ MMIO_D(0x8f004, D_SKL);
+ MMIO_D(0x8f034, D_SKL);
+
+ MMIO_D(0xb11c, D_SKL);
+
+ MMIO_D(0x51000, D_SKL);
+ MMIO_D(0x6c00c, D_SKL);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL);
+ MMIO_D(0x20e0, D_SKL);
+ MMIO_D(0x20ec, D_SKL);
+
+ /* TRTT */
+ MMIO_D(0x4de0, D_SKL);
+ MMIO_D(0x4de4, D_SKL);
+ MMIO_D(0x4de8, D_SKL);
+ MMIO_D(0x4dec, D_SKL);
+ MMIO_D(0x4df0, D_SKL);
+ MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+
+ MMIO_D(0x45008, D_SKL);
+
+ MMIO_D(0x46430, D_SKL);
+
+ MMIO_D(0x46520, D_SKL);
+
+ MMIO_D(0xc403c, D_SKL);
+ MMIO_D(0xb004, D_SKL);
+ MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
+
+ MMIO_D(0x65900, D_SKL);
+ MMIO_D(0x1082c0, D_SKL);
+ MMIO_D(0x4068, D_SKL);
+ MMIO_D(0x67054, D_SKL);
+ MMIO_D(0x6e560, D_SKL);
+ MMIO_D(0x6e554, D_SKL);
+ MMIO_D(0x2b20, D_SKL);
+ MMIO_D(0x65f00, D_SKL);
+ MMIO_D(0x65f08, D_SKL);
+ MMIO_D(0x320f0, D_SKL);
+
+ MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_D(0x70034, D_SKL);
+ MMIO_D(0x71034, D_SKL);
+ MMIO_D(0x72034, D_SKL);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+
+ MMIO_D(0x44500, D_SKL);
+ return 0;
+}
+
+/**
+ * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
+ * @gvt: GVT device
+ * @offset: register offset
+ *
+ * This function is used to find the MMIO information entry in the hash table.
+ *
+ * Returns:
+ * Pointer to the MMIO information entry, or NULL if it does not exist.
+ */
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ struct intel_gvt_mmio_info *e;
+
+ WARN_ON(!IS_ALIGNED(offset, 4));
+
+ hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+ if (e->offset == offset)
+ return e;
+ }
+ return NULL;
+}
+
+/**
+ * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the MMIO
+ * information table of the GVT device.
+ *
+ */
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct intel_gvt_mmio_info *e;
+ int i;
+
+ hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
+ kfree(e);
+
+ vfree(gvt->mmio.mmio_attribute);
+ gvt->mmio.mmio_attribute = NULL;
+}
+
+/**
+ * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage to set up the MMIO
+ * information table for the GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+ if (!gvt->mmio.mmio_attribute)
+ return -ENOMEM;
+
+ ret = init_generic_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ intel_gvt_clean_mmio_info(gvt);
+ return ret;
+}
+
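+/*
+ * Note: mmio_attribute (allocated in intel_gvt_setup_mmio_info above) keeps
+ * one flag byte per 32-bit register, which is why the helpers below index it
+ * with "offset >> 2".
+ */
+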
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO can be accessed by GPU commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO as accessed by a GPU command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |=
+ F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has a mode mask in its upper 16 bits, false otherwise.
+ *
+ */
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] &
+ F_MODE_MASK;
+}
+
+/**
+ * intel_vgpu_default_mmio_read - default MMIO read handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_default_mmio_write - default MMIO write handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 254df8bf1f35..30e543f5a703 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_HYPERCALL_H_
@@ -30,6 +39,23 @@
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
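+	/*
+	 * Services a hypervisor backend (MPT module) provides: host
+	 * setup/teardown, vGPU attach/detach, MSI injection, address
+	 * translation, guest memory access, write-protected page handling
+	 * and MMIO trap-area setup.
+	 */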
+ int (*host_init)(struct device *dev, void *gvt, const void *ops);
+ void (*host_exit)(struct device *dev, void *gvt);
+ int (*attach_vgpu)(void *vgpu, unsigned long *handle);
+ void (*detach_vgpu)(unsigned long handle);
+ int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
+ unsigned long (*from_virt_to_mfn)(void *p);
+ int (*set_wp_page)(unsigned long handle, u64 gfn);
+ int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+ int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
+ unsigned long mfn, unsigned int nr, bool map);
+ int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
+ bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
new file mode 100644
index 000000000000..f7be02ac4be1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/* common offset among interrupt control registers */
+#define regbase_to_isr(base) (base)
+#define regbase_to_imr(base) (base + 0x4)
+#define regbase_to_iir(base) (base + 0x8)
+#define regbase_to_ier(base) (base + 0xC)
+
+#define iir_to_regbase(iir) (iir - 0x8)
+#define ier_to_regbase(ier) (ier - 0xC)
+
+#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
+#define get_irq_info(irq, e) (irq->events[e].info)
+
+#define irq_to_gvt(irq) \
+ container_of(irq, struct intel_gvt, irq)
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info);
+
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+ [RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
+ [RCS_DEBUG] = "Render EU debug from SVG",
+ [RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
+ [RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
+ [RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
+ [RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
+ [RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
+ [RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
+
+ [VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
+ [VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
+ [VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
+ [VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
+ [VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
+ [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
+ [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
+ [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
+ [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
+ [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
+
+ [BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
+	[BCS_MMIO_SYNC_FLUSH] = "Blitter MMIO sync flush status",
+ [BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
+ [BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
+ [BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
+ [BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
+
+ [VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
+ [VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
+
+ [PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
+ [PIPE_A_CRC_ERR] = "Pipe A CRC error",
+ [PIPE_A_CRC_DONE] = "Pipe A CRC done",
+ [PIPE_A_VSYNC] = "Pipe A vsync",
+ [PIPE_A_LINE_COMPARE] = "Pipe A line compare",
+ [PIPE_A_ODD_FIELD] = "Pipe A odd field",
+ [PIPE_A_EVEN_FIELD] = "Pipe A even field",
+ [PIPE_A_VBLANK] = "Pipe A vblank",
+ [PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
+ [PIPE_B_CRC_ERR] = "Pipe B CRC error",
+ [PIPE_B_CRC_DONE] = "Pipe B CRC done",
+ [PIPE_B_VSYNC] = "Pipe B vsync",
+ [PIPE_B_LINE_COMPARE] = "Pipe B line compare",
+ [PIPE_B_ODD_FIELD] = "Pipe B odd field",
+ [PIPE_B_EVEN_FIELD] = "Pipe B even field",
+ [PIPE_B_VBLANK] = "Pipe B vblank",
+ [PIPE_C_VBLANK] = "Pipe C vblank",
+ [DPST_PHASE_IN] = "DPST phase in event",
+ [DPST_HISTOGRAM] = "DPST histogram event",
+ [GSE] = "GSE",
+ [DP_A_HOTPLUG] = "DP A Hotplug",
+ [AUX_CHANNEL_A] = "AUX Channel A",
+ [PERF_COUNTER] = "Performance counter",
+ [POISON] = "Poison",
+ [GTT_FAULT] = "GTT fault",
+ [PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
+ [PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
+ [PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
+ [SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
+ [SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
+ [SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
+
+ [PCU_THERMAL] = "PCU Thermal Event",
+ [PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
+
+ [FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
+ [AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
+ [AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
+ [FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
+ [AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
+ [AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
+ [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
+ [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
+ [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
+	[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
+ [GMBUS] = "Gmbus",
+ [SDVO_B_HOTPLUG] = "SDVO B hotplug",
+ [CRT_HOTPLUG] = "CRT Hotplug",
+ [DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
+ [DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
+ [DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
+ [AUX_CHANNEL_B] = "AUX Channel B",
+ [AUX_CHANNEL_C] = "AUX Channel C",
+ [AUX_CHANNEL_D] = "AUX Channel D",
+ [AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
+ [AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
+ [AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
+
+ [INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
+};
+
+static inline struct intel_gvt_irq_info *regbase_to_irq_info(
+ struct intel_gvt *gvt,
+ unsigned int reg)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ int i;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
+ return irq->info[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IMR register bit change
+ * behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, masked, unmasked;
+ u32 imr = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IMR %x with val %x\n",
+ reg, imr);
+
+ gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly masked/unmasked bits */
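+	/*
+	 * Worked example: with an old vIMR of 0x0f and a guest write of 0x3c,
+	 * changed = 0x33, masked = 0x30 (bits newly masked) and
+	 * unmasked = 0x03 (bits newly unmasked).
+	 */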
+ changed = vgpu_vreg(vgpu, reg) ^ imr;
+ masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ unmasked = masked ^ changed;
+
+ gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
+ changed, masked, unmasked);
+
+ vgpu_vreg(vgpu, reg) = imr;
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the master IRQ register on gen8+.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+ u32 virtual_ier = vgpu_vreg(vgpu, reg);
+
+ gvt_dbg_irq("write master irq reg %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+
+ /*
+ * GEN8_MASTER_IRQ is a special irq register,
+ * only bit 31 is allowed to be modified
+ * and treated as an IER bit.
+ */
+ ier &= GEN8_MASTER_IRQ_CONTROL;
+ virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) |= ier;
+
+	/* figure out newly enabled/disabled bits */
+ changed = virtual_ier ^ ier;
+ enabled = (virtual_ier & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IER register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ struct intel_gvt_irq_info *info;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IER %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+
+	/* figure out newly enabled/disabled bits */
+ changed = vgpu_vreg(vgpu, reg) ^ ier;
+ enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+ vgpu_vreg(vgpu, reg) = ier;
+
+ info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IIR register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
+ iir_to_regbase(reg));
+ u32 iir = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ vgpu_vreg(vgpu, reg) &= ~iir;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+ return 0;
+}
+
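+/*
+ * Cascade map: bit <up_irq_bit> of the upstream group's register summarizes
+ * the downstream group's bits selected by <down_irq_bitmask>, e.g. master
+ * IRQ bit 16 aggregates the whole DE pipe A register.
+ */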
+static struct intel_gvt_irq_map gen8_irq_map[] = {
+ { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
+ { -1, -1, ~0 },
+};
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ struct intel_gvt_irq_map *map = irq->irq_map;
+ struct intel_gvt_irq_info *up_irq_info = NULL;
+ u32 set_bits = 0;
+ u32 clear_bits = 0;
+ int bit;
+ u32 val = vgpu_vreg(vgpu,
+ regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
+ & vgpu_vreg(vgpu,
+ regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
+
+ if (!info->has_upstream_irq)
+ return;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ if (info->group != map->down_irq_group)
+ continue;
+
+ if (!up_irq_info)
+ up_irq_info = irq->info[map->up_irq_group];
+ else
+ WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+
+ bit = map->up_irq_bit;
+
+ if (val & map->down_irq_bitmask)
+ set_bits |= (1 << bit);
+ else
+ clear_bits |= (1 << bit);
+ }
+
+ WARN_ON(!up_irq_info);
+
+ if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
+ u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
+
+ vgpu_vreg(vgpu, isr) &= ~clear_bits;
+ vgpu_vreg(vgpu, isr) |= set_bits;
+ } else {
+ u32 iir = regbase_to_iir(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+ u32 imr = regbase_to_imr(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+
+ vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
+ }
+
+ if (up_irq_info->has_upstream_irq)
+ update_upstream_irq(vgpu, up_irq_info);
+}
+
+static void init_irq_map(struct intel_gvt_irq *irq)
+{
+ struct intel_gvt_irq_map *map;
+ struct intel_gvt_irq_info *up_info, *down_info;
+ int up_bit;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ up_info = irq->info[map->up_irq_group];
+ up_bit = map->up_irq_bit;
+ down_info = irq->info[map->down_irq_group];
+
+ set_bit(up_bit, up_info->downstream_irq_bitmap);
+ down_info->has_upstream_irq = true;
+
+ gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
+ up_info->group, up_bit,
+ down_info->group, map->down_irq_bitmask);
+ }
+}
+
+/* =======================vEvent injection===================== */
+static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+{
+ return intel_gvt_hypervisor_inject_msi(vgpu);
+}
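+	/*
+	 * Each RING_REG define/#undef pair below registers a per-ring register
+	 * at a fixed offset from the ring base (0xd0 is handled by
+	 * ring_reset_ctl_write, 0x230 by elsp_mmio_write), plus the same
+	 * register on the second BSD ring explicitly.
+	 */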
+
+static void propagate_event(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq_info *info;
+ unsigned int reg_base;
+ int bit;
+
+ info = get_irq_info(irq, event);
+ if (WARN_ON(!info))
+ return;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ bit = irq->events[event].bit;
+
+ if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_imr(reg_base)))) {
+ gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
+ bit, irq_name[event], vgpu->id);
+ set_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_iir(reg_base)));
+ }
+}
+
+/* =======================vEvent Handlers===================== */
+static void handle_default_event_virt(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ if (!vgpu->irq.irq_warn_once[event]) {
+ gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
+ vgpu->id, event, irq_name[event]);
+ vgpu->irq.irq_warn_once[event] = true;
+ }
+ propagate_event(irq, event, vgpu);
+}
+
+/* =====================GEN specific logic======================= */
+/* GEN8 interrupt routines. */
+
+#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
+static struct intel_gvt_irq_info gen8_##regname##_info = { \
+ .name = #regname"-IRQ", \
+ .reg_base = (regbase), \
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
+ INTEL_GVT_EVENT_RESERVED}, \
+}
+
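+/*
+ * Every bit starts out mapped to INTEL_GVT_EVENT_RESERVED here;
+ * gen8_init_irq() below assigns the meaningful bits to real events via
+ * SET_BIT_INFO().
+ */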
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
+
+static struct intel_gvt_irq_info gvt_base_pch_info = {
+ .name = "PCH-IRQ",
+ .reg_base = SDEISR,
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
+ INTEL_GVT_EVENT_RESERVED},
+};
+
+static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ int i;
+
+ if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
+ GEN8_MASTER_IRQ_CONTROL))
+ return;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ struct intel_gvt_irq_info *info = irq->info[i];
+ u32 reg_base;
+
+ if (!info->has_upstream_irq)
+ continue;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
+ & vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
+ update_upstream_irq(vgpu, info);
+ }
+
+ if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
+ & ~GEN8_MASTER_IRQ_CONTROL)
+ inject_virtual_interrupt(vgpu);
+}
+
+static void gen8_init_irq(
+ struct intel_gvt_irq *irq)
+{
+ struct intel_gvt *gvt = irq_to_gvt(irq);
+
+#define SET_BIT_INFO(s, b, e, i) \
+ do { \
+ s->events[e].bit = b; \
+ s->events[e].info = s->info[i]; \
+ s->info[i]->bit_to_event[b] = e;\
+ } while (0)
+
+#define SET_IRQ_GROUP(s, g, i) \
+ do { \
+ s->info[g] = i; \
+ (i)->group = g; \
+ set_bit(g, s->irq_info_bitmap); \
+ } while (0)
+
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
+
+ /* GEN8 level 2 interrupts. */
+
+ /* GEN8 interrupt GT0 events */
+ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ /* GEN8 interrupt GT1 events */
+ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
+
+ if (HAS_BSD2(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
+ INTEL_GVT_IRQ_INFO_GT1);
+ }
+
+ /* GEN8 interrupt GT3 events */
+ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
+
+ SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ /* GEN8 interrupt DE PORT events */
+ SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ /* GEN8 interrupt DE MISC events */
+ SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
+
+ /* PCH events */
+ SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+
+ if (IS_BROADWELL(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ }
+
+ /* GEN8 interrupt PCU events */
+ SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
+ SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
+}
+
+static struct intel_gvt_irq_ops gen8_irq_ops = {
+ .init_irq = gen8_init_irq,
+ .check_pending_irq = gen8_check_pending_irq,
+};
+
+/**
+ * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
+ * @vgpu: a vGPU
+ * @event: interrupt event
+ *
+ * This function is used to trigger a virtual interrupt event for a vGPU.
+ * The caller provides the event to be triggered; the framework itself
+ * emulates the corresponding IRQ register bit change.
+ *
+ */
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq *irq = &gvt->irq;
+ gvt_event_virt_handler_t handler;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+
+ handler = get_event_virt_handler(irq, event);
+ WARN_ON(!handler);
+
+ handler(irq, event, vgpu);
+
+ ops->check_pending_irq(vgpu);
+}
+
+static void init_events(
+ struct intel_gvt_irq *irq)
+{
+ int i;
+
+ for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
+ irq->events[i].info = NULL;
+ irq->events[i].v_handler = handle_default_event_virt;
+ }
+}
+
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+ struct intel_gvt_vblank_timer *vblank_timer;
+ struct intel_gvt_irq *irq;
+ struct intel_gvt *gvt;
+
+ vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
+ irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
+ gvt = container_of(irq, struct intel_gvt, irq);
+
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+ return HRTIMER_RESTART;
+}
+
+/**
+ * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the GVT-g
+ * IRQ emulation subsystem.
+ *
+ */
+void intel_gvt_clean_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+}
+
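+/* 16000000 ns == 16 ms, i.e. roughly a 60 Hz vblank cadence */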
+#define VBLNAK_TIMER_PERIOD 16000000
+
+/**
+ * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver loading stage to initialize the GVT-g
+ * emulation subsystem.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
+
+ gvt_dbg_core("init irq framework\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
+ } else {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ /* common event initialization */
+ init_events(irq);
+
+ /* gen specific initialization */
+ irq->ops->init_irq(irq);
+
+ init_irq_map(irq);
+
+ hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vblank_timer->timer.function = vblank_timer_fn;
+ vblank_timer->period = VBLNAK_TIMER_PERIOD;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
new file mode 100644
index 000000000000..5313fb1b33e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#ifndef _GVT_INTERRUPT_H_
+#define _GVT_INTERRUPT_H_
+
+enum intel_gvt_event_type {
+ RCS_MI_USER_INTERRUPT = 0,
+ RCS_DEBUG,
+ RCS_MMIO_SYNC_FLUSH,
+ RCS_CMD_STREAMER_ERR,
+ RCS_PIPE_CONTROL,
+ RCS_L3_PARITY_ERR,
+ RCS_WATCHDOG_EXCEEDED,
+ RCS_PAGE_DIRECTORY_FAULT,
+ RCS_AS_CONTEXT_SWITCH,
+ RCS_MONITOR_BUFF_HALF_FULL,
+
+ VCS_MI_USER_INTERRUPT,
+ VCS_MMIO_SYNC_FLUSH,
+ VCS_CMD_STREAMER_ERR,
+ VCS_MI_FLUSH_DW,
+ VCS_WATCHDOG_EXCEEDED,
+ VCS_PAGE_DIRECTORY_FAULT,
+ VCS_AS_CONTEXT_SWITCH,
+
+ VCS2_MI_USER_INTERRUPT,
+ VCS2_MI_FLUSH_DW,
+ VCS2_AS_CONTEXT_SWITCH,
+
+ BCS_MI_USER_INTERRUPT,
+ BCS_MMIO_SYNC_FLUSH,
+ BCS_CMD_STREAMER_ERR,
+ BCS_MI_FLUSH_DW,
+ BCS_PAGE_DIRECTORY_FAULT,
+ BCS_AS_CONTEXT_SWITCH,
+
+ VECS_MI_USER_INTERRUPT,
+ VECS_MI_FLUSH_DW,
+ VECS_AS_CONTEXT_SWITCH,
+
+ PIPE_A_FIFO_UNDERRUN,
+ PIPE_B_FIFO_UNDERRUN,
+ PIPE_A_CRC_ERR,
+ PIPE_B_CRC_ERR,
+ PIPE_A_CRC_DONE,
+ PIPE_B_CRC_DONE,
+ PIPE_A_ODD_FIELD,
+ PIPE_B_ODD_FIELD,
+ PIPE_A_EVEN_FIELD,
+ PIPE_B_EVEN_FIELD,
+ PIPE_A_LINE_COMPARE,
+ PIPE_B_LINE_COMPARE,
+ PIPE_C_LINE_COMPARE,
+ PIPE_A_VBLANK,
+ PIPE_B_VBLANK,
+ PIPE_C_VBLANK,
+ PIPE_A_VSYNC,
+ PIPE_B_VSYNC,
+ PIPE_C_VSYNC,
+ PRIMARY_A_FLIP_DONE,
+ PRIMARY_B_FLIP_DONE,
+ PRIMARY_C_FLIP_DONE,
+ SPRITE_A_FLIP_DONE,
+ SPRITE_B_FLIP_DONE,
+ SPRITE_C_FLIP_DONE,
+
+ PCU_THERMAL,
+ PCU_PCODE2DRIVER_MAILBOX,
+
+ DPST_PHASE_IN,
+ DPST_HISTOGRAM,
+ GSE,
+ DP_A_HOTPLUG,
+ AUX_CHANNEL_A,
+ PERF_COUNTER,
+ POISON,
+ GTT_FAULT,
+ ERROR_INTERRUPT_COMBINED,
+
+ FDI_RX_INTERRUPTS_TRANSCODER_A,
+ AUDIO_CP_CHANGE_TRANSCODER_A,
+ AUDIO_CP_REQUEST_TRANSCODER_A,
+ FDI_RX_INTERRUPTS_TRANSCODER_B,
+ AUDIO_CP_CHANGE_TRANSCODER_B,
+ AUDIO_CP_REQUEST_TRANSCODER_B,
+ FDI_RX_INTERRUPTS_TRANSCODER_C,
+ AUDIO_CP_CHANGE_TRANSCODER_C,
+ AUDIO_CP_REQUEST_TRANSCODER_C,
+ ERR_AND_DBG,
+ GMBUS,
+ SDVO_B_HOTPLUG,
+ CRT_HOTPLUG,
+ DP_B_HOTPLUG,
+ DP_C_HOTPLUG,
+ DP_D_HOTPLUG,
+ AUX_CHANNEL_B,
+ AUX_CHANNEL_C,
+ AUX_CHANNEL_D,
+ AUDIO_POWER_STATE_CHANGE_B,
+ AUDIO_POWER_STATE_CHANGE_C,
+ AUDIO_POWER_STATE_CHANGE_D,
+
+ INTEL_GVT_EVENT_RESERVED,
+ INTEL_GVT_EVENT_MAX,
+};
+
+struct intel_gvt_irq;
+struct intel_gvt;
+
+typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
+
+struct intel_gvt_irq_ops {
+ void (*init_irq)(struct intel_gvt_irq *irq);
+ void (*check_pending_irq)(struct intel_vgpu *vgpu);
+};
+
+/* the list of physical interrupt control register groups */
+enum intel_gvt_irq_type {
+ INTEL_GVT_IRQ_INFO_GT,
+ INTEL_GVT_IRQ_INFO_DPY,
+ INTEL_GVT_IRQ_INFO_PCH,
+ INTEL_GVT_IRQ_INFO_PM,
+
+ INTEL_GVT_IRQ_INFO_MASTER,
+ INTEL_GVT_IRQ_INFO_GT0,
+ INTEL_GVT_IRQ_INFO_GT1,
+ INTEL_GVT_IRQ_INFO_GT2,
+ INTEL_GVT_IRQ_INFO_GT3,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_A,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_B,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_C,
+ INTEL_GVT_IRQ_INFO_DE_PORT,
+ INTEL_GVT_IRQ_INFO_DE_MISC,
+ INTEL_GVT_IRQ_INFO_AUD,
+ INTEL_GVT_IRQ_INFO_PCU,
+
+ INTEL_GVT_IRQ_INFO_MAX,
+};
+
+#define INTEL_GVT_IRQ_BITWIDTH 32
+
+/* device specific interrupt bit definitions */
+struct intel_gvt_irq_info {
+ char *name;
+ i915_reg_t reg_base;
+ enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+ unsigned long warned;
+ int group;
+ DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+ bool has_upstream_irq;
+};
+
+/* per-event information */
+struct intel_gvt_event_info {
+ int bit; /* map to register bit */
+ int policy; /* forwarding policy */
+ struct intel_gvt_irq_info *info; /* register info */
+ gvt_event_virt_handler_t v_handler; /* for v_event */
+};
+
+struct intel_gvt_irq_map {
+ int up_irq_group;
+ int up_irq_bit;
+ int down_irq_group;
+ u32 down_irq_bitmask;
+};
+
+struct intel_gvt_vblank_timer {
+ struct hrtimer timer;
+ u64 period;
+};
+
+/* structure containing device specific IRQ state */
+struct intel_gvt_irq {
+ struct intel_gvt_irq_ops *ops;
+ struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
+ DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
+ struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ struct intel_gvt_irq_map *irq_map;
+ struct intel_gvt_vblank_timer vblank_timer;
+};
+
+int intel_gvt_init_irq(struct intel_gvt *gvt);
+void intel_gvt_clean_irq(struct intel_gvt *gvt);
+
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event);
+
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+
+int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
+int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
+int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
+
+#endif /* _GVT_INTERRUPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
new file mode 100644
index 000000000000..dc0365033157
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -0,0 +1,597 @@
+/*
+ * KVMGT - the implementation of Intel mediated pass-through framework for KVM
+ *
+ * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Jike Song <jike.song@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/uuid.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
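+/*
+ * Placeholder pin/unpin helpers: they do not pin or unpin anything yet and
+ * simply return 0.
+ */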
+static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
+ long npage, int prot, unsigned long *phys_pfn)
+{
+ return 0;
+}
+static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
+ long npage)
+{
+ return 0;
+}
+
+static const struct intel_gvt_ops *intel_gvt_ops;
+
+
+/* helper macros copied from vfio-pci */
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
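+/*
+ * Example of the encoding above (illustrative only): with a 40-bit shift,
+ * VFIO_PCI_INDEX_TO_OFFSET(1) == 1ULL << 40, and VFIO_PCI_OFFSET_TO_INDEX()
+ * together with VFIO_PCI_OFFSET_MASK recover the region index and the
+ * offset within that region from a single file offset.
+ */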
+
+struct vfio_region {
+ u32 type;
+ u32 subtype;
+ size_t size;
+ u32 flags;
+};
+
+struct kvmgt_pgfn {
+ gfn_t gfn;
+ struct hlist_node hnode;
+};
+
+struct kvmgt_guest_info {
+ struct kvm *kvm;
+ struct intel_vgpu *vgpu;
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
+
+struct gvt_dma {
+ struct rb_node node;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+};
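+/*
+ * Per-vGPU gfn -> pfn cache entry, kept in an rb-tree ordered by gfn.
+ * Sketch of the intended lookup path (see kvmgt_gfn_to_pfn() below):
+ * gvt_cache_find() first, then kvmgt_pin_pages() + gvt_cache_add()
+ * on a miss.
+ */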
+
+static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct rb_node *node = vgpu->vdev.cache.rb_node;
+ struct gvt_dma *ret = NULL;
+
+ while (node) {
+ struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else {
+ ret = itr;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct gvt_dma *entry;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find(vgpu, gfn);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+
+ return entry == NULL ? 0 : entry->pfn;
+}
+
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+{
+ struct gvt_dma *new, *itr;
+ struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+
+ new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->gfn = gfn;
+ new->pfn = pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, node);
+
+ if (gfn == itr->gfn)
+ goto out;
+ else if (gfn < itr->gfn)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &vgpu->vdev.cache);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+
+out:
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ kfree(new);
+}
+
+static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
+ struct gvt_dma *entry)
+{
+ rb_erase(&entry->node, &vgpu->vdev.cache);
+ kfree(entry);
+}
+
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct device *dev = vgpu->vdev.mdev;
+ struct gvt_dma *this;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ this = __gvt_cache_find(vgpu, gfn);
+ if (!this) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+ }
+
+ pfn = this->pfn;
+ WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ __gvt_cache_remove_entry(vgpu, this);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.cache = RB_ROOT;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_destroy(struct intel_vgpu *vgpu)
+{
+ struct gvt_dma *dma;
+ struct rb_node *node = NULL;
+ struct device *dev = vgpu->vdev.mdev;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while ((node = rb_first(&vgpu->vdev.cache))) {
+ dma = rb_entry(node, struct gvt_dma, node);
+ pfn = dma->pfn;
+
+ kvmgt_unpin_pages(dev, &pfn, 1);
+ __gvt_cache_remove_entry(vgpu, dma);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static struct attribute *type_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group *intel_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = type_attrs;
+ intel_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = intel_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = intel_vgpu_type_groups[i];
+ kfree(group);
+ }
+}
+
+static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+{
+ hash_init(info->ptable);
+}
+
+static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+{
+ struct kvmgt_pgfn *p;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static struct kvmgt_pgfn *
+__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p, *res = NULL;
+
+ hash_for_each_possible(info->ptable, p, hnode, gfn) {
+ if (gfn == p->gfn) {
+ res = p;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ return !!p;
+}
+
+static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ return;
+
+ p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ if (WARN(!p, "gfn: 0x%llx\n", gfn))
+ return;
+
+ p->gfn = gfn;
+ hash_add(info->ptable, &p->hnode, gfn);
+}
+
+static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ if (p) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+ if (!intel_gvt_init_vgpu_type_groups(gvt))
+ return -EFAULT;
+
+ intel_gvt_ops = ops;
+
+ /* MDEV is not yet available */
+ return -ENODEV;
+}
+
+static void kvmgt_host_exit(struct device *dev, void *gvt)
+{
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+}
+
+static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_add(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (!kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
+{
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
+ (void *)val, len);
+}
+
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ int i;
+ gfn_t gfn;
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
+ if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
+static bool kvmgt_check_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char s[12];
+ unsigned int *i;
+
+ eax = KVM_CPUID_SIGNATURE;
+ ebx = ecx = edx = 0;
+
+ asm volatile ("cpuid"
+ : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ :
+ : "cc", "memory");
+ i = (unsigned int *)s;
+ i[0] = ebx;
+ i[1] = ecx;
+ i[2] = edx;
+
+ return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
+}
+
+/**
+ * NOTE:
+ * It's actually impossible to check whether we are running in a KVM host,
+ * since the "KVM host" is simply native. So we only detect the guest here.
+ */
+static int kvmgt_detect_host(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+ return -ENODEV;
+ }
+#endif
+ return kvmgt_check_guest() ? -ENODEV : 0;
+}
+
+static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static void kvmgt_detach_vgpu(unsigned long handle)
+{
+ /* nothing to do here */
+}
+
+static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct intel_vgpu *vgpu = info->vgpu;
+
+ if (vgpu->vdev.msi_trigger)
+ return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+
+ return false;
+}
+
+static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+{
+ unsigned long pfn;
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ int rc;
+
+ pfn = gvt_cache_find(info->vgpu, gfn);
+ if (pfn != 0)
+ return pfn;
+
+ rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (rc != 1) {
+ gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
+ return 0;
+ }
+
+ gvt_cache_add(info->vgpu, gfn, pfn);
+ return pfn;
+}
+
+static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+{
+ unsigned long pfn;
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ pfn = kvmgt_gfn_to_pfn(handle, gfn);
+ if (!pfn)
+ return NULL;
+
+ return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
+}
+
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
+{
+ void *hva = NULL;
+
+ hva = kvmgt_gpa_to_hva(handle, gpa);
+ if (!hva)
+ return -EFAULT;
+
+ if (write)
+ memcpy(hva, buf, len);
+ else
+ memcpy(buf, hva, len);
+
+ return 0;
+}
+
+static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+}
+
+static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+}
+
+static unsigned long kvmgt_virt_to_pfn(void *addr)
+{
+ return PFN_DOWN(__pa(addr));
+}
+
+struct intel_gvt_mpt kvmgt_mpt = {
+ .detect_host = kvmgt_detect_host,
+ .host_init = kvmgt_host_init,
+ .host_exit = kvmgt_host_exit,
+ .attach_vgpu = kvmgt_attach_vgpu,
+ .detach_vgpu = kvmgt_detach_vgpu,
+ .inject_msi = kvmgt_inject_msi,
+ .from_virt_to_mfn = kvmgt_virt_to_pfn,
+ .set_wp_page = kvmgt_write_protect_add,
+ .unset_wp_page = kvmgt_write_protect_remove,
+ .read_gpa = kvmgt_read_gpa,
+ .write_gpa = kvmgt_write_gpa,
+ .gfn_to_mfn = kvmgt_gfn_to_pfn,
+};
+EXPORT_SYMBOL_GPL(kvmgt_mpt);
+
+static int __init kvmgt_init(void)
+{
+ return 0;
+}
+
+static void __exit kvmgt_exit(void)
+{
+}
+
+module_init(kvmgt_init);
+module_exit(kvmgt_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
new file mode 100644
index 000000000000..09c9450a1946
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ *
+ * Returns:
+ * The MMIO offset of the given GPA
+ */
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
+{
+ u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
+ ~GENMASK(3, 0);
+ return gpa - gttmmio_gpa;
+}
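+/*
+ * Example (with an assumed BAR value, not taken from real hardware): if the
+ * guest programs PCI BAR 0 to 0xf0000000, a guest physical address of
+ * 0xf0002000 translates to MMIO offset 0x2000.
+ */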
+
+#define reg_is_mmio(gvt, reg) \
+ (reg >= 0 && reg < gvt->device_info.mmio_size)
+
+#define reg_is_gtt(gvt, reg) \
+ (reg >= gvt->device_info.gtt_start_offset \
+ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+
+/**
+ * intel_vgpu_emulate_mmio_read - emulate MMIO read
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page read error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ goto err;
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+ ret = mmio->read(vgpu, offset, p_data, bytes);
+ } else
+ ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+ if (ret)
+ goto err;
+
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_mmio_write - emulate MMIO write
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ u32 old_vreg = 0, old_sreg = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = gp->handler(gp, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack)
+ gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ u64 ro_mask = mmio->ro_mask;
+
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ old_vreg = vgpu_vreg(vgpu, offset);
+ old_sreg = vgpu_sreg(vgpu, offset);
+ }
+
+ if (!ro_mask) {
+ ret = mmio->write(vgpu, offset, p_data, bytes);
+ } else {
+ /* Protect RO bits like HW */
+ u64 data = 0;
+
+ /* all register bits are RO. */
+ if (ro_mask == ~(u64)0) {
+ gvt_err("vgpu%d: try to write RO reg %x\n",
+ vgpu->id, offset);
+ ret = 0;
+ goto out;
+ }
+ /* keep the RO bits in the virtual register */
+ memcpy(&data, p_data, bytes);
+ data &= ~mmio->ro_mask;
+ data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
+ ret = mmio->write(vgpu, offset, &data, bytes);
+ }
+
+ /* the higher 16 bits of mode ctl regs are mask bits for change */
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+ | (vgpu_vreg(vgpu, offset) & mask);
+ vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+ | (vgpu_sreg(vgpu, offset) & mask);
+ }
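+ /*
+ * Illustrative example of the mode-mask handling above, assuming the
+ * handler stored the written value in the vreg: writing 0x00010001
+ * updates bit 0 because mask bit 16 selects it, while writing
+ * 0x00000001 leaves bit 0 unchanged since its mask bit is clear.
+ */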
+ } else
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ if (ret)
+ goto err;
+out:
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
new file mode 100644
index 000000000000..87d5b5e366a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_MMIO_H_
+#define _GVT_MMIO_H_
+
+struct intel_gvt;
+struct intel_vgpu;
+
+#define D_SNB (1 << 0)
+#define D_IVB (1 << 1)
+#define D_HSW (1 << 2)
+#define D_BDW (1 << 3)
+#define D_SKL (1 << 4)
+
+#define D_GEN9PLUS (D_SKL)
+#define D_GEN8PLUS (D_BDW | D_SKL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_SKL_PLUS (D_SKL)
+#define D_BDW_PLUS (D_BDW | D_SKL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
+#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+
+struct intel_gvt_mmio_info {
+ u32 offset;
+ u32 size;
+ u32 length;
+ u32 addr_mask;
+ u64 ro_mask;
+ u32 device;
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ u32 addr_range;
+ struct hlist_node node;
+};
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
+bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
+
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
+ typeof(reg) __reg = reg; \
+ u32 *offset = (u32 *)&__reg; \
+ *offset; \
+})
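+/*
+ * Usage note (illustrative): INTEL_GVT_MMIO_OFFSET() just reads the first
+ * u32 of its argument, so it accepts either a plain u32 offset or an
+ * i915_reg_t-style wrapper whose first field is the register offset.
+ */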
+
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
+
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 03601e3ffa7c..1af5830c0a56 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_MPT_H_
@@ -46,4 +55,254 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
+/**
+ * intel_gvt_hypervisor_host_init - init GVT-g host side
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+static inline int intel_gvt_hypervisor_host_init(struct device *dev,
+ void *gvt, const void *ops)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_init)
+ return 0;
+
+ return intel_gvt_host.mpt->host_init(dev, gvt, ops);
+}
+
+/**
+ * intel_gvt_hypervisor_host_exit - exit GVT-g host side
+ */
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
+ void *gvt)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_exit)
+ return;
+
+ intel_gvt_host.mpt->host_exit(dev, gvt);
+}
+
+/**
+ * intel_gvt_hypervisor_attach_vgpu - call the hypervisor to initialize
+ * vGPU-related state inside the hypervisor.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->attach_vgpu)
+ return 0;
+
+ return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_detach_vgpu - call the hypervisor to release
+ * vGPU-related state inside the hypervisor.
+ */
+static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->detach_vgpu)
+ return;
+
+ intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+}
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
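+/*
+ * Example (assuming msi_cap_offset == 0x90, a hypothetical value): the
+ * control word is read from config offset 0x92, the address from 0x94 and
+ * the data from 0x98 in intel_gvt_hypervisor_inject_msi() below.
+ */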
+
+/**
+ * intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
+{
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+ int ret;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate an MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+ return -EINVAL;
+
+ gvt_dbg_irq("vgpu%d: inject msi address %x data %x\n", vgpu->id, addr,
+ data);
+
+ ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
+ * @p: host kernel virtual address
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
+{
+ return intel_gvt_host.mpt->from_virt_to_mfn(p);
+}
+
+/**
+ * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = true;
+ atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (!p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = false;
+ atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+}
+
+/**
+ * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ * @mfn: host PFN
+ * @nr: number of PFNs
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long mfn, unsigned int nr,
+ bool map)
+{
+ /* an MPT implementation could have MMIO mapped elsewhere */
+ if (!intel_gvt_host.mpt->map_gfn_to_mfn)
+ return 0;
+
+ return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
+ map);
+}
+
+/**
+ * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
+ * @vgpu: a vGPU
+ * @start: the beginning of the guest physical address region
+ * @end: the end of the guest physical address region
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_trap_area(
+ struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
+{
+ /* an MPT implementation could have MMIO trapped elsewhere */
+ if (!intel_gvt_host.mpt->set_trap_area)
+ return 0;
+
+ return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
new file mode 100644
index 000000000000..d2a0fbc896c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
+ u8 *buf;
+ int i;
+
+ if (WARN((vgpu_opregion(vgpu)->va),
+ "vgpu%d: opregion has been initialized already.\n",
+ vgpu->id))
+ return -EINVAL;
+
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
+ GFP_DMA32 | __GFP_ZERO,
+ INTEL_GVT_OPREGION_PORDER);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return -ENOMEM;
+
+ memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
+ INTEL_GVT_OPREGION_SIZE);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
+ /* for an unknown reason, the value in the LID field is incorrect,
+ * which blocks the Windows guest, so work around it by forcing
+ * it to "OPEN"
+ */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+ return 0;
+}
+
+static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
+{
+ u64 mfn;
+ int i, ret;
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ + i * PAGE_SIZE);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to get MFN from VA\n");
+ return -EINVAL;
+ }
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+ vgpu_opregion(vgpu)->gfn[i],
+ mfn, 1, map);
+ if (ret) {
+ gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_opregion - clean up the vGPU opregion emulation state
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return;
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ map_vgpu_opregion(vgpu, false);
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ INTEL_GVT_OPREGION_PORDER);
+
+ vgpu_opregion(vgpu)->va = NULL;
+ }
+}
+
+/**
+ * intel_vgpu_init_opregion - initialize the vGPU opregion emulation state
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ int ret;
+
+ gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ gvt_dbg_core("emulate opregion from kernel\n");
+
+ ret = init_vgpu_opregion(vgpu, gpa);
+ if (ret)
+ return ret;
+
+ ret = map_vgpu_opregion(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_opregion - clean up host opregion related state
+ * @gvt: a GVT device
+ *
+ */
+void intel_gvt_clean_opregion(struct intel_gvt *gvt)
+{
+ memunmap(gvt->opregion.opregion_va);
+ gvt->opregion.opregion_va = NULL;
+}
+
+/**
+ * intel_gvt_init_opregion - initialize host opregion related state
+ * @gvt: a GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_opregion(struct intel_gvt *gvt)
+{
+ gvt_dbg_core("init host opregion\n");
+
+ pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
+ &gvt->opregion.opregion_pa);
+
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
+ if (!gvt->opregion.opregion_va) {
+ gvt_err("fail to map host opregion\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define GVT_OPREGION_FUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
+ OPREGION_SCIC_FUNC_SHIFT; \
+ __ret; \
+ })
+
+#define GVT_OPREGION_SUBFUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
+ OPREGION_SCIC_SUBFUNC_SHIFT; \
+ __ret; \
+ })
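+/*
+ * Example decode using the masks above: a SCIC value of 0x08 yields func 4
+ * ("Get BIOS Data") and subfunc 0 ("Supported Calls"), which
+ * querying_capabilities() below treats as a capability query.
+ */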
+
+static const char *opregion_func_name(u32 func)
+{
+ const char *name = NULL;
+
+ switch (func) {
+ case 0 ... 3:
+ case 5:
+ case 7 ... 15:
+ name = "Reserved";
+ break;
+
+ case 4:
+ name = "Get BIOS Data";
+ break;
+
+ case 6:
+ name = "System BIOS Callbacks";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+}
+
+static const char *opregion_subfunc_name(u32 subfunc)
+{
+ const char *name = NULL;
+
+ switch (subfunc) {
+ case 0:
+ name = "Supported Calls";
+ break;
+
+ case 1:
+ name = "Requested Callbacks";
+ break;
+
+ case 2 ... 3:
+ case 8 ... 9:
+ name = "Reserved";
+ break;
+
+ case 5:
+ name = "Boot Display";
+ break;
+
+ case 6:
+ name = "TV-Standard/Video-Connector";
+ break;
+
+ case 7:
+ name = "Internal Graphics";
+ break;
+
+ case 10:
+ name = "Spread Spectrum Clocks";
+ break;
+
+ case 11:
+ name = "Get AKSV";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+};
+
+static bool querying_capabilities(u32 scic)
+{
+ u32 func, subfunc;
+
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+
+ if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * intel_vgpu_emulate_opregion_request - emulate an OpRegion request
+ * @vgpu: a vGPU
+ * @swsci: SWSCI request
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
+{
+ u32 *scic, *parm;
+ u32 func, subfunc;
+
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+
+ if (!(swsci & SWSCI_SCI_SELECT)) {
+ gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ return 0;
+ }
+ /* ignore non 0->1 transitions */
+ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
+ & SWSCI_SCI_TRIGGER) ||
+ !(swsci & SWSCI_SCI_TRIGGER)) {
+ return 0;
+ }
+
+ func = GVT_OPREGION_FUNC(*scic);
+ subfunc = GVT_OPREGION_SUBFUNC(*scic);
+ if (!querying_capabilities(*scic)) {
+ gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ " subfunc \"%s\"\n",
+ vgpu->id,
+ opregion_func_name(func),
+ opregion_subfunc_name(subfunc));
+ /*
+ * emulate exit status of function call, '0' means
+ * "failure, generic, unsupported or unknown cause"
+ */
+ *scic &= ~OPREGION_SCIC_EXIT_MASK;
+ return 0;
+ }
+
+ *scic = 0;
+ *parm = 0;
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
new file mode 100644
index 000000000000..0dfe789d8f02
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_REG_H
+#define _GVT_REG_H
+
+#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
+
+#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+
+#define INTEL_GVT_PCI_SWSCI 0xe8
+#define SWSCI_SCI_SELECT (1 << 15)
+#define SWSCI_SCI_TRIGGER 1
+
+#define INTEL_GVT_PCI_OPREGION 0xfc
+
+#define INTEL_GVT_OPREGION_CLID 0x1AC
+#define INTEL_GVT_OPREGION_SCIC 0x200
+#define OPREGION_SCIC_FUNC_MASK 0x1E
+#define OPREGION_SCIC_FUNC_SHIFT 1
+#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
+#define OPREGION_SCIC_SUBFUNC_SHIFT 8
+#define OPREGION_SCIC_EXIT_MASK 0xE0
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
+#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
+#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
+#define INTEL_GVT_OPREGION_PARM 0x204
+
+#define INTEL_GVT_OPREGION_PAGES 2
+#define INTEL_GVT_OPREGION_PORDER 1
+#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+
+#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
+
+#define _REG_VECS_EXCC 0x1A028
+#define _REG_VCS2_EXCC 0x1c028
+
+#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+
+#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
+ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+
+#define FORCEWAKE_RENDER_GEN9_REG 0xa278
+#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
+#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
+#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
+#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
+#define FORCEWAKE_ACK_HSW_REG 0x130044
+
+#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
+#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
+#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
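+/*
+ * Example (assuming a 4KB GTT_PAGE_SIZE): a ring CTL value whose size field
+ * (bits 12..20) equals 1 gives _RING_CTL_BUF_SIZE() == 0x1000 + 0x1000,
+ * i.e. a two-page ring buffer.
+ */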
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
new file mode 100644
index 000000000000..44136b1f3aab
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct render_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
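+/*
+ * Note on "mask": for masked registers the save/restore paths below write
+ * value | (mask << 16), so the upper 16 bits act as write-enable bits that
+ * select which of the lower bits actually change.
+ */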
+
+static struct render_mmio gen8_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+};
+
+static struct render_mmio gen9_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {RCS, _MMIO(0x40e0), 0, false},
+ {RCS, _MMIO(0x40e4), 0, false},
+ {RCS, _MMIO(0x2580), 0xffff, true},
+ {RCS, _MMIO(0x7014), 0xffff, true},
+ {RCS, _MMIO(0x20ec), 0xffff, false},
+ {RCS, _MMIO(0xb118), 0, false},
+ {RCS, _MMIO(0xe100), 0xffff, true},
+ {RCS, _MMIO(0xe180), 0xffff, true},
+ {RCS, _MMIO(0xe184), 0xffff, true},
+ {RCS, _MMIO(0xe188), 0xffff, true},
+ {RCS, _MMIO(0xe194), 0xffff, true},
+ {RCS, _MMIO(0x4de0), 0, false},
+ {RCS, _MMIO(0x4de4), 0, false},
+ {RCS, _MMIO(0x4de8), 0, false},
+ {RCS, _MMIO(0x4dec), 0, false},
+ {RCS, _MMIO(0x4df0), 0, false},
+ {RCS, _MMIO(0x4df4), 0, false},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+
+ {VCS2, _MMIO(0x1c028), 0xffff, false},
+
+ {VECS, _MMIO(0x1a028), 0xffff, false},
+};
+
+static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
+static u32 gen9_render_mocs_L3[32];
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * we need to hold a forcewake when invalidating RCS TLB caches,
+ * otherwise the device can enter the RC6 state and interrupt the
+ * invalidation process
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg(vgpu, regs[ring_id]) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs_L3[i] = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ vgpu_vreg(vgpu, offset) = I915_READ(offset);
+ I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ load_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ mmio->value = I915_READ(mmio->reg);
+ if (mmio->mask)
+ v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
+ else
+ v = vgpu_vreg(vgpu, mmio->reg);
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("load reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+ handle_tlb_pending_event(vgpu, ring_id);
+}
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ restore_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+
+ if (mmio->mask) {
+ vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
+ v = mmio->value | (mmio->mask << 16);
+ } else
+ v = mmio->value;
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("restore reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
new file mode 100644
index 000000000000..dac1a3cc458b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef __GVT_RENDER_H__
+#define __GVT_RENDER_H__
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 000000000000..678b0be85376
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (!list_empty(workload_q_head(vgpu, i)))
+ return true;
+ }
+
+ return false;
+}
+
+static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* no target to schedule */
+ if (!scheduler->next_vgpu)
+ return;
+
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /*
+ * after the flag is set, the workload dispatch thread will
+ * stop dispatching workloads for the current vgpu
+ */
+ scheduler->need_reschedule = true;
+
+ /* still have incomplete workloads? */
+ for_each_engine(engine, gvt->dev_priv, i) {
+ if (scheduler->current_workload[i]) {
+ gvt_dbg_sched("still have running workload\n");
+ return;
+ }
+ }
+
+ gvt_dbg_sched("switch to next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /* switch current vgpu */
+ scheduler->current_vgpu = scheduler->next_vgpu;
+ scheduler->next_vgpu = NULL;
+
+ scheduler->need_reschedule = false;
+
+ /* wake up workload dispatch thread */
+ for_each_engine(engine, gvt->dev_priv, i)
+ wake_up(&scheduler->waitq[i]);
+}
+
+struct tbs_vgpu_data {
+ struct list_head list;
+ struct intel_vgpu *vgpu;
+ /* put some per-vgpu sched stats here */
+};
+
+struct tbs_sched_data {
+ struct intel_gvt *gvt;
+ struct delayed_work work;
+ unsigned long period;
+ struct list_head runq_head;
+};
+
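+/* default time slice of the time-based scheduler: ~1ms expressed in jiffies */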
+#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+
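+/*
+ * Time-based scheduler tick: walk the runqueue, starting after the current
+ * vgpu, pick the first vgpu with a pending workload as the next target and
+ * re-arm the work as long as anything is left to schedule.
+ */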
+static void tbs_sched_func(struct work_struct *work)
+{
+ struct tbs_sched_data *sched_data = container_of(work,
+ struct tbs_sched_data, work.work);
+ struct tbs_vgpu_data *vgpu_data;
+
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ struct intel_vgpu *vgpu = NULL;
+ struct list_head *pos, *head;
+
+ mutex_lock(&gvt->lock);
+
+ /* no vgpu on the runqueue, or a target has already been chosen */
+ if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ head = &vgpu_data->list;
+ } else {
+ gvt_dbg_sched("no current vgpu search from q head\n");
+ head = &sched_data->runq_head;
+ }
+
+ /* search a vgpu with pending workload */
+ list_for_each(pos, head) {
+ if (pos == &sched_data->runq_head)
+ continue;
+
+ vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ if (!vgpu_has_pending_workload(vgpu_data->vgpu))
+ continue;
+
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
+
+ if (vgpu) {
+ scheduler->next_vgpu = vgpu;
+ gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+ }
+out:
+ if (scheduler->next_vgpu) {
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+ try_to_schedule_next_vgpu(gvt);
+ }
+
+ /*
+ * still have a vgpu on the runqueue, or the last schedule
+ * hasn't finished because of a running workload
+ */
+ if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ mutex_unlock(&gvt->lock);
+}
+
+static int tbs_sched_init(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+
+ struct tbs_sched_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->runq_head);
+ INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ data->period = GVT_DEFAULT_TIME_SLICE;
+ data->gvt = gvt;
+
+ scheduler->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+ struct tbs_sched_data *data = scheduler->sched_data;
+
+ cancel_delayed_work(&data->work);
+ kfree(data);
+ scheduler->sched_data = NULL;
+}
+
+static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->vgpu = vgpu;
+ INIT_LIST_HEAD(&data->list);
+
+ vgpu->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
+{
+ kfree(vgpu->sched_data);
+ vgpu->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ if (!list_empty(&vgpu_data->list))
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ list_del_init(&vgpu_data->list);
+}
+
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+ .init = tbs_sched_init,
+ .clean = tbs_sched_clean,
+ .init_vgpu = tbs_sched_init_vgpu,
+ .clean_vgpu = tbs_sched_clean_vgpu,
+ .start_schedule = tbs_sched_start_schedule,
+ .stop_schedule = tbs_sched_stop_schedule,
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops = &tbs_schedule_ops;
+
+ return gvt->scheduler.sched_ops->init(gvt);
+}
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops->clean(gvt);
+}
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
+{
+ return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+}
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
+{
+ vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+}
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+}
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+
+ scheduler->sched_ops->stop_schedule(vgpu);
+
+ if (scheduler->next_vgpu == vgpu)
+ scheduler->next_vgpu = NULL;
+
+ if (scheduler->current_vgpu == vgpu) {
+ /* stop workload dispatching */
+ scheduler->need_reschedule = true;
+ scheduler->current_vgpu = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 000000000000..bb8b9097e41a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
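+/*
+ * Hooks a scheduling policy must implement. Only the time-based policy in
+ * sched_policy.c is wired up by intel_gvt_init_sched_policy() so far.
+ */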
+struct intel_gvt_sched_policy_ops {
+ int (*init)(struct intel_gvt *gvt);
+ void (*clean)(struct intel_gvt *gvt);
+ int (*init_vgpu)(struct intel_vgpu *vgpu);
+ void (*clean_vgpu)(struct intel_vgpu *vgpu);
+ void (*start_schedule)(struct intel_vgpu *vgpu);
+ void (*stop_schedule)(struct intel_vgpu *vgpu);
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
new file mode 100644
index 000000000000..4db242250235
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
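+/*
+ * The ring context stores PDP registers starting at PDP3, so write the
+ * shadow PDP entries in reverse order.
+ */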
+static void set_context_pdp_root_pointer(
+ struct execlist_ring_context *ring_context,
+ u32 pdp[8])
+{
+ struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ pdp_pair[i].val = pdp[7 - i];
+}
+
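+/*
+ * Copy the guest ring context pages and selected context registers into the
+ * shadow context object so the workload can be submitted on the host side.
+ */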
+static int populate_shadow_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *dst;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("Invalid guest context descriptor\n");
+ return -EINVAL;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+ if (ring_id == RCS) {
+ COPY_REG(bb_per_ctx_ptr);
+ COPY_REG(rcs_indirect_ctx);
+ COPY_REG(rcs_indirect_ctx_offset);
+ }
+#undef COPY_REG
+
+ set_context_pdp_root_pointer(shadow_ring_context,
+ workload->shadow_mm->shadow_page_table);
+
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+ return 0;
+}
+
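+/*
+ * Shadow context status notifier: load the vgpu render MMIO state when the
+ * shadow context is scheduled in and restore it when it is scheduled out,
+ * then wake up waiters on the status change.
+ */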
+static int shadow_context_status_change(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu, shadow_ctx_notifier_block);
+ struct drm_i915_gem_request *req =
+ (struct drm_i915_gem_request *)data;
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[req->engine->id];
+
+ switch (action) {
+ case INTEL_CONTEXT_SCHEDULE_IN:
+ intel_gvt_load_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 1);
+ break;
+ case INTEL_CONTEXT_SCHEDULE_OUT:
+ intel_gvt_restore_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 0);
+ break;
+ default:
+ WARN_ON(1);
+ return NOTIFY_OK;
+ }
+ wake_up(&workload->shadow_ctx_status_wq);
+ return NOTIFY_OK;
+}
+
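+/*
+ * Allocate an i915 request for the shadow context, scan and shadow the guest
+ * ring buffer and wa_ctx, populate the shadow context and submit the request.
+ */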
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_gem_request *rq;
+ int ret;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto out;
+
+ ret = populate_shadow_context(workload);
+ if (ret)
+ goto out;
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto out;
+ }
+
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+
+ ret = 0;
+ workload->dispatched = true;
+out:
+ if (ret)
+ workload->status = ret;
+
+ if (!IS_ERR_OR_NULL(rq))
+ i915_add_request_no_flush(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static struct intel_vgpu_workload *pick_next_workload(
+ struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+
+ mutex_lock(&gvt->lock);
+
+ /*
+ * bail out if there is no current vgpu, if it is about to be
+ * scheduled out, or if it has no pending workload
+ */
+ if (!scheduler->current_vgpu) {
+ gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ goto out;
+ }
+
+ if (scheduler->need_reschedule) {
+ gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ goto out;
+ }
+
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
+ gvt_dbg_sched("ring id %d stop - no available workload\n",
+ ring_id);
+ goto out;
+ }
+
+ /*
+ * still have a current workload; the workload dispatcher may have
+ * failed to submit it for some reason, so resubmit it.
+ */
+ if (scheduler->current_workload[ring_id]) {
+ workload = scheduler->current_workload[ring_id];
+ gvt_dbg_sched("ring id %d still have current workload %p\n",
+ ring_id, workload);
+ goto out;
+ }
+
+ /*
+ * pick a workload as the current workload. Once it is set, the
+ * schedule policy routines will wait for it to finish before
+ * scheduling out a vgpu.
+ */
+ scheduler->current_workload[ring_id] = container_of(
+ workload_q_head(scheduler->current_vgpu, ring_id)->next,
+ struct intel_vgpu_workload, list);
+
+ workload = scheduler->current_workload[ring_id];
+
+ gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+
+ atomic_inc(&workload->vgpu->running_workload_num);
+out:
+ mutex_unlock(&gvt->lock);
+ return workload;
+}
+
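+/*
+ * Counterpart of populate_shadow_context(): write the shadow context pages
+ * and registers back to guest memory after the workload completes.
+ */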
+static void update_guest_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *src;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context descriptor\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ src = kmap(page);
+ intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+ GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
+
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap(page);
+}
+
+static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload;
+ int event;
+
+ mutex_lock(&gvt->lock);
+
+ workload = scheduler->current_workload[ring_id];
+
+ if (!workload->status && !workload->vgpu->resetting) {
+ wait_event(workload->shadow_ctx_status_wq,
+ !atomic_read(&workload->shadow_ctx_active));
+
+ update_guest_context(workload);
+
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(workload->vgpu,
+ event);
+ }
+
+ gvt_dbg_sched("ring id %d complete workload %p status %d\n",
+ ring_id, workload, workload->status);
+
+ scheduler->current_workload[ring_id] = NULL;
+
+ atomic_dec(&workload->vgpu->running_workload_num);
+
+ list_del_init(&workload->list);
+ workload->complete(workload);
+
+ wake_up(&scheduler->workload_complete_wq);
+ mutex_unlock(&gvt->lock);
+}
+
+struct workload_thread_param {
+ struct intel_gvt *gvt;
+ int ring_id;
+};
+
+static DEFINE_MUTEX(scheduler_mutex);
+
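+/*
+ * Per-ring dispatcher thread: wait for a workload, dispatch it to i915, wait
+ * for the request to complete and then retire the workload.
+ */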
+static int workload_thread(void *priv)
+{
+ struct workload_thread_param *p = (struct workload_thread_param *)priv;
+ struct intel_gvt *gvt = p->gvt;
+ int ring_id = p->ring_id;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+ long lret;
+ int ret;
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ kfree(p);
+
+ gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+
+ while (!kthread_should_stop()) {
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
+ break;
+
+ mutex_lock(&scheduler_mutex);
+
+ gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
+ workload->ring_id, workload,
+ workload->vgpu->id);
+
+ intel_runtime_pm_get(gvt->dev_priv);
+
+ gvt_dbg_sched("ring id %d will dispatch workload %p\n",
+ workload->ring_id, workload);
+
+ if (need_force_wake)
+ intel_uncore_forcewake_get(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ mutex_lock(&gvt->lock);
+ ret = dispatch_workload(workload);
+ mutex_unlock(&gvt->lock);
+
+ if (ret) {
+ gvt_err("fail to dispatch workload, skip\n");
+ goto complete;
+ }
+
+ gvt_dbg_sched("ring id %d wait workload %p\n",
+ workload->ring_id, workload);
+
+ lret = i915_wait_request(workload->req,
+ 0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+ workload->status = lret;
+ gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
+ }
+
+complete:
+ gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ workload, workload->status);
+
+ complete_current_workload(gvt, ring_id);
+
+ if (workload->req)
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (need_force_wake)
+ intel_uncore_forcewake_put(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(gvt->dev_priv);
+
+ mutex_unlock(&scheduler_mutex);
+
+ }
+ return 0;
+}
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ gvt_dbg_sched("wait vgpu idle\n");
+
+ wait_event(scheduler->workload_complete_wq,
+ !atomic_read(&vgpu->running_workload_num));
+ }
+}
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ int i;
+
+ gvt_dbg_core("clean workload scheduler\n");
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ if (scheduler->thread[i]) {
+ kthread_stop(scheduler->thread[i]);
+ scheduler->thread[i] = NULL;
+ }
+ }
+}
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct workload_thread_param *param = NULL;
+ int ret;
+ int i;
+
+ gvt_dbg_core("init workload scheduler\n");
+
+ init_waitqueue_head(&scheduler->workload_complete_wq);
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ /* check ring mask at init time */
+ if (!HAS_ENGINE(gvt->dev_priv, i))
+ continue;
+
+ init_waitqueue_head(&scheduler->waitq[i]);
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ param->gvt = gvt;
+ param->ring_id = i;
+
+ scheduler->thread[i] = kthread_run(workload_thread, param,
+ "gvt workload %d", i);
+ if (IS_ERR(scheduler->thread[i])) {
+ gvt_err("fail to create workload thread\n");
+ ret = PTR_ERR(scheduler->thread[i]);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ intel_gvt_clean_workload_scheduler(gvt);
+ kfree(param);
+ param = NULL;
+ return ret;
+}
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ /* a little hacky: directly mark the shadow context as closed */
+ vgpu->shadow_ctx->closed = true;
+ i915_gem_context_put(vgpu->shadow_ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+{
+ atomic_set(&vgpu->running_workload_num, 0);
+
+ vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ &vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(vgpu->shadow_ctx))
+ return PTR_ERR(vgpu->shadow_ctx);
+
+ vgpu->shadow_ctx->engine[RCS].initialised = true;
+
+ vgpu->shadow_ctx_notifier_block.notifier_call =
+ shadow_context_status_change;
+
+ atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
new file mode 100644
index 000000000000..3b30c28bff51
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#ifndef _GVT_SCHEDULER_H_
+#define _GVT_SCHEDULER_H_
+
+struct intel_gvt_workload_scheduler {
+ struct intel_vgpu *current_vgpu;
+ struct intel_vgpu *next_vgpu;
+ struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
+ bool need_reschedule;
+
+ wait_queue_head_t workload_complete_wq;
+ struct task_struct *thread[I915_NUM_ENGINES];
+ wait_queue_head_t waitq[I915_NUM_ENGINES];
+
+ void *sched_data;
+ struct intel_gvt_sched_policy_ops *sched_ops;
+};
+
+#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
+#define INDIRECT_CTX_SIZE_MASK 0x3f
+struct shadow_indirect_ctx {
+ struct drm_i915_gem_object *obj;
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+ void *shadow_va;
+ uint32_t size;
+};
+
+#define PER_CTX_ADDR_MASK 0xfffff000
+struct shadow_per_ctx {
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+};
+
+struct intel_shadow_wa_ctx {
+ struct intel_vgpu_workload *workload;
+ struct shadow_indirect_ctx indirect_ctx;
+ struct shadow_per_ctx per_ctx;
+
+};
+
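+/*
+ * One guest execlist submission, tracked from queueing through shadowing,
+ * dispatch to i915 and completion.
+ */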
+struct intel_vgpu_workload {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+ struct drm_i915_gem_request *req;
+ /* has this workload been dispatched to i915? */
+ bool dispatched;
+ int status;
+
+ struct intel_vgpu_mm *shadow_mm;
+
+ /* different submission models may need different handlers */
+ int (*prepare)(struct intel_vgpu_workload *);
+ int (*complete)(struct intel_vgpu_workload *);
+ struct list_head list;
+
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ void *shadow_ring_buffer_va;
+
+ /* execlist context information */
+ struct execlist_ctx_descriptor_format ctx_desc;
+ struct execlist_ring_context *ring_context;
+ unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ bool restore_inhibit;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+ bool emulate_schedule_in;
+ atomic_t shadow_ctx_active;
+ wait_queue_head_t shadow_ctx_status_wq;
+ u64 ring_context_gpa;
+
+ /* shadow batch buffer */
+ struct list_head shadow_bb;
+ struct intel_shadow_wa_ctx wa_ctx;
+};
+
+/* An Intel shadow batch buffer is an i915 gem object */
+struct intel_shadow_bb_entry {
+ struct list_head list;
+ struct drm_i915_gem_object *obj;
+ void *va;
+ unsigned long len;
+ void *bb_start_cmd_va;
+};
+
+#define workload_q_head(vgpu, ring_id) \
+ (&(vgpu->workload_q_head[ring_id]))
+
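+/* queue a workload on its vgpu/ring queue and wake the dispatcher thread */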
+#define queue_workload(workload) do { \
+ list_add_tail(&workload->list, \
+ workload_q_head(workload->vgpu, workload->ring_id)); \
+ wake_up(&workload->vgpu->gvt-> \
+ scheduler.waitq[workload->ring_id]); \
+} while (0)
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
new file mode 100644
index 000000000000..53a2d10cf3f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2011-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GVT_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+#include <asm/tsc.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gvt
+
+TRACE_EVENT(spt_alloc,
+ TP_PROTO(int id, void *spt, int type, unsigned long mfn,
+ unsigned long gpt_gfn),
+
+ TP_ARGS(id, spt, type, mfn, gpt_gfn),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ __field(unsigned long, mfn)
+ __field(unsigned long, gpt_gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ __entry->mfn = mfn;
+ __entry->gpt_gfn = gpt_gfn;
+ ),
+
+ TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type,
+ __entry->mfn,
+ __entry->gpt_gfn)
+);
+
+TRACE_EVENT(spt_free,
+ TP_PROTO(int id, void *spt, int type),
+
+ TP_ARGS(id, spt, type),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ ),
+
+ TP_printk("VM%u [free] spt %p type %d\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type)
+);
+
+#define MAX_BUF_LEN 256
+
+TRACE_EVENT(gma_index,
+ TP_PROTO(const char *prefix, unsigned long gma,
+ unsigned long index),
+
+ TP_ARGS(prefix, gma, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gma_translate,
+ TP_PROTO(int id, char *type, int ring_id, int pt_level,
+ unsigned long gma, unsigned long gpa),
+
+ TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, pt_level, gma, gpa);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_refcount,
+ TP_PROTO(int id, char *action, void *spt, int before, int after),
+
+ TP_ARGS(id, action, spt, before, after),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p before %d -> after %d\n",
+ id, action, spt, before, after);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_change,
+ TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
+ int type),
+
+ TP_ARGS(id, action, spt, gfn, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p gfn 0x%lx type %d\n",
+ id, action, spt, gfn, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gpt_change,
+ TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, tag, spt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
+ id, tag, spt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_change,
+ TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
+
+ TP_ARGS(id, tag, page_id, gpt, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos %s] page id %d gpt %p type %d\n",
+ id, tag, page_id, gpt, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_sync,
+ TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, page_id, gpt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
+ id, page_id, gpt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
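+/*
+ * Trace a scanned command: a header with VM/ring/ip and handler costs,
+ * followed by the command dwords dumped in groups of 8/4/2/1.
+ */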
+#define MAX_CMD_STR_LEN 256
+TRACE_EVENT(gvt_command,
+ TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
+
+ TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
+
+ TP_STRUCT__entry(
+ __field(u8, vm_id)
+ __field(u8, ring_id)
+ __field(int, i)
+ __array(char, tmp_buf, MAX_CMD_STR_LEN)
+ __array(char, cmd_str, MAX_CMD_STR_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vm_id = vm_id;
+ __entry->ring_id = ring_id;
+ __entry->cmd_str[0] = '\0';
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ __entry->i = 0;
+ while (cmd_len > 0) {
+ if (cmd_len >= 8) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
+ cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
+ __entry->i += 8;
+ cmd_len -= 8;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 4) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
+ __entry->i += 4;
+ cmd_len -= 4;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 2) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
+ __entry->i += 2;
+ cmd_len -= 2;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len == 1) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
+ __entry->i += 1;
+ cmd_len -= 1;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ }
+ }
+ strcat(__entry->cmd_str, "\n");
+ ),
+
+ TP_printk("%s", __entry->cmd_str)
+);
+#endif /* _GVT_TRACE_H_ */
+
+/* This part must be out of protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gvt/trace_points.c
index 91315557e421..a3deed692b9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Intel Corporation
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -16,30 +16,21 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
*
*/
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
-
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
-{
- struct dma_buf *dma_buf;
-
- if (obj->base.dma_buf)
- dma_buf = obj->base.dma_buf;
- else if (obj->base.import_attach)
- dma_buf = obj->base.import_attach->dmabuf;
- else
- return NULL;
-
- return dma_buf->resv;
-}
+#include "trace.h"
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
new file mode 100644
index 000000000000..536d2b9d5777
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
+
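+/*
+ * Allocate (or reset) the virtual (vreg) and shadow (sreg) MMIO blocks and
+ * seed both from the saved firmware MMIO snapshot (gvt->firmware.mmio).
+ */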
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ if (vgpu->mmio.vreg)
+ memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
+ else {
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+ }
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ return 0;
+}
+
+static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!param->primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show the guest that there isn't any stolen memory. */
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the upper 32 bits of the BARs and let the guest assign new values
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
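+/*
+ * Publish the vGPU's aperture/hidden GM ranges and fence count to the guest
+ * through the PVINFO page, so the guest driver can balloon accordingly.
+ */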
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
+{
+ /* setup the ballooning information */
+ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_aperture_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_aperture_sz(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_hidden_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_hidden_sz(vgpu);
+
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+
+ gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
+ gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
+ vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
+ gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
+ vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
+ gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
+
+ WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+}
+
+/**
+ * intel_gvt_init_vgpu_types - initialize the vGPU type list
+ * @gvt: GVT device
+ *
+ * Initialize the vGPU type list based on the available resources.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
+{
+ unsigned int num_types;
+ unsigned int i, low_avail;
+ unsigned int min_low;
+
+ /* A vGPU type name is defined as GVTg_Vx_y, where 'x' encodes the
+ * physical GPU generation and 'y' is the maximum number of vGPU
+ * instances a user can create on one physical GPU for this type.
+ *
+ * Depending on the physical SKU resources, one might see vGPU types
+ * such as GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of
+ * vGPU can be created on the same physical GPU, depending on the
+ * available resources. Each vGPU type carries "avail_instance" to
+ * indicate how many vGPU instances can still be created for this
+ * type.
+ *
+ * Currently use a static size here, as types are initialized early.
+ */
+ low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ num_types = 4;
+
+ gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ GFP_KERNEL);
+ if (!gvt->types)
+ return -ENOMEM;
+
+ min_low = MB_TO_BYTES(32);
+ for (i = 0; i < num_types; ++i) {
+ if (low_avail / min_low == 0)
+ break;
+ gvt->types[i].low_gm_size = min_low;
+ gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].fence = 4;
+ gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ if (IS_GEN8(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V4_%u",
+ gvt->types[i].max_instance);
+ else if (IS_GEN9(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V5_%u",
+ gvt->types[i].max_instance);
+
+ min_low <<= 1;
+ gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance,
+ gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+
+ gvt->num_types = i;
+ return 0;
+}
+
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
+{
+ kfree(gvt->types);
+}
+
+static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+{
+ int i;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+ unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+
+ /* Should depend on the maximum hw resource size, but keep the
+ * static config for now.
+ */
+ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
+ high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
+ fence_min = fence_avail / gvt->types[i].fence;
+ total_min = min(min(low_gm_min, high_gm_min), fence_min);
+ gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
+ total_min);
+
+ gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ vgpu->active = false;
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_clean_sched_policy(vgpu);
+ intel_vgpu_clean_gvt_context(vgpu);
+ intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_display(vgpu);
+ intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_clean_gtt(vgpu);
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_free_resource(vgpu);
+ clean_vgpu_mmio(vgpu);
+ vfree(vgpu);
+
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
+}
+
+static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
+ param->handle, param->low_gm_sz, param->high_gm_sz,
+ param->fence_sz);
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&gvt->lock);
+
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_vgpu;
+
+ vgpu->id = ret;
+ vgpu->handle = param->handle;
+ vgpu->gvt = gvt;
+ bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+
+ setup_vgpu_cfg_space(vgpu, param);
+
+ ret = setup_vgpu_mmio(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ ret = intel_vgpu_alloc_resource(vgpu, param);
+ if (ret)
+ goto out_clean_vgpu_mmio;
+
+ populate_pvinfo_page(vgpu);
+
+ ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
+ if (ret)
+ goto out_clean_vgpu_resource;
+
+ ret = intel_vgpu_init_gtt(vgpu);
+ if (ret)
+ goto out_detach_hypervisor_vgpu;
+
+ ret = intel_vgpu_init_display(vgpu);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_vgpu_init_execlist(vgpu);
+ if (ret)
+ goto out_clean_display;
+
+ ret = intel_vgpu_init_gvt_context(vgpu);
+ if (ret)
+ goto out_clean_execlist;
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_clean_shadow_ctx;
+
+ vgpu->active = true;
+ mutex_unlock(&gvt->lock);
+
+ return vgpu;
+
+out_clean_shadow_ctx:
+ intel_vgpu_clean_gvt_context(vgpu);
+out_clean_execlist:
+ intel_vgpu_clean_execlist(vgpu);
+out_clean_display:
+ intel_vgpu_clean_display(vgpu);
+out_clean_gtt:
+ intel_vgpu_clean_gtt(vgpu);
+out_detach_hypervisor_vgpu:
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+out_clean_vgpu_resource:
+ intel_vgpu_free_resource(vgpu);
+out_clean_vgpu_mmio:
+ clean_vgpu_mmio(vgpu);
+out_free_vgpu:
+ vfree(vgpu);
+ mutex_unlock(&gvt->lock);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @type: type of the vGPU to create
+ *
+ * This function is called when a user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type)
+{
+ struct intel_vgpu_creation_params param;
+ struct intel_vgpu *vgpu;
+
+ param.handle = 0;
+ param.primary = 1;
+ param.low_gm_sz = type->low_gm_size;
+ param.high_gm_sz = type->high_gm_size;
+ param.fence_sz = type->fence;
+
+ /* XXX current param based on MB */
+ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
+ param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+
+ vgpu = __intel_gvt_create_vgpu(gvt, &param);
+ if (IS_ERR(vgpu))
+ return vgpu;
+
+ /* recalculate the remaining available instances for each type */
+ intel_gvt_update_vgpu_types(gvt);
+
+ return vgpu;
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to reset a virtual GPU.
+ *
+ */
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
+{
+}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f82a15b..f5039f4f988f 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+ drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
@@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 27b0e34dadec..791bfc760075 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
return 0;
}
@@ -109,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
+ return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
- return obj->mapping ? 'M' : ' ';
+ return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -138,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
- enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@@ -151,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
- obj->base.write_domain);
- for_each_engine_id(engine, dev_priv, id)
- seq_printf(m, "%x ",
- i915_gem_active_get_seqno(&obj->last_read[id],
- &obj->base.dev->struct_mutex));
- seq_printf(m, "] %x %s%s%s",
- i915_gem_active_get_seqno(&obj->last_write,
- &obj->base.dev->struct_mutex),
+ obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
- obj->dirty ? " dirty" : "",
- obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ obj->mm.dirty ? " dirty" : "",
+ obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -188,18 +178,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_display || obj->fault_mappable) {
- char s[3], *t = s;
- if (obj->pin_display)
- *t++ = 'p';
- if (obj->fault_mappable)
- *t++ = 'f';
- *t = '\0';
- seq_printf(m, " (%s mappable)", s);
- }
-
- engine = i915_gem_active_get_engine(&obj->last_write,
- &dev_priv->drm.struct_mutex);
+
+ engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@@ -237,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -247,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -334,11 +314,12 @@ static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
@@ -402,23 +383,23 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- seq_printf(m, "%u objects, %zu bytes\n",
+ seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -426,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@@ -435,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -512,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@@ -566,12 +547,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+ struct intel_engine_cs *engine = work->flip_queued_req->engine;
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
- i915_gem_request_get_seqno(work->flip_queued_req),
- dev_priv->next_seqno,
+ work->flip_queued_req->global_seqno,
+ atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -607,6 +588,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int total = 0;
int ret, j;
@@ -614,7 +596,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
@@ -645,12 +627,24 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
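+/*
+ * One-line summary of a request for debugfs: global seqno, context hw id
+ * and fence seqno, priority, age in milliseconds, and the timeline name.
+ */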
+static void print_request(struct seq_file *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ rq->timeline->common->name);
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -658,29 +652,18 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link) {
- struct pid *pid = req->ctx->pid;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
- seq_printf(m, " %x @ %d: %s [%d]\n",
- req->fence.seqno,
- (int) (jiffies - req->emitted_jiffies),
- task ? task->comm : "<unknown>",
- task ? task->pid : -1);
- rcu_read_unlock();
- }
+ list_for_each_entry(req, &engine->timeline->requests, link)
+ print_request(m, req, " ");
any++;
}
@@ -701,22 +684,23 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
@@ -727,6 +711,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
@@ -743,17 +728,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
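+ /*
+ * Only poke PIPESTAT for pipes whose power well is up; the register
+ * isn't accessible while the pipe's power domain is gated.
+ */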
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -895,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
@@ -935,26 +935,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_hws_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- struct intel_engine_cs *engine;
- const u32 *hws;
- int i;
-
- engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
- hws = engine->status_page.page_addr;
- if (hws == NULL)
- return 0;
-
- for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
- seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4,
- hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
- }
- return 0;
-}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t
i915_error_state_write(struct file *filp,
@@ -1038,19 +1019,14 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+#endif
+
static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- *val = dev_priv->next_seqno;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
return 0;
}
@@ -1065,7 +1041,7 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
- ret = i915_gem_set_seqno(dev, val);
+ ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1277,15 +1253,42 @@ out:
return ret;
}
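+/*
+ * Dump a captured set of INSTDONE registers: the base register on all
+ * gens, the slice-common register on gen4+, and the per-slice/subslice
+ * sampler and row registers on gen7+.
+ */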
+static void i915_instdone_info(struct drm_i915_private *dev_priv,
+ struct seq_file *m,
+ struct intel_instdone *instdone)
+{
+ int slice;
+ int subslice;
+
+ seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
+ instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
+ instdone->slice_common);
+
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
+
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
enum intel_engine_id id;
- int j;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_printf(m, "Wedged\n");
@@ -1303,12 +1306,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev_priv, instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
@@ -1319,16 +1322,27 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
- engine->hangcheck.seqno,
- seqno[id],
- engine->last_submitted_seqno);
+ engine->hangcheck.seqno, seqno[id],
+ intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1336,18 +1350,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (engine->id == RCS) {
- seq_puts(m, "\tinstdone read =");
+ seq_puts(m, "\tinstdone read =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x", instdone[j]);
+ i915_instdone_info(dev_priv, m, &instdone);
- seq_puts(m, "\n\tinstdone accu =");
+ seq_puts(m, "\tinstdone accu =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x",
- engine->hangcheck.instdone[j]);
-
- seq_puts(m, "\n");
+ i915_instdone_info(dev_priv, m,
+ &engine->hangcheck.instdone);
}
}
@@ -1357,14 +1367,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
@@ -1372,7 +1377,6 @@ static int ironlake_drpc_info(struct seq_file *m)
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1635,10 +1639,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_GEN(dev_priv) >= 7)
+ if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
+ uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
+ BDW_FBC_COMPRESSION_MASK :
+ IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
- yesno(I915_READ(FBC_STATUS2) &
- FBC_COMPRESSION_MASK));
+ yesno(I915_READ(FBC_STATUS2) & mask));
+ }
mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
@@ -1717,6 +1724,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1730,10 +1738,10 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "self-refresh: %s\n",
- sr_enabled ? "enabled" : "disabled");
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
return 0;
}
@@ -1867,7 +1875,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.height,
fbdev_fb->base.depth,
fbdev_fb->base.bits_per_pixel,
- fbdev_fb->base.modifier[0],
+ fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
seq_putc(m, '\n');
@@ -1885,7 +1893,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel,
- fb->base.modifier[0],
+ fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
@@ -1909,6 +1917,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1935,7 +1944,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
@@ -1974,7 +1983,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));
- if (i915_gem_object_get_pages(vma->obj)) {
+ if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
@@ -1993,6 +2002,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}
+ i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}
@@ -2002,6 +2012,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
@@ -2014,7 +2025,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2022,84 +2033,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return 0;
}
-static int i915_execlists(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status;
- u32 ctx_id;
- struct list_head *cursor;
- int i, ret;
-
- if (!i915.enable_execlists) {
- seq_puts(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *head_req = NULL;
- int count = 0;
-
- seq_printf(m, "%s\n", engine->name);
-
- status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
- seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
- status, ctx_id);
-
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
- read_pointer = GEN8_CSB_READ_PTR(status_pointer);
- write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
- if (read_pointer > write_pointer)
- write_pointer += GEN8_CSB_ENTRIES;
- seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
- read_pointer, write_pointer);
-
- for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
- seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
- i, status, ctx_id);
- }
-
- spin_lock_bh(&engine->execlist_lock);
- list_for_each(cursor, &engine->execlist_queue)
- count++;
- head_req = list_first_entry_or_null(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
- spin_unlock_bh(&engine->execlist_lock);
-
- seq_printf(m, "\t%d requests in queue\n", count);
- if (head_req) {
- seq_printf(m, "\tHead request context: %u\n",
- head_req->ctx->hw_id);
- seq_printf(m, "\tHead request tail: %u\n",
- head_req->tail);
- }
-
- seq_putc(m, '\n');
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2127,12 +2060,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2172,7 +2100,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2201,14 +2128,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i;
if (!ppgtt)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2223,11 +2151,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2296,9 +2225,10 @@ out_unlock:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int count = 0;
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
@@ -2325,8 +2255,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %s [%x]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
+ seq_printf(m, "GPU busy? %s [%d requests]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@@ -2361,7 +2291,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
if (INTEL_GEN(dev_priv) >= 6 &&
dev_priv->rps.enabled &&
- dev_priv->gt.active_engines) {
+ dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2442,6 +2372,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
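+/*
+ * Summarise GuC logging activity: per-buffer (ISR, DPC, crash-dump) flush
+ * and overflow counts, the total number of flush interrupts, and how many
+ * captures were missed.
+ */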
+static void i915_guc_log_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ seq_puts(m, "\nGuC logging stats:\n");
+
+ seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_ISR_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+
+ seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_DPC_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+
+ seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+
+ seq_printf(m, "\tTotal flush interrupt count: %u\n",
+ guc->log.flush_interrupt_count);
+
+ seq_printf(m, "\tCapture miss count: %u\n",
+ guc->log.capture_miss_count);
+}
+
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
@@ -2461,7 +2417,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2504,7 +2460,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@@ -2515,6 +2471,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
i915_guc_client_info(m, dev_priv, &client);
+ i915_guc_log_info(m, dev_priv);
+
/* Add more as required ... */
return 0;
@@ -2526,10 +2484,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
int i = 0, pg;
- if (!dev_priv->guc.log_vma)
+ if (!dev_priv->guc.log.vma)
return 0;
- obj = dev_priv->guc.log_vma->obj;
+ obj = dev_priv->guc.log.vma->obj;
for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
@@ -2546,6 +2504,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
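+/*
+ * Back the new i915_guc_log_control debugfs file: reads return the current
+ * i915.guc_log_level, writes pass the requested verbosity to
+ * i915_guc_log_control() under struct_mutex with a runtime-PM reference
+ * held. Both bail with -EINVAL if no GuC log buffer has been allocated.
+ */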
+static int i915_guc_log_control_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ *val = i915.guc_log_level;
+
+ return 0;
+}
+
+static int i915_guc_log_control_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ ret = i915_guc_log_control(dev_priv, val);
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
+ i915_guc_log_control_get, i915_guc_log_control_set,
+ "%lld\n");
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2575,11 +2571,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain))
+ continue;
+
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -3004,7 +3011,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
- char *format_name;
+ struct drm_format_name_buf format_name;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3014,9 +3021,9 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- format_name = drm_get_format_name(state->fb->pixel_format);
+ drm_get_format_name(state->fb->pixel_format, &format_name);
} else {
- format_name = kstrdup("N/A", GFP_KERNEL);
+ sprintf(format_name.str, "N/A");
}
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
@@ -3032,10 +3039,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
- format_name,
+ format_name.str,
plane_rotation(state->rotation));
-
- kfree(format_name);
}
}
@@ -3121,6 +3126,146 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
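+/*
+ * New debugfs node (i915_engine_info) giving a per-engine snapshot: seqnos
+ * and hangcheck state, the first/last/active requests on the timeline,
+ * ring registers, ACTHD/BBADDR, the execlist CSB and ELSP ports (or the
+ * ppGTT registers when execlists are disabled), and any breadcrumb waiters.
+ */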
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ intel_runtime_pm_get(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ seq_printf(m, "%s\n", engine->name);
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ engine->hangcheck.score);
+
+ rcu_read_lock();
+
+ seq_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ seq_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915.enable_execlists) {
+ u32 ptr, read, write;
+ struct rb_node *rb;
+
+ seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+ read, write);
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+ seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->execlist_port[0].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[0] ");
+ else
+ seq_printf(m, "\t\tELSP[0] idle\n");
+ rq = READ_ONCE(engine->execlist_port[1].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[1] ");
+ else
+ seq_printf(m, "\t\tELSP[1] idle\n");
+ rcu_read_unlock();
+
+ spin_lock_irq(&engine->timeline->lock);
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ rq = rb_entry(rb, typeof(*rq), priotree.node);
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->timeline->lock);
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock_irq(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->lock);
+
+ seq_puts(m, "\n");
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3147,7 +3292,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
@@ -3172,22 +3317,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
- seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv) {
- for (j = 0; j < num_rings; j++)
- seq_printf(m, " 0x%08x ",
- engine->semaphore.sync_seqno[j]);
- seq_putc(m, '\n');
- }
- seq_putc(m, '\n');
-
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3236,7 +3372,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
@@ -3280,7 +3416,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -3914,8 +4050,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
int ret = 0;
@@ -3941,10 +4076,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
out:
- drm_modeset_unlock_all(dev);
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- if (ret)
- drm_atomic_state_free(state);
+ drm_modeset_unlock_all(dev);
+ drm_atomic_state_put(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -3982,10 +4116,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4056,15 +4188,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->base.state->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
@@ -4463,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
drm_modeset_lock_all(dev);
@@ -4579,7 +4711,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
if (len >= sizeof(tmp))
return -EINVAL;
@@ -4704,13 +4836,9 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
- intel_runtime_pm_get(dev_priv);
-
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
@@ -4778,10 +4906,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
- DROP_BOUND | \
- DROP_RETIRE | \
- DROP_ACTIVE)
+#define DROP_FREED 0x10
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE | \
+ DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4825,6 +4955,11 @@ i915_drop_caches_set(void *data, u64 val)
unlock:
mutex_unlock(&dev->struct_mutex);
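+ /*
+ * DROP_FREED: object freeing is now deferred via RCU and a worker, so
+ * wait out a grace period and flush the worker to make the frees
+ * happen now.
+ */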
+ if (val & DROP_FREED) {
+ synchronize_rcu();
+ flush_work(&dev_priv->mm.free_work);
+ }
+
return ret;
}
@@ -4945,22 +5080,16 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
- int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5253,10 +5382,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
- {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
- {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
@@ -5275,7 +5400,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
- {"i915_execlists", i915_execlists, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5287,6 +5411,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_engine_info", i915_engine_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -5309,7 +5434,9 @@ static const struct i915_debugfs_files {
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
+#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
@@ -5318,7 +5445,8 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops}
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_guc_log_control", &i915_guc_log_control_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bfb2efd8d4d4..445fec9c2841 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
- if (INTEL_INFO(dev)->num_pipes == 0) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
return;
}
@@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev));
+ WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
- WARN_ON(!IS_KABYLAKE(dev));
+ WARN_ON(!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev);
+ dev_priv->pch_type =
+ intel_virt_detect_pch(dev_priv);
} else
continue;
@@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
@@ -316,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = i915_gem_mmap_gtt_version();
break;
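+ /*
+ * New getparam: report whether the render engine exposes a request
+ * scheduler (engine->schedule is non-NULL).
+ */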
+ case I915_PARAM_HAS_SCHEDULER:
+ value = dev_priv->engine[RCS] &&
+ dev_priv->engine[RCS]->schedule;
+ break;
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -367,12 +378,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -399,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -413,16 +424,16 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return;
dev_priv->mchbar_need_disable = false;
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
@@ -440,7 +451,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
@@ -453,10 +464,10 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -484,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
- intel_modeset_vga_set_state(dev, state);
+ intel_modeset_vga_set_state(to_i915(dev), state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -530,40 +541,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload. Afterwards we then clean up the
- * GEM state tracking, flushing off the requests and leaving the
- * system in a known idle state.
- *
- * Note that is of the upmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that reseting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
- if (HAS_HW_CONTEXTS(dev)) {
- int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_engines(&dev_priv->drm);
+ i915_gem_context_fini(&dev_priv->drm);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
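+ /*
+ * Freed objects now go through RCU and a deferred worker; drain both
+ * before asserting that the context list really is empty.
+ */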
+ rcu_barrier();
+ flush_work(&dev_priv->mm.free_work);
- WARN_ON(!list_empty(&to_i915(dev)->context_list));
+ WARN_ON(!list_empty(&dev_priv->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -611,7 +599,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
+ ret = intel_modeset_init(dev);
+ if (ret)
+ goto cleanup_irq;
intel_guc_init(dev);
@@ -621,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
ret = intel_fbdev_init(dev);
@@ -636,7 +626,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- i915_gem_fini(dev);
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev);
drm_irq_uninstall(dev);
@@ -771,6 +763,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_HSW_EARLY_SDV(dev_priv) ||
+ IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ DRM_ERROR("This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+}
+
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
@@ -829,25 +834,24 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
+ intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+ goto err_gvt;
intel_display_crc_init(dev_priv);
intel_device_info_dump(dev_priv);
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev_priv))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+ intel_detect_preproduction_hw(dev_priv);
return 0;
+err_gvt:
+ intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -870,7 +874,7 @@ static int i915_mmio_setup(struct drm_device *dev)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
@@ -982,7 +986,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
@@ -1023,7 +1026,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1040,7 +1043,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1070,7 +1073,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@@ -1121,6 +1124,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ i915_guc_register(dev_priv);
i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1159,6 +1163,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
+ i915_guc_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1167,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
/**
* i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
@@ -1242,6 +1247,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
intel_runtime_pm_put(dev_priv);
@@ -1309,7 +1318,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1431,9 +1440,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
+ intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev);
+ i915_gem_suspend_gtt_mappings(dev_priv);
i915_save_state(dev);
@@ -1447,8 +1456,6 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_display_set_init_power(dev_priv, false);
-
intel_csr_ucode_suspend(dev_priv);
out:
@@ -1466,6 +1473,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ intel_display_set_init_power(dev_priv, false);
+
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
@@ -1507,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -1595,6 +1604,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
+ drm_kms_helper_poll_enable(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -1602,8 +1613,6 @@ static int i915_drm_resume(struct drm_device *dev)
* notifications.
* */
intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
intel_opregion_register(dev_priv);
@@ -1616,7 +1625,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
- drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1721,6 +1729,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
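+/*
+ * Used around intel_gpu_reset(): mask the irq and kill each engine's
+ * irq_tasklet so nothing races the reset, then re-enable afterwards.
+ */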
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Ensure irq handler finishes, and not run again. */
+ disable_irq(dev_priv->drm.irq);
+ for_each_engine(engine, dev_priv, id)
+ tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ enable_irq(dev_priv->drm.irq);
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -1754,7 +1778,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+ disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ enable_engines_irq(dev_priv);
+
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2240,7 +2268,6 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -2264,10 +2291,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
- intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
- }
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
return ret;
}
@@ -2282,37 +2307,18 @@ static int intel_runtime_suspend(struct device *kdev)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
- /*
- * We could deadlock here in case another thread holding struct_mutex
- * calls RPM suspend concurrently, since the RPM suspend will wait
- * first for this RPM suspend to finish. In this case the concurrent
- * RPM resume will be followed by its RPM suspend counterpart. Still
- * for consistency return -EAGAIN, which will reschedule this suspend.
- */
- if (!mutex_trylock(&dev->struct_mutex)) {
- DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
- /*
- * Bump the expiration timestamp, otherwise the suspend won't
- * be rescheduled.
- */
- pm_runtime_mark_last_busy(kdev);
-
- return -EAGAIN;
- }
-
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
- i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
@@ -2386,7 +2392,7 @@ static int intel_runtime_resume(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2410,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2420,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2495,9 +2501,7 @@ static const struct file_operations i915_driver_fops = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
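The #ifdef around .compat_ioctl can go because the header now defines i915_compat_ioctl to NULL when CONFIG_COMPAT is disabled (see the i915_drv.h hunk further down). A small sketch of the same stub-to-NULL pattern with invented names:

#include <linux/fs.h>
#include <linux/module.h>

#ifdef CONFIG_COMPAT
long my_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define my_compat_ioctl NULL	/* keeps the initializer below unconditional */
#endif

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.compat_ioctl	= my_compat_ioctl,	/* NULL when CONFIG_COMPAT=n */
};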
@@ -2577,7 +2581,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
- .gem_free_object = i915_gem_free_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8b9ee4e390c0..56002a52936d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@@ -59,9 +60,14 @@
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
+
+#include "i915_vma.h"
#include "intel_gvt.h"
@@ -70,7 +76,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160919"
+#define DRIVER_DATE "20161121"
+#define DRIVER_TIMESTAMP 1479717903
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -122,6 +129,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
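enableddisabled() complements onoff() for log messages; a hypothetical caller, with the flag passed in purely for illustration:

static void my_report_state(bool active)
{
	DRM_DEBUG_KMS("feature is %s\n", enableddisabled(active));
}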
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -182,9 +194,10 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
enum port {
+ PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
@@ -310,7 +323,7 @@ struct i915_hotplug {
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p) \
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
@@ -455,23 +468,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_i915_fence_reg {
- struct list_head link;
- struct drm_i915_private *i915;
- struct i915_vma *vma;
- int pin_count;
- int id;
- /**
- * Whether the tiling parameters for the currently
- * associated fence register have changed. Note that
- * for the purposes of tracking tiling changes we also
- * treat the unfenced register, the register slot that
- * the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- bool dirty;
-};
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -483,6 +479,7 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
@@ -490,16 +487,20 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_device *dev);
- int (*get_fifo_size)(struct drm_device *dev, int plane);
+ int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate);
- void (*initial_watermarks)(struct intel_crtc_state *cstate);
- void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
- void (*update_wm)(struct drm_crtc *crtc);
+ void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -521,7 +522,7 @@ struct drm_i915_display_funcs {
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
- void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@@ -558,6 +559,18 @@ enum forcewake_domains {
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
@@ -581,13 +594,25 @@ struct intel_uncore_funcs {
uint32_t val, bool trace);
};
+struct intel_forcewake_range {
+ u32 start;
+ u32 end;
+
+ enum forcewake_domains domains;
+};
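fw_domains_table turns the per-register forcewake checks into data. A minimal sketch of how such a table could be searched (an illustration of the idea, not the uncore code itself), assuming non-overlapping ranges:

static enum forcewake_domains
my_lookup_fw_domains(const struct intel_forcewake_range *table,
		     unsigned int count, u32 offset)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (offset >= table[i].start && offset <= table[i].end)
			return table[i].domains;
	}

	return 0;	/* no forcewake needed for this register */
}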
+
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ const struct intel_forcewake_range *fw_domains_table;
+ unsigned int fw_domains_table_entries;
+
struct intel_uncore_funcs funcs;
unsigned fifo_count;
+
enum forcewake_domains fw_domains;
+ enum forcewake_domains fw_domains_active;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
@@ -633,54 +658,55 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
- func(is_mobile) sep \
- func(is_i85x) sep \
- func(is_i915g) sep \
- func(is_i945gm) sep \
- func(is_g33) sep \
- func(hws_needs_physical) sep \
- func(is_g4x) sep \
- func(is_pineview) sep \
- func(is_broadwater) sep \
- func(is_crestline) sep \
- func(is_ivybridge) sep \
- func(is_valleyview) sep \
- func(is_cherryview) sep \
- func(is_haswell) sep \
- func(is_broadwell) sep \
- func(is_skylake) sep \
- func(is_broxton) sep \
- func(is_kabylake) sep \
- func(is_preliminary) sep \
- func(has_fbc) sep \
- func(has_psr) sep \
- func(has_runtime_pm) sep \
- func(has_csr) sep \
- func(has_resource_streamer) sep \
- func(has_rc6) sep \
- func(has_rc6p) sep \
- func(has_dp_mst) sep \
- func(has_gmbus_irq) sep \
- func(has_hw_contexts) sep \
- func(has_logical_ring_contexts) sep \
- func(has_l3_dpf) sep \
- func(has_gmch_display) sep \
- func(has_guc) sep \
- func(has_pipe_cxsr) sep \
- func(has_hotplug) sep \
- func(cursor_needs_physical) sep \
- func(has_overlay) sep \
- func(overlay_needs_physical) sep \
- func(supports_tv) sep \
- func(has_llc) sep \
- func(has_snoop) sep \
- func(has_ddi) sep \
- func(has_fpga_dbg) sep \
- func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ /* Keep is_* in chronological order */ \
+ func(is_mobile); \
+ func(is_i85x); \
+ func(is_i915g); \
+ func(is_i945gm); \
+ func(is_g33); \
+ func(is_g4x); \
+ func(is_pineview); \
+ func(is_broadwater); \
+ func(is_crestline); \
+ func(is_ivybridge); \
+ func(is_valleyview); \
+ func(is_cherryview); \
+ func(is_haswell); \
+ func(is_broadwell); \
+ func(is_skylake); \
+ func(is_broxton); \
+ func(is_kabylake); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_gmbus_irq); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_hotplug); \
+ func(has_hw_contexts); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_overlay); \
+ func(has_pipe_cxsr); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_decoupled_mmio)
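Dropping the sep argument turns the flag list into a plain x-macro whose per-entry semicolons live inside the list itself, so each user only supplies func(). A minimal sketch of the pattern with made-up flags, expanded once for bitfield declarations and once for a debug dump:

#include <linux/printk.h>
#include <linux/types.h>

#define MY_FLAGS(func) \
	func(has_foo); \
	func(has_bar)

struct my_caps {
#define DECLARE_FLAG(name) u8 name:1
	MY_FLAGS(DECLARE_FLAG);
#undef DECLARE_FLAG
};

static void my_caps_dump(const struct my_caps *caps)
{
#define PRINT_FLAG(name) pr_debug(#name ": %s\n", caps->name ? "yes" : "no")
	MY_FLAGS(PRINT_FLAG);
#undef PRINT_FLAG
}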
struct sseu_dev_info {
u8 slice_mask;
@@ -709,7 +735,9 @@ struct intel_device_info {
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -726,14 +754,15 @@ struct intel_device_info {
} color;
};
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ struct timeval boottime;
+ struct timeval uptime;
+
+ struct drm_i915_private *i915;
char error_msg[128];
bool simulated;
@@ -759,11 +788,12 @@ struct drm_i915_error_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
+ struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@@ -775,12 +805,14 @@ struct drm_i915_error_state {
struct i915_address_space *vm;
int num_requests;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
u32 last_seqno;
- u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -791,7 +823,6 @@ struct drm_i915_error_state {
u32 hws;
u32 ipeir;
u32 ipehr;
- u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -802,11 +833,13 @@ struct drm_i915_error_state {
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+ struct intel_instdone instdone;
struct drm_i915_error_object {
- int page_count;
u64 gtt_offset;
u64 gtt_size;
+ int page_count;
+ int unused;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
@@ -815,10 +848,11 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
pid_t pid;
+ u32 context;
u32 seqno;
u32 head;
u32 tail;
- } *requests;
+ } *requests, execlist[2];
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -914,6 +948,7 @@ struct i915_gem_context {
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
struct pid *pid;
+ const char *name;
struct i915_ctx_hang_stats hang_stats;
@@ -924,6 +959,7 @@ struct i915_gem_context {
/* Unique identifier for this context, used by the hw for tracking */
unsigned int hw_id;
u32 user_handle;
+ int priority; /* greater priorities are serviced first */
u32 ggtt_alignment;
@@ -972,6 +1008,9 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
@@ -1297,6 +1336,12 @@ struct i915_power_well {
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
+ /* unique identifier for this power well */
+ unsigned long id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power

+ * well specific.
+ */
unsigned long data;
const struct i915_power_well_ops *ops;
};
@@ -1334,11 +1379,22 @@ struct i915_gem_mm {
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
+ * are idle and not used by the GPU). These objects may or may
+ * not actually have any pages attached.
*/
struct list_head unbound_list;
+ /** List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /**
+ * List of objects which are pending destruction.
+ */
+ struct llist_head free_list;
+ struct work_struct free_work;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1368,7 +1424,7 @@ struct i915_gem_mm {
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
- size_t object_memory;
+ u64 object_memory;
u32 object_count;
};
@@ -1387,6 +1443,9 @@ struct i915_error_state_file_priv {
struct drm_i915_error_state *error;
};
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1620,7 +1679,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
@@ -1628,15 +1686,12 @@ struct skl_ddb_allocation {
struct skl_wm_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
- uint32_t wm_linetime[I915_MAX_PIPES];
- uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
- uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
struct skl_wm_level {
- bool plane_en[I915_MAX_PLANES];
- uint16_t plane_res_b[I915_MAX_PLANES];
- uint8_t plane_res_l[I915_MAX_PLANES];
+ bool plane_en;
+ uint16_t plane_res_b;
+ uint8_t plane_res_l;
};
/*
@@ -1664,7 +1719,6 @@ struct skl_wm_level {
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
- atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
@@ -1748,6 +1802,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
+ struct kmem_cache *dependencies;
const struct intel_device_info info;
@@ -1759,7 +1814,7 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
- struct intel_gvt gvt;
+ struct intel_gvt *gvt;
struct intel_guc guc;
@@ -1787,9 +1842,8 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
- struct intel_engine_cs engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
- u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -1814,8 +1868,10 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_irq_mask;
+ u32 pm_imr;
+ u32 pm_ier;
u32 pm_rps_events;
+ u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1892,8 +1948,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@@ -2009,13 +2065,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /*
- * The skl_wm_values structure is a bit too big for stack
- * allocation, so we keep the staging struct where we store
- * intermediate results here instead.
- */
- struct skl_wm_values skl_results;
-
/* current hardware state */
union {
struct ilk_wm_values hw;
@@ -2047,6 +2096,10 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
+ struct list_head timelines;
+ struct i915_gem_timeline global_timeline;
+ u32 active_requests;
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@@ -2054,7 +2107,6 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- unsigned int active_engines;
bool awake;
/**
@@ -2074,12 +2126,15 @@ struct drm_i915_private {
* off the idle_work.
*/
struct delayed_work idle_work;
+
+ ktime_t last_init_time;
} gt;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
- struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2158,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
- for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((id__) = (engine__)->id, \
- intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
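With engines now allocated dynamically and stored as pointers, the iterator takes an explicit id and skips NULL slots, as already seen in disable_engines_irq() earlier in this patch. A small usage sketch (the loop body is hypothetical):

static void my_print_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		DRM_DEBUG_DRIVER("engine %d present\n", id);
}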
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
@@ -2126,7 +2173,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2137,30 +2184,6 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1)
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
@@ -2182,232 +2205,6 @@ struct drm_i915_gem_object_ops {
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- /** List of VMAs backed by this object */
- struct list_head vma_list;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- struct list_head global_list;
-
- /** Used in execbuf to temporarily hold a ref */
- struct list_head obj_exec_link;
-
- struct list_head batch_pool_link;
-
- unsigned long flags;
- /**
- * This is set if the object is on the active lists (has pending
- * rendering and so a non-zero seqno), and is not set if it i s on
- * inactive (ready to be unbound) list.
- */
-#define I915_BO_ACTIVE_SHIFT 0
-#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
-#define __I915_BO_ACTIVE(bo) \
- ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
-
- /**
- * This is set if the object has been written to since last bound
- * to the GTT
- */
- unsigned int dirty:1;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * Whether the current gtt mapping needs to be mappable (and isn't just
- * mappable by accident). Track pin and fault separate for a more
- * accurate mappable working set.
- */
- unsigned int fault_mappable:1;
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned long gt_ro:1;
- unsigned int cache_level:3;
- unsigned int cache_dirty:1;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int pin_display;
-
- struct sg_table *pages;
- int pages_pin_count;
- struct get_page {
- struct scatterlist *sg;
- int last;
- } get_page;
- void *mapping;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
- unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- return to_intel_bo(drm_gem_object_lookup(file, handle));
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_reference(&obj->base);
- return obj;
-}
-
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference_unlocked(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline unsigned long
-i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
-{
- return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_active(obj);
-}
-
-static inline void
-i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline void
-i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
-{
- obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline bool
-i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
- int engine)
-{
- return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
-{
- i915_gem_object_get(vma->obj);
- return vma;
-}
-
-static inline void i915_vma_put(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- i915_gem_object_put(vma->obj);
-}
-
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2434,6 +2231,14 @@ static __always_inline struct sgt_iter {
return s;
}
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@@ -2448,9 +2253,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- return sg_is_last(sg) ? NULL :
- likely(!sg_is_chain(++sg)) ? sg :
- sg_chain_ptr(sg);
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
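____sg_next() factors out the chain hop so __sg_next() stays a single expression and the page-lookup code below can reuse it. A minimal sketch of walking every entry of a possibly chained table with the helper; "my_table" is illustrative:

#include <linux/scatterlist.h>

static unsigned long my_sg_total_length(struct sg_table *my_table)
{
	struct scatterlist *sg;
	unsigned long total = 0;

	for (sg = my_table->sgl; sg; sg = __sg_next(sg))
		total += sg->length;	/* __sg_next() returns NULL after the last entry */

	return total;
}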
/**
@@ -2574,23 +2377,19 @@ struct drm_i915_cmd_table {
int count;
};
-/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
- struct drm_i915_private *__p; \
- if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
- __p = (struct drm_i915_private *)p; \
- else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
- __p = to_i915((struct drm_device *)p); \
- else \
- BUILD_BUG(); \
- __p; \
-})
-#define INTEL_INFO(p) (&__I915__(p)->info)
-#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+static inline const struct intel_device_info *
+intel_info(const struct drm_i915_private *dev_priv)
+{
+ return &dev_priv->info;
+}
+
+#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+
+#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
/*
@@ -2598,7 +2397,7 @@ struct drm_i915_cmd_table {
*
* Use GEN_FOREVER for unbound start and or end.
*/
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2407,7 @@ struct drm_i915_cmd_table {
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
- !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+ !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
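INTEL_INFO() now only accepts a struct drm_i915_private pointer, so the old __builtin_types_compatible_p trick is gone and a wrong argument type is caught by the compiler instead of BUILD_BUG(). IS_GEN() keeps its inclusive range semantics with GEN_FOREVER as an open end; a hypothetical caller, with the workaround names made up:

static void my_apply_workarounds(struct drm_i915_private *dev_priv)
{
	/* everything from gen6 up to and including gen8 */
	if (IS_GEN(dev_priv, 6, 8))
		DRM_DEBUG_KMS("applying hypothetical gen6-gen8 workaround\n");

	/* gen9 and anything newer */
	if (IS_GEN(dev_priv, 9, GEN_FOREVER))
		DRM_DEBUG_KMS("gen9+ path, %d sprites on pipe A\n",
			      INTEL_INFO(dev_priv)->num_sprites[PIPE_A]);
}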
/*
@@ -2619,75 +2418,75 @@ struct drm_i915_cmd_table {
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
- INTEL_DEVID(dev) == 0x0152 || \
- INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev) & 0xf) == 0xb || \
- (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
+#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
+#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
+#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
+#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
+#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
+#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
+#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
+#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
+#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
+ INTEL_DEVID(dev_priv) == 0x0152 || \
+ INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
+#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
+ ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
- INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
- INTEL_DEVID(dev) == 0x1913 || \
- INTEL_DEVID(dev) == 0x1916 || \
- INTEL_DEVID(dev) == 0x1921 || \
- INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
- INTEL_DEVID(dev) == 0x1915 || \
- INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
- INTEL_DEVID(dev) == 0x5913 || \
- INTEL_DEVID(dev) == 0x5916 || \
- INTEL_DEVID(dev) == 0x5921 || \
- INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
- INTEL_DEVID(dev) == 0x5915 || \
- INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
-
-#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
+ INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
+ INTEL_DEVID(dev_priv) == 0x1913 || \
+ INTEL_DEVID(dev_priv) == 0x1916 || \
+ INTEL_DEVID(dev_priv) == 0x1921 || \
+ INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
+ INTEL_DEVID(dev_priv) == 0x1915 || \
+ INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
+ INTEL_DEVID(dev_priv) == 0x5913 || \
+ INTEL_DEVID(dev_priv) == 0x5916 || \
+ INTEL_DEVID(dev_priv) == 0x5921 || \
+ INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
+ INTEL_DEVID(dev_priv) == 0x5915 || \
+ INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+
+#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
@@ -2705,7 +2504,8 @@ struct drm_i915_cmd_table {
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
@@ -2713,8 +2513,8 @@ struct drm_i915_cmd_table {
#define KBL_REVID_D0 0x3
#define KBL_REVID_E0 0x4
-#define IS_KBL_REVID(p, since, until) \
- (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
/*
* The genX designation typically refers to the render engine, so render
@@ -2722,14 +2522,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2740,31 +2540,34 @@ struct drm_i915_cmd_table {
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+ (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- HAS_EDRAM(dev))
-#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
+#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
+#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
+ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
+
+#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
-#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
+#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_contexts)
+#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
+ ((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2778,46 +2581,49 @@ struct drm_i915_cmd_table {
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
-#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
+#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+ !(IS_I915G(dev_priv) || \
+ IS_I915GM(dev_priv)))
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
-#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
-#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
-#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
+#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
-#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
-#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
-#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
+#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2832,26 +2638,33 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+ 2 : HAS_L3_DPF(dev_priv))
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
+
#include "i915_trace.h"
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -2882,12 +2695,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#else
+#define i915_compat_ioctl NULL
#endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2969,7 +2790,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
- return dev_priv->gvt.initialized;
+ return dev_priv->gvt;
}
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3071,7 +2892,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3082,7 +2903,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size);
+ u64 size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3095,77 +2916,86 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
{
return sg->length >> PAGE_SHIFT;
}
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ might_lock(&obj->mm.lock);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
- return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+ return __i915_gem_object_get_pages(obj);
}
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
- return NULL;
-
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ GEM_BUG_ON(!obj->mm.pages);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ atomic_inc(&obj->mm.pages_pin_count);
+}
- return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
}
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages == NULL);
- obj->pages_pin_count++;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!obj->mm.pages);
+
+ atomic_dec(&obj->mm.pages_pin_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages_pin_count == 0);
- obj->pages_pin_count--;
+ __i915_gem_object_unpin_pages(obj);
}
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
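i915_gem_object_pin_pages() above is the lock-free fast path: atomic_inc_not_zero() takes a pin without obj->mm.lock when pages already exist, and only the first pin falls back to __i915_gem_object_get_pages(). A hedged sketch of the same fast-path/slow-path shape on a made-up object:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {
	struct mutex lock;	/* protects first-use allocation */
	atomic_t pin_count;	/* non-zero once the backing store exists */
	void *backing;
};

static int my_obj_pin(struct my_obj *obj)
{
	int ret = 0;

	if (atomic_inc_not_zero(&obj->pin_count))
		return 0;			/* already populated, no lock taken */

	mutex_lock(&obj->lock);
	if (atomic_read(&obj->pin_count) == 0 && !obj->backing) {
		obj->backing = kzalloc(4096, GFP_KERNEL);	/* stand-in for real page allocation */
		if (!obj->backing)
			ret = -ENOMEM;
	}
	if (!ret)
		atomic_inc(&obj->pin_count);
	mutex_unlock(&obj->lock);

	return ret;
}

static void my_obj_unpin(struct my_obj *obj)
{
	atomic_dec(&obj->pin_count);	/* a real driver would free lazily at zero */
}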
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
@@ -3181,8 +3011,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
- * The caller must hold the struct_mutex, and is responsible for calling
- * i915_gem_object_unpin_map() when the mapping is no longer required.
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
@@ -3198,12 +3028,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
- *
- * The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
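With the struct_mutex requirement dropped from the map pin/unpin pair, the mapping can be used from ordinary process context. A hypothetical reader, assuming the two-argument i915_gem_object_pin_map(obj, type) form declared above:

#include <linux/err.h>

static int my_peek_first_dword(struct drm_i915_gem_object *obj, u32 *value)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);	/* cached CPU mapping */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	*value = *(u32 *)vaddr;

	i915_gem_object_unpin_map(obj);
	return 0;
}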
@@ -3236,7 +3063,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
@@ -3265,19 +3092,25 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int priority);
+#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3337,57 +3170,17 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-/* i915_gem_fence.c */
+/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
-/**
- * i915_vma_pin_fence - pin fencing state
- * @vma: vma to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * vma (and its object) is ready to be used as a scanout target. Fencing
- * status must be synchronize first by calling i915_vma_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_vma_unpin_fence().
- *
- * Returns:
- *
- * True if the vma has a fence, false otherwise.
- */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_vma_unpin_fence - unpin fencing state
- * @vma: vma to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_vma_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-static inline void
-i915_vma_unpin_fence(struct i915_vma *vma)
-{
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
-}
-
-void i915_gem_restore_fences(struct drm_device *dev);
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3397,6 +3190,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3430,6 +3226,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline struct intel_timeline *
+i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm;
+
+ vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ return &vm->timeline.engine[engine->id];
+}
+
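/*
 * Hedged usage sketch (not part of the patch): the helper above picks the
 * per-engine timeline from the context's ppGTT address space, falling back
 * to the global GTT for contexts without a ppGTT. A request would typically
 * resolve its timeline like this; rq->ctx and rq->engine are assumed fields
 * and the function name is made up for the example.
 */
static inline struct intel_timeline *
example_request_timeline(struct drm_i915_gem_request *rq)
{
	return i915_gem_context_lookup_timeline(rq->ctx, rq->engine);
}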
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@@ -3473,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3483,6 +3289,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_internal.c */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ unsigned int size);
+
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
@@ -3521,6 +3332,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3541,7 +3354,20 @@ void i915_error_state_get(struct drm_device *dev,
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3591,6 +3417,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3647,15 +3476,16 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_init(struct drm_device *dev);
+extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+ bool state);
extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_device *dev);
-extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
@@ -3674,7 +3504,7 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
@@ -3702,6 +3532,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
@@ -3791,11 +3638,30 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
+ *
* Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
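/*
 * Hedged sketch (not part of the patch): the access pattern the comment above
 * describes, with the untraced accessors used only while dev_priv->uncore.lock
 * is held and forcewake is explicitly taken. The FORCEWAKE_ALL domain and the
 * GEN6_RP_STATE_CAP register are illustrative assumptions, not requirements of
 * the interface.
 */
static u32 example_read_fw(struct drm_i915_private *dev_priv)
{
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	val = I915_READ_FW(GEN6_RP_STATE_CAP);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}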
@@ -3807,11 +3673,11 @@ __raw_write(64, q)
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return VLV_VGACNTRL;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
@@ -3872,7 +3738,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3923,7 +3789,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
wake_up_process(tsk);
rcu_read_unlock();
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 947e82c2b175..412f3513f269 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,12 +29,12 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -42,13 +42,14 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
+ return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -63,13 +64,13 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
}
static int
-insert_mappable_node(struct drm_i915_private *i915,
+insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
- size, 0, 0, 0,
- i915->ggtt.mappable_end,
+ return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
+ size, 0, -1,
+ 0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
@@ -82,7 +83,7 @@ remove_mappable_node(struct drm_mm_node *node)
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
@@ -91,7 +92,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
@@ -104,6 +105,8 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
+ might_sleep();
+
if (!i915_reset_in_progress(error))
return 0;
@@ -114,7 +117,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
!i915_reset_in_progress(error),
- 10*HZ);
+ I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
@@ -167,7 +170,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -177,7 +180,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
@@ -185,7 +188,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
- return PTR_ERR(page);
+ return ERR_CAST(page);
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -200,11 +203,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -214,29 +217,33 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
- obj->pages = st;
- return 0;
+ return st;
}
static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- int ret;
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ drm_clflush_sg(pages);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj, pages);
- if (obj->dirty) {
+ if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -255,22 +262,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
kunmap_atomic(dst);
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
}
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
drm_pci_free(obj->base.dev, obj->phys_handle);
+ i915_gem_object_unpin_pages(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -292,7 +300,12 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -311,90 +324,209 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for just read access or read-write access
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly)
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
{
- struct reservation_object *resv;
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
- if (!readonly) {
- active = obj->last_read;
- active_mask = i915_gem_object_get_active(obj);
- } else {
- active_mask = 1;
- active = &obj->last_write;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
+
+ if (!dma_fence_is_i915(fence))
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
+
+ rq = to_request(fence);
+ if (i915_gem_request_completed(rq))
+ goto out;
+
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (rps) {
+ if (INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ else
+ rps = NULL;
+ }
+
+ timeout = i915_wait_request(rq, flags, timeout);
+
+out:
+ if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
+ i915_gem_request_retire_upto(rq);
+
+ if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&rq->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&rq->i915->rps.client_lock);
}
- for_each_active(active_mask, idx) {
+ return timeout;
+}
+
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait(&active[idx],
- &obj->base.dev->struct_mutex);
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
if (ret)
return ret;
- }
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long err;
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout,
+ rps);
+ if (timeout <= 0)
+ break;
- err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
- MAX_SCHEDULE_TIMEOUT);
- if (err < 0)
- return err;
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
}
- return 0;
+ if (excl && timeout > 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+
+ dma_fence_put(excl);
+
+ return timeout;
}
-/* A nonblocking variant of the above wait. Must be called prior to
- * acquiring the mutex for the object, as the object state may change
- * during this call. A reference must be held by the caller for the object.
- */
-static __must_check int
-__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
- struct intel_rps_client *rps,
- bool readonly)
+static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
- active_mask = __I915_BO_ACTIVE(obj);
- if (!active_mask)
- return 0;
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+ if (!engine->schedule)
+ return;
+
+ engine->schedule(rq, prio);
+}
- if (!readonly) {
- active = obj->last_read;
+static void fence_set_priority(struct dma_fence *fence, int prio)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], prio);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ __fence_set_priority(fence, prio);
}
+}
- for_each_active(active_mask, idx) {
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int prio)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait_unlocked(&active[idx],
- I915_WAIT_INTERRUPTIBLE,
- NULL, rps);
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
if (ret)
return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], prio);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
+ if (excl) {
+ fence_set_priority(excl, prio);
+ dma_fence_put(excl);
+ }
return 0;
}
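/*
 * Hedged usage sketch (not part of the patch): display code would bump all
 * outstanding work on a scanout object to maximum priority before a flip,
 * using the helper above together with the I915_PRIORITY_DISPLAY define from
 * the header hunk earlier. Passing 0 for the flags (exclusive fence only) is
 * an illustrative assumption, as is the function name.
 */
static int example_boost_scanout(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}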
+/**
+ * Waits for rendering to the object to be completed
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes etc)
+ * @timeout: how long to wait
+ * @rps: client (user process) to charge for any waitboosting
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
+ !!(flags & I915_WAIT_LOCKED));
+#endif
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->resv,
+ flags, timeout,
+ rps);
+ return timeout < 0 ? timeout : 0;
+}
+
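/*
 * Hedged usage sketch (not part of the patch): a caller that holds
 * struct_mutex and needs every outstanding reader and writer retired would
 * combine the wait flags like this, mirroring the conversions elsewhere in
 * this patch (e.g. i915_gem_object_unbind). The wrapper name is made up.
 */
static int example_wait_idle_locked(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	return i915_gem_object_wait(obj,
				    I915_WAIT_INTERRUPTIBLE |
				    I915_WAIT_LOCKED |
				    I915_WAIT_ALL,
				    MAX_SCHEDULE_TIMEOUT,
				    NULL);
}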
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -416,7 +548,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
return 0;
}
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
if (obj->base.filp == NULL)
@@ -426,9 +558,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ if (obj->mm.pages)
+ return -EBUSY;
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -438,23 +570,29 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
- return i915_gem_object_get_pages(obj);
+ return i915_gem_object_pin_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret = 0;
+ int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
return ret;
@@ -516,7 +654,7 @@ i915_gem_create(struct drm_file *file,
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
@@ -548,6 +686,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
+ i915_gem_flush_free_objects(to_i915(dev));
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -614,21 +754,24 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
{
int ret;
- *needs_clflush = 0;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -661,20 +804,25 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
{
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the
@@ -704,7 +852,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
obj->cache_dirty = true;
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = 1;
+ obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
@@ -713,32 +861,6 @@ err_unpin:
return ret;
}
-/* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading the target if
- * needs_clflush is set. */
-static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
@@ -764,7 +886,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
@@ -773,60 +895,130 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
+ ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
+ ret = __copy_to_user(user_data, vaddr + offset, length);
kunmap(page);
return ret ? - EFAULT : 0;
}
-static inline unsigned long
-slow_user_access(struct io_mapping *mapping,
- uint64_t page_base, int page_offset,
- char __user *user_data,
- unsigned long length, bool pwrite)
+static int
+shmem_pread(struct page *page, int offset, int length, char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ int ret;
+
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
+
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, length);
+ ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return 0;
+
+ return shmem_pread_slow(page, offset, length, user_data,
+ page_do_bit17_swizzling, needs_clflush);
+}
+
+static int
+i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args)
+{
+ char __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int needs_clflush;
+ unsigned int idx, offset;
+ int ret;
+
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
+
+ ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ remain = args->size;
+ user_data = u64_to_user_ptr(args->data_ptr);
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pread(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ needs_clflush);
+ if (ret)
+ break;
+
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return ret;
+}
+
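/*
 * Hedged illustration (not part of the patch): the pread loop above walks the
 * object page by page, clamping each copy so it never crosses a page
 * boundary; after the first iteration the intra-page offset drops to zero.
 * This helper only restates that arithmetic for one iteration; the name is
 * made up for the example.
 */
static unsigned int example_chunk_length(unsigned int offset, u64 remain)
{
	unsigned int length = remain;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	return length;
}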
+static inline bool
+gtt_user_read(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *ioaddr;
void *vaddr;
- uint64_t unwritten;
+ unsigned long unwritten;
- ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)ioaddr + page_offset;
- if (pwrite)
- unwritten = __copy_from_user(vaddr, user_data, length);
- else
- unwritten = __copy_to_user(user_data, vaddr, length);
-
- io_mapping_unmap(ioaddr);
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data, vaddr + offset, length);
+ io_mapping_unmap(vaddr);
+ }
return unwritten;
}
static int
-i915_gem_gtt_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj, uint64_t size,
- uint64_t data_offset, uint64_t data_ptr)
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_mm_node node;
- char __user *user_data;
- uint64_t remain;
- uint64_t offset;
+ struct i915_vma *vma;
+ void __user *user_data;
+ u64 remain, offset;
int ret;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(i915);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -837,35 +1029,21 @@ i915_gem_gtt_pread(struct drm_device *dev,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto out_unpin;
- user_data = u64_to_user_ptr(data_ptr);
- remain = size;
- offset = data_offset;
+ mutex_unlock(&i915->drm.struct_mutex);
- mutex_unlock(&dev->struct_mutex);
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_writeable(user_data, remain);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto out_unpin;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = args->offset;
while (remain > 0) {
/* Operation in this page
@@ -882,19 +1060,14 @@ i915_gem_gtt_pread(struct drm_device *dev,
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start,
- I915_CACHE_NONE, 0);
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
}
- /* This is a slow read/write as it tries to read from
- * and write to user memory which may result into page
- * faults, and so we cannot perform this under struct_mutex.
- */
- if (slow_user_access(&ggtt->mappable, page_base,
- page_offset, user_data,
- page_length, false)) {
+
+ if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
ret = -EFAULT;
break;
}
@@ -904,111 +1077,19 @@ i915_gem_gtt_pread(struct drm_device *dev,
offset += page_length;
}
- mutex_lock(&dev->struct_mutex);
- if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
-
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
- return ret;
-}
-
-static int
-i915_gem_shmem_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- char __user *user_data;
- ssize_t remain;
- loff_t offset;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int prefaulted = 0;
- int needs_clflush = 0;
- struct sg_page_iter sg_iter;
-
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- if (ret)
- return ret;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
-
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
- if (remain <= 0)
- break;
-
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pread_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
- if (ret == 0)
- goto next_page;
-
- mutex_unlock(&dev->struct_mutex);
-
- if (likely(!i915.prefault_disable) && !prefaulted) {
- ret = fault_in_pages_writeable(user_data, remain);
- /* Userspace is tricking us, but we've already clobbered
- * its pages with the prefault and promised to write the
- * data up to the first fault. Hence ignore any errors
- * and just continue. */
- (void)ret;
- prefaulted = 1;
- }
-
- ret = shmem_pread_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
-
- mutex_lock(&dev->struct_mutex);
-
- if (ret)
- goto out;
-
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
-out:
- i915_gem_obj_finish_shmem_access(obj);
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1027,7 +1108,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
- int ret = 0;
+ int ret;
if (args->size == 0)
return 0;
@@ -1045,36 +1126,29 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- goto err;
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err;
-
- ret = i915_gem_shmem_pread(dev, obj, args, file);
+ goto out;
- /* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV) {
- intel_runtime_pm_get(to_i915(dev));
- ret = i915_gem_gtt_pread(dev, obj, args->size,
- args->offset, args->data_ptr);
- intel_runtime_pm_put(to_i915(dev));
- }
+ ret = i915_gem_shmem_pread(obj, args);
+ if (ret == -EFAULT || ret == -ENODEV)
+ ret = i915_gem_gtt_pread(obj, args);
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -1082,51 +1156,52 @@ err:
* page faults in the source data
*/
-static inline int
-fast_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+static inline bool
+ggtt_write(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force*)vaddr_atomic + page_offset;
- unwritten = __copy_from_user_inatomic_nocache(vaddr,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user(vaddr + offset, user_data, length);
+ io_mapping_unmap(vaddr);
+ }
+
return unwritten;
}
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
- * @i915: i915 device private data
- * @obj: i915 gem object
+ * @obj: i915 GEM object
* @args: pwrite arguments structure
- * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma;
struct drm_mm_node node;
- uint64_t remain, offset;
- char __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ void __user *user_data;
int ret;
- bool hit_slow_path = false;
- if (i915_gem_object_is_tiled(obj))
- return -EFAULT;
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
@@ -1139,25 +1214,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
+ mutex_unlock(&i915->drm.struct_mutex);
+
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -1170,8 +1239,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
- unsigned page_offset = offset_in_page(offset);
- unsigned page_length = PAGE_SIZE - page_offset;
+ unsigned int page_offset = offset_in_page(offset);
+ unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
@@ -1188,92 +1257,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (fast_user_write(&ggtt->mappable, page_base,
- page_offset, user_data, page_length)) {
- hit_slow_path = true;
- mutex_unlock(&dev->struct_mutex);
- if (slow_user_access(&ggtt->mappable,
- page_base,
- page_offset, user_data,
- page_length, true)) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto out_flush;
- }
-
- mutex_lock(&dev->struct_mutex);
+ if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
+ ret = -EFAULT;
+ break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-
-out_flush:
- if (hit_slow_path) {
- if (ret == 0 &&
- (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
- }
-
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
- i915_gem_object_unpin_pages(obj);
+ node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-/* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush is set. */
-static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
- user_data, page_length);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
@@ -1284,124 +1297,114 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user_swizzled(vaddr, offset, user_data,
+ length);
else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user(vaddr + offset, user_data, length);
if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
kunmap(page);
return ret ? -EFAULT : 0;
}
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set.
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
- unsigned int needs_clflush;
- struct sg_page_iter sg_iter;
+ int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- if (ret)
- return ret;
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
+ ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
- int partial_cacheline_write;
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return ret;
- if (remain <= 0)
- break;
+ return shmem_pwrite_slow(page, offset, len, user_data,
+ page_do_bit17_swizzling,
+ needs_clflush_before,
+ needs_clflush_after);
+}
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
-
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- /* If we don't overwrite a cacheline completely we need to be
- * careful to have up-to-date data by first clflushing. Don't
- * overcomplicate things and flush the entire patch. */
- partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
- ((shmem_page_offset | page_length)
- & (boot_cpu_data.x86_clflush_size - 1));
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
- if (ret == 0)
- goto next_page;
-
- hit_slowpath = 1;
- mutex_unlock(&dev->struct_mutex);
- ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
+static int
+i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ void __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int partial_cacheline_write;
+ unsigned int needs_clflush;
+ unsigned int offset, idx;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (ret)
- goto out;
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
-out:
- i915_gem_obj_finish_shmem_access(obj);
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+ * overcomplicate things and flush the entire page.
+ */
+ partial_cacheline_write = 0;
+ if (needs_clflush & CLFLUSH_BEFORE)
+ partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
- if (hit_slowpath) {
- /*
- * Fixup: Flush cpu caches in case we didn't flush the dirty
- * cachelines in-line while writing and the object moved
- * out of the cpu write domain while we've dropped the lock.
- */
- if (!(needs_clflush & CLFLUSH_AFTER) &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- if (i915_gem_clflush_object(obj, obj->pin_display))
- needs_clflush |= CLFLUSH_AFTER;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pwrite(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ (offset | length) & partial_cacheline_write,
+ needs_clflush & CLFLUSH_AFTER);
+ if (ret)
+ break;
- if (needs_clflush & CLFLUSH_AFTER)
- i915_gem_chipset_flush(to_i915(dev));
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
}
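/*
 * Hedged illustration (not part of the patch): partial_cacheline_write above
 * is (x86_clflush_size - 1), so (offset | length) & mask is non-zero exactly
 * when a write does not both start and end on cacheline boundaries and thus
 * needs the clflush-before treatment. A worked check of that condition, with
 * a made-up name:
 */
static bool example_partial_cacheline(unsigned int offset, unsigned int length,
				      unsigned int clflush_size)
{
	/* clflush_size is a power of two; its low bits select the
	 * sub-cacheline remainder of both the start offset and the length. */
	return (offset | length) & (clflush_size - 1);
}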
@@ -1417,7 +1420,6 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1430,13 +1432,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
- }
-
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -1450,15 +1445,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_rpm;
+ goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -1468,30 +1465,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+ cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case. */
- }
+ * textures). Fallback to the shmem path in that case.
+ */
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-
-err_rpm:
- intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -1502,6 +1492,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915;
+ struct list_head *list;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ continue;
+
+ if (i915_vma_is_active(vma))
+ continue;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ }
+
+ i915 = to_i915(obj->base.dev);
+ list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+ list_move_tail(&obj->global_link, list);
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -1517,7 +1531,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
- int ret;
+ int err;
/* Only handle setting domains to types used by the CPU. */
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1537,29 +1551,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
- if (ret)
- goto err;
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
+ if (err)
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err;
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out_unpin;
if (read_domains & I915_GEM_DOMAIN_GTT)
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
else
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
- return ret;
+ return err;
}
/**
@@ -1589,7 +1622,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return err;
}
@@ -1635,7 +1668,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
@@ -1647,7 +1680,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
@@ -1661,7 +1694,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1763,8 +1796,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -1773,7 +1805,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, NULL, !write);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
@@ -1784,7 +1823,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
@@ -1806,15 +1845,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Use a partial view if it is bigger than available space */
chunk_size = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
- chunk_size = max(chunk_size, tile_row_pages(obj));
+ chunk_size = roundup(chunk_size, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
- (area->vm_end - area->vm_start) / PAGE_SIZE -
- view.params.partial.offset);
+ vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
@@ -1842,22 +1880,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (list_empty(&obj->userfault_link))
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
- if (ret)
- goto err_unpin;
- obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
@@ -1919,15 +1960,23 @@ err:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
- if (!obj->fault_mappable)
- return;
+ if (list_empty(&obj->userfault_link))
+ goto out;
+ list_del_init(&obj->userfault_link);
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
@@ -1940,16 +1989,45 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
*/
wmb();
- obj->fault_mappable = false;
+out:
+ intel_runtime_pm_put(i915);
}
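
i915_gem_release_mmap() now keys off list_empty(&obj->userfault_link) instead of the old fault_mappable flag: a self-linked node means "not mmapped into userspace", and list_del_init() keeps the test safe to repeat. A small self-contained sketch of that idiom, assuming nothing beyond standard C (the helpers below are simplified rewrites, not the kernel's <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct list_node {
        struct list_node *prev, *next;
};

static void node_init(struct list_node *n)
{
        n->prev = n->next = n;                  /* self-linked == detached */
}

static bool node_on_list(const struct list_node *n)
{
        return n->next != n;
}

static void node_del_init(struct list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);                           /* safe to test or delete again */
}

static void list_add_head(struct list_node *head, struct list_node *n)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

int main(void)
{
        struct list_node head, obj;

        node_init(&head);
        node_init(&obj);
        list_add_head(&head, &obj);
        printf("on list: %d\n", node_on_list(&obj));
        node_del_init(&obj);
        printf("on list: %d\n", node_on_list(&obj));
        return 0;
}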
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj, *on;
+ int i;
+
+ /*
+ * Only called during RPM suspend. All users of the userfault_list
+ * must be holding an RPM wakeref to ensure that this can not
+ * run concurrently with themselves (and use the struct_mutex for
+ * protection between themselves).
+ */
+
+ list_for_each_entry_safe(obj, on,
+ &dev_priv->mm.userfault_list, userfault_link) {
+ list_del_init(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ }
+
+ /* The fence will be lost when the device powers down. If any were
+ * in use by hardware (i.e. they are pinned), we should not be powering
+ * down! All other fences will be reacquired by the user upon waking.
+ */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ if (WARN_ON(reg->pin_count))
+ continue;
+
+ if (!reg->vma)
+ continue;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- i915_gem_release_mmap(obj);
+ GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ reg->dirty = true;
+ }
}
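
i915_gem_runtime_suspend() walks the userfault list with the _safe iterator because each entry is unlinked while the walk is in progress. A userspace analogue of why the next pointer has to be cached before the current node is touched (hypothetical struct node, not a driver type):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL, *n, *next;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }

        for (n = head, head = NULL; n; n = next) {
                next = n->next;                 /* cached before n is destroyed */
                printf("dropping %d\n", n->id);
                free(n);
        }
        return 0;
}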
/**
@@ -2063,7 +2141,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -2106,16 +2184,18 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->madv = __I915_MADV_PURGED;
+ obj->mm.madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
- switch (obj->madv) {
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(obj->mm.pages);
+
+ switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
@@ -2130,85 +2210,119 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
- int ret;
-
- BUG_ON(obj->madv == __I915_MADV_PURGED);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj, true);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ __i915_gem_object_release_shmem(obj, pages);
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj);
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
-
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
- const struct drm_i915_gem_object_ops *ops = obj->ops;
+ struct radix_tree_iter iter;
+ void **slot;
- if (obj->pages == NULL)
- return 0;
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
- if (obj->pages_pin_count)
- return -EBUSY;
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->global_list);
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
- if (obj->mapping) {
+ if (obj->mm.mapping) {
void *ptr;
- ptr = ptr_mask_bits(obj->mapping);
+ ptr = ptr_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
- obj->mapping = NULL;
+ obj->mm.mapping = NULL;
}
- ops->put_pages(obj);
- obj->pages = NULL;
+ __i915_gem_object_reset_page_iter(obj);
- i915_gem_object_invalidate(obj);
+ obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
+}
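
__i915_gem_object_put_pages() steals the pages pointer with fetch_and_zero() under obj->mm.lock, so later readers see NULL while the caller tears the tables down outside the critical section. A simplified userspace version of that take-ownership-and-clear helper (pointer-only, unlike the kernel macro, and shown purely as an illustration):

#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({                  \
        __typeof__(*(ptr)) __val = *(ptr);      \
        *(ptr) = NULL;                          \
        __val;                                  \
})

struct object {
        void *pages;
};

int main(void)
{
        struct object obj = { .pages = malloc(64) };
        void *pages = fetch_and_zero(&obj.pages);

        printf("stolen=%p field=%p\n", pages, obj.pages);
        free(pages);
        return 0;
}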
+static unsigned int swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+ return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
return 0;
+#endif
}
-static int
+static void i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ return;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ /* called before being DMA mapped, no need to copy sg->dma_* */
+ new_sg = sg_next(new_sg);
+ }
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+}
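
i915_sg_trim() exists because the table is allocated for the worst case of one entry per page, while coalescing usually leaves far fewer entries in use. A userspace sketch of the same shrink-by-copy idea using a plain int array in place of a scatterlist (struct table and table_trim are illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table {
        size_t nents;                           /* entries actually used */
        size_t orig_nents;                      /* entries allocated */
        int *ents;
};

static void table_trim(struct table *t)
{
        int *trimmed;

        if (t->nents == t->orig_nents)
                return;

        trimmed = malloc(t->nents * sizeof(*trimmed));
        if (!trimmed)
                return;                         /* trimming is best-effort, as above */

        memcpy(trimmed, t->ents, t->nents * sizeof(*trimmed));
        free(t->ents);
        t->ents = trimmed;
        t->orig_nents = t->nents;
}

int main(void)
{
        struct table t = { .nents = 2, .orig_nents = 8 };

        t.ents = calloc(t.orig_nents, sizeof(*t.ents));
        table_trim(&t);
        printf("allocated=%zu used=%zu\n", t.orig_nents, t.nents);
        free(t.ents);
        return 0;
}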
+
+static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2219,6 +2333,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment;
int ret;
gfp_t gfp;
@@ -2226,17 +2341,21 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ max_segment = swiotlb_max_size();
+ if (!max_segment)
+ max_segment = rounddown(UINT_MAX, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2264,22 +2383,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto err_pages;
+ goto err_sg;
}
}
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = sg_next(sg);
- continue;
- }
-#endif
- if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
@@ -2292,27 +2404,24 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
-#endif
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- obj->pages = st;
- ret = i915_gem_gtt_prepare_object(obj);
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
+ i915_gem_object_do_bit_17_swizzle(obj, st);
- if (i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- i915_gem_object_pin_pages(obj);
+ return st;
- return 0;
-
-err_pages:
+err_sg:
sg_mark_end(sg);
+err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
@@ -2329,43 +2438,73 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ret;
+ return ERR_PTR(ret);
}
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const struct drm_i915_gem_object_ops *ops = obj->ops;
- int ret;
+ lockdep_assert_held(&obj->mm.lock);
- if (obj->pages)
- return 0;
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
- if (obj->madv != I915_MADV_WILLNEED) {
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- BUG_ON(obj->pages_pin_count);
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
- ret = ops->get_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
+}
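
->get_pages() now returns either a valid sg_table pointer or an errno folded into the pointer via ERR_PTR(), which the caller unpacks with IS_ERR()/PTR_ERR(). A compact userspace rendition of that kernel convention (get_pages here is a stand-in that just mallocs):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *ERR_PTR(long err)          { return (void *)err; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the last page of the address space is reserved for errnos */
        return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *get_pages(size_t size)
{
        void *pages = malloc(size);

        return pages ? pages : ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *pages = get_pages(4096);

        if (IS_ERR(pages)) {
                printf("error %ld\n", PTR_ERR(pages));
                return 1;
        }
        free(pages);
        return 0;
}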
- list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
- return 0;
+ if (unlikely(!obj->mm.pages)) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
}
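
__i915_gem_object_get_pages() populates obj->mm.pages at most once under obj->mm.lock and then bumps pages_pin_count, so every successful call must be paired with an unpin. A minimal sketch of that populate-once-then-count pattern, assuming a pthread mutex and a heap buffer in place of the real page tables:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
        pthread_mutex_t lock;
        void *pages;
        int pages_pin_count;
};

static int object_pin_pages(struct object *obj)
{
        int err = 0;

        pthread_mutex_lock(&obj->lock);
        if (!obj->pages) {
                obj->pages = malloc(4096);      /* expensive setup, done once */
                if (!obj->pages) {
                        err = -ENOMEM;
                        goto unlock;
                }
        }
        obj->pages_pin_count++;                 /* every caller must later unpin */
unlock:
        pthread_mutex_unlock(&obj->lock);
        return err;
}

int main(void)
{
        struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int first = object_pin_pages(&obj);
        int second = object_pin_pages(&obj);

        printf("%d %d pins=%d\n", first, second, obj.pages_pin_count);
        free(obj.pages);
        return 0;
}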
/* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -2373,7 +2512,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->pages;
+ struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
@@ -2424,21 +2563,31 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- ret = i915_gem_object_get_pages(obj);
+ ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
- i915_gem_object_pin_pages(obj);
- pinned = obj->pages_pin_count > 1;
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!obj->mm.pages)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
- ptr = ptr_unpack_bits(obj->mapping, has_type);
+ ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if (is_vmalloc_addr(ptr))
@@ -2446,59 +2595,28 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
else
kunmap(kmap_to_page(ptr));
- ptr = obj->mapping = NULL;
+ ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
- goto err;
+ goto err_unpin;
}
- obj->mapping = ptr_pack_bits(ptr, type);
+ obj->mm.mapping = ptr_pack_bits(ptr, type);
}
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
return ptr;
-err:
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(ret);
-}
-
-static void
-i915_gem_object_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_write);
-
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- int idx = request->engine->id;
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_read[idx]);
-
- GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
-
- i915_gem_object_clear_active(obj, idx);
- if (i915_gem_object_is_active(obj))
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_list,
- &request->i915->mm.bound_list);
-
- i915_gem_object_put(obj);
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
}
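
i915_gem_object_pin_map() first tries atomic_inc_not_zero() on pages_pin_count so the common already-populated case never contends on obj->mm.lock; only the 0 -> 1 transition takes the slow path. A userspace sketch of that fast path with C11 atomics (inc_not_zero and pin are illustrative names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pin_count;

static bool inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0)
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        return false;
}

static void pin(void)
{
        if (inc_not_zero(&pin_count))
                return;                         /* already populated, lock-free */

        pthread_mutex_lock(&lock);              /* slow path: first user populates */
        atomic_fetch_add(&pin_count, 1);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pin();
        pin();
        printf("pin_count=%d\n", atomic_load(&pin_count));
        return 0;
}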
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2545,13 +2663,10 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, link) {
- if (i915_gem_request_completed(request))
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ if (__i915_gem_request_completed(request))
continue;
- if (!i915_sw_fence_done(&request->submit))
- break;
-
return request;
}
@@ -2579,10 +2694,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
+ struct intel_timeline *timeline;
bool ring_hung;
- /* Ensure irq handler finishes, and not run again. */
- tasklet_kill(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2591,12 +2705,15 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
+ ring_hung = false;
+
i915_set_reset_status(request->ctx, ring_hung);
if (!ring_hung)
return;
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->fence.seqno);
+ engine->name, request->global_seqno);
/* Setup the CS to resume from the breadcrumb of the hung request */
engine->reset_hw(engine, request);
@@ -2613,21 +2730,28 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
- list_for_each_entry_continue(request, &engine->request_list, link)
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
+
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ list_for_each_entry(request, &timeline->requests, link)
+ reset_request(request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
- i915_gem_restore_fences(&dev_priv->drm);
+ i915_gem_restore_fences(dev_priv);
if (dev_priv->gt.awake) {
intel_sanitize_gt_powersave(dev_priv);
@@ -2639,6 +2763,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ i915_gem_request_submit(request);
+ intel_engine_init_global_seqno(request->engine, request->global_seqno);
}
static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
@@ -2649,7 +2775,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
/*
* Clear the execlists queue up before freeing the requests, as those
@@ -2658,26 +2785,30 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
- spin_lock(&engine->execlist_lock);
- INIT_LIST_HEAD(&engine->execlist_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
i915_gem_request_put(engine->execlist_port[0].request);
i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- spin_unlock(&engine->execlist_lock);
- }
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
}
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
@@ -2716,12 +2847,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_engines))
+ /*
+ * Wait for last execlists context complete, but bail out in case a
+ * new request is submitted.
+ */
+ wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
+ intel_execlists_idle(dev_priv), 10);
+
+ if (READ_ONCE(dev_priv->gt.active_requests))
return;
rearm_hangcheck =
@@ -2735,10 +2874,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
goto out_rearm;
}
- if (dev_priv->gt.active_engines)
+ /*
+ * New request retired after this work handler started, extend active
+ * period until next instance of the work.
+ */
+ if (work_pending(work))
+ goto out_unlock;
+
+ if (dev_priv->gt.active_requests)
goto out_unlock;
- for_each_engine(engine, dev_priv)
+ if (wait_for(intel_execlists_idle(dev_priv), 10))
+ DRM_ERROR("Timeout waiting for engines to idle\n");
+
+ for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
@@ -2768,9 +2917,26 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
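
to_wait_timeout() encodes the uABI convention: a negative timeout_ns means wait indefinitely, zero means only poll the current state, and a positive value is a bounded wait. A userspace analogue mapping the same convention onto poll(2)-style milliseconds (rounding up so a non-zero request never degenerates to a poll):

#include <stdint.h>
#include <stdio.h>

static int timeout_ns_to_ms(int64_t timeout_ns)
{
        if (timeout_ns < 0)
                return -1;                      /* poll(2) convention for "infinite" */
        if (timeout_ns == 0)
                return 0;
        return (int)((timeout_ns + 999999) / 1000000);
}

int main(void)
{
        printf("%d %d %d\n", timeout_ns_to_ms(-1), timeout_ns_to_ms(0),
               timeout_ns_to_ms(1500000));
        return 0;
}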
+
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @dev: drm device pointer
@@ -2799,10 +2965,9 @@ int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
- struct intel_rps_client *rps = to_rps_client(file);
struct drm_i915_gem_object *obj;
- unsigned long active;
- int idx, ret = 0;
+ ktime_t start;
+ long ret;
if (args->flags != 0)
return -EINVAL;
@@ -2811,133 +2976,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!obj)
return -ENOENT;
- active = __I915_BO_ACTIVE(obj);
- for_each_active(active, idx) {
- s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
- ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
- I915_WAIT_INTERRUPTIBLE,
- timeout, rps);
- if (ret)
- break;
- }
-
- i915_gem_object_put_unlocked(obj);
- return ret;
-}
-
-static void __i915_vma_iounmap(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- if (vma->iomap == NULL)
- return;
-
- io_mapping_unmap(vma->iomap);
- vma->iomap = NULL;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
- int ret;
-
- /* First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- */
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
-
- /* When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- */
- __i915_vma_pin(vma);
+ start = ktime_get();
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns),
+ to_rps_client(file));
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
}
- if (i915_vma_is_pinned(vma))
- return -EBUSY;
-
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
-
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!obj->pages);
-
- if (i915_vma_is_map_and_fenceable(vma)) {
- /* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- __i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
- }
-
- if (likely(!vma->vm->closed)) {
- trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
- }
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
-
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
- if (vma->pages != obj->pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_list,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
-
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
- return 0;
+ i915_gem_object_put(obj);
+ return ret;
}
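
The wait ioctl above hands the unconsumed budget back to userspace: it records the start time, subtracts the elapsed time from args->timeout_ns and clamps at zero, so a restarted call cannot wait longer than originally requested. A small userspace sketch of that bookkeeping using CLOCK_MONOTONIC (usleep stands in for the actual wait):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
        int64_t timeout_ns = 50 * 1000000;      /* 50ms budget from "userspace" */
        int64_t start = now_ns();

        usleep(10000);                          /* stand-in for the actual wait */

        if (timeout_ns > 0) {
                timeout_ns -= now_ns() - start;
                if (timeout_ns < 0)
                        timeout_ns = 0;
        }
        printf("remaining %" PRId64 " ns\n", timeout_ns);
        return 0;
}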
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
- struct intel_engine_cs *engine;
- int ret;
+ int ret, i;
- for_each_engine(engine, dev_priv) {
- if (engine->last_context == NULL)
- continue;
-
- ret = intel_engine_idle(engine, flags);
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
if (ret)
return ret;
}
@@ -2945,187 +3006,45 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
return 0;
}
-static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- struct drm_mm_node *gtt_space = &vma->node;
- struct drm_mm_node *other;
-
- /*
- * On some machines we have to be careful when putting differing types
- * of snoopable memory together to avoid the prefetcher crossing memory
- * domains and dying. During vm initialisation, we decide whether or not
- * these constraints apply and set the drm_mm.color_adjust
- * appropriately.
- */
- if (vma->vm->mm.color_adjust == NULL)
- return true;
-
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
- return false;
-
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
- return false;
-
- return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- struct drm_i915_gem_object *obj = vma->obj;
- u64 start, end;
int ret;
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
- size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
-
- start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-
- end = vma->vm->total;
- if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
- if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
- /* If binding the object/GGTT view requires more space than the entire
- * aperture has, reject it early before evicting everything in a vain
- * attempt to find space.
- */
- if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
- end);
- return -E2BIG;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ret;
-
- i915_gem_object_pin_pages(obj);
+ if (flags & I915_WAIT_LOCKED) {
+ struct i915_gem_timeline *tl;
- if (flags & PIN_OFFSET_FIXED) {
- u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
- ret = -EINVAL;
- goto err_unpin;
- }
+ lockdep_assert_held(&i915->drm.struct_mutex);
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
- goto err_unpin;
+ return ret;
}
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
- goto err_unpin;
- }
+ ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+ if (ret)
+ return ret;
}
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- obj->bind_count++;
return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj->pages == NULL)
- return false;
+ if (!obj->mm.pages)
+ return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
- return false;
+ return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3137,14 +3056,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
- return false;
+ return;
}
trace_i915_gem_object_clflush(obj);
- drm_clflush_sg(obj->pages);
+ drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
-
- return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3173,7 +3090,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -3190,9 +3107,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+ i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
obj->base.write_domain = 0;
@@ -3201,24 +3116,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
I915_GEM_DOMAIN_CPU);
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- continue;
-
- if (i915_vma_is_active(vma))
- continue;
-
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- }
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
@@ -3233,7 +3130,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3248,7 +3152,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
@@ -3267,21 +3171,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->dirty = 1;
+ obj->mm.dirty = true;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
+ i915_gem_object_unpin_pages(obj);
return 0;
}
@@ -3304,10 +3206,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
- int ret = 0;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
- goto out;
+ return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
@@ -3350,11 +3254,17 @@ restart:
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmaping to force
@@ -3396,20 +3306,14 @@ restart:
}
}
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+ cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ obj->cache_dirty = true;
+
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
-out:
- /* Flush the dirty CPU caches to the backing storage so that the
- * object is now coherent at its new cache level (with respect
- * to the access domain).
- */
- if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
- if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
- }
-
return 0;
}
@@ -3418,10 +3322,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
+ int err = 0;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3437,15 +3345,15 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
args->caching = I915_CACHING_NONE;
break;
}
-
- i915_gem_object_put_unlocked(obj);
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -3462,23 +3370,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto rpm_put;
+ return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
@@ -3487,13 +3393,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
}
ret = i915_gem_object_set_cache_level(obj, level);
-
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-rpm_put:
- intel_runtime_pm_put(dev_priv);
-
return ret;
}
@@ -3511,6 +3413,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
@@ -3526,7 +3430,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_display;
@@ -3543,16 +3448,32 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+ if (IS_ERR(vma)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int flags;
+
+ /* Valleyview is definitely limited to scanning out the first
+ * 512MiB. Lets presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ flags = 0;
+ if (HAS_GMCH_DISPLAY(i915))
+ flags = PIN_MAPPABLE;
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ }
if (IS_ERR(vma))
goto err_unpin_display;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
-
- i915_gem_object_flush_cpu_write_domain(obj);
+ /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+ if (obj->cache_dirty) {
+ i915_gem_clflush_object(obj, true);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ }
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3577,6 +3498,8 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+
if (WARN_ON(vma->obj->pin_display == 0))
return;
@@ -3588,7 +3511,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
i915_vma_unpin(vma);
- WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
/**
@@ -3605,7 +3527,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3627,7 +3556,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -3661,11 +3590,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
+ long ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -3692,98 +3617,12 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+ ret = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(target);
- return ret;
-}
-
-static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- if (!drm_mm_node_allocated(&vma->node))
- return false;
-
- if (vma->node.size < size)
- return true;
-
- if (alignment && vma->node.start & (alignment - 1))
- return true;
-
- if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
- return true;
-
- if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
- return true;
-
- return false;
-}
-
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
-
- if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
- else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
-{
- unsigned int bound = vma->flags;
- int ret;
-
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
-
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err;
- }
-
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err;
- }
-
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
- if (ret)
- goto err;
-
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
-
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
-
-err:
- __i915_vma_unpin(vma);
- return ret;
+ return ret < 0 ? ret : 0;
}
struct i915_vma *
@@ -3793,10 +3632,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
return vma;
@@ -3806,6 +3648,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
return ERR_PTR(-ENOSPC);
+ if (flags & PIN_MAPPABLE) {
+ u32 fence_size;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
+ i915_gem_object_get_tiling(obj));
+ /* If the required space is larger than the available
+ * aperture, we will not be able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (fence_size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+ * More interesting would be to search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
+ if (flags & PIN_NONBLOCK &&
+ fence_size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
@@ -3852,83 +3729,42 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct i915_gem_active *active,
+__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *request;
+ struct drm_i915_gem_request *rq;
- request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
- return 0;
-
- /* This is racy. See __i915_gem_active_get_rcu() for an in detail
- * discussion of how to handle the race correctly, but for reporting
- * the busy state we err on the side of potentially reporting the
- * wrong engine as being busy (but we guarantee that the result
- * is at least self-consistent).
- *
- * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
- * whilst we are inspecting it, even under the RCU read lock as we are.
- * This means that there is a small window for the engine and/or the
- * seqno to have been overwritten. The seqno will always be in the
- * future compared to the intended, and so we know that if that
- * seqno is idle (on whatever engine) our request is idle and the
- * return 0 above is correct.
- *
- * The issue is that if the engine is switched, it is just as likely
- * to report that it is busy (but since the switch happened, we know
- * the request should be idle). So there is a small chance that a busy
- * result is actually the wrong engine.
+ /* We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
*
- * So why don't we care?
- *
- * For starters, the busy ioctl is a heuristic that is by definition
- * racy. Even with perfect serialisation in the driver, the hardware
- * state is constantly advancing - the state we report to the user
- * is stale.
- *
- * The critical information for the busy-ioctl is whether the object
- * is idle as userspace relies on that to detect whether its next
- * access will stall, or if it has missed submitting commands to
- * the hardware allowing the GPU to stall. We never generate a
- * false-positive for idleness, thus busy-ioctl is reliable at the
- * most fundamental level, and we maintain the guarantee that a
- * busy object left to itself will eventually become idle (and stay
- * idle!).
- *
- * We allow ourselves the leeway of potentially misreporting the busy
- * state because that is an optimisation heuristic that is constantly
- * in flux. Being quickly able to detect the busy/idle state is much
- * more important than accurate logging of exactly which engines were
- * busy.
- *
- * For accuracy in reporting the engine, we could use
- *
- * result = 0;
- * request = __i915_gem_active_get_rcu(active);
- * if (request) {
- * if (!i915_gem_request_completed(request))
- * result = flag(request->engine->exec_id);
- * i915_gem_request_put(request);
- * }
- *
- * but that still remains susceptible to both hardware and userspace
- * races. So we accept making the result of that race slightly worse,
- * given the rarity of the race and its low impact on the result.
+ * Note we only report on the status of native fences.
*/
- return flag(READ_ONCE(request->engine->exec_id));
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, struct drm_i915_gem_request, fence);
+ if (i915_gem_request_completed(rq))
+ return 0;
+
+ return flag(rq->engine->exec_id);
}
static __always_inline unsigned int
-busy_check_reader(const struct i915_gem_active *active)
+busy_check_reader(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_read_flag);
+ return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct i915_gem_active *active)
+busy_check_writer(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_write_id);
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
}
int
@@ -3937,64 +3773,58 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- unsigned long active;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
- obj = i915_gem_object_lookup(file, args->handle);
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj)
- return -ENOENT;
+ goto out;
- args->busy = 0;
- active = __I915_BO_ACTIVE(obj);
- if (active) {
- int idx;
+ /* A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+ *
+ */
+retry:
+ seq = raw_read_seqcount(&obj->resv->seq);
- /* Yes, the lookups are intentionally racy.
- *
- * First, we cannot simply rely on __I915_BO_ACTIVE. We have
- * to regard the value as stale and as our ABI guarantees
- * forward progress, we confirm the status of each active
- * request with the hardware.
- *
- * Even though we guard the pointer lookup by RCU, that only
- * guarantees that the pointer and its contents remain
- * dereferencable and does *not* mean that the request we
- * have is the same as the one being tracked by the object.
- *
- * Consider that we lookup the request just as it is being
- * retired and freed. We take a local copy of the pointer,
- * but before we add its engine into the busy set, the other
- * thread reallocates it and assigns it to a task on another
- * engine with a fresh and incomplete seqno. Guarding against
- * that requires careful serialisation and reference counting,
- * i.e. using __i915_gem_active_get_request_rcu(). We don't,
- * instead we expect that if the result is busy, which engines
- * are busy is not completely reliable - we only guarantee
- * that the object was busy.
- */
- rcu_read_lock();
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
- for_each_active(active, idx)
- args->busy |= busy_check_reader(&obj->last_read[idx]);
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
- /* For ABI sanity, we only care that the write engine is in
- * the set of read engines. This should be ensured by the
- * ordering of setting last_read/last_write in
- * i915_vma_move_to_active(), and then in reverse in retire.
- * However, for good measure, we always report the last_write
- * request as a busy read as well as being a busy write.
- *
- * We don't care that the set of active read/write engines
- * may change during construction of the result, as it is
- * equally liable to change before userspace can inspect
- * the result.
- */
- args->busy |= busy_check_writer(&obj->last_write);
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
- rcu_read_unlock();
+ args->busy |= busy_check_reader(fence);
+ }
}
- i915_gem_object_put_unlocked(obj);
- return 0;
+ if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
}
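
The new busy ioctl samples the reservation object under a seqcount: read the sequence, inspect the fences, and retry if the sequence moved (or was odd, i.e. a writer was mid-update). A single-threaded userspace illustration of that retry discipline with C11 atomics (read_busy/write_busy are illustrative, not the kernel seqlock API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static unsigned int busy_flags;

static unsigned int read_busy(void)
{
        unsigned int s, result;

        for (;;) {
                s = atomic_load(&seq);
                if (s & 1)                      /* writer in progress, try again */
                        continue;
                result = busy_flags;            /* take the racy snapshot */
                if (atomic_load(&seq) == s)
                        return result;          /* nothing changed while we read */
        }
}

static void write_busy(unsigned int flags)
{
        atomic_fetch_add(&seq, 1);              /* odd: readers will retry */
        busy_flags = flags;
        atomic_fetch_add(&seq, 1);              /* even again: snapshot is stable */
}

int main(void)
{
        write_busy(0x3);
        printf("busy=%#x\n", read_busy());
        return 0;
}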
int
@@ -4011,7 +3841,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4021,77 +3851,111 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (!obj)
+ return -ENOENT;
- if (obj->pages &&
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ goto out;
+
+ if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->madv == I915_MADV_WILLNEED)
- i915_gem_object_unpin_pages(obj);
- if (args->madv == I915_MADV_WILLNEED)
- i915_gem_object_pin_pages(obj);
+ if (obj->mm.madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (args->madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
- if (obj->madv != __I915_MADV_PURGED)
- obj->madv = args->madv;
+ if (obj->mm.madv != __I915_MADV_PURGED)
+ obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+ if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
i915_gem_object_truncate(obj);
- args->retained = obj->madv != __I915_MADV_PURGED;
+ args->retained = obj->mm.madv != __I915_MADV_PURGED;
+ mutex_unlock(&obj->mm.lock);
+out:
i915_gem_object_put(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return err;
+}
+
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- int i;
+ mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_request_active(&obj->last_read[i],
- i915_gem_object_retire__read);
- init_request_active(&obj->last_write,
- i915_gem_object_retire__write);
+ INIT_LIST_HEAD(&obj->global_link);
+ INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
+ reservation_object_init(&obj->__builtin_resv);
+ obj->resv = &obj->__builtin_resv;
+
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- obj->madv = I915_MADV_WILLNEED;
+ init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size)
+/* Note we don't consider sign bits :| */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_device *dev, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
int ret;
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4101,7 +3965,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4115,7 +3979,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
+ if (HAS_LLC(dev_priv)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4138,7 +4002,6 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
fail:
i915_gem_object_free(obj);
-
return ERR_PTR(ret);
}
@@ -4150,7 +4013,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* back the contents from the GPU.
*/
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return false;
if (obj->base.filp == NULL)
@@ -4166,16 +4029,72 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *next;
+ struct drm_i915_gem_object *obj, *on;
- intel_runtime_pm_get(dev_priv);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+ llist_for_each_entry(obj, freed, freed) {
+ struct i915_vma *vma, *vn;
+
+ trace_i915_gem_object_destroy(obj);
- trace_i915_gem_object_destroy(obj);
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn,
+ &obj->vma_list, obj_link) {
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_close(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma_list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+
+ list_del(&obj->global_link);
+ }
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(obj->mm.pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ reservation_object_fini(&obj->__builtin_resv);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(i915, obj->base.size);
+
+ kfree(obj->bit_17);
+ i915_gem_object_free(obj);
+ }
+}
+
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ freed = llist_del_all(&i915->mm.free_list);
+ if (unlikely(freed))
+ __i915_gem_free_objects(i915, freed);
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
/* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
@@ -4184,47 +4103,62 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* the GTT either for the user or for scanout). Those VMA still need to
 * be unbound now.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_close(vma);
- }
- GEM_BUG_ON(obj->bind_count);
- /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
- * before progressing. */
- if (obj->stolen)
- i915_gem_object_unpin_pages(obj);
+ while ((freed = llist_del_all(&i915->mm.free_list)))
+ __i915_gem_free_objects(i915, freed);
+}
- WARN_ON(atomic_read(&obj->frontbuffer_bits));
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
- i915_gem_object_is_tiled(obj))
- i915_gem_object_unpin_pages(obj);
+ /* We can't simply use call_rcu() from i915_gem_free_object()
+ * as we need to block whilst unbinding, and the call_rcu
+ * callback may be invoked from softirq context. So we take a
+ * detour through a worker.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ schedule_work(&i915->mm.free_work);
+}
- if (WARN_ON(obj->pages_pin_count))
- obj->pages_pin_count = 0;
- if (discard_backing_storage(obj))
- obj->madv = I915_MADV_DONTNEED;
- i915_gem_object_put_pages(obj);
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- BUG_ON(obj->pages);
+ if (obj->mm.quirked)
+ __i915_gem_object_unpin_pages(obj);
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
+ if (discard_backing_storage(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ /* Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
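
Bouncing the free through an llist and a worker, as above, is the usual shape when an RCU callback must not sleep: the softirq-context callback only links the object onto a lock-free list and kicks a worker, which then does the blocking teardown in process context. A rough, self-contained sketch of that shape with placeholder names (blocking_teardown() stands in for the unbind/release work, not for any i915 routine):

    #include <linux/kernel.h>
    #include <linux/llist.h>
    #include <linux/workqueue.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct victim {
        struct rcu_head rcu;
        struct llist_node freed;
    };

    static LLIST_HEAD(free_list);
    static void free_worker(struct work_struct *work);
    static DECLARE_WORK(free_work, free_worker);

    static void blocking_teardown(struct victim *v)
    {
        /* may sleep: unbind, flush, take mutexes, ... */
        kfree(v);
    }

    static void free_worker(struct work_struct *work)
    {
        struct llist_node *list = llist_del_all(&free_list);
        struct victim *v, *next;

        llist_for_each_entry_safe(v, next, list, freed)
            blocking_teardown(v);
    }

    /* RCU callback: runs in softirq context, must not sleep, so just queue. */
    static void victim_free_rcu(struct rcu_head *head)
    {
        struct victim *v = container_of(head, struct victim, rcu);

        if (llist_add(&v->freed, &free_list))
            schedule_work(&free_work);
    }

    static void victim_put(struct victim *v)
    {
        call_rcu(&v->rcu, victim_free_rcu);
    }
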
- if (obj->ops->release)
- obj->ops->release(obj);
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev_priv, obj->base.size);
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
- kfree(obj->bit_17);
- i915_gem_object_free(obj);
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- intel_runtime_pm_put(dev_priv);
+ for_each_engine(engine, dev_priv, id)
+ GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}
int i915_gem_suspend(struct drm_device *dev)
@@ -4255,18 +4189,46 @@ int i915_gem_suspend(struct drm_device *dev)
goto err;
i915_gem_retire_requests(dev_priv);
+ GEM_BUG_ON(dev_priv->gt.active_requests);
+ assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
flush_delayed_work(&dev_priv->gt.idle_work);
+ flush_work(&dev_priv->mm.free_work);
 /* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
+ WARN_ON(!intel_execlists_idle(dev_priv));
+
+ /*
+ * Neither the BIOS, ourselves nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev_priv)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
return 0;
@@ -4279,8 +4241,10 @@ void i915_gem_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(dev_priv->gt.awake);
+
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
+ i915_gem_restore_gtt_mappings(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4291,55 +4255,51 @@ void i915_gem_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void i915_gem_init_swizzling(struct drm_device *dev)
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen < 5 ||
+ if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
-static void init_unused_ring(struct drm_device *dev, u32 base)
+static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
-static void init_unused_rings(struct drm_device *dev)
-{
- if (IS_I830(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- init_unused_ring(dev, SRB2_BASE);
- init_unused_ring(dev, SRB3_BASE);
- } else if (IS_GEN2(dev)) {
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- } else if (IS_GEN3(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, PRB2_BASE);
+static void init_unused_rings(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ init_unused_ring(dev_priv, SRB2_BASE);
+ init_unused_ring(dev_priv, SRB3_BASE);
+ } else if (IS_GEN2(dev_priv)) {
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ } else if (IS_GEN3(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, PRB2_BASE);
}
}
@@ -4348,31 +4308,34 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
+ dev_priv->gt.last_init_time = ktime_get();
+
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
+ if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HASWELL(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ if (IS_HASWELL(dev_priv))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev)) {
- if (IS_IVYBRIDGE(dev)) {
+ if (HAS_PCH_NOP(dev_priv)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
@@ -4380,18 +4343,18 @@ i915_gem_init_hw(struct drm_device *dev)
* will prevent c3 entry. Makes sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev);
+ init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
@@ -4490,21 +4453,15 @@ i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
- INIT_LIST_HEAD(&engine->request_list);
-}
-
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
@@ -4528,41 +4485,52 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- i915_gem_detect_bit_6_swizzle(dev);
+ i915_gem_detect_bit_6_swizzle(dev_priv);
}
-void
+int
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ int err = -ENOMEM;
+
+ dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->objects)
+ goto err_out;
+
+ dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->vmas)
+ goto err_objects;
+
+ dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU);
+ if (!dev_priv->requests)
+ goto err_vmas;
- dev_priv->objects =
- kmem_cache_create("i915_gem_object",
- sizeof(struct drm_i915_gem_object), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->vmas =
- kmem_cache_create("i915_gem_vma",
- sizeof(struct i915_vma), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->requests =
- kmem_cache_create("i915_gem_request",
- sizeof(struct drm_i915_gem_request), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU,
- NULL);
+ dev_priv->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!dev_priv->dependencies)
+ goto err_requests;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ INIT_LIST_HEAD(&dev_priv->gt.timelines);
+ err = i915_gem_timeline_init__global(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->context_list);
+ INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_engine_lists(&dev_priv->engine[i]);
+ INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4579,12 +4547,33 @@ i915_gem_load_init(struct drm_device *dev)
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
+
+ return 0;
+
+err_dependencies:
+ kmem_cache_destroy(dev_priv->dependencies);
+err_requests:
+ kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+ kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+ kmem_cache_destroy(dev_priv->objects);
+err_out:
+ return err;
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+ WARN_ON(!list_empty(&dev_priv->gt.timelines));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4633,7 +4622,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_list) {
+ list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -4669,7 +4658,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
@@ -4725,21 +4714,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
-{
- struct page *page;
-
- /* Only default objects have per-page dirty tracking */
- if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
- return NULL;
-
- page = i915_gem_object_get_page(obj, n);
- set_page_dirty(page);
- return page;
-}
-
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4758,14 +4732,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
if (ret)
goto fail;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto fail;
- i915_gem_object_pin_pages(obj);
- sg = obj->pages;
+ sg = obj->mm.pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
- obj->dirty = 1; /* Backing store is now out of date */
+ obj->mm.dirty = true; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) {
@@ -4780,3 +4753,156 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(ret);
}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookups of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ unsigned long exception, i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ exception =
+ RADIX_TREE_EXCEPTIONAL_ENTRY |
+ idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i,
+ (void *)exception);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radixtree will contain an exceptional entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(radix_tree_exception(sg))) {
+ unsigned long base =
+ (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
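
Stripped of the radixtree and the exceptional-entry encoding, the heart of the lookup above is a cached forward scan: remember the last scatterlist entry visited and only walk on from there, so sequential lookups are amortised O(1). A simplified sketch of just that part, with hypothetical names, assuming page-aligned sg lengths and pinned pages (both hold for the object's backing storage here):

    #include <linux/scatterlist.h>
    #include <linux/mm.h>

    struct sg_cursor {
        struct scatterlist *sg; /* last entry visited */
        unsigned int idx;       /* page index of that entry's first page */
    };

    static void sg_cursor_init(struct sg_cursor *c, struct sg_table *st)
    {
        c->sg = st->sgl;
        c->idx = 0;
    }

    /*
     * Monotonic forward lookup: callers must re-init the cursor whenever n
     * moves backwards (the real code falls back to the radixtree instead).
     */
    static struct page *
    sg_cursor_get_page(struct sg_cursor *c, unsigned int n)
    {
        unsigned int count = c->sg->length >> PAGE_SHIFT;

        while (c->idx + count <= n) {
            c->idx += count;
            c->sg = sg_next(c->sg);
            count = c->sg->length >> PAGE_SHIFT;
        }

        return nth_page(sg_page(c->sg), n - c->idx);
    }

The real code additionally records each visited entry in the radixtree so that backwards lookups do not have to rescan from the start.
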
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8292e797d9b5..51ec793f2e20 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,9 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
#endif
+#define I915_NUM_ENGINES 5
+
#endif /* __I915_GEM_H__ */
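
The do { } while (0) body matters precisely because the macro can expand to nothing: it keeps GEM_BUG_ON() a single well-formed statement so call sites compile identically with or without CONFIG_DRM_I915_DEBUG_GEM. A hypothetical call site, relying only on the GEM_BUG_ON() definition above:

    #include <linux/scatterlist.h>

    /* With a bare '#define GEM_BUG_ON(expr)' the if-branch below would
     * collapse to 'if (!pages) ;', which some compilers flag
     * (-Wempty-body); the do { } while (0) body expands to a proper
     * empty statement in the non-debug configuration.
     */
    static void record_page_count(struct sg_table *pages, unsigned int *count)
    {
        if (!pages)
            GEM_BUG_ON(*count);
        else
            *count = pages->nents;
    }
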
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed989596d9a3..b3bc119ec1bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp, *next;
+ struct drm_i915_gem_object *tmp;
struct list_head *list;
- int n;
+ int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
- list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
- &tmp->base.dev->struct_mutex))
+ if (i915_gem_object_is_active(tmp))
break;
- /* While we're looping, do some clean up */
- if (tmp->madv == __I915_MADV_PURGED) {
- list_del(&tmp->batch_pool_link);
- i915_gem_object_put(tmp);
- continue;
- }
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+ true));
if (tmp->base.size >= size) {
+ /* Clear the set of shared fences early */
+ ww_mutex_lock(&tmp->resv->lock, NULL);
+ reservation_object_add_excl_fence(tmp->resv, NULL);
+ ww_mutex_unlock(&tmp->resv->lock);
+
obj = tmp;
break;
}
}
if (obj == NULL) {
- int ret;
-
- obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- obj->madv = I915_MADV_DONTNEED;
}
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
list_move_tail(&obj->batch_pool_link, list);
- i915_gem_object_pin_pages(obj);
return obj;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e95736..1f94b8d6d83d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
+ kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
@@ -192,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
}
/* Default context will never have a file_priv */
- if (file_priv != NULL) {
+ ret = DEFAULT_CONTEXT_HANDLE;
+ if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
- } else
- ret = DEFAULT_CONTEXT_HANDLE;
+ }
+ ctx->user_handle = ret;
ctx->file_priv = file_priv;
- if (file_priv)
+ if (file_priv) {
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+ current->comm,
+ pid_nr(ctx->pid),
+ ctx->user_handle);
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+ }
- ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
return ctx;
+err_pid:
+ put_pid(ctx->pid);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt =
- i915_ppgtt_create(to_i915(dev), file_priv);
+ struct i915_hw_ppgtt *ppgtt;
+ ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -463,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -474,10 +488,11 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
@@ -492,13 +507,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915_gem_context_is_default(ctx))
continue;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
@@ -563,6 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -605,7 +621,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -634,7 +650,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -749,12 +765,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma = to->engine[RCS].state;
+ struct i915_vma *vma;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@@ -762,17 +802,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Clear this page out of any CPU caches for coherent swap-in/out. */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ret;
- }
-
/* Trying to pin first makes error handling easier. */
- ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
- if (ret)
- return ret;
+ vma = i915_gem_context_pin_legacy(to, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
/*
* Pin can switch back to the default context if we end up calling into
@@ -929,21 +962,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct i915_gem_timeline *timeline;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
- if (engine->last_context == NULL)
- continue;
-
- if (engine->last_context == dev_priv->kernel_context)
- continue;
-
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Queue this switch after all other activity */
+ list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ struct drm_i915_gem_request *prev;
+ struct intel_timeline *tl;
+
+ tl = &timeline->engine[engine->id];
+ prev = i915_gem_active_raw(&tl->last_request,
+ &dev_priv->drm.struct_mutex);
+ if (prev)
+ i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ &prev->submit,
+ GFP_KERNEL);
+ }
+
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68b45df..5e38299b5df6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- i915_gem_object_pin_pages(obj);
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
- goto err_unpin;
+ goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
- src = obj->pages->sgl;
+ src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- ret =-ENOMEM;
+ ret = -ENOMEM;
goto err_free_sg;
}
- mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
-err_unpin:
+err_unpin_pages:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
- mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- void *addr;
- int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- mutex_unlock(&dev->struct_mutex);
-
- return addr;
+ return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin_map(obj);
- mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
- return ret;
+
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex);
- return ret;
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-static void export_fences(struct drm_i915_gem_object *obj,
- struct dma_buf *dma_buf)
-{
- struct reservation_object *resv = dma_buf->resv;
- struct drm_i915_gem_request *req;
- unsigned long active;
- int idx;
-
- active = __I915_BO_ACTIVE(obj);
- if (!active)
- return;
-
- /* Serialise with execbuf to prevent concurrent fence-loops */
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* Mark the object for future fences before racily adding old fences */
- obj->base.dma_buf = dma_buf;
-
- ww_mutex_lock(&resv->lock, NULL);
-
- for_each_active(active, idx) {
- req = i915_gem_active_get(&obj->last_read[idx],
- &obj->base.dev->struct_mutex);
- if (!req)
- continue;
-
- if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
-
- i915_gem_request_put(req);
- }
-
- req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- if (req) {
- reservation_object_add_excl_fence(resv, &req->fence);
- i915_gem_request_put(req);
- }
-
- ww_mutex_unlock(&resv->lock);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
+ exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
- if (IS_ERR(dma_buf))
- return dma_buf;
-
- export_fences(obj, dma_buf);
- return dma_buf;
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- struct sg_table *sg;
-
- sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg))
- return PTR_ERR(sg);
-
- obj->pages = sg;
- return 0;
+ return dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
}
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- dma_buf_unmap_attachment(obj->base.import_attach,
- obj->pages, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(obj->base.import_attach, pages,
+ DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
+ obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5b6f81c1dbca..bd08814b015c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,13 +33,17 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool
-gpu_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_timeline *tl;
- for_each_engine(engine, dev_priv) {
- if (intel_engine_is_active(engine))
+ tl = &ggtt->base.timeline.engine[engine->id];
+ if (i915_gem_active_isset(&tl->last_request))
return false;
}
@@ -55,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
- if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
@@ -102,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next;
int ret;
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -152,7 +157,7 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (gpu_is_idle(dev_priv)) {
+ if (ggtt_is_idle(dev_priv)) {
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
@@ -212,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
+ lockdep_assert_held(&target->vm->dev->struct_mutex);
+
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
@@ -265,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7adb4c77cc7f..097d9d8c2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -288,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
if (DBG_USE_CPU_RELOC)
return DBG_USE_CPU_RELOC > 0;
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(to_i915(obj->base.dev)) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
cache->i915 = i915;
- cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ /* Must be a variable in the struct to allow GCC to unroll. */
+ cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->node.allocated = false;
}
@@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
ggtt->base.clear_range(&ggtt->base,
cache->node.start,
- cache->node.size,
- true);
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -419,17 +418,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
unsigned long offset;
void *vaddr;
- if (cache->node.allocated) {
- wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- cache->node.start, I915_CACHE_NONE, 0);
- cache->page = page;
- return unmask_page(cache->vaddr);
- }
-
if (cache->vaddr) {
- io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int ret;
@@ -467,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
+ wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -474,7 +465,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+ vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@@ -552,27 +543,13 @@ repeat:
return 0;
}
-static bool object_is_idle(struct drm_i915_gem_object *obj)
-{
- unsigned long active = i915_gem_object_get_active(obj);
- int idx;
-
- for_each_active(active, idx) {
- if (!i915_gem_active_is_idle(&obj->last_read[idx],
- &obj->base.dev->struct_mutex))
- return false;
- }
-
- return true;
-}
-
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@@ -591,7 +568,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
+ if (unlikely(IS_GEN6(dev_priv) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@@ -649,10 +626,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- /* We can't wait for rendering with pagefaults disabled */
- if (pagefault_disabled() && !object_is_idle(obj))
- return -EFAULT;
-
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@@ -679,12 +652,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
- int count = remain;
- if (count > ARRAY_SIZE(stack_reloc))
- count = ARRAY_SIZE(stack_reloc);
+ unsigned long unwritten;
+ unsigned int count;
+
+ count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
remain -= count;
- if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+ /* This is the fast path and we cannot handle a pagefault
+ * whilst holding the struct mutex lest the user pass in the
+ * relocations contained within a mmaped bo. In such a case
+ * the page fault handler would call i915_gem_fault() and
+ * we would try to acquire the struct mutex again. Obviously
+ * this is bad and so lockdep complains vehemently.
+ */
+ pagefault_disable();
+ unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
+ pagefault_enable();
+ if (unlikely(unwritten)) {
ret = -EFAULT;
goto out;
}
@@ -696,11 +680,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
goto out;
- if (r->presumed_offset != offset &&
- __put_user(r->presumed_offset,
- &user_relocs->presumed_offset)) {
- ret = -EFAULT;
- goto out;
+ if (r->presumed_offset != offset) {
+ pagefault_disable();
+ unwritten = __put_user(r->presumed_offset,
+ &user_relocs->presumed_offset);
+ pagefault_enable();
+ if (unlikely(unwritten)) {
+ /* Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed the
+ * reloc.presumed_offset nor will we
+ * change the execobject.offset, on a
+ * later call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang.
+ */
+ ret = -EFAULT;
+ goto out;
+ }
}
user_relocs++;
@@ -740,20 +739,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
struct i915_vma *vma;
int ret = 0;
- /* This is the fast path and we cannot handle a pagefault whilst
- * holding the struct mutex lest the user pass in the relocations
- * contained within a mmaped bo. For in such a case we, the page
- * fault handler would call i915_gem_fault() and we would try to
- * acquire the struct mutex again. Obviously this is bad and so
- * lockdep complains vehemently.
- */
- pagefault_disable();
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
- pagefault_enable();
return ret;
}
@@ -843,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(to_i915(vma->obj->base.dev)))
return false;
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -1111,44 +1101,20 @@ err:
return ret;
}
-static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
-{
- unsigned int mask;
-
- mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
- mask <<= I915_BO_ACTIVE_SHIFT;
-
- return mask;
-}
-
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- struct reservation_object *resv;
-
- if (obj->flags & other_rings) {
- ret = i915_gem_request_await_object
- (req, obj, obj->base.pending_write_domain);
- if (ret)
- return ret;
- }
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- ret = i915_sw_fence_await_reservation
- (&req->submit, resv, &i915_fence_ops,
- obj->base.pending_write_domain, 10*HZ,
- GFP_KERNEL | __GFP_NOWARN);
- if (ret < 0)
- return ret;
- }
+ ret = i915_gem_request_await_object
+ (req, obj, obj->base.pending_write_domain);
+ if (ret)
+ return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@@ -1281,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req,
unsigned int flags)
@@ -1290,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- obj->dirty = 1; /* be paranoid */
-
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@@ -1299,37 +1269,31 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
- i915_gem_object_set_active(obj, idx);
- i915_gem_active_set(&obj->last_read[idx], req);
+ if (!i915_vma_is_active(vma))
+ obj->active_count++;
+ i915_vma_set_active(vma, idx);
+ i915_gem_active_set(&vma->last_read[idx], req);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&obj->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, req);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
-
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
- struct reservation_object *resv;
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (!resv)
- return;
+ struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed
@@ -1599,12 +1563,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- engine = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
- engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(engine)) {
+ if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}
@@ -1659,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(dev)) {
+ if (!HAS_RESOURCE_STREAMER(dev_priv)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
@@ -1913,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(to_i915(dev)) < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 8df1fa7234e8..0efa3571afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
if (!fence)
return 0;
@@ -341,6 +343,11 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ /* Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@@ -361,14 +368,14 @@ i915_vma_get_fence(struct i915_vma *vma)
/**
* i915_gem_restore_fences - restore fence state
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -379,10 +386,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (vma && !i915_gem_object_is_tiled(vma->obj))
+ if (vma && !i915_gem_object_is_tiled(vma->obj)) {
+ GEM_BUG_ON(!reg->dirty);
+ GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+
+ list_move(&reg->link, &dev_priv->mm.fence_list);
+ vma->fence = NULL;
vma = NULL;
+ }
- fence_update(reg, vma);
+ fence_write(reg, vma);
+ reg->vma = vma;
}
}
@@ -436,19 +450,18 @@ void i915_gem_restore_fences(struct drm_device *dev)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -458,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
@@ -487,19 +500,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
+ !IS_G33(dev_priv))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -537,7 +551,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev) &&
+ if (IS_GEN4(dev_priv) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -628,6 +642,7 @@ i915_gem_swizzle_page(struct page *page)
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
@@ -638,7 +653,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again).
*/
void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -648,10 +664,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@@ -662,17 +677,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
- int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
@@ -687,7 +704,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
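
The two helpers above now receive the scatter-gather table explicitly instead of reading obj->pages, but the underlying bit-17 bookkeeping is unchanged: save one bit per page before the backing store goes away, then re-swizzle any page whose bit 17 differs once it returns. A minimal stand-alone sketch of that bookkeeping follows; the sketch_ types are simplified stand-ins, not kernel structures, and the snippet is not part of the patch.

	/* Stand-alone sketch; the page table here is a simplified stand-in,
	 * not a kernel scatter-gather table.
	 */
	#include <stdint.h>

	struct sketch_pages {
		unsigned int count;
		uint64_t *phys;		/* physical address of each page */
		unsigned char *bit17;	/* one saved bit per page */
	};

	/* Record bit 17 of every page's physical address before the backing
	 * store is released (the role of i915_gem_object_save_bit_17_swizzle()).
	 */
	static void sketch_save_bit_17(struct sketch_pages *p)
	{
		unsigned int i;

		for (i = 0; i < p->count; i++)
			p->bit17[i] = (p->phys[i] >> 17) & 1;
	}

	/* When the pages come back, possibly at different physical addresses,
	 * fix up any page whose bit 17 has flipped (the role of
	 * i915_gem_object_do_bit_17_swizzle()).
	 */
	static void sketch_fixup_bit_17(struct sketch_pages *p)
	{
		unsigned int i;

		for (i = 0; i < p->count; i++) {
			if (((p->phys[i] >> 17) & 1) != p->bit17[i]) {
				/* swizzle the page contents here */
			}
		}
	}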
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
new file mode 100644
index 000000000000..22c4a2d01adf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_FENCE_REG_H__
+#define __I915_FENCE_REG_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct i915_vma;
+
+struct drm_i915_fence_reg {
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
+ int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+};
+
+#endif
+
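
The new header only declares the per-register bookkeeping. For orientation, the sketch below illustrates the general pattern such a structure supports: recycling a small, fixed pool of fence registers in LRU order while skipping entries that are pinned in place. Everything in it is an illustrative stand-in under that assumption, not code from the driver.

	/* Sketch of a fence-register pool: a fixed set of registers recycled
	 * in LRU order, never stealing one that is pinned.
	 */
	#include <stdbool.h>
	#include <stddef.h>

	struct sketch_fence_reg {
		struct sketch_fence_reg *lru_next;	/* singly linked LRU for brevity */
		int pin_count;		/* >0 while the register must not be stolen */
		bool dirty;		/* tiling parameters changed since last write */
		void *vma;		/* current user, NULL when free */
	};

	/* Return the least-recently-used register that is not pinned; the
	 * caller would then detach it from its current user and reprogram it.
	 */
	static struct sketch_fence_reg *
	sketch_fence_find(struct sketch_fence_reg *lru_oldest)
	{
		struct sketch_fence_reg *reg;

		for (reg = lru_oldest; reg; reg = reg->lru_next)
			if (!reg->pin_count)
				return reg;

		return NULL;	/* every register is pinned */
	}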
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..b4bde1452f2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@@ -95,13 +96,6 @@
*
*/
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
-}
-
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -175,7 +169,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@@ -191,15 +185,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
- vma->size,
- true);
+ vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+ enum i915_cache_level level)
{
- gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
pte |= addr;
switch (level) {
@@ -234,9 +226,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -256,9 +248,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -280,9 +272,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags)
+ u32 flags)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -296,9 +288,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -309,9 +301,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -328,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_device *dev,
+static int __setup_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *kdev = &dev->pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
@@ -348,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev,
return 0;
}
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int setup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, I915_GFP_DMA);
+ return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}
-static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static void cleanup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (WARN_ON(!p->page))
return;
@@ -373,27 +367,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only a few exceptions for gen >=6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
-#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
-#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
+#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+ fill_page_dma_32((dev_priv), px_base(px), (v))
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
- const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
@@ -401,38 +397,37 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev, vaddr);
+ kunmap_page_dma(dev_priv, vaddr);
}
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
- const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
- fill_page_dma(dev, p, v);
+ fill_page_dma(dev_priv, p, v);
}
static int
-setup_scratch_page(struct drm_device *dev,
+setup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch,
gfp_t gfp)
{
- return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_device *dev,
+static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch)
{
- cleanup_page_dma(dev, scratch);
+ cleanup_page_dma(dev_priv, scratch);
}
-static struct i915_page_table *alloc_pt(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
struct i915_page_table *pt;
- const size_t count = INTEL_INFO(dev)->gen >= 8 ?
- GEN8_PTES : GEN6_PTES;
+ const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
@@ -445,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- ret = setup_px(dev, pt);
+ ret = setup_px(dev_priv, pt);
if (ret)
goto fail_page_m;
@@ -459,9 +454,10 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+static void free_pt(struct drm_i915_private *dev_priv,
+ struct i915_page_table *pt)
{
- cleanup_px(dev, pt);
+ cleanup_px(dev_priv, pt);
kfree(pt->used_ptes);
kfree(pt);
}
@@ -472,9 +468,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- fill_px(vm->dev, pt, scratch_pte);
+ fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -485,12 +481,12 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
WARN_ON(vm->scratch_page.daddr == 0);
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
- fill32_px(vm->dev, pt, scratch_pte);
+ fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -504,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
if (!pd->used_pdes)
goto fail_bitmap;
- ret = setup_px(dev, pd);
+ ret = setup_px(dev_priv, pd);
if (ret)
goto fail_page_m;
@@ -518,10 +514,11 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+static void free_pd(struct drm_i915_private *dev_priv,
+ struct i915_page_directory *pd)
{
if (px_page(pd)) {
- cleanup_px(dev, pd);
+ cleanup_px(dev_priv, pd);
kfree(pd->used_pdes);
kfree(pd);
}
@@ -534,13 +531,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(vm->dev, pd, scratch_pde);
+ fill_px(to_i915(vm->dev), pd, scratch_pde);
}
-static int __pdp_init(struct drm_device *dev,
+static int __pdp_init(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev);
+ size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
@@ -569,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
}
static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev, pdp);
+ ret = __pdp_init(dev_priv, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev, pdp);
+ ret = setup_px(dev_priv, pdp);
if (ret)
goto fail_page_m;
@@ -598,12 +595,12 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_device *dev,
+static void free_pdp(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev)) {
- cleanup_px(dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ cleanup_px(dev_priv, pdp);
kfree(pdp);
}
}
@@ -615,7 +612,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->dev, pdp, scratch_pdpe);
+ fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -626,7 +623,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(vm->dev, pml4, scratch_pml4e);
+ fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
@@ -637,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pdpe_t *page_directorypo;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
page_directorypo = kmap_px(pdp);
@@ -653,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
- WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
@@ -706,85 +703,172 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length,
- gen8_pte_t scratch_pte)
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+}
+
+/* Removes entries from a single page table, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ unsigned int num_entries = gen8_pte_count(start, length);
+ unsigned int pte = gen8_pte_index(start);
+ unsigned int pte_end = pte + num_entries;
gen8_pte_t *pt_vaddr;
- unsigned pdpe = gen8_pdpe_index(start);
- unsigned pde = gen8_pde_index(start);
- unsigned pte = gen8_pte_index(start);
- unsigned num_entries = length >> PAGE_SHIFT;
- unsigned last_pte, i;
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC);
- if (WARN_ON(!pdp))
- return;
+ if (WARN_ON(!px_page(pt)))
+ return false;
- while (num_entries) {
- struct i915_page_directory *pd;
- struct i915_page_table *pt;
+ GEM_BUG_ON(pte_end > GEN8_PTES);
- if (WARN_ON(!pdp->page_directory[pdpe]))
- break;
+ bitmap_clear(pt->used_ptes, pte, num_entries);
+
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
+ free_pt(to_i915(vm->dev), pt);
+ return true;
+ }
- pd = pdp->page_directory[pdpe];
+ pt_vaddr = kmap_px(pt);
+ while (pte < pte_end)
+ pt_vaddr[pte++] = scratch_pte;
+
+ kunmap_px(ppgtt, pt_vaddr);
+
+ return false;
+}
+
+/* Removes entries from a single page dir, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_table *pt;
+ uint64_t pde;
+ gen8_pde_t *pde_vaddr;
+ gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
+ I915_CACHE_LLC);
+
+ gen8_for_each_pde(pt, pd, start, length, pde) {
if (WARN_ON(!pd->page_table[pde]))
break;
- pt = pd->page_table[pde];
+ if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
+ __clear_bit(pde, pd->used_pdes);
+ pde_vaddr = kmap_px(pd);
+ pde_vaddr[pde] = scratch_pde;
+ kunmap_px(ppgtt, pde_vaddr);
+ }
+ }
- if (WARN_ON(!px_page(pt)))
- break;
+ if (bitmap_empty(pd->used_pdes, I915_PDES)) {
+ free_pd(to_i915(vm->dev), pd);
+ return true;
+ }
- last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES)
- last_pte = GEN8_PTES;
+ return false;
+}
- pt_vaddr = kmap_px(pt);
+/* Removes entries from a single page dir pointer, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_page_directory *pd;
+ uint64_t pdpe;
+ gen8_ppgtt_pdpe_t *pdpe_vaddr;
+ gen8_ppgtt_pdpe_t scratch_pdpe =
+ gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- for (i = pte; i < last_pte; i++) {
- pt_vaddr[i] = scratch_pte;
- num_entries--;
+ gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ if (WARN_ON(!pdp->page_directory[pdpe]))
+ break;
+
+ if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
+ __clear_bit(pdpe, pdp->used_pdpes);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pdpe_vaddr = kmap_px(pdp);
+ pdpe_vaddr[pdpe] = scratch_pdpe;
+ kunmap_px(ppgtt, pdpe_vaddr);
+ }
}
+ }
- kunmap_px(ppgtt, pt_vaddr);
+ mark_tlbs_dirty(ppgtt);
- pte = 0;
- if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
- break;
- pde = 0;
- }
+ if (USES_FULL_48BIT_PPGTT(dev_priv) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
+ free_pdp(dev_priv, pdp);
+ return true;
}
+
+ return false;
}
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+/* Removes entries from a single pml4.
+ * This is the top-level structure in 4-level page tables used on gen8+.
+ * Empty entries are always scratch pml4e.
+ */
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch);
+ struct i915_page_directory_pointer *pdp;
+ uint64_t pml4e;
+ gen8_ppgtt_pml4e_t *pml4e_vaddr;
+ gen8_ppgtt_pml4e_t scratch_pml4e =
+ gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
- gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
- scratch_pte);
- } else {
- uint64_t pml4e;
- struct i915_page_directory_pointer *pdp;
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
- gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
- gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
- scratch_pte);
+ gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ if (WARN_ON(!pml4->pdps[pml4e]))
+ break;
+
+ if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
+ __clear_bit(pml4e, pml4->used_pml4es);
+ pml4e_vaddr = kmap_px(pml4);
+ pml4e_vaddr[pml4e] = scratch_pml4e;
+ kunmap_px(ppgtt, pml4e_vaddr);
}
}
}
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
+ else
+ gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
+}
+
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
@@ -809,12 +893,12 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level, true);
+ cache_level);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
break;
pde = 0;
}
@@ -837,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -852,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct drm_device *dev,
+static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd)
{
int i;
@@ -864,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev,
if (WARN_ON(!pd->page_table[i]))
continue;
- free_pt(dev, pd->page_table[i]);
+ free_pt(dev_priv, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev);
+ vm->scratch_pd = alloc_pd(dev_priv);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev)) {
- vm->scratch_pdp = alloc_pdp(dev);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ vm->scratch_pdp = alloc_pdp(dev_priv);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -900,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev, vm->scratch_pd);
+ free_pd(dev_priv, vm->scratch_pd);
free_pt:
- free_pt(dev, vm->scratch_pt);
+ free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return ret;
}
@@ -948,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- if (USES_FULL_48BIT_PPGTT(dev))
- free_pdp(dev, vm->scratch_pdp);
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
+ free_pdp(dev_priv, vm->scratch_pdp);
+ free_pd(dev_priv, vm->scratch_pd);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
- gen8_free_page_tables(dev, pdp->page_directory[i]);
- free_pd(dev, pdp->page_directory[i]);
+ gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
+ free_pd(dev_priv, pdp->page_directory[i]);
}
- free_pdp(dev, pdp);
+ free_pdp(dev_priv, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
}
- cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+ cleanup_px(dev_priv, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(to_i915(vm->dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ if (!USES_FULL_48BIT_PPGTT(dev_priv))
+ gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
@@ -1026,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1038,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
continue;
}
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt))
goto unwind_out;
@@ -1052,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev, pd->page_table[pde]);
+ free_pt(dev_priv, pd->page_table[pde]);
return -ENOMEM;
}
@@ -1087,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1098,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
if (test_bit(pdpe, pdp->used_pdpes))
continue;
- pd = alloc_pd(dev);
+ pd = alloc_pd(dev_priv);
if (IS_ERR(pd))
goto unwind_out;
@@ -1112,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -1140,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1148,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev);
+ pdp = alloc_pdp(dev_priv);
if (IS_ERR(pdp))
goto unwind_out;
@@ -1166,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev, pml4->pdps[pml4e]);
+ free_pdp(dev_priv, pml4->pdps[pml4e]);
return -ENOMEM;
}
@@ -1208,16 +1294,6 @@ err_out:
return -ENOMEM;
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
@@ -1225,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
/* Wrap is never okay since we can only represent 48b, and we don't
@@ -1318,11 +1394,12 @@ err_out:
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+ free_pt(dev_priv,
+ pdp->page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
@@ -1373,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
return ret;
}
@@ -1383,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1452,9 +1529,9 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1474,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
int ret;
/* We allocate temp bitmap for page tables for no gain
@@ -1507,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1522,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ ret = setup_px(dev_priv, &ppgtt->pml4);
if (ret)
goto free_scratch;
@@ -1532,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ ret = __pdp_init(dev_priv, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1542,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
+ if (intel_vgpu_active(dev_priv)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1569,7 +1647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
@@ -1724,29 +1802,30 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static void gen8_ppgtt_enable(struct drm_device *dev)
+static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+ for_each_engine(engine, dev_priv, id) {
+ u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
-static void gen7_ppgtt_enable(struct drm_device *dev)
+static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
+ enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1754,16 +1833,15 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
-static void gen6_ppgtt_enable(struct drm_device *dev)
+static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1782,8 +1860,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
@@ -1794,7 +1871,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -1832,7 +1909,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(addr, cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1850,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1881,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -1929,7 +2005,7 @@ unwind_out:
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(vm->dev, pt);
+ free_pt(dev_priv, pt);
}
mark_tlbs_dirty(ppgtt);
@@ -1938,16 +2014,16 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -1958,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1976,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev, pt);
+ free_pt(dev_priv, pt);
gen6_free_scratch(vm);
}
@@ -1984,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2050,17 +2125,16 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ppgtt->switch_mm = gen7_mm_switch;
else
BUG();
@@ -2111,8 +2185,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
}
static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+ struct drm_i915_private *dev_priv,
+ const char *name)
{
+ i915_gem_timeline_init(dev_priv, &vm->timeline, name);
drm_mm_init(&vm->mm, vm->start, vm->total);
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
@@ -2120,44 +2196,50 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
-static void gtt_write_workarounds(struct drm_device *dev)
+static void i915_address_space_fini(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_gem_timeline_fini(&vm->timeline);
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+}
+static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+{
/* This function is for GTT-related workarounds. It is called on driver
* load and after a GPU reset, so you can place workarounds here even if
* they get overwritten by a GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+ struct drm_i915_file_private *file_priv,
+ const char *name)
{
int ret;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv);
+ i915_address_space_init(&ppgtt->base, dev_priv, name);
ppgtt->base.file = file_priv;
}
return ret;
}
-int i915_ppgtt_init_hw(struct drm_device *dev)
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
- gtt_write_workarounds(dev);
+ gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
@@ -2165,24 +2247,25 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (i915.enable_execlists)
return 0;
- if (!USES_PPGTT(dev))
+ if (!USES_PPGTT(dev_priv))
return 0;
- if (IS_GEN6(dev))
- gen6_ppgtt_enable(dev);
- else if (IS_GEN7(dev))
- gen7_ppgtt_enable(dev);
- else if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_enable(dev);
+ if (IS_GEN6(dev_priv))
+ gen6_ppgtt_enable(dev_priv);
+ else if (IS_GEN7(dev_priv))
+ gen7_ppgtt_enable(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_enable(dev_priv);
else
- MISSING_CASE(INTEL_INFO(dev)->gen);
+ MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ const char *name)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -2191,7 +2274,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
+ ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
@@ -2202,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
-void i915_ppgtt_release(struct kref *kref)
+void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
@@ -2214,8 +2297,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- list_del(&ppgtt->base.global_link);
- drm_mm_takedown(&ppgtt->base.mm);
+ i915_address_space_fini(&ppgtt->base);
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
@@ -2239,11 +2321,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
@@ -2260,7 +2343,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+ /* Engine specific init may not have been done till this point. */
+ if (dev_priv->engine[RCS])
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2273,33 +2359,32 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
- return 0;
+ return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2317,16 +2402,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2340,15 +2420,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gen8_pte_t __iomem *gtt_entries;
gen8_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level, true);
+ gtt_entry = gen8_pte_encode(addr, level);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@@ -2368,8 +2445,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
@@ -2408,16 +2483,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
/*
@@ -2437,15 +2507,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t __iomem *gtt_entries;
gen6_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = vm->pte_encode(addr, level, true, flags);
+ gtt_entry = vm->pte_encode(addr, level, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
}
@@ -2464,23 +2531,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void nop_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2488,9 +2548,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2498,21 +2555,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- use_scratch);
+ I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2520,9 +2572,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2530,13 +2579,11 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch, 0);
+ I915_CACHE_LLC, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2545,16 +2592,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2562,40 +2603,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
-
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool unused)
+ uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
- intel_gtt_clear_range(first_entry, num_entries);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2608,8 +2634,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
@@ -2625,6 +2653,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
@@ -2639,14 +2668,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt =
- to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
@@ -2657,21 +2687,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->flags & I915_VMA_GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
- vma->node.start, size,
- true);
+ vma->node.start, size);
+ intel_runtime_pm_put(i915);
+ }
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
- vma->node.start, size,
- true);
+ vma->node.start, size);
}
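
The hunks above drop the assert_rpm_atomic_begin()/assert_rpm_atomic_end() bookkeeping from the low-level GGTT helpers and instead bracket the insert/clear calls in the bind/unbind paths with intel_runtime_pm_get()/intel_runtime_pm_put(). A minimal sketch of that hoisting pattern, with hypothetical power_get()/power_put() helpers standing in for the runtime-PM calls (illustrative only, not the i915 API):

#include <stdio.h>

/* Hypothetical stand-ins for the runtime-PM reference calls. */
static int power_refs;
static void power_get(void) { power_refs++; }
static void power_put(void) { power_refs--; }

/* Leaf helper: touches the hardware but no longer manages power itself. */
static void write_entries(int n)
{
	printf("writing %d entries (power refs held: %d)\n", n, power_refs);
}

/* The caller owns the power reference for the whole operation. */
static void bind_range(int n)
{
	power_get();
	write_entries(n);
	power_put();
}

int main(void)
{
	bind_range(4);
	return 0;
}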
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2685,8 +2718,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
- dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2717,6 +2749,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
+ struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@@ -2724,45 +2757,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
+ &ggtt->error_capture,
+ 4096, 0, -1,
+ 0, ggtt->mappable_end,
+ 0, 0);
+ if (ret)
+ return ret;
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start, true);
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
- true);
+ ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
-
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ if (!ppgtt) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ret;
- }
+ if (ret)
+ goto err_ppgtt;
- if (ppgtt->base.allocate_va_range)
+ if (ppgtt->base.allocate_va_range) {
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
- if (ret) {
- ppgtt->base.cleanup(&ppgtt->base);
- kfree(ppgtt);
- return ret;
+ if (ret)
+ goto err_ppgtt_cleanup;
}
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
- ppgtt->base.total,
- true);
+ ppgtt->base.total);
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@@ -2770,6 +2806,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
}
return 0;
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ kfree(ppgtt);
+err:
+ drm_mm_remove_node(&ggtt->error_capture);
+ return ret;
}
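
The reworked i915_gem_init_ggtt() error handling above unwinds partially completed setup through a ladder of goto labels (err_ppgtt_cleanup, err_ppgtt, err). A self-contained sketch of that idiom, using placeholder step_a()/step_b() helpers rather than the real ppgtt/error-capture calls:

#include <stdlib.h>

/* Placeholder setup steps - not the real ppgtt/error-capture calls. */
static void *step_a(void) { return malloc(16); }
static int step_b(void *a) { return a ? 0 : -1; }

static int init_with_cleanup(void)
{
	void *a;
	int ret;

	a = step_a();
	if (!a)
		return -1;

	ret = step_b(a);
	if (ret)
		goto err_a;     /* unwind only what already succeeded */

	/* further steps would chain additional labels below */
	return 0;

err_a:
	free(a);
	return ret;
}

int main(void) { return init_with_cleanup() ? 1 : 0; }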
/**
@@ -2788,11 +2832,15 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
i915_gem_cleanup_stolen(&dev_priv->drm);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
- drm_mm_takedown(&ggtt->base.mm);
- list_del(&ggtt->base.global_link);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_address_space_fini(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
@@ -2881,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
struct pci_dev *pdev = ggtt->base.dev->pdev;
phys_addr_t phys_addr;
int ret;
@@ -2895,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(ggtt->base.dev))
+ if (IS_BROXTON(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2904,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(ggtt->base.dev,
- &ggtt->base.scratch_page,
- GFP_DMA32);
+ ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -2995,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->dev, &vm->scratch_page);
+ cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3190,11 +3237,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
ggtt->base.total -= PAGE_SIZE;
- i915_address_space_init(&ggtt->base, dev_priv);
+ i915_address_space_init(&ggtt->base, dev_priv, "[global]");
ggtt->base.total += PAGE_SIZE;
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
dev_priv->ggtt.mappable_base,
@@ -3209,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- ret = i915_gem_init_stolen(&dev_priv->drm);
+ ret = i915_gem_init_stolen(dev_priv);
if (ret)
goto out_gtt_cleanup;
@@ -3228,23 +3277,21 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_list) {
+ &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3266,8 +3313,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (INTEL_GEN(dev_priv) >= 8) {
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3275,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
@@ -3296,137 +3343,28 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
-{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
-
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
- return;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-void i915_vma_destroy(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->fence);
-
- list_del(&vma->vm_link);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-void i915_vma_close(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
-
- list_del_init(&vma->obj_link);
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
- int i;
-
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->exec_list);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
- init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
- vma->vm = vm;
- vma->obj = obj;
- vma->size = obj->base.size;
-
- if (view) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
- vma->size <<= PAGE_SHIFT;
- }
- }
-
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
- } else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- }
-
- list_add_tail(&vma->obj_link, &obj->vma_list);
- return vma;
-}
-
-static inline bool vma_matches(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- if (vma->vm != vm)
- return false;
-
- if (!i915_vma_is_ggtt(vma))
- return true;
-
- if (!view)
- return vma->ggtt_view.type == 0;
-
- if (vma->ggtt_view.type != view->type)
- return false;
-
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params)) == 0;
-}
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
-
- return __i915_vma_create(obj, vm, view);
-}
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
- struct i915_vma *vma;
+ struct rb_node *rb;
- list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
- if (vma_matches(vma, vm, view))
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
+
+ cmp = i915_vma_compare(vma, vm, view);
+ if (cmp == 0)
return vma;
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
return NULL;
}
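
i915_gem_obj_to_vma() above now descends obj->vma_tree using i915_vma_compare() instead of walking the object's VMA list. A minimal user-space sketch of the same comparator-driven descent over a plain binary search tree (integer keys purely for illustration; the direction convention depends on how the comparator orders keys):

#include <stdio.h>
#include <stddef.h>

struct node {
	int key;
	struct node *left, *right;
};

static struct node *lookup(struct node *rb, int want)
{
	while (rb) {
		long cmp = (long)want - rb->key;

		if (cmp == 0)
			return rb;        /* exact match */
		rb = cmp < 0 ? rb->left : rb->right;
	}
	return NULL;                      /* not found */
}

int main(void)
{
	struct node c = { 30, NULL, NULL };
	struct node a = { 10, NULL, NULL };
	struct node root = { 20, &a, &c };

	printf("%s\n", lookup(&root, 30) ? "found" : "missing");
	return 0;
}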
@@ -3437,11 +3375,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma)
- vma = __i915_vma_create(obj, vm, view);
+ if (!vma) {
+ vma = i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
+ }
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
@@ -3507,7 +3448,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3543,35 +3484,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg;
- struct sg_page_iter obj_sg_iter;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->params.partial.size;
+ unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
+ iter = i915_gem_object_get_sg(obj,
+ view->params.partial.offset,
+ &offset);
+ GEM_BUG_ON(!iter);
+
sg = st->sgl;
st->nents = 0;
- for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
- view->params.partial.offset)
- {
- if (st->nents >= view->params.partial.size)
- break;
+ do {
+ unsigned int len;
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
- sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
- sg_dma_len(sg) = PAGE_SIZE;
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
- sg = sg_next(sg);
st->nents++;
- }
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ return st;
+ }
- return st;
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
err_sg_alloc:
kfree(st);
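
The rewritten intel_partial_pages() above no longer copies one page per scatterlist entry; it locates the segment containing params.partial.offset via i915_gem_object_get_sg() and then emits at most one output entry per source segment, clamping each length against the remaining page count. A user-space sketch of that slicing logic over an array of segment lengths (page units, illustrative only):

#include <stdio.h>

/* Source segment lengths in pages (stand-in for the sg_table). */
static const unsigned int seg_pages[] = { 4, 2, 8 };

static void emit_partial(unsigned int start, unsigned int count)
{
	unsigned int base = 0;   /* first page of the current segment */
	unsigned int i;

	for (i = 0; i < sizeof(seg_pages) / sizeof(seg_pages[0]) && count; i++) {
		unsigned int len, offset;

		if (start >= base + seg_pages[i]) {   /* skip whole segment */
			base += seg_pages[i];
			continue;
		}

		offset = start - base;                /* offset into segment */
		len = seg_pages[i] - offset;
		if (len > count)
			len = count;                  /* clamp to what is left */

		printf("chunk: segment %u, offset %u, %u page(s)\n",
		       i, offset, len);

		count -= len;
		start += len;
		base += seg_pages[i];
	}
}

int main(void)
{
	emit_partial(3, 5);   /* pages 3..7 spanning segments 0..2 */
	return 0;
}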
@@ -3584,11 +3537,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
+ /* The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3612,94 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
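
The comment added above states the invariant that vma->pages may only be dereferenced while the backing obj->mm.pages are pinned, since the borrowed sg_table can be regenerated otherwise. A tiny sketch of that rule as a pin-count-guarded accessor, with assert() standing in for GEM_BUG_ON (illustrative only):

#include <assert.h>
#include <stdio.h>

struct object {
	int pages_pin_count;   /* stand-in for obj->mm.pages_pin_count */
	const char *pages;     /* stand-in for the borrowed sg_table */
};

static void pin_pages(struct object *obj)   { obj->pages_pin_count++; }
static void unpin_pages(struct object *obj) { obj->pages_pin_count--; }

/* Only legal while the pages are pinned - mirrors the GEM_BUG_ON. */
static const char *get_pages(struct object *obj)
{
	assert(obj->pages_pin_count > 0);
	return obj->pages;
}

int main(void)
{
	struct object obj = { 0, "backing pages" };

	pin_pages(&obj);
	printf("%s\n", get_pages(&obj));
	unpin_pages(&obj);
	return 0;
}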
-/**
- * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
- * @vma: VMA to map
- * @cache_level: mapping cache level
- * @flags: flags like global or local mapping
- *
- * DMA addresses are taken from the scatter-gather table of this object (or of
- * this VMA in case of non-default GGTT views) and PTE entries set up.
- * Note that DMA addresses are also the only part of the SG table we care about.
- */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 bind_flags;
- u32 vma_flags;
- int ret;
-
- if (WARN_ON(flags == 0))
- return -EINVAL;
-
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
-
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
- if (bind_flags == 0)
- return 0;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
-
- vma->flags |= bind_flags;
- return 0;
-}
-
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
-{
- void __iomem *ptr;
-
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
-
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
-
- ptr = vma->iomap;
- if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
- vma->node.start,
- vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
-
- vma->iomap = ptr;
- }
-
- __i915_vma_pin(vma);
- return ptr;
-}
-
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
-{
- struct i915_vma *vma;
-
- vma = fetch_and_zero(p_vma);
- if (!vma)
- return;
-
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..4f35be4c26c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,7 +35,9 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/mm.h>
+#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_FENCE_REG_NONE -1
@@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+struct sg_table;
+
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
@@ -168,133 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
- * will always be <= an object's lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-};
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
-
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_GGTT;
-}
-
-static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
-}
+struct i915_vma;
struct i915_page_dma {
struct page *page;
@@ -341,6 +219,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
+ struct i915_gem_timeline timeline;
struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
@@ -395,7 +274,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
@@ -403,8 +282,7 @@ struct i915_address_space {
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch);
+ uint64_t length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
@@ -450,6 +328,8 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
+
+ struct drm_mm_node error_capture;
};
struct i915_hw_ppgtt {
@@ -602,16 +482,24 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
+ struct drm_i915_file_private *fpriv,
+ const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -624,11 +512,13 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
@@ -646,88 +536,4 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
- return 0;
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
-
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_PIN_MASK;
-}
-
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
-{
- return i915_vma_pin_count(vma);
-}
-
-static inline void __i915_vma_pin(struct i915_vma *vma)
-{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
-}
-
-static inline void __i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->flags--;
-}
-
-static inline void i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- __i915_vma_unpin(vma);
-}
-
-/**
- * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
- * @vma: VMA to iomap
- *
- * The passed-in VMA has to be pinned in the global GTT mappable region.
- * An extra pinning of the VMA is acquired for the returned iomapping,
- * the caller must call i915_vma_unpin_iomap to relinquish the pinning
- * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
- *
- * Returns a valid iomapped pointer or ERR_PTR.
- */
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
-
-/**
- * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
- * @vma: VMA to unpin
- *
- * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
- *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
- */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 000000000000..4b3ff3e5b911
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int max_order;
+ gfp_t gfp;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
+ max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+ max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+ gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ gfp &= ~__GFP_HIGHMEM;
+ gfp |= __GFP_DMA32;
+ }
+
+ do {
+ int order = min(fls(npages) - 1, max_order);
+ struct page *page;
+
+ do {
+ page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ if (page)
+ break;
+ if (!order--)
+ goto err;
+
+ /* Limit subsequent allocations as well */
+ max_order = order;
+ } while (1);
+
+ sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ st->nents++;
+
+ npages -= 1 << order;
+ if (!npages) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while (1);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ /* Mark the pages as dontneed whilst they are still pinned. As soon
+ * as they are unpinned they are allowed to be reaped by the shrinker,
+ * and the caller is expected to repopulate - the contents of this
+ * object are only valid whilst active and pinned.
+ */
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return st;
+
+err:
+ sg_mark_end(sg);
+ internal_free_pages(st);
+ return ERR_PTR(-ENOMEM);
+}
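
i915_gem_object_get_pages_internal() above fills the object with the largest page-order chunks it can get, dropping the order (and capping later attempts) whenever an allocation fails. A user-space sketch of that fallback loop with malloc() standing in for alloc_pages() (illustrative; the real code also honours the swiotlb segment size and the DMA32 limit):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Pretend allocator: refuses anything larger than 4 pages. */
static void *fake_alloc_pages(int order)
{
	if ((1u << order) > 4)
		return NULL;
	return malloc((size_t)PAGE_SIZE << order);
}

static int fill(unsigned int npages, int max_order)
{
	while (npages) {
		/* Largest order that still fits the remaining count. */
		int order = max_order;
		void *chunk;

		while ((1u << order) > npages)
			order--;

		for (;;) {
			chunk = fake_alloc_pages(order);
			if (chunk)
				break;
			if (!order--)
				return -1;      /* even order-0 failed */
			max_order = order;      /* cap later attempts too */
		}

		printf("got %u page(s)\n", 1u << order);
		free(chunk);
		npages -= 1u << order;
	}
	return 0;
}

int main(void)
{
	return fill(21, 10) ? 1 : 0;
}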
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ internal_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = i915_gem_object_get_pages_internal,
+ .put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(&i915->drm);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
new file mode 100644
index 000000000000..6a368de9d81e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <linux/reservation.h>
+
+#include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include <drm/drmP.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
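
The drm_i915_gem_object_ops table above decouples an object from its backing storage through get_pages()/put_pages() hooks plus capability flags. A bare-bones sketch of the same ops-table pattern in C (names are illustrative, not the i915 interface):

#include <stdio.h>
#include <stdlib.h>

struct buffer;

struct buffer_ops {
	unsigned int flags;
#define BUF_SHRINKABLE 0x1
	void *(*get_pages)(struct buffer *);
	void (*put_pages)(struct buffer *, void *);
};

struct buffer {
	const struct buffer_ops *ops;
	size_t size;
};

static void *heap_get_pages(struct buffer *buf)
{
	return malloc(buf->size);
}

static void heap_put_pages(struct buffer *buf, void *pages)
{
	(void)buf;
	free(pages);
}

static const struct buffer_ops heap_ops = {
	.flags     = BUF_SHRINKABLE,
	.get_pages = heap_get_pages,
	.put_pages = heap_put_pages,
};

int main(void)
{
	struct buffer buf = { &heap_ops, 4096 };
	void *pages = buf.ops->get_pages(&buf);

	printf("shrinkable: %d\n", !!(buf.ops->flags & BUF_SHRINKABLE));
	buf.ops->put_pages(&buf, pages);
	return 0;
}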
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+ struct rb_root vma_tree;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
+
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
+
+ struct list_head batch_pool_link;
+
+ unsigned long flags;
+
+ /**
+ * Have we taken a reference for the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF 0
+
+ /*
+ * Is the object to be mapped as read-only to the GPU?
+ * Only honoured if the hardware has the relevant pte bit.
+ */
+ unsigned long gt_ro:1;
+ unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_gem_active frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMAs actually bound by this object */
+ unsigned int bind_count;
+ unsigned int active_count;
+ unsigned int pin_display;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct reservation_object *resv;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @file, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
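
i915_gem_object_lookup() above combines an RCU-protected idr_find() with kref_get_unless_zero(): the object found under the read lock is only returned if its refcount had not already dropped to zero. A user-space sketch of the get-unless-zero step using C11 atomics (the RCU side is omitted; purely illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct object {
	atomic_int refcount;
};

/* Take a reference only if the count is still non-zero,
 * mirroring the intent of kref_get_unless_zero(). */
static int get_unless_zero(struct object *obj)
{
	int old = atomic_load(&obj->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->refcount,
						 &old, old + 1))
			return 1;   /* reference taken */
	}
	return 0;                   /* object already dying */
}

int main(void)
{
	struct object live = { 1 }, dead = { 0 };

	printf("live: %d, dead: %d\n",
	       get_unless_zero(&live), get_unless_zero(&dead));
	return 0;
}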
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return obj->active_count;
+}
+
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9afd5f8..5af19b0bf713 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct render_state {
+struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
- u32 aux_batch_size;
- u32 aux_batch_offset;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
};
static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(req->i915)) {
+ switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/
#define OUT_BATCH(batch, i, val) \
do { \
- if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
- ret = -ENOSPC; \
- goto err_out; \
- } \
+ if ((i) >= PAGE_SIZE / sizeof(u32)) \
+ goto err; \
(batch)[(i)++] = (val); \
} while(0)
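
OUT_BATCH() above emits one dword and now bails out through a goto label if the write index would run off the page. A compact sketch of a bounds-checked emit macro of that shape (buffer size and names are illustrative):

#include <stdio.h>

#define BATCH_DWORDS 8

/* Emit one value, jumping to err on overflow - same shape as OUT_BATCH. */
#define EMIT(batch, i, val) \
do { \
	if ((i) >= BATCH_DWORDS) \
		goto err; \
	(batch)[(i)++] = (val); \
} while (0)

int main(void)
{
	unsigned int batch[BATCH_DWORDS];
	unsigned int i = 0, n;

	for (n = 0; n < 6; n++)
		EMIT(batch, i, 0x1000 + n);

	printf("emitted %u dword(s)\n", i);
	return 0;

err:
	fprintf(stderr, "batch overflow at dword %u\n", i);
	return 1;
}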
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+ struct drm_i915_private *i915)
{
- struct drm_device *dev = so->vma->vm->dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
- const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
+ struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
- struct page *page;
+ unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
- page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
- d = kmap(page);
+ d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
- if (has_64bit_reloc) {
+ if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0) {
- ret = -EINVAL;
- goto err_out;
- }
+ rodata->batch[i + 1] != 0)
+ goto err;
d[i++] = s;
s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ goto err;
+ }
+
+ so->batch_offset = so->vma->node.start;
+ so->batch_size = rodata->batch_items * sizeof(u32);
+
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
- so->aux_batch_offset = i * sizeof(u32);
+ so->aux_offset = i * sizeof(u32);
- if (HAS_POOLED_EU(dev)) {
+ if (HAS_POOLED_EU(i915)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
}
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
- so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+ so->aux_size = i * sizeof(u32) - so->aux_offset;
+ so->aux_offset += so->batch_offset;
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
- so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
-
- kunmap(page);
-
- ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
- if (ret)
- return ret;
-
- if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
- return -EINVAL;
- }
+ so->aux_size = ALIGN(so->aux_size, 8);
- return 0;
+ if (needs_clflush)
+ drm_clflush_virt_range(d, i * sizeof(u32));
+ kunmap_atomic(d);
-err_out:
- kunmap(page);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
+
+err:
+ kunmap_atomic(d);
+ ret = -EINVAL;
+ goto out;
}
#undef OUT_BATCH
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
- struct render_state so;
+ struct intel_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
- if (WARN_ON(req->engine->id != RCS))
- return -ENOENT;
+ if (engine->id != RCS)
+ return 0;
- so.rodata = render_state_get_rodata(req);
- if (!so.rodata)
+ rodata = render_state_get_rodata(engine);
+ if (!rodata)
return 0;
- if (so.rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > 4096)
return -EINVAL;
- obj = i915_gem_object_create(&req->i915->drm, 4096);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return -ENOMEM;
- so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
- if (IS_ERR(so.vma)) {
- ret = PTR_ERR(so.vma);
- goto err_obj;
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free;
}
- ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
- if (ret)
+ so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so->vma)) {
+ ret = PTR_ERR(so->vma);
goto err_obj;
+ }
+
+ so->rodata = rodata;
+ engine->render_state = so;
+ return 0;
+
+err_obj:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(so);
+ return ret;
+}
+
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+ struct intel_render_state *so;
+ int ret;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- ret = render_state_setup(&so);
+ so = req->engine->render_state;
+ if (!so)
+ return 0;
+
+ /* Recreate the page after shrinking */
+ if (!so->vma->obj->mm.pages)
+ so->batch_offset = -1;
+
+ ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
- goto err_unpin;
+ return ret;
+
+ if (so->vma->node.start != so->batch_offset) {
+ ret = render_state_setup(so, req->i915);
+ if (ret)
+ goto err_unpin;
+ }
- ret = req->engine->emit_bb_start(req, so.vma->node.start,
- so.rodata->batch_items * 4,
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
- if (so.aux_batch_size > 8) {
+ if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
- (so.vma->node.start +
- so.aux_batch_offset),
- so.aux_batch_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, req, 0);
+ i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
- i915_vma_unpin(so.vma);
-err_obj:
- i915_gem_object_put(obj);
+ i915_vma_unpin(so->vma);
return ret;
}
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+ struct intel_render_state *so;
+ struct drm_i915_gem_object *obj;
+
+ so = fetch_and_zero(&engine->render_state);
+ if (!so)
+ return;
+
+ obj = so->vma->obj;
+
+ i915_vma_close(so->vma);
+ __i915_gem_object_release_unless_active(obj);
+
+ kfree(so);
+}
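
i915_gem_render_state_emit() above reuses the per-engine render-state object across requests: it records the GGTT offset the batch contents were written for and only reruns render_state_setup() when the VMA has been repinned at a different address or its pages were reaped by the shrinker. A small sketch of that cache-by-address idiom (values and helpers are illustrative):

#include <stdio.h>

struct cached_batch {
	unsigned long setup_for;   /* address the contents were written for */
};

#define UNSET (~0ul)

static void setup(struct cached_batch *cb, unsigned long addr)
{
	printf("rewriting batch for 0x%lx\n", addr);
	cb->setup_for = addr;
}

/* Called once per submission with the (possibly new) pinned address. */
static void emit(struct cached_batch *cb, unsigned long pinned_addr)
{
	if (cb->setup_for != pinned_addr)   /* moved or never set up */
		setup(cb, pinned_addr);
	printf("emitting batch at 0x%lx\n", pinned_addr);
}

int main(void)
{
	struct cached_batch cb = { UNSET };

	emit(&cb, 0x1000);   /* first use: set up */
	emit(&cb, 0x1000);   /* same address: reuse contents */
	emit(&cb, 0x8000);   /* repinned elsewhere: set up again */
	return 0;
}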
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f06e9c..87481845799d 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
struct drm_i915_gem_request;
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..b8f403faadbb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -23,31 +23,26 @@
*/
#include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
#include "i915_drv.h"
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
}
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* Timelines are bound by eviction to a VM. However, since
- * we only have a global seqno at the moment, we only have
- * a single timeline. Note that each timeline will have
- * multiple execution contexts (fence contexts) as we allow
- * engines within a single timeline to execute in parallel.
- */
- return "global";
+ return to_request(fence)->timeline->common->name;
}
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
{
return i915_gem_request_completed(to_request(fence));
}
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
if (i915_fence_signaled(fence))
return false;
@@ -56,63 +51,27 @@ static bool i915_fence_enable_signaling(struct fence *fence)
return true;
}
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
- signed long timeout_jiffies)
+ signed long timeout)
{
- s64 timeout_ns, *timeout;
- int ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- timeout_ns = jiffies_to_nsecs(timeout_jiffies);
- timeout = &timeout_ns;
- } else {
- timeout = NULL;
- }
-
- ret = i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
- if (ret == -ETIME)
- return 0;
-
- if (ret < 0)
- return ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
- timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
- return timeout_jiffies;
+ return i915_wait_request(to_request(fence), interruptible, timeout);
}
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
-{
- snprintf(str, size, "%u", fence->seqno);
-}
-
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%u",
- intel_engine_get_seqno(to_request(fence)->engine));
-}
-
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
kmem_cache_free(req->i915->requests, req);
}
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
.get_driver_name = i915_fence_get_driver_name,
.get_timeline_name = i915_fence_get_timeline_name,
.enable_signaling = i915_fence_enable_signaling,
.signaled = i915_fence_signaled,
.wait = i915_fence_wait,
.release = i915_fence_release,
- .fence_value_str = i915_fence_value_str,
- .timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -154,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+static void
+__i915_priotree_add_dependency(struct i915_priotree *pt,
+ struct i915_priotree *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &pt->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+}
+
+static int
+i915_priotree_add_dependency(struct drm_i915_private *i915,
+ struct i915_priotree *pt,
+ struct i915_priotree *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ return 0;
+}
+
+static void
+i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+{
+ struct i915_dependency *dep, *next;
+
+ GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+
+ /* Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+}
+
+static void
+i915_priotree_init(struct i915_priotree *pt)
+{
+ INIT_LIST_HEAD(&pt->signalers_list);
+ INIT_LIST_HEAD(&pt->waiters_list);
+ RB_CLEAR_NODE(&pt->node);
+ pt->priority = INT_MIN;
+}
+
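Taken together, the helpers above give every request two intrusive lists: signalers_list records who it must wait for, waiters_list records who is waiting on it, with each edge owned by a single i915_dependency node. A rough sketch of the lifetime of such an edge, using only the static helpers above (the locals are hypothetical):

    /* Sketch only: request B made dependent on request A, then torn down. */
    static int example_dependency_lifetime(struct drm_i915_private *i915)
    {
            struct i915_priotree a, b; /* stand-ins for two requests' priotrees */
            int err;

            i915_priotree_init(&a);
            i915_priotree_init(&b);

            /* b depends on a: the new node sits on b.signalers_list and
             * a.waiters_list at the same time.
             */
            err = i915_priotree_add_dependency(i915, &b, &a);
            if (err)
                    return err;

            /* Retirement may happen in either order; fini unlinks both ends
             * and frees any remaining I915_DEPENDENCY_ALLOC nodes.
             */
            i915_priotree_fini(i915, &b);
            i915_priotree_fini(i915, &a);
            return 0;
    }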
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -164,8 +199,17 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct i915_gem_active *active, *next;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!request->i915->gt.active_requests);
+
trace_i915_gem_request_retire(request);
- list_del(&request->link);
+
+ spin_lock_irq(&request->engine->timeline->lock);
+ list_del_init(&request->link);
+ spin_unlock_irq(&request->engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -177,6 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
+ if (!--request->i915->gt.active_requests) {
+ GEM_BUG_ON(!request->i915->gt.awake);
+ mod_delayed_work(request->i915->wq,
+ &request->i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -214,6 +264,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
+
+ dma_fence_signal(&request->fence);
+
+ i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
}
@@ -223,10 +277,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(list_empty(&req->link));
+ if (list_empty(&req->link))
+ return;
do {
- tmp = list_first_entry(&engine->request_list,
+ tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
@@ -253,39 +308,50 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
return 0;
}
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
+ struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
- ret = intel_engine_idle(engine,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
- }
- i915_gem_retire_requests(dev_priv);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
- while (intel_kick_waiters(dev_priv) ||
- intel_kick_signalers(dev_priv))
- yield();
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ while (intel_breadcrumbs_busy(i915))
+ cond_resched(); /* spin until threads are complete */
}
+ atomic_set(&timeline->next_seqno, seqno);
/* Finally reset hw state */
- for_each_engine(engine, dev_priv)
- intel_engine_init_seqno(engine, seqno);
+ for_each_engine(engine, i915, id)
+ intel_engine_init_global_seqno(engine, seqno);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+ }
+ }
return 0;
}
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
@@ -293,29 +359,87 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
- if (ret)
+ return i915_gem_init_global_seqno(dev_priv, seqno - 1);
+}
+
+static int reserve_global_seqno(struct drm_i915_private *i915)
+{
+ u32 active_requests = ++i915->gt.active_requests;
+ u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ int ret;
+
+ /* Reservation is fine until we need to wrap around */
+ if (likely(next_seqno + active_requests > next_seqno))
+ return 0;
+
+ ret = i915_gem_init_global_seqno(i915, 0);
+ if (ret) {
+ i915->gt.active_requests--;
return ret;
+ }
- dev_priv->next_seqno = seqno;
return 0;
}
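The reservation test relies on u32 wrap-around: once next_seqno plus the number of outstanding requests overflows, the sum compares lower than next_seqno and the global timeline is rewound to 0 before any more seqnos are handed out. The same test in isolation, with example values:

    /* Sketch only: the overflow check used by reserve_global_seqno(). */
    static bool example_seqno_space_left(u32 next_seqno, u32 active_requests)
    {
            /* e.g. next_seqno = 0xfffffffe, active_requests = 3: the u32 sum
             * wraps to 1, 1 > 0xfffffffe is false, so the caller must reset
             * the timeline via i915_gem_init_global_seqno(i915, 0).
             */
            return next_seqno + active_requests > next_seqno;
    }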
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
- /* reserve 0 for non-seqno */
- if (unlikely(dev_priv->next_seqno == 0)) {
- int ret;
+ /* next_seqno only incremented under a mutex */
+ return ++tl->next_seqno.counter;
+}
- ret = i915_gem_init_seqno(dev_priv, 0);
- if (ret)
- return ret;
+static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+ return atomic_inc_return(&tl->next_seqno);
+}
- dev_priv->next_seqno = 1;
- }
+void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
+ u32 seqno;
- *seqno = dev_priv->next_seqno++;
- return 0;
+ /* Transfer from per-context onto the global per-engine timeline */
+ timeline = engine->timeline;
+ GEM_BUG_ON(timeline == request->timeline);
+ assert_spin_locked(&timeline->lock);
+
+ seqno = timeline_get_seqno(timeline->common);
+ GEM_BUG_ON(!seqno);
+ GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+ request->previous_seqno = timeline->last_submitted_seqno;
+ timeline->last_submitted_seqno = seqno;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = seqno;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_enable_signaling(request);
+ spin_unlock(&request->lock);
+
+ GEM_BUG_ON(!request->global_seqno);
+ engine->emit_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
+
+ spin_lock(&request->timeline->lock);
+ list_move_tail(&request->link, &timeline->requests);
+ spin_unlock(&request->timeline->lock);
+
+ i915_sw_fence_commit(&request->execute);
+}
+
+void i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_submit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
@@ -324,15 +448,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), submit);
- /* Will be called from irq-context when using foreign DMA fences */
-
switch (state) {
case FENCE_COMPLETE:
- request->engine->last_submitted_seqno = request->fence.seqno;
request->engine->submit_request(request);
break;
case FENCE_FREE:
+ i915_gem_request_put(request);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), execute);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ i915_gem_request_put(request);
break;
}
@@ -357,9 +497,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
- u32 seqno;
int ret;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
@@ -368,10 +509,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
+ ret = reserve_global_seqno(dev_priv);
+ if (ret)
+ return ERR_PTR(ret);
+
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->request_list,
+ req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
- if (req && i915_gem_request_completed(req))
+ if (req && __i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -382,13 +527,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* of being read by __i915_gem_active_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we chase the request->engine pointer,
- * read the request->fence.seqno and increment the reference count.
+ * read the request->global_seqno and increment the reference count.
*
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
* it is either still in use, or has been reallocated and reset
- * with fence_init(). This increment is safe for release as we check
- * that the request we have a reference to and matches the active
+ * with dma_fence_init(). This increment is safe for release as we
+ * check that the request we have a reference to and matches the active
* request.
*
* Before we increment the refcount, we chase the request->engine
@@ -403,21 +548,32 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Do not use kmem_cache_zalloc() here!
*/
req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
- ret = i915_gem_get_seqno(dev_priv, &seqno);
- if (ret)
- goto err;
+ req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(req->timeline == engine->timeline);
spin_lock_init(&req->lock);
- fence_init(&req->fence,
- &i915_fence_ops,
- &req->lock,
- engine->fence_context,
- seqno);
+ dma_fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ req->timeline->fence_context,
+ __timeline_get_seqno(req->timeline->common));
+
+ /* We bump the ref for the fence chain */
+ i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
+ i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
+
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
- i915_sw_fence_init(&req->submit, submit_notify);
+ i915_priotree_init(&req->priotree);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
@@ -425,6 +581,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = i915_gem_context_get(ctx);
/* No zalloc, must clear what we need by hand */
+ req->global_seqno = 0;
req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -437,6 +594,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* away, e.g. because a GPU scheduler has deferred it.
*/
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
@@ -456,8 +614,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
err_ctx:
i915_gem_context_put(ctx);
-err:
kmem_cache_free(dev_priv->requests, req);
+err_unreserve:
+ dev_priv->gt.active_requests--;
return ERR_PTR(ret);
}
@@ -465,15 +624,36 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
- int idx, ret;
+ int ret;
GEM_BUG_ON(to == from);
- if (to->engine == from->engine)
+ if (to->engine->schedule) {
+ ret = i915_priotree_add_dependency(to->i915,
+ &to->priotree,
+ &from->priotree);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (to->timeline == from->timeline)
return 0;
- idx = intel_engine_sync_index(from->engine, to->engine);
- if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ if (to->engine == from->engine) {
+ ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (!from->global_seqno) {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -491,7 +671,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+ return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *array;
+ int ret;
+ int i;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return 0;
+
+ if (dma_fence_is_i915(fence))
+ return i915_gem_request_await_request(req, to_request(fence));
+
+ if (!dma_fence_is_array(fence)) {
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ fence, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ /* Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+
+ array = to_dma_fence_array(fence);
+ for (i = 0; i < array->num_fences; i++) {
+ struct dma_fence *child = array->fences[i];
+
+ if (dma_fence_is_i915(child))
+ ret = i915_gem_request_await_request(req,
+ to_request(child));
+ else
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ child, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
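i915_gem_request_await_dma_fence() is the single funnel for foreign fences, decomposing fence-arrays and short-circuiting native i915 fences. A sketch of a plausible caller bridging from an explicit-fencing fd (the sync_file plumbing here is an assumption, not part of this patch):

    /* Sketch only: make a request wait on a fence fd. Requires
     * <linux/sync_file.h>; the -EINVAL for a bad fd is illustrative.
     */
    static int example_await_fence_fd(struct drm_i915_gem_request *req, int fd)
    {
            struct dma_fence *fence;
            int err;

            fence = sync_file_get_fence(fd);
            if (!fence)
                    return -EINVAL;

            err = i915_gem_request_await_dma_fence(req, fence);
            dma_fence_put(fence);
            return err;
    }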
@@ -520,43 +747,52 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct dma_fence *excl;
+ int ret = 0;
if (write) {
- active_mask = i915_gem_object_get_active(obj);
- active = obj->last_read;
+ struct dma_fence **shared;
+ unsigned int count, i;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ if (ret)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
- for_each_active(active_mask, idx) {
- struct drm_i915_gem_request *request;
- int ret;
-
- request = i915_gem_active_peek(&active[idx],
- &obj->base.dev->struct_mutex);
- if (!request)
- continue;
+ if (excl) {
+ if (ret == 0)
+ ret = i915_gem_request_await_dma_fence(to, excl);
- ret = i915_gem_request_await_request(to, request);
- if (ret)
- return ret;
+ dma_fence_put(excl);
}
- return 0;
+ return ret;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- dev_priv->gt.active_engines |= intel_engine_flag(engine);
if (dev_priv->gt.awake)
return;
+ GEM_BUG_ON(!dev_priv->gt.active_requests);
+
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
@@ -579,11 +815,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
+ struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
- u32 request_start;
- u32 reserved_tail;
- int ret;
+ int err;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/*
@@ -591,8 +827,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- request_start = ring->tail;
- reserved_tail = request->reserved_space;
request->reserved_space = 0;
/*
@@ -603,10 +837,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* what.
*/
if (flush_caches) {
- ret = engine->emit_flush(request, EMIT_FLUSH);
+ err = engine->emit_flush(request, EMIT_FLUSH);
/* Not allowed to fail! */
- WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+ WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
/* Record the position of the start of the breadcrumb so that
@@ -614,20 +848,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
+ err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(err);
request->postfix = ring->tail;
-
- /* Not allowed to fail! */
- ret = engine->emit_request(request);
- WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
- /* Sanity check that the reserved size was large enough. */
- ret = ring->tail - request_start;
- if (ret < 0)
- ret += ring->size;
- WARN_ONCE(ret > reserved_tail,
- "Not enough space reserved (%d bytes) "
- "for adding the request (%d bytes)\n",
- reserved_tail, ret);
+ ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -635,21 +859,46 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&engine->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev)
+ if (prev) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
+ if (engine->schedule)
+ __i915_priotree_add_dependency(&request->priotree,
+ &prev->priotree,
+ &request->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&request->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
+ timeline->last_submitted_seqno = request->fence.seqno;
+ i915_gem_active_set(&timeline->last_request, request);
- request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_pending_seqno;
- engine->last_pending_seqno = request->fence.seqno;
- i915_gem_active_set(&engine->last_request, request);
- list_add_tail(&request->link, &engine->request_list);
list_add_tail(&request->ring_link, &ring->request_list);
+ request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
+ /* Let the backend know a new request has arrived that may need
+ * to adjust the existing execution schedule due to a high priority
+ * request - i.e. we may want to preempt the current request in order
+ * to run a high priority dependency chain *before* we can execute this
+ * request.
+ *
+ * This is called before the request is ready to run so that we can
+ * decide whether to preempt the entire chain and make it ready to
+ * run at the earliest possible convenience.
+ */
+ if (engine->schedule)
+ engine->schedule(request, request->ctx->priority);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -714,7 +963,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
@@ -723,82 +972,108 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
if (busywait_stop(timeout_us, cpu))
break;
- cpu_relax_lowlatency();
+ cpu_relax();
} while (!need_resched());
return false;
}
+static long
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
+{
+ const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+ DEFINE_WAIT(reset);
+ DEFINE_WAIT(wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(q, &reset);
+
+ do {
+ prepare_to_wait(&request->execute.wait, &wait, state);
+
+ if (i915_sw_fence_done(&request->execute))
+ break;
+
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&request->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(request->i915);
+ reset_wait_queue(q, &reset);
+ continue;
+ }
+
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ } while (timeout);
+ finish_wait(&request->execute.wait, &wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(q, &reset);
+
+ return timeout;
+}
+
/**
* i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
* @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
*
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
+ * in via the flags; conversely, if the struct_mutex is not held, the caller
+ * must not specify that the wait is locked.
*
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
*/
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
struct intel_wait wait;
- unsigned long timeout_remain;
- int ret = 0;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
+ GEM_BUG_ON(timeout < 0);
if (i915_gem_request_completed(req))
- return 0;
-
- timeout_remain = MAX_SCHEDULE_TIMEOUT;
- if (timeout) {
- if (WARN_ON(*timeout < 0))
- return -EINVAL;
+ return timeout;
- if (*timeout == 0)
- return -ETIME;
-
- /* Record current time in case interrupted, or wedged */
- timeout_remain = nsecs_to_jiffies_timeout(*timeout);
- *timeout += ktime_get_raw_ns();
- }
+ if (!timeout)
+ return -ETIME;
trace_i915_gem_request_wait_begin(req);
- /* This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
- gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
+ if (timeout < 0)
+ goto complete;
+
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
+ }
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
@@ -808,7 +1083,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- intel_wait_init(&wait, req->fence.seqno);
+ intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -818,16 +1093,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
if (signal_pending_state(state, current)) {
- ret = -ERESTARTSYS;
+ timeout = -ERESTARTSYS;
break;
}
- timeout_remain = io_schedule_timeout(timeout_remain);
- if (timeout_remain == 0) {
- ret = -ETIME;
+ if (!timeout) {
+ timeout = -ETIME;
break;
}
+ timeout = io_schedule_timeout(timeout);
+
if (intel_wait_complete(&wait))
break;
@@ -874,74 +1150,32 @@ wakeup:
complete:
trace_i915_gem_request_wait_end(req);
- if (timeout) {
- *timeout -= ktime_get_raw_ns();
- if (*timeout < 0)
- *timeout = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regrssion from the timespec->ktime conversion.
- */
- if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
- *timeout = 0;
- }
-
- if (IS_RPS_USER(rps) &&
- req->fence.seqno == req->engine->last_submitted_seqno) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&req->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&req->i915->rps.client_lock);
- }
-
- return ret;
+ return timeout;
}
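Because the return value is now the remaining jiffies (or a negative error) rather than 0/-errno, callers that only care about success fold it back themselves; a sketch of the conversion, matching the pattern i915_gem_active_wait() adopts later in this patch:

    /* Sketch only: waiting forever and reporting just success/failure. */
    static int example_wait_for_request(struct drm_i915_gem_request *req)
    {
            long ret;

            ret = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
                                    MAX_SCHEDULE_TIMEOUT);
            return ret < 0 ? ret : 0;
    }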
-static bool engine_retire_requests(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
- list_for_each_entry_safe(request, next, &engine->request_list, link) {
- if (!i915_gem_request_completed(request))
- return false;
+ list_for_each_entry_safe(request, next,
+ &engine->timeline->requests, link) {
+ if (!__i915_gem_request_completed(request))
+ return;
i915_gem_request_retire(request);
}
-
- return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (dev_priv->gt.active_engines == 0)
+ if (!dev_priv->gt.active_requests)
return;
- GEM_BUG_ON(!dev_priv->gt.awake);
-
- for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
- if (engine_retire_requests(engine))
- dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-
- if (dev_priv->gt.active_engines == 0)
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
+ for_each_engine(engine, dev_priv, id)
+ engine_retire_requests(engine);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..e2b077df2da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,11 +25,14 @@
#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "i915_gem.h"
#include "i915_sw_fence.h"
+struct drm_file;
+struct drm_i915_gem_object;
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -41,6 +44,33 @@ struct intel_signal_node {
struct intel_wait wait;
};
+struct i915_dependency {
+ struct i915_priotree *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+/* Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct rb_node node;
+ int priority;
+#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
/**
* Request queue structure.
*
@@ -62,7 +92,7 @@ struct intel_signal_node {
* The requests are reference counted.
*/
struct drm_i915_gem_request {
- struct fence fence;
+ struct dma_fence fence;
spinlock_t lock;
/** On Which ring this request was generated */
@@ -81,10 +111,39 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
+
+ /* A list of everyone we wait upon, and everyone who waits upon us.
+ * Even though we will not be submitted to the hardware before the
+ * submit fence is signaled (it waits for all external events as well
+ * as our own requests), the scheduler still needs to know the
+ * dependency tree for the lifetime of the request (from execbuf
+ * to retirement), i.e. bidirectional dependency information for the
+ * request not tied to individual fences.
+ */
+ struct i915_priotree priotree;
+ struct i915_dependency dep;
+
+ u32 global_seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -140,14 +199,11 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
-
- /** Link in the execlist submission queue, guarded by execlist_lock. */
- struct list_head execlist_link;
};
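With the extra execute fence the request now exposes three observable phases before completion; a sketch of how a reader could classify them using only helpers introduced or kept by this patch (the helper itself is hypothetical and races with submission, so it is illustrative only):

    /* Sketch only: the request lifecycle as seen through its fences. */
    static const char *example_request_phase(struct drm_i915_gem_request *req)
    {
            if (!i915_sw_fence_done(&req->submit))
                    return "waiting for dependencies"; /* not yet queued */
            if (!i915_sw_fence_done(&req->execute))
                    return "queued";                   /* not yet sent to hw */
            if (!i915_gem_request_completed(req))
                    return "executing";
            return "completed";                        /* dma-fence signaled */
    }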
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -159,43 +215,31 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->fence.seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
- GEM_BUG_ON(fence && !fence_is_i915(fence));
+ GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence);
}
static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
- return to_request(fence_get(&req->fence));
+ return to_request(dma_fence_get(&req->fence));
}
static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
- return to_request(fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&req->fence));
}
static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
- fence_put(&req->fence);
+ dma_fence_put(&req->fence);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -214,6 +258,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
@@ -221,18 +267,21 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request);
+void i915_gem_request_submit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -245,17 +294,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->fence.seqno);
+ req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_completed(req);
}
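Both helpers bottom out in i915_seqno_passed(), which orders breadcrumb values with wrap-safe signed arithmetic; a global_seqno of 0 is reserved to mean "not yet submitted to the global timeline", hence the early bail-outs above. A toy illustration (the comparison body is assumed to be the usual signed-difference test):

    /* Sketch only: wrap-safe ordering of 32-bit seqnos. */
    static inline bool example_seqno_passed(u32 hws_seqno, u32 req_seqno)
    {
            /* e.g. hws_seqno = 0x00000002 after wrapping, req_seqno =
             * 0xfffffffe: the u32 difference is 4, non-negative as s32,
             * so the request is correctly reported as completed.
             */
            return (s32)(hws_seqno - req_seqno) >= 0;
    }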
bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +332,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
- return (i915_gem_request_started(request) &&
+ return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
@@ -497,7 +566,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
* when successful. That is, if i915_gem_request_get_rcu()
* returns the request (and so with the reference counted
@@ -552,53 +621,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
}
/**
- * i915_gem_active_is_idle - report whether the active tracker is idle
- * @active - the active tracker
- *
- * i915_gem_active_is_idle() returns true if the active tracker is currently
- * unassigned or if the request is complete (but not yet retired). Requires
- * the caller to hold struct_mutex (but that can be relaxed if desired).
- */
-static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return !i915_gem_active_peek(active, mutex);
-}
-
-/**
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning. Note that it does not guarantee that the request is
- * retired first, see i915_gem_active_retire().
- *
- * i915_gem_active_wait() returns immediately if the active
- * request is already complete.
- */
-static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
-{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_active_peek(active, mutex);
- if (!request)
- return 0;
-
- return i915_wait_request(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
-}
-
-/**
- * i915_gem_active_wait_unlocked - waits until the request is completed
- * @active - the active request on which to wait
* @flags - how to wait
* @timeout - how long to wait at most
* @rps - userspace client to charge for a waitboost
*
- * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning.
*
@@ -614,21 +643,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code.
*/
static inline int
-i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
struct drm_i915_gem_request *request;
- int ret = 0;
+ long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, timeout, rps);
+ ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -645,7 +671,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- int ret;
+ long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
@@ -653,8 +679,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
- if (ret)
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
return ret;
list_del_init(&active->link);
@@ -665,24 +691,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
-/* Convenience functions for peeking at state inside active's request whilst
- * guarded by the struct_mutex.
- */
-
-static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
-}
-
-static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
-}
-
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d02f30b..401006b4c6a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,17 +35,22 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
+
+ case MUTEX_TRYLOCK_RECURSIVE:
+ *unlock = false;
+ return true;
+ }
+
+ BUG();
}
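mutex_trylock_recursive() lets the shrinker distinguish "struct_mutex is held by someone else" (back off) from "we already hold it ourselves" (proceed, but do not unlock). A sketch of the expected calling pattern from a scan callback (shrink_control handling elided; the wrapper name is hypothetical):

    /* Sketch only: shrinker scan using the recursive-trylock helper. */
    static unsigned long example_shrinker_scan(struct drm_i915_private *dev_priv,
                                               unsigned long nr_to_scan)
    {
            unsigned long freed;
            bool unlock;

            if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
                    return SHRINK_STOP; /* owned by another task, try later */

            freed = i915_gem_shrink(dev_priv, nr_to_scan,
                                    I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);

            if (unlock)
                    mutex_unlock(&dev_priv->drm.struct_mutex);
            return freed;
    }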
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -66,8 +71,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Only shmemfs objects are backed by swap */
- if (!obj->base.filp)
+ if (!obj->mm.pages)
+ return false;
+
+ /* Consider only shrinkable objects. */
+ if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +86,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->pages_pin_count > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -88,7 +96,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
- return swap_available() || obj->madv == I915_MADV_DONTNEED;
+ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+}
+
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ return !READ_ONCE(obj->mm.pages);
}
/**
@@ -128,6 +143,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -171,40 +190,51 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_list))) {
- list_move_tail(&obj->global_list, &still_in_list);
+ global_link))) {
+ list_move_tail(&obj->global_link, &still_in_list);
+ if (!obj->mm.pages) {
+ list_del_init(&obj->global_link);
+ continue;
+ }
if (flags & I915_SHRINK_PURGEABLE &&
- obj->madv != I915_MADV_DONTNEED)
+ obj->mm.madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
- !is_vmalloc_addr(obj->mapping))
+ !is_vmalloc_addr(obj->mm.mapping))
continue;
- if ((flags & I915_SHRINK_ACTIVE) == 0 &&
- i915_gem_object_is_active(obj))
+ if (!(flags & I915_SHRINK_ACTIVE) &&
+ (i915_gem_object_is_active(obj) ||
+ obj->framebuffer_references))
continue;
if (!can_release_pages(obj))
continue;
- i915_gem_object_get(obj);
-
- /* For the unbound phase, this should be a no-op! */
- i915_gem_object_unbind(obj);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- i915_gem_object_put(obj);
+ if (unsafe_drop_pages(obj)) {
+ /* May arrive from get_pages on another bo */
+ mutex_lock_nested(&obj->mm.lock,
+ I915_MM_SHRINKER);
+ if (!obj->mm.pages) {
+ __i915_gem_object_invalidate(obj);
+ list_del_init(&obj->global_link);
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+ mutex_unlock(&obj->mm.lock);
+ }
}
- list_splice(&still_in_list, phase->list);
+ list_splice_tail(&still_in_list, phase->list);
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ if (unlock)
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
@@ -238,19 +268,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
-
- *unlock = false;
- } else
- *unlock = true;
-
- return true;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -267,11 +284,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -372,13 +389,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59989e8ee5dc..ebaa941c83af 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -89,9 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
@@ -109,13 +108,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*
*/
base = 0;
- if (INTEL_INFO(dev)->gen >= 3) {
+ if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev)) {
+ } else if (IS_I865G(dev_priv)) {
u32 tseg_size = 0;
u16 toud = 0;
u8 tmp;
@@ -138,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud);
base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev)) {
+ } else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -154,7 +153,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev)) {
+ } else if (IS_845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -178,7 +177,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev)) {
+ } else if (IS_I830(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -204,7 +203,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
+ !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -214,7 +214,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u64 ggtt_start, ggtt_end;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -252,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -263,14 +263,14 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
- r = devm_request_mem_region(dev->dev, base + 1,
+ r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev)) {
+ if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
base = 0;
@@ -407,9 +407,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-int i915_gem_init_stolen(struct drm_device *dev)
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -417,7 +416,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
@@ -426,7 +425,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -437,7 +436,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
case 3:
break;
case 4:
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -456,7 +455,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
break;
default:
if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
@@ -514,12 +513,10 @@ i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
- DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > ggtt->stolen_size - size);
+ GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -528,11 +525,11 @@ i915_pages_create_for_stolen(struct drm_device *dev,
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -545,31 +542,36 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- BUG();
- return -EINVAL;
+ return i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
}
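Failure is now reported with ERR_PTR() rather than NULL, so the ->get_pages caller can distinguish the error code with IS_ERR()/PTR_ERR(); a sketch of that call-site pattern (the caller name is hypothetical, the real consumer being the common get-pages path):

    /* Sketch only: consuming the ERR_PTR-style return. */
    static int example_attach_stolen_pages(struct drm_i915_gem_object *obj)
    {
            struct sg_table *pages;

            pages = i915_gem_object_get_pages_stolen(obj);
            if (IS_ERR(pages))
                    return PTR_ERR(pages);

            /* ... hand 'pages' over as obj->mm.pages ... */
            return 0;
    }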
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- /* Should only be called during free */
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ /* Should only be called from i915_gem_object_release_stolen() */
+ sg_free_table(pages);
+ kfree(pages);
}
-
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
- if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
+ GEM_BUG_ON(!stolen);
+
+ __i915_gem_object_unpin_pages(obj);
+
+ i915_gem_stolen_remove_node(dev_priv, stolen);
+ kfree(stolen);
}
+
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
@@ -589,19 +591,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
- obj->pages = i915_pages_create_for_stolen(dev,
- stolen->start, stolen->size);
- if (obj->pages == NULL)
- goto cleanup;
-
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
-
- i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
-
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(to_i915(dev)) ?
+ I915_CACHE_LLC : I915_CACHE_NONE;
+
+ if (i915_gem_object_pin_pages(obj))
+ goto cleanup;
return obj;
@@ -621,7 +617,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
@@ -697,10 +692,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
@@ -714,20 +713,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
+ goto err_pages;
}
- vma->pages = obj->pages;
+ vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
- list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- i915_gem_object_pin_pages(obj);
-
return obj;
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
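
The stolen-memory hunks above move ->get_pages()/->put_pages() from filling obj->pages to passing an explicit struct sg_table*, and they replace NULL returns with the kernel's ERR_PTR() convention so callers can tell "no memory" from other failures. A minimal caller-side sketch of that convention; example_get_pages()/example_caller() are hypothetical, only ERR_PTR/IS_ERR/PTR_ERR from <linux/err.h> and the scatterlist helpers are real APIs:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper mirroring the new ->get_pages() contract:
 * return a valid sg_table pointer, or an encoded errno via ERR_PTR().
 */
static struct sg_table *example_get_pages(void)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	return st;
}

static int example_caller(void)
{
	struct sg_table *pages = example_get_pages();

	if (IS_ERR(pages))		/* decode the errno, never dereference */
		return PTR_ERR(pages);

	/* ... use pages ... */
	sg_free_table(pages);
	kfree(pages);
	return 0;
}
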
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a14b1e3d4c78..c85e7b06bdba 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -60,7 +60,8 @@
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_private *dev_priv,
+ int stride, int size, int tiling_mode)
{
int tile_width;
@@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ if (IS_GEN2(dev_priv) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
@@ -80,17 +81,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
- if (IS_GEN3(dev)) {
+ if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
@@ -103,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -198,14 +199,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev,
+ if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
@@ -260,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) {
struct i915_vma *vma;
- if (obj->pages &&
- obj->madv == I915_MADV_WILLNEED &&
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE)
- i915_gem_object_unpin_pages(obj);
- if (!i915_gem_object_is_tiled(obj))
- i915_gem_object_pin_pages(obj);
+ if (args->tiling_mode == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
+ mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
@@ -301,8 +308,6 @@ err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return err;
}
@@ -326,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (obj) {
+ args->tiling_mode =
+ READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+ err = 0;
+ }
+ rcu_read_unlock();
+ if (unlikely(err))
+ return err;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -339,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
+ default:
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
- default:
- DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@@ -356,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- i915_gem_object_put_unlocked(obj);
return 0;
}
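
The get_tiling hunk above drops the reference-counted object lookup in favour of i915_gem_object_lookup_rcu() under rcu_read_lock() plus a single READ_ONCE() of tiling_and_stride, so the ioctl no longer needs struct_mutex or a long-lived reference. A generic sketch of that read-side pattern, assuming a hypothetical record_lookup_rcu() for an RCU-managed table (the i915 helpers themselves are not reproduced here):

#include <linux/rcupdate.h>

struct record {
	unsigned int mode;	/* updated by writers, read locklessly */
};

/* Hypothetical lookup; stands in for i915_gem_object_lookup_rcu(). */
struct record *record_lookup_rcu(u32 handle);

static int query_mode(u32 handle, unsigned int *out)
{
	struct record *rec;
	int err = -ENOENT;

	rcu_read_lock();
	rec = record_lookup_rcu(handle);
	if (rec) {
		/* Snapshot once; the record may be freed after rcu_read_unlock(). */
		*out = READ_ONCE(rec->mode);
		err = 0;
	}
	rcu_read_unlock();

	return err;
}
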
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
new file mode 100644
index 000000000000..bf8a471b61e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static int __i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name,
+ struct lock_class_key *lockclass,
+ const char *lockname)
+{
+ unsigned int i;
+ u64 fences;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ timeline->i915 = i915;
+ timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+ if (!timeline->name)
+ return -ENOMEM;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+ fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ tl->fence_context = fences++;
+ tl->common = timeline;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
+ spin_lock_init(&tl->lock);
+#endif
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ }
+
+ return 0;
+}
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915, timeline, name,
+ &class, "&timeline->lock");
+}
+
+int i915_gem_timeline_init__global(struct drm_i915_private *i915)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915,
+ &i915->gt.global_timeline,
+ "[execution]",
+ &class, "&global_timeline->lock");
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+ lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+ list_del(&tl->link);
+ kfree(tl->name);
+}
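
A short usage sketch for the new timeline API above, assuming the caller serialises on dev->struct_mutex as the lockdep assertions require; the "example" timeline name and both wrapper functions are illustrative only:

/* Sketch: create and tear down a named timeline under struct_mutex. */
static int example_setup_timeline(struct drm_i915_private *i915,
				  struct i915_gem_timeline *tl)
{
	int err;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_timeline_init(i915, tl, "example");
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static void example_teardown_timeline(struct drm_i915_private *i915,
				      struct i915_gem_timeline *tl)
{
	mutex_lock(&i915->drm.struct_mutex);
	i915_gem_timeline_fini(tl);
	mutex_unlock(&i915->drm.struct_mutex);
}
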
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644
index 000000000000..98d99a62b4ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 last_submitted_seqno;
+
+ spinlock_t lock;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU-guarded pointer to the last request. No reference is
+ * held to the request; users must carefully acquire a reference to
+ * the request using i915_gem_active_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
+ u32 sync_seqno[I915_NUM_ENGINES];
+
+ struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+ struct list_head link;
+ atomic_t next_seqno;
+
+ struct drm_i915_private *i915;
+ const char *name;
+
+ struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *tl,
+ const char *name);
+int i915_gem_timeline_init__global(struct drm_i915_private *i915);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
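
Given the layout above, each per-engine intel_timeline can reach its owning i915_gem_timeline (and the shared name) through the common back-pointer; a trivial accessor sketch, not part of the patch:

/* Sketch: resolve the owning timeline's name from a per-engine slice. */
static inline const char *
intel_timeline_name(const struct intel_timeline *tl)
{
	return tl->common->name;
}
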
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f5abc9..d068af2ec3a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -61,33 +61,26 @@ struct i915_mmu_object {
bool attached;
};
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
- unsigned long active = __I915_BO_ACTIVE(obj);
- int idx;
-
- for_each_active(active, idx)
- i915_gem_active_wait_unlocked(&obj->last_read[idx],
- 0, NULL, NULL);
-}
-
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
- wait_rendering(obj);
+ i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
- if (obj->pages != NULL) {
- /* We are inside a kthread context and can't be interrupted */
- WARN_ON(i915_gem_object_unbind(obj));
- WARN_ON(i915_gem_object_put_pages(obj));
- }
+ /* We are inside a kthread context and can't be interrupted */
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ WARN_ONCE(obj->mm.pages,
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ obj->bind_count,
+ atomic_read(&obj->mm.pages_pin_count),
+ obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
@@ -436,24 +429,25 @@ err:
return ret;
}
-static int
+static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
+ struct sg_table *pages;
int ret;
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
- sg_free_table(obj->pages);
- kfree(obj->pages);
- obj->pages = NULL;
+ sg_free_table(pages);
+ kfree(pages);
+ return ERR_PTR(ret);
}
- return ret;
+ return pages;
}
static int
@@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- struct drm_device *dev = obj->base.dev;
const int npages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
@@ -522,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL);
+ pvec + pinned, NULL, NULL);
if (ret < 0)
break;
@@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&obj->mm.lock);
if (obj->userptr.work == &work->work) {
+ struct sg_table *pages = ERR_PTR(ret);
+
if (pinned == npages) {
- ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
- if (ret == 0) {
- list_add_tail(&obj->global_list,
- &to_i915(dev)->mm.unbound_list);
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ if (!IS_ERR(pages)) {
+ __i915_gem_object_set_pages(obj, pages);
pinned = 0;
+ pages = NULL;
}
}
- obj->userptr.work = ERR_PTR(ret);
- }
- obj->userptr.workers--;
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
+ obj->userptr.work = ERR_CAST(pages);
+ }
+ mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
+ i915_gem_object_put(obj);
put_task_struct(work->task);
kfree(work);
}
-static int
+static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
@@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
- if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
- return -EAGAIN;
-
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
- obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
@@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work);
*active = true;
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
-static int
+static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ struct sg_table *pages;
int pinned, ret;
bool active;
@@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
+ return ERR_CAST(obj->userptr.work);
else
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
@@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false;
if (pinned < 0)
- ret = pinned, pinned = 0;
+ pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
- ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+ pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
- ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (ret) {
+ pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
- return ret;
+ return pages;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
- if (obj->madv != I915_MADV_WILLNEED)
- obj->dirty = 0;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ obj->mm.dirty = false;
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
@@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
@@ -762,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
* LLC and with broken snooping.
*/
@@ -816,7 +808,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
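
With this change the userptr backend's ->get_pages() also follows the sg_table*/ERR_PTR contract; ERR_PTR(-EAGAIN) signals that a worker was scheduled and the pages are not yet available, while any other encoded errno is fatal. A hedged caller sketch — backend_get_pages() and example_acquire() are hypothetical, IS_ERR/PTR_ERR are the real <linux/err.h> helpers:

#include <linux/err.h>

struct sg_table *backend_get_pages(void);	/* hypothetical backend hook */

static int example_acquire(struct sg_table **out)
{
	struct sg_table *pages = backend_get_pages();

	if (IS_ERR(pages)) {
		/* -EAGAIN: a worker was kicked off, retry later; else fatal. */
		return PTR_ERR(pages);
	}

	*out = pages;
	return 0;
}
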
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15df7c8d..ae84aa4b1467 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,8 @@
*/
#include <generated/utsrelease.h>
+#include <linux/stop_machine.h>
+#include <linux/zlib.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ memset(zstream, 0, sizeof(*zstream));
+
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!zstream->workspace)
+ return false;
+
+ if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+ kfree(zstream->workspace);
+ return false;
+ }
+
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ zstream->next_in = src;
+ zstream->avail_in = PAGE_SIZE;
+
+ do {
+ if (zstream->avail_out == 0) {
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] = (void *)page;
+
+ zstream->next_out = (void *)page;
+ zstream->avail_out = PAGE_SIZE;
+ }
+
+ if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ return -EIO;
+ } while (zstream->avail_in);
+
+ /* Fallback to uncompressed if we increase size? */
+ if (0 && zstream->total_out > zstream->total_in)
+ return -E2BIG;
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+ if (dst) {
+ zlib_deflate(zstream, Z_FINISH);
+ dst->unused = zstream->avail_out;
+ }
+
+ zlib_deflateEnd(zstream);
+ kfree(zstream->workspace);
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, ":");
+}
+
+#else
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] =
+ memcpy((void *)page, src, PAGE_SIZE);
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, "~");
+}
+
+#endif
+
static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name,
struct drm_i915_error_buffer *err,
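
The new compress_init()/compress_page()/compress_fini() trio above (with an uncompressed fallback when CONFIG_DRM_I915_COMPRESS_ERROR is off) is driven as a simple loop by the rewritten i915_error_object_create() further down. A condensed sketch of that calling pattern under the same atomic constraints; the next_page iterator and capture_pages() wrapper are hypothetical, and gtt_offset/gtt_size setup is omitted:

static struct drm_i915_error_object *
capture_pages(void *(*next_page)(void *ctx), void *ctx, unsigned long max_pages)
{
	struct drm_i915_error_object *dst;
	struct z_stream_s zstream;
	void *src;

	dst = kmalloc(sizeof(*dst) + max_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&zstream)) {
		kfree(dst);
		return NULL;
	}

	while ((src = next_page(ctx))) {	/* hypothetical page source */
		if (compress_page(&zstream, src, dst)) {
			/* unwind the partially filled object, as the patch does */
			while (dst->page_count--)
				free_page((unsigned long)dst->pages[dst->page_count]);
			kfree(dst);
			dst = NULL;
			break;
		}
	}

	compress_fini(&zstream, dst);	/* flushes and records dst->unused */
	return dst;
}
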
@@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
return "unknown";
}
+static void error_print_instdone(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_engine *ee)
+{
+ int slice;
+ int subslice;
+
+ err_printf(m, " INSTDONE: 0x%08x\n",
+ ee->instdone.instdone);
+
+ if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ return;
+
+ err_printf(m, " SC_INSTDONE: 0x%08x\n",
+ ee->instdone.slice_common);
+
+ if (INTEL_GEN(m->i915) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+}
+
+static void error_print_request(struct drm_i915_error_state_buf *m,
+ const char *prefix,
+ struct drm_i915_error_request *erq)
+{
+ if (!erq->seqno)
+ return;
+
+ err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid,
+ erq->context, erq->seqno,
+ jiffies_to_msecs(jiffies - erq->jiffies),
+ erq->head, erq->tail);
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
- err_printf(m, " HEAD: 0x%08x\n", ee->head);
- err_printf(m, " TAIL: 0x%08x\n", ee->tail);
+ err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
+ err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
+ ee->tail, ee->rq_post, ee->rq_tail);
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
err_printf(m, " MODE: 0x%08x\n", ee->mode);
err_printf(m, " HWS: 0x%08x\n", ee->hws);
@@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
- err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
+
+ error_print_instdone(m, ee);
+
if (ee->batchbuffer) {
u64 start = ee->batchbuffer->gtt_offset;
u64 end = start + ee->batchbuffer->gtt_size;
@@ -263,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[0],
- ee->semaphore_seqno[0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[1],
- ee->semaphore_seqno[1]);
- if (HAS_VEBOX(m->i915)) {
- err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[2],
- ee->semaphore_seqno[2]);
- }
+ err_printf(m, " SYNC_0: 0x%08x\n",
+ ee->semaphore_mboxes[0]);
+ err_printf(m, " SYNC_1: 0x%08x\n",
+ ee->semaphore_mboxes[1]);
+ if (HAS_VEBOX(m->i915))
+ err_printf(m, " SYNC_2: 0x%08x\n",
+ ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -296,6 +444,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(ee->hangcheck_action),
ee->hangcheck_score);
+ error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
+ error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -307,40 +457,83 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static int
+ascii85_encode_len(int len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static bool
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return false;
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return true;
+}
+
static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct intel_engine_cs *engine,
+ const char *name,
struct drm_i915_error_object *obj)
{
- int page, offset, elt;
+ char out[6];
+ int page;
+
+ if (!obj)
+ return;
- for (page = offset = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
+ if (name) {
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", name,
+ upper_32_bits(obj->gtt_offset),
+ lower_32_bits(obj->gtt_offset));
+ }
+
+ err_compression_marker(m);
+ for (page = 0; page < obj->page_count; page++) {
+ int i, len;
+
+ len = PAGE_SIZE;
+ if (page == obj->page_count - 1)
+ len -= obj->unused;
+ len = ascii85_encode_len(len);
+
+ for (i = 0; i < len; i++) {
+ if (ascii85_encode(obj->pages[page][i], out))
+ err_puts(m, out);
+ else
+ err_puts(m, "z");
}
}
+ err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int i, j, offset, elt;
int max_hangcheck_score;
+ int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
@@ -348,9 +541,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Time: %ld s %ld us\n",
+ error->time.tv_sec, error->time.tv_usec);
+ err_printf(m, "Boottime: %ld s %ld us\n",
+ error->boottime.tv_sec, error->boottime.tv_usec);
+ err_printf(m, "Uptime: %ld s %ld us\n",
+ error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -375,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev)) {
+ if (HAS_CSR(dev_priv)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
@@ -387,11 +584,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
- } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ } else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@@ -402,21 +599,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -438,7 +631,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j].name);
+ dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -456,7 +649,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i].name);
+ err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
@@ -464,37 +657,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
-
- obj = ee->wa_batchbuffer;
- if (obj) {
- err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
+ print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_requests);
- for (j = 0; j < ee->num_requests; j++) {
- err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
- ee->requests[j].pid,
- ee->requests[j].seqno,
- ee->requests[j].jiffies,
- ee->requests[j].head,
- ee->requests[j].tail);
- }
+ for (j = 0; j < ee->num_requests; j++)
+ error_print_request(m, " ", &ee->requests[j]);
}
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i].name);
+ dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -504,83 +683,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = ee->ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "ringbuffer", ee->ringbuffer);
- if ((obj = ee->hws_page)) {
- u64 hws_offset = obj->gtt_offset;
- u32 *hws_page = &obj->pages[0][0];
+ print_error_obj(m, dev_priv->engine[i],
+ "HW Status", ee->hws_page);
- if (i915.enable_execlists) {
- hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
- hws_page = &obj->pages[LRC_PPHWSP_PN][0];
- }
- err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->engine[i].name, hws_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- hws_page[elt],
- hws_page[elt+1],
- hws_page[elt+2],
- hws_page[elt+3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "HW context", ee->ctx);
- obj = ee->wa_ctx;
- if (obj) {
- u64 wa_ctx_offset = obj->gtt_offset;
- u32 *wa_ctx_page = &obj->pages[0][0];
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
- engine->wa_ctx.per_ctx.size);
-
- err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
- dev_priv->engine[i].name, wa_ctx_offset);
- offset = 0;
- for (elt = 0; elt < wa_ctx_size; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- wa_ctx_page[elt + 0],
- wa_ctx_page[elt + 1],
- wa_ctx_page[elt + 2],
- wa_ctx_page[elt + 3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA context", ee->wa_ctx);
- if ((obj = ee->ctx)) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA batchbuffer", ee->wa_batchbuffer);
}
- if ((obj = error->semaphore)) {
- err_printf(m, "Semaphore page = 0x%08x\n",
- lower_32_bits(obj->gtt_offset));
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- elt * 4,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- }
- }
+ print_error_obj(m, NULL, "Semaphores", error->semaphore);
+
+ print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev, error->display);
+ intel_display_print_error_state(m, dev_priv, error->display);
out:
if (m->bytes == 0 && m->err)
@@ -629,7 +756,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
return;
for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
+ free_page((unsigned long)obj->pages[page]);
kfree(obj);
}
@@ -656,6 +783,7 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
+ i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@@ -667,104 +795,63 @@ static void i915_error_state_free(struct kref *error_ref)
}
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
+i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *src;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- int num_pages;
- bool use_ggtt;
- int i = 0;
- u64 reloc_offset;
+ struct z_stream_s zstream;
+ unsigned long num_pages;
+ struct sgt_iter iter;
+ dma_addr_t dma;
if (!vma)
return NULL;
- src = vma->obj;
- if (!src->pages)
- return NULL;
-
- num_pages = src->base.size >> PAGE_SHIFT;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!dst)
return NULL;
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->page_count = 0;
+ dst->unused = 0;
- reloc_offset = dst->gtt_offset;
- use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- (vma->flags & I915_VMA_GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
-
- /* Cannot access stolen address directly, try to use the aperture */
- if (src->stolen) {
- use_ggtt = true;
-
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
- goto unwind;
-
- reloc_offset = vma->node.start;
- if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
- goto unwind;
+ if (!compress_init(&zstream)) {
+ kfree(dst);
+ return NULL;
}
- /* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
- !HAS_LLC(dev_priv))
- goto unwind;
-
- dst->page_count = num_pages;
- while (num_pages--) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (use_ggtt) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(&ggtt->mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ void __iomem *s;
+ int ret;
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
+ ggtt->base.insert_page(&ggtt->base, dma, slot,
+ I915_CACHE_NONE, 0);
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
+ s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ ret = compress_page(&zstream, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
- dst->pages[i++] = d;
- reloc_offset += PAGE_SIZE;
+ if (ret)
+ goto unwind;
}
-
- return dst;
+ goto out;
unwind:
- while (i--)
- kfree(dst->pages[i]);
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst);
- return NULL;
+ dst = NULL;
+
+out:
+ compress_fini(&zstream, dst);
+ ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ return dst;
}
/* The error capture is special as it tries to run underneath the normal
@@ -773,16 +860,19 @@ unwind:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+ struct drm_i915_gem_request *request;
+
+ request = __i915_gem_active_peek(active);
+ return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
- engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
- return engine ? engine->id : -1;
+ request = __i915_gem_active_peek(active);
+ return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -795,17 +885,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
- err->wseqno = __active_get_seqno(&obj->last_write);
- err->engine = __active_get_engine_id(&obj->last_write);
+ err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
+ err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+ err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->dirty = obj->mm.dirty;
+ err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
}
@@ -855,7 +945,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
if (engine_id)
*engine_id = i;
- return error->engine[i].ipehr ^ error->engine[i].instdone;
+ return error->engine[i].ipehr ^
+ error->engine[i].instdone.instdone;
}
}
@@ -879,6 +970,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
}
+static inline u32
+gen8_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
+{
+ int idx;
+
+ /*
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+ */
+
+ idx = (other - engine) - 1;
+ if (idx < 0)
+ idx += I915_NUM_ENGINES;
+
+ return idx;
+}
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
@@ -891,7 +1002,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
if (!error->semaphore)
return;
- for_each_engine_id(to, dev_priv, id) {
+ for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
@@ -902,10 +1013,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
- idx = intel_engine_sync_index(engine, to);
+ idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
- ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
@@ -916,14 +1026,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
- ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
-
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
- ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
- }
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
@@ -940,7 +1045,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@@ -948,7 +1053,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
waiter = NULL;
if (count)
@@ -958,7 +1063,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@@ -976,7 +1081,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
@@ -998,7 +1103,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1010,14 +1114,15 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
- ee->instdone = I915_READ(GEN2_INSTDONE);
}
+ intel_engine_get_instdone(engine, &ee->instdone);
+
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = engine->last_submitted_seqno;
+ ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@@ -1079,6 +1184,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
}
}
+static void record_request(struct drm_i915_gem_request *request,
+ struct drm_i915_error_request *erq)
+{
+ erq->context = request->ctx->hw_id;
+ erq->seqno = request->global_seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->head = request->head;
+ erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
+}
+
static void engine_record_requests(struct intel_engine_cs *engine,
struct drm_i915_gem_request *first,
struct drm_i915_error_engine *ee)
@@ -1088,7 +1207,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link)
+ list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
@@ -1101,9 +1220,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link) {
- struct drm_i915_error_request *erq;
-
+ list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1123,19 +1240,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
break;
}
- erq = &ee->requests[count++];
- erq->seqno = request->fence.seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->head = request->head;
- erq->tail = request->tail;
-
- rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
- rcu_read_unlock();
+ record_request(request, &ee->requests[count++]);
}
ee->num_requests = count;
}
+static void error_record_engine_execlists(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
+{
+ unsigned int n;
+
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ if (engine->execlist_port[n].request)
+ record_request(engine->execlist_port[n].request,
+ &ee->execlist[n]);
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
@@ -1146,20 +1266,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
- if (!intel_engine_initialized(engine))
+ if (!engine)
continue;
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
error_record_engine_waiters(engine, ee);
+ error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
@@ -1202,6 +1323,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
+
ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
@@ -1302,11 +1427,21 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
+static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ /* Capturing log buf contents won't be useful if logging was disabled */
+ if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ return;
+
+ error->guc_log = i915_error_object_create(dev_priv,
+ dev_priv->guc.log.vma);
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1318,62 +1453,60 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
/* 3: Feature specific registers */
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev))
+ if (HAS_HW_CONTEXTS(dev_priv))
error->ccid = I915_READ(CCID);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
- } else if (!IS_VALLEYVIEW(dev)) {
+ } else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
-
- i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@@ -1418,6 +1551,32 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
+static int capture(void *data)
+{
+ struct drm_i915_error_state *error = data;
+
+ i915_capture_gen_state(error->i915, error);
+ i915_capture_reg_state(error->i915, error);
+ i915_gem_record_fences(error->i915, error);
+ i915_gem_record_rings(error->i915, error);
+ i915_capture_active_buffers(error->i915, error);
+ i915_capture_pinned_buffers(error->i915, error);
+ i915_gem_capture_guc_log_buffer(error->i915, error);
+
+ do_gettimeofday(&error->time);
+ error->boottime = ktime_to_timeval(ktime_get_boottime());
+ error->uptime =
+ ktime_to_timeval(ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time));
+
+ error->overlay = intel_overlay_capture_error_state(error->i915);
+ error->display = intel_display_capture_error_state(error->i915);
+
+ return 0;
+}
+
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1435,6 +1594,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error;
unsigned long flags;
+ if (!i915.error_capture)
+ return;
+
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
@@ -1446,18 +1608,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
kref_init(&error->ref);
+ error->i915 = dev_priv;
- i915_capture_gen_state(dev_priv, error);
- i915_capture_reg_state(dev_priv, error);
- i915_gem_record_fences(dev_priv, error);
- i915_gem_record_rings(dev_priv, error);
- i915_capture_active_buffers(dev_priv, error);
- i915_capture_pinned_buffers(dev_priv, error);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev_priv);
- error->display = intel_display_capture_error_state(dev_priv);
+ stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1476,7 +1629,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
return;
}
- if (!warned) {
+ if (!warned &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@@ -1497,7 +1651,6 @@ void i915_error_state_get(struct drm_device *dev,
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
-
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@@ -1519,33 +1672,3 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
kref_put(&error->ref, i915_error_state_free);
}
-
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
-/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
- uint32_t *instdone)
-{
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
- instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- }
-}
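
Error capture now runs the whole capture() callback under stop_machine(), so registers, request lists and buffers are sampled while every other CPU is parked with interrupts off and cannot mutate the state underneath the capture. A generic sketch of that idiom; struct snapshot, snapshot_state() and take_snapshot() are hypothetical, stop_machine() is the real <linux/stop_machine.h> API:

#include <linux/stop_machine.h>
#include <linux/types.h>

struct snapshot {
	u64 value;
};

/* Runs while all other CPUs are spinning with interrupts disabled. */
static int snapshot_state(void *data)
{
	struct snapshot *snap = data;

	snap->value = 42;	/* stand-in for reading volatile driver state */
	return 0;
}

static int take_snapshot(struct snapshot *snap)
{
	/* NULL cpumask: the callback may run on any one CPU. */
	return stop_machine(snapshot_state, snap, NULL);
}
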
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3106dcc06fe9..4462112725ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -23,6 +23,8 @@
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_guc.h"
@@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
+ mutex_lock(&guc->action_lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
dev_priv->guc.action_status = status;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->action_lock);
return ret;
}
@@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
+{
+ u32 data[1];
+
+ data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
+
+ return host2guc_action(guc, data, 1);
+}
+
+static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
+ data[1] = 0;
+
+ return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
+ data[1] = control_val;
+
+ return host2guc_action(guc, data, 2);
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct guc_context_desc desc;
size_t len;
- doorbell = client->client_base + client->doorbell_offset;
+ doorbell = client->vaddr + client->doorbell_offset;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
test_bit(client->doorbell_id, doorbell_bitmap)) {
@@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->client_base + client->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
gfx_addr = i915_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu = (uintptr_t)client->client_base +
- client->doorbell_offset;
+ desc.db_trigger_cpu =
+ (uintptr_t)client->vaddr + client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
@@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
+ struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
u32 freespace;
int ret;
@@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
- void *base;
- u32 freespace, tail, wq_off, wq_page;
+ u32 freespace, tail, wq_off;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
- wq_off &= PAGE_SIZE - 1;
- base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
- wqi = (struct guc_wq_item *)((char *)base + wq_off);
+ wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->fence.seqno;
-
- kunmap_atomic(base);
+ wqi->fence_id = rq->global_seqno;
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
+ db = gc->vaddr + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -601,13 +628,31 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
*/
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
- unsigned int engine_id = rq->engine->id;
+ struct drm_i915_private *dev_priv = rq->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ rq->previous_context = engine->last_context;
+ engine->last_context = rq->ctx;
+
+ i915_gem_request_submit(rq);
+
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
+
+ /* WA to flush out the pending GMADR writes to ring buffer. */
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
+
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -616,7 +661,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->fence.seqno;
+ guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
}
@@ -685,14 +730,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
* Be sure to drop any locks
*/
- if (client->client_base) {
+ if (client->vaddr) {
/*
* If we got as far as setting up a doorbell, make sure we
* shut it down before unmapping & deallocating the memory.
*/
guc_disable_doorbell(guc, client);
- kunmap(kmap_to_page(client->client_base));
+ i915_gem_object_unpin_map(client->vma->obj);
}
i915_vma_unpin_and_release(&client->vma);
@@ -781,6 +826,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -807,7 +853,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
- client->client_base = kmap(i915_vma_first_page(vma));
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ goto err;
+
+ client->vaddr = vaddr;
spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
@@ -847,15 +898,411 @@ err:
return NULL;
}
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data, as the producer could be writing to the
+ * same sub buffer from which Consumer is reading. This can't be avoided
+ * unless Consumer is fast enough and can always run in tandem with
+ * Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to User, through which
+ * it can collect the logs in order without any post-processing.
+ * Need to set 'is_global' even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
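/* With the callbacks above, relay exposes a single "guc_log" file under the
 * DRM debugfs directory. The following is only a minimal standalone sketch
 * of a userspace consumer (not driver code), assuming the default debugfs
 * mount point and DRM minor 0; the exact path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/guc_log", O_RDONLY);

	if (fd < 0) {
		perror("open guc_log");
		return 1;
	}

	/* Each read() drains the relay sub buffers in order. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}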
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to User, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
+ * not mounted and so we can't create the relay file.
+ * The relay API seems to fit well with debugfs only; to make use of relay
+ * there are 3 requirements which can be met for a debugfs file only in a
+ * straightforward/clean manner :-
+ * i) Need the associated dentry pointer of the file, while opening the
+ * relay channel.
+ * ii) Should be able to use 'relay_file_operations' fops for the file.
+ * iii) Set the 'i_private' field of file's inode to the pointer of
+ * relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible when
+ * Consumer sees the following update to offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. Could have used the relay_write() to indirectly
+ * copy the data, but that would have been a bit convoluted, as we need to
+ * write to only certain locations inside a sub buffer, which cannot be
+ * done without using relay_reserve() along with relay_write(). So it's
+ * better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool
+guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type, unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
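/* buffer_full_cnt in the shared log buffer state is a 4-bit counter, so the
 * overflow accounting above has to cope with it wrapping from 15 back to 0.
 * A minimal standalone model of that arithmetic (a sketch, not the driver
 * code, assuming only the 4-bit wrap) is:
 */
#include <stdio.h>

static unsigned int overflow_delta(unsigned int prev, unsigned int cur)
{
	unsigned int delta = cur - prev;

	/* The counter wrapped (e.g. 15 -> 2), so add back the modulus. */
	if (cur < prev)
		delta += 16;

	return delta;
}

int main(void)
{
	printf("%u\n", overflow_delta(3, 7));  /* 4 */
	printf("%u\n", overflow_delta(15, 2)); /* 3: wrapped 4-bit counter */
	return 0;
}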
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by the GuC firmware,
+ * after sending the flush interrupt to Host. For consistency, set
+ * the write pointer value in the snapshot buffer to the same value
+ * as sampled_write_ptr.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state)
+ guc_move_to_next_buf(guc);
+ else {
+ /* Use a rate-limited message to avoid a deluge of them; logs might be
+ * getting consumed by User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
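/* The copy above treats each log buffer as a circular buffer, so when the
 * write offset has wrapped behind the read offset two chunks must be copied.
 * A minimal sketch of that split (not the driver code; plain memcpy stands
 * in for i915_memcpy_from_wc) looks like this:
 */
#include <string.h>

static void copy_new_data(void *dst, const void *src, size_t size,
			  size_t read_offset, size_t write_offset)
{
	if (read_offset > write_offset) {
		/* Wrapped: copy the head [0, write_offset) ... */
		memcpy(dst, src, write_offset);
		/* ... and the tail [read_offset, size). */
		memcpy((char *)dst + read_offset,
		       (const char *)src + read_offset,
		       size - read_offset);
	} else {
		/* Not wrapped: copy just the newly written span. */
		memcpy((char *)dst + read_offset,
		       (const char *)src + read_offset,
		       write_offset - read_offset);
	}
}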
+
+static void guc_capture_logs_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, guc.log.flush_work);
+
+ i915_guc_capture_logs(dev_priv);
+}
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs; the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
+
+ /*
+ * The GuC log buffer flush work item has to do register access to
+ * send the ack to GuC, and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother with
+ * flushing this work item from the suspend hooks; the pending
+ * work item, if any, will either be executed before the suspend
+ * or scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
- if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
- return;
-
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
@@ -865,8 +1312,18 @@ static void guc_log_create(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
- vma = guc->log_vma;
+ vma = guc->log.vma;
if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
@@ -874,7 +1331,14 @@ static void guc_log_create(struct intel_guc *guc)
return;
}
- guc->log_vma = vma;
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
}
/* each allocated unit is a page */
@@ -884,7 +1348,37 @@ static void guc_log_create(struct intel_guc *guc)
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set as -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
}
static void guc_policies_init(struct guc_policies *policies)
@@ -917,6 +1411,7 @@ static void guc_addon_create(struct intel_guc *guc)
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct page *page;
u32 size;
@@ -944,10 +1439,10 @@ static void guc_addon_create(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
@@ -960,7 +1455,7 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
@@ -1005,6 +1500,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
+ mutex_init(&guc->action_lock);
guc_log_create(guc);
guc_addon_create(guc);
@@ -1014,9 +1510,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
@@ -1033,11 +1530,13 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
+ engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request,
+ &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
@@ -1066,7 +1565,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log_vma);
+ i915_vma_unpin_and_release(&guc->log.vma);
if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
@@ -1087,6 +1586,8 @@ int intel_guc_suspend(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ gen9_disable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@@ -1113,6 +1614,9 @@ int intel_guc_resume(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@@ -1122,3 +1626,104 @@ int intel_guc_resume(struct drm_device *dev)
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
+{
+ guc_read_update_log_buffer(&dev_priv->guc);
+
+ /* Generally the device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ host2guc_logbuffer_flush_complete(&dev_priv->guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts, they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete, otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&dev_priv->guc.log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ host2guc_force_logbuffer_flush(&dev_priv->guc);
+
+ /* GuC would have updated log buffer by now, so capture it */
+ i915_guc_capture_logs(dev_priv);
+}
+
+void i915_guc_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+ /* If log_level was set as -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled.
+ */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(&dev_priv->guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+ /* Once logging is disabled, GuC won't generate logs or send an
+ * interrupt. But there could be some data in the log buffer
+ * which is yet to be captured. So request GuC to update the log
+ * buffer state and then collect the leftover logs.
+ */
+ i915_guc_flush_logs(dev_priv);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..07ca71cabb2b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- new_val = dev_priv->pm_irq_mask;
+ new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ if (new_val != dev_priv->pm_imr) {
+ dev_priv->pm_imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
- __gen6_disable_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(reg, dev_priv->pm_rps_events);
- I915_WRITE(reg, dev_priv->pm_rps_events);
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(dev_priv, disable_mask);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ /* A barrier is missing here, but we don't really need one */
+}
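/* The helpers above keep cached copies of the PM enable (IER) and mask (IMR)
 * registers and update them in a fixed order. A stripped-down model of that
 * bookkeeping (a sketch only: registers replaced by plain fields, no locking
 * or MMIO, names chosen here for illustration) is:
 */
#include <stdint.h>

struct pm_irq_state {
	uint32_t ier;	/* cached enable register */
	uint32_t imr;	/* cached mask register */
};

static void pm_irq_enable(struct pm_irq_state *s, uint32_t mask)
{
	s->ier |= mask;		/* enable the source... */
	s->imr &= ~mask;	/* ...then unmask it */
}

static void pm_irq_disable(struct pm_irq_state *s, uint32_t mask)
{
	s->imr |= mask;		/* mask first so nothing new fires... */
	s->ier &= ~mask;	/* ...then drop the enable bit */
}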
+
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
- dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
- ~dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->pm_guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
- if (pipe >= INTEL_INFO(dev)->num_pipes) {
+ if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- if (!crtc->hwmode.crtc_clock) {
+ if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- &crtc->hwmode);
+ &crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -1058,8 +1114,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@@ -1084,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1257,20 +1314,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1323,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
- if (master_ctl & GEN8_GT_PM_IRQ) {
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & dev_priv->pm_rps_events) {
+ if (gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & dev_priv->pm_rps_events);
+ gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1340,25 +1399,28 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+ if (gt_iir[2] & dev_priv->pm_guc_events)
+ gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1585,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@@ -1598,13 +1660,48 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+ /* Sample the log buffer flush related bits & clear them out now
+ * from the message identity register, to minimize the
+ * probability of losing a flush interrupt when there are back
+ * to back flush interrupts.
+ * There can be a new flush interrupt, for a different log buffer
+ * type (like ISR), whilst Host is handling one (for DPC).
+ * Since the same bit is used in the message register for ISR & DPC,
+ * it could happen that GuC sets the bit for the 2nd interrupt but
+ * Host clears out the bit on handling the 1st interrupt.
+ */
+ u32 msg, flush;
+
+ msg = I915_READ(SOFT_SCRATCH(15));
+ flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ if (flush) {
+ /* Clear the message bits that are handled */
+ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+ /* Handle flush interrupt in bottom half */
+ queue_work(dev_priv->guc.log.flush_wq,
+ &dev_priv->guc.log.flush_work);
+
+ dev_priv->guc.log.flush_interrupt_count++;
+ } else {
+ /* Leaving the unhandled event bits set won't result in
+ * the interrupt being re-triggered.
+ */
+ }
+ }
+}
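/* The handler above samples the message register, clears only the bits it is
 * about to handle and leaves the rest untouched, so a flush notification that
 * arrives in between is not lost. A register-free sketch of that pattern (not
 * the driver code; the names below are illustrative) is:
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t scratch;	/* stands in for the message identity register */

static bool handle_flush_message(uint32_t handled_mask)
{
	uint32_t msg = scratch;			/* sample */
	uint32_t flush = msg & handled_mask;	/* bits we will handle */

	if (!flush)
		return false;

	scratch = msg & ~flush;	/* clear only the handled bits */
	/* real code would queue the flush work here */
	return true;
}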
+
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2407,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@@ -2551,92 +2648,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+ struct intel_instdone *instdone)
{
- uint32_t instdone[I915_NUM_INSTDONE_REG];
- u32 eir = I915_READ(EIR);
- int pipe, i;
+ int slice;
+ int subslice;
+
+ pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
- if (!eir)
+ if (INTEL_GEN(dev_priv) <= 6)
return;
- pr_err("render error detected, EIR: 0x%08x\n", eir);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
- i915_get_extra_instdone(dev_priv, instdone);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
- if (!IS_GEN2(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ if (!IS_GEN2(dev_priv))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- pr_err("memory refresh error:\n");
- for_each_pipe(dev_priv, pipe)
- pr_err("pipe %c stat: 0x%08x\n",
- pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- pr_err("instruction error\n");
- pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_GEN(dev_priv) < 4) {
- u32 ipeir = I915_READ(IPEIR);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- POSTING_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- }
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
- I915_WRITE(EIR, eir);
- POSTING_READ(EIR);
+ I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@@ -2665,7 +2722,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_report_and_clear_eir(dev_priv);
+ i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@@ -2694,45 +2751,40 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2753,38 +2805,36 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS |
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2798,411 +2848,14 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
-
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
-
- for_each_engine(engine, dev_priv)
- engine->hangcheck.deadlock = 0;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- u32 instdone[I915_NUM_INSTDONE_REG];
- bool stuck;
- int i;
-
- if (engine->id != RCS)
- return true;
-
- i915_get_extra_instdone(engine->i915, instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = true;
- for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
- if (tmp != engine->hangcheck.instdone[i])
- stuck = false;
-
- engine->hangcheck.instdone[i] |= tmp;
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return HANGCHECK_ACTIVE;
- }
-
- if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
-
- return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
- return ha;
-
- if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = I915_READ_CTL(engine);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
- "Kicking stuck wait on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- }
-
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return HANGCHECK_HUNG;
- case 1:
- i915_handle_error(dev_priv, 0,
- "Kicking stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- case 0:
- return HANGCHECK_WAIT;
- }
- }
-
- return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- struct intel_engine_cs *engine;
- unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
- if (!i915.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
-
- semaphore_clear_deadlocks(dev_priv);
-
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
- }
-
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
- busy_count += busy;
- }
-
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
-
- /* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
-}
-
-static void ibx_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@@ -3218,7 +2871,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3226,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -3293,12 +2944,12 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -3308,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3343,8 +2994,8 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev))
- ibx_irq_reset(dev);
+ if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3532,10 +3183,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@@ -3552,14 +3203,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev)) {
+ if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
- gt_irqs |= GT_PARITY_ERROR(dev);
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+ gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3567,16 +3218,18 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev))
+ if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
- dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ dev_priv->pm_imr = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3585,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3616,7 +3269,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@@ -3696,14 +3349,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_irq_mask = 0xffffffff;
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
+ * is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3756,13 +3410,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3808,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3971,7 +3625,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4020,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4054,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -4168,7 +3822,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4222,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4400,9 +4054,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4487,6 +4141,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ if (HAS_GUC_SCHED(dev_priv))
+ dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
@@ -4508,9 +4165,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
- INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
@@ -4539,16 +4193,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4211,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4231,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
}
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89d9cd4..d46ffe7086bc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = {
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
- .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
@@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = true,
+ .error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
@@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+module_param_named(error_capture, i915.error_capture, bool, 0600);
+MODULE_PARM_DESC(error_capture,
+ "Record the GPU state following a hang. "
+ "This information in /sys/class/drm/card<N>/error is vital for "
+ "triaging and debugging hangs.");
+#endif
+
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
@@ -136,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
+module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+MODULE_PARM_DESC(alpha_support,
+ "Enable alpha quality driver support for latest hardware. "
+ "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78ddb38..817ad959941e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -40,7 +40,7 @@ struct i915_params {
int enable_ppgtt;
int enable_execlists;
int enable_psr;
- unsigned int preliminary_hw_support;
+ unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
@@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
+ bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 687c768833b3..fce8e198bc76 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS, \
- .has_logical_ring_contexts = 1
+ .has_logical_ring_contexts = 1, \
+ .has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
+ .has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
+ .has_64bit_reloc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
@@ -360,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_guc = 1,
+ .has_decoupled_mmio = 1,
.ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -431,17 +435,15 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
+ "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
+ "to enable support in this kernel version, or check for kernel updates.\n");
return -ENODEV;
}
@@ -463,8 +465,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915_driver_load(pdev, ent);
}
-extern void i915_driver_unload(struct drm_device *dev);
-
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -473,8 +473,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
drm_dev_unref(dev);
}
-extern const struct dev_pm_ops i915_pm_ops;
-
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d96162def6..c70c07a7b586 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
-#define BSM 0x5c
-#define BSM_MASK (0xFFFF << 20)
+/* BSM in include/drm/i915_drm.h */
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
@@ -831,96 +830,7 @@ enum skl_disp_power_wells {
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Eeach channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication,
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- * pipe A == CMN/PLL/REF CH0
- *
- * pipe B == CMN/PLL/REF CH1
- *
- * port B == PCS/TX CH0
- *
- * port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- * pipe C == CMN/PLL/REF CH0
- *
- * port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * | CH0 | CH1 |
- * | CMN/PLL/REF | CMN/PLL/REF |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * | DDI0 | DDI1 | DP/HDMI ports
- * ---------------------------------
- *
- * Single channel PHY (CHV/BXT)
- * -----------------
- * | CH0 |
- * | CMN/PLL/REF |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * | DDI2 | DP/HDMI port
- * -----------------
- */
+/* DPIO registers */
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
@@ -1276,7 +1186,19 @@ enum skl_disp_power_wells {
#define DPIO_UPAR_SHIFT 30
/* BXT PHY registers */
-#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
@@ -1293,8 +1215,8 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1314,18 +1236,18 @@ enum skl_disp_power_wells {
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
-#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
@@ -1356,57 +1278,56 @@ enum skl_disp_power_wells {
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
-#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
- _PORT_CL1CM_DW0_A)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
- _PORT_CL1CM_DW9_A)
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
- _PORT_CL1CM_DW10_A)
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
- _PORT_CL1CM_DW28_A)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
- _PORT_CL1CM_DW30_A)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-/* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
- _PORT_REF_DW3_A)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
@@ -1417,15 +1338,13 @@ enum skl_disp_power_wells {
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
- _PORT_REF_DW6_A)
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
- _PORT_REF_DW8_A)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
@@ -1434,12 +1353,13 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
@@ -1454,15 +1374,15 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
@@ -1474,12 +1394,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
@@ -1491,12 +1411,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
@@ -1506,12 +1426,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
+#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
@@ -1520,10 +1440,10 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
- _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
+ _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C) + \
+ _BXT_LANE_OFFSET(lane))
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
@@ -1605,6 +1525,7 @@ enum skl_disp_power_wells {
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
@@ -1708,7 +1629,11 @@ enum skl_disp_power_wells {
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define I915_NUM_INSTDONE_REG 4
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@@ -2089,9 +2014,9 @@ enum skl_disp_power_wells {
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
-#define GT_PARITY_ERROR(dev) \
+#define GT_PARITY_ERROR(dev_priv) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
- (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+ (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
@@ -2184,8 +2109,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
-#define FBC_STATUS2 _MMIO(0x43214)
-#define FBC_COMPRESSION_MASK 0x7ff
+#define FBC_STATUS2 _MMIO(0x43214)
+#define IVB_FBC_COMPRESSION_MASK 0x7ff
+#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
@@ -6011,6 +5937,7 @@ enum {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_GUC_IRQ (1<<5)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
@@ -6022,6 +5949,16 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
+#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
+#define GEN9_GUC_DISPLAY_EVENT (1<<29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
+#define GEN9_GUC_DB_RING_EVENT (1<<26)
+#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
@@ -7327,6 +7264,10 @@ enum {
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -7350,6 +7291,13 @@ enum {
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
@@ -7394,6 +7342,13 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+/* Decoupled MMIO register pair for kernel driver */
+#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
+#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
+#define GEN9_DECOUPLED_DW1_GO (1<<31)
+#define GEN9_DECOUPLED_PD_SHIFT 28
+#define GEN9_DECOUPLED_OP_SHIFT 24
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a0af170062b1..b0e1e7ca75da 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,35 +29,31 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration control */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
-static void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev_priv);
}
int i915_save_state(struct drm_device *dev)
@@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_save_display(dev);
+ i915_save_display(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
@@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
- i915_restore_display(dev);
+ i915_restore_display(dev_priv);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..147420ccf49c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,11 +8,13 @@
*/
#include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
+#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
+
static DEFINE_SPINLOCK(i915_sw_fence_lock);
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@@ -114,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence)
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key)
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
- init_waitqueue_head(&fence->wait);
+ __init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
@@ -135,6 +140,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
i915_sw_fence_put(wq->private);
+ if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
+ kfree(wq);
return 0;
}
@@ -192,9 +199,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
-int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
- struct i915_sw_fence *signaler,
- wait_queue_t *wq)
+static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@@ -206,8 +213,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
+ pending = 0;
+ if (!wq) {
+ wq = kmalloc(sizeof(*wq), gfp);
+ if (!wq) {
+ if (!gfpflags_allow_blocking(gfp))
+ return -ENOMEM;
+
+ i915_sw_fence_wait(signaler);
+ return 0;
+ }
+
+ pending |= I915_SW_FENCE_FLAG_ALLOC;
+ }
+
INIT_LIST_HEAD(&wq->task_list);
- wq->flags = 0;
+ wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = i915_sw_fence_get(fence);
@@ -226,49 +247,64 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
-struct dma_fence_cb {
- struct fence_cb base;
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
+}
+
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ gfp_t gfp)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
+}
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
struct i915_sw_fence *fence;
- struct fence *dma;
+ struct dma_fence *dma;
struct timer_list timer;
};
static void timer_i915_sw_fence_wake(unsigned long data)
{
- struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
cb->dma = NULL;
i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
del_timer_sync(&cb->timer);
if (cb->timer.function)
i915_sw_fence_commit(cb->fence);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
kfree(cb);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence_cb *cb;
+ struct i915_sw_dma_fence_cb *cb;
int ret;
- if (fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma))
return 0;
cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +312,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return fence_wait(dma, false);
+ return dma_fence_wait(dma, false);
}
cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +323,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
if (timeout) {
- cb->dma = fence_get(dma);
+ cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
}
- ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
@@ -305,16 +341,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
{
- struct fence *excl;
+ struct dma_fence *excl;
int ret = 0, pending;
if (write) {
- struct fence **shared;
+ struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +375,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
for (i = 0; i < count; i++)
- fence_put(shared[i]);
+ dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +392,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
ret |= pending;
}
- fence_put(excl);
+ dma_fence_put(excl);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..0f3185ef7f4e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
#include <linux/wait.h>
struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -40,26 +40,54 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key);
+#ifdef CONFIG_LOCKDEP
+#define i915_sw_fence_init(fence, fn) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_sw_fence_init((fence), (fn), #fence, &__key); \
+} while (0)
+#else
+#define i915_sw_fence_init(fence, fn) \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL)
+#endif
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *after,
+ gfp_t gfp);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
+static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
+{
+ return atomic_read(&fence->pending) <= 0;
+}
+
static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) < 0;
}
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+ wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
#endif /* _I915_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1012eeea1324..47590ab08d7e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
NULL,
};
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
+static void i915_setup_error_capture(struct device *kdev)
+{
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+static void i915_teardown_error_capture(struct device *kdev)
+{
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
+#else
+static void i915_setup_error_capture(struct device *kdev) {}
+static void i915_teardown_error_capture(struct device *kdev) {}
+#endif
+
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&kdev->kobj,
- &error_state_attr);
- if (ret)
- DRM_ERROR("error_state sysfs setup failed\n");
+ i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+ i915_teardown_error_capture(kdev);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sysfs_remove_files(&kdev->kobj, vlv_attrs);
else
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..c5d210ebaa9a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id;
__entry->sync_to = to->engine->id;
- __entry->seqno = from->fence.seqno;
+ __entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->flags = flags;
- fence_enable_sw_signaling(&req->fence);
+ dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
new file mode 100644
index 000000000000..a792dcb902b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_vma.h"
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_frontbuffer.h"
+
+#include <drm/drm_gem.h>
+
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to ofc!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static struct i915_vma *
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ struct rb_node *rb, **p;
+ int i;
+
+ GEM_BUG_ON(vm->closed);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
+ vma->vm = vm;
+ vma->obj = obj;
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
+ } else {
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+ }
+
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (i915_vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 bind_flags;
+ u32 vma_flags;
+ int ret;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= I915_VMA_GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= I915_VMA_LOCAL_BIND;
+
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma_flags;
+ else
+ bind_flags &= ~vma_flags;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma);
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->flags |= bind_flags;
+ return 0;
+}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return IO_ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ __i915_vma_pin(vma);
+ return ptr;
+}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
+}
+
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
+
+ if (vma->node.size < size)
+ return true;
+
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_FIXED &&
+ vma->node.start != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ dev_priv->ggtt.mappable_end);
+
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *other;
+
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
+ */
+ if (vma->vm->mm.color_adjust == NULL)
+ return true;
+
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+/**
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
+ * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry, to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
+ u64 start, end;
+ int ret;
+
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
+
+ start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+
+ end = vma->vm->total;
+ if (flags & PIN_MAPPABLE)
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ if (flags & PIN_ZONE_4G)
+ end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
+ */
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ if (flags & PIN_OFFSET_FIXED) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret) {
+ ret = i915_gem_evict_for_vma(vma);
+ if (ret == 0)
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
+ }
+ } else {
+ u32 search_flag, alloc_flag;
+
+ if (flags & PIN_HIGH) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ }
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
+ goto search_free;
+
+ goto err_unpin;
+ }
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ }
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
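+
+/*
+ * Illustrative sketch (not part of this patch): callers reach this through
+ * i915_vma_pin(). A fixed GGTT placement, for example, encodes the desired
+ * page-aligned offset directly in the pin flags, which the PIN_OFFSET_FIXED
+ * branch above extracts via PIN_OFFSET_MASK:
+ *
+ *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | 4096);
+ *
+ * (assuming 4096 is a valid, page-aligned offset covered by PIN_OFFSET_MASK).
+ */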
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
+{
+ unsigned int bound = vma->flags;
+ int ret;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
+
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
+
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
+
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
+
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long active;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
+ */
+ __i915_vma_pin(vma);
+
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
+
+ __i915_vma_unpin(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ }
+
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* release the fence reg _after_ flushing */
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ __i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+ }
+
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
+ }
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
new file mode 100644
index 000000000000..85446f0b0b3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_H__
+#define __I915_VMA_H__
+
+#include <linux/io-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "i915_gem_gtt.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
+#include "i915_gem_request.h"
+
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before the object is bound into, or
+ * after it is unbound from, the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= its object's lifetime, so object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
+ void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also what is assumed in
+ * GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+};
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ i915_gem_object_put(vma->obj);
+}
+
+static inline long
+i915_vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
+}
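+
+/*
+ * Illustrative sketch (not part of this patch): the expected pattern is to
+ * pin around the span of use and unpin afterwards, hitting the fast path
+ * above whenever the vma is already bound as requested:
+ *
+ *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ *	if (err == 0) {
+ *		... e.g. program the hardware with i915_ggtt_offset(vma) ...
+ *		i915_vma_unpin(vma);
+ *	}
+ */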
+
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
+
+/**
+ * i915_vma_pin_iomap - maps the GGTT VMA through the aperture with write-combining
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pin reference on the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap() to relinquish that pin once
+ * the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the mapping previously obtained with i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_unpin(vma);
+}
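+
+/*
+ * Illustrative sketch (not part of this patch, __iomem/sparse casts elided):
+ * the iomap helpers are meant to be used in pin/use/unpin pairs while
+ * holding struct_mutex:
+ *
+ *	void __iomem *ptr = i915_vma_pin_iomap(vma);
+ *	if (!IS_ERR(ptr)) {
+ *		... memcpy_toio()/writel() through ptr ...
+ *		i915_vma_unpin_iomap(vma);
+ *	}
+ */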
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
+}
+
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. The fencing
+ * state must be synchronized first by calling i915_vma_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence(). It handles objects both with and without an
+ * attached fence correctly; callers do not need to distinguish between them.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
+}
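+
+/*
+ * Illustrative sketch (not part of this patch): a scanout path would first
+ * synchronise the fence state (i915_vma_get_fence(), see above) and then
+ * hold the fence pin only for as long as the tiled access lasts:
+ *
+ *	if (i915_vma_pin_fence(vma)) {
+ *		... access that relies on the fence ...
+ *		i915_vma_unpin_fence(vma);
+ *	}
+ */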
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de3072d4f..dbe9fb41ae53 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
- intel_state->wait_req = NULL;
return state;
}
@@ -101,13 +100,13 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
@@ -142,10 +141,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
- if (state->fb && intel_rotation_90_or_270(state->rotation)) {
- char *format_name;
- if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+ if (state->fb && drm_rotation_90_or_270(state->rotation)) {
+ struct drm_format_name_buf format_name;
+
+ if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -158,9 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
- format_name = drm_get_format_name(state->fb->pixel_format);
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
- kfree(format_name);
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(state->fb->pixel_format,
+ &format_name));
return -EINVAL;
default:
@@ -168,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ state->rotation & DRM_ROTATE_180 &&
+ state->rotation & DRM_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5bfd7d8..49f10538d4aa 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -57,6 +57,63 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
+/* DP N/M table */
+#define LC_540M 540000
+#define LC_270M 270000
+#define LC_162M 162000
+
+struct dp_aud_n_m {
+ int sample_rate;
+ int clock;
+ u16 m;
+ u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+ { 32000, LC_162M, 1024, 10125 },
+ { 44100, LC_162M, 784, 5625 },
+ { 48000, LC_162M, 512, 3375 },
+ { 64000, LC_162M, 2048, 10125 },
+ { 88200, LC_162M, 1568, 5625 },
+ { 96000, LC_162M, 1024, 3375 },
+ { 128000, LC_162M, 4096, 10125 },
+ { 176400, LC_162M, 3136, 5625 },
+ { 192000, LC_162M, 2048, 3375 },
+ { 32000, LC_270M, 1024, 16875 },
+ { 44100, LC_270M, 784, 9375 },
+ { 48000, LC_270M, 512, 5625 },
+ { 64000, LC_270M, 2048, 16875 },
+ { 88200, LC_270M, 1568, 9375 },
+ { 96000, LC_270M, 1024, 5625 },
+ { 128000, LC_270M, 4096, 16875 },
+ { 176400, LC_270M, 3136, 9375 },
+ { 192000, LC_270M, 2048, 5625 },
+ { 32000, LC_540M, 1024, 33750 },
+ { 44100, LC_540M, 784, 18750 },
+ { 48000, LC_540M, 512, 11250 },
+ { 64000, LC_540M, 2048, 33750 },
+ { 88200, LC_540M, 1568, 18750 },
+ { 96000, LC_540M, 1024, 11250 },
+ { 128000, LC_540M, 4096, 33750 },
+ { 176400, LC_540M, 3136, 18750 },
+ { 192000, LC_540M, 2048, 11250 },
+};
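+
+/*
+ * Illustrative check (not part of this patch): the entries above follow
+ * Maud/Naud = 512 * fs / f_LS_Clk from the DP spec. For example, a 48 kHz
+ * stream on a 162 MHz link gives 512 * 48000 / 162000000 = 512/3375,
+ * matching the { 48000, LC_162M, 512, 3375 } row.
+ */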
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+ if (rate == dp_aud_n_m[i].sample_rate &&
+ intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ return &dp_aud_n_m[i];
+ }
+
+ return NULL;
+}
+
static const struct {
int clock;
u32 config;
@@ -81,7 +138,7 @@ static const struct {
int clock;
int n;
int cts;
-} aud_ncts[] = {
+} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
@@ -121,45 +178,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+ int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
- if ((rate == aud_ncts[i].sample_rate) &&
- (mode->clock == aud_ncts[i].clock)) {
- return aud_ncts[i].n;
+ for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+ if (rate == hdmi_aud_ncts[i].sample_rate &&
+ adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+ return hdmi_aud_ncts[i].n;
}
}
return 0;
}
-static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
-{
- int n_low, n_up;
- uint32_t tmp = val;
-
- n_low = n & 0xfff;
- n_up = (n >> 12) & 0xff;
- tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
- tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
- (n_low << AUD_CONFIG_LOWER_N_SHIFT) |
- AUD_CONFIG_N_PROG_ENABLE);
- return tmp;
-}
-
-/* check whether N/CTS/M need be set manually */
-static bool audio_rate_need_prog(struct intel_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- if (((mode->clock == TMDS_297M) ||
- (mode->clock == TMDS_296M)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
- return true;
- else
- return false;
-}
-
static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
@@ -245,6 +277,100 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
+static void
+hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ if (nm)
+ DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ else
+ DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+ if (nm) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(nm->n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+ if (nm) {
+ tmp |= nm->m;
+ tmp |= AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ enum pipe pipe = intel_crtc->pipe;
+ int n;
+ u32 tmp;
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+ n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ if (n != 0) {
+ DRM_DEBUG_KMS("using N %d\n", n);
+
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ DRM_DEBUG_KMS("using automatic N\n");
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ /*
+ * Let's disable "Enable CTS or M Prog bit"
+ * and let HW calculate the value
+ */
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ else
+ hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -276,20 +402,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ enum port port = intel_encoder->port;
const uint8_t *eld = connector->eld;
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
- int n, rate;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
@@ -325,42 +447,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
- if (!acomp)
- rate = 0;
- else if (port >= PORT_A && port <= PORT_E)
- rate = acomp->aud_sample_rate[port];
- else {
- DRM_ERROR("invalid port: %d\n", port);
- rate = 0;
- }
- n = audio_config_get_n(adjusted_mode, rate);
- if (n != 0)
- tmp = audio_config_setup_n_reg(n, tmp);
- else
- DRM_DEBUG_KMS("no suitable N value is found\n");
- }
-
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(intel_crtc, port, adjusted_mode);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -400,13 +497,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -425,13 +522,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
* infrastructure is not there yet.
*/
- if (HAS_PCH_IBX(connector->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev) ||
- IS_CHERRYVIEW(connector->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -480,24 +577,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- connector = drm_select_eld(encoder);
- if (!connector)
+ connector = conn_state->connector;
+ if (!connector || !connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -508,7 +607,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -518,13 +617,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = connector;
+ intel_encoder->audio_connector = connector;
+
/* referred in audio callbacks */
- dev_priv->dig_port_map[port] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -537,22 +642,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = NULL;
- dev_priv->dig_port_map[port] = NULL;
+ intel_encoder->audio_connector = NULL;
+ dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -627,74 +737,68 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev,
- int port, int rate)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ int port, int pipe)
+{
+
+ if (WARN_ON(pipe >= I915_MAX_PIPES))
+ return NULL;
+
+ /* MST */
+ if (pipe >= 0)
+ return dev_priv->av_enc_map[pipe];
+
+ /* Non-MST */
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_encoder *encoder;
+
+ encoder = dev_priv->av_enc_map[pipe];
+ if (encoder == NULL)
+ continue;
+
+ if (port == encoder->port)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
- struct drm_display_mode *mode;
+ struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum pipe pipe = INVALID_PIPE;
- u32 tmp;
- int n;
int err = 0;
- /* HSW, BDW, SKL, KBL need this fix */
- if (!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_BROADWELL(dev_priv) &&
- !IS_HASWELL(dev_priv))
+ if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
+
/* 1. get the pipe */
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
- intel_encoder->type != INTEL_OUTPUT_HDMI) {
- DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
+ (intel_encoder->type != INTEL_OUTPUT_HDMI &&
+ intel_encoder->type != INTEL_OUTPUT_DP)) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+
+ /* pipe passed from the audio driver will be -1 for Non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
- if (pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
- err = -ENODEV;
- goto unlock;
- }
- DRM_DEBUG_KMS("pipe %c connects port %c\n",
- pipe_name(pipe), port_name(port));
- mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- /* 2. check whether to set the N/CTS/M manually or not */
- if (!audio_rate_need_prog(crtc, mode)) {
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- n = audio_config_get_n(mode, rate);
- if (n == 0) {
- DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
- port_name(port));
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- /* 3. set the N/CTS/M */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp = audio_config_setup_n_reg(n, tmp);
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(crtc, port, adjusted_mode);
unlock:
mutex_unlock(&dev_priv->av_mutex);
@@ -703,27 +807,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- bool *enabled,
+ int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
- if (intel_encoder) {
- ret = 0;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- *enabled = intel_dig_port->audio_connector != NULL;
- if (*enabled) {
- eld = intel_dig_port->audio_connector->eld;
- ret = drm_eld_size(eld);
- memcpy(buf, eld, min(max_bytes, ret));
- }
+
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!intel_encoder) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ mutex_unlock(&dev_priv->av_mutex);
+ return ret;
+ }
+
+ ret = 0;
+ *enabled = intel_encoder->audio_connector != NULL;
+ if (*enabled) {
+ eld = intel_encoder->audio_connector->eld;
+ ret = drm_eld_size(eld);
+ memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cfa83..7ffab1abc518 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
goto err;
}
+ /* Log the presence of sequences we won't run. */
+ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+ DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
@@ -1031,6 +1035,77 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_ddc_pin)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(p), i->alternate_ddc_pin,
+ port_name(port), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+ * pin, then dvi/hdmi couldn't exist on the shared
+ * port. Otherwise they would share the same ddc pin and the
+ * system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dvi = false;
+ i->supports_hdmi = false;
+ i->alternate_ddc_pin = 0;
+ }
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_aux_channel)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_aux_channel != i->alternate_aux_channel)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(p), i->alternate_aux_channel,
+ port_name(port), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+ * aux channel, then DP couldn't exist on the shared
+ * port. Otherwise they would share the same aux channel
+ * and the system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dp = false;
+ i->alternate_aux_channel = 0;
+ }
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -1072,7 +1147,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->raw[25];
+ aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1105,54 +1180,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- if (port == PORT_E) {
- info->alternate_ddc_pin = ddc_pin;
- /* if DDIE share ddc pin with other port, then
- * dvi/hdmi couldn't exist on the shared port.
- * Otherwise they share the same ddc bin and system
- * couldn't communicate with them seperately. */
- if (ddc_pin == DDC_PIN_B) {
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_C) {
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_D) {
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
- }
- } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
- else if (ddc_pin == DDC_PIN_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
- else if (ddc_pin == DDC_PIN_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ info->alternate_ddc_pin = ddc_pin;
+
+ sanitize_ddc_pin(dev_priv, port);
}
if (is_dp) {
- if (port == PORT_E) {
- info->alternate_aux_channel = aux_channel;
- /* if DDIE share aux channel with other port, then
- * DP couldn't exist on the shared port. Otherwise
- * they share the same aux channel and system
- * couldn't communicate with them seperately. */
- if (aux_channel == DP_AUX_A)
- dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
- else if (aux_channel == DP_AUX_B)
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
- else if (aux_channel == DP_AUX_C)
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
- else if (aux_channel == DP_AUX_D)
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
- }
- else if (aux_channel == DP_AUX_A && port != PORT_A)
- DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- else if (aux_channel == DP_AUX_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- else if (aux_channel == DP_AUX_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- else if (aux_channel == DP_AUX_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ info->alternate_aux_channel = aux_channel;
+
+ sanitize_aux_ch(dev_priv, port);
}
if (bdb->version >= 158) {
@@ -1641,7 +1677,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+ enum port port)
{
static const struct {
u16 dp, hdmi;
@@ -1655,22 +1692,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
- int i;
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
+ if (p_child->common.dvo_port == port_mapping[port].dp)
+ return true;
+
+ /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+ p_child->common.aux_channel != 0)
+ return true;
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ if (child_dev_is_dp_dual_mode(p_child, port))
return true;
}
@@ -1759,3 +1809,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
return false;
}
+
+/**
+ * intel_bios_is_lspcon_present - if LSPCON is attached on %port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if LSPCON is present on this port
+ */
+bool
+intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (!HAS_LSPCON(dev_priv))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ case DVO_PORT_DPD:
+ case DVO_PORT_HDMID:
+ if (port == PORT_D)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 495611b7068d..c9c46a538edb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->breadcrumbs.irq_posted = true;
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
engine->breadcrumbs.irq_posted = false;
}
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
return first;
}
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
@@ -400,7 +402,7 @@ out_unlock:
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
@@ -464,7 +466,7 @@ static int intel_breadcrumbs_signaler(void *arg)
&request->signaling.wait);
local_bh_disable();
- fence_signal(&request->fence);
+ dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
/* Find the next oldest signal. Note that as we have
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (request == b->first_signal) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
}
rb_erase(&request->signaling.node, &b->signals);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
i915_gem_request_put(request);
} else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by fence_enable_sw_signaling() */
+ /* Note that we may be called from an interrupt handler on another
+ * device (e.g. nouveau signaling a fence completion causing us
+ * to submit a request, and so enable signaling). As such,
+ * we need to make sure that all other users of b->lock protect
+ * against interrupts, i.e. use spin_lock_irqsave.
+ */
+
+ /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
+ if (!request->global_seqno)
+ return;
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->fence.seqno;
+ request->signaling.wait.seqno = request->global_seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->fence.seqno,
- to_signaler(parent)->fence.seqno)) {
+ if (i915_seqno_passed(request->global_seqno,
+ to_signaler(parent)->global_seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
irq_disable(engine);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@@ -618,33 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
- /* To avoid the task_struct disappearing beneath us as we wake up
- * the process, we must first inspect the task_struct->state under the
- * RCU lock, i.e. as we call wake_up_process() we must be holding the
- * rcu_read_lock().
- */
- for_each_engine(engine, i915)
- if (unlikely(intel_engine_wakeup(engine)))
- mask |= intel_engine_flag(engine);
+ for_each_engine(engine, i915, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
- return mask;
-}
+ spin_lock_irq(&b->lock);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- unsigned int mask = 0;
+ if (b->first_wait) {
+ wake_up_process(b->first_wait->tsk);
+ mask |= intel_engine_flag(engine);
+ }
- for_each_engine(engine, i915) {
- if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
- wake_up_process(engine->breadcrumbs.signaler);
+ if (b->first_signal) {
+ wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
+
+ spin_unlock_irq(&b->lock);
}
return mask;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a72771eea6..d81232b79f00 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
- if (INTEL_INFO(dev)->gen > 6) {
+ if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
if (intel_crtc_state->limited_color_range)
@@ -273,7 +272,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
int i;
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -288,7 +287,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -297,7 +296,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -326,7 +325,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround: Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
+ if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
@@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -446,7 +443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->degamma_lut) {
lut = (struct drm_color_lut *) state->degamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
word0 =
@@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->gamma_lut) {
lut = (struct drm_color_lut *) state->gamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
word0 =
@@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state)
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
sizeof(struct drm_color_lut);
/*
@@ -529,19 +526,18 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
- } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
- IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else {
@@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc)
}
/* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
- INTEL_INFO(dev)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ true,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfbcf16b41df..86ecec5601d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int mode)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -165,16 +164,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
+ else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@@ -241,7 +240,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -250,15 +250,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev) || IS_GEN4(dev))
+ else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
max_clock = 400000;
else
max_clock = 350000;
@@ -269,7 +269,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev) &&
+ if (HAS_PCH_LPT(dev_priv) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
@@ -280,13 +280,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
@@ -296,7 +296,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
pipe_config->port_clock = 135000 * 2;
return true;
@@ -312,7 +312,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
crt->force_hotplug_required = 0;
@@ -419,10 +419,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
return intel_ironlake_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
/*
@@ -430,7 +430,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev) && !IS_GM45(dev))
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
tries = 2;
else
tries = 1;
@@ -566,13 +566,13 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -643,11 +643,36 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
return status;
}
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "Intel DZ77BH-55K",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+ },
+ },
+ { }
+};
+
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -659,10 +684,14 @@ intel_crt_detect(struct drm_connector *connector, bool force)
connector->base.id, connector->name,
force);
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_spurious_crt_detect))
+ return connector_status_disconnected;
+
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -684,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -700,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_INFO(dev)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915.load_detect_test)
@@ -740,7 +769,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
- if (ret || !IS_G4X(dev))
+ if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -762,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -808,32 +836,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "ACER ZGB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- },
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "DELL XPS 8700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
- },
- },
- { }
-};
-
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -843,13 +845,9 @@ void intel_crt_init(struct drm_device *dev)
i915_reg_t adpa_reg;
u32 adpa;
- /* Skip machines without VGA that falsely report hotplug events */
- if (dmi_check_system(intel_no_crt))
- return;
-
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -893,12 +891,12 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@@ -907,20 +905,23 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev))
+ if (I915_HAS_HOTPLUG(dev_priv) &&
+ !dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
+ crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
@@ -928,7 +929,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
@@ -941,7 +942,7 @@ void intel_crt_init(struct drm_device *dev)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f43397..d7a04bca8c28 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -168,12 +168,6 @@ struct stepping_info {
char substepping;
};
-static const struct stepping_info kbl_stepping_info[] = {
- {'A', '0'}, {'B', '0'}, {'C', '0'},
- {'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'},
-};
-
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev_priv)) {
- size = ARRAY_SIZE(kbl_stepping_info);
- si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 15d47c87def6..10ec9d4b7d45 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x80005012, 0x000000C0, 0x3 },
};
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
/*
- * Skylake H and S
+ * Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
- * Skylake U
+ * Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
};
/*
- * Skylake Y
+ * Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x0000008A, 0x0 },
};
-/* Skylake U, H and S */
+/* Skylake/Kabylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
@@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x80000018, 0x000000C0, 0x1 },
};
-/* Skylake Y */
+/* Skylake/Kabylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
@@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
@@ -347,6 +386,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_KBL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
@@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ else
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
@@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp =
+ kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
+ } else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_edp_entries > 9))
- n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
@@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_INFO(dev)->gen <= 8)
+ if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ ddi_translations = kbl_get_buf_trans_dp(dev_priv,
+ &n_entries);
+ else
+ ddi_translations = skl_get_buf_trans_dp(dev_priv,
+ &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
@@ -1477,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
{
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
- uint32_t val;
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@@ -1506,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
}
}
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
- ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
- val &= ~SCALE_DCOMP_METHOD;
- if (ddi_translations[level].enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
-
- I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
- val &= ~DE_EMPHASIS;
- val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
-
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+ bxt_ddi_phy_set_signal_level(dev_priv, port,
+ ddi_translations[level].margin,
+ ddi_translations[level].scale,
+ ddi_translations[level].enable,
+ ddi_translations[level].deemphasis);
}
static uint32_t translate_signal_level(int signal_levels)
@@ -1711,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1742,10 +1783,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_INFO(dev)->gen < 9)
+ else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (type == INTEL_OUTPUT_HDMI) {
@@ -1795,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1814,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
@@ -1824,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(intel_encoder);
+ intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
@@ -1853,332 +1893,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
}
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
-
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
- return false;
-
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
-
- return false;
- }
-
- if (phy == DPIO_PHY1 &&
- !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-
- return false;
- }
-
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
-
- return false;
- }
-
- for_each_port_masked(port,
- phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
- BIT(PORT_A)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
- return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- if (intel_wait_for_register(dev_priv,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val;
-
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
- /* Still read out the GRC value for state verification */
- if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
-
- return;
- }
-
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
- }
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
- /*
- * The PHY registers start out inaccessible and respond to reads with
- * all 1s. Eventually they become accessible as they power up, then
- * the reserved bit will give the default 0. Poll on the reserved bit
- * becoming 0 to find when the PHY is accessible.
- * HW team confirmed that the time to reach phypowergood status is
- * anywhere between 50 us and 100us.
- */
- if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
- DRM_ERROR("timeout during PHY%d power on\n", phy);
- }
-
- /* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
- /* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy == DPIO_PHY0) {
- val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
- }
-
- val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
- val &= ~OCL2_LDOFUSE_PWR_DIS;
- /*
- * On PHY1 disable power on the second channel, since no port is
- * connected there. On PHY0 both channels have a port, so leave it
- * enabled.
- * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
- * power down the second channel on PHY0 as well.
- *
- * FIXME: Clarify programming of the following, the register is
- * read-only with bit 6 fixed at 0 at least in stepping A.
- */
- if (phy == DPIO_PHY1)
- val |= OCL2_LDOFUSE_PWR_DIS;
- I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
-
- if (phy == DPIO_PHY0) {
- uint32_t grc_code;
- /*
- * PHY0 isn't connected to an RCOMP resistor so copy over
- * the corresponding calibrated value from PHY1, and disable
- * the automatic calibration on PHY0.
- */
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
- I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
-
- val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
- val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
- }
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy == DPIO_PHY1)
- bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- uint32_t val;
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- i915_reg_t reg, u32 mask, u32 expected,
- const char *reg_fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- u32 val;
-
- val = I915_READ(reg);
- if ((val & mask) == expected)
- return true;
-
- va_start(args, reg_fmt);
- vaf.fmt = reg_fmt;
- vaf.va = &args;
-
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
- "current %08x, expected %08x (mask %08x)\n",
- phy, &vaf, reg.reg, val, (val & ~mask) | expected,
- mask);
-
- va_end(args);
-
- return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- uint32_t mask;
- bool ok;
-
-#define _CHK(reg, mask, exp, fmt, ...) \
- __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
- ## __VA_ARGS__)
-
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
- return false;
-
- ok = true;
-
- /* PLL Rcomp code offset */
- ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
- ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
-
- /* Power gating */
- mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
- ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
-
- if (phy == DPIO_PHY0)
- ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
- DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
- "BXT_PORT_CL2CM_DW6_BC");
-
- /*
- * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
- * at least on stepping A this bit is read-only and fixed at 0.
- */
-
- if (phy == DPIO_PHY0) {
- u32 grc_code = dev_priv->bxt_phy_grc;
-
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
- mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
- GRC_CODE_NOM_MASK;
- ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
- "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
-
- mask = GRC_DIS | GRC_RDY_OVRD;
- ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
- "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
- }
-
- return ok;
-#undef _CHK
-}
-
-static uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- switch (pipe_config->lane_count) {
- case 1:
- return 0;
- case 2:
- return BIT(2) | BIT(0);
- case 4:
- return BIT(3) | BIT(2) | BIT(0);
- default:
- MISSING_CASE(pipe_config->lane_count);
-
- return 0;
- }
-}
-
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
-}
-
-static uint8_t
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
- int lane;
- uint8_t mask;
-
- mask = 0;
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- if (val & LATENCY_OPTIM)
- mask |= BIT(lane);
- }
+ uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
- return mask;
+ bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2347,7 +2069,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config);
+ pipe_config->lane_count);
return ret;
@@ -2438,7 +2160,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- bool init_hdmi, init_dp;
+ bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@@ -2470,6 +2192,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ /*
+		 * An LSPCON device must be driven through a DP connector
+		 * using a special detection sequence, so make sure DP
+		 * is initialized before LSPCON.
+ */
+ init_dp = true;
+ init_lspcon = true;
+ init_hdmi = false;
+ DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ }
+
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
@@ -2509,7 +2244,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev) && port == PORT_A) {
+ if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@@ -2520,6 +2255,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -2532,7 +2268,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
@@ -2545,6 +2281,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;
}
+ if (init_lspcon) {
+ if (lspcon_init(intel_dig_port))
+			/* TODO: handle the HDMI infoframe part */
+ DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+ port_name(port));
+ else
+ /*
+			 * LSPCON init failed, but DP init succeeded, so
+			 * let's try to drive it as a DP++ port.
+ */
+ DRM_ERROR("LSPCON init failed on port %c\n",
+ port_name(port));
+ }
+
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 73b6858600ac..185e3bbc9ec9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
- DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
info->gen,
dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision,
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
+ dev_priv->drm.pdev->revision);
+#define PRINT_FLAG(name) \
+ DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_COMMA
}
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
@@ -192,7 +186,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable[s_max];
+ u32 fuse2, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
@@ -288,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
- else
+ } else if (INTEL_GEN(dev_priv) >= 5) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
+ }
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fbcfed63a76e..cf5cff7b03b8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -116,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
@@ -600,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -613,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -698,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -753,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -813,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -845,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(to_i915(dev))) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
@@ -909,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -977,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit, &clock))
+ if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1003,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
target_clock, refclk, NULL, best_clock);
}
-bool intel_crtc_active(struct drm_crtc *crtc)
+bool intel_crtc_active(struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*
@@ -1020,27 +1023,25 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return intel_crtc->active && crtc->primary->state->fb &&
- intel_crtc->config->base.adjusted_mode.crtc_clock;
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- return intel_crtc->config->cpu_transcoder;
+ return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -1070,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
*/
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1085,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1187,19 +1187,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
onoff(state), onoff(cur_state));
}
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
@@ -1209,7 +1207,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
@@ -1232,10 +1230,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = &dev_priv->drm;
bool cur_state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1294,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
@@ -1320,29 +1316,28 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int sprite;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1596,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_device *dev)
+static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
@@ -1611,19 +1606,18 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev) && !IS_I830(dev))
+ if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1648,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config->dpll_hw_state.dpll_md);
} else {
@@ -1683,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*/
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) &&
+ if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev)) {
+ !intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -1786,9 +1779,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
i915_reg_t reg;
uint32_t val, pipeconf_val;
@@ -1799,7 +1791,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Set the timing override bit before enabling the
* pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
@@ -1877,7 +1869,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1898,7 +1889,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
@@ -1926,6 +1917,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ WARN_ON(!crtc->config->has_pch_encoder);
+
+ if (HAS_PCH_LPT(dev_priv))
+ return TRANSCODER_A;
+ else
+ return (enum transcoder) crtc->pipe;
+}
+
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @crtc: crtc responsible for the pipe
@@ -1939,7 +1942,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- enum pipe pch_transcoder;
i915_reg_t reg;
u32 val;
@@ -1949,11 +1951,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv))
- pch_transcoder = TRANSCODER_A;
- else
- pch_transcoder = pipe;
-
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1967,7 +1964,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
} else {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_rx_pll_enabled(dev_priv,
+ (enum pipe) intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
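
The assert_fdi_rx_pll_enabled() call above now gets its PCH transcoder from the new intel_crtc_pch_transcoder() helper added earlier in this patch, instead of from a pch_transcoder local computed inside intel_enable_pipe(). The mapping itself is unchanged: LPT exposes a single PCH transcoder, every other PCH follows the pipe. A minimal compilable sketch of that mapping, using stand-in enums rather than the driver's real types:

    #include <assert.h>
    #include <stdbool.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };
    enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C };

    /* LPT has one PCH transcoder (A); otherwise the transcoder index
     * simply tracks the pipe, as in the helper added by this patch. */
    static enum transcoder pch_transcoder_for(enum pipe pipe, bool has_pch_lpt)
    {
        return has_pch_lpt ? TRANSCODER_A : (enum transcoder)pipe;
    }

    int main(void)
    {
        assert(pch_transcoder_for(PIPE_C, true) == TRANSCODER_A);
        assert(pch_transcoder_for(PIPE_C, false) == TRANSCODER_C);
        return 0;
    }
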
@@ -2139,7 +2137,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
@@ -2191,7 +2189,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
intel_fill_fb_ggtt_view(&view, fb, rotation);
@@ -2260,7 +2258,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
return to_intel_framebuffer(fb)->rotated[plane].pitch;
else
return fb->pitches[plane];
@@ -2296,7 +2294,7 @@ void intel_add_fb_offsets(int *x, int *y,
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*x += intel_fb->rotated[plane].x;
*y += intel_fb->rotated[plane].y;
} else {
@@ -2352,15 +2350,15 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
tile_size = intel_tile_size(dev_priv);
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[plane], cpp);
+ fb->modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2401,7 +2399,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier[plane];
+ uint64_t fb_modifier = fb->modifier;
unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
u32 offset, offset_aligned;
@@ -2416,7 +2414,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb_modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2460,7 +2458,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
alignment = 4096;
else
- alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
rotation, alignment);
@@ -2542,13 +2540,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[i], cpp);
+ fb->modifier, cpp);
rot_info->plane[i].offset = offset;
rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
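
This and the surrounding hunks track the drm_framebuffer change that collapsed the per-plane modifier[] array into a single modifier field; the per-plane slots always carried the same tiling modifier, so only one value is kept (the userspace-facing mode_cmd still has a modifier[] array, hence the modifier[0] assignment below). A rough before/after sketch with stand-in structs, not the real drm_framebuffer layout:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in shapes, not the real struct drm_framebuffer. */
    struct fb_old { uint64_t modifier[4]; };   /* one slot per color plane */
    struct fb_new { uint64_t modifier; };      /* single value for the whole fb */

    static struct fb_new flatten(const struct fb_old *old)
    {
        /* the per-plane slots were always populated with the same value,
         * so keeping a single one loses nothing */
        assert(old->modifier[1] == old->modifier[0]);
        return (struct fb_new){ .modifier = old->modifier[0] };
    }

    int main(void)
    {
        struct fb_old o = { .modifier = { 1, 1, 1, 1 } };
        return flatten(&o).modifier == 1 ? 0 : 1;
    }
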
@@ -2707,7 +2705,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier[0];
+ mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
@@ -2817,14 +2815,8 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src.x1 = plane_state->src_x;
- intel_state->base.src.y1 = plane_state->src_y;
- intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
- intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
- intel_state->base.dst.x1 = plane_state->crtc_x;
- intel_state->base.dst.y1 = plane_state->crtc_y;
- intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
- intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+ intel_state->base.src = drm_plane_state_src(plane_state);
+ intel_state->base.dst = drm_plane_state_dest(plane_state);
obj = intel_fb_obj(fb);
if (i915_gem_object_is_tiled(obj))
@@ -2843,7 +2835,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
{
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- switch (fb->modifier[plane]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
@@ -2874,7 +2866,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
}
break;
default:
- MISSING_CASE(fb->modifier[plane]);
+ MISSING_CASE(fb->modifier);
}
return 2048;
@@ -2902,7 +2894,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+ alignment = intel_surf_alignment(dev_priv, fb->modifier);
/*
* AUX surface offset is specified as the distance from the
@@ -2919,7 +2911,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
*
* TODO: linear and Y-tiled seem fine, Yf untested,
*/
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
while ((x + w) * cpp > fb->pitches[0]) {
@@ -2976,9 +2968,10 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
int ret;
/* Rotate src coordinates to match rotated GTT view */
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
- fb->width, fb->height, DRM_ROTATE_270);
+ fb->width << 16, fb->height << 16,
+ DRM_ROTATE_270);
/*
* Handle the AUX surface first since
@@ -3005,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
@@ -3022,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
@@ -3033,7 +3024,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
@@ -3068,28 +3059,34 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
}
if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (IS_G4X(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
+ if (rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += crtc_state->pipe_src_w - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3098,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
- } else
- I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+ } else {
+ I915_WRITE(DSPADDR(plane),
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
+ }
POSTING_READ(reg);
}
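
i9xx_update_primary_plane() now treats rotation as a bitmask: the old rotation == DRM_ROTATE_180 equality test stops working once a reflection bit can be set alongside the rotation, so the hunk checks individual bits, sets DISPPLANE_ROTATE_180 / DISPPLANE_MIRROR, and offsets the scanout origin accordingly. A compilable sketch of the origin fix-up, with made-up bit values standing in for the DRM rotation flags:

    #include <assert.h>

    /* Stand-in rotation bits; the real values live in the DRM headers. */
    #define ROTATE_180  (1u << 2)
    #define REFLECT_X   (1u << 4)

    /* With a 180 degree rotation the hardware scans out from the opposite
     * corner, so both x and y move to the far edge; an X mirror alone only
     * flips the x origin. */
    static void adjust_origin(unsigned int rotation, int w, int h, int *x, int *y)
    {
        if (rotation & ROTATE_180) {
            *x += w - 1;
            *y += h - 1;
        } else if (rotation & REFLECT_X) {
            *x += w - 1;
        }
    }

    int main(void)
    {
        int x = 0, y = 0;
        adjust_origin(REFLECT_X, 1920, 1080, &x, &y);
        assert(x == 1919 && y == 0);
        return 0;
    }
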
@@ -3144,7 +3144,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
@@ -3170,10 +3170,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
BUG();
}
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3181,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3201,7 +3202,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3276,12 +3277,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+ stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
} else {
- stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
}
@@ -3377,7 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3398,7 +3398,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
@@ -3413,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, wm, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3450,13 +3447,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3506,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
+ i915_redisable_vga(to_i915(dev));
if (!state)
return 0;
@@ -3584,7 +3574,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
err:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3603,8 +3593,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
dev_priv->modeset_restore_state = NULL;
- dev_priv->modeset_restore_state = NULL;
-
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
if (!state) {
@@ -3646,6 +3634,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
+ if (state)
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
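
drm_atomic_state_free() is replaced by drm_atomic_state_put() throughout: atomic states are reference counted now, so a state built for the reset path can outlive the function that allocated it, each holder simply drops its own reference, and the last put performs the actual free. A toy illustration of that ownership model, not the DRM API itself:

    #include <stdlib.h>

    /* Minimal stand-in for a refcounted state object; the real code keeps
     * a struct kref inside struct drm_atomic_state. */
    struct state { int refcount; };

    static struct state *state_get(struct state *s) { s->refcount++; return s; }

    static void state_put(struct state *s)
    {
        /* only the last reference actually frees the object */
        if (--s->refcount == 0)
            free(s);
    }

    int main(void)
    {
        struct state *s = malloc(sizeof(*s));
        s->refcount = 1;
        state_get(s);     /* e.g. a second user keeps the state alive */
        state_put(s);     /* original owner drops its reference */
        state_put(s);     /* second user drops the last one: freed here */
        return 0;
    }
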
@@ -3683,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3709,12 +3698,12 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
(pipe_config->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
@@ -3734,7 +3723,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
@@ -3745,7 +3734,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -3759,7 +3748,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
@@ -3903,7 +3892,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -3947,7 +3936,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -3956,7 +3945,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -4210,7 +4199,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
@@ -4222,7 +4211,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -4240,6 +4229,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4254,7 +4244,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
continue;
if (crtc->flip_work)
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;
}
@@ -4545,7 +4535,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error
@@ -4558,7 +4548,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* We need to program the right clock selection before writing the pixel
* mutliplier into the DPLL. */
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -4588,7 +4578,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4667,7 +4658,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = intel_rotation_90_or_270(rotation) ?
+ need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
@@ -4728,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- intel_crtc->pipe, SKL_CRTC_INDEX);
-
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
@@ -4755,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
@@ -4763,10 +4748,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->base.visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_plane->base.name,
- intel_crtc->pipe, drm_plane_index(&intel_plane->base));
-
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
@@ -4858,7 +4839,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
@@ -4883,7 +4864,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
*/
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4915,7 +4896,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
return;
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4930,7 +4911,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -4984,7 +4965,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5007,7 +4988,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -5039,10 +5020,10 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
}
@@ -5061,7 +5042,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5090,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5104,7 +5087,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -5119,7 +5102,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
}
@@ -5132,7 +5115,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*/
if (pipe_config->disable_lp_wm) {
ilk_disable_lp_wm(dev);
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
/*
@@ -5157,9 +5140,10 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else if (pipe_config->update_wm_pre)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
@@ -5313,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5371,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -5382,12 +5368,12 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5395,18 +5381,19 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5461,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
@@ -5477,9 +5464,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5488,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5497,8 +5485,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
- intel_wait_for_vblank(dev, pipe);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
@@ -5507,9 +5495,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
- if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
@@ -5564,7 +5552,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
u32 temp;
@@ -5593,8 +5581,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5611,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
@@ -5698,13 +5685,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -5725,7 +5712,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
@@ -5738,7 +5725,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* what's the status of the given connectors, play safe and
* run the DP detection too.
*/
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5830,11 +5817,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
static int skl_calc_cdclk(int max_pixclk, int vco);
-static void intel_update_max_cdclk(struct drm_device *dev)
+static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -5856,9 +5841,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 Mhz for ULT.
@@ -5867,15 +5852,15 @@ static void intel_update_max_cdclk(struct drm_device *dev)
*/
if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev))
+ else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev))
+ else if (IS_BDW_ULT(dev_priv))
dev_priv->max_cdclk_freq = 540000;
else
dev_priv->max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -5891,11 +5876,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_dotclk_freq);
}
-static void intel_update_cdclk(struct drm_device *dev)
+static void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
@@ -6056,14 +6039,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
return;
}
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6196,7 +6179,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
dev_priv->skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(&dev_priv->drm);
+ intel_update_max_cdclk(dev_priv);
}
static void
@@ -6282,7 +6265,6 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -6333,7 +6315,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
@@ -6380,7 +6362,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6414,7 +6396,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
@@ -6471,7 +6453,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -6479,7 +6461,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
switch (cdclk) {
@@ -6512,7 +6494,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -6675,7 +6657,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
@@ -6703,7 +6685,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
@@ -6718,7 +6700,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
@@ -6732,7 +6714,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6774,7 +6756,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6785,7 +6767,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6822,8 +6804,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN2(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6837,9 +6819,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(intel_crtc);
@@ -6847,7 +6829,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
@@ -6885,7 +6867,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc_state, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@@ -6901,7 +6883,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
encoder->base.crtc = NULL;
intel_fbc_disable(intel_crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
domains = intel_crtc->enabled_power_domains;
@@ -7027,6 +7009,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7039,7 +7022,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
@@ -7049,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev)->num_pipes == 2)
+ if (INTEL_INFO(dev_priv)->num_pipes == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7060,7 +7043,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7079,7 +7062,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7190,7 +7173,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -7224,11 +7207,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
- if (HAS_IPS(dev))
+ if (HAS_IPS(dev_priv))
hsw_compute_ips_config(crtc, pipe_config);
if (pipe_config->has_pch_encoder)
@@ -7237,10 +7220,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return 0;
}
-static int skylake_get_display_clock_speed(struct drm_device *dev)
+static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl;
+ u32 cdctl;
skl_dpll0_update(dev_priv);
@@ -7299,9 +7281,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.ref;
}
-static int broxton_get_display_clock_speed(struct drm_device *dev)
+static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 divider;
int div, vco;
@@ -7334,9 +7315,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
return DIV_ROUND_CLOSEST(vco, div);
}
-static int broadwell_get_display_clock_speed(struct drm_device *dev)
+static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7354,9 +7334,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
return 675000;
}
-static int haswell_get_display_clock_speed(struct drm_device *dev)
+static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7366,41 +7345,41 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_HSW_ULT(dev))
+ else if (IS_HSW_ULT(dev_priv))
return 337500;
else
return 540000;
}
-static int valleyview_get_display_clock_speed(struct drm_device *dev)
+static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+ return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}
-static int ilk_get_display_clock_speed(struct drm_device *dev)
+static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 450000;
}
-static int i945_get_display_clock_speed(struct drm_device *dev)
+static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 400000;
}
-static int i915_get_display_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 333333;
}
-static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 200000;
}
-static int pnv_get_display_clock_speed(struct drm_device *dev)
+static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7423,9 +7402,9 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i915gm_get_display_clock_speed(struct drm_device *dev)
+static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7443,14 +7422,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i865_get_display_clock_speed(struct drm_device *dev)
+static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 266667;
}
-static int i85x_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
/*
@@ -7486,14 +7465,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
return 0;
}
-static int i830_get_display_clock_speed(struct drm_device *dev)
+static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 133333;
}
-static unsigned int intel_hpll_vco(struct drm_device *dev)
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7536,20 +7514,20 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
uint8_t tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
vco_table = ctg_vco;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
vco_table = blb_vco;
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -7560,10 +7538,10 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
return vco;
}
-static int gm45_get_display_clock_speed(struct drm_device *dev)
+static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7583,14 +7561,14 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i965gm_get_display_clock_speed(struct drm_device *dev)
+static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7621,15 +7599,15 @@ fail:
return 200000;
}
-static int g33_get_display_clock_speed(struct drm_device *dev)
+static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
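
All of the .get_display_clock_speed hooks above switch from struct drm_device * to struct drm_i915_private *, matching the wider dev_priv conversion in this patch, so callers such as intel_update_cdclk() no longer bounce through &dev_priv->drm. A self-contained sketch of the callback-table shape, with a stand-in private struct rather than the real drm_i915_private:

    #include <stdio.h>

    struct i915_priv;
    typedef int (*get_cdclk_fn)(struct i915_priv *priv);

    /* Stand-in for the driver-private struct; the real hook lives in
     * dev_priv->display.get_display_clock_speed. */
    struct i915_priv {
        get_cdclk_fn get_display_clock_speed;
        int cdclk_freq;
    };

    static int ilk_get_cdclk(struct i915_priv *priv) { (void)priv; return 450000; }

    /* mirrors intel_update_cdclk(): ask the per-platform hook, cache the result */
    static void update_cdclk(struct i915_priv *priv)
    {
        priv->cdclk_freq = priv->get_display_clock_speed(priv);
    }

    int main(void)
    {
        struct i915_priv priv = { .get_display_clock_speed = ilk_get_cdclk };
        update_cdclk(&priv);
        printf("cdclk: %d kHz\n", priv.cdclk_freq);
        return 0;
    }
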
@@ -7718,10 +7696,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 fp, fp2 = 0;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -7789,12 +7767,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -7803,8 +7780,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
- crtc->config->has_drrs) {
+ if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
+ INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -8091,11 +8068,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -8106,7 +8082,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
@@ -8129,20 +8105,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev))
- chv_disable_pll(to_i915(dev), pipe);
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
else
- vlv_disable_pll(to_i915(dev), pipe);
+ vlv_disable_pll(dev_priv, pipe);
}
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -8155,7 +8130,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8168,11 +8143,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && reduced_clock)
+ if (IS_G4X(dev_priv) && reduced_clock)
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock->p2) {
@@ -8189,7 +8164,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -8203,7 +8178,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -8234,7 +8209,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
@@ -8249,8 +8225,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -8276,7 +8251,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_GEN(dev_priv) > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8303,7 +8278,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
@@ -8399,8 +8374,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -8413,7 +8387,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
@@ -8435,7 +8410,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
@@ -8445,7 +8420,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_INFO(dev)->gen < 4 ||
+ if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -8453,7 +8428,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
@@ -8653,11 +8628,11 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8665,7 +8640,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -8729,10 +8704,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -8741,7 +8716,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
@@ -8761,7 +8736,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -8810,8 +8785,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8829,7 +8803,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
goto out;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
@@ -8845,11 +8820,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8857,9 +8832,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
tmp = I915_READ(DPLL_MD(crtc->pipe));
@@ -8867,7 +8842,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8879,13 +8855,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
@@ -8897,9 +8873,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_crtc_clock_get(crtc, pipe_config);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8950,7 +8926,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -9198,7 +9174,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+ if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9221,7 +9198,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
}
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9237,7 +9214,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
mutex_lock(&dev_priv->sb_lock);
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9345,9 +9322,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
*/
void intel_init_pch_refclk(struct drm_device *dev)
{
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
ironlake_init_pch_refclk(dev);
- else if (HAS_PCH_LPT(dev))
+ else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev);
}
@@ -9476,7 +9455,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
@@ -9650,11 +9629,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9666,7 +9644,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily read).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
@@ -9770,17 +9748,17 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_NONE;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
@@ -9797,13 +9775,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -9836,7 +9814,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignements of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now. */
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9868,10 +9846,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
- fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
}
}
@@ -9881,7 +9859,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
offset = I915_READ(DSPOFFSET(pipe));
} else {
if (plane_config->tiling)
@@ -9900,7 +9878,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
- fb->modifier[0]);
+ fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -10025,7 +10003,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -10045,9 +10023,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
@@ -10055,9 +10031,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
@@ -10172,7 +10146,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
/*
@@ -10205,7 +10179,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10225,7 +10199,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10242,6 +10216,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
bxt_set_cdclk(to_i915(dev), req_cdclk);
}
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+ int pixel_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than
+ * 432 MHz, audio enabled, port width x4, and link rate
+ * HBR2 (5.4 GHz), or else there may be audio corruption or
+ * screen corruption."
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4)
+ pixel_rate = max(432000, pixel_rate);
+
+ return pixel_rate;
+}
+
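The new bdw_adjust_min_pipe_pixel_rate() helper above applies two floors to the per-pipe pixel rate: with IPS enabled on Broadwell the pixel rate may not exceed 95% of cdclk (so the requirement is scaled up by 100/95, rounded up), and DisplayPort with audio at HBR2 x4 needs a cdclk of at least 432 MHz. A small self-contained sketch of the same arithmetic, with the kernel macros replaced by local equivalents, might be:

#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Sketch of the two adjustments made above (rates in kHz):
 *  - with IPS, the pixel rate must stay below 95% of cdclk, so the cdclk
 *    requirement becomes pixel_rate * 100 / 95, rounded up;
 *  - DP + audio + 4 lanes at HBR2 (port clock >= 540000 kHz) needs a
 *    cdclk of at least 432000 kHz.
 */
static int adjust_min_pixel_rate(int pixel_rate, int ips_enabled,
				 int dp_audio_hbr2_x4)
{
	if (ips_enabled)
		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

	if (dp_audio_hbr2_x4)
		pixel_rate = max_int(432000, pixel_rate);

	return pixel_rate;
}

int main(void)
{
	/* 400 MHz pipe with IPS -> needs ~421053 kHz of cdclk headroom. */
	printf("%d\n", adjust_min_pixel_rate(400000, 1, 0));
	/* Low pixel rate but DP/audio/HBR2 x4 -> clamped up to 432000. */
	printf("%d\n", adjust_min_pixel_rate(200000, 0, 1));
	return 0;
}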
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
@@ -10267,9 +10264,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+ pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+ pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
}
@@ -10352,7 +10349,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
WARN(cdclk != dev_priv->cdclk_freq,
"cdclk requested %d kHz but got %d kHz\n",
@@ -10639,8 +10636,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10649,9 +10645,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10667,7 +10663,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
+ if (INTEL_GEN(dev_priv) < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -10682,8 +10678,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10716,11 +10711,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_init_scalers(dev, crtc, pipe_config);
- }
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_init_scalers(dev_priv, crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
@@ -10728,13 +10721,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
@@ -10822,13 +10815,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, wm);
-
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -10847,10 +10836,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
}
cntl |= pipe << 28; /* Connect to correct pipe */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
@@ -10895,8 +10884,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= y << CURSOR_Y_SHIFT;
/* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
- plane_state->base.rotation == DRM_ROTATE_180) {
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ plane_state->base.rotation & DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
@@ -10904,13 +10893,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
}
-static bool cursor_size_ok(struct drm_device *dev,
+static bool cursor_size_ok(struct drm_i915_private *dev_priv,
uint32_t width, uint32_t height)
{
if (width == 0 || height == 0)
@@ -10922,11 +10911,11 @@ static bool cursor_size_ok(struct drm_device *dev,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev) || IS_I865G(dev)) {
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev) ? 64 : 512))
+ if (width > (IS_845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10935,7 +10924,7 @@ static bool cursor_size_ok(struct drm_device *dev,
switch (width | height) {
case 256:
case 128:
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return false;
case 64:
break;
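The cursor_size_ok() hunk above encodes the per-platform cursor size rules: 845G/865G accept any width that is a multiple of 64 (up to 64 or 512 pixels respectively) with height up to 1023, while everything else requires square power-of-two cursors, with 256x256 and 128x128 additionally disallowed on gen2. A standalone sketch of that validation logic, with plain flags standing in for the platform checks, could read:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the cursor size rules shown above. The three flags stand in
 * for IS_845G(), IS_I865G() and IS_GEN2() respectively.
 */
static int cursor_size_ok(int is_845g, int is_865g, int is_gen2,
			  uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return 0;

	if (is_845g || is_865g) {
		/* 845g/865g: width must be a multiple of 64 pixels. */
		if ((width & 63) != 0)
			return 0;
		if (width > (is_845g ? 64u : 512u))
			return 0;
		if (height > 1023)
			return 0;
		return 1;
	}

	/* Everyone else: square, power-of-two sizes only. */
	if (width != height)
		return 0;
	switch (width) {
	case 256:
	case 128:
		if (is_gen2)
			return 0;
		/* fall through */
	case 64:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("865g 512x64: %d\n", cursor_size_ok(0, 1, 0, 512, 64));
	printf("gen2 128x128: %d\n", cursor_size_ok(0, 0, 1, 128, 128));
	printf("modern 64x64: %d\n", cursor_size_ok(0, 0, 0, 64, 64));
	return 0;
}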
@@ -11029,7 +11018,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -11114,6 +11103,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
@@ -11266,13 +11256,18 @@ found:
old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return true;
fail:
- drm_atomic_state_free(state);
- drm_atomic_state_free(restore_state);
- restore_state = state = NULL;
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -11300,10 +11295,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
return;
ret = drm_atomic_commit(state);
- if (ret) {
+ if (ret)
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
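Several hunks here replace drm_atomic_state_free() with drm_atomic_state_put(): the atomic state is now reference counted, so callers always drop their reference and the object is only destroyed when the last reference goes away, instead of being freed conditionally on the commit outcome. A small generic sketch of that ownership model, using a hypothetical refcounted object rather than the DRM structures, might look like this:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted state object standing in for drm_atomic_state. */
struct state {
	int refcount;
};

static struct state *state_alloc(void)
{
	struct state *s = calloc(1, sizeof(*s));
	if (s)
		s->refcount = 1;	/* caller owns one reference */
	return s;
}

static void state_get(struct state *s) { s->refcount++; }

static void state_put(struct state *s)
{
	if (--s->refcount == 0) {
		printf("state destroyed\n");
		free(s);
	}
}

/* A consumer that may keep the state alive past the caller (as a commit
 * does) takes its own reference. */
static int commit(struct state *s, int fail)
{
	if (fail)
		return -1;		/* no reference taken on failure */
	state_get(s);			/* keep the state for later completion */
	/* ... the completion path eventually does ... */
	state_put(s);
	return 0;
}

int main(void)
{
	struct state *s = state_alloc();
	int ret = commit(s, /*fail=*/1);

	if (ret)
		printf("commit failed: %d\n", ret);
	state_put(s);			/* caller always drops its reference */
	return 0;
}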
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11314,9 +11308,9 @@ static int i9xx_pll_refclk(struct drm_device *dev,
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
return dev_priv->vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
return 96000;
else
return 48000;
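i9xx_pll_refclk() above picks the DPLL reference clock from a small set of fixed values: the VBT-provided SSC frequency when spread spectrum is selected, 120 MHz on PCH-split platforms, 96 MHz on other non-gen2 parts, and 48 MHz on gen2. A tiny self-contained sketch of that selection (values in kHz, with plain flags in place of the platform macros) might be:

#include <stdio.h>

/*
 * Sketch of the reference clock selection above (kHz). The flags stand
 * in for the spread-spectrum DPLL bit, HAS_PCH_SPLIT() and IS_GEN2().
 */
static int pll_refclk_khz(int uses_ssc, int vbt_ssc_freq,
			  int has_pch_split, int is_gen2)
{
	if (uses_ssc)
		return vbt_ssc_freq;
	else if (has_pch_split)
		return 120000;
	else if (!is_gen2)
		return 96000;
	else
		return 48000;
}

int main(void)
{
	printf("%d\n", pll_refclk_khz(0, 0, 0, 0));	/* non-gen2: 96000 */
	printf("%d\n", pll_refclk_khz(0, 0, 1, 0));	/* PCH split: 120000 */
	printf("%d\n", pll_refclk_khz(1, 100000, 0, 0));	/* SSC: VBT value */
	return 0;
}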
@@ -11341,7 +11335,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -11349,8 +11343,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev)) {
- if (IS_PINEVIEW(dev))
+ if (!IS_GEN2(dev_priv)) {
+ if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -11372,12 +11366,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
return;
}
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -11578,7 +11572,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
* really needed there. But since ctg has the registers,
* include it in the check anyway.
*/
- if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
return true;
/*
@@ -11641,8 +11635,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11655,12 +11648,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
!is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11668,8 +11661,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11682,12 +11674,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11792,7 +11784,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -11825,7 +11817,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
@@ -11848,6 +11840,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
@@ -11876,7 +11869,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* 48bits addresses, and we need a NOOP for the batch size to
* stay even.
*/
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
len += 2;
}
@@ -11913,7 +11906,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
@@ -11922,7 +11915,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring,
i915_ggtt_offset(req->engine->scratch) + 256);
- if (IS_GEN8(dev)) {
+ if (IS_GEN8(dev_priv)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
@@ -11930,7 +11923,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
@@ -11940,8 +11933,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
- struct reservation_object *resv;
-
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11963,12 +11954,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv && !reservation_object_test_signaled_rcu(resv, false))
- return true;
-
- return engine != i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11983,7 +11969,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier[0]) {
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
@@ -11996,7 +11982,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl |= PLANE_CTL_TILED_YF;
break;
default:
- MISSING_CASE(fb->modifier[0]);
+ MISSING_CASE(fb->modifier);
}
/*
@@ -12021,7 +12007,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
dspcntr = I915_READ(reg);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -12041,17 +12027,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- struct reservation_object *resv;
- if (work->flip_queued_req)
- WARN_ON(i915_wait_request(work->flip_queued_req,
- 0, NULL, NO_WAITBOOST));
-
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv)
- WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12114,8 +12091,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -12124,19 +12100,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ __pageflip_stall_check_cs(dev_priv, crtc, work)) {
WARN_ONCE(1,
"Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
- page_flip_completed(intel_crtc);
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
+ page_flip_completed(crtc);
work = NULL;
}
if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -12176,7 +12152,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these register.
*/
- if (INTEL_INFO(dev)->gen > 3 &&
+ if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
@@ -12236,28 +12212,27 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
- goto cleanup;
+ goto unlock;
}
atomic_inc(&intel_crtc->unpin_work_count);
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- engine = &dev_priv->engine[BCS];
- if (fb->modifier[0] != old_fb->modifier[0])
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ if (fb->modifier != old_fb->modifier)
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- engine = &dev_priv->engine[BCS];
- } else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+ engine = dev_priv->engine[BCS];
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else {
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
}
mmio_flip = use_mmio_flip(engine, obj);
@@ -12285,10 +12260,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
- work->flip_queued_req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- schedule_work(&work->mmio_work);
+ queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
if (IS_ERR(request)) {
@@ -12311,6 +12283,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_add_request_no_flush(request);
}
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -12328,12 +12301,13 @@ cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+unlock:
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
@@ -12371,8 +12345,7 @@ retry:
goto retry;
}
- if (ret)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
@@ -12407,7 +12380,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
if (!cur->base.fb || !new->base.fb)
return false;
- if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
+ if (cur->base.fb->modifier != new->base.fb->modifier ||
cur->base.rotation != new->base.rotation ||
drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
@@ -12446,7 +12419,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->fb;
int ret;
- if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -12513,7 +12486,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
/* Pre-gen9 platforms need two-step watermark updates */
if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
if (visible || was_visible)
@@ -12525,7 +12498,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
@@ -12618,7 +12591,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ ret = dev_priv->display.compute_intermediate_wm(dev,
intel_crtc,
pipe_config);
if (ret) {
@@ -12630,7 +12603,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
@@ -12701,15 +12674,16 @@ static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+ if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)))
bpp = 10*3;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -12742,73 +12716,81 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
+static inline void
+intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
+ unsigned int lane_count, struct intel_link_m_n *m_n)
+{
+ DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
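The new intel_dump_m_n_config() helper above collapses three nearly identical DRM_DEBUG_KMS calls (FDI, DP M/N and DP M2/N2) into one parameterized dump. The same factoring can be sketched as a standalone program, with printf standing in for the debug macro and a local struct mirroring the gmch_m/gmch_n/link_m/link_n/tu fields:

#include <stdio.h>

/* Local mirror of the link M/N fields printed above. */
struct link_m_n {
	unsigned int gmch_m, gmch_n;
	unsigned int link_m, link_n;
	unsigned int tu;
};

/* One helper replaces several copy-pasted debug prints. */
static void dump_m_n_config(const char *id, unsigned int lane_count,
			    const struct link_m_n *m_n)
{
	printf("%s: lanes: %u; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
	       id, lane_count,
	       m_n->gmch_m, m_n->gmch_n,
	       m_n->link_m, m_n->link_n, m_n->tu);
}

int main(void)
{
	struct link_m_n fdi = { 111, 222, 333, 444, 64 };
	struct link_m_n dp  = { 555, 666, 777, 888, 64 };

	dump_m_n_config("fdi", 4, &fdi);
	dump_m_n_config("dp m_n", 2, &dp);
	return 0;
}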
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
- crtc->base.base.id, crtc->base.name,
- context, pipe_config, pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
+ crtc->base.base.id, crtc->base.name, context);
- DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
- DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
- DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_pch_encoder,
- pipe_config->fdi_lanes,
- pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
- pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
- pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
- pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
- pipe_config->dp_m_n.tu);
-
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m2_n2.gmch_m,
- pipe_config->dp_m2_n2.gmch_n,
- pipe_config->dp_m2_n2.link_m,
- pipe_config->dp_m2_n2.link_n,
- pipe_config->dp_m2_n2.tu);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count, &pipe_config->dp_m_n);
+ if (pipe_config->has_drrs)
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio,
- pipe_config->has_infoframe);
+ pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
- DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
- DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
- DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
-
- if (IS_BROXTON(dev)) {
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled));
+
+ DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
+
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
@@ -12823,13 +12805,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12844,7 +12826,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- char *format_name;
+ struct drm_format_name_buf format_name;
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
@@ -12857,23 +12839,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- format_name = drm_get_format_name(fb->pixel_format);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
- plane->base.id, plane->name);
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
- fb->base.id, fb->width, fb->height, format_name);
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
-
- kfree(format_name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
+ plane->base.id, plane->name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format, &format_name));
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
}
}
@@ -12907,7 +12886,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
- if (WARN_ON(!HAS_DDI(dev)))
+ if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
@@ -13188,7 +13167,7 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
}
static bool
-intel_pipe_config_compare(struct drm_device *dev,
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
@@ -13312,7 +13291,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -13338,8 +13317,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
- if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
@@ -13361,7 +13340,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -13379,7 +13358,7 @@ intel_pipe_config_compare(struct drm_device *dev,
}
/* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -13398,7 +13377,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -13436,33 +13415,67 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct skl_pipe_wm hw_wm, *sw_wm;
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
- int plane;
+ int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
+ skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
/* planes */
- for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
+ for_each_universal_plane(dev_priv, pipe, plane) {
+ hw_plane_wm = &hw_wm.planes[plane];
+ sw_plane_wm = &sw_wm->planes[plane];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][plane];
+ sw_ddb_entry = &sw_ddb->plane[pipe][plane];
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
}
/*
@@ -13472,25 +13485,60 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (intel_crtc->cursor_addr) {
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
+ DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
+
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
}
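The reworked verify_wm_state() above no longer compares only the DDB allocations; for every plane (and the cursor) it reads the watermark levels back from the hardware and compares the enable bit, blocks and lines against the software state, logging one error per mismatching level. The comparison itself can be sketched in isolation like this, with a minimal wm_level struct standing in for skl_wm_level:

#include <stdio.h>
#include <stdbool.h>

/* Minimal stand-in for a SKL watermark level (enable, blocks, lines). */
struct wm_level {
	bool enabled;
	unsigned int blocks;
	unsigned int lines;
};

static bool wm_level_equals(const struct wm_level *a, const struct wm_level *b)
{
	return a->enabled == b->enabled &&
	       a->blocks == b->blocks &&
	       a->lines == b->lines;
}

/* Compare software and hardware state level by level, reporting each
 * mismatch the way the verifier above does. Returns the mismatch count. */
static int verify_wm_levels(const struct wm_level *sw, const struct wm_level *hw,
			    int max_level)
{
	int level, mismatches = 0;

	for (level = 0; level <= max_level; level++) {
		if (wm_level_equals(&hw[level], &sw[level]))
			continue;
		mismatches++;
		printf("mismatch at level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
		       level,
		       sw[level].enabled, sw[level].blocks, sw[level].lines,
		       hw[level].enabled, hw[level].blocks, hw[level].lines);
	}
	return mismatches;
}

int main(void)
{
	struct wm_level sw[3] = { {true, 10, 2}, {true, 12, 3}, {false, 0, 0} };
	struct wm_level hw[3] = { {true, 10, 2}, {true, 14, 3}, {false, 0, 0} };

	printf("mismatches: %d\n", verify_wm_levels(sw, hw, 2));
	return 0;
}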
static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
- drm_for_each_connector(connector, dev) {
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
@@ -13605,7 +13653,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
+ if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(intel_crtc, pipe_config,
@@ -13698,15 +13746,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
if (!needs_modeset(new_state) &&
!to_intel_crtc_state(new_state)->update_pipe)
return;
verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, crtc);
+ verify_connector_state(crtc->dev, state, crtc);
verify_crtc_state(crtc, old_state, new_state);
verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}
@@ -13722,16 +13771,17 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
verify_encoder_state(dev);
- verify_connector_state(dev, NULL);
+ verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
static void update_scanline_offset(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13751,7 +13801,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*/
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;
@@ -13760,7 +13810,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev) &&
+ } else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
@@ -13945,8 +13995,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
intel_state->cdclk, intel_state->dev_cdclk);
- } else
+ } else {
to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
+ }
intel_modeset_clear_plls(state);
@@ -14023,7 +14074,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (i915.fastboot &&
- intel_pipe_config_compare(dev,
+ intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -14047,8 +14098,9 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
- } else
- intel_state->cdclk = dev_priv->cdclk_freq;
+ } else {
+ intel_state->cdclk = dev_priv->atomic_cdclk_freq;
+ }
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
@@ -14059,13 +14111,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+ struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14088,28 +14137,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (!ret && !nonblock) {
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- I915_WAIT_INTERRUPTIBLE,
- NULL, NULL);
- if (ret) {
- /* Any hang should be swallowed by the wait */
- WARN_ON(ret == -EIO);
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
- break;
- }
- }
- }
-
return ret;
}
@@ -14135,22 +14162,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
return;
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
if (!((1 << pipe) & crtc_mask))
continue;
- ret = drm_crtc_vblank_get(crtc);
+ ret = drm_crtc_vblank_get(&crtc->base);
if (WARN_ON(ret != 0)) {
crtc_mask &= ~(1 << pipe);
continue;
}
- last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
}
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
long lret;
if (!((1 << pipe) & crtc_mask))
@@ -14158,12 +14187,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
lret = wait_event_timeout(dev->vblank[pipe].queue,
last_vblank_count[pipe] !=
- drm_crtc_vblank_count(crtc),
+ drm_crtc_vblank_count(&crtc->base),
msecs_to_jiffies(50));
WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
- drm_crtc_vblank_put(crtc);
+ drm_crtc_vblank_put(&crtc->base);
}
}
@@ -14237,16 +14266,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
struct drm_crtc_state *old_crtc_state;
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct intel_crtc_state *cstate;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
+ int i;
+
+ const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ /* ignore allocations for crtcs that have been turned off. */
+ if (crtc->state->active)
+ entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
* Whenever the number of active pipes changes, we need to make sure we
@@ -14255,21 +14291,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* cause pipe underruns and other bad stuff.
*/
do {
- int i;
progress = false;
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
bool vbl_wait = false;
unsigned int cmask = drm_crtc_mask(crtc);
- pipe = to_intel_crtc(crtc)->pipe;
- if (updated & cmask || !crtc->state->active)
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+ pipe = intel_crtc->pipe;
+
+ if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
- pipe))
+
+ if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
continue;
updated |= cmask;
+ entries[i] = &cstate->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -14277,7 +14316,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+ &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
@@ -14286,7 +14326,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
crtc_vblank_mask);
if (vbl_wait)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
progress = true;
}
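The reworked loop above replaces the global cur_ddb/new_ddb comparison with a per-pipe entries[] array: a pipe is reprogrammed only once its new DDB range no longer overlaps an allocation still live on a not-yet-updated pipe, and the pass repeats until no further progress can be made. A toy sketch of that ordering on plain [start, end) intervals (names and data are made up for illustration, not taken from the driver):

/* Toy sketch of "commit only non-overlapping allocations, repeat until
 * done"; plain intervals stand in for skl_ddb_entry. */
#include <stdbool.h>
#include <stdio.h>

struct entry { int start, end; };	/* half-open [start, end) block range */

static bool overlaps(const struct entry *a, const struct entry *b)
{
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	/* current hardware allocations and the new targets: pipe 0 grows
	 * into space that pipe 1 must give up first */
	struct entry cur[2]    = { { 0, 200 }, { 200, 512 } };
	struct entry target[2] = { { 0, 400 }, { 400, 512 } };
	bool updated[2] = { false, false };
	bool progress;

	do {
		progress = false;
		for (int i = 0; i < 2; i++) {
			bool blocked = false;

			if (updated[i])
				continue;
			/* only program a pipe once its target no longer
			 * overlaps an allocation still live on another
			 * not-yet-updated pipe */
			for (int j = 0; j < 2; j++)
				if (j != i && !updated[j] &&
				    overlaps(&target[i], &cur[j]))
					blocked = true;
			if (blocked)
				continue;
			cur[i] = target[i];	/* "program" the pipe */
			updated[i] = true;
			progress = true;
			printf("updated pipe %d -> [%d, %d)\n",
			       i, cur[i].start, cur[i].end);
		}
	} while (progress);

	return 0;
}

In the driver itself a vblank wait is additionally inserted when an already-active pipe's allocation changed, per the comment in the hunk, so freed space is not reused before the hardware has latched the new layout.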
@@ -14301,37 +14341,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- int i, ret;
-
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- 0, NULL, NULL);
- /* EIO should be eaten, and we can't get interrupted in the
- * worker, and blocking commits have waited already. */
- WARN_ON(ret);
- }
+ int i;
drm_atomic_helper_wait_for_dependencies(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+ if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- }
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14364,8 +14382,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active)
- intel_update_watermarks(crtc);
+ if (!crtc->state->active) {
+ /*
+ * Make sure we don't call initial_watermarks
+ * for ILK-style watermark updates.
+ */
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(crtc->state));
+ else
+ intel_update_watermarks(intel_crtc);
+ }
}
}
@@ -14388,7 +14415,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev);
+ intel_modeset_verify_disabled(dev, state);
}
/* Complete the events for pipes that have now been disabled */
@@ -14431,7 +14458,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_cstate = to_intel_crtc_state(crtc->state);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_cstate);
+ dev_priv->display.optimize_watermarks(intel_state,
+ intel_cstate);
}
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14440,7 +14468,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14457,7 +14485,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
@@ -14475,12 +14503,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+
intel_atomic_commit_tail(state);
}
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+ struct intel_atomic_state *state =
+ container_of(fence, struct intel_atomic_state, commit_ready);
+
+ switch (notify) {
+ case FENCE_COMPLETE:
+ if (state->base.commit_work.func)
+ queue_work(system_unbound_wq, &state->base.commit_work);
+ break;
+
+ case FENCE_FREE:
+ drm_atomic_state_put(&state->base);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
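intel_atomic_commit_ready() above follows the usual embedded-fence notify pattern: container_of() recovers the atomic state from the fence, FENCE_COMPLETE queues the deferred commit work, and FENCE_FREE drops the reference that was taken for the fence. A minimal user-space sketch of that shape, with stand-in names rather than the real i915_sw_fence API:

/* Minimal sketch of the notify pattern used by intel_atomic_commit_ready():
 * recover the embedding object with container_of(), act on COMPLETE, drop
 * the reference on FREE. All names are hypothetical stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum notify { NOTIFY_COMPLETE, NOTIFY_FREE };

struct fence { int dummy; };

struct commit_state {
	int refcount;
	struct fence ready;	/* embedded, like intel_atomic_state::commit_ready */
};

static int commit_ready(struct fence *fence, enum notify notify)
{
	struct commit_state *state =
		container_of(fence, struct commit_state, ready);

	switch (notify) {
	case NOTIFY_COMPLETE:
		/* in the driver this queues the commit work item */
		printf("all waits done, queueing commit work (ref=%d)\n",
		       state->refcount);
		break;
	case NOTIFY_FREE:
		state->refcount--;	/* drop the reference held by the fence */
		printf("fence freed, ref now %d\n", state->refcount);
		break;
	}
	return 0;
}

int main(void)
{
	struct commit_state st = { .refcount = 2 };

	commit_ready(&st.ready, NOTIFY_COMPLETE);
	commit_ready(&st.ready, NOTIFY_FREE);
	return 0;
}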
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state;
@@ -14502,10 +14551,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * nonblocking commits are only safe for pure plane updates. Everything else
- * should work though.
- *
* RETURNS
* Zero for success or -errno.
*/
@@ -14517,33 +14562,42 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (intel_state->modeset && nonblock) {
- DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
- return -EINVAL;
- }
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(state);
+ i915_sw_fence_init(&intel_state->commit_ready,
+ intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
return ret;
}
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
+ drm_atomic_state_get(state);
+ INIT_WORK(&state->commit_work,
+ nonblock ? intel_atomic_commit_work : NULL);
+
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ if (!nonblock) {
+ i915_sw_fence_wait(&intel_state->commit_ready);
intel_atomic_commit_tail(state);
+ }
return 0;
}
@@ -14581,9 +14635,8 @@ retry:
goto retry;
}
- if (ret)
out:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
/*
@@ -14656,19 +14709,21 @@ int
intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct drm_device *dev = plane->dev;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_state->state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
- struct reservation_object *resv;
- int ret = 0;
+ int ret;
if (!obj && !old_obj)
return 0;
if (old_obj) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+ drm_atomic_get_existing_crtc_state(new_state->state,
+ plane->state->crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14681,52 +14736,58 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state))
- ret = i915_gem_object_wait_rendering(old_obj, true);
- if (ret) {
- /* GPU hangs should have been swallowed by the wait */
- WARN_ON(ret == -EIO);
- return ret;
+ if (needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ old_obj->resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
}
}
+ if (new_state->fence) { /* explicit fencing */
+ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ new_state->fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
if (!obj)
return 0;
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lret == -ERESTARTSYS)
- return lret;
+ if (!new_state->fence) { /* implicit fencing */
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ obj->resv, NULL,
+ false, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- WARN(lret < 0, "waiting returns %li\n", lret);
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev)->cursor_needs_physical) {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
- if (ret)
+ if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma))
- ret = PTR_ERR(vma);
- }
-
- if (ret == 0) {
- to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
}
- return ret;
+ return 0;
}
/**
@@ -14742,9 +14803,8 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
- struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14754,11 +14814,8 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev)->cursor_needs_physical))
+ !INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
- i915_gem_request_assign(&intel_state->wait_req, NULL);
- i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int
@@ -14833,30 +14890,34 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_state =
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
- enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
- return;
+ goto out;
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
}
- if (to_intel_crtc_state(crtc->state)->update_pipe)
- intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_GEN(dev_priv) >= 9) {
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
- I915_WRITE(PIPE_WM_LINETIME(pipe),
- dev_priv->wm.skl_hw.wm_linetime[pipe]);
- }
+out:
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(old_intel_state,
+ intel_cstate);
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14876,9 +14937,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- if (!plane)
- return;
-
drm_plane_cleanup(plane);
kfree(to_intel_plane(plane));
}
@@ -14892,53 +14950,63 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
-
};
-static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
+ unsigned int supported_rotations;
unsigned int num_formats;
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
+ if (!primary) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&primary->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
primary->base.state = &state->base;
primary->can_scale = false;
primary->max_downscale = 1;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
primary->can_scale = true;
state->scaler_id = -1;
}
primary->pipe = pipe;
- primary->plane = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ primary->plane = (enum plane) !pipe;
+ else
+ primary->plane = (enum plane) pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
- primary->plane = !pipe;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
primary->update_plane = ironlake_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -14952,57 +15020,56 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
- else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4)
- intel_create_rotation_property(dev, primary);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
+ } else {
+ supported_rotations = DRM_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&primary->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return &primary->base;
+ return primary;
fail:
kfree(state);
kfree(primary);
- return NULL;
-}
-
-void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
-{
- if (!dev->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_180;
-
- if (INTEL_INFO(dev)->gen >= 9)
- flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev, flags);
- }
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base.base,
- dev->mode_config.rotation_property,
- plane->base.state->rotation);
+ return ERR_PTR(ret);
}
static int
@@ -15029,7 +15096,8 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -15041,7 +15109,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
@@ -15056,7 +15124,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse to put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
@@ -15082,13 +15150,13 @@ intel_update_cursor_plane(struct drm_plane *plane,
{
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
if (!obj)
addr = 0;
- else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -15097,20 +15165,25 @@ intel_update_cursor_plane(struct drm_plane *plane,
intel_crtc_update_cursor(crtc, state);
}
-static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor)
+ if (!cursor) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&cursor->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -15122,8 +15195,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- ret = drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15131,102 +15204,106 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4) {
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_180);
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&cursor->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
- }
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180);
- if (INTEL_INFO(dev)->gen >=9)
+ if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
- return &cursor->base;
+ return cursor;
fail:
kfree(state);
kfree(cursor);
- return NULL;
+ return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- struct intel_scaler *intel_scaler;
- struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- intel_scaler = &scaler_state->scalers[i];
- intel_scaler->in_use = 0;
- intel_scaler->mode = PS_SCALER_MODE_DYN;
+ for (i = 0; i < crtc->num_scalers; i++) {
+ struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+ scaler->in_use = 0;
+ scaler->mode = PS_SCALER_MODE_DYN;
}
scaler_state->scaler_id = -1;
}
-static void intel_crtc_init(struct drm_device *dev, int pipe)
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
- struct drm_plane *primary = NULL;
- struct drm_plane *cursor = NULL;
- int ret;
+ struct intel_plane *primary = NULL;
+ struct intel_plane *cursor = NULL;
+ int sprite, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (intel_crtc == NULL)
- return;
+ if (!intel_crtc)
+ return -ENOMEM;
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
+ if (!crtc_state) {
+ ret = -ENOMEM;
goto fail;
+ }
intel_crtc->config = crtc_state;
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
/* initialize shared scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (pipe == PIPE_C)
intel_crtc->num_scalers = 1;
else
intel_crtc->num_scalers = SKL_NUM_SCALERS;
- skl_init_scalers(dev, intel_crtc, crtc_state);
+ skl_init_scalers(dev_priv, intel_crtc, crtc_state);
}
- primary = intel_primary_plane_create(dev, pipe);
- if (!primary)
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
goto fail;
+ }
- cursor = intel_cursor_plane_create(dev, pipe);
- if (!cursor)
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
goto fail;
+ }
- ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ &primary->base, &cursor->base,
+ &intel_crtc_funcs,
"pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- /*
- * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = !pipe;
- }
+ intel_crtc->plane = primary->plane;
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
@@ -15236,21 +15313,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_color_init(&intel_crtc->base);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
- return;
+
+ return 0;
fail:
- intel_plane_destroy(primary);
- intel_plane_destroy(cursor);
+ /*
+ * drm_mode_config_cleanup() will free up any
+ * crtcs/planes already initialized.
+ */
kfree(crtc_state);
kfree(intel_crtc);
+
+ return ret;
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -15300,40 +15382,37 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
-static bool has_edp_a(struct drm_device *dev)
+static bool has_edp_a(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!IS_MOBILE(dev))
+ if (!IS_MOBILE(dev_priv))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_crt_present(struct drm_device *dev)
+static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return false;
- if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return false;
- if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ if (HAS_PCH_LPT_H(dev_priv) &&
+ I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -15393,10 +15472,10 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
intel_lvds_init(dev);
- if (intel_crt_present(dev))
+ if (intel_crt_present(dev_priv))
intel_crt_init(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -15407,7 +15486,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_C);
intel_dsi_init(dev);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
int found;
/*
@@ -15417,7 +15496,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15433,17 +15512,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_ddi_init(dev, PORT_E);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+ dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
- if (has_edp_a(dev))
+ if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -15466,7 +15545,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
/*
@@ -15484,21 +15563,21 @@ static void intel_setup_outputs(struct drm_device *dev)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
/*
* eDP not supported on port D,
* so no need to worry about it
@@ -15511,18 +15590,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+ } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev)) {
+ if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev))
+ if (!found && IS_G4X(dev_priv))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -15535,21 +15614,20 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (IS_G4X(dev) &&
- (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
- } else if (IS_GEN2(dev))
+ } else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev);
intel_psr_init(dev);
@@ -15604,6 +15682,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
+ if (obj->pin_display && obj->cache_dirty)
+ i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
@@ -15617,10 +15697,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
};
static
-u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_INFO(dev)->gen;
+ u32 gen = INTEL_INFO(dev_priv)->gen;
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15629,7 +15709,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
* pixels and 32K bytes."
*/
return min(8192 * cpp, 32768);
- } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15656,7 +15737,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
unsigned int tiling = i915_gem_object_get_tiling(obj);
int ret;
u32 pitch_limit, stride_alignment;
- char *format_name;
+ struct drm_format_name_buf format_name;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -15683,7 +15764,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_INFO(dev)->gen < 9) {
+ if (INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
@@ -15716,7 +15797,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
@@ -15746,37 +15827,33 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- if (INTEL_INFO(dev)->gen > 3) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) > 3) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- INTEL_INFO(dev)->gen < 9) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ INTEL_GEN(dev_priv) < 9) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- if (INTEL_INFO(dev)->gen < 4) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 4) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
@@ -15784,17 +15861,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 5) {
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ if (INTEL_GEN(dev_priv) < 5) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
break;
default:
- format_name = drm_get_format_name(mode_cmd->pixel_format);
- DRM_DEBUG("unsupported pixel format: %s\n", format_name);
- kfree(format_name);
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
@@ -15835,17 +15910,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
-#ifndef CONFIG_DRM_FBDEV_EMULATION
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-#endif
-
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -16221,12 +16290,11 @@ static void intel_init_quirks(struct drm_device *dev)
}
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -16244,11 +16312,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
- intel_init_clock_gating(dev);
+ intel_init_clock_gating(dev_priv);
}
/*
@@ -16265,6 +16333,7 @@ static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
@@ -16293,12 +16362,14 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ intel_state = to_intel_atomic_state(state);
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16314,7 +16385,7 @@ retry:
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto fail;
+ goto put_state;
}
/* Write calculated watermark values back */
@@ -16322,20 +16393,20 @@ retry:
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(cs);
+ dev_priv->display.optimize_watermarks(intel_state, cs);
}
- drm_atomic_state_free(state);
+put_state:
+ drm_atomic_state_put(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-void intel_modeset_init(struct drm_device *dev)
+int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -16353,10 +16424,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_quirks(dev);
- intel_init_pm(dev);
+ intel_init_pm(dev_priv);
- if (INTEL_INFO(dev)->num_pipes == 0)
- return;
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return 0;
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -16364,7 +16435,7 @@ void intel_modeset_init(struct drm_device *dev)
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
@@ -16376,10 +16447,10 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -16387,10 +16458,10 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev) || IS_I865G(dev)) {
- dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
@@ -16401,29 +16472,30 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev)->num_pipes,
- INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+ INTEL_INFO(dev_priv)->num_pipes,
+ INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- intel_crtc_init(dev, pipe);
- for_each_sprite(dev_priv, pipe, sprite) {
- ret = intel_plane_init(dev, pipe, sprite);
- if (ret)
- DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(pipe), sprite_name(pipe, sprite), ret);
+ int ret;
+
+ ret = intel_crtc_init(dev_priv, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
}
}
intel_update_czclk(dev_priv);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
+ dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_shared_dpll_init(dev);
if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+ intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
intel_setup_outputs(dev);
drm_modeset_lock_all(dev);
@@ -16459,6 +16531,8 @@ void intel_modeset_init(struct drm_device *dev)
* since the watermark calculation done here will use pstate->fb.
*/
sanitize_watermarks(dev);
+
+ return 0;
}
static void intel_enable_pipe_a(struct drm_device *dev)
@@ -16488,11 +16562,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val;
- if (INTEL_INFO(dev)->num_pipes == 1)
+ if (INTEL_INFO(dev_priv)->num_pipes == 1)
return true;
val = I915_READ(DSPCNTR(!crtc->plane));
@@ -16566,7 +16639,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
@@ -16596,7 +16669,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+ if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -16668,21 +16741,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga_power_on(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
}
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -16693,7 +16763,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- i915_redisable_vga_power_on(dev);
+ i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
@@ -16765,7 +16835,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- crtc->active ? "enabled" : "disabled");
+ enableddisabled(crtc->active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -16788,7 +16858,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
encoder->base.crtc = &crtc->base;
crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
@@ -16797,9 +16868,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id,
- encoder->base.name,
- encoder->base.crtc ? "enabled" : "disabled",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -16828,9 +16898,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id,
- connector->base.name,
- connector->base.encoder ? "enabled" : "disabled");
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
for_each_intel_crtc(dev, crtc) {
@@ -16889,7 +16958,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
}
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
@@ -16909,11 +16979,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_wm_get_hw_state(dev);
- else if (IS_GEN9(dev))
+ else if (IS_GEN9(dev_priv))
skl_wm_get_hw_state(dev);
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
ilk_wm_get_hw_state(dev);
for_each_intel_crtc(dev, crtc) {
@@ -16963,10 +17033,9 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
- if (ret) {
+ if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -17078,10 +17147,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
/*
* set vga decode state - true == enable VGA decode
*/
-int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
@@ -17105,6 +17173,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -17233,17 +17303,16 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(dev_priv, i) {
@@ -17256,13 +17325,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
@@ -17287,3 +17356,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 14a3cf0b7213..90283edcafba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
+
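For sinks without an explicit rate table, the fallback above derives the rate count from the DPCD maximum link-bandwidth code. Assuming the standard codes (0x06 for 1.62 GHz, 0x0a for 2.7 GHz, 0x14 for 5.4 GHz) and the usual three-entry default_rates table in kHz, (code >> 3) + 1 selects how many leading entries are usable — a small illustrative check:

/* Worked example of the (max_link_bw >> 3) + 1 count above, assuming the
 * standard DPCD MAX_LINK_RATE codes; purely illustrative. */
#include <stdio.h>

int main(void)
{
	const int codes[] = { 0x06 /* 1.62 GHz */, 0x0a /* 2.7 GHz */,
			      0x14 /* 5.4 GHz */ };
	const int default_rates[] = { 162000, 270000, 540000 };	/* kHz, assumed */

	for (int i = 0; i < 3; i++) {
		int count = (codes[i] >> 3) + 1;	/* 0x06->1, 0x0a->2, 0x14->3 */
		printf("bw code 0x%02x -> %d usable rates, top %d kHz\n",
		       codes[i], count, default_rates[count - 1]);
	}
	return 0;
}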
+static int
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int size;
+
+ if (IS_BROXTON(dev_priv)) {
+ *source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ *source_rates = skl_rates;
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
+ }
+
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (!intel_dp_source_supports_hbr2(intel_dp))
+ size--;
+
+ return size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(intel_dp, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
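The new intersect_rates() above is a classic two-pointer intersection of two ascending arrays: advance whichever side holds the smaller value, emit a value when both sides match. A standalone sketch of the same pattern with sample link rates, where MAX_RATES stands in for DP_MAX_SUPPORTED_RATES and the values are illustrative rather than taken from the driver:

/* Standalone copy of the sorted-array intersection pattern used by
 * intersect_rates(); sample data, illustrative only. */
#include <stdio.h>

#define MAX_RATES 8

static int intersect(const int *a, int alen, const int *b, int blen, int *out)
{
	int i = 0, j = 0, k = 0;

	while (i < alen && j < blen) {
		if (a[i] == b[j]) {
			if (k < MAX_RATES)
				out[k++] = a[i];	/* present on both sides */
			i++;
			j++;
		} else if (a[i] < b[j]) {
			i++;		/* advance the smaller side */
		} else {
			j++;
		}
	}
	return k;			/* number of common rates */
}

int main(void)
{
	const int source[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	const int sink[]   = { 162000, 270000, 540000 };
	int common[MAX_RATES];
	int n = intersect(source, 6, sink, 3, common);

	for (int i = 0; i < n; i++)
		printf("common rate: %d kHz\n", common[i]);
	return 0;
}

Because both inputs are sorted, each element is visited at most once, so the merge runs in O(source_len + sink_len).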
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -320,8 +395,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -344,7 +418,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
@@ -356,10 +430,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* So enable temporarily it if it's not already enabled.
*/
if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev) &&
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev, pipe);
+ vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
@@ -570,8 +644,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- !IS_BROXTON(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)))
return;
/*
@@ -591,7 +665,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -664,7 +738,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
@@ -692,7 +766,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -706,7 +780,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -821,15 +895,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
precharge = 3;
else
precharge = 5;
- if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -867,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
pps_lock(intel_dp);
@@ -1108,8 +1183,46 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port aux_port;
+
+ if (!info->alternate_aux_channel) {
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ port_name(port), port_name(port));
+ return port;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_port = PORT_A;
+ break;
+ case DP_AUX_B:
+ aux_port = PORT_B;
+ break;
+ case DP_AUX_C:
+ aux_port = PORT_C;
+ break;
+ case DP_AUX_D:
+ aux_port = PORT_D;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_port = PORT_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ port_name(aux_port), port_name(port));
+
+ return aux_port;
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_B:
@@ -1123,7 +1236,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_B:
@@ -1137,7 +1250,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1153,7 +1266,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1168,36 +1281,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
}
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[PORT_E];
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- return PORT_A;
- case DP_AUX_B:
- return PORT_B;
- case DP_AUX_C:
- return PORT_C;
- case DP_AUX_D:
- return PORT_D;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- return PORT_A;
- }
-}
-
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1211,11 +1297,8 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1229,7 +1312,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
@@ -1240,7 +1323,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
@@ -1253,7 +1336,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = intel_aux_port(dev_priv,
+ dp_to_dig_port(intel_dp)->port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -1281,78 +1365,37 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
- if (intel_dp->num_sink_rates) {
- *sink_rates = intel_dp->sink_rates;
- return intel_dp->num_sink_rates;
- }
-
- *sink_rates = default_rates;
-
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
-
- /* WaDisableHBR2:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
- return false;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
- (INTEL_INFO(dev)->gen >= 9))
+ if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
return true;
else
return false;
}
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- int size;
-
- if (IS_BROXTON(dev)) {
- *source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- *source_rates = skl_rates;
- size = ARRAY_SIZE(skl_rates);
- } else {
- *source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
- }
-
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
- return size;
-}
-
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -1368,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static int intersect_rates(const int *source_rates, int source_len,
- const int *sink_rates, int sink_len,
- int *common_rates)
-{
- int i = 0, j = 0, k = 0;
-
- while (i < source_len && j < sink_len) {
- if (source_rates[i] == sink_rates[j]) {
- if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
- return k;
- common_rates[k] = source_rates[i];
- ++k;
- ++i;
- ++j;
- } else if (source_rates[i] < sink_rates[j]) {
- ++i;
- } else {
- ++j;
- }
- }
- return k;
-}
-
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
-{
- const int *source_rates, *sink_rates;
- int source_len, sink_len;
-
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
-
- return intersect_rates(source_rates, source_len,
- sink_rates, sink_len,
- common_rates);
-}
-
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@@ -1444,42 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
-static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+bool
+__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
- uint8_t rev;
- int len;
-
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
-
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
- if (len < 0)
- return;
+ u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
+ DP_SINK_OUI;
- DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+ return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
+ sizeof(*desc);
}
-static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
- uint8_t rev[2];
- int len;
+ struct intel_dp_desc *desc = &intel_dp->desc;
+ bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_OUI_SUPPORT;
+ int dev_id_len;
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
- return;
+ if (!__intel_dp_read_desc(intel_dp, desc))
+ return false;
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
- if (len < 0)
- return;
+ dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
+ DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
+ drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
+ (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
+ dev_id_len, desc->device_id,
+ desc->hw_rev >> 4, desc->hw_rev & 0xf,
+ desc->sw_major_rev, desc->sw_minor_rev);
- DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+ return true;
}
static int rate_to_index(int find, const int *rates)
@@ -1543,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1569,7 +1567,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
max_clock = common_len - 1;
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
@@ -1579,14 +1577,14 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
@@ -1711,7 +1709,7 @@ found:
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
}
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1769,7 +1767,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1780,7 +1778,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -1792,8 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1805,7 +1802,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
@@ -2114,7 +2111,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2122,7 +2119,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2131,7 +2128,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2363,7 +2360,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2444,9 +2441,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
goto out;
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
enum pipe p;
for_each_pipe(dev_priv, p) {
@@ -2461,7 +2458,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
@@ -2489,7 +2486,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -2515,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -2636,7 +2632,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & DP_TRAINING_PATTERN_MASK);
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2662,8 +2658,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2683,7 +2679,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
@@ -2699,7 +2695,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@@ -2735,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2749,7 +2746,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(intel_dp);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2760,10 +2757,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_unlock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@@ -2777,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -2787,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
}
@@ -2924,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2942,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2979,21 +2976,20 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_INFO(dev)->gen >= 9) {
+ else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev) && port == PORT_A)
+ else if (IS_GEN7(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3002,10 +2998,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3018,7 +3014,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3030,7 +3026,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3042,7 +3038,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3343,21 +3339,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev) && port == PORT_A) {
+ } else if (IS_GEN6(dev_priv) && port == PORT_A) {
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3402,7 +3398,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
uint32_t val;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
@@ -3437,7 +3433,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -3445,12 +3441,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -3468,7 +3464,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -3486,7 +3482,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3496,7 +3492,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static bool
+bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@@ -3520,6 +3516,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
+ intel_dp_read_desc(intel_dp);
+
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@@ -3551,8 +3549,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
/* Read the eDP Display control capabilities registers */
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
- intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
- sizeof(intel_dp->edp_dpcd)))
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@@ -3607,8 +3605,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -3622,23 +3619,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return true;
}
-static void
-intel_dp_probe_oui(struct intel_dp *intel_dp)
-{
- u8 buf[3];
-
- if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
- return;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-}
-
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
@@ -3682,7 +3662,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
@@ -3703,7 +3683,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3726,7 +3706,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
@@ -3754,14 +3734,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
}
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return 0;
}
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
@@ -3772,7 +3752,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
return ret;
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3989,6 +3969,31 @@ go_again:
}
static void
+intel_dp_retrain_link(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+}
+
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
@@ -4008,13 +4013,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
+ /* FIXME: we need to synchronize this sort of stuff with hardware
+ * readout */
+ if (WARN_ON_ONCE(!intel_dp->lane_count))
+ return;
+
/* if link training is requested we should perform it always */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+
+ intel_dp_retrain_link(intel_dp);
}
}
@@ -4096,7 +4106,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* if there's no downstream port, we're done */
- if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
@@ -4387,10 +4397,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
- intel_dp_probe_oui(intel_dp);
-
- intel_dp_print_hw_revision(intel_dp);
- intel_dp_print_sw_revision(intel_dp);
+ intel_dp_read_desc(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4454,21 +4461,11 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- intel_dp_unset_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
- return connector_status_disconnected;
- }
-
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
status = intel_dp_long_pulse(intel_dp->attached_connector);
@@ -4756,11 +4753,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ if (IS_GEN9(dev_priv) && lspcon->active)
+ lspcon_resume(lspcon);
+
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
@@ -4867,15 +4869,13 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* eDP not supported on g4x. so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return false;
if (port == PORT_A)
@@ -5074,7 +5074,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5087,9 +5087,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
@@ -5100,7 +5100,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5108,7 +5108,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev) ?
+ IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5116,7 +5116,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
static void intel_dp_pps_init(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
@@ -5475,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_INFO(dev)->gen <= 6) {
+ if (INTEL_GEN(dev_priv) <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
@@ -5586,7 +5588,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
@@ -5595,7 +5597,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
@@ -5649,28 +5651,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev, port))
+ if (intel_dp_is_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -5684,7 +5686,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
@@ -5705,7 +5707,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -5717,7 +5719,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
@@ -5734,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5751,7 +5753,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -5794,13 +5796,13 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -5808,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev,
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
@@ -5817,7 +5819,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -5826,6 +5828,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index c438b02184cb..0048b520baf7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
- *
- * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
- * supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 54a9d7610d8f..b029d1026a28 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
bpp = 24;
/*
@@ -523,6 +522,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 047f48748944..7a8e82dabbf2 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -23,6 +23,565 @@
#include "intel_drv.h"
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * ie. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
+
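
To make the VLV/CHV mapping above concrete: the common lane (CMN/PLL/REF) channel follows the pipe, while the PCS/TX channel follows the port, which is what allows "crossing the streams". The sketch below only restates that rule with hypothetical enum and function names; it is illustrative and not part of the driver:

/* Illustration of the pipe->PLL-channel and port->data-channel rule above. */
#include <stdio.h>

enum sketch_pipe { SKETCH_PIPE_A, SKETCH_PIPE_B };	/* hypothetical names */
enum sketch_port { SKETCH_PORT_B, SKETCH_PORT_C };	/* hypothetical names */

/* pipe A == CMN/PLL/REF CH0, pipe B == CMN/PLL/REF CH1 */
static int pll_channel(enum sketch_pipe pipe)
{
	return pipe == SKETCH_PIPE_A ? 0 : 1;
}

/* port B == PCS/TX CH0, port C == PCS/TX CH1 */
static int data_channel(enum sketch_port port)
{
	return port == SKETCH_PORT_B ? 0 : 1;
}

int main(void)
{
	/* "Crossing the streams": pipe A driving port C */
	printf("pipe A -> PLL on CH%d, port C -> PCS/TX on CH%d\n",
	       pll_channel(SKETCH_PIPE_A), data_channel(SKETCH_PORT_C));
	return 0;
}
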
+/**
+ * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+ /**
+ * @dual_channel: true if this phy has a second channel.
+ */
+ bool dual_channel;
+
+ /**
+ * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+ * Otherwise the GRC value will be copied from the phy indicated by
+ * this field.
+ */
+ enum dpio_phy rcomp_phy;
+
+ /**
+ * @channel: struct containing per channel information.
+ */
+ struct {
+ /**
+ * @port: which port maps to this channel.
+ */
+ enum port port;
+ } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = true,
+ .rcomp_phy = DPIO_PHY1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ [DPIO_CH1] = { .port = PORT_C },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+};
+
+static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
+{
+ return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
+ BIT(phy_info->channel[DPIO_CH0].port);
+}
+
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
+ phy_info = &bxt_ddi_phy_info[i];
+
+ if (port == phy_info->channel[DPIO_CH0].port) {
+ *phy = i;
+ *ch = DPIO_CH0;
+ return;
+ }
+
+ if (phy_info->dual_channel &&
+ port == phy_info->channel[DPIO_CH1].port) {
+ *phy = i;
+ *ch = DPIO_CH1;
+ return;
+ }
+ }
+
+ WARN(1, "PHY not found for PORT %c", port_name(port));
+ *phy = DPIO_PHY0;
+ *ch = DPIO_CH0;
+}
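
Given the bxt_ddi_phy_info table above, this lookup resolves ports B and C to the two channels of DPIO_PHY0 and port A to channel 0 of DPIO_PHY1. A usage sketch, assuming the enums and the helper declared in this file are in scope as they are for callers inside the driver:

/* Usage sketch only; mirrors what the BXT PHY helpers further below do. */
void sketch_port_lookup(void)
{
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(PORT_C, &phy, &ch);
	/* per the table above: phy == DPIO_PHY0, ch == DPIO_CH1 */

	bxt_port_to_phy_channel(PORT_A, &phy, &ch);
	/* per the table above: phy == DPIO_PHY1, ch == DPIO_CH0 */
}
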
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis)
+{
+ u32 val;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers and we pick lanes 0/1 for that.
+ */
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val &= ~SCALE_DCOMP_METHOD;
+ if (enable)
+ val |= SCALE_DCOMP_METHOD;
+
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+
+ I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val &= ~DE_EMPHASIS;
+ val |= deemphasis << DEEMPH_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum port port;
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ return false;
+
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
+
+ return false;
+ }
+
+ if (phy_info->rcomp_phy == -1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
+ phy);
+
+ return false;
+ }
+
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
+
+ return false;
+ }
+
+ for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ u32 val;
+
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy_info->rcomp_phy != -1)
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
+
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ }
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val |= GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * HW team confirmed that the time to reach phypowergood status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+ DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ /* Program PLL Rcomp code offset */
+ val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy_info->dual_channel) {
+ val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ }
+
+ if (phy_info->rcomp_phy != -1) {
+ uint32_t grc_code;
+ /*
+ * PHY0 isn't connected to an RCOMP resistor so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+ phy_info->rcomp_phy);
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+ val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ }
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ if (phy_info->rcomp_phy == -1)
+ bxt_phy_wait_grc_done(dev_priv, phy);
+
+}
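
The GRC copy in the rcomp branch above packs the same calibrated 8-bit value into the fast, slow and nominal fields of BXT_PORT_REF_DW6. The replication is easier to see in isolation; the sketch below uses made-up field offsets, since the real shift values come from the register definitions and are not part of this patch:

/* Standalone sketch of the GRC code replication; offsets are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_GRC_FAST_SHIFT	16	/* assumed for illustration only */
#define SKETCH_GRC_SLOW_SHIFT	8	/* assumed for illustration only */

static uint32_t sketch_grc_code(uint32_t val)
{
	/* same 8-bit calibration value in all three fields */
	return (val << SKETCH_GRC_FAST_SHIFT) |
	       (val << SKETCH_GRC_SLOW_SHIFT) |
	       val;
}

int main(void)
{
	printf("grc 0x%02x -> 0x%08x\n", 0x2a, sketch_grc_code(0x2a));
	return 0;	/* prints 0x002a2a2a */
}
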
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ uint32_t val;
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+ bool was_enabled;
+
+ lockdep_assert_held(&dev_priv->power_domains.lock);
+
+ if (rcomp_phy != -1) {
+ was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ }
+
+ _bxt_ddi_phy_init(dev_priv, phy);
+
+ if (rcomp_phy != -1 && !was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ u32 val;
+
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
+
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
+
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
+
+ va_end(args);
+
+ return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ uint32_t mask;
+ bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
+
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ return false;
+
+ ok = true;
+
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+ if (phy_info->dual_channel)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
+
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", phy);
+
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", phy);
+ }
+
+ return ok;
+#undef _CHK
+}
+
+uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count)
+{
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(lane_count);
+
+ return 0;
+ }
+}
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ }
+}
+
+uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+ uint8_t mask;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
+}
+
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca50c430..58a756f2f224 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -188,13 +188,12 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return;
if (pll == NULL)
@@ -1371,6 +1370,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1378,72 +1381,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1459,11 +1462,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1485,6 +1488,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1495,36 +1502,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1533,11 +1540,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1851,13 +1858,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
- else if (HAS_DDI(dev))
+ else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
@@ -1883,7 +1890,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
/* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_ddi_pll_init(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06f9e42..cd132c216a67 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -206,6 +206,7 @@ struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
+ enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@@ -247,6 +248,8 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
+ /* for communication with audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
};
struct intel_panel {
@@ -291,6 +294,9 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -362,6 +368,8 @@ struct intel_atomic_state {
/* Gen9+ only */
struct skl_wm_values wm_results;
+
+ struct i915_sw_fence commit_ready;
};
struct intel_plane_state {
@@ -398,9 +406,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
-
- /* async flip related structures */
- struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@@ -465,9 +470,13 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
-struct skl_pipe_wm {
+struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
+};
+
+struct skl_pipe_wm {
+ struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
};
@@ -493,14 +502,7 @@ struct intel_crtc_wm_state {
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
-
- /* cached plane data rate */
- unsigned plane_data_rate[I915_MAX_PLANES];
- unsigned plane_y_data_rate[I915_MAX_PLANES];
-
- /* minimum block allocation */
- uint16_t minimum_blocks[I915_MAX_PLANES];
- uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb;
} skl;
};
@@ -653,7 +655,6 @@ struct intel_crtc_state {
bool double_wide;
- bool dp_encoder_is_mst;
int pbn;
struct intel_crtc_scaler_state scaler_state;
@@ -723,7 +724,6 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
@@ -796,22 +796,22 @@ struct intel_plane {
};
struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
};
struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -872,6 +872,14 @@ enum link_m_n_set {
M2_N2
};
+struct intel_dp_desc {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -894,6 +902,8 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* sink or branch descriptor */
+ struct intel_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -950,17 +960,22 @@ struct intel_dp {
bool compliance_test_active;
};
+struct intel_lspcon {
+ bool active;
+ enum drm_lspcon_mode mode;
+ bool desc_valid;
+};
+
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@@ -1012,17 +1027,15 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
-static inline struct drm_crtc *
-intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
-static inline struct drm_crtc *
-intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1082,15 +1095,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
-/*
- * Returns the number of planes for this pipe, ie the number of sprites + 1
- * (primary plane). This doesn't count the cursor plane then.
- */
-static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
-{
- return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
-}
-
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1107,6 +1111,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1129,6 +1136,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -1176,12 +1186,15 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1230,18 +1243,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
static inline void
-intel_wait_for_vblank(struct drm_device *dev, int pipe)
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(dev, pipe);
+ drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
{
- const struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@@ -1285,21 +1297,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
-static inline bool
-intel_rotation_90_or_270(unsigned int rotation)
-{
- return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
-}
-
-void intel_create_rotation_property(struct drm_device *dev,
- struct intel_plane *plane);
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
@@ -1327,12 +1330,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@@ -1350,7 +1347,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct drm_crtc *crtc);
+bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
@@ -1396,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1443,6 +1440,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool __intel_dp_read_desc(struct intel_dp *intel_dp,
+ struct intel_dp_desc *desc);
+bool intel_dp_read_desc(struct intel_dp *intel_dp);
+
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1487,6 +1489,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
@@ -1513,6 +1519,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@@ -1642,23 +1649,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
- int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
- assert_rpm_wakelock_held(dev_priv);
-
- return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
- WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
- "HW access outside of RPM atomic section\n");
-}
-
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
@@ -1714,11 +1704,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
-void intel_init_clock_gating(struct drm_device *dev);
-void intel_suspend_hw(struct drm_device *dev);
-int ilk_wm_max_level(const struct drm_device *dev);
-void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1742,21 +1732,16 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm);
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
- int plane);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
@@ -1773,7 +1758,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
-int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
@@ -1835,4 +1821,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+/* intel_lspcon.c */
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b2e3d3a334f7..5b72c50d6f76 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
@@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+ if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1299,12 +1298,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
}
-static enum drm_connector_status
-intel_dsi_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1346,7 +1339,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (HAS_GMCH_DISPLAY(dev) &&
+ if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
@@ -1408,7 +1401,6 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = intel_dsi_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
@@ -1450,9 +1442,9 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1488,6 +1480,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index cd154ce6b6c1..0d8ff0034b88 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
+ DRM_DEBUG_KMS("\n");
+
flags = *data++;
type = *data++;
@@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((const u32 *) data);
+ DRM_DEBUG_KMS("\n");
+
usleep_range(delay, delay + 10);
data += 4;
@@ -307,6 +311,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index;
bool value;
+ DRM_DEBUG_KMS("\n");
+
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
@@ -331,18 +337,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
+ DRM_DEBUG_KMS("Skipping I2C element execution\n");
+
return data + *(data + 6) + 7;
}
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping SPI element execution\n");
+
+ return data + *(data + 5) + 6;
+}
+
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+
+ return data + 15;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
- [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+ [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+ [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
};
/*
@@ -385,11 +409,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
- if (!data) {
- DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
- seq_id, sequence_name(seq_id));
+ if (!data)
return;
- }
WARN_ON(*data != seq_id);
@@ -420,7 +441,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
operation_size = *data++;
if (mipi_elem_exec) {
+ const u8 *next = data + operation_size;
+
data = mipi_elem_exec(intel_dsi, data);
+
+ /* Consistency check if we have size. */
+ if (operation_size && data != next) {
+ DRM_ERROR("Inconsistent operation size\n");
+ return;
+ }
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@@ -438,6 +467,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
@@ -445,7 +476,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
@@ -453,12 +485,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
@@ -740,9 +774,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
- DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
- DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
- "disabled" : "enabled");
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@@ -761,8 +794,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
- intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
- "disabled" : "enabled");
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
/* delays in VBT are in unit of 100us, so need to convert
* here in ms
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 6ab58a01b18e..56eff6004bc0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(encoder->base.dev))
+ if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2e452c505e7e..708645443046 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc) {
- mode = intel_crtc_mode_get(dev, crtc);
+ mode = intel_crtc_mode_get(dev, &crtc->base);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
-static char intel_dvo_port_name(i915_reg_t dvo_reg)
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return 'A';
+ return PORT_A;
else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return 'B';
- else if (i915_mmio_reg_equal(dvo_reg, DVOC))
- return 'C';
+ return PORT_B;
else
- return '?';
+ return PORT_C;
}
void intel_dvo_init(struct drm_device *dev)
@@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
+ enum port port;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ port = intel_dvo_port(dvo->dvo_reg);
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
- "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+ "DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 025e232a4205..3da4d466e332 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -82,12 +82,17 @@ static const struct engine_info {
},
};
-static struct intel_engine_cs *
+static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
+ struct intel_engine_cs *engine;
+
+ GEM_BUG_ON(dev_priv->engine[id]);
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
@@ -97,7 +102,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
- return engine;
+ /* Nothing to do here, execute in order of dependencies */
+ engine->schedule = NULL;
+
+ dev_priv->engine[id] = engine;
+ return 0;
}
/**
@@ -110,13 +119,16 @@ int intel_engines_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int i;
int ret;
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ WARN_ON(ring_mask == 0);
+ WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -131,7 +143,11 @@ int intel_engines_init(struct drm_device *dev)
if (!init)
continue;
- ret = init(intel_engine_setup(dev_priv, i));
+ ret = intel_engine_setup(dev_priv, i);
+ if (ret)
+ goto cleanup;
+
+ ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
@@ -143,7 +159,7 @@ int intel_engines_init(struct drm_device *dev)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
@@ -151,17 +167,17 @@ int intel_engines_init(struct drm_device *dev)
return 0;
cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ intel_logical_ring_cleanup(engine);
else
- intel_engine_cleanup(&dev_priv->engine[i]);
+ intel_engine_cleanup(engine);
}
return ret;
}
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -191,13 +207,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
+
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
@@ -207,15 +223,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_engine_wakeup(engine);
}
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
- init_request_active(&engine->last_request, NULL);
- INIT_LIST_HEAD(&engine->request_list);
+ engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
@@ -229,12 +239,10 @@ static void intel_engine_init_requests(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
-
- engine->fence_context = fence_context_alloc(1);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
- intel_engine_init_requests(engine);
+ intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
@@ -251,7 +259,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
@@ -301,6 +309,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
+ ret = i915_gem_render_state_init(engine);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -315,7 +327,142 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 acthd;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
+
+ return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 bbaddr;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+ RING_BBADDR_UDW(engine->mmio_base));
+ else
+ bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+ return bbaddr;
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
+static inline uint32_t
+read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
+ int subslice, i915_reg_t reg)
+{
+ uint32_t mcr;
+ uint32_t ret;
+ enum forcewake_domains fw_domains;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
+ mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ /*
+ * The HW expects the slice and subslice selectors to be reset to 0
+ * after reading out the registers.
+ */
+ WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ ret = I915_READ_FW(reg);
+
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return ret;
+}
+
+/* NB: please notice the memset */
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u32 mmio_base = engine->mmio_base;
+ int slice;
+ int subslice;
+
+ memset(instdone, 0, sizeof(*instdone));
+
+ switch (INTEL_GEN(dev_priv)) {
+ default:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ break;
+ case 7:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+
+ break;
+ case 6:
+ case 5:
+ case 4:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id == RCS)
+ /* HACK: Using the wrong struct member */
+ instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ break;
+ case 3:
+ case 2:
+ instdone->instdone = I915_READ(GEN2_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index faa67624e1ed..62f215b12eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
- return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+ return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen < 4;
+ return INTEL_GEN(dev_priv) < 4;
}
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen <= 3;
+ return INTEL_GEN(dev_priv) <= 3;
}
/*
@@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
{
int w, h;
- if (intel_rotation_90_or_270(cache->plane.rotation)) {
+ if (drm_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) == 7)
lines = min(lines, 2048);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
return lines * cache->fb.stride;
@@ -349,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -363,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -379,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -559,7 +561,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -592,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->threshold = ret;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
@@ -706,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -774,6 +776,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ /* We don't need to use a state cache here since this information is
+ * global for all CRTCs.
+ */
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -802,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
@@ -844,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (intel_vgpu_active(dev_priv)) {
@@ -859,13 +868,8 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
- fbc->no_fbc_reason = "no enabled pipes can have FBC";
- return false;
- }
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
- fbc->no_fbc_reason = "no enabled planes can have FBC";
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
return false;
}
@@ -1051,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- bool fbc_crtc_present = false;
- int i, j;
+ bool crtc_chosen = false;
+ int i;
mutex_lock(&fbc->lock);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (fbc->crtc == to_intel_crtc(crtc)) {
- fbc_crtc_present = true;
- break;
- }
- }
- /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
- if (!fbc_crtc_present && fbc->crtc != NULL)
+ /* Does this atomic commit involve the CRTC currently tied to FBC? */
+ if (fbc->crtc &&
+ !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ goto out;
+
+ if (!intel_fbc_can_enable(dev_priv))
goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1077,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
+ struct intel_crtc_state *intel_crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
if (!intel_plane_state->base.visible)
continue;
- for_each_crtc_in_state(state, crtc, crtc_state, j) {
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ continue;
- if (plane_state->crtc != crtc)
- continue;
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ continue;
- if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
- break;
+ intel_crtc_state = to_intel_crtc_state(
+ drm_atomic_get_existing_crtc_state(state, &crtc->base));
- intel_crtc_state->enable_fbc = true;
- goto out;
- }
+ intel_crtc_state->enable_fbc = true;
+ crtc_chosen = true;
+ break;
}
+ if (!crtc_chosen)
+ fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
out:
mutex_unlock(&fbc->lock);
}
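
The reworked selection loop above boils down to "take the first visible plane whose CRTC passes the per-platform pipe-A-only / plane-A-only restrictions, else report no suitable CRTC". The toy program below is a hedged model of just that decision; the struct and function names are invented and no i915 types are used.

#include <stdbool.h>
#include <stdio.h>

struct toy_plane {
	bool visible;
	int pipe;	/* 0 stands in for PIPE_A */
	int plane;	/* 0 stands in for PLANE_A */
};

/* Return the index of the first compatible, visible plane, or -1. */
static int toy_choose_crtc(const struct toy_plane *planes, int n,
			   bool pipe_a_only, bool plane_a_only)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!planes[i].visible)
			continue;
		if (pipe_a_only && planes[i].pipe != 0)
			continue;
		if (plane_a_only && planes[i].plane != 0)
			continue;
		return i;	/* first compatible, visible plane wins */
	}
	return -1;		/* "no suitable CRTC for FBC" */
}

int main(void)
{
	const struct toy_plane state[] = {
		{ .visible = false, .pipe = 0, .plane = 0 },
		{ .visible = true,  .pipe = 1, .plane = 0 },
		{ .visible = true,  .pipe = 0, .plane = 0 },
	};

	printf("chosen plane index: %d\n",
	       toy_choose_crtc(state, 3, true, false));	/* -> 2 */
	return 0;
}
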
@@ -1221,6 +1225,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
cancel_work_sync(&fbc->work.work);
}
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, fbc.underrun_work);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ mutex_lock(&fbc->lock);
+
+ /* Maybe we were scheduled twice. */
+ if (fbc->underrun_detected)
+ goto out;
+
+ DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ fbc->underrun_detected = true;
+
+ intel_fbc_deactivate(dev_priv);
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So to be on the safe side, completely disable FBC whenever we
+ * detect a FIFO underrun on any pipe, since an underrun on any pipe already
+ * suggests that the watermarks may be bad.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ /* There's no guarantee that underrun_detected won't be set to true
+ * right after this check and before the work is scheduled, but that's
+ * not a problem since we'll check it again in the work function with the
+ * FBC lock held. This check here is just to prevent us from unnecessarily
+ * scheduling the work, and it relies on the fact that we never switch
+ * underrun_detected back to false after it's true. */
+ if (READ_ONCE(fbc->underrun_detected))
+ return;
+
+ schedule_work(&fbc->underrun_work);
+}
+
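
The pattern introduced here is worth spelling out: the IRQ path does an unlocked peek at a sticky flag (READ_ONCE in the kernel; a plain read in the single-threaded sketch below) to avoid re-scheduling the work, and the work function re-checks under the lock because it may have been scheduled twice. Everything below is an assumption-laden toy model, not driver code; the names are invented and the direct function call stands in for schedule_work().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_fbc {
	pthread_mutex_t lock;
	bool underrun_detected;		/* sticky: set once, never cleared */
};

static void toy_underrun_work(struct toy_fbc *fbc)
{
	pthread_mutex_lock(&fbc->lock);
	if (!fbc->underrun_detected) {	/* maybe we were scheduled twice */
		fbc->underrun_detected = true;
		printf("disabling FBC due to FIFO underrun\n");
	}
	pthread_mutex_unlock(&fbc->lock);
}

static void toy_underrun_irq(struct toy_fbc *fbc)
{
	/* Unlocked read: a stale false only costs one extra schedule,
	 * since the work re-checks with the lock held. */
	if (fbc->underrun_detected)
		return;
	toy_underrun_work(fbc);		/* stands in for schedule_work() */
}

int main(void)
{
	struct toy_fbc fbc = { PTHREAD_MUTEX_INITIALIZER, false };

	toy_underrun_irq(&fbc);
	toy_underrun_irq(&fbc);		/* second underrun: no double-disable */
	return 0;
}
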
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
@@ -1238,7 +1295,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
return;
for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(&crtc->base) &&
+ if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
@@ -1292,6 +1349,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
@@ -1317,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
/* This value was pulled out of someone's hat */
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b7098f98bb67..beb08982dc0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -102,16 +102,13 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
@@ -359,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
@@ -512,7 +509,7 @@ retry:
* fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
@@ -636,7 +633,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
fb->base.pixel_format,
- fb->base.modifier[0]);
+ fb->base.modifier);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
@@ -700,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -717,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes, 4);
+ INTEL_INFO(dev_priv)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 2aa744081f09..e660d8b4bbc3 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -245,22 +245,21 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
+ old = !crtc->cpu_fifo_underrun_disabled;
+ crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
+ else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
unsigned long flags;
bool old;
@@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
+ old = !crtc->pch_fifo_underrun_disabled;
+ crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -366,12 +365,14 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+ crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
+
+ intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 76ceb539f9f0..7bab41218cf7 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5cdf7aa75be5..0053258e03d3 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -64,7 +64,7 @@ struct drm_i915_gem_request;
*/
struct i915_guc_client {
struct i915_vma *vma;
- void *client_base; /* first page (only) of above */
+ void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
@@ -123,10 +123,28 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
+struct intel_guc_log {
+ uint32_t flags;
+ struct i915_vma *vma;
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+
+ /* logging related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
struct intel_guc {
struct intel_guc_fw guc_fw;
- uint32_t log_flags;
- struct i915_vma *log_vma;
+ struct intel_guc_log log;
+
+ /* GuC2Host interrupt related state */
+ bool interrupts_enabled;
struct i915_vma *ads_vma;
struct i915_vma *ctx_pool_vma;
@@ -146,6 +164,9 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
+
+ /* To serialize the Host2GuC actions */
+ struct mutex action_lock;
};
/* intel_guc_loader.c */
@@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
+void i915_guc_register(struct drm_i915_private *dev_priv);
+void i915_guc_unregister(struct drm_i915_private *dev_priv);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index e40db2d2ae99..324ea902558b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -104,9 +104,9 @@
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_PAGES 7
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_PAGES 7
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
@@ -419,15 +419,87 @@ struct guc_ads {
u32 reserved2[4];
} __packed;
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+ GUC_ISR_LOG_BUFFER,
+ GUC_DPC_LOG_BUFFER,
+ GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * DOC: GuC Log buffer Layout
+ *
+ * Page0 +-------------------------------+
+ * | ISR state header (32 bytes) |
+ * | DPC state header |
+ * | Crash dump state header |
+ * Page1 +-------------------------------+
+ * | ISR logs |
+ * Page9 +-------------------------------+
+ * | DPC logs |
+ * Page17 +-------------------------------+
+ * | Crash Dump logs |
+ * +-------------------------------+
+ *
+ * The state structure below is used to coordinate retrieval of the GuC
+ * firmware logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location up to which i915 has read the log buffer
+ * and is read-only for the GuC firmware. write_ptr is advanced by the GuC by
+ * the number of bytes written for each log entry and is read-only for i915.
+ * When any type of log buffer becomes half full, the GuC sends a flush
+ * interrupt. The GuC firmware expects that while it is writing to the second
+ * half of the buffer, the first half is consumed by the Host, followed by a
+ * flush-completed acknowledgment from the Host, so that it never overwrites
+ * unread data and loses logs. So when a buffer gets half full and i915 has
+ * requested an interrupt, the GuC sets the flush_to_file field, sets
+ * sampled_write_ptr to the value of write_ptr and raises the interrupt.
+ * On receiving the interrupt, i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr to the value of sampled_write_ptr
+ * before sending an acknowledgment to the GuC. The marker & version fields
+ * are for internal use by the GuC and opaque to i915. The buffer_full_cnt
+ * field is incremented every time the GuC detects a log buffer overflow.
+ */
+struct guc_log_buffer_state {
+ u32 marker[2];
+ u32 read_ptr;
+ u32 write_ptr;
+ u32 size;
+ u32 sampled_write_ptr;
+ union {
+ struct {
+ u32 flush_to_file:1;
+ u32 buffer_full_cnt:4;
+ u32 reserved:27;
+ };
+ u32 flags;
+ };
+ u32 version;
+} __packed;
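
To make the host side of that flush handshake concrete, here is a self-contained sketch of one consume step. It is illustrative only: the field names mirror guc_log_buffer_state, but the structure, the copy destination and the "ack" (clearing flush_to_file after publishing read_ptr) are stand-ins rather than the actual i915 code path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_log_state {
	uint32_t read_ptr;
	uint32_t write_ptr;
	uint32_t size;
	uint32_t sampled_write_ptr;
	uint32_t flush_to_file;
};

/* Copy everything between read_ptr and sampled_write_ptr, then publish the
 * new read_ptr and clear the flush request, as the DOC comment describes. */
static void toy_consume_flush(struct toy_log_state *st,
			      const uint8_t *log, uint8_t *out)
{
	uint32_t from = st->read_ptr;
	uint32_t to = st->sampled_write_ptr;

	if (!st->flush_to_file)
		return;

	if (to >= from) {
		memcpy(out, log + from, to - from);
	} else {
		/* producer wrapped: copy the tail, then the head */
		memcpy(out, log + from, st->size - from);
		memcpy(out + (st->size - from), log, to);
	}

	st->read_ptr = to;		/* tell the producer how far we got */
	st->flush_to_file = 0;		/* then acknowledge the flush */
}

int main(void)
{
	uint8_t log[64] = "0123456789abcdef", out[64] = { 0 };
	struct toy_log_state st = {
		.read_ptr = 0, .size = 64,
		.sampled_write_ptr = 8, .flush_to_file = 1,
	};

	toy_consume_flush(&st, log, out);
	printf("consumed up to %u: %.8s\n", st.read_ptr, out);
	return 0;
}
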
+
+union guc_log_control {
+ struct {
+ u32 logging_enabled:1;
+ u32 reserved1:3;
+ u32 verbosity:4;
+ u32 reserved2:24;
+ };
+ u32 value;
+} __packed;
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
HOST2GUC_ACTION_LIMIT
};
@@ -449,4 +521,10 @@ enum guc2host_status {
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum guc2host_message {
+ GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+};
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6fd39efb7894..34d6ad2cf7c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
@@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -209,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
if (i915.guc_log_level >= 0) {
- params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- }
+ } else
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@@ -347,7 +351,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = &dev_priv->drm;
struct i915_vma *vma;
int ret;
@@ -375,24 +378,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
- /* WaDisableMinuteIaClockGating:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /* WaDisableMinuteIaClockGating:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
- /* WaC6DisallowByGfxPause*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaC6DisallowByGfxPause:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
@@ -484,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
}
guc_interrupts_release(dev_priv);
+ gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@@ -528,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
@@ -561,7 +566,7 @@ fail:
ret = 0;
}
- if (err == 0 && !HAS_GUC_UCODE(dev))
+ if (err == 0 && !HAS_GUC_UCODE(dev_priv))
; /* Don't mention the GuC! */
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
@@ -720,23 +725,28 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- /* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ if (!HAS_GUC(dev_priv)) {
+ i915.enable_guc_loading = 0;
+ i915.enable_guc_submission = 0;
+ } else {
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ }
- if (!HAS_GUC_UCODE(dev)) {
+ if (!HAS_GUC_UCODE(dev_priv)) {
fw_path = NULL;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev)) {
+ } else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 434f4d5c553d..290384e86c63 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -31,14 +31,20 @@
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented a virtual GPU (vGPU), which has equivalent
* features as the underlying physical GPU (pGPU), so i915 driver can run
- * seamlessly in a virtual machine. This file provides the englightments
- * of GVT and the necessary components used by GVT in i915 driver.
+ * seamlessly in a virtual machine.
+ *
+ * To virtualize GPU resources, the GVT-g driver depends on hypervisor
+ * technology (e.g. KVM/VFIO/mdev, Xen) to provide resource-access trapping;
+ * the trapped accesses are then virtualized within the GVT-g device module.
+ * More architectural design documentation is available at
+ * https://01.org/group/2230/documentation-list.
*/
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;
+ if (IS_SKYLAKE(dev_priv))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211df74db..25df2d65b985 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-#include "gvt/gvt.h"
+struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
new file mode 100644
index 000000000000..53df5b11bff4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static bool
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
+{
+ if (INTEL_GEN(engine->i915) >= 8) {
+ return (ipehr >> 23) == 0x1c;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
+ return signaller;
+ }
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+ engine->name, ipehr, offset);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ void __iomem *vaddr;
+ u32 cmd, ipehr, head;
+ u64 offset = 0;
+ int i, backwards;
+
+ /*
+ * This function does not support execlist mode - any attempt to
+ * proceed further into this function will result in a kernel panic
+ * when dereferencing ring->buffer, which is not set up in execlist
+ * mode.
+ *
+ * The correct way of doing it would be to derive the currently
+ * executing ring buffer from the current context, which is derived
+ * from the currently running request. Unfortunately, to get the
+ * current request we would have to grab the struct_mutex before doing
+ * anything else, which would be ill-advised since some other thread
+ * might have grabbed it already and managed to hang itself, causing
+ * the hang checker to deadlock.
+ *
+ * Therefore, this function does not support execlist mode in its
+ * current form. Just return NULL and move on.
+ */
+ if (engine->buffer == NULL)
+ return NULL;
+
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
+ return NULL;
+
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 3
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
+ * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself.
+ */
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+ vaddr = (void __iomem *)engine->buffer->vaddr;
+
+ for (i = backwards; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= engine->buffer->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(vaddr + head);
+ if (cmd == ipehr)
+ break;
+
+ head -= 4;
+ }
+
+ if (!i)
+ return NULL;
+
+ *seqno = ioread32(vaddr + head + 4) + 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ offset = ioread32(vaddr + head + 12);
+ offset <<= 32;
+ offset |= ioread32(vaddr + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
+}
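
The backwards scan above relies on the ring size being a power of two, so "head &= size - 1" both clamps a wild head value and handles wrap-around while stepping back one dword at a time. The snippet below is a hedged, self-contained model of that trick (invented names, an ordinary array instead of an MMIO ring).

#include <stdint.h>
#include <stdio.h>

/* Scan at most 'backwards' dwords back from 'head' looking for 'needle'.
 * size_bytes must be a power of two; the mask handles underflow/wrap. */
static int scan_backwards(const uint32_t *ring, uint32_t size_bytes,
			  uint32_t head, uint32_t needle, int backwards)
{
	int i;

	for (i = backwards; i; --i) {
		head &= size_bytes - 1;		/* clamp + wrap */
		if (ring[head / 4] == needle)
			return head;
		head -= 4;			/* may underflow; the mask fixes it */
	}
	return -1;
}

int main(void)
{
	uint32_t ring[8] = { 0, 0, 0, 0, 0, 0, 0, 0xdead };

	/* head = byte 4 (dword 1); stepping back wraps from dword 0 to 7 */
	printf("found at byte offset %d\n",
	       scan_backwards(ring, sizeof(ring), 4, 0xdead, 4));
	return 0;
}
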
+
+static int semaphore_passed(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ u32 seqno;
+
+ engine->hangcheck.deadlock++;
+
+ signaller = semaphore_waits_for(engine, &seqno);
+ if (signaller == NULL)
+ return -1;
+
+ if (IS_ERR(signaller))
+ return 0;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
+ return -1;
+
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+ return 1;
+
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ engine->hangcheck.deadlock = 0;
+}
+
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+ u32 tmp = current_instdone | *old_instdone;
+ bool unchanged;
+
+ unchanged = tmp == *old_instdone;
+ *old_instdone |= tmp;
+
+ return unchanged;
+}
+
+static bool subunits_stuck(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_instdone instdone;
+ struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
+ bool stuck;
+ int slice;
+ int subslice;
+
+ if (engine->id != RCS)
+ return true;
+
+ intel_engine_get_instdone(engine, &instdone);
+
+ /* There might be unstable subunit states even when
+ * the actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * consider those as progress.
+ */
+ stuck = instdone_unchanged(instdone.instdone,
+ &accu_instdone->instdone);
+ stuck &= instdone_unchanged(instdone.slice_common,
+ &accu_instdone->slice_common);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+ &accu_instdone->sampler[slice][subslice]);
+ stuck &= instdone_unchanged(instdone.row[slice][subslice],
+ &accu_instdone->row[slice][subslice]);
+ }
+
+ return stuck;
+}
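
The accumulation trick used by instdone_unchanged() above is compact enough to miss on a first read: a subunit only counts as making progress when the new INSTDONE sample sets a bit that has not been seen before, which filters out states that merely oscillate. A minimal model, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when the sample adds no new bits to the accumulator. */
static bool toy_instdone_unchanged(uint32_t current, uint32_t *accumulated)
{
	uint32_t tmp = current | *accumulated;
	bool unchanged = (tmp == *accumulated);

	*accumulated |= tmp;
	return unchanged;
}

int main(void)
{
	uint32_t accu = 0;

	printf("%d\n", toy_instdone_unchanged(0x3, &accu)); /* 0: new bits */
	printf("%d\n", toy_instdone_unchanged(0x1, &accu)); /* 1: nothing new */
	printf("%d\n", toy_instdone_unchanged(0x7, &accu)); /* 0: bit 2 is new */
	return 0;
}
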
+
+static enum intel_engine_hangcheck_action
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ if (acthd != engine->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ return HANGCHECK_ACTIVE;
+ }
+
+ if (!subunits_stuck(engine))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ enum intel_engine_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(engine, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
+ if (IS_GEN2(dev_priv))
+ return HANGCHECK_HUNG;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(engine);
+ if (tmp & RING_WAIT) {
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck wait on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(engine)) {
+ default:
+ return HANGCHECK_HUNG;
+ case 1:
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck semaphore on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ case 0:
+ return HANGCHECK_WAIT;
+ }
+ }
+
+ return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress, and
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
+ * we kick the ring. If we see no progress on three subsequent calls,
+ * we assume the chip is wedged and try to fix it by resetting the chip.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define ACTIVE_DECAY 15
+
+ if (!i915.enable_hangcheck)
+ return;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ bool busy = intel_engine_has_waiter(engine);
+ u64 acthd;
+ u32 seqno;
+ u32 submit;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, but be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_engine_get_active_head(engine);
+ seqno = intel_engine_get_seqno(engine);
+ submit = intel_engine_last_submit(engine);
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (i915_seqno_passed(seqno, submit)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ } else {
+ /* We always increment the hangcheck score
+ * if the engine is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+ * the hangcheck score on this ring is when this
+ * engine is in a legitimate wait for another
+ * engine. In that case the waiting engine is a
+ * victim and we want to be sure we catch the
+ * right culprit. Then every time we do kick
+ * the ring, add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and so responsible
+ * for stalling the machine.
+ */
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
+
+ switch (engine->hangcheck.action) {
+ case HANGCHECK_IDLE:
+ case HANGCHECK_WAIT:
+ break;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
+ break;
+ case HANGCHECK_KICK:
+ engine->hangcheck.score += KICK;
+ break;
+ case HANGCHECK_HUNG:
+ engine->hangcheck.score += HUNG;
+ break;
+ }
+ }
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
+ } else {
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
+
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
+
+ /* Clear head and subunit states on seqno movement */
+ acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+ }
+
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
+
+ if (hung) {
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
+ }
+
+ /* Reset timer in case GPU hangs without another request being added */
+ if (busy_count)
+ i915_queue_hangcheck(dev_priv);
+}
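
To summarize the scoring loop above: an engine that keeps reporting the same seqno accumulates score (BUSY, KICK or HUNG depending on what the head/ctl checks concluded), an engine whose seqno advances decays back toward zero, and crossing a threshold marks it hung. The sketch below keeps only that shape; everything except the BUSY/KICK/HUNG/ACTIVE_DECAY values taken from the hunk above is an assumption, including the threshold.

#include <stdio.h>

#define BUSY		1
#define KICK		5
#define HUNG		20
#define ACTIVE_DECAY	15
#define TOY_HUNG_SCORE	31	/* illustrative threshold, not the driver's */

struct toy_engine {
	unsigned int last_seqno;
	int score;
};

/* increment is BUSY, KICK or HUNG depending on what the stuck checks said. */
static int toy_hangcheck(struct toy_engine *e, unsigned int seqno, int increment)
{
	if (e->last_seqno == seqno) {
		e->score += increment;		/* no progress: accumulate */
	} else {
		e->score -= ACTIVE_DECAY;	/* progress: decay toward zero */
		if (e->score < 0)
			e->score = 0;
	}
	e->last_seqno = seqno;
	return e->score >= TOY_HUNG_SCORE;
}

int main(void)
{
	struct toy_engine e = { .last_seqno = 10, .score = 0 };
	int i, hung = 0;

	for (i = 0; i < 3 && !hung; i++)	/* stuck at seqno 10 */
		hung = toy_hangcheck(&e, 10, HUNG);

	printf("hung after %d checks, score %d\n", i, e.score);
	return 0;
}
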
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+void intel_hangcheck_init(struct drm_i915_private *i915)
+{
+ INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f40a35f2913a..fb88e32e25a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
- enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev) &&
+ if (!HAS_PCH_SPLIT(dev_priv) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* FIXME: BSpec says this should be done at the end of
* the modeset sequence, so not sure if this isn't too soon.
*/
- if (crtc->config->pipe_bpp > 24 &&
- crtc->config->pixel_multiplier > 1) {
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
* 4. enable HDMI clock gating
*/
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1141,7 +1142,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -1164,7 +1165,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
@@ -1241,7 +1238,7 @@ static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1249,11 +1246,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
@@ -1265,6 +1262,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
{
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1287,7 +1285,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock, true);
/* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
return status;
@@ -1297,7 +1295,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(to_i915(dev)))
return false;
/*
@@ -1312,7 +1310,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
@@ -1339,7 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@@ -1644,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
}
@@ -1662,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1673,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1799,6 +1794,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ u8 ddc_pin;
+
+ if (info->alternate_ddc_pin) {
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+ info->alternate_ddc_pin, port_name(port));
+ return info->alternate_ddc_pin;
+ }
+
+ switch (port) {
+ case PORT_B:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_1_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_2_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ else
+ ddc_pin = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
+
+ return ddc_pin;
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1808,7 +1847,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -1826,12 +1864,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
@@ -1842,61 +1878,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- if (WARN_ON(IS_BROXTON(dev_priv)))
- intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
- else if (IS_CHERRYVIEW(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
- /* On SKL PORT E doesn't have seperate GMBUS pin
- * We rely on VBT to set a proper alternate GMBUS pin. */
- alternate_ddc_pin =
- dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
- switch (alternate_ddc_pin) {
- case DDC_PIN_B:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
- break;
- case DDC_PIN_C:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
- break;
- case DDC_PIN_D:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
- break;
- default:
- MISSING_CASE(alternate_ddc_pin);
- }
intel_encoder->hpd_pin = HPD_PORT_E;
break;
- case PORT_A:
- intel_encoder->hpd_pin = HPD_PORT_A;
- /* Internal port only for eDP. */
default:
- BUG();
+ MISSING_CASE(port);
+ return;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@@ -1906,7 +1913,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1920,7 +1927,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -1929,6 +1936,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1949,7 +1957,7 @@ void intel_hdmi_init(struct drm_device *dev,
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -1957,29 +1965,30 @@ void intel_hdmi_init(struct drm_device *dev,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev))
+ else if (HAS_PCH_IBX(dev_priv))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -1993,7 +2002,7 @@ void intel_hdmi_init(struct drm_device *dev,
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..3d546c019de0 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (intel_connector->mst_port)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
intel_connector->encoder->hpd_pin > HPD_NONE) {
connector->polled = enabled ?
DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9ad6faa..83f260bb4eef 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- const unsigned int fw =
- intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
- FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, fw);
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -576,7 +571,6 @@ timeout:
ret = -EAGAIN;
out:
- intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
@@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
bus->force_bit = 1;
intel_gpio_setup(bus, pin);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0adb879833ff..0a09024d6ca3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
engine->disable_lite_restore_wa =
- (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
@@ -366,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -433,15 +432,17 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *cursor, *last;
+ struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
+ unsigned long flags;
+ struct rb_node *rb;
bool submit = false;
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
+ * as we resubmit the request. See gen8_emit_breadcrumb()
* for where we prepare the padding after the end of the
* request.
*/
@@ -470,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->execlist_lock);
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *cursor =
+ rb_entry(rb, typeof(*cursor), priotree.node);
+
/* Can we combine this request with the current port? It has to
* be the same context/ringbuffer and not have any exceptions
* (e.g. GVT saying never to combine contexts).
@@ -494,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(cursor->ctx))
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
@@ -502,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
port++;
}
+
+ rb = rb_next(rb);
+ rb_erase(&cursor->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&cursor->priotree.node);
+ cursor->priotree.priority = INT_MAX;
+
+ /* We keep the previous context alive until we retire the
+	 * following request. This ensures that the context object
+ * is still pinned for any residual writes the HW makes into it
+ * on the context switch into the next object following the
+ * breadcrumb. Otherwise, we may retire the context too early.
+ */
+ cursor->previous_context = engine->last_context;
+ engine->last_context = cursor->ctx;
+
+ __i915_gem_request_submit(cursor);
last = cursor;
submit = true;
}
if (submit) {
- /* Decouple all the requests submitted from the queue */
- engine->execlist_queue.next = &cursor->execlist_link;
- cursor->execlist_link.prev = &engine->execlist_queue;
-
i915_gem_request_assign(&port->request, last);
+ engine->execlist_first = rb;
}
- spin_unlock(&engine->execlist_lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (submit)
execlists_submit_ports(engine);
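The reworked execlists_dequeue() above walks the priority tree and packs requests into at most two submission ports, coalescing a run of requests that share a context and leaving everything else queued for the next pass. A minimal userspace sketch of that packing rule, using a plain array instead of the driver's rbtree and ports (all names here are illustrative):

#include <stdio.h>

struct req { int ctx; };

int main(void)
{
	/* Pretend pending queue, already in priority order. */
	struct req queue[] = { {1}, {1}, {2}, {2}, {3} };
	int n = sizeof(queue) / sizeof(queue[0]);
	int nports = 0, i = 0;

	while (i < n && nports < 2) {
		int ctx = queue[i].ctx;
		int merged = 0;

		/* Coalesce a run of same-context requests into one port. */
		while (i < n && queue[i].ctx == ctx) {
			merged++;
			i++;
		}
		printf("port %d: ctx %d (%d request%s)\n",
		       nports, ctx, merged, merged > 1 ? "s" : "");
		nports++;
	}
	if (i < n)
		printf("%d request(s) left queued for the next dequeue\n",
		       n - i);
	return 0;
}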
@@ -523,6 +542,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
+/**
+ * intel_execlists_idle() - Determine if all engine submission ports are idle
+ * @dev_priv: i915 device private
+ *
+ * Return true if there are no requests pending on any of the submission ports
+ * of any engines.
+ */
+bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!i915.enable_execlists)
+ return true;
+
+ for_each_engine(engine, dev_priv, id)
+ if (!execlists_elsp_idle(engine))
+ return false;
+
+ return true;
+}
+
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
int port;
@@ -593,18 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
+static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+{
+ struct rb_node **p, *rb;
+ bool first = true;
+
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct i915_priotree *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), node);
+ if (pt->priority > pos->priority) {
+ p = &rb->rb_left;
+ } else {
+ p = &rb->rb_right;
+ first = false;
+ }
+ }
+ rb_link_node(&pt->node, rb, p);
+ rb_insert_color(&pt->node, root);
+
+ return first;
+}
+
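insert_request() above keeps the execlist queue ordered so that the most positive priority runs first while equal priorities keep their submission order: equal keys always descend to the right of the rbtree, and the return value tells the caller whether execlist_first must be updated. A small userspace sketch of the same tie-breaking rule, using a sorted singly linked list in place of the kernel rbtree (names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int priority;
	int seq;		/* submission order, for the demo */
	struct node *next;
};

/* Returns true if the new node became the head of the queue, mirroring
 * the "first" flag that lets the caller update its head pointer. */
static bool queue_insert(struct node **head, struct node *nn)
{
	struct node **p = head;
	bool first = true;

	/* Skip past higher *and equal* priorities so that requests of the
	 * same priority stay in submission (FIFO) order. */
	while (*p && (*p)->priority >= nn->priority) {
		p = &(*p)->next;
		first = false;
	}
	nn->next = *p;
	*p = nn;
	return first;
}

int main(void)
{
	static const int prios[] = { 0, 10, 0, 10, 5 };
	struct node *head = NULL;

	for (int i = 0; i < 5; i++) {
		struct node *nn = malloc(sizeof(*nn));

		nn->priority = prios[i];
		nn->seq = i;
		queue_insert(&head, nn);
	}
	while (head) {		/* prints 10/1, 10/3, 5/4, 0/0, 0/2 */
		struct node *n = head;

		printf("prio=%d seq=%d\n", n->priority, n->seq);
		head = n->next;
		free(n);
	}
	return 0;
}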
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
- list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ if (insert_request(&request->priotree, &engine->execlist_queue))
+ engine->execlist_first = &request->priotree.node;
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_irqrestore(&engine->execlist_lock, flags);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static struct intel_engine_cs *
+pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine;
+
+ engine = container_of(pt,
+ struct drm_i915_gem_request,
+ priotree)->engine;
+ if (engine != locked) {
+ if (locked)
+ spin_unlock_irq(&locked->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
+ }
+
+ return engine;
+}
+
+static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+{
+ static DEFINE_MUTEX(lock);
+ struct intel_engine_cs *engine = NULL;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ LIST_HEAD(dfs);
+
+ if (prio <= READ_ONCE(request->priotree.priority))
+ return;
+
+ /* Need global lock to use the temporary link inside i915_dependency */
+ mutex_lock(&lock);
+
+ stack.signaler = &request->priotree;
+ list_add(&stack.dfs_link, &dfs);
+
+ /* Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_priotree *pt, prio) {
+ * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * insert_request(pt);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ list_for_each_entry(p, &pt->signalers_list, signal_link)
+ if (prio > READ_ONCE(p->signaler->priority))
+ list_move_tail(&p->dfs_link, &dfs);
+
+ p = list_next_entry(dep, dfs_link);
+ if (!RB_EMPTY_NODE(&pt->node))
+ continue;
+
+ engine = pt_lock_engine(pt, engine);
+
+ /* If it is not already in the rbtree, we can update the
+ * priority inplace and skip over it (and its dependencies)
+ * if it is referenced *again* as we descend the dfs.
+ */
+ if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
+ pt->priority = prio;
+ list_del_init(&dep->dfs_link);
+ }
+ }
+
+ /* Fifo and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = pt_lock_engine(pt, engine);
+
+ if (prio <= pt->priority)
+ continue;
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
+
+ pt->priority = prio;
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
+
+ if (engine)
+ spin_unlock_irq(&engine->timeline->lock);
+
+ mutex_unlock(&lock);
+
+ /* XXX Do we need to preempt to make room for us and our deps? */
}
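The long comment inside execlists_schedule() above explains why the priority bump avoids recursion: it flattens the dependency graph into a list and then replays that list in reverse so signalers are raised before the waiter. A compact userspace model of that idea, using a fixed-size stack and visitation list instead of the driver's i915_dependency links (all structures and names are illustrative):

#include <stdio.h>

#define MAX_DEPS 4

struct req {
	const char *name;
	int priority;
	struct req *signalers[MAX_DEPS];	/* requests this one waits on */
	int nr_signalers;
	int visited;
};

static void bump_priority(struct req *root, int prio)
{
	struct req *stack[16], *order[16];
	int top = 0, n = 0;

	stack[top++] = root;
	/* Build a flat visitation list instead of recursing. */
	while (top) {
		struct req *r = stack[--top];

		if (r->visited || r->priority >= prio)
			continue;
		r->visited = 1;
		order[n++] = r;
		for (int i = 0; i < r->nr_signalers; i++)
			stack[top++] = r->signalers[i];
	}
	/* Walk in reverse: dependencies receive the new priority first. */
	while (n--) {
		order[n]->priority = prio;
		printf("bumped %s to %d\n", order[n]->name, prio);
	}
}

int main(void)
{
	struct req a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

	b.signalers[b.nr_signalers++] = &a;	/* b waits on a */
	c.signalers[c.nr_signalers++] = &b;	/* c waits on b */
	bump_priority(&c, 10);			/* bumps a, then b, then c */
	return 0;
}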
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -672,46 +842,6 @@ err_unpin:
return ret;
}
-/*
- * intel_logical_ring_advance() - advance the tail and prepare for submission
- * @request: Request to advance the logical ringbuffer of.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static int
-intel_logical_ring_advance(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- struct intel_engine_cs *engine = request->engine;
-
- intel_ring_advance(ring);
- request->tail = ring->tail;
-
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
- *
- * Caller must reserve WA_TAIL_DWORDS for us!
- */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- request->wa_tail = ring->tail;
-
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -745,7 +875,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->dirty = true;
+ ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
@@ -853,13 +983,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl,kbl
+ * WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1002,9 +1131,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1075,9 +1203,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1104,9 +1231,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_NOOP);
}
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1250,8 +1376,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
- if (!execlists_elsp_idle(engine))
+ /* After a GPU reset, we may have requests to replay */
+ if (!execlists_elsp_idle(engine)) {
+ engine->execlist_port[0].count = 0;
+ engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
+ }
return 0;
}
@@ -1326,10 +1456,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
memset(&port[1], 0, sizeof(port[1]));
}
- /* CS is stopped, and we will resubmit both ports on resume */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
- port[0].count = 0;
- port[1].count = 0;
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@@ -1570,39 +1697,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
{
- struct intel_ring *ring = request->ring;
- int ret;
-
- ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+ *out++ = MI_NOOP;
+ *out++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request->ring, out);
+}
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- intel_hws_seqno_address(request->engine) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, request->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
-}
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *out++ = 0;
+ *out++ = request->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
-static int gen8_emit_request_render(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- int ret;
+ gen8_emit_wa_tail(request, out);
+}
- ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
+static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1610,21 +1733,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(request->engine);
+ *out++ = 0;
+ *out++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
+
+ gen8_emit_wa_tail(request, out);
}
+static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+
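gen8_emit_breadcrumb() and gen8_emit_breadcrumb_render() above now write directly into a caller-supplied dword buffer, with a companion *_sz constant so ring space can be reserved up front instead of calling intel_ring_begin() inside the emitter. A sketch of that pattern with made-up command values (nothing here is the real command encoding):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WA_TAIL_DWORDS 2

/* The emitter fills a preallocated buffer and returns the new tail. */
static uint32_t *emit_breadcrumb(uint32_t seqno, uint32_t *out)
{
	*out++ = 0x10000001;	/* stand-in for the flush/store command */
	*out++ = 0x00001000;	/* stand-in for the HWS status address */
	*out++ = 0;
	*out++ = seqno;
	*out++ = 0x01000000;	/* stand-in for MI_USER_INTERRUPT */
	*out++ = 0;		/* MI_NOOP */
	for (int i = 0; i < WA_TAIL_DWORDS; i++)
		*out++ = 0;	/* WaIdleLiteRestore padding */
	return out;
}

/* Advertised size, checked against what was actually written. */
static const int emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;

int main(void)
{
	uint32_t ring[6 + WA_TAIL_DWORDS];
	uint32_t *end = emit_breadcrumb(42, ring);
	ptrdiff_t written = end - ring;

	assert(written == emit_breadcrumb_sz);
	printf("emitted %td dwords\n", written);
	return 0;
}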
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
@@ -1641,7 +1767,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_init(req);
+ return i915_gem_render_state_emit(req);
}
/**
@@ -1652,9 +1778,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
/*
	 * Tasklet cannot be active at this point due to intel_mark_active/idle
* so this is just for documentation.
@@ -1681,14 +1804,19 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
+ }
}
static void
@@ -1698,8 +1826,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
- engine->emit_request = gen8_emit_request;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
@@ -1820,7 +1950,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_request = gen8_emit_request_render;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, 4096);
if (ret)
@@ -1945,7 +2076,7 @@ static void execlists_init_reg_state(u32 *reg_state,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ RING_CTL_SIZE(ring->size) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2046,7 +2177,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->dirty = true;
+ ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
@@ -2153,30 +2284,43 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
- struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+
+ /* Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in context. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u32 *reg;
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- void *vaddr;
- uint32_t *reg_state;
-
- if (!ce->state)
- continue;
-
- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
- if (WARN_ON(IS_ERR(vaddr)))
- continue;
+ if (!ce->state)
+ continue;
- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ reg = i915_gem_object_pin_map(ce->state->obj,
+ I915_MAP_WB);
+ if (WARN_ON(IS_ERR(reg)))
+ continue;
- reg_state[CTX_RING_HEAD+1] = 0;
- reg_state[CTX_RING_TAIL+1] = 0;
+ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+ reg[CTX_RING_HEAD+1] = 0;
+ reg[CTX_RING_TAIL+1] = 0;
- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ ce->state->obj->mm.dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = 0;
- ce->ring->tail = 0;
+ ce->ring->head = ce->ring->tail = 0;
+ ce->ring->last_retired_head = -1;
+ intel_ring_update_space(ce->ring);
+ }
}
}
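The resume path above zeroes RING_HEAD and RING_TAIL in every context image and then resets the software bookkeeping to match, recomputing the free space. A tiny userspace sketch of that bookkeeping reset; the space formula is a simplified stand-in for the driver's helper and the field names are illustrative:

#include <stdio.h>

struct ring {
	unsigned int size;		/* power of two, in bytes */
	unsigned int head, tail;
	unsigned int space;
	int last_retired_head;
};

static void ring_update_space(struct ring *r)
{
	/* Free bytes between tail and head, modulo the ring size, minus a
	 * small gap so that head == tail always means "empty". */
	r->space = (r->head - r->tail - 8) & (r->size - 1);
}

int main(void)
{
	struct ring r = { .size = 4096, .head = 512, .tail = 1024 };

	/* Mirror of the resume path: start reading and writing from zero. */
	r.head = r.tail = 0;
	r.last_retired_head = -1;
	ring_update_space(&r);

	printf("space after reset: %u bytes\n", r.space);
	return 0;
}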
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4fed8165f98a..c1f546180ba2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
new file mode 100644
index 000000000000..daa523410953
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include "intel_drv.h"
+
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+
+ return &dig_port->dp;
+}
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+ enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ if (drm_lspcon_get_mode(adapter, &current_mode))
+ DRM_ERROR("Error reading LSPCON mode\n");
+ else
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode, bool force)
+{
+ int err;
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+ err = drm_lspcon_get_mode(adapter, &current_mode);
+ if (err) {
+ DRM_ERROR("Error reading LSPCON mode\n");
+ return err;
+ }
+
+ if (current_mode == mode) {
+ DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+ return 0;
+ }
+
+ err = drm_lspcon_set_mode(adapter, mode);
+ if (err < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return err;
+ }
+
+ lspcon->mode = mode;
+ DRM_DEBUG_KMS("LSPCON mode changed done\n");
+ return 0;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+ enum drm_dp_dual_mode_type adaptor_type;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+	/* Let's probe the adaptor and check its type */
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+ DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
+ return false;
+ }
+
+	/* Yay ... got an LSPCON device */
+ DRM_DEBUG_KMS("LSPCON detected\n");
+ lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->active = true;
+ return true;
+}
+
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ unsigned long start = jiffies;
+
+ if (!lspcon->desc_valid)
+ return;
+
+ while (1) {
+ struct intel_dp_desc desc;
+
+ /*
+ * The w/a only applies in PCON mode and we don't expect any
+ * AUX errors.
+ */
+ if (!__intel_dp_read_desc(intel_dp, &desc))
+ return;
+
+ if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
+ return;
+ }
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+ break;
+
+ usleep_range(10000, 15000);
+ }
+
+ DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+}
+
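lspcon_resume_in_pcon_wa() above is a poll-until-stable loop: keep re-reading the DP descriptor until it matches the cached copy or roughly one second has passed, sleeping between attempts. A generic userspace sketch of the same pattern; the descriptor layout, the read_desc() callback and the timeout below are assumptions for illustration only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

struct desc { unsigned char blob[16]; };

/* Hypothetical reader; a real one would query the device over AUX. */
static bool read_desc(struct desc *out)
{
	memset(out, 0xab, sizeof(*out));
	return true;
}

static bool wait_for_match(const struct desc *expected, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		struct desc cur;
		long elapsed_ms;

		if (!read_desc(&cur))
			return false;		/* read error: give up */
		if (!memcmp(expected, &cur, sizeof(cur)))
			return true;		/* device state recovered */

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return false;		/* timed out */
		usleep(10 * 1000);		/* back off between reads */
	}
}

int main(void)
{
	struct desc expected;

	memset(&expected, 0xab, sizeof(expected));
	printf("match: %s\n", wait_for_match(&expected, 1000) ? "yes" : "no");
	return 0;
}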
+void lspcon_resume(struct intel_lspcon *lspcon)
+{
+ lspcon_resume_in_pcon_wa(lspcon);
+
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ DRM_ERROR("LSPCON resume failed\n");
+ else
+ DRM_DEBUG_KMS("LSPCON resume success\n");
+}
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!IS_GEN9(dev_priv)) {
+ DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ return false;
+ }
+
+ lspcon->active = false;
+ lspcon->mode = DRM_LSPCON_MODE_INVALID;
+
+ if (!lspcon_probe(lspcon)) {
+ DRM_ERROR("Failed to probe lspcon\n");
+ return false;
+ }
+
+ /*
+	 * In the SW state machine, let's put the LSPCON in PCON mode only.
+	 * That way it works with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
+ true) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
+
+ if (!intel_dp_read_dpcd(dp)) {
+ DRM_ERROR("LSPCON DPCD read failed\n");
+ return false;
+ }
+
+ lspcon->desc_valid = intel_dp_read_desc(dp);
+
+ DRM_DEBUG_KMS("Success: LSPCON init\n");
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d51ea47..d12ef0047d49 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & LVDS_PORT_EN))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -122,8 +122,7 @@ out:
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
@@ -396,7 +395,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -406,7 +405,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
unsigned int lvds_bpp;
/* Should never happen!! */
- if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -431,7 +430,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -566,7 +565,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
@@ -949,16 +948,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
-static bool intel_lvds_supported(struct drm_device *dev)
+static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
- if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
@@ -984,27 +984,27 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
u8 pin;
- if (!intel_lvds_supported(dev))
+ if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
lvds = I915_READ(lvds_reg);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp.support) {
@@ -1064,12 +1064,13 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev))
+ else if (IS_GEN4(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1157,14 +1158,14 @@ void intel_lvds_init(struct drm_device *dev)
*/
	/* Ironlake: FIXME if this still fails, don't try pipe mode for now */
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, crtc);
+ fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7acbbbf97833..f4429f67a4e3 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
-static u32 get_did(struct intel_opregion *opregion, int i)
-{
- u32 did;
-
- if (i < ARRAY_SIZE(opregion->acpi->didl)) {
- did = opregion->acpi->didl[i];
- } else {
- i -= ARRAY_SIZE(opregion->acpi->didl);
-
- if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
- return 0;
-
- did = opregion->acpi->did2[i];
- }
-
- return did;
-}
-
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
@@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct drm_connector *connector)
+static u32 acpi_display_type(struct intel_connector *connector)
{
u32 display_type;
- switch (connector->connector_type) {
+ switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
display_type = ACPI_DISPLAY_TYPE_VGA;
@@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
default:
- MISSING_CASE(connector->connector_type);
+ MISSING_CASE(connector->base.connector_type);
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
}
@@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_connector *connector;
- acpi_handle handle;
- struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
- unsigned long long device_id;
- acpi_status status;
- u32 temp, max_outputs;
- int i = 0;
-
- handle = ACPI_HANDLE(&pdev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev))
- return;
-
- if (acpi_is_video_device(handle))
- acpi_video_bus = acpi_dev;
- else {
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev->handle)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
- }
-
- if (!acpi_video_bus) {
- DRM_DEBUG_KMS("No ACPI video bus found\n");
- return;
- }
+ struct intel_connector *connector;
+ int i = 0, max_outputs;
+ int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
- max_outputs);
- return;
- }
- status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- set_did(opregion, i++, (u32)(device_id & 0x0f0f));
- }
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ if (i < max_outputs)
+ set_did(opregion, i, device_id);
+ i++;
}
-end:
DRM_DEBUG_KMS("%d outputs detected\n", i);
+ if (i > max_outputs)
+ DRM_ERROR("More than %d outputs in connector list\n",
+ max_outputs);
+
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
- return;
-
-blind_set:
- i = 0;
- list_for_each_entry(connector,
- &dev_priv->drm.mode_config.connector_list, head) {
- int display_type = acpi_display_type(connector);
-
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs in connector list\n",
- max_outputs);
- return;
- }
-
- temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | display_type | i);
- i++;
- }
- goto end;
}
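intel_didl_outputs() now derives each DIDL entry from the connector itself: the ACPI display type provides the base device id and a per-type counter provides the display index. A small sketch of that id construction; the shift and mask values are placeholders rather than the real ACPI_DISPLAY_* definitions:

#include <stdio.h>

#define DISPLAY_TYPE_SHIFT	8
#define DISPLAY_TYPE_MASK	(0xf << DISPLAY_TYPE_SHIFT)
#define DISPLAY_INDEX_SHIFT	0

int main(void)
{
	/* Pretend connector list: two VGA-class outputs and one panel. */
	unsigned int types[] = { 1, 1, 4 };
	unsigned int display_index[16] = { 0 };
	int n = sizeof(types) / sizeof(types[0]);

	for (int i = 0; i < n; i++) {
		unsigned int id = types[i] << DISPLAY_TYPE_SHIFT;
		unsigned int type = (id & DISPLAY_TYPE_MASK) >> DISPLAY_TYPE_SHIFT;

		/* Same type gets an incrementing per-type display index. */
		id |= display_index[type]++ << DISPLAY_INDEX_SHIFT;
		printf("connector %d -> acpi_device_id 0x%04x\n", i, id);
	}
	return 0;
}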
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_connector *connector;
int i = 0;
- u32 disp_id;
-
- /* Initialize the CADL field by duplicating the DIDL values.
- * Technically, this is not always correct as display outputs may exist,
- * but not active. This initialization is necessary for some Clevo
- * laptops that check this field before processing the brightness and
- * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
- * there are less than eight devices. */
- do {
- disp_id = get_did(opregion, i);
- opregion->acpi->cadl[i] = disp_id;
- } while (++i < 8 && disp_id != 0);
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+	 * not always correct as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
}
void intel_opregion_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a24bc8c7889f..fd0e4dac7cc1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
- i915_gem_object_put_unlocked(new_bo);
+ i915_gem_object_put(new_bo);
out_free:
kfree(params);
@@ -1466,10 +1466,12 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
- i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+ i915_gem_object_put(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index be4b4d546fd9..08ab6d762ca4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -366,7 +366,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
- panel->backlight.enabled ? "enabled" : "disabled",
+ enableddisabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a2f751cd187a..d67974eb127a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -55,10 +56,8 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-static void gen9_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
@@ -81,11 +80,9 @@ static void gen9_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DISABLE_DUMMY0);
}
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -107,9 +104,8 @@ static void bxt_init_clock_gating(struct drm_device *dev)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -146,9 +142,8 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -252,8 +247,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
int fsb,
int mem)
{
@@ -319,27 +314,26 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
u32 val;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_I915GM(dev)) {
+ } else if (IS_I915GM(dev_priv)) {
/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
@@ -353,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
return;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n",
- enable ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
@@ -377,10 +370,9 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_device *dev,
+static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -429,9 +421,8 @@ static int vlv_get_fifo_size(struct drm_device *dev,
return size;
}
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -445,9 +436,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -462,9 +452,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -624,11 +613,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -639,27 +628,31 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_crtc *unused_crtc)
+static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -706,7 +699,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
}
}
-static bool g4x_compute_wm0(struct drm_device *dev,
+static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
@@ -715,24 +708,26 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *plane_wm,
int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
- crtc = intel_get_crtc_for_plane(dev, plane);
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -747,7 +742,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->cursor->state->crtc_w * cpp;
+ entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
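The small-buffer method in g4x_compute_wm0() above turns pixel clock, bytes per pixel and latency into FIFO cachelines plus a guard value. A worked example with made-up numbers; the cacheline and guard sizes here are illustrative, not the driver's constants:

#include <stdio.h>

int main(void)
{
	int clock = 148500;		/* pixel clock in kHz */
	int cpp = 4;			/* bytes per pixel */
	int latency_ns = 200;		/* assumed display latency */
	int cacheline_size = 64;	/* bytes per FIFO entry */
	int guard_size = 2;

	/* Bytes fetched during the latency window... */
	int bytes = ((clock * cpp / 1000) * latency_ns) / 1000;
	/* ...rounded up to whole FIFO cachelines, plus the guard. */
	int entries = (bytes + cacheline_size - 1) / cacheline_size;
	int wm = entries + guard_size;

	printf("bytes=%d entries=%d watermark=%d\n", bytes, entries, wm);
	return 0;
}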
@@ -766,7 +761,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
-static bool g4x_check_srwm(struct drm_device *dev,
+static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
@@ -775,13 +770,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
display_wm, cursor_wm);
if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
display_wm, display->max_wm);
return false;
}
if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
@@ -794,15 +789,16 @@ static bool g4x_check_srwm(struct drm_device *dev,
return true;
}
-static bool g4x_compute_srwm(struct drm_device *dev,
+static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -814,12 +810,13 @@ static bool g4x_compute_srwm(struct drm_device *dev,
return false;
}
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -833,11 +830,11 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * cpp * crtc->cursor->state->crtc_w;
+ entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
- return g4x_check_srwm(dev,
+ return g4x_check_srwm(dev_priv,
*display_wm, *cursor_wm,
display, cursor);
}
@@ -937,10 +934,8 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_device *dev)
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1065,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
- const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ const int sr_fifo_size =
+ INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
struct intel_plane *plane;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
@@ -1095,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+ wm_state->num_levels = dev_priv->wm.max_level + 1;
wm_state->num_active_planes = 0;
@@ -1183,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
@@ -1327,20 +1324,19 @@ static void vlv_merge_wm(struct drm_device *dev,
}
}
-static void vlv_update_wm(struct drm_crtc *crtc)
+static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
struct vlv_wm_values wm = {};
- vlv_compute_wm(intel_crtc);
+ vlv_compute_wm(crtc);
vlv_merge_wm(dev, &wm);
if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
return;
}
@@ -1356,9 +1352,9 @@ static void vlv_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(intel_crtc, &wm);
+ vlv_write_wm_values(crtc, &wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
@@ -1382,30 +1378,29 @@ static void vlv_update_wm(struct drm_crtc *crtc)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void g4x_update_wm(struct drm_crtc *crtc)
+static void g4x_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
- if (g4x_compute_wm0(dev, PIPE_A,
+ if (g4x_compute_wm0(dev_priv, PIPE_A,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, PIPE_B,
+ if (g4x_compute_wm0(dev_priv, PIPE_B,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
+ g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
@@ -1440,25 +1435,27 @@ static void g4x_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i965_update_wm(struct drm_crtc *unused_crtc)
+static void i965_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ int hdisplay = crtc->config->pipe_src_w;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1476,7 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * crtc->cursor->state->crtc_w;
+ cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1514,34 +1511,38 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
+ crtc = intel_get_crtc_for_plane(dev_priv, 0);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1552,18 +1553,23 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
+ crtc = intel_get_crtc_for_plane(dev_priv, 1);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1579,10 +1585,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- if (IS_I915GM(dev) && enabled) {
+ if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->state->fb);
+ obj = intel_fb_obj(enabled->base.primary->state->fb);
/* self-refresh seems busted with untiled */
if (!i915_gem_object_is_tiled(obj))
@@ -1598,19 +1604,24 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
+ if (HAS_FW_BLC(dev_priv) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &enabled->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
+ int hdisplay = enabled->config->pipe_src_w;
+ int cpp;
unsigned long line_time_us;
int entries;
- if (IS_I915GM(dev) || IS_I945GM(dev))
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1623,7 +1634,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
@@ -1647,23 +1658,22 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
+ dev_priv->display.get_fifo_size(dev_priv, 0),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1852,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
-static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 3072;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
}
-static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
- int level, bool is_sprite)
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -1879,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
return level == 0 ? 63 : 255;
}
-static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
- int level)
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
@@ -1903,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1911,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev)->num_pipes;
+ fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_INFO(dev)->gen <= 6)
+ if (INTEL_GEN(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -1934,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
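Taken together with ilk_display_fifo_size() and the register clamp, the budget for a single plane works out roughly as below; this is only a simplified sketch, with the generation, pipe count and register maximum passed in as assumed example values rather than queried from device info, and the sprite-specific branch omitted.

#include <stdio.h>

static unsigned int plane_wm_max(unsigned int fifo_size, int gen,
				 int num_pipes_active, int level,
				 unsigned int reg_max)
{
	if (level == 0 || num_pipes_active > 1) {
		fifo_size /= num_pipes_active;
		/* non-SR FIFO is half of the SR FIFO on ILK/SNB */
		if (gen <= 6)
			fifo_size /= 2;
	}
	/* clamp to what the watermark register can hold */
	return fifo_size < reg_max ? fifo_size : reg_max;
}

int main(void)
{
	/* e.g. a gen6 part: 512-block FIFO, two active pipes, level 0 */
	printf("max = %u blocks\n", plane_wm_max(512, 6, 2, 0, 127));
	return 0;
}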
/* Calculate the maximum cursor plane watermark */
@@ -1947,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev, level);
+ return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1959,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
-static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev, level, false);
- max->spr = ilk_plane_wm_reg_max(dev, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev, level);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
@@ -2076,14 +2089,13 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
uint32_t val;
int ret, i;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2155,7 +2167,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2165,14 +2177,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2182,42 +2194,44 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
-static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
}
-int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 7;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return 3;
else
return 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const uint16_t wm[8])
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2232,7 +2246,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
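For readability, the conversion above normalizes every latency to tenths of a microsecond before it is printed (the *10 and *5 factors suggest as much). A small sketch of that normalization, with assumed raw register values:

#include <stdbool.h>
#include <stdio.h>

static unsigned int wm_latency_to_tenths_us(unsigned int raw, int level,
					    bool is_gen9)
{
	if (is_gen9)
		return raw * 10;	/* gen9 values are in us */
	if (level > 0)
		return raw * 5;		/* older WM1+ values are in 0.5us units */
	return raw;			/* assume older WM0 is already in 0.1us units */
}

int main(void)
{
	unsigned int wm[5] = { 7, 4, 8, 16, 0 };	/* assumed raw values */

	for (int level = 0; level < 5; level++) {
		unsigned int latency = wm_latency_to_tenths_us(wm[level],
							       level, false);

		printf("WM%d latency %u (%u.%u usec)\n",
		       level, wm[level], latency / 10, latency % 10);
	}
	return 0;
}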
@@ -2246,7 +2260,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(&dev_priv->drm);
+ int level, max_level = ilk_wm_max_level(dev_priv);
if (wm[0] >= min)
return false;
@@ -2258,9 +2272,8 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return true;
}
-static void snb_wm_latency_quirk(struct drm_device *dev)
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2275,39 +2288,35 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
return;
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev))
- snb_wm_latency_quirk(dev);
+ if (IS_GEN6(dev_priv))
+ snb_wm_latency_quirk(dev_priv);
}
-static void skl_setup_wm_latency(struct drm_device *dev)
+static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
@@ -2345,7 +2354,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev), usable_level;
+ int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
@@ -2377,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2390,13 +2399,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev, 1, &max);
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
@@ -2432,7 +2441,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
* Start with the final, target watermarks, then combine with the
@@ -2516,16 +2525,16 @@ static void ilk_wm_merge(struct drm_device *dev,
struct intel_pipe_wm *merged)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+ merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2556,7 +2565,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -2577,7 +2586,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
@@ -2588,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2614,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -2625,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -2656,7 +2666,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -2775,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2801,7 +2810,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -2831,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -2879,6 +2888,21 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ IS_KABYLAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
@@ -2999,9 +3023,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+ struct intel_crtc_state *cstate;
+ struct skl_plane_wm *wm;
enum pipe pipe;
- int level, plane;
+ int level, latency;
if (!intel_has_sagv(dev_priv))
return false;
@@ -3019,27 +3046,37 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
pipe = ffs(intel_state->active_crtcs) - 1;
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ cstate = to_intel_crtc_state(crtc->base.state);
- if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+
/* Skip this plane if it's not enabled */
- if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ if (!wm->wm[0].plane_en)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev);
- intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ for (level = ilk_wm_max_level(dev_priv);
+ !wm->wm[level].plane_en; --level)
{ }
+ latency = dev_priv->wm.skl_latency[level];
+
+ if (skl_needs_memory_bw_wa(intel_state) &&
+ plane->base.state->fb->modifier ==
+ I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
/*
* If any of the planes on this pipe don't enable wm levels
* that incur memory latencies higher than 30µs, we can't enable
* the SAGV
*/
- if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ if (latency < SKL_SAGV_BLOCK_TIME)
return false;
}
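The loop above boils down to a simple per-plane test: SAGV may block memory accesses for up to SKL_SAGV_BLOCK_TIME, so every enabled plane's highest watermark level must cover at least that much latency, with 15us added for X-tiled framebuffers when the bandwidth workaround applies. A standalone sketch of that decision, with made-up plane and latency values:

#include <stdbool.h>
#include <stdio.h>

#define SKL_SAGV_BLOCK_TIME	30	/* us, assumed to match the driver constant */

struct fake_plane {
	bool enabled;
	bool x_tiled;
	int highest_wm_level;	/* highest enabled watermark level */
};

static bool can_enable_sagv(const struct fake_plane *planes, int n,
			    const int *latency_us, bool needs_bw_wa)
{
	for (int i = 0; i < n; i++) {
		int latency;

		if (!planes[i].enabled)
			continue;

		latency = latency_us[planes[i].highest_wm_level];

		/* the memory bandwidth WA adds 15us for X-tiled framebuffers */
		if (needs_bw_wa && planes[i].x_tiled)
			latency += 15;

		if (latency < SKL_SAGV_BLOCK_TIME)
			return false;
	}
	return true;
}

int main(void)
{
	const int latency_us[8] = { 2, 4, 8, 16, 32, 48, 64, 80 };
	struct fake_plane planes[2] = {
		{ .enabled = true, .x_tiled = true,  .highest_wm_level = 4 },
		{ .enabled = true, .x_tiled = false, .highest_wm_level = 3 },
	};

	/* the second plane tops out at 16us < 30us, so SAGV stays off */
	printf("SAGV allowed: %d\n",
	       can_enable_sagv(planes, 2, latency_us, true));
	return 0;
}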
@@ -3058,7 +3095,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc = cstate->base.crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
- int pipe = to_intel_crtc(for_crtc)->pipe;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3086,7 +3122,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ /*
+ * alloc may be cleared by clear_intel_crtc_state(),
+ * so copy it from the old state to be sure
+ */
+ *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
return;
}
@@ -3129,7 +3169,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -3173,7 +3213,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
- if (intel_rotation_90_or_270(pstate->base.rotation))
+ if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3204,7 +3244,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
@@ -3231,49 +3271,39 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ unsigned *plane_data_rate,
+ unsigned *plane_y_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_plane *plane;
+ struct drm_plane *plane;
const struct intel_plane *intel_plane;
- struct drm_plane_state *pstate;
+ const struct drm_plane_state *pstate;
unsigned int rate, total_data_rate = 0;
int id;
- int i;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- for_each_plane_in_state(state, plane, pstate, i) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
id = skl_wm_plane_id(to_intel_plane(plane));
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != intel_crtc->pipe)
- continue;
-
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- intel_cstate->wm.skl.plane_data_rate[id] = rate;
+ plane_data_rate[id] = rate;
+
+ total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
- }
-
- /* Calculate CRTC's total data rate from cached values */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- int id = skl_wm_plane_id(intel_plane);
+ plane_y_data_rate[id] = rate;
- /* packed/uv */
- total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
- total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+ total_data_rate += rate;
}
return total_data_rate;
@@ -3297,14 +3327,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return 0;
/* For Non Y-tile return 8-blocks */
- if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
@@ -3318,7 +3348,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
else
plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
case 1:
min_scanlines = 32;
@@ -3342,6 +3372,30 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
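Worked example for the Y-tile branch above: with a 1920-pixel-wide 32bpp plane and four minimum scanlines, DIV_ROUND_UP(4 * 1920 * 4, 512) = 60, and 60 * 4 / 4 + 3 = 63 blocks. A tiny C sketch of the same arithmetic (the inputs are assumptions, and min_scanlines is passed in rather than derived from rotation and bpp):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int y_tile_min_ddb(int src_w, int plane_bpp, int min_scanlines)
{
	return DIV_ROUND_UP(4 * src_w * plane_bpp, 512) * min_scanlines / 4 + 3;
}

int main(void)
{
	printf("%d blocks\n", y_tile_min_ddb(1920, 4, 4));	/* prints 63 */
	return 0;
}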
+static void
+skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
+ uint16_t *minimum, uint16_t *y_minimum)
+{
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int id = skl_wm_plane_id(intel_plane);
+
+ if (id == PLANE_CURSOR)
+ continue;
+
+ if (!pstate->visible)
+ continue;
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
+
+ minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+}
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -3350,25 +3404,26 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
- struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
- uint16_t alloc_size, start, cursor_blocks;
- uint16_t *minimum = cstate->wm.skl.minimum_blocks;
- uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
+ struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ uint16_t alloc_size, start;
+ uint16_t minimum[I915_MAX_PLANES] = {};
+ uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
int num_active;
int id, i;
+ unsigned plane_data_rate[I915_MAX_PLANES] = {};
+ unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+
+ /* Clear the partitioning for disabled planes. */
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
if (WARN_ON(!state))
return 0;
if (!cstate->base.active) {
- ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ alloc->start = alloc->end = 0;
return 0;
}
@@ -3379,57 +3434,43 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- cursor_blocks = skl_cursor_allocation(num_active);
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
-
- alloc_size -= cursor_blocks;
-
- /* 1. Allocate the mininum required blocks for each active plane */
- for_each_plane_in_state(state, plane, pstate, i) {
- intel_plane = to_intel_plane(plane);
- id = skl_wm_plane_id(intel_plane);
-
- if (intel_plane->pipe != pipe)
- continue;
-
- if (!to_intel_plane_state(pstate)->base.visible) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
+ skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
- }
+ /*
+ * 1. Allocate the minimum required blocks for each active plane
+ * and allocate the cursor; the cursor doesn't require an extra
+ * allocation proportional to the data rate.
+ */
- for (i = 0; i < PLANE_CURSOR; i++) {
+ for (i = 0; i < I915_MAX_PLANES; i++) {
alloc_size -= minimum[i];
alloc_size -= y_minimum[i];
}
+ ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
/*
* 2. Distribute the remaining space in proportion to the amount of
* data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ plane_y_data_rate);
if (total_data_rate == 0)
return 0;
start = alloc->start;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ for (id = 0; id < I915_MAX_PLANES; id++) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- int id = skl_wm_plane_id(intel_plane);
- data_rate = cstate->wm.skl.plane_data_rate[id];
+ if (id == PLANE_CURSOR)
+ continue;
+
+ data_rate = plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3451,7 +3492,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[id];
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
@@ -3468,12 +3509,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
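Step 2 above is a straightforward proportional split: after every plane's minimum is reserved, the leftover blocks are handed out according to each plane's share of the total data rate, and the start/end offsets are laid out back to back. A simplified sketch with assumed numbers (the cursor, the separate y-plane rates, and the early return for a zero total rate are ignored here):

#include <stdio.h>

#define NPLANES 3

int main(void)
{
	unsigned int minimum[NPLANES]   = { 8, 8, 8 };		/* assumed */
	unsigned int data_rate[NPLANES] = { 800, 200, 0 };	/* assumed */
	unsigned int alloc_start = 0, alloc_end = 512;		/* pipe's DDB slice */
	unsigned int alloc_size, total_rate = 0, start;

	alloc_size = alloc_end - alloc_start;
	for (int i = 0; i < NPLANES; i++) {
		alloc_size -= minimum[i];
		total_rate += data_rate[i];
	}

	start = alloc_start;
	for (int i = 0; i < NPLANES; i++) {
		unsigned int blocks = minimum[i] +
			(unsigned int)((unsigned long long)alloc_size *
				       data_rate[i] / total_rate);

		printf("plane %d: ddb [%u, %u)\n", i, start, start + blocks);
		start += blocks;
	}
	return 0;
}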
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
- /* TODO: Take into account the scalers once we support them */
- return config->base.adjusted_mode.crtc_clock;
-}
-
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and cpp should always be <= 8, so that
@@ -3524,7 +3559,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
downscale_amount = skl_plane_downscale_amount(pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
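The shift by 16 above is 16.16 fixed-point math: the downscale factor is kept as a 16.16 value, so multiplying and shifting right by 16 scales the pipe's pixel rate by the plane's downscale ratio. A standalone sketch (the helper below is an assumption standing in for skl_plane_downscale_amount(), not the driver's code):

#include <stdio.h>

/* ratio in 16.16 fixed point, clamped to at least 1.0 (no credit for upscaling) */
static unsigned int fp16_ratio(unsigned int src, unsigned int dst)
{
	unsigned long long r = ((unsigned long long)src << 16) / dst;

	return r > 0x10000 ? (unsigned int)r : 0x10000;
}

int main(void)
{
	unsigned int pipe_rate_khz = 148500;
	/* a 3840x2160 source scanned out into a 1920x1080 plane window */
	unsigned int downscale = (unsigned int)
		(((unsigned long long)fp16_ratio(3840, 1920) *
		  fp16_ratio(2160, 1080)) >> 16);
	unsigned int plane_rate = (unsigned int)
		(((unsigned long long)pipe_rate_khz * downscale) >> 16);

	printf("downscale=0x%x plane_rate=%u kHz\n", downscale, plane_rate);
	return 0;
}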
@@ -3553,22 +3588,28 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
uint32_t y_tile_minimum, y_min_scanlines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
+ if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3580,23 +3621,27 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
case 2:
y_min_scanlines = 8;
break;
- default:
- WARN(1, "Unsupported pixel depth for rotation");
case 4:
y_min_scanlines = 4;
break;
+ default:
+ MISSING_CASE(cpp);
+ return -EINVAL;
}
} else {
y_min_scanlines = 4;
}
+ if (apply_memory_bw_wa)
+ y_min_scanlines *= 2;
+
plane_bytes_per_line = width * cpp;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
plane_blocks_per_line =
DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
plane_blocks_per_line /= y_min_scanlines;
- } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+ } else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+ 1;
} else {
@@ -3611,11 +3656,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
selected_result = max(method2, y_tile_minimum);
} else {
- if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
+ (plane_bytes_per_line / 512 < 1))
+ selected_result = method2;
+ else if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
selected_result = method1;
@@ -3625,8 +3673,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
if (level >= 1 && level <= 7) {
- if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
res_blocks += y_tile_minimum;
res_lines += y_min_scanlines;
} else {
@@ -3665,67 +3713,52 @@ static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
+ struct intel_plane *intel_plane,
int level,
struct skl_wm_level *result)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *intel_pstate;
+ struct drm_plane *plane = &intel_plane->base;
+ struct intel_plane_state *intel_pstate = NULL;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
+ int i = skl_wm_plane_id(intel_plane);
+
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
/*
- * We'll only calculate watermarks for planes that are actually
- * enabled, so make sure all other planes are set as disabled.
+ * Note: If we start supporting multiple pending atomic commits against
* the same planes/CRTCs in the future, plane->state will no longer be
+ * the correct pre-state to use for the calculations here and we'll
+ * need to change where we get the 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit against
+ * a CRTC. Even if the plane isn't modified by this transaction and we
+ * don't have a plane lock, we still have the CRTC's lock, so we know
+ * that no other transactions are racing with us to update it.
*/
- memset(result, 0, sizeof(*result));
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
- for_each_intel_plane_mask(&dev_priv->drm,
- intel_plane,
- cstate->base.plane_mask) {
- int i = skl_wm_plane_id(intel_plane);
-
- plane = &intel_plane->base;
- intel_pstate = NULL;
- if (state)
- intel_pstate =
- intel_atomic_get_existing_plane_state(state,
- intel_plane);
+ WARN_ON(!intel_pstate->base.fb);
- /*
- * Note: If we start supporting multiple pending atomic commits
- * against the same planes/CRTC's in the future, plane->state
- * will no longer be the correct pre-state to use for the
- * calculations here and we'll need to change where we get the
- * 'unchanged' plane data from.
- *
- * For now this is fine because we only allow one queued commit
- * against a CRTC. Even if the plane isn't modified by this
- * transaction and we don't have a plane lock, we still have
- * the CRTC's lock, so we know that no other transactions are
- * racing with us to update it.
- */
- if (!intel_pstate)
- intel_pstate = to_intel_plane_state(plane->state);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- WARN_ON(!intel_pstate->base.fb);
-
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
-
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_pstate,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i],
- &result->plane_en[i]);
- if (ret)
- return ret;
- }
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b,
+ &result->plane_res_l,
+ &result->plane_en);
+ if (ret)
+ return ret;
return 0;
}
@@ -3733,32 +3766,28 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ uint32_t pixel_rate;
+
if (!cstate->base.active)
return 0;
- if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+ pixel_rate = ilk_pipe_pixel_rate(cstate);
+
+ if (WARN_ON(pixel_rate == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- skl_pipe_pixel_rate(cstate));
+ pixel_rate);
}
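The expression above is the scanline time: htotal * 1000 / pixel_rate_khz gives microseconds per line, and the factor of 8 suggests the linetime register field is in 1/8 us units (an assumption; the mode values below are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int htotal = 2200;
	unsigned int pixel_rate_khz = 148500;
	unsigned int linetime = DIV_ROUND_UP(8 * htotal * 1000, pixel_rate_khz);

	/* 2200 pixels at 148.5 MHz is about 14.8us per line -> 119 in 1/8 us units */
	printf("linetime = %u\n", linetime);
	return 0;
}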
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
-
if (!cstate->base.active)
return;
/* Until we know more, just disable transition WMs */
- for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
- int i = skl_wm_plane_id(intel_plane);
-
- trans_wm->plane_en[i] = false;
- }
+ trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
@@ -3767,77 +3796,34 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
+ int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
- for (level = 0; level <= max_level; level++) {
- ret = skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
- if (ret)
- return ret;
- }
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
-
- skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
-
- return 0;
-}
-
-static void skl_compute_wm_results(struct drm_device *dev,
- struct skl_pipe_wm *p_wm,
- struct skl_wm_values *r,
- struct intel_crtc *intel_crtc)
-{
- int level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = intel_crtc->pipe;
- uint32_t temp;
- int i;
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[i] <<
- PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[i];
- if (p_wm->wm[level].plane_en[i])
- temp |= PLANE_WM_EN;
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- r->plane[pipe][i][level] = temp;
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
+ wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+
+ for (level = 0; level <= max_level; level++) {
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ intel_plane, level,
+ &wm->wm[level]);
+ if (ret)
+ return ret;
}
-
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
-
- if (p_wm->wm[level].plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane[pipe][PLANE_CURSOR][level] = temp;
-
+ skl_compute_transition_wm(cstate, &wm->trans_wm);
}
+ pipe_wm->linetime = skl_compute_linetime_wm(cstate);
- /* transition WMs */
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[i];
- if (p_wm->trans_wm.plane_en[i])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][i] = temp;
- }
-
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
- if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][PLANE_CURSOR] = temp;
-
- r->wm_linetime[pipe] = p_wm->linetime;
+ return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
@@ -3850,53 +3836,77 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
- int plane)
+static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ uint32_t val = 0;
+
+ if (level->plane_en) {
+ val |= PLANE_WM_EN;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+ }
+
+ I915_WRITE(reg, val);
+}
+
+static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb,
+ int plane)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(PLANE_WM(pipe, plane, level),
- wm->plane[pipe][plane][level]);
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ &wm->wm[level]);
}
- I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ &wm->trans_wm);
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &wm->ddb.plane[pipe][plane]);
+ &ddb->plane[pipe][plane]);
skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &wm->ddb.y_plane[pipe][plane]);
+ &ddb->y_plane[pipe][plane]);
}
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm)
+static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(CUR_WM(pipe, level),
- wm->plane[pipe][PLANE_CURSOR][level]);
+ skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+ &wm->wm[level]);
}
- I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &wm->ddb.plane[pipe][PLANE_CURSOR]);
+ &ddb->plane[pipe][PLANE_CURSOR]);
}
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
- return new->pipe[pipe].start == old->pipe[pipe].start &&
- new->pipe[pipe].end == old->pipe[pipe].end;
+ if (l1->plane_en != l2->plane_en)
+ return false;
+
+ /* If both planes aren't enabled, the rest shouldn't matter */
+ if (!l1->plane_en)
+ return true;
+
+ return (l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -3905,35 +3915,26 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore)
{
- struct drm_device *dev = state->dev;
- struct intel_crtc *intel_crtc;
- enum pipe otherp;
-
- for_each_intel_crtc(dev, intel_crtc) {
- otherp = intel_crtc->pipe;
-
- if (otherp == pipe)
- continue;
+ int i;
- if (skl_ddb_entries_overlap(&new->pipe[pipe],
- &old->pipe[otherp]))
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (i != ignore && entries[i] &&
+ skl_ddb_entries_overlap(ddb, entries[i]))
return true;
- }
return false;
}
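With the reworked signature above, the check is just an interval-overlap scan over the other pipes' committed DDB entries, skipping the pipe being reallocated. A small sketch using simplified stand-in types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };

static bool entries_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static bool allocation_overlaps(const struct ddb_entry **entries, int n,
				const struct ddb_entry *ddb, int ignore)
{
	for (int i = 0; i < n; i++)
		if (i != ignore && entries[i] &&
		    entries_overlap(ddb, entries[i]))
			return true;
	return false;
}

int main(void)
{
	struct ddb_entry pipe_a = { 0, 200 }, pipe_b = { 200, 400 };
	const struct ddb_entry *entries[3] = { &pipe_a, &pipe_b, NULL };
	struct ddb_entry new_b = { 150, 400 };	/* would now overlap pipe A */

	printf("overlap: %d\n", allocation_overlaps(entries, 3, &new_b, 1));
	return 0;
}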
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
- struct skl_ddb_allocation *ddb, /* out */
+ const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
+ struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
@@ -3941,7 +3942,7 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
if (ret)
return ret;
- if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
+ if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
*changed = false;
else
*changed = true;
@@ -3962,7 +3963,7 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
-int
+static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
@@ -3980,7 +3981,7 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
- drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
id = skl_wm_plane_id(to_intel_plane(plane));
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
@@ -4050,6 +4051,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
intel_state->wm_results.dirty_pipes = ~0;
}
+ /*
+ * We're not recomputing for the pipes not included in the commit, so
+ * make sure we start with the current state.
+ */
+ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
struct intel_crtc_state *cstate;
@@ -4074,19 +4081,50 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
struct skl_wm_values *src,
enum pipe pipe)
{
- dst->wm_linetime[pipe] = src->wm_linetime[pipe];
- memcpy(dst->plane[pipe], src->plane[pipe],
- sizeof(dst->plane[pipe]));
- memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
- sizeof(dst->plane_trans[pipe]));
-
- dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
sizeof(dst->ddb.y_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
}
+static void
+skl_print_wm_changes(const struct drm_atomic_state *state)
+{
+ const struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ const struct drm_crtc *crtc;
+ const struct drm_crtc_state *cstate;
+ const struct intel_plane *intel_plane;
+ const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
+ const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ int id;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ const struct skl_ddb_entry *old, *new;
+
+ id = skl_wm_plane_id(intel_plane);
+ old = &old_ddb->plane[pipe][id];
+ new = &new_ddb->plane[pipe][id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ intel_plane->base.base.id,
+ intel_plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
+ }
+ }
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -4129,13 +4167,14 @@ skl_compute_wm(struct drm_atomic_state *state)
* no suitable watermark values can be found.
*/
for_each_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
+ const struct skl_pipe_wm *old_pipe_wm =
+ &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
- &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
+ &results->ddb, &changed);
if (ret)
return ret;
@@ -4147,44 +4186,51 @@ skl_compute_wm(struct drm_atomic_state *state)
continue;
intel_cstate->update_wm_pre = true;
- skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
}
+ skl_print_wm_changes(state);
+
return 0;
}
-static void skl_update_wm(struct drm_crtc *crtc)
+static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &dev_priv->wm.skl_results;
- struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ enum pipe pipe = crtc->pipe;
+ int plane;
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
- intel_crtc->wm.active.skl = *pipe_wm;
+ I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ for_each_universal_plane(dev_priv, pipe, plane)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
- /*
- * If this pipe isn't active already, we're going to be enabling it
- * very soon. Since it's safe to update a pipe's ddb allocation while
- * the pipe's shut off, just do so here. Already active pipes will have
- * their watermarks updated once we update their planes.
- */
- if (crtc->state->active_changed) {
- int plane;
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+}
+
+static void skl_initial_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *results = &state->wm_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
+ enum pipe pipe = intel_crtc->pipe;
- for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
- skl_write_plane_wm(intel_crtc, results, plane);
+ if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ return;
- skl_write_cursor_wm(intel_crtc, results);
- }
+ mutex_lock(&dev_priv->wm.wm_mutex);
+
+ if (cstate->base.active_changed)
+ skl_atomic_update_crtc_wm(state, cstate);
skl_copy_wm_for_pipe(hw_vals, results, pipe);
@@ -4224,7 +4270,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 &&
+ if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
@@ -4242,7 +4288,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4253,7 +4300,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4266,114 +4314,75 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void skl_pipe_wm_active_state(uint32_t val,
- struct skl_pipe_wm *active,
- bool is_transwm,
- bool is_cursor,
- int i,
- int level)
+static inline void skl_wm_level_from_reg_val(uint32_t val,
+ struct skl_wm_level *level)
{
- bool is_enabled = (val & PLANE_WM_EN) != 0;
-
- if (!is_transwm) {
- if (!is_cursor) {
- active->wm[level].plane_en[i] = is_enabled;
- active->wm[level].plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
- active->wm[level].plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- } else {
- if (!is_cursor) {
- active->trans_wm.plane_en[i] = is_enabled;
- active->trans_wm.plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
- active->trans_wm.plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- }
+ level->plane_en = val & PLANE_WM_EN;
+ level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
+ level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
}
-static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, i, max_level;
- uint32_t temp;
-
- max_level = ilk_wm_max_level(dev);
-
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane[pipe][i][level] =
- I915_READ(PLANE_WM(pipe, i, level));
- hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
- }
-
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
- hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
+ int level, id, max_level;
+ uint32_t val;
- if (!intel_crtc->active)
- return;
+ max_level = ilk_wm_max_level(dev_priv);
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ id = skl_wm_plane_id(intel_plane);
+ wm = &out->planes[id];
- active->linetime = hw->wm_linetime[pipe];
+ for (level = 0; level <= max_level; level++) {
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, id, level));
+ else
+ val = I915_READ(CUR_WM(pipe, level));
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane[pipe][i][level];
- skl_pipe_wm_active_state(temp, active, false,
- false, i, level);
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- temp = hw->plane[pipe][PLANE_CURSOR][level];
- skl_pipe_wm_active_state(temp, active, false, true, i, level);
- }
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane_trans[pipe][i];
- skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ else
+ val = I915_READ(CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- temp = hw->plane_trans[pipe][PLANE_CURSOR];
- skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+ if (!intel_crtc->active)
+ return;
- intel_crtc->wm.active.skl = *active;
+ out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- skl_pipe_wm_get_hw_state(crtc);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+
+ skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+
+ if (intel_crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
+ }
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
@@ -4400,7 +4409,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -4422,7 +4431,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
active->linetime = hw->wm_linetime[pipe];
} else {
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* For inactive pipes, all watermark levels
@@ -4534,11 +4543,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
plane->wm.fifo_size = 63;
break;
case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
break;
}
}
@@ -4603,15 +4612,15 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev))
+ else if (IS_IVYBRIDGE(dev_priv))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -4651,9 +4660,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_crtc *crtc)
+void intel_update_watermarks(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -5355,6 +5364,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5376,7 +5386,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
@@ -5392,9 +5402,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
- /* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaRsUseTimeoutMode:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -5422,6 +5431,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5438,7 +5448,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
@@ -5498,6 +5508,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -5531,7 +5542,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5568,10 +5579,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
-
reset_rps(dev_priv, gen6_set_rps);
rc6vids = 0;
@@ -5861,7 +5868,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+ i915_gem_object_put(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
@@ -5980,6 +5987,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6006,7 +6014,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -6068,6 +6076,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6107,7 +6116,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6790,7 +6799,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
- rcs = &dev_priv->engine[RCS];
+ rcs = dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
@@ -6843,10 +6852,8 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
}
}
-static void ibx_init_clock_gating(struct drm_device *dev)
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
@@ -6855,9 +6862,8 @@ static void ibx_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
-static void g4x_disable_trickle_feed(struct drm_device *dev)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6870,10 +6876,8 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
-static void ilk_init_lp_watermarks(struct drm_device *dev)
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6884,9 +6888,8 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
*/
}
-static void ironlake_init_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6918,7 +6921,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/*
* Based on the document from hardware guys the following bits
@@ -6927,7 +6930,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -6953,14 +6956,13 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:ilk */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- ibx_init_clock_gating(dev);
+ ibx_init_clock_gating(dev_priv);
}
-static void cpt_init_clock_gating(struct drm_device *dev)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -6995,9 +6997,8 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
-static void gen6_check_mch_setup(struct drm_device *dev)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -7006,9 +7007,8 @@ static void gen6_check_mch_setup(struct drm_device *dev)
tmp);
}
-static void gen6_init_clock_gating(struct drm_device *dev)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -7035,7 +7035,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -7096,11 +7096,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- cpt_init_clock_gating(dev);
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -7121,15 +7121,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void lpt_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -7140,11 +7138,9 @@ static void lpt_init_clock_gating(struct drm_device *dev)
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_device *dev)
+static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7175,11 +7171,9 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_device *dev)
+static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
@@ -7196,11 +7190,9 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_device *dev)
+static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:skl */
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -7211,12 +7203,11 @@ static void skylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7259,14 +7250,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void haswell_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -7315,15 +7304,14 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -7337,7 +7325,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -7353,7 +7341,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
@@ -7380,7 +7368,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -7410,16 +7398,14 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- if (!HAS_PCH_NOP(dev))
- cpt_init_clock_gating(dev);
+ if (!HAS_PCH_NOP(dev_priv))
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7498,10 +7484,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_device *dev)
+static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7534,9 +7518,8 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
-static void g4x_init_clock_gating(struct drm_device *dev)
+static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7547,7 +7530,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
@@ -7558,13 +7541,11 @@ static void g4x_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:g4x */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_device *dev)
+static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
@@ -7577,10 +7558,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_device *dev)
+static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
@@ -7594,16 +7573,15 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
@@ -7619,10 +7597,8 @@ static void gen3_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i85x_init_clock_gating(struct drm_device *dev)
+static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
@@ -7633,10 +7609,8 @@ static void i85x_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i830_init_clock_gating(struct drm_device *dev)
+static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(MEM_MODE,
@@ -7644,20 +7618,18 @@ static void i830_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
-void intel_init_clock_gating(struct drm_device *dev)
+void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_device *dev)
+void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_LPT(dev))
- lpt_suspend_hw(dev);
+ if (HAS_PCH_LPT(dev_priv))
+ lpt_suspend_hw(dev_priv);
}
-static void nop_init_clock_gating(struct drm_device *dev)
+static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
@@ -7712,29 +7684,28 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_device *dev)
+void intel_init_pm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
intel_fbc_init(dev_priv);
/* For cxsr */
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev_priv))
+ i915_pineview_get_mem_freq(dev_priv);
+ else if (IS_GEN5(dev_priv))
+ i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_setup_wm_latency(dev);
- dev_priv->display.update_wm = skl_update_wm;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_setup_wm_latency(dev_priv);
+ dev_priv->display.initial_watermarks = skl_initial_wm;
+ dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
- } else if (HAS_PCH_SPLIT(dev)) {
- ilk_setup_wm_latency(dev);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7747,14 +7718,14 @@ void intel_init_pm(struct drm_device *dev)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev)) {
- vlv_setup_wm_latency(dev);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -7768,15 +7739,15 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
dev_priv->display.update_wm = g4x_update_wm;
- } else if (IS_GEN4(dev)) {
+ } else if (IS_GEN4(dev_priv)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev)) {
- if (INTEL_INFO(dev)->num_pipes == 1) {
+ } else if (IS_GEN2(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -8024,5 +7995,4 @@ void intel_pm_setup(struct drm_device *dev)
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
- atomic_set(&dev_priv->pm.atomic_seq, 0);
}
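
Note on the DDB rework above: skl_ddb_entries_overlap() treats each allocation as a half-open block range [start, end), and the reworked skl_ddb_allocation_overlaps() simply tests the proposed range against every other pipe's current entry, skipping the pipe that is being reallocated. The standalone sketch below restates that logic with simplified stand-in types and names (ddb_entry, entries_overlap, allocation_overlaps are illustrative, not the driver's structures); it is not part of the patch.

/*
 * Illustrative sketch only -- not part of the patch above. Mirrors the
 * half-open interval test in skl_ddb_entries_overlap() and the
 * "skip the pipe being reallocated" loop in skl_ddb_allocation_overlaps().
 */
#include <stdbool.h>
#include <stddef.h>

struct ddb_entry {
	unsigned int start;	/* first DDB block, inclusive */
	unsigned int end;	/* one past the last DDB block */
};

static bool entries_overlap(const struct ddb_entry *a,
			    const struct ddb_entry *b)
{
	/* [start, end) ranges overlap iff each starts before the other ends */
	return a->start < b->end && b->start < a->end;
}

static bool allocation_overlaps(const struct ddb_entry *const *entries,
				size_t num_pipes,
				const struct ddb_entry *ddb,
				size_t ignore)
{
	size_t i;

	for (i = 0; i < num_pipes; i++)
		if (i != ignore && entries[i] &&
		    entries_overlap(ddb, entries[i]))
			return true;

	return false;
}

With half-open ranges, adjacency (a->end == b->start) counts as non-overlapping, which is what back-to-back per-pipe DDB allocations rely on.
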
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 108ba1e5d658..7b488e2793d9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
@@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
@@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
/* On HSW+ after we enable PSR on source it will activate it
* as soon as it match configure idle_frame count. So
* we just actually enable it here on activation time.
@@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
@@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
@@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
@@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
@@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
}
/* Disable PSR on Source */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
@@ -827,17 +827,17 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ed9955dce156..aeb637dc1fdf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u64 acthd;
-
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
- else
- acthd = I915_READ(ACTHD);
-
- return acthd;
-}
-
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
- I915_WRITE_CTL(engine,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_VALID);
+ I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@@ -666,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(req);
+ ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
@@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- }
-
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
- /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
- }
-
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:skl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
- }
-
- /* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
-
- /* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@@ -1271,91 +1213,64 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static int gen8_rcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 8);
- if (ret)
- return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, lower_32_bits(gtt_offset));
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *out++ = lower_32_bits(gtt_offset);
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = 0;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen8_xcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 6);
- if (ret)
- return ret;
-
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring,
- (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- lower_32_bits(gtt_offset) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen6_signal(struct drm_i915_gem_request *req)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
- if (ret)
- return ret;
+ enum intel_engine_id id;
+ int num_rings = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@@ -1363,101 +1278,80 @@ static int gen6_signal(struct drm_i915_gem_request *req)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, mbox_reg);
- intel_ring_emit(ring, req->fence.seqno);
+ *out++ = MI_LOAD_REGISTER_IMM(1);
+ *out++ = i915_mmio_reg_offset(mbox_reg);
+ *out++ = req->global_seqno;
+ num_rings++;
}
}
+ if (num_rings & 1)
+ *out++ = MI_NOOP;
- /* If num_dwords was rounded, make sure the tail pointer is correct */
- if (num_rings % 2 == 0)
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
+ return out;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- I915_WRITE_TAIL(request->engine,
- intel_ring_offset(request->ring, request->tail));
+ i915_gem_request_submit(request);
+
+ I915_WRITE_TAIL(request->engine, request->tail);
}
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- struct intel_ring *ring = req->ring;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
+ *out++ = MI_STORE_DWORD_INDEX;
+ *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *out++ = req->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
- req->tail = ring->tail;
-
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int i9xx_emit_breadcrumb_sz = 4;
+
/**
- * gen6_sema_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @request - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- int ret;
-
- ret = req->engine->semaphore.signal(req);
- if (ret)
- return ret;
-
- return i9xx_emit_request(req);
+ return i9xx_emit_breadcrumb(req,
+ req->engine->semaphore.signal(req, out));
}
-static int gen8_render_emit_request(struct drm_i915_gem_request *req)
+static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ring *ring = req->ring;
- int ret;
- if (engine->semaphore.signal) {
- ret = engine->semaphore.signal(req);
- if (ret)
- return ret;
- }
+ if (engine->semaphore.signal)
+ out = engine->semaphore.signal(req, out);
- ret = intel_ring_begin(req, 8);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(engine);
+ *out++ = 0;
+ *out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- req->tail = ring->tail;
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int gen8_render_emit_breadcrumb_sz = 8;
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -1484,7 +1378,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->fence.seqno);
+ intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
@@ -1522,7 +1416,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->fence.seqno - 1);
+ intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1665,7 +1559,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1674,7 +1568,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1819,14 +1713,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_gem_object_unpin_map(vma->obj);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -1834,9 +1733,10 @@ static int init_status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
+ void *vaddr;
int ret;
- obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1869,15 +1769,22 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr =
- i915_gem_object_pin_map(obj, I915_MAP_WB);
+ engine->status_page.page_addr = memset(vaddr, 0, 4096);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
+err_unpin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
@@ -1989,6 +1896,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -2023,7 +1931,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ struct drm_i915_gem_object *obj = ring->vma->obj;
+
+ i915_vma_close(ring->vma);
+ __i915_gem_object_release_unless_active(obj);
+
kfree(ring);
}
@@ -2039,14 +1951,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
- if (ret)
- goto error;
+ struct i915_vma *vma;
- ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
- PIN_GLOBAL | PIN_HIGH);
- if (ret)
+ vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto error;
+ }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -2093,9 +2004,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
-
ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2146,9 +2054,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
dev_priv = engine->i915;
if (engine->buffer) {
@@ -2175,13 +2080,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
@@ -2211,7 +2119,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
- int ret;
+ long timeout;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
@@ -2241,11 +2151,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NO_WAITBOOST);
- if (ret)
- return ret;
+ timeout = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
i915_gem_request_retire_upto(target);
@@ -2674,9 +2584,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
- engine->emit_request = i9xx_emit_request;
- if (i915.semaphores)
- engine->emit_request = gen6_sema_emit_request;
+ engine->emit_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+ if (i915.semaphores) {
+ int num_rings;
+
+ engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
+
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ } else {
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
+ }
+ }
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
@@ -2703,10 +2626,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->emit_request = gen8_render_emit_request;
+ engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores)
+ if (i915.semaphores) {
+ int num_rings;
+
engine->semaphore.signal = gen8_rcs_signal;
+
+ num_rings =
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ }
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0c605d..3466b4e77e7c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -4,6 +4,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
@@ -73,13 +74,40 @@ enum intel_engine_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 3
+
+#define instdone_slice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
int score;
enum intel_engine_hangcheck_action action;
int deadlock;
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
};
struct intel_ring {
@@ -130,6 +158,7 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
+struct intel_render_state;
struct intel_engine_cs {
struct drm_i915_private *i915;
@@ -141,7 +170,6 @@ struct intel_engine_cs {
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
@@ -152,10 +180,12 @@ struct intel_engine_cs {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
- u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
+ struct intel_timeline *timeline;
+
+ struct intel_render_state *render_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -177,7 +207,7 @@ struct intel_engine_cs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
- spinlock_t lock; /* protects the lists of requests */
+ spinlock_t lock; /* protects the lists of requests; irqsafe */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
@@ -225,7 +255,9 @@ struct intel_engine_cs {
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- int (*emit_request)(struct drm_i915_gem_request *req);
+ void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
+ u32 *out);
+ int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@@ -235,6 +267,15 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct drm_i915_gem_request *req);
+ /* Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ *
+ * Called under the struct_mutex.
+ */
+ void (*schedule)(struct drm_i915_gem_request *request,
+ int priority);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -282,8 +323,6 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_ENGINES-1];
-
union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
@@ -300,43 +339,22 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- int (*signal)(struct drm_i915_gem_request *req);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
} semaphore;
/* Execlists */
struct tasklet_struct irq_tasklet;
- spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
- struct list_head execlist_queue;
+ struct rb_root execlist_queue;
+ struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
u32 ctx_desc_template;
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * Seqno of request most recently submitted to request_list.
- * Used exclusively by hang checker to avoid grabbing lock while
- * inspecting request list.
- */
- u32 last_submitted_seqno;
- u32 last_pending_seqno;
-
- /* An RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_gem_active last_request;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@@ -368,39 +386,12 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
- return engine->i915 != NULL;
-}
-
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
-static inline u32
-intel_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other - engine) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
@@ -483,30 +474,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
*/
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- return value & (ring->size - 1);
+ u32 offset = addr - ring->vaddr;
+ return offset & (ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
- unsigned int flags)
-{
- /* Wait upon the last request to be completed */
- return i915_gem_active_wait_unlocked(&engine->last_request,
- flags, NULL, NULL);
-}
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -514,13 +498,30 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
+static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
+{
+ /* We are only peeking at the tail of the submit queue (and not the
+ * queue itself) in order to gain a hint as to the current active
+ * state of the engine. Callers are not expected to be taking
+ * engine->timeline->lock, nor are they expected to be concerned
+ * with serialising this hint with anything, so document it as
+ * a hint and nothing more.
+ */
+ return READ_ONCE(engine->timeline->last_submitted_seqno);
+}
+
int init_workarounds_ring(struct intel_engine_cs *engine);
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone);
+
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
@@ -586,12 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
-
-static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
-{
- return i915_gem_active_isset(&engine->last_request);
-}
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6c11168facd6..356c662ad453 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -331,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- if (power_well->data == SKL_DISP_PW_2) {
+ if (power_well->id == SKL_DISP_PW_2) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
@@ -344,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->data == SKL_DISP_PW_2)
+ if (power_well->id == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -659,7 +658,7 @@ static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
+ enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
@@ -704,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
- switch (power_well->data) {
+ switch (power_well->id) {
case SKL_DISP_PW_1:
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
@@ -728,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
case SKL_DISP_PW_MISC_IO:
break;
default:
- WARN(1, "Unknown power well %lu\n", power_well->data);
+ WARN(1, "Unknown power well %lu\n", power_well->id);
return;
}
- req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ req_mask = SKL_POWER_WELL_REQ(power_well->id);
enable_requested = tmp & req_mask;
- state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ state_mask = SKL_POWER_WELL_STATE(power_well->id);
is_enabled = tmp & state_mask;
if (!enable && enable_requested)
@@ -770,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
- if (power_well->data == SKL_DISP_PW_1) {
+ if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
1))
DRM_ERROR("PG1 distributing status timeout\n");
- } else if (power_well->data == SKL_DISP_PW_2) {
+ } else if (power_well->id == SKL_DISP_PW_2) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG2_DIST_STATUS,
@@ -819,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
- SKL_POWER_WELL_STATE(power_well->data);
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
+ SKL_POWER_WELL_STATE(power_well->id);
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
@@ -846,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
-static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->data;
-
- return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
-}
-
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
- struct i915_power_well *cmn_a_well = NULL;
-
- if (power_well_id == BXT_DPIO_CMN_BC) {
- /*
- * We need to copy the GRC calibration value from the eDP PHY,
- * so make sure it's powered up.
- */
- cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
- intel_power_well_get(dev_priv, cmn_a_well);
- }
-
- bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
-
- if (cmn_a_well)
- intel_power_well_put(dev_priv, cmn_a_well);
+ bxt_ddi_phy_init(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv,
- bxt_power_well_to_phy(power_well));
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -903,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -933,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
- dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+ dev_priv->display.get_display_clock_speed(dev_priv));
gen9_assert_dbuf_enabled(dev_priv);
@@ -976,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum punit_power_well power_well_id = power_well->data;
+ enum punit_power_well power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@@ -1030,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int power_well_id = power_well->data;
+ int power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@@ -1092,7 +1066,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(&dev_priv->drm, pipe) {
+ for_each_pipe(dev_priv, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1123,7 +1097,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(&dev_priv->drm);
+ i915_redisable_vga_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1139,13 +1113,15 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
intel_power_sequencer_reset(dev_priv);
- intel_hpd_poll_init(dev_priv);
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_set_power_well(dev_priv, power_well, true);
@@ -1155,7 +1131,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_display_power_well_deinit(dev_priv);
@@ -1165,7 +1141,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1191,7 +1167,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@@ -1214,7 +1190,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
struct i915_power_well *power_well;
power_well = &power_domains->power_wells[i];
- if (power_well->data == power_well_id)
+ if (power_well->id == power_well_id)
return power_well;
}
@@ -1338,10 +1314,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@@ -1369,7 +1345,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1400,10 +1376,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@@ -1552,7 +1528,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
bool enabled;
u32 state, ctrl;
@@ -1582,7 +1558,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
u32 state;
u32 ctrl;
@@ -1615,7 +1591,7 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
@@ -1623,7 +1599,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@@ -1633,7 +1609,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@@ -1977,12 +1953,12 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = PUNIT_POWER_WELL_ALWAYS_ON,
+ .id = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
+ .id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
@@ -1992,7 +1968,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
@@ -2001,7 +1977,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
@@ -2010,7 +1986,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
@@ -2019,12 +1995,12 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
@@ -2044,19 +2020,19 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .data = PIPE_A,
+ .id = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
};
@@ -2079,57 +2055,57 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = SKL_DISP_PW_ALWAYS_ON,
+ .id = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_A_E,
+ .id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_B,
+ .id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_C,
+ .id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_D,
+ .id = SKL_DISP_PW_DDI_D,
},
};
@@ -2144,31 +2120,33 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "power well 1",
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_A,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_BC,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
},
};
@@ -2590,20 +2568,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
@@ -2738,8 +2715,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
- atomic_inc(&dev_priv->pm.atomic_seq);
+ atomic_dec(&dev_priv->pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
@@ -2758,7 +2734,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
struct device *kdev = &pdev->dev;
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@@ -2770,7 +2745,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev)) {
+ if (!HAS_RUNTIME_PM(dev_priv)) {
pm_runtime_dont_use_autosuspend(kdev);
pm_runtime_get_sync(kdev);
} else {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c551024d4871..27808e91cb5a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
@@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
-} sdvo_cmd_names[] = {
+} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
- if (HAS_PCH_SPLIT(encoder->base.dev))
+ if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
@@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_display_mode *mode = &crtc_state->base.mode;
@@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* Set the SDVO control regs. */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1286,7 +1285,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1294,9 +1293,10 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1304,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_INFO(dev)->gen < 5)
+ INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1339,7 +1339,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -1389,7 +1389,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* encoder->get_config we so already have a valid pixel multplier on all
* other platfroms.
*/
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1508,7 +1508,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -1595,15 +1595,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2410,10 +2410,10 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_device *dev = connector->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
@@ -2981,6 +2981,7 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->port = port;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@@ -2996,7 +2997,7 @@ bool intel_sdvo_init(struct drm_device *dev,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 73a521fdf1bd..8f131a08d440 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,9 +203,6 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl;
@@ -227,13 +224,10 @@ skl_update_plane(struct drm_plane *drm_plane,
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, wm, plane);
-
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -292,14 +286,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!dplane->state->visible)
- skl_write_plane_wm(to_intel_crtc(crtc),
- &dev_priv->wm.skl_results, plane);
-
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -358,7 +344,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
- unsigned int rotation = dplane->state->rotation;
+ unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
@@ -420,9 +406,15 @@ vlv_update_plane(struct drm_plane *dplane,
*/
sprctl |= SP_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -432,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SP_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += src_w;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -450,13 +442,13 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -539,15 +531,18 @@ ivb_update_plane(struct drm_plane *plane,
*/
sprctl |= SPRITE_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
/* Sizes are 0 based */
@@ -562,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SPRITE_ROTATE_180;
-
- /* HSW and BDW does this automagically in hardware */
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += src_w;
- y += src_h;
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += src_w;
+ y += src_h;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -590,9 +582,9 @@ ivb_update_plane(struct drm_plane *plane,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -677,10 +669,13 @@ ilk_update_plane(struct drm_plane *plane,
*/
dvscntr |= DVS_GAMMA_ENABLE;
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
- if (IS_GEN6(dev))
+ if (rotation & DRM_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
+ if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
/* Sizes are 0 based */
@@ -696,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dvscntr |= DVS_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
}
@@ -719,7 +712,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -753,7 +746,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -769,15 +762,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
bool can_scale;
int ret;
- src->x1 = state->base.src_x;
- src->y1 = state->base.src_y;
- src->x2 = state->base.src_x + state->base.src_w;
- src->y2 = state->base.src_y + state->base.src_h;
-
- dst->x1 = state->base.crtc_x;
- dst->y1 = state->base.crtc_y;
- dst->x2 = state->base.crtc_x + state->base.crtc_w;
- dst->y2 = state->base.crtc_y + state->base.crtc_h;
+ *src = drm_plane_state_src(&state->base);
+ *dst = drm_plane_state_dest(&state->base);
if (!fb) {
state->base.visible = false;
@@ -797,7 +783,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
/* setup can_scale, min_scale, max_scale */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
@@ -913,7 +899,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
- if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
@@ -932,7 +918,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- if (INTEL_GEN(dev) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(state);
if (ret)
return ret;
@@ -944,6 +930,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -955,7 +942,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -987,9 +974,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_backoff(&ctx);
}
- if (ret)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -1039,19 +1024,18 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane)
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
+ unsigned int supported_rotations;
int num_plane_formats;
int ret;
- if (INTEL_INFO(dev)->gen < 5)
- return -ENODEV;
-
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane) {
ret = -ENOMEM;
@@ -1065,26 +1049,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
intel_plane->base.state = &state->base;
- switch (INTEL_INFO(dev)->gen) {
- case 5:
- case 6:
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
- intel_plane->max_downscale = 16;
- intel_plane->update_plane = ilk_update_plane;
- intel_plane->disable_plane = ilk_disable_plane;
+ state->scaler_id = -1;
- if (IS_GEN6(dev)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
- } else {
- plane_formats = ilk_plane_formats;
- num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
- }
- break;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
- case 7:
- case 8:
- if (IS_IVYBRIDGE(dev)) {
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
@@ -1092,33 +1076,38 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
- } else {
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
- break;
- case 9:
- intel_plane->can_scale = true;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- state->scaler_id = -1;
+ }
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev)->gen);
- ret = -ENODEV;
- goto fail;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
+ } else {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
}
intel_plane->pipe = pipe;
@@ -1128,30 +1117,32 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
- intel_create_rotation_property(dev, intel_plane);
+ drm_plane_create_rotation_property(&intel_plane->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- return 0;
+ return intel_plane;
fail:
kfree(state);
kfree(intel_plane);
- return ret;
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d960e4866595..78cdfc6833d6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -86,7 +86,8 @@ struct intel_tv {
};
struct video_levels {
- int blank, black, burst;
+ u16 blank, black;
+ u8 burst;
};
struct color_conversion {
@@ -339,34 +340,43 @@ static const struct video_levels component_levels = {
struct tv_mode {
const char *name;
- int clock;
- int refresh; /* in millihertz (for precision) */
+
+ u32 clock;
+ u16 refresh; /* in millihertz (for precision) */
u32 oversample;
- int hsync_end, hblank_start, hblank_end, htotal;
- bool progressive, trilevel_sync, component_only;
- int vsync_start_f1, vsync_start_f2, vsync_len;
- bool veq_ena;
- int veq_start_f1, veq_start_f2, veq_len;
- int vi_end_f1, vi_end_f2, nbr_end;
- bool burst_ena;
- int hburst_start, hburst_len;
- int vburst_start_f1, vburst_end_f1;
- int vburst_start_f2, vburst_end_f2;
- int vburst_start_f3, vburst_end_f3;
- int vburst_start_f4, vburst_end_f4;
+ u8 hsync_end;
+ u16 hblank_start, hblank_end, htotal;
+ bool progressive : 1, trilevel_sync : 1, component_only : 1;
+ u8 vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena : 1;
+ u8 veq_start_f1, veq_start_f2, veq_len;
+ u8 vi_end_f1, vi_end_f2;
+ u16 nbr_end;
+ bool burst_ena : 1;
+ u8 hburst_start, hburst_len;
+ u8 vburst_start_f1;
+ u16 vburst_end_f1;
+ u8 vburst_start_f2;
+ u16 vburst_end_f2;
+ u8 vburst_start_f3;
+ u16 vburst_end_f3;
+ u8 vburst_start_f4;
+ u16 vburst_end_f4;
/*
* subcarrier programming
*/
- int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u16 dda2_size, dda3_size;
+ u8 dda1_inc;
+ u16 dda2_inc, dda3_inc;
u32 sc_reset;
- bool pal_burst;
+ bool pal_burst : 1;
/*
* blank/black levels
*/
const struct video_levels *composite_levels, *svideo_levels;
const struct color_conversion *composite_color, *svideo_color;
const u32 *filter_table;
- int max_srcw;
+ u16 max_srcw;
};
@@ -846,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
- intel_wait_for_vblank(encoder->base.dev,
+ intel_wait_for_vblank(dev_priv,
to_intel_crtc(encoder->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1019,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1095,7 +1104,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev))
+ if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
@@ -1106,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1220,7 +1229,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* The TV sense state should be cleared to zero on cantiga platform. Otherwise
* the TV is misdetected. This is hardware requirement.
*/
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1228,7 +1237,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1258,7 +1267,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1610,7 +1619,9 @@ intel_tv_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
+
intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee2306a79747..d7be0d94ba4d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,19 +231,21 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
+ struct drm_i915_private *dev_priv = domain->i915;
unsigned long irqflags;
- assert_rpm_device_not_suspended(domain->i915);
+ assert_rpm_device_not_suspended(dev_priv);
- spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0)
- domain->i915->uncore.funcs.force_wake_put(domain->i915,
- 1 << domain->id);
+ if (--domain->wake_count == 0) {
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ dev_priv->uncore.fw_domains_active &= ~domain->mask;
+ }
- spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
@@ -254,7 +256,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domains fw = 0, active_domains;
+ enum forcewake_domains fw, active_domains;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
@@ -291,10 +293,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv)
- if (domain->wake_count)
- fw |= domain->mask;
-
+ fw = dev_priv->uncore.fw_domains_active;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -403,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -420,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ /* Enable Decoupled MMIO only on BXT C stepping onwards */
+ if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ info->has_decoupled_mmio = false;
+
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
@@ -443,9 +448,6 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_get)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -453,8 +455,10 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains)
+ if (fw_domains) {
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
}
/**
@@ -509,9 +513,6 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_put)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -567,13 +568,10 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
- struct intel_uncore_forcewake_domain *domain;
-
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv)
- WARN_ON(domain->wake_count);
+ WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -589,49 +587,148 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
__fwd; \
})
-#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
+{
+ if (offset < entry->start)
+ return -1;
+ else if (offset > entry->end)
+ return 1;
+ else
+ return 0;
+}
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
- REG_RANGE((reg), 0xB000, 0x12000) || \
- REG_RANGE((reg), 0x2E000, 0x30000))
+/* Copied and "macroized" from lib/bsearch.c */
+#define BSEARCH(key, base, num, cmp) ({ \
+ unsigned int start__ = 0, end__ = (num); \
+ typeof(base) result__ = NULL; \
+ while (start__ < end__) { \
+ unsigned int mid__ = start__ + (end__ - start__) / 2; \
+ int ret__ = (cmp)((key), (base) + mid__); \
+ if (ret__ < 0) { \
+ end__ = mid__; \
+ } else if (ret__ > 0) { \
+ start__ = mid__ + 1; \
+ } else { \
+ result__ = (base) + mid__; \
+ break; \
+ } \
+ } \
+ result__; \
+})
+
+static enum forcewake_domains
+find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+{
+ const struct intel_forcewake_range *entry;
+
+ entry = BSEARCH(offset,
+ dev_priv->uncore.fw_domains_table,
+ dev_priv->uncore.fw_domains_table_entries,
+ fw_range_cmp);
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x22000, 0x24000) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+ return entry ? entry->domains : 0;
+}
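
find_fw_domain() above replaces the old REG_RANGE() macro chains with a binary search over a sorted range table. As a rough, self-contained sketch of the same idea — table contents and names here are hypothetical, not code from this patch:

/* Illustration only: binary search over sorted, non-overlapping [start, end] ranges. */
#include <stdio.h>

struct fw_range { unsigned int start, end, domains; };

static const struct fw_range table[] = {
	{ 0x2000,  0x3fff,  0x1 },	/* e.g. render */
	{ 0x5000,  0x7fff,  0x1 },
	{ 0x12000, 0x13fff, 0x2 },	/* e.g. media */
};

static unsigned int lookup(unsigned int offset)
{
	unsigned int lo = 0, hi = sizeof(table) / sizeof(table[0]);

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offset < table[mid].start)
			hi = mid;		/* key is below this range */
		else if (offset > table[mid].end)
			lo = mid + 1;		/* key is above this range */
		else
			return table[mid].domains;
	}
	return 0;	/* offset needs no forcewake */
}

int main(void)
{
	printf("0x5100 -> %u\n", lookup(0x5100));	/* prints 1 */
	printf("0x9000 -> %u\n", lookup(0x9000));	/* prints 0 */
	return 0;
}

The lookup only works because the table is sorted and the ranges do not overlap, which is exactly the invariant intel_fw_table_check() and intel_shadow_table_check() assert in debug builds further down.
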
-#define __vlv_reg_read_fw_domains(offset) \
+static void
+intel_fw_table_check(struct drm_i915_private *dev_priv)
+{
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ s32 prev;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ ranges = dev_priv->uncore.fw_domains_table;
+ if (!ranges)
+ return;
+
+ num_ranges = dev_priv->uncore.fw_domains_table_entries;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ WARN_ON_ONCE(IS_GEN9(dev_priv) &&
+ (prev + 1) != (s32)ranges->start);
+ WARN_ON_ONCE(prev >= (s32)ranges->start);
+ prev = ranges->start;
+ WARN_ON_ONCE(prev >= (s32)ranges->end);
+ prev = ranges->end;
+ }
+}
+
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
+
+#define HAS_FWTABLE(dev_priv) \
+ (IS_GEN9(dev_priv) || \
+ IS_CHERRYVIEW(dev_priv) || \
+ IS_VALLEYVIEW(dev_priv))
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
+#define __fwtable_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
+/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
+ RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
/* TODO: Other registers are not yet used */
};
+static void intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ s32 prev;
+ u32 offset;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ offset = i915_mmio_reg_offset(*reg);
+ WARN_ON_ONCE(prev >= (s32)offset);
+ prev = offset;
+ }
+}
+
+static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+{
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (key < offset)
+ return -1;
+ else if (key > offset)
+ return 1;
+ else
+ return 0;
+}
+
static bool is_gen8_shadowed(u32 offset)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (offset == gen8_shadowed_regs[i].reg)
- return true;
+ const i915_reg_t *regs = gen8_shadowed_regs;
- return false;
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
+ mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
@@ -644,143 +741,70 @@ static bool is_gen8_shadowed(u32 offset)
__fwd; \
})
-#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE800))
-
-#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8800, 0x8900) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1C000) || \
- REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x38000))
-
-#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x4000, 0x5000) || \
- REG_RANGE((reg), 0x8000, 0x8300) || \
- REG_RANGE((reg), 0x8500, 0x8600) || \
- REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xF000, 0x10000))
-
-#define __chv_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- __fwd; \
-})
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __chv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
+};
-#define __chv_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
-#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0xB00, 0x2000)
-
-#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x2700) || \
- REG_RANGE((reg), 0x3000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8140, 0x8160) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0x8C00, 0x8D00) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE900) || \
- REG_RANGE((reg), 0x24400, 0x24800))
-
-#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8130, 0x8140) || \
- REG_RANGE((reg), 0x8800, 0x8A00) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
-
-#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0x9400, 0x9800)
-
-#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
- ((reg) < 0x40000 && \
- !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
-
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
-#define __gen9_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen9_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-static bool is_gen9_shadowed(u32 offset)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (offset == gen9_shadowed_regs[i].reg)
- return true;
-
- return false;
-}
-
-#define __gen9_reg_write_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -815,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
+static const enum decoupled_power_domain fw2dpd_domain[] = {
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_BLITTER,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+/*
+ * Decoupled MMIO access for only 1 DWORD
+ */
+static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain,
+ enum decoupled_ops operation)
+{
+ enum decoupled_power_domain dp_domain;
+ u32 ctrl_reg_data = 0;
+
+ dp_domain = fw2dpd_domain[fw_domain - 1];
+
+ ctrl_reg_data |= reg;
+ ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
+ ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
+ ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ GEN9_DECOUPLED_REG0_DW1) &
+ GEN9_DECOUPLED_DW1_GO) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Decoupled MMIO wait timed out\n");
+}
+
+static inline u32
+__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain)
+{
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_READ);
+
+ return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
+}
+
+static inline void
+__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 data,
+ enum forcewake_domains fw_domain)
+{
+
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
+
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_WRITE);
+}
+
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv);
@@ -869,26 +953,30 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ fw_domain_arm_timer(domain);
+
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+}
+
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
if (WARN_ON(!fw_domains))
return;
- /* Ideally GCC would be constant-fold and eliminate this loop */
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
- if (domain->wake_count) {
- fw_domains &= ~domain->mask;
- continue;
- }
-
- fw_domain_arm_timer(domain);
- }
+ /* Turn on all requested but inactive supported forcewake domains. */
+ fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= ~dev_priv->uncore.fw_domains_active;
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ ___force_wake_auto(dev_priv, fw_domains);
}
#define __gen6_read(x) \
@@ -903,62 +991,50 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
-#define __vlv_read(x) \
+#define __fwtable_read(x) \
static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __vlv_reg_read_fw_domains(offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define __chv_read(x) \
+#define __gen9_decoupled_read(x) \
static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __chv_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __gen9_read(x) \
-static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen9_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
+ unsigned i; \
+ u32 *ptr_data = (u32 *) &val; \
+ for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
+ *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
+ offset, \
+ fw_engine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
GEN6_READ_FOOTER; \
}
-__gen9_read(8)
-__gen9_read(16)
-__gen9_read(32)
-__gen9_read(64)
-__chv_read(8)
-__chv_read(16)
-__chv_read(32)
-__chv_read(64)
-__vlv_read(8)
-__vlv_read(16)
-__vlv_read(32)
-__vlv_read(64)
+__gen9_decoupled_read(32)
+__gen9_decoupled_read(64)
+__fwtable_read(8)
+__fwtable_read(16)
+__fwtable_read(32)
+__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-#undef __gen9_read
-#undef __chv_read
-#undef __vlv_read
+#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -1054,21 +1130,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __hsw_write(x) \
-static void \
-hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- u32 __fifo_ret = 0; \
- GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset)) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- GEN6_WRITE_FOOTER; \
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
@@ -1081,51 +1142,49 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __chv_write(x) \
+#define __fwtable_write(x) \
static void \
-chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __chv_reg_write_fw_domains(offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen9_write(x) \
+#define __gen9_decoupled_write(x) \
static void \
-gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
+gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __gen9_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
+ __gen9_decoupled_mmio_write(dev_priv, \
+ offset, \
+ val, \
+ fw_engine); \
+ else \
+ __raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-__gen9_write(8)
-__gen9_write(16)
-__gen9_write(32)
-__chv_write(8)
-__chv_write(16)
-__chv_write(32)
+__gen9_decoupled_write(32)
+__fwtable_write(8)
+__fwtable_write(16)
+__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
-__hsw_write(8)
-__hsw_write(16)
-__hsw_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#undef __gen9_write
-#undef __chv_write
+#undef __fwtable_write
#undef __gen8_write
-#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1314,6 +1373,13 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
+#define ASSIGN_FW_DOMAINS_TABLE(d) \
+{ \
+ dev_priv->uncore.fw_domains_table = \
+ (struct intel_forcewake_range *)(d); \
+ dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+}
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
@@ -1327,13 +1393,23 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
- ASSIGN_WRITE_MMIO_VFUNCS(gen9);
- ASSIGN_READ_MMIO_VFUNCS(gen9);
+ ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
+ }
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(chv);
- ASSIGN_READ_MMIO_VFUNCS(chv);
+ ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
@@ -1342,14 +1418,11 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
case 7:
case 6:
- if (IS_HASWELL(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(hsw);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
- }
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_READ_MMIO_VFUNCS(vlv);
+ ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
@@ -1366,6 +1439,10 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
}
+ intel_fw_table_check(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 8)
+ intel_shadow_table_check();
+
if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
@@ -1408,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
+ (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
break;
}
@@ -1815,35 +1892,16 @@ static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
- if (IS_VALLEYVIEW(dev_priv))
- fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5: /* forcewake was introduced with gen6 */
- case 4:
- case 3:
- case 2:
- return 0;
+ if (HAS_FWTABLE(dev_priv)) {
+ fw_domains = __fwtable_reg_read_fw_domains(offset);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(offset);
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1855,32 +1913,18 @@ static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
+ if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ fw_domains = __fwtable_reg_write_fw_domains(offset);
+ } else if (IS_GEN8(dev_priv)) {
+ fw_domains = __gen8_reg_write_fw_domains(offset);
+ } else if (IS_GEN(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5:
- case 4:
- case 3:
- case 2:
- return 0;
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1910,6 +1954,9 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
WARN_ON(!op);
+ if (intel_vgpu_active(dev_priv))
+ return 0;
+
if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db9621f1f0..8886cab19f98 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
- u8 not_common3[12];
+ u8 aux_channel;
+ u8 not_common3[11];
u8 iboost_level;
} __packed;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 98df09c2b388..33404295b447 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -18,7 +18,6 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -151,38 +150,11 @@ static int imx_drm_atomic_check(struct drm_device *dev,
return ret;
}
-static int imx_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- struct dma_buf *dma_buf;
- int i;
-
- /*
- * If the plane fb has an dma-buf attached, fish out the exclusive
- * fence for the atomic helper to wait on.
- */
- for_each_plane_in_state(state, plane, plane_state, i) {
- if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
- dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
- 0)->base.dma_buf;
- if (!dma_buf)
- continue;
- plane_state->fence =
- reservation_object_get_excl_rcu(dma_buf->resv);
- }
- }
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-}
-
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
- .atomic_commit = imx_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
@@ -357,8 +329,8 @@ static int imx_drm_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
if (!imxdrm) {
@@ -436,9 +408,11 @@ static int imx_drm_bind(struct device *dev)
err_fbhelper:
drm_kms_helper_poll_fini(drm);
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
if (imxdrm->fbhelper)
drm_fbdev_cma_fini(imxdrm->fbhelper);
err_unbind:
+#endif
component_unbind_all(drm->dev, drm);
err_vblank:
drm_vblank_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3ce391c239b0..516d06490465 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -101,12 +101,6 @@ struct imx_ldb {
const struct bus_mux *lvds_mux;
};
-static enum drm_connector_status imx_ldb_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
u32 bus_format)
{
@@ -319,18 +313,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int mux, ret;
- /*
- * imx_ldb_encoder_disable is called by
- * drm_helper_disable_unused_functions without
- * the encoder being enabled before.
- */
- if (imx_ldb_ch == &ldb->channel[0] &&
- (ldb->ldb_ctrl & LDB_CH0_MODE_EN_MASK) == 0)
- return;
- else if (imx_ldb_ch == &ldb->channel[1] &&
- (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
- return;
-
drm_panel_disable(imx_ldb_ch->panel);
if (imx_ldb_ch == &ldb->channel[0])
@@ -409,7 +391,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 8fc088843e55..3b602ee33c44 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -227,12 +227,6 @@ static int tve_setup_vga(struct imx_tve *tve)
TVE_TVDAC_TEST_MODE_MASK, 1);
}
-static enum drm_connector_status imx_tve_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int imx_tve_connector_get_modes(struct drm_connector *connector)
{
struct imx_tve *tve = con_to_tve(connector);
@@ -352,7 +346,6 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_tve_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4e1ae3fc462d..6be515a9fb69 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
+ /*
+ * Planes must be disabled before the DC clock is removed, as otherwise the
+ * attached IDMACs will be left in an undefined state, possibly hanging
+ * the IPU or even the system.
+ */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
ipu_dc_disable(ipu);
spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
- /* always disable planes on the CRTC */
- drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
drm_crtc_vblank_off(crtc);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ce22d0a0ddc8..e74a0ad52950 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -50,6 +50,12 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
DRM_FORMAT_RGB565,
};
@@ -64,13 +70,14 @@ drm_plane_state_to_eba(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[0] +
- fb->pitches[0] * (state->src_y >> 16) +
- (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
+ return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
+ drm_format_plane_cpp(fb->pixel_format, 0) * x;
}
static inline unsigned long
@@ -79,13 +86,17 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[1] +
- fb->pitches[1] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+
+ return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ drm_format_plane_cpp(fb->pixel_format, 1) * x - eba;
}
static inline unsigned long
@@ -94,69 +105,17 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
unsigned long eba = drm_plane_state_to_eba(state);
+ int x = state->src_x >> 16;
+ int y = state->src_y >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[2] +
- fb->pitches[2] * (state->src_y >> 16) / 2 +
- (state->src_x >> 16) / 2 - eba;
-}
-
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
- struct drm_plane_state *old_state)
-{
- struct drm_plane *plane = &ipu_plane->base;
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- unsigned long eba, ubo, vbo;
- int active;
-
- eba = drm_plane_state_to_eba(state);
-
- switch (fb->pixel_format) {
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- if (old_state->fb)
- break;
-
- /*
- * Multiplanar formats have to meet the following restrictions:
- * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
- * - EBA, UBO and VBO are a multiple of 8
- * - UBO and VBO are unsigned and not larger than 0xfffff8
- * - Only EBA may be changed while scanout is active
- * - The strides of U and V planes must be identical.
- */
- ubo = drm_plane_state_to_ubo(state);
- vbo = drm_plane_state_to_vbo(state);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420)
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], ubo, vbo);
- else
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- fb->pitches[1], vbo, ubo);
-
- dev_dbg(ipu_plane->base.dev->dev,
- "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src_x >> 16, state->src_y >> 16);
- break;
- default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src_x >> 16, state->src_y >> 16);
-
- break;
- }
+ x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
+ y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
- if (old_state->fb) {
- active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
- ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
- } else {
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
- }
+ return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ drm_format_plane_cpp(fb->pixel_format, 2) * x - eba;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
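
To make the offset math above concrete, here is a rough worked example for a hypothetical, contiguously laid out 1920x1080 YUV420 framebuffer (all numbers assumed, not taken from the patch):

/* Illustration only: EBA/UBO/VBO offsets for a contiguous 1920x1080 YUV420 buffer. */
#include <stdio.h>

int main(void)
{
	unsigned long paddr = 0x10000000;	/* hypothetical CMA base address */
	unsigned long offsets[3] = { 0, 1920 * 1080, 1920 * 1080 + 960 * 540 };
	unsigned long pitches[3] = { 1920, 960, 960 };	/* cpp is 1 for every plane */
	int src_x = 64, src_y = 32;
	int hsub = 2, vsub = 2;			/* 4:2:0 chroma subsampling */

	unsigned long eba = paddr + offsets[0] + pitches[0] * src_y + 1 * src_x;
	int cx = src_x / hsub, cy = src_y / vsub;
	unsigned long ubo = paddr + offsets[1] + pitches[1] * cy + 1 * cx - eba;
	unsigned long vbo = paddr + offsets[2] + pitches[2] * cy + 1 * cx - eba;

	/* Both offsets stay multiples of 8 and below 0xfffff8, as the checks require. */
	printf("eba=0x%lx ubo=%lu vbo=%lu\n", eba, ubo, vbo);
	return 0;
}

With the subsampling-aware division added by the patch, NV12/NV16 and the 4:2:2/4:4:4 planar variants follow the same pattern, only with hsub/vsub of 2/1 or 1/1.
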
@@ -259,6 +218,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+ int hsub, vsub;
/* Ok to disable */
if (!fb)
@@ -338,6 +298,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -346,30 +310,49 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = drm_plane_state_to_ubo(state);
vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7))
+ if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
- return -EINVAL;
-
- if (old_fb) {
- old_ubo = drm_plane_state_to_ubo(old_state);
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
old_vbo = drm_plane_state_to_vbo(old_state);
- if (ubo != old_ubo || vbo != old_vbo)
- return -EINVAL;
+ if (vbo != old_vbo)
+ crtc_state->mode_changed = true;
}
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
+ /* fall-through */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ if (ubo & 0x7 || ubo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ if (ubo != old_ubo)
+ crtc_state->mode_changed = true;
+ }
+
if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
return -EINVAL;
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
crtc_state->mode_changed = true;
+
+ /*
+ * The x/y offsets must be even in case of horizontal/vertical
+ * chroma subsampling.
+ */
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ if (((state->src_x >> 16) & (hsub - 1)) ||
+ ((state->src_y >> 16) & (vsub - 1)))
+ return -EINVAL;
}
return 0;
@@ -386,15 +369,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
enum ipu_color_space ics;
+ int active;
- if (old_state->fb) {
- struct drm_crtc_state *crtc_state = state->crtc->state;
+ eba = drm_plane_state_to_eba(state);
- if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
- return;
- }
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ return;
}
switch (ipu_plane->dp_flow) {
@@ -424,6 +411,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
break;
default:
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
}
}
@@ -437,11 +425,50 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- ipu_plane_atomic_set_base(ipu_plane, old_state);
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+ if (fb->pixel_format == DRM_FORMAT_YVU420 ||
+ fb->pixel_format == DRM_FORMAT_YVU422 ||
+ fb->pixel_format == DRM_FORMAT_YVU444)
+ swap(ubo, vbo);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(state);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, ubo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu, x = %d, y = %d", eba, ubo,
+ state->src_x >> 16, state->src_y >> 16);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+ break;
+ }
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_plane_enable(ipu_plane);
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .prepare_fb = drm_fb_cma_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index d796ada2a47a..8582a83c0d9b 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -49,12 +49,6 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
-static enum drm_connector_status imx_pd_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -143,7 +137,6 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_pd_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 019b7ca392d7..c70310206ac5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
ddp_comp);
priv->crtc = crtc;
+ writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
@@ -250,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
- IRQF_TRIGGER_NONE, dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
- return ret;
- }
-
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
@@ -272,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+ return ret;
+ }
+
ret = component_add(dev, &mtk_disp_ovl_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0186e500d2a5..90fb831ef031 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
unsigned long pll_rate;
unsigned int factor;
+ /* pick a factor so that pll_rate lands in the TVDPLL's valid range (1 GHz to 2 GHz) */
pix_rate = 1000UL * mode->clock;
- if (mode->clock <= 74000)
+ if (mode->clock <= 27000)
+ factor = 16 * 3;
+ else if (mode->clock <= 84000)
factor = 8 * 3;
- else
+ else if (mode->clock <= 167000)
factor = 4 * 3;
+ else
+ factor = 2 * 3;
pll_rate = pix_rate * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
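
To see how the new factor buckets behave, a small standalone calculation (the pixel clocks below are illustrative values, and the 1-2 GHz window comes from the comment above):

/* Illustration only: how the factor keeps pll_rate inside the ~1-2 GHz window. */
#include <stdio.h>

static unsigned int pick_factor(unsigned int clock_khz)
{
	if (clock_khz <= 27000)
		return 16 * 3;
	else if (clock_khz <= 84000)
		return 8 * 3;
	else if (clock_khz <= 167000)
		return 4 * 3;
	else
		return 2 * 3;
}

int main(void)
{
	unsigned int clocks[] = { 27000, 74250, 148500, 297000 };	/* kHz */
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		unsigned long pix_rate = 1000UL * clocks[i];
		unsigned long pll_rate = pix_rate * pick_factor(clocks[i]);

		printf("%u kHz -> PLL %lu Hz\n", clocks[i], pll_rate);
	}
	return 0;
}

All four sample clocks land between 1.296 GHz and 1.782 GHz, inside the stated TVDPLL range, whereas the old two-way split would have pushed some of them outside it.
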
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index df33b3ca6ffd..48cc01fd20c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int bpc)
{
writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE);
+ writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
mtk_dither_set(comp, bpc, DISP_OD_CFG);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f6507ec8..4b7fe7eaec01 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/of_address.h>
@@ -83,7 +84,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void mtk_atomic_work(struct work_struct *work)
@@ -110,6 +111,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
mtk_atomic_schedule(private, state);
else
@@ -247,16 +249,14 @@ static const struct file_operations mtk_drm_fops = {
.mmap = mtk_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
};
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
@@ -415,7 +415,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DPI) {
dev_info(dev, "Adding component match for %s\n",
node->full_name);
- component_match_add(dev, &match, compare_of, node);
+ drm_of_component_match_add(dev, &match, compare_of,
+ node);
} else {
struct mtk_ddp_comp *comp;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 28b2044ed9f2..2c42f90809d8 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -86,7 +86,7 @@
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
-#define HS_PRPR (0xff << 8)
+#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
@@ -102,10 +102,16 @@
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
-#define CLK_HS_PRPR (0xff << 0)
+#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define T_LPX 5
+#define T_HS_PREP 6
+#define T_HS_TRAIL 8
+#define T_HS_EXIT 7
+#define T_HS_ZERO 10
+
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
struct phy;
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- unsigned int ui, cycle_time;
- unsigned int lpx;
+ u32 ui, cycle_time;
ui = 1000 / dsi->data_rate + 0x01;
cycle_time = 8000 / dsi->data_rate + 0x01;
- lpx = 5;
- timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
- timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
- (4 * lpx);
+ timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
+ timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
+ T_HS_EXIT << 24;
timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
(NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
- NS_TO_CYCLE(0x40, cycle_time);
+ timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
+ NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
struct device *dev = dsi->dev;
int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
if (++dsi->refcount != 1)
return 0;
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
/**
- * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio;
- * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000.
- * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi.
- * we set mipi_ratio is 1.05.
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
*/
- dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
+
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
dev_err(dev, "Failed to set data rate: %d\n", ret);
goto err_refcount;
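
A worked instance of the data-rate formula in the comment above, for a hypothetical 1080p RGB888 panel on four lanes (standard CEA 1080p60 timings are assumed; none of these numbers come from the patch):

/* Illustration only: per-lane HS bit rate from pixel clock plus line overhead. */
#include <stdio.h>

int main(void)
{
	unsigned long long pixel_clock = 148500000ULL;		/* Hz */
	unsigned int htotal = 1920 + 88 + 44 + 148;		/* 2200 */
	unsigned int bit_per_pixel = 24, lanes = 4;
	unsigned int overhead_cycles = 5 + 6 + 10 + 8 + 7;	/* T_LPX .. T_HS_EXIT */
	unsigned long long total_bits = (unsigned long long)htotal * bit_per_pixel +
					(unsigned long long)overhead_cycles * lanes * 8;
	unsigned long long den = (unsigned long long)htotal * lanes;
	unsigned long long data_rate = (pixel_clock * total_bits + den - 1) / den;

	/* 148.5 MHz * 53952 / 8800 = 910440000 -> ~910 Mbit/s per lane */
	printf("data_rate = %llu bit/s\n", data_rate);
	return 0;
}

For the same hypothetical mode, the hard-coded estimate being removed (pixelclock * 3 * 21 / 10, in kHz) would have requested roughly 935 Mbit/s, so the per-mode calculation trims the HS clock to what the timings actually need.
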
@@ -562,12 +594,6 @@ static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
mtk_output_dsi_enable(dsi);
}
-static enum drm_connector_status mtk_dsi_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
struct mtk_dsi *dsi = connector_to_dsi(connector);
@@ -584,7 +610,6 @@ static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = mtk_dsi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 71227deef21b..0e8c4d9af340 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
phy_power_on(hdmi->phy);
mtk_hdmi_aud_output_config(hdmi, mode);
- mtk_hdmi_setup_audio_infoframe(hdmi);
- mtk_hdmi_setup_avi_infoframe(hdmi, mode);
- mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
- if (mode->flags & DRM_MODE_FLAG_3D_MASK)
- mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
-
mtk_hdmi_hw_vid_black(hdmi, false);
mtk_hdmi_hw_aud_unmute(hdmi);
mtk_hdmi_hw_send_av_unmute(hdmi);
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi->powered = true;
}
+static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_setup_audio_infoframe(hdmi);
+ mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+ mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+}
+
static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
phy_power_on(hdmi->phy);
+ mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
hdmi->enabled = true;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 8a24754b440f..51cb9cfb6646 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned int pre_div;
unsigned int div;
+ unsigned int pre_ibias;
+ unsigned int hdmi_ibias;
+ unsigned int imp_en;
dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
rate, parent_rate);
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
(0x1 << PLL_BR_SHIFT),
RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
RG_HDMITX_PLL_BR);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+ if (rate < 165000000) {
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x3;
+ imp_en = 0x0;
+ hdmi_ibias = hdmi_phy->ibias;
+ } else {
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x6;
+ imp_en = 0xf;
+ hdmi_ibias = hdmi_phy->ibias_up;
+ }
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
- (0x3 << PRD_IBIAS_CLK_SHIFT) |
- (0x3 << PRD_IBIAS_D2_SHIFT) |
- (0x3 << PRD_IBIAS_D1_SHIFT) |
- (0x3 << PRD_IBIAS_D0_SHIFT),
+ (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D2_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D1_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D0_SHIFT),
RG_HDMITX_PRD_IBIAS_CLK |
RG_HDMITX_PRD_IBIAS_D2 |
RG_HDMITX_PRD_IBIAS_D1 |
RG_HDMITX_PRD_IBIAS_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
- (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+ (imp_en << DRV_IMP_EN_SHIFT),
+ RG_HDMITX_DRV_IMP_EN);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
(hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
(hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
- (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
- RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
- RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+ (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
+ RG_HDMITX_DRV_IBIAS_CLK |
+ RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 |
+ RG_HDMITX_DRV_IBIAS_D0);
return 0;
}
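
The hunk above makes the PLL pre-driver bias, the driver impedance enable and the driver bias depend on the TMDS rate, clearing or setting RG_HDMITX_PRD_IMP_EN and switching values at 165 MHz. A minimal standalone sketch of that selection using the values from the hunk; the struct and function names here are illustrative, not taken from the driver:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative sketch of the rate-dependent bias selection in the hunk
 * above; struct and function names are not taken from the driver. */
struct bias_cfg {
	unsigned int pre_ibias;	/* value for the PRD_IBIAS_* fields */
	unsigned int imp_en;	/* value for the DRV_IMP_EN field */
	bool use_ibias_up;	/* pick ibias_up instead of ibias */
};

static struct bias_cfg select_bias(unsigned long rate)
{
	struct bias_cfg cfg;

	if (rate < 165000000UL) {	/* TMDS clock below 165 MHz */
		cfg.pre_ibias = 0x3;
		cfg.imp_en = 0x0;
		cfg.use_ibias_up = false;
	} else {			/* 165 MHz and above */
		cfg.pre_ibias = 0x6;
		cfg.imp_en = 0xf;
		cfg.use_ibias_up = true;
	}
	return cfg;
}

int main(void)
{
	struct bias_cfg lo = select_bias(74250000UL);	/* e.g. 720p60 */
	struct bias_cfg hi = select_bias(297000000UL);	/* e.g. 4K30 */

	printf("74.25 MHz: pre_ibias=%#x imp_en=%#x up=%d\n",
	       lo.pre_ibias, lo.imp_en, lo.use_ibias_up);
	printf("297 MHz:   pre_ibias=%#x imp_en=%#x up=%d\n",
	       hi.pre_ibias, hi.imp_en, hi.use_ibias_up);
	return 0;
}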
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
new file mode 100644
index 000000000000..99719afcc77f
--- /dev/null
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -0,0 +1,9 @@
+config DRM_MESON
+ tristate "DRM Support for Amlogic Meson Display Controller"
+ depends on DRM && OF && (ARM || ARM64)
+ depends on ARCH_MESON || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VIDEOMODE_HELPERS
+ select REGMAP_MMIO
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
new file mode 100644
index 000000000000..2591978b8aad
--- /dev/null
+++ b/drivers/gpu/drm/meson/Makefile
@@ -0,0 +1,4 @@
+meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+
+obj-$(CONFIG_DRM_MESON) += meson.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
new file mode 100644
index 000000000000..4109e36c297f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "meson_drv.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/*
+ * CANVAS is a memory zone where physical memory frame information
+ * is stored for the VIU to scan out.
+ */
+
+/* DMC Registers */
+#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
+#define CANVAS_WIDTH_LBIT 29
+#define CANVAS_WIDTH_LWID 3
+#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
+#define CANVAS_WIDTH_HBIT 0
+#define CANVAS_HEIGHT_BIT 9
+#define CANVAS_BLKMODE_BIT 24
+#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
+#define CANVAS_LUT_WR_EN (0x2 << 8)
+#define CANVAS_LUT_RD_EN (0x1 << 8)
+
+void meson_canvas_setup(struct meson_drm *priv,
+ uint32_t canvas_index, uint32_t addr,
+ uint32_t stride, uint32_t height,
+ unsigned int wrap,
+ unsigned int blkmode)
+{
+ unsigned int val;
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
+ (((addr + 7) >> 3)) |
+ (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
+ ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
+ CANVAS_WIDTH_HBIT) |
+ (height << CANVAS_HEIGHT_BIT) |
+ (wrap << 22) |
+ (blkmode << CANVAS_BLKMODE_BIT));
+
+ regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
+ CANVAS_LUT_WR_EN | canvas_index);
+
+ /* Force a read-back to make sure everything is flushed. */
+ regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
+}
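
meson_canvas_setup() packs the frame address, stride, height, wrap and block mode into the two DMC_CAV_LUT data words: address and stride are stored in 8-byte units, the stride field is split between DATAL (its 3 low bits at bit 29) and DATAH, and height, wrap and block mode sit at bits 9, 22 and 24 of DATAH. A standalone sketch that recomputes the two words for a hypothetical 1920x1080 XRGB8888 buffer; canvas_words() is illustrative, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* Illustrative recomputation of the DMC_CAV_LUT_DATAL/DATAH packing done by
 * meson_canvas_setup(); canvas_words() is not a driver function. */
#define CANVAS_WIDTH_LBIT	29
#define CANVAS_WIDTH_LWID	3
#define CANVAS_WIDTH_HBIT	0
#define CANVAS_HEIGHT_BIT	9
#define CANVAS_BLKMODE_BIT	24

static void canvas_words(uint32_t addr, uint32_t stride, uint32_t height,
			 unsigned int wrap, unsigned int blkmode,
			 uint32_t *datal, uint32_t *datah)
{
	uint32_t addr8 = (addr + 7) >> 3;	/* address in 8-byte units */
	uint32_t stride8 = (stride + 7) >> 3;	/* stride in 8-byte units */

	/* DATAL holds the address and the 3 low bits of the stride */
	*datal = addr8 | (stride8 << CANVAS_WIDTH_LBIT);
	/* DATAH holds the remaining stride bits, height, wrap and blkmode */
	*datah = ((stride8 >> CANVAS_WIDTH_LWID) << CANVAS_WIDTH_HBIT) |
		 (height << CANVAS_HEIGHT_BIT) |
		 (wrap << 22) |
		 (blkmode << CANVAS_BLKMODE_BIT);
}

int main(void)
{
	uint32_t lo, hi;

	/* hypothetical 1920x1080 XRGB8888 buffer: stride = 1920 * 4 bytes */
	canvas_words(0x10000000, 1920 * 4, 1080, 0, 0, &lo, &hi);
	printf("DATAL=%#010x DATAH=%#010x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}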
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
new file mode 100644
index 000000000000..af1759da4b27
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Canvas LUT Memory */
+
+#ifndef __MESON_CANVAS_H
+#define __MESON_CANVAS_H
+
+#define MESON_CANVAS_ID_OSD1 0x4e
+
+/* Canvas configuration. */
+#define MESON_CANVAS_WRAP_NONE 0x00
+#define MESON_CANVAS_WRAP_X 0x01
+#define MESON_CANVAS_WRAP_Y 0x02
+
+#define MESON_CANVAS_BLKMODE_LINEAR 0x00
+#define MESON_CANVAS_BLKMODE_32x32 0x01
+#define MESON_CANVAS_BLKMODE_64x64 0x02
+
+void meson_canvas_setup(struct meson_drm *priv,
+ uint32_t canvas_index, uint32_t addr,
+ uint32_t stride, uint32_t height,
+ unsigned int wrap,
+ unsigned int blkmode);
+
+#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
new file mode 100644
index 000000000000..749770e5c65f
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "meson_crtc.h"
+#include "meson_plane.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_registers.h"
+
+/* CRTC definition */
+
+struct meson_crtc {
+ struct drm_crtc base;
+ struct drm_pending_vblank_event *event;
+ struct meson_drm *priv;
+};
+#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
+
+/* CRTC */
+
+static const struct drm_crtc_funcs meson_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+};
+
+static void meson_crtc_enable(struct drm_crtc *crtc)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct drm_plane *plane = meson_crtc->priv->primary_plane;
+ struct meson_drm *priv = meson_crtc->priv;
+
+ /* Enable VPP Postblend */
+ writel(plane->state->crtc_w,
+ priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.osd1_enabled = true;
+}
+
+static void meson_crtc_disable(struct drm_crtc *crtc)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ priv->viu.osd1_enabled = false;
+
+ /* Disable VPP Postblend */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ meson_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ if (priv->viu.osd1_enabled)
+ priv->viu.osd1_commit = true;
+}
+
+static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
+ .enable = meson_crtc_enable,
+ .disable = meson_crtc_disable,
+ .atomic_begin = meson_crtc_atomic_begin,
+ .atomic_flush = meson_crtc_atomic_flush,
+};
+
+void meson_crtc_irq(struct meson_drm *priv)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
+ unsigned long flags;
+
+ /* Update the OSD registers */
+ if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
+ writel_relaxed(priv->viu.osd1_ctrl_stat,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[0],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[1],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W1));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[2],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W2));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[3],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
+ writel_relaxed(priv->viu.osd1_blk0_cfg[4],
+ priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
+
+ /* If the output is interlaced, make use of the scaler */
+ if (priv->viu.osd1_interlace) {
+ struct drm_plane *plane = priv->primary_plane;
+ struct drm_plane_state *state = plane->state;
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+
+ meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
+ } else
+ meson_vpp_disable_interlace_vscaler_osd1(priv);
+
+ /* Enable OSD1 */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.osd1_commit = false;
+ }
+
+ drm_crtc_handle_vblank(priv->crtc);
+
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+ if (meson_crtc->event) {
+ drm_crtc_send_vblank_event(priv->crtc, meson_crtc->event);
+ drm_crtc_vblank_put(priv->crtc);
+ meson_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+}
+
+int meson_crtc_create(struct meson_drm *priv)
+{
+ struct meson_crtc *meson_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ meson_crtc = devm_kzalloc(priv->drm->dev, sizeof(*meson_crtc),
+ GFP_KERNEL);
+ if (!meson_crtc)
+ return -ENOMEM;
+
+ meson_crtc->priv = priv;
+ crtc = &meson_crtc->base;
+ ret = drm_crtc_init_with_planes(priv->drm, crtc,
+ priv->primary_plane, NULL,
+ &meson_crtc_funcs, "meson_crtc");
+ if (ret) {
+ dev_err(priv->drm->dev, "Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+
+ priv->crtc = crtc;
+
+ return 0;
+}
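
The plane and CRTC code cooperate through shadow registers: meson_plane_atomic_update() only fills the values in priv->viu, meson_crtc_atomic_flush() sets osd1_commit, and the vblank IRQ in meson_crtc_irq() finally writes the shadowed OSD1 configuration to hardware. A minimal standalone sketch of that latch pattern, with plain variables standing in for the registers and the IRQ; none of these names are driver code:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Minimal sketch of the shadow-register/commit pattern used above;
 * plain variables stand in for the OSD1 registers and the vblank IRQ. */
struct shadow {
	uint32_t blk0_cfg[5];
	bool commit;
};

static uint32_t hw_regs[5];	/* stand-in for VIU_OSD1_BLK0_CFG_W0..W4 */

static void plane_update(struct shadow *s, const uint32_t cfg[5])
{
	for (int i = 0; i < 5; i++)	/* atomic_update: fill shadow only */
		s->blk0_cfg[i] = cfg[i];
}

static void crtc_flush(struct shadow *s)
{
	s->commit = true;		/* atomic_flush: arm the commit */
}

static void vblank_irq(struct shadow *s)
{
	if (!s->commit)
		return;
	for (int i = 0; i < 5; i++)	/* irq: push shadow to hardware */
		hw_regs[i] = s->blk0_cfg[i];
	s->commit = false;
}

int main(void)
{
	struct shadow s = { { 0 }, false };
	uint32_t cfg[5] = { 1, 2, 3, 4, 5 };

	plane_update(&s, cfg);
	crtc_flush(&s);
	vblank_irq(&s);			/* registers change only here */
	printf("W0=%u W4=%u\n", (unsigned)hw_regs[0], (unsigned)hw_regs[4]);
	return 0;
}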
diff --git a/drivers/gpu/drm/meson/meson_crtc.h b/drivers/gpu/drm/meson/meson_crtc.h
new file mode 100644
index 000000000000..b62b9e51764d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_crtc.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_CRTC_H
+#define __MESON_CRTC_H
+
+#include "meson_drv.h"
+
+int meson_crtc_create(struct meson_drm *priv);
+
+void meson_crtc_irq(struct meson_drm *priv);
+
+#endif /* __MESON_CRTC_H */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
new file mode 100644
index 000000000000..ff1f6019b97b
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fb_helper.h>
+
+#include "meson_drv.h"
+#include "meson_plane.h"
+#include "meson_crtc.h"
+#include "meson_venc_cvbs.h"
+
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_venc.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+#define DRIVER_NAME "meson"
+#define DRIVER_DESC "Amlogic Meson DRM driver"
+
+/*
+ * Video Processing Unit
+ *
+ * The VPU handles the global video processing; it includes management of the
+ * clock gates, block reset lines and power domains.
+ *
+ * What is missing:
+ * - Full reset of the entire video processing HW blocks
+ * - Scaling and setup of the VPU clock
+ * - Bus clock gates
+ * - Powering up the video processing HW blocks
+ * - Powering up the HDMI controller and PHY
+ */
+
+static void meson_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs meson_mode_config_funcs = {
+ .output_poll_changed = meson_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_fb_cma_create,
+};
+
+static int meson_enable_vblank(struct drm_device *dev, unsigned int crtc)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ meson_venc_enable_vsync(priv);
+
+ return 0;
+}
+
+static void meson_disable_vblank(struct drm_device *dev, unsigned int crtc)
+{
+ struct meson_drm *priv = dev->dev_private;
+
+ meson_venc_disable_vsync(priv);
+}
+
+static irqreturn_t meson_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct meson_drm *priv = dev->dev_private;
+
+ (void)readl_relaxed(priv->io_base + _REG(VENC_INTFLAG));
+
+ meson_crtc_irq(priv);
+
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver meson_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+ DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+
+ /* Vblank */
+ .enable_vblank = meson_enable_vblank,
+ .disable_vblank = meson_disable_vblank,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+
+ /* IRQ */
+ .irq_handler = meson_irq,
+
+ /* PRIME Ops */
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ /* GEM Ops */
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ /* Misc */
+ .fops = &fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = "20161109",
+ .major = 1,
+ .minor = 0,
+};
+
+static bool meson_vpu_has_available_connectors(struct device *dev)
+{
+ struct device_node *ep, *remote;
+
+ /* Parse each endpoint and check whether a remote port exists */
+ for_each_endpoint_of_node(dev->of_node, ep) {
+ /* If the endpoint node exists, consider it enabled */
+ remote = of_graph_get_remote_port(ep);
+ if (remote)
+ return true;
+ }
+
+ return false;
+}
+
+static struct regmap_config meson_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+};
+
+static int meson_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_drm *priv;
+ struct drm_device *drm;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ /* Checks if an output connector is available */
+ if (!meson_vpu_has_available_connectors(dev)) {
+ dev_err(dev, "No output connector available\n");
+ return -ENODEV;
+ }
+
+ drm = drm_dev_alloc(&meson_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto free_drm;
+ }
+ drm->dev_private = priv;
+ priv->drm = drm;
+ priv->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->io_base = regs;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs)
+ return -EADDRNOTAVAIL;
+
+ priv->hhi = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->hhi)) {
+ dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
+ return PTR_ERR(priv->hhi);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs)
+ return -EADDRNOTAVAIL;
+
+ priv->dmc = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->dmc)) {
+ dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+ return PTR_ERR(priv->dmc);
+ }
+
+ priv->vsync_irq = platform_get_irq(pdev, 0);
+
+ drm_vblank_init(drm, 1);
+ drm_mode_config_init(drm);
+
+ /* Encoder Initialization */
+
+ ret = meson_venc_cvbs_create(priv);
+ if (ret)
+ goto free_drm;
+
+ /* Hardware Initialization */
+
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+
+ ret = meson_plane_create(priv);
+ if (ret)
+ goto free_drm;
+
+ ret = meson_crtc_create(priv);
+ if (ret)
+ goto free_drm;
+
+ ret = drm_irq_install(drm, priv->vsync_irq);
+ if (ret)
+ goto free_drm;
+
+ drm_mode_config_reset(drm);
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &meson_mode_config_funcs;
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32,
+ drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ goto free_drm;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto free_drm;
+
+ return 0;
+
+free_drm:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+ struct meson_drm *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_fbdev_cma_fini(priv->fbdev);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "amlogic,meson-gxbb-vpu" },
+ { .compatible = "amlogic,meson-gxl-vpu" },
+ { .compatible = "amlogic,meson-gxm-vpu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver meson_drm_platform_driver = {
+ .probe = meson_drv_probe,
+ .remove = meson_drv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = dt_match,
+ },
+};
+
+module_platform_driver(meson_drm_platform_driver);
+
+MODULE_AUTHOR("Jasper St. Pierre <jstpierre@mecheye.net>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
new file mode 100644
index 000000000000..6195327c51ca
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MESON_DRV_H
+#define __MESON_DRV_H
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <drm/drmP.h>
+
+struct meson_drm {
+ struct device *dev;
+ void __iomem *io_base;
+ struct regmap *hhi;
+ struct regmap *dmc;
+ int vsync_irq;
+
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_plane *primary_plane;
+
+ /* Components Data */
+ struct {
+ bool osd1_enabled;
+ bool osd1_interlace;
+ bool osd1_commit;
+ uint32_t osd1_ctrl_stat;
+ uint32_t osd1_blk0_cfg[5];
+ } viu;
+
+ struct {
+ unsigned int current_mode;
+ } venc;
+};
+
+static inline int meson_vpu_is_compatible(struct meson_drm *priv,
+ const char *compat)
+{
+ return of_device_is_compatible(priv->dev->of_node, compat);
+}
+
+#endif /* __MESON_DRV_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
new file mode 100644
index 000000000000..4942ca090b46
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_plane.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+struct meson_plane {
+ struct drm_plane base;
+ struct meson_drm *priv;
+};
+#define to_meson_plane(x) container_of(x, struct meson_plane, base)
+
+static int meson_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip = { 0, };
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ clip.x2 = crtc_state->mode.hdisplay;
+ clip.y2 = crtc_state->mode.vdisplay;
+
+ return drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+/* Takes a fixed 16.16 number and converts it to integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+ return value >> 16;
+}
+
+static void meson_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct meson_drm *priv = meson_plane->priv;
+ struct drm_gem_cma_object *gem;
+ struct drm_rect src = {
+ .x1 = (state->src_x),
+ .y1 = (state->src_y),
+ .x2 = (state->src_x + state->src_w),
+ .y2 = (state->src_y + state->src_h),
+ };
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ unsigned long flags;
+
+ /*
+ * Update Coordinates
+ * Update Formats
+ * Update Buffer
+ * Enable Plane
+ */
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+ /* Enable OSD and BLK0, set max global alpha */
+ priv->viu.osd1_ctrl_stat = OSD_ENABLE |
+ (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ OSD_BLK0_ENABLE;
+
+ /* Set up BLK0 to point to the right canvas */
+ priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+ OSD_ENDIANNESS_LE);
+
+ /* On GXBB, Use the old non-HDR RGB2YUV converter */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ /* For XRGB, replace the pixel's alpha by 0xFF */
+ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ /* For ARGB, use the pixel's alpha */
+ writel_bits_relaxed(OSD_REPLACE_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
+ case DRM_FORMAT_RGB888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+ OSD_COLOR_MATRIX_24_RGB;
+ break;
+ case DRM_FORMAT_RGB565:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
+ OSD_COLOR_MATRIX_16_RGB565;
+ break;
+ };
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ priv->viu.osd1_interlace = true;
+
+ dest.y1 /= 2;
+ dest.y2 /= 2;
+ } else
+ priv->viu.osd1_interlace = false;
+
+ /*
+ * The format of these registers is (x2 << 16 | x1),
+ * where x2 is exclusive.
+ * e.g. +30x1920 would be (1919 << 16) | 30
+ */
+ priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
+ fixed16_to_int(src.x1);
+ priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
+ fixed16_to_int(src.y1);
+ priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
+ priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
+
+ /* Update Canvas with buffer address */
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ gem->paddr, fb->pitches[0],
+ fb->height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR);
+
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+}
+
+static void meson_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_plane *meson_plane = to_meson_plane(plane);
+ struct meson_drm *priv = meson_plane->priv;
+
+ /* Disable OSD1 */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+}
+
+static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
+ .atomic_check = meson_plane_atomic_check,
+ .atomic_disable = meson_plane_atomic_disable,
+ .atomic_update = meson_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs meson_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+};
+
+int meson_plane_create(struct meson_drm *priv)
+{
+ struct meson_plane *meson_plane;
+ struct drm_plane *plane;
+
+ meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ GFP_KERNEL);
+ if (!meson_plane)
+ return -ENOMEM;
+
+ meson_plane->priv = priv;
+ plane = &meson_plane->base;
+
+ drm_universal_plane_init(priv->drm, plane, 0xFF,
+ &meson_plane_funcs,
+ supported_drm_formats,
+ ARRAY_SIZE(supported_drm_formats),
+ DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
+
+ drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+
+ priv->primary_plane = plane;
+
+ return 0;
+}
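
meson_plane_atomic_update() packs its coordinates as described in its comment: the src_* values are 16.16 fixed point, hence fixed16_to_int(), and each BLK0 config word stores ((end - 1) << 16) | start. A standalone sketch of that arithmetic for a hypothetical 1920-pixel-wide source starting at 0; pack_range() is illustrative, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the 16.16 fixed-point conversion and the
 * "((end - 1) << 16) | start" packing used for the OSD1 BLK0 config words.
 * pack_range() is not a driver function. */
static int64_t fixed16_to_int(int64_t value)
{
	return value >> 16;
}

static uint32_t pack_range(uint32_t x1, uint32_t x2_exclusive)
{
	return ((x2_exclusive - 1) << 16) | x1;
}

int main(void)
{
	int64_t src_x = 0;
	int64_t src_w = (int64_t)1920 << 16;	/* 1920 pixels in 16.16 */
	uint32_t w1;

	w1 = pack_range((uint32_t)fixed16_to_int(src_x),
			(uint32_t)fixed16_to_int(src_x + src_w));
	/* 1920 pixels starting at 0 -> (1919 << 16) | 0 = 0x077f0000 */
	printf("BLK0_CFG_W1 = %#010x\n", (unsigned)w1);
	return 0;
}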
diff --git a/drivers/gpu/drm/meson/meson_plane.h b/drivers/gpu/drm/meson/meson_plane.h
new file mode 100644
index 000000000000..e26b8b0aa1fa
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_plane.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_PLANE_H
+#define __MESON_PLANE_H
+
+#include "meson_drv.h"
+
+int meson_plane_create(struct meson_drm *priv);
+
+#endif /* __MESON_PLANE_H */
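
The register header added next defines two helpers used throughout the driver: _REG(), which turns the data-sheet word offsets into byte offsets by shifting left by 2, and writel_bits_relaxed(), a masked read-modify-write on an MMIO register. A standalone sketch of the same two operations applied to a plain variable; write_bits() and fake_vpp_misc are illustrative stand-ins:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the offset shift done by _REG() and the masked
 * read-modify-write done by writel_bits_relaxed(), applied here to a
 * plain variable instead of an MMIO register. */
#define _REG(reg)	((reg) << 2)	/* data-sheet word offset -> byte offset */

static uint32_t fake_vpp_misc;		/* stand-in for the VPP_MISC register */

static void write_bits(uint32_t mask, uint32_t val, uint32_t *reg)
{
	*reg = (*reg & ~mask) | val;
}

int main(void)
{
	uint32_t vpp_misc = 0x1d26;		/* VPP_MISC word offset */
	uint32_t postblend_en = 1u << 7;	/* VPP_POSTBLEND_ENABLE */

	printf("VPP_MISC byte offset: %#x\n", (unsigned)_REG(vpp_misc));

	write_bits(postblend_en, postblend_en, &fake_vpp_misc);	/* set */
	printf("after set:   %#x\n", (unsigned)fake_vpp_misc);
	write_bits(postblend_en, 0, &fake_vpp_misc);			/* clear */
	printf("after clear: %#x\n", (unsigned)fake_vpp_misc);
	return 0;
}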
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
new file mode 100644
index 000000000000..6adf9c13fafa
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -0,0 +1,1395 @@
+/*
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __MESON_REGISTERS_H
+#define __MESON_REGISTERS_H
+
+/* Shift all registers by 2 */
+#define _REG(reg) ((reg) << 2)
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+
+/* vpp2 */
+#define VPP2_DUMMY_DATA 0x1900
+#define VPP2_LINE_IN_LENGTH 0x1901
+#define VPP2_PIC_IN_HEIGHT 0x1902
+#define VPP2_SCALE_COEF_IDX 0x1903
+#define VPP2_SCALE_COEF 0x1904
+#define VPP2_VSC_REGION12_STARTP 0x1905
+#define VPP2_VSC_REGION34_STARTP 0x1906
+#define VPP2_VSC_REGION4_ENDP 0x1907
+#define VPP2_VSC_START_PHASE_STEP 0x1908
+#define VPP2_VSC_REGION0_PHASE_SLOPE 0x1909
+#define VPP2_VSC_REGION1_PHASE_SLOPE 0x190a
+#define VPP2_VSC_REGION3_PHASE_SLOPE 0x190b
+#define VPP2_VSC_REGION4_PHASE_SLOPE 0x190c
+#define VPP2_VSC_PHASE_CTRL 0x190d
+#define VPP2_VSC_INI_PHASE 0x190e
+#define VPP2_HSC_REGION12_STARTP 0x1910
+#define VPP2_HSC_REGION34_STARTP 0x1911
+#define VPP2_HSC_REGION4_ENDP 0x1912
+#define VPP2_HSC_START_PHASE_STEP 0x1913
+#define VPP2_HSC_REGION0_PHASE_SLOPE 0x1914
+#define VPP2_HSC_REGION1_PHASE_SLOPE 0x1915
+#define VPP2_HSC_REGION3_PHASE_SLOPE 0x1916
+#define VPP2_HSC_REGION4_PHASE_SLOPE 0x1917
+#define VPP2_HSC_PHASE_CTRL 0x1918
+#define VPP2_SC_MISC 0x1919
+#define VPP2_PREBLEND_VD1_H_START_END 0x191a
+#define VPP2_PREBLEND_VD1_V_START_END 0x191b
+#define VPP2_POSTBLEND_VD1_H_START_END 0x191c
+#define VPP2_POSTBLEND_VD1_V_START_END 0x191d
+#define VPP2_PREBLEND_H_SIZE 0x1920
+#define VPP2_POSTBLEND_H_SIZE 0x1921
+#define VPP2_HOLD_LINES 0x1922
+#define VPP2_BLEND_ONECOLOR_CTRL 0x1923
+#define VPP2_PREBLEND_CURRENT_XY 0x1924
+#define VPP2_POSTBLEND_CURRENT_XY 0x1925
+#define VPP2_MISC 0x1926
+#define VPP2_OFIFO_SIZE 0x1927
+#define VPP2_FIFO_STATUS 0x1928
+#define VPP2_SMOKE_CTRL 0x1929
+#define VPP2_SMOKE1_VAL 0x192a
+#define VPP2_SMOKE2_VAL 0x192b
+#define VPP2_SMOKE1_H_START_END 0x192d
+#define VPP2_SMOKE1_V_START_END 0x192e
+#define VPP2_SMOKE2_H_START_END 0x192f
+#define VPP2_SMOKE2_V_START_END 0x1930
+#define VPP2_SCO_FIFO_CTRL 0x1933
+#define VPP2_HSC_PHASE_CTRL1 0x1934
+#define VPP2_HSC_INI_PAT_CTRL 0x1935
+#define VPP2_VADJ_CTRL 0x1940
+#define VPP2_VADJ1_Y 0x1941
+#define VPP2_VADJ1_MA_MB 0x1942
+#define VPP2_VADJ1_MC_MD 0x1943
+#define VPP2_VADJ2_Y 0x1944
+#define VPP2_VADJ2_MA_MB 0x1945
+#define VPP2_VADJ2_MC_MD 0x1946
+#define VPP2_MATRIX_PROBE_COLOR 0x195c
+#define VPP2_MATRIX_HL_COLOR 0x195d
+#define VPP2_MATRIX_PROBE_POS 0x195e
+#define VPP2_MATRIX_CTRL 0x195f
+#define VPP2_MATRIX_COEF00_01 0x1960
+#define VPP2_MATRIX_COEF02_10 0x1961
+#define VPP2_MATRIX_COEF11_12 0x1962
+#define VPP2_MATRIX_COEF20_21 0x1963
+#define VPP2_MATRIX_COEF22 0x1964
+#define VPP2_MATRIX_OFFSET0_1 0x1965
+#define VPP2_MATRIX_OFFSET2 0x1966
+#define VPP2_MATRIX_PRE_OFFSET0_1 0x1967
+#define VPP2_MATRIX_PRE_OFFSET2 0x1968
+#define VPP2_DUMMY_DATA1 0x1969
+#define VPP2_GAINOFF_CTRL0 0x196a
+#define VPP2_GAINOFF_CTRL1 0x196b
+#define VPP2_GAINOFF_CTRL2 0x196c
+#define VPP2_GAINOFF_CTRL3 0x196d
+#define VPP2_GAINOFF_CTRL4 0x196e
+#define VPP2_CHROMA_ADDR_PORT 0x1970
+#define VPP2_CHROMA_DATA_PORT 0x1971
+#define VPP2_GCLK_CTRL0 0x1972
+#define VPP2_GCLK_CTRL1 0x1973
+#define VPP2_SC_GCLK_CTRL 0x1974
+#define VPP2_MISC1 0x1976
+#define VPP2_DNLP_CTRL_00 0x1981
+#define VPP2_DNLP_CTRL_01 0x1982
+#define VPP2_DNLP_CTRL_02 0x1983
+#define VPP2_DNLP_CTRL_03 0x1984
+#define VPP2_DNLP_CTRL_04 0x1985
+#define VPP2_DNLP_CTRL_05 0x1986
+#define VPP2_DNLP_CTRL_06 0x1987
+#define VPP2_DNLP_CTRL_07 0x1988
+#define VPP2_DNLP_CTRL_08 0x1989
+#define VPP2_DNLP_CTRL_09 0x198a
+#define VPP2_DNLP_CTRL_10 0x198b
+#define VPP2_DNLP_CTRL_11 0x198c
+#define VPP2_DNLP_CTRL_12 0x198d
+#define VPP2_DNLP_CTRL_13 0x198e
+#define VPP2_DNLP_CTRL_14 0x198f
+#define VPP2_DNLP_CTRL_15 0x1990
+#define VPP2_VE_ENABLE_CTRL 0x19a1
+#define VPP2_VE_DEMO_LEFT_TOP_SCREEN_WIDTH 0x19a2
+#define VPP2_VE_DEMO_CENTER_BAR 0x19a3
+#define VPP2_VE_H_V_SIZE 0x19a4
+#define VPP2_VDO_MEAS_CTRL 0x19a8
+#define VPP2_VDO_MEAS_VS_COUNT_HI 0x19a9
+#define VPP2_VDO_MEAS_VS_COUNT_LO 0x19aa
+#define VPP2_OSD_VSC_PHASE_STEP 0x19c0
+#define VPP2_OSD_VSC_INI_PHASE 0x19c1
+#define VPP2_OSD_VSC_CTRL0 0x19c2
+#define VPP2_OSD_HSC_PHASE_STEP 0x19c3
+#define VPP2_OSD_HSC_INI_PHASE 0x19c4
+#define VPP2_OSD_HSC_CTRL0 0x19c5
+#define VPP2_OSD_HSC_INI_PAT_CTRL 0x19c6
+#define VPP2_OSD_SC_DUMMY_DATA 0x19c7
+#define VPP2_OSD_SC_CTRL0 0x19c8
+#define VPP2_OSD_SCI_WH_M1 0x19c9
+#define VPP2_OSD_SCO_H_START_END 0x19ca
+#define VPP2_OSD_SCO_V_START_END 0x19cb
+#define VPP2_OSD_SCALE_COEF_IDX 0x19cc
+#define VPP2_OSD_SCALE_COEF 0x19cd
+#define VPP2_INT_LINE_NUM 0x19ce
+
+/* viu */
+#define VIU_ADDR_START 0x1a00
+#define VIU_ADDR_END 0x1aff
+#define VIU_SW_RESET 0x1a01
+#define VIU_MISC_CTRL0 0x1a06
+#define VIU_MISC_CTRL1 0x1a07
+#define D2D3_INTF_LENGTH 0x1a08
+#define D2D3_INTF_CTRL0 0x1a09
+#define VIU_OSD1_CTRL_STAT 0x1a10
+#define VIU_OSD1_CTRL_STAT2 0x1a2d
+#define VIU_OSD1_COLOR_ADDR 0x1a11
+#define VIU_OSD1_COLOR 0x1a12
+#define VIU_OSD1_TCOLOR_AG0 0x1a17
+#define VIU_OSD1_TCOLOR_AG1 0x1a18
+#define VIU_OSD1_TCOLOR_AG2 0x1a19
+#define VIU_OSD1_TCOLOR_AG3 0x1a1a
+#define VIU_OSD1_BLK0_CFG_W0 0x1a1b
+#define VIU_OSD1_BLK1_CFG_W0 0x1a1f
+#define VIU_OSD1_BLK2_CFG_W0 0x1a23
+#define VIU_OSD1_BLK3_CFG_W0 0x1a27
+#define VIU_OSD1_BLK0_CFG_W1 0x1a1c
+#define VIU_OSD1_BLK1_CFG_W1 0x1a20
+#define VIU_OSD1_BLK2_CFG_W1 0x1a24
+#define VIU_OSD1_BLK3_CFG_W1 0x1a28
+#define VIU_OSD1_BLK0_CFG_W2 0x1a1d
+#define VIU_OSD1_BLK1_CFG_W2 0x1a21
+#define VIU_OSD1_BLK2_CFG_W2 0x1a25
+#define VIU_OSD1_BLK3_CFG_W2 0x1a29
+#define VIU_OSD1_BLK0_CFG_W3 0x1a1e
+#define VIU_OSD1_BLK1_CFG_W3 0x1a22
+#define VIU_OSD1_BLK2_CFG_W3 0x1a26
+#define VIU_OSD1_BLK3_CFG_W3 0x1a2a
+#define VIU_OSD1_BLK0_CFG_W4 0x1a13
+#define VIU_OSD1_BLK1_CFG_W4 0x1a14
+#define VIU_OSD1_BLK2_CFG_W4 0x1a15
+#define VIU_OSD1_BLK3_CFG_W4 0x1a16
+#define VIU_OSD1_FIFO_CTRL_STAT 0x1a2b
+#define VIU_OSD1_TEST_RDDATA 0x1a2c
+#define VIU_OSD1_PROT_CTRL 0x1a2e
+#define VIU_OSD2_CTRL_STAT 0x1a30
+#define VIU_OSD2_CTRL_STAT2 0x1a4d
+#define VIU_OSD2_COLOR_ADDR 0x1a31
+#define VIU_OSD2_COLOR 0x1a32
+#define VIU_OSD2_HL1_H_START_END 0x1a33
+#define VIU_OSD2_HL1_V_START_END 0x1a34
+#define VIU_OSD2_HL2_H_START_END 0x1a35
+#define VIU_OSD2_HL2_V_START_END 0x1a36
+#define VIU_OSD2_TCOLOR_AG0 0x1a37
+#define VIU_OSD2_TCOLOR_AG1 0x1a38
+#define VIU_OSD2_TCOLOR_AG2 0x1a39
+#define VIU_OSD2_TCOLOR_AG3 0x1a3a
+#define VIU_OSD2_BLK0_CFG_W0 0x1a3b
+#define VIU_OSD2_BLK1_CFG_W0 0x1a3f
+#define VIU_OSD2_BLK2_CFG_W0 0x1a43
+#define VIU_OSD2_BLK3_CFG_W0 0x1a47
+#define VIU_OSD2_BLK0_CFG_W1 0x1a3c
+#define VIU_OSD2_BLK1_CFG_W1 0x1a40
+#define VIU_OSD2_BLK2_CFG_W1 0x1a44
+#define VIU_OSD2_BLK3_CFG_W1 0x1a48
+#define VIU_OSD2_BLK0_CFG_W2 0x1a3d
+#define VIU_OSD2_BLK1_CFG_W2 0x1a41
+#define VIU_OSD2_BLK2_CFG_W2 0x1a45
+#define VIU_OSD2_BLK3_CFG_W2 0x1a49
+#define VIU_OSD2_BLK0_CFG_W3 0x1a3e
+#define VIU_OSD2_BLK1_CFG_W3 0x1a42
+#define VIU_OSD2_BLK2_CFG_W3 0x1a46
+#define VIU_OSD2_BLK3_CFG_W3 0x1a4a
+#define VIU_OSD2_BLK0_CFG_W4 0x1a64
+#define VIU_OSD2_BLK1_CFG_W4 0x1a65
+#define VIU_OSD2_BLK2_CFG_W4 0x1a66
+#define VIU_OSD2_BLK3_CFG_W4 0x1a67
+#define VIU_OSD2_FIFO_CTRL_STAT 0x1a4b
+#define VIU_OSD2_TEST_RDDATA 0x1a4c
+#define VIU_OSD2_PROT_CTRL 0x1a4e
+
+#define VD1_IF0_GEN_REG 0x1a50
+#define VD1_IF0_CANVAS0 0x1a51
+#define VD1_IF0_CANVAS1 0x1a52
+#define VD1_IF0_LUMA_X0 0x1a53
+#define VD1_IF0_LUMA_Y0 0x1a54
+#define VD1_IF0_CHROMA_X0 0x1a55
+#define VD1_IF0_CHROMA_Y0 0x1a56
+#define VD1_IF0_LUMA_X1 0x1a57
+#define VD1_IF0_LUMA_Y1 0x1a58
+#define VD1_IF0_CHROMA_X1 0x1a59
+#define VD1_IF0_CHROMA_Y1 0x1a5a
+#define VD1_IF0_RPT_LOOP 0x1a5b
+#define VD1_IF0_LUMA0_RPT_PAT 0x1a5c
+#define VD1_IF0_CHROMA0_RPT_PAT 0x1a5d
+#define VD1_IF0_LUMA1_RPT_PAT 0x1a5e
+#define VD1_IF0_CHROMA1_RPT_PAT 0x1a5f
+#define VD1_IF0_LUMA_PSEL 0x1a60
+#define VD1_IF0_CHROMA_PSEL 0x1a61
+#define VD1_IF0_DUMMY_PIXEL 0x1a62
+#define VD1_IF0_LUMA_FIFO_SIZE 0x1a63
+#define VD1_IF0_RANGE_MAP_Y 0x1a6a
+#define VD1_IF0_RANGE_MAP_CB 0x1a6b
+#define VD1_IF0_RANGE_MAP_CR 0x1a6c
+#define VD1_IF0_GEN_REG2 0x1a6d
+#define VD1_IF0_PROT_CNTL 0x1a6e
+#define VIU_VD1_FMT_CTRL 0x1a68
+#define VIU_VD1_FMT_W 0x1a69
+#define VD2_IF0_GEN_REG 0x1a70
+#define VD2_IF0_CANVAS0 0x1a71
+#define VD2_IF0_CANVAS1 0x1a72
+#define VD2_IF0_LUMA_X0 0x1a73
+#define VD2_IF0_LUMA_Y0 0x1a74
+#define VD2_IF0_CHROMA_X0 0x1a75
+#define VD2_IF0_CHROMA_Y0 0x1a76
+#define VD2_IF0_LUMA_X1 0x1a77
+#define VD2_IF0_LUMA_Y1 0x1a78
+#define VD2_IF0_CHROMA_X1 0x1a79
+#define VD2_IF0_CHROMA_Y1 0x1a7a
+#define VD2_IF0_RPT_LOOP 0x1a7b
+#define VD2_IF0_LUMA0_RPT_PAT 0x1a7c
+#define VD2_IF0_CHROMA0_RPT_PAT 0x1a7d
+#define VD2_IF0_LUMA1_RPT_PAT 0x1a7e
+#define VD2_IF0_CHROMA1_RPT_PAT 0x1a7f
+#define VD2_IF0_LUMA_PSEL 0x1a80
+#define VD2_IF0_CHROMA_PSEL 0x1a81
+#define VD2_IF0_DUMMY_PIXEL 0x1a82
+#define VD2_IF0_LUMA_FIFO_SIZE 0x1a83
+#define VD2_IF0_RANGE_MAP_Y 0x1a8a
+#define VD2_IF0_RANGE_MAP_CB 0x1a8b
+#define VD2_IF0_RANGE_MAP_CR 0x1a8c
+#define VD2_IF0_GEN_REG2 0x1a8d
+#define VD2_IF0_PROT_CNTL 0x1a8e
+#define VIU_VD2_FMT_CTRL 0x1a88
+#define VIU_VD2_FMT_W 0x1a89
+
+/* VIU Matrix Registers */
+#define VIU_OSD1_MATRIX_CTRL 0x1a90
+#define VIU_OSD1_MATRIX_COEF00_01 0x1a91
+#define VIU_OSD1_MATRIX_COEF02_10 0x1a92
+#define VIU_OSD1_MATRIX_COEF11_12 0x1a93
+#define VIU_OSD1_MATRIX_COEF20_21 0x1a94
+#define VIU_OSD1_MATRIX_COLMOD_COEF42 0x1a95
+#define VIU_OSD1_MATRIX_OFFSET0_1 0x1a96
+#define VIU_OSD1_MATRIX_OFFSET2 0x1a97
+#define VIU_OSD1_MATRIX_PRE_OFFSET0_1 0x1a98
+#define VIU_OSD1_MATRIX_PRE_OFFSET2 0x1a99
+#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
+#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
+#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VIU_OSD1_EOTF_CTL 0x1ad4
+#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
+#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
+#define VIU_OSD1_EOTF_COEF11_12 0x1ad7
+#define VIU_OSD1_EOTF_COEF20_21 0x1ad8
+#define VIU_OSD1_EOTF_COEF22_RS 0x1ad9
+#define VIU_OSD1_EOTF_LUT_ADDR_PORT 0x1ada
+#define VIU_OSD1_EOTF_LUT_DATA_PORT 0x1adb
+#define VIU_OSD1_OETF_CTL 0x1adc
+#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
+#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+
+/* vpp */
+#define VPP_DUMMY_DATA 0x1d00
+#define VPP_LINE_IN_LENGTH 0x1d01
+#define VPP_PIC_IN_HEIGHT 0x1d02
+#define VPP_SCALE_COEF_IDX 0x1d03
+#define VPP_SCALE_COEF 0x1d04
+#define VPP_VSC_REGION12_STARTP 0x1d05
+#define VPP_VSC_REGION34_STARTP 0x1d06
+#define VPP_VSC_REGION4_ENDP 0x1d07
+#define VPP_VSC_START_PHASE_STEP 0x1d08
+#define VPP_VSC_REGION0_PHASE_SLOPE 0x1d09
+#define VPP_VSC_REGION1_PHASE_SLOPE 0x1d0a
+#define VPP_VSC_REGION3_PHASE_SLOPE 0x1d0b
+#define VPP_VSC_REGION4_PHASE_SLOPE 0x1d0c
+#define VPP_VSC_PHASE_CTRL 0x1d0d
+#define VPP_VSC_INI_PHASE 0x1d0e
+#define VPP_HSC_REGION12_STARTP 0x1d10
+#define VPP_HSC_REGION34_STARTP 0x1d11
+#define VPP_HSC_REGION4_ENDP 0x1d12
+#define VPP_HSC_START_PHASE_STEP 0x1d13
+#define VPP_HSC_REGION0_PHASE_SLOPE 0x1d14
+#define VPP_HSC_REGION1_PHASE_SLOPE 0x1d15
+#define VPP_HSC_REGION3_PHASE_SLOPE 0x1d16
+#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17
+#define VPP_HSC_PHASE_CTRL 0x1d18
+#define VPP_SC_MISC 0x1d19
+#define VPP_PREBLEND_VD1_H_START_END 0x1d1a
+#define VPP_PREBLEND_VD1_V_START_END 0x1d1b
+#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c
+#define VPP_POSTBLEND_VD1_V_START_END 0x1d1d
+#define VPP_BLEND_VD2_H_START_END 0x1d1e
+#define VPP_BLEND_VD2_V_START_END 0x1d1f
+#define VPP_PREBLEND_H_SIZE 0x1d20
+#define VPP_POSTBLEND_H_SIZE 0x1d21
+#define VPP_HOLD_LINES 0x1d22
+#define VPP_BLEND_ONECOLOR_CTRL 0x1d23
+#define VPP_PREBLEND_CURRENT_XY 0x1d24
+#define VPP_POSTBLEND_CURRENT_XY 0x1d25
+#define VPP_MISC 0x1d26
+#define VPP_PREBLEND_ENABLE BIT(6)
+#define VPP_POSTBLEND_ENABLE BIT(7)
+#define VPP_OSD2_ALPHA_PREMULT BIT(8)
+#define VPP_OSD1_ALPHA_PREMULT BIT(9)
+#define VPP_VD1_POSTBLEND BIT(10)
+#define VPP_VD2_POSTBLEND BIT(11)
+#define VPP_OSD1_POSTBLEND BIT(12)
+#define VPP_OSD2_POSTBLEND BIT(13)
+#define VPP_VD1_PREBLEND BIT(14)
+#define VPP_VD2_PREBLEND BIT(15)
+#define VPP_OSD1_PREBLEND BIT(16)
+#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_OFIFO_SIZE 0x1d27
+#define VPP_FIFO_STATUS 0x1d28
+#define VPP_SMOKE_CTRL 0x1d29
+#define VPP_SMOKE1_VAL 0x1d2a
+#define VPP_SMOKE2_VAL 0x1d2b
+#define VPP_SMOKE3_VAL 0x1d2c
+#define VPP_SMOKE1_H_START_END 0x1d2d
+#define VPP_SMOKE1_V_START_END 0x1d2e
+#define VPP_SMOKE2_H_START_END 0x1d2f
+#define VPP_SMOKE2_V_START_END 0x1d30
+#define VPP_SMOKE3_H_START_END 0x1d31
+#define VPP_SMOKE3_V_START_END 0x1d32
+#define VPP_SCO_FIFO_CTRL 0x1d33
+#define VPP_HSC_PHASE_CTRL1 0x1d34
+#define VPP_HSC_INI_PAT_CTRL 0x1d35
+#define VPP_VADJ_CTRL 0x1d40
+#define VPP_VADJ1_Y 0x1d41
+#define VPP_VADJ1_MA_MB 0x1d42
+#define VPP_VADJ1_MC_MD 0x1d43
+#define VPP_VADJ2_Y 0x1d44
+#define VPP_VADJ2_MA_MB 0x1d45
+#define VPP_VADJ2_MC_MD 0x1d46
+#define VPP_HSHARP_CTRL 0x1d50
+#define VPP_HSHARP_LUMA_THRESH01 0x1d51
+#define VPP_HSHARP_LUMA_THRESH23 0x1d52
+#define VPP_HSHARP_CHROMA_THRESH01 0x1d53
+#define VPP_HSHARP_CHROMA_THRESH23 0x1d54
+#define VPP_HSHARP_LUMA_GAIN 0x1d55
+#define VPP_HSHARP_CHROMA_GAIN 0x1d56
+#define VPP_MATRIX_PROBE_COLOR 0x1d5c
+#define VPP_MATRIX_HL_COLOR 0x1d5d
+#define VPP_MATRIX_PROBE_POS 0x1d5e
+#define VPP_MATRIX_CTRL 0x1d5f
+#define VPP_MATRIX_COEF00_01 0x1d60
+#define VPP_MATRIX_COEF02_10 0x1d61
+#define VPP_MATRIX_COEF11_12 0x1d62
+#define VPP_MATRIX_COEF20_21 0x1d63
+#define VPP_MATRIX_COEF22 0x1d64
+#define VPP_MATRIX_OFFSET0_1 0x1d65
+#define VPP_MATRIX_OFFSET2 0x1d66
+#define VPP_MATRIX_PRE_OFFSET0_1 0x1d67
+#define VPP_MATRIX_PRE_OFFSET2 0x1d68
+#define VPP_DUMMY_DATA1 0x1d69
+#define VPP_GAINOFF_CTRL0 0x1d6a
+#define VPP_GAINOFF_CTRL1 0x1d6b
+#define VPP_GAINOFF_CTRL2 0x1d6c
+#define VPP_GAINOFF_CTRL3 0x1d6d
+#define VPP_GAINOFF_CTRL4 0x1d6e
+#define VPP_CHROMA_ADDR_PORT 0x1d70
+#define VPP_CHROMA_DATA_PORT 0x1d71
+#define VPP_GCLK_CTRL0 0x1d72
+#define VPP_GCLK_CTRL1 0x1d73
+#define VPP_SC_GCLK_CTRL 0x1d74
+#define VPP_MISC1 0x1d76
+#define VPP_BLACKEXT_CTRL 0x1d80
+#define VPP_DNLP_CTRL_00 0x1d81
+#define VPP_DNLP_CTRL_01 0x1d82
+#define VPP_DNLP_CTRL_02 0x1d83
+#define VPP_DNLP_CTRL_03 0x1d84
+#define VPP_DNLP_CTRL_04 0x1d85
+#define VPP_DNLP_CTRL_05 0x1d86
+#define VPP_DNLP_CTRL_06 0x1d87
+#define VPP_DNLP_CTRL_07 0x1d88
+#define VPP_DNLP_CTRL_08 0x1d89
+#define VPP_DNLP_CTRL_09 0x1d8a
+#define VPP_DNLP_CTRL_10 0x1d8b
+#define VPP_DNLP_CTRL_11 0x1d8c
+#define VPP_DNLP_CTRL_12 0x1d8d
+#define VPP_DNLP_CTRL_13 0x1d8e
+#define VPP_DNLP_CTRL_14 0x1d8f
+#define VPP_DNLP_CTRL_15 0x1d90
+#define VPP_PEAKING_HGAIN 0x1d91
+#define VPP_PEAKING_VGAIN 0x1d92
+#define VPP_PEAKING_NLP_1 0x1d93
+#define VPP_DOLBY_CTRL 0x1d93
+#define VPP_PEAKING_NLP_2 0x1d94
+#define VPP_PEAKING_NLP_3 0x1d95
+#define VPP_PEAKING_NLP_4 0x1d96
+#define VPP_PEAKING_NLP_5 0x1d97
+#define VPP_SHARP_LIMIT 0x1d98
+#define VPP_VLTI_CTRL 0x1d99
+#define VPP_HLTI_CTRL 0x1d9a
+#define VPP_CTI_CTRL 0x1d9b
+#define VPP_BLUE_STRETCH_1 0x1d9c
+#define VPP_BLUE_STRETCH_2 0x1d9d
+#define VPP_BLUE_STRETCH_3 0x1d9e
+#define VPP_CCORING_CTRL 0x1da0
+#define VPP_VE_ENABLE_CTRL 0x1da1
+#define VPP_VE_DEMO_LEFT_TOP_SCREEN_WIDTH 0x1da2
+#define VPP_VE_DEMO_CENTER_BAR 0x1da3
+#define VPP_VE_H_V_SIZE 0x1da4
+#define VPP_VDO_MEAS_CTRL 0x1da8
+#define VPP_VDO_MEAS_VS_COUNT_HI 0x1da9
+#define VPP_VDO_MEAS_VS_COUNT_LO 0x1daa
+#define VPP_INPUT_CTRL 0x1dab
+#define VPP_CTI_CTRL2 0x1dac
+#define VPP_PEAKING_SAT_THD1 0x1dad
+#define VPP_PEAKING_SAT_THD2 0x1dae
+#define VPP_PEAKING_SAT_THD3 0x1daf
+#define VPP_PEAKING_SAT_THD4 0x1db0
+#define VPP_PEAKING_SAT_THD5 0x1db1
+#define VPP_PEAKING_SAT_THD6 0x1db2
+#define VPP_PEAKING_SAT_THD7 0x1db3
+#define VPP_PEAKING_SAT_THD8 0x1db4
+#define VPP_PEAKING_SAT_THD9 0x1db5
+#define VPP_PEAKING_GAIN_ADD1 0x1db6
+#define VPP_PEAKING_GAIN_ADD2 0x1db7
+#define VPP_PEAKING_DNLP 0x1db8
+#define VPP_SHARP_DEMO_WIN_CTRL1 0x1db9
+#define VPP_SHARP_DEMO_WIN_CTRL2 0x1dba
+#define VPP_FRONT_HLTI_CTRL 0x1dbb
+#define VPP_FRONT_CTI_CTRL 0x1dbc
+#define VPP_FRONT_CTI_CTRL2 0x1dbd
+#define VPP_OSD_VSC_PHASE_STEP 0x1dc0
+#define VPP_OSD_VSC_INI_PHASE 0x1dc1
+#define VPP_OSD_VSC_CTRL0 0x1dc2
+#define VPP_OSD_HSC_PHASE_STEP 0x1dc3
+#define VPP_OSD_HSC_INI_PHASE 0x1dc4
+#define VPP_OSD_HSC_CTRL0 0x1dc5
+#define VPP_OSD_HSC_INI_PAT_CTRL 0x1dc6
+#define VPP_OSD_SC_DUMMY_DATA 0x1dc7
+#define VPP_OSD_SC_CTRL0 0x1dc8
+#define VPP_OSD_SCI_WH_M1 0x1dc9
+#define VPP_OSD_SCO_H_START_END 0x1dca
+#define VPP_OSD_SCO_V_START_END 0x1dcb
+#define VPP_OSD_SCALE_COEF_IDX 0x1dcc
+#define VPP_OSD_SCALE_COEF 0x1dcd
+#define VPP_INT_LINE_NUM 0x1dce
+
+/* viu2 */
+#define VIU2_ADDR_START 0x1e00
+#define VIU2_ADDR_END 0x1eff
+#define VIU2_SW_RESET 0x1e01
+#define VIU2_OSD1_CTRL_STAT 0x1e10
+#define VIU2_OSD1_CTRL_STAT2 0x1e2d
+#define VIU2_OSD1_COLOR_ADDR 0x1e11
+#define VIU2_OSD1_COLOR 0x1e12
+#define VIU2_OSD1_TCOLOR_AG0 0x1e17
+#define VIU2_OSD1_TCOLOR_AG1 0x1e18
+#define VIU2_OSD1_TCOLOR_AG2 0x1e19
+#define VIU2_OSD1_TCOLOR_AG3 0x1e1a
+#define VIU2_OSD1_BLK0_CFG_W0 0x1e1b
+#define VIU2_OSD1_BLK1_CFG_W0 0x1e1f
+#define VIU2_OSD1_BLK2_CFG_W0 0x1e23
+#define VIU2_OSD1_BLK3_CFG_W0 0x1e27
+#define VIU2_OSD1_BLK0_CFG_W1 0x1e1c
+#define VIU2_OSD1_BLK1_CFG_W1 0x1e20
+#define VIU2_OSD1_BLK2_CFG_W1 0x1e24
+#define VIU2_OSD1_BLK3_CFG_W1 0x1e28
+#define VIU2_OSD1_BLK0_CFG_W2 0x1e1d
+#define VIU2_OSD1_BLK1_CFG_W2 0x1e21
+#define VIU2_OSD1_BLK2_CFG_W2 0x1e25
+#define VIU2_OSD1_BLK3_CFG_W2 0x1e29
+#define VIU2_OSD1_BLK0_CFG_W3 0x1e1e
+#define VIU2_OSD1_BLK1_CFG_W3 0x1e22
+#define VIU2_OSD1_BLK2_CFG_W3 0x1e26
+#define VIU2_OSD1_BLK3_CFG_W3 0x1e2a
+#define VIU2_OSD1_BLK0_CFG_W4 0x1e13
+#define VIU2_OSD1_BLK1_CFG_W4 0x1e14
+#define VIU2_OSD1_BLK2_CFG_W4 0x1e15
+#define VIU2_OSD1_BLK3_CFG_W4 0x1e16
+#define VIU2_OSD1_FIFO_CTRL_STAT 0x1e2b
+#define VIU2_OSD1_TEST_RDDATA 0x1e2c
+#define VIU2_OSD1_PROT_CTRL 0x1e2e
+#define VIU2_OSD2_CTRL_STAT 0x1e30
+#define VIU2_OSD2_CTRL_STAT2 0x1e4d
+#define VIU2_OSD2_COLOR_ADDR 0x1e31
+#define VIU2_OSD2_COLOR 0x1e32
+#define VIU2_OSD2_HL1_H_START_END 0x1e33
+#define VIU2_OSD2_HL1_V_START_END 0x1e34
+#define VIU2_OSD2_HL2_H_START_END 0x1e35
+#define VIU2_OSD2_HL2_V_START_END 0x1e36
+#define VIU2_OSD2_TCOLOR_AG0 0x1e37
+#define VIU2_OSD2_TCOLOR_AG1 0x1e38
+#define VIU2_OSD2_TCOLOR_AG2 0x1e39
+#define VIU2_OSD2_TCOLOR_AG3 0x1e3a
+#define VIU2_OSD2_BLK0_CFG_W0 0x1e3b
+#define VIU2_OSD2_BLK1_CFG_W0 0x1e3f
+#define VIU2_OSD2_BLK2_CFG_W0 0x1e43
+#define VIU2_OSD2_BLK3_CFG_W0 0x1e47
+#define VIU2_OSD2_BLK0_CFG_W1 0x1e3c
+#define VIU2_OSD2_BLK1_CFG_W1 0x1e40
+#define VIU2_OSD2_BLK2_CFG_W1 0x1e44
+#define VIU2_OSD2_BLK3_CFG_W1 0x1e48
+#define VIU2_OSD2_BLK0_CFG_W2 0x1e3d
+#define VIU2_OSD2_BLK1_CFG_W2 0x1e41
+#define VIU2_OSD2_BLK2_CFG_W2 0x1e45
+#define VIU2_OSD2_BLK3_CFG_W2 0x1e49
+#define VIU2_OSD2_BLK0_CFG_W3 0x1e3e
+#define VIU2_OSD2_BLK1_CFG_W3 0x1e42
+#define VIU2_OSD2_BLK2_CFG_W3 0x1e46
+#define VIU2_OSD2_BLK3_CFG_W3 0x1e4a
+#define VIU2_OSD2_BLK0_CFG_W4 0x1e64
+#define VIU2_OSD2_BLK1_CFG_W4 0x1e65
+#define VIU2_OSD2_BLK2_CFG_W4 0x1e66
+#define VIU2_OSD2_BLK3_CFG_W4 0x1e67
+#define VIU2_OSD2_FIFO_CTRL_STAT 0x1e4b
+#define VIU2_OSD2_TEST_RDDATA 0x1e4c
+#define VIU2_OSD2_PROT_CTRL 0x1e4e
+#define VIU2_VD1_IF0_GEN_REG 0x1e50
+#define VIU2_VD1_IF0_CANVAS0 0x1e51
+#define VIU2_VD1_IF0_CANVAS1 0x1e52
+#define VIU2_VD1_IF0_LUMA_X0 0x1e53
+#define VIU2_VD1_IF0_LUMA_Y0 0x1e54
+#define VIU2_VD1_IF0_CHROMA_X0 0x1e55
+#define VIU2_VD1_IF0_CHROMA_Y0 0x1e56
+#define VIU2_VD1_IF0_LUMA_X1 0x1e57
+#define VIU2_VD1_IF0_LUMA_Y1 0x1e58
+#define VIU2_VD1_IF0_CHROMA_X1 0x1e59
+#define VIU2_VD1_IF0_CHROMA_Y1 0x1e5a
+#define VIU2_VD1_IF0_RPT_LOOP 0x1e5b
+#define VIU2_VD1_IF0_LUMA0_RPT_PAT 0x1e5c
+#define VIU2_VD1_IF0_CHROMA0_RPT_PAT 0x1e5d
+#define VIU2_VD1_IF0_LUMA1_RPT_PAT 0x1e5e
+#define VIU2_VD1_IF0_CHROMA1_RPT_PAT 0x1e5f
+#define VIU2_VD1_IF0_LUMA_PSEL 0x1e60
+#define VIU2_VD1_IF0_CHROMA_PSEL 0x1e61
+#define VIU2_VD1_IF0_DUMMY_PIXEL 0x1e62
+#define VIU2_VD1_IF0_LUMA_FIFO_SIZE 0x1e63
+#define VIU2_VD1_IF0_RANGE_MAP_Y 0x1e6a
+#define VIU2_VD1_IF0_RANGE_MAP_CB 0x1e6b
+#define VIU2_VD1_IF0_RANGE_MAP_CR 0x1e6c
+#define VIU2_VD1_IF0_GEN_REG2 0x1e6d
+#define VIU2_VD1_IF0_PROT_CNTL 0x1e6e
+#define VIU2_VD1_FMT_CTRL 0x1e68
+#define VIU2_VD1_FMT_W 0x1e69
+
+/* encode */
+#define ENCP_VFIFO2VD_CTL 0x1b58
+#define ENCP_VFIFO2VD_PIXEL_START 0x1b59
+#define ENCP_VFIFO2VD_PIXEL_END 0x1b5a
+#define ENCP_VFIFO2VD_LINE_TOP_START 0x1b5b
+#define ENCP_VFIFO2VD_LINE_TOP_END 0x1b5c
+#define ENCP_VFIFO2VD_LINE_BOT_START 0x1b5d
+#define ENCP_VFIFO2VD_LINE_BOT_END 0x1b5e
+#define VENC_SYNC_ROUTE 0x1b60
+#define VENC_VIDEO_EXSRC 0x1b61
+#define VENC_DVI_SETTING 0x1b62
+#define VENC_C656_CTRL 0x1b63
+#define VENC_UPSAMPLE_CTRL0 0x1b64
+#define VENC_UPSAMPLE_CTRL1 0x1b65
+#define VENC_UPSAMPLE_CTRL2 0x1b66
+#define TCON_INVERT_CTL 0x1b67
+#define VENC_VIDEO_PROG_MODE 0x1b68
+#define VENC_ENCI_LINE 0x1b69
+#define VENC_ENCI_PIXEL 0x1b6a
+#define VENC_ENCP_LINE 0x1b6b
+#define VENC_ENCP_PIXEL 0x1b6c
+#define VENC_STATA 0x1b6d
+#define VENC_INTCTRL 0x1b6e
+#define VENC_INTFLAG 0x1b6f
+#define VENC_VIDEO_TST_EN 0x1b70
+#define VENC_VIDEO_TST_MDSEL 0x1b71
+#define VENC_VIDEO_TST_Y 0x1b72
+#define VENC_VIDEO_TST_CB 0x1b73
+#define VENC_VIDEO_TST_CR 0x1b74
+#define VENC_VIDEO_TST_CLRBAR_STRT 0x1b75
+#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76
+#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77
+#define VENC_VDAC_DACSEL0 0x1b78
+#define VENC_VDAC_DACSEL1 0x1b79
+#define VENC_VDAC_DACSEL2 0x1b7a
+#define VENC_VDAC_DACSEL3 0x1b7b
+#define VENC_VDAC_DACSEL4 0x1b7c
+#define VENC_VDAC_DACSEL5 0x1b7d
+#define VENC_VDAC_SETTING 0x1b7e
+#define VENC_VDAC_TST_VAL 0x1b7f
+#define VENC_VDAC_DAC0_GAINCTRL 0x1bf0
+#define VENC_VDAC_DAC0_OFFSET 0x1bf1
+#define VENC_VDAC_DAC1_GAINCTRL 0x1bf2
+#define VENC_VDAC_DAC1_OFFSET 0x1bf3
+#define VENC_VDAC_DAC2_GAINCTRL 0x1bf4
+#define VENC_VDAC_DAC2_OFFSET 0x1bf5
+#define VENC_VDAC_DAC3_GAINCTRL 0x1bf6
+#define VENC_VDAC_DAC3_OFFSET 0x1bf7
+#define VENC_VDAC_DAC4_GAINCTRL 0x1bf8
+#define VENC_VDAC_DAC4_OFFSET 0x1bf9
+#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa
+#define VENC_VDAC_DAC5_OFFSET 0x1bfb
+#define VENC_VDAC_FIFO_CTRL 0x1bfc
+#define ENCL_TCON_INVERT_CTL 0x1bfd
+#define ENCP_VIDEO_EN 0x1b80
+#define ENCP_VIDEO_SYNC_MODE 0x1b81
+#define ENCP_MACV_EN 0x1b82
+#define ENCP_VIDEO_Y_SCL 0x1b83
+#define ENCP_VIDEO_PB_SCL 0x1b84
+#define ENCP_VIDEO_PR_SCL 0x1b85
+#define ENCP_VIDEO_SYNC_SCL 0x1b86
+#define ENCP_VIDEO_MACV_SCL 0x1b87
+#define ENCP_VIDEO_Y_OFFST 0x1b88
+#define ENCP_VIDEO_PB_OFFST 0x1b89
+#define ENCP_VIDEO_PR_OFFST 0x1b8a
+#define ENCP_VIDEO_SYNC_OFFST 0x1b8b
+#define ENCP_VIDEO_MACV_OFFST 0x1b8c
+#define ENCP_VIDEO_MODE 0x1b8d
+#define ENCP_VIDEO_MODE_ADV 0x1b8e
+#define ENCP_DBG_PX_RST 0x1b90
+#define ENCP_DBG_LN_RST 0x1b91
+#define ENCP_DBG_PX_INT 0x1b92
+#define ENCP_DBG_LN_INT 0x1b93
+#define ENCP_VIDEO_YFP1_HTIME 0x1b94
+#define ENCP_VIDEO_YFP2_HTIME 0x1b95
+#define ENCP_VIDEO_YC_DLY 0x1b96
+#define ENCP_VIDEO_MAX_PXCNT 0x1b97
+#define ENCP_VIDEO_HSPULS_BEGIN 0x1b98
+#define ENCP_VIDEO_HSPULS_END 0x1b99
+#define ENCP_VIDEO_HSPULS_SWITCH 0x1b9a
+#define ENCP_VIDEO_VSPULS_BEGIN 0x1b9b
+#define ENCP_VIDEO_VSPULS_END 0x1b9c
+#define ENCP_VIDEO_VSPULS_BLINE 0x1b9d
+#define ENCP_VIDEO_VSPULS_ELINE 0x1b9e
+#define ENCP_VIDEO_EQPULS_BEGIN 0x1b9f
+#define ENCP_VIDEO_EQPULS_END 0x1ba0
+#define ENCP_VIDEO_EQPULS_BLINE 0x1ba1
+#define ENCP_VIDEO_EQPULS_ELINE 0x1ba2
+#define ENCP_VIDEO_HAVON_END 0x1ba3
+#define ENCP_VIDEO_HAVON_BEGIN 0x1ba4
+#define ENCP_VIDEO_VAVON_ELINE 0x1baf
+#define ENCP_VIDEO_VAVON_BLINE 0x1ba6
+#define ENCP_VIDEO_HSO_BEGIN 0x1ba7
+#define ENCP_VIDEO_HSO_END 0x1ba8
+#define ENCP_VIDEO_VSO_BEGIN 0x1ba9
+#define ENCP_VIDEO_VSO_END 0x1baa
+#define ENCP_VIDEO_VSO_BLINE 0x1bab
+#define ENCP_VIDEO_VSO_ELINE 0x1bac
+#define ENCP_VIDEO_SYNC_WAVE_CURVE 0x1bad
+#define ENCP_VIDEO_MAX_LNCNT 0x1bae
+#define ENCP_VIDEO_SY_VAL 0x1bb0
+#define ENCP_VIDEO_SY2_VAL 0x1bb1
+#define ENCP_VIDEO_BLANKY_VAL 0x1bb2
+#define ENCP_VIDEO_BLANKPB_VAL 0x1bb3
+#define ENCP_VIDEO_BLANKPR_VAL 0x1bb4
+#define ENCP_VIDEO_HOFFST 0x1bb5
+#define ENCP_VIDEO_VOFFST 0x1bb6
+#define ENCP_VIDEO_RGB_CTRL 0x1bb7
+#define ENCP_VIDEO_FILT_CTRL 0x1bb8
+#define ENCP_VIDEO_OFLD_VPEQ_OFST 0x1bb9
+#define ENCP_VIDEO_OFLD_VOAV_OFST 0x1bba
+#define ENCP_VIDEO_MATRIX_CB 0x1bbb
+#define ENCP_VIDEO_MATRIX_CR 0x1bbc
+#define ENCP_VIDEO_RGBIN_CTRL 0x1bbd
+#define ENCP_MACV_BLANKY_VAL 0x1bc0
+#define ENCP_MACV_MAXY_VAL 0x1bc1
+#define ENCP_MACV_1ST_PSSYNC_STRT 0x1bc2
+#define ENCP_MACV_PSSYNC_STRT 0x1bc3
+#define ENCP_MACV_AGC_STRT 0x1bc4
+#define ENCP_MACV_AGC_END 0x1bc5
+#define ENCP_MACV_WAVE_END 0x1bc6
+#define ENCP_MACV_STRTLINE 0x1bc7
+#define ENCP_MACV_ENDLINE 0x1bc8
+#define ENCP_MACV_TS_CNT_MAX_L 0x1bc9
+#define ENCP_MACV_TS_CNT_MAX_H 0x1bca
+#define ENCP_MACV_TIME_DOWN 0x1bcb
+#define ENCP_MACV_TIME_LO 0x1bcc
+#define ENCP_MACV_TIME_UP 0x1bcd
+#define ENCP_MACV_TIME_RST 0x1bce
+#define ENCP_VBI_CTRL 0x1bd0
+#define ENCP_VBI_SETTING 0x1bd1
+#define ENCP_VBI_BEGIN 0x1bd2
+#define ENCP_VBI_WIDTH 0x1bd3
+#define ENCP_VBI_HVAL 0x1bd4
+#define ENCP_VBI_DATA0 0x1bd5
+#define ENCP_VBI_DATA1 0x1bd6
+#define C656_HS_ST 0x1be0
+#define C656_HS_ED 0x1be1
+#define C656_VS_LNST_E 0x1be2
+#define C656_VS_LNST_O 0x1be3
+#define C656_VS_LNED_E 0x1be4
+#define C656_VS_LNED_O 0x1be5
+#define C656_FS_LNST 0x1be6
+#define C656_FS_LNED 0x1be7
+#define ENCI_VIDEO_MODE 0x1b00
+#define ENCI_VIDEO_MODE_ADV 0x1b01
+#define ENCI_VIDEO_FSC_ADJ 0x1b02
+#define ENCI_VIDEO_BRIGHT 0x1b03
+#define ENCI_VIDEO_CONT 0x1b04
+#define ENCI_VIDEO_SAT 0x1b05
+#define ENCI_VIDEO_HUE 0x1b06
+#define ENCI_VIDEO_SCH 0x1b07
+#define ENCI_SYNC_MODE 0x1b08
+#define ENCI_SYNC_CTRL 0x1b09
+#define ENCI_SYNC_HSO_BEGIN 0x1b0a
+#define ENCI_SYNC_HSO_END 0x1b0b
+#define ENCI_SYNC_VSO_EVN 0x1b0c
+#define ENCI_SYNC_VSO_ODD 0x1b0d
+#define ENCI_SYNC_VSO_EVNLN 0x1b0e
+#define ENCI_SYNC_VSO_ODDLN 0x1b0f
+#define ENCI_SYNC_HOFFST 0x1b10
+#define ENCI_SYNC_VOFFST 0x1b11
+#define ENCI_SYNC_ADJ 0x1b12
+#define ENCI_RGB_SETTING 0x1b13
+#define ENCI_DE_H_BEGIN 0x1b16
+#define ENCI_DE_H_END 0x1b17
+#define ENCI_DE_V_BEGIN_EVEN 0x1b18
+#define ENCI_DE_V_END_EVEN 0x1b19
+#define ENCI_DE_V_BEGIN_ODD 0x1b1a
+#define ENCI_DE_V_END_ODD 0x1b1b
+#define ENCI_VBI_SETTING 0x1b20
+#define ENCI_VBI_CCDT_EVN 0x1b21
+#define ENCI_VBI_CCDT_ODD 0x1b22
+#define ENCI_VBI_CC525_LN 0x1b23
+#define ENCI_VBI_CC625_LN 0x1b24
+#define ENCI_VBI_WSSDT 0x1b25
+#define ENCI_VBI_WSS_LN 0x1b26
+#define ENCI_VBI_CGMSDT_L 0x1b27
+#define ENCI_VBI_CGMSDT_H 0x1b28
+#define ENCI_VBI_CGMS_LN 0x1b29
+#define ENCI_VBI_TTX_HTIME 0x1b2a
+#define ENCI_VBI_TTX_LN 0x1b2b
+#define ENCI_VBI_TTXDT0 0x1b2c
+#define ENCI_VBI_TTXDT1 0x1b2d
+#define ENCI_VBI_TTXDT2 0x1b2e
+#define ENCI_VBI_TTXDT3 0x1b2f
+#define ENCI_MACV_N0 0x1b30
+#define ENCI_MACV_N1 0x1b31
+#define ENCI_MACV_N2 0x1b32
+#define ENCI_MACV_N3 0x1b33
+#define ENCI_MACV_N4 0x1b34
+#define ENCI_MACV_N5 0x1b35
+#define ENCI_MACV_N6 0x1b36
+#define ENCI_MACV_N7 0x1b37
+#define ENCI_MACV_N8 0x1b38
+#define ENCI_MACV_N9 0x1b39
+#define ENCI_MACV_N10 0x1b3a
+#define ENCI_MACV_N11 0x1b3b
+#define ENCI_MACV_N12 0x1b3c
+#define ENCI_MACV_N13 0x1b3d
+#define ENCI_MACV_N14 0x1b3e
+#define ENCI_MACV_N15 0x1b3f
+#define ENCI_MACV_N16 0x1b40
+#define ENCI_MACV_N17 0x1b41
+#define ENCI_MACV_N18 0x1b42
+#define ENCI_MACV_N19 0x1b43
+#define ENCI_MACV_N20 0x1b44
+#define ENCI_MACV_N21 0x1b45
+#define ENCI_MACV_N22 0x1b46
+#define ENCI_DBG_PX_RST 0x1b48
+#define ENCI_DBG_FLDLN_RST 0x1b49
+#define ENCI_DBG_PX_INT 0x1b4a
+#define ENCI_DBG_FLDLN_INT 0x1b4b
+#define ENCI_DBG_MAXPX 0x1b4c
+#define ENCI_DBG_MAXLN 0x1b4d
+#define ENCI_MACV_MAX_AMP 0x1b50
+#define ENCI_MACV_PULSE_LO 0x1b51
+#define ENCI_MACV_PULSE_HI 0x1b52
+#define ENCI_MACV_BKP_MAX 0x1b53
+#define ENCI_CFILT_CTRL 0x1b54
+#define ENCI_CFILT7 0x1b55
+#define ENCI_YC_DELAY 0x1b56
+#define ENCI_VIDEO_EN 0x1b57
+#define ENCI_DVI_HSO_BEGIN 0x1c00
+#define ENCI_DVI_HSO_END 0x1c01
+#define ENCI_DVI_VSO_BLINE_EVN 0x1c02
+#define ENCI_DVI_VSO_BLINE_ODD 0x1c03
+#define ENCI_DVI_VSO_ELINE_EVN 0x1c04
+#define ENCI_DVI_VSO_ELINE_ODD 0x1c05
+#define ENCI_DVI_VSO_BEGIN_EVN 0x1c06
+#define ENCI_DVI_VSO_BEGIN_ODD 0x1c07
+#define ENCI_DVI_VSO_END_EVN 0x1c08
+#define ENCI_DVI_VSO_END_ODD 0x1c09
+#define ENCI_CFILT_CTRL2 0x1c0a
+#define ENCI_DACSEL_0 0x1c0b
+#define ENCI_DACSEL_1 0x1c0c
+#define ENCP_DACSEL_0 0x1c0d
+#define ENCP_DACSEL_1 0x1c0e
+#define ENCP_MAX_LINE_SWITCH_POINT 0x1c0f
+#define ENCI_TST_EN 0x1c10
+#define ENCI_TST_MDSEL 0x1c11
+#define ENCI_TST_Y 0x1c12
+#define ENCI_TST_CB 0x1c13
+#define ENCI_TST_CR 0x1c14
+#define ENCI_TST_CLRBAR_STRT 0x1c15
+#define ENCI_TST_CLRBAR_WIDTH 0x1c16
+#define ENCI_TST_VDCNT_STSET 0x1c17
+#define ENCI_VFIFO2VD_CTL 0x1c18
+#define ENCI_VFIFO2VD_PIXEL_START 0x1c19
+#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a
+#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b
+#define ENCI_VFIFO2VD_LINE_TOP_END 0x1c1c
+#define ENCI_VFIFO2VD_LINE_BOT_START 0x1c1d
+#define ENCI_VFIFO2VD_LINE_BOT_END 0x1c1e
+#define ENCI_VFIFO2VD_CTL2 0x1c1f
+#define ENCT_VFIFO2VD_CTL 0x1c20
+#define ENCT_VFIFO2VD_PIXEL_START 0x1c21
+#define ENCT_VFIFO2VD_PIXEL_END 0x1c22
+#define ENCT_VFIFO2VD_LINE_TOP_START 0x1c23
+#define ENCT_VFIFO2VD_LINE_TOP_END 0x1c24
+#define ENCT_VFIFO2VD_LINE_BOT_START 0x1c25
+#define ENCT_VFIFO2VD_LINE_BOT_END 0x1c26
+#define ENCT_VFIFO2VD_CTL2 0x1c27
+#define ENCT_TST_EN 0x1c28
+#define ENCT_TST_MDSEL 0x1c29
+#define ENCT_TST_Y 0x1c2a
+#define ENCT_TST_CB 0x1c2b
+#define ENCT_TST_CR 0x1c2c
+#define ENCT_TST_CLRBAR_STRT 0x1c2d
+#define ENCT_TST_CLRBAR_WIDTH 0x1c2e
+#define ENCT_TST_VDCNT_STSET 0x1c2f
+#define ENCP_DVI_HSO_BEGIN 0x1c30
+#define ENCP_DVI_HSO_END 0x1c31
+#define ENCP_DVI_VSO_BLINE_EVN 0x1c32
+#define ENCP_DVI_VSO_BLINE_ODD 0x1c33
+#define ENCP_DVI_VSO_ELINE_EVN 0x1c34
+#define ENCP_DVI_VSO_ELINE_ODD 0x1c35
+#define ENCP_DVI_VSO_BEGIN_EVN 0x1c36
+#define ENCP_DVI_VSO_BEGIN_ODD 0x1c37
+#define ENCP_DVI_VSO_END_EVN 0x1c38
+#define ENCP_DVI_VSO_END_ODD 0x1c39
+#define ENCP_DE_H_BEGIN 0x1c3a
+#define ENCP_DE_H_END 0x1c3b
+#define ENCP_DE_V_BEGIN_EVEN 0x1c3c
+#define ENCP_DE_V_END_EVEN 0x1c3d
+#define ENCP_DE_V_BEGIN_ODD 0x1c3e
+#define ENCP_DE_V_END_ODD 0x1c3f
+#define ENCI_SYNC_LINE_LENGTH 0x1c40
+#define ENCI_SYNC_PIXEL_EN 0x1c41
+#define ENCI_SYNC_TO_LINE_EN 0x1c42
+#define ENCI_SYNC_TO_PIXEL 0x1c43
+#define ENCP_SYNC_LINE_LENGTH 0x1c44
+#define ENCP_SYNC_PIXEL_EN 0x1c45
+#define ENCP_SYNC_TO_LINE_EN 0x1c46
+#define ENCP_SYNC_TO_PIXEL 0x1c47
+#define ENCT_SYNC_LINE_LENGTH 0x1c48
+#define ENCT_SYNC_PIXEL_EN 0x1c49
+#define ENCT_SYNC_TO_LINE_EN 0x1c4a
+#define ENCT_SYNC_TO_PIXEL 0x1c4b
+#define ENCL_SYNC_LINE_LENGTH 0x1c4c
+#define ENCL_SYNC_PIXEL_EN 0x1c4d
+#define ENCL_SYNC_TO_LINE_EN 0x1c4e
+#define ENCL_SYNC_TO_PIXEL 0x1c4f
+#define ENCP_VFIFO2VD_CTL2 0x1c50
+#define VENC_DVI_SETTING_MORE 0x1c51
+#define VENC_VDAC_DAC4_FILT_CTRL0 0x1c54
+#define VENC_VDAC_DAC4_FILT_CTRL1 0x1c55
+#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56
+#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57
+#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58
+#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59
+#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a
+#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b
+#define VENC_VDAC_DAC2_FILT_CTRL0 0x1c5c
+#define VENC_VDAC_DAC2_FILT_CTRL1 0x1c5d
+#define VENC_VDAC_DAC3_FILT_CTRL0 0x1c5e
+#define VENC_VDAC_DAC3_FILT_CTRL1 0x1c5f
+#define ENCT_VIDEO_EN 0x1c60
+#define ENCT_VIDEO_Y_SCL 0x1c61
+#define ENCT_VIDEO_PB_SCL 0x1c62
+#define ENCT_VIDEO_PR_SCL 0x1c63
+#define ENCT_VIDEO_Y_OFFST 0x1c64
+#define ENCT_VIDEO_PB_OFFST 0x1c65
+#define ENCT_VIDEO_PR_OFFST 0x1c66
+#define ENCT_VIDEO_MODE 0x1c67
+#define ENCT_VIDEO_MODE_ADV 0x1c68
+#define ENCT_DBG_PX_RST 0x1c69
+#define ENCT_DBG_LN_RST 0x1c6a
+#define ENCT_DBG_PX_INT 0x1c6b
+#define ENCT_DBG_LN_INT 0x1c6c
+#define ENCT_VIDEO_YFP1_HTIME 0x1c6d
+#define ENCT_VIDEO_YFP2_HTIME 0x1c6e
+#define ENCT_VIDEO_YC_DLY 0x1c6f
+#define ENCT_VIDEO_MAX_PXCNT 0x1c70
+#define ENCT_VIDEO_HAVON_END 0x1c71
+#define ENCT_VIDEO_HAVON_BEGIN 0x1c72
+#define ENCT_VIDEO_VAVON_ELINE 0x1c73
+#define ENCT_VIDEO_VAVON_BLINE 0x1c74
+#define ENCT_VIDEO_HSO_BEGIN 0x1c75
+#define ENCT_VIDEO_HSO_END 0x1c76
+#define ENCT_VIDEO_VSO_BEGIN 0x1c77
+#define ENCT_VIDEO_VSO_END 0x1c78
+#define ENCT_VIDEO_VSO_BLINE 0x1c79
+#define ENCT_VIDEO_VSO_ELINE 0x1c7a
+#define ENCT_VIDEO_MAX_LNCNT 0x1c7b
+#define ENCT_VIDEO_BLANKY_VAL 0x1c7c
+#define ENCT_VIDEO_BLANKPB_VAL 0x1c7d
+#define ENCT_VIDEO_BLANKPR_VAL 0x1c7e
+#define ENCT_VIDEO_HOFFST 0x1c7f
+#define ENCT_VIDEO_VOFFST 0x1c80
+#define ENCT_VIDEO_RGB_CTRL 0x1c81
+#define ENCT_VIDEO_FILT_CTRL 0x1c82
+#define ENCT_VIDEO_OFLD_VPEQ_OFST 0x1c83
+#define ENCT_VIDEO_OFLD_VOAV_OFST 0x1c84
+#define ENCT_VIDEO_MATRIX_CB 0x1c85
+#define ENCT_VIDEO_MATRIX_CR 0x1c86
+#define ENCT_VIDEO_RGBIN_CTRL 0x1c87
+#define ENCT_MAX_LINE_SWITCH_POINT 0x1c88
+#define ENCT_DACSEL_0 0x1c89
+#define ENCT_DACSEL_1 0x1c8a
+#define ENCL_VFIFO2VD_CTL 0x1c90
+#define ENCL_VFIFO2VD_PIXEL_START 0x1c91
+#define ENCL_VFIFO2VD_PIXEL_END 0x1c92
+#define ENCL_VFIFO2VD_LINE_TOP_START 0x1c93
+#define ENCL_VFIFO2VD_LINE_TOP_END 0x1c94
+#define ENCL_VFIFO2VD_LINE_BOT_START 0x1c95
+#define ENCL_VFIFO2VD_LINE_BOT_END 0x1c96
+#define ENCL_VFIFO2VD_CTL2 0x1c97
+#define ENCL_TST_EN 0x1c98
+#define ENCL_TST_MDSEL 0x1c99
+#define ENCL_TST_Y 0x1c9a
+#define ENCL_TST_CB 0x1c9b
+#define ENCL_TST_CR 0x1c9c
+#define ENCL_TST_CLRBAR_STRT 0x1c9d
+#define ENCL_TST_CLRBAR_WIDTH 0x1c9e
+#define ENCL_TST_VDCNT_STSET 0x1c9f
+#define ENCL_VIDEO_EN 0x1ca0
+#define ENCL_VIDEO_Y_SCL 0x1ca1
+#define ENCL_VIDEO_PB_SCL 0x1ca2
+#define ENCL_VIDEO_PR_SCL 0x1ca3
+#define ENCL_VIDEO_Y_OFFST 0x1ca4
+#define ENCL_VIDEO_PB_OFFST 0x1ca5
+#define ENCL_VIDEO_PR_OFFST 0x1ca6
+#define ENCL_VIDEO_MODE 0x1ca7
+#define ENCL_VIDEO_MODE_ADV 0x1ca8
+#define ENCL_DBG_PX_RST 0x1ca9
+#define ENCL_DBG_LN_RST 0x1caa
+#define ENCL_DBG_PX_INT 0x1cab
+#define ENCL_DBG_LN_INT 0x1cac
+#define ENCL_VIDEO_YFP1_HTIME 0x1cad
+#define ENCL_VIDEO_YFP2_HTIME 0x1cae
+#define ENCL_VIDEO_YC_DLY 0x1caf
+#define ENCL_VIDEO_MAX_PXCNT 0x1cb0
+#define ENCL_VIDEO_HAVON_END 0x1cb1
+#define ENCL_VIDEO_HAVON_BEGIN 0x1cb2
+#define ENCL_VIDEO_VAVON_ELINE 0x1cb3
+#define ENCL_VIDEO_VAVON_BLINE 0x1cb4
+#define ENCL_VIDEO_HSO_BEGIN 0x1cb5
+#define ENCL_VIDEO_HSO_END 0x1cb6
+#define ENCL_VIDEO_VSO_BEGIN 0x1cb7
+#define ENCL_VIDEO_VSO_END 0x1cb8
+#define ENCL_VIDEO_VSO_BLINE 0x1cb9
+#define ENCL_VIDEO_VSO_ELINE 0x1cba
+#define ENCL_VIDEO_MAX_LNCNT 0x1cbb
+#define ENCL_VIDEO_BLANKY_VAL 0x1cbc
+#define ENCL_VIDEO_BLANKPB_VAL 0x1cbd
+#define ENCL_VIDEO_BLANKPR_VAL 0x1cbe
+#define ENCL_VIDEO_HOFFST 0x1cbf
+#define ENCL_VIDEO_VOFFST 0x1cc0
+#define ENCL_VIDEO_RGB_CTRL 0x1cc1
+#define ENCL_VIDEO_FILT_CTRL 0x1cc2
+#define ENCL_VIDEO_OFLD_VPEQ_OFST 0x1cc3
+#define ENCL_VIDEO_OFLD_VOAV_OFST 0x1cc4
+#define ENCL_VIDEO_MATRIX_CB 0x1cc5
+#define ENCL_VIDEO_MATRIX_CR 0x1cc6
+#define ENCL_VIDEO_RGBIN_CTRL 0x1cc7
+#define ENCL_MAX_LINE_SWITCH_POINT 0x1cc8
+#define ENCL_DACSEL_0 0x1cc9
+#define ENCL_DACSEL_1 0x1cca
+#define RDMA_AHB_START_ADDR_MAN 0x1100
+#define RDMA_AHB_END_ADDR_MAN 0x1101
+#define RDMA_AHB_START_ADDR_1 0x1102
+#define RDMA_AHB_END_ADDR_1 0x1103
+#define RDMA_AHB_START_ADDR_2 0x1104
+#define RDMA_AHB_END_ADDR_2 0x1105
+#define RDMA_AHB_START_ADDR_3 0x1106
+#define RDMA_AHB_END_ADDR_3 0x1107
+#define RDMA_AHB_START_ADDR_4 0x1108
+#define RDMA_AHB_END_ADDR_4 0x1109
+#define RDMA_AHB_START_ADDR_5 0x110a
+#define RDMA_AHB_END_ADDR_5 0x110b
+#define RDMA_AHB_START_ADDR_6 0x110c
+#define RDMA_AHB_END_ADDR_6 0x110d
+#define RDMA_AHB_START_ADDR_7 0x110e
+#define RDMA_AHB_END_ADDR_7 0x110f
+#define RDMA_ACCESS_AUTO 0x1110
+#define RDMA_ACCESS_AUTO2 0x1111
+#define RDMA_ACCESS_AUTO3 0x1112
+#define RDMA_ACCESS_MAN 0x1113
+#define RDMA_CTRL 0x1114
+#define RDMA_STATUS 0x1115
+#define RDMA_STATUS2 0x1116
+#define RDMA_STATUS3 0x1117
+#define L_GAMMA_CNTL_PORT 0x1400
+#define L_GAMMA_DATA_PORT 0x1401
+#define L_GAMMA_ADDR_PORT 0x1402
+#define L_GAMMA_VCOM_HSWITCH_ADDR 0x1403
+#define L_RGB_BASE_ADDR 0x1405
+#define L_RGB_COEFF_ADDR 0x1406
+#define L_POL_CNTL_ADDR 0x1407
+#define L_DITH_CNTL_ADDR 0x1408
+#define L_GAMMA_PROBE_CTRL 0x1409
+#define L_GAMMA_PROBE_COLOR_L 0x140a
+#define L_GAMMA_PROBE_COLOR_H 0x140b
+#define L_GAMMA_PROBE_HL_COLOR 0x140c
+#define L_GAMMA_PROBE_POS_X 0x140d
+#define L_GAMMA_PROBE_POS_Y 0x140e
+#define L_STH1_HS_ADDR 0x1410
+#define L_STH1_HE_ADDR 0x1411
+#define L_STH1_VS_ADDR 0x1412
+#define L_STH1_VE_ADDR 0x1413
+#define L_STH2_HS_ADDR 0x1414
+#define L_STH2_HE_ADDR 0x1415
+#define L_STH2_VS_ADDR 0x1416
+#define L_STH2_VE_ADDR 0x1417
+#define L_OEH_HS_ADDR 0x1418
+#define L_OEH_HE_ADDR 0x1419
+#define L_OEH_VS_ADDR 0x141a
+#define L_OEH_VE_ADDR 0x141b
+#define L_VCOM_HSWITCH_ADDR 0x141c
+#define L_VCOM_VS_ADDR 0x141d
+#define L_VCOM_VE_ADDR 0x141e
+#define L_CPV1_HS_ADDR 0x141f
+#define L_CPV1_HE_ADDR 0x1420
+#define L_CPV1_VS_ADDR 0x1421
+#define L_CPV1_VE_ADDR 0x1422
+#define L_CPV2_HS_ADDR 0x1423
+#define L_CPV2_HE_ADDR 0x1424
+#define L_CPV2_VS_ADDR 0x1425
+#define L_CPV2_VE_ADDR 0x1426
+#define L_STV1_HS_ADDR 0x1427
+#define L_STV1_HE_ADDR 0x1428
+#define L_STV1_VS_ADDR 0x1429
+#define L_STV1_VE_ADDR 0x142a
+#define L_STV2_HS_ADDR 0x142b
+#define L_STV2_HE_ADDR 0x142c
+#define L_STV2_VS_ADDR 0x142d
+#define L_STV2_VE_ADDR 0x142e
+#define L_OEV1_HS_ADDR 0x142f
+#define L_OEV1_HE_ADDR 0x1430
+#define L_OEV1_VS_ADDR 0x1431
+#define L_OEV1_VE_ADDR 0x1432
+#define L_OEV2_HS_ADDR 0x1433
+#define L_OEV2_HE_ADDR 0x1434
+#define L_OEV2_VS_ADDR 0x1435
+#define L_OEV2_VE_ADDR 0x1436
+#define L_OEV3_HS_ADDR 0x1437
+#define L_OEV3_HE_ADDR 0x1438
+#define L_OEV3_VS_ADDR 0x1439
+#define L_OEV3_VE_ADDR 0x143a
+#define L_LCD_PWR_ADDR 0x143b
+#define L_LCD_PWM0_LO_ADDR 0x143c
+#define L_LCD_PWM0_HI_ADDR 0x143d
+#define L_LCD_PWM1_LO_ADDR 0x143e
+#define L_LCD_PWM1_HI_ADDR 0x143f
+#define L_INV_CNT_ADDR 0x1440
+#define L_TCON_MISC_SEL_ADDR 0x1441
+#define L_DUAL_PORT_CNTL_ADDR 0x1442
+#define MLVDS_CLK_CTL1_HI 0x1443
+#define MLVDS_CLK_CTL1_LO 0x1444
+#define L_TCON_DOUBLE_CTL 0x1449
+#define L_TCON_PATTERN_HI 0x144a
+#define L_TCON_PATTERN_LO 0x144b
+#define LDIM_BL_ADDR_PORT 0x144e
+#define LDIM_BL_DATA_PORT 0x144f
+#define L_DE_HS_ADDR 0x1451
+#define L_DE_HE_ADDR 0x1452
+#define L_DE_VS_ADDR 0x1453
+#define L_DE_VE_ADDR 0x1454
+#define L_HSYNC_HS_ADDR 0x1455
+#define L_HSYNC_HE_ADDR 0x1456
+#define L_HSYNC_VS_ADDR 0x1457
+#define L_HSYNC_VE_ADDR 0x1458
+#define L_VSYNC_HS_ADDR 0x1459
+#define L_VSYNC_HE_ADDR 0x145a
+#define L_VSYNC_VS_ADDR 0x145b
+#define L_VSYNC_VE_ADDR 0x145c
+#define L_LCD_MCU_CTL 0x145d
+#define DUAL_MLVDS_CTL 0x1460
+#define DUAL_MLVDS_LINE_START 0x1461
+#define DUAL_MLVDS_LINE_END 0x1462
+#define DUAL_MLVDS_PIXEL_W_START_L 0x1463
+#define DUAL_MLVDS_PIXEL_W_END_L 0x1464
+#define DUAL_MLVDS_PIXEL_W_START_R 0x1465
+#define DUAL_MLVDS_PIXEL_W_END_R 0x1466
+#define DUAL_MLVDS_PIXEL_R_START_L 0x1467
+#define DUAL_MLVDS_PIXEL_R_CNT_L 0x1468
+#define DUAL_MLVDS_PIXEL_R_START_R 0x1469
+#define DUAL_MLVDS_PIXEL_R_CNT_R 0x146a
+#define V_INVERSION_PIXEL 0x1470
+#define V_INVERSION_LINE 0x1471
+#define V_INVERSION_CONTROL 0x1472
+#define MLVDS2_CONTROL 0x1474
+#define MLVDS2_CONFIG_HI 0x1475
+#define MLVDS2_CONFIG_LO 0x1476
+#define MLVDS2_DUAL_GATE_WR_START 0x1477
+#define MLVDS2_DUAL_GATE_WR_END 0x1478
+#define MLVDS2_DUAL_GATE_RD_START 0x1479
+#define MLVDS2_DUAL_GATE_RD_END 0x147a
+#define MLVDS2_SECOND_RESET_CTL 0x147b
+#define MLVDS2_DUAL_GATE_CTL_HI 0x147c
+#define MLVDS2_DUAL_GATE_CTL_LO 0x147d
+#define MLVDS2_RESET_CONFIG_HI 0x147e
+#define MLVDS2_RESET_CONFIG_LO 0x147f
+#define GAMMA_CNTL_PORT 0x1480
+#define GAMMA_DATA_PORT 0x1481
+#define GAMMA_ADDR_PORT 0x1482
+#define GAMMA_VCOM_HSWITCH_ADDR 0x1483
+#define RGB_BASE_ADDR 0x1485
+#define RGB_COEFF_ADDR 0x1486
+#define POL_CNTL_ADDR 0x1487
+#define DITH_CNTL_ADDR 0x1488
+#define GAMMA_PROBE_CTRL 0x1489
+#define GAMMA_PROBE_COLOR_L 0x148a
+#define GAMMA_PROBE_COLOR_H 0x148b
+#define GAMMA_PROBE_HL_COLOR 0x148c
+#define GAMMA_PROBE_POS_X 0x148d
+#define GAMMA_PROBE_POS_Y 0x148e
+#define STH1_HS_ADDR 0x1490
+#define STH1_HE_ADDR 0x1491
+#define STH1_VS_ADDR 0x1492
+#define STH1_VE_ADDR 0x1493
+#define STH2_HS_ADDR 0x1494
+#define STH2_HE_ADDR 0x1495
+#define STH2_VS_ADDR 0x1496
+#define STH2_VE_ADDR 0x1497
+#define OEH_HS_ADDR 0x1498
+#define OEH_HE_ADDR 0x1499
+#define OEH_VS_ADDR 0x149a
+#define OEH_VE_ADDR 0x149b
+#define VCOM_HSWITCH_ADDR 0x149c
+#define VCOM_VS_ADDR 0x149d
+#define VCOM_VE_ADDR 0x149e
+#define CPV1_HS_ADDR 0x149f
+#define CPV1_HE_ADDR 0x14a0
+#define CPV1_VS_ADDR 0x14a1
+#define CPV1_VE_ADDR 0x14a2
+#define CPV2_HS_ADDR 0x14a3
+#define CPV2_HE_ADDR 0x14a4
+#define CPV2_VS_ADDR 0x14a5
+#define CPV2_VE_ADDR 0x14a6
+#define STV1_HS_ADDR 0x14a7
+#define STV1_HE_ADDR 0x14a8
+#define STV1_VS_ADDR 0x14a9
+#define STV1_VE_ADDR 0x14aa
+#define STV2_HS_ADDR 0x14ab
+#define STV2_HE_ADDR 0x14ac
+#define STV2_VS_ADDR 0x14ad
+#define STV2_VE_ADDR 0x14ae
+#define OEV1_HS_ADDR 0x14af
+#define OEV1_HE_ADDR 0x14b0
+#define OEV1_VS_ADDR 0x14b1
+#define OEV1_VE_ADDR 0x14b2
+#define OEV2_HS_ADDR 0x14b3
+#define OEV2_HE_ADDR 0x14b4
+#define OEV2_VS_ADDR 0x14b5
+#define OEV2_VE_ADDR 0x14b6
+#define OEV3_HS_ADDR 0x14b7
+#define OEV3_HE_ADDR 0x14b8
+#define OEV3_VS_ADDR 0x14b9
+#define OEV3_VE_ADDR 0x14ba
+#define LCD_PWR_ADDR 0x14bb
+#define LCD_PWM0_LO_ADDR 0x14bc
+#define LCD_PWM0_HI_ADDR 0x14bd
+#define LCD_PWM1_LO_ADDR 0x14be
+#define LCD_PWM1_HI_ADDR 0x14bf
+#define INV_CNT_ADDR 0x14c0
+#define TCON_MISC_SEL_ADDR 0x14c1
+#define DUAL_PORT_CNTL_ADDR 0x14c2
+#define MLVDS_CONTROL 0x14c3
+#define MLVDS_RESET_PATTERN_HI 0x14c4
+#define MLVDS_RESET_PATTERN_LO 0x14c5
+#define MLVDS_RESET_PATTERN_EXT 0x14c6
+#define MLVDS_CONFIG_HI 0x14c7
+#define MLVDS_CONFIG_LO 0x14c8
+#define TCON_DOUBLE_CTL 0x14c9
+#define TCON_PATTERN_HI 0x14ca
+#define TCON_PATTERN_LO 0x14cb
+#define TCON_CONTROL_HI 0x14cc
+#define TCON_CONTROL_LO 0x14cd
+#define LVDS_BLANK_DATA_HI 0x14ce
+#define LVDS_BLANK_DATA_LO 0x14cf
+#define LVDS_PACK_CNTL_ADDR 0x14d0
+#define DE_HS_ADDR 0x14d1
+#define DE_HE_ADDR 0x14d2
+#define DE_VS_ADDR 0x14d3
+#define DE_VE_ADDR 0x14d4
+#define HSYNC_HS_ADDR 0x14d5
+#define HSYNC_HE_ADDR 0x14d6
+#define HSYNC_VS_ADDR 0x14d7
+#define HSYNC_VE_ADDR 0x14d8
+#define VSYNC_HS_ADDR 0x14d9
+#define VSYNC_HE_ADDR 0x14da
+#define VSYNC_VS_ADDR 0x14db
+#define VSYNC_VE_ADDR 0x14dc
+#define LCD_MCU_CTL 0x14dd
+#define LCD_MCU_DATA_0 0x14de
+#define LCD_MCU_DATA_1 0x14df
+#define LVDS_GEN_CNTL 0x14e0
+#define LVDS_PHY_CNTL0 0x14e1
+#define LVDS_PHY_CNTL1 0x14e2
+#define LVDS_PHY_CNTL2 0x14e3
+#define LVDS_PHY_CNTL3 0x14e4
+#define LVDS_PHY_CNTL4 0x14e5
+#define LVDS_PHY_CNTL5 0x14e6
+#define LVDS_SRG_TEST 0x14e8
+#define LVDS_BIST_MUX0 0x14e9
+#define LVDS_BIST_MUX1 0x14ea
+#define LVDS_BIST_FIXED0 0x14eb
+#define LVDS_BIST_FIXED1 0x14ec
+#define LVDS_BIST_CNTL0 0x14ed
+#define LVDS_CLKB_CLKA 0x14ee
+#define LVDS_PHY_CLK_CNTL 0x14ef
+#define LVDS_SER_EN 0x14f0
+#define LVDS_PHY_CNTL6 0x14f1
+#define LVDS_PHY_CNTL7 0x14f2
+#define LVDS_PHY_CNTL8 0x14f3
+#define MLVDS_CLK_CTL0_HI 0x14f4
+#define MLVDS_CLK_CTL0_LO 0x14f5
+#define MLVDS_DUAL_GATE_WR_START 0x14f6
+#define MLVDS_DUAL_GATE_WR_END 0x14f7
+#define MLVDS_DUAL_GATE_RD_START 0x14f8
+#define MLVDS_DUAL_GATE_RD_END 0x14f9
+#define MLVDS_SECOND_RESET_CTL 0x14fa
+#define MLVDS_DUAL_GATE_CTL_HI 0x14fb
+#define MLVDS_DUAL_GATE_CTL_LO 0x14fc
+#define MLVDS_RESET_CONFIG_HI 0x14fd
+#define MLVDS_RESET_CONFIG_LO 0x14fe
+#define VPU_OSD1_MMC_CTRL 0x2701
+#define VPU_OSD2_MMC_CTRL 0x2702
+#define VPU_VD1_MMC_CTRL 0x2703
+#define VPU_VD2_MMC_CTRL 0x2704
+#define VPU_DI_IF1_MMC_CTRL 0x2705
+#define VPU_DI_MEM_MMC_CTRL 0x2706
+#define VPU_DI_INP_MMC_CTRL 0x2707
+#define VPU_DI_MTNRD_MMC_CTRL 0x2708
+#define VPU_DI_CHAN2_MMC_CTRL 0x2709
+#define VPU_DI_MTNWR_MMC_CTRL 0x270a
+#define VPU_DI_NRWR_MMC_CTRL 0x270b
+#define VPU_DI_DIWR_MMC_CTRL 0x270c
+#define VPU_VDIN0_MMC_CTRL 0x270d
+#define VPU_VDIN1_MMC_CTRL 0x270e
+#define VPU_BT656_MMC_CTRL 0x270f
+#define VPU_TVD3D_MMC_CTRL 0x2710
+#define VPU_TVDVBI_MMC_CTRL 0x2711
+#define VPU_TVDVBI_VSLATCH_ADDR 0x2712
+#define VPU_TVDVBI_WRRSP_ADDR 0x2713
+#define VPU_VDIN_PRE_ARB_CTRL 0x2714
+#define VPU_VDISP_PRE_ARB_CTRL 0x2715
+#define VPU_VPUARB2_PRE_ARB_CTRL 0x2716
+#define VPU_OSD3_MMC_CTRL 0x2717
+#define VPU_OSD4_MMC_CTRL 0x2718
+#define VPU_VD3_MMC_CTRL 0x2719
+#define VPU_VIU_VENC_MUX_CTRL 0x271a
+#define VIU1_SEL_VENC_MASK 0x3
+#define VIU1_SEL_VENC_ENCL 0
+#define VIU1_SEL_VENC_ENCI 1
+#define VIU1_SEL_VENC_ENCP 2
+#define VIU1_SEL_VENC_ENCT 3
+#define VIU2_SEL_VENC_MASK 0xc
+#define VIU2_SEL_VENC_ENCL 0
+#define VIU2_SEL_VENC_ENCI (1 << 2)
+#define VIU2_SEL_VENC_ENCP (2 << 2)
+#define VIU2_SEL_VENC_ENCT (3 << 2)
+#define VPU_HDMI_SETTING 0x271b
+#define ENCI_INFO_READ 0x271c
+#define ENCP_INFO_READ 0x271d
+#define ENCT_INFO_READ 0x271e
+#define ENCL_INFO_READ 0x271f
+#define VPU_SW_RESET 0x2720
+#define VPU_D2D3_MMC_CTRL 0x2721
+#define VPU_CONT_MMC_CTRL 0x2722
+#define VPU_CLK_GATE 0x2723
+#define VPU_RDMA_MMC_CTRL 0x2724
+#define VPU_MEM_PD_REG0 0x2725
+#define VPU_MEM_PD_REG1 0x2726
+#define VPU_HDMI_DATA_OVR 0x2727
+#define VPU_PROT1_MMC_CTRL 0x2728
+#define VPU_PROT2_MMC_CTRL 0x2729
+#define VPU_PROT3_MMC_CTRL 0x272a
+#define VPU_ARB4_V1_MMC_CTRL 0x272b
+#define VPU_ARB4_V2_MMC_CTRL 0x272c
+#define VPU_VPU_PWM_V0 0x2730
+#define VPU_VPU_PWM_V1 0x2731
+#define VPU_VPU_PWM_V2 0x2732
+#define VPU_VPU_PWM_V3 0x2733
+#define VPU_VPU_PWM_H0 0x2734
+#define VPU_VPU_PWM_H1 0x2735
+#define VPU_VPU_PWM_H2 0x2736
+#define VPU_VPU_PWM_H3 0x2737
+#define VPU_MISC_CTRL 0x2740
+#define VPU_ISP_GCLK_CTRL0 0x2741
+#define VPU_ISP_GCLK_CTRL1 0x2742
+#define VPU_VDIN_ASYNC_HOLD_CTRL 0x2743
+#define VPU_VDISP_ASYNC_HOLD_CTRL 0x2744
+#define VPU_VPUARB2_ASYNC_HOLD_CTRL 0x2745
+
+#define VPU_PROT1_CLK_GATE 0x2750
+#define VPU_PROT1_GEN_CNTL 0x2751
+#define VPU_PROT1_X_START_END 0x2752
+#define VPU_PROT1_Y_START_END 0x2753
+#define VPU_PROT1_Y_LEN_STEP 0x2754
+#define VPU_PROT1_RPT_LOOP 0x2755
+#define VPU_PROT1_RPT_PAT 0x2756
+#define VPU_PROT1_DDR 0x2757
+#define VPU_PROT1_RBUF_ROOM 0x2758
+#define VPU_PROT1_STAT_0 0x2759
+#define VPU_PROT1_STAT_1 0x275a
+#define VPU_PROT1_STAT_2 0x275b
+#define VPU_PROT1_REQ_ONOFF 0x275c
+#define VPU_PROT2_CLK_GATE 0x2760
+#define VPU_PROT2_GEN_CNTL 0x2761
+#define VPU_PROT2_X_START_END 0x2762
+#define VPU_PROT2_Y_START_END 0x2763
+#define VPU_PROT2_Y_LEN_STEP 0x2764
+#define VPU_PROT2_RPT_LOOP 0x2765
+#define VPU_PROT2_RPT_PAT 0x2766
+#define VPU_PROT2_DDR 0x2767
+#define VPU_PROT2_RBUF_ROOM 0x2768
+#define VPU_PROT2_STAT_0 0x2769
+#define VPU_PROT2_STAT_1 0x276a
+#define VPU_PROT2_STAT_2 0x276b
+#define VPU_PROT2_REQ_ONOFF 0x276c
+#define VPU_PROT3_CLK_GATE 0x2770
+#define VPU_PROT3_GEN_CNTL 0x2771
+#define VPU_PROT3_X_START_END 0x2772
+#define VPU_PROT3_Y_START_END 0x2773
+#define VPU_PROT3_Y_LEN_STEP 0x2774
+#define VPU_PROT3_RPT_LOOP 0x2775
+#define VPU_PROT3_RPT_PAT 0x2776
+#define VPU_PROT3_DDR 0x2777
+#define VPU_PROT3_RBUF_ROOM 0x2778
+#define VPU_PROT3_STAT_0 0x2779
+#define VPU_PROT3_STAT_1 0x277a
+#define VPU_PROT3_STAT_2 0x277b
+#define VPU_PROT3_REQ_ONOFF 0x277c
+
+/* osd super scale */
+#define OSDSR_HV_SIZEIN 0x3130
+#define OSDSR_CTRL_MODE 0x3131
+#define OSDSR_ABIC_HCOEF 0x3132
+#define OSDSR_YBIC_HCOEF 0x3133
+#define OSDSR_CBIC_HCOEF 0x3134
+#define OSDSR_ABIC_VCOEF 0x3135
+#define OSDSR_YBIC_VCOEF 0x3136
+#define OSDSR_CBIC_VCOEF 0x3137
+#define OSDSR_VAR_PARA 0x3138
+#define OSDSR_CONST_PARA 0x3139
+#define OSDSR_RKE_EXTWIN 0x313a
+#define OSDSR_UK_GRAD2DDIAG_TH_RATE 0x313b
+#define OSDSR_UK_GRAD2DDIAG_LIMIT 0x313c
+#define OSDSR_UK_GRAD2DADJA_TH_RATE 0x313d
+#define OSDSR_UK_GRAD2DADJA_LIMIT 0x313e
+#define OSDSR_UK_BST_GAIN 0x313f
+#define OSDSR_HVBLEND_TH 0x3140
+#define OSDSR_DEMO_WIND_TB 0x3141
+#define OSDSR_DEMO_WIND_LR 0x3142
+#define OSDSR_INT_BLANK_NUM 0x3143
+#define OSDSR_FRM_END_STAT 0x3144
+#define OSDSR_ABIC_HCOEF0 0x3145
+#define OSDSR_YBIC_HCOEF0 0x3146
+#define OSDSR_CBIC_HCOEF0 0x3147
+#define OSDSR_ABIC_VCOEF0 0x3148
+#define OSDSR_YBIC_VCOEF0 0x3149
+#define OSDSR_CBIC_VCOEF0 0x314a
+
+#endif /* __MESON_REGISTERS_H */
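
As an aside on the VIU1/VIU2 encoder-select fields defined just above: VIU1_SEL_VENC_* occupy bits [1:0] of VPU_VIU_VENC_MUX_CTRL and VIU2_SEL_VENC_* occupy bits [3:2] (the VIU2 values are already pre-shifted). A minimal sketch of how those masks could be combined is shown below; the helper name is hypothetical and not taken from the patch, and _REG() stands for the driver's word-to-byte register-offset macro used throughout the .c files in this series, with "base" being the ioremapped VPU register window.

	/* Sketch: program both VIU encoder selects in one read-modify-write. */
	static inline void viu_select_encoders(void __iomem *base,
					       u32 viu1_sel, u32 viu2_sel)
	{
		u32 val = readl(base + _REG(VPU_VIU_VENC_MUX_CTRL));

		val &= ~(VIU1_SEL_VENC_MASK | VIU2_SEL_VENC_MASK);
		val |= (viu1_sel & VIU1_SEL_VENC_MASK); /* e.g. VIU1_SEL_VENC_ENCI */
		val |= (viu2_sel & VIU2_SEL_VENC_MASK); /* e.g. VIU2_SEL_VENC_ENCI, pre-shifted */
		writel(val, base + _REG(VPU_VIU_VENC_MUX_CTRL));
	}
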
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
new file mode 100644
index 000000000000..252cfd4b19b1
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_vclk.h"
+
+/*
+ * VCLK is the "Pixel Clock" frequency generator, fed from a dedicated PLL.
+ * We handle the following encodings:
+ * - CVBS 27MHz clock, generated via VCLK2 for the VENCI and VDAC blocks
+ *
+ * What is missing:
+ * - HDMI pixel clock generation
+ */
+
+/* HHI Registers */
+#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
+#define VID_PLL_EN BIT(19)
+#define VID_PLL_BYPASS BIT(18)
+#define VID_PLL_PRESET BIT(15)
+#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
+#define VCLK2_DIV_MASK 0xff
+#define VCLK2_DIV_EN BIT(16)
+#define VCLK2_DIV_RESET BIT(17)
+#define CTS_VDAC_SEL_MASK (0xf << 28)
+#define CTS_VDAC_SEL_SHIFT 28
+#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
+#define VCLK2_EN BIT(19)
+#define VCLK2_SEL_MASK (0x7 << 16)
+#define VCLK2_SEL_SHIFT 16
+#define VCLK2_SOFT_RESET BIT(15)
+#define VCLK2_DIV1_EN BIT(0)
+#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
+#define CTS_ENCI_SEL_MASK (0xf << 28)
+#define CTS_ENCI_SEL_SHIFT 28
+#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
+#define CTS_ENCI_EN BIT(0)
+#define CTS_VDAC_EN BIT(4)
+
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+
+#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
+#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
+#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
+#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+
+#define HDMI_PLL_RESET BIT(28)
+#define HDMI_PLL_LOCK BIT(31)
+
+/*
+ * Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC
+ *
+ * TOFIX: Refactor into table to also handle HDMI frequency and paths
+ */
+static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
+{
+ unsigned int val;
+
+ /* Setup PLL to output 1.485GHz */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00404e00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0xa6212844);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c4d000c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET, 0);
+ }
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* Disable VCLK2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
+
+ /* Disable vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
+ /* Enable vid_pll bypass to HDMI pll */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_BYPASS, VID_PLL_BYPASS);
+ /* Enable the vid_pll output clock */
+ regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
+ VID_PLL_EN, VID_PLL_EN);
+
+ /* Setup the VCLK2 divider value to achieve 27MHz */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ VCLK2_DIV_MASK, (55 - 1));
+
+ /* select vid_pll for vclk2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+ /* enable vclk2 gate */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
+
+ /* select vclk_div1 for enci */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
+ CTS_ENCI_SEL_MASK, (8 << CTS_ENCI_SEL_SHIFT));
+ /* select vclk_div1 for vdac */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ CTS_VDAC_SEL_MASK, (8 << CTS_VDAC_SEL_SHIFT));
+
+ /* release vclk2_div_reset and enable vclk2_div */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
+ VCLK2_DIV_EN | VCLK2_DIV_RESET, VCLK2_DIV_EN);
+
+ /* enable vclk2_div1 gate */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_DIV1_EN, VCLK2_DIV1_EN);
+
+ /* reset vclk2 */
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SOFT_RESET, VCLK2_SOFT_RESET);
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SOFT_RESET, 0);
+
+ /* enable enci_clk */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_ENCI_EN, CTS_ENCI_EN);
+ /* enable vdac_clk */
+ regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
+ CTS_VDAC_EN, CTS_VDAC_EN);
+}
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int freq)
+{
+ if (target == MESON_VCLK_TARGET_CVBS && freq == MESON_VCLK_CVBS)
+ meson_venci_cvbs_clock_config(priv);
+}
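
For reference, the numbers in meson_venci_cvbs_clock_config() above work out as follows (taking the 1.485GHz figure from the "Setup PLL" comment at face value):

	PLL output          : 1485 MHz
	vid_pll             : bypassed straight to the HDMI PLL output -> 1485 MHz
	VCLK2 divider       : register written with (55 - 1)           -> divide by 55
	VCLK2               : 1485 MHz / 55 = 27 MHz
	ENCI / VDAC clocks  : gated copies of that 27 MHz CVBS pixel clock

which matches MESON_VCLK_CVBS = 27000 (presumably kHz) in meson_vclk.h below.
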
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
new file mode 100644
index 000000000000..ec62735996de
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Clock */
+
+#ifndef __MESON_VCLK_H
+#define __MESON_VCLK_H
+
+enum {
+ MESON_VCLK_TARGET_CVBS = 0,
+};
+
+/* 27MHz is the CVBS Pixel Clock */
+#define MESON_VCLK_CVBS 27000
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int freq);
+
+#endif /* __MESON_VCLK_H */
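
Usage, for context: the caller shown in this series is the ENCI mode-set path in meson_venc.c below, which requests the CVBS clock tree like so:

	meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
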
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
new file mode 100644
index 000000000000..d836b2274531
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_venc.h"
+#include "meson_vpp.h"
+#include "meson_vclk.h"
+#include "meson_registers.h"
+
+/*
+ * VENC handles the pixel encoding into the output formats.
+ * We handle the following encodings:
+ * - CVBS encoding via the ENCI encoder and the VDAC digital-to-analog converter
+ *
+ * What is missing:
+ * - TMDS/HDMI encoding via ENCI_DVI and ENCP
+ * - Setup of more clock rates for HDMI modes
+ * - LCD panel encoding via ENCL
+ * - TV panel encoding via ENCT
+ */
+
+struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
+ .mode_tag = MESON_VENC_MODE_CVBS_PAL,
+ .hso_begin = 3,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 7,
+ .video_prog_mode = 0xff,
+ .video_mode = 0x13,
+ .sch_adjust = 0x28,
+ .yc_delay = 0x343,
+ .pixel_start = 251,
+ .pixel_end = 1691,
+ .top_field_line_start = 22,
+ .top_field_line_end = 310,
+ .bottom_field_line_start = 23,
+ .bottom_field_line_end = 311,
+ .video_saturation = 9,
+ .video_contrast = 0,
+ .video_brightness = 0,
+ .video_hue = 0,
+ .analog_sync_adj = 0x8080,
+};
+
+struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc = {
+ .mode_tag = MESON_VENC_MODE_CVBS_NTSC,
+ .hso_begin = 5,
+ .hso_end = 129,
+ .vso_even = 3,
+ .vso_odd = 260,
+ .macv_max_amp = 0xb,
+ .video_prog_mode = 0xf0,
+ .video_mode = 0x8,
+ .sch_adjust = 0x20,
+ .yc_delay = 0x333,
+ .pixel_start = 227,
+ .pixel_end = 1667,
+ .top_field_line_start = 18,
+ .top_field_line_end = 258,
+ .bottom_field_line_start = 19,
+ .bottom_field_line_end = 259,
+ .video_saturation = 18,
+ .video_contrast = 3,
+ .video_brightness = 0,
+ .video_hue = 0,
+ .analog_sync_adj = 0x9c00,
+};
+
+void meson_venci_cvbs_mode_set(struct meson_drm *priv,
+ struct meson_cvbs_enci_mode *mode)
+{
+ if (mode->mode_tag == priv->venc.current_mode)
+ return;
+
+ /* CVBS Filter settings */
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+
+ /* Digital Video Select : Interlace, clk27 clk, external */
+ writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
+
+ /* Reset Video Mode */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ /* Horizontal sync signal output */
+ writel_relaxed(mode->hso_begin,
+ priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
+ writel_relaxed(mode->hso_end,
+ priv->io_base + _REG(ENCI_SYNC_HSO_END));
+
+ /* Vertical Sync lines */
+ writel_relaxed(mode->vso_even,
+ priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
+ writel_relaxed(mode->vso_odd,
+ priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
+
+ /* Macrovision max amplitude change */
+ writel_relaxed(0x8100 + mode->macv_max_amp,
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+
+ /* Video mode */
+ writel_relaxed(mode->video_prog_mode,
+ priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
+ writel_relaxed(mode->video_mode,
+ priv->io_base + _REG(ENCI_VIDEO_MODE));
+
+ /* Advanced Video Mode:
+ * Demux shifting 0x2
+ * Blank line end at line 17/22
+ * High bandwidth Luma Filter
+ * Low bandwidth Chroma Filter
+ * Bypass luma low pass filter
+ * No macrovision on CSYNC
+ */
+ writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+
+ writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
+
+ /* Sync mode: MASTER (master mode), free run, send HSO/VSO out */
+ writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
+
+ /* 0x3 Y, C, and Component Y delay */
+ writel_relaxed(mode->yc_delay, priv->io_base + _REG(ENCI_YC_DELAY));
+
+ /* Timings */
+ writel_relaxed(mode->pixel_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
+ writel_relaxed(mode->pixel_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
+
+ writel_relaxed(mode->top_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
+ writel_relaxed(mode->top_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
+
+ writel_relaxed(mode->bottom_field_line_start,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
+ writel_relaxed(mode->bottom_field_line_end,
+ priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
+
+ /* Internal Venc, Internal VIU Sync, Internal Vencoder */
+ writel_relaxed(0, priv->io_base + _REG(VENC_SYNC_ROUTE));
+
+ /* Take the Interlaced TV Encoder out of reset */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
+
+ /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
+ writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+
+ /* Power UP Dacs */
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ /* Video Upsampling */
+ writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
+ writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
+ writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
+
+ /* Select Interlace Y DACs */
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL1));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL2));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL3));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL4));
+ writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL5));
+
+ /* Select ENCI for VIU */
+ meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
+
+ /* Enable ENCI FIFO */
+ writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
+
+ /* Select ENCI DACs 0, 1, 4, and 5 */
+ writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
+ writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
+
+ /* Interlace video enable */
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+
+ /* Configure Video Saturation / Contrast / Brightness / Hue */
+ writel_relaxed(mode->video_saturation,
+ priv->io_base + _REG(ENCI_VIDEO_SAT));
+ writel_relaxed(mode->video_contrast,
+ priv->io_base + _REG(ENCI_VIDEO_CONT));
+ writel_relaxed(mode->video_brightness,
+ priv->io_base + _REG(ENCI_VIDEO_BRIGHT));
+ writel_relaxed(mode->video_hue,
+ priv->io_base + _REG(ENCI_VIDEO_HUE));
+
+ /* Enable DAC0 Filter */
+ writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
+ writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
+
+ /* 0 in Macrovision register 0 */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_MACV_N0));
+
+ /* Analog Synchronization and color burst value adjust */
+ writel_relaxed(mode->analog_sync_adj,
+ priv->io_base + _REG(ENCI_SYNC_ADJ));
+
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
+
+ priv->venc.current_mode = mode->mode_tag;
+}
+
+/* Returns the current ENCI field polarity */
+unsigned int meson_venci_get_field(struct meson_drm *priv)
+{
+ return readl_relaxed(priv->io_base + _REG(ENCI_INFO_READ)) & BIT(29);
+}
+
+void meson_venc_enable_vsync(struct meson_drm *priv)
+{
+ writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+}
+
+void meson_venc_disable_vsync(struct meson_drm *priv)
+{
+ writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
+}
+
+void meson_venc_init(struct meson_drm *priv)
+{
+ /* Disable all encoders */
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
+
+ /* Disable VSync IRQ */
+ meson_venc_disable_vsync(priv);
+
+ priv->venc.current_mode = MESON_VENC_MODE_NONE;
+}
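
One note on meson_venci_get_field() above: it returns the raw BIT(29) value of ENCI_INFO_READ, so callers see either 0 or BIT(29) rather than 0/1. A hypothetical consumer (not part of the patch; which value maps to the odd or even field is not documented here) might normalise it like this:

	/* Sketch only: normalise the ENCI field-polarity bit to a boolean. */
	static bool meson_venci_field_is_set(struct meson_drm *priv)
	{
		return meson_venci_get_field(priv) != 0;
	}
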
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
new file mode 100644
index 000000000000..77d4a7d82c44
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Video Encoders
+ * - ENCI : Interlace Video Encoder
+ * - ENCI_DVI : Interlace Video Encoder for DVI/HDMI
+ * - ENCP : Progressive Video Encoder
+ */
+
+#ifndef __MESON_VENC_H
+#define __MESON_VENC_H
+
+enum {
+ MESON_VENC_MODE_NONE = 0,
+ MESON_VENC_MODE_CVBS_PAL,
+ MESON_VENC_MODE_CVBS_NTSC,
+};
+
+struct meson_cvbs_enci_mode {
+ unsigned int mode_tag;
+ unsigned int hso_begin; /* HSO begin position */
+ unsigned int hso_end; /* HSO end position */
+ unsigned int vso_even; /* VSO even line */
+ unsigned int vso_odd; /* VSO odd line */
+ unsigned int macv_max_amp; /* Macrovision max amplitude */
+ unsigned int video_prog_mode;
+ unsigned int video_mode;
+ unsigned int sch_adjust;
+ unsigned int yc_delay;
+ unsigned int pixel_start;
+ unsigned int pixel_end;
+ unsigned int top_field_line_start;
+ unsigned int top_field_line_end;
+ unsigned int bottom_field_line_start;
+ unsigned int bottom_field_line_end;
+ unsigned int video_saturation;
+ unsigned int video_contrast;
+ unsigned int video_brightness;
+ unsigned int video_hue;
+ unsigned int analog_sync_adj;
+};
+
+/* CVBS Timings and Parameters */
+extern struct meson_cvbs_enci_mode meson_cvbs_enci_pal;
+extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
+
+void meson_venci_cvbs_mode_set(struct meson_drm *priv,
+ struct meson_cvbs_enci_mode *mode);
+unsigned int meson_venci_get_field(struct meson_drm *priv);
+
+void meson_venc_enable_vsync(struct meson_drm *priv);
+void meson_venc_disable_vsync(struct meson_drm *priv);
+
+void meson_venc_init(struct meson_drm *priv);
+
+#endif /* __MESON_VENC_H */
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
new file mode 100644
index 000000000000..c809c085fd78
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "meson_venc_cvbs.h"
+#include "meson_venc.h"
+#include "meson_registers.h"
+
+/* HHI VDAC Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+
+struct meson_venc_cvbs {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct meson_drm *priv;
+};
+#define encoder_to_meson_venc_cvbs(x) \
+ container_of(x, struct meson_venc_cvbs, encoder)
+
+#define connector_to_meson_venc_cvbs(x) \
+ container_of(x, struct meson_venc_cvbs, connector)
+
+/* Supported Modes */
+
+struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
+ { /* PAL */
+ .enci = &meson_cvbs_enci_pal,
+ .mode = {
+ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50,
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+ { /* NTSC */
+ .enci = &meson_cvbs_enci_ntsc,
+ .mode = {
+ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60,
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+};
+
+/* Connector */
+
+static void meson_cvbs_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+meson_cvbs_connector_detect(struct drm_connector *connector, bool force)
+{
+ /* FIXME: Add load-detect or jack-detect if possible */
+ return connector_status_connected;
+}
+
+static int meson_cvbs_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ mode = drm_mode_duplicate(dev, &meson_mode->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to create a new display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int meson_cvbs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Validate the modes added in get_modes */
+ return MODE_OK;
+}
+
+static const struct drm_connector_funcs meson_cvbs_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = meson_cvbs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = meson_cvbs_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const
+struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = {
+ .get_modes = meson_cvbs_connector_get_modes,
+ .mode_valid = meson_cvbs_connector_mode_valid,
+};
+
+/* Encoder */
+
+static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = {
+ .destroy = meson_venc_cvbs_encoder_destroy,
+};
+
+static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
+
+ /* Disable CVBS VDAC */
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+}
+
+static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
+
+ /* VDAC0 source is not from ATV */
+ writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
+
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+}
+
+static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_equal(mode, &meson_mode->mode)) {
+ meson_venci_cvbs_mode_set(meson_venc_cvbs->priv,
+ meson_mode->enci);
+ break;
+ }
+ }
+}
+
+static const struct drm_encoder_helper_funcs
+ meson_venc_cvbs_encoder_helper_funcs = {
+ .atomic_check = meson_venc_cvbs_encoder_atomic_check,
+ .disable = meson_venc_cvbs_encoder_disable,
+ .enable = meson_venc_cvbs_encoder_enable,
+ .mode_set = meson_venc_cvbs_encoder_mode_set,
+};
+
+static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
+{
+ struct device_node *ep, *remote;
+
+ /* CVBS VDAC output is on the first port, first endpoint */
+ ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0);
+ if (!ep)
+ return false;
+
+ /* If the endpoint node exists, consider it enabled */
+ remote = of_graph_get_remote_port(ep);
+ if (remote) {
+ of_node_put(ep);
+ return true;
+ }
+
+ of_node_put(ep);
+ of_node_put(remote);
+
+ return false;
+}
+
+int meson_venc_cvbs_create(struct meson_drm *priv)
+{
+ struct drm_device *drm = priv->drm;
+ struct meson_venc_cvbs *meson_venc_cvbs;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret;
+
+ if (!meson_venc_cvbs_connector_is_available(priv)) {
+ dev_info(drm->dev, "CVBS Output connector not available\n");
+ return -ENODEV;
+ }
+
+ meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
+ GFP_KERNEL);
+ if (!meson_venc_cvbs)
+ return -ENOMEM;
+
+ meson_venc_cvbs->priv = priv;
+ encoder = &meson_venc_cvbs->encoder;
+ connector = &meson_venc_cvbs->connector;
+
+ /* Connector */
+
+ drm_connector_helper_add(connector,
+ &meson_cvbs_connector_helper_funcs);
+
+ ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init CVBS connector\n");
+ return ret;
+ }
+
+ connector->interlace_allowed = 1;
+
+ /* Encoder */
+
+ drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs);
+
+ ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs");
+ if (ret) {
+ dev_err(priv->dev, "Failed to init CVBS encoder\n");
+ return ret;
+ }
+
+ encoder->possible_crtcs = BIT(0);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
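
As a sanity check on the mode table near the top of this file, both entries are the standard 13.5MHz BT.601/BT.656 rasters, so the reported refresh rates follow directly from the timings:

	PAL  : 13,500,000 / (864 * 625) = 25.00 frames/s  -> 50 fields/s interlaced (.vrefresh = 50)
	NTSC : 13,500,000 / (858 * 525) ~ 29.97 frames/s  -> ~59.94 fields/s, rounded to .vrefresh = 60
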
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_venc_cvbs.h
new file mode 100644
index 000000000000..9256ccf9d931
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#ifndef __MESON_VENC_CVBS_H
+#define __MESON_VENC_CVBS_H
+
+#include "meson_drv.h"
+#include "meson_venc.h"
+
+struct meson_cvbs_mode {
+ struct meson_cvbs_enci_mode *enci;
+ struct drm_display_mode mode;
+};
+
+#define MESON_CVBS_MODES_COUNT 2
+
+/* Modes supported by the CVBS output */
+extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
+
+int meson_venc_cvbs_create(struct meson_drm *priv);
+
+#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
new file mode 100644
index 000000000000..a6de8ba7af19
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_viu.h"
+#include "meson_vpp.h"
+#include "meson_venc.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/*
+ * The VIU handles the pixel scanout and the basic colorspace conversions.
+ * We handle the following features:
+ * - OSD1 RGB565/RGB888/xRGB8888 scanout
+ * - RGB conversion to x/cb/cr
+ * - Progressive or Interlace buffer scanout
+ * - OSD1 Commit on Vsync
+ * - HDR OSD matrix for GXL/GXM
+ *
+ * What is missing:
+ * - BGR888/xBGR8888/BGRx8888/BGRx8888 modes
+ * - YUV4:2:2 Y0CbY1Cr scanout
+ * - Conversion to YUV 4:4:4 from 4:2:2 input
+ * - Colorkey Alpha matching
+ * - Big endian scanout
+ * - X/Y reverse scanout
+ * - Global alpha setup
+ * - OSD2 support, would need interlace switching on vsync
+ * - OSD1 full scaling to support TV overscan
+ */
+
+/* OSD csc defines */
+
+enum viu_matrix_sel_e {
+ VIU_MATRIX_OSD_EOTF = 0,
+ VIU_MATRIX_OSD,
+};
+
+enum viu_lut_sel_e {
+ VIU_LUT_OSD_EOTF = 0,
+ VIU_LUT_OSD_OETF,
+};
+
+#define COEFF_NORM(a) ((int)((((a) * 2048.0) + 1) / 2))
+#define MATRIX_5X3_COEF_SIZE 24
+
+#define EOTF_COEFF_NORM(a) ((int)((((a) * 4096.0) + 1) / 2))
+#define EOTF_COEFF_SIZE 10
+#define EOTF_COEFF_RIGHTSHIFT 1
+
+static int RGB709_to_YUV709l_coeff[MATRIX_5X3_COEF_SIZE] = {
+ 0, 0, 0, /* pre offset */
+ COEFF_NORM(0.181873), COEFF_NORM(0.611831), COEFF_NORM(0.061765),
+ COEFF_NORM(-0.100251), COEFF_NORM(-0.337249), COEFF_NORM(0.437500),
+ COEFF_NORM(0.437500), COEFF_NORM(-0.397384), COEFF_NORM(-0.040116),
+ 0, 0, 0, /* 10'/11'/12' */
+ 0, 0, 0, /* 20'/21'/22' */
+ 64, 512, 512, /* offset */
+ 0, 0, 0 /* mode, right_shift, clip_en */
+};
+
+/* eotf matrix: bypass */
+static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
+ EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0),
+ EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0),
+ EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0),
+ EOTF_COEFF_RIGHTSHIFT /* right shift */
+};
+
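+/*
+ * m[] follows the MATRIX_5X3_COEF_SIZE layout used by
+ * RGB709_to_YUV709l_coeff: m[0..2] pre-offsets, m[3..17] the (up to 5x3)
+ * coefficients, m[18..20] post-offsets, m[21..23] mode, right shift and
+ * clip enable.
+ */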
+void meson_viu_set_osd_matrix(struct meson_drm *priv,
+ enum viu_matrix_sel_e m_select,
+ int *m, bool csc_on)
+{
+ if (m_select == VIU_MATRIX_OSD) {
+ /* osd matrix, VIU_MATRIX_0 */
+ writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET0_1));
+ writel(m[2] & 0xfff,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET2));
+ writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF00_01));
+ writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF02_10));
+ writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COEF20_21));
+
+ if (m[21]) {
+ writel(((m[11] & 0x1fff) << 16) | (m[12] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF22_30));
+ writel(((m[13] & 0x1fff) << 16) | (m[14] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF31_32));
+ writel(((m[15] & 0x1fff) << 16) | (m[16] & 0x1fff),
+ priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF40_41));
+ writel(m[17] & 0x1fff, priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+ } else
+ writel((m[11] & 0x1fff) << 16, priv->io_base +
+ _REG(VIU_OSD1_MATRIX_COEF22_30));
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+ priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET0_1));
+ writel(m[20] & 0xfff,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET2));
+
+ writel_bits_relaxed(3 << 30, m[21] << 30,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+ writel_bits_relaxed(7 << 16, m[22] << 16,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
+
+ /* 23 reserved for clipping control */
+ writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
+ writel_bits_relaxed(BIT(1), 0,
+ priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
+ } else if (m_select == VIU_MATRIX_OSD_EOTF) {
+ int i;
+
+ /* osd eotf matrix, VIU_MATRIX_OSD_EOTF */
+ for (i = 0; i < 5; i++)
+ writel(((m[i * 2] & 0x1fff) << 16) |
+ (m[i * 2 + 1] & 0x1fff), priv->io_base +
+ _REG(VIU_OSD1_EOTF_CTL + i + 1));
+
+ writel_bits_relaxed(BIT(30), csc_on ? BIT(30) : 0,
+ priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
+ writel_bits_relaxed(BIT(31), csc_on ? BIT(31) : 0,
+ priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
+ }
+}
+
+#define OSD_EOTF_LUT_SIZE 33
+#define OSD_OETF_LUT_SIZE 41
+
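+/*
+ * The EOTF/OETF LUTs are streamed through an address/data port pair,
+ * two 16bit entries per 32bit write, R table first, then G, then B.
+ */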
+void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
+ unsigned int *r_map, unsigned int *g_map,
+ unsigned int *b_map,
+ bool csc_on)
+{
+ unsigned int addr_port;
+ unsigned int data_port;
+ unsigned int ctrl_port;
+ int i;
+
+ if (lut_sel == VIU_LUT_OSD_EOTF) {
+ addr_port = VIU_OSD1_EOTF_LUT_ADDR_PORT;
+ data_port = VIU_OSD1_EOTF_LUT_DATA_PORT;
+ ctrl_port = VIU_OSD1_EOTF_CTL;
+ } else if (lut_sel == VIU_LUT_OSD_OETF) {
+ addr_port = VIU_OSD1_OETF_LUT_ADDR_PORT;
+ data_port = VIU_OSD1_OETF_LUT_DATA_PORT;
+ ctrl_port = VIU_OSD1_OETF_CTL;
+ } else
+ return;
+
+ if (lut_sel == VIU_LUT_OSD_OETF) {
+ writel(0, priv->io_base + _REG(addr_port));
+
+ for (i = 0; i < 20; i++)
+ writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(b_map[OSD_OETF_LUT_SIZE - 1],
+ priv->io_base + _REG(data_port));
+
+ if (csc_on)
+ writel_bits_relaxed(0x7 << 29, 7 << 29,
+ priv->io_base + _REG(ctrl_port));
+ else
+ writel_bits_relaxed(0x7 << 29, 0,
+ priv->io_base + _REG(ctrl_port));
+ } else if (lut_sel == VIU_LUT_OSD_EOTF) {
+ writel(0, priv->io_base + _REG(addr_port));
+
+ for (i = 0; i < 20; i++)
+ writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
+ priv->io_base + _REG(data_port));
+
+ for (i = 0; i < 20; i++)
+ writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
+ priv->io_base + _REG(data_port));
+
+ writel(b_map[OSD_EOTF_LUT_SIZE - 1],
+ priv->io_base + _REG(data_port));
+
+ if (csc_on)
+ writel_bits_relaxed(7 << 27, 7 << 27,
+ priv->io_base + _REG(ctrl_port));
+ else
+ writel_bits_relaxed(7 << 27, 0,
+ priv->io_base + _REG(ctrl_port));
+
+ writel_bits_relaxed(BIT(31), BIT(31),
+ priv->io_base + _REG(ctrl_port));
+ }
+}
+
+/* eotf lut: linear */
+static unsigned int eotf_33_linear_mapping[OSD_EOTF_LUT_SIZE] = {
+ 0x0000, 0x0200, 0x0400, 0x0600,
+ 0x0800, 0x0a00, 0x0c00, 0x0e00,
+ 0x1000, 0x1200, 0x1400, 0x1600,
+ 0x1800, 0x1a00, 0x1c00, 0x1e00,
+ 0x2000, 0x2200, 0x2400, 0x2600,
+ 0x2800, 0x2a00, 0x2c00, 0x2e00,
+ 0x3000, 0x3200, 0x3400, 0x3600,
+ 0x3800, 0x3a00, 0x3c00, 0x3e00,
+ 0x4000
+};
+
+/* osd oetf lut: linear */
+static unsigned int oetf_41_linear_mapping[OSD_OETF_LUT_SIZE] = {
+ 0, 0, 0, 0,
+ 0, 32, 64, 96,
+ 128, 160, 196, 224,
+ 256, 288, 320, 352,
+ 384, 416, 448, 480,
+ 512, 544, 576, 608,
+ 640, 672, 704, 736,
+ 768, 800, 832, 864,
+ 896, 928, 960, 992,
+ 1023, 1023, 1023, 1023,
+ 1023
+};
+
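+/*
+ * Program a pass-through OSD pipeline: linear EOTF/OETF LUTs, a bypass
+ * EOTF matrix and a single RGB709 to limited range YUV709 OSD matrix.
+ */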
+static void meson_viu_load_matrix(struct meson_drm *priv)
+{
+ /* eotf lut bypass */
+ meson_viu_set_osd_lut(priv, VIU_LUT_OSD_EOTF,
+ eotf_33_linear_mapping, /* R */
+ eotf_33_linear_mapping, /* G */
+ eotf_33_linear_mapping, /* B */
+ false);
+
+ /* eotf matrix bypass */
+ meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD_EOTF,
+ eotf_bypass_coeff,
+ false);
+
+ /* oetf lut bypass */
+ meson_viu_set_osd_lut(priv, VIU_LUT_OSD_OETF,
+ oetf_41_linear_mapping, /* R */
+ oetf_41_linear_mapping, /* G */
+ oetf_41_linear_mapping, /* B */
+ false);
+
+ /* osd matrix RGB709 to YUV709 limit */
+ meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD,
+ RGB709_to_YUV709l_coeff,
+ true);
+}
+
+void meson_viu_init(struct meson_drm *priv)
+{
+ uint32_t reg;
+
+ /* Disable OSDs */
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
+
+ /* On GXL/GXM, use the 10-bit HDR conversion matrix */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_viu_load_matrix(priv);
+
+ /* Initialize OSD1 fifo control register */
+ reg = BIT(0) | /* Urgent DDR request priority */
+ (4 << 5) | /* hold_fifo_lines */
+ (3 << 10) | /* burst length 64 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24);
+ writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
+
+ /* Set OSD alpha replace value */
+ writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
+ 0xff << OSD_REPLACE_SHIFT,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
+ 0xff << OSD_REPLACE_SHIFT,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
+
+ priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
+ priv->viu.osd1_interlace = false;
+}
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
new file mode 100644
index 000000000000..073b1910bd1b
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Input Unit */
+
+#ifndef __MESON_VIU_H
+#define __MESON_VIU_H
+
+/* OSDx_BLKx_CFG */
+#define OSD_CANVAS_SEL 16
+
+#define OSD_ENDIANNESS_LE BIT(15)
+#define OSD_ENDIANNESS_BE (0)
+
+#define OSD_BLK_MODE_422 (0x03 << 8)
+#define OSD_BLK_MODE_16 (0x04 << 8)
+#define OSD_BLK_MODE_32 (0x05 << 8)
+#define OSD_BLK_MODE_24 (0x07 << 8)
+
+#define OSD_OUTPUT_COLOR_RGB BIT(7)
+#define OSD_OUTPUT_COLOR_YUV (0)
+
+#define OSD_COLOR_MATRIX_32_RGBA (0x00 << 2)
+#define OSD_COLOR_MATRIX_32_ARGB (0x01 << 2)
+#define OSD_COLOR_MATRIX_32_ABGR (0x02 << 2)
+#define OSD_COLOR_MATRIX_32_BGRA (0x03 << 2)
+
+#define OSD_COLOR_MATRIX_24_RGB (0x00 << 2)
+
+#define OSD_COLOR_MATRIX_16_RGB655 (0x00 << 2)
+#define OSD_COLOR_MATRIX_16_RGB565 (0x04 << 2)
+
+#define OSD_INTERLACE_ENABLED BIT(1)
+#define OSD_INTERLACE_ODD BIT(0)
+#define OSD_INTERLACE_EVEN (0)
+
+/* OSDx_CTRL_STAT */
+#define OSD_ENABLE BIT(21)
+#define OSD_BLK0_ENABLE BIT(0)
+
+#define OSD_GLOBAL_ALPHA_SHIFT 12
+
+/* OSDx_CTRL_STAT2 */
+#define OSD_REPLACE_EN BIT(14)
+#define OSD_REPLACE_SHIFT 6
+
+void meson_viu_init(struct meson_drm *priv);
+
+#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
new file mode 100644
index 000000000000..671909d8672e
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "meson_drv.h"
+#include "meson_vpp.h"
+#include "meson_registers.h"
+
+/*
+ * The VPP handles all the post-processing after the scanout from the VIU.
+ * We handle the following post-processing steps:
+ * - Postblend: blends the OSD1 only
+ *   We exclude OSD2, VS1, VS2 and the Preblend output
+ * - Vertical OSD scaler for OSD1 only; the vertical scaler is kept
+ *   disabled and is only used for interlaced scanout
+ * - Intermediate FIFO with default Amlogic values
+ *
+ * What is missing:
+ * - Preblend for video overlay pre-scaling
+ * - OSD2 support for cursor framebuffer
+ * - Video pre-scaling before postblend
+ * - Full Vertical/Horizontal OSD scaling to support TV overscan
+ * - HDR conversion
+ */
+
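+/*
+ * Route the VIU/VPP output to the selected encoder, e.g.
+ * MESON_VIU_VPP_MUX_ENCI for the interlaced encoder feeding the CVBS output.
+ */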
+void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
+{
+ writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
+}
+
+/*
+ * When the output is interlaced, the OSD must switch between
+ * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
+ * at each vsync.
+ * But the vertical scaler can provide such functionality if it
+ * is configured for 2:1 scaling with the interlace options enabled.
+ */
+void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
+ struct drm_rect *input)
+{
+ writel_relaxed(BIT(3) /* Enable scaler */ |
+ BIT(2), /* Select OSD1 */
+ priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+
+ writel_relaxed(((drm_rect_width(input) - 1) << 16) |
+ (drm_rect_height(input) - 1),
+ priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+ /* 2:1 scaling */
+ writel_relaxed(((input->x1) << 16) | (input->x2),
+ priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+ writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
+ priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+
+ /* 2:1 scaling values */
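+ /* Assumption: the phase step field treats bit 24 as 1.0, so BIT(25)
+  * requests a vertical step of 2.0, i.e. two input lines per output line.
+  */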
+ writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+ writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
+
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+
+ writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
+ (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
+ (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
+ (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
+ (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
+ BIT(23) /* osd_prog_interlace */ |
+ BIT(24), /* Enable vertical scaler */
+ priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+}
+
+void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
+{
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+}
+
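+/*
+ * 33-phase, 4-tap B-spline scaling filter: each word packs four 8bit
+ * taps, which sum to 128 (unity gain) for every phase.
+ */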
+static unsigned int vpp_filter_coefs_4point_bspline[] = {
+ 0x15561500, 0x14561600, 0x13561700, 0x12561800,
+ 0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
+ 0x0f531e00, 0x0e531f00, 0x0d522100, 0x0c522200,
+ 0x0b522300, 0x0b512400, 0x0a502600, 0x0a4f2700,
+ 0x094e2900, 0x084e2a00, 0x084d2b00, 0x074c2c01,
+ 0x074b2d01, 0x064a2f01, 0x06493001, 0x05483201,
+ 0x05473301, 0x05463401, 0x04453601, 0x04433702,
+ 0x04423802, 0x03413a02, 0x03403b02, 0x033f3c02,
+ 0x033d3d03
+};
+
+static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
+ const unsigned int *coefs,
+ bool is_horizontal)
+{
+ int i;
+
+ writel_relaxed(is_horizontal ? BIT(8) : 0,
+ priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
+ for (i = 0; i < 33; i++)
+ writel_relaxed(coefs[i],
+ priv->io_base + _REG(VPP_OSD_SCALE_COEF));
+}
+
+void meson_vpp_init(struct meson_drm *priv)
+{
+ /* set dummy data default YUV black */
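+ /* 0x108080 encodes limited range YUV black (Y 0x10, Cb/Cr 0x80) */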
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ writel_relaxed(0x108080, priv->io_base + _REG(VPP_DUMMY_DATA1));
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) {
+ writel_bits_relaxed(0xff << 16, 0xff << 16,
+ priv->io_base + _REG(VIU_MISC_CTRL1));
+ writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
+ writel_relaxed(0x1020080,
+ priv->io_base + _REG(VPP_DUMMY_DATA1));
+ }
+
+ /* Initialize vpu fifo control registers */
+ writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
+ 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
+ writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
+
+ /* Turn off preblend */
+ writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Turn off POSTBLEND */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Force all planes off */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Disable Scalers */
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+
+ /* Write in the proper filter coefficients. */
+ meson_vpp_write_scaling_filter_coefs(priv,
+ vpp_filter_coefs_4point_bspline, false);
+ meson_vpp_write_scaling_filter_coefs(priv,
+ vpp_filter_coefs_4point_bspline, true);
+}
diff --git a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
new file mode 100644
index 000000000000..ede3b26e0f22
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_vpp.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Video Post Process */
+
+#ifndef __MESON_VPP_H
+#define __MESON_VPP_H
+
+/* Mux VIU/VPP to ENCI */
+#define MESON_VIU_VPP_MUX_ENCI 0x5
+
+void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux);
+
+void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
+ struct drm_rect *input);
+void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv);
+
+void meson_vpp_init(struct meson_drm *priv);
+
+#endif /* __MESON_VPP_H */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1443b3a34775..b0b874264f9d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -82,9 +82,7 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6b21cb27e1cc..3a03ac4045d8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1658,12 +1658,6 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
return NULL;
}
-static enum drm_connector_status mga_vga_detect(struct drm_connector
- *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1680,7 +1674,6 @@ static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs =
static const struct drm_connector_funcs mga_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = mga_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mga_connector_destroy,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index dcf7d11ac380..5e20220ef4c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_populate = mgag200_ttm_tt_populate,
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
.move = NULL,
.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4e2806cf778c..028c24df2291 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,8 @@ msm-y := \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_pipe.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
@@ -48,6 +51,7 @@ msm-y := \
msm_gem_prime.o \
msm_gem_shrinker.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_perf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index fee24297fb92..4be092f911f9 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -206,12 +207,12 @@ enum a2xx_rb_copy_sample_select {
};
enum a2xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
+ BLEND2_DST_PLUS_SRC = 0,
+ BLEND2_SRC_MINUS_DST = 1,
+ BLEND2_MIN_DST_SRC = 2,
+ BLEND2_MAX_DST_SRC = 3,
+ BLEND2_DST_MINUS_SRC = 4,
+ BLEND2_DST_PLUS_SRC_BIAS = 5,
};
enum adreno_mmu_clnt_beh {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 27dabd5e57fb..a066c8b9eccd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a3xx_intp_mode {
SMOOTH = 0,
FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..b999349b7d2d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -41,7 +41,7 @@ extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -65,7 +65,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,15 +294,20 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a3xx_dump(gpu);
@@ -330,17 +335,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +429,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -583,7 +515,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3220b91f559a..4ce21b902779 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
}
#define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
}
#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
}
#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
}
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
@@ -3253,6 +3289,7 @@ static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
@@ -3670,6 +3707,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -3690,6 +3729,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -3752,12 +3805,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index d0d3c7baa8fe..511bc855cc7f 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -113,7 +113,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -137,7 +137,7 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ return gpu->funcs->idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -292,15 +292,20 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
-
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
{
+ int i;
+
adreno_dump_info(gpu);
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a4xx_dump(gpu);
@@ -328,17 +333,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -460,87 +469,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -587,16 +522,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- uint32_t hi, lo, tmp;
-
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- do {
- hi = tmp;
- lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- } while (tmp != hi);
-
- *value = (((uint64_t)hi) << 32) | lo;
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
return 0;
}
@@ -672,7 +599,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..b6fe763ddf34
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3757 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R4G4B4A4_UNORM = 8,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R5G6B5_UNORM = 14,
+ RB5_R16_FLOAT = 23,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16_FLOAT = 69,
+ RB5_R32_FLOAT = 74,
+ RB5_R16G16B16A16_FLOAT = 98,
+ RB5_R32G32_FLOAT = 103,
+ RB5_R32G32B32A32_FLOAT = 130,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_5_5_1_UNORM = 10,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_8_8_UNORM = 15,
+ TFMT5_8_8_SNORM = 16,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_8_8_8_UNORM = 49,
+ TFMT5_8_8_8_SNORM = 50,
+ TFMT5_9_9_9_E5_FLOAT = 53,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_11_11_10_FLOAT = 66,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_blit_buf {
+ BLIT_MRT0 = 0,
+ BLIT_MRT1 = 1,
+ BLIT_MRT2 = 2,
+ BLIT_MRT3 = 3,
+ BLIT_MRT4 = 4,
+ BLIT_MRT5 = 5,
+ BLIT_MRT6 = 6,
+ BLIT_MRT7 = 7,
+ BLIT_ZS = 8,
+ BLIT_Z32 = 9,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_VARYING 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
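+/*
+ * The WIDTH/HEIGHT helpers below shift their argument right by 5, i.e.
+ * the dimension handed to them is programmed in units of 32 (assuming
+ * the input is a pixel count).
+ */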
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
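+/*
+ * Per-render-target state: the REG_A5XX_RB_MRT_* helpers take the MRT
+ * index i0 and step by a stride of 7 registers per entry. The
+ * PITCH/ARRAY_PITCH helpers shift the value right by 6, so pitches are
+ * programmed in 64-unit (presumably byte) granularity.
+ */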
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
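+/*
+ * Blend constant color: each channel has one packed register with 8-bit
+ * UINT/SINT fields and a half-float field (converted via
+ * util_float_to_half()), plus a separate *_F32 register holding the full
+ * 32-bit float (converted via fui()).
+ */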
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000003f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+ return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_UNKNOWN_E29A 0x0000e29a
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_UNKNOWN_E2A1 0x0000e2a1
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_UNKNOWN_E2AB 0x0000e2ab
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_UNKNOWN_E2AE 0x0000e2ae
+
+#define REG_A5XX_UNKNOWN_E2B2 0x0000e2b2
+
+#define REG_A5XX_UNKNOWN_E2B9 0x0000e2b9
+
+#define REG_A5XX_UNKNOWN_E2C0 0x0000e2c0
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_UNKNOWN_E389 0x0000e389
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
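+/*
+ * Vertex fetch state is an array indexed by i0 with a stride of 4
+ * registers per entry (BASE_LO/HI, SIZE, STRIDE); the matching
+ * VFD_DECODE array below uses a stride of 2 (INSTR, STEP_RATE).
+ */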
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0xc0000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 30
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val >> 2) << A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
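+/*
+ * Each SP_VS_OUT_REG packs two vertex shader outputs (A in the low half,
+ * B in the high half), each described by a register id and a component
+ * writemask.
+ */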
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_UNKNOWN_E600 0x0000e600
+
+#define REG_A5XX_UNKNOWN_E640 0x0000e640
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
+
+#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
+
+#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_RB_2D_DST_FILL 0x00002101
+
+#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_LO 0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI 0x00002109
+
+#define REG_A5XX_RB_2D_DST_INFO 0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+
+#define REG_A5XX_RB_2D_DST_LO 0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI 0x00002112
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+
+#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
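+/*
+ * Illustrative only (not part of the generated register description): the
+ * pack helpers above OR together to build the first sampler dword, e.g.
+ *
+ *   samp0 = A5XX_TEX_SAMP_0_XY_MAG(A5XX_TEX_LINEAR) |
+ *           A5XX_TEX_SAMP_0_XY_MIN(A5XX_TEX_LINEAR) |
+ *           A5XX_TEX_SAMP_0_WRAP_S(A5XX_TEX_REPEAT) |
+ *           A5XX_TEX_SAMP_0_WRAP_T(A5XX_TEX_REPEAT) |
+ *           A5XX_TEX_SAMP_0_LOD_BIAS(0.0f);
+ *
+ * A5XX_TEX_LINEAR and A5XX_TEX_REPEAT are assumed to come from the
+ * a5xx_tex_filter/a5xx_tex_clamp enums defined earlier in this header.
+ */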
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
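+/*
+ * Note that the helper above keeps only bits [31:5] of the address
+ * ((val >> 5) packed back at shift 5), so the texture base is expected to
+ * be 32-byte aligned.
+ */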
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..b8647198c11c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,888 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+extern bool hang_debug;
+static void a5xx_dump(struct msm_gpu *gpu);
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned int i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
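+ /* else fall through and submit it like a normal command buffer */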
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence->seqno);
+
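+ /*
+ * CACHE_FLUSH_TS writes the fence seqno to memory once the preceding
+ * commands have landed; bit 31 appears to also request the CACHE_FLUSH_TS
+ * interrupt that drives msm_gpu_retire() from a5xx_irq().
+ */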
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, submit->fence->seqno);
+
+ gpu->funcs->flush(gpu);
+}
+
+struct a5xx_hwcg {
+ u32 offset;
+ u32 value;
+};
+
+static const struct a5xx_hwcg a530_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+static const struct {
+ int (*test)(struct adreno_gpu *gpu);
+ const struct a5xx_hwcg *regs;
+ unsigned int count;
+} a5xx_hwcg_regs[] = {
+ { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
+};
+
+static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
+ const struct a5xx_hwcg *regs, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
+}
+
+static void a5xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
+ if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
+ _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
+ a5xx_hwcg_regs[i].count);
+ return;
+ }
+ }
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+
+ return gpu->funcs->idle(gpu) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_get_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->id, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
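+ /*
+ * The first dword of the image appears to be a header (version) rather
+ * than microcode, hence the fw->size - 4 sized BO and the offset copy.
+ */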
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+ return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range (0 to gpu->gmem) */
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_enable_hwcg(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit */
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Program the RBBM interrupt mask; the host IRQ line stays disabled through the initial bringup stage */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb, 0x0F);
+
+ gpu->funcs->flush(gpu);
+ if (!gpu->funcs->idle(gpu))
+ return -EINVAL;
+ }
+
+ /* Put the GPU into unsecure mode */
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+ }
+
+ if (hang_debug)
+ a5xx_dump(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+static bool a5xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
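+/*
+ * Register ranges for the dump/show helpers: inclusive {start, end} pairs,
+ * terminated by ~0.
+ */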
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+ 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
+ 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
+ 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
+ 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
+ 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
+ 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
+ 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
+ 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
+ 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
+ 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
+ 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
+ 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
+ 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
+ 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
+ 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
+ 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
+ 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
+ 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
+ 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
+ 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
+ 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
+ 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
+ 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
+ ~0
+};
+
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(gpu->dev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU power domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = a5xx_submit,
+ .flush = adreno_flush,
+ .idle = a5xx_idle,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+ .show = a5xx_show,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a5xx_gpu->pdev = pdev;
+ adreno_gpu->registers = a5xx_registers;
+ adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..1590f845d554
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+struct a5xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
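+/*
+ * Poll 'reg' roughly once per microsecond for up to 'usecs' microseconds
+ * until (reg & mask) == value; returns 0 on success, -ETIMEDOUT otherwise.
+ */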
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..72d52c71f769
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,344 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are a convention shared with
+ * the GPMU firmware and are not bound to any specific hardware design.
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
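+
+/*
+ * Rough layout implied by the offsets above (relative to GPMU data RAM):
+ *   +5: init magic (0xBABEFACE), +7: message { state, command, pad,
+ *   payload size, pad, payload[] }
+ */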
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+/* Setup thermal limit management */
+static void a5xx_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ /* Hard code the A530 GPU thermal sensor ID for the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ /* Until we get clock scaling, 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ /* The threshold is fixed at 6000 for A530 */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /* Write the max power - hard coded to 5448 for A530 */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu);
+
+ if (!gpu->funcs->idle(gpu)) {
+ DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+ * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE))
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Set up the limits management */
+ a5xx_lm_setup(gpu);
+
+ /* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const struct firmware *fw;
+ uint32_t dwords = 0, offset = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /* Get the firmware */
+ if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
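+ /*
+ * Layout assumed from the arithmetic below: data[0] is the total size in
+ * dwords, data[1] the block ID, and data[2] the length of a leading block,
+ * so the register command stream starts at data + data[2] + 3 and runs for
+ * data[0] - data[2] - 2 dwords.
+ */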
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+ * A single type4 opcode can only have so many values attached so
+ * add enough opcodes to load all of the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+ /* No need to keep that firmware lying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index e81481d1b7df..4a33ba6f1244 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -172,6 +173,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..893eb2b2531b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
@@ -77,6 +74,15 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ .gpmufw = "a530v3_gpmu.fw2",
},
};
@@ -86,6 +92,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -148,12 +156,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -169,12 +181,20 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
+};
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
struct device_node *child, *node = dev->of_node;
u32 val;
- int ret;
+ int ret, i;
ret = of_property_read_u32(node, "qcom,chipid", &val);
if (ret) {
@@ -208,6 +228,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return -ENXIO;
}
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ config.quirks |= quirks[i].flag;
+
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
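The quirk flags parsed from the device tree here end up in adreno_gpu->quirks (copied from config->quirks in adreno_gpu_init() further down in this patch), so GPU-specific code can branch on them. A hedged sketch of how a consumer might test a flag, assuming the adreno_gpu.h context; the function name is illustrative and not part of this patch:

/* Illustrative only: how a5xx-specific code could react to a DT quirk.
 * example_apply_quirks() is not a function from this patch. */
static void example_apply_quirks(struct adreno_gpu *adreno_gpu)
{
	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) {
		/* e.g. insert an extra wait-for-idle between the two
		 * passes of a two-pass workload */
	}

	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/* e.g. mask out unreliable fault-detect interrupt sources */
	}
}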
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f463278d..a18126150e11 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -22,7 +22,7 @@
#include "msm_mmu.h"
#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
+#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -79,11 +76,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
- if (!adreno_is_a430(adreno_gpu))
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ if (!adreno_is_a430(adreno_gpu)) {
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ rbmemptr(adreno_gpu, rptr));
+ }
return 0;
}
@@ -126,11 +126,14 @@ void adreno_recover(struct msm_gpu *gpu)
adreno_gpu->memptrs->wptr = 0;
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -218,19 +221,18 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
- int ret;
/* wait for CP to drain ringbuffer: */
- ret = spin_until(get_rptr(adreno_gpu) == wptr);
-
- if (ret)
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ return false;
}
#ifdef CONFIG_DEBUG_FS
@@ -278,7 +280,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
@@ -290,11 +291,6 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
-
- for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
- }
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
@@ -350,6 +346,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->gmem = adreno_gpu->info->gmem;
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
+ adreno_gpu->quirks = config->quirks;
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
@@ -381,7 +378,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e036b4a..e8d55b0306ed 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -28,6 +28,9 @@
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used across all 3D devices.
* Each device type has a different offset value for the same registers,
* which are indexed by the enumeration values defined in this enum
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -122,12 +73,16 @@ struct adreno_info {
uint32_t revn;
const char *name;
const char *pm4fw, *pfpfw;
+ const char *gpmufw;
uint32_t gmem;
struct msm_gpu *(*init)(struct drm_device *dev);
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
struct adreno_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t wptr;
@@ -153,7 +108,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -161,6 +116,8 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -171,6 +128,7 @@ struct adreno_platform_config {
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
+ uint32_t quirks;
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -234,6 +192,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -241,7 +204,7 @@ void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+bool adreno_idle(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
@@ -278,8 +241,38 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
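PM4_PARITY() folds the 32-bit value down to a single nibble by XOR and uses 0x9669 as a 16-entry lookup table, returning 1 exactly when the value has an even number of set bits; PKT4 and OUT_PKT7 then embed that bit next to the count and register/opcode fields of the packet header. A small self-contained user-space check of that property (illustration only, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same expression as the kernel helper above, compiled in user space */
static uint32_t pm4_parity(uint32_t val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

int main(void)
{
	/* The lookup yields 1 when the value has an even number of set
	 * bits, so field plus parity bit always has odd parity. */
	for (uint32_t v = 0; v < 0x10000; v++)
		assert(pm4_parity(v) == ((__builtin_popcount(v) & 1) ^ 1u));

	/* e.g. a count field of 2 (one bit set) gets parity bit 0 */
	printf("parity(2) = %u\n", pm4_parity(2));
	return 0;
}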
+
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -290,6 +283,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tells us that the register in
+ * question isn't implemented on the target, without triggering a BUG().
+ * This is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion.
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -311,4 +314,39 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
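With REG_ADRENO_SKIP() a per-GPU offsets table can mark the _HI half of a 64-bit register pair as absent, so adreno_reg_check() returns false and adreno_gpu_write64() quietly drops the upper write on GPUs that only have a 32-bit register. A hedged sketch of such a table entry; REG_EXAMPLE_CP_RB_BASE is a stand-in name, not a register from this patch:

/* Illustrative offsets table for a GPU whose ringbuffer base register
 * is only 32 bits wide (register name is a placeholder). */
static const unsigned int example_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_EXAMPLE_CP_RB_BASE),
	/* no HI half on this target: adreno_gpu_write64() writes only
	 * the LO register and skips the HI write without a BUG() */
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
};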
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
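Worked example for the encoding above, computed in user space: protecting 16 registers starting at 0x100 for both reads and writes puts ilog2(16) = 4 into bits 24-28 and the base shifted left by 2 into the low 20 bits, giving 0x64000400 (what bits 30 and 29 each mean individually is left to the hardware documentation; RDONLY simply omits bit 30). The ilog2() stand-in below is a user-space substitute for the kernel helper:

#include <stdio.h>

#define ilog2(n) (31 - __builtin_clz(n))   /* stand-in for the kernel helper */

#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

int main(void)
{
	/* prints 0x64000400 for base 0x100, length 16 */
	printf("0x%08x\n", ADRENO_PROTECT_RW(0x100, 16));
	return 0;
}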
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index d7477ff867c9..6a2930e75503 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2016 by the following authors:
@@ -58,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -65,6 +67,10 @@ enum vgt_event_type {
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
+ UNK_1C = 28,
+ UNK_1D = 29,
+ BLIT = 30,
+ UNK_26 = 38,
};
enum pc_di_primtype {
@@ -82,7 +88,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -110,11 +115,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -163,6 +172,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -178,6 +188,22 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
+ CP_BLIT = 44,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -196,6 +222,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -218,6 +245,17 @@ enum a4xx_index_size {
INDEX4_SIZE_32_BIT = 2,
};
+enum render_mode_cmd {
+ BYPASS = 1,
+ GMEM = 3,
+ BLIT2D = 5,
+};
+
+enum cp_blit_cmd {
+ BLIT_OP_FILL = 0,
+ BLIT_OP_BLIT = 1,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -258,6 +296,14 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_LOAD_STATE_2 0x00000002
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -389,7 +435,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -437,30 +488,40 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
-#define REG_CP_SET_DRAW_STATE_0 0x00000000
-#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
-#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
}
-#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
-#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
-#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
-#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
-#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
-static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+ return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
}
-#define REG_CP_SET_DRAW_STATE_1 0x00000001
-#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
-#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
-static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
{
- return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+ return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
}
#define REG_CP_SET_BIN_0 0x00000000
@@ -533,5 +594,192 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
+#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
+{
+ return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
+
+#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_0 0x00000000
+#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_1 0x00000001
+#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_2 0x00000002
+#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_3 0x00000003
+
+#define REG_CP_BLIT_0 0x00000000
+#define CP_BLIT_0_OP__MASK 0x0000000f
+#define CP_BLIT_0_OP__SHIFT 0
+static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
+{
+ return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
+}
+
+#define REG_CP_BLIT_1 0x00000001
+#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__SHIFT 0
+static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
+}
+#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
+}
+
+#define REG_CP_BLIT_2 0x00000002
+#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__SHIFT 0
+static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
+}
+#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
+}
+
+#define REG_CP_BLIT_3 0x00000003
+#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__SHIFT 0
+static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
+}
+#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
+}
+
+#define REG_CP_BLIT_4 0x00000004
+#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__SHIFT 0
+static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
+}
+#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
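The CP_EVENT_WRITE_* helpers added above compose with OUT_PKT7() from adreno_gpu.h. A hedged sketch of emitting an event write with a 64-bit destination address follows; the exact payload expected in the final dword by any given firmware is an assumption here, and example_emit_event_write() is an illustrative name, not a function from this patch:

/* Sketch only: emit a type-7 CP_EVENT_WRITE carrying a timestamp address.
 * 'ring' and 'ts_iova' are assumed to come from the caller. */
static void example_emit_event_write(struct msm_ringbuffer *ring, uint64_t ts_iova)
{
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_AND_INV_TS_EVENT));
	OUT_RING(ring, CP_EVENT_WRITE_1_ADDR_0_LO(lower_32_bits(ts_iova)));
	OUT_RING(ring, CP_EVENT_WRITE_2_ADDR_0_HI(upper_32_bits(ts_iova)));
	OUT_RING(ring, 0x00000000); /* REG_CP_EVENT_WRITE_3: value written to the address */
}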
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 4958594d5266..39dff7d5e89b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f05ed0e1f3d6..3819fdefcae2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -139,6 +139,7 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
+ struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -981,7 +982,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
struct drm_device *dev = msm_host->dev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 iova;
+ uint64_t iova;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
mutex_lock(&dev->struct_mutex);
@@ -1146,7 +1147,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 dma_base;
+ uint64_t dma_base;
bool triggered;
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
wmb(); /* make sure dsi controller enabled again */
}
+static void dsi_hpd_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, hpd_work);
+
+ drm_helper_hpd_irq_event(msm_host->dev);
+}
+
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 2d999494cdea..8b9f3ebaeba7 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41..26e3a01a99c2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb002..49008451085b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 506434fac993..3fcbb30dc241 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f1072c18c81e..d7bf3232dc88 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 34c7df6549c1..0a97ff75ed6f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index aa94a553794f..143eab46ba68 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8996_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 92da69aa6187..99590758c68b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8960_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 6eab7d0cf6b5..1b996ede7a65 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 6688e79cc88e..88037889589b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527dafc3e69..1c29618f4ddb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
@@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index a521207db8a1..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
@@ -29,7 +30,16 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
}
void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 571a91ee9607..b782efd4b95f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -159,17 +160,18 @@ static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_mmu *mmu = mdp4_kms->mmu;
-
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- }
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
@@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->iommu, "mdp4");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
-
- mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp4_kms->id = msm_register_mmu(dev, mmu);
+ mdp4_kms->id = msm_register_address_space(dev, aspace);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
@@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 25fb83997119..62712ca164ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -43,7 +43,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
@@ -51,7 +51,7 @@ struct mdp4_kms {
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbcda763..911e4690d36a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -40,7 +40,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
{
bool is_tile = false;
- if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+ if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
is_tile = true;
if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index ca6ca30650a0..27d5371acee0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
-- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ac9e4cde1380..618b2ffed9b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
- MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
- .nb_stages = 5,
+ .nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
@@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index fa2be7ce9468..1ce8a01a5a28 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -27,11 +27,8 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
-
struct mdp5_crtc {
struct drm_crtc base;
- char name[8];
int id;
bool enabled;
@@ -102,7 +99,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+ DBG("%s: flush=%08x", crtc->name, flush_mask);
return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
@@ -136,7 +133,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
- struct drm_plane *plane;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -148,16 +144,12 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
- DBG("%s: send event: %p", mdp5_crtc->name, event);
+ DBG("%s: send event: %p", crtc->name, event);
drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- mdp5_plane_complete_flip(plane);
- }
-
if (mdp5_crtc->ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
@@ -223,12 +215,7 @@ static void blend_setup(struct drm_crtc *crtc)
plane_cnt++;
}
- /*
- * If there is no base layer, enable border color.
- * Although it's not possbile in current blend logic,
- * put it here as a reminder.
- */
- if (!pstates[STAGE_BASE] && plane_cnt) {
+ if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
}
@@ -300,7 +287,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mdp5_crtc->name, mode->base.id, mode->name,
+ crtc->name, mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
@@ -320,7 +307,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(!mdp5_crtc->enabled))
return;
@@ -339,7 +326,7 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- DBG("%s", mdp5_crtc->name);
+ DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
@@ -365,31 +352,29 @@ static int pstate_cmp(const void *a, const void *b)
return pa->state->zpos - pb->state->zpos;
}
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
- int cnt = 0, i;
+ int cnt = 0, base = 0, i;
- DBG("%s: check", mdp5_crtc->name);
+ DBG("%s: check", crtc->name);
- /* verify that there are not too many planes attached to crtc
- * and that we don't have conflicting mixer stages:
- */
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
- if (cnt >= (hw_cfg->lm.nb_stages)) {
- dev_err(dev->dev, "too many planes!\n");
- return -EINVAL;
- }
-
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -399,10 +384,26 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* if the bottom-most layer is not fullscreen, we need to use
+ * it for solid-color:
+ */
+ if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+ base++;
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+ return -EINVAL;
+ }
+
for (i = 0; i < cnt; i++) {
- pstates[i].state->stage = STAGE_BASE + i;
- DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
- pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+ pstates[i].state->stage = STAGE_BASE + i + base;
+ DBG("%s: assign pipe %s on stage=%d", crtc->name,
+ pstates[i].plane->name,
pstates[i].state->stage);
}
@@ -412,8 +413,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- DBG("%s: begin", mdp5_crtc->name);
+ DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -423,7 +423,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
+ DBG("%s: event: %p", crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -489,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -643,7 +644,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
- DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -765,9 +766,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
- snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
- pipe2name(mdp5_plane_pipe(plane)), id);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
NULL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index d53e5510fd7c..3ce8b9dec9c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -17,6 +17,8 @@
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -30,7 +32,18 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ if (mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+ }
}
void mdp5_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ed7143d35b25..5f6cd8745dbc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -71,10 +72,49 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
+struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct msm_kms_state *state = to_kms_state(s);
+ struct mdp5_state *new_state;
+ int ret;
+
+ if (state->state)
+ return state->state;
+
+ ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!new_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy state: */
+ new_state->hwpipe = mdp5_kms->state->hwpipe;
+ if (mdp5_kms->smp)
+ new_state->smp = mdp5_kms->state->smp;
+
+ state->state = new_state;
+
+ return new_state;
+}
+
+static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ swap(to_kms_state(state)->state, mdp5_kms->state);
+}
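
mdp5_get_state()/mdp5_swap_state() implement a duplicate-on-first-access pattern for driver-global state inside an atomic update: the first caller copies the current state under the modeset lock, later callers in the same update reuse that copy, and a successful commit swaps the copy into place so the superseded state is freed along with the drm_atomic_state. The following is a minimal stand-alone model of that lifecycle, with hypothetical names and no locking, not the driver's real code.

#include <stdlib.h>
#include <string.h>

/* hypothetical, heavily reduced version of the driver-global state */
struct global_state { int hwpipe_owner[8]; };

struct device { struct global_state *state; };  /* "mdp5_kms" stand-in */
struct txn    { struct global_state *state; };  /* per atomic update   */

/* copy-on-first-access: later callers in the same txn reuse the copy */
static struct global_state *txn_get_state(struct device *dev, struct txn *t)
{
        if (t->state)
                return t->state;
        t->state = malloc(sizeof(*t->state));
        if (!t->state)
                return NULL;
        memcpy(t->state, dev->state, sizeof(*t->state));
        return t->state;
}

/* commit: the copy becomes current, the old state rides out with the txn */
static void txn_swap_state(struct device *dev, struct txn *t)
{
        struct global_state *tmp = dev->state;

        dev->state = t->state;
        t->state = tmp;
}

static void txn_cleanup(struct txn *t)
{
        free(t->state);         /* rejected copy or superseded old state */
        t->state = NULL;
}

int main(void)
{
        struct device dev = { .state = calloc(1, sizeof(struct global_state)) };
        struct txn t = { 0 };
        struct global_state *s;

        if (!dev.state)
                return 1;

        s = txn_get_state(&dev, &t);
        if (s) {
                s->hwpipe_owner[0] = 42;        /* stage the change */
                txn_swap_state(&dev, &t);       /* commit it        */
        }
        txn_cleanup(&t);
        free(dev.state);
        return 0;
}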
+
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
mdp5_enable(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
}
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
@@ -87,6 +127,9 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
+ if (mdp5_kms->smp)
+ mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+
mdp5_disable(mdp5_kms);
}
@@ -117,14 +160,66 @@ static int mdp5_set_split_display(struct msm_kms *kms,
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++)
+ mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
}
}
+#ifdef CONFIG_DEBUG_FS
+static int smp_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!mdp5_kms->smp) {
+ drm_printf(&p, "no SMP pool\n");
+ return 0;
+ }
+
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+
+ return 0;
+}
+
+static struct drm_info_list mdp5_debugfs_list[] = {
+ {"smp", smp_show },
+};
+
+static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list), minor);
+}
+#endif
+
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
@@ -134,6 +229,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .swap_state = mdp5_swap_state,
.prepare_commit = mdp5_prepare_commit,
.complete_commit = mdp5_complete_commit,
.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -141,6 +237,10 @@ static const struct mdp_kms_funcs kms_funcs = {
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
.destroy = mdp5_kms_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = mdp5_kms_debugfs_init,
+ .debugfs_cleanup = mdp5_kms_debugfs_cleanup,
+#endif
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -321,15 +421,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
- static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
- };
- static const enum mdp5_pipe vig_planes[] = {
- SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
- };
- static const enum mdp5_pipe dma_planes[] = {
- SSPP_DMA0, SSPP_DMA1,
- };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
@@ -337,58 +428,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* construct CRTCs and their private planes: */
- for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
+ /* Construct planes equaling the number of hw pipes, and CRTCs
+ * for the N layer-mixers (LM). The first N planes become primary
+ * planes for the CRTCs, with the remainder as overlay planes:
+ */
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ bool primary = i < mdp5_cfg->lm.count;
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = mdp5_plane_init(dev, crtcs[i], true,
- hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
+ plane = mdp5_plane_init(dev, primary);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
+ priv->planes[priv->num_planes++] = plane;
+
+ if (!primary)
+ continue;
crtc = mdp5_crtc_init(dev, plane, i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
- pipe2name(crtcs[i]), ret);
+ dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct video planes: */
- for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, vig_planes[i], false,
- hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(vig_planes[i]), ret);
- goto fail;
- }
- }
-
- /* DMA planes */
- for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
- struct drm_plane *plane;
-
- plane = mdp5_plane_init(dev, dma_planes[i], false,
- hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(dma_planes[i]), ret);
- goto fail;
- }
- }
-
/* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
@@ -564,7 +632,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, i, ret;
/* priv->kms would have been populated by the MDP5 driver */
@@ -606,30 +674,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->platform.iommu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
- mmu->funcs->destroy(mmu);
goto fail;
}
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp5_kms->mmu = mmu;
- mdp5_kms->id = msm_register_mmu(dev, mmu);
+ mdp5_kms->id = msm_register_address_space(dev, aspace);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
@@ -644,8 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ dev->mode_config.max_width = 0xffff;
+ dev->mode_config.max_height = 0xffff;
dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
@@ -673,6 +740,69 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
+
+ kfree(mdp5_kms->state);
+}
+
+static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
+ const enum mdp5_pipe *pipes, const uint32_t *offsets,
+ uint32_t caps)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ if (IS_ERR(hwpipe)) {
+ ret = PTR_ERR(hwpipe);
+ dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ pipe2name(pipes[i]), ret);
+ return ret;
+ }
+ hwpipe->idx = mdp5_kms->num_hwpipes;
+ mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
+ }
+
+ return 0;
+}
+
+static int hwpipe_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe rgb_planes[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
+ };
+ static const enum mdp5_pipe vig_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
+ const struct mdp5_cfg_hw *hw_cfg;
+ int ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* Construct RGB pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
+ hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
+ if (ret)
+ return ret;
+
+ /* Construct video (VIG) pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
+ hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
+ if (ret)
+ return ret;
+
+ /* Construct DMA pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
+ hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
@@ -696,6 +826,13 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
+ drm_modeset_lock_init(&mdp5_kms->state_lock);
+ mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
+ if (!mdp5_kms->state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -749,7 +886,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
* this section initializes the SMP:
*/
if (mdp5_kms->caps & MDP_CAP_SMP) {
- mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
if (IS_ERR(mdp5_kms->smp)) {
ret = PTR_ERR(mdp5_kms->smp);
mdp5_kms->smp = NULL;
@@ -764,6 +901,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
goto fail;
}
+ ret = hwpipe_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 03738927be10..17b0cc101171 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -24,8 +24,11 @@
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_ctl.h"
+#include "mdp5_pipe.h"
#include "mdp5_smp.h"
+struct mdp5_state;
+
struct mdp5_kms {
struct mdp_kms base;
@@ -33,13 +36,21 @@ struct mdp5_kms {
struct platform_device *pdev;
+ unsigned num_hwpipes;
+ struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
+ /**
+ * Global atomic state. Do not access directly, use mdp5_get_state()
+ */
+ struct mdp5_state *state;
+ struct drm_modeset_lock state_lock;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
@@ -65,9 +76,27 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+/* Global atomic state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ *
+ * For atomic updates which require modifying global state, the state is
+ * first duplicated via mdp5_get_state() and the copy is swapped into
+ * place at commit time (see mdp5_swap_state()).
+ */
+struct mdp5_state {
+ struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_smp_state smp;
+};
+
+struct mdp5_state *__must_check
+mdp5_get_state(struct drm_atomic_state *s);
+
+/* Atomic plane state. Subclasses the base drm_plane_state in order to
+ * track assigned hwpipe and hw specific state.
+ */
struct mdp5_plane_state {
struct drm_plane_state base;
+ struct mdp5_hw_pipe *hwpipe;
+
/* aligned with property */
uint8_t premultiplied;
uint8_t zpos;
@@ -76,11 +105,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
- /* some additional transactional status to help us know in the
- * apply path whether we need to update SMP allocation, and
- * whether current update is still pending:
- */
- bool mode_changed : 1;
bool pending : 1;
};
#define to_mdp5_plane_state(x) \
@@ -114,6 +138,18 @@ static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
return msm_readl(mdp5_kms->mmio + reg);
}
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
static const char *names[] = {
@@ -196,13 +232,10 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane,
- uint32_t reg_offset, uint32_t caps);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
new file mode 100644
index 000000000000..1ae9dc8d260d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
+ struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state;
+ struct mdp5_hw_pipe_state *old_state, *new_state;
+ struct mdp5_hw_pipe *hwpipe = NULL;
+ int i;
+
+ state = mdp5_get_state(s);
+ if (IS_ERR(state))
+ return ERR_CAST(state);
+
+ /* grab old_state after mdp5_get_state(), since now we hold lock: */
+ old_state = &mdp5_kms->state->hwpipe;
+ new_state = &state->hwpipe;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
+
+ /* skip if already in-use.. check both new and old state,
+ * since we cannot immediately re-use a pipe that is
+ * released in the current update in some cases:
+ * (1) mdp5 can have SMP (non-double-buffered)
+ * (2) hw pipe previously assigned to different CRTC
+ * (vblanks might not be aligned)
+ */
+ if (new_state->hwpipe_to_plane[cur->idx] ||
+ old_state->hwpipe_to_plane[cur->idx])
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* possible candidate, take the one with the
+ * fewest unneeded caps bits set:
+ */
+ if (!hwpipe || (hweight_long(cur->caps & ~caps) <
+ hweight_long(hwpipe->caps & ~caps)))
+ hwpipe = cur;
+ }
+
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ if (mdp5_kms->smp) {
+ int ret;
+
+ DBG("%s: alloc SMP blocks", hwpipe->name);
+ ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+ hwpipe->pipe, blkcfg);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->blkcfg = blkcfg;
+ }
+
+ DBG("%s: assign to plane %s for caps %x",
+ hwpipe->name, plane->name, caps);
+ new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+
+ return hwpipe;
+}
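
The loop above encodes the assignment policy: skip pipes that are in use in either the old or the new state, require every capability the plane asked for, and among the remaining candidates prefer the pipe that leaves the fewest capability bits unused. Below is a reduced stand-alone sketch of that best-fit selection, using hypothetical capability flags rather than the driver's MDP_PIPE_CAP_* values.

#include <stdint.h>
#include <stdio.h>

#define CAP_SCALE  (1u << 0)
#define CAP_CSC    (1u << 1)
#define CAP_HFLIP  (1u << 2)
#define CAP_VFLIP  (1u << 3)

struct hw_pipe { const char *name; uint32_t caps; int in_use; };

static int popcount(uint32_t v)
{
        int n = 0;

        for (; v; v &= v - 1)
                n++;
        return n;
}

/* pick a free pipe that has all required caps, with the fewest spare ones */
static struct hw_pipe *pick_pipe(struct hw_pipe *pipes, int n, uint32_t caps)
{
        struct hw_pipe *best = NULL;
        int i;

        for (i = 0; i < n; i++) {
                struct hw_pipe *cur = &pipes[i];

                if (cur->in_use)
                        continue;
                if (caps & ~cur->caps)          /* missing a required cap */
                        continue;
                if (!best || popcount(cur->caps & ~caps) <
                             popcount(best->caps & ~caps))
                        best = cur;
        }
        return best;
}

int main(void)
{
        struct hw_pipe pipes[] = {
                { "VIG0", CAP_SCALE | CAP_CSC | CAP_HFLIP | CAP_VFLIP, 0 },
                { "RGB0", CAP_HFLIP | CAP_VFLIP, 0 },
                { "DMA0", 0, 0 },
        };
        struct hw_pipe *p = pick_pipe(pipes, 3, CAP_HFLIP);

        printf("picked %s\n", p ? p->name : "none");    /* RGB0, not VIG0 */
        return 0;
}

Requesting only HFLIP picks RGB0 here even though VIG0 would also work, which keeps the scaler-capable pipe available for a plane that actually needs it.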
+
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+
+ if (!hwpipe)
+ return;
+
+ if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
+ return;
+
+ DBG("%s: release from plane %s", hwpipe->name,
+ new_state->hwpipe_to_plane[hwpipe->idx]->name);
+
+ if (mdp5_kms->smp) {
+ DBG("%s: free SMP blocks", hwpipe->name);
+ mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
+ }
+
+ new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+}
+
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
+{
+ kfree(hwpipe);
+}
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps)
+{
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->name = pipe2name(pipe);
+ hwpipe->pipe = pipe;
+ hwpipe->reg_offset = reg_offset;
+ hwpipe->caps = caps;
+ hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+
+ spin_lock_init(&hwpipe->pipe_lock);
+
+ return hwpipe;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
new file mode 100644
index 000000000000..611da7a660c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_PIPE_H__
+#define __MDP5_PIPE_H__
+
+#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
+/* represents a hw pipe, which is dynamically assigned to a plane */
+struct mdp5_hw_pipe {
+ int idx;
+
+ const char *name;
+ enum mdp5_pipe pipe;
+
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+ uint32_t reg_offset;
+ uint32_t caps;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
+ /* number of smp blocks per plane, ie:
+ * nblks_y | (nblks_u << 8) | (nblks_v << 16)
+ */
+ uint32_t blkcfg;
+};
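
The blkcfg field packs one byte of SMP block count per color plane, as the comment notes. The short sketch below (hypothetical helper names, plain C) shows that packing and the matching unpack; it is the same `n << (8 * i)` construction used by mdp5_smp_calculate() and the `blkcfg & 0xff` / `blkcfg >>= 8` walk in mdp5_smp_assign().

#include <stdint.h>
#include <stdio.h>

/* pack per-plane SMP block counts: nblks_y | (nblks_u << 8) | (nblks_v << 16) */
static uint32_t blkcfg_pack(const uint8_t *nblks, int nplanes)
{
        uint32_t blkcfg = 0;
        int i;

        for (i = 0; i < nplanes; i++)
                blkcfg |= (uint32_t)nblks[i] << (8 * i);
        return blkcfg;
}

static uint8_t blkcfg_plane(uint32_t blkcfg, int plane)
{
        return (blkcfg >> (8 * plane)) & 0xff;
}

int main(void)
{
        uint8_t nblks[3] = { 4, 2, 2 };         /* e.g. a 3-plane YUV layout */
        uint32_t blkcfg = blkcfg_pack(nblks, 3);
        int i;

        for (i = 0; i < 3; i++)
                printf("plane %d: %u blocks\n", i, blkcfg_plane(blkcfg, i));
        return 0;
}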
+
+/* global atomic state of assignment between pipes and planes: */
+struct mdp5_hw_pipe_state {
+ struct drm_plane *hwpipe_to_plane[SSPP_MAX];
+};
+
+struct mdp5_hw_pipe *__must_check
+mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg);
+void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002b05df..c099da7bc212 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -16,19 +16,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_print.h>
#include "mdp5_kms.h"
struct mdp5_plane {
struct drm_plane base;
- const char *name;
-
- enum mdp5_pipe pipe;
-
- spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
- uint32_t reg_offset;
- uint32_t caps;
-
- uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
@@ -69,21 +61,12 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
static void mdp5_plane_install_rotation_property(struct drm_device *dev,
struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
- !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
- return;
-
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -184,6 +167,21 @@ done:
#undef SET_PROPERTY
}
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
+ drm_printf(p, "\tzpos=%u\n", pstate->zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+ drm_printf(p, "\tpending=%u\n", pstate->pending);
+}
+
static void mdp5_plane_reset(struct drm_plane *plane)
{
struct mdp5_plane_state *mdp5_state;
@@ -222,7 +220,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->mode_changed = false;
mdp5_state->pending = false;
return &mdp5_state->base;
@@ -231,10 +228,12 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
if (state->fb)
drm_framebuffer_unreference(state->fb);
- kfree(to_mdp5_plane_state(state));
+ kfree(pstate);
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
@@ -247,102 +246,121 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = new_state->fb;
if (!new_state->fb)
return 0;
- DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct drm_framebuffer *fb = old_state->fb;
if (!fb)
return;
- DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_plane_state *old_state = plane->state;
- const struct mdp_format *format;
- bool vflip, hflip;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ uint32_t max_width, max_height;
+ uint32_t caps = 0;
- DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
+ /* We don't allow faster-than-vblank updates; if we did add this
+ * some day, we would need to disallow it in cases where the
+ * hwpipe changes.
+ */
+ if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
+ return -EBUSY;
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if ((state->src_w > max_width) || (state->src_h > max_height)) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
format = to_mdp_format(msm_framebuffer_format(state->fb));
- if (MDP_FORMAT_IS_YUV(format) &&
- !pipe_supports_yuv(mdp5_plane->caps)) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support YUV\n");
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
- return -EINVAL;
- }
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
- if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
- (((state->src_w >> 16) != state->crtc_w) ||
- ((state->src_h >> 16) != state->crtc_h))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
- state->src_w >> 16, state->src_h >> 16,
- state->crtc_w, state->crtc_h);
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
- return -EINVAL;
- }
+ if (rotation & DRM_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
- hflip = !!(state->rotation & DRM_REFLECT_X);
- vflip = !!(state->rotation & DRM_REFLECT_Y);
- if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
- (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support flip\n");
+ if (rotation & DRM_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
- return -EINVAL;
- }
- }
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
- if (plane_enabled(state) && plane_enabled(old_state)) {
- /* we cannot change SMP block configuration during scanout: */
- bool full_modeset = false;
- if (state->fb->pixel_format != old_state->fb->pixel_format) {
- DBG("%s: pixel_format change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (state->src_w != old_state->src_w) {
- DBG("%s: src_w change!", mdp5_plane->name);
- full_modeset = true;
- }
- if (to_mdp5_plane_state(old_state)->pending) {
- DBG("%s: still pending!", mdp5_plane->name);
- full_modeset = true;
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
}
- if (full_modeset) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state->state, state->crtc);
- crtc_state->mode_changed = true;
- to_mdp5_plane_state(state)->mode_changed = true;
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO maybe we want to re-assign hwpipe sometimes
+ * in cases when we no longer need some caps, to make
+ * it available for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
+ plane, caps, blkcfg);
+ if (IS_ERR(mdp5_state->hwpipe)) {
+ DBG("%s: failed to assign hwpipe!", plane->name);
+ return PTR_ERR(mdp5_state->hwpipe);
+ }
+ mdp5_pipe_release(state->state, old_hwpipe);
}
- } else {
- to_mdp5_plane_state(state)->mode_changed = true;
}
return 0;
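
The reworked check derives the capability set this particular update needs (CSC and scaling for YUV, scaling when source and CRTC sizes differ, flips from the simplified rotation) and only requests a new hw pipe when the currently assigned one lacks a required capability or its SMP block configuration changed. Below is a condensed stand-alone model of that decision; the flags and trimmed-down structs are hypothetical, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

/* hypothetical capability flags, standing in for MDP_PIPE_CAP_* */
#define CAP_SCALE (1u << 0)
#define CAP_CSC   (1u << 1)
#define CAP_HFLIP (1u << 2)
#define CAP_VFLIP (1u << 3)

struct plane_req {
        bool yuv, reflect_x, reflect_y;
        uint32_t src_w, src_h;          /* 16.16 fixed point, as in DRM */
        uint32_t crtc_w, crtc_h;
        uint32_t blkcfg;                /* SMP blocks this update needs */
};

struct cur_pipe { uint32_t caps, blkcfg; };

static uint32_t required_caps(const struct plane_req *r)
{
        uint32_t caps = 0;

        if (r->yuv)
                caps |= CAP_SCALE | CAP_CSC;
        if ((r->src_w >> 16) != r->crtc_w || (r->src_h >> 16) != r->crtc_h)
                caps |= CAP_SCALE;
        if (r->reflect_x)
                caps |= CAP_HFLIP;
        if (r->reflect_y)
                caps |= CAP_VFLIP;
        return caps;
}

/* true when a different hw pipe has to be assigned for this update */
static bool needs_new_pipe(const struct cur_pipe *cur, const struct plane_req *r)
{
        if (!cur)
                return true;                    /* no pipe assigned yet */
        if (required_caps(r) & ~cur->caps)
                return true;                    /* capability mismatch  */
        if (cur->blkcfg != r->blkcfg)
                return true;                    /* SMP layout changed   */
        return false;
}

int main(void)
{
        struct cur_pipe rgb0 = { .caps = CAP_HFLIP | CAP_VFLIP, .blkcfg = 4 };
        struct plane_req req = {
                .yuv = true,
                .src_w = 1280u << 16, .src_h = 720u << 16,
                .crtc_w = 1920, .crtc_h = 1080,
                .blkcfg = 4,
        };

        /* YUV needs CSC, which this pipe lacks -> reassign */
        return needs_new_pipe(&rgb0, &req) ? 0 : 1;
}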
@@ -351,16 +369,16 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
- DBG("%s: update", mdp5_plane->name);
+ DBG("%s: update", plane->name);
- if (!plane_enabled(state)) {
- to_mdp5_plane_state(state)->pending = true;
- } else if (to_mdp5_plane_state(state)->mode_changed) {
+ mdp5_state->pending = true;
+
+ if (plane_enabled(state)) {
int ret;
- to_mdp5_plane_state(state)->pending = true;
+
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
state->crtc_x, state->crtc_y,
@@ -369,11 +387,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
- } else {
- unsigned long flags;
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
- set_scanout_locked(plane, state->fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
}
@@ -387,9 +400,9 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -669,18 +682,19 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ enum mdp5_pipe pipe = hwpipe->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
- bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
int pe_left[COMP_MAX], pe_right[COMP_MAX];
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ unsigned int rotation;
bool vflip, hflip;
unsigned long flags;
int ret;
@@ -700,27 +714,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;
- DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /* Request some memory from the SMP: */
- if (mdp5_kms->smp) {
- ret = mdp5_smp_request(mdp5_kms->smp,
- mdp5_plane->pipe, format, src_w, false);
- if (ret)
- return ret;
- }
-
- /*
- * Currently we update the hw for allocations/requests immediately,
- * but once atomic modeset/pageflip is in place, the allocation
- * would move into atomic->check_plane_state(), while updating the
- * hw would remain here:
- */
- if (mdp5_kms->smp)
- mdp5_smp_configure(mdp5_kms->smp, pipe);
-
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
if (ret)
return ret;
@@ -729,7 +726,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
calc_pixel_ext(format, src_w, crtc_w, phasex_step,
pe_left, pe_right, true);
calc_pixel_ext(format, src_h, crtc_h, phasey_step,
@@ -743,14 +740,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & DRM_REFLECT_X);
- vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
- spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ spin_lock_irqsave(&hwpipe->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
@@ -795,12 +796,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
mdp5_write_pixel_ext(mdp5_kms, pipe, format,
src_w, pe_left, pe_right,
src_h, pe_top, pe_bottom);
- if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
phasex_step[COMP_0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
@@ -815,7 +816,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
}
- if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
if (MDP_FORMAT_IS_YUV(format))
csc_enable(mdp5_kms, pipe,
mdp_get_default_csc_cfg(CSC_YUV2RGB));
@@ -825,56 +826,42 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
set_scanout_locked(plane, fb);
- spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
+ spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
return ret;
}
-void mdp5_plane_complete_flip(struct drm_plane *plane)
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- DBG("%s: complete flip", mdp5_plane->name);
-
- if (mdp5_kms->smp)
- mdp5_smp_commit(mdp5_kms->smp, pipe);
-
- to_mdp5_plane_state(plane->state)->pending = false;
-}
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
-enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- return mdp5_plane->pipe;
+ return pstate->hwpipe->pipe;
}
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- return mdp5_plane->flush_mask;
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ return pstate->hwpipe->flush_mask;
}
/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
- if (!plane_enabled(plane->state) && mdp5_kms->smp) {
- DBG("%s: free SMP", mdp5_plane->name);
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
+ pstate->pending = false;
}
/* initialize plane */
-struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
- uint32_t caps)
+struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -889,19 +876,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
plane = &mdp5_plane->base;
- mdp5_plane->pipe = pipe;
- mdp5_plane->name = pipe2name(pipe);
- mdp5_plane->caps = caps;
-
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats),
- !pipe_supports_yuv(mdp5_plane->caps));
-
- mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
- mdp5_plane->reg_offset = reg_offset;
- spin_lock_init(&mdp5_plane->pipe_lock);
+ ARRAY_SIZE(mdp5_plane->formats), false);
- type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type, NULL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 27d7b55b52c9..58f712d37e7f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -21,72 +21,6 @@
#include "mdp5_smp.h"
-/* SMP - Shared Memory Pool
- *
- * These are shared between all the clients, where each plane in a
- * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
- * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
- *
- * Based on the size of the attached scanout buffer, a certain # of
- * blocks must be allocated to that client out of the shared pool.
- *
- * In some hw, some blocks are statically allocated for certain pipes
- * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
- *
- * For each block that can be dynamically allocated, it can be either
- * free:
- * The block is free.
- *
- * pending:
- * The block is allocated to some client and not free.
- *
- * configured:
- * The block is allocated to some client, and assigned to that
- * client in MDP5_SMP_ALLOC registers.
- *
- * inuse:
- * The block is being actively used by a client.
- *
- * The updates happen in the following steps:
- *
- * 1) mdp5_smp_request():
- * When plane scanout is setup, calculate required number of
- * blocks needed per client, and request. Blocks neither inuse nor
- * configured nor pending by any other client are added to client's
- * pending set.
- * For shrinking, blocks in pending but not in configured can be freed
- * directly, but those already in configured will be freed later by
- * mdp5_smp_commit.
- *
- * 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
- * are configured for the union(pending, inuse)
- * Current pending is copied to configured.
- * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
- * concurrently for the same pipe.
- *
- * 3) mdp5_smp_commit():
- * After next vblank, copy configured -> inuse. Optionally update
- * MDP5_SMP_ALLOC registers if there are newly unused blocks
- *
- * 4) mdp5_smp_release():
- * Must be called after the pipe is disabled and no longer uses any SMB
- *
- * On the next vblank after changes have been committed to hw, the
- * client's pending blocks become it's in-use blocks (and no-longer
- * in-use blocks become available to other clients).
- *
- * btw, hurray for confusing overloaded acronyms! :-/
- *
- * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
- * should happen at (or before)? atomic->check(). And we'd need
- * an API to discard previous requests if update is aborted or
- * (test-only).
- *
- * TODO would perhaps be nice to have debugfs to dump out kernel
- * inuse and pending state of all clients..
- */
-
struct mdp5_smp {
struct drm_device *dev;
@@ -94,16 +28,8 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
-
- spinlock_t state_lock;
- mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
-
- struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
-static void update_smp_state(struct mdp5_smp *smp,
- u32 cid, mdp5_smp_state_t *assigned);
-
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -134,57 +60,38 @@ static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
return mdp5_cfg->smp.clients[pipe] + plane;
}
-/* step #1: update # of blocks pending for the client: */
+/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
u32 cid, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
uint8_t reserved;
- unsigned long flags;
- reserved = smp->reserved[cid];
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(bitmap_weight(cs, cnt) > 0);
- spin_lock_irqsave(&smp->state_lock, flags);
+ reserved = smp->reserved[cid];
if (reserved) {
nblks = max(0, nblks - reserved);
DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
}
- avail = cnt - bitmap_weight(smp->state, cnt);
+ avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
- ret = -ENOSPC;
- goto fail;
+ return -ENOSPC;
}
- cur_nblks = bitmap_weight(ps->pending, cnt);
- if (nblks > cur_nblks) {
- /* grow the existing pending reservation: */
- for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(smp->state, cnt);
- set_bit(blk, ps->pending);
- set_bit(blk, smp->state);
- }
- } else {
- /* shrink the existing pending reservation: */
- for (i = cur_nblks; i > nblks; i--) {
- int blk = find_first_bit(ps->pending, cnt);
- clear_bit(blk, ps->pending);
-
- /* clear in global smp_state if not in configured
- * otherwise until _commit()
- */
- if (!test_bit(blk, ps->configured))
- clear_bit(blk, smp->state);
- }
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
}
-fail:
- spin_unlock_irqrestore(&smp->state_lock, flags);
return 0;
}
@@ -209,14 +116,15 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim)
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
- struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
- int i, hsub, nplanes, nlines, nblks, ret;
+ int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
+ uint32_t blkcfg = 0;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -239,7 +147,7 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
hsub = 1;
}
- for (i = 0, nblks = 0; i < nplanes; i++) {
+ for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(fmt, i);
@@ -251,60 +159,72 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
if (rev == 0)
n = roundup_pow_of_two(n);
+ blkcfg |= (n << (8 * i));
+ }
+
+ return blkcfg;
+}
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ int n = blkcfg & 0xff;
+
+ if (!n)
+ continue;
+
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
- ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ ret = smp_request_block(smp, state, cid, n);
if (ret) {
dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
- nblks += n;
+ blkcfg >>= 8;
}
- set_fifo_thresholds(smp, pipe, nblks);
+ state->assigned |= (1 << pipe);
return 0;
}
/* Release SMP blocks for all clients of the pipe */
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe)
{
int i;
- unsigned long flags;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
- mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
-
- spin_lock_irqsave(&smp->state_lock, flags);
-
- /* clear hw assignment */
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, CID_UNUSED, &assigned);
-
- /* free to global pool */
- bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
- bitmap_andnot(smp->state, smp->state, assigned, cnt);
+ void *cs = state->client_state[cid];
- /* clear client's infor */
- bitmap_zero(ps->pending, cnt);
- bitmap_zero(ps->configured, cnt);
- bitmap_zero(ps->inuse, cnt);
+ /* update global state: */
+ bitmap_andnot(state->state, state->state, cs, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ /* clear client's state */
+ bitmap_zero(cs, cnt);
}
- set_fifo_thresholds(smp, pipe, 0);
+ state->released |= (1 << pipe);
}
-static void update_smp_state(struct mdp5_smp *smp,
+/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
+ * happen after scanout completes.
+ */
+static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
+ unsigned nblks = 0;
u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
@@ -330,62 +250,88 @@ static void update_smp_state(struct mdp5_smp *smp,
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+
+ nblks++;
}
+
+ return nblks;
}
-/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t assigned;
- int i;
+ enum mdp5_pipe pipe;
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
+ unsigned i, nblks = 0;
- /*
- * if vblank has not happened since last smp_configure
- * skip the configure for now
- */
- if (!bitmap_equal(ps->inuse, ps->configured, cnt))
- continue;
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
- bitmap_copy(ps->configured, ps->pending, cnt);
- bitmap_or(assigned, ps->inuse, ps->configured, cnt);
- update_smp_state(smp, cid, &assigned);
+ nblks += update_smp_state(smp, cid, cs);
+
+ DBG("assign %s:%u, %u blks",
+ pipe2name(pipe), i, nblks);
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
}
+
+ state->assigned = 0;
}
-/* step #3: after vblank, copy configured -> inuse: */
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
- int cnt = smp->blk_cnt;
- mdp5_smp_state_t released;
- int i;
-
- for (i = 0; i < pipe2nclients(pipe); i++) {
- u32 cid = pipe2client(pipe, i);
- struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ enum mdp5_pipe pipe;
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
- unsigned long flags;
+ for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
+ DBG("release %s", pipe2name(pipe));
+ set_fifo_thresholds(smp, pipe, 0);
+ }
- spin_lock_irqsave(&smp->state_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(smp->state, smp->state, released, cnt);
- spin_unlock_irqrestore(&smp->state_lock, flags);
+ state->released = 0;
+}
- update_smp_state(smp, CID_UNUSED, &released);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_hw_pipe_state *hwpstate;
+ struct mdp5_smp_state *state;
+ int total = 0, i, j;
+
+ drm_printf(p, "name\tinuse\tplane\n");
+ drm_printf(p, "----\t-----\t-----\n");
+
+ if (drm_can_sleep())
+ drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+
+ /* grab these *after* we hold the state_lock */
+ hwpstate = &mdp5_kms->state->hwpipe;
+ state = &mdp5_kms->state->smp;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ for (j = 0; j < pipe2nclients(pipe); j++) {
+ u32 cid = pipe2client(pipe, j);
+ void *cs = state->client_state[cid];
+ int inuse = bitmap_weight(cs, smp->blk_cnt);
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
}
-
- bitmap_copy(ps->inuse, ps->configured, cnt);
}
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->state_lock);
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -393,8 +339,9 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
kfree(smp);
}
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct mdp5_smp_state *state = &mdp5_kms->state->smp;
struct mdp5_smp *smp = NULL;
int ret;
@@ -404,14 +351,13 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
goto fail;
}
- smp->dev = dev;
+ smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
/* statically tied MMBs cannot be re-allocated: */
- bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
- spin_lock_init(&smp->state_lock);
return smp;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 20b87e800ea3..b41d0448fbe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -19,12 +19,53 @@
#ifndef __MDP5_SMP_H__
#define __MDP5_SMP_H__
+#include <drm/drm_print.h>
+
#include "msm_drv.h"
-struct mdp5_client_smp_state {
- mdp5_smp_state_t inuse;
- mdp5_smp_state_t configured;
- mdp5_smp_state_t pending;
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify SMP configuration, the state is cloned
+ * (copied) and modified. For test-only, or in cases where atomic
+ * update fails (or if we hit ww_mutex deadlock/backoff condition) the
+ * new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
};
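
Because the SMP_ALLOC registers are not double buffered, the comment above splits the hardware update into two phases keyed off the assigned and released bitmaps. The sketch below is a stand-alone model of that bookkeeping, with hypothetical helpers in place of the register writes: pipes marked assigned are programmed before the flush, while pipes marked released only have their FIFO thresholds cleared after the vblank that retires the old scanout.

#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 8

struct smp_sketch_state {
        uint32_t assigned;      /* pipes whose blocks are programmed at prepare */
        uint32_t released;      /* pipes whose blocks are freed at complete     */
};

static void program_blocks(int pipe)   { printf("prepare: program pipe %d\n", pipe); }
static void clear_thresholds(int pipe) { printf("complete: clear pipe %d\n", pipe); }

/* step 1: before FLUSH, only touch newly-assigned pipes */
static void prepare_commit(struct smp_sketch_state *s)
{
        int pipe;

        for (pipe = 0; pipe < MAX_PIPES; pipe++)
                if (s->assigned & (1u << pipe))
                        program_blocks(pipe);
        s->assigned = 0;
}

/* step 2: after vblank, the released pipes have stopped scanning out */
static void complete_commit(struct smp_sketch_state *s)
{
        int pipe;

        for (pipe = 0; pipe < MAX_PIPES; pipe++)
                if (s->released & (1u << pipe))
                        clear_thresholds(pipe);
        s->released = 0;
}

int main(void)
{
        struct smp_sketch_state s = { .assigned = 1u << 0, .released = 1u << 3 };

        prepare_commit(&s);     /* programs pipe 0 before the flush       */
        complete_commit(&s);    /* frees pipe 3 after the next vblank     */
        return 0;
}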
struct mdp5_kms;
@@ -36,13 +77,22 @@ struct mdp5_smp;
* which is then used to call the other mdp5_smp_*(handler, ...) functions.
*/
-struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
+ const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
- const struct mdp_format *format, u32 width, bool hdecim);
-void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
-void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim);
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg);
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe);
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 452e3518f98b..8994c365e218 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 73bae382eac3..30b5d23e53b4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
kms->funcs->complete_commit(kms, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
commit_destroy(c);
}
@@ -217,8 +217,9 @@ int msm_atomic_commit(struct drm_device *dev,
if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
- plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ drm_atomic_set_fence_for_plane(plane_state, fence);
}
}
@@ -240,6 +241,10 @@ int msm_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
+ /* swap driver private state while still holding state_lock */
+ if (to_kms_state(state)->state)
+ priv->kms->funcs->swap_state(priv->kms, state);
+
/*
* Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
@@ -256,6 +261,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
queue_work(priv->atomic_wq, &c->work);
return 0;
@@ -269,3 +275,30 @@ error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
+
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void msm_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct msm_kms_state *state = to_kms_state(s);
+ drm_atomic_state_default_clear(&state->base);
+ kfree(state->state);
+ state->state = NULL;
+}
+
+void msm_atomic_state_free(struct drm_atomic_state *state)
+{
+ kfree(to_kms_state(state)->state);
+ drm_atomic_state_default_release(state);
+ kfree(state);
+}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6ef091..c1b40f5adb60 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,8 @@
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_kms.h"
+#include "msm_debugfs.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
@@ -141,6 +143,7 @@ int msm_debugfs_late_init(struct drm_device *dev)
int msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
int ret;
ret = drm_debugfs_create_files(msm_debugfs_list,
@@ -152,15 +155,25 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- return 0;
+ if (priv->kms->funcs->debugfs_init)
+ ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+ return ret;
}
void msm_debugfs_cleanup(struct drm_minor *minor)
{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
drm_debugfs_remove_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list), minor);
- if (!minor->dev->dev_private)
+ if (!priv)
return;
+
+ if (priv->kms->funcs->debugfs_cleanup)
+ priv->kms->funcs->debugfs_cleanup(priv->kms, minor);
+
msm_rd_debugfs_cleanup(minor);
msm_perf_debugfs_cleanup(minor);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0a7594..e29bb66f55b1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_of.h>
+
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -44,17 +46,21 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.output_poll_changed = msm_fb_output_poll_changed,
.atomic_check = msm_atomic_check,
.atomic_commit = msm_atomic_commit,
+ .atomic_state_alloc = msm_atomic_state_alloc,
+ .atomic_state_clear = msm_atomic_state_clear,
+ .atomic_state_free = msm_atomic_state_free,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
+ int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->mmus[idx] = mmu;
+ priv->aspace[idx] = aspace;
return idx;
}
@@ -77,6 +83,10 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
+bool dumpstate = false;
+MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
+module_param(dumpstate, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -228,7 +238,7 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms)
+ if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (gpu) {
@@ -766,9 +776,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -903,10 +911,8 @@ static int add_components_mdp(struct device *mdp_dev,
* remote-endpoint isn't a component that we need to add
*/
if (of_device_is_compatible(np, "qcom,mdp4") &&
- ep.port == 0) {
- of_node_put(ep_node);
+ ep.port == 0)
continue;
- }
/*
* It's okay if some of the ports don't have a remote endpoint
@@ -914,15 +920,12 @@ static int add_components_mdp(struct device *mdp_dev,
* any external interface.
*/
intf = of_graph_get_remote_port_parent(ep_node);
- if (!intf) {
- of_node_put(ep_node);
+ if (!intf)
continue;
- }
-
- component_match_add(master_dev, matchptr, compare_of, intf);
+ drm_of_component_match_add(master_dev, matchptr, compare_of,
+ intf);
of_node_put(intf);
- of_node_put(ep_node);
}
return 0;
@@ -962,8 +965,8 @@ static int add_display_components(struct device *dev,
put_device(mdp_dev);
/* add the MDP component itself */
- component_match_add(dev, matchptr, compare_of,
- mdp_dev->of_node);
+ drm_of_component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
} else {
/* MDP4 */
mdp_dev = dev;
@@ -996,7 +999,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- component_match_add(dev, matchptr, compare_of, np);
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
@@ -1035,7 +1038,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
if (ret)
return ret;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	/* on all devices that I am aware of, the IOMMUs used can map
+	 * any address the CPU can see:
+	 */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..ed4dad3ca133 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -52,6 +52,8 @@ struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -121,12 +123,16 @@ struct msm_drm_private {
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * iommu's. Ie. one for display block and one for gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[16];
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
@@ -173,8 +179,22 @@ int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
+void msm_atomic_state_clear(struct drm_atomic_state *state);
+void msm_atomic_state_free(struct drm_atomic_state *state);
+
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -189,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+ uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -217,7 +237,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
@@ -303,8 +323,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
-#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
static inline int align_pitch(int width, int bpp)
{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 95cf8fe72ee5..9acf544e7a8f 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ffd4a338ca12..bffe93498512 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -39,6 +39,7 @@ struct msm_fbdev {
static struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -49,12 +50,6 @@ static struct fb_ops msm_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
-
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
@@ -81,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
fctx->name = name;
- fctx->context = fence_context_alloc(1);
+ fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
struct msm_fence {
struct msm_fence_context *fctx;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = msm_fence_release,
};
-struct fence *
+struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
f->fctx = fctx;
- fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
- fctx->context, ++fctx->last_fence);
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..d8bc59c7e261 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -296,12 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- msm_obj->domain[id].iova = 0;
- }
+ msm_gem_unmap_vma(priv->aspace[id],
+ &msm_obj->domain[id], msm_obj->sgt);
}
}
@@ -313,7 +307,7 @@ put_iova(struct drm_gem_object *obj)
* the refcnt counter needs to be atomic_t.
*/
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+ uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
@@ -326,16 +320,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
} else {
msm_obj->domain[id].iova = physaddr(obj);
}
@@ -348,7 +334,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
@@ -370,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_obj->domain[id].iova);
@@ -521,7 +507,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -540,7 +526,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = reservation_object_get_excl(msm_obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -553,7 +539,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(msm_obj->resv));
if (fence->context != fctx->context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -563,7 +549,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +602,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
- if (!fence_is_signaled(fence))
+ if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %u\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -631,9 +617,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
+ unsigned id;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -650,10 +638,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size, madv);
+ off, msm_obj->vaddr);
+
+ for (id = 0; id < priv->num_aspaces; id++)
+ seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+ seq_printf(m, " %zu%s\n", obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -761,7 +754,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -783,16 +775,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ msm_obj->vram_node = &msm_obj->domain[0].node;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..7d529516b332 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,20 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_address_space {
+ const char *name;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ struct msm_mmu *mmu;
+};
+
+struct msm_gem_vma {
+ struct drm_mm_node node;
+ uint64_t iova;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -61,10 +75,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- // XXX
- uint32_t iova;
- } domain[NUM_DOMAINS];
+ struct msm_gem_vma domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -104,7 +115,7 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- struct fence *fence;
+ struct dma_fence *fence;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
@@ -112,13 +123,13 @@ struct msm_gem_submit {
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 283d2841ba58..ab1dd020eb04 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -18,33 +18,24 @@
#include "msm_drv.h"
#include "msm_gem.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
- } else {
- *unlock = true;
+ return true;
}
- return true;
+ BUG();
}
-
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -163,6 +154,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+
+ if (priv->shrinker.nr_deferred) {
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+ }
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..166e84e4f0d4 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
- fence_put(submit->fence);
+ dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
kfree(submit);
@@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
@@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint32_t off;
+ uint64_t iova;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -380,7 +381,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct fence *in_fence = NULL;
+ struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
@@ -439,7 +440,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if (in_fence->context != gpu->fctx->context) {
- ret = fence_wait(in_fence, true);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
@@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -542,7 +543,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (in_fence)
- fence_put(in_fence);
+ dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..a311d26ccb21
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu) {
+ unsigned size = vma->node.size << PAGE_SHIFT;
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+ }
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ int ret;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu) {
+ unsigned size = npages << PAGE_SHIFT;
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+ }
+
+ return ret;
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = msm_iommu_new(dev, domain);
+
+ drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
+ (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+
+ return aspace;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..b28527a65d09 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -91,21 +91,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->grp_clks[0] && gpu->fast_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ /* Set the RBBM timer rate to 19.2Mhz */
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -114,24 +113,22 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
-
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->grp_clks[0] && gpu->slow_rate)
+ clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+
+ if (gpu->grp_clks[2])
+ clk_set_rate(gpu->grp_clks[2], 0);
+
return 0;
}
@@ -476,7 +473,7 @@ static void retire_submits(struct msm_gpu *gpu)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (fence_is_signaled(submit->fence)) {
+ if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
@@ -528,7 +525,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
/* can't happen yet.. but when we add 2d support we'll have
* to deal w/ cross-ring synchronization:
@@ -563,8 +560,8 @@ static irqreturn_t irq_handler(int irq, void *data)
}
static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
+ "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk",
+ "mem_iface_clk", "alt_mem_iface_clk",
};
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -656,12 +653,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = SZ_16M;
+ iommu->geometry.aperture_end = 0xffffffff;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -669,7 +671,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ gpu->id = msm_register_address_space(drm, gpu->aspace);
/* Create ringbuffer: */
@@ -705,8 +707,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ if (gpu->aspace)
+ msm_gem_address_space_destroy(gpu->aspace);
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index d61d98a6e047..c4c39d3272c7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -50,7 +50,7 @@ struct msm_gpu_funcs {
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ bool (*idle)(struct msm_gpu *gpu);
irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
@@ -80,7 +80,7 @@ struct msm_gpu {
/* ringbuffer: */
struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ uint64_t rb_iova;
/* list of GEM active objects: */
struct list_head active_list;
@@ -98,7 +98,7 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int id;
/* Power Control: */
@@ -154,6 +154,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
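+/* [Editor's note, not part of the patch: illustrative usage of gpu_read64()
+ * for a 64-bit counter whose halves live at two dword offsets; the helper
+ * reads the lo half first so the latched hi half stays consistent. The
+ * wrapper name and the lo_reg/hi_reg parameters are hypothetical.]
+ *
+ * static inline u64 example_read_counter(struct msm_gpu *gpu,
+ *		u32 lo_reg, u32 hi_reg)
+ * {
+ *	return gpu_read64(gpu, lo_reg, hi_reg);
+ * }
+ */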
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a294d0da3a0..61aaaa1de6eb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
unsigned int i, j;
int ret;
@@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+ VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -84,13 +84,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ unsigned long da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
+ VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 40e41e5cdbc6..e470f4cf8f76 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -40,6 +40,8 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* swap global atomic state: */
+ void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -56,6 +58,11 @@ struct msm_kms_funcs {
bool is_cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+#ifdef CONFIG_DEBUG_FS
+ /* debugfs: */
+ int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct msm_kms *kms, struct drm_minor *minor);
+#endif
};
struct msm_kms {
@@ -65,6 +72,18 @@ struct msm_kms {
int irq;
};
+/**
+ * Subclass of drm_atomic_state, to allow kms backend to have driver
+ * private global state. The kms backend can do whatever it wants
+ * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
+ * is kfree'd and set back to NULL.
+ */
+struct msm_kms_state {
+ struct drm_atomic_state base;
+ void *state;
+};
+#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
+
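+/* [Editor's note, not part of the patch: a sketch of how a backend could use
+ * the ->state pointer together with the ->swap_state hook. The "my_kms"
+ * struct and field names are hypothetical; only to_kms_state(), swap() and
+ * container_of() are taken from the patch/kernel.]
+ *
+ * struct my_kms {
+ *	struct msm_kms base;
+ *	void *global_state;	// current driver-private global state
+ * };
+ *
+ * static void my_kms_swap_state(struct msm_kms *kms,
+ *		struct drm_atomic_state *state)
+ * {
+ *	struct my_kms *my = container_of(kms, struct my_kms, base);
+ *
+ *	// commit the duplicated state, keep the old one to be freed:
+ *	swap(my->global_state, to_kms_state(state)->state);
+ * }
+ */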
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index b8ca9a0e9170..f85c879e68d2 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -23,9 +23,9 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 8487f461f05f..6607456dc626 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -289,7 +289,7 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
- uint32_t iova, uint32_t size)
+ uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
@@ -306,7 +306,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
}
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, size }, 8);
+ (uint32_t[3]){ iova, size, iova >> 32 }, 12);
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(&obj->base);
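[Editor's note, not part of the patch: with the RD_GPUADDR record widened to three 32-bit words, a consumer of the rd log would reassemble the 64-bit GPU address as sketched below; the struct name and layout shown are hypothetical and only reflect the (uint32_t[3]){ iova, size, iova >> 32 } write above.]

struct rd_gpuaddr_rec {		/* hypothetical on-disk record layout */
	uint32_t iova_lo;
	uint32_t size;
	uint32_t iova_hi;	/* new third word */
};

static uint64_t example_rd_gpuaddr(const struct rd_gpuaddr_rec *rec)
{
	return ((uint64_t)rec->iova_hi << 32) | rec->iova_lo;
}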
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
new file mode 100644
index 000000000000..e9a8d90e6723
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -0,0 +1,19 @@
+config DRM_MXS
+ bool
+ help
+ Choose this option to select drivers for MXS FB devices
+
+config DRM_MXSFB
+ tristate "i.MX23/i.MX28/i.MX6SX MXSFB LCD controller"
+ depends on DRM && OF
+ depends on COMMON_CLK
+ select DRM_MXS
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_PANEL
+ help
+ Choose this option if you have an i.MX23/i.MX28/i.MX6SX MXSFB
+ LCD controller.
+
+ If M is selected the module will be called mxsfb.
diff --git a/drivers/gpu/drm/mxsfb/Makefile b/drivers/gpu/drm/mxsfb/Makefile
new file mode 100644
index 000000000000..857f3a4545ff
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/Makefile
@@ -0,0 +1,2 @@
+mxsfb-y := mxsfb_drv.o mxsfb_crtc.o mxsfb_out.o
+obj-$(CONFIG_DRM_MXSFB) += mxsfb.o
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
new file mode 100644
index 000000000000..081890336ce7
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/video/fbdev/mxsfb.c :
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
+
+#include "mxsfb_drv.h"
+#include "mxsfb_regs.h"
+
+static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
+{
+ return (val & mxsfb->devdata->hs_wdth_mask) <<
+ mxsfb->devdata->hs_wdth_shift;
+}
+
+/* Setup the MXSFB registers for decoding the pixels out of the framebuffer */
+static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ const u32 format = crtc->primary->state->fb->pixel_format;
+ u32 ctrl, ctrl1;
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;
+
+ /*
+ * WARNING: The bus width, CTRL_SET_BUS_WIDTH(), is configured to
+ * match the selected mode here. This differs from the original
+ * MXSFB driver, which had the option to configure the bus width
+	 * to an arbitrary value. This limitation should not pose an issue.
+ */
+
+ /* CTRL1 contains IRQ config and status bits, preserve those. */
+ ctrl1 = readl(mxsfb->base + LCDC_CTRL1);
+ ctrl1 &= CTRL1_CUR_FRAME_DONE_IRQ_EN | CTRL1_CUR_FRAME_DONE_IRQ;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+ ctrl |= CTRL_SET_WORD_LENGTH(0);
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+ ctrl |= CTRL_SET_WORD_LENGTH(3);
+		/* Do not use packed pixels; use one pixel per word instead. */
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
+ break;
+ default:
+ dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
+ return -EINVAL;
+ }
+
+ writel(ctrl1, mxsfb->base + LCDC_CTRL1);
+ writel(ctrl, mxsfb->base + LCDC_CTRL);
+
+ return 0;
+}
+
+static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ if (mxsfb->clk_disp_axi)
+ clk_prepare_enable(mxsfb->clk_disp_axi);
+ clk_prepare_enable(mxsfb->clk);
+ mxsfb_enable_axi_clk(mxsfb);
+
+ /* If it was disabled, re-enable the mode again */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
+
+ /* Enable the SYNC signals first, then the DMA engine */
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
+}
+
+static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ /*
+ * Even if we disable the controller here, it will still continue
+	 * until its FIFOs run out of data
+ */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_CLR);
+
+ readl_poll_timeout(mxsfb->base + LCDC_CTRL, reg, !(reg & CTRL_RUN),
+ 0, 1000);
+
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg &= ~VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ mxsfb_disable_axi_clk(mxsfb);
+
+ clk_disable_unprepare(mxsfb->clk);
+ if (mxsfb->clk_disp_axi)
+ clk_disable_unprepare(mxsfb->clk_disp_axi);
+}
+
+static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
+ const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
+ int err;
+
+ /*
+	 * It seems you can't re-program the controller if it is still
+ * running. This may lead to shifted pictures (FIFO issue?), so
+ * first stop the controller and drain its FIFOs.
+ */
+ mxsfb_enable_axi_clk(mxsfb);
+
+ /* Clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
+
+ err = mxsfb_set_pixel_fmt(mxsfb);
+ if (err)
+ return;
+
+ clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
+ TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
+ mxsfb->base + mxsfb->devdata->transfer_count);
+
+ vsync_pulse_len = m->crtc_vsync_end - m->crtc_vsync_start;
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* Always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(vsync_pulse_len);
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
+
+ writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+
+ /* Frame length in lines. */
+ writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
+
+ /* Line length in units of clocks or pixels. */
+ hsync_pulse_len = m->crtc_hsync_end - m->crtc_hsync_start;
+ writel(set_hsync_pulse_width(mxsfb, hsync_pulse_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
+ mxsfb->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
+ SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+ mxsfb->base + LCDC_VDCTRL3);
+
+ writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
+ mxsfb->base + LCDC_VDCTRL4);
+
+ mxsfb_disable_axi_clk(mxsfb);
+}
+
+void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb)
+{
+ mxsfb_crtc_mode_set_nofb(mxsfb);
+ mxsfb_enable_controller(mxsfb);
+}
+
+void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb)
+{
+ mxsfb_disable_controller(mxsfb);
+}
+
+void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
+ struct drm_plane_state *state)
+{
+ struct drm_simple_display_pipe *pipe = &mxsfb->pipe;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_pending_vblank_event *event;
+ struct drm_gem_cma_object *gem;
+
+ if (!crtc)
+ return;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ if (drm_crtc_vblank_get(crtc) == 0) {
+ drm_crtc_arm_vblank_event(crtc, event);
+ } else {
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (!fb)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(gem->paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ mxsfb_disable_axi_clk(mxsfb);
+}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
new file mode 100644
index 000000000000..79a18bf48b54
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/video/fbdev/mxsfb.c :
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/reservation.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "mxsfb_drv.h"
+#include "mxsfb_regs.h"
+
+enum mxsfb_devtype {
+ MXSFB_V3,
+ MXSFB_V4,
+};
+
+static const struct mxsfb_devdata mxsfb_devdata[] = {
+ [MXSFB_V3] = {
+ .transfer_count = LCDC_V3_TRANSFER_COUNT,
+ .cur_buf = LCDC_V3_CUR_BUF,
+ .next_buf = LCDC_V3_NEXT_BUF,
+ .debug0 = LCDC_V3_DEBUG0,
+ .hs_wdth_mask = 0xff,
+ .hs_wdth_shift = 24,
+ .ipversion = 3,
+ },
+ [MXSFB_V4] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .debug0 = LCDC_V4_DEBUG0,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .ipversion = 4,
+ },
+};
+
+static const uint32_t mxsfb_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565
+};
+
+static struct mxsfb_drm_private *
+drm_pipe_to_mxsfb_drm_private(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct mxsfb_drm_private, pipe);
+}
+
+void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
+{
+ if (mxsfb->clk_axi)
+ clk_prepare_enable(mxsfb->clk_axi);
+}
+
+void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
+{
+ if (mxsfb->clk_axi)
+ clk_disable_unprepare(mxsfb->clk_axi);
+}
+
+static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_crtc_enable(mxsfb);
+}
+
+static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_crtc_disable(mxsfb);
+}
+
+static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ mxsfb_plane_atomic_update(mxsfb, plane_state);
+}
+
+static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+}
+
+struct drm_simple_display_pipe_funcs mxsfb_funcs = {
+ .enable = mxsfb_pipe_enable,
+ .disable = mxsfb_pipe_disable,
+ .update = mxsfb_pipe_update,
+ .prepare_fb = mxsfb_pipe_prepare_fb,
+};
+
+static int mxsfb_load(struct drm_device *drm, unsigned long flags)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct mxsfb_drm_private *mxsfb;
+ struct resource *res;
+ int ret;
+
+ mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
+ if (!mxsfb)
+ return -ENOMEM;
+
+ drm->dev_private = mxsfb;
+ mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(mxsfb->base))
+ return PTR_ERR(mxsfb->base);
+
+ mxsfb->clk = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(mxsfb->clk))
+ return PTR_ERR(mxsfb->clk);
+
+ mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
+ if (IS_ERR(mxsfb->clk_axi))
+ mxsfb->clk_axi = NULL;
+
+ mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
+ if (IS_ERR(mxsfb->clk_disp_axi))
+ mxsfb->clk_disp_axi = NULL;
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(drm->dev);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ goto err_vblank;
+ }
+
+ /* Modeset init */
+ drm_mode_config_init(drm);
+
+ ret = mxsfb_create_output(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to create outputs\n");
+ goto err_vblank;
+ }
+
+ ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
+ mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
+ &mxsfb->connector);
+ if (ret < 0) {
+ dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ goto err_vblank;
+ }
+
+ ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel\n");
+ goto err_vblank;
+ }
+
+ drm->mode_config.min_width = MXSFB_MIN_XRES;
+ drm->mode_config.min_height = MXSFB_MIN_YRES;
+ drm->mode_config.max_width = MXSFB_MAX_XRES;
+ drm->mode_config.max_height = MXSFB_MAX_YRES;
+ drm->mode_config.funcs = &mxsfb_mode_config_funcs;
+
+ drm_mode_config_reset(drm);
+
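+ /* Power the device while drm_irq_install() runs; irq_preinstall touches registers. */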
+ pm_runtime_get_sync(drm->dev);
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ pm_runtime_put_sync(drm->dev);
+
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ goto err_irq;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(mxsfb->fbdev)) {
+ mxsfb->fbdev = NULL;
+ dev_err(drm->dev, "Failed to init FB CMA area\n");
+ goto err_cma;
+ }
+
+ platform_set_drvdata(pdev, drm);
+
+ drm_helper_hpd_irq_event(drm);
+
+ return 0;
+
+err_cma:
+ drm_irq_uninstall(drm);
+err_irq:
+ drm_panel_detach(mxsfb->panel);
+err_vblank:
+ pm_runtime_disable(drm->dev);
+
+ return ret;
+}
+
+static void mxsfb_unload(struct drm_device *drm)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ if (mxsfb->fbdev)
+ drm_fbdev_cma_fini(mxsfb->fbdev);
+
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+
+ pm_runtime_get_sync(drm->dev);
+ drm_irq_uninstall(drm);
+ pm_runtime_put_sync(drm->dev);
+
+ drm->dev_private = NULL;
+
+ pm_runtime_disable(drm->dev);
+}
+
+static void mxsfb_lastclose(struct drm_device *drm)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(mxsfb->fbdev);
+}
+
+static int mxsfb_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ /* Clear and enable VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
+ mxsfb_disable_axi_clk(mxsfb);
+
+ return 0;
+}
+
+static void mxsfb_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+
+ /* Disable and clear VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ mxsfb_disable_axi_clk(mxsfb);
+}
+
+static void mxsfb_irq_preinstall(struct drm_device *drm)
+{
+ mxsfb_disable_vblank(drm, 0);
+}
+
+static irqreturn_t mxsfb_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+ u32 reg;
+
+ mxsfb_enable_axi_clk(mxsfb);
+
+ reg = readl(mxsfb->base + LCDC_CTRL1);
+
+ if (reg & CTRL1_CUR_FRAME_DONE_IRQ)
+ drm_crtc_handle_vblank(&mxsfb->pipe.crtc);
+
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
+ mxsfb_disable_axi_clk(mxsfb);
+
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver mxsfb_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME | DRIVER_ATOMIC |
+ DRIVER_HAVE_IRQ,
+ .lastclose = mxsfb_lastclose,
+ .irq_handler = mxsfb_irq_handler,
+ .irq_preinstall = mxsfb_irq_preinstall,
+ .irq_uninstall = mxsfb_irq_preinstall,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = mxsfb_enable_vblank,
+ .disable_vblank = mxsfb_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &fops,
+ .name = "mxsfb-drm",
+ .desc = "MXSFB Controller DRM",
+ .date = "20160824",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct platform_device_id mxsfb_devtype[] = {
+ { .name = "imx23-fb", .driver_data = MXSFB_V3, },
+ { .name = "imx28-fb", .driver_data = MXSFB_V4, },
+ { .name = "imx6sx-fb", .driver_data = MXSFB_V4, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static const struct of_device_id mxsfb_dt_ids[] = {
+ { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
+ { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
+ { .compatible = "fsl,imx6sx-lcdif", .data = &mxsfb_devtype[2], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
+
+static int mxsfb_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ const struct of_device_id *of_id =
+ of_match_device(mxsfb_dt_ids, &pdev->dev);
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
+ drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ ret = mxsfb_load(drm, 0);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ return 0;
+
+err_unload:
+ mxsfb_unload(drm);
+err_free:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static int mxsfb_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ mxsfb_unload(drm);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static struct platform_driver mxsfb_platform_driver = {
+ .probe = mxsfb_probe,
+ .remove = mxsfb_remove,
+ .id_table = mxsfb_devtype,
+ .driver = {
+ .name = "mxsfb",
+ .of_match_table = mxsfb_dt_ids,
+ },
+};
+
+module_platform_driver(mxsfb_platform_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
new file mode 100644
index 000000000000..5d0883fc805b
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * i.MX23/i.MX28/i.MX6SX MXSFB LCD controller driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MXSFB_DRV_H__
+#define __MXSFB_DRV_H__
+
+struct mxsfb_devdata {
+ unsigned int transfer_count;
+ unsigned int cur_buf;
+ unsigned int next_buf;
+ unsigned int debug0;
+ unsigned int hs_wdth_mask;
+ unsigned int hs_wdth_shift;
+ unsigned int ipversion;
+};
+
+struct mxsfb_drm_private {
+ const struct mxsfb_devdata *devdata;
+
+ void __iomem *base; /* registers */
+ struct clk *clk;
+ struct clk *clk_axi;
+ struct clk *clk_disp_axi;
+
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ struct drm_fbdev_cma *fbdev;
+};
+
+int mxsfb_setup_crtc(struct drm_device *dev);
+int mxsfb_create_output(struct drm_device *dev);
+
+void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb);
+void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb);
+
+void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb);
+void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb);
+void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
+ struct drm_plane_state *state);
+
+#endif /* __MXSFB_DRV_H__ */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
new file mode 100644
index 000000000000..fa8d17399407
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drmP.h>
+
+#include "mxsfb_drv.h"
+
+static struct mxsfb_drm_private *
+drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
+{
+ return container_of(connector, struct mxsfb_drm_private, connector);
+}
+
+static int mxsfb_panel_get_modes(struct drm_connector *connector)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
+ if (mxsfb->panel)
+ return mxsfb->panel->funcs->get_modes(mxsfb->panel);
+
+ return 0;
+}
+
+static const struct
+drm_connector_helper_funcs mxsfb_panel_connector_helper_funcs = {
+ .get_modes = mxsfb_panel_get_modes,
+};
+
+static enum drm_connector_status
+mxsfb_panel_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
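+ /* A fixed panel is reported as connected once it has been bound. */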
+ if (mxsfb->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void mxsfb_panel_connector_destroy(struct drm_connector *connector)
+{
+ struct mxsfb_drm_private *mxsfb =
+ drm_connector_to_mxsfb_drm_private(connector);
+
+ if (mxsfb->panel)
+ drm_panel_detach(mxsfb->panel);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = mxsfb_panel_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mxsfb_panel_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int mxsfb_attach_endpoint(struct drm_device *drm,
+ const struct of_endpoint *ep)
+{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
+ struct device_node *np;
+ struct drm_panel *panel;
+ int ret = -EPROBE_DEFER;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+ panel = of_drm_find_panel(np);
+ of_node_put(np);
+
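+ /* The panel driver may not have probed yet; ask to be retried later. */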
+ if (!panel)
+ return -EPROBE_DEFER;
+
+ mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector.polled = 0;
+ drm_connector_helper_add(&mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, &mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (!ret)
+ mxsfb->panel = panel;
+
+ return ret;
+}
+
+int mxsfb_create_output(struct drm_device *drm)
+{
+ struct device_node *ep_np = NULL;
+ struct of_endpoint ep;
+ int ret;
+
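+ /* Walk the OF graph endpoints and bind the first panel that is found. */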
+ for_each_endpoint_of_node(drm->dev->of_node, ep_np) {
+ ret = of_graph_parse_endpoint(ep_np, &ep);
+ if (!ret)
+ ret = mxsfb_attach_endpoint(drm, &ep);
+
+ if (ret) {
+ of_node_put(ep_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
new file mode 100644
index 000000000000..31d62cd0d3d7
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * i.MX23/i.MX28/i.MX6SX MXSFB LCD controller driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MXSFB_REGS_H__
+#define __MXSFB_REGS_H__
+
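+/*
+ * Most LCDIF registers have "set" and "clear" aliases at offsets +4 and +8,
+ * allowing individual bits to be set or cleared without a read-modify-write
+ * of the base register.
+ */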
+#define REG_SET 4
+#define REG_CLR 8
+
+#define LCDC_CTRL 0x00
+#define LCDC_CTRL1 0x10
+#define LCDC_V3_TRANSFER_COUNT 0x20
+#define LCDC_V4_TRANSFER_COUNT 0x30
+#define LCDC_V4_CUR_BUF 0x40
+#define LCDC_V4_NEXT_BUF 0x50
+#define LCDC_V3_CUR_BUF 0x30
+#define LCDC_V3_NEXT_BUF 0x40
+#define LCDC_VDCTRL0 0x70
+#define LCDC_VDCTRL1 0x80
+#define LCDC_VDCTRL2 0x90
+#define LCDC_VDCTRL3 0xa0
+#define LCDC_VDCTRL4 0xb0
+#define LCDC_V4_DEBUG0 0x1d0
+#define LCDC_V3_DEBUG0 0x1f0
+
+#define CTRL_SFTRST (1 << 31)
+#define CTRL_CLKGATE (1 << 30)
+#define CTRL_BYPASS_COUNT (1 << 19)
+#define CTRL_VSYNC_MODE (1 << 18)
+#define CTRL_DOTCLK_MODE (1 << 17)
+#define CTRL_DATA_SELECT (1 << 16)
+#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
+#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
+#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
+#define CTRL_MASTER (1 << 5)
+#define CTRL_DF16 (1 << 3)
+#define CTRL_DF18 (1 << 2)
+#define CTRL_DF24 (1 << 1)
+#define CTRL_RUN (1 << 0)
+
+#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+#define CTRL1_CUR_FRAME_DONE_IRQ_EN (1 << 13)
+#define CTRL1_CUR_FRAME_DONE_IRQ (1 << 9)
+
+#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
+#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
+
+#define VDCTRL0_ENABLE_PRESENT (1 << 28)
+#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
+#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
+#define VDCTRL0_DOTCLK_ACT_FALLING (1 << 25)
+#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
+#define VDCTRL0_HALF_LINE (1 << 19)
+#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
+
+#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
+
+#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
+#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
+#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
+#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
+
+#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
+#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
+#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
+
+#define DEBUG0_HSYNC (1 << 26)
+#define DEBUG0_VSYNC (1 << 25)
+
+#define MXSFB_MIN_XRES 120
+#define MXSFB_MIN_YRES 120
+#define MXSFB_MAX_XRES 0xffff
+#define MXSFB_MAX_YRES 0xffff
+
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+#define TRANSP 3
+
+#define STMLCDIF_8BIT 1 /* pixel data bus to the display is of 8 bit width */
+#define STMLCDIF_16BIT 0 /* pixel data bus to the display is of 16 bit width */
+#define STMLCDIF_18BIT 2 /* pixel data bus to the display is of 18 bit width */
+#define STMLCDIF_24BIT 3 /* pixel data bus to the display is of 24 bit width */
+
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
+
+#endif /* __MXSFB_REGS_H__ */
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4ca5d9..fde6e3656636 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0cb7a18cde26..59d1d1c5de5f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -702,7 +702,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_off(crtc);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -734,7 +734,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- drm_vblank_post_modeset(dev, nv_crtc->index);
+ drm_crtc_vblank_on(crtc);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ec444eac6258..a79514d440b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -33,7 +33,7 @@
#include "nouveau_connector.h"
#include "nouveau_display.h"
#include "nvreg.h"
-
+#include "disp.h"
struct nouveau_plane {
struct drm_plane base;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index d15c296b5f33..ae49dfd1f97b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -34,6 +34,8 @@ struct nv50_disp_mthd_v1 {
#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_LINK 0x25
+#define NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI 0x26
#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
__u8 method;
__u16 hasht;
@@ -90,6 +92,21 @@ struct nv50_disp_sor_dp_pwr_v0 {
__u8 pad02[6];
};
+struct nv50_disp_sor_dp_mst_link_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_dp_mst_vcpi_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u8 start_slot;
+ __u8 num_slots;
+ __u16 pbn;
+ __u16 aligned_pbn;
+};
+
struct nv50_disp_pior_pwr_v0 {
__u8 version;
__u8 state;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e6e9537537cf..82235f30277c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,7 +52,7 @@
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
#define GP100_DISP /* cl5070.h */ 0x00009770
-#define GP104_DISP /* cl5070.h */ 0x00009870
+#define GP102_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -90,7 +90,7 @@
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
-#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
+#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d815967767f..9e58b305b020 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -66,6 +66,35 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
+struct nvif_mclass {
+ s32 oclass;
+ int version;
+};
+
+#define nvif_mclass(o,m) ({ \
+ struct nvif_object *object = (o); \
+ struct nvif_sclass *sclass; \
+ const typeof(m[0]) *mclass = (m); \
+ int ret = -ENODEV; \
+ int cnt, i, j; \
+ \
+ cnt = nvif_object_sclass_get(object, &sclass); \
+ if (cnt >= 0) { \
+ for (i = 0; ret < 0 && mclass[i].oclass; i++) { \
+ for (j = 0; j < cnt; j++) { \
+ if (mclass[i].oclass == sclass[j].oclass && \
+ mclass[i].version >= sclass[j].minver && \
+ mclass[i].version <= sclass[j].maxver) { \
+ ret = i; \
+ break; \
+ } \
+ } \
+ } \
+ nvif_object_sclass_put(&sclass); \
+ } \
+ ret; \
+})
+
/*XXX*/
#include <core/object.h>
#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index d3d26a1e215d..b93f4c1a95e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -8,5 +8,5 @@ int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index e82049667ce4..970ae753968a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -33,5 +33,5 @@ int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index 934b0ae5521d..2ff64a20c0ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
-u16 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
struct nvbios_boostE {
u8 pstate;
@@ -8,10 +8,10 @@ struct nvbios_boostE {
u32 max;
};
-u16 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
-u16 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEe(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u32 nvbios_boostEp(struct nvkm_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
-u16 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+u32 nvbios_boostEm(struct nvkm_bios *, u8, u8 *, u8 *, u8 *, u8 *,
struct nvbios_boostE *);
struct nvbios_boostS {
@@ -21,7 +21,7 @@ struct nvbios_boostS {
u32 max;
};
-u16 nvbios_boostSe(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8);
-u16 nvbios_boostSp(struct nvkm_bios *, int, u16, u8 *, u8 *, u8, u8,
+u32 nvbios_boostSe(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8);
+u32 nvbios_boostSp(struct nvkm_bios *, int, u32, u8 *, u8 *, u8, u8,
struct nvbios_boostS *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 2f0e0c8e83be..76fe7d50a1ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
-u16 nvbios_cstepTe(struct nvkm_bios *,
+u32 nvbios_cstepTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
struct nvbios_cstepE {
@@ -8,10 +8,10 @@ struct nvbios_cstepE {
u8 index;
};
-u16 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepEp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
-u16 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
+u32 nvbios_cstepEm(struct nvkm_bios *, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *);
struct nvbios_cstepX {
@@ -20,7 +20,7 @@ struct nvbios_cstepX {
u8 voltage;
};
-u16 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
-u16 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
+u32 nvbios_cstepXe(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_cstepXp(struct nvkm_bios *, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index 693ea7d9ec43..a7513e8406a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -2,5 +2,5 @@
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
-u16 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
+u32 nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dda704..b7a54e605469 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb97477248b..e933d3eede70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+ u8 mohm;
+ bool enabled;
+};
+
struct pwr_rail_t {
u8 mode;
u8 extdev_id;
- u8 resistor_mohm;
- u8 rail;
+ u8 resistor_count;
+ struct pwr_rail_resistor_t resistors[3];
+ u16 config;
};
struct nvbios_iccsense {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index d3bd250103d5..478b1c0d2089 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,6 +1,6 @@
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
-u16 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
+u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
struct nvbios_perfE {
@@ -17,9 +17,9 @@ struct nvbios_perfE {
u8 pcie_width;
};
-u16 nvbios_perf_entry(struct nvkm_bios *, int idx,
+u32 nvbios_perf_entry(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_perfEp(struct nvkm_bios *, int idx,
+u32 nvbios_perfEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
struct nvbios_perfS {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 339a826aa176..38188d4c9ab5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -2,10 +2,10 @@
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
-u16 nvbios_timingTe(struct nvkm_bios *,
+u32 nvbios_timingTe(struct nvkm_bios *,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_timingEe(struct nvkm_bios *, int idx,
+u32 nvbios_timingEe(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timingEp(struct nvkm_bios *, int idx,
+u32 nvbios_timingEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6db9281..bea31cdd1dd1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,21 +1,24 @@
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
+ u8 max0;
+ u8 max1;
+ u8 max2;
};
-u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *);
struct nvbios_vmap_entry {
- u8 unk0;
+ u8 mode;
u8 link;
u32 min;
u32 max;
s32 arg[6];
};
-u16 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_vmap_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_vmap_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610cec2b..f0baa2c7de09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,16 +13,17 @@ struct nvbios_volt {
u32 base;
/* GPIO mode */
- u8 vidmask;
- s16 step;
+ bool ranged;
+ u8 vidmask;
+ s16 step;
/* PWM mode */
u32 pwm_freq;
u32 pwm_range;
};
-u16 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+u32 nvbios_volt_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_volt_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *);
struct nvbios_volt_entry {
@@ -30,7 +31,7 @@ struct nvbios_volt_entry {
u8 vid;
};
-u16 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
-u16 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
+u32 nvbios_volt_entry(struct nvkm_bios *, int idx, u8 *ver, u8 *len);
+u32 nvbios_volt_entry_parse(struct nvkm_bios *, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 000000000000..87f804fc3a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+ u32 offset;
+
+ u8 version;
+ u8 hlen;
+ u8 ecount;
+ u8 elen;
+ u8 scount;
+ u8 slen;
+
+ u8 base_id;
+ u8 boost_id;
+ u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+ u8 pstate;
+ u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+ u8 idx, struct nvbios_vpstate_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417bc458..e5275f742977 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
struct nvbios_pll;
struct nvkm_pll_vals;
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
enum nv_clk_src {
nv_clk_src_crystal,
nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
struct list_head head;
u8 voltage;
u32 domain[nv_clk_src_max];
+ u8 id;
};
struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
struct nvkm_domain {
enum nv_clk_src name;
u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
u8 flags;
const char *mname;
int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ u8 temp;
bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+ u8 boost_mode;
+ u32 base_khz;
+ u32 boost_khz;
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 3a410275fa71..794e432578b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -93,8 +93,9 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -156,4 +157,6 @@ struct nvkm_ram_func {
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
+
+extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index e61923d5e49c..f37538eb1fe5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -35,6 +35,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4ffcde6..08ef9983c643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
u32 max_uv;
u32 min_uv;
+
+ /*
+ * These are fully functional map entries creating a sw ceiling for
+ * the voltage. These all can describe different kind of curves, so
+ * that for any given temperature a different one can return the lowest
+ * value of all three.
+ */
+ u8 max0_id;
+ u8 max1_id;
+ u8 max2_id;
+
+ int speedo;
};
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_adev)
return false;
- return acpi_has_method(parent_adev->handle, "_PR3");
+ return parent_adev->power.flags.power_resources &&
+ acpi_has_method(parent_adev->handle, "_PR3");
}
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be806cb..8b1ca4add2ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -30,12 +30,37 @@
* Register locations derived from NVClock by Roderick Colenbrander
*/
+#include <linux/apple-gmux.h>
#include <linux/backlight.h>
+#include <linux/idr.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
+static struct ida bl_ida;
+#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
+
+struct backlight_connector {
+ struct list_head head;
+ int id;
+};
+
+static bool
+nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct backlight_connector
+ *connector)
+{
+ const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
+ if (nb < 0 || nb >= 100)
+ return false;
+ if (nb > 0)
+ snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
+ else
+ snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight");
+ connector->id = nb;
+ return true;
+}
+
static int
nv40_get_intensity(struct backlight_device *bd)
{
@@ -74,6 +99,8 @@ nv40_backlight_init(struct drm_connector *connector)
struct nvif_object *device = &drm->device.object;
struct backlight_properties props;
struct backlight_device *bd;
+ struct backlight_connector bl_connector;
+ char backlight_name[BL_NAME_SIZE];
if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
@@ -81,10 +108,19 @@ nv40_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 31;
- bd = backlight_device_register("nv_backlight", connector->kdev, drm,
+ if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
+ NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
+ return 0;
+ }
+ bd = backlight_device_register(backlight_name, connector->kdev, drm,
&nv40_bl_ops, &props);
- if (IS_ERR(bd))
+
+ if (IS_ERR(bd)) {
+ if (bl_connector.id > 0)
+ ida_simple_remove(&bl_ida, bl_connector.id);
return PTR_ERR(bd);
+ }
+ list_add(&bl_connector.head, &drm->bl_connectors);
drm->backlight = bd;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
@@ -182,6 +218,8 @@ nv50_backlight_init(struct drm_connector *connector)
struct backlight_properties props;
struct backlight_device *bd;
const struct backlight_ops *ops;
+ struct backlight_connector bl_connector;
+ char backlight_name[BL_NAME_SIZE];
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder) {
@@ -203,11 +241,20 @@ nv50_backlight_init(struct drm_connector *connector)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 100;
- bd = backlight_device_register("nv_backlight", connector->kdev,
+ if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
+ NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
+ return 0;
+ }
+ bd = backlight_device_register(backlight_name, connector->kdev,
nv_encoder, ops, &props);
- if (IS_ERR(bd))
+
+ if (IS_ERR(bd)) {
+ if (bl_connector.id > 0)
+ ida_simple_remove(&bl_ida, bl_connector.id);
return PTR_ERR(bd);
+ }
+ list_add(&bl_connector.head, &drm->bl_connectors);
drm->backlight = bd;
bd->props.brightness = bd->ops->get_brightness(bd);
backlight_update_status(bd);
@@ -221,6 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
struct nvif_device *device = &drm->device;
struct drm_connector *connector;
+ if (apple_gmux_present()) {
+ NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&drm->bl_connectors);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
@@ -232,6 +286,7 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return nv50_backlight_init(connector);
default:
break;
@@ -246,9 +301,27 @@ void
nouveau_backlight_exit(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct backlight_connector *connector;
+
+ list_for_each_entry(connector, &drm->bl_connectors, head) {
+ if (connector->id >= 0)
+ ida_simple_remove(&bl_ida, connector->id);
+ }
if (drm->backlight) {
backlight_device_unregister(drm->backlight);
drm->backlight = NULL;
}
}
+
+void
+nouveau_backlight_ctor(void)
+{
+ ida_init(&bl_ida);
+}
+
+void
+nouveau_backlight_dtor(void)
+{
+ ida_destroy(&bl_ida);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a1570b109434..23ffe8571a99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+ return nvif_rd32(device, 0x001800) & 0x0000000f;
+ else
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586eb015..18eb061ccafb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
#define ROMPTR(d,x) ({ \
struct nouveau_drm *drm = nouveau_drm((d)); \
ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..dd07ca140d12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = (struct nouveau_fence *)fence_get(fence);
+ tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1209,6 +1209,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
nvkm_vm_map(vma, new_mem->mm_node);
} else {
+ WARN_ON(ttm_bo_wait(bo, false, false));
nvkm_vm_unmap(vma);
}
}
@@ -1243,7 +1244,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1561,6 +1562,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c1084088f9e4..947c200655b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -30,6 +30,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -47,6 +48,301 @@
#include <nvif/cl0046.h>
#include <nvif/event.h>
+struct drm_display_mode *
+nouveau_conn_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+
+ /* Use preferred mode if there is one.. */
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG(drm, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+int
+nouveau_conn_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, u64 *val)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property)
+ *val = asyc->scaler.mode;
+ else if (property == disp->underscan_property)
+ *val = asyc->scaler.underscan.mode;
+ else if (property == disp->underscan_hborder_property)
+ *val = asyc->scaler.underscan.hborder;
+ else if (property == disp->underscan_vborder_property)
+ *val = asyc->scaler.underscan.vborder;
+ else if (property == disp->dithering_mode)
+ *val = asyc->dither.mode;
+ else if (property == disp->dithering_depth)
+ *val = asyc->dither.depth;
+ else if (property == disp->vibrant_hue_property)
+ *val = asyc->procamp.vibrant_hue;
+ else if (property == disp->color_vibrance_property)
+ *val = asyc->procamp.color_vibrance;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+nouveau_conn_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (val) {
+ case DRM_MODE_SCALE_NONE:
+ /* We allow 'None' for EDID modes, even on a fixed
+ * panel (some exist with support for lower refresh
+ * rates, which people might want to use for power-
+ * saving purposes).
+ *
+ * Non-EDID modes will force the use of GPU scaling
+ * to the native mode regardless of this setting.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* ... except prior to G80, where the code
+ * doesn't support such things.
+ */
+ if (disp->disp.oclass < NV50_DISP)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (asyc->scaler.mode != val) {
+ asyc->scaler.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_property) {
+ if (asyc->scaler.underscan.mode != val) {
+ asyc->scaler.underscan.mode = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_hborder_property) {
+ if (asyc->scaler.underscan.hborder != val) {
+ asyc->scaler.underscan.hborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->underscan_vborder_property) {
+ if (asyc->scaler.underscan.vborder != val) {
+ asyc->scaler.underscan.vborder = val;
+ asyc->set.scaler = true;
+ }
+ } else
+ if (property == disp->dithering_mode) {
+ if (asyc->dither.mode != val) {
+ asyc->dither.mode = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->dithering_depth) {
+ if (asyc->dither.depth != val) {
+ asyc->dither.depth = val;
+ asyc->set.dither = true;
+ }
+ } else
+ if (property == disp->vibrant_hue_property) {
+ if (asyc->procamp.vibrant_hue != val) {
+ asyc->procamp.vibrant_hue = val;
+ asyc->set.procamp = true;
+ }
+ } else
+ if (property == disp->color_vibrance_property) {
+ if (asyc->procamp.color_vibrance != val) {
+ asyc->procamp.color_vibrance = val;
+ asyc->set.procamp = true;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nouveau_conn_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(state);
+ __drm_atomic_helper_connector_destroy_state(&asyc->state);
+ kfree(asyc);
+}
+
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_conn_atom *asyc;
+ if (!(asyc = kmalloc(sizeof(*asyc), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &asyc->state);
+ asyc->dither = armc->dither;
+ asyc->scaler = armc->scaler;
+ asyc->procamp = armc->procamp;
+ asyc->set.mask = 0;
+ return &asyc->state;
+}
+
+void
+nouveau_conn_reset(struct drm_connector *connector)
+{
+ struct nouveau_conn_atom *asyc;
+
+ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+ return;
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ asyc->dither.mode = DITHERING_MODE_AUTO;
+ asyc->dither.depth = DITHERING_DEPTH_AUTO;
+ asyc->scaler.mode = DRM_MODE_SCALE_NONE;
+ asyc->scaler.underscan.mode = UNDERSCAN_OFF;
+ asyc->procamp.color_vibrance = 150;
+ asyc->procamp.vibrant_hue = 90;
+
+ if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ /* See note in nouveau_conn_atomic_set_property(). */
+ asyc->scaler.mode = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void
+nouveau_conn_attach_properties(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
+ struct nouveau_display *disp = nouveau_display(dev);
+
+ /* Init DVI-I specific properties. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ dvi_i_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs. */
+ if (disp->underscan_property &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)) {
+ drm_object_attach_property(&connector->base,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_hborder_property, 0);
+ drm_object_attach_property(&connector->base,
+ disp->underscan_vborder_property, 0);
+ }
+
+ /* Add hue and saturation options. */
+ if (disp->vibrant_hue_property)
+ drm_object_attach_property(&connector->base,
+ disp->vibrant_hue_property,
+ armc->procamp.vibrant_hue);
+ if (disp->color_vibrance_property)
+ drm_object_attach_property(&connector->base,
+ disp->color_vibrance_property,
+ armc->procamp.color_vibrance);
+
+ /* Scaling mode property. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ break;
+ case DRM_MODE_CONNECTOR_VGA:
+ if (disp->disp.oclass < NV50_DISP)
+ break; /* Can only scale on DFPs. */
+ /* Fall-through. */
+ default:
+ drm_object_attach_property(&connector->base, dev->mode_config.
+ scaling_mode_property,
+ armc->scaler.mode);
+ break;
+ }
+
+ /* Dithering properties. */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_TV:
+ case DRM_MODE_CONNECTOR_VGA:
+ break;
+ default:
+ if (disp->dithering_mode) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_mode,
+ armc->dither.mode);
+ }
+ if (disp->dithering_depth) {
+ drm_object_attach_property(&connector->base,
+ disp->dithering_depth,
+ armc->dither.depth);
+ }
+ break;
+ }
+}
+
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -151,7 +447,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
- if (ret == 0)
+ if (ret == NOUVEAU_DP_MST)
+ return NULL;
+ if (ret == NOUVEAU_DP_SST)
break;
} else
if ((vga_switcheroo_handler_flags() &
@@ -465,199 +763,39 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
- struct nouveau_display *disp = nouveau_display(connector->dev);
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- struct drm_device *dev = connector->dev;
- struct nouveau_crtc *nv_crtc;
int ret;
- nv_crtc = NULL;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
-
- /* Scaling mode */
- if (property == dev->mode_config.scaling_mode_property) {
- bool modeset = false;
-
- switch (value) {
- case DRM_MODE_SCALE_NONE:
- /* We allow 'None' for EDID modes, even on a fixed
- * panel (some exist with support for lower refresh
- * rates, which people might want to use for power
- * saving purposes).
- *
- * Non-EDID modes will force the use of GPU scaling
- * to the native mode regardless of this setting.
- */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* ... except prior to G80, where the code
- * doesn't support such things.
- */
- if (disp->disp.oclass < NV50_DISP)
- return -EINVAL;
- break;
- default:
- break;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- return -EINVAL;
- }
-
- /* Changing between GPU and panel scaling requires a full
- * modeset
- */
- if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
- (value == DRM_MODE_SCALE_NONE))
- modeset = true;
- nv_connector->scaling_mode = value;
-
- if (!nv_crtc)
- return 0;
-
- if (modeset || !nv_crtc->set_scale) {
- ret = drm_crtc_helper_set_mode(&nv_crtc->base,
- &nv_crtc->base.mode,
- nv_crtc->base.x,
- nv_crtc->base.y, NULL);
- if (!ret)
- return -EINVAL;
- } else {
- ret = nv_crtc->set_scale(nv_crtc, true);
- if (ret)
- return ret;
- }
-
- return 0;
- }
-
- /* Underscan */
- if (property == disp->underscan_property) {
- if (nv_connector->underscan != value) {
- nv_connector->underscan = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_hborder_property) {
- if (nv_connector->underscan_hborder != value) {
- nv_connector->underscan_hborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
-
- if (property == disp->underscan_vborder_property) {
- if (nv_connector->underscan_vborder != value) {
- nv_connector->underscan_vborder = value;
- if (!nv_crtc || !nv_crtc->set_scale)
- return 0;
-
- return nv_crtc->set_scale(nv_crtc, true);
- }
-
- return 0;
- }
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_set_property(connector, property, value);
- /* Dithering */
- if (property == disp->dithering_mode) {
- nv_connector->dithering_mode = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (property == disp->dithering_depth) {
- nv_connector->dithering_depth = value;
- if (!nv_crtc || !nv_crtc->set_dither)
- return 0;
-
- return nv_crtc->set_dither(nv_crtc, true);
- }
-
- if (nv_crtc && nv_crtc->set_color_vibrance) {
- /* Hue */
- if (property == disp->vibrant_hue_property) {
- nv_crtc->vibrant_hue = value - 90;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
- /* Saturation */
- if (property == disp->color_vibrance_property) {
- nv_crtc->color_vibrance = value - 100;
- return nv_crtc->set_color_vibrance(nv_crtc, true);
- }
+ ret = connector->funcs->atomic_set_property(&nv_connector->base,
+ &asyc->state,
+ property, value);
+ if (ret) {
+ if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
+ return ret;
}
- if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
-
- return -EINVAL;
-}
-
-static struct drm_display_mode *
-nouveau_connector_native_mode(struct drm_connector *connector)
-{
- const struct drm_connector_helper_funcs *helper = connector->helper_private;
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *largest = NULL;
- int high_w = 0, high_h = 0, high_v = 0;
+ nv_connector->scaling_mode = asyc->scaler.mode;
+ nv_connector->dithering_mode = asyc->dither.mode;
- list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (helper->mode_valid(connector, mode) != MODE_OK ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
-
- /* Use preferred mode if there is one.. */
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(drm, "native mode from preferred\n");
- return drm_mode_duplicate(dev, mode);
- }
-
- /* Otherwise, take the resolution with the largest width, then
- * height, then vertical refresh
- */
- if (mode->hdisplay < high_w)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay < high_h)
- continue;
-
- if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
- mode->vrefresh < high_v)
- continue;
-
- high_w = mode->hdisplay;
- high_h = mode->vdisplay;
- high_v = mode->vrefresh;
- largest = mode;
+ if (connector->encoder && connector->encoder->crtc) {
+ ret = drm_crtc_helper_set_mode(connector->encoder->crtc,
+ &connector->encoder->crtc->mode,
+ connector->encoder->crtc->x,
+ connector->encoder->crtc->y,
+ NULL);
+ if (!ret)
+ return -EINVAL;
}
- NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
- high_w, high_h, high_v);
- return largest ? drm_mode_duplicate(dev, largest) : NULL;
+ return 0;
}
struct moderec {
@@ -805,8 +943,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* the list of modes.
*/
if (!nv_connector->native_mode)
- nv_connector->native_mode =
- nouveau_connector_native_mode(connector);
+ nv_connector->native_mode = nouveau_conn_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -934,56 +1071,42 @@ nouveau_connector_helper_funcs = {
.best_encoder = nouveau_connector_best_encoder,
};
+static int
+nouveau_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (connector->dev->mode_config.funcs->atomic_commit)
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
+}
+
static const struct drm_connector_funcs
nouveau_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .destroy = nouveau_connector_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = nouveau_connector_dpms,
+ .reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
- .destroy = nouveau_connector_destroy,
+ .force = nouveau_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
-};
-
-static int
-nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
-{
- struct nouveau_encoder *nv_encoder = NULL;
-
- if (connector->encoder)
- nv_encoder = nouveau_encoder(connector->encoder);
- if (nv_encoder && nv_encoder->dcb &&
- nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- if (mode == DRM_MODE_DPMS_ON) {
- u8 data = DP_SET_POWER_D0;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- usleep_range(1000, 2000);
- } else {
- u8 data = DP_SET_POWER_D3;
- nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
- }
- }
-
- return drm_helper_connector_dpms(connector, mode);
-}
-
-static const struct drm_connector_funcs
-nouveau_connector_funcs_dp = {
- .dpms = nouveau_connector_dp_dpms,
- .detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = nouveau_connector_set_property,
- .force = nouveau_connector_force
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
@@ -995,19 +1118,20 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
struct nouveau_drm *drm = nouveau_drm(connector->dev);
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
+ struct nouveau_encoder *nv_encoder;
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
-
- mutex_lock(&drm->dev->mode_config.mutex);
- if (plugged)
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&drm->dev->mode_config.mutex);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+ if (!plugged)
+ nv50_mstm_remove(nv_encoder->dp.mstm);
+ }
drm_helper_hpd_irq_event(connector->dev);
}
@@ -1188,7 +1312,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs_dp;
+ funcs = &nouveau_connector_funcs;
break;
default:
funcs = &nouveau_connector_funcs;
@@ -1202,38 +1326,10 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* Init DVI-I specific properties */
- if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
+ connector->funcs->reset(connector);
+ nouveau_conn_attach_properties(connector);
- /* Add overscan compensation options to digital outputs */
- if (disp->underscan_property &&
- (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA ||
- type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_object_attach_property(&connector->base,
- disp->underscan_property,
- UNDERSCAN_OFF);
- drm_object_attach_property(&connector->base,
- disp->underscan_hborder_property,
- 0);
- drm_object_attach_property(&connector->base,
- disp->underscan_vborder_property,
- 0);
- }
-
- /* Add hue and saturation options */
- if (disp->vibrant_hue_property)
- drm_object_attach_property(&connector->base,
- disp->vibrant_hue_property,
- 90);
- if (disp->color_vibrance_property)
- drm_object_attach_property(&connector->base,
- disp->color_vibrance_property,
- 150);
-
- /* default scaling mode */
+ /* Default scaling mode */
switch (nv_connector->type) {
case DCB_CONNECTOR_LVDS:
case DCB_CONNECTOR_LVDS_SPWG:
@@ -1250,23 +1346,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- /* scaling mode property */
- switch (nv_connector->type) {
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- break;
- case DCB_CONNECTOR_VGA:
- if (disp->disp.oclass < NV50_DISP)
- break; /* can only scale on DFPs */
- /* fall-through */
- default:
- drm_object_attach_property(&connector->base, dev->mode_config.
- scaling_mode_property,
- nv_connector->scaling_mode);
- break;
- }
-
/* dithering properties */
switch (nv_connector->type) {
case DCB_CONNECTOR_TV_0:
@@ -1275,20 +1354,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DCB_CONNECTOR_VGA:
break;
default:
- if (disp->dithering_mode) {
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_mode,
- nv_connector->
- dithering_mode);
- }
- if (disp->dithering_depth) {
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_object_attach_property(&connector->base,
- disp->dithering_depth,
- nv_connector->
- dithering_depth);
- }
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
break;
}
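
The connector code above repeatedly attaches driver-specific enum properties (dithering, underscan, vibrance) with drm_object_attach_property(). As a point of reference, here is a minimal sketch of how such an enum property is typically created once and then attached per connector with the core DRM API; the "foo" names and enum values are illustrative placeholders, not nouveau's.

#include <drm/drm_crtc.h>

static const struct drm_prop_enum_list foo_dither_names[] = {
	{ 0, "off"  },
	{ 1, "on"   },
	{ 2, "auto" },
};

/* Created once at display init; the same property object is shared
 * by every connector it is later attached to. */
static struct drm_property *
foo_create_dither_property(struct drm_device *dev)
{
	return drm_property_create_enum(dev, 0, "dithering mode",
					foo_dither_names,
					ARRAY_SIZE(foo_dither_names));
}

/* Called per connector, passing that connector's current value. */
static void
foo_attach_dither_property(struct drm_connector *connector,
			   struct drm_property *prop, u64 value)
{
	drm_object_attach_property(&connector->base, prop, value);
}
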
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 7446ee66ea04..096983c42a1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -35,30 +35,6 @@
struct nvkm_i2c_port;
-enum nouveau_underscan_type {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
-};
-
-/* the enum values specifically defined here match nv50/nvd0 hw values, and
- * the code relies on this
- */
-enum nouveau_dithering_mode {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
-};
-
-enum nouveau_dithering_depth {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
-};
-
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -70,12 +46,7 @@ struct nouveau_connector {
struct drm_dp_aux aux;
int dithering_mode;
- int dithering_depth;
int scaling_mode;
- bool scaling_full;
- enum nouveau_underscan_type underscan;
- u32 underscan_hborder;
- u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
@@ -109,5 +80,74 @@ nouveau_connector_create(struct drm_device *, int index);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
extern int nouveau_duallink;
+extern int nouveau_hdmimhz;
+
+#include <drm/drm_crtc.h>
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+void nouveau_conn_attach_properties(struct drm_connector *);
+void nouveau_conn_reset(struct drm_connector *);
+struct drm_connector_state *
+nouveau_conn_atomic_duplicate_state(struct drm_connector *);
+void nouveau_conn_atomic_destroy_state(struct drm_connector *,
+ struct drm_connector_state *);
+int nouveau_conn_atomic_set_property(struct drm_connector *,
+ struct drm_connector_state *,
+ struct drm_property *, u64);
+int nouveau_conn_atomic_get_property(struct drm_connector *,
+ const struct drm_connector_state *,
+ struct drm_property *, u64 *);
+struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
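
The nouveau_conn_atom structure above embeds drm_connector_state and recovers it with container_of(), mirroring the nouveau_conn_atomic_* prototypes it declares. A stripped-down sketch of the same subclassing pattern using the generic atomic helpers follows; the foo_* names are placeholders and error handling is kept minimal.

#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

struct foo_conn_state {
	struct drm_connector_state base;
	int dither_mode; /* driver-private data carried with the state */
};

#define foo_conn_state(s) container_of((s), struct foo_conn_state, base)

static struct drm_connector_state *
foo_conn_duplicate_state(struct drm_connector *connector)
{
	struct foo_conn_state *copy;

	/* Safe only because ->reset allocated connector->state as a
	 * struct foo_conn_state in the first place. */
	copy = kmemdup(connector->state, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	/* Fixes up base-state bookkeeping (references, backpointers). */
	__drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
	return &copy->base;
}

static void
foo_conn_destroy_state(struct drm_connector *connector,
		       struct drm_connector_state *state)
{
	__drm_atomic_helper_connector_destroy_state(state);
	kfree(foo_conn_state(state));
}
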
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 863f10b8d818..050fcf30a0d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -38,8 +38,6 @@ struct nouveau_crtc {
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
- int color_vibrance;
- int vibrant_hue;
int sharpness;
int last_dpms;
@@ -54,7 +52,6 @@ struct nouveau_crtc {
struct {
struct nouveau_bo *nvbo;
- bool visible;
uint32_t offset;
void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
void (*set_pos)(struct nouveau_crtc *, int x, int y);
@@ -70,10 +67,6 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, bool update);
- int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
-
void (*save)(struct drm_crtc *crtc);
void (*restore)(struct drm_crtc *crtc);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557b23d4..cef08da1da4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,7 +24,10 @@
*
*/
+#include <acpi/video.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <nvif/class.h>
@@ -92,7 +95,7 @@ calc(int blanks, int blanke, int total, int line)
return line;
}
-int
+static int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
@@ -158,9 +161,13 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (nouveau_crtc(crtc)->index == pipe) {
+ struct drm_display_mode *mode;
+ if (dev->mode_config.funcs->atomic_commit)
+ mode = &crtc->state->adjusted_mode;
+ else
+ mode = &crtc->hwmode;
return drm_calc_vbltimestamp_from_scanoutpos(dev,
- pipe, max_error, time, flags,
- &crtc->hwmode);
+ pipe, max_error, time, flags, mode);
}
}
@@ -217,10 +224,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct nouveau_display *disp = nouveau_display(drm_fb->dev);
-
- if (disp->fb_dtor)
- disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -245,57 +248,45 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev,
- struct nouveau_framebuffer *nv_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo)
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct nouveau_bo *nvbo,
+ struct nouveau_framebuffer **pfb)
{
- struct nouveau_display *disp = nouveau_display(dev);
- struct drm_framebuffer *fb = &nv_fb->base;
+ struct nouveau_framebuffer *fb;
int ret;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
- nv_fb->nvbo = nvbo;
-
- ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret)
- return ret;
+ if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
- if (disp->fb_ctor) {
- ret = disp->fb_ctor(fb);
- if (ret)
- disp->fb_dtor(fb);
- }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->nvbo = nvbo;
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret)
+ kfree(fb);
return ret;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo;
struct drm_gem_object *gem;
- int ret = -ENOMEM;
+ int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
+ nvbo = nouveau_gem_object(gem);
- nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!nouveau_fb)
- goto err_unref;
-
- ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret)
- goto err;
+ ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ if (ret == 0)
+ return &fb->base;
- return &nouveau_fb->base;
-
-err:
- kfree(nouveau_fb);
-err_unref:
drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -358,6 +349,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} \
} while(0)
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c.
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+ /*
+ * This may be the only indication we receive of a
+ * connector hotplug on a runtime suspended GPU,
+ * so schedule hpd_work to check.
+ */
+ schedule_work(&drm->hpd_work);
+
+ /* acpi-video should not generate keypresses for this */
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
int
nouveau_display_init(struct drm_device *dev)
{
@@ -385,16 +425,19 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev)
+nouveau_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- int head;
+ struct drm_crtc *crtc;
+
+ if (!suspend)
+ drm_crtc_force_disable_all(dev);
/* Make sure that drm and hw vblank irqs get properly disabled. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_off(dev, head);
+ drm_for_each_crtc(crtc, dev)
+ drm_crtc_vblank_off(crtc);
/* disable flip completion events */
nvif_notify_put(&drm->flip);
@@ -495,7 +538,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GP104_DISP,
+ GP102_DISP,
GP100_DISP,
GM200_DISP,
GM107_DISP,
@@ -530,6 +573,8 @@ nouveau_display_create(struct drm_device *dev)
if (ret)
goto disp_create_err;
+ drm_mode_config_reset(dev);
+
if (dev->mode_config.num_crtc) {
ret = nouveau_display_vblank_init(dev);
if (ret)
@@ -537,6 +582,12 @@ nouveau_display_create(struct drm_device *dev)
}
nouveau_backlight_init(dev);
+ INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+ drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+ register_acpi_notifier(&drm->acpi_nb);
+#endif
+
return 0;
vblank_err:
@@ -552,11 +603,13 @@ nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
- drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -568,12 +621,138 @@ nouveau_display_destroy(struct drm_device *dev)
kfree(disp);
}
+static int
+nouveau_atomic_disable_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ int ret;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->active = false;
+
+ drm_for_each_plane_mask(plane, connector->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_atomic_disable(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_connector(connector, dev) {
+ ret = nouveau_atomic_disable_connector(state, connector);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0)
+ ret = drm_atomic_commit(state);
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_atomic_state *
+nouveau_atomic_suspend(struct drm_device *dev)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret < 0) {
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (IS_ERR(state))
+ goto unlock;
+
+ ret = nouveau_atomic_disable(dev, &ctx);
+ if (ret < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(ret);
+ goto unlock;
+ }
+
+unlock:
+ if (PTR_ERR(state) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return state;
+}
+
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_crtc *crtc;
- nouveau_display_fini(dev);
+ if (dev->mode_config.funcs->atomic_commit) {
+ if (!runtime) {
+ disp->suspend = nouveau_atomic_suspend(dev);
+ if (IS_ERR(disp->suspend)) {
+ int ret = PTR_ERR(disp->suspend);
+ disp->suspend = NULL;
+ return ret;
+ }
+ }
+
+ nouveau_display_fini(dev, true);
+ return 0;
+ }
+
+ nouveau_display_fini(dev, true);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -600,9 +779,19 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
+ struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret, head;
+ int ret;
+
+ if (dev->mode_config.funcs->atomic_commit) {
+ nouveau_display_init(dev);
+ if (disp->suspend) {
+ drm_atomic_helper_resume(dev, disp->suspend);
+ disp->suspend = NULL;
+ }
+ return;
+ }
/* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -647,10 +836,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -692,10 +877,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- else
- BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
@@ -724,6 +906,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
@@ -770,32 +954,23 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
if (ret)
goto fail_unreserve;
- } else {
- struct nv04_display *dispnv04 = nv04_display(dev);
- int head = nouveau_crtc(crtc)->index;
-
- if (swap_interval) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- goto fail_unreserve;
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
- }
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
}
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
@@ -843,16 +1018,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_crtc_arm_vblank_event(s->crtc, s->event);
- } else {
- drm_crtc_send_vblank_event(s->crtc, s->event);
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_crtc_vblank_put(s->crtc);
- }
- }
- else {
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
+ } else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
@@ -874,12 +1041,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
- state.offset + state.crtc->y *
- state.pitch + state.crtc->x *
- state.bpp / 8);
- }
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
return NVIF_NOTIFY_KEEP;
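
The nouveau_atomic_suspend()/nouveau_display_resume() pair above hand-rolls the "duplicate the committed atomic state, disable everything, re-commit it on resume" sequence, presumably so it can skip the save on runtime suspend and disable connectors explicitly. For comparison, a sketch of the same flow using the stock helpers, for a hypothetical driver that does not need that extra handling:

#include <drm/drm_atomic_helper.h>

/* Save the committed state and shut the display pipeline down. */
static int foo_display_suspend(struct drm_device *dev,
			       struct drm_atomic_state **saved)
{
	struct drm_atomic_state *state = drm_atomic_helper_suspend(dev);

	if (IS_ERR(state))
		return PTR_ERR(state);
	*saved = state;
	return 0;
}

/* Re-commit the state captured at suspend time. */
static int foo_display_resume(struct drm_device *dev,
			      struct drm_atomic_state *saved)
{
	return drm_atomic_helper_resume(dev, saved);
}
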
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 0420ee861ea4..4a75df06c139 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -22,8 +22,9 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
- const struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+int nouveau_framebuffer_new(struct drm_device *,
+ const struct drm_mode_fb_cmd2 *,
+ struct nouveau_bo *, struct nouveau_framebuffer **);
struct nouveau_page_flip_state {
struct list_head head;
@@ -39,9 +40,6 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- int (*fb_ctor)(struct drm_framebuffer *);
- void (*fb_dtor)(struct drm_framebuffer *);
-
struct nvif_object disp;
struct drm_property *dithering_mode;
@@ -52,6 +50,8 @@ struct nouveau_display {
/* not really hue and saturation: */
struct drm_property *vibrant_hue_property;
struct drm_property *color_vibrance_property;
+
+ struct drm_atomic_state *suspend;
};
static inline struct nouveau_display *
@@ -63,7 +63,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev, bool suspend);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
@@ -91,6 +91,8 @@ int nouveau_crtc_set_config(struct drm_mode_set *set);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
+extern void nouveau_backlight_ctor(void);
+extern void nouveau_backlight_dtor(void);
#else
static inline int
nouveau_backlight_init(struct drm_device *dev)
@@ -101,6 +103,17 @@ nouveau_backlight_init(struct drm_device *dev)
static inline void
nouveau_backlight_exit(struct drm_device *dev) {
}
+
+static inline void
+nouveau_backlight_ctor(void) {
+}
+
+static inline void
+nouveau_backlight_dtor(void) {
+}
#endif
+struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
+ const struct drm_mode_fb_cmd2 *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 87d52d36f4fc..0d052e1660f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,6 +30,13 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <nvif/class.h>
+#include <nvif/cl5070.h>
+
+MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
+static int nouveau_mst = 1;
+module_param_named(mst, nouveau_mst, int, 0400);
+
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
{
@@ -55,14 +62,14 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c_aux *aux;
- u8 *dpcd = nv_encoder->dp.dpcd;
+ u8 dpcd[8];
int ret;
aux = nv_encoder->aux;
if (!aux)
return -ENODEV;
- ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
+ ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (ret)
return ret;
@@ -84,5 +91,11 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
nouveau_dp_probe_oui(dev, aux, dpcd);
- return 0;
+
+ ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst);
+ if (ret == 1)
+ return NOUVEAU_DP_MST;
+ if (ret == 0)
+ return NOUVEAU_DP_SST;
+ return ret;
}
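
nouveau_dp_detect() above reads the first DPCD bytes through nouveau's own nvkm_rdaux() before asking nv50_mstm_detect() whether to drive the sink in MST mode. The equivalent probe with the generic DP AUX helpers would look roughly like the sketch below; it is illustrative only (nouveau keeps its own AUX path), and it checks just the DPCD revision, which is a precondition for MST rather than the full capability check.

#include <drm/drm_dp_helper.h>

/* Returns 1 if the sink reports at least DPCD 1.2, 0 for an older
 * (SST-only) sink, or a negative error code on AUX failure. */
static int foo_dp_probe_dpcd(struct drm_dp_aux *aux)
{
	u8 dpcd[8];
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	if (ret != sizeof(dpcd))
		return ret < 0 ? ret : -EIO;

	return dpcd[DP_DPCD_REV] >= 0x12;
}
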
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88a015..59348fc41c77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
+#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+ nouveau_led_init(dev);
if (nouveau_runtime_pm != 0) {
pm_runtime_use_autosuspend(dev->dev);
@@ -510,13 +512,14 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev);
+ nouveau_display_fini(dev, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
+ nouveau_led_suspend(dev);
+
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
nouveau_fbcon_set_suspend(dev, 0);
}
+ nouveau_led_resume(dev);
+
return 0;
}
@@ -692,7 +699,12 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- return nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm_dev, false);
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
+ return ret;
}
static int
@@ -766,6 +778,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+
+ /* Monitors may have been connected / disconnected during suspend */
+ schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+
return ret;
}
@@ -1030,6 +1046,7 @@ static void nouveau_display_options(void)
DRM_DEBUG_DRIVER("... modeset : %d\n", nouveau_modeset);
DRM_DEBUG_DRIVER("... runpm : %d\n", nouveau_runtime_pm);
DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
+ DRM_DEBUG_DRIVER("... hdmimhz : %d\n", nouveau_hdmimhz);
}
static const struct dev_pm_ops nouveau_pm_ops = {
@@ -1105,6 +1122,7 @@ nouveau_drm_init(void)
#endif
nouveau_register_dsm_handler();
+ nouveau_backlight_ctor();
return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
}
@@ -1115,6 +1133,7 @@ nouveau_drm_exit(void)
return;
drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
+ nouveau_backlight_dtor();
nouveau_unregister_dsm_handler();
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a0212cd48..8d5ed5bfdacb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
*/
+#include <linux/notifier.h>
+
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
@@ -161,11 +163,19 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct list_head bl_connectors;
+ struct work_struct hpd_work;
+#ifdef CONFIG_ACPI
+ struct notifier_block acpi_nb;
+#endif
/* power management */
struct nouveau_hwmon *hwmon;
struct nouveau_debugfs *debugfs;
+ /* led management */
+ struct nouveau_led *led;
+
/* display power reference */
bool have_disp_power_ref;
@@ -201,6 +211,10 @@ void nouveau_drm_device_remove(struct drm_device *dev);
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
+#define NV_ATOMIC(drm,f,a...) do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ NV_PRINTK(info, &(drm)->client, f, ##a); \
+} while(0)
extern int nouveau_modeset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ee6a6d3fc80f..198e5f27682f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -30,6 +30,7 @@
#include <subdev/bios/dcb.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
#define NV_DPMS_CLEARED 0x80
@@ -57,15 +58,16 @@ struct nouveau_encoder {
union {
struct {
- u8 dpcd[8];
+ struct nv50_mstm *mstm;
int link_nr;
int link_bw;
- u32 datarate;
} dp;
};
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
+ void (*update)(struct nouveau_encoder *, u8 head,
+ struct drm_display_mode *, u8 proto, u8 depth);
};
struct nouveau_encoder *
@@ -90,9 +92,17 @@ get_slave_funcs(struct drm_encoder *enc)
}
/* nouveau_dp.c */
+enum nouveau_dp_status {
+ NOUVEAU_DP_SST,
+ NOUVEAU_DP_MST,
+};
+
int nouveau_dp_detect(struct nouveau_encoder *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow);
+void nv50_mstm_remove(struct nv50_mstm *);
+void nv50_mstm_service(struct nv50_mstm *);
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f5692726c16..2f2a3dcd4ad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,7 +58,7 @@ static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -90,7 +90,7 @@ static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -122,7 +122,7 @@ static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nvif_device *device = &drm->device;
int ret;
@@ -154,7 +154,7 @@ static int
nouveau_fbcon_sync(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -181,7 +181,7 @@ static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
@@ -192,42 +192,30 @@ static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
pm_runtime_put(drm->dev->dev);
return 0;
}
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
.fb_copyarea = nouveau_fbcon_copyarea,
.fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = nouveau_fbcon_open,
.fb_release = nouveau_fbcon_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
void
@@ -333,16 +321,15 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
{
struct nouveau_fbdev *fbcon =
container_of(helper, struct nouveau_fbdev, helper);
- struct drm_device *dev = fbcon->dev;
+ struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- int size, ret;
+ int ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -353,16 +340,17 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = roundup(size, PAGE_SIZE);
-
- ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
+ 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ if (ret)
+ goto out_unref;
+
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
@@ -377,8 +365,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
- &fbcon->nouveau_fb.vma);
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -394,13 +381,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->par = fbcon;
- nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
-
- nouveau_fb = &fbcon->nouveau_fb;
- fb = &nouveau_fb->base;
-
/* setup helper */
- fbcon->helper.fb = fb;
+ fbcon->helper.fb = &fb->base;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -411,14 +393,14 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
- info->fix.smem_len = size;
+ info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
+ fb->nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
- info->screen_size = size;
+ info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
+ info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -429,20 +411,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- nouveau_fb->base.width, nouveau_fb->base.height,
- nvbo->bo.offset, nvbo);
+ fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
- nouveau_bo_unmap(nvbo);
+ nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_bo_unmap(fb->nvbo);
out_unpin:
- nouveau_bo_unpin(nvbo);
+ nouveau_bo_unpin(fb->nvbo);
out_unref:
- nouveau_bo_ref(NULL, &nvbo);
+ nouveau_bo_ref(NULL, &fb->nvbo);
out:
return ret;
}
@@ -458,28 +439,26 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_release_fbi(&fbcon->helper);
+ drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
- nouveau_fb->nvbo = NULL;
+ drm_framebuffer_unreference(&nouveau_fb->base);
}
- drm_fb_helper_fini(&fbcon->helper);
- drm_framebuffer_unregister_private(&nouveau_fb->base);
- drm_framebuffer_cleanup(&nouveau_fb->base);
+
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
struct nouveau_fbdev *fbcon = info->par;
- struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -522,7 +501,6 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
- fbcon->dev = dev;
drm->fbcon = fbcon;
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
@@ -545,7 +523,8 @@ nouveau_fbcon_init(struct drm_device *dev)
preferred_bpp = 32;
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
+ if (!dev->mode_config.funcs->atomic_commit)
+ drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index ca77ad001978..e2bca729721e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -33,8 +33,6 @@
struct nouveau_fbdev {
struct drm_fb_helper helper;
- struct nouveau_framebuffer nouveau_fb;
- struct drm_device *dev;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..f2f348f0160c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
#include <nvif/notify.h>
@@ -38,11 +38,11 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
- if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
struct nouveau_fence_priv *priv = (void*)drm->fence;
if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_fence_work {
struct work_struct work;
- struct fence_cb cb;
+ struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
kfree(work);
}
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
}
void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
work->func = func;
work->data = data;
- if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
- fence_init(&fence->base, &nouveau_fence_ops_uevent,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock, fctx->context, ++fctx->sequence);
else
- fence_init(&fence->base, &nouveau_fence_ops_legacy,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
- trace_fence_emit(&fence->base);
+ trace_dma_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
struct nouveau_channel *chan;
unsigned long flags;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
- ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
@@ -391,7 +391,7 @@ int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct fence *fence;
+ struct dma_fence *fence;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
}
return ret;
@@ -456,7 +456,7 @@ void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- fence_put(&(*pfence)->base);
+ dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
return ret;
}
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
return ret;
}
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
/*
- * This needs uevents to work correctly, but fence_add_callback relies on
+ * This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return false;
}
return true;
}
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
.release = nouveau_fence_release
};
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
ret = nouveau_fence_no_signaling(f);
if (ret)
- set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_notify_put(&fctx->notify);
return ret;
}
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = fence_default_wait,
- .release = NULL
+ .wait = dma_fence_default_wait,
+ .release = nouveau_fence_release
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..ccdce1b4eec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <nvif/notify.h>
struct nouveau_drm;
struct nouveau_bo;
struct nouveau_fence {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nvkm_vma vma;
struct nvkm_vma vma_gart;
- struct nvkm_vma dispc_vma[4];
};
struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
u32 *suspend;
};
-u64 nv84_fence_crtc(struct nouveau_channel *, int);
int nv84_fence_context_new(struct nouveau_channel *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 72e2399bce39..201b52b750dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
@@ -369,7 +369,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int trycnt = 0;
- int ret, i;
+ int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
@@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
@@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (no_wait)
- ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
- else {
- long lret;
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
- if (!lret)
- ret = -EBUSY;
- else if (lret > 0)
- ret = 0;
- else
- ret = lret;
- }
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
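
The cpu_prep rework above folds the old no_wait special case into a single reservation_object_wait_timeout_rcu() call (timeout 0 when NOWAIT is requested, otherwise 30*HZ) and then maps the long result onto the ioctl's int return. A minimal standalone sketch of that mapping, userspace C rather than kernel code:

    #include <errno.h>
    #include <stdio.h>

    /* Map a wait_timeout()-style result (0 = timed out, >0 = signalled with
     * time to spare, <0 = error) onto an ioctl-style int return code, the way
     * the rewritten cpu_prep path above does.  Sketch only, not kernel code. */
    static int map_wait_result(long lret)
    {
        if (!lret)
            return -EBUSY;      /* nothing signalled within the timeout */
        if (lret > 0)
            return 0;           /* fence(s) signalled */
        return (int)lret;       /* propagate the error, e.g. -EINTR */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               map_wait_result(0),        /* -> -EBUSY */
               map_wait_result(42),       /* ->  0     */
               map_wait_result(-EINTR));  /* -> -EINTR */
        return 0;
    }
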
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 000000000000..3e2f1b6cd4df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+ u32 div, duty;
+
+ div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
+ duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+ if (div > 0)
+ return duty * LED_FULL / div;
+ else
+ return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+
+ u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+ u32 freq = 100; /* this is what nvidia uses and it should be good enough */
+ u32 div, duty;
+
+ div = input_clk / freq;
+ duty = value * div / LED_FULL;
+
+ /* for now, it is safe to poke these registers directly because:
+ * - A: nvidia never puts the logo LED on any other PWM controller
+ * than PDISPLAY.SOR[1].PWM.
+ * - B: nouveau does not touch these registers anywhere else
+ */
+ nvif_wr32(device, 0x61c880, div);
+ nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct dcb_gpio_func logo_led;
+ int ret;
+
+ if (!gpio)
+ return 0;
+
+ /* check that there is a GPIO controlling the logo LED */
+ if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+ return 0;
+
+ drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+ if (!drm->led)
+ return -ENOMEM;
+ drm->led->dev = dev;
+
+ drm->led->led.name = "nvidia-logo";
+ drm->led->led.max_brightness = 255;
+ drm->led->led.brightness_get = nouveau_led_get_brightness;
+ drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+ ret = led_classdev_register(dev->dev, &drm->led->led);
+ if (ret) {
+ kfree(drm->led);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led) {
+ led_classdev_unregister(&drm->led->led);
+ kfree(drm->led);
+ drm->led = NULL;
+ }
+}
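
The two brightness callbacks are a straight PWM duty-cycle conversion: set computes div = input_clk / freq and duty = value * div / LED_FULL, and get inverts it as duty * LED_FULL / div. A standalone sketch of the round trip (LED_FULL assumed to be 255, matching the max_brightness set above):

    #include <stdio.h>
    #include <stdint.h>

    #define LED_FULL 255u              /* max brightness in the LED class */

    /* Forward conversion, as in nouveau_led_set_brightness(). */
    static void brightness_to_pwm(uint32_t value, uint32_t input_clk,
                                  uint32_t freq, uint32_t *div, uint32_t *duty)
    {
        *div  = input_clk / freq;          /* PWM period in input clocks */
        *duty = value * *div / LED_FULL;   /* on-time proportional to value */
    }

    /* Inverse, as in nouveau_led_get_brightness(). */
    static uint32_t pwm_to_brightness(uint32_t div, uint32_t duty)
    {
        return div ? duty * LED_FULL / div : 0;
    }

    int main(void)
    {
        uint32_t div, duty;
        brightness_to_pwm(128, 27000000, 100, &div, &duty);
        printf("div=%u duty=%u back=%u\n", div, duty,
               pwm_to_brightness(div, duty));
        /* prints: div=270000 duty=135529 back=127 (integer rounding) */
        return 0;
    }
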
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 000000000000..187ecdb82002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+ struct drm_device *dev;
+
+ struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int nouveau_led_init(struct drm_device *dev) { return 0; }
+static inline void nouveau_led_suspend(struct drm_device *dev) { }
+static inline void nouveau_led_resume(struct drm_device *dev) { }
+static inline void nouveau_led_fini(struct drm_device *dev) { }
+#endif
+
+#endif
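
The header follows the usual compile-out pattern: when CONFIG_LEDS_CLASS is disabled, the entry points collapse to empty static inlines so callers need no #ifdefs of their own. A minimal userspace analogue of the pattern (FEATURE_X is a made-up stand-in for the config symbol):

    #include <stdio.h>

    /* Toggle at build time, e.g. cc -DFEATURE_X ... */
    #ifdef FEATURE_X
    static int feature_init(void)  { puts("feature up");   return 0; }
    static void feature_fini(void) { puts("feature down"); }
    #else
    /* Stubs: callers compile and link unchanged when the feature is off. */
    static inline int feature_init(void)  { return 0; }
    static inline void feature_fini(void) { }
    #endif

    int main(void)
    {
        if (feature_init())
            return 1;
        feature_fini();
        return 0;
    }
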
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index da8fd5ff9d0f..6a2b187e3c3b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,7 +30,7 @@ int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -50,7 +50,7 @@ int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -77,7 +77,7 @@ int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t fg;
uint32_t bg;
@@ -133,7 +133,7 @@ int
nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->device;
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
priv->base.contexts = 15;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..2998bde29211 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -57,16 +57,13 @@ void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
- int i;
nouveau_fence_context_del(&fctx->base);
- for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
- nvif_object_fini(&fctx->head[i]);
nvif_object_fini(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
-int
+static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx;
@@ -107,7 +104,7 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index a87259f3983a..b7a508585304 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -7,7 +7,6 @@
struct nv10_fence_chan {
struct nouveau_fence_chan base;
struct nvif_object sema;
- struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7d0edcbcfca7..2c2c64507661 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -25,10 +25,12 @@
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -38,6 +40,7 @@
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
+#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -46,6 +49,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -61,6 +65,227 @@
#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
+#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
+#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
+
+/******************************************************************************
+ * Atomic state
+ *****************************************************************************/
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+
+struct nv50_atom {
+ struct drm_atomic_state state;
+
+ struct list_head outp;
+ bool lock_core;
+ bool flush_disable;
+};
+
+struct nv50_outp_atom {
+ struct list_head head;
+
+ struct drm_encoder *encoder;
+ bool flush_disable;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ctrl:1;
+ };
+ u8 mask;
+ } set;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+ struct drm_crtc_state state;
+
+ struct {
+ u16 iW;
+ u16 iH;
+ u16 oW;
+ u16 oH;
+ } view;
+
+ struct nv50_head_mode {
+ bool interlace;
+ u32 clock;
+ struct {
+ u16 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ } h;
+ struct {
+ u32 active;
+ u16 synce;
+ u16 blanke;
+ u16 blanks;
+ u16 blank2s;
+ u16 blank2e;
+ u16 blankus;
+ } v;
+ } mode;
+
+ struct {
+ u32 handle;
+ u64 offset:40;
+ } lut;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } core;
+
+ struct {
+ bool visible;
+ u32 handle;
+ u64 offset:40;
+ u8 layout:1;
+ u8 format:1;
+ } curs;
+
+ struct {
+ u8 depth;
+ u8 cpp;
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+ } base;
+
+ struct {
+ u8 cpp;
+ } ovly;
+
+ struct {
+ bool enable:1;
+ u8 bits:2;
+ u8 mode:4;
+ } dither;
+
+ struct {
+ struct {
+ u16 cos:12;
+ u16 sin:12;
+ } sat;
+ } procamp;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool core:1;
+ bool curs:1;
+ bool view:1;
+ bool mode:1;
+ bool base:1;
+ bool ovly:1;
+ bool dither:1;
+ bool procamp:1;
+ };
+ u16 mask;
+ } set;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(statec))
+ return (void *)statec;
+ return nv50_head_atom(statec);
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+ struct drm_plane_state state;
+ u8 interval;
+
+ struct drm_rect clip;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ bool awaken:1;
+ } ntfy;
+
+ struct {
+ u32 handle;
+ u16 offset:12;
+ u32 acquire;
+ u32 release;
+ } sema;
+
+ struct {
+ u8 enable:2;
+ } lut;
+
+ struct {
+ u8 mode:2;
+ u8 interval:4;
+
+ u8 format;
+ u8 kind:7;
+ u8 layout:1;
+ u8 block:4;
+ u32 pitch:20;
+ u16 w;
+ u16 h;
+
+ u32 handle;
+ u64 offset;
+ } image;
+
+ struct {
+ u16 x;
+ u16 y;
+ } point;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ };
+ u8 mask;
+ } clr;
+
+ union {
+ struct {
+ bool ntfy:1;
+ bool sema:1;
+ bool image:1;
+ bool lut:1;
+ bool point:1;
+ };
+ u8 mask;
+ } set;
+};
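
Both atom structures above track pending hardware state with the same trick: an anonymous union overlays per-field one-bit flags with a single mask, so individual check paths set one flag while the commit path can test or clear the whole set at once. A small standalone illustration of the pattern (field names invented; exact mask values depend on bitfield layout):

    #include <stdio.h>
    #include <stdint.h>

    struct dirty {
        union {
            struct {
                unsigned image:1;   /* set by the image path */
                unsigned ntfy:1;    /* set by the notifier path */
                unsigned sema:1;    /* set by the semaphore path */
            };
            uint8_t mask;           /* whole-set view for the commit path */
        } set;
    };

    int main(void)
    {
        struct dirty d = { .set.mask = 0 };
        d.set.image = 1;                /* individual paths flag their field */
        d.set.sema  = 1;
        if (d.set.mask)                 /* commit path: anything to flush? */
            printf("flush needed, mask=0x%02x\n", (unsigned)d.set.mask);
        d.set.mask = 0;                 /* ...and clears everything at once */
        return 0;
    }
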
/******************************************************************************
* EVO channel
@@ -133,34 +358,6 @@ nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
}
/******************************************************************************
- * Cursor Immediate
- *****************************************************************************/
-
-struct nv50_curs {
- struct nv50_pioc base;
-};
-
-static int
-nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
- int head, struct nv50_curs *curs)
-{
- struct nv50_disp_cursor_v0 args = {
- .head = head,
- };
- static const s32 oclass[] = {
- GK104_DISP_CURSOR,
- GF110_DISP_CURSOR,
- GT214_DISP_CURSOR,
- G82_DISP_CURSOR,
- NV50_DISP_CURSOR,
- 0
- };
-
- return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
- &curs->base);
-}
-
-/******************************************************************************
* Overlay Immediate
*****************************************************************************/
@@ -192,6 +389,11 @@ nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
* DMA EVO channel
*****************************************************************************/
+struct nv50_dmac_ctxdma {
+ struct list_head head;
+ struct nvif_object object;
+};
+
struct nv50_dmac {
struct nv50_chan base;
dma_addr_t handle;
@@ -199,6 +401,7 @@ struct nv50_dmac {
struct nvif_object sync;
struct nvif_object vram;
+ struct list_head ctxdma;
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
@@ -207,9 +410,82 @@ struct nv50_dmac {
};
static void
+nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
+{
+ nvif_object_fini(&ctxdma->object);
+ list_del(&ctxdma->head);
+ kfree(ctxdma);
+}
+
+static struct nv50_dmac_ctxdma *
+nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nv50_dmac_ctxdma *ctxdma;
+ const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u32 handle = 0xfb000000 | kind;
+ struct {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf119_dma_v0 gf119;
+ };
+ } args = {};
+ u32 argc = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
+ if (ctxdma->object.handle == handle)
+ return ctxdma;
+ }
+
+ if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ list_add(&ctxdma->head, &dmac->ctxdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = 0;
+ args.base.limit = drm->device.info.ram_user - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ argc += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ argc += sizeof(args.gf100);
+ } else {
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ argc += sizeof(args.gf119);
+ }
+
+ ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
+ &args, argc, &ctxdma->object);
+ if (ret) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ return ERR_PTR(ret);
+ }
+
+ return ctxdma;
+}
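
nv50_dmac_ctxdma_new() caches one ctxdma object per DMA channel, keyed by a handle derived from the buffer's tiling kind (0xfb000000 | kind), so flips between buffers of the same kind reuse the object instead of re-allocating it. Reduced to a standalone lookup-or-create list sketch (all names invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct ctx {
        struct ctx *next;
        uint32_t handle;
    };

    /* Return the cached entry for `handle`, creating it on first use --
     * the same lookup-or-create walk done over dmac->ctxdma above. */
    static struct ctx *ctx_get(struct ctx **head, uint32_t handle)
    {
        for (struct ctx *c = *head; c; c = c->next)
            if (c->handle == handle)
                return c;                 /* cache hit */

        struct ctx *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->handle = handle;
        c->next = *head;                  /* cache miss: insert at head */
        *head = c;
        return c;
    }

    int main(void)
    {
        struct ctx *cache = NULL;
        uint8_t kind = 0x7a;                          /* example tiling kind */
        struct ctx *a = ctx_get(&cache, 0xfb000000 | kind);
        struct ctx *b = ctx_get(&cache, 0xfb000000 | kind);
        printf("same object: %s\n", a == b ? "yes" : "no");
        return 0;
    }
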
+
+static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
struct nvif_device *device = dmac->base.device;
+ struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
+
+ list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
+ nv50_dmac_ctxdma_del(ctxdma);
+ }
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
@@ -278,6 +554,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
+ INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
@@ -297,7 +574,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
- GP104_DISP_CORE_CHANNEL_DMA,
+ GP102_DISP_CORE_CHANNEL_DMA,
GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
@@ -381,34 +658,23 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
- struct nouveau_bo *image;
- struct nv50_curs curs;
- struct nv50_sync sync;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
-#define nv50_curs(c) (&nv50_head(c)->curs)
-#define nv50_sync(c) (&nv50_head(c)->sync)
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
-struct nv50_fbdma {
- struct list_head head;
- struct nvif_object core;
- struct nvif_object base[4];
-};
-
struct nv50_disp {
struct nvif_object *disp;
struct nv50_mast mast;
- struct list_head fbdma;
-
struct nouveau_bo *sync;
+
+ struct mutex mutex;
};
static struct nv50_disp *
@@ -419,12 +685,6 @@ nv50_disp(struct drm_device *dev)
#define nv50_mast(d) (&nv50_disp(d)->mast)
-static struct drm_crtc *
-nv50_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
@@ -463,812 +723,1465 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
-#if 1
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-#else
#define evo_mthd(p,m,s) do { \
const u32 _m = (m), _s = (s); \
- printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
*((p)++) = ((_s << 18) | _m); \
} while(0)
+
#define evo_data(p,d) do { \
const u32 _d = (d); \
- printk(KERN_ERR "\t%08x\n", _d); \
+ if (drm_debug & DRM_UT_KMS) \
+ printk(KERN_ERR "\t%08x\n", _d); \
*((p)++) = _d; \
} while(0)
-#endif
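
The evo_mthd()/evo_data() rewrite above drops the compile-time #if in favour of a runtime drm_debug check; the header word itself is unchanged: the payload word count goes in bits 18 and up, the method offset in the low bits. A standalone sketch of that packing (plain C, not the driver macros):

    #include <stdio.h>
    #include <stdint.h>

    /* EVO-style method header: payload word count in bits 18+, method offset
     * in the low bits.  Sketch only; the driver emits these via
     * evo_mthd()/evo_data() into a DMA push buffer. */
    static uint32_t evo_header(uint32_t method, uint32_t count)
    {
        return (count << 18) | method;
    }

    int main(void)
    {
        uint32_t push[4];
        unsigned n = 0;

        push[n++] = evo_header(0x0080, 1);   /* UPDATE method, one data word */
        push[n++] = 0x00000000;              /* its payload */

        for (unsigned i = 0; i < n; i++)
            printf("%08x\n", push[i]);
        return 0;
    }
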
-static bool
-evo_sync_wait(void *data)
+/******************************************************************************
+ * Plane
+ *****************************************************************************/
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+
+struct nv50_wndw {
+ const struct nv50_wndw_func *func;
+ struct nv50_dmac *dmac;
+
+ struct drm_plane plane;
+
+ struct nvif_notify notify;
+ u16 ntfy;
+ u16 sema;
+ u32 data;
+};
+
+struct nv50_wndw_func {
+ void *(*dtor)(struct nv50_wndw *);
+ int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh);
+ void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw);
+
+ void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*sema_clr)(struct nv50_wndw *);
+ void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*image_clr)(struct nv50_wndw *);
+ void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+
+static int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
- return true;
- usleep_range(1, 2);
- return false;
+ if (asyw->set.ntfy)
+ return wndw->func->ntfy_wait_begun(wndw, asyw);
+ return 0;
}
-static int
-evo_sync(struct drm_device *dev)
+static u32
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+ struct nv50_wndw_atom *asyw)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- u32 *push = evo_wait(mast, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
- if (nvif_msec(device, 2000,
- if (evo_sync_wait(disp->sync))
- break;
- ) >= 0)
- return 0;
+ if (asyw->clr.sema && (!asyw->set.sema || flush))
+ wndw->func->sema_clr(wndw);
+ if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
+ wndw->func->ntfy_clr(wndw);
+ if (asyw->clr.image && (!asyw->set.image || flush))
+ wndw->func->image_clr(wndw);
+
+ return flush ? wndw->func->update(wndw, interlock) : 0;
+}
+
+static u32
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+ struct nv50_wndw_atom *asyw)
+{
+ if (interlock) {
+ asyw->image.mode = 0;
+ asyw->image.interval = 1;
}
- return -EBUSY;
+ if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+ if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+ if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+ if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
+ if (asyw->set.point) wndw->func->point (wndw, asyw);
+
+ return wndw->func->update(wndw, interlock);
}
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- return nv50_disp(dev)->sync;
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+ wndw->func->release(wndw, asyw, asyh);
+ asyw->ntfy.handle = 0;
+ asyw->sema.handle = 0;
}
-struct nv50_display_flip {
- struct nv50_disp *disp;
- struct nv50_sync *chan;
-};
-
-static bool
-nv50_display_flip_wait(void *data)
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+ struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_display_flip *flip = data;
- if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
- return true;
- usleep_range(1, 2);
- return false;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ int ret;
+
+ NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+ asyw->clip.x1 = 0;
+ asyw->clip.y1 = 0;
+ asyw->clip.x2 = asyh->state.mode.hdisplay;
+ asyw->clip.y2 = asyh->state.mode.vdisplay;
+
+ asyw->image.w = fb->base.width;
+ asyw->image.h = fb->base.height;
+ asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ if (asyw->image.kind) {
+ asyw->image.layout = 0;
+ if (drm->device.info.chipset >= 0xc0)
+ asyw->image.block = fb->nvbo->tile_mode >> 4;
+ else
+ asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+ } else {
+ asyw->image.layout = 1;
+ asyw->image.block = 0;
+ asyw->image.pitch = fb->base.pitches[0];
+ }
+
+ ret = wndw->func->acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+
+ if (asyw->set.image) {
+ if (!(asyw->image.mode = asyw->interval ? 0 : 1))
+ asyw->image.interval = asyw->interval;
+ else
+ asyw->image.interval = 0;
+ }
+
+ return 0;
}
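
The image.mode/image.interval selection at the end of the acquire path is terse: a non-zero swap interval picks the vsynced flip mode (0) and keeps the interval, while interval 0 picks the immediate mode (1). Unfolded into a standalone helper (mode numbers taken from the code above, everything else invented):

    #include <stdio.h>

    struct image_cfg { unsigned mode, interval; };

    /* Equivalent of:  if (!(mode = interval ? 0 : 1)) image.interval = interval;
     *                 else image.interval = 0;                                 */
    static struct image_cfg pick_flip_mode(unsigned swap_interval)
    {
        struct image_cfg cfg;

        if (swap_interval) {
            cfg.mode = 0;                  /* vsynced flip */
            cfg.interval = swap_interval;  /* wait this many vblanks */
        } else {
            cfg.mode = 1;                  /* immediate flip */
            cfg.interval = 0;
        }
        return cfg;
    }

    int main(void)
    {
        for (unsigned i = 0; i < 3; i++) {
            struct image_cfg c = pick_flip_mode(i);
            printf("interval %u -> mode %u, hw interval %u\n",
                   i, c.mode, c.interval);
        }
        return 0;
    }
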
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
+static int
+nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
- struct nv50_display_flip flip = {
- .disp = nv50_disp(crtc->dev),
- .chan = nv50_sync(crtc),
- };
- u32 *push;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *harm = NULL, *asyh = NULL;
+ bool varm = false, asyv = false, asym = false;
+ int ret;
- push = evo_wait(flip.chan, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, flip.chan);
+ NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+ if (asyw->state.crtc) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+ asym = drm_atomic_crtc_needs_modeset(&asyh->state);
+ asyv = asyh->state.active;
}
- nvif_msec(device, 2000,
- if (nv50_display_flip_wait(&flip))
- break;
- );
+ if (armw->state.crtc) {
+ harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+ if (IS_ERR(harm))
+ return PTR_ERR(harm);
+ varm = harm->state.crtc->state->active;
+ }
+
+ if (asyv) {
+ asyw->point.x = asyw->state.crtc_x;
+ asyw->point.y = asyw->state.crtc_y;
+ if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+ asyw->set.point = true;
+
+ if (!varm || asym || armw->state.fb != asyw->state.fb) {
+ ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
+ } else
+ if (varm) {
+ nv50_wndw_atomic_check_release(wndw, asyw, harm);
+ } else {
+ return 0;
+ }
+
+ if (!asyv || asym) {
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle != 0;
+ asyw->set.lut = wndw->func->lut && asyv;
+ }
+
+ return 0;
}
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_head *head = nv50_head(crtc);
- struct nv50_sync *sync = nv50_sync(crtc);
- u32 *push;
- int ret;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
- if (crtc->primary->fb->width != fb->width ||
- crtc->primary->fb->height != fb->height)
- return -EINVAL;
+ NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+ if (!old_state->fb)
+ return;
+
+ nouveau_bo_unpin(fb->nvbo);
+}
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
- if (chan == NULL)
- evo_sync(crtc->dev);
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_head_atom *asyh;
+ struct nv50_dmac_ctxdma *ctxdma;
+ int ret;
- push = evo_wait(sync, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ if (!asyw->state.fb)
+ return 0;
- if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ return ret;
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->addr ^ 0x10);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, sync->data + 1);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->addr);
- OUT_RING (chan, sync->data);
- } else
- if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
+ ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
- } else
- if (chan) {
- u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
+ asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->image.handle = ctxdma->object.handle;
+ asyw->image.offset = fb->nvbo->bo.offset;
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr ^ 0x10));
- OUT_RING (chan, lower_32_bits(addr ^ 0x10));
- OUT_RING (chan, sync->data + 1);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(addr));
- OUT_RING (chan, lower_32_bits(addr));
- OUT_RING (chan, sync->data);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- }
-
- if (chan) {
- sync->addr ^= 0x10;
- sync->data++;
- FIRE_RING (chan);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->addr);
- evo_data(push, sync->data++);
- evo_data(push, sync->data);
- evo_data(push, sync->base.sync.handle);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_handle);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
- evo_mthd(push, 0x0800, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- } else {
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
+ if (wndw->func->prepare) {
+ asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+ if (IS_ERR(asyh))
+ return PTR_ERR(asyh);
+
+ wndw->func->prepare(wndw, asyh, asyw);
}
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, sync);
- nouveau_bo_ref(nv_fb->nvbo, &head->image);
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ __drm_atomic_helper_plane_destroy_state(&asyw->state);
+ dma_fence_put(asyw->state.fence);
+ kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw_atom *asyw;
+ if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+ asyw->state.fence = NULL;
+ asyw->interval = 1;
+ asyw->sema = armw->sema;
+ asyw->ntfy = armw->ntfy;
+ asyw->image = armw->image;
+ asyw->point = armw->point;
+ asyw->lut = armw->lut;
+ asyw->clr.mask = 0;
+ asyw->set.mask = 0;
+ return &asyw->state;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+ struct nv50_wndw_atom *asyw;
+
+ if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+ return;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+ plane->state = &asyw->state;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_ROTATE_0;
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ void *data;
+ nvif_notify_fini(&wndw->notify);
+ data = wndw->func->dtor(wndw);
+ drm_plane_cleanup(&wndw->plane);
+ kfree(data);
+}
+
+static const struct drm_plane_funcs
+nv50_wndw = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = nv50_wndw_destroy,
+ .reset = nv50_wndw_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+};
+
+static void
+nv50_wndw_fini(struct nv50_wndw *wndw)
+{
+ nvif_notify_put(&wndw->notify);
+}
+
+static void
+nv50_wndw_init(struct nv50_wndw *wndw)
+{
+ nvif_notify_get(&wndw->notify);
+}
+
+static int
+nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
+ enum drm_plane_type type, const char *name, int index,
+ struct nv50_dmac *dmac, const u32 *format, int nformat,
+ struct nv50_wndw *wndw)
+{
+ int ret;
+
+ wndw->func = func;
+ wndw->dmac = dmac;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, format,
+ nformat, type, "%s-%d", name, index);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
return 0;
}
/******************************************************************************
- * CRTC
+ * Cursor plane
*****************************************************************************/
+#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
+
+struct nv50_curs {
+ struct nv50_wndw wndw;
+ struct nvif_object chan;
+};
+
+static u32
+nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0080, 0x00000000);
+ return 0;
+}
+
+static void
+nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
+}
+
+static void
+nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+ struct nv50_wndw_atom *asyw)
+{
+ asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+ asyh->curs.offset = asyw->image.offset;
+ asyh->set.curs = asyh->curs.visible;
+}
+
+static void
+nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->curs.visible = false;
+}
+
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
+ int ret;
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.primary->fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ asyh->curs.visible = asyw->state.visible;
+ if (ret || !asyh->curs.visible)
+ return ret;
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
+ switch (asyw->state.fb->width) {
+ case 32: asyh->curs.layout = 0; break;
+ case 64: asyh->curs.layout = 1; break;
+ default:
+ return -EINVAL;
}
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
- evo_data(push, mode);
- } else
- if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- } else {
- evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
- evo_data(push, mode);
- }
+ if (asyw->state.fb->width != asyw->state.fb->height)
+ return -EINVAL;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ switch (asyw->state.fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
return 0;
}
+static void *
+nv50_curs_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_curs *curs = nv50_curs(wndw);
+ nvif_object_fini(&curs->chan);
+ return curs;
+}
+
+static const u32
+nv50_curs_format[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct nv50_wndw_func
+nv50_curs = {
+ .dtor = nv50_curs_dtor,
+ .acquire = nv50_curs_acquire,
+ .release = nv50_curs_release,
+ .prepare = nv50_curs_prepare,
+ .point = nv50_curs_point,
+ .update = nv50_curs_update,
+};
+
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_curs **pcurs)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
+ static const struct nvif_mclass curses[] = {
+ { GK104_DISP_CURSOR, 0 },
+ { GF110_DISP_CURSOR, 0 },
+ { GT214_DISP_CURSOR, 0 },
+ { G82_DISP_CURSOR, 0 },
+ { NV50_DISP_CURSOR, 0 },
+ {}
+ };
+ struct nv50_disp_cursor_v0 args = {
+ .head = head->base.index,
+ };
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_curs *curs;
+ int cid, ret;
+
+ cid = nvif_mclass(disp->disp, curses);
+ if (cid < 0) {
+ NV_ERROR(drm, "No supported cursor immediate class\n");
+ return cid;
+ }
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- mode = nv_connector->scaling_mode;
- if (nv_connector->scaling_full) /* non-EDID LVDS/eDP mode */
- mode = DRM_MODE_SCALE_FULLSCREEN;
+ if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
+ "curs", head->base.index, &disp->mast.base,
+ nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
+ &curs->wndw);
+ if (ret) {
+ kfree(curs);
+ return ret;
}
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
+ ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
+ sizeof(args), &curs->chan);
+ if (ret) {
+ NV_ERROR(drm, "curs%04x allocation failed: %d\n",
+ curses[cid].oclass, ret);
+ return ret;
+ }
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
+ return 0;
+}
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
+/******************************************************************************
+ * Primary plane
+ *****************************************************************************/
+#define nv50_base(p) container_of((p), struct nv50_base, wndw)
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
+struct nv50_base {
+ struct nv50_wndw wndw;
+ struct nv50_sync chan;
+ int id;
+};
+
+static int
+nv50_base_notify(struct nvif_notify *notify)
+{
+ return NVIF_NOTIFY_KEEP;
+}
+
+static void
+nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, asyw->lut.enable << 30);
+ evo_kick(push, &base->chan);
}
+}
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
+static void
+nv50_base_image_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 4))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
}
+}
- push = evo_wait(mast, 8);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- /*XXX: SCALE_CTRL_ACTIVE??? */
- evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ const s32 oclass = base->chan.base.base.user.oclass;
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 10))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, (asyw->image.mode << 8) |
+ (asyw->image.interval << 4));
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, asyw->image.handle);
+ if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, (asyw->image.kind << 16) |
+ (asyw->image.format << 8));
+ } else
+ if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 20) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
} else {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, asyw->image.offset >> 8);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+ evo_data(push, (asyw->image.layout << 24) |
+ asyw->image.pitch |
+ asyw->image.block);
+ evo_data(push, asyw->image.format << 8);
}
+ evo_kick(push, &base->chan);
+ }
+}
- evo_kick(push, mast);
+static void
+nv50_base_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x00a4, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
+}
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_flip_next(crtc, crtc->primary->fb,
- NULL, 1);
- }
+static void
+nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 3))) {
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
+ evo_data(push, asyw->ntfy.handle);
+ evo_kick(push, &base->chan);
}
+}
- return 0;
+static void
+nv50_base_sema_clr(struct nv50_wndw *wndw)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
+ if ((push = evo_wait(&base->chan, 2))) {
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &base->chan);
+ }
}
-static int
-nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+static void
+nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_base *base = nv50_base(wndw);
u32 *push;
+ if ((push = evo_wait(&base->chan, 5))) {
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, asyw->sema.offset);
+ evo_data(push, asyw->sema.acquire);
+ evo_data(push, asyw->sema.release);
+ evo_data(push, asyw->sema.handle);
+ evo_kick(push, &base->chan);
+ }
+}
- push = evo_wait(mast, 8);
- if (!push)
- return -ENOMEM;
+static u32
+nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
+{
+ struct nv50_base *base = nv50_base(wndw);
+ u32 *push;
- evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
- evo_data(push, usec);
- evo_kick(push, mast);
+ if (!(push = evo_wait(&base->chan, 2)))
+ return 0;
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, interlock);
+ evo_kick(push, &base->chan);
+
+ if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+ return interlock ? 2 << (base->id * 8) : 0;
+ return interlock ? 2 << (base->id * 4) : 0;
+}
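
nv50_base_update() returns the interlock bit the core channel must wait on for this base channel; the spacing of those bits depends on the display class, 8 bits per channel before GF110 and 4 bits afterwards. A tiny sketch of the mask computation, following the expression above:

    #include <stdio.h>

    /* Interlock bit for base channel `id`: older (pre-GF110) display spaces
     * channels every 8 bits, newer every 4. */
    static unsigned interlock_bit(int id, int pre_gf110)
    {
        return 2u << (id * (pre_gf110 ? 8 : 4));
    }

    int main(void)
    {
        for (int id = 0; id < 4; id++)
            printf("head %d: old 0x%08x  new 0x%08x\n",
                   id, interlock_bit(id, 1), interlock_bit(id, 0));
        return 0;
    }
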
+
+static int
+nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ if (nvif_msec(&drm->device, 2000ULL,
+ u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+ if ((data & 0xc0000000) == 0x40000000)
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ return -ETIMEDOUT;
return 0;
}
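
nv50_base_ntfy_wait_begun() polls the notifier word in the shared sync buffer until the top two bits read the "begun" status (0x40000000), giving up after two seconds. A userspace analogue of that poll-with-deadline loop (the status encoding is taken from the code above; the timing helpers are plain POSIX):

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>
    #include <errno.h>

    static uint64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll *word until its top two bits read "begun" (0x40000000) or the
     * timeout expires; mirrors the 2000ms nvif_msec() loop above. */
    static int wait_begun(volatile uint32_t *word, unsigned timeout_ms)
    {
        uint64_t deadline = now_ms() + timeout_ms;

        while ((*word & 0xc0000000) != 0x40000000) {
            if (now_ms() > deadline)
                return -ETIMEDOUT;
            nanosleep(&(struct timespec){ .tv_nsec = 2000 }, NULL); /* ~2us */
        }
        return 0;
    }

    int main(void)
    {
        volatile uint32_t ntfy = 0x40000000;  /* pretend the GPU wrote it */
        printf("wait_begun -> %d\n", wait_begun(&ntfy, 2000));
        return 0;
    }
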
+static void
+nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
+{
+ asyh->base.cpp = 0;
+}
+
static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push, hue, vib;
- int adj;
+ const u32 format = asyw->state.fb->pixel_format;
+ const struct drm_format_info *info;
+ int ret;
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+ info = drm_format_info(format);
+ if (!info || !info->depth)
+ return -EINVAL;
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- } else {
- evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (hue << 20) | (vib << 8));
- }
+ ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
+ asyh->base.depth = info->depth;
+ asyh->base.cpp = info->cpp[0];
+ asyh->base.x = asyw->state.src.x1 >> 16;
+ asyh->base.y = asyw->state.src.y1 >> 16;
+ asyh->base.w = asyw->state.fb->width;
+ asyh->base.h = asyw->state.fb->height;
+
+ switch (format) {
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
+ asyw->lut.enable = 1;
+ asyw->set.image = true;
return 0;
}
+static void *
+nv50_base_dtor(struct nv50_wndw *wndw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ struct nv50_base *base = nv50_base(wndw);
+ nv50_dmac_destroy(&base->chan.base, disp->disp);
+ return base;
+}
+
+static const u32
+nv50_base_format[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const struct nv50_wndw_func
+nv50_base = {
+ .dtor = nv50_base_dtor,
+ .acquire = nv50_base_acquire,
+ .release = nv50_base_release,
+ .sema_set = nv50_base_sema_set,
+ .sema_clr = nv50_base_sema_clr,
+ .ntfy_set = nv50_base_ntfy_set,
+ .ntfy_clr = nv50_base_ntfy_clr,
+ .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
+ .image_set = nv50_base_image_set,
+ .image_clr = nv50_base_image_clr,
+ .lut = nv50_base_lut,
+ .update = nv50_base_update,
+};
+
static int
-nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
+nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
+ struct nv50_base **pbase)
+{
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_base *base;
+ int ret;
+
+ if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
+ return -ENOMEM;
+ base->id = head->base.index;
+ base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
+ base->wndw.sema = EVO_FLIP_SEM0(base->id);
+ base->wndw.data = 0x00000000;
+
+ ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+ "base", base->id, &base->chan.base,
+ nv50_base_format, ARRAY_SIZE(nv50_base_format),
+ &base->wndw);
+ if (ret) {
+ kfree(base);
+ return ret;
+ }
+
+ ret = nv50_base_create(&drm->device, disp->disp, base->id,
+ disp->sync->bo.offset, &base->chan);
+ if (ret)
+ return ret;
+
+ return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
+ false,
+ NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) {},
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &base->wndw.notify);
+}
+
+/******************************************************************************
+ * Head
+ *****************************************************************************/
+static void
+nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+ else
+ evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->procamp.sat.sin << 20) |
+ (asyh->procamp.sat.cos << 8));
+ evo_kick(push, core);
+ }
+}
- push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
- evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_handle);
- }
- } else {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_handle);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- }
+static void
+nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+ else
+ if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+ else
+ evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+ evo_data(push, (asyh->dither.mode << 3) |
+ (asyh->dither.bits << 1) |
+ asyh->dither.enable);
+ evo_kick(push, core);
+ }
+}
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+static void
+nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
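+	/* The usage bounds tell the hardware the worst-case pixel depth
+	 * (derived from the framebuffer cpp) the overlay may use on this head.
+	 */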
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
}
- nv_crtc->fb.handle = nvfb->r_handle;
- return 0;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- } else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 bounds = 0;
+ u32 *push;
+
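+	/* As above, but for the base channel's usage bounds. */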
+ if (asyh->base.cpp) {
+ switch (asyh->base.cpp) {
+ case 8: bounds |= 0x00000500; break;
+ case 4: bounds |= 0x00000300; break;
+ case 2: bounds |= 0x00000100; break;
+ case 1: bounds |= 0x00000000; break;
+ default:
+ WARN_ON(1);
+ break;
}
- evo_kick(push, mast);
+ bounds |= 0x00000001;
+ }
+
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+ evo_data(push, bounds);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = true;
}
static void
-nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+nv50_head_curs_clr(struct nv50_head *head)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- u32 *push = evo_wait(mast, 16);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, mast);
+ evo_kick(push, core);
}
- nv_crtc->cursor.visible = false;
}
static void
-nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
-
- if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
- nv50_crtc_cursor_show(nv_crtc);
- else
- nv50_crtc_cursor_hide(nv_crtc);
-
- if (update) {
- u32 *push = evo_wait(mast, 2);
- if (push) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, mast);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 5))) {
+ if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ } else
+ if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+ evo_data(push, asyh->curs.handle);
+ } else {
+ evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+ evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+ (asyh->curs.format << 24));
+ evo_data(push, asyh->curs.offset >> 8);
+ evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+ evo_data(push, asyh->curs.handle);
}
+ evo_kick(push, core);
}
}
static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+nv50_head_core_clr(struct nv50_head *head)
{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+ evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
+ else
+ evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
}
static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
+nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
+ if ((push = evo_wait(core, 9))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.kind << 16 |
+ asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ /* EVO will complain with INVALID_STATE if we have an
+ * active cursor and (re)specify HeadSetContextDmaIso
+ * without also updating HeadSetOffsetCursor.
+ */
+ asyh->set.curs = asyh->curs.visible;
+ } else
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 20 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ } else {
+ evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+ evo_data(push, asyh->core.offset >> 8);
+ evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+ evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+ evo_data(push, asyh->core.layout << 24 |
+ (asyh->core.pitch >> 8) << 8 |
+ asyh->core.block);
+ evo_data(push, asyh->core.format << 8);
+ evo_data(push, asyh->core.handle);
+ evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+ evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+ }
+ evo_kick(push, core);
+ }
+}
- nv50_display_flip_stop(crtc);
-
- push = evo_wait(mast, 6);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+static void
+nv50_head_lut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
evo_data(push, 0x40000000);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, 0x00000000);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
evo_data(push, 0x00000000);
}
-
- evo_kick(push, mast);
+ evo_kick(push, core);
}
-
- nv50_crtc_cursor_show_hide(nv_crtc, false, false);
}
static void
-nv50_crtc_commit(struct drm_crtc *crtc)
+nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
u32 *push;
-
- push = evo_wait(mast, 32);
- if (push) {
- if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if ((push = evo_wait(core, 7))) {
+ if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
} else
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
evo_data(push, 0xc0000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, mast->base.vram.handle);
+ evo_data(push, asyh->lut.offset >> 8);
+ evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+ evo_data(push, asyh->lut.handle);
} else {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.handle);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, mast->base.vram.handle);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+ evo_data(push, asyh->lut.handle);
+ }
+ evo_kick(push, core);
+ }
+}
+
+static void
+nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 14))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+ evo_data(push, 0x00800000 | m->clock);
+ evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+ evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_data(push, asyh->mode.v.blankus);
+ evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+ evo_data(push, 0x00000000); /* ??? */
evo_data(push, 0xffffff00);
+ evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+ evo_data(push, m->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, m->clock * 1000);
}
+ evo_kick(push, core);
+ }
+}
- evo_kick(push, mast);
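+/* Program the head's scaler with the input (mode) and output (viewport)
+ * sizes computed by nv50_head_atomic_check_view().
+ */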
+static void
+nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+ u32 *push;
+ if ((push = evo_wait(core, 10))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ } else {
+ evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+ evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+ evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+ }
+ evo_kick(push, core);
}
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
+static void
+nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+{
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_lut_clr(head);
+ if (asyh->clr.core && (!asyh->set.core || y))
+ nv50_head_core_clr(head);
+ if (asyh->clr.curs && (!asyh->set.curs || y))
+ nv50_head_curs_clr(head);
}
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
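+/* Push every piece of head state flagged in asyh->set out to the core
+ * channel.
+ */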
+static void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
+ if (asyh->set.view ) nv50_head_view (head, asyh);
+ if (asyh->set.mode ) nv50_head_mode (head, asyh);
+ if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.core ) nv50_head_core_set(head, asyh);
+ if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
+ if (asyh->set.base ) nv50_head_base (head, asyh);
+ if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
+ if (asyh->set.dither ) nv50_head_dither (head, asyh);
+ if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
-static int
-nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
- struct nv50_head *head = nv50_head(crtc);
- int ret;
+ const int vib = asyc->procamp.color_vibrance - 100;
+ const int hue = asyc->procamp.vibrant_hue - 90;
+ const int adj = (vib > 0) ? 50 : 0;
+ asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+ asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+ asyh->set.procamp = true;
+}
+
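+/* Resolve the connector's dithering properties (including the AUTO mode
+ * and depth settings) into the enable/bits/mode fields programmed by
+ * nv50_head_dither().
+ */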
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
+{
+ struct drm_connector *connector = asyc->state.connector;
+ u32 mode = 0x00;
+
+ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+ if (asyh->base.depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = asyc->dither.mode;
+ }
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
- if (ret == 0) {
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(nvfb->nvbo, &head->image);
+ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= asyc->dither.depth;
}
- return ret;
+ asyh->dither.enable = mode;
+ asyh->dither.bits = mode >> 1;
+ asyh->dither.mode = mode >> 3;
+ asyh->set.dither = true;
}
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh,
+ struct nouveau_conn_atom *asyc)
{
- struct nv50_mast *mast = nv50_mast(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1, vblankus = 0;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- /* XXX: Safe underestimate, even "0" works */
- vblankus = (vactive - mode->vdisplay - 2) * hactive;
- vblankus *= 1000;
- vblankus /= mode->clock;
+ struct drm_connector *connector = asyc->state.connector;
+ struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+ struct drm_display_mode *umode = &asyh->state.mode;
+ int mode = asyc->scaler.mode;
+ struct edid *edid;
+
+ if (connector->edid_blob_ptr)
+ edid = (struct edid *)connector->edid_blob_ptr->data;
+ else
+ edid = NULL;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
+ if (!asyc->scaler.full) {
+ if (mode == DRM_MODE_SCALE_NONE)
+ omode = umode;
+ } else {
+ /* Non-EDID LVDS/eDP mode. */
+ mode = DRM_MODE_SCALE_FULLSCREEN;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
+ asyh->view.iW = umode->hdisplay;
+ asyh->view.iH = umode->vdisplay;
+ asyh->view.oW = omode->hdisplay;
+ asyh->view.oH = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ asyh->view.oH *= 2;
- push = evo_wait(mast, 64);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00800000 | mode->clock);
- evo_data(push, (ilace == 2) ? 2 : 0);
- evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+	/* Add overscan compensation if necessary; this keeps the aspect
+	 * ratio the same as the backend mode unless the user overrides it
+	 * by setting both the hborder and vborder properties.
+	 */
+ if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+ (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+ drm_detect_hdmi_monitor(edid)))) {
+ u32 bX = asyc->scaler.underscan.hborder;
+ u32 bY = asyc->scaler.underscan.vborder;
+ u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+ if (bX) {
+ asyh->view.oW -= (bX * 2);
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
} else {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
+ asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+ if (bY) asyh->view.oH -= (bY * 2);
+ else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
+ }
- evo_kick(push, mast);
+ /* Handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation.
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+ asyh->view.oH = min((u16)umode->vdisplay, asyh->view.oH);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (asyh->view.oH < asyh->view.oW) {
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
+ break;
+ default:
+ break;
}
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nv50_crtc_set_dither(nv_crtc, false);
- nv50_crtc_set_scale(nv_crtc, false);
+ asyh->set.view = true;
+}
- /* G94 only accepts this after setting scale */
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
- nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
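+/* Convert the DRM display mode into the raster timings (sync, blanking
+ * and the usec-of-vblank estimate) programmed by nv50_head_mode().
+ */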
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hbackp = mode->htotal - mode->hsync_end;
+ u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ u32 hfrontp = mode->hsync_start - mode->hdisplay;
+ u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ struct nv50_head_mode *m = &asyh->mode;
+
+ m->h.active = mode->htotal;
+ m->h.synce = mode->hsync_end - mode->hsync_start - 1;
+ m->h.blanke = m->h.synce + hbackp;
+ m->h.blanks = mode->htotal - hfrontp - 1;
+
+ m->v.active = mode->vtotal * vscan / ilace;
+ m->v.synce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ m->v.blanke = m->v.synce + vbackp;
+ m->v.blanks = m->v.active - vfrontp - 1;
+
+ /*XXX: Safe underestimate, even "0" works */
+ m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ m->v.blankus *= 1000;
+ m->v.blankus /= mode->clock;
- nv50_crtc_set_color_vibrance(nv_crtc, false);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
- return 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ m->v.blank2e = m->v.active + m->v.synce + vbackp;
+ m->v.blank2s = m->v.blank2e + (mode->vdisplay * vscan / ilace);
+ m->v.active = (m->v.active * 2) + 1;
+ m->interlace = true;
+ } else {
+ m->v.blank2e = 0;
+ m->v.blank2s = 1;
+ m->interlace = false;
+ }
+ m->clock = mode->clock;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ asyh->set.mode = true;
}
static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ struct nouveau_conn_atom *asyc = NULL;
+ struct drm_connector_state *conns;
+ struct drm_connector *conn;
+ int i;
- if (!crtc->primary->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
+ NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+ if (asyh->state.active) {
+ for_each_connector_in_state(asyh->state.state, conn, conns, i) {
+ if (conns->crtc == crtc) {
+ asyc = nouveau_conn_atom(conns);
+ break;
+ }
+ }
+
+ if (armh->state.active) {
+ if (asyc) {
+ if (asyh->state.mode_changed)
+ asyc->set.scaler = true;
+ if (armh->base.depth != asyh->base.depth)
+ asyc->set.dither = true;
+ }
+ } else {
+ asyc->set.mask = ~0;
+ asyh->set.mask = ~0;
+ }
+
+ if (asyh->state.mode_changed)
+ nv50_head_atomic_check_mode(head, asyh);
+
+ if (asyc) {
+ if (asyc->set.scaler)
+ nv50_head_atomic_check_view(armh, asyh, asyc);
+ if (asyc->set.dither)
+ nv50_head_atomic_check_dither(armh, asyh, asyc);
+ if (asyc->set.procamp)
+ nv50_head_atomic_check_procamp(armh, asyh, asyc);
+ }
+
+ if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+ asyh->core.x = asyh->base.x;
+ asyh->core.y = asyh->base.y;
+ asyh->core.w = asyh->base.w;
+ asyh->core.h = asyh->base.h;
+ } else
+ if ((asyh->core.visible = asyh->curs.visible)) {
+ /*XXX: We need to either find some way of having the
+ * primary base layer appear black, while still
+ * being able to display the other layers, or we
+ * need to allocate a dummy black surface here.
+ */
+ asyh->core.x = 0;
+ asyh->core.y = 0;
+ asyh->core.w = asyh->state.mode.hdisplay;
+ asyh->core.h = asyh->state.mode.vdisplay;
+ }
+ asyh->core.handle = disp->mast.base.vram.handle;
+ asyh->core.offset = 0;
+ asyh->core.format = 0xcf;
+ asyh->core.kind = 0;
+ asyh->core.layout = 1;
+ asyh->core.block = 0;
+ asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+ asyh->lut.handle = disp->mast.base.vram.handle;
+ asyh->lut.offset = head->base.lut.nvbo->bo.offset;
+ asyh->set.base = armh->base.cpp != asyh->base.cpp;
+ asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+ } else {
+ asyh->core.visible = false;
+ asyh->curs.visible = false;
+ asyh->base.cpp = 0;
+ asyh->ovly.cpp = 0;
}
- ret = nv50_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
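+	/* For fast (non-modeset) updates, only flag core/cursor state that
+	 * actually changed; a full modeset clears and reprograms both.
+	 */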
+ if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+ if (asyh->core.visible) {
+ if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+ asyh->set.core = true;
+ } else
+ if (armh->core.visible) {
+ asyh->clr.core = true;
+ }
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
- nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
- return 0;
-}
+ if (asyh->curs.visible) {
+ if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+ asyh->set.curs = true;
+ } else
+ if (armh->curs.visible) {
+ asyh->clr.curs = true;
+ }
+ } else {
+ asyh->clr.core = armh->core.visible;
+ asyh->clr.curs = armh->curs.visible;
+ asyh->set.core = asyh->core.visible;
+ asyh->set.curs = asyh->curs.visible;
+ }
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nv50_display_flip_stop(crtc);
- nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ if (asyh->clr.mask || asyh->set.mask)
+ nv50_atom(asyh->state.state)->lock_core = true;
return 0;
}
static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
+nv50_head_lut_load(struct drm_crtc *crtc)
{
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1292,64 +2205,95 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
-static void
-nv50_crtc_disable(struct drm_crtc *crtc)
+static int
+nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
{
- struct nv50_head *head = nv50_head(crtc);
- evo_sync(crtc->dev);
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
+ WARN_ON(1);
+ return 0;
}
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+ .mode_set_base_atomic = nv50_head_mode_set_base_atomic,
+ .load_lut = nv50_head_lut_load,
+ .atomic_check = nv50_head_atomic_check,
+};
+
+/* This is identical to the version in the atomic helpers, except that
+ * it supports non-vblanked ("async") page flips.
+ */
static int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
+nv50_head_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event, u32 flags)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_gem_object *gem = NULL;
- struct nouveau_bo *nvbo = NULL;
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
- if (handle) {
- if (width != 64 || height != 64)
- return -EINVAL;
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- gem = drm_gem_object_lookup(file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- if (ret == 0) {
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ /* Make sure we don't accidentally do a full modeset. */
+ state->allow_modeset = false;
+ if (!crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+ crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
}
- drm_gem_object_unreference_unlocked(gem);
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ nv50_wndw_atom(plane_state)->interval = 0;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
return ret;
-}
-static int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_curs *curs = nv50_curs(crtc);
- struct nv50_chan *chan = nv50_chan(curs);
- nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nvif_wr32(&chan->user, 0x0080, 0x00000000);
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
- nv_crtc->cursor_saved_x = x;
- nv_crtc->cursor_saved_y = y;
- return 0;
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
}
static int
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -1361,47 +2305,71 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
nv_crtc->lut.b[i] = b[i];
}
- nv50_crtc_lut_load(crtc);
-
+ nv50_head_lut_load(crtc);
return 0;
}
static void
-nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- nv50_crtc_cursor_move(&nv_crtc->base, x, y);
+ struct nv50_head_atom *asyh = nv50_head_atom(state);
+ __drm_atomic_helper_crtc_destroy_state(&asyh->state);
+ kfree(asyh);
+}
- nv50_crtc_cursor_show_hide(nv_crtc, true, true);
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+ struct nv50_head_atom *asyh;
+ if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+ asyh->view = armh->view;
+ asyh->mode = armh->mode;
+ asyh->lut = armh->lut;
+ asyh->core = armh->core;
+ asyh->curs = armh->curs;
+ asyh->base = armh->base;
+ asyh->ovly = armh->ovly;
+ asyh->dither = armh->dither;
+ asyh->procamp = armh->procamp;
+ asyh->clr.mask = 0;
+ asyh->set.mask = 0;
+ return &asyh->state;
+}
+
+static void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ crtc->state = state;
+ crtc->state->crtc = crtc;
}
static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
+nv50_head_reset(struct drm_crtc *crtc)
+{
+ struct nv50_head_atom *asyh;
+
+ if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
- struct nv50_fbdma *fbdma;
-
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- nvif_object_fini(&fbdma->base[nv_crtc->index]);
- }
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nv50_dmac_destroy(&head->sync.base, disp->disp);
- nv50_pioc_destroy(&head->curs.base);
-
- /*XXX: this shouldn't be necessary, but the core doesn't call
- * disconnect() during the cleanup paths
- */
- if (head->image)
- nouveau_bo_unpin(head->image);
- nouveau_bo_ref(NULL, &head->image);
-
- /*XXX: ditto */
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1412,34 +2380,27 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
-static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
- .disable = nv50_crtc_disable,
-};
-
-static const struct drm_crtc_funcs nv50_crtc_func = {
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
- .destroy = nv50_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
+static const struct drm_crtc_funcs
+nv50_head_func = {
+ .reset = nv50_head_reset,
+ .gamma_set = nv50_head_gamma_set,
+ .destroy = nv50_head_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = nv50_head_page_flip,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+ .atomic_destroy_state = nv50_head_atomic_destroy_state,
};
static int
-nv50_crtc_create(struct drm_device *dev, int index)
+nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
+ struct nv50_base *base;
+ struct nv50_curs *curs;
struct drm_crtc *crtc;
int ret, i;
@@ -1448,21 +2409,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
return -ENOMEM;
head->base.index = index;
- head->base.set_dither = nv50_crtc_set_dither;
- head->base.set_scale = nv50_crtc_set_scale;
- head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
- head->base.color_vibrance = 50;
- head->base.vibrant_hue = 0;
- head->base.cursor.set_pos = nv50_crtc_cursor_restore;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
head->base.lut.b[i] = i << 8;
}
+ ret = nv50_base_new(drm, head, &base);
+ if (ret == 0)
+ ret = nv50_curs_new(drm, head, &curs);
+ if (ret) {
+ kfree(head);
+ return ret;
+ }
+
crtc = &head->base.base;
- drm_crtc_init(dev, crtc, &nv50_crtc_func);
- drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
+ &curs->wndw.plane, &nv50_head_func,
+ "head-%d", head->base.index);
+ drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
@@ -1481,20 +2446,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- /* allocate cursor resources */
- ret = nv50_curs_create(device, disp->disp, index, &head->curs);
- if (ret)
- goto out;
-
- /* allocate page flip / sync resources */
- ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
- &head->sync);
- if (ret)
- goto out;
-
- head->sync.addr = EVO_FLIP_SEM0(index);
- head->sync.data = 0x00000000;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -1507,43 +2458,64 @@ nv50_crtc_create(struct drm_device *dev, int index)
out:
if (ret)
- nv50_crtc_destroy(crtc);
+ nv50_head_destroy(crtc);
return ret;
}
/******************************************************************************
- * Encoder helpers
+ * Output path helpers
*****************************************************************************/
-static bool
-nv50_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *native_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+ NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
+ asyc->scaler.full = false;
+ if (!native_mode)
+ return 0;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- nv_connector->scaling_full = false;
- if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
- switch (nv_connector->type) {
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_eDP:
- /* force use of scaler for non-edid modes */
- if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
- return true;
- nv_connector->scaling_full = true;
+ if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ /* Force use of scaler for non-EDID modes. */
+ if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
- default:
- return true;
- }
+ mode = native_mode;
+ asyc->scaler.full = true;
+ break;
+ default:
+ break;
}
+ } else {
+ mode = native_mode;
+ }
- drm_mode_copy(adjusted_mode, nv_connector->native_mode);
+ if (!drm_mode_equal(adjusted_mode, mode)) {
+ drm_mode_copy(adjusted_mode, mode);
+ crtc_state->mode_changed = true;
}
- return true;
+ return 0;
+}
+
+static int
+nv50_outp_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(conn_state->connector);
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ nv_connector->native_mode);
}
/******************************************************************************
@@ -1574,21 +2546,39 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
}
static void
-nv50_dac_commit(struct drm_encoder *encoder)
+nv50_dac_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_dac_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u32 *push;
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -1627,33 +2617,6 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0400 + (or * 0x080), 1);
- evo_data(push, 0x00000000);
- } else {
- evo_mthd(push, 0x0180 + (or * 0x020), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
-
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -1681,6 +2644,15 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_connected;
}
+static const struct drm_encoder_helper_funcs
+nv50_dac_help = {
+ .dpms = nv50_dac_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_dac_enable,
+ .disable = nv50_dac_disable,
+ .detect = nv50_dac_detect
+};
+
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
@@ -1688,18 +2660,8 @@ nv50_dac_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
- .dpms = nv50_dac_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .disable = nv50_dac_disconnect,
- .get_crtc = nv50_display_crtc_get,
- .detect = nv50_dac_detect
-};
-
-static const struct drm_encoder_funcs nv50_dac_func = {
+static const struct drm_encoder_funcs
+nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
@@ -1726,8 +2688,9 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
+ "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1737,7 +2700,26 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* Audio
*****************************************************************************/
static void
-nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
+
+static void
+nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1768,30 +2750,30 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
sizeof(args.base) + drm_eld_size(args.data));
}
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
static void
-nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hda_eld_v0 eld;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
};
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-/******************************************************************************
- * HDMI
- *****************************************************************************/
static void
-nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1821,26 +2803,635 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
args.pwr.max_ac_packet = max_ac_packet / 32;
nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nv50_audio_mode_set(encoder, mode);
+ nv50_audio_enable(encoder, mode);
+}
+
+/******************************************************************************
+ * MST
+ *****************************************************************************/
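+/* DP multi-stream: nv50_mstm wraps the topology manager for a physical
+ * output, nv50_mstc is the connector created for each MST port, and
+ * nv50_msto is a per-head DPMST encoder that routes a head's stream
+ * into the shared output.
+ */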
+#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
+#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
+#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
+
+struct nv50_mstm {
+ struct nouveau_encoder *outp;
+
+ struct drm_dp_mst_topology_mgr mgr;
+ struct nv50_msto *msto[4];
+
+ bool modified;
+};
+
+struct nv50_mstc {
+ struct nv50_mstm *mstm;
+ struct drm_dp_mst_port *port;
+ struct drm_connector connector;
+
+ struct drm_display_mode *native;
+ struct edid *edid;
+
+ int pbn;
+};
+
+struct nv50_msto {
+ struct drm_encoder encoder;
+
+ struct nv50_head *head;
+ struct nv50_mstc *mstc;
+ bool disabled;
+};
+
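+/* Find the payload entry the topology manager allocated for this
+ * stream's VCPI, if any.
+ */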
+static struct drm_dp_payload *
+nv50_msto_payload(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+ int vcpi = mstc->port->vcpi.vcpi, i;
+
+ NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
+ mstm->outp->base.base.name, i, payload->vcpi,
+ payload->start_slot, payload->num_slots);
+ }
+
+ for (i = 0; i < mstm->mgr.max_payloads; i++) {
+ struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
+ if (payload->vcpi == vcpi)
+ return payload;
+ }
+
+ return NULL;
}
static void
-nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
+nv50_msto_cleanup(struct nv50_msto *msto)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->head = NULL;
+ msto->disabled = false;
+ }
+}
+
+static void
+nv50_msto_prepare(struct nv50_msto *msto)
+{
+ struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
- (0x0100 << nv_crtc->index),
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
+ .base.hasht = mstm->outp->dcb->hasht,
+ .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
+ (0x0100 << msto->head->base.index),
};
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+ if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+ struct drm_dp_payload *payload = nv50_msto_payload(msto);
+ if (payload) {
+ args.vcpi.start_slot = payload->start_slot;
+ args.vcpi.num_slots = payload->num_slots;
+ args.vcpi.pbn = mstc->port->vcpi.pbn;
+ args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
+ }
+ }
+
+ NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
+ msto->encoder.name, msto->head->base.base.name,
+ args.vcpi.start_slot, args.vcpi.num_slots,
+ args.vcpi.pbn, args.vcpi.aligned_pbn);
+ nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
+}
+
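+/* Verify the MST link has enough payload bandwidth (PBN -> VCPI time
+ * slots) for the requested mode before accepting it.
+ */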
+static int
+nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+ struct nv50_mstm *mstm = mstc->mstm;
+ int bpp = conn_state->connector->display_info.bpc * 3;
+ int slots;
+
+ mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
+ if (slots < 0)
+ return slots;
+
+ return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+ mstc->native);
+}
+
+static void
+nv50_msto_enable(struct drm_encoder *encoder)
+{
+ struct nv50_head *head = nv50_head(encoder->crtc);
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = NULL;
+ struct nv50_mstm *mstm = NULL;
+ struct drm_connector *connector;
+ u8 proto, depth;
+ int slots;
+ bool r;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ if (connector->state->best_encoder == &msto->encoder) {
+ mstc = nv50_mstc(connector);
+ mstm = mstc->mstm;
+ break;
+ }
+ }
+
+ if (WARN_ON(!mstc))
+ return;
+
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, &slots);
+ WARN_ON(!r);
+
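+	/* Pick the SOR protocol for the sublink in use and translate the
+	 * sink's bpc into the depth value passed to the output update hook.
+	 */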
+ if (mstm->outp->dcb->sorconf.link & 1)
+ proto = 0x8;
+ else
+ proto = 0x9;
+
+ switch (mstc->connector.display_info.bpc) {
+ case 6: depth = 0x2; break;
+ case 8: depth = 0x5; break;
+ case 10:
+ default: depth = 0x6; break;
+ }
+
+ mstm->outp->update(mstm->outp, head->base.index,
+ &head->base.base.state->adjusted_mode, proto, depth);
+
+ msto->head = head;
+ msto->mstc = mstc;
+ mstm->modified = true;
+}
+
+static void
+nv50_msto_disable(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
+
+ if (mstc->port)
+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+
+ mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
+ mstm->modified = true;
+ msto->disabled = true;
+}
+
+static const struct drm_encoder_helper_funcs
+nv50_msto_help = {
+ .disable = nv50_msto_disable,
+ .enable = nv50_msto_enable,
+ .atomic_check = nv50_msto_atomic_check,
+};
+
+static void
+nv50_msto_destroy(struct drm_encoder *encoder)
+{
+ struct nv50_msto *msto = nv50_msto(encoder);
+ drm_encoder_cleanup(&msto->encoder);
+ kfree(msto);
+}
+
+static const struct drm_encoder_funcs
+nv50_msto = {
+ .destroy = nv50_msto_destroy,
+};
+
+static int
+nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
+ struct nv50_msto **pmsto)
+{
+ struct nv50_msto *msto;
+ int ret;
+
+ if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
+ DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ if (ret) {
+ kfree(*pmsto);
+ *pmsto = NULL;
+ return ret;
+ }
+
+ drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
+ msto->encoder.possible_crtcs = heads;
+ return 0;
+}
+
+static struct drm_encoder *
+nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
+{
+ struct nv50_head *head = nv50_head(connector_state->crtc);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[head->base.index]->encoder;
+ }
+ return NULL;
+}
+
+static struct drm_encoder *
+nv50_mstc_best_encoder(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (mstc->port) {
+ struct nv50_mstm *mstm = mstc->mstm;
+ return &mstm->msto[0]->encoder;
+ }
+ return NULL;
+}
+
+static enum drm_mode_status
+nv50_mstc_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+nv50_mstc_get_modes(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ int ret = 0;
+
+ mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
+ drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ if (mstc->edid) {
+ ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
+ drm_edid_to_eld(&mstc->connector, mstc->edid);
+ }
+
+ if (!mstc->connector.display_info.bpc)
+ mstc->connector.display_info.bpc = 8;
+
+ if (mstc->native)
+ drm_mode_destroy(mstc->connector.dev, mstc->native);
+ mstc->native = nouveau_conn_native_mode(&mstc->connector);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+};
+
+static enum drm_connector_status
+nv50_mstc_detect(struct drm_connector *connector, bool force)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!mstc->port)
+ return connector_status_disconnected;
+ return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+}
+
+static void
+nv50_mstc_destroy(struct drm_connector *connector)
+{
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ drm_connector_cleanup(&mstc->connector);
+ kfree(mstc);
+}
+
+static const struct drm_connector_funcs
+nv50_mstc = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = nouveau_conn_reset,
+ .detect = nv50_mstc_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = nv50_mstc_destroy,
+ .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
+ .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
+ .atomic_set_property = nouveau_conn_atomic_set_property,
+ .atomic_get_property = nouveau_conn_atomic_get_property,
+};
+
+static int
+nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
+ const char *path, struct nv50_mstc **pmstc)
+{
+ struct drm_device *dev = mstm->outp->base.base.dev;
+ struct nv50_mstc *mstc;
+ int ret, i;
+
+ if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
+ return -ENOMEM;
+ mstc->mstm = mstm;
+ mstc->port = port;
+
+ ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ kfree(*pmstc);
+ *pmstc = NULL;
+ return ret;
+ }
+
+ drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
+
+ mstc->connector.funcs->reset(&mstc->connector);
+ nouveau_conn_attach_properties(&mstc->connector);
+
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
+ drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
+ drm_mode_connector_set_path_property(&mstc->connector, path);
+ return 0;
+}
+
+static void
+nv50_mstm_cleanup(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
+ ret = drm_dp_check_act_status(&mstm->mgr);
+
+ ret = drm_dp_update_payload_part2(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_cleanup(msto);
+ }
+ }
+
+ mstm->modified = false;
+}
+
+static void
+nv50_mstm_prepare(struct nv50_mstm *mstm)
+{
+ struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
+ ret = drm_dp_update_payload_part1(&mstm->mgr);
+
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm)
+ nv50_msto_prepare(msto);
+ }
+ }
+}
+
+static void
+nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
+}
+
+static void
+nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+
+ drm_connector_unregister(&mstc->connector);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+ mstc->port = NULL;
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_unreference(&mstc->connector);
+}
+
+static void
+nv50_mstm_register_connector(struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+
+ drm_modeset_lock_all(drm->dev);
+ drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+ drm_modeset_unlock_all(drm->dev);
+
+ drm_connector_register(connector);
+}
+
+static struct drm_connector *
+nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *path)
+{
+ struct nv50_mstm *mstm = nv50_mstm(mgr);
+ struct nv50_mstc *mstc;
+ int ret;
+
+ ret = nv50_mstc_new(mstm, port, path, &mstc);
+ if (ret) {
+ if (mstc)
+ mstc->connector.funcs->destroy(&mstc->connector);
+ return NULL;
+ }
+
+ return &mstc->connector;
+}
+
+static const struct drm_dp_mst_topology_cbs
+nv50_mstm = {
+ .add_connector = nv50_mstm_add_connector,
+ .register_connector = nv50_mstm_register_connector,
+ .destroy_connector = nv50_mstm_destroy_connector,
+ .hotplug = nv50_mstm_hotplug,
+};
+
+void
+nv50_mstm_service(struct nv50_mstm *mstm)
+{
+ struct drm_dp_aux *aux = mstm->mgr.aux;
+ bool handled = true;
+ int ret;
+ u8 esi[8] = {};
+
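+ /* Read the sink's ESI registers and let the MST helper process any
+ * pending events, acknowledging the handled bits afterwards.  If the
+ * read fails, tear the MST topology down. */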
+ while (handled) {
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (ret != 8) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ return;
+ }
+
+ drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ }
+}
+
+void
+nv50_mstm_remove(struct nv50_mstm *mstm)
+{
+ if (mstm)
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+}
+
+static int
+nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+{
+ struct nouveau_encoder *outp = mstm->outp;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_mst_link_v0 mst;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
+ .base.hasht = outp->dcb->hasht,
+ .base.hashm = outp->dcb->hashm,
+ .mst.state = state,
+ };
+ struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
+ struct nvif_object *disp = &drm->display->disp;
+ int ret;
+
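+ /* For DP 1.2+ sinks, update the MST_EN bit in the sink's MSTM_CTRL
+ * register to match the requested state before notifying the display
+ * core via the method below. */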
+ if (dpcd >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ if (ret < 0)
+ return ret;
+
+ dpcd &= ~DP_MST_EN;
+ if (state)
+ dpcd |= DP_MST_EN;
+
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nvif_mthd(disp, 0, &args, sizeof(args));
+}
+
+int
+nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+{
+ int ret, state = 0;
+
+ if (!mstm)
+ return 0;
+
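+ /* Only DP 1.2+ sinks can support MST; if the capability bit isn't
+ * set, treat the sink as DP 1.1. */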
+ if (dpcd[0] >= 0x12) {
+ ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ if (ret < 0)
+ return ret;
+
+ if (!(dpcd[1] & DP_MST_CAP))
+ dpcd[0] = 0x11;
+ else
+ state = allow;
+ }
+
+ ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ if (ret)
+ return nv50_mstm_enable(mstm, dpcd[0], 0);
+
+ return mstm->mgr.mst_state;
+}
+
+static void
+nv50_mstm_fini(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
+}
+
+static void
+nv50_mstm_init(struct nv50_mstm *mstm)
+{
+ if (mstm && mstm->mgr.mst_state)
+ drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+}
+
+static void
+nv50_mstm_del(struct nv50_mstm **pmstm)
+{
+ struct nv50_mstm *mstm = *pmstm;
+ if (mstm) {
+ kfree(*pmstm);
+ *pmstm = NULL;
+ }
+}
+
+static int
+nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
+ int conn_base_id, struct nv50_mstm **pmstm)
+{
+ const int max_payloads = hweight8(outp->dcb->heads);
+ struct drm_device *dev = outp->base.base.dev;
+ struct nv50_mstm *mstm;
+ int ret, i;
+ u8 dpcd;
+
+ /* This is a workaround for some monitors not functioning
+ * correctly in MST mode on initial module load. I think
+ * some bad interaction with the VBIOS may be responsible.
+ *
+ * A good ol' off and on again seems to work here ;)
+ */
+ ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
+ if (ret >= 0 && dpcd >= 0x12)
+ drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+
+ if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
+ return -ENOMEM;
+ mstm->outp = outp;
+ mstm->mgr.cbs = &nv50_mstm;
+
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
+ max_payloads, conn_base_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < max_payloads; i++) {
+ ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
+ i, &mstm->msto[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/******************************************************************************
@@ -1861,89 +3452,91 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
.base.hashm = nv_encoder->dcb->hashm,
.pwr.state = mode == DRM_MODE_DPMS_ON,
};
- struct {
- struct nv50_disp_mthd_v1 base;
- struct nv50_disp_sor_dp_pwr_v0 pwr;
- } link = {
- .base.version = 1,
- .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
- .base.hasht = nv_encoder->dcb->hasht,
- .base.hashm = nv_encoder->dcb->hashm,
- .pwr.state = mode == DRM_MODE_DPMS_ON,
- };
- struct drm_device *dev = encoder->dev;
- struct drm_encoder *partner;
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+}
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
+static void
+nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
+ struct drm_display_mode *mode, u8 proto, u8 depth)
+{
+ struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
+ u32 *push;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- args.pwr.state = 1;
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
- nvif_mthd(disp->disp, 0, &link, sizeof(link));
+ if (!mode) {
+ nv_encoder->ctrl &= ~BIT(head);
+ if (!(nv_encoder->ctrl & 0x0000000f))
+ nv_encoder->ctrl = 0;
} else {
- nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nv_encoder->ctrl |= proto << 8;
+ nv_encoder->ctrl |= BIT(head);
}
-}
-static void
-nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
-{
- struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
- u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
- if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ if ((push = evo_wait(core, 6))) {
+ if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+ if (mode) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ nv_encoder->ctrl |= 0x00001000;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ nv_encoder->ctrl |= 0x00002000;
+ nv_encoder->ctrl |= depth << 16;
+ }
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
} else {
+ if (mode) {
+ u32 magic = 0x31ec6000 | (head << 25);
+ u32 syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (head * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ }
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, (nv_encoder->ctrl = temp));
}
- evo_kick(push, mast);
+ evo_data(push, nv_encoder->ctrl);
+ evo_kick(push, core);
}
}
static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
+nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
nv_encoder->crtc = NULL;
if (nv_crtc) {
- nv50_crtc_prepare(&nv_crtc->base);
- nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
- nv50_audio_disconnect(encoder, nv_crtc);
- nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
- }
-}
+ struct nvkm_i2c_aux *aux = nv_encoder->aux;
+ u8 pwr;
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
+ if (aux) {
+ int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
+ }
+ }
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ }
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
+nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
@@ -1954,13 +3547,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 mask, ctrl;
- u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1985,7 +3575,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
proto = 0x2;
}
- nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
@@ -2019,94 +3609,60 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ if (nv_connector->base.display_info.bpc == 6)
depth = 0x2;
- } else
- if (nv_connector->base.display_info.bpc == 8) {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ else
+ if (nv_connector->base.display_info.bpc == 8)
depth = 0x5;
- } else {
- nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ else
depth = 0x6;
- }
if (nv_encoder->dcb->sorconf.link & 1)
proto = 0x8;
else
proto = 0x9;
- nv50_audio_mode_set(encoder, mode);
+
+ nv50_audio_enable(encoder, mode);
break;
default:
BUG_ON(1);
break;
}
- nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
-
- if (nv50_vers(mast) >= GF110_DISP) {
- u32 *push = evo_wait(mast, 3);
- if (push) {
- u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
- u32 syncs = 0x00000001;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs | (depth << 6));
- evo_data(push, magic);
- evo_kick(push, mast);
- }
-
- ctrl = proto << 8;
- mask = 0x00000f00;
- } else {
- ctrl = (depth << 16) | (proto << 8);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- ctrl |= 0x00001000;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- ctrl |= 0x00002000;
- mask = 0x000f3f00;
- }
-
- nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
+ nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
+static const struct drm_encoder_helper_funcs
+nv50_sor_help = {
+ .dpms = nv50_sor_dpms,
+ .atomic_check = nv50_outp_atomic_check,
+ .enable = nv50_sor_enable,
+ .disable = nv50_sor_disable,
+};
+
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
- .dpms = nv50_sor_dpms,
- .mode_fixup = nv50_encoder_mode_fixup,
- .prepare = nv50_sor_disconnect,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .disable = nv50_sor_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_sor_func = {
+static const struct drm_encoder_funcs
+nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- int type;
+ int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
@@ -2122,7 +3678,16 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->update = nv50_sor_update;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
+ "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_sor_help);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -2131,6 +3696,15 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &aux->i2c;
nv_encoder->aux = aux;
}
+
+ /*TODO: Use DP Info Table to check for support. */
+ if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+ if (ret)
+ return ret;
+ }
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
@@ -2138,20 +3712,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = &bus->i2c;
}
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
-
static void
nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
@@ -2172,30 +3738,48 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-static bool
-nv50_pior_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int
+nv50_pior_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
- return false;
- adjusted_mode->clock *= 2;
- return true;
+ int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+ crtc_state->adjusted_mode.clock *= 2;
+ return 0;
}
static void
-nv50_pior_commit(struct drm_encoder *encoder)
+nv50_pior_disable(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+ evo_mthd(push, 0x0700 + (or * 0x040), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
}
static void
-nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_pior_enable(struct drm_encoder *encoder)
{
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
+ struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
u8 owner = 1 << nv_crtc->index;
u8 proto, depth;
u32 *push;
@@ -2218,8 +3802,6 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
break;
}
- nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
-
push = evo_wait(mast, 8);
if (push) {
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -2238,29 +3820,13 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nv_encoder->crtc = encoder->crtc;
}
-static void
-nv50_pior_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nv50_mast *mast = nv50_mast(encoder->dev);
- const int or = nv_encoder->or;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nv50_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(mast, 4);
- if (push) {
- if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
- evo_mthd(push, 0x0700 + (or * 0x040), 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, mast);
- }
- }
-
- nv_encoder->crtc = NULL;
-}
+static const struct drm_encoder_helper_funcs
+nv50_pior_help = {
+ .dpms = nv50_pior_dpms,
+ .atomic_check = nv50_pior_atomic_check,
+ .enable = nv50_pior_enable,
+ .disable = nv50_pior_disable,
+};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
@@ -2269,17 +3835,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
- .dpms = nv50_pior_dpms,
- .mode_fixup = nv50_pior_mode_fixup,
- .prepare = nv50_pior_disconnect,
- .commit = nv50_pior_commit,
- .mode_set = nv50_pior_mode_set,
- .disable = nv50_pior_disconnect,
- .get_crtc = nv50_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nv50_pior_func = {
+static const struct drm_encoder_funcs
+nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
@@ -2321,149 +3878,464 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
- drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
+ "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
+ drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
- * Framebuffer
+ * Atomic
*****************************************************************************/
static void
-nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
- nvif_object_fini(&fbdma->base[i]);
- nvif_object_fini(&fbdma->core);
- list_del(&fbdma->head);
- kfree(fbdma);
+ struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_dmac *core = &disp->mast.base;
+ struct nv50_mstm *mstm;
+ struct drm_encoder *encoder;
+ u32 *push;
+
+ NV_ATOMIC(drm, "commit core %08x\n", interlock);
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_prepare(mstm);
+ }
+ }
+
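+ /* Submit a core channel update with the requested interlock mask and
+ * poll the completion notifier, complaining if the update never lands. */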
+ if ((push = evo_wait(core, 5))) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, interlock);
+ evo_data(push, 0x00000000);
+ nouveau_bo_wr32(disp->sync, 0, 0x00000000);
+ evo_kick(push, core);
+ if (nvif_msec(&drm->device, 2000ULL,
+ if (nouveau_bo_rd32(disp->sync, 0))
+ break;
+ usleep_range(1, 2);
+ ) < 0)
+ NV_ERROR(drm, "EVO timeout\n");
+ }
+
+ drm_for_each_encoder(encoder, drm->dev) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ mstm = nouveau_encoder(encoder)->dp.mstm;
+ if (mstm && mstm->modified)
+ nv50_mstm_cleanup(mstm);
+ }
+ }
}
-static int
-nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+static void
+nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_mast *mast = nv50_mast(dev);
- struct __attribute__ ((packed)) {
- struct nv_dma_v0 base;
- union {
- struct nv50_dma_v0 nv50;
- struct gf100_dma_v0 gf100;
- struct gf119_dma_v0 gf119;
- };
- } args = {};
- struct nv50_fbdma *fbdma;
- struct drm_crtc *crtc;
- u32 size = sizeof(args.base);
- int ret;
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+ u32 interlock_core = 0;
+ u32 interlock_chan = 0;
+ int i;
+
+ NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
+ drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
- list_for_each_entry(fbdma, &disp->fbdma, head) {
- if (fbdma->core.handle == name)
- return 0;
+ if (atom->lock_core)
+ mutex_lock(&disp->mutex);
+
+ /* Disable head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
+ asyh->clr.mask, asyh->set.mask);
+
+ if (asyh->clr.mask) {
+ nv50_head_flush_clr(head, asyh, atom->flush_disable);
+ interlock_core |= 1;
+ }
}
- fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
- if (!fbdma)
- return -ENOMEM;
- list_add(&fbdma->head, &disp->fbdma);
+ /* Disable plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
- args.base.target = NV_DMA_V0_TARGET_VRAM;
- args.base.access = NV_DMA_V0_ACCESS_RDWR;
- args.base.start = offset;
- args.base.limit = offset + length - 1;
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
+ asyw->clr.mask, asyw->set.mask);
+ if (!asyw->clr.mask)
+ continue;
- if (drm->device.info.chipset < 0x80) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xc0) {
- args.nv50.part = NV50_DMA_V0_PART_256;
- args.nv50.kind = kind;
- size += sizeof(args.nv50);
- } else
- if (drm->device.info.chipset < 0xd0) {
- args.gf100.kind = kind;
- size += sizeof(args.gf100);
- } else {
- args.gf119.page = GF119_DMA_V0_PAGE_LP;
- args.gf119.kind = kind;
- size += sizeof(args.gf119);
+ interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
+ atom->flush_disable,
+ asyw);
+ }
+
+ /* Disable output path(s). */
+ list_for_each_entry(outp, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
+ outp->clr.mask, outp->set.mask);
+
+ if (outp->clr.mask) {
+ help->disable(encoder);
+ interlock_core |= 1;
+ if (outp->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
+ }
+ }
+
+ /* Flush disable. */
+ if (interlock_core) {
+ if (atom->flush_disable) {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ interlock_core = 0;
+ interlock_chan = 0;
+ }
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Update output path(s). */
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ const struct drm_encoder_helper_funcs *help;
+ struct drm_encoder *encoder;
+
+ encoder = outp->encoder;
+ help = encoder->helper_private;
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
+ outp->set.mask, outp->clr.mask);
+
+ if (outp->set.mask) {
+ help->enable(encoder);
+ interlock_core = 1;
+ }
+
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
+ /* Update head(s). */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
struct nv50_head *head = nv50_head(crtc);
- int ret = nvif_object_init(&head->sync.base.base.user, name,
- NV_DMA_IN_MEMORY, &args, size,
- &fbdma->base[head->base.index]);
- if (ret) {
- nv50_fbdma_fini(fbdma);
- return ret;
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set(head, asyh);
+ interlock_core = 1;
}
}
- ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
- &args, size, &fbdma->core);
- if (ret) {
- nv50_fbdma_fini(fbdma);
+ /* Update plane(s). */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+
+ NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
+ asyw->set.mask, asyw->clr.mask);
+ if (!asyw->set.mask &&
+ (!asyw->clr.mask || atom->flush_disable))
+ continue;
+
+ interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
+ }
+
+ /* Flush update. */
+ if (interlock_core) {
+ if (!interlock_chan && atom->state.legacy_cursor_update) {
+ u32 *push = evo_wait(&disp->mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &disp->mast);
+ }
+ } else {
+ nv50_disp_atomic_commit_core(drm, interlock_chan);
+ }
+ }
+
+ if (atom->lock_core)
+ mutex_unlock(&disp->mutex);
+
+ /* Wait for HW to signal completion. */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ int ret = nv50_wndw_wait_armed(wndw, asyw);
+ if (ret)
+ NV_ERROR(drm, "%s: timeout\n", plane->name);
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event) {
+ unsigned long flags;
+ /* Get correct count/ts if racing with vblank irq */
+ drm_accurate_vblank_count(crtc);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+}
+
+static void
+nv50_disp_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, typeof(*state), commit_work);
+ nv50_disp_atomic_commit_tail(state);
+}
+
+static int
+nv50_disp_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool nonblock)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ bool active = false;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ goto done;
+
+ INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ goto done;
+
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ goto done;
+ }
+
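+ /* Arm a notifier in the shared sync buffer for each plane taking a new
+ * image, so the commit tail can wait for the update to complete. */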
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane_state);
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (asyw->set.image) {
+ asyw->ntfy.handle = wndw->dmac->sync.handle;
+ asyw->ntfy.offset = wndw->ntfy;
+ asyw->ntfy.awaken = false;
+ asyw->set.ntfy = true;
+ nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
+ wndw->ntfy ^= 0x10;
+ }
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
+
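+ /* Run the commit tail now, or from a worker for nonblocking commits;
+ * the state reference taken above is dropped by the tail. */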
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ nv50_disp_atomic_commit_tail(state);
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->state->enable) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ active = true;
+ break;
+ }
+ }
+
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+
+done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
+static struct nv50_outp_atom *
+nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
+{
+ struct nv50_outp_atom *outp;
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder == encoder)
+ return outp;
+ }
+
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ return ERR_PTR(-ENOMEM);
+
+ list_add(&outp->head, &atom->outp);
+ outp->encoder = encoder;
+ return outp;
+}
+
+static int
+nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = connector->state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector->state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc->state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
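+ /* Disabling an MST stream requires the disable to be flushed to the
+ * hardware before the remaining state is applied. */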
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ outp->flush_disable = true;
+ atom->flush_disable = true;
+ }
+ outp->clr.ctrl = true;
+ atom->lock_core = true;
}
return 0;
}
-static void
-nv50_fb_dtor(struct drm_framebuffer *fb)
+static int
+nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
+ struct drm_connector_state *connector_state)
{
+ struct drm_encoder *encoder = connector_state->best_encoder;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ struct nv50_outp_atom *outp;
+
+ if (!(crtc = connector_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
+ if (crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ outp = nv50_disp_outp_atomic_add(atom, encoder);
+ if (IS_ERR(outp))
+ return PTR_ERR(outp);
+
+ outp->set.ctrl = true;
+ atom->lock_core = true;
+ }
+
+ return 0;
}
static int
-nv50_fb_ctor(struct drm_framebuffer *fb)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nouveau_drm *drm = nouveau_drm(fb->dev);
- struct nouveau_bo *nvbo = nv_fb->nvbo;
- struct nv50_disp *disp = nv50_disp(fb->dev);
- u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
- u8 tile = nvbo->tile_mode;
-
- if (drm->device.info.chipset >= 0xc0)
- tile >>= 4; /* yep.. */
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
+nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, connector);
+ if (ret)
+ return ret;
+
+ ret = nv50_disp_outp_atomic_check_set(atom, connector_state);
+ if (ret)
+ return ret;
}
- if (disp->disp->oclass < G82_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- nv_fb->r_format |= kind << 16;
- } else
- if (disp->disp->oclass < GF110_DISP) {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x00100000);
- } else {
- nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
- (fb->pitches[0] | 0x01000000);
+ return 0;
+}
+
+static void
+nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_outp_atom *outp, *outt;
+
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
}
- nv_fb->r_handle = 0xffff0000 | kind;
- return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
- drm->device.info.ram_user, kind);
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+nv50_disp_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct nv50_atom *atom = nv50_atom(state);
+ drm_atomic_state_default_release(&atom->state);
+ kfree(atom);
}
+static struct drm_atomic_state *
+nv50_disp_atomic_state_alloc(struct drm_device *dev)
+{
+ struct nv50_atom *atom;
+ if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
+ drm_atomic_state_init(dev, &atom->state) < 0) {
+ kfree(atom);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&atom->outp);
+ return &atom->state;
+}
+
+static const struct drm_mode_config_funcs
+nv50_disp_func = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .atomic_check = nv50_disp_atomic_check,
+ .atomic_commit = nv50_disp_atomic_commit,
+ .atomic_state_alloc = nv50_disp_atomic_state_alloc,
+ .atomic_state_clear = nv50_disp_atomic_state_clear,
+ .atomic_state_free = nv50_disp_atomic_state_free,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2471,12 +4343,30 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
void
nv50_display_fini(struct drm_device *dev)
{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_fini(wndw);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv50_mstm_fini(nv_encoder->dp.mstm);
+ }
+ }
}
int
nv50_display_init(struct drm_device *dev)
{
- struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *encoder;
+ struct drm_plane *plane;
struct drm_crtc *crtc;
u32 *push;
@@ -2484,16 +4374,35 @@ nv50_display_init(struct drm_device *dev)
if (!push)
return -EBUSY;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nv50_sync *sync = nv50_sync(crtc);
-
- nv50_crtc_lut_load(crtc);
- nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
- }
-
evo_mthd(push, 0x0088, 1);
evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
+
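+ /* Force non-MST encoders back on and resume any suspended MST
+ * topology managers before bringing the windows back up. */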
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ const struct drm_encoder_helper_funcs *help;
+ struct nouveau_encoder *nv_encoder;
+
+ nv_encoder = nouveau_encoder(encoder);
+ help = encoder->helper_private;
+ if (help && help->dpms)
+ help->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ nv50_mstm_init(nv_encoder->dp.mstm);
+ }
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ nv50_head_lut_load(crtc);
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (plane->funcs != &nv50_wndw)
+ continue;
+ nv50_wndw_init(wndw);
+ }
+
return 0;
}
@@ -2501,11 +4410,6 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
- struct nv50_fbdma *fbdma, *fbtmp;
-
- list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
- nv50_fbdma_fini(fbdma);
- }
nv50_dmac_destroy(&disp->mast.base, disp->disp);
@@ -2518,6 +4422,10 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
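+/* Off by default; enable with nouveau.atomic=1 on the kernel command line
+ * or atomic=1 when loading the module.  The 0400 permissions make the value
+ * visible to root only and not writable at runtime. */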
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
int
nv50_display_create(struct drm_device *dev)
{
@@ -2532,15 +4440,17 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
- INIT_LIST_HEAD(&disp->fbdma);
+
+ mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
- nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ if (nouveau_atomic)
+ dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2572,7 +4482,7 @@ nv50_display_create(struct drm_device *dev)
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, i);
+ ret = nv50_head_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 70da347aa8c5..918187cee84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
void nv50_display_destroy(struct drm_device *);
int nv50_display_init(struct drm_device *);
void nv50_display_fini(struct drm_device *);
-
-void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *, u32 swap_interval);
-
-struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index af3d3c49411a..327dcd7901ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -30,7 +30,7 @@ int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
- struct drm_device *dev = nfbdev->dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
+ struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..f68c7054fd53 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,13 +35,12 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
.limit = limit,
}, sizeof(struct nv_dma_v0),
&fctx->sema);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- u32 start = bo->bo.mem.start * PAGE_SIZE;
- u32 limit = start + bo->bo.mem.size - 1;
-
- ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
- NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
- .target = NV_DMA_V0_TARGET_VRAM,
- .access = NV_DMA_V0_ACCESS_RDWR,
- .start = start,
- .limit = limit,
- }, sizeof(struct nv_dma_v0),
- &fctx->head[i]);
- }
-
if (ret)
nv10_fence_context_del(chan);
return ret;
@@ -97,7 +79,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 127;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..52b87ae83e7b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -28,13 +28,6 @@
#include "nv50_display.h"
-u64
-nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
- struct nv84_fence_chan *fctx = chan->fence;
- return fctx->dispc_vma[crtc].offset;
-}
-
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
- int i;
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- int ret, i;
+ int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
&fctx->vma_gart);
}
- /* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
- }
-
if (ret)
nv84_fence_context_del(chan);
return ret;
@@ -229,7 +209,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->nr;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 054b6a056d99..90f27bfa381f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -30,7 +30,7 @@ int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -65,7 +65,7 @@ int
nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
int ret;
@@ -93,7 +93,7 @@ int
nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
@@ -148,8 +148,8 @@ int
nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct drm_device *dev = nfbdev->helper.dev;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 1ee9294eca2e..29c20dfd894d 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -55,7 +55,7 @@ nvif_client_fini(struct nvif_client *client)
}
}
-const struct nvif_driver *
+static const struct nvif_driver *
nvif_drivers[] = {
#ifdef __KERNEL__
&nvif_driver_nvkm,
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index b0787ff833ef..278b3933dc96 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -155,10 +155,8 @@ nvif_notify_fini(struct nvif_notify *notify)
int ret = nvif_notify_put(notify);
if (ret >= 0 && object) {
ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- notify->object = NULL;
- kfree((void *)notify->data);
- }
+ notify->object = NULL;
+ kfree((void *)notify->data);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a7e0c1..058ff46b5f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
+#include <core/firmware.h>
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index a4458a8eb30a..255d81ccf916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -4,4 +4,4 @@ nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
-nvkm-y += nvkm/engine/ce/gp104.o
+nvkm-y += nvkm/engine/ce/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index 05bb65608dfe..d9ca9636a3e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_ce_data[] = {
+static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_ce_data[] = {
0x00000800,
};
-uint32_t gf100_ce_code[] = {
+static uint32_t gf100_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index 972281d10f38..f0a1cf31c7ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_ce_data[] = {
+static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_ce_data[] = {
0x00000800,
};
-uint32_t gt215_ce_code[] = {
+static uint32_t gt215_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 20e019788a53..985c8f653874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
static const struct nvkm_engine_func
-gp104_ce = {
+gp102_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_B },
@@ -37,8 +37,8 @@ gp104_ce = {
};
int
-gp104_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a067a6c5..cceda959b47c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
.pmu = gf119_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1851,7 +1852,7 @@ nvf1_chipset = {
.fb = gk104_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1965,7 +1966,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -1999,7 +2000,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gf119_i2c_new,
+ .i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@@ -2130,7 +2131,7 @@ nv12b_chipset = {
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
- .fb = gk20a_fb_new,
+ .fb = gm20b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gk20a_ibus_new,
.imem = gk20a_instmem_new,
@@ -2166,6 +2167,7 @@ nv130_chipset = {
.mmu = gf100_mmu_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
+ .pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp100_ce_new,
@@ -2182,13 +2184,71 @@ nv130_chipset = {
};
static const struct nvkm_device_chip
+nv132_chipset = {
+ .name = "GP102",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gp104_fb_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv136_chipset = {
+ .name = "GP106",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2198,13 +2258,14 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = gp104_ce_new,
- .ce[1] = gp104_ce_new,
- .ce[2] = gp104_ce_new,
- .ce[3] = gp104_ce_new,
- .disp = gp104_disp_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
};
@@ -2643,7 +2704,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
+ case 0x136: device->chip = &nv136_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..74a1ffa425f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
- pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
- pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
- NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
- (u64)pci_domain_nr(pci_dev->bus) << 32 |
- pci_dev->bus->number << 16 |
- PCI_SLOT(pci_dev->devfn) << 8 |
- PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set a preliminary DMA mask based on the .dma_bits member of the
+ * MMU subdevice. This allows other subdevices to create DMA mappings
+ * in their init() or oneinit() methods, which may be called before the
+ * TTM layer sets the DMA mask definitively.
+	 * This is necessary for platforms where the default DMA mask of 32
+	 * bits does not cover any system memory, i.e., when all RAM is > 4 GB.
+ */
+ if (pdev->device.mmu)
+ dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+ return 0;
}
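The hunk above relies on the standard DMA-mapping helpers dma_set_mask_and_coherent() and DMA_BIT_MASK(). As a minimal illustrative sketch (not part of this patch; the helper name and the PCI probe context are assumptions), the common kernel pattern of requesting a wide mask and falling back to the 32-bit default when it is rejected looks like this:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Hypothetical helper, for illustration only: try the device's full
 * addressing width first, then fall back to the conventional 32-bit
 * mask if the wider one is rejected by the platform.
 */
static int example_set_dma_mask(struct pci_dev *pdev, unsigned int dma_bits)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

The patch itself ignores the return value, since a failed call leaves the existing (default) mask in place; the explicit fallback shown here is the more general form used when the caller needs to know which width was accepted.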
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9b638bd905ff..f2bc0b7d9b93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -102,7 +102,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (iommu_present(&platform_bus_type)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(tdev->iommu.domain))
+ if (!tdev->iommu.domain)
goto error;
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 79a8f71cf788..513ee6b79553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -326,7 +326,7 @@ nvkm_udevice = {
.sclass = nvkm_udevice_child_get,
};
-int
+static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b54a31e..fa05d16ae948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -11,7 +11,7 @@ nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
-nvkm-y += nvkm/engine/disp/gp104.o
+nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -48,14 +48,14 @@ nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
-nvkm-y += nvkm/engine/disp/rootgp104.o
+nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
-nvkm-y += nvkm/engine/disp/dmacgp104.o
+nvkm-y += nvkm/engine/disp/dmacgp102.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -64,7 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
-nvkm-y += nvkm/engine/disp/basegp104.o
+nvkm-y += nvkm/engine/disp/basegp102.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -77,7 +77,7 @@ nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
nvkm-y += nvkm/engine/disp/coregp100.o
-nvkm-y += nvkm/engine/disp/coregp104.o
+nvkm-y += nvkm/engine/disp/coregp102.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -85,7 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
-nvkm-y += nvkm/engine/disp/ovlygp104.o
+nvkm-y += nvkm/engine/disp/ovlygp102.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
nvkm-y += nvkm/engine/disp/cursgt215.o
nvkm-y += nvkm/engine/disp/cursgf119.o
nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
nvkm-y += nvkm/engine/disp/oimmnv50.o
nvkm-y += nvkm/engine/disp/oimmg84.o
nvkm-y += nvkm/engine/disp/oimmgt215.o
nvkm-y += nvkm/engine/disp/oimmgf119.o
nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 51688e37c54e..8a3cdeef8d2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_base_oclass = {
+gp102_disp_base_oclass = {
.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_base_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gf119_disp_base_chan_mthd,
.chid = 1,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953bc9264..524a24eae1a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (mthd->addr) {
snprintf(cname_, sizeof(cname_), "%s %d",
- mthd->name, chan->chid);
+ mthd->name, chan->chid.user);
cname = cname_;
}
@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
notify->size = sizeof(struct nvif_notify_uevent_rep);
notify->types = 1;
- notify->index = chan->chid;
+ notify->index = chan->chid.user;
return 0;
}
@@ -153,27 +153,27 @@ nv50_disp_chan_uevent = {
.fini = nv50_disp_chan_uevent_fini,
};
-int
+static int
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
return 0;
}
-int
+static int
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
return 0;
}
-int
+static int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
struct nvkm_event **pevent)
{
@@ -189,14 +189,14 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
return -EINVAL;
}
-int
+static int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
*addr = device->func->resource_addr(device, 0) +
- 0x640000 + (chan->chid * 0x1000);
+ 0x640000 + (chan->chid.user * 0x1000);
*size = 0x001000;
return 0;
}
@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
- if (chan->chid >= 0)
- disp->chan[chan->chid] = NULL;
+ if (chan->chid.user >= 0)
+ disp->chan[chan->chid.user] = NULL;
return chan->func->dtor ? chan->func->dtor(chan) : chan;
}
@@ -263,7 +263,7 @@ nv50_disp_chan = {
int
nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nv50_disp_chan *chan)
{
@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
chan->func = func;
chan->mthd = mthd;
chan->root = root;
- chan->chid = chid;
+ chan->chid.ctrl = ctrl;
+ chan->chid.user = user;
chan->head = head;
- if (disp->chan[chan->chid]) {
- chan->chid = -1;
+ if (disp->chan[chan->chid.user]) {
+ chan->chid.user = -1;
return -EBUSY;
}
- disp->chan[chan->chid] = chan;
+ disp->chan[chan->chid.user] = chan;
return 0;
}
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid, int head,
+ struct nv50_disp_root *root, int ctrl, int user, int head,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
return -ENOMEM;
*pobject = &chan->object;
- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+ head, oclass, chan);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d9fd20..737b38f6fbd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@ struct nv50_disp_chan {
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
struct nv50_disp_root *root;
- int chid;
+
+ struct {
+ int ctrl;
+ int user;
+ } chid;
int head;
struct nvkm_object object;
@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nv50_disp_chan *);
int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid, int head,
+ struct nv50_disp_root *, int ctrl, int user, int head,
const struct nvkm_oclass *, struct nvkm_object **);
extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
struct nvkm_sclass base;
const struct nv50_disp_chan_func *func;
const struct nv50_disp_chan_mthd *mthd;
- int chid;
+ struct {
+ int ctrl;
+ int user;
+ } chid;
};
extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
const struct nv50_disp_chan_mthd *,
- struct nv50_disp_root *, int chid,
+ struct nv50_disp_root *, int ctrl, int user,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index 019379a3a01c..c65c9f3ff69f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-const struct nv50_disp_mthd_list
+static const struct nv50_disp_mthd_list
g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
@@ -43,8 +43,8 @@ g94_disp_core_chan_mthd = {
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 4, &g94_disp_core_mthd_sor },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 6922f4007b61..b0df4b752b8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -29,7 +29,7 @@
#include <nvif/class.h>
static int
-gp104_disp_core_init(struct nv50_disp_dmac *chan)
+gp102_disp_core_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -59,20 +59,20 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan)
return 0;
}
-const struct nv50_disp_dmac_func
-gp104_disp_core_func = {
- .init = gp104_disp_core_init,
+static const struct nv50_disp_dmac_func
+gp102_disp_core_func = {
+ .init = gp102_disp_core_init,
.fini = gf119_disp_core_fini,
.bind = gf119_disp_dmac_bind,
};
const struct nv50_disp_dmac_oclass
-gp104_disp_core_oclass = {
- .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+gp102_disp_core_oclass = {
+ .base.oclass = GP102_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
- .func = &gp104_disp_core_func,
+ .func = &gp102_disp_core_func,
.mthd = &gk104_disp_core_chan_mthd,
.chid = 0,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
index dd99fc7060b1..fa781b5a7e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2a1574e06ad6..2be6fb052c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 28e8f06c9472..2a99db4bf8f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &gf119_disp_pioc_func,
- .chid = 13,
+ .chid = { 13, 13 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
new file mode 100644
index 000000000000..e958210d8105
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_curs_oclass = {
+ .base.oclass = GK104_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 13, 17 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
index d8a4b9ca139c..00a7f3564450 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 8b1320499a0f..82ff82d8c1ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_curs_new,
.func = &nv50_disp_pioc_func,
- .chid = 7,
+ .chid = { 7, 7 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7cef307a..ce7cd74fbd5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -9, handle,
- chan->base.chid << 27 | 0x00000001);
+ chan->base.chid.user, -9, handle,
+ chan->base.chid.user << 27 | 0x00000001);
}
void
@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index ad24c2c57696..cdead9500343 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -27,31 +27,32 @@
#include <subdev/timer.h>
static int
-gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
@@ -59,8 +60,8 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
}
const struct nv50_disp_dmac_func
-gp104_disp_dmac_func = {
- .init = gp104_disp_dmac_init,
+gp102_disp_dmac_func = {
+ .init = gp102_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.bind = gf119_disp_dmac_bind,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a357b9..0a1381a84552 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
chan->func = func;
ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
- chid, head, oclass, &chan->base);
+ chid, chid, head, oclass, &chan->base);
if (ret)
return ret;
@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->base.root->ramht, object,
- chan->base.chid, -10, handle,
- chan->base.chid << 28 |
- chan->base.chid);
+ chan->base.chid.user, -10, handle,
+ chan->base.chid.user << 28 |
+ chan->base.chid.user);
}
static void
@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* deactivate channel */
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
/* disable error reporting and completion notifications */
- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
}
static int
@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
struct nv50_disp *disp = chan->base.root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->base.chid;
+ int ctrl = chan->base.chid.ctrl;
+ int user = chan->base.chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
/* initialise channel for dma command submission */
- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index 43ac05857853..ea4a0d062e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -30,7 +30,7 @@ int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
void gf119_disp_core_fini(struct nv50_disp_dmac *);
-extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
+extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -95,7 +95,7 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 9688970eca47..4a93ceb850ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -319,9 +319,8 @@ static const struct dp_rates {
};
void
-nvkm_dp_train(struct work_struct *w)
+nvkm_dp_train(struct nvkm_output_dp *outp)
{
- struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
struct nv50_disp *disp = nv50_disp(outp->base.disp);
const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
@@ -353,9 +352,6 @@ nvkm_dp_train(struct work_struct *w)
}
cfg--;
- /* disable link interrupt handling during link training */
- nvkm_notify_put(&outp->irq);
-
/* ensure sink is not in a low-power state */
if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
@@ -400,9 +396,6 @@ nvkm_dp_train(struct work_struct *w)
dp_link_train_fini(dp);
- /* signal completion and enable link interrupt handling */
OUTP_DBG(&outp->base, "training complete");
atomic_set(&outp->lt.done, 1);
- wake_up(&outp->lt.wait);
- nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 6e10c5e0ef11..baf1dd9ff975 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_DISP_DPORT_H__
#define __NVKM_DISP_DPORT_H__
-#include <core/os.h>
+struct nvkm_output_dp;
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
@@ -77,5 +77,5 @@
#define DPCD_SC00_SET_POWER_D0 0x01
#define DPCD_SC00_SET_POWER_D3 0x03
-void nvkm_dp_train(struct work_struct *);
+void nvkm_dp_train(struct nvkm_output_dp *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 29e84b241cca..7b346ccc38b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -203,17 +203,20 @@ gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
/* see note in nv50_disp_intr_unk20_0() */
if (outp && outp->info.type == DCB_OUTPUT_DP) {
struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
- struct nvbios_init init = {
- .subdev = subdev,
- .bios = subdev->device->bios,
- .outp = &outp->info,
- .crtc = head,
- .offset = outpdp->info.script[4],
- .execute = 1,
- };
+ if (!outpdp->lt.mst) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = subdev->device->bios,
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
- nvbios_exec(&init);
- atomic_set(&outpdp->lt.done, 0);
+ nvkm_notify_put(&outpdp->irq);
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
}
}
@@ -314,7 +317,7 @@ gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, pclk, true))
+ if (nvkm_output_dp_train(outp, pclk))
OUTP_ERR(outp, "link not trained before attach");
} else {
if (disp->func->sor.magic)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3bf3380336e4..f5d613f82709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -25,7 +25,7 @@
#include "rootnv50.h"
static void
-gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+gp102_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
@@ -51,12 +51,12 @@ gp104_disp_intr_error(struct nv50_disp *disp, int chid)
}
static const struct nv50_disp_func
-gp104_disp = {
+gp102_disp = {
.intr = gf119_disp_intr,
- .intr_error = gp104_disp_intr_error,
+ .intr_error = gp102_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
- .root = &gp104_disp_root_oclass,
+ .root = &gp102_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
@@ -75,7 +75,7 @@ gp104_disp = {
};
int
-gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+ return gf119_disp_new_(&gp102_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7dc18fd..567466f93cd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -590,6 +590,7 @@ nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
.execute = 1,
};
+ nvkm_notify_put(&outpdp->irq);
nvbios_exec(&init);
atomic_set(&outpdp->lt.done, 0);
}
@@ -779,7 +780,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
break;
}
- if (nvkm_output_dp_train(outp, datarate / soff, true))
+ if (nvkm_output_dp_train(outp, datarate / soff))
OUTP_ERR(outp, "link not trained before attach");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index 54a4ae8d66c6..5ad5d0f5db05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index c658db54afc5..1f9fd3403f07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
index b1fde8c125d6..0c09fe85e952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &gf119_disp_pioc_func,
- .chid = 9,
+ .chid = { 9, 9 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
new file mode 100644
index 000000000000..abf82365c671
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_oimm_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = { 9, 13 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
index f4e7eb3d1177..1281db28aebd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 3940b9c966ec..07540f3d32dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -33,7 +33,7 @@
int
nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
const struct nv50_disp_chan_mthd *mthd,
- struct nv50_disp_root *root, int chid,
+ struct nv50_disp_root *root, int ctrl, int user,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
} else
return ret;
- return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
head, oclass, pobject);
}
@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
.base.maxver = 0,
.ctor = nv50_disp_oimm_new,
.func = &nv50_disp_pioc_func,
- .chid = 5,
+ .chid = { 5, 5 },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 3b7a9e7a1ea8..de36f73b14dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -31,7 +31,7 @@
#include <nvif/event.h>
int
-nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
+nvkm_output_dp_train(struct nvkm_output *base, u32 datarate)
{
struct nvkm_output_dp *outp = nvkm_output_dp(base);
bool retrain = true;
@@ -39,6 +39,8 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
u32 linkrate;
int ret, i;
+ mutex_lock(&outp->mutex);
+
/* check that the link is trained at a high enough rate */
ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
if (ret) {
@@ -88,19 +90,10 @@ done:
outp->dpcd[DPCD_RC02] =
outp->base.info.dpconf.link_nr;
}
- atomic_set(&outp->lt.done, 0);
- schedule_work(&outp->lt.work);
- } else {
- nvkm_notify_get(&outp->irq);
- }
-
- if (wait) {
- if (!wait_event_timeout(outp->lt.wait,
- atomic_read(&outp->lt.done),
- msecs_to_jiffies(2000)))
- ret = -ETIMEDOUT;
+ nvkm_dp_train(outp);
}
+ mutex_unlock(&outp->mutex);
return ret;
}
@@ -118,7 +111,7 @@ nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
sizeof(outp->dpcd))) {
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
return;
}
}
@@ -165,10 +158,10 @@ nvkm_output_dp_irq(struct nvkm_notify *notify)
};
OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
- nvkm_output_dp_train(&outp->base, 0, true);
+ nvkm_output_dp_train(&outp->base, 0);
nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
- return NVKM_NOTIFY_DROP;
+ return NVKM_NOTIFY_KEEP;
}
static void
@@ -177,7 +170,6 @@ nvkm_output_dp_fini(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->hpd);
nvkm_notify_put(&outp->irq);
- flush_work(&outp->lt.work);
nvkm_output_dp_enable(outp, false);
}
@@ -187,6 +179,7 @@ nvkm_output_dp_init(struct nvkm_output *base)
struct nvkm_output_dp *outp = nvkm_output_dp(base);
nvkm_notify_put(&outp->base.conn->hpd);
nvkm_output_dp_enable(outp, true);
+ nvkm_notify_get(&outp->irq);
nvkm_notify_get(&outp->hpd);
}
@@ -238,11 +231,6 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
outp->version, hdr, cnt, len);
- /* link training */
- INIT_WORK(&outp->lt.work, nvkm_dp_train);
- init_waitqueue_head(&outp->lt.wait);
- atomic_set(&outp->lt.done, 0);
-
/* link maintenance */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
&(struct nvkm_i2c_ntfy_req) {
@@ -257,6 +245,9 @@ nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
return ret;
}
+ mutex_init(&outp->mutex);
+ atomic_set(&outp->lt.done, 0);
+
/* hotplug detect, replaces gpio-based mechanism with aux events */
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
&(struct nvkm_i2c_ntfy_req) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 4e983f6d7032..3c83a561cd88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -29,10 +29,10 @@ struct nvkm_output_dp {
bool present;
u8 dpcd[16];
+ struct mutex mutex;
struct {
- struct work_struct work;
- wait_queue_head_t wait;
atomic_t done;
+ bool mst;
} lt;
};
@@ -41,9 +41,11 @@ struct nvkm_output_dp_func {
int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
int (*drv_ctl)(struct nvkm_output_dp *, int ln, int vs, int pe, int pc);
+ void (*vcpi)(struct nvkm_output_dp *, int head, u8 start_slot,
+ u8 num_slots, u16 pbn, u16 aligned_pbn);
};
-int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+int nvkm_output_dp_train(struct nvkm_output *, u32 rate);
int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
int index, struct dcb_output *, struct nvkm_i2c_aux *,
@@ -63,6 +65,7 @@ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
+void gf119_sor_dp_vcpi(struct nvkm_output_dp *, int, u8, u8, u16, u16);
int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index 97e2dd2d908e..589bd2f12b41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -27,12 +27,12 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gp104_disp_ovly_oclass = {
+gp102_disp_ovly_oclass = {
.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_ovly_new,
- .func = &gp104_disp_dmac_func,
+ .func = &gp102_disp_dmac_func,
.mthd = &gk104_disp_ovly_chan_mthd,
.chid = 5,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a9876e34..0abaa6431943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
/* disable error reporting and completion notification */
- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
}
static int
@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
/* enable error reporting */
- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
/* activate channel */
- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d init: %08x\n", chid,
- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d init: %08x\n", user,
+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618dacf20..0211e0e8a35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
}
@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
struct nv50_disp *disp = chan->root->disp;
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int chid = chan->chid;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 8443e04dc626..37122ca579ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -27,32 +27,32 @@
#include <nvif/class.h>
static const struct nv50_disp_root_func
-gp104_disp_root = {
+gp102_disp_root = {
.init = gf119_disp_root_init,
.fini = gf119_disp_root_fini,
.dmac = {
- &gp104_disp_core_oclass,
- &gp104_disp_base_oclass,
- &gp104_disp_ovly_oclass,
+ &gp102_disp_core_oclass,
+ &gp102_disp_base_oclass,
+ &gp102_disp_ovly_oclass,
},
.pioc = {
- &gk104_disp_oimm_oclass,
- &gk104_disp_curs_oclass,
+ &gp102_disp_oimm_oclass,
+ &gp102_disp_curs_oclass,
},
};
static int
-gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+gp102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
- return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ return nv50_disp_root_new_(&gp102_disp_root, disp, oclass,
data, size, pobject);
}
const struct nvkm_disp_oclass
-gp104_disp_root_oclass = {
- .base.oclass = GP104_DISP,
+gp102_disp_root_oclass = {
+ .base.oclass = GP102_DISP,
.base.minver = -1,
.base.maxver = -1,
- .ctor = gp104_disp_root_new,
+ .ctor = gp102_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd81d04..e70dc6a9ff7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -66,7 +66,7 @@ nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
return 0;
}
-int
+static int
nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
union {
@@ -173,13 +173,56 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return 0;
} else
if (args->v0.state != 0) {
- nvkm_output_dp_train(&outpdp->base, 0, true);
+ nvkm_output_dp_train(&outpdp->base, 0);
return 0;
}
} else
return ret;
}
break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_LINK: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_link_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst link size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst link vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (outpdp->lt.mst != !!args->v0.state) {
+ outpdp->lt.mst = !!args->v0.state;
+ atomic_set(&outpdp->lt.done, 0);
+ nvkm_output_dp_train(&outpdp->base, 0);
+ }
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_mst_vcpi_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+ nvif_ioctl(object, "disp sor dp mst vcpi size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(object, "disp sor dp mst vcpi vers %d "
+ "slot %02x/%02x pbn %04x/%04x\n",
+ args->v0.version, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ if (!outpdp->func->vcpi)
+ return -ENODEV;
+ outpdp->func->vcpi(outpdp, head, args->v0.start_slot,
+ args->v0.num_slots, args->v0.pbn,
+ args->v0.aligned_pbn);
+ return 0;
+ } else
+ return ret;
+ }
+ break;
case NV50_DISP_MTHD_V1_PIOR_PWR:
if (!func->pior.power)
return -ENODEV;
@@ -207,8 +250,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
{
const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
- oclass, data, size, pobject);
+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
+ sclass->chid.user, oclass, data, size, pobject);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index ad00f1724b72..b147cf5b3518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -41,5 +41,5 @@ extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
-extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d661e9b3..4510cb6e10a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
g94_sor_output_func = {
};
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
- struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
- return nvkm_output_new_(&g94_sor_output_func, disp,
- index, dcbE, poutp);
-}
-
/*******************************************************************************
* DisplayPort
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 49bd5da194e1..6ffdaa65aa77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -56,11 +56,13 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
clksor |= bw << 18;
dpctrl |= ((1 << nr) - 1) << 16;
+ if (outp->lt.mst)
+ dpctrl |= 0x40000000;
if (ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
- nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
@@ -101,12 +103,24 @@ gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
return 0;
}
+void
+gf119_sor_dp_vcpi(struct nvkm_output_dp *outp, int head, u8 slot,
+ u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+ nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
+}
+
static const struct nvkm_output_dp_func
gf119_sor_dp_func = {
.pattern = gf119_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 37790b2617c5..4cf8ad4d18ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -43,6 +43,7 @@ gm107_sor_dp_func = {
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gf119_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index c44fa7ea672a..81b788fa61be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -120,6 +120,7 @@ gm200_sor_dp_func = {
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
+ .vcpi = gf119_sor_dp_vcpi,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index aeb3387a3fb0..15a992b3580a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -129,7 +129,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
}
-int
+static int
g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -170,7 +170,7 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}
-int
+static int
g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 352a0baec84d..ec68ea9747d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -180,7 +180,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
list_del_init(&chan->head);
chan->killed = true;
- fifo->recover.mask |= 1ULL << engine->subdev.index;
+ if (engine != &fifo->base.engine)
+ fifo->recover.mask |= 1ULL << engine->subdev.index;
schedule_work(&fifo->recover.work);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 103c0afaaa6d..38c0910722c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -743,14 +743,14 @@ gk104_fifo_fault_engine[] = {
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
- { 0x07, "HOST0" },
- { 0x08, "HOST1" },
- { 0x09, "HOST2" },
- { 0x0a, "HOST3" },
- { 0x0b, "HOST4" },
- { 0x0c, "HOST5" },
- { 0x0d, "HOST6" },
- { 0x0e, "HOST7" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
{ 0x0f, "HOSTSR" },
{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index bd1ff877aa06..29c080683b32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -32,14 +32,14 @@ gm107_fifo_fault_engine[] = {
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
- { 0x07, "HOST0" },
- { 0x08, "HOST1" },
- { 0x09, "HOST2" },
- { 0x0a, "HOST3" },
- { 0x0b, "HOST4" },
- { 0x0c, "HOST5" },
- { 0x0d, "HOST6" },
- { 0x0e, "HOST7" },
+ { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
{ 0x0f, "HOSTSR" },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index eff83f7fb705..b2635aea9f6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -30,17 +30,17 @@ gp100_fifo_fault_engine[] = {
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
- { 0x06, "HOST0" },
- { 0x07, "HOST1" },
- { 0x08, "HOST2" },
- { 0x09, "HOST3" },
- { 0x0a, "HOST4" },
- { 0x0b, "HOST5" },
- { 0x0c, "HOST6" },
- { 0x0d, "HOST7" },
- { 0x0e, "HOST8" },
- { 0x0f, "HOST9" },
- { 0x10, "HOST10" },
+ { 0x06, "HOST0", NULL, NVKM_ENGINE_FIFO },
+ { 0x07, "HOST1", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "HOST2", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "HOST3", NULL, NVKM_ENGINE_FIFO },
+ { 0x0a, "HOST4", NULL, NVKM_ENGINE_FIFO },
+ { 0x0b, "HOST5", NULL, NVKM_ENGINE_FIFO },
+ { 0x0c, "HOST6", NULL, NVKM_ENGINE_FIFO },
+ { 0x0d, "HOST7", NULL, NVKM_ENGINE_FIFO },
+ { 0x0e, "HOST8", NULL, NVKM_ENGINE_FIFO },
+ { 0x0f, "HOST9", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "HOST10", NULL, NVKM_ENGINE_FIFO },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index cbc67f262322..12d964260a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -60,6 +60,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -67,10 +68,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, chan->base.object.client->name);
- ret = -EBUSY;
- if (suspend)
- return ret;
+ ret = -ETIMEDOUT;
}
+ mutex_unlock(&subdev->mutex);
+
+ if (ret && suspend)
+ return ret;
if (offset) {
nvkm_kmap(inst);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index ed4351032ed6..a2df4f3e7763 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -40,7 +40,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_client *client = chan->base.object.client;
+ int ret = 0;
+ mutex_lock(&subdev->mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
@@ -48,10 +50,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, client->name);
- return -EBUSY;
+ ret = -ETIMEDOUT;
}
-
- return 0;
+ mutex_unlock(&subdev->mutex);
+ return ret;
}
static u32
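Both channel-kick hunks above (gpfifogf100.c and gpfifogk104.c) follow the same pattern: the write to 0x002634 and the completion poll are now serialized behind the subdev mutex, and a timeout is reported as -ETIMEDOUT instead of -EBUSY so the caller decides whether to abort. A minimal sketch of that pattern, using the gk104 poll condition and only helpers visible in the diff; the function name is invented for illustration and is not part of the patch:

/* Illustration only: serialized channel kick with timeout reporting. */
static int
kick_channel_serialized(struct nvkm_subdev *subdev, u32 chid)
{
	struct nvkm_device *device = subdev->device;
	int ret = 0;

	mutex_lock(&subdev->mutex);
	nvkm_wr32(device, 0x002634, chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0)
		ret = -ETIMEDOUT;	/* caller chooses whether to bail out */
	mutex_unlock(&subdev->mutex);
	return ret;
}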
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index c925ade5880e..74a64e3fd59a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -218,7 +218,7 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 6d3c5011e18c..4c4b5ab6e46d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -933,7 +933,7 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
}
}
-void
+static void
gm107_grctx_generate_tpcid(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 1e13278cf306..c8bb9191f9a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -106,6 +106,7 @@
#define CP_SEEK_2 0x00c800ff
#include "ctxnv40.h"
+#include "nv50.h"
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 8cb240b65ec2..12a703fe355d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grgpc_data[] = {
+static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
/* 0x0004: gpc_mmio_list_tail */
@@ -36,7 +36,7 @@ uint32_t gf100_grgpc_data[] = {
0x00000000,
};
-uint32_t gf100_grgpc_code[] = {
+static uint32_t gf100_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 550d6ba0933b..ffbfc51200f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grgpc_data[] = {
+static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gf117_grgpc_data[] = {
0x00000000,
};
-uint32_t gf117_grgpc_code[] = {
+static uint32_t gf117_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 271b59d365e5..357f662de571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grgpc_data[] = {
+static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk104_grgpc_data[] = {
0x00000000,
};
-uint32_t gk104_grgpc_code[] = {
+static uint32_t gk104_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 73b4a32c5d29..4ffc8212a85c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grgpc_data[] = {
+static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk110_grgpc_data[] = {
0x00000000,
};
-uint32_t gk110_grgpc_code[] = {
+static uint32_t gk110_grgpc_code[] = {
0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 018169818317..09196206c9bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grgpc_data[] = {
+static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gk208_grgpc_data[] = {
0x00000000,
};
-uint32_t gk208_grgpc_code[] = {
+static uint32_t gk208_grgpc_code[] = {
0x03140ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index eca007f03fa9..6d7d004363d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grgpc_data[] = {
+static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
/* 0x0004: gpc_mmio_list_tail */
@@ -40,7 +40,7 @@ uint32_t gm107_grgpc_data[] = {
0x00000000,
};
-uint32_t gm107_grgpc_code[] = {
+static uint32_t gm107_grgpc_code[] = {
0x03410ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 8015b40a61d6..7538404b8b13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_grhub_data[] = {
+static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf100_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf100_grhub_code[] = {
+static uint32_t gf100_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 2af90ec6852a..ce000a47ec6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf117_grhub_data[] = {
+static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gf117_grhub_data[] = {
0x0417e91c,
};
-uint32_t gf117_grhub_code[] = {
+static uint32_t gf117_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index e8b8c1c94700..1f26cb6a233c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk104_grhub_data[] = {
+static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk104_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk104_grhub_code[] = {
+static uint32_t gk104_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index f4ed2fb6f714..70436d93efe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gk110_grhub_data[] = {
+static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk110_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk110_grhub_code[] = {
+static uint32_t gk110_grhub_code[] = {
0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index ed488973c117..e0933a07426a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_grhub_data[] = {
+static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gk208_grhub_data[] = {
0x0417e91c,
};
-uint32_t gk208_grhub_code[] = {
+static uint32_t gk208_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5c9051839557..9b432823bcbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gm107_grhub_data[] = {
+static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
/* 0x0004: hub_mmio_list_tail */
@@ -205,7 +205,7 @@ uint32_t gm107_grhub_data[] = {
0x0417e91c,
};
-uint32_t gm107_grhub_code[] = {
+static uint32_t gm107_grhub_code[] = {
0x030e0ef5,
/* 0x0004: queue_put */
0x9800d898,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 157919c788e6..f65a5b0a1a4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1041,6 +1041,13 @@ gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
stat &= ~0x00000008;
}
+ if (stat & 0x00000010) {
+ u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0430));
+ nvkm_error(subdev, "GPC%d/TPC%d/MPC: %08x\n", gpc, tpc, trap);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0430), 0xc0000000);
+ stat &= ~0x00000010;
+ }
+
if (stat) {
nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
}
@@ -1258,7 +1265,7 @@ gf100_gr_ctxctl_isr(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x409c18);
- if (stat & 0x00000001) {
+ if (!gr->firmware && (stat & 0x00000001)) {
u32 code = nvkm_rd32(device, 0x409814);
if (code == E_BAD_FWMTHD) {
u32 class = nvkm_rd32(device, 0x409808);
@@ -1270,15 +1277,14 @@ gf100_gr_ctxctl_isr(struct gf100_gr *gr)
nvkm_error(subdev, "FECS MTHD subc %d class %04x "
"mthd %04x data %08x\n",
subc, class, mthd, data);
-
- nvkm_wr32(device, 0x409c20, 0x00000001);
- stat &= ~0x00000001;
} else {
nvkm_error(subdev, "FECS ucode error %d\n", code);
}
+ nvkm_wr32(device, 0x409c20, 0x00000001);
+ stat &= ~0x00000001;
}
- if (stat & 0x00080000) {
+ if (!gr->firmware && (stat & 0x00080000)) {
nvkm_error(subdev, "FECS watchdog timeout\n");
gf100_gr_ctxctl_debug(gr);
nvkm_wr32(device, 0x409c20, 0x00080000);
@@ -1384,7 +1390,7 @@ gf100_gr_intr(struct nvkm_gr *base)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
+static void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
@@ -1701,7 +1707,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
return 0;
}
-int
+static int
gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -1756,6 +1762,50 @@ gf100_gr_ = {
};
int
+gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
+ struct gf100_gr_fuc *fuc, int ret)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+
+ /* see if this firmware has a legacy path */
+ if (!strcmp(fwname, "fecs_inst"))
+ fwname = "fuc409c";
+ else if (!strcmp(fwname, "fecs_data"))
+ fwname = "fuc409d";
+ else if (!strcmp(fwname, "gpccs_inst"))
+ fwname = "fuc41ac";
+ else if (!strcmp(fwname, "gpccs_data"))
+ fwname = "fuc41ad";
+ else {
+ /* nope, let's just return the error we got */
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+
+ /* yes, try to load from the legacy path */
+ nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
@@ -1765,10 +1815,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
+ if (ret)
+ return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 70335f65c51e..0124e468086e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -102,7 +102,7 @@ gf117_gr_pack_mmio[] = {
#include "fuc/hubgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_fecs_ucode = {
.code.data = gf117_grhub_code,
.code.size = sizeof(gf117_grhub_code),
@@ -112,7 +112,7 @@ gf117_gr_fecs_ucode = {
#include "fuc/gpcgf117.fuc3.h"
-struct gf100_gr_ucode
+static struct gf100_gr_ucode
gf117_gr_gpccs_ucode = {
.code.data = gf117_grgpc_code,
.code.size = sizeof(gf117_grgpc_code),
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 45f965f608a7..2c67fac576d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -308,7 +308,7 @@ gm107_gr_init_bios(struct gf100_gr *gr)
}
}
-int
+static int
gm107_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4d4f64..b4e3c50badc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed5de0b..e7ed04b935cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f52c38..5e8abacbacc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 8616636ad7b4..dde89a4a0f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -71,7 +71,7 @@ nvkm_perfdom_find(struct nvkm_pm *pm, int di)
return NULL;
}
-struct nvkm_perfsig *
+static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
struct nvkm_perfdom *dom = *pdom;
@@ -699,7 +699,7 @@ nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
return 1;
}
-int
+static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
const struct nvkm_specsrc *spec)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index d2901e9a7808..fe2532ee4145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -102,7 +102,7 @@ gf100_pm_gpc[] = {
{}
};
-const struct nvkm_specdom
+static const struct nvkm_specdom
gf100_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index eca62221f299..4b57f8814560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_sec_data[] = {
+static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_sec_data[] = {
0x00000000,
};
-uint32_t g98_sec_code[] = {
+static uint32_t g98_sec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8ff7b5..6eff637ac301 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef21587..be57220a2e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o
nvkm-y += nvkm/subdev/bios/therm.o
nvkm-y += nvkm/subdev/bios/vmap.o
nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
nvkm-y += nvkm/subdev/bios/xpio.o
nvkm-y += nvkm/subdev/bios/M0203.o
nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 3756ec91a88d..eaf74eb72983 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>
-u16
+u32
nvbios_boostTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 boost = 0x0000;
+ u32 boost = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- boost = nvbios_rd16(bios, bit_P.offset + 0x30);
+ boost = nvbios_rd32(bios, bit_P.offset + 0x30);
if (boost) {
*ver = nvbios_rd08(bios, boost + 0);
@@ -52,15 +52,15 @@ nvbios_boostTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (data && idx < *cnt) {
data = data + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -68,14 +68,14 @@ nvbios_boostEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
- u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+ u32 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -85,7 +85,7 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
return data;
}
-u16
+u32
nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
@@ -97,21 +97,21 @@ nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
return data;
}
-u16
+u32
nvbios_boostSe(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_boostSp(struct nvkm_bios *bios, int idx,
- u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+ u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
struct nvbios_boostS *info)
{
data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
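The boost table lookups above widen the BIT 'P' pointers from u16 to u32, and the cstep, fan, iccsense, perf, therm, timing, vmap and volt parsers below repeat the same change; presumably the table offsets can sit above the 64 KiB mark on newer VBIOS images, where a 16-bit pointer would truncate them. A hypothetical helper condensing the widened read (offset 0x30 is the boost pointer in BIT 'P' v2, as in the hunk above; the helper itself is not part of the patch):

/* Hypothetical helper; illustration of the u16 -> u32 widening only. */
static u32
bit_p_pointer(struct nvkm_bios *bios, u32 off)
{
	struct bit_entry bit_P;

	if (!bit_entry(bios, 'P', &bit_P) && bit_P.version == 2)
		return nvbios_rd32(bios, bit_P.offset + off); /* was nvbios_rd16() */
	return 0;
}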
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 32e01624a162..5063382d8a6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -25,16 +25,16 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/cstep.h>
-u16
+u32
nvbios_cstepTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
- u16 cstep = 0x0000;
+ u32 cstep = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
+ cstep = nvbios_rd32(bios, bit_P.offset + 0x34);
if (cstep) {
*ver = nvbios_rd08(bios, cstep + 0);
@@ -52,27 +52,27 @@ nvbios_cstepTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
- u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepEe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
@@ -81,7 +81,7 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
struct nvbios_cstepE *info)
{
@@ -93,24 +93,24 @@ nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
return data;
}
-u16
+u32
nvbios_cstepXe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len, xnr, xsz;
- u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+ u32 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
if (data && idx < xnr) {
data = data + *hdr + (cnt * len) + (idx * xsz);
*hdr = xsz;
return data;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_cstepX *info)
{
- u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+ u32 data = nvbios_cstepXe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
info->freq = nvbios_rd16(bios, data + 0x00) * 1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index d89e78c4e689..972370ed36f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -207,8 +207,11 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
if (*ver >= 0x30) {
const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
- if (*ver >= 0x40 && *hdr >= 0x12)
+ if (*ver >= 0x40 && *ver <= 0x41 && *hdr >= 0x12)
idx += nvbios_rd08(bios, outp + 0x11) * 40;
+ else
+ if (*ver >= 0x42)
+ idx += nvbios_rd08(bios, outp + 0x11) * 10;
} else {
while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
ver, hdr, cnt, len))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 80fed7e78dcb..456f9ea920dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
-u16
+static u32
nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 fan = 0x0000;
+ u32 fan = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length >= 0x5a)
- fan = nvbios_rd16(bios, bit_P.offset + 0x58);
+ fan = nvbios_rd32(bios, bit_P.offset + 0x58);
if (fan) {
*ver = nvbios_rd08(bios, fan + 0);
@@ -49,25 +49,25 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+static u32
nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len)
{
- u16 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
+ u32 data = nvbios_fan_table(bios, ver, hdr, cnt, len);
if (data && idx < *cnt)
return data + *hdr + (idx * (*len));
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
u8 ver, hdr, cnt, len;
- u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
+ u32 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
if (data) {
u8 type = nvbios_rd08(bios, data + 0x00);
switch (type) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 084328028af1..3953d11844ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,20 +23,21 @@
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
-static u16
+static u32
nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
u8 *len)
{
struct bit_entry bit_P;
- u16 iccsense;
+ u32 iccsense;
if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
bit_P.length < 0x2c)
return 0;
- iccsense = nvbios_rd16(bios, bit_P.offset + 0x28);
+ iccsense = nvbios_rd32(bios, bit_P.offset + 0x28);
if (!iccsense)
return 0;
@@ -60,7 +61,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
{
struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, hdr, cnt, len, i;
- u16 table, entry;
+ u32 table, entry;
table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
if (!table || !cnt)
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
return -ENOMEM;
for (i = 0; i < cnt; ++i) {
+ struct nvbios_extdev_func extdev;
struct pwr_rail_t *rail = &iccsense->rail[i];
+ u8 res_start = 0;
+ int r;
+
entry = table + hdr + i * len;
switch(ver) {
case 0x10:
rail->mode = nvbios_rd08(bios, entry + 0x1);
rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
- rail->rail = nvbios_rd08(bios, entry + 0x4);
+ res_start = 0x3;
break;
case 0x20:
rail->mode = nvbios_rd08(bios, entry);
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
- rail->rail = nvbios_rd08(bios, entry + 0x6);
+ res_start = 0x5;
+ break;
+ };
+
+ if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+ continue;
+
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ rail->resistor_count = 1;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->resistor_count = 3;
+ break;
+ default:
+ rail->resistor_count = 0;
break;
};
+
+ for (r = 0; r < rail->resistor_count; ++r) {
+ rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+ rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+ }
+ rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index 3ddf0939ded3..994cc2d7759b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -81,7 +81,7 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
u16 map = nvbios_rd16(bios, mxm + 4);
if (map) {
ver = nvbios_rd08(bios, map);
- if (ver == 0x10) {
+ if (ver == 0x10 || ver == 0x11) {
if (conn < nvbios_rd08(bios, map + 3)) {
map += nvbios_rd08(bios, map + 1);
map += conn;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 636bfb665bb9..c3068358f695 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -26,16 +26,16 @@
#include <subdev/bios/perf.h>
#include <subdev/pci.h>
-u16
+u32
nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 perf = 0x0000;
+ u32 perf = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version <= 2) {
- perf = nvbios_rd16(bios, bit_P.offset + 0);
+ perf = nvbios_rd32(bios, bit_P.offset + 0);
if (perf) {
*ver = nvbios_rd08(bios, perf + 0);
*hdr = nvbios_rd08(bios, perf + 1);
@@ -72,15 +72,15 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perf_entry(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
if (perf && idx < *cnt) {
perf = perf + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -88,14 +88,14 @@ nvbios_perf_entry(struct nvkm_bios *bios, int idx,
*len = ssz;
return perf;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_perfEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *info)
{
- u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+ u32 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
info->pstate = nvbios_rd08(bios, perf + 0x00);
switch (!!perf * *ver) {
@@ -163,7 +163,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
info->pcie_width = 0xff;
break;
default:
- return 0x0000;
+ return 0;
}
return perf;
}
@@ -202,7 +202,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
struct nvbios_perf_fan *fan)
{
u8 ver, hdr, cnt, len, snr, ssz;
- u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ u32 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!perf)
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 212800ecdce9..7d1d3c6b4b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -12,6 +12,7 @@ struct nvbios_source {
bool rw;
bool ignore_checksum;
bool no_pcir;
+ bool require_checksum;
};
int nvbios_extend(struct nvkm_bios *, u32 length);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index b2557e87afdd..7deb81b6dbac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvbios_checksum(&bios->data[image.base], image.size)) {
nvkm_debug(subdev, "%08x: checksum failed\n",
image.base);
- if (mthd->func->rw)
+ if (!mthd->func->require_checksum) {
+ if (mthd->func->rw)
+ score += 1;
score += 1;
- score += 1;
+ } else
+ return 0;
} else {
score += 3;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 8fecb5ff22a0..06572f8ce914 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
+ .require_checksum = true,
};
const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index a54cfec0550d..5babc5a7c7d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,17 +25,17 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
-static u16
+static u32
therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
struct bit_entry bit_P;
- u16 therm = 0;
+ u32 therm = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- therm = nvbios_rd16(bios, bit_P.offset + 12);
+ therm = nvbios_rd32(bios, bit_P.offset + 12);
else if (bit_P.version == 2)
- therm = nvbios_rd16(bios, bit_P.offset + 16);
+ therm = nvbios_rd32(bios, bit_P.offset + 16);
else
nvkm_error(&bios->subdev,
"unknown offset for thermal in BIT P %d\n",
@@ -44,7 +44,7 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
/* exit now if we haven't found the thermal table */
if (!therm)
- return 0x0000;
+ return 0;
*ver = nvbios_rd08(bios, therm + 0);
*hdr = nvbios_rd08(bios, therm + 1);
@@ -53,14 +53,14 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return therm + nvbios_rd08(bios, therm + 1);
}
-static u16
+static u32
nvbios_therm_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
+ u32 therm = therm_table(bios, ver, &hdr, len, &cnt);
if (therm && idx < cnt)
return therm + idx * *len;
- return 0x0000;
+ return 0;
}
int
@@ -70,7 +70,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
{
s8 thrs_section, sensor_section, offset;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
/* we only support the core domain for now */
if (domain != NVBIOS_THERM_DOMAIN_CORE)
@@ -154,7 +154,7 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
struct nvbios_therm_trip_point *cur_trip = NULL;
u8 ver, len, i;
- u16 entry;
+ u32 entry;
uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
75, 0, 85, 0, 100, 0, 100, 0 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 99f6432ac0af..7e83c3985020 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -25,19 +25,19 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/timing.h>
-u16
+u32
nvbios_timingTe(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 timing = 0x0000;
+ u32 timing = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- timing = nvbios_rd16(bios, bit_P.offset + 4);
+ timing = nvbios_rd32(bios, bit_P.offset + 4);
else
if (bit_P.version == 2)
- timing = nvbios_rd16(bios, bit_P.offset + 8);
+ timing = nvbios_rd32(bios, bit_P.offset + 8);
if (timing) {
*ver = nvbios_rd08(bios, timing + 0);
@@ -62,15 +62,15 @@ nvbios_timingTe(struct nvkm_bios *bios,
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (timing && idx < *cnt) {
timing += *hdr + idx * (*len + (snr * ssz));
*hdr = *len;
@@ -78,14 +78,14 @@ nvbios_timingEe(struct nvkm_bios *bios, int idx,
*len = ssz;
return timing;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_timingEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
- u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+ u32 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
p->timing_ver = *ver;
p->timing_hdr = *hdr;
switch (!!data * *ver) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db745948..c228ca15fa3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -25,15 +25,15 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/vmap.h>
-u16
+u32
nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 vmap = 0x0000;
+ u32 vmap = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2) {
- vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
+ vmap = nvbios_rd32(bios, bit_P.offset + 0x20);
if (vmap) {
*ver = nvbios_rd08(bios, vmap + 0);
switch (*ver) {
@@ -50,40 +50,50 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *info)
{
- u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
+ info->max0 = 0xff;
+ info->max1 = 0xff;
+ info->max2 = 0xff;
+ break;
case 0x20:
+ info->max0 = nvbios_rd08(bios, vmap + 0x7);
+ info->max1 = nvbios_rd08(bios, vmap + 0x8);
+ if (*len >= 0xc)
+ info->max2 = nvbios_rd08(bios, vmap + 0xc);
+ else
+ info->max2 = 0xff;
break;
}
return vmap;
}
-u16
+u32
nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+ u32 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
if (vmap && idx < cnt) {
vmap = vmap + hdr + (idx * *len);
return vmap;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_vmap_entry *info)
{
- u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+ u32 vmap = nvbios_vmap_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->mode = nvbios_rd08(bios, vmap + 0x00);
info->link = nvbios_rd08(bios, vmap + 0x01);
info->min = nvbios_rd32(bios, vmap + 0x02);
info->max = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a33648be9..a7797a9e9cbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -25,18 +25,18 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/volt.h>
-u16
+u32
nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
- u16 volt = 0x0000;
+ u32 volt = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x0c);
else
if (bit_P.version == 1)
- volt = nvbios_rd16(bios, bit_P.offset + 0x10);
+ volt = nvbios_rd32(bios, bit_P.offset + 0x10);
if (volt) {
*ver = nvbios_rd08(bios, volt + 0);
@@ -62,33 +62,37 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *info)
{
- u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
+ info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+ info->ranged = true; /* XXX: find the flag byte */
/*XXX*/
info->min = 0;
info->max = info->base;
@@ -104,32 +108,34 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
- info->type = NVBIOS_VOLT_GPIO;
- info->vidmask = nvbios_rd08(bios, volt + 0x06);
- info->step = nvbios_rd16(bios, volt + 0x16);
+ info->type = NVBIOS_VOLT_GPIO;
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->step = nvbios_rd16(bios, volt + 0x16);
+ info->ranged =
+ !!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
return volt;
}
-u16
+u32
nvbios_volt_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
- u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+ u32 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
if (volt && idx < cnt) {
volt = volt + hdr + (idx * *len);
return volt;
}
- return 0x0000;
+ return 0;
}
-u16
+u32
nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *info)
{
- u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+ u32 volt = nvbios_volt_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
+ break;
case 0x50:
+ info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+ info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 000000000000..f199270163d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+ struct bit_entry bit_P;
+
+ if (!bit_entry(b, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ return nvbios_rd32(b, bit_P.offset + 0x38);
+ }
+
+ return 0x0000;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+ if (!h)
+ return -EINVAL;
+
+ h->offset = nvbios_vpstate_offset(b);
+ if (!h->offset)
+ return -ENODEV;
+
+ h->version = nvbios_rd08(b, h->offset);
+ switch (h->version) {
+ case 0x10:
+ h->hlen = nvbios_rd08(b, h->offset + 0x1);
+ h->elen = nvbios_rd08(b, h->offset + 0x2);
+ h->slen = nvbios_rd08(b, h->offset + 0x3);
+ h->scount = nvbios_rd08(b, h->offset + 0x4);
+ h->ecount = nvbios_rd08(b, h->offset + 0x5);
+
+ h->base_id = nvbios_rd08(b, h->offset + 0x0f);
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+ u8 idx, struct nvbios_vpstate_entry *e)
+{
+ u32 offset;
+
+ if (!e || !h || idx > h->ecount)
+ return -EINVAL;
+
+ offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+ e->pstate = nvbios_rd08(b, offset);
+ e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+ return 0;
+}
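The new vpstate.c above only defines the parser; a hypothetical caller, assuming nothing beyond the two functions and the fields shown in this file, might iterate the entries like this:

/* Hypothetical usage sketch; not part of the patch. */
static void
dump_vpstates(struct nvkm_bios *bios)
{
	struct nvbios_vpstate_header h;
	struct nvbios_vpstate_entry e;
	u8 i;

	if (nvbios_vpstate_parse(bios, &h))
		return;		/* no version 0x10 vpstate table found */

	for (i = 0; i < h.ecount; i++) {
		if (!nvbios_vpstate_entry(bios, &h, i, &e))
			nvkm_debug(&bios->subdev, "vpstate %02x: %u MHz\n",
				   e.pstate, e.clock_mhz);
	}
}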
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25320fc..e4c8d310d870 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
@@ -43,13 +44,13 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
struct nvkm_bios *bios = clk->subdev.device->bios;
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
if (data) {
struct nvbios_boostS boostS;
u8 idx = 0, sver, shdr;
- u16 subd;
+ u32 subd;
input = max(boostE.min, input);
input = min(boostE.max, input);
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
/******************************************************************************
* C-States
*****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+ u32 max_volt, int temp)
+{
+ const struct nvkm_domain *domain = clk->domains;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
+ int voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+ u32 freq = cstate->domain[domain->name];
+ switch (clk->boost_mode) {
+ case NVKM_CLK_BOOST_NONE:
+ if (clk->base_khz && freq > clk->base_khz)
+ return false;
+ case NVKM_CLK_BOOST_BIOS:
+ if (clk->boost_khz && freq > clk->boost_khz)
+ return false;
+ }
+ }
+ domain++;
+ }
+
+ if (!volt)
+ return true;
+
+ voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+ if (voltage < 0)
+ return false;
+ return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+ struct nvkm_cstate *start)
+{
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_volt *volt = device->volt;
+ struct nvkm_cstate *cstate;
+ int max_volt;
+
+ if (!pstate || !start)
+ return NULL;
+
+ if (!volt)
+ return start;
+
+ max_volt = volt->max_uv;
+ if (volt->max0_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max0_id, clk->temp));
+ if (volt->max1_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max1_id, clk->temp));
+ if (volt->max2_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
+ for (cstate = start; &cstate->head != &pstate->list;
+ cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+ if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+ break;
+ }
+
+ return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+ struct nvkm_cstate *cstate;
+ if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+ return list_last_entry(&pstate->list, typeof(*cstate), head);
+ else {
+ list_for_each_entry(cstate, &pstate->list, head) {
+ if (cstate->id == cstatei)
+ return cstate;
+ }
+ }
+ return NULL;
+}
+
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
int ret;
if (!list_empty(&pstate->list)) {
- cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ cstate = nvkm_cstate_get(clk, pstate, cstatei);
+ cstate = nvkm_cstate_find_best(clk, pstate, cstate);
} else {
cstate = &pstate->base;
}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, +1);
if (ret && ret != -ENODEV) {
nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, -1);
if (ret && ret != -ENODEV)
nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
@@ -138,22 +224,27 @@ static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
struct nvkm_bios *bios = clk->subdev.device->bios;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
- u16 data;
+ u32 data;
data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
if (!data)
return -ENOENT;
+ if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+ return -EINVAL;
+
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate)
return -ENOMEM;
*cstate = pstate->base;
cstate->voltage = cstepX.voltage;
+ cstate->id = idx;
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_subdev *subdev = &clk->subdev;
- struct nvkm_ram *ram = subdev->device->fb->ram;
+ struct nvkm_fb *fb = subdev->device->fb;
struct nvkm_pci *pci = subdev->device->pci;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
- if (ram && ram->func->calc) {
+ if (fb && fb->ram && fb->ram->func->calc) {
+ struct nvkm_ram *ram = fb->ram;
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
ram->func->tidy(ram);
}
- return nvkm_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ clk->astate, clk->temp, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
@@ -316,7 +408,7 @@ nvkm_pstate_new(struct nvkm_clk *clk, int idx)
struct nvbios_cstepE cstepE;
struct nvbios_perfE perfE;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
if (!data)
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
}
int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
- if (!rel) clk->tstate = req;
- if ( rel) clk->tstate += rel;
- clk->tstate = min(clk->tstate, 0);
- clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nvkm_pstate_calc(clk, true);
+ if (clk->temp == temp)
+ return 0;
+ clk->temp = temp;
+ return nvkm_pstate_calc(clk, false);
}
int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
return clk->func->init(clk);
clk->astate = clk->state_nr - 1;
- clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
+ clk->temp = 90; /* reasonable default value */
nvkm_pstate_calc(clk, true);
return 0;
}
@@ -561,10 +652,22 @@ int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int index, bool allow_reclock, struct nvkm_clk *clk)
{
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_bios *bios = device->bios;
int ret, idx, arglen;
const char *mode;
+ struct nvbios_vpstate_header h;
+
+ nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+ if (bios && !nvbios_vpstate_parse(bios, &h)) {
+ struct nvbios_vpstate_entry base, boost;
+ if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+ clk->boost_khz = boost.clock_mhz * 1000;
+ if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+ clk->base_khz = base.clock_mhz * 1000;
+ }
- nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
if (mode)
clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
+ clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+ NVKM_CLK_BOOST_NONE);
return 0;
}
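
The clock rework wires those limits into c-state selection: nvkm_cstate_find_best walks backwards from the requested c-state until one fits the voltage budget, and nvkm_cstate_valid rejects clocks on VPSTATE-flagged domains according to the "NvBoost" option read from the driver config string (0 clamps to the base clock, 1 to the boost clock, anything higher leaves the domain unclamped). Below is a stand-alone C model of that clamp; the first two enum values mirror NVKM_CLK_BOOST_NONE/BIOS, the third mode name and all frequencies are invented.

/* Stand-alone model of the NvBoost frequency clamp (values are illustrative). */
#include <stdbool.h>
#include <stdio.h>

enum boost_mode { BOOST_NONE = 0, BOOST_BIOS = 1, BOOST_FULL = 2 };

static bool freq_allowed(enum boost_mode mode, unsigned freq_khz,
			 unsigned base_khz, unsigned boost_khz)
{
	switch (mode) {
	case BOOST_NONE:
		if (base_khz && freq_khz > base_khz)
			return false;
		/* fall through: the base mode also honours the boost limit */
	case BOOST_BIOS:
		if (boost_khz && freq_khz > boost_khz)
			return false;
		break;
	default:
		break;
	}
	return true;
}

int main(void)
{
	unsigned base = 1084000, boost = 1163000;	/* kHz, made up */
	printf("%d %d %d\n",
	       freq_allowed(BOOST_NONE, 1100000, base, boost),   /* 0: above base  */
	       freq_allowed(BOOST_BIOS, 1100000, base, boost),   /* 1: below boost */
	       freq_allowed(BOOST_FULL, 1300000, base, boost));  /* 1: unclamped   */
	return 0;
}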
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543118cf..7f67f9f5a550 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
{ nv_clk_src_hubk06 , 0x00 },
{ nv_clk_src_hubk01 , 0x01 },
{ nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_rop , 0x04 },
{ nv_clk_src_mem , 0x05, 0, "memory", 1000 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2d6ae1..0b37e3da7feb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_mem , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 056702ef69aa..96e0941c8edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,7 @@ gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
return 0;
}
-int
+static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0db8a08..1730371933df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pmu.h>
+#include <subdev/timer.h>
static void
pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
@@ -123,21 +124,13 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
- /* reset PMU and load init table parser ucode */
- if (post) {
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
- while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
- }
- }
-
ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret)
return ret;
/* upload first chunk of init data */
if (post) {
+ // devinit tables
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
@@ -146,6 +139,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
/* upload second chunk of init data */
if (post) {
+ // devinit boot scripts
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
@@ -156,8 +150,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
- while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
- }
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a040) & 0x00002000)
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
}
/* load and execute some other ucode image (bios therm?) */
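
The devinit change drops the open-coded PMU reset and scrub wait (that sequence moves into the PMU subdev's preinit further down in this patch) and converts the remaining unbounded wait on 0x10a040 into a bounded poll that fails with -ETIMEDOUT after 2ms. Below is a stand-alone C model of the bounded-poll pattern, with a fake register read standing in for the MMIO access and a monotonic-clock deadline standing in for nvkm_msec.

/* Stand-alone model of a bounded register poll (nvkm_msec stand-in). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg;				/* pretend MMIO register */

static uint32_t rd32(void) { return ++fake_reg; }	/* "completes" eventually */

/* Poll until (rd32() & mask) is set, or timeout_ms elapses. */
static int poll_mask(uint32_t mask, unsigned timeout_ms)
{
	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (rd32() & mask)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= (long)timeout_ms)
			return -1;			/* would be -ETIMEDOUT */
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_mask(0x00002000, 2000));
	return 0;
}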
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index edcc157e6ac8..63566ba12fbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,8 +24,9 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
-nvkm-y += nvkm/subdev/fb/gp104.o
+nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
}
int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
{
- struct nvkm_device *device = fb->subdev.device;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 0x1000;
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_rd);
+ false, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_wr);
+ false, &fb->base.mmu_wr);
if (ret)
return ret;
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_fb_ctor(func, device, index, &fb->base);
*pfb = &fb->base;
- fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 449f431644b3..412eb89834e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -16,4 +16,8 @@ void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
+
+void gm200_fb_init_page(struct nvkm_fb *fb);
+void gm200_fb_init(struct nvkm_fb *base);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index f815fe2bbf08..5d34d6136616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,27 +20,21 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include "gf100.h"
-#include <core/memory.h>
-
-static void
-gk20a_fb_init(struct nvkm_fb *fb)
-{
- struct nvkm_device *device = fb->subdev.device;
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
-}
-
+/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gk20a_fb = {
+ .dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
- .init = gk20a_fb_init,
+ .init = gf100_fb_init,
.init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
.memtype_valid = gf100_fb_memtype_valid,
};
int
gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 62f653240be3..fe5886013ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -44,7 +44,7 @@ gm200_fb_init_page(struct nvkm_fb *fb)
}
}
-static void
+void
gm200_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
new file mode 100644
index 000000000000..b87c233bcd6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "gf100.h"
+
+/* GM20B's FB is similar to GM200, but without the ability to allocate VRAM */
+static const struct nvkm_fb_func
+gm20b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 92cb71861bec..73b4ae1c73dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -27,7 +27,7 @@
#include <core/memory.h>
static const struct nvkm_fb_func
-gp104_fb = {
+gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
@@ -37,7 +37,7 @@ gp104_fb = {
};
int
-gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp104_fb, device, index, pfb);
+ return gf100_fb_new_(&gp102_fb, device, index, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
nvkm_fifo_chan_put(fifo, flags, &chan);
}
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static void
nv50_fb_init(struct nvkm_fb *base)
{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
fb->func = func;
*pfb = &fb->base;
- fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c08))
- return -EFAULT;
- } else {
- nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae6723a..b60068b7d8f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
int gk104_ram_init(struct nvkm_ram *ram);
/* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 772425ca5a9e..093223d1df4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -420,8 +420,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-extern const u8 gf100_pte_storage_type_map[256];
-
void
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade468ae..7904fa41acef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_nsec(fuc, 1000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
+ return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f6ca71..ac862d1d77bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
*/
#include "ram.h"
-static const struct nvkm_ram_func
-gm107_ram_func = {
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
-};
-
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
- return -ENOMEM;
-
- return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+ return gk104_ram_ctor(fb, pram, 0x021c14);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index f3be408b5e5e..405faabe8dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -92,13 +92,13 @@ gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
u32 fbio_opt = nvkm_rd32(device, 0x021c14);
u64 part, size = 0, comm = ~0ULL;
bool mixed = false;
int ret;
- nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
if (!(fbio_opt & (1 << fbpa))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index d15ea886df27..f10664372161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -95,7 +95,7 @@ struct gt215_ram {
struct gt215_ltrain ltrain;
};
-void
+static void
gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
@@ -149,7 +149,7 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
/*
* Link training for (at least) DDR3
*/
-int
+static int
gt215_link_train(struct gt215_ram *ram)
{
struct gt215_ltrain *train = &ram->ltrain;
@@ -267,7 +267,7 @@ out:
return ret;
}
-int
+static int
gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
@@ -333,7 +333,7 @@ gt215_link_train_init(struct gt215_ram *ram)
return 0;
}
-void
+static void
gt215_link_train_fini(struct gt215_ram *ram)
{
if (ram->ltrain.mem)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index b9f1ffdfc602..4dcd8742f2da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -23,6 +23,7 @@
* Ben Skeggs
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 26900333b1d6..eca8a445eab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -23,6 +23,7 @@
* Roy Spliet <rspliet@eclipso.eu>
*/
#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 3f45afd17d5a..2ead515b8530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -37,7 +37,7 @@ gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
nvkm_wr32(device, 0x00dc80, intr1);
}
-void
+static void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = gpio->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index f0851d57df2f..01d5c5a56e2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -74,7 +74,7 @@ nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm
+static const struct i2c_algorithm
nvkm_i2c_aux_i2c_algo = {
.master_xfer = nvkm_i2c_aux_i2c_xfer,
.functionality = nvkm_i2c_aux_i2c_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index 954f5b76bfcf..b80236a4eeac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -79,7 +79,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 61d729b82c69..ed458c7f056b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -79,7 +79,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
- u32 ctrl, stat, timeout, retries;
+ u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
@@ -111,7 +111,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
+ do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
@@ -131,20 +131,20 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
goto out;
}
} while (ctrl & 0x00010000);
- ret = 1;
+ ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
+ ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
- }
+ } while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
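
In both AUX implementations the status handling is inverted: ret now starts each attempt at zero, is set non-zero when the status words indicate a deferred or failed transaction, and the retry loop becomes a do/while that repeats while the attempt failed, retrying is permitted, and fewer than 32 attempts have been made. Below is a stand-alone C model of that control flow, with a stubbed transfer standing in for the hardware transaction.

/* Stand-alone model of the reworked AUX retry loop (stubbed transfer). */
#include <stdbool.h>
#include <stdio.h>

static int xfer_once(int attempt)
{
	/* Pretend the first two attempts are deferred by the sink. */
	if (attempt < 2)
		return 1;		/* retryable failure (defer/busy) */
	return 0;			/* success */
}

static int xfer_with_retries(bool retry)
{
	int ret, retries = 0;

	do {
		ret = xfer_once(retries);
		if (ret)
			printf("attempt %d deferred\n", retries);
	} while (ret && retry && retries++ < 32);

	return ret;
}

int main(void)
{
	printf("final: %d\n", xfer_with_retries(true));
	return 0;
}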
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b338fac..1a4ab825852c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
- usleep_range(20, 30);
+ udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0f7692..f0af2a381eea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
}
static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0078: 0x0078 128 samples shunt
- * 0x0780: 0x0780 128 samples bus
- * 0x1800: 0x0000 +-40 mV shunt range
- * 0x2000: 0x0000 16V FSR
- */
- u16 value = 0x07ff;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0031: 0x0000 140 us conversion time shunt
- * 0x01c0: 0x0000 140 us conversion time bus
- * 0x0f00: 0x0f00 1024 samples
- * 0x7000: 0x?000 channels
- */
- u16 value = 0x0e07;
- if (sensor->rail_mask & 0x1)
- value |= 0x1 << 14;
- if (sensor->rail_mask & 0x2)
- value |= 0x1 << 13;
- if (sensor->rail_mask & 0x4)
- value |= 0x1 << 12;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- case NVBIOS_EXTDEV_INA219:
- nvkm_iccsense_ina209_config(iccsense, sensor);
- break;
- case NVBIOS_EXTDEV_INA3221:
- nvkm_iccsense_ina3221_config(iccsense, sensor);
- break;
- default:
- break;
- }
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
-
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
- sensor->rail_mask = 0x0;
+ sensor->config = 0x0;
return sensor;
}
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
- struct pwr_rail_t *r = &stbl.rail[i];
- struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
- int (*read)(struct nvkm_iccsense *,
- struct nvkm_iccsense_rail *);
+ int r;
- if (!r->mode || r->resistor_mohm == 0)
+ if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
- sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- if (r->rail != 0)
- continue;
- read = nvkm_iccsense_ina209_read;
- break;
- case NVBIOS_EXTDEV_INA219:
- if (r->rail != 0)
+ if (!sensor->config)
+ sensor->config = pwr_rail->config;
+ else if (sensor->config != pwr_rail->config)
+ nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
+
+ for (r = 0; r < pwr_rail->resistor_count; ++r) {
+ struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
+
+ if (!res->mohm || !res->enabled)
continue;
- read = nvkm_iccsense_ina219_read;
- break;
- case NVBIOS_EXTDEV_INA3221:
- if (r->rail >= 3)
+
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ read = nvkm_iccsense_ina3221_read;
+ break;
+ default:
continue;
- read = nvkm_iccsense_ina3221_read;
- break;
- default:
- continue;
+ }
+
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r;
+ rail->mohm = res->mohm;
+ nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
+ list_add_tail(&rail->head, &iccsense->rails);
}
-
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
- sensor->rail_mask |= 1 << r->rail;
- rail->read = read;
- rail->sensor = sensor;
- rail->idx = r->rail;
- rail->mohm = r->resistor_mohm;
- list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
@@ -329,7 +288,8 @@ nvkm_iccsense_init(struct nvkm_subdev *subdev)
return 0;
}
-struct nvkm_subdev_func iccsense_func = {
+static const struct nvkm_subdev_func
+iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
.init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
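
The iccsense rework stores the 16-bit configuration word from the power-rail table on the sensor itself (warning when two rails disagree) and creates one rail object per enabled, non-zero shunt resistor rather than one per table entry. Below is a stand-alone C model of that fan-out, using an invented three-resistor entry.

/* Stand-alone model: one sensor entry fanning out into per-resistor rails. */
#include <stdbool.h>
#include <stdio.h>

struct resistor { unsigned mohm; bool enabled; };

struct rail { int sensor_id; int idx; unsigned mohm; };

int main(void)
{
	/* Hypothetical entry: three shunts, one zero-valued, one disabled. */
	struct resistor res[] = { { 5, true }, { 0, true }, { 5, false } };
	struct rail rails[3];
	int nrails = 0, sensor_id = 2;

	for (int r = 0; r < 3; r++) {
		if (!res[r].mohm || !res[r].enabled)
			continue;		/* same skip rule as above */
		rails[nrails++] = (struct rail){ sensor_id, r, res[r].mohm };
	}

	for (int i = 0; i < nrails; i++)
		printf("rail for extdev %d: { idx: %d, mohm: %u }\n",
		       rails[i].sensor_id, rails[i].idx, rails[i].mohm);
	return 0;
}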
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d2f908..e90e0f6ed008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail_mask;
+ u16 config;
};
struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 8ed8f65ff664..10c987a654ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -104,7 +104,7 @@ nvkm_instobj_dtor(struct nvkm_memory *memory)
return iobj;
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
@@ -156,7 +156,7 @@ nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
return nvkm_wo32(iobj->parent, offset, data);
}
-const struct nvkm_memory_func
+static const struct nvkm_memory_func
nvkm_instobj_func_slow = {
.dtor = nvkm_instobj_dtor,
.target = nvkm_instobj_target,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 39c2a38e54f7..0c7ef250dcaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -47,8 +47,10 @@ nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
BUG_ON((first > limit) || (limit >= ltc->num_tags));
+ mutex_lock(&ltc->subdev.mutex);
ltc->func->cbc_clear(ltc, first, limit);
ltc->func->cbc_wait(ltc);
+ mutex_unlock(&ltc->subdev.mutex);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index c3d66ef5dc12..430a61c3df44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -34,7 +34,7 @@ g84_mc_reset[] = {
{}
};
-const struct nvkm_mc_map
+static const struct nvkm_mc_map
g84_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_VP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 21b65ee254e4..e3e2f5e83815 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -250,6 +250,10 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
}
nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+ nvkm_debug(&mxm->subdev, "module flags: %02x\n",
+ nvbios_rd08(bios, data + 0x01));
+ nvkm_debug(&mxm->subdev, "config flags: %02x\n",
+ nvbios_rd08(bios, data + 0x02));
if (mxm_shadow(mxm, ver)) {
nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e784f9..9abfa5e2fe9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
*/
#include "mxms.h"
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
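
The ROM16/ROM32 change matters because MXM SIS fields sit at arbitrary byte offsets, so the old cast-and-dereference performs unaligned accesses that strict-alignment architectures may fault on; get_unaligned_le16/32 read the value byte-wise instead. Below is a stand-alone C illustration of alignment-safe little-endian reads from a byte buffer; the blob contents are arbitrary.

/* Stand-alone illustration of alignment-safe little-endian reads. */
#include <stdint.h>
#include <stdio.h>

static uint16_t rd_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (uint16_t)p[1] << 8);
}

static uint32_t rd_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* A small blob where the 16/32-bit fields start at odd offsets. */
	const uint8_t blob[] = { 0x01, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12, 0xff };

	printf("le16 @1 = 0x%04x, le32 @3 = 0x%08x\n",
	       rd_le16(blob + 1), rd_le32(blob + 3));	/* 0x1234, 0x12345678 */
	return 0;
}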
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index db14fad2ddfc..844971e5e874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -190,8 +190,8 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
struct nvkm_bios *bios = subdev->device->bios;
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
- if (dcb == 0x0000 || ver != 0x40) {
- nvkm_debug(subdev, "unsupported DCB version\n");
+ if (dcb == 0x0000 || (ver != 0x40 && ver != 0x41)) {
+ nvkm_warn(subdev, "unsupported DCB version\n");
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 88b643b8664e..51fb4bf94a44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,3 +8,5 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gp100.o
+nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 8dd164d13043..e611ce80f8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -32,225 +32,85 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu->func->pgob(pmu, enable);
}
-int
-nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
- u32 process, u32 message, u32 data0, u32 data1)
-{
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 addr;
-
- mutex_lock(&subdev->mutex);
- /* wait for a free slot in the fifo */
- addr = nvkm_rd32(device, 0x10a4a0);
- if (nvkm_msec(device, 2000,
- u32 tmp = nvkm_rd32(device, 0x10a4b0);
- if (tmp != (addr ^ 8))
- break;
- ) < 0) {
- mutex_unlock(&subdev->mutex);
- return -EBUSY;
- }
-
- /* we currently only support a single process at a time waiting
- * on a synchronous reply, take the PMU mutex and tell the
- * receive handler what we're waiting for
- */
- if (reply) {
- pmu->recv.message = message;
- pmu->recv.process = process;
- }
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000001);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
-
- /* write the packet */
- nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
- pmu->send.base));
- nvkm_wr32(device, 0x10a1c4, process);
- nvkm_wr32(device, 0x10a1c4, message);
- nvkm_wr32(device, 0x10a1c4, data0);
- nvkm_wr32(device, 0x10a1c4, data1);
- nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wait for reply, if requested */
- if (reply) {
- wait_event(pmu->recv.wait, (pmu->recv.process == 0));
- reply[0] = pmu->recv.data[0];
- reply[1] = pmu->recv.data[1];
- }
-
- mutex_unlock(&subdev->mutex);
- return 0;
-}
-
static void
nvkm_pmu_recv(struct work_struct *work)
{
- struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
- struct nvkm_subdev *subdev = &pmu->subdev;
- struct nvkm_device *device = subdev->device;
- u32 process, message, data0, data1;
-
- /* nothing to do if GET == PUT */
- u32 addr = nvkm_rd32(device, 0x10a4cc);
- if (addr == nvkm_rd32(device, 0x10a4c8))
- return;
-
- /* acquire data segment access */
- do {
- nvkm_wr32(device, 0x10a580, 0x00000002);
- } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
-
- /* read the packet */
- nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
- pmu->recv.base));
- process = nvkm_rd32(device, 0x10a1c4);
- message = nvkm_rd32(device, 0x10a1c4);
- data0 = nvkm_rd32(device, 0x10a1c4);
- data1 = nvkm_rd32(device, 0x10a1c4);
- nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
-
- /* release data segment access */
- nvkm_wr32(device, 0x10a580, 0x00000000);
-
- /* wake process if it's waiting on a synchronous reply */
- if (pmu->recv.process) {
- if (process == pmu->recv.process &&
- message == pmu->recv.message) {
- pmu->recv.data[0] = data0;
- pmu->recv.data[1] = data1;
- pmu->recv.process = 0;
- wake_up(&pmu->recv.wait);
- return;
- }
- }
+ struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
+ return pmu->func->recv(pmu);
+}
- /* right now there's no other expected responses from the engine,
- * so assume that any unexpected message is an error.
- */
- nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
- (char)((process & 0x000000ff) >> 0),
- (char)((process & 0x0000ff00) >> 8),
- (char)((process & 0x00ff0000) >> 16),
- (char)((process & 0xff000000) >> 24),
- process, message, data0, data1);
+int
+nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ if (!pmu || !pmu->func->send)
+ return -ENODEV;
+ return pmu->func->send(pmu, reply, process, message, data0, data1);
}
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- u32 disp = nvkm_rd32(device, 0x10a01c);
- u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000020) {
- u32 stat = nvkm_rd32(device, 0x10a16c);
- if (stat & 0x80000000) {
- nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
- stat & 0x00ffffff,
- nvkm_rd32(device, 0x10a168));
- nvkm_wr32(device, 0x10a16c, 0x00000000);
- intr &= ~0x00000020;
- }
- }
-
- if (intr & 0x00000040) {
- schedule_work(&pmu->recv.work);
- nvkm_wr32(device, 0x10a004, 0x00000040);
- intr &= ~0x00000040;
- }
-
- if (intr & 0x00000080) {
- nvkm_info(subdev, "wr32 %06x %08x\n",
- nvkm_rd32(device, 0x10a7a0),
- nvkm_rd32(device, 0x10a7a4));
- nvkm_wr32(device, 0x10a004, 0x00000080);
- intr &= ~0x00000080;
- }
-
- if (intr) {
- nvkm_error(subdev, "intr %08x\n", intr);
- nvkm_wr32(device, 0x10a004, intr);
- }
+ if (!pmu->func->intr)
+ return;
+ pmu->func->intr(pmu);
}
static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- struct nvkm_device *device = pmu->subdev.device;
- nvkm_wr32(device, 0x10a014, 0x00000060);
+ if (pmu->func->fini)
+ pmu->func->fini(pmu);
+
flush_work(&pmu->recv.work);
return 0;
}
static int
-nvkm_pmu_init(struct nvkm_subdev *subdev)
+nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
struct nvkm_device *device = pmu->subdev.device;
- int i;
- /* prevent previous ucode from running, wait for idle, reset */
- nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+ if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+ return 0;
+
+ /* Inhibit interrupts, and wait for idle. */
+ nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
+
+ /* Reset. */
+ pmu->func->reset(pmu);
+
+ /* Wait for IMEM/DMEM scrubbing to be complete. */
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
- /* upload data segment */
- nvkm_wr32(device, 0x10a1c0, 0x01000000);
- for (i = 0; i < pmu->func->data.size / 4; i++)
- nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
-
- /* upload code segment */
- nvkm_wr32(device, 0x10a180, 0x01000000);
- for (i = 0; i < pmu->func->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, 0x10a188, i >> 6);
- nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
- }
-
- /* start it running */
- nvkm_wr32(device, 0x10a10c, 0x00000000);
- nvkm_wr32(device, 0x10a104, 0x00000000);
- nvkm_wr32(device, 0x10a100, 0x00000002);
-
- /* wait for valid host->pmu ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4d0))
- break;
- ) < 0)
- return -EBUSY;
- pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
- pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+ return 0;
+}
- /* wait for valid pmu->host ring configuration */
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x10a4dc))
- break;
- ) < 0)
- return -EBUSY;
- pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
- pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+static int
+nvkm_pmu_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ return nvkm_pmu_reset(pmu);
+}
- nvkm_wr32(device, 0x10a010, 0x000000e0);
- return 0;
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ int ret = nvkm_pmu_reset(pmu);
+ if (ret == 0 && pmu->func->init)
+ ret = pmu->func->init(pmu);
+ return ret;
}
static void *
@@ -262,6 +122,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
+ .preinit = nvkm_pmu_preinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
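
The PMU base code is reduced to dispatching through per-chip hooks: reset/init/fini/intr/send/recv live in the nvkm_pmu_func, the GT215-era message-queue implementation moves into gt215.c for reuse by the Fermi through Maxwell PMUs, and the new GP100/GP102 objects provide only a reset hook, so nvkm_pmu_send reports -ENODEV on them. Below is a stand-alone C model of that dispatch pattern; the structure and function names are simplified stand-ins, not the driver's.

/* Stand-alone model of the per-implementation PMU hook dispatch. */
#include <errno.h>
#include <stdio.h>

struct pmu;

struct pmu_func {
	void (*reset)(struct pmu *);
	int  (*send)(struct pmu *, int msg);	/* optional */
};

struct pmu { const struct pmu_func *func; const char *name; };

static int pmu_send(struct pmu *pmu, int msg)
{
	if (!pmu || !pmu->func->send)
		return -ENODEV;			/* chip has no host<->PMU queue */
	return pmu->func->send(pmu, msg);
}

/* Older implementation: full message-queue support. */
static void old_reset(struct pmu *p) { printf("%s: engine reset\n", p->name); }
static int  old_send(struct pmu *p, int msg)
{
	printf("%s: sent %d\n", p->name, msg);
	return 0;
}
static const struct pmu_func old_func = { .reset = old_reset, .send = old_send };

/* Newer implementation: reset only, firmware handled elsewhere. */
static const struct pmu_func new_func = { .reset = old_reset };

int main(void)
{
	struct pmu a = { &old_func, "gt215-style" }, b = { &new_func, "gp102-style" };
	printf("a: %d, b: %d\n", pmu_send(&a, 42), pmu_send(&b, 42));
	return 0;
}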
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index e2faccffee6f..0bcf0b307a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pmu_data[] = {
+static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
};
-uint32_t gf100_pmu_code[] = {
+static uint32_t gf100_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 2d5bdc539697..fe8905666c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf119_pmu_data[] = {
+static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
};
-uint32_t gf119_pmu_code[] = {
+static uint32_t gf119_pmu_code[] = {
0x03410ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 3c731ff12871..9cf4e6fc724e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,4 +1,4 @@
-uint32_t gk208_pmu_data[] = {
+static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
};
-uint32_t gk208_pmu_code[] = {
+static uint32_t gk208_pmu_code[] = {
0x02f90ef5,
/* 0x0004: rd32 */
0xf607a040,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index e83341815ec6..5d692425b190 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pmu_data[] = {
+static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -916,7 +916,7 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
};
-uint32_t gt215_pmu_code[] = {
+static uint32_t gt215_pmu_code[] = {
0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index aeb8ccd891fc..0e36d4cb7201 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,6 +30,12 @@ gf100_pmu = {
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index fbc88d8ecd4d..0e4ba4248b15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -30,6 +30,12 @@ gf119_pmu = {
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 86f9f3b13f71..2ad858d825ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -109,6 +109,12 @@ gk104_pmu = {
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk104_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index ae255247c9d1..fc4b8ecfdaeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -88,6 +88,12 @@ gk110_pmu = {
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index 3b4917637902..e9a91277683a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -30,6 +30,12 @@ gk208_pmu = {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 31b8692b4641..9a248ed75f09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -32,6 +32,12 @@ gm107_pmu = {
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
new file mode 100644
index 000000000000..6c41c20c85a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pmu_func
+gp100_pmu = {
+ .reset = gt215_pmu_reset,
+};
+
+int
+gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
new file mode 100644
index 000000000000..f017352206c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp102_pmu_reset(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
+}
+
+static const struct nvkm_pmu_func
+gp102_pmu = {
+ .reset = gp102_pmu_reset,
+};
+
+int
+gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+}
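For context on the two reset hooks above: nvkm_mask() is used here as nouveau's read-modify-write register helper, so gp102_pmu_reset() pulses bit 0 of 0x10a3c0, while gt215_pmu_reset() further down pulses bit 13 of the 0x000200 engine-enable register with a trailing read acting as a posting read. A minimal sketch of the assumed semantics, not the real helper from nouveau's core headers:

/* Sketch only: assumed behaviour of nvkm_mask() as a read-modify-write
 * helper built on nvkm_rd32()/nvkm_wr32(); the real macro may differ
 * in detail.
 */
static u32 sketch_nvkm_mask(struct nvkm_device *device, u32 addr,
                            u32 mask, u32 data)
{
        u32 old = nvkm_rd32(device, addr);              /* read current value  */
        nvkm_wr32(device, addr, (old & ~mask) | data);  /* update masked bits  */
        return old;                                     /* previous register value */
}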
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 8ba7fa4ca75b..90d428b3be97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,21 +24,229 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"
-static void
+#include <subdev/timer.h>
+
+int
+gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
+ u32 process, u32 message, u32 data0, u32 data1)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 addr;
+
+ mutex_lock(&subdev->mutex);
+ /* wait for a free slot in the fifo */
+ addr = nvkm_rd32(device, 0x10a4a0);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x10a4b0);
+ if (tmp != (addr ^ 8))
+ break;
+ ) < 0) {
+ mutex_unlock(&subdev->mutex);
+ return -EBUSY;
+ }
+
+ /* we currently only support a single process at a time waiting
+ * on a synchronous reply, take the PMU mutex and tell the
+ * receive handler what we're waiting for
+ */
+ if (reply) {
+ pmu->recv.message = message;
+ pmu->recv.process = process;
+ }
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000001);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
+
+ /* write the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ pmu->send.base));
+ nvkm_wr32(device, 0x10a1c4, process);
+ nvkm_wr32(device, 0x10a1c4, message);
+ nvkm_wr32(device, 0x10a1c4, data0);
+ nvkm_wr32(device, 0x10a1c4, data1);
+ nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wait for reply, if requested */
+ if (reply) {
+ wait_event(pmu->recv.wait, (pmu->recv.process == 0));
+ reply[0] = pmu->recv.data[0];
+ reply[1] = pmu->recv.data[1];
+ }
+
+ mutex_unlock(&subdev->mutex);
+ return 0;
+}
+
+void
+gt215_pmu_recv(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 process, message, data0, data1;
+
+ /* nothing to do if GET == PUT */
+ u32 addr = nvkm_rd32(device, 0x10a4cc);
+ if (addr == nvkm_rd32(device, 0x10a4c8))
+ return;
+
+ /* acquire data segment access */
+ do {
+ nvkm_wr32(device, 0x10a580, 0x00000002);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
+
+ /* read the packet */
+ nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ pmu->recv.base));
+ process = nvkm_rd32(device, 0x10a1c4);
+ message = nvkm_rd32(device, 0x10a1c4);
+ data0 = nvkm_rd32(device, 0x10a1c4);
+ data1 = nvkm_rd32(device, 0x10a1c4);
+ nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
+
+ /* release data segment access */
+ nvkm_wr32(device, 0x10a580, 0x00000000);
+
+ /* wake process if it's waiting on a synchronous reply */
+ if (pmu->recv.process) {
+ if (process == pmu->recv.process &&
+ message == pmu->recv.message) {
+ pmu->recv.data[0] = data0;
+ pmu->recv.data[1] = data1;
+ pmu->recv.process = 0;
+ wake_up(&pmu->recv.wait);
+ return;
+ }
+ }
+
+ /* right now there are no other expected responses from the engine,
+ * so assume that any unexpected message is an error.
+ */
+ nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
+}
+
+void
+gt215_pmu_intr(struct nvkm_pmu *pmu)
+{
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 disp = nvkm_rd32(device, 0x10a01c);
+ u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
+
+ if (intr & 0x00000020) {
+ u32 stat = nvkm_rd32(device, 0x10a16c);
+ if (stat & 0x80000000) {
+ nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+ stat & 0x00ffffff,
+ nvkm_rd32(device, 0x10a168));
+ nvkm_wr32(device, 0x10a16c, 0x00000000);
+ intr &= ~0x00000020;
+ }
+ }
+
+ if (intr & 0x00000040) {
+ schedule_work(&pmu->recv.work);
+ nvkm_wr32(device, 0x10a004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr & 0x00000080) {
+ nvkm_info(subdev, "wr32 %06x %08x\n",
+ nvkm_rd32(device, 0x10a7a0),
+ nvkm_rd32(device, 0x10a7a4));
+ nvkm_wr32(device, 0x10a004, 0x00000080);
+ intr &= ~0x00000080;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x10a004, intr);
+ }
+}
+
+void
+gt215_pmu_fini(struct nvkm_pmu *pmu)
+{
+ nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
+}
+
+void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
- nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+ nvkm_rd32(device, 0x000200);
+}
+
+int
+gt215_pmu_init(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ int i;
+
+ /* upload data segment */
+ nvkm_wr32(device, 0x10a1c0, 0x01000000);
+ for (i = 0; i < pmu->func->data.size / 4; i++)
+ nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
+
+ /* upload code segment */
+ nvkm_wr32(device, 0x10a180, 0x01000000);
+ for (i = 0; i < pmu->func->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nvkm_wr32(device, 0x10a188, i >> 6);
+ nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
+ }
+
+ /* start it running */
+ nvkm_wr32(device, 0x10a10c, 0x00000000);
+ nvkm_wr32(device, 0x10a104, 0x00000000);
+ nvkm_wr32(device, 0x10a100, 0x00000002);
+
+ /* wait for valid host->pmu ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4d0))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+ pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
+
+ /* wait for valid pmu->host ring configuration */
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4dc))
+ break;
+ ) < 0)
+ return -EBUSY;
+ pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+ pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
+
+ nvkm_wr32(device, 0x10a010, 0x000000e0);
+ return 0;
}
static const struct nvkm_pmu_func
gt215_pmu = {
- .reset = gt215_pmu_reset,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
+ .reset = gt215_pmu_reset,
+ .init = gt215_pmu_init,
+ .fini = gt215_pmu_fini,
+ .intr = gt215_pmu_intr,
+ .send = gt215_pmu_send,
+ .recv = gt215_pmu_recv,
};
int
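A hypothetical caller of the message interface added above, to show the synchronous-reply path: passing a non-NULL reply[] makes gt215_pmu_send() record the process/message pair and sleep on pmu->recv.wait until gt215_pmu_recv() (run from pmu->recv.work, which the interrupt handler schedules on the 0x40 status bit) stores the matching answer. The process and message IDs below are placeholders, not real firmware constants.

/* Hypothetical usage sketch -- EXAMPLE_PROC/EXAMPLE_MSG are made up. */
#define EXAMPLE_PROC 0x54534554         /* placeholder process id */
#define EXAMPLE_MSG  0x00000001         /* placeholder message id */

static int example_pmu_query(struct nvkm_pmu *pmu)
{
        u32 reply[2];
        int ret;

        /* blocks until gt215_pmu_recv() fills reply[] and clears recv.process */
        ret = gt215_pmu_send(pmu, reply, EXAMPLE_PROC, EXAMPLE_MSG, 0, 0);
        if (ret)
                return ret;     /* e.g. -EBUSY if the host->PMU fifo never drained */

        return reply[0];        /* meaning of the payload is firmware-defined */
}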
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index f38c88fae3d6..2e2179a4ad17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -8,8 +8,6 @@ int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
int index, struct nvkm_pmu **);
struct nvkm_pmu_func {
- void (*reset)(struct nvkm_pmu *);
-
struct {
u32 *data;
u32 size;
@@ -20,8 +18,22 @@ struct nvkm_pmu_func {
u32 size;
} data;
+ void (*reset)(struct nvkm_pmu *);
+ int (*init)(struct nvkm_pmu *);
+ void (*fini)(struct nvkm_pmu *);
+ void (*intr)(struct nvkm_pmu *);
+ int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
+ u32 message, u32 data0, u32 data1);
+ void (*recv)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+void gt215_pmu_reset(struct nvkm_pmu *);
+int gt215_pmu_init(struct nvkm_pmu *);
+void gt215_pmu_fini(struct nvkm_pmu *);
+void gt215_pmu_intr(struct nvkm_pmu *);
+void gt215_pmu_recv(struct nvkm_pmu *);
+int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
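With the hooks folded into nvkm_pmu_func, the common PMU code can dispatch through the table and skip any hook an implementation leaves NULL (gp100/gp102 above only set .reset). A sketch of that dispatch pattern, under the assumption that the shared code guards each optional pointer in roughly this way; the actual wrappers in base.c may be structured differently:

/* Sketch of optional func-table dispatch; not copied from base.c. */
static void example_pmu_intr(struct nvkm_pmu *pmu)
{
        if (pmu->func->intr)            /* NULL for gp100/gp102 */
                pmu->func->intr(pmu);
}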
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index f1e2dc914366..ec48e4ace37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -1364,7 +1364,7 @@ gm200_secboot_init(struct nvkm_secboot *sb)
return 0;
}
-int
+static int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c34076223b7b..bcd179ba11d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b0e84a..e344901cfdc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
+#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,33 +51,45 @@ static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
- int i, ret = -EINVAL;
+ int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->func->vid_set(volt, volt->vid[i].vid);
- nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ int err = volt->vid[i].uv - uv;
+ if (err < 0 || err > best_err)
+ continue;
+
+ best_err = err;
+ best = i;
+ if (best_err == 0)
break;
- }
}
+
+ if (best == -1) {
+ nvkm_error(subdev, "couldn't set %iuv\n", uv);
+ return ret;
+ }
+
+ ret = volt->func->vid_set(volt, volt->vid[best].vid);
+ nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+ volt->vid[best].uv, ret);
return ret;
}
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
- u16 vmap;
+ u32 vmap;
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nvkm_volt_map(volt, info.link);
+ int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
}
int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+ struct nvkm_bios *bios = volt->subdev.device->bios;
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u32 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ s64 result;
+
+ if (volt->speedo < 0)
+ return volt->speedo;
+
+ if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+ result = div64_s64((s64)info.arg[0], 10);
+ result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+ result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+ } else if (ver == 0x20) {
+ switch (info.mode) {
+ /* 0x0 handled above! */
+ case 0x1:
+ result = ((s64)info.arg[0] * 15625) >> 18;
+ result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+ result += ((s64)info.arg[2] * temp * 15625) >> 10;
+ result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+ result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+ result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+ break;
+ case 0x3:
+ result = (info.min + info.max) / 2;
+ break;
+ case 0x2:
+ default:
+ result = info.min;
+ break;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ result = min(max(result, (s64)info.min), (s64)info.max);
+
+ if (info.link != 0xff) {
+ int ret = nvkm_volt_map(volt, info.link, temp);
+ if (ret < 0)
+ return ret;
+ result += ret;
+ }
+ return result;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+ int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
- ret = nvkm_volt_map(volt, id);
+ ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
+ int min = nvkm_volt_map(volt, min_id, temp);
+ if (min >= 0)
+ ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
@@ -112,14 +185,16 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
- u16 data;
+ u32 data;
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
+ if (data && info.vidmask && info.base && info.step && info.ranged) {
+ nvkm_debug(subdev, "found ranged based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
info.base += info.step;
}
volt->vid_mask = info.vidmask;
- } else if (data && info.vidmask) {
+ } else if (data && info.vidmask && !info.ranged) {
+ nvkm_debug(subdev, "found entry based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+ if (volt->func->speedo_read)
+ return volt->func->speedo_read(volt);
+ return -EINVAL;
+}
+
+static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
return 0;
}
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+
+ volt->speedo = nvkm_volt_speedo_read(volt);
+ if (volt->speedo > 0)
+ nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+ if (volt->func->oneinit)
+ return volt->func->oneinit(volt);
+
+ return 0;
+}
+
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
+ .oneinit = nvkm_volt_oneinit,
};
void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
/* Assuming the non-bios device should build the voltage table later */
if (bios) {
+ u8 ver, hdr, cnt, len;
+ struct nvbios_vmap vmap;
+
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
+
+ if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+ volt->max0_id = vmap.max0;
+ volt->max1_id = vmap.max1;
+ volt->max2_id = vmap.max2;
+ } else {
+ volt->max0_id = 0xff;
+ volt->max1_id = 0xff;
+ volt->max2_id = 0xff;
+ }
}
if (volt->vid_nr) {
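The voltage mapping added in nvkm_volt_map() evaluates the vmap-table polynomial in microvolts, using the fused speedo value cached by nvkm_volt_oneinit(). Stripped of the surrounding parsing, the version-0x10 (and version-0x20 mode 0) branch computes the following; this is a paraphrase of the code above, not an independent specification:

/* Paraphrase of the ver 0x10 / ver 0x20 mode 0 branch of nvkm_volt_map(). */
static s64 example_vmap_uv(const struct nvbios_vmap_entry *info, int speedo)
{
        s64 uv;

        uv  = div64_s64((s64)info->arg[0], 10);                        /* constant  */
        uv += div64_s64((s64)info->arg[1] * speedo, 10);               /* linear    */
        uv += div64_s64((s64)info->arg[2] * speedo * speedo, 100000);  /* quadratic */

        /* the caller then clamps to [info->min, info->max] and adds the
         * voltage of a linked entry, if info->link != 0xff */
        return uv;
}

Mode 1 of the version-0x20 table adds temperature-dependent terms in a 15625/2^n fixed-point format, which is why nvkm_volt_map() and nvkm_volt_set_id() now take a temp argument.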
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 000000000000..d9ed6925ca64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+ struct nvkm_subdev *subdev = &volt->subdev;
+ if (volt->speedo <= 0)
+ nvkm_error(subdev, "couldn't find speedo value, volting not "
+ "possible\n");
+ return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d8483..1c744e029454 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
@@ -34,7 +35,7 @@ struct gk104_volt {
struct nvbios_volt bios;
};
-int
+static int
gk104_volt_get(struct nvkm_volt *base)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -47,7 +48,7 @@ gk104_volt_get(struct nvkm_volt *base)
return bios->base + bios->pwm_range * duty / div;
}
-int
+static int
gk104_volt_set(struct nvkm_volt *base, u32 uv)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
return 0;
}
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+ int ret;
+
+ if (!fuse)
+ return -EINVAL;
+
+ nvkm_wr32(device, 0x122634, 0x0);
+ ret = nvkm_fuse_read(fuse, 0x3a8);
+ nvkm_wr32(device, 0x122634, 0x41);
+ return ret;
+}
+
static const struct nvkm_volt_func
gk104_volt_pwm = {
+ .oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
+ .speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
+ .oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
+ .speedo_read = gk104_volt_speedo_read,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 74db4d28930f..2925b9cae681 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -25,7 +25,7 @@
#include <core/tegra.h>
-const struct cvb_coef gm20b_cvb_coef[] = {
+static const struct cvb_coef gm20b_cvb_coef[] = {
/* KHz, c0, c1, c2 */
/* 76800 */ { 1786666, -85625, 1632 },
/* 153600 */ { 1846729, -87525, 1632 },
@@ -58,7 +58,7 @@ static const struct cvb_coef gm20b_na_cvb_coef[] = {
/* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
};
-const u32 speedo_to_vmin[] = {
+static const u32 speedo_to_vmin[] = {
/* 0, 1, 2, 3, 4, */
950000, 840000, 818750, 840000, 810000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index d2bac1d77819..443c031b966b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
+#include "priv.h"
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d991161..354bafe4b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
int index, struct nvkm_volt **);
struct nvkm_volt_func {
+ int (*oneinit)(struct nvkm_volt *);
int (*volt_get)(struct nvkm_volt *);
int (*volt_set)(struct nvkm_volt *, u32 uv);
int (*vid_get)(struct nvkm_volt *);
int (*vid_set)(struct nvkm_volt *, u8 vid);
int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ int (*speedo_read)(struct nvkm_volt *);
};
int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
int nvkm_voltpwm_init(struct nvkm_volt *volt);
int nvkm_voltpwm_get(struct nvkm_volt *volt);
int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 3485d1ecd655..aaa8a58390f1 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -24,23 +24,24 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
bool invert_polarity;
};
-static const struct omap_video_timings tvc_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+static const struct videomode tvc_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct of_device_id tvc_of_match[];
@@ -92,7 +93,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
if (!ddata->dev->of_node) {
in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
@@ -126,32 +127,32 @@ static void tvc_disable(struct omap_dss_device *dssdev)
}
static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static u32 tvc_get_wss(struct omap_dss_device *dssdev)
@@ -253,14 +254,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = tvc_pal_timings;
+ ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = tvc_pal_timings;
+ dssdev->panel.vm = tvc_pal_vm;
r = omapdss_register_display(dssdev);
if (r) {
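The omapdrm changes from here on are a mechanical conversion from the driver-private struct omap_video_timings to the generic struct videomode. The field mapping used throughout the hunks below: x_res/y_res become hactive/vactive; hsw/hfp/hbp become hsync_len/hfront_porch/hback_porch; vsw/vfp/vbp become vsync_len/vfront_porch/vback_porch; and the separate sync, DE and pixel-clock polarity fields collapse into DISPLAY_FLAGS_* bits in .flags. A minimal example of the new form, mirroring the hdmic_default_vm values that appear further down:

/* videomode equivalent of a 640x480 panel description, for reference. */
static const struct videomode example_vm = {
        .hactive = 640,  .vactive = 480,
        .pixelclock = 25175000,
        .hsync_len = 96, .hfront_porch = 16, .hback_porch = 48,
        .vsync_len = 2,  .vfront_porch = 11, .vback_porch = 31,
        .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};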
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 684b7aeda411..d6875d9fcefa 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,32 +19,30 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings dvic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode dvic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 23500000,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
+ .hfront_porch = 48,
+ .hsync_len = 32,
+ .hback_porch = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
+ .vfront_porch = 3,
+ .vsync_len = 4,
+ .vback_porch = 7,
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct i2c_adapter *i2c_adapter;
};
@@ -90,7 +88,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->timings);
+ in->ops.dvi->set_timings(in, &ddata->vm);
r = in->ops.dvi->enable(in);
if (r)
@@ -115,32 +113,32 @@ static void dvic_disable(struct omap_dss_device *dssdev)
}
static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dvi->set_timings(in, timings);
+ in->ops.dvi->set_timings(in, vm);
}
static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dvi->check_timings(in, timings);
+ return in->ops.dvi->check_timings(in, vm);
}
static int dvic_ddc_read(struct i2c_adapter *adapter,
@@ -287,14 +285,14 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings = dvic_default_timings;
+ ddata->vm = dvic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &dvic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = dvic_default_timings;
+ dssdev->panel.vm = dvic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 7bdf83af9797..1ef130641bae 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -21,21 +21,18 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings hdmic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode hdmic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 25175000,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
+ .hsync_len = 96,
+ .hfront_porch = 16,
+ .hback_porch = 48,
+ .vsync_len = 2,
+ .vfront_porch = 11,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
@@ -44,7 +41,7 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
int hpd_gpio;
};
@@ -96,7 +93,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -123,32 +120,32 @@ static void hdmic_disable(struct omap_dss_device *dssdev)
}
static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->check_timings(in, timings);
+ return in->ops.hdmi->check_timings(in, vm);
}
static int hdmic_read_edid(struct omap_dss_device *dssdev,
@@ -259,14 +256,14 @@ static int hdmic_probe(struct platform_device *pdev)
goto err_reg;
}
- ddata->timings = hdmic_default_timings;
+ ddata->vm = hdmic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &hdmic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = hdmic_default_timings;
+ dssdev->panel.vm = hdmic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index fe4e7ec3bab0..f7a5731492d0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -27,7 +27,7 @@ struct panel_drv_data {
struct gpio_desc *enable_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -90,7 +90,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
r = in->ops.atv->enable(in);
if (r)
@@ -123,38 +123,38 @@ static void opa362_disable(struct omap_dss_device *dssdev)
}
static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "set_timings\n");
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
dev_dbg(dssdev->dev, "get_timings\n");
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "check_timings\n");
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static void opa362_set_type(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d768217cefe0..13e32d02c884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -24,7 +24,7 @@ struct panel_drv_data {
int pd_gpio;
int data_lines;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -81,7 +81,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
@@ -113,44 +113,43 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct omap_video_timings *timings)
+static void tfp410_fix_timings(struct videomode *vm)
{
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE;
}
static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static const struct omapdss_dvi_ops tfp410_dvi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 46855c8f5cbf..58276a48112e 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -80,7 +80,7 @@ static int tpd_enable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -105,33 +105,33 @@ static void tpd_disable(struct omap_dss_device *dssdev)
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
- r = in->ops.hdmi->check_timings(in, timings);
+ r = in->ops.hdmi->check_timings(in, vm);
return r;
}
@@ -234,25 +234,30 @@ static int tpd_probe(struct platform_device *pdev)
if (r)
return r;
-
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ct_cp_hpd_gpio = gpio;
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->ls_oe_gpio = gpio;
gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
GPIOD_IN);
- if (IS_ERR(gpio))
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err_gpio;
+ }
ddata->hpd_gpio = gpio;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7f16f985ab22..38003208d9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -28,7 +28,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
/* used for non-DT boot, to be removed */
int backlight_gpio;
@@ -80,7 +80,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -122,32 +122,32 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
}
static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver panel_dpi_ops = {
@@ -169,7 +169,6 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
const struct panel_dpi_platform_data *pdata;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev, *in;
- struct videomode vm;
int r;
pdata = dev_get_platdata(&pdev->dev);
@@ -185,8 +184,7 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
ddata->data_lines = pdata->data_lines;
- videomode_from_timing(pdata->display_timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(pdata->display_timing, &ddata->vm);
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
@@ -214,7 +212,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
struct omap_dss_device *in;
int r;
struct display_timing timing;
- struct videomode vm;
struct gpio_desc *gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
@@ -245,8 +242,7 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
return r;
}
- videomode_from_timing(&timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(&timing, &ddata->vm);
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -295,7 +291,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->driver = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index b1f3b818edf4..dc026a843712 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -42,7 +42,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct platform_device *pdev;
@@ -382,8 +382,8 @@ static const struct backlight_ops dsicm_bl_ops = {
static void dsicm_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
static ssize_t dsicm_num_errors_show(struct device *dev,
@@ -589,7 +589,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
struct omap_dss_dsi_config dsi_config = {
.mode = OMAP_DSS_DSI_CMD_MODE,
.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &ddata->timings,
+ .vm = &ddata->vm,
.hs_clk_min = 150000000,
.hs_clk_max = 300000000,
.lp_clk_min = 7000000,
@@ -892,8 +892,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dssdev->panel.vm.hactive,
+ dssdev->panel.vm.vactive);
if (r)
goto err;
@@ -1023,9 +1023,8 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
goto err1;
}
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
+ size = min((u32)w * h * 3,
+ dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
in->ops.dsi->bus_lock(in);
@@ -1186,14 +1185,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings.x_res = 864;
- ddata->timings.y_res = 480;
- ddata->timings.pixelclock = 864 * 480 * 60;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
- dssdev->panel.timings = ddata->timings;
+ dssdev->panel.vm = ddata->vm;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 6dfb96cea293..43d21edb51f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,25 +19,28 @@
#include "../dss/omapdss.h"
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
+static struct videomode lb035q02_vm = {
+ .hactive = 320,
+ .vactive = 240,
.pixelclock = 6500000,
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 20,
+ .hback_porch = 68,
+
+ .vsync_len = 2,
+ .vfront_porch = 4,
+ .vback_porch = 18,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
};
struct panel_drv_data {
@@ -48,7 +51,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *enable_gpio;
};
@@ -158,7 +161,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -189,32 +192,32 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
}
static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver lb035q02_ops = {
@@ -278,14 +281,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = lb035q02_timings;
+ ddata->vm = lb035q02_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9f3d6f48f3e1..2de27ba01552 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -23,7 +23,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -65,22 +65,20 @@ static const struct {
{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
};
-static const struct omap_video_timings nec_8048_panel_timings = {
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
+static const struct videomode nec_8048_panel_vm = {
+ .hactive = LCD_XRES,
+ .vactive = LCD_YRES,
.pixelclock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 6,
+ .hsync_len = 1,
+ .hback_porch = 4,
+ .vfront_porch = 3,
+ .vsync_len = 1,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -157,7 +155,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -188,32 +186,32 @@ static void nec_8048_disable(struct omap_dss_device *dssdev)
}
static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver nec_8048_ops = {
@@ -306,14 +304,14 @@ static int nec_8048_probe(struct spi_device *spi)
goto err_gpio;
}
- ddata->videomode = nec_8048_panel_timings;
+ ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 3d3efc561ea9..04fe235b7cac 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
struct gpio_desc *ini_gpio; /* high = power on */
@@ -35,25 +35,27 @@ struct panel_drv_data {
struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
};
-static const struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
+static const struct videomode sharp_ls_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 19200000,
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 1,
+ .hback_porch = 28,
+
+ .vsync_len = 1,
+ .vfront_porch = 1,
+ .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -99,7 +101,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
@@ -154,32 +156,32 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
}
static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver sharp_ls_ops = {
@@ -279,14 +281,14 @@ static int sharp_ls_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->videomode = sharp_ls_timings;
+ ddata->vm = sharp_ls_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
dssdev->driver = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 3557a4c7dd7b..746cb8d9cba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -71,7 +71,7 @@ struct panel_drv_data {
int reset_gpio;
int datapairs;
- struct omap_video_timings videomode;
+ struct videomode vm;
char *name;
int enabled;
@@ -92,23 +92,20 @@ struct panel_drv_data {
struct backlight_device *bl_dev;
};
-static const struct omap_video_timings acx565akm_panel_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode acx565akm_panel_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 24000000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hfront_porch = 28,
+ .hsync_len = 4,
+ .hback_porch = 24,
+ .vfront_porch = 3,
+ .vsync_len = 3,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -548,7 +545,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_timings(in, &ddata->vm);
if (ddata->datapairs > 0)
in->ops.sdi->set_datapairs(in, ddata->datapairs);
@@ -662,32 +659,32 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
}
static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.sdi->set_timings(in, timings);
+ in->ops.sdi->set_timings(in, vm);
}
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.sdi->check_timings(in, timings);
+ return in->ops.sdi->check_timings(in, vm);
}
static struct omap_dss_driver acx565akm_ops = {
@@ -845,14 +842,14 @@ static int acx565akm_probe(struct spi_device *spi)
acx565akm_bl_update_status(bldev);
- ddata->videomode = acx565akm_panel_timings;
+ ddata->vm = acx565akm_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..f313dbfcbacb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -37,28 +37,29 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct spi_device *spi_dev;
};
-static struct omap_video_timings td028ttec1_panel_timings = {
- .x_res = 480,
- .y_res = 640,
+static struct videomode td028ttec1_panel_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 22153000,
- .hfp = 24,
- .hsw = 8,
- .hbp = 8,
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 24,
+ .hsync_len = 8,
+ .hback_porch = 8,
+ .vfront_porch = 4,
+ .vsync_len = 2,
+ .vback_porch = 2,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define JBT_COMMAND 0x000
@@ -208,7 +209,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -325,32 +326,32 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
}
static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver td028ttec1_ops = {
@@ -414,14 +415,14 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = td028ttec1_panel_timings;
+ ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 66c6bbe6472b..0787dba44faa 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -56,7 +56,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -72,25 +72,27 @@ struct panel_drv_data {
u32 power_on_resume:1;
};
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode tpo_td043_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 36000000,
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
+ .hsync_len = 1,
+ .hfront_porch = 68,
+ .hback_porch = 214,
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
+ .vsync_len = 1,
+ .vfront_porch = 39,
+ .vback_porch = 34,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -378,7 +380,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -418,32 +420,32 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver tpo_td043_ops = {
@@ -546,14 +548,14 @@ static int tpo_td043_probe(struct spi_device *spi)
goto err_sysfs;
}
- ddata->videomode = tpo_td043_timings;
+ ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
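All four panel drivers above follow the same pattern: the geometry fields keep their values under the generic names (x_res becomes hactive, hfp becomes hfront_porch, hsw becomes hsync_len, and so on), while the five separate signal-level and edge fields collapse into the single videomode flags word. A minimal, self-contained sketch of that mapping, for illustration only; the DISPLAY_FLAGS_* values and the two enums below are local stand-ins, not the real <video/display_timing.h> or omapdss definitions:

/* illustrative only -- stand-in values, not the kernel definitions */
#include <stdio.h>

#define DISPLAY_FLAGS_HSYNC_LOW       (1u << 0)
#define DISPLAY_FLAGS_HSYNC_HIGH      (1u << 1)
#define DISPLAY_FLAGS_VSYNC_LOW       (1u << 2)
#define DISPLAY_FLAGS_VSYNC_HIGH      (1u << 3)
#define DISPLAY_FLAGS_DE_LOW          (1u << 4)
#define DISPLAY_FLAGS_DE_HIGH         (1u << 5)
#define DISPLAY_FLAGS_PIXDATA_POSEDGE (1u << 6)
#define DISPLAY_FLAGS_PIXDATA_NEGEDGE (1u << 7)
#define DISPLAY_FLAGS_SYNC_POSEDGE    (1u << 8)
#define DISPLAY_FLAGS_SYNC_NEGEDGE    (1u << 9)

enum sig_level { SIG_ACTIVE_LOW, SIG_ACTIVE_HIGH };      /* was OMAPDSS_SIG_* */
enum sig_edge  { DRIVE_FALLING_EDGE, DRIVE_RISING_EDGE };/* was OMAPDSS_DRIVE_SIG_* */

static unsigned int to_display_flags(enum sig_level hsync, enum sig_level vsync,
				      enum sig_level de, enum sig_edge data_pclk,
				      enum sig_edge sync_pclk)
{
	unsigned int flags = 0;

	flags |= hsync == SIG_ACTIVE_HIGH ? DISPLAY_FLAGS_HSYNC_HIGH
					  : DISPLAY_FLAGS_HSYNC_LOW;
	flags |= vsync == SIG_ACTIVE_HIGH ? DISPLAY_FLAGS_VSYNC_HIGH
					  : DISPLAY_FLAGS_VSYNC_LOW;
	flags |= de == SIG_ACTIVE_HIGH ? DISPLAY_FLAGS_DE_HIGH
				       : DISPLAY_FLAGS_DE_LOW;
	flags |= data_pclk == DRIVE_RISING_EDGE ? DISPLAY_FLAGS_PIXDATA_POSEDGE
						: DISPLAY_FLAGS_PIXDATA_NEGEDGE;
	flags |= sync_pclk == DRIVE_RISING_EDGE ? DISPLAY_FLAGS_SYNC_POSEDGE
						: DISPLAY_FLAGS_SYNC_NEGEDGE;
	return flags;
}

int main(void)
{
	/* values from the acx565akm hunk: both syncs active low, DE high,
	 * data driven on the rising edge, sync on the falling edge */
	printf("acx565akm flags = %#x\n",
	       to_display_flags(SIG_ACTIVE_LOW, SIG_ACTIVE_LOW, SIG_ACTIVE_HIGH,
				DRIVE_RISING_EDGE, DRIVE_FALLING_EDGE));
	return 0;
}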
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 535240fba671..c839f6456db2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -75,7 +75,7 @@ struct dispc_features {
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -1679,7 +1679,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -2179,7 +2179,7 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *t, u16 pos_x,
+ const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
@@ -2189,14 +2189,16 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
u64 val, blank;
int i;
- nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
- blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
+ lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
@@ -2231,7 +2233,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
}
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
- const struct omap_video_timings *mgr_timings, u16 width,
+ const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
@@ -2242,7 +2244,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
return (unsigned long) pclk;
if (height > out_height) {
- unsigned int ppl = mgr_timings->x_res;
+ unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -2324,7 +2326,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
}
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2370,7 +2372,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
}
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2392,7 +2394,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
again:
if (*five_taps)
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, color_mode);
else
@@ -2400,7 +2402,7 @@ again:
in_height, out_width, out_height,
mem_to_mem);
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
@@ -2435,7 +2437,7 @@ again:
return -EINVAL;
}
- if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width,
+ if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
@@ -2455,7 +2457,7 @@ again:
}
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2501,7 +2503,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
@@ -2515,7 +2517,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
- if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) {
+ if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
@@ -2551,7 +2553,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2591,7 +2593,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 out_width, u16 out_height, enum omap_color_mode color_mode,
u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
bool five_taps = true;
@@ -2605,7 +2607,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
- bool ilace = mgr_timings->interlace;
+ bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
@@ -2647,7 +2649,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2784,7 +2786,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
int r;
@@ -2803,14 +2805,14 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, mgr_timings, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem);
return r;
}
EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
+ bool mem_to_mem, const struct videomode *vm)
{
int r;
u32 l;
@@ -2819,8 +2821,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
const u8 zorder = 0, global_alpha = 0;
const bool replication = false;
bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
+ int in_width = vm->hactive;
+ int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
@@ -2833,7 +2835,7 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
+ replication, vm, mem_to_mem);
switch (wi->color_mode) {
case OMAP_DSS_COLOR_RGB16:
@@ -2867,8 +2869,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
} else {
int wbdelay;
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
+ wbdelay = min(vm->vfront_porch +
+ vm->vsync_len + vm->vback_porch, (u32)255);
/* WBDELAYCOUNT */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
@@ -3093,10 +3095,10 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
height <= dispc.feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
- if (hsw < 1 || hsw > dispc.feat->sw_max ||
+ if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
hfp < 1 || hfp > dispc.feat->hp_max ||
hbp < 1 || hbp > dispc.feat->hp_max ||
vsw < 1 || vsw > dispc.feat->sw_max ||
@@ -3110,113 +3112,77 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
- if (timings->interlace)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp))
+ if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ vm->hfront_porch, vm->hback_porch,
+ vm->vsync_len, vm->vfront_porch,
+ vm->vback_porch))
return false;
}
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp,
- enum omap_dss_signal_level vsync_level,
- enum omap_dss_signal_level hsync_level,
- enum omap_dss_signal_edge data_pclk_edge,
- enum omap_dss_signal_level de_level,
- enum omap_dss_signal_edge sync_pclk_edge)
-
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
- FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(vfp, dispc.feat->fp_start, 8) |
- FLD_VAL(vbp, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- switch (vsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- vs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
- break;
- default:
- BUG();
- }
+ else
+ vs = true;
- switch (hsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- hs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
hs = false;
- break;
- default:
- BUG();
- }
+ else
+ hs = true;
- switch (de_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- de = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
de = false;
- break;
- default:
- BUG();
- }
+ else
+ de = true;
- switch (data_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
ipc = false;
- break;
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ else
ipc = true;
- break;
- default:
- BUG();
- }
/* always use the 'rf' setting */
onoff = true;
- switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- rf = false;
- break;
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
rf = true;
- break;
- default:
- BUG();
- }
+ else
+ rf = false;
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
@@ -3253,13 +3219,13 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
unsigned xtot, ytot;
unsigned long ht, vt;
- struct omap_video_timings t = *timings;
+ struct videomode t = *vm;
- DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
@@ -3267,34 +3233,37 @@ void dispc_mgr_set_timings(enum omap_channel channel,
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp, t.vsync_level, t.hsync_level,
- t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
+ _dispc_mgr_set_lcd_timings(channel, &t);
- xtot = t.x_res + t.hfp + t.hsw + t.hbp;
- ytot = t.y_res + t.vfp + t.vsw + t.vbp;
+ xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
+ ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
- ht = timings->pixelclock / xtot;
- vt = timings->pixelclock / xtot / ytot;
+ ht = vm->pixelclock / xtot;
+ vt = vm->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixelclock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("pck %lu\n", vm->pixelclock);
+ DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsync_len, t.hfront_porch, t.hback_porch,
+ t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
- t.vsync_level, t.hsync_level, t.data_pclk_edge,
- t.de_level, t.sync_pclk_edge);
+ !!(t.flags & DISPLAY_FLAGS_VSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_HSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE),
+ !!(t.flags & DISPLAY_FLAGS_DE_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_SYNC_POSEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- if (t.interlace)
- t.y_res /= 2;
+ if (t.flags & DISPLAY_FLAGS_INTERLACED)
+ t.vactive /= 2;
if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
- 19, 17);
+ REG_FLD_MOD(DISPC_CONTROL,
+ !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
+ 19, 17);
}
- dispc_mgr_set_size(channel, t.x_res, t.y_res);
+ dispc_mgr_set_size(channel, t.hactive, t.vactive);
}
EXPORT_SYMBOL(dispc_mgr_set_timings);
@@ -4214,23 +4183,20 @@ EXPORT_SYMBOL(dispc_free_irq);
*/
static const struct dispc_errata_i734_data {
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
- .timings = {
- .x_res = 8, .y_res = 1,
+ .vm = {
+ .hactive = 8, .vactive = 1,
.pixelclock = 16000000,
- .hsw = 8, .hfp = 4, .hbp = 4,
- .vsw = 1, .vfp = 1, .vbp = 1,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .interlace = false,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .double_pixel = false,
+ .hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
+ .vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
@@ -4320,7 +4286,7 @@ static void dispc_errata_i734_wa(void)
/* Setup and enable GFX plane */
dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.vm, false);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4328,7 +4294,7 @@ static void dispc_errata_i734_wa(void)
dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(framedone_irq);
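The switch statements deleted from _dispc_mgr_set_lcd_timings() reduce to plain flag tests: the vs/hs/de/ipc register bits mean "inverted", so each is set when the corresponding *_HIGH or PIXDATA_POSEDGE flag is absent, while rf simply mirrors SYNC_POSEDGE. A compact, self-contained sketch of the same logic (stand-in flag bits, not the kernel definitions):

/* sketch only -- stand-in flag bits, booleans as in the patch */
#include <stdbool.h>
#include <stdio.h>

#define DISPLAY_FLAGS_HSYNC_HIGH      (1u << 1)
#define DISPLAY_FLAGS_VSYNC_HIGH      (1u << 3)
#define DISPLAY_FLAGS_DE_HIGH         (1u << 5)
#define DISPLAY_FLAGS_PIXDATA_POSEDGE (1u << 6)
#define DISPLAY_FLAGS_SYNC_POSEDGE    (1u << 8)

static void dispc_lcd_polarities(unsigned int flags, bool *vs, bool *hs,
				 bool *de, bool *ipc, bool *rf)
{
	*vs  = !(flags & DISPLAY_FLAGS_VSYNC_HIGH);	 /* 1 = invert vsync */
	*hs  = !(flags & DISPLAY_FLAGS_HSYNC_HIGH);	 /* 1 = invert hsync */
	*de  = !(flags & DISPLAY_FLAGS_DE_HIGH);	 /* 1 = invert DE */
	*ipc = !(flags & DISPLAY_FLAGS_PIXDATA_POSEDGE); /* 1 = falling-edge data */
	*rf  = !!(flags & DISPLAY_FLAGS_SYNC_POSEDGE);	 /* 1 = rising-edge sync */
}

int main(void)
{
	bool vs, hs, de, ipc, rf;

	/* e.g. a panel with both syncs low, DE high, data on the rising edge */
	dispc_lcd_polarities(DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
			     &vs, &hs, &de, &ipc, &rf);
	printf("vs=%d hs=%d de=%d ipc=%d rf=%d\n", vs, hs, de, ipc, rf);
	return 0;
}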
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8dcdd7cf9937..425a5a8dff8b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -35,8 +35,8 @@
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
@@ -72,9 +72,9 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = dssdev->panel.timings;
+ *vm = dssdev->panel.vm;
}
EXPORT_SYMBOL(omapdss_default_get_timings);
@@ -217,73 +217,3 @@ struct omap_dss_device *omap_dss_find_device(void *data,
return NULL;
}
EXPORT_SYMBOL(omap_dss_find_device);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt)
-{
- memset(ovt, 0, sizeof(*ovt));
-
- ovt->pixelclock = vm->pixelclock;
- ovt->x_res = vm->hactive;
- ovt->hbp = vm->hback_porch;
- ovt->hfp = vm->hfront_porch;
- ovt->hsw = vm->hsync_len;
- ovt->y_res = vm->vactive;
- ovt->vbp = vm->vback_porch;
- ovt->vfp = vm->vfront_porch;
- ovt->vsw = vm->vsync_len;
-
- ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
- OMAPDSS_DRIVE_SIG_RISING_EDGE :
- OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- ovt->sync_pclk_edge = ovt->data_pclk_edge;
-}
-EXPORT_SYMBOL(videomode_to_omap_video_timings);
-
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm)
-{
- memset(vm, 0, sizeof(*vm));
-
- vm->pixelclock = ovt->pixelclock;
-
- vm->hactive = ovt->x_res;
- vm->hback_porch = ovt->hbp;
- vm->hfront_porch = ovt->hfp;
- vm->hsync_len = ovt->hsw;
- vm->vactive = ovt->y_res;
- vm->vback_porch = ovt->vbp;
- vm->vfront_porch = ovt->vfp;
- vm->vsync_len = ovt->vsw;
-
- if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
-
- if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
-
- if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_DE_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_DE_LOW;
-
- if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else
- vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
-}
-EXPORT_SYMBOL(omap_video_timings_to_videomode);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b268295b76cf..e75162d26ac0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,7 +47,7 @@ struct dpi_data {
struct mutex lock;
- struct omap_video_timings timings;
+ struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
@@ -333,31 +333,31 @@ static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
enum omap_channel channel = out->dispc_channel;
- struct omap_video_timings *t = &dpi->timings;
+ struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
pck = fck / lck_div / pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
return 0;
}
@@ -476,7 +476,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -484,25 +484,25 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->timings = *timings;
+ dpi->vm = *vm;
mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
mutex_lock(&dpi->lock);
- *timings = dpi->timings;
+ *vm = dpi->vm;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
enum omap_channel channel = dpi->output.dispc_channel;
@@ -512,23 +512,23 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (timings->x_res % 8 != 0)
+ if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -540,7 +540,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- timings->pixelclock = pck;
+ vm->pixelclock = pck;
return 0;
}
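dpi_set_mode() and dpi_check_timings() now write the achieved rate straight back into vm->pixelclock: the clock code hands back a functional clock and two integer dividers, and fck / lck_div / pck_div rarely matches the requested rate exactly. A toy illustration of that rounding, with made-up numbers rather than anything read from hardware:

/* toy example only -- made-up clock values */
#include <stdio.h>

int main(void)
{
	unsigned long fck = 170666666;		/* made-up DISPC functional clock, Hz */
	unsigned long requested = 24000000;	/* the vm->pixelclock asked for, Hz */
	int lck_div = 1, pck_div = 7;
	unsigned long pck = fck / lck_div / pck_div;

	if (pck != requested)
		printf("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
		       requested, pck);
	/* the driver then stores pck back into vm->pixelclock */
	return 0;
}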
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index e1be5e795cd8..f060bda31235 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -289,7 +289,7 @@ struct dsi_clk_calc_ctx {
struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- struct omap_video_timings dispc_vm;
+ struct videomode vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
@@ -383,7 +383,7 @@ struct dsi_data {
unsigned scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format pix_fmt;
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
@@ -3321,12 +3321,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3453,7 +3453,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3494,7 +3494,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
exiths_clk = ths_exit + tclk_trail;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
@@ -3705,7 +3705,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3713,7 +3713,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
@@ -3722,7 +3722,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
- vsa, timings->y_res);
+ vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
@@ -3738,7 +3738,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
- r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
@@ -3856,7 +3856,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
word_count, 0);
@@ -3918,8 +3918,8 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
int r;
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi->line_buffer_size;
- u16 w = dsi->timings.x_res;
- u16 h = dsi->timings.y_res;
+ u16 w = dsi->vm.hactive;
+ u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
@@ -3969,7 +3969,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->vm);
dss_mgr_start_update(dispc_channel);
@@ -4056,8 +4056,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
+ dw = dsi->vm.hactive;
+ dh = dsi->vm.vactive;
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
@@ -4120,16 +4120,21 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
/*
* override interlace, logic level and edge related parameters in
- * omap_video_timings with default values
+ * videomode with default values
*/
- dsi->timings.interlace = false;
- dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(channel, &dsi->timings);
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4331,7 +4336,7 @@ static void print_dsi_vm(const char *str,
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4340,14 +4345,14 @@ static void print_dsi_vm(const char *str,
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
TO_DSI_T(t->hfp),
TO_DSI_T(bl),
TO_DSI_T(pps),
@@ -4356,13 +4361,13 @@ static void print_dsi_vm(const char *str,
#undef TO_DSI_T
}
-static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+static void print_dispc_vm(const char *str, const struct videomode *vm)
{
- unsigned long pck = t->pixelclock;
+ unsigned long pck = vm->pixelclock;
int hact, bl, tot;
- hact = t->x_res;
- bl = t->hsw + t->hbp + t->hfp;
+ hact = vm->hactive;
+ bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4371,12 +4376,12 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- t->hsw, t->hbp, hact, t->hfp,
+ vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
- TO_DISPC_T(t->hsw),
- TO_DISPC_T(t->hbp),
+ TO_DISPC_T(vm->hsync_len),
+ TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
- TO_DISPC_T(t->hfp),
+ TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
@@ -4387,7 +4392,7 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
- struct omap_video_timings vm = { 0 };
+ struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
@@ -4396,13 +4401,13 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
vm.pixelclock = pck;
- vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
- vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
+ vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
- vm.x_res = t->hact;
+ vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
+ vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
@@ -4412,19 +4417,19 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct omap_video_timings *t = &ctx->dispc_vm;
+ struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
- *t = *ctx->config->timings;
- t->pixelclock = pck;
- t->x_res = ctx->config->timings->x_res;
- t->y_res = ctx->config->timings->y_res;
- t->hsw = t->hfp = t->hbp = t->vsw = 1;
- t->vfp = t->vbp = 0;
+ *vm = *ctx->config->vm;
+ vm->pixelclock = pck;
+ vm->hactive = ctx->config->vm->hactive;
+ vm->vactive = ctx->config->vm->vactive;
+ vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
+ vm->vfront_porch = vm->vback_porch = 0;
return true;
}
@@ -4475,7 +4480,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixelclock;
+ pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4510,14 +4515,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
- const struct omap_video_timings *req_vm;
- struct omap_video_timings *dispc_vm;
+ const struct videomode *req_vm;
+ struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
- req_vm = cfg->timings;
+ req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
@@ -4525,9 +4530,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
- xres = req_vm->x_res;
+ xres = req_vm->hactive;
- panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
+ req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
@@ -4557,7 +4563,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- if (ndl == 3 && req_vm->hsw == 0)
+ if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
@@ -4596,14 +4602,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
- } else if (ndl == 3 && req_vm->hsw == 0) {
+ } else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
- hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
- hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
@@ -4633,10 +4639,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
- dsi_vm->vsa = req_vm->vsw;
- dsi_vm->vbp = req_vm->vbp;
- dsi_vm->vact = req_vm->y_res;
- dsi_vm->vfp = req_vm->vfp;
+ dsi_vm->vsa = req_vm->vsync_len;
+ dsi_vm->vbp = req_vm->vback_porch;
+ dsi_vm->vact = req_vm->vactive;
+ dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
@@ -4650,19 +4656,19 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
/* setup DISPC videomode */
- dispc_vm = &ctx->dispc_vm;
+ dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
- hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
@@ -4685,9 +4691,9 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (hfp < 1)
return false;
- dispc_vm->hfp = hfp;
- dispc_vm->hsw = hsa;
- dispc_vm->hbp = hbp;
+ dispc_vm->hfront_porch = hfp;
+ dispc_vm->hsync_len = hsa;
+ dispc_vm->hback_porch = hbp;
return true;
}
@@ -4707,9 +4713,9 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
- print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
- print_dispc_vm("req ", ctx->config->timings);
+ print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
@@ -4758,7 +4764,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
- const struct omap_video_timings *t = cfg->timings;
+ const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
@@ -4774,9 +4780,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->config = cfg;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixelclock - 1000;
- ctx->req_pck_nom = t->pixelclock;
- ctx->req_pck_max = t->pixelclock + 1000;
+ ctx->req_pck_min = vm->pixelclock - 1000;
+ ctx->req_pck_nom = vm->pixelclock;
+ ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -4833,7 +4839,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
- dsi->timings = ctx.dispc_vm;
+ dsi->vm = ctx.vm;
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -5342,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5362,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
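In dsi_vm_calc_blanking() above, the panel's horizontal blanking arrives in pixel-clock units (the videomode fields) but the DSI video-mode registers count byte clocks, so each component is rescaled by byteclk / req_pck_nom, for example hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom). A standalone sketch of the same arithmetic, using made-up clock values and a plain C stand-in for div64_u64():

/* sketch only -- made-up clocks, plain C stand-in for div64_u64() */
#include <stdio.h>
#include <stdint.h>

static uint64_t div64(uint64_t a, uint64_t b) { return a / b; }

int main(void)
{
	uint64_t byteclk = 112500000;	 /* hypothetical DSI byte clock, Hz */
	uint64_t req_pck_nom = 36000000; /* nominal pixel clock, Hz */
	unsigned int hsync_len = 1, hback_porch = 214;	/* pixels, td043-like */

	/* pixel-clock cycles to byte-clock cycles, as in dsi_vm_calc_blanking() */
	uint64_t hsa = div64((uint64_t)hsync_len * byteclk, req_pck_nom);
	uint64_t hbp = div64((uint64_t)hback_porch * byteclk, req_pck_nom);

	printf("hsa=%llu hbp=%llu byteclks\n",
	       (unsigned long long)hsa, (unsigned long long)hbp);
	return 0;
}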
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4fd06dc41cb3..56493b290731 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -366,8 +366,7 @@ bool dispc_div_calc(unsigned long dispc,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -390,7 +389,7 @@ void dispc_wb_enable(bool enable);
bool dispc_wb_is_enabled(void);
void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
+ bool mem_to_mem, const struct videomode *vm);
/* VENC */
int venc_init_platform_driver(void) __init;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 63e711545865..fb6cccd02374 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -181,7 +181,7 @@ struct hdmi_video_format {
};
struct hdmi_config {
- struct omap_video_timings timings;
+ struct videomode vm;
struct hdmi_avi_infoframe infoframe;
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
@@ -298,11 +298,11 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param);
+ struct videomode *vm, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cbd28dfdb86a..e7162c16de2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -155,7 +155,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
@@ -169,12 +169,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, 0xffffffff);
hdmi_wp_set_irqstatus(wp, 0xffffffff);
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -209,7 +210,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -255,30 +256,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -352,7 +353,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -643,7 +644,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
hd->audio_config = *dss_audio;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ef3afe99e487..e05b7ac4f7dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -310,7 +310,7 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
@@ -318,16 +318,16 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_init(&v_core_cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c0a5139a301..678dfb02764a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -172,7 +172,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned pc;
@@ -181,12 +181,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -226,7 +227,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -272,30 +273,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -378,7 +379,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -669,7 +670,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
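Both HDMI encoders now read everything from hdmi.cfg.vm: hdmi_power_on_full() doubles the pixel clock fed to the PLL calculation when DISPLAY_FLAGS_DOUBLECLK is set, and the hdmi5 core init in the next file halves the vertical parameters for interlaced modes. A rough sketch of those two adjustments, with stand-in flag bits and made-up numbers for an interlaced, pixel-doubled mode:

/* sketch only -- stand-in flags and made-up numbers */
#include <stdio.h>

#define DISPLAY_FLAGS_INTERLACED (1u << 0)
#define DISPLAY_FLAGS_DOUBLECLK  (1u << 1)

struct vm_sketch {
	unsigned long pixelclock;
	unsigned int vactive, vfront_porch, vsync_len, vback_porch;
	unsigned int flags;
};

int main(void)
{
	struct vm_sketch vm = {
		.pixelclock = 13500000, .vactive = 480,
		.vfront_porch = 4, .vsync_len = 3, .vback_porch = 15,
		.flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_DOUBLECLK,
	};
	unsigned long pc = vm.pixelclock;

	if (vm.flags & DISPLAY_FLAGS_DOUBLECLK)		/* as in hdmi_power_on_full() */
		pc *= 2;

	if (vm.flags & DISPLAY_FLAGS_INTERLACED) {	/* as in hdmi_core_init() */
		vm.vactive /= 2;
		vm.vfront_porch /= 2;
		vm.vsync_len /= 2;
		vm.vback_porch /= 2;
	}

	printf("pc=%lu Hz, vactive=%u per field\n", pc, vm.vactive);
	return 0;
}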
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ab2093daa12..8de1d7b2ae55 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,35 +292,35 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
- video_cfg->v_fc_config.timings = cfg->timings;
+ video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1*/
- video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->hblank = cfg->vm.hfront_porch +
+ cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
- video_cfg->vblank = cfg->timings.vsw +
- cfg->timings.vfp + cfg->timings.vbp;
+ video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
+ cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- if (cfg->timings.interlace) {
+ if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
- video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
- video_cfg->v_fc_config.timings.vfp /= 2;
- video_cfg->v_fc_config.timings.vsw /= 2;
- video_cfg->v_fc_config.timings.vbp /= 2;
+ video_cfg->v_fc_config.vm.vfront_porch /= 2;
+ video_cfg->v_fc_config.vm.vsync_len /= 2;
+ video_cfg->v_fc_config.vm.vback_porch /= 2;
}
- if (cfg->timings.double_pixel) {
- video_cfg->v_fc_config.timings.x_res *= 2;
+ if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
+ video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
- video_cfg->v_fc_config.timings.hfp *= 2;
- video_cfg->v_fc_config.timings.hsw *= 2;
- video_cfg->v_fc_config.timings.hbp *= 2;
+ video_cfg->v_fc_config.vm.hfront_porch *= 2;
+ video_cfg->v_fc_config.vm.hsync_len *= 2;
+ video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
@@ -329,13 +329,12 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
+ struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
- vsync_pol =
- cfg->v_fc_config.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol =
- cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
@@ -343,20 +342,16 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
- r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1,
- cfg->v_fc_config.timings.x_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0,
- cfg->v_fc_config.timings.x_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1,
- cfg->v_fc_config.timings.y_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0,
- cfg->v_fc_config.timings.y_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
@@ -366,30 +361,28 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1,
- cfg->v_fc_config.timings.hfp >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0,
- cfg->v_fc_config.timings.hfp & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
+ 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
+ 7, 0);
/* set vertical sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY,
- cfg->v_fc_config.timings.vfp, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1,
- (cfg->v_fc_config.timings.hsw >> 8), 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0,
- cfg->v_fc_config.timings.hsw & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
+ 1, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
+ 7, 0);
/* set vertical sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH,
- cfg->v_fc_config.timings.vsw, 5, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
- if (cfg->v_fc_config.timings.double_pixel)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
@@ -616,7 +609,7 @@ int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
@@ -624,16 +617,16 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_init(&v_core_cfg, cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
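
The hdmi_core_init() hunk above now derives the blanking totals and the interlace/double-clock adjustments from plain struct videomode fields. A rough standalone sketch of that arithmetic, with simplified types and placeholder flag bits standing in for DISPLAY_FLAGS_INTERLACED and DISPLAY_FLAGS_DOUBLECLK:

#include <stdio.h>

#define FLAG_INTERLACED	(1u << 0)	/* stands in for DISPLAY_FLAGS_INTERLACED */
#define FLAG_DOUBLECLK	(1u << 1)	/* stands in for DISPLAY_FLAGS_DOUBLECLK */

struct vm_sketch {
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
	unsigned int flags;
};

static void core_init_sketch(struct vm_sketch *vm,
			     unsigned int *hblank, unsigned int *vblank)
{
	*hblank = vm->hfront_porch + vm->hback_porch + vm->hsync_len;
	*vblank = vm->vsync_len + vm->vfront_porch + vm->vback_porch;

	if (vm->flags & FLAG_INTERLACED) {
		/* program per-field values: halve the vertical timings */
		vm->vactive /= 2;
		*vblank /= 2;
		vm->vfront_porch /= 2;
		vm->vsync_len /= 2;
		vm->vback_porch /= 2;
	}

	if (vm->flags & FLAG_DOUBLECLK) {
		/* pixel-repetition modes double the horizontal timings */
		vm->hactive *= 2;
		*hblank *= 2;
		vm->hfront_porch *= 2;
		vm->hsync_len *= 2;
		vm->hback_porch *= 2;
	}
}

int main(void)
{
	struct vm_sketch vm = {
		.hactive = 1920, .hfront_porch = 88,
		.hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4,
		.vback_porch = 36, .vsync_len = 5,
	};
	unsigned int hblank, vblank;

	core_init_sketch(&vm, &hblank, &vblank);
	printf("hblank=%u vblank=%u\n", hblank, vblank);	/* 280, 45 */
	return 0;
}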
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 203694a52d18..b783d5a0750e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -144,87 +144,84 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 r;
bool vsync_pol, hsync_pol;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
- vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, vsync_pol, 7, 7);
r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsw_offset = 1;
+ unsigned hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
- * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4)
- hsw_offset = 0;
+ hsync_len_offset = 0;
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
+ timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
+ timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
+ timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
+ timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
+ timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
+ struct videomode *vm, struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-
- timings->vsync_level = param->timings.vsync_level;
- timings->hsync_level = param->timings.hsync_level;
- timings->interlace = param->timings.interlace;
- timings->double_pixel = param->timings.double_pixel;
-
- if (param->timings.interlace) {
+ video_fmt->y_res = param->vm.vactive;
+ video_fmt->x_res = param->vm.hactive;
+
+ vm->hback_porch = param->vm.hback_porch;
+ vm->hfront_porch = param->vm.hfront_porch;
+ vm->hsync_len = param->vm.hsync_len;
+ vm->vback_porch = param->vm.vback_porch;
+ vm->vfront_porch = param->vm.vfront_porch;
+ vm->vsync_len = param->vm.vsync_len;
+
+ vm->flags = param->vm.flags;
+
+ if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
- timings->vbp /= 2;
- timings->vfp /= 2;
- timings->vsw /= 2;
+ vm->vback_porch /= 2;
+ vm->vfront_porch /= 2;
+ vm->vsync_len /= 2;
}
- if (param->timings.double_pixel) {
+ if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
- timings->hfp *= 2;
- timings->hsw *= 2;
- timings->hbp *= 2;
+ vm->hfront_porch *= 2;
+ vm->hsync_len *= 2;
+ vm->hback_porch *= 2;
}
}
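
hdmi_wp_video_config_timing() above packs the porches and the sync length into the TIMING_H/TIMING_V registers, programming HSW as-is on OMAP4 and as hsync_len-1 on OMAP5 ES2+ class hardware, as the comment in the hunk explains. A self-contained sketch of the horizontal packing; FLD_VAL_SKETCH is a local stand-in for the driver's FLD_VAL() helper, and the bit positions are copied from the hunk:

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the driver's FLD_VAL(val, start, end) bitfield helper */
#define FLD_VAL_SKETCH(val, start, end) \
	(((uint32_t)(val) << (end)) & (((1u << ((start) - (end) + 1)) - 1) << (end)))

static uint32_t pack_timing_h(unsigned int hbp, unsigned int hfp,
			      unsigned int hsync_len, int omap4_style)
{
	/* OMAP4 programs HSW as-is; OMAP5 ES2+ (DRA7/AM5) program HSW-1 */
	unsigned int hsync_len_offset = omap4_style ? 0 : 1;
	uint32_t timing_h = 0;

	timing_h |= FLD_VAL_SKETCH(hbp, 31, 20);
	timing_h |= FLD_VAL_SKETCH(hfp, 19, 8);
	timing_h |= FLD_VAL_SKETCH(hsync_len - hsync_len_offset, 7, 0);
	return timing_h;
}

int main(void)
{
	/* 720p-style horizontal timings: hbp=220, hfp=110, hsync=40 */
	printf("OMAP4:      0x%08x\n", pack_timing_h(220, 110, 40, 1));
	printf("OMAP5 ES2+: 0x%08x\n", pack_timing_h(220, 110, 40, 0));
	return 0;
}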
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 6eaf1adbd606..b420dde8c0fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -290,7 +290,7 @@ struct omap_dss_dsi_videomode_timings {
struct omap_dss_dsi_config {
enum omap_dss_dsi_mode mode;
enum omap_dss_dsi_pixel_format pixel_format;
- const struct omap_video_timings *timings;
+ const struct videomode *vm;
unsigned long hs_clk_min, hs_clk_max;
unsigned long lp_clk_min, lp_clk_max;
@@ -299,48 +299,12 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-struct omap_video_timings {
- /* Unit: pixels */
- u16 x_res;
- /* Unit: pixels */
- u16 y_res;
- /* Unit: Hz */
- u32 pixelclock;
- /* Unit: pixel clocks */
- u16 hsw; /* Horizontal synchronization pulse width */
- /* Unit: pixel clocks */
- u16 hfp; /* Horizontal front porch */
- /* Unit: pixel clocks */
- u16 hbp; /* Horizontal back porch */
- /* Unit: line clocks */
- u16 vsw; /* Vertical synchronization pulse width */
- /* Unit: line clocks */
- u16 vfp; /* Vertical front porch */
- /* Unit: line clocks */
- u16 vbp; /* Vertical back porch */
-
- /* Vsync logic level */
- enum omap_dss_signal_level vsync_level;
- /* Hsync logic level */
- enum omap_dss_signal_level hsync_level;
- /* Interlaced or Progressive timings */
- bool interlace;
- /* Pixel clock edge to drive LCD data */
- enum omap_dss_signal_edge data_pclk_edge;
- /* Data enable logic level */
- enum omap_dss_signal_level de_level;
- /* Pixel clock edges to drive HSYNC and VSYNC signals */
- enum omap_dss_signal_edge sync_pclk_edge;
-
- bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
+/* Hardcoded videomodes for tv. Venc only uses these to
* identify the mode, and does not actually use the configs
* itself. However, the configs should be something that
* a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
+extern const struct videomode omap_dss_pal_vm;
+extern const struct videomode omap_dss_ntsc_vm;
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
@@ -502,11 +466,11 @@ struct omapdss_dpi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
@@ -521,11 +485,11 @@ struct omapdss_sdi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
@@ -540,11 +504,11 @@ struct omapdss_dvi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
};
struct omapdss_atv_ops {
@@ -557,11 +521,11 @@ struct omapdss_atv_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_type)(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type);
@@ -582,11 +546,11 @@ struct omapdss_hdmi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -692,7 +656,7 @@ struct omap_dss_device {
} phy;
struct {
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
@@ -785,11 +749,11 @@ struct omap_dss_driver {
int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -819,11 +783,6 @@ struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm);
-
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
@@ -852,7 +811,7 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -906,7 +865,7 @@ void dispc_mgr_go(enum omap_channel channel);
void dispc_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
u32 dispc_mgr_gamma_size(enum omap_channel channel);
@@ -919,8 +878,7 @@ bool dispc_ovl_enabled(enum omap_plane plane);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
+ bool replication, const struct videomode *vm, bool mem_to_mem);
enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
@@ -934,7 +892,7 @@ struct dss_mgr_ops {
int (*enable)(enum omap_channel channel);
void (*disable)(enum omap_channel channel);
void (*set_timings)(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void (*set_lcd_config)(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int (*register_framedone_handler)(enum omap_channel channel,
@@ -951,7 +909,7 @@ int dss_mgr_connect(enum omap_channel channel,
void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst);
void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int dss_mgr_enable(enum omap_channel channel);
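
The omapdss.h hunk deletes struct omap_video_timings together with the videomode_to_omap_video_timings()/omap_video_timings_to_videomode() converters, since struct videomode plus the DISPLAY_FLAGS_* bits now carry the same information. The field correspondence implied by the rest of the series, written out as a small standalone converter (all structs and flag values below are local stand-ins for illustration, not kernel types):

#include <stdio.h>
#include <stdint.h>

/* placeholder bits standing in for the DISPLAY_FLAGS_* values */
enum {
	F_HSYNC_HIGH = 1 << 0,      F_HSYNC_LOW = 1 << 1,
	F_VSYNC_HIGH = 1 << 2,      F_VSYNC_LOW = 1 << 3,
	F_DE_HIGH = 1 << 4,         F_DE_LOW = 1 << 5,
	F_PIXDATA_POSEDGE = 1 << 6, F_PIXDATA_NEGEDGE = 1 << 7,
	F_SYNC_POSEDGE = 1 << 8,    F_SYNC_NEGEDGE = 1 << 9,
	F_INTERLACED = 1 << 10,     F_DOUBLECLK = 1 << 11,
};

/* shape of the removed struct, reduced to what the mapping needs */
struct old_timings {
	uint16_t x_res, y_res;
	uint32_t pixelclock;
	uint16_t hsw, hfp, hbp, vsw, vfp, vbp;
	int hsync_high, vsync_high, de_high;
	int data_pclk_rising, sync_pclk_rising;
	int interlace, double_pixel;
};

struct new_vm {
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
	unsigned long pixelclock;
	unsigned int flags;
};

static void old_to_vm(const struct old_timings *t, struct new_vm *vm)
{
	vm->hactive = t->x_res;        vm->vactive = t->y_res;
	vm->hsync_len = t->hsw;        vm->vsync_len = t->vsw;
	vm->hfront_porch = t->hfp;     vm->vfront_porch = t->vfp;
	vm->hback_porch = t->hbp;      vm->vback_porch = t->vbp;
	vm->pixelclock = t->pixelclock;

	vm->flags  = t->hsync_high ? F_HSYNC_HIGH : F_HSYNC_LOW;
	vm->flags |= t->vsync_high ? F_VSYNC_HIGH : F_VSYNC_LOW;
	vm->flags |= t->de_high ? F_DE_HIGH : F_DE_LOW;
	vm->flags |= t->data_pclk_rising ? F_PIXDATA_POSEDGE : F_PIXDATA_NEGEDGE;
	vm->flags |= t->sync_pclk_rising ? F_SYNC_POSEDGE : F_SYNC_NEGEDGE;
	if (t->interlace)
		vm->flags |= F_INTERLACED;
	if (t->double_pixel)
		vm->flags |= F_DOUBLECLK;
}

int main(void)
{
	/* the PAL timings from the venc.c hunk, written in the old layout */
	struct old_timings pal = {
		.x_res = 720, .y_res = 574, .pixelclock = 13500000,
		.hsw = 64, .hfp = 12, .hbp = 68,
		.vsw = 5, .vfp = 5, .vbp = 41,
		.interlace = 1, .de_high = 1, .data_pclk_rising = 1,
	};
	struct new_vm vm;

	old_to_vm(&pal, &vm);
	printf("%ux%u, %lu Hz, flags=0x%x\n",
	       vm.hactive, vm.vactive, vm.pixelclock, vm.flags);
	return 0;
}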
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 24f859488201..a901af5a9bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -201,10 +201,9 @@ void dss_mgr_disconnect(enum omap_channel channel,
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, timings);
+ dss_mgr_ops->set_timings(channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index cd53566d75eb..09724757366a 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -113,7 +113,7 @@ static struct {
struct semaphore bus_lock;
- struct omap_video_timings timings;
+ struct videomode vm;
int pixel_size;
int data_lines;
struct rfbi_timings intf_timings;
@@ -308,15 +308,15 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
u32 l;
int r;
struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
+ u16 width = rfbi.vm.hactive;
+ u16 height = rfbi.vm.vactive;
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ dss_mgr_set_timings(mgr, &rfbi.vm);
r = dss_mgr_enable(mgr);
if (r)
@@ -777,8 +777,8 @@ static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
+ rfbi.vm.hactive = w;
+ rfbi.vm.vactive = h;
}
static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
@@ -854,25 +854,30 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &mgr_config);
/*
- * Set rfbi.timings with default values, the x_res and y_res fields
+ * Set rfbi.timings with default values, the hactive and vactive fields
* are expected to be already configured by the panel driver via
* omapdss_rfbi_set_size()
*/
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ rfbi.vm.hsync_len = 1;
+ rfbi.vm.hfront_porch = 1;
+ rfbi.vm.hback_porch = 1;
+ rfbi.vm.vsync_len = 1;
+ rfbi.vm.vfront_porch = 0;
+ rfbi.vm.vback_porch = 0;
+
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(mgr, &rfbi.vm);
}
static int rfbi_display_enable(struct omap_dss_device *dssdev)
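
The rfbi_config_lcd_manager() hunk replaces each old enum field with a pair of mutually exclusive flags, clearing one polarity bit before setting its opposite so no stale bit survives. A compact standalone sketch of that clear-then-set pattern, with placeholder flag values:

#include <stdio.h>

#define F_HSYNC_LOW	(1u << 0)
#define F_HSYNC_HIGH	(1u << 1)
#define F_DE_LOW	(1u << 2)
#define F_DE_HIGH	(1u << 3)

/* keep exactly one bit of a mutually exclusive pair set */
static unsigned int set_one_of(unsigned int flags,
			       unsigned int set, unsigned int clear)
{
	flags &= ~clear;
	flags |= set;
	return flags;
}

int main(void)
{
	unsigned int flags = F_HSYNC_LOW | F_DE_LOW;	/* stale defaults */

	flags = set_one_of(flags, F_HSYNC_HIGH, F_HSYNC_LOW);
	flags = set_one_of(flags, F_DE_HIGH, F_DE_LOW);

	printf("flags=0x%x\n", flags);	/* only the *_HIGH bits remain set */
	return 0;
}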
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0a96c321ce62..b3bda2d3c08d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -39,7 +39,7 @@ static struct {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
int datapairs;
struct omap_dss_device output;
@@ -131,7 +131,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
enum omap_channel channel = dssdev->dispc_channel;
- struct omap_video_timings *t = &sdi.timings;
+ struct videomode *vm = &sdi.vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
@@ -151,10 +151,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_get_dispc;
/* 15.5.9.1.2 */
- t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -162,15 +161,15 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
r = dss_set_fck_rate(fck);
if (r)
@@ -229,26 +228,26 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- sdi.timings = *timings;
+ sdi.vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = sdi.timings;
+ *vm = sdi.vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
return 0;
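
In sdi.c the requested vm->pixelclock is only a target: sdi_calc_clock_div() picks a functional clock and divisors, and the mode is then updated to the rate the dividers actually produce, with a warning if it differs. A standalone sketch of that adjustment; the brute-force divider search and the clock numbers are made up for illustration and are not the driver's real sdi_calc_clock_div():

#include <stdio.h>

/* brute-force stand-in for the real divisor search */
static unsigned long pick_pck(unsigned long fck, unsigned long target,
			      unsigned int *lck_div, unsigned int *pck_div)
{
	unsigned long best = 0, best_diff = ~0UL;
	unsigned int l, p;

	for (l = 1; l <= 255; l++) {
		for (p = 1; p <= 255; p++) {
			unsigned long pck = fck / l / p;
			unsigned long diff = pck > target ? pck - target
							  : target - pck;

			if (diff < best_diff) {
				best_diff = diff;
				best = pck;
				*lck_div = l;
				*pck_div = p;
			}
		}
	}
	return best;
}

int main(void)
{
	unsigned long target = 74250000;	/* requested pixel clock, Hz */
	unsigned long fck = 153600000;		/* assumed DSS functional clock */
	unsigned int lck_div = 1, pck_div = 1;
	unsigned long pck = pick_pck(fck, target, &lck_div, &pck_div);

	printf("fck/%u/%u = %lu Hz\n", lck_div, pck_div, pck);
	if (pck != target)
		printf("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
		       target, pck);
	return 0;
}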
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6eedf2118708..d74f7fcc2e46 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -262,47 +262,41 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct omap_video_timings omap_dss_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+const struct videomode omap_dss_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_timings);
+EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct omap_video_timings omap_dss_ntsc_timings = {
- .x_res = 720,
- .y_res = 482,
+const struct videomode omap_dss_ntsc_vm = {
+ .hactive = 720,
+ .vactive = 482,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 16,
- .hbp = 58,
- .vsw = 6,
- .vfp = 6,
- .vbp = 31,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 16,
+ .hback_porch = 58,
+ .vsync_len = 6,
+ .vfront_porch = 6,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_timings);
+EXPORT_SYMBOL(omap_dss_ntsc_vm);
static struct {
struct platform_device *pdev;
@@ -313,7 +307,7 @@ static struct {
struct clk *tv_dac_clk;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
@@ -427,13 +421,12 @@ static void venc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(
- struct omap_video_timings *timings)
+static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return &venc_config_pal_trm;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return &venc_config_ntsc_trm;
BUG();
@@ -451,7 +444,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
goto err0;
venc_reset();
- venc_write_config(venc_timings_to_config(&venc.timings));
+ venc_write_config(venc_timings_to_config(&venc.vm));
dss_set_venc_output(venc.type);
dss_set_dac_pwrdn_bgz(1);
@@ -468,7 +461,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.vm);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
@@ -546,17 +539,17 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.timings, timings, sizeof(*timings)))
+ if (memcmp(&venc.vm, vm, sizeof(*vm)))
venc.wss_data = 0;
- venc.timings = *timings;
+ venc.vm = *vm;
dispc_set_tv_pclk(13500000);
@@ -564,25 +557,25 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return 0;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return 0;
return -EINVAL;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&venc.venc_lock);
- *timings = venc.timings;
+ *vm = venc.vm;
mutex_unlock(&venc.venc_lock);
}
@@ -602,7 +595,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
mutex_lock(&venc.venc_lock);
- config = venc_timings_to_config(&venc.timings);
+ config = venc_timings_to_config(&venc.vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 137fe690a0da..2580e8673908 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -42,73 +42,6 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings)
-{
- mode->clock = timings->pixelclock / 1000;
-
- mode->hdisplay = timings->x_res;
- mode->hsync_start = mode->hdisplay + timings->hfp;
- mode->hsync_end = mode->hsync_start + timings->hsw;
- mode->htotal = mode->hsync_end + timings->hbp;
-
- mode->vdisplay = timings->y_res;
- mode->vsync_start = mode->vdisplay + timings->vfp;
- mode->vsync_end = mode->vsync_start + timings->vsw;
- mode->vtotal = mode->vsync_end + timings->vbp;
-
- mode->flags = 0;
-
- if (timings->interlace)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timings->double_pixel)
- mode->flags |= DRM_MODE_FLAG_DBLCLK;
-
- if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NVSYNC;
-}
-
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode)
-{
- timings->pixelclock = mode->clock * 1000;
-
- timings->x_res = mode->hdisplay;
- timings->hfp = mode->hsync_start - mode->hdisplay;
- timings->hsw = mode->hsync_end - mode->hsync_start;
- timings->hbp = mode->htotal - mode->hsync_end;
-
- timings->y_res = mode->vdisplay;
- timings->vfp = mode->vsync_start - mode->vdisplay;
- timings->vsw = mode->vsync_end - mode->vsync_start;
- timings->vbp = mode->vtotal - mode->vsync_end;
-
- timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
- timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -185,11 +118,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
- dssdrv->get_timings(dssdev, &timings);
+ dssdrv->get_timings(dssdev, &vm);
- copy_timings_omap_to_drm(mode, &timings);
+ drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
@@ -207,12 +140,14 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int r, ret = MODE_BAD;
- copy_timings_drm_to_omap(&timings, mode);
+ drm_display_mode_to_videomode(mode, &vm);
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
mode->vrefresh = drm_mode_vrefresh(mode);
/*
@@ -221,13 +156,13 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
* panel's timings
*/
if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &timings);
+ r = dssdrv->check_timings(dssdev, &vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(&vm, &t, sizeof(struct videomode)))
r = -EINVAL;
else
r = 0;
@@ -236,7 +171,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixelclock / 1000;
+ new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
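
omap_connector.c drops its private copy_timings_* helpers in favour of the generic drm_display_mode_from_videomode()/drm_display_mode_to_videomode() conversions, then ORs in the DE/PIXDATA/SYNC flags that a drm_display_mode cannot express. The deleted helper shows the underlying arithmetic; a standalone sketch of the mode-to-videomode direction, using local stand-in structs and 1080p-style numbers picked for illustration:

#include <stdio.h>

struct mode_sketch {			/* the relevant drm_display_mode fields */
	int clock;			/* kHz */
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

struct vm_sketch {
	unsigned long pixelclock;	/* Hz */
	unsigned int hactive, hfront_porch, hsync_len, hback_porch;
	unsigned int vactive, vfront_porch, vsync_len, vback_porch;
};

static void mode_to_vm(const struct mode_sketch *m, struct vm_sketch *vm)
{
	vm->pixelclock = m->clock * 1000UL;

	vm->hactive = m->hdisplay;
	vm->hfront_porch = m->hsync_start - m->hdisplay;
	vm->hsync_len = m->hsync_end - m->hsync_start;
	vm->hback_porch = m->htotal - m->hsync_end;

	vm->vactive = m->vdisplay;
	vm->vfront_porch = m->vsync_start - m->vdisplay;
	vm->vsync_len = m->vsync_end - m->vsync_start;
	vm->vback_porch = m->vtotal - m->vsync_end;
}

int main(void)
{
	struct mode_sketch m = {
		.clock = 148500,
		.hdisplay = 1920, .hsync_start = 2008, .hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084, .vsync_end = 1089, .vtotal = 1125,
	};
	struct vm_sketch vm;

	mode_to_vm(&m, &vm);
	printf("hfp=%u hsync=%u hbp=%u pclk=%lu Hz\n",
	       vm.hfront_porch, vm.hsync_len, vm.hback_porch, vm.pixelclock);
	return 0;
}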
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 180f644e861e..8dea89030e66 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,7 +34,7 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_drm_irq vblank_irq;
struct omap_drm_irq error_irq;
@@ -56,10 +56,10 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
+ return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static int omap_crtc_dss_enable(enum omap_channel channel)
dispc_mgr_setup(omap_crtc->channel, &info);
dispc_mgr_set_timings(omap_crtc->channel,
- &omap_crtc->timings);
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
@@ -215,11 +215,11 @@ static void omap_crtc_dss_disable(enum omap_channel channel)
}
static void omap_crtc_dss_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
- omap_crtc->timings = *timings;
+ omap_crtc->vm = *vm;
}
static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
@@ -369,7 +369,10 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ drm_display_mode_to_videomode(mode, &omap_crtc->vm);
+ omap_crtc->vm.flags |= DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -411,19 +414,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (crtc->state->color_mgmt_changed) {
- struct drm_color_lut *lut = NULL;
- uint length = 0;
-
- if (crtc->state->gamma_lut) {
- lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
- sizeof(*lut);
- }
- dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
- }
-
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -438,13 +428,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
struct drm_property *property)
{
+ struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
return property == priv->zorder_prop ||
- property == dev->mode_config.rotation_property;
+ property == crtc->primary->rotation_property;
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@@ -452,9 +443,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
@@ -479,9 +468,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
/*
* Delegate property get to the primary plane. The
* drm_atomic_plane_get_property() function isn't exported, but
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1cfba51cff6..fdc83cbcde61 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_put();
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&priv->commit.lock);
@@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -266,13 +267,15 @@ cleanup:
}
static int omap_modeset_create_crtc(struct drm_device *dev, int id,
- enum omap_channel channel)
+ enum omap_channel channel,
+ u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY);
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY,
+ possible_crtcs);
if (IS_ERR(plane))
return PTR_ERR(plane);
@@ -292,16 +295,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- if (priv->has_dmm) {
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270 |
- DRM_REFLECT_X | DRM_REFLECT_Y);
- if (!dev->mode_config.rotation_property)
- return -ENOMEM;
- }
-
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (!priv->zorder_prop)
return -ENOMEM;
@@ -318,6 +311,7 @@ static int omap_modeset_init(struct drm_device *dev)
int num_crtcs;
int i, id = 0;
int ret;
+ u32 possible_crtcs;
drm_mode_config_init(dev);
@@ -334,6 +328,7 @@ static int omap_modeset_init(struct drm_device *dev)
* We use the num_crtc argument to limit the number of crtcs we create.
*/
num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
+ possible_crtcs = (1 << num_crtcs) - 1;
dssdev = NULL;
@@ -397,7 +392,8 @@ static int omap_modeset_init(struct drm_device *dev)
* allocated crtc, we create a new crtc for it
*/
if (!channel_used(dev, channel)) {
- ret = omap_modeset_create_crtc(dev, id, channel);
+ ret = omap_modeset_create_crtc(dev, id, channel,
+ possible_crtcs);
if (ret < 0) {
dev_err(dev->dev,
"could not create CRTC (channel %u)\n",
@@ -427,7 +423,8 @@ static int omap_modeset_init(struct drm_device *dev)
return -ENOMEM;
}
- ret = omap_modeset_create_crtc(dev, id, i);
+ ret = omap_modeset_create_crtc(dev, id, i,
+ possible_crtcs);
if (ret < 0) {
dev_err(dev->dev,
"could not create CRTC (channel %u)\n", i);
@@ -441,7 +438,8 @@ static int omap_modeset_init(struct drm_device *dev)
for (; id < num_ovls; id++) {
struct drm_plane *plane;
- plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY);
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY,
+ possible_crtcs);
if (IS_ERR(plane))
return PTR_ERR(plane);
@@ -752,22 +750,32 @@ static void dev_lastclose(struct drm_device *dev)
DBG("lastclose: dev=%p", dev);
- if (dev->mode_config.rotation_property) {
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- drm_object_property_set_value(&priv->crtcs[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ /* need to restore default rotation state.. not sure
+ * if there is a cleaner way to restore properties to
+ * default state? Maybe a flag that properties should
+ * automatically be restored to default state on
+ * lastclose?
+ */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ struct drm_crtc *crtc = priv->crtcs[i];
- for (i = 0; i < priv->num_planes; i++) {
- drm_object_property_set_value(&priv->planes[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ if (!crtc->primary->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&crtc->base,
+ crtc->primary->rotation_property,
+ DRM_ROTATE_0);
+ }
+
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+
+ if (!plane->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&plane->base,
+ plane->rotation_property,
+ DRM_ROTATE_0);
}
if (priv->fbdev) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index dcc30a98b9d4..7d9dd5400cef 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -148,7 +148,7 @@ static inline void omap_fbdev_free(struct drm_device *dev)
}
#endif
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
@@ -157,7 +157,8 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
int omap_crtc_wait_pending(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, enum drm_plane_type type);
+ int id, enum drm_plane_type type,
+ u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
@@ -171,11 +172,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings);
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode);
-
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0bbb9c59622e..a20f30039aee 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -102,7 +102,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
static int omap_encoder_update(struct drm_encoder *encoder,
enum omap_channel channel,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -111,13 +111,13 @@ static int omap_encoder_update(struct drm_encoder *encoder,
int ret;
if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, timings);
+ ret = dssdrv->check_timings(dssdev, vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(vm, &t, sizeof(struct videomode)))
ret = -EINVAL;
else
ret = 0;
@@ -129,7 +129,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
}
if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, timings);
+ dssdrv->set_timings(dssdev, vm);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index adb10fbe918d..8d8ac173f55d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -82,6 +82,7 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
@@ -92,11 +93,7 @@ static struct fb_ops omap_fb_ops = {
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..4a90c690f09e 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -336,8 +336,10 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
int i, npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
- dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (omap_obj->addrs[i])
+ dma_unmap_page(obj->dev->dev,
+ omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
@@ -396,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj,
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
omap_gem_cpu_sync(obj, pgoff);
@@ -407,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj,
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
}
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -425,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj,
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
- void __user *vaddr;
+ unsigned long vaddr;
int i, ret, slots;
/*
@@ -445,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj,
const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/*
* Actual address we start mapping at is rounded down to previous slot
@@ -457,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* figure out buffer width in slots */
slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
- vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
@@ -501,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj,
pfn = entry->paddr >> PAGE_SHIFT;
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, (unsigned long)vaddr,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
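
The omap_gem.c fault handlers switch from the old vmf->virtual_address pointer to the integer vmf->address, so the page offset within the mapping becomes plain arithmetic. A tiny standalone sketch of that calculation; the addresses and the 4 KiB page size are arbitrary example values:

#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12	/* 4 KiB pages, a common default */

int main(void)
{
	unsigned long vm_start = 0x70000000UL;		/* start of the mmap'ed range */
	unsigned long fault_address = 0x70004a10UL;	/* faulting address (vmf->address) */

	/* same expression as the patch: page offset of the fault in the mapping */
	unsigned long pgoff = (fault_address - vm_start) >> PAGE_SHIFT_SKETCH;

	printf("fault lands on page %lu of the mapping\n", pgoff);	/* page 4 */
	return 0;
}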
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 66ac8c40db26..82b2c23d6769 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_x = state->src_x >> 16;
win.src_y = state->src_y >> 16;
- switch (state->rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_90:
- case DRM_ROTATE_270:
+ if (drm_rotation_90_or_270(state->rotation)) {
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
- break;
- default:
+ } else {
win.src_w = state->src_w >> 16;
win.src_h = state->src_h >> 16;
- break;
}
/* update scanout: */
@@ -135,7 +131,9 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, &info, false,
omap_crtc_timings(state->crtc), false);
- if (WARN_ON(ret)) {
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ omap_plane->name);
dispc_ovl_enable(omap_plane->id, false);
return;
}
@@ -161,12 +159,20 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!state->fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ /* crtc should only be NULL when disabling (i.e., !state->fb) */
+ if (WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (!crtc_state->enable)
+ return 0;
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
@@ -177,11 +183,9 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->fb) {
- if (state->rotation != DRM_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
- }
+ if (state->rotation != DRM_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
return 0;
}
@@ -215,9 +219,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
- struct drm_property *prop = dev->mode_config.rotation_property;
-
- drm_object_attach_property(obj, prop, 0);
+ if (!plane->rotation_property)
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
+
+ /* Attach the rotation property also to the crtc object */
+ if (plane->rotation_property && obj != &plane->base)
+ drm_object_attach_property(obj, plane->rotation_property,
+ DRM_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
@@ -344,9 +356,9 @@ static const uint32_t error_irqs[] = {
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, enum drm_plane_type type)
+ int id, enum drm_plane_type type,
+ u32 possible_crtcs)
{
- struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane;
struct omap_plane *omap_plane;
int ret;
@@ -369,7 +381,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
- ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
&omap_plane_funcs, omap_plane->formats,
omap_plane->nformats, type, NULL);
if (ret < 0)
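
omap_plane.c now creates the rotation property per plane, lazily on first use, and that same property object is also attached to the CRTC, replacing the old device-global dev->mode_config.rotation_property. A standalone sketch of the lazy-create-and-share pattern, with plain structs standing in for DRM objects and properties:

#include <stdio.h>

struct prop_sketch { const char *name; };

struct plane_sketch {
	struct prop_sketch *rotation_property;	/* created lazily, once per plane */
};

struct obj_sketch {
	struct prop_sketch *attached[4];
	int nattached;
};

static struct prop_sketch rotation_prop = { "rotation" };

static void install_rotation(struct plane_sketch *plane, struct obj_sketch *obj)
{
	/* first call: the plane gets its rotation property
	 * (drm_plane_create_rotation_property() in the real code) */
	if (!plane->rotation_property)
		plane->rotation_property = &rotation_prop;

	/* later calls only attach that same property to another object,
	 * e.g. the CRTC, so both expose one shared property */
	obj->attached[obj->nattached++] = plane->rotation_property;
}

int main(void)
{
	struct plane_sketch plane = { 0 };
	struct obj_sketch plane_obj = { 0 }, crtc_obj = { 0 };

	install_rotation(&plane, &plane_obj);
	install_rotation(&plane, &crtc_obj);

	printf("plane and crtc share the property: %s\n",
	       plane_obj.attached[0] == crtc_obj.attached[0] ? "yes" : "no");
	return 0;
}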
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 113db3c4a633..06aaf79de8c8 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -120,7 +120,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
mode->type |= DRM_MODE_TYPE_DRIVER;
- if (panel->desc->num_modes == 1)
+ if (panel->desc->num_timings == 1)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
@@ -555,6 +555,91 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g133han01_timings = {
+ .pixelclock = { 134000000, 141200000, 149000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 39, 58, 77 },
+ .hback_porch = { 59, 88, 117 },
+ .hsync_len = { 28, 42, 56 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 3, 8, 11 },
+ .vback_porch = { 5, 14, 19 },
+ .vsync_len = { 4, 14, 19 },
+};
+
+static const struct panel_desc auo_g133han01 = {
+ .timings = &auo_g133han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct display_timing auo_g185han01_timings = {
+ .pixelclock = { 120000000, 144000000, 175000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 18, 60, 74 },
+ .hback_porch = { 12, 44, 54 },
+ .hsync_len = { 10, 24, 32 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 6, 10, 40 },
+ .vback_porch = { 2, 5, 20 },
+ .vsync_len = { 2, 5, 20 },
+};
+
+static const struct panel_desc auo_g185han01 = {
+ .timings = &auo_g185han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 409,
+ .height = 230,
+ },
+ .delay = {
+ .prepare = 50,
+ .enable = 200,
+ .disable = 110,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
+static const struct drm_display_mode auo_t215hvn01_mode = {
+ .clock = 148800,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 88,
+ .hsync_end = 1920 + 88 + 44,
+ .htotal = 1920 + 88 + 44 + 148,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 5,
+ .vtotal = 1080 + 4 + 5 + 36,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_t215hvn01 = {
+ .modes = &auo_t215hvn01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 430,
+ .height = 270,
+ },
+ .delay = {
+ .disable = 5,
+ .unprepare = 1000,
+ }
+};
+
static const struct drm_display_mode avic_tm070ddh03_mode = {
.clock = 51200,
.hdisplay = 1024,
@@ -583,6 +668,30 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
+ .clock = 66770,
+ .hdisplay = 800,
+ .hsync_start = 800 + 49,
+ .hsync_end = 800 + 49 + 33,
+ .htotal = 800 + 49 + 33 + 17,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 1,
+ .vsync_end = 1280 + 1 + 7,
+ .vtotal = 1280 + 1 + 7 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc chunghwa_claa070wp03xg = {
+ .modes = &chunghwa_claa070wp03xg_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 94,
+ .height = 150,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -877,27 +986,31 @@ static const struct panel_desc innolux_g101ice_l01 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
-static const struct drm_display_mode innolux_g121i1_l01_mode = {
- .clock = 71000,
- .hdisplay = 1280,
- .hsync_start = 1280 + 64,
- .hsync_end = 1280 + 64 + 32,
- .htotal = 1280 + 64 + 32 + 64,
- .vdisplay = 800,
- .vsync_start = 800 + 9,
- .vsync_end = 800 + 9 + 6,
- .vtotal = 800 + 9 + 6 + 9,
- .vrefresh = 60,
+static const struct display_timing innolux_g121i1_l01_timing = {
+ .pixelclock = { 67450000, 71000000, 74550000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 40, 80, 160 },
+ .hback_porch = { 39, 79, 159 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 11, 100 },
+ .vback_porch = { 4, 11, 99 },
+ .vsync_len = { 1, 1, 1 },
};
static const struct panel_desc innolux_g121i1_l01 = {
- .modes = &innolux_g121i1_l01_mode,
- .num_modes = 1,
+ .timings = &innolux_g121i1_l01_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 261,
.height = 163,
},
+ .delay = {
+ .enable = 200,
+ .disable = 20,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1164,6 +1277,29 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
+static const struct drm_display_mode nvd_9128_mode = {
+ .clock = 29500,
+ .hdisplay = 800,
+ .hsync_start = 800 + 130,
+ .hsync_end = 800 + 130 + 98,
+ .htotal = 800 + 0 + 130 + 98,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 50,
+ .vtotal = 480 + 0 + 10 + 50,
+};
+
+static const struct panel_desc nvd_9128 = {
+ .modes = &nvd_9128_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 156,
+ .height = 88,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
.pixelclock = { 30000000, 30000000, 40000000 },
.hactive = { 800, 800, 800 },
@@ -1409,6 +1545,7 @@ static const struct drm_display_mode sharp_lq123p1jx31_mode = {
static const struct panel_desc sharp_lq123p1jx31 = {
.modes = &sharp_lq123p1jx31_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 259,
.height = 173,
@@ -1420,6 +1557,30 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
+static const struct drm_display_mode sharp_lq150x1lg11_mode = {
+ .clock = 71100,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 168,
+ .hsync_end = 1024 + 168 + 64,
+ .htotal = 1024 + 168 + 64 + 88,
+ .vdisplay = 768,
+ .vsync_start = 768 + 37,
+ .vsync_end = 768 + 37 + 2,
+ .vtotal = 768 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq150x1lg11 = {
+ .modes = &sharp_lq150x1lg11_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 304,
+ .height = 228,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -1575,9 +1736,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g133han01",
+ .data = &auo_g133han01,
+ }, {
+ .compatible = "auo,g185han01",
+ .data = &auo_g185han01,
+ }, {
+ .compatible = "auo,t215hvn01",
+ .data = &auo_t215hvn01,
+ }, {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "chunghwa,claa070wp03xg",
+ .data = &chunghwa_claa070wp03xg,
+ }, {
.compatible = "chunghwa,claa101wa01a",
.data = &chunghwa_claa101wa01a
}, {
@@ -1653,6 +1826,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "nec,nl4827hc19-05b",
.data = &nec_nl4827hc19_05b,
}, {
+ .compatible = "nvd,9128",
+ .data = &nvd_9128,
+ }, {
.compatible = "okaya,rs800480t-7x0gp",
.data = &okaya_rs800480t_7x0gp,
}, {
@@ -1683,6 +1859,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
+ .compatible = "sharp,lq150x1lg11",
+ .data = &sharp_lq150x1lg11,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 04270f5d110c..74fc9362ecf9 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
return 0;
}
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
struct qxl_rect rect;
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 6911b8c44492..241af9131dc8 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -123,9 +123,6 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- qdev->ddev->control->debugfs_root,
- qdev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
qdev->ddev->primary->debugfs_root,
qdev->ddev->primary);
#endif
@@ -140,9 +137,6 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev)
for (i = 0; i < qdev->debugfs_count; i++) {
drm_debugfs_remove_files(qdev->debugfs[i].files,
qdev->debugfs[i].num_files,
- qdev->ddev->control);
- drm_debugfs_remove_files(qdev->debugfs[i].files,
- qdev->debugfs[i].num_files,
qdev->ddev->primary);
}
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a61c0d460ec2..4b5eab8a47b3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
qdev->client_monitors_config->count = count;
}
+enum {
+ MONITORS_CONFIG_MODIFIED,
+ MONITORS_CONFIG_UNCHANGED,
+ MONITORS_CONFIG_BAD_CRC,
+};
+
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
int i;
int num_monitors;
uint32_t crc;
+ int status = MONITORS_CONFIG_UNCHANGED;
num_monitors = qdev->rom->client_monitors_config.count;
crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
@@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
sizeof(qdev->rom->client_monitors_config),
qdev->rom->client_monitors_config_crc);
- return 1;
+ return MONITORS_CONFIG_BAD_CRC;
}
if (num_monitors > qdev->monitors_config->max_allowed) {
DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
@@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
} else {
num_monitors = qdev->rom->client_monitors_config.count;
}
+ if (qdev->client_monitors_config
+ && (num_monitors != qdev->client_monitors_config->count)) {
+ status = MONITORS_CONFIG_MODIFIED;
+ }
qxl_alloc_client_monitors_config(qdev, num_monitors);
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
@@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&qdev->rom->client_monitors_config.heads[i];
struct qxl_head *client_head =
&qdev->client_monitors_config->heads[i];
- client_head->x = c_rect->left;
- client_head->y = c_rect->top;
- client_head->width = c_rect->right - c_rect->left;
- client_head->height = c_rect->bottom - c_rect->top;
- client_head->surface_id = 0;
- client_head->id = i;
- client_head->flags = 0;
+ if (client_head->x != c_rect->left) {
+ client_head->x = c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->y != c_rect->top) {
+ client_head->y = c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->width != c_rect->right - c_rect->left) {
+ client_head->width = c_rect->right - c_rect->left;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->height != c_rect->bottom - c_rect->top) {
+ client_head->height = c_rect->bottom - c_rect->top;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->surface_id != 0) {
+ client_head->surface_id = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->id != i) {
+ client_head->id = i;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
+ if (client_head->flags != 0) {
+ client_head->flags = 0;
+ status = MONITORS_CONFIG_MODIFIED;
+ }
DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height,
client_head->x, client_head->y);
}
- return 0;
+
+ return status;
}
static void qxl_update_offset_props(struct qxl_device *qdev)
@@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = qdev->ddev;
- while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+ int status;
+
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ while (status == MONITORS_CONFIG_BAD_CRC) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
+ status = qxl_display_copy_rom_client_monitors_config(qdev);
+ }
+ if (status == MONITORS_CONFIG_UNCHANGED) {
+ qxl_io_log(qdev, "config unchanged\n");
+ DRM_DEBUG("ignoring unchanged client monitors config");
+ return;
}
drm_modeset_lock_all(dev);
@@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
false);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->hdisplay = head->width;
+ mode->vdisplay = head->height;
+ drm_mode_set_name(mode);
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
@@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void
+static void
qxl_send_monitors_config(struct qxl_device *qdev)
{
int i;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..785aad42e9bb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
- struct fence base;
+ struct dma_fence base;
int id;
int type;
@@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj,
const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
-void qxl_send_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
int qxl_destroy_monitors_object(struct qxl_device *qdev);
-/* used by qxl_debugfs only */
-void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
-void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
-
/* qxl_gem.c */
-int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_init(struct qxl_device *qdev);
void qxl_gem_fini(struct qxl_device *qdev);
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
@@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
struct qxl_drv_surface *
qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
-int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2cd879a4ae15..fd7e5e94be5b 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -81,16 +81,10 @@ static struct fb_deferred_io qxl_defio = {
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -197,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
*/
- qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
clips->y1, clips->y2);
image->dx = clips->x1;
image->dy = clips->y1;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index d9746e904ef1..3f185c4da5b7 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj,
{
}
-int qxl_gem_init(struct qxl_device *qdev)
+void qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
- return 0;
}
void qxl_gem_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e642242728c0..af685f1d91f8 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev,
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
- INIT_LIST_HEAD(&qdev->gem.objects);
+ qxl_gem_init(qdev);
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
@@ -273,6 +273,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
+ qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..e6ec845b5be0 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
{
/* fences are always automatically signaled, so just pretend we did this.. */
return true;
}
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
sc++;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
if (!qxl_queue_garbage_collect(qdev, true))
break;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
}
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
+ DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
@@ -115,7 +116,7 @@ signaled:
return end - cur;
}
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.enable_signaling = qxl_nop_signaling,
@@ -133,7 +134,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
release = kmalloc(size, GFP_KERNEL);
if (!release) {
DRM_ERROR("Out of memory\n");
- return 0;
+ return -ENOMEM;
}
release->base.ops = NULL;
release->type = type;
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
- fence_signal(&release->base);
- fence_put(&release->base);
+ dma_fence_signal(&release->base);
+ dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
- fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
- release->id | 0xf0000000, release->base.seqno);
- trace_fence_emit(&release->base);
+ dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_dma_fence_emit(&release->base);
driver = bdev->driver;
glob = bo->glob;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b..11761330a6b8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 74f99bac08b1..05f4ebe31ce2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,7 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1260,9 +1260,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
@@ -1473,7 +1472,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- char *format_name;
+ struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1563,9 +1562,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- format_name = drm_get_format_name(target_fb->pixel_format);
- DRM_ERROR("Unsupported screen format %s\n", format_name);
- kfree(format_name);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format, &format_name));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e33..fa4f8f008e4d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408..f8b05090232a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 103fc8650197..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(SRBM_GFX_CNTL, RINGID(ring));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd3..595a19736458 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct dma_fence base;
struct radeon_device *rdev;
uint64_t seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct dma_fence *fence;
bool async;
};
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b..4134759a6823 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
#include "atom.h"
#include "atom-bits.h"
+#include "radeon_asic.h"
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2fdcd04bc93f..0ae13cd2adda 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -34,6 +34,7 @@ struct radeon_atpx {
static struct radeon_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
struct radeon_atpx atpx;
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
@@ -548,11 +553,16 @@ static bool radeon_atpx_detect(void)
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
+ bool d3_supported = false;
+ struct pci_dev *parent_pdev;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
/* some newer PX laptops mark the dGPU as a non-VGA display device */
@@ -560,6 +570,9 @@ static bool radeon_atpx_detect(void)
vga_count++;
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
}
if (has_atpx && vga_count == 2) {
@@ -567,6 +580,7 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ radeon_atpx_priv.bridge_pm_usable = d3_supported;
radeon_atpx_init();
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a9..c1135feb93c1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e18839d52e3e..27affbde058c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->ddc_bus->has_aux) {
+ if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
radeon_connector->ddc_bus->has_aux = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 2a10e24b34b1..fb16070b266e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
+ if (radeon_crtc->cursor_out_of_bounds)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(radeon_crtc->cursor_addr));
@@ -143,21 +146,25 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
+ radeon_crtc->cursor_x = x;
+ radeon_crtc->cursor_y = y;
+
if (ASIC_IS_AVIVO(rdev)) {
/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
}
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
- if (x < 0) {
+ if (x < 0)
xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
+ if (y < 0)
yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
- y = 0;
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ x += crtc->x;
+ y += crtc->y;
}
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
/* fixed on DCE6 and newer */
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
@@ -180,27 +187,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
if (i > 1) {
int cursor_end, frame_end;
- cursor_end = x - xorigin + w;
+ cursor_end = x + w;
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
if (cursor_end >= frame_end) {
w = w - (cursor_end - frame_end);
if (!(frame_end & 0x7f))
w--;
- } else {
- if (!(cursor_end & 0x7f))
- w--;
+ } else if (cursor_end <= 0) {
+ goto out_of_bounds;
+ } else if (!(cursor_end & 0x7f)) {
+ w--;
}
if (w <= 0) {
- w = 1;
- cursor_end = x - xorigin + w;
- if (!(cursor_end & 0x7f)) {
- x--;
- WARN_ON_ONCE(x < 0);
- }
+ goto out_of_bounds;
}
}
}
+ if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
+ x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
+ y >= (crtc->y + crtc->mode.crtc_vdisplay))
+ goto out_of_bounds;
+
+ x += xorigin;
+ y += yorigin;
+
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -212,6 +223,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else {
+ x -= crtc->x;
+ y -= crtc->y;
+
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
@@ -229,10 +243,20 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
yorigin * 256);
}
- radeon_crtc->cursor_x = x;
- radeon_crtc->cursor_y = y;
+ if (radeon_crtc->cursor_out_of_bounds) {
+ radeon_crtc->cursor_out_of_bounds = false;
+ if (radeon_crtc->cursor_bo)
+ radeon_show_cursor(crtc);
+ }
return 0;
+
+ out_of_bounds:
+ if (!radeon_crtc->cursor_out_of_bounds) {
+ radeon_hide_cursor(crtc);
+ radeon_crtc->cursor_out_of_bounds = true;
+ }
+ return 0;
}
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
@@ -297,22 +321,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- radeon_crtc->cursor_width = width;
- radeon_crtc->cursor_height = height;
-
radeon_lock_cursor(crtc, true);
- if (hot_x != radeon_crtc->cursor_hot_x ||
+ if (width != radeon_crtc->cursor_width ||
+ height != radeon_crtc->cursor_height ||
+ hot_x != radeon_crtc->cursor_hot_x ||
hot_y != radeon_crtc->cursor_hot_y) {
int x, y;
x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
- radeon_cursor_move_locked(crtc, x, y);
-
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
radeon_crtc->cursor_hot_x = hot_x;
radeon_crtc->cursor_hot_y = hot_y;
+
+ radeon_cursor_move_locked(crtc, x, y);
}
radeon_show_cursor(crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..8a1df2a1afbd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
+
+	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+ if (!radeon_is_atpx_hybrid() &&
+ !radeon_has_atpx_dgpu_power_cntl())
+ rdev->flags &= ~RADEON_IS_PX;
}
/**
@@ -1320,7 +1333,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+ rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1651,7 +1664,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is to evict the gart page table
+ * using the CPU.
+ */
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
@@ -1933,9 +1949,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- rdev->ddev->control->debugfs_root,
- rdev->ddev->control);
- drm_debugfs_create_files(files, nfiles,
rdev->ddev->primary->debugfs_root,
rdev->ddev->primary);
#endif
@@ -1950,9 +1963,6 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
for (i = 0; i < rdev->debugfs_count; i++) {
drm_debugfs_remove_files(rdev->debugfs[i].files,
rdev->debugfs[i].num_files,
- rdev->ddev->control);
- drm_debugfs_remove_files(rdev->debugfs[i].files,
- rdev->debugfs[i].num_files,
rdev->ddev->primary);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
}
} else
- r = fence_wait(work->fence, false);
+ r = dma_fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- fence_put(work->fence);
+ dma_fence_put(work->fence);
work->fence = NULL;
}
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -617,7 +617,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ dma_fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 2d465648856a..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06..6d1237d6e1b8 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct
+drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct
+radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
.commit = radeon_mst_encoder_commit,
};
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad446d2c7..899b6a1644bd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -74,28 +74,22 @@ radeonfb_release(struct fb_info *info, int user)
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
.fb_release = radeonfb_release,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
-int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = align_large ? 255 : 127;
break;
@@ -110,7 +104,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,13 +133,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+ fb_tiled);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
@@ -165,11 +159,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (bpp) {
- case 32:
+ switch (cpp) {
+ case 4:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
- case 16:
+ case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+ dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock,
+ rdev->fence_context + ring,
+ seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
+ int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
- FENCE_TRACE(&fence->base, "pending\n");
+ DMA_FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
+ dma_fence_get(f);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
- ret = fence_signal(&fence->base);
+ ret = dma_fence_signal(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ return dma_fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = fence_signal(&fence->base);
+ r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return r;
}
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ dma_fence_put(&tmp->base);
}
}
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
return "radeon";
}
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
struct radeon_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct radeon_wait_cb *wait =
container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
wake_up_process(wait->task);
}
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
cb.task = current;
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
return t;
while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+ dma_fence_remove_callback(f, &cb.base);
return t;
}
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511725c9..0bcffd8a7bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = radeon_align_pitch(rdev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaa..222a1fa41d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bb75201a24ba..f1da484864a9 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -330,6 +330,7 @@ struct radeon_crtc {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
+ bool cursor_out_of_bounds;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..326ad068c15a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
+ /* allow new DPM state to be picked */
+ radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ dpm_state = rdev->pm.dpm.ac_power ?
+ POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
bool shared)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
flist = reservation_object_get_list(resv);
if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
if (r)
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e21662..0cf03ccbf0a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
+ struct dma_fence *f;
void *ptr;
int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c4..ad4d7b8b8322 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1714,6 +1714,7 @@ static int si_init_microcode(struct radeon_device *rdev)
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605))
new_smc = true;
@@ -7858,7 +7859,7 @@ static void si_program_aspm(struct radeon_device *rdev)
}
}
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
unsigned i;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf20344ae..8b5e697f2549 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,50 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (rdev->family == CHIP_PITCAIRN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (rdev->family == CHIP_VERDE) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_HAINAN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3055,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (rdev->pdev->device == 0x6811 &&
- rdev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (rdev->pdev->device == 0x6665 &&
- rdev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7fa0bd..a2ec6d8796a0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 73c971e39b1c..c05e00872778 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -110,6 +110,27 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7792_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7792 has two RGB outputs. */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 1,
+ },
+ },
+ .num_lvds = 0,
+};
+
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -157,13 +178,39 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.num_lvds = 1,
};
+static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7796 has one RGB output, one LVDS output and one
+ * (currently unsupported) HDMI output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
+ { .compatible = "renesas,du-r8a7792", .data = &rcar_du_r8a7792_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
+ { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
{ }
};
@@ -201,9 +248,7 @@ static const struct file_operations rcar_du_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -285,7 +330,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_vblank_cleanup(ddev);
drm_dev_unref(ddev);
@@ -294,18 +338,12 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct rcar_du_device *rcdu;
struct drm_device *ddev;
struct resource *mem;
int ret;
- if (np == NULL) {
- dev_err(&pdev->dev, "no device tree node\n");
- return -ENODEV;
- }
-
- /* Allocate and initialize the DRM and R-Car device structures. */
+ /* Allocate and initialize the R-Car device structure. */
rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
if (rcdu == NULL)
return -ENOMEM;
@@ -315,31 +353,22 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->dev = &pdev->dev;
rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio)) {
- ret = PTR_ERR(rcdu->mmio);
- goto error;
- }
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0)
- goto error;
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 33b2fc53da3e..64738fca96d0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -105,16 +105,20 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
rcar_du_group_setup_defr8(rgrp);
- /* Configure input dot clock routing. We currently hardcode the
- * configuration to routing DOTCLKINn to DUn.
+ /*
+ * Configure input dot clock routing. We currently hardcode the
+ * configuration to routing DOTCLKINn to DUn. Register fields
+ * depend on the DU generation, but the resulting value is 0 in
+ * all cases.
+ *
+ * On Gen2 a single register in the first group controls dot
+ * clock selection for all channels, while on Gen3 dot clocks
+ * are setup through per-group registers, only available when
+ * the group has two channels.
*/
- rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE |
- DIDSR_LCDS_DCLKIN(2) |
- DIDSR_LCDS_DCLKIN(1) |
- DIDSR_LCDS_DCLKIN(0) |
- DIDSR_PDCS_CLK(2, 0) |
- DIDSR_PDCS_CLK(1, 0) |
- DIDSR_PDCS_CLK(0, 0));
+ if ((rcdu->info->gen < 3 && rgrp->index == 0) ||
+ (rcdu->info->gen == 3 && rgrp->num_crtcs > 1))
+ rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE);
}
if (rcdu->info->gen >= 3)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index e03004f4588d..f9515f53cc5b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -108,7 +108,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate drm bridge from the hdmi encoder DT node */
+ /* Locate the DRM bridge from the HDMI encoder DT node. */
bridge = of_drm_find_bridge(np);
if (!bridge)
return -EPROBE_DEFER;
@@ -123,7 +123,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- /* Link drm_bridge to encoder */
+ /* Link the bridge to the encoder. */
bridge->encoder = encoder;
encoder->bridge = bridge;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index bd9c3bb9252c..b5d3f16cfa12 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
struct rcar_du_device *rcdu = dev->dev_private;
int ret;
- ret = drm_atomic_helper_check(dev, state);
- if (ret < 0)
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
return ret;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
@@ -264,7 +272,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&rcdu->commit.wait.lock);
@@ -330,6 +338,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -445,13 +454,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);
return ret;
}
@@ -559,6 +568,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
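
The substitution of drm_atomic_state_free() with drm_atomic_state_get()/drm_atomic_state_put() in the hunks above (and in the sti, tegra and tilcdc hunks further down) makes the atomic state a reference-counted object: the commit path grabs an extra reference right after drm_atomic_helper_swap_state(), and the completion path, which may run asynchronously from a workqueue, drops it once cleanup is done. A minimal sketch of that shape, using hypothetical example_* names and standard DRM atomic helpers:

static void example_commit_tail(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);

	/* Drop the reference taken in example_commit(); this may free the state. */
	drm_atomic_state_put(state);
}

static int example_commit(struct drm_device *dev,
			  struct drm_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	drm_atomic_helper_swap_state(state, true);

	/* Keep the state alive until example_commit_tail() has finished,
	 * even if the caller's last reference goes away in the meantime. */
	drm_atomic_state_get(state);

	/* A nonblocking commit would queue example_commit_tail() on a
	 * workqueue here instead of calling it directly. */
	example_commit_tail(dev, state);

	return 0;
}
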
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6afd0af312ba..3bcfd161c53f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -61,16 +61,9 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
};
-static enum drm_connector_status
-rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -79,7 +72,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- /* TODO const */ struct device_node *np)
+ const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d4881ee0be7e..639071dd235c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -19,6 +19,6 @@ struct rcar_du_encoder;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- struct device_node *np);
+ const struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index ef3a50321ecc..e3a4985f6f3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -104,7 +104,14 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
- /* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+ LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+ LVDCR1_CLKSTBY_GEN3);
+
+ /*
+ * Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
lvdcr0 = LVDCR0_PLLON;
@@ -117,12 +124,6 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
lvdcr0 |= LVDCR0_LVRES;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
- LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
- LVDCR1_CLKSTBY_GEN3);
}
static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
@@ -241,10 +242,8 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
for (i = 0; i < rcdu->info->num_lvds; ++i) {
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL) {
- dev_err(&pdev->dev, "failed to allocate private data\n");
+ if (lvds == NULL)
return -ENOMEM;
- }
lvds->dev = rcdu;
lvds->index = i;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3c58669a06ce..6f7f9c59f05b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,7 +1,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index ca22e5ee89ca..d9aa382bb629 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -969,12 +969,6 @@ static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
.mode_valid = dw_mipi_dsi_mode_valid,
};
-static enum drm_connector_status
-dw_mipi_dsi_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -984,7 +978,6 @@ static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = dw_mipi_dsi_detect,
.destroy = dw_mipi_dsi_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..2390c8577617 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -274,9 +275,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
@@ -388,7 +387,7 @@ static void rockchip_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
@@ -437,7 +436,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
}
of_node_put(iommu);
- component_match_add(dev, &match, compare_of, port->parent);
+ drm_of_component_match_add(dev, &match, compare_of,
+ port->parent);
of_node_put(port);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index a16c69f96ed5..8f639c8597a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,15 +37,11 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 3b807135a5cd..78c6d8e9b42c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,9 +42,7 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 3dc0d8ff95ec..2db89bed52e8 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
if (IS_ERR(kvb_addr)) {
ret = PTR_ERR(kvb_addr);
+ kvb_addr = NULL;
goto done;
}
cmdbuf->vb_addr = kvb_addr;
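
The extra kvb_addr = NULL in the savage hunk above matters because the function's shared cleanup label frees the copied buffers unconditionally (as the fix implies): after a failed memdup_user() the variable holds an ERR_PTR-encoded error code, and handing that to kfree() would be an invalid free, while kfree(NULL) is a harmless no-op. A hedged sketch of the pattern with hypothetical names:

static int example_copy_and_use(void __user *user_ptr, size_t size)
{
	void *kbuf;
	int ret = 0;

	kbuf = memdup_user(user_ptr, size);
	if (IS_ERR(kbuf)) {
		ret = PTR_ERR(kbuf);
		kbuf = NULL;	/* keep the cleanup below from kfree()ing an ERR_PTR */
		goto done;
	}

	/* ... use kbuf ... */

done:
	kfree(kbuf);		/* kfree(NULL) is a no-op */
	return ret;
}
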
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 6547b1db460a..dddbdd62bed0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -669,15 +669,8 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status
-shmob_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = shmob_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = shmob_drm_connector_destroy,
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f0492603ea88..38dd55f4af81 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -245,9 +245,7 @@ static const struct file_operations shmob_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index ae9839886c4d..a836451920f0 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,9 +72,7 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2784919a7366..ff71e25ab5bf 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
#include "sti_crtc.h"
#include "sti_drv.h"
@@ -184,7 +185,7 @@ static void sti_atomic_complete(struct sti_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void sti_atomic_work(struct work_struct *work)
@@ -195,6 +196,26 @@ static void sti_atomic_work(struct work_struct *work)
sti_atomic_complete(private, private->commit.state);
}
+static int sti_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int sti_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool nonblock)
{
@@ -217,6 +238,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
sti_atomic_schedule(private, state);
else
@@ -248,7 +270,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = sti_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = sti_atomic_check,
.atomic_commit = sti_atomic_commit,
};
@@ -275,9 +297,7 @@ static const struct file_operations sti_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.release = drm_release,
};
@@ -423,8 +443,8 @@ static int sti_platform_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
+ drm_of_component_match_add(dev, &match, compare_of,
+ child_np);
child_np = of_get_next_available_child(node, child_np);
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index e7c243f70870..96f336dd0e29 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -642,12 +642,6 @@ struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
.mode_valid = sti_hda_connector_mode_valid,
};
-static enum drm_connector_status
-sti_hda_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static int sti_hda_late_register(struct drm_connector *connector)
{
struct sti_hda_connector *hda_connector
@@ -665,7 +659,6 @@ static int sti_hda_late_register(struct drm_connector *connector)
static const struct drm_connector_funcs sti_hda_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = sti_hda_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 32c0584e3c35..2e08f969bb64 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -95,6 +95,22 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
break;
+ case DRM_FORMAT_ARGB4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
+ break;
+
+ case DRM_FORMAT_RGBA4444:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
+ break;
+
case DRM_FORMAT_XRGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
break;
@@ -103,6 +119,10 @@ static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
break;
+ case DRM_FORMAT_RGB565:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
+ break;
+
default:
return -EINVAL;
}
@@ -389,7 +409,7 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
reset_control_assert(backend->reset);
}
-static struct component_ops sun4i_backend_ops = {
+static const struct component_ops sun4i_backend_ops = {
.bind = sun4i_backend_bind,
.unbind = sun4i_backend_unbind,
};
@@ -408,6 +428,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun6i-a31-display-backend" },
{ .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862ad8ed..4ce665349f6b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -52,9 +53,7 @@ static const struct file_operations sun4i_drv_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -142,9 +141,9 @@ static int sun4i_drv_bind(struct device *dev)
/* Create our layers */
drv->layers = sun4i_layers_init(drm);
- if (!drv->layers) {
+ if (IS_ERR(drv->layers)) {
dev_err(drm->dev, "Couldn't create the planes\n");
- ret = -EINVAL;
+ ret = PTR_ERR(drv->layers);
goto free_drm;
}
@@ -201,12 +200,15 @@ static const struct component_master_ops sun4i_drv_master_ops = {
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
@@ -239,7 +241,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
of_node_full_name(node));
- component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
@@ -322,6 +324,8 @@ static int sun4i_drv_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun6i-a31-display-engine" },
+ { .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index f0035bf5efea..5d53c977bca5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -73,12 +73,18 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
static const uint32_t sun4i_backend_layer_formats_primary[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static const uint32_t sun4i_backend_layer_formats_overlay[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index c3ff10f559cc..f5e86fe7750e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -110,12 +110,6 @@ static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.mode_valid = sun4i_rgb_mode_valid,
};
-static enum drm_connector_status
-sun4i_rgb_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
@@ -129,7 +123,6 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs sun4i_rgb_con_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = sun4i_rgb_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_rgb_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -152,15 +145,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (!IS_ERR(tcon->panel))
drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
- }
-
- /* encoder->bridge can be NULL; drm_bridge_enable checks for it */
- drm_bridge_enable(encoder->bridge);
sun4i_tcon_channel_enable(tcon, 0);
+
+ if (!IS_ERR(tcon->panel))
+ drm_panel_enable(tcon->panel);
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -171,15 +162,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- sun4i_tcon_channel_disable(tcon, 0);
+ if (!IS_ERR(tcon->panel))
+ drm_panel_disable(tcon->panel);
- /* encoder->bridge can be NULL; drm_bridge_disable checks for it */
- drm_bridge_disable(encoder->bridge);
+ sun4i_tcon_channel_disable(tcon, 0);
- if (!IS_ERR(tcon->panel)) {
- drm_panel_disable(tcon->panel);
+ if (!IS_ERR(tcon->panel))
drm_panel_unprepare(tcon->panel);
- }
}
static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index cadacb517f95..ea2906f87cb9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -62,7 +63,7 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->sclk1);
@@ -80,7 +81,7 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
SUN4I_TCON1_CTL_TCON_ENABLE);
@@ -202,7 +203,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val;
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -266,7 +267,7 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
/*
* FIXME: Undocumented bits
*/
- if (tcon->has_mux)
+ if (tcon->quirks->has_unknown_mux)
regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
}
EXPORT_SYMBOL(sun4i_tcon1_mode_set);
@@ -327,7 +328,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
- if (tcon->has_channel_1) {
+ if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
@@ -487,14 +488,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
drv->tcon = tcon;
tcon->drm = drm;
tcon->dev = dev;
-
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
- tcon->has_mux = true;
- tcon->has_channel_1 = true;
- } else {
- tcon->has_mux = false;
- tcon->has_channel_1 = false;
- }
+ tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -551,7 +545,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
sun4i_tcon_free_clocks(tcon);
}
-static struct component_ops sun4i_tcon_ops = {
+static const struct component_ops sun4i_tcon_ops = {
.bind = sun4i_tcon_bind,
.unbind = sun4i_tcon_unbind,
};
@@ -588,9 +582,28 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_unknown_mux = true,
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ /* nothing is supported */
+};
+
static const struct of_device_id sun4i_tcon_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-tcon" },
- { .compatible = "allwinner,sun8i-a33-tcon" },
+ { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
+ { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
+ { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 12bd48925f4d..166064bafe2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -142,6 +142,11 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon_quirks {
+ bool has_unknown_mux; /* sun5i has undocumented mux */
+ bool has_channel_1; /* a33 does not have channel 1 */
+};
+
struct sun4i_tcon {
struct device *dev;
struct drm_device *drm;
@@ -160,12 +165,10 @@ struct sun4i_tcon {
/* Reset control */
struct reset_control *lcd_rst;
- /* Platform adjustments */
- bool has_mux;
-
struct drm_panel *panel;
- bool has_channel_1;
+ /* Platform adjustments */
+ const struct sun4i_tcon_quirks *quirks;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
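
The sun4i-tcon changes above replace compatible-string checks in the bind path with a quirks structure hung off each of_device_id entry; of_device_get_match_data() then returns the .data pointer of whichever entry matched at probe time, so supporting a new SoC only requires a new table entry. A minimal sketch of the lookup with hypothetical example_* names:

struct example_quirks {
	bool has_channel_1;
};

static const struct example_quirks example_soc_a_quirks = {
	.has_channel_1 = true,
};

static const struct of_device_id example_of_table[] = {
	{ .compatible = "vendor,example-soc-a", .data = &example_soc_a_quirks },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_table);

static int example_probe(struct platform_device *pdev)
{
	const struct example_quirks *quirks;

	/* Returns the .data field of the matching of_device_id entry. */
	quirks = of_device_get_match_data(&pdev->dev);
	if (!quirks)
		return -EINVAL;

	if (quirks->has_channel_1) {
		/* ... set up the second channel ... */
	}

	return 0;
}
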
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 1dd3d9eabf2e..c6f47222e8fc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -537,12 +537,6 @@ static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs =
.mode_valid = sun4i_tv_comp_mode_valid,
};
-static enum drm_connector_status
-sun4i_tv_comp_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
static void
sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
{
@@ -551,7 +545,6 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .detect = sun4i_tv_comp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_tv_comp_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -667,7 +660,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
clk_disable_unprepare(tv->clk);
}
-static struct component_ops sun4i_tv_ops = {
+static const struct component_ops sun4i_tv_ops = {
.bind = sun4i_tv_bind,
.unbind = sun4i_tv_unbind,
};
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index bf6d846d8132..09bba853e2a4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -80,7 +80,7 @@ static void sun6i_drc_unbind(struct device *dev, struct device *master,
reset_control_assert(drc->reset);
}
-static struct component_ops sun6i_drc_ops = {
+static const struct component_ops sun6i_drc_ops = {
.bind = sun6i_drc_bind,
.unbind = sun6i_drc_unbind,
};
@@ -98,6 +98,8 @@ static int sun6i_drc_remove(struct platform_device *pdev)
}
static const struct of_device_id sun6i_drc_of_table[] = {
+ { .compatible = "allwinner,sun6i-a31-drc" },
+ { .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
{ }
};
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index f418892b0c71..c54138c3a376 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -49,9 +49,7 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 63ebb154b9b5..bbf5a4b7e0b6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,7 +3,6 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
- depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 059f409556d5..2fde44c3a1b3 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -539,9 +539,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->desc.owner = THIS_MODULE;
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
- if (!dpaux->pinctrl) {
+ if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
- return -ENODEV;
+ return PTR_ERR(dpaux->pinctrl);
}
#endif
/* enable and clear all interrupts */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8ab47b502d83..b8be3ee4d3b8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void tegra_atomic_work(struct work_struct *work)
@@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
tegra_atomic_schedule(tegra, state);
else
@@ -801,9 +802,7 @@ static const struct file_operations tegra_drm_fops = {
.mmap = tegra_drm_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e6d71fa4028e..e4a5ab0a9677 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -186,14 +186,10 @@ unreference:
#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_blank = drm_fb_helper_blank,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_setcmap = drm_fb_helper_setcmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 95e622e31931..7d853e6b5ff0 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -2,7 +2,7 @@
* NVIDIA Tegra DRM GEM helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
*
* Based on the GEM/CMA helpers
*
@@ -36,6 +36,8 @@ static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ *sgt = obj->sgt;
+
return obj->paddr;
}
@@ -47,23 +49,51 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr;
+ if (obj->vaddr)
+ return obj->vaddr;
+ else if (obj->gem.import_attach)
+ return dma_buf_vmap(obj->gem.import_attach->dmabuf);
+ else
+ return vmap(obj->pages, obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+ else
+ vunmap(addr);
}
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr + page * PAGE_SIZE;
+ if (obj->vaddr)
+ return obj->vaddr + page * PAGE_SIZE;
+ else if (obj->gem.import_attach)
+ return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
+ else
+ return vmap(obj->pages + page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
+ else
+ vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
@@ -318,11 +348,6 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
get_dma_buf(buf);
bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
- if (!bo->sgt) {
- err = -ENOMEM;
- goto detach;
- }
-
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto detach;
@@ -427,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!bo->pages)
return VM_FAULT_SIGBUS;
- offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+ offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
- err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ err = vm_insert_page(vma, vmf->address, page);
switch (err) {
case -EAGAIN:
case 0:
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 0b3f2b977ba0..13f0d1b7cd98 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -268,9 +268,9 @@ static int gr3d_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
- if (IS_ERR(gr3d->clk)) {
+ if (IS_ERR(gr3d->clk_secondary)) {
dev_err(&pdev->dev, "cannot get secondary clock\n");
- return PTR_ERR(gr3d->clk);
+ return PTR_ERR(gr3d->clk_secondary);
}
gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 74d0540b8d4c..a8f528925009 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -349,8 +349,6 @@ static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
brick->hw.init = &init;
clk = devm_clk_register(sor->dev, &brick->hw);
- if (IS_ERR(clk))
- kfree(brick);
return clk;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 52ebe8fc1784..9942b0577d6e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -21,11 +21,15 @@
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
-#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+#define TILCDC_PALETTE_SIZE 32
+#define TILCDC_PALETTE_FIRST_ENTRY 0x4000
struct tilcdc_crtc {
struct drm_crtc base;
@@ -33,7 +37,9 @@ struct tilcdc_crtc {
struct drm_plane primary;
const struct tilcdc_panel_info *info;
struct drm_pending_vblank_event *event;
+ struct mutex enable_lock;
bool enabled;
+ bool shutdown;
wait_queue_head_t frame_done_wq;
bool frame_done;
spinlock_t irq_lock;
@@ -53,6 +59,11 @@ struct tilcdc_crtc {
int sync_lost_count;
bool frame_intact;
+ struct work_struct recover_work;
+
+ dma_addr_t palette_dma_handle;
+ u16 *palette_base;
+ struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
@@ -71,17 +82,16 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
- crtc->x * bpp / 8;
+ crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@@ -90,7 +100,10 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
* unlikely that LCDC would fetch the DMA addresses in the middle of
* an update.
*/
- dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+ if (priv->rev == 1)
+ end -= 1;
+
+ dma_base_and_ceiling = (u64)end << 32 | start;
tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
if (tilcdc_crtc->curr_fb)
@@ -100,6 +113,56 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
tilcdc_crtc->curr_fb = fb;
}
+/*
+ * The driver currently supports only true color formats. For
+ * true color the palette block is bypassed, but a 32 byte palette
+ * should still be loaded. The first 16-bit entry must be 0x4000 while
+ * all other entries must be zeroed.
+ */
+static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ reinit_completion(&tilcdc_crtc->palette_loaded);
+
+ /* Tell the LCDC where the palette is located. */
+ tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
+ tilcdc_crtc->palette_dma_handle);
+ tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
+ (u32) tilcdc_crtc->palette_dma_handle +
+ TILCDC_PALETTE_SIZE - 1);
+
+ /* Set dma load mode for palette loading only. */
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* Enable DMA Palette Loaded Interrupt */
+ if (priv->rev == 1)
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);
+
+ /* Enable LCDC DMA and wait for palette to be loaded. */
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: Palette loading timeout", __func__);
+
+ /* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
+}
+
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
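
tilcdc_crtc_load_palette() above blocks on a struct completion that the interrupt handler signals once the hardware reports the palette DMA as done (see the LCDC_PL_LOAD_DONE handling in the irq hunk further down). The handshake is the standard completion pattern; a compact sketch with hypothetical example_* names:

struct example_ctx {
	struct completion done;		/* init_completion() at probe time */
};

static irqreturn_t example_irq(int irq, void *data)
{
	struct example_ctx *ctx = data;

	complete(&ctx->done);		/* wake the waiter below */
	return IRQ_HANDLED;
}

static int example_start_and_wait(struct example_ctx *ctx)
{
	reinit_completion(&ctx->done);

	/* ... enable the interrupt and kick off the hardware operation ... */

	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(50)))
		return -ETIMEDOUT;	/* hardware never signalled completion */

	return 0;
}
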
@@ -108,6 +171,7 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
if (priv->rev == 1) {
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA);
tilcdc_set(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -126,6 +190,7 @@ static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
/* disable irqs that we might have enabled: */
if (priv->rev == 1) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
LCDC_V1_END_OF_FRAME_INT_ENA);
@@ -150,193 +215,68 @@ static void reset(struct drm_crtc *crtc)
tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
-static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static unsigned int tilcdc_pclk_diff(unsigned long rate,
+ unsigned long real_rate)
{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->enabled)
- return;
-
- pm_runtime_get_sync(dev->dev);
-
- reset(crtc);
-
- tilcdc_crtc_enable_irqs(dev);
-
- tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- drm_crtc_vblank_on(crtc);
+ int r = rate / 100, rr = real_rate / 100;
- tilcdc_crtc->enabled = true;
+ return (unsigned int)(abs(((rr - r) * 100) / r));
}
-void tilcdc_crtc_disable(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (!tilcdc_crtc->enabled)
- return;
-
- tilcdc_crtc->frame_done = false;
- tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
-
- /*
- * if necessary wait for framedone irq which will still come
- * before putting things to sleep..
- */
- if (priv->rev == 2) {
- int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
- tilcdc_crtc->frame_done,
- msecs_to_jiffies(500));
- if (ret == 0)
- dev_err(dev->dev, "%s: timeout waiting for framedone\n",
- __func__);
- }
-
- drm_crtc_vblank_off(crtc);
-
- tilcdc_crtc_disable_irqs(dev);
-
- pm_runtime_put_sync(dev->dev);
-
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = ktime_set(0, 0);
-
- tilcdc_crtc->enabled = false;
-}
-
-static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
-{
- return crtc->state && crtc->state->enable && crtc->state->active;
-}
-
-static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct tilcdc_drm_private *priv = crtc->dev->dev_private;
-
- drm_modeset_lock_crtc(crtc, NULL);
- tilcdc_crtc_disable(crtc);
- drm_modeset_unlock_crtc(crtc);
-
- flush_workqueue(priv->wq);
-
- of_node_put(crtc->port);
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
-}
-
-int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- if (tilcdc_crtc->event) {
- dev_err(dev->dev, "already pending page flip!\n");
- return -EBUSY;
- }
-
- drm_framebuffer_reference(fb);
-
- crtc->primary->fb = fb;
-
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
-
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
- ktime_t next_vblank;
- s64 tdiff;
-
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
-
- tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
-
- if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
- tilcdc_crtc->next_fb = fb;
- }
-
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
-
- tilcdc_crtc->event = event;
-
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
-
- return 0;
-}
-
-static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long clk_rate, real_rate, req_rate;
+ unsigned int clkdiv;
+ int ret;
- if (!tilcdc_crtc->simulate_vesa_sync)
- return true;
+ clkdiv = 2; /* first try using a standard divider of 2 */
- /*
- * tilcdc does not generate VESA-compliant sync but aligns
- * VS on the second edge of HS instead of first edge.
- * We use adjusted_mode, to fixup sync by aligning both rising
- * edges and add HSKEW offset to fix the sync.
- */
- adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
- adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+ /* mode.clock is in KHz, set_rate wants parameter in Hz */
+ req_rate = crtc->mode.clock * 1000;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
- adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
- } else {
- adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
- adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
- }
+ ret = clk_set_rate(priv->clk, req_rate * clkdiv);
+ clk_rate = clk_get_rate(priv->clk);
+ if (ret < 0) {
+ /*
+ * If we fail to set the clock rate (some architectures don't
+ * use the common clock framework yet and may not implement
+ * all the clk API calls for every clock), try the next best
+ * thing: adjusting the clock divider, unless clk_get_rate()
+ * failed as well.
+ */
+ if (!clk_rate) {
+ /* Nothing more we can do. Just bail out. */
+ dev_err(dev->dev,
+ "failed to set the pixel clock - unable to read current lcdc clock rate\n");
+ return;
+ }
- return true;
-}
+ clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
-static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- const unsigned clkdiv = 2; /* using a fixed divider of 2 */
- int ret;
+ /*
+ * Emit a warning if the real clock rate resulting from the
+ * calculated divider differs much from the requested rate.
+ *
+ * 5% is an arbitrary value - LCDs are usually quite tolerant
+ * about pixel clock rates.
+ */
+ real_rate = clkdiv * req_rate;
- /* mode.clock is in KHz, set_rate wants parameter in Hz */
- ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
- if (ret < 0) {
- dev_err(dev->dev, "failed to set display clock rate to: %d\n",
- crtc->mode.clock);
- return;
+ if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
+ dev_warn(dev->dev,
+ "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
+ clk_rate, real_rate);
+ }
}
- tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+ tilcdc_crtc->lcd_fck_rate = clk_rate;
DBG("lcd_clk=%u, mode clock=%d, div=%u",
tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
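
tilcdc_pclk_diff() above expresses the mismatch between the requested and the achievable pixel clock as an integer percentage, and the caller only warns when it exceeds the 5% tolerance. A short worked example with hypothetical rates:

static unsigned int example_pclk_diff(void)
{
	/* Requested 74.25 MHz, but the clock framework can only deliver 72 MHz. */
	unsigned long rate = 74250000, real_rate = 72000000;
	int r = rate / 100;		/* 742500 */
	int rr = real_rate / 100;	/* 720000 */

	/* |rr - r| * 100 / r = 2250000 / 742500 = 3, i.e. a 3% deviation,
	 * which is below the 5% threshold, so no warning would be emitted. */
	return (unsigned int)(abs(((rr - r) * 100) / r));
}
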
@@ -351,7 +291,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
-static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -361,8 +301,6 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_framebuffer *fb = crtc->primary->state->fb;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
if (WARN_ON(!info))
return;
@@ -461,16 +399,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
- unsigned int depth, bpp;
-
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
- switch (bpp) {
- case 16:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
break;
- case 32:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
/* fallthrough */
- case 24:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
break;
default:
@@ -511,15 +449,226 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
else
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
- drm_framebuffer_reference(fb);
+ tilcdc_crtc_set_clk(crtc);
+
+ tilcdc_crtc_load_palette(crtc);
set_scanout(crtc, fb);
- tilcdc_crtc_set_clk(crtc);
+ drm_framebuffer_reference(fb);
crtc->hwmode = crtc->state->adjusted_mode;
}
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+
+ reset(crtc);
+
+ tilcdc_crtc_set_mode(crtc);
+
+ tilcdc_crtc_enable_irqs(dev);
+
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
+ LCDC_PALETTE_LOAD_MODE_MASK);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ drm_crtc_vblank_on(crtc);
+
+ tilcdc_crtc->enabled = true;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&tilcdc_crtc->enable_lock);
+ if (shutdown)
+ tilcdc_crtc->shutdown = true;
+ if (!tilcdc_crtc->enabled) {
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+ return;
+ }
+ tilcdc_crtc->frame_done = false;
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ /*
+ * Wait for framedone irq which will still come before putting
+ * things to sleep..
+ */
+ ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+
+ drm_crtc_vblank_off(crtc);
+
+ tilcdc_crtc_disable_irqs(dev);
+
+ pm_runtime_put_sync(dev->dev);
+
+ if (tilcdc_crtc->next_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ }
+
+ if (tilcdc_crtc->curr_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+ tilcdc_crtc->curr_fb = NULL;
+ }
+
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+ tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+ tilcdc_crtc->enabled = false;
+ mutex_unlock(&tilcdc_crtc->enable_lock);
+}
+
+static void tilcdc_crtc_disable(struct drm_crtc *crtc)
+{
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ tilcdc_crtc_off(crtc, false);
+}
+
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
+{
+ tilcdc_crtc_off(crtc, true);
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+ return crtc->state && crtc->state->enable && crtc->state->active;
+}
+
+static void tilcdc_crtc_recover_work(struct work_struct *work)
+{
+ struct tilcdc_crtc *tilcdc_crtc =
+ container_of(work, struct tilcdc_crtc, recover_work);
+ struct drm_crtc *crtc = &tilcdc_crtc->base;
+
+ dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);
+
+ drm_modeset_lock_crtc(crtc, NULL);
+
+ if (!tilcdc_crtc_is_on(crtc))
+ goto out;
+
+ tilcdc_crtc_disable(crtc);
+ tilcdc_crtc_enable(crtc);
+out:
+ drm_modeset_unlock_crtc(crtc);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_modeset_lock_crtc(crtc, NULL);
+ tilcdc_crtc_disable(crtc);
+ drm_modeset_unlock_crtc(crtc);
+
+ flush_workqueue(priv->wq);
+
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+}
+
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ if (tilcdc_crtc->event) {
+ dev_err(dev->dev, "already pending page flip!\n");
+ return -EBUSY;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ crtc->primary->fb = fb;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ ktime_t next_vblank;
+ s64 tdiff;
+
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
+
+ tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+ if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ tilcdc_crtc->next_fb = fb;
+ }
+
+ if (tilcdc_crtc->next_fb != fb)
+ set_scanout(crtc, fb);
+
+ tilcdc_crtc->event = event;
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
+ return 0;
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ if (!tilcdc_crtc->simulate_vesa_sync)
+ return true;
+
+ /*
+ * tilcdc does not generate VESA-compliant sync but aligns
+ * VS on the second edge of HS instead of first edge.
+ * We use adjusted_mode, to fixup sync by aligning both rising
+ * edges and add HSKEW offset to fix the sync.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return true;
+}
+
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -560,7 +709,6 @@ static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
.enable = tilcdc_crtc_enable,
.disable = tilcdc_crtc_disable,
.atomic_check = tilcdc_crtc_atomic_check,
- .mode_set_nofb = tilcdc_crtc_mode_set_nofb,
};
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -756,28 +904,48 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
}
if (stat & LCDC_FIFO_UNDERFLOW)
- dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
__func__, stat);
- /* For revision 2 only */
- if (priv->rev == 2) {
- if (stat & LCDC_FRAME_DONE) {
- tilcdc_crtc->frame_done = true;
- wake_up(&tilcdc_crtc->frame_done_wq);
- }
+ if (stat & LCDC_PL_LOAD_DONE) {
+ complete(&tilcdc_crtc->palette_loaded);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_PL_INT_ENA);
+ else
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_V2_PL_INT_ENA);
+ }
- if (stat & LCDC_SYNC_LOST) {
- dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
- __func__, stat);
- tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
+ if (stat & LCDC_SYNC_LOST) {
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+ __func__, stat);
+ tilcdc_crtc->frame_intact = false;
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
+ queue_work(system_wq, &tilcdc_crtc->recover_work);
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_SYNC_LOST_INT_ENA);
+ else
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- }
+ tilcdc_crtc->sync_lost_count = 0;
}
+ }
+
+ if (stat & LCDC_FRAME_DONE) {
+ tilcdc_crtc->frame_done = true;
+ wake_up(&tilcdc_crtc->frame_done_wq);
+ /* rev 1 lcdc appears to hang if irq is not disabled here */
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_FRAME_DONE_INT_ENA);
+ }
+ /* For revision 2 only */
+ if (priv->rev == 2) {
/* Indicate to LCDC that the interrupt service routine has
* completed, see 13.3.6.1.6 in AM335x TRM.
*/
@@ -787,7 +955,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
return IRQ_HANDLED;
}
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+int tilcdc_crtc_create(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_crtc *tilcdc_crtc;
@@ -797,21 +965,33 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
if (!tilcdc_crtc) {
dev_err(dev->dev, "allocation failed\n");
- return NULL;
+ return -ENOMEM;
}
+ init_completion(&tilcdc_crtc->palette_loaded);
+ tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
+ TILCDC_PALETTE_SIZE,
+ &tilcdc_crtc->palette_dma_handle,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tilcdc_crtc->palette_base)
+ return -ENOMEM;
+ *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;
+
crtc = &tilcdc_crtc->base;
ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
if (ret < 0)
goto fail;
+ mutex_init(&tilcdc_crtc->enable_lock);
+
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
spin_lock_init(&tilcdc_crtc->irq_lock);
+ INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
ret = drm_crtc_init_with_planes(dev, crtc,
&tilcdc_crtc->primary,
@@ -837,13 +1017,15 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
if (!crtc->port) { /* This should never happen */
dev_err(dev->dev, "Port node not found in %s\n",
dev->dev->of_node->full_name);
+ ret = -EINVAL;
goto fail;
}
}
- return crtc;
+ priv->crtc = crtc;
+ return 0;
fail:
tilcdc_crtc_destroy(crtc);
- return NULL;
+ return -ENOMEM;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a694977c32f4..bd0a3bd07167 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -127,24 +127,16 @@ static int tilcdc_commit(struct drm_device *dev,
* current layout.
*/
- /* Keep HW on while we commit the state. */
- pm_runtime_get_sync(dev->dev);
-
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Now HW should remain on if need becase the crtc is enabled */
- pm_runtime_put_sync(dev->dev);
-
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
return 0;
}
@@ -155,15 +147,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = tilcdc_commit,
};
-static int modeset_init(struct drm_device *dev)
+static void modeset_init(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_module *mod;
- drm_mode_config_init(dev);
-
- priv->crtc = tilcdc_crtc_create(dev);
-
list_for_each_entry(mod, &module_list, list) {
DBG("loading module: %s", mod->name);
mod->funcs->modeset_init(mod, dev);
@@ -174,8 +162,6 @@ static int modeset_init(struct drm_device *dev)
dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
-
- return 0;
}
#ifdef CONFIG_CPU_FREQ
@@ -196,22 +182,29 @@ static int cpufreq_transition(struct notifier_block *nb,
* DRM operations:
*/
-static int tilcdc_unload(struct drm_device *dev)
+static void tilcdc_fini(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- tilcdc_remove_external_encoders(dev);
+ if (priv->crtc)
+ tilcdc_crtc_shutdown(priv->crtc);
+
+ if (priv->is_registered)
+ drm_dev_unregister(dev);
- drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
+
+ if (priv->fbdev)
+ drm_fbdev_cma_fini(priv->fbdev);
drm_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+ tilcdc_remove_external_device(dev);
#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (priv->freq_transition.notifier_call)
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
#endif
if (priv->clk)
@@ -220,61 +213,71 @@ static int tilcdc_unload(struct drm_device *dev)
if (priv->mmio)
iounmap(priv->mmio);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ }
dev->dev_private = NULL;
pm_runtime_disable(dev->dev);
- return 0;
+ drm_dev_unref(dev);
}
-static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *node = pdev->dev.of_node;
+ struct drm_device *ddev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
struct tilcdc_drm_private *priv;
struct resource *res;
u32 bpp = 0;
int ret;
- priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
+ dev_err(dev, "failed to allocate private data\n");
return -ENOMEM;
}
- dev->dev_private = priv;
+ ddev = drm_dev_alloc(ddrv, dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->platformdev = pdev;
+ ddev->dev_private = priv;
+ platform_set_drvdata(pdev, ddev);
+ drm_mode_config_init(ddev);
priv->is_componentized =
- tilcdc_get_external_components(dev->dev, NULL) > 0;
+ tilcdc_get_external_components(dev, NULL) > 0;
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
if (!priv->wq) {
ret = -ENOMEM;
- goto fail_unset_priv;
+ goto init_failed;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(dev->dev, "failed to get memory resource\n");
+ dev_err(dev, "failed to get memory resource\n");
ret = -EINVAL;
- goto fail_free_wq;
+ goto init_failed;
}
priv->mmio = ioremap_nocache(res->start, resource_size(res));
if (!priv->mmio) {
- dev_err(dev->dev, "failed to ioremap\n");
+ dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto fail_free_wq;
+ goto init_failed;
}
- priv->clk = clk_get(dev->dev, "fck");
+ priv->clk = clk_get(dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(dev->dev, "failed to get functional clock\n");
+ dev_err(dev, "failed to get functional clock\n");
ret = -ENODEV;
- goto fail_iounmap;
+ goto init_failed;
}
#ifdef CONFIG_CPU_FREQ
@@ -282,8 +285,9 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
ret = cpufreq_register_notifier(&priv->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
- dev_err(dev->dev, "failed to register cpufreq notifier\n");
- goto fail_put_clk;
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ priv->freq_transition.notifier_call = NULL;
+ goto init_failed;
}
#endif
@@ -292,22 +296,22 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
- if (of_property_read_u32(node, "ti,max-width", &priv->max_width))
+ if (of_property_read_u32(node, "max-width", &priv->max_width))
priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
- if (of_property_read_u32(node, "ti,max-pixelclock",
+ if (of_property_read_u32(node, "max-pixelclock",
&priv->max_pixelclock))
priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(dev);
/* Determine LCD IP Version */
- pm_runtime_get_sync(dev->dev);
- switch (tilcdc_read(dev, LCDC_PID_REG)) {
+ pm_runtime_get_sync(dev);
+ switch (tilcdc_read(ddev, LCDC_PID_REG)) {
case 0x4c100102:
priv->rev = 1;
break;
@@ -316,14 +320,14 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
priv->rev = 2;
break;
default:
- dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
- "defaulting to LCD revision 1\n",
- tilcdc_read(dev, LCDC_PID_REG));
+ dev_warn(dev, "Unknown PID Reg value 0x%08x, "
+ "defaulting to LCD revision 1\n",
+ tilcdc_read(ddev, LCDC_PID_REG));
priv->rev = 1;
break;
}
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_put_sync(dev);
if (priv->rev == 1) {
DBG("Revision 1 LCDC supports only RGB565 format");
@@ -356,91 +360,67 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
}
}
- ret = modeset_init(dev);
+ ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
- goto fail_cpufreq_unregister;
+ dev_err(dev, "failed to create crtc\n");
+ goto init_failed;
}
-
- platform_set_drvdata(pdev, dev);
+ modeset_init(ddev);
if (priv->is_componentized) {
- ret = component_bind_all(dev->dev, dev);
+ ret = component_bind_all(dev, ddev);
if (ret < 0)
- goto fail_mode_config_cleanup;
+ goto init_failed;
- ret = tilcdc_add_external_encoders(dev);
+ ret = tilcdc_add_component_encoder(ddev);
if (ret < 0)
- goto fail_component_cleanup;
+ goto init_failed;
+ } else {
+ ret = tilcdc_attach_external_device(ddev);
+ if (ret)
+ goto init_failed;
}
- if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
- dev_err(dev->dev, "no encoders/connectors found\n");
+ if (!priv->external_connector &&
+ ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
+ dev_err(dev, "no encoders/connectors found\n");
ret = -ENXIO;
- goto fail_external_cleanup;
+ goto init_failed;
}
- ret = drm_vblank_init(dev, 1);
+ ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
- goto fail_external_cleanup;
+ dev_err(dev, "failed to initialize vblank\n");
+ goto init_failed;
}
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
- goto fail_vblank_cleanup;
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto init_failed;
}
- drm_mode_config_reset(dev);
+ drm_mode_config_reset(ddev);
- priv->fbdev = drm_fbdev_cma_init(dev, bpp,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
+ ddev->mode_config.num_crtc,
+ ddev->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
- goto fail_irq_uninstall;
+ goto init_failed;
}
- drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_init(ddev);
- return 0;
-
-fail_irq_uninstall:
- drm_irq_uninstall(dev);
-
-fail_vblank_cleanup:
- drm_vblank_cleanup(dev);
-
-fail_component_cleanup:
- if (priv->is_componentized)
- component_unbind_all(dev->dev, dev);
-
-fail_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
-
-fail_external_cleanup:
- tilcdc_remove_external_encoders(dev);
-
-fail_cpufreq_unregister:
- pm_runtime_disable(dev->dev);
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-
-fail_put_clk:
-#endif
- clk_put(priv->clk);
-
-fail_iounmap:
- iounmap(priv->mmio);
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto init_failed;
-fail_free_wq:
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
+ priv->is_registered = true;
+ return 0;
-fail_unset_priv:
- dev->dev_private = NULL;
+init_failed:
+ tilcdc_fini(ddev);
return ret;
}
@@ -575,9 +555,7 @@ static const struct file_operations fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
@@ -587,8 +565,6 @@ static const struct file_operations fops = {
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .load = tilcdc_load,
- .unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
.irq_handler = tilcdc_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
@@ -662,10 +638,9 @@ static const struct dev_pm_ops tilcdc_pm_ops = {
/*
* Platform driver:
*/
-
static int tilcdc_bind(struct device *dev)
{
- return drm_platform_init(&tilcdc_driver, to_platform_device(dev));
+ return tilcdc_init(&tilcdc_driver, dev);
}
static void tilcdc_unbind(struct device *dev)
@@ -676,7 +651,7 @@ static void tilcdc_unbind(struct device *dev)
if (!ddev->dev_private)
return;
- drm_put_dev(dev_get_drvdata(dev));
+ tilcdc_fini(dev_get_drvdata(dev));
}
static const struct component_master_ops tilcdc_comp_ops = {
@@ -699,7 +674,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- return drm_platform_init(&tilcdc_driver, pdev);
+ return tilcdc_init(&tilcdc_driver, &pdev->dev);
else
return component_master_add_with_match(&pdev->dev,
&tilcdc_comp_ops,
@@ -714,7 +689,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
else if (ret == 0)
- drm_put_dev(platform_get_drvdata(pdev));
+ tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
@@ -723,6 +698,7 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
static struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
+ { .compatible = "ti,da850-tilcdc", },
{ },
};
MODULE_DEVICE_TABLE(of, tilcdc_of_match);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 9780c37ec4cd..0e71daf5b5cb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_bridge.h>
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
@@ -87,8 +88,12 @@ struct tilcdc_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
- const struct drm_connector_helper_funcs *connector_funcs[8];
+ struct drm_encoder *external_encoder;
+ struct drm_connector *external_connector;
+ const struct drm_connector_helper_funcs *connector_funcs;
+
+ bool is_registered;
bool is_componentized;
};
@@ -163,7 +168,7 @@ struct tilcdc_panel_info {
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+int tilcdc_crtc_create(struct drm_device *dev);
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
@@ -172,7 +177,7 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_disable(struct drm_crtc *crtc);
+void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e895021005..c67d7cd7d57e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -27,44 +28,50 @@ static const struct tilcdc_panel_info panel_info_tda998x = {
.raster_order = 0,
};
+static const struct tilcdc_panel_info panel_info_default = {
+ .ac_bias = 255,
+ .ac_bias_intrpt = 0,
+ .dma_burst_sz = 16,
+ .bpp = 16,
+ .fdd = 0x80,
+ .tft_alt_mode = 0,
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
+};
+
static int tilcdc_external_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct tilcdc_drm_private *priv = connector->dev->dev_private;
- int ret, i;
+ int ret;
ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
if (ret != MODE_OK)
return ret;
- for (i = 0; i < priv->num_connectors &&
- priv->connectors[i] != connector; i++)
- ;
-
- BUG_ON(priv->connectors[i] != connector);
- BUG_ON(!priv->connector_funcs[i]);
+ BUG_ON(priv->external_connector != connector);
+ BUG_ON(!priv->connector_funcs);
/* If the connector has its own mode_valid call it. */
- if (!IS_ERR(priv->connector_funcs[i]) &&
- priv->connector_funcs[i]->mode_valid)
- return priv->connector_funcs[i]->mode_valid(connector, mode);
+ if (!IS_ERR(priv->connector_funcs) &&
+ priv->connector_funcs->mode_valid)
+ return priv->connector_funcs->mode_valid(connector, mode);
return MODE_OK;
}
-static int tilcdc_add_external_encoder(struct drm_device *dev,
- struct drm_connector *connector)
+static int tilcdc_add_external_connector(struct drm_device *dev,
+ struct drm_connector *connector)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector_helper_funcs *connector_funcs;
- priv->connectors[priv->num_connectors] = connector;
- priv->encoders[priv->num_encoders++] = connector->encoder;
-
- /* Only tda998x is supported at the moment. */
- tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
- tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+ /* There should never be more than one connector */
+ if (WARN_ON(priv->external_connector))
+ return -EINVAL;
+ priv->external_connector = connector;
connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
GFP_KERNEL);
if (!connector_funcs)
@@ -77,56 +84,177 @@ static int tilcdc_add_external_encoder(struct drm_device *dev,
* everything else but use our own mode_valid() (above).
*/
if (connector->helper_private) {
- priv->connector_funcs[priv->num_connectors] =
- connector->helper_private;
- *connector_funcs = *priv->connector_funcs[priv->num_connectors];
+ priv->connector_funcs = connector->helper_private;
+ *connector_funcs = *priv->connector_funcs;
} else {
- priv->connector_funcs[priv->num_connectors] = ERR_PTR(-ENOENT);
+ priv->connector_funcs = ERR_PTR(-ENOENT);
}
connector_funcs->mode_valid = tilcdc_external_mode_valid;
drm_connector_helper_add(connector, connector_funcs);
- priv->num_connectors++;
- dev_dbg(dev->dev, "External encoder '%s' connected\n",
- connector->encoder->name);
+ dev_dbg(dev->dev, "External connector '%s' connected\n",
+ connector->name);
return 0;
}
-int tilcdc_add_external_encoders(struct drm_device *dev)
+static
+struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
+ struct drm_encoder *encoder)
{
- struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector *connector;
- int num_internal_connectors = priv->num_connectors;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- bool found = false;
- int i, ret;
-
- for (i = 0; i < num_internal_connectors; i++)
- if (connector == priv->connectors[i])
- found = true;
- if (!found) {
- ret = tilcdc_add_external_encoder(dev, connector);
- if (ret)
- return ret;
- }
+ int i;
+
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] == encoder->base.id)
+ return connector;
+
+ dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
+ encoder->name, encoder->base.id);
+
+ return NULL;
+}
+
+int tilcdc_add_component_encoder(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
+ if (encoder->possible_crtcs & (1 << priv->crtc->index))
+ break;
+
+ if (!encoder) {
+ dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
+ return -ENODEV;
}
- return 0;
+
+ connector = tilcdc_encoder_find_connector(ddev, encoder);
+
+ if (!connector)
+ return -ENODEV;
+
+ /* Only tda998x is supported at the moment. */
+ tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
+
+ return tilcdc_add_external_connector(ddev, connector);
}
-void tilcdc_remove_external_encoders(struct drm_device *dev)
+void tilcdc_remove_external_device(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- int i;
/* Restore the original helper functions, if any. */
- for (i = 0; i < priv->num_connectors; i++)
- if (IS_ERR(priv->connector_funcs[i]))
- drm_connector_helper_add(priv->connectors[i], NULL);
- else if (priv->connector_funcs[i])
- drm_connector_helper_add(priv->connectors[i],
- priv->connector_funcs[i]);
+ if (IS_ERR(priv->connector_funcs))
+ drm_connector_helper_add(priv->external_connector, NULL);
+ else if (priv->connector_funcs)
+ drm_connector_helper_add(priv->external_connector,
+ priv->connector_funcs);
+}
+
+static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static
+int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct drm_connector *connector;
+ int ret;
+
+ priv->external_encoder->possible_crtcs = BIT(0);
+ priv->external_encoder->bridge = bridge;
+ bridge->encoder = priv->external_encoder;
+
+ ret = drm_bridge_attach(ddev, bridge);
+ if (ret) {
+ dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
+ return ret;
+ }
+
+ tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);
+
+ connector = tilcdc_encoder_find_connector(ddev, priv->external_encoder);
+ if (!connector)
+ return -ENODEV;
+
+ ret = tilcdc_add_external_connector(ddev, connector);
+
+ return ret;
+}
+
+static int tilcdc_node_has_port(struct device_node *dev_node)
+{
+ struct device_node *node;
+
+ node = of_get_child_by_name(dev_node, "ports");
+ if (!node)
+ node = of_get_child_by_name(dev_node, "port");
+ if (!node)
+ return 0;
+ of_node_put(node);
+
+ return 1;
+}
+
+static
+struct device_node *tilcdc_get_remote_node(struct device_node *node)
+{
+ struct device_node *ep;
+ struct device_node *parent;
+
+ if (!tilcdc_node_has_port(node))
+ return NULL;
+
+ ep = of_graph_get_next_endpoint(node, NULL);
+ if (!ep)
+ return NULL;
+
+ parent = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+
+ return parent;
+}
+
+int tilcdc_attach_external_device(struct drm_device *ddev)
+{
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+ struct device_node *remote_node;
+ struct drm_bridge *bridge;
+ int ret;
+
+ remote_node = tilcdc_get_remote_node(ddev->dev->of_node);
+ if (!remote_node)
+ return 0;
+
+ bridge = of_drm_find_bridge(remote_node);
+ of_node_put(remote_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ priv->external_encoder = devm_kzalloc(ddev->dev,
+ sizeof(*priv->external_encoder),
+ GFP_KERNEL);
+ if (!priv->external_encoder)
+ return -ENOMEM;
+
+ ret = drm_encoder_init(ddev, priv->external_encoder,
+ &tilcdc_external_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
+ return ret;
+ }
+
+ ret = tilcdc_attach_bridge(ddev, bridge);
+ if (ret)
+ drm_encoder_cleanup(priv->external_encoder);
+
+ return ret;
}
static int dev_match_of(struct device *dev, void *data)
@@ -140,16 +268,10 @@ int tilcdc_get_external_components(struct device *dev,
struct device_node *node;
struct device_node *ep = NULL;
int count = 0;
+ int ret = 0;
- /* Avoid error print by of_graph_get_next_endpoint() if there
- * is no ports present.
- */
- node = of_get_child_by_name(dev->of_node, "ports");
- if (!node)
- node = of_get_child_by_name(dev->of_node, "port");
- if (!node)
+ if (!tilcdc_node_has_port(dev->of_node))
return 0;
- of_node_put(node);
while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
node = of_graph_get_remote_port_parent(ep);
@@ -159,16 +281,20 @@ int tilcdc_get_external_components(struct device *dev,
}
dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
- if (match)
- component_match_add(dev, match, dev_match_of, node);
- of_node_put(node);
- count++;
- }
- if (count > 1) {
- dev_err(dev, "Only one external encoder is supported\n");
- return -EINVAL;
+ if (of_device_is_compatible(node, "nxp,tda998x")) {
+ if (match)
+ drm_of_component_match_add(dev, match,
+ dev_match_of, node);
+ ret = 1;
+ }
+
+ of_node_put(node);
+ if (count++ > 1) {
+ dev_err(dev, "Only one port is supported\n");
+ return -EINVAL;
+ }
}
- return count;
+ return ret;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index c700e0c1623e..763d18f006c7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,8 +18,9 @@
#ifndef __TILCDC_EXTERNAL_H__
#define __TILCDC_EXTERNAL_H__
-int tilcdc_add_external_encoders(struct drm_device *dev);
-void tilcdc_remove_external_encoders(struct drm_device *dev);
+int tilcdc_add_component_encoder(struct drm_device *dev);
+void tilcdc_remove_external_device(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
+int tilcdc_attach_external_device(struct drm_device *ddev);
#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 2134bb20fbe9..28c3e2f44f64 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -144,13 +144,6 @@ static void panel_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static enum drm_connector_status panel_connector_detect(
- struct drm_connector *connector,
- bool force)
-{
- return connector_status_connected;
-}
-
static int panel_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -197,7 +190,6 @@ static struct drm_encoder *panel_connector_best_encoder(
static const struct drm_connector_funcs panel_connector_funcs = {
.destroy = panel_connector_destroy,
.dpms = drm_atomic_helper_connector_dpms,
- .detect = panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -240,8 +232,6 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74c65fa859b2..8a6a50d74aff 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *old_state = plane->state;
- unsigned int depth, bpp;
+ unsigned int pitch;
if (!state->crtc)
return 0;
@@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
- if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+ pitch = crtc_state->mode.hdisplay *
+ drm_format_plane_cpp(state->fb->pixel_format, 0);
+ if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index f57c0d62c76a..9d528c0a67a4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -34,11 +34,14 @@
/* LCDC DMA Control Register */
#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCDC_DMA_BURST_SIZE_MASK ((0x7) << 4)
#define LCDC_DMA_BURST_1 0x0
#define LCDC_DMA_BURST_2 0x1
#define LCDC_DMA_BURST_4 0x2
#define LCDC_DMA_BURST_8 0x3
#define LCDC_DMA_BURST_16 0x4
+#define LCDC_DMA_FIFO_THRESHOLD(x) ((x) << 8)
+#define LCDC_DMA_FIFO_THRESHOLD_MASK ((0x3) << 8)
#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
@@ -46,10 +49,12 @@
/* LCDC Control Register */
#define LCDC_CLK_DIVISOR(x) ((x) << 8)
+#define LCDC_CLK_DIVISOR_MASK ((0xFF) << 8)
#define LCDC_RASTER_MODE 0x01
/* LCDC Raster Control Register */
#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define LCDC_PALETTE_LOAD_MODE_MASK ((0x3) << 20)
#define PALETTE_AND_DATA 0x00
#define PALETTE_ONLY 0x01
#define DATA_ONLY 0x02
@@ -61,6 +66,8 @@
#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
#define LCDC_V1_PL_INT_ENA BIT(4)
#define LCDC_V2_PL_INT_ENA BIT(6)
+#define LCDC_V1_SYNC_LOST_INT_ENA BIT(5)
+#define LCDC_V1_FRAME_DONE_INT_ENA BIT(3)
#define LCDC_MONOCHROME_MODE BIT(1)
#define LCDC_RASTER_ENABLE BIT(0)
#define LCDC_TFT_ALT_ENABLE BIT(23)
@@ -74,7 +81,9 @@
/* LCDC Raster Timing 2 Register */
#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT_MASK ((0xF) << 16)
#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCDC_AC_BIAS_FREQUENCY_MASK ((0xFF) << 8)
#define LCDC_SYNC_CTRL BIT(25)
#define LCDC_SYNC_EDGE BIT(24)
#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
@@ -139,6 +148,12 @@ static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
return ioread32(priv->mmio + reg);
}
+static inline void tilcdc_write_mask(struct drm_device *dev, u32 reg,
+ u32 val, u32 mask)
+{
+ tilcdc_write(dev, reg, (tilcdc_read(dev, reg) & ~mask) | (val & mask));
+}
+
static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
{
tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 458043a53995..aabfad882e23 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -249,8 +249,6 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..d5063618efa7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i;
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
}
}
@@ -717,6 +717,20 @@ out:
return ret;
}
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
+ if (ret)
+ continue;
- break;
+ if (place && !bdev->driver->eviction_valuable(bo, place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
}
+
+ break;
}
if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = fence;
}
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
- ret = fence_wait(fence, false);
- fence_put(fence);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret) {
if (allow_errors) {
return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- fence_put(man->move);
+ dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1602,7 +1611,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- long timeout = no_wait ? 0 : 15 * HZ;
+ long timeout = 15 * HZ;
+
+ if (no_wait) {
+ if (reservation_object_test_signaled_rcu(bo->resv, true))
+ return 0;
+ else
+ return -EBUSY;
+ }
timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
interruptible, timeout);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*/
spin_lock(&from->move_lock);
- if (!from->move || fence_is_later(fence, from->move)) {
- fence_put(from->move);
- from->move = fence_get(fence);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
} else {
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..68ef993ab431 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- if (fence_is_signaled(bo->moving))
+ if (dma_fence_is_signaled(bo->moving))
goto out_clear;
/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) fence_wait(bo->moving, true);
+ (void) dma_fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = fence_wait(bo->moving, true);
+ ret = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
}
out_clear:
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = NULL;
out_unlock:
@@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- unsigned long address = (unsigned long)vmf->virtual_address;
+ unsigned long address = vmf->address;
int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index cc45d98f9bb5..cd8b01727734 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -44,9 +44,7 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 611b6b9bb3cb..167f42c67c7c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -254,16 +254,10 @@ static int udl_fb_release(struct fb_info *info, int user)
static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_mmap = udl_fb_mmap,
.fb_open = udl_fb_open,
.fb_release = udl_fb_release,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 818e70712b18..3c0c4bd3f750 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned int page_offset;
int ret = 0;
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (!obj->pages)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
switch (ret) {
case -EAGAIN:
case 0:
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 29f0207fa677..873f010d9616 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -98,17 +98,23 @@ success:
static int udl_select_std_channel(struct udl_device *udl)
{
int ret;
- u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
- 0x1C, 0x88, 0x5E, 0x15,
- 0x60, 0xFE, 0xC6, 0x97,
- 0x16, 0x3D, 0x47, 0xF2};
+ static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+ 0x1C, 0x88, 0x5E, 0x15,
+ 0x60, 0xFE, 0xC6, 0x97,
+ 0x16, 0x3D, 0x47, 0xF2};
+ void *sendbuf;
+
+ sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+ if (!sendbuf)
+ return -ENOMEM;
ret = usb_control_msg(udl->udev,
usb_sndctrlpipe(udl->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- set_def_chn, sizeof(set_def_chn),
+ sendbuf, sizeof(set_def_chn),
USB_CTRL_SET_TIMEOUT);
+ kfree(sendbuf);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index fb77db755e0a..7757f69a8a77 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -11,6 +11,7 @@ vc4-y := \
vc4_kms.o \
vc4_gem.o \
vc4_hdmi.o \
+ vc4_vec.o \
vc4_hvs.o \
vc4_irq.o \
vc4_plane.o \
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7f08d681a74b..a0fd3e66bc4b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,8 +83,7 @@ struct vc4_crtc_data {
/* Which channel of the HVS this pixelvalve sources from. */
int hvs_channel;
- enum vc4_encoder_type encoder0_type;
- enum vc4_encoder_type encoder1_type;
+ enum vc4_encoder_type encoder_types[4];
};
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
@@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
CRTC_WRITE(PV_INTEN, 0);
}
+/* Must be called with the event lock held */
+bool vc4_event_pending(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ return !!vc4_crtc->event;
+}
+
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
@@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
- .encoder0_type = VC4_ENCODER_TYPE_DSI0,
- .encoder1_type = VC4_ENCODER_TYPE_DPI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
+ },
};
static const struct vc4_crtc_data pv1_data = {
.hvs_channel = 2,
- .encoder0_type = VC4_ENCODER_TYPE_DSI1,
- .encoder1_type = VC4_ENCODER_TYPE_SMI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
+ },
};
static const struct vc4_crtc_data pv2_data = {
.hvs_channel = 1,
- .encoder0_type = VC4_ENCODER_TYPE_VEC,
- .encoder1_type = VC4_ENCODER_TYPE_HDMI,
+ .encoder_types = {
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
+ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
+ },
};
static const struct of_device_id vc4_crtc_dt_match[] = {
@@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
+ const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
-
- if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
- vc4_encoder->clock_select = 0;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
- } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
- vc4_encoder->clock_select = 1;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
+ if (vc4_encoder->type == encoder_types[i]) {
+ vc4_encoder->clock_select = i;
+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ break;
+ }
}
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 245115d49c46..caf817bac885 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -19,6 +19,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
{"bo_stats", vc4_bo_stats_debugfs, 0},
{"dpi_regs", vc4_dpi_debugfs_regs, 0},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
+ {"vec_regs", vc4_vec_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..ac09ca7ff430 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,23 +61,28 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ case DRM_VC4_PARAM_SUPPORTS_ETC1:
+ case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
args->value = true;
break;
default:
@@ -103,9 +108,7 @@ static const struct file_operations vc4_drm_fops = {
.mmap = vc4_mmap,
.poll = drm_poll,
.read = drm_read,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
@@ -274,12 +277,14 @@ static void vc4_drm_unbind(struct device *dev)
struct drm_device *drm = platform_get_drvdata(pdev);
struct vc4_dev *vc4 = to_vc4_dev(drm);
+ drm_dev_unregister(drm);
+
if (vc4->fbdev)
drm_fbdev_cma_fini(vc4->fbdev);
drm_mode_config_cleanup(drm);
- drm_put_dev(drm);
+ drm_dev_unref(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -289,6 +294,7 @@ static const struct component_master_ops vc4_drm_ops = {
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
+ &vc4_vec_driver,
&vc4_dpi_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d97486f..b5c4bb14d0d1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -17,6 +17,7 @@ struct vc4_dev {
struct vc4_crtc *crtc[3];
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
+ struct vc4_vec *vec;
struct drm_fbdev_cma *fbdev;
@@ -194,6 +195,7 @@ to_vc4_plane(struct drm_plane *plane)
}
enum vc4_encoder_type {
+ VC4_ENCODER_TYPE_NONE,
VC4_ENCODER_TYPE_HDMI,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI0,
@@ -381,6 +383,8 @@ struct vc4_validated_shader_info {
uint32_t num_uniform_addr_offsets;
uint32_t *uniform_addr_offsets;
+
+ bool is_threaded;
};
/**
@@ -440,6 +444,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
+bool vc4_event_pending(struct drm_crtc *crtc);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
unsigned int flags, int *vpos, int *hpos,
@@ -483,6 +488,10 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_vec.c */
+extern struct platform_driver vc4_vec_driver;
+int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 47a095f392f8..db920771bfb5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -544,14 +544,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
if (!handles) {
+ ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
- ret = copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
- exec->bo_count * sizeof(uint32_t));
- if (ret) {
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t))) {
+ ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
@@ -708,8 +709,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
}
mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0)
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
mutex_unlock(&vc4->power_lock);
kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6c8e60..be8dd8262f27 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
/* Make sure that any outstanding modesets have finished. */
if (nonblock) {
- ret = down_trylock(&vc4->async_modeset);
- if (ret) {
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ bool busy = false;
+
+ /*
+ * If there's an undispatched event to send then we're
+ * obviously still busy. If there isn't, then we can
+ * unconditionally wait for the semaphore because it
+ * shouldn't be contended (for long).
+ *
+ * This is to prevent a race where queuing a new flip
+ * from userspace immediately on receipt of an event
+ * beats our clean-up and returns EBUSY.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ busy |= vc4_event_pending(crtc);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ if (busy) {
kfree(c);
return -EBUSY;
}
- } else {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
- }
+ }
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
}
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -173,6 +190,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 1aa44c2db556..39f6886b2410 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -177,8 +177,9 @@
# define PV_CONTROL_WAIT_HSTART BIT(12)
# define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
# define PV_CONTROL_PIXEL_REP_SHIFT 4
-# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
+# define PV_CONTROL_CLK_SELECT_DSI 0
# define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
+# define PV_CONTROL_CLK_SELECT_VEC 2
# define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
# define PV_CONTROL_CLK_SELECT_SHIFT 2
# define PV_CONTROL_FIFO_CLR BIT(1)
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 26503e307438..9fd171c361c2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -644,6 +644,13 @@ reloc_tex(struct vc4_exec_info *exec,
cpp = 1;
break;
case VC4_TEXTURE_TYPE_ETC1:
+ /* ETC1 is arranged as 64-bit blocks, where each block is 4x4
+ * pixels.
+ */
+ cpp = 8;
+ width = (width + 3) >> 2;
+ height = (height + 3) >> 2;
+ break;
case VC4_TEXTURE_TYPE_BW1:
case VC4_TEXTURE_TYPE_A4:
case VC4_TEXTURE_TYPE_A1:
@@ -782,11 +789,6 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_v += roundup(packet_size, 16);
exec->shader_rec_size -= packet_size;
- if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
- DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
- return -EINVAL;
- }
-
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
@@ -803,6 +805,18 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
+ if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
+ to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("Thread mode of CL and FS do not match\n");
+ return -EINVAL;
+ }
+
+ if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
+ to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
+ DRM_ERROR("cs and vs cannot be threaded\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < shader_reloc_count; i++) {
struct vc4_validated_shader_info *validated_shader;
uint32_t o = shader_reloc_offsets[i];
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..5dba13dd1e9b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -83,6 +83,13 @@ struct vc4_shader_validation_state {
* basic blocks.
*/
bool needs_uniform_address_for_loop;
+
+ /* Set when we find an instruction writing the top half of the
+ * register files. If we allowed writing the unusable regs in
+ * a threaded shader, then the other shader running on our
+ * QPU's clamp validation would be invalid.
+ */
+ bool all_registers_used;
};
static uint32_t
@@ -119,6 +126,13 @@ raddr_add_a_to_live_reg_index(uint64_t inst)
}
static bool
+live_reg_is_upper_half(uint32_t lri)
+{
+ return (lri >= 16 && lri < 32) ||
+ (lri >= 32 + 16 && lri < 32 + 32);
+}
+
+static bool
is_tmu_submit(uint32_t waddr)
{
return (waddr == QPU_W_TMU0_S ||
@@ -390,6 +404,9 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
} else {
validation_state->live_immediates[lri] = ~0;
}
+
+ if (live_reg_is_upper_half(lri))
+ validation_state->all_registers_used = true;
}
switch (waddr) {
@@ -598,6 +615,11 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
}
}
+ if ((raddr_a >= 16 && raddr_a < 32) ||
+ (raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
+ validation_state->all_registers_used = true;
+ }
+
return true;
}
@@ -608,9 +630,7 @@ static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
- bool found_shader_end = false;
int ip;
- int shader_end_ip = 0;
int last_branch = -2;
for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +641,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
uint32_t branch_target_ip;
if (sig == QPU_SIG_PROG_END) {
- shader_end_ip = ip;
- found_shader_end = true;
+ /* There are two delay slots after program end is
+ * signaled that are still executed, then we're
+ * finished. validation_state->max_ip is the
+ * instruction after the last valid instruction in the
+ * program.
+ */
+ validation_state->max_ip = ip + 3;
continue;
}
@@ -676,15 +701,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
-
- /* There are two delay slots after program end is signaled
- * that are still executed, then we're finished.
- */
- if (found_shader_end && ip == shader_end_ip + 2)
- break;
}
- if (max_branch_target > shader_end_ip) {
+ if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -756,6 +775,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
+ uint32_t last_thread_switch_ip = -3;
uint32_t ip;
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
@@ -788,6 +808,17 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!vc4_handle_branch_target(&validation_state))
goto fail;
+ if (ip == last_thread_switch_ip + 3) {
+ /* Reset r0-r3 live clamp data */
+ int i;
+
+ for (i = 64; i < LIVE_REG_COUNT; i++) {
+ validation_state.live_min_clamp_offsets[i] = ~0;
+ validation_state.live_max_clamp_regs[i] = false;
+ validation_state.live_immediates[i] = ~0;
+ }
+ }
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -797,6 +828,8 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
+ case QPU_SIG_THREAD_SWITCH:
+ case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
@@ -812,6 +845,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
shader_end_ip = ip;
}
+ if (sig == QPU_SIG_THREAD_SWITCH ||
+ sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ validated_shader->is_threaded = true;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Thread switch too soon after "
+ "last switch at ip %d\n", ip);
+ goto fail;
+ }
+ last_thread_switch_ip = ip;
+ }
+
break;
case QPU_SIG_LOAD_IMM:
@@ -826,6 +871,13 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
if (!check_branch(inst, validated_shader,
&validation_state, ip))
goto fail;
+
+ if (ip < last_thread_switch_ip + 3) {
+ DRM_ERROR("Branch in thread switch at ip %d",
+ ip);
+ goto fail;
+ }
+
break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
@@ -847,6 +899,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
}
+ /* Might corrupt other thread */
+ if (validated_shader->is_threaded &&
+ validation_state.all_registers_used) {
+ DRM_ERROR("Shader uses threading, but uses the upper "
+ "half of the registers, too\n");
+ goto fail;
+ }
+
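To make the register-file split concrete, here is a user-space sketch that mirrors the live_reg_is_upper_half() predicate added above (illustrative only; in the validator's numbering, indices 0-31 are register file A, 32-63 are file B, and 64 and up are the r0-r3 accumulators, with the upper 16 of each file reserved for the second thread):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the validator's predicate: flag ra16-ra31 and rb16-rb31. */
static bool live_reg_is_upper_half(uint32_t lri)
{
	return (lri >= 16 && lri < 32) ||
	       (lri >= 32 + 16 && lri < 32 + 32);
}

int main(void)
{
	assert(!live_reg_is_upper_half(15));		/* ra15: lower half */
	assert(live_reg_is_upper_half(16));		/* ra16: upper half */
	assert(!live_reg_is_upper_half(32 + 15));	/* rb15: lower half */
	assert(live_reg_is_upper_half(32 + 16));	/* rb16: upper half */
	assert(!live_reg_is_upper_half(64));		/* r0 accumulator */
	return 0;
}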
/* If we did a backwards branch and we haven't emitted a uniforms
* reset since then, we still need the uniforms stream to have the
* uniforms address available so that the backwards branch can do its
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
new file mode 100644
index 000000000000..32bb8ef985fb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 SDTV module
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* WSE Registers */
+#define VEC_WSE_RESET 0xc0
+
+#define VEC_WSE_CONTROL 0xc4
+#define VEC_WSE_WSS_ENABLE BIT(7)
+
+#define VEC_WSE_WSS_DATA 0xc8
+#define VEC_WSE_VPS_DATA1 0xcc
+#define VEC_WSE_VPS_CONTROL 0xd0
+
+/* VEC Registers */
+#define VEC_REVID 0x100
+
+#define VEC_CONFIG0 0x104
+#define VEC_CONFIG0_YDEL_MASK GENMASK(28, 26)
+#define VEC_CONFIG0_YDEL(x) ((x) << 26)
+#define VEC_CONFIG0_CDEL_MASK GENMASK(25, 24)
+#define VEC_CONFIG0_CDEL(x) ((x) << 24)
+#define VEC_CONFIG0_PBPR_FIL BIT(18)
+#define VEC_CONFIG0_CHROMA_GAIN_MASK GENMASK(17, 16)
+#define VEC_CONFIG0_CHROMA_GAIN_UNITY (0 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_32 (1 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_16 (2 << 16)
+#define VEC_CONFIG0_CHROMA_GAIN_1_8 (3 << 16)
+#define VEC_CONFIG0_CBURST_GAIN_MASK GENMASK(14, 13)
+#define VEC_CONFIG0_CBURST_GAIN_UNITY (0 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_128 (1 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_64 (2 << 13)
+#define VEC_CONFIG0_CBURST_GAIN_1_32 (3 << 13)
+#define VEC_CONFIG0_CHRBW1 BIT(11)
+#define VEC_CONFIG0_CHRBW0 BIT(10)
+#define VEC_CONFIG0_SYNCDIS BIT(9)
+#define VEC_CONFIG0_BURDIS BIT(8)
+#define VEC_CONFIG0_CHRDIS BIT(7)
+#define VEC_CONFIG0_PDEN BIT(6)
+#define VEC_CONFIG0_YCDELAY BIT(4)
+#define VEC_CONFIG0_RAMPEN BIT(2)
+#define VEC_CONFIG0_YCDIS BIT(2)
+#define VEC_CONFIG0_STD_MASK GENMASK(1, 0)
+#define VEC_CONFIG0_NTSC_STD 0
+#define VEC_CONFIG0_PAL_BDGHI_STD 1
+#define VEC_CONFIG0_PAL_N_STD 3
+
+#define VEC_SCHPH 0x108
+#define VEC_SOFT_RESET 0x10c
+#define VEC_CLMP0_START 0x144
+#define VEC_CLMP0_END 0x148
+#define VEC_FREQ3_2 0x180
+#define VEC_FREQ1_0 0x184
+
+#define VEC_CONFIG1 0x188
+#define VEC_CONFIG_VEC_RESYNC_OFF BIT(18)
+#define VEC_CONFIG_RGB219 BIT(17)
+#define VEC_CONFIG_CBAR_EN BIT(16)
+#define VEC_CONFIG_TC_OBB BIT(15)
+#define VEC_CONFIG1_OUTPUT_MODE_MASK GENMASK(12, 10)
+#define VEC_CONFIG1_C_Y_CVBS (0 << 10)
+#define VEC_CONFIG1_CVBS_Y_C (1 << 10)
+#define VEC_CONFIG1_PR_Y_PB (2 << 10)
+#define VEC_CONFIG1_RGB (4 << 10)
+#define VEC_CONFIG1_Y_C_CVBS (5 << 10)
+#define VEC_CONFIG1_C_CVBS_Y (6 << 10)
+#define VEC_CONFIG1_C_CVBS_CVBS (7 << 10)
+#define VEC_CONFIG1_DIS_CHR BIT(9)
+#define VEC_CONFIG1_DIS_LUMA BIT(8)
+#define VEC_CONFIG1_YCBCR_IN BIT(6)
+#define VEC_CONFIG1_DITHER_TYPE_LFSR 0
+#define VEC_CONFIG1_DITHER_TYPE_COUNTER BIT(5)
+#define VEC_CONFIG1_DITHER_EN BIT(4)
+#define VEC_CONFIG1_CYDELAY BIT(3)
+#define VEC_CONFIG1_LUMADIS BIT(2)
+#define VEC_CONFIG1_COMPDIS BIT(1)
+#define VEC_CONFIG1_CUSTOM_FREQ BIT(0)
+
+#define VEC_CONFIG2 0x18c
+#define VEC_CONFIG2_PROG_SCAN BIT(15)
+#define VEC_CONFIG2_SYNC_ADJ_MASK GENMASK(14, 12)
+#define VEC_CONFIG2_SYNC_ADJ(x) (((x) / 2) << 12)
+#define VEC_CONFIG2_PBPR_EN BIT(10)
+#define VEC_CONFIG2_UV_DIG_DIS BIT(6)
+#define VEC_CONFIG2_RGB_DIG_DIS BIT(5)
+#define VEC_CONFIG2_TMUX_MASK GENMASK(3, 2)
+#define VEC_CONFIG2_TMUX_DRIVE0 (0 << 2)
+#define VEC_CONFIG2_TMUX_RG_COMP (1 << 2)
+#define VEC_CONFIG2_TMUX_UV_YC (2 << 2)
+#define VEC_CONFIG2_TMUX_SYNC_YC (3 << 2)
+
+#define VEC_INTERRUPT_CONTROL 0x190
+#define VEC_INTERRUPT_STATUS 0x194
+#define VEC_FCW_SECAM_B 0x198
+#define VEC_SECAM_GAIN_VAL 0x19c
+
+#define VEC_CONFIG3 0x1a0
+#define VEC_CONFIG3_HORIZ_LEN_STD (0 << 0)
+#define VEC_CONFIG3_HORIZ_LEN_MPEG1_SIF (1 << 0)
+#define VEC_CONFIG3_SHAPE_NON_LINEAR BIT(1)
+
+#define VEC_STATUS0 0x200
+#define VEC_MASK0 0x204
+
+#define VEC_CFG 0x208
+#define VEC_CFG_SG_MODE_MASK GENMASK(6, 5)
+#define VEC_CFG_SG_MODE(x) ((x) << 5)
+#define VEC_CFG_SG_EN BIT(4)
+#define VEC_CFG_VEC_EN BIT(3)
+#define VEC_CFG_MB_EN BIT(2)
+#define VEC_CFG_ENABLE BIT(1)
+#define VEC_CFG_TB_EN BIT(0)
+
+#define VEC_DAC_TEST 0x20c
+
+#define VEC_DAC_CONFIG 0x210
+#define VEC_DAC_CONFIG_LDO_BIAS_CTRL(x) ((x) << 24)
+#define VEC_DAC_CONFIG_DRIVER_CTRL(x) ((x) << 16)
+#define VEC_DAC_CONFIG_DAC_CTRL(x) (x)
+
+#define VEC_DAC_MISC 0x214
+#define VEC_DAC_MISC_VCD_CTRL_MASK GENMASK(31, 16)
+#define VEC_DAC_MISC_VCD_CTRL(x) ((x) << 16)
+#define VEC_DAC_MISC_VID_ACT BIT(8)
+#define VEC_DAC_MISC_VCD_PWRDN BIT(6)
+#define VEC_DAC_MISC_BIAS_PWRDN BIT(5)
+#define VEC_DAC_MISC_DAC_PWRDN BIT(2)
+#define VEC_DAC_MISC_LDO_PWRDN BIT(1)
+#define VEC_DAC_MISC_DAC_RST_N BIT(0)
+
+
+/* General VEC hardware state. */
+struct vc4_vec {
+ struct platform_device *pdev;
+
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ void __iomem *regs;
+
+ struct clk *clock;
+
+ const struct vc4_vec_tv_mode *tv_mode;
+};
+
+#define VEC_READ(offset) readl(vec->regs + (offset))
+#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
+
+/* VC4 VEC encoder KMS struct */
+struct vc4_vec_encoder {
+ struct vc4_encoder base;
+ struct vc4_vec *vec;
+};
+
+static inline struct vc4_vec_encoder *
+to_vc4_vec_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_vec_encoder, base.base);
+}
+
+/* VC4 VEC connector KMS struct */
+struct vc4_vec_connector {
+ struct drm_connector base;
+ struct vc4_vec *vec;
+
+ /* Since the connector is attached to just the one encoder,
+ * this is the reference to it so we can do the best_encoder()
+ * hook.
+ */
+ struct drm_encoder *encoder;
+};
+
+static inline struct vc4_vec_connector *
+to_vc4_vec_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_vec_connector, base);
+}
+
+enum vc4_vec_tv_mode_id {
+ VC4_VEC_TV_MODE_NTSC,
+ VC4_VEC_TV_MODE_NTSC_J,
+ VC4_VEC_TV_MODE_PAL,
+ VC4_VEC_TV_MODE_PAL_M,
+};
+
+struct vc4_vec_tv_mode {
+ const struct drm_display_mode *mode;
+ void (*mode_set)(struct vc4_vec *vec);
+};
+
+#define VEC_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} vec_regs[] = {
+ VEC_REG(VEC_WSE_CONTROL),
+ VEC_REG(VEC_WSE_WSS_DATA),
+ VEC_REG(VEC_WSE_VPS_DATA1),
+ VEC_REG(VEC_WSE_VPS_CONTROL),
+ VEC_REG(VEC_REVID),
+ VEC_REG(VEC_CONFIG0),
+ VEC_REG(VEC_SCHPH),
+ VEC_REG(VEC_CLMP0_START),
+ VEC_REG(VEC_CLMP0_END),
+ VEC_REG(VEC_FREQ3_2),
+ VEC_REG(VEC_FREQ1_0),
+ VEC_REG(VEC_CONFIG1),
+ VEC_REG(VEC_CONFIG2),
+ VEC_REG(VEC_INTERRUPT_CONTROL),
+ VEC_REG(VEC_INTERRUPT_STATUS),
+ VEC_REG(VEC_FCW_SECAM_B),
+ VEC_REG(VEC_SECAM_GAIN_VAL),
+ VEC_REG(VEC_CONFIG3),
+ VEC_REG(VEC_STATUS0),
+ VEC_REG(VEC_MASK0),
+ VEC_REG(VEC_CFG),
+ VEC_REG(VEC_DAC_TEST),
+ VEC_REG(VEC_DAC_CONFIG),
+ VEC_REG(VEC_DAC_MISC),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_vec_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_vec *vec = vc4->vec;
+ int i;
+
+ if (!vec)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(vec_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ vec_regs[i].name, vec_regs[i].reg,
+ VEC_READ(vec_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static const struct drm_display_mode ntsc_mode = {
+ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
+ 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ DRM_MODE_FLAG_INTERLACE)
+};
+
+static void vc4_vec_pal_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
+ VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
+}
+
+static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
+{
+ VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
+ VEC_WRITE(VEC_CONFIG1,
+ VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ);
+ VEC_WRITE(VEC_FREQ3_2, 0x223b);
+ VEC_WRITE(VEC_FREQ1_0, 0x61d1);
+}
+
+static const struct drm_display_mode pal_mode = {
+ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
+ 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ DRM_MODE_FLAG_INTERLACE)
+};
+
+static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
+ [VC4_VEC_TV_MODE_NTSC] = {
+ .mode = &ntsc_mode,
+ .mode_set = vc4_vec_ntsc_mode_set,
+ },
+ [VC4_VEC_TV_MODE_NTSC_J] = {
+ .mode = &ntsc_mode,
+ .mode_set = vc4_vec_ntsc_j_mode_set,
+ },
+ [VC4_VEC_TV_MODE_PAL] = {
+ .mode = &pal_mode,
+ .mode_set = vc4_vec_pal_mode_set,
+ },
+ [VC4_VEC_TV_MODE_PAL_M] = {
+ .mode = &pal_mode,
+ .mode_set = vc4_vec_pal_m_mode_set,
+ },
+};
+
+static enum drm_connector_status
+vc4_vec_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_unknown;
+}
+
+static void vc4_vec_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int vc4_vec_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_connector_state *state = connector->state;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev,
+ vc4_vec_tv_modes[state->tv.mode].mode);
+ if (!mode) {
+ DRM_ERROR("Failed to create a new display mode\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_connector_funcs vc4_vec_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = vc4_vec_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .destroy = vc4_vec_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs = {
+ .get_modes = vc4_vec_connector_get_modes,
+};
+
+static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
+ struct vc4_vec *vec)
+{
+ struct drm_connector *connector = NULL;
+ struct vc4_vec_connector *vec_connector;
+
+ vec_connector = devm_kzalloc(dev->dev, sizeof(*vec_connector),
+ GFP_KERNEL);
+ if (!vec_connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector = &vec_connector->base;
+ connector->interlace_allowed = true;
+
+ vec_connector->encoder = vec->encoder;
+ vec_connector->vec = vec;
+
+ drm_connector_init(dev, connector, &vc4_vec_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_mode_property,
+ VC4_VEC_TV_MODE_NTSC);
+ vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
+
+ drm_mode_connector_attach_encoder(connector, vec->encoder);
+
+ return connector;
+}
+
+static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+ int ret;
+
+ VEC_WRITE(VEC_CFG, 0);
+ VEC_WRITE(VEC_DAC_MISC,
+ VEC_DAC_MISC_VCD_PWRDN |
+ VEC_DAC_MISC_BIAS_PWRDN |
+ VEC_DAC_MISC_DAC_PWRDN |
+ VEC_DAC_MISC_LDO_PWRDN);
+
+ clk_disable_unprepare(vec->clock);
+
+ ret = pm_runtime_put(&vec->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to release power domain: %d\n", ret);
+ return;
+ }
+}
+
+static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+ int ret;
+
+ ret = pm_runtime_get_sync(&vec->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ return;
+ }
+
+ /*
+ * We need to set the clock rate each time we enable the encoder
+ * because there's a chance we share the same parent with the HDMI
+ * clock, and both drivers are requesting different rates.
+ * The good news is, these 2 encoders cannot be enabled at the same
+ * time, thus preventing incompatible rate requests.
+ */
+ ret = clk_set_rate(vec->clock, 108000000);
+ if (ret) {
+ DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(vec->clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ return;
+ }
+
+ /* Reset the different blocks */
+ VEC_WRITE(VEC_WSE_RESET, 1);
+ VEC_WRITE(VEC_SOFT_RESET, 1);
+
+ /* Disable the CGSM-A and WSE blocks */
+ VEC_WRITE(VEC_WSE_CONTROL, 0);
+
+ /* Write config common to all modes. */
+
+ /*
+ * Color subcarrier phase: phase = 360 * SCHPH / 256.
+ * 0x28 <=> 39.375 deg.
+ */
+ VEC_WRITE(VEC_SCHPH, 0x28);
+
+ /*
+ * Reset to default values.
+ */
+ VEC_WRITE(VEC_CLMP0_START, 0xac);
+ VEC_WRITE(VEC_CLMP0_END, 0xec);
+ VEC_WRITE(VEC_CONFIG2,
+ VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS);
+ VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD);
+ VEC_WRITE(VEC_DAC_CONFIG,
+ VEC_DAC_CONFIG_DAC_CTRL(0xc) |
+ VEC_DAC_CONFIG_DRIVER_CTRL(0xc) |
+ VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x46));
+
+ /* Mask all interrupts. */
+ VEC_WRITE(VEC_MASK0, 0);
+
+ vec->tv_mode->mode_set(vec);
+
+ VEC_WRITE(VEC_DAC_MISC,
+ VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N);
+ VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN);
+}
+
+
+static bool vc4_vec_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void vc4_vec_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
+ struct vc4_vec *vec = vc4_vec_encoder->vec;
+
+ vec->tv_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+}
+
+static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ const struct vc4_vec_tv_mode *vec_mode;
+
+ vec_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+
+ if (conn_state->crtc &&
+ !drm_mode_equal(vec_mode->mode, &crtc_state->adjusted_mode))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
+ .disable = vc4_vec_encoder_disable,
+ .enable = vc4_vec_encoder_enable,
+ .mode_fixup = vc4_vec_encoder_mode_fixup,
+ .atomic_check = vc4_vec_encoder_atomic_check,
+ .atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
+};
+
+static const struct of_device_id vc4_vec_dt_match[] = {
+ { .compatible = "brcm,bcm2835-vec", .data = NULL },
+ { /* sentinel */ },
+};
+
+static const char * const tv_mode_names[] = {
+ [VC4_VEC_TV_MODE_NTSC] = "NTSC",
+ [VC4_VEC_TV_MODE_NTSC_J] = "NTSC-J",
+ [VC4_VEC_TV_MODE_PAL] = "PAL",
+ [VC4_VEC_TV_MODE_PAL_M] = "PAL-M",
+};
+
+static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_vec *vec;
+ struct vc4_vec_encoder *vc4_vec_encoder;
+ int ret;
+
+ ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
+ tv_mode_names);
+ if (ret)
+ return ret;
+
+ vec = devm_kzalloc(dev, sizeof(*vec), GFP_KERNEL);
+ if (!vec)
+ return -ENOMEM;
+
+ vc4_vec_encoder = devm_kzalloc(dev, sizeof(*vc4_vec_encoder),
+ GFP_KERNEL);
+ if (!vc4_vec_encoder)
+ return -ENOMEM;
+ vc4_vec_encoder->base.type = VC4_ENCODER_TYPE_VEC;
+ vc4_vec_encoder->vec = vec;
+ vec->encoder = &vc4_vec_encoder->base.base;
+
+ vec->pdev = pdev;
+ vec->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(vec->regs))
+ return PTR_ERR(vec->regs);
+
+ vec->clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(vec->clock)) {
+ ret = PTR_ERR(vec->clock);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
+
+ vec->connector = vc4_vec_connector_init(drm, vec);
+ if (IS_ERR(vec->connector)) {
+ ret = PTR_ERR(vec->connector);
+ goto err_destroy_encoder;
+ }
+
+ dev_set_drvdata(dev, vec);
+
+ vc4->vec = vec;
+
+ return 0;
+
+err_destroy_encoder:
+ drm_encoder_cleanup(vec->encoder);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static void vc4_vec_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_vec *vec = dev_get_drvdata(dev);
+
+ vc4_vec_connector_destroy(vec->connector);
+ drm_encoder_cleanup(vec->encoder);
+ pm_runtime_disable(dev);
+
+ vc4->vec = NULL;
+}
+
+static const struct component_ops vc4_vec_ops = {
+ .bind = vc4_vec_bind,
+ .unbind = vc4_vec_unbind,
+};
+
+static int vc4_vec_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_vec_ops);
+}
+
+static int vc4_vec_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_vec_ops);
+ return 0;
+}
+
+struct platform_driver vc4_vec_driver = {
+ .probe = vc4_vec_dev_probe,
+ .remove = vc4_vec_dev_remove,
+ .driver = {
+ .name = "vc4_vec",
+ .of_match_table = vc4_vec_dt_match,
+ },
+};
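The TV-standard handling in this new file is a plain table dispatch: the connector's tv.mode property indexes vc4_vec_tv_modes[], and the chosen entry's mode_set() callback programs VEC_CONFIG0/VEC_CONFIG1 when the encoder is enabled. A minimal standalone sketch of that pattern (names are hypothetical; it only illustrates the shape of the lookup):

#include <stdio.h>

/* Hypothetical stand-in for the per-standard callback table. */
struct tv_mode {
	const char *name;
	void (*mode_set)(void);
};

static void ntsc_set(void) { printf("program NTSC config registers\n"); }
static void pal_set(void)  { printf("program PAL config registers\n"); }

static const struct tv_mode tv_modes[] = {
	{ "NTSC", ntsc_set },
	{ "PAL",  pal_set },
};

int main(void)
{
	unsigned int selected = 1;	/* would come from conn_state->tv.mode */

	/* Same shape as vec->tv_mode->mode_set(vec) in the enable path. */
	tv_modes[selected].mode_set();
	return 0;
}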
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f36c14729b55..477e07f0ecb6 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ unsigned long vaddr = vmf->address;
struct page *page;
page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..da25dfe7b80e 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
- struct fence base;
+ struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
{
return false;
}
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
- snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+ snprintf(str, size, "%u",
+ dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
}
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
- unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
{
struct vgem_fence *fence;
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
return NULL;
spin_lock_init(&fence->lock);
- fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
- fence_context_alloc(1), 1);
+ dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ dma_fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
@@ -125,7 +126,6 @@ static int attach_dmabuf(struct drm_device *dev,
return PTR_ERR(dmabuf);
obj->dma_buf = dmabuf;
- drm_gem_object_reference(obj);
return 0;
}
@@ -157,7 +157,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -190,12 +190,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
ret = 0;
- mutex_lock(&resv->lock.base);
+ ww_mutex_lock(&resv->lock, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
else if ((ret = reservation_object_reserve_shared(resv)) == 0)
reservation_object_add_shared_fence(resv, fence);
- mutex_unlock(&resv->lock.base);
+ ww_mutex_unlock(&resv->lock);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
@@ -209,8 +209,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
err_fence:
if (ret) {
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +239,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
- struct fence *fence;
+ struct dma_fence *fence;
int ret = 0;
if (arg->flags)
@@ -253,11 +253,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
return ret;
}
@@ -271,8 +271,8 @@ int vgem_fence_open(struct vgem_file *vfile)
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
- fence_signal(p);
- fence_put(p);
+ dma_fence_signal(p);
+ dma_fence_put(p);
return 0;
}
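These hunks are part of the tree-wide rename of struct fence to struct dma_fence; the ops table and helpers keep the same shape under the dma_fence_* names. A minimal sketch of the renamed interface as a driver of this era would use it (kernel context assumed; the my_* names are hypothetical, and enable_signaling/wait are still mandatory callbacks at this point):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fence_lock);
static u64 my_fence_context;
static unsigned int my_fence_seqno;

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

static struct dma_fence *my_fence_create(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	if (!my_fence_context)
		my_fence_context = dma_fence_context_alloc(1);

	/* Caller later does dma_fence_signal(f) and dma_fence_put(f). */
	dma_fence_init(f, &my_fence_ops, &my_fence_lock,
		       my_fence_context, ++my_fence_seqno);
	return f;
}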
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e5582bab7e3c..9e0e5392b6ec 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,9 +64,7 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index e1afc3d3f8d9..81d1807ac228 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,10 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_KMS_HELPER
+ select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
- QEMU based VMMs (like KVM or Xen).
+ QEMU based VMMs (like KVM or Xen).
If unsure say M.
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 7cf3678623c3..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_hw_done(state);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 49e5996cb9f2..3b97d50fd392 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,16 +28,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
@@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ char unique[20];
- DRM_INFO("pci: %s detected\n",
- vga ? "virtio-vga" : "virtio-gpu-pci");
+ DRM_INFO("pci: %s detected at %s\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci",
+ pname);
dev->pdev = pdev;
if (vga)
virtio_pci_kick_out_firmware_fb(pdev);
+
+ snprintf(unique, sizeof(unique), "pci:%s", pname);
+ ret = drm_dev_set_unique(dev, unique);
+ if (ret)
+ goto err_free;
+
}
ret = drm_dev_register(dev, 0);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5820b7020ae5..d82489815096 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -108,16 +108,13 @@ static const struct file_operations virtio_gpu_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
-#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
-#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..08906c8ce3fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -82,7 +81,7 @@ struct virtio_gpu_fence_driver {
};
struct virtio_gpu_fence {
- struct fence f;
+ struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 2242a80866a9..dd21f950e129 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -200,16 +200,10 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
static struct fb_ops virtio_gpufb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+ DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = virtio_gpu_3d_fillrect,
.fb_copyarea = virtio_gpu_3d_copyarea,
.fb_imageblit = virtio_gpu_3d_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
{
return true;
}
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
return false;
}
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", fence->seq);
}
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
.enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
- fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- fence_get(&(*fence)->f);
+ dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ drv->context, (*fence)->seq);
+ dma_fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
if (last_seq < fence->seq)
continue;
- fence_signal_locked(&fence->f);
+ dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
drm_free_large(buflist);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
return 0;
out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return ret;
}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return 0;
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
- vgdev->fence_drv.context = fence_context_alloc(1);
+ vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..cb75f0663ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95d..4a1de9f81193 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
.move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..974f9410474b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_vbuffer *vbuf;
- int i, size, count = 0;
+ int i, size, count = 16;
void *ptr;
INIT_LIST_HEAD(&vgdev->free_vbufs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..723fd763da8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -465,33 +465,34 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
- int ret = 0;
+ int ret = 0, depth;
size_t new_bo_size;
- ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+ ret = vmw_fb_compute_depth(var, &depth);
if (ret)
return ret;
mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
- mode_cmd.bpp = var->bits_per_pixel;
- mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+ mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
+ mode_cmd.pixel_format =
+ drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
- cur_fb->bits_per_pixel == mode_cmd.bpp &&
- cur_fb->depth == mode_cmd.depth &&
- cur_fb->pitches[0] == mode_cmd.pitch)
+ cur_fb->pixel_format == mode_cmd.pixel_format &&
+ cur_fb->pitches[0] == mode_cmd.pitches[0])
return 0;
/* Need new buffer object ? */
- new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+ new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
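The vmwgfx_fb.c conversion above replaces the legacy bpp/depth/pitch triple with drm_mode_fb_cmd2's fourcc pixel_format and per-plane pitches. A standalone sketch of the arithmetic (the fourcc values are stand-ins and the helper only mirrors the legacy-format mapping for the depths vmwgfx handles; it is not the kernel function itself):

#include <assert.h>
#include <stdint.h>

#define FMT_XRGB8888 1	/* stand-ins for the DRM_FORMAT_* fourccs */
#define FMT_ARGB8888 2
#define FMT_RGB565   3
#define FMT_XRGB1555 4

/* Mirrors the legacy bpp/depth to fourcc mapping for these cases. */
static uint32_t legacy_to_fourcc(uint32_t bpp, uint32_t depth)
{
	if (bpp == 32)
		return depth == 24 ? FMT_XRGB8888 : FMT_ARGB8888;
	return depth == 15 ? FMT_XRGB1555 : FMT_RGB565;
}

int main(void)
{
	uint32_t width = 1920, bpp = 32, depth = 24;
	uint32_t pitch = ((bpp + 7) / 8) * width;	/* bytes per scanline */

	assert(pitch == 7680);
	assert(legacy_to_fourcc(bpp, depth) == FMT_XRGB8888);
	assert(legacy_to_fourcc(16, 15) == FMT_XRGB1555);
	return 0;
}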
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..e7daf59bac80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -516,7 +516,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd,
bool is_dmabuf_proxy)
@@ -525,6 +525,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
+ struct drm_format_name_buf format_name;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
@@ -548,21 +549,22 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->depth) {
- case 32:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
format = SVGA3D_A8R8G8B8;
break;
- case 24:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
break;
- case 16:
+ case DRM_FORMAT_RGB565:
format = SVGA3D_R5G6B5;
break;
- case 15:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_A1R5G5B5;
break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
@@ -581,14 +583,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- /* XXX get the first 3 from the surface info */
- vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitches[0] = mode_cmd->pitch;
- vfbs->base.base.depth = mode_cmd->depth;
- vfbs->base.base.width = mode_cmd->width;
- vfbs->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handle;
+ vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
@@ -755,7 +752,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
* 0 on success, error code otherwise
*/
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd *mode_cmd,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_dma_buffer *dmabuf_mob,
struct vmw_surface **srf_out)
{
@@ -763,17 +760,18 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
unsigned int bytes_pp;
+ struct drm_format_name_buf format_name;
int ret;
- switch (mode_cmd->depth) {
- case 32:
- case 24:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
bytes_pp = 4;
break;
- case 16:
- case 15:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_R5G6B5;
bytes_pp = 2;
break;
@@ -784,11 +782,12 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
break;
default:
- DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
- content_base_size.width = mode_cmd->pitch / bytes_pp;
+ content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
@@ -826,16 +825,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
+ struct drm_format_name_buf format_name;
int ret;
- requested_size = mode_cmd->height * mode_cmd->pitch;
+ requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
@@ -844,27 +844,16 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->active_display_unit == vmw_du_screen_object) {
- switch (mode_cmd->depth) {
- case 32:
- case 24:
- /* Only support 32 bpp for 32 and 24 depth fbs */
- if (mode_cmd->bpp == 32)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
- case 16:
- case 15:
- /* Only support 16 bpp for 16 and 15 depth fbs */
- if (mode_cmd->bpp == 16)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_RGB565:
+ break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
}
@@ -875,14 +864,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitches[0] = mode_cmd->pitch;
- vfbd->base.base.depth = mode_cmd->depth;
- vfbd->base.base.width = mode_cmd->width;
- vfbd->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbd->base.base, mode_cmd);
vfbd->base.dmabuf = true;
vfbd->buffer = vmw_dmabuf_reference(dmabuf);
- vfbd->base.user_handle = mode_cmd->handle;
+ vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -916,7 +901,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
bool is_dmabuf_proxy = false;
@@ -971,7 +956,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd2)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -979,16 +964,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- struct drm_mode_fb_cmd mode_cmd;
int ret;
- mode_cmd.width = mode_cmd2->width;
- mode_cmd.height = mode_cmd2->height;
- mode_cmd.pitch = mode_cmd2->pitches[0];
- mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
-
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
@@ -996,8 +973,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd.pitch,
- mode_cmd.height)) {
+ mode_cmd->pitches[0],
+ mode_cmd->height)) {
DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1011,7 +988,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
@@ -1023,14 +1000,14 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
- mode_cmd.handle,
+ mode_cmd->handles[0],
&surface, &bo);
if (ret)
goto err_out;
vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
!(dev_priv->capabilities & SVGA_CAP_3D),
- &mode_cmd);
+ mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff4803c107bc..f42ce9a1c3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -248,7 +248,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd);
+ const struct drm_mode_fb_cmd2 *mode_cmd);
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9d070e..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
new file mode 100644
index 000000000000..4065b2840f1c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZTE
+ tristate "DRM Support for ZTE SoCs"
+ depends on DRM && ARCH_ZX
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
new file mode 100644
index 000000000000..699180bfd57c
--- /dev/null
+++ b/drivers/gpu/drm/zte/Makefile
@@ -0,0 +1,7 @@
+zxdrm-y := \
+ zx_drm_drv.o \
+ zx_hdmi.o \
+ zx_plane.o \
+ zx_vou.o
+
+obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
new file mode 100644
index 000000000000..3e76f72c92ff
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_vou.h"
+
+struct zx_drm_private {
+ struct drm_fbdev_cma *fbdev;
+};
+
+static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void zx_drm_lastclose(struct drm_device *drm)
+{
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static const struct file_operations zx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver zx_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .lastclose = zx_drm_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = zx_vou_enable_vblank,
+ .disable_vblank = zx_vou_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &zx_drm_fops,
+ .name = "zx-vou",
+ .desc = "ZTE VOU Controller DRM",
+ .date = "20160811",
+ .major = 1,
+ .minor = 0,
+};
+
+static int zx_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct zx_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&zx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ drm->dev_private = priv;
+ dev_set_drvdata(dev, drm);
+
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 16;
+ drm->mode_config.min_height = 16;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &zx_drm_mode_config_funcs;
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
+ goto out_unregister;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
+ goto out_unbind;
+ }
+
+ /*
+ * We will manage the irq handler on our own. In this case, irq_enabled
+ * needs to be true for the vblank core support to work.
+ */
+ drm->irq_enabled = true;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ ret = PTR_ERR(priv->fbdev);
+ DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
+ priv->fbdev = NULL;
+ goto out_poll_fini;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_fbdev_fini;
+
+ return 0;
+
+out_fbdev_fini:
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+out_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+out_unbind:
+ component_unbind_all(dev, drm);
+out_unregister:
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void zx_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct zx_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ dev_set_drvdata(dev, NULL);
+ drm->dev_private = NULL;
+ drm_dev_unref(drm);
+}
+
+static const struct component_master_ops zx_drm_master_ops = {
+ .bind = zx_drm_bind,
+ .unbind = zx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int zx_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent = dev->of_node;
+ struct device_node *child;
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = of_platform_populate(parent, NULL, NULL, dev);
+ if (ret)
+ return ret;
+
+ for_each_available_child_of_node(parent, child) {
+ component_match_add(dev, &match, compare_of, child);
+ of_node_put(child);
+ }
+
+ return component_master_add_with_match(dev, &zx_drm_master_ops, match);
+}
+
+static int zx_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &zx_drm_master_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_drm_of_match[] = {
+ { .compatible = "zte,zx296718-vou", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_drm_of_match);
+
+static struct platform_driver zx_drm_platform_driver = {
+ .probe = zx_drm_probe,
+ .remove = zx_drm_remove,
+ .driver = {
+ .name = "zx-drm",
+ .of_match_table = zx_drm_of_match,
+ },
+};
+
+static struct platform_driver *drivers[] = {
+ &zx_crtc_driver,
+ &zx_hdmi_driver,
+ &zx_drm_platform_driver,
+};
+
+static int zx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(zx_drm_init);
+
+static void zx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(zx_drm_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
new file mode 100644
index 000000000000..e65cd18a6cba
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_drm_drv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_DRM_DRV_H__
+#define __ZX_DRM_DRV_H__
+
+extern struct platform_driver zx_crtc_driver;
+extern struct platform_driver zx_hdmi_driver;
+
+static inline u32 zx_readl(void __iomem *reg)
+{
+ return readl_relaxed(reg);
+}
+
+static inline void zx_writel(void __iomem *reg, u32 val)
+{
+ writel_relaxed(val, reg);
+}
+
+static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = zx_readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ zx_writel(reg, tmp);
+}
+
+#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
new file mode 100644
index 000000000000..6bf6c364811e
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+
+#include "zx_hdmi_regs.h"
+#include "zx_vou.h"
+
+#define ZX_HDMI_INFOFRAME_SIZE 31
+#define DDC_SEGMENT_ADDR 0x30
+
+struct zx_hdmi_i2c {
+ struct i2c_adapter adap;
+ struct mutex lock;
+};
+
+struct zx_hdmi {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct zx_hdmi_i2c *ddc;
+ struct device *dev;
+ struct drm_device *drm;
+ void __iomem *mmio;
+ struct clk *cec_clk;
+ struct clk *osc_clk;
+ struct clk *xclk;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ const struct vou_inf *inf;
+};
+
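+/* Note: the argument name must match the corresponding struct zx_hdmi member */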
+#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
+
+static const struct vou_inf vou_inf_hdmi = {
+ .id = VOU_HDMI,
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+ .clocks_sel_bits = BIT(13) | BIT(2),
+};
+
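+/* HDMI registers hold byte-wide values laid out at 32-bit strides (hence offset * 4) */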
+static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
+{
+ writel_relaxed(val, hdmi->mmio + offset * 4);
+}
+
+static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
+ u8 mask, u8 val)
+{
+ u8 tmp;
+
+ tmp = hdmi_readb(hdmi, offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ hdmi_writeb(hdmi, offset, tmp);
+}
+
+static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
+ union hdmi_infoframe *frame, u8 fsel)
+{
+ u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
+ int num;
+ int i;
+
+ hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
+
+ num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
+ if (num < 0) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
+ return num;
+ }
+
+ for (i = 0; i < num; i++)
+ hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
+
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
+ TPI_INFO_TRANS_RPT);
+ hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
+ TPI_INFO_TRANS_EN);
+
+ return num;
+}
+
+static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
+}
+
+static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* We always use YUV444 for HDMI output. */
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
+}
+
+static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ if (hdmi->sink_is_hdmi) {
+ zx_hdmi_config_video_avi(hdmi, mode);
+ zx_hdmi_config_video_vsi(hdmi, mode);
+ }
+}
+
+static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
+{
+ /* Copied from ZTE BSP code */
+ hdmi_writeb(hdmi, 0x222, 0x0);
+ hdmi_writeb(hdmi, 0x224, 0x4);
+ hdmi_writeb(hdmi, 0x909, 0x0);
+ hdmi_writeb(hdmi, 0x7b0, 0x90);
+ hdmi_writeb(hdmi, 0x7b1, 0x00);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b8, 0xaa);
+ hdmi_writeb(hdmi, 0x7b2, 0xa7);
+ hdmi_writeb(hdmi, 0x7b3, 0x0f);
+ hdmi_writeb(hdmi, 0x7b4, 0x0f);
+ hdmi_writeb(hdmi, 0x7b5, 0x55);
+ hdmi_writeb(hdmi, 0x7b7, 0x03);
+ hdmi_writeb(hdmi, 0x7b9, 0x12);
+ hdmi_writeb(hdmi, 0x7ba, 0x32);
+ hdmi_writeb(hdmi, 0x7bc, 0x68);
+ hdmi_writeb(hdmi, 0x7be, 0x40);
+ hdmi_writeb(hdmi, 0x7bf, 0x84);
+ hdmi_writeb(hdmi, 0x7c1, 0x0f);
+ hdmi_writeb(hdmi, 0x7c8, 0x02);
+ hdmi_writeb(hdmi, 0x7c9, 0x03);
+ hdmi_writeb(hdmi, 0x7ca, 0x40);
+ hdmi_writeb(hdmi, 0x7dc, 0x31);
+ hdmi_writeb(hdmi, 0x7e2, 0x04);
+ hdmi_writeb(hdmi, 0x7e0, 0x06);
+ hdmi_writeb(hdmi, 0x7cb, 0x68);
+ hdmi_writeb(hdmi, 0x7f9, 0x02);
+ hdmi_writeb(hdmi, 0x7b6, 0x02);
+ hdmi_writeb(hdmi, 0x7f3, 0x0);
+}
+
+static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
+{
+ /* Enable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
+
+ /* Enable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
+
+ /* Enable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
+
+ /* Enable HDMI/MHL mode for output */
+ hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
+ TEST_TXCTRL_HDMI_MODE);
+
+ /* Configure reg_qc_sel */
+ hdmi_writeb(hdmi, HDMICTL4, 0x3);
+
+ /* Enable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
+ INTR1_MONITOR_DETECT);
+
+ /* Start up phy */
+ zx_hdmi_phy_start(hdmi);
+}
+
+static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
+{
+ /* Disable interrupt */
+ hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
+
+ /* Disable deep color packet */
+ hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, 0);
+
+ /* Disable HDMI for TX */
+ hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
+
+ /* Disable pclk */
+ hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
+}
+
+static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ clk_prepare_enable(hdmi->cec_clk);
+ clk_prepare_enable(hdmi->osc_clk);
+ clk_prepare_enable(hdmi->xclk);
+
+ zx_hdmi_hw_enable(hdmi);
+
+ vou_inf_enable(hdmi->inf, encoder->crtc);
+}
+
+static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
+
+ vou_inf_disable(hdmi->inf, encoder->crtc);
+
+ zx_hdmi_hw_disable(hdmi);
+
+ clk_disable_unprepare(hdmi->xclk);
+ clk_disable_unprepare(hdmi->osc_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
+}
+
+static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
+ .enable = zx_hdmi_encoder_enable,
+ .disable = zx_hdmi_encoder_disable,
+ .mode_set = zx_hdmi_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &hdmi->ddc->adap);
+ if (!edid)
+ return 0;
+
+ hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_mode_status
+zx_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
+ .get_modes = zx_hdmi_connector_get_modes,
+ .mode_valid = zx_hdmi_connector_mode_valid,
+};
+
+static enum drm_connector_status
+zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zx_hdmi *hdmi = to_zx_hdmi(connector);
+
+ return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = zx_hdmi_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ encoder->possible_crtcs = VOU_CRTC_MASK;
+
+ drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm, &hdmi->connector, &zx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(&hdmi->connector,
+ &zx_hdmi_connector_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct zx_hdmi *hdmi = dev_id;
+ u8 lstat;
+
+ lstat = hdmi_readb(hdmi, L1_INTR_STAT);
+
+ /* Monitor detect/HPD interrupt */
+ if (lstat & L1_INTR_STAT_INTR1) {
+ u8 stat;
+
+ stat = hdmi_readb(hdmi, INTR1_STAT);
+ hdmi_writeb(hdmi, INTR1_STAT, stat);
+
+ if (stat & INTR1_MONITOR_DETECT)
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int len = msg->len;
+ u8 *buf = msg->buf;
+ int retry = 0;
+ int ret = 0;
+
+ /* Bits [9:8] of the byte count to read */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
+ /* Bits [7:0] of the byte count to read */
+ hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
+
+ /* Clear FIFO */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
+
+ /* Kick off the read */
+ hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
+ DDC_CMD_SEQUENTIAL_READ);
+
+ while (len > 0) {
+ int cnt, i;
+
+ /* FIFO needs some time to get ready */
+ usleep_range(500, 1000);
+
+ cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
+ if (cnt == 0) {
+ if (++retry > 5) {
+ DRM_DEV_ERROR(hdmi->dev,
+ "DDC FIFO read timed out!");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ for (i = 0; i < cnt; i++)
+ *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
+ len -= cnt;
+ }
+
+ return ret;
+}
+
+static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
+{
+ /*
+ * The DDC I2C adapter is only for reading EDID data, so we assume
+ * that the write to this adapter must be the EDID data offset.
+ */
+ if ((msg->len != 1) ||
+ ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
+ return -EINVAL;
+
+ if (msg->addr == DDC_SEGMENT_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
+ else if (msg->addr == DDC_ADDR)
+ hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
+
+ hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
+
+ return 0;
+}
+
+static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct zx_hdmi_i2c *ddc = hdmi->ddc;
+ int i, ret = 0;
+
+ mutex_lock(&ddc->lock);
+
+ /* Enable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Disable DDC master access */
+ hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
+
+ mutex_unlock(&ddc->lock);
+
+ return ret;
+}
+
+static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zx_hdmi_algorithm = {
+ .master_xfer = zx_hdmi_i2c_xfer,
+ .functionality = zx_hdmi_i2c_func,
+};
+
+static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct zx_hdmi_i2c *ddc;
+ int ret;
+
+ ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ hdmi->ddc = ddc;
+ mutex_init(&ddc->lock);
+
+ adap = &ddc->adap;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &zx_hdmi_algorithm;
+ snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
+ ret);
+ return ret;
+ }
+
+ i2c_set_adapdata(adap, hdmi);
+
+ return 0;
+}
+
+static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res;
+ struct zx_hdmi *hdmi;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm = drm;
+ hdmi->inf = &vou_inf_hdmi;
+
+ dev_set_drvdata(dev, hdmi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
+ if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
+ if (IS_ERR(hdmi->osc_clk)) {
+ ret = PTR_ERR(hdmi->osc_clk);
+ DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
+ if (IS_ERR(hdmi->xclk)) {
+ ret = PTR_ERR(hdmi->xclk);
+ DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_ddc_register(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_hdmi_register(drm, hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
+ zx_hdmi_irq_thread, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+}
+
+static const struct component_ops zx_hdmi_component_ops = {
+ .bind = zx_hdmi_bind,
+ .unbind = zx_hdmi_unbind,
+};
+
+static int zx_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_hdmi_component_ops);
+}
+
+static int zx_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_hdmi_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_hdmi_of_match[] = {
+ { .compatible = "zte,zx296718-hdmi", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
+
+struct platform_driver zx_hdmi_driver = {
+ .probe = zx_hdmi_probe,
+ .remove = zx_hdmi_remove,
+ .driver = {
+ .name = "zx-hdmi",
+ .of_match_table = zx_hdmi_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
new file mode 100644
index 000000000000..de911f66b658
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_HDMI_REGS_H__
+#define __ZX_HDMI_REGS_H__
+
+#define FUNC_SEL 0x000b
+#define FUNC_HDMI_EN BIT(0)
+#define CLKPWD 0x000d
+#define CLKPWD_PDIDCK BIT(2)
+#define P2T_CTRL 0x0066
+#define P2T_DC_PKT_EN BIT(7)
+#define L1_INTR_STAT 0x007e
+#define L1_INTR_STAT_INTR1 BIT(0)
+#define INTR1_STAT 0x008f
+#define INTR1_MASK 0x0095
+#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
+#define ZX_DDC_ADDR 0x00ed
+#define ZX_DDC_SEGM 0x00ee
+#define ZX_DDC_OFFSET 0x00ef
+#define ZX_DDC_DIN_CNT1 0x00f0
+#define ZX_DDC_DIN_CNT2 0x00f1
+#define ZX_DDC_CMD 0x00f3
+#define DDC_CMD_MASK 0xf
+#define DDC_CMD_CLEAR_FIFO 0x9
+#define DDC_CMD_SEQUENTIAL_READ 0x2
+#define ZX_DDC_DATA 0x00f4
+#define ZX_DDC_DOUT_CNT 0x00f5
+#define DDC_DOUT_CNT_MASK 0x1f
+#define TEST_TXCTRL 0x00f7
+#define TEST_TXCTRL_HDMI_MODE BIT(1)
+#define HDMICTL4 0x0235
+#define TPI_HPD_RSEN 0x063b
+#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
+#define TPI_INFO_FSEL 0x06bf
+#define FSEL_AVI 0
+#define FSEL_GBD 1
+#define FSEL_AUDIO 2
+#define FSEL_SPD 3
+#define FSEL_MPEG 4
+#define FSEL_VSIF 5
+#define TPI_INFO_B0 0x06c0
+#define TPI_INFO_EN 0x06df
+#define TPI_INFO_TRANS_EN BIT(7)
+#define TPI_INFO_TRANS_RPT BIT(6)
+#define TPI_DDC_MASTER_EN 0x06f8
+#define HW_DDC_MASTER BIT(7)
+
+#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
new file mode 100644
index 000000000000..546eb92a94e8
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_plane_regs.h"
+#include "zx_vou.h"
+
+struct zx_plane {
+ struct drm_plane plane;
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+static const uint32_t gl_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+};
+
+static int zx_gl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ /* plane must be enabled */
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(plane_state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static int zx_gl_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ return GL_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return GL_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return GL_FMT_RGB565;
+ case DRM_FORMAT_ARGB1555:
+ return GL_FMT_ARGB1555;
+ case DRM_FORMAT_ARGB4444:
+ return GL_FMT_ARGB4444;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline void zx_gl_set_update(struct zx_plane *zplane)
+{
+ void __iomem *layer = zplane->layer;
+
+ zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
+}
+
+static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
+{
+ zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
+}
+
+void zx_plane_set_update(struct drm_plane *plane)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+
+ zx_gl_rsz_set_update(zplane);
+ zx_gl_set_update(zplane);
+}
+
+static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
+ u32 dst_w, u32 dst_h)
+{
+ void __iomem *rsz = zplane->rsz;
+
+ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+ zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+ zx_gl_rsz_set_update(zplane);
+}
+
+static void zx_gl_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ void __iomem *layer = zplane->layer;
+ void __iomem *csc = zplane->csc;
+ void __iomem *hbsc = zplane->hbsc;
+ u32 src_x, src_y, src_w, src_h;
+ u32 dst_x, dst_y, dst_w, dst_h;
+ unsigned int bpp;
+ uint32_t format;
+ dma_addr_t paddr;
+ u32 stride;
+ int fmt;
+
+ if (!fb)
+ return;
+
+ format = fb->pixel_format;
+ stride = fb->pitches[0];
+
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+
+ dst_x = plane->state->crtc_x;
+ dst_y = plane->state->crtc_y;
+ dst_w = plane->state->crtc_w;
+ dst_h = plane->state->crtc_h;
+
+ bpp = drm_format_plane_cpp(format, 0) * 8; /* cpp is in bytes */
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ paddr = cma_obj->paddr + fb->offsets[0];
+ paddr += src_y * stride + src_x * bpp / 8;
+ zx_writel(layer + GL_ADDR, paddr);
+
+ /* Set up source height/width register */
+ zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+ /* Set up start position register */
+ zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+ /* Set up end position register */
+ zx_writel(layer + GL_POS_END,
+ GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+ /* Set up stride register */
+ zx_writel(layer + GL_STRIDE, stride & 0xffff);
+
+ /* Set up graphic layer data format */
+ fmt = zx_gl_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
+ fmt << GL_DATA_FMT_SHIFT);
+
+ /* Initialize global alpha with a sane value */
+ zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
+ 0xff << GL_GLOBAL_ALPHA_SHIFT);
+
+ /* Setup CSC for the GL */
+ if (dst_h > 720)
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ else
+ zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
+ CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
+ zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
+
+ /* Always use the scaler since it exists (setting this bit means no bypass) */
+ zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
+ GL_SCALER_BYPASS_MODE);
+
+ zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
+
+ /* Enable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+ zx_gl_set_update(zplane);
+}
+
+static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
+ .atomic_check = zx_gl_plane_atomic_check,
+ .atomic_update = zx_gl_plane_atomic_update,
+};
+
+static void zx_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs zx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = zx_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void zx_plane_hbsc_init(struct zx_plane *zplane)
+{
+ void __iomem *hbsc = zplane->hbsc;
+
+ /*
+ * Initialize HBSC block with a sane configuration per recommendation
+ * from ZTE BSP code.
+ */
+ zx_writel(hbsc + HBSC_SATURATION, 0x200);
+ zx_writel(hbsc + HBSC_HUE, 0x0);
+ zx_writel(hbsc + HBSC_BRIGHT, 0x0);
+ zx_writel(hbsc + HBSC_CONTRAST, 0x200);
+
+ zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
+ zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
+}
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper;
+ struct zx_plane *zplane;
+ struct drm_plane *plane;
+ const uint32_t *formats;
+ unsigned int format_count;
+ int ret;
+
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane)
+ return ERR_PTR(-ENOMEM);
+
+ plane = &zplane->plane;
+
+ zplane->layer = data->layer;
+ zplane->hbsc = data->hbsc;
+ zplane->csc = data->csc;
+ zplane->rsz = data->rsz;
+
+ zx_plane_hbsc_init(zplane);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ helper = &zx_gl_plane_helper_funcs;
+ formats = gl_formats;
+ format_count = ARRAY_SIZE(gl_formats);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ /* TODO: add video layer (vl) support */
+ break;
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
+ &zx_plane_funcs, formats, format_count,
+ type, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, helper);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
new file mode 100644
index 000000000000..2b82cd558d9d
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_H__
+#define __ZX_PLANE_H__
+
+struct zx_layer_data {
+ void __iomem *layer;
+ void __iomem *csc;
+ void __iomem *hbsc;
+ void __iomem *rsz;
+};
+
+struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
+ struct zx_layer_data *data,
+ enum drm_plane_type type);
+void zx_plane_set_update(struct drm_plane *plane);
+
+#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
new file mode 100644
index 000000000000..3dde6716a558
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_plane_regs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_PLANE_REGS_H__
+#define __ZX_PLANE_REGS_H__
+
+/* GL registers */
+#define GL_CTRL0 0x00
+#define GL_UPDATE BIT(5)
+#define GL_CTRL1 0x04
+#define GL_DATA_FMT_SHIFT 0
+#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
+#define GL_FMT_ARGB8888 0
+#define GL_FMT_RGB888 1
+#define GL_FMT_RGB565 2
+#define GL_FMT_ARGB1555 3
+#define GL_FMT_ARGB4444 4
+#define GL_CTRL2 0x08
+#define GL_GLOBAL_ALPHA_SHIFT 8
+#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
+#define GL_CTRL3 0x0c
+#define GL_SCALER_BYPASS_MODE BIT(0)
+#define GL_STRIDE 0x18
+#define GL_ADDR 0x1c
+#define GL_SRC_SIZE 0x38
+#define GL_SRC_W_SHIFT 16
+#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
+#define GL_SRC_H_SHIFT 0
+#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
+#define GL_POS_START 0x9c
+#define GL_POS_END 0xa0
+#define GL_POS_X_SHIFT 16
+#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
+#define GL_POS_Y_SHIFT 0
+#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
+
+#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
+#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
+#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
+#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
+
+/* CSC registers */
+#define CSC_CTRL0 0x30
+#define CSC_COV_MODE_SHIFT 16
+#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
+#define CSC_BT601_IMAGE_RGB2YCBCR 0
+#define CSC_BT601_IMAGE_YCBCR2RGB 1
+#define CSC_BT601_VIDEO_RGB2YCBCR 2
+#define CSC_BT601_VIDEO_YCBCR2RGB 3
+#define CSC_BT709_IMAGE_RGB2YCBCR 4
+#define CSC_BT709_IMAGE_YCBCR2RGB 5
+#define CSC_BT709_VIDEO_RGB2YCBCR 6
+#define CSC_BT709_VIDEO_YCBCR2RGB 7
+#define CSC_BT2020_IMAGE_RGB2YCBCR 8
+#define CSC_BT2020_IMAGE_YCBCR2RGB 9
+#define CSC_BT2020_VIDEO_RGB2YCBCR 10
+#define CSC_BT2020_VIDEO_YCBCR2RGB 11
+#define CSC_WORK_ENABLE BIT(0)
+
+/* RSZ registers */
+#define RSZ_SRC_CFG 0x00
+#define RSZ_DEST_CFG 0x04
+#define RSZ_ENABLE_CFG 0x14
+
+#define RSZ_VER_SHIFT 16
+#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
+#define RSZ_HOR_SHIFT 0
+#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
+
+#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
+#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
+
+/* HBSC registers */
+#define HBSC_SATURATION 0x00
+#define HBSC_HUE 0x04
+#define HBSC_BRIGHT 0x08
+#define HBSC_CONTRAST 0x0c
+#define HBSC_THRESHOLD_COL1 0x10
+#define HBSC_THRESHOLD_COL2 0x14
+#define HBSC_THRESHOLD_COL3 0x18
+#define HBSC_CTRL0 0x28
+#define HBSC_CTRL_EN BIT(2)
+
+#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
new file mode 100644
index 000000000000..73fe15c17c32
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_plane.h"
+#include "zx_vou.h"
+#include "zx_vou_regs.h"
+
+#define GL_NUM 2
+#define VL_NUM 3
+
+enum vou_chn_type {
+ VOU_CHN_MAIN,
+ VOU_CHN_AUX,
+};
+
+struct zx_crtc_regs {
+ u32 fir_active;
+ u32 fir_htiming;
+ u32 fir_vtiming;
+ u32 timing_shift;
+ u32 timing_pi_shift;
+};
+
+static const struct zx_crtc_regs main_crtc_regs = {
+ .fir_active = FIR_MAIN_ACTIVE,
+ .fir_htiming = FIR_MAIN_H_TIMING,
+ .fir_vtiming = FIR_MAIN_V_TIMING,
+ .timing_shift = TIMING_MAIN_SHIFT,
+ .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
+};
+
+static const struct zx_crtc_regs aux_crtc_regs = {
+ .fir_active = FIR_AUX_ACTIVE,
+ .fir_htiming = FIR_AUX_H_TIMING,
+ .fir_vtiming = FIR_AUX_V_TIMING,
+ .timing_shift = TIMING_AUX_SHIFT,
+ .timing_pi_shift = TIMING_AUX_PI_SHIFT,
+};
+
+struct zx_crtc_bits {
+ u32 polarity_mask;
+ u32 polarity_shift;
+ u32 int_frame_mask;
+ u32 tc_enable;
+ u32 gl_enable;
+};
+
+static const struct zx_crtc_bits main_crtc_bits = {
+ .polarity_mask = MAIN_POL_MASK,
+ .polarity_shift = MAIN_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_MAIN_FRAME,
+ .tc_enable = MAIN_TC_EN,
+ .gl_enable = OSD_CTRL0_GL0_EN,
+};
+
+static const struct zx_crtc_bits aux_crtc_bits = {
+ .polarity_mask = AUX_POL_MASK,
+ .polarity_shift = AUX_POL_SHIFT,
+ .int_frame_mask = TIMING_INT_AUX_FRAME,
+ .tc_enable = AUX_TC_EN,
+ .gl_enable = OSD_CTRL0_GL1_EN,
+};
+
+struct zx_crtc {
+ struct drm_crtc crtc;
+ struct drm_plane *primary;
+ struct zx_vou_hw *vou;
+ void __iomem *chnreg;
+ const struct zx_crtc_regs *regs;
+ const struct zx_crtc_bits *bits;
+ enum vou_chn_type chn_type;
+ struct clk *pixclk;
+};
+
+#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
+
+struct zx_vou_hw {
+ struct device *dev;
+ void __iomem *osd;
+ void __iomem *timing;
+ void __iomem *vouctl;
+ void __iomem *otfppu;
+ void __iomem *dtrc;
+ struct clk *axi_clk;
+ struct clk *ppu_clk;
+ struct clk *main_clk;
+ struct clk *aux_clk;
+ struct zx_crtc *main_crtc;
+ struct zx_crtc *aux_crtc;
+};
+
+static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+
+ return zcrtc->vou;
+}
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
+ u32 data_sel_shift = inf->id << 1;
+
+ /* Select data format */
+ zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
+ inf->data_sel << data_sel_shift);
+
+ /* Select channel */
+ zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
+ zcrtc->chn_type << inf->id);
+
+ /* Select interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
+ is_main ? 0 : inf->clocks_sel_bits);
+
+ /* Enable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
+ inf->clocks_en_bits);
+
+ /* Enable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+}
+
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+{
+ struct zx_vou_hw *vou = crtc_to_vou(crtc);
+
+ /* Disable the device */
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+
+ /* Disable interface clocks */
+ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
+}
+
+static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
+{
+ zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
+}
+
+static void zx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ const struct zx_crtc_regs *regs = zcrtc->regs;
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct videomode vm;
+ u32 pol = 0;
+ u32 val;
+ int ret;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ /* Set up timing parameters */
+ val = V_ACTIVE(vm.vactive - 1);
+ val |= H_ACTIVE(vm.hactive - 1);
+ zx_writel(vou->timing + regs->fir_active, val);
+
+ val = SYNC_WIDE(vm.hsync_len - 1);
+ val |= BACK_PORCH(vm.hback_porch - 1);
+ val |= FRONT_PORCH(vm.hfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_htiming, val);
+
+ val = SYNC_WIDE(vm.vsync_len - 1);
+ val |= BACK_PORCH(vm.vback_porch - 1);
+ val |= FRONT_PORCH(vm.vfront_porch - 1);
+ zx_writel(vou->timing + regs->fir_vtiming, val);
+
+ /* Set up polarities */
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
+ pol |= 1 << POL_VSYNC_SHIFT;
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
+ pol |= 1 << POL_HSYNC_SHIFT;
+
+ zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
+ pol << bits->polarity_shift);
+
+ /* Set up the SHIFT registers by following what the ZTE BSP does */
+ zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+ zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
+
+ /* Enable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
+ bits->tc_enable);
+
+ /* Configure channel screen size */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
+ vm.hactive << CHN_SCREEN_W_SHIFT);
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
+ vm.vactive << CHN_SCREEN_H_SHIFT);
+
+ /* Update channel */
+ vou_chn_set_update(zcrtc);
+
+ /* Enable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
+
+ /* Enable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
+ bits->gl_enable);
+
+ drm_crtc_vblank_on(crtc);
+
+ ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(zcrtc->pixclk);
+ if (ret)
+ DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
+}
+
+static void zx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ struct zx_vou_hw *vou = zcrtc->vou;
+
+ clk_disable_unprepare(zcrtc->pixclk);
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable Graphic Layer */
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
+
+ /* Disable channel */
+ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
+
+ /* Disable TIMING_CTRL */
+ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
+}
+
+static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ if (!event)
+ return;
+
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
+ .enable = zx_crtc_enable,
+ .disable = zx_crtc_disable,
+ .atomic_flush = zx_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs zx_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
+ enum vou_chn_type chn_type)
+{
+ struct device *dev = vou->dev;
+ struct zx_layer_data data;
+ struct zx_crtc *zcrtc;
+ int ret;
+
+ zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
+ if (!zcrtc)
+ return -ENOMEM;
+
+ zcrtc->vou = vou;
+ zcrtc->chn_type = chn_type;
+
+ if (chn_type == VOU_CHN_MAIN) {
+ data.layer = vou->osd + MAIN_GL_OFFSET;
+ data.csc = vou->osd + MAIN_CSC_OFFSET;
+ data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
+ data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
+ zcrtc->regs = &main_crtc_regs;
+ zcrtc->bits = &main_crtc_bits;
+ } else {
+ data.layer = vou->osd + AUX_GL_OFFSET;
+ data.csc = vou->osd + AUX_CSC_OFFSET;
+ data.hbsc = vou->osd + AUX_HBSC_OFFSET;
+ data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
+ zcrtc->regs = &aux_crtc_regs;
+ zcrtc->bits = &aux_crtc_bits;
+ }
+
+ zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
+ "main_wclk" : "aux_wclk");
+ if (IS_ERR(zcrtc->pixclk)) {
+ ret = PTR_ERR(zcrtc->pixclk);
+ DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
+ return ret;
+ }
+
+ zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(zcrtc->primary)) {
+ ret = PTR_ERR(zcrtc->primary);
+ DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
+ &zx_crtc_funcs, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
+
+ if (chn_type == VOU_CHN_MAIN)
+ vou->main_crtc = zcrtc;
+ else
+ vou->aux_crtc = zcrtc;
+
+ return 0;
+}
+
+static inline struct drm_crtc *zx_find_crtc(struct drm_device *drm, int pipe)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (crtc->index == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+ u32 int_frame_mask;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return 0;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+ int_frame_mask = zcrtc->bits->int_frame_mask;
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
+ int_frame_mask);
+
+ return 0;
+}
+
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct drm_crtc *crtc;
+ struct zx_crtc *zcrtc;
+ struct zx_vou_hw *vou;
+
+ crtc = zx_find_crtc(drm, pipe);
+ if (!crtc)
+ return;
+
+ vou = crtc_to_vou(crtc);
+ zcrtc = to_zx_crtc(crtc);
+
+ zx_writel_mask(vou->timing + TIMING_INT_CTRL,
+ zcrtc->bits->int_frame_mask, 0);
+}
+
+static irqreturn_t vou_irq_handler(int irq, void *dev_id)
+{
+ struct zx_vou_hw *vou = dev_id;
+ u32 state;
+
+ /* Handle TIMING_CTRL frame interrupts */
+ state = zx_readl(vou->timing + TIMING_INT_STATE);
+ zx_writel(vou->timing + TIMING_INT_STATE, state);
+
+ if (state & TIMING_INT_MAIN_FRAME)
+ drm_crtc_handle_vblank(&vou->main_crtc->crtc);
+
+ if (state & TIMING_INT_AUX_FRAME)
+ drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
+
+ /* Handle OSD interrupts */
+ state = zx_readl(vou->osd + OSD_INT_STA);
+ zx_writel(vou->osd + OSD_INT_CLRSTA, state);
+
+ if (state & OSD_INT_MAIN_UPT) {
+ vou_chn_set_update(vou->main_crtc);
+ zx_plane_set_update(vou->main_crtc->primary);
+ }
+
+ if (state & OSD_INT_AUX_UPT) {
+ vou_chn_set_update(vou->aux_crtc);
+ zx_plane_set_update(vou->aux_crtc->primary);
+ }
+
+ if (state & OSD_INT_ERROR)
+ DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
+
+ return IRQ_HANDLED;
+}
+
+static void vou_dtrc_init(struct zx_vou_hw *vou)
+{
+ /* Clear bit for bypass by ID */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
+ TILE2RASTESCAN_BYPASS_MODE, 0);
+
+ /* Select ARIDR mode */
+ zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
+ DETILE_ARID_IN_ARIDR);
+
+ /* Bypass decompression for both frames */
+ zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+ zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
+ DTRC_DECOMPRESS_BYPASS);
+
+ /* Set up ARID register */
+ zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
+ DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
+}
+
+static void vou_hw_init(struct zx_vou_hw *vou)
+{
+ /* Set GL0 to main channel and GL1 to aux channel */
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
+ zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
+ OSD_CTRL0_GL1_SEL);
+
+ /* Release reset for all VOU modules */
+ zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
+
+ /* Select main clock for GL0 and aux clock for GL1 module */
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
+ VOU_CLK_GL1_SEL);
+
+ /* Enable clock auto-gating for all VOU modules */
+ zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
+
+ /* Enable all VOU module clocks */
+ zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
+
+ /* Clear both OSD and TIMING_CTRL interrupt state */
+ zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
+ zx_writel(vou->timing + TIMING_INT_STATE, ~0);
+
+ /* Enable OSD and TIMING_CTRL interrupts */
+ zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
+ zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
+
+ /* Select GPC as input to gl/vl scaler as a sane default setting */
+ zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
+
+ /*
+ * The channel and layer logic needs to be reset at each frame start
+ * for the VOU to work properly.
+ */
+ zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
+
+ vou_dtrc_init(vou);
+}
+
+static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct zx_vou_hw *vou;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
+ if (!vou)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
+ vou->osd = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->osd)) {
+ ret = PTR_ERR(vou->osd);
+ DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
+ vou->timing = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->timing)) {
+ ret = PTR_ERR(vou->timing);
+ DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
+ vou->dtrc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->dtrc)) {
+ ret = PTR_ERR(vou->dtrc);
+ DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
+ vou->vouctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->vouctl)) {
+ ret = PTR_ERR(vou->vouctl);
+ DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
+ ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
+ vou->otfppu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vou->otfppu)) {
+ ret = PTR_ERR(vou->otfppu);
+ DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vou->axi_clk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(vou->axi_clk)) {
+ ret = PTR_ERR(vou->axi_clk);
+ DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
+ if (IS_ERR(vou->ppu_clk)) {
+ ret = PTR_ERR(vou->ppu_clk);
+ DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->axi_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vou->ppu_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
+ goto disable_axi_clk;
+ }
+
+ vou->dev = dev;
+ dev_set_drvdata(dev, vou);
+
+ vou_hw_init(vou);
+
+ ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
+ ret);
+ goto disable_ppu_clk;
+ }
+
+ return 0;
+
+disable_ppu_clk:
+ clk_disable_unprepare(vou->ppu_clk);
+disable_axi_clk:
+ clk_disable_unprepare(vou->axi_clk);
+ return ret;
+}
+
+static void zx_crtc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct zx_vou_hw *vou = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vou->axi_clk);
+ clk_disable_unprepare(vou->ppu_clk);
+}
+
+static const struct component_ops zx_crtc_component_ops = {
+ .bind = zx_crtc_bind,
+ .unbind = zx_crtc_unbind,
+};
+
+static int zx_crtc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_crtc_component_ops);
+}
+
+static int zx_crtc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_crtc_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_crtc_of_match[] = {
+ { .compatible = "zte,zx296718-dpc", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
+
+struct platform_driver zx_crtc_driver = {
+ .probe = zx_crtc_probe,
+ .remove = zx_crtc_remove,
+ .driver = {
+ .name = "zx-crtc",
+ .of_match_table = zx_crtc_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
new file mode 100644
index 000000000000..349e06cd86f4
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_H__
+#define __ZX_VOU_H__
+
+#define VOU_CRTC_MASK 0x3
+
+/* VOU output interfaces */
+enum vou_inf_id {
+ VOU_HDMI = 0,
+ VOU_RGB_LCD = 1,
+ VOU_TV_ENC = 2,
+ VOU_MIPI_DSI = 3,
+ VOU_LVDS = 4,
+ VOU_VGA = 5,
+};
+
+enum vou_inf_data_sel {
+ VOU_YUV444 = 0,
+ VOU_RGB_101010 = 1,
+ VOU_RGB_888 = 2,
+ VOU_RGB_666 = 3,
+};
+
+struct vou_inf {
+ enum vou_inf_id id;
+ enum vou_inf_data_sel data_sel;
+ u32 clocks_en_bits;
+ u32 clocks_sel_bits;
+};
+
+void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
+void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+
+int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
+
+#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
new file mode 100644
index 000000000000..f44e7a4ae441
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_vou_regs.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Linaro Ltd.
+ * Copyright 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_VOU_REGS_H__
+#define __ZX_VOU_REGS_H__
+
+/* Sub-module offset */
+#define MAIN_GL_OFFSET 0x130
+#define MAIN_CSC_OFFSET 0x580
+#define MAIN_HBSC_OFFSET 0x820
+#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
+
+#define AUX_GL_OFFSET 0x200
+#define AUX_CSC_OFFSET 0x5d0
+#define AUX_HBSC_OFFSET 0x860
+#define AUX_RSZ_OFFSET 0x800
+
+/* OSD (GPC_GLOBAL) registers */
+#define OSD_INT_STA 0x04
+#define OSD_INT_CLRSTA 0x08
+#define OSD_INT_MSK 0x0c
+#define OSD_INT_AUX_UPT BIT(14)
+#define OSD_INT_MAIN_UPT BIT(13)
+#define OSD_INT_GL1_LBW BIT(10)
+#define OSD_INT_GL0_LBW BIT(9)
+#define OSD_INT_VL2_LBW BIT(8)
+#define OSD_INT_VL1_LBW BIT(7)
+#define OSD_INT_VL0_LBW BIT(6)
+#define OSD_INT_BUS_ERR BIT(3)
+#define OSD_INT_CFG_ERR BIT(2)
+#define OSD_INT_ERROR (\
+ OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
+ OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
+ OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
+)
+#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
+#define OSD_CTRL0 0x10
+#define OSD_CTRL0_GL0_EN BIT(7)
+#define OSD_CTRL0_GL0_SEL BIT(6)
+#define OSD_CTRL0_GL1_EN BIT(5)
+#define OSD_CTRL0_GL1_SEL BIT(4)
+#define OSD_RST_CLR 0x1c
+#define RST_PER_FRAME BIT(19)
+
+/* Main/Aux channel registers */
+#define OSD_MAIN_CHN 0x470
+#define OSD_AUX_CHN 0x4d0
+#define CHN_CTRL0 0x00
+#define CHN_ENABLE BIT(0)
+#define CHN_CTRL1 0x04
+#define CHN_SCREEN_W_SHIFT 18
+#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
+#define CHN_SCREEN_H_SHIFT 5
+#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
+#define CHN_UPDATE 0x08
+
+/* TIMING_CTRL registers */
+#define TIMING_TC_ENABLE 0x04
+#define AUX_TC_EN BIT(1)
+#define MAIN_TC_EN BIT(0)
+#define FIR_MAIN_ACTIVE 0x08
+#define FIR_AUX_ACTIVE 0x0c
+#define V_ACTIVE_SHIFT 16
+#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
+#define H_ACTIVE_SHIFT 0
+#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
+#define FIR_MAIN_H_TIMING 0x10
+#define FIR_MAIN_V_TIMING 0x14
+#define FIR_AUX_H_TIMING 0x18
+#define FIR_AUX_V_TIMING 0x1c
+#define SYNC_WIDE_SHIFT 22
+#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
+#define BACK_PORCH_SHIFT 11
+#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
+#define FRONT_PORCH_SHIFT 0
+#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
+#define TIMING_CTRL 0x20
+#define AUX_POL_SHIFT 3
+#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
+#define MAIN_POL_SHIFT 0
+#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
+#define POL_DE_SHIFT 2
+#define POL_VSYNC_SHIFT 1
+#define POL_HSYNC_SHIFT 0
+#define TIMING_INT_CTRL 0x24
+#define TIMING_INT_STATE 0x28
+#define TIMING_INT_AUX_FRAME BIT(3)
+#define TIMING_INT_MAIN_FRAME BIT(1)
+#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
+#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
+#define TIMING_INT_ENABLE (\
+ TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
+ TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
+)
+#define TIMING_MAIN_SHIFT 0x2c
+#define TIMING_AUX_SHIFT 0x30
+#define H_SHIFT_VAL 0x0048
+#define TIMING_MAIN_PI_SHIFT 0x68
+#define TIMING_AUX_PI_SHIFT 0x6c
+#define H_PI_SHIFT_VAL 0x000f
+
+#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
+#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
+
+#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
+#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
+#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
+
+/* DTRC registers */
+#define DTRC_F0_CTRL 0x2c
+#define DTRC_F1_CTRL 0x5c
+#define DTRC_DECOMPRESS_BYPASS BIT(17)
+#define DTRC_DETILE_CTRL 0x68
+#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
+#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
+#define DETILE_ARID_ALL 0
+#define DETILE_ARID_IN_ARIDR 1
+#define DETILE_ARID_BYP_BUT_ARIDR 2
+#define DETILE_ARID_IN_ARIDR2 3
+#define DTRC_ARID 0x6c
+#define DTRC_ARID3_SHIFT 24
+#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
+#define DTRC_ARID2_SHIFT 16
+#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
+#define DTRC_ARID1_SHIFT 8
+#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
+#define DTRC_ARID0_SHIFT 0
+#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
+#define DTRC_DEC2DDR_ARID 0x70
+
+#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
+#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
+#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
+#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
+
+/* VOU_CTRL registers */
+#define VOU_INF_EN 0x00
+#define VOU_INF_CH_SEL 0x04
+#define VOU_INF_DATA_SEL 0x08
+#define VOU_SOFT_RST 0x14
+#define VOU_CLK_SEL 0x18
+#define VOU_CLK_GL1_SEL BIT(5)
+#define VOU_CLK_GL0_SEL BIT(4)
+#define VOU_CLK_REQEN 0x20
+#define VOU_CLK_EN 0x24
+
+/* OTFPPU_CTRL registers */
+#define OTFPPU_RSZ_DATA_SOURCE 0x04
+
+#endif /* __ZX_VOU_REGS_H__ */
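
For illustration only: the shift/mask helpers above are meant to pack timing words such as FIR_MAIN_H_TIMING. A sketch of how the horizontal word could be assembled from a struct drm_display_mode; the helper below is hypothetical.

/*
 * Pack a horizontal timing word from a drm_display_mode using the
 * helper macros defined above (sync width, back porch, front porch).
 */
static u32 example_h_timing(const struct drm_display_mode *mode)
{
	u32 hsw = mode->hsync_end - mode->hsync_start;
	u32 hbp = mode->htotal - mode->hsync_end;
	u32 hfp = mode->hsync_start - mode->hdisplay;

	return SYNC_WIDE(hsw) | BACK_PORCH(hbp) | FRONT_PORCH(hfp);
}
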
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 5220510f39da..06dd4f85125f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2013, NVIDIA Corporation.
+ * Copyright (c) 2012-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -120,6 +120,7 @@ struct host1x {
struct host1x_syncpt *nop_sp;
+ struct mutex syncpt_mutex;
struct mutex chlist_mutex;
struct host1x_channel chlist;
unsigned long allocated_channels;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index a91b7c4a6110..92c3df933303 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -1,7 +1,7 @@
/*
* Tegra host1x Job
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -539,9 +539,12 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
g->base = job->gather_addr_phys[i];
- for (j = i + 1; j < job->num_gathers; j++)
- if (job->gathers[j].bo == g->bo)
+ for (j = i + 1; j < job->num_gathers; j++) {
+ if (job->gathers[j].bo == g->bo) {
job->gathers[j].handled = true;
+ job->gathers[j].base = g->base;
+ }
+ }
err = do_relocs(job, g->bo);
if (err)
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 95589328ad52..25c11a85050b 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -1,7 +1,7 @@
/*
* Tegra host1x Syncpoints
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2015, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -61,22 +61,24 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
struct host1x_syncpt *sp = host->syncpt;
char *name;
+ mutex_lock(&host->syncpt_mutex);
+
for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
;
if (i >= host->info->nb_pts)
- return NULL;
+ goto unlock;
if (flags & HOST1X_SYNCPT_HAS_BASE) {
sp->base = host1x_syncpt_base_request(host);
if (!sp->base)
- return NULL;
+ goto unlock;
}
name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
- return NULL;
+ goto free_base;
sp->dev = dev;
sp->name = name;
@@ -86,7 +88,15 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
else
sp->client_managed = false;
+ mutex_unlock(&host->syncpt_mutex);
return sp;
+
+free_base:
+ host1x_syncpt_base_free(sp->base);
+ sp->base = NULL;
+unlock:
+ mutex_unlock(&host->syncpt_mutex);
+ return NULL;
}
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
@@ -378,6 +388,7 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_bases; i++)
bases[i].id = i;
+ mutex_init(&host->syncpt_mutex);
host->syncpt = syncpt;
host->bases = bases;
@@ -405,12 +416,16 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
if (!sp)
return;
+ mutex_lock(&sp->host->syncpt_mutex);
+
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
sp->dev = NULL;
sp->name = NULL;
sp->client_managed = false;
+
+ mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index aefdff95356d..08766c6e7856 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,6 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
- depends on RESET_CONTROLLER
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b9539f7c5e9a..97218af4fe75 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -88,6 +88,8 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
@@ -1284,8 +1286,11 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
- for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ /* Mask and clear all interrupts */
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+ ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
+ }
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index fcb7dc86167b..4b2b67113d92 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -417,42 +417,6 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
-void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
- u32 pixel_format, int stride, int height)
-{
- int fourcc, u_offset, v_offset;
- int uv_stride = 0;
-
- fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
- switch (fourcc) {
- case DRM_FORMAT_YUV420:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YVU420:
- uv_stride = stride / 2;
- v_offset = stride * height;
- u_offset = v_offset + (uv_stride * height / 2);
- break;
- case DRM_FORMAT_YUV422:
- uv_stride = stride / 2;
- u_offset = stride * height;
- v_offset = u_offset + (uv_stride * height);
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- uv_stride = stride;
- u_offset = stride * height;
- v_offset = 0;
- break;
- default:
- return;
- }
- ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
-
static const struct ipu_rgb def_xrgb_32 = {
.red = { .offset = 16, .length = 8, },
.green = { .offset = 8, .length = 8, },
@@ -590,6 +554,13 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
/* burst size */
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ /* pix format */
+ ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
+ /* burst size */
+ ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+ break;
case DRM_FORMAT_NV12:
/* pix format */
ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6e5ded24418..63c7292f427a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -529,6 +529,22 @@ void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&csi->lock, flags);
+
+ reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+ reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
+ reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
+ (vert ? CSI_VERT_DOWNSIZE_EN : 0);
+ ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+
+ spin_unlock_irqrestore(&csi->lock, flags);
+}
+
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
u32 pix_clk)
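
For context, a sketch of how a capture driver might call the new ipu_csi_set_downsize() helper; the half-size policy below is hypothetical and not part of the patch.

/*
 * Hypothetical policy: enable the 2:1 downsizer on each axis when the
 * target frame is at most half the source frame in that axis.
 */
static void example_apply_downsize(struct ipu_csi *csi,
				   const struct v4l2_rect *src,
				   const struct v4l2_rect *dst)
{
	ipu_csi_set_downsize(csi,
			     dst->width  <= src->width / 2,
			     dst->height <= src->height / 2);
}
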
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index a8d87ddd8a17..d2f1bd9d3deb 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -535,7 +535,7 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
return -EINVAL;
}
- dev_warn(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+ dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
return 0;
}
EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 2ba7d437a2af..805b6fa7b5f4 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
complete, complete_context);
if (IS_ERR(ctx))
- return ERR_PTR(PTR_ERR(ctx));
+ return ERR_CAST(ctx);
run = kzalloc(sizeof(*run), GFP_KERNEL);
if (!run) {
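
The ERR_CAST() change above is the standard idiom for forwarding an error pointer across pointer types without bouncing through an int. A minimal illustration; the helper is hypothetical.

/*
 * ERR_CAST(ctx) preserves the encoded error while changing the pointer
 * type, instead of the ERR_PTR(PTR_ERR(ctx)) round trip.
 */
static struct ipu_image_convert_run *
example_propagate(struct ipu_image_convert_ctx *ctx)
{
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return NULL;	/* success path elided */
}
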
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1887f199ccb7..0f5b2dd24507 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -31,6 +31,10 @@
#define pr_fmt(fmt) "vgaarb: " fmt
+#define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg)
+#define vgaarb_err(dev, fmt, arg...) dev_err(dev, "vgaarb: " fmt, ##arg)
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -188,6 +192,7 @@ static void vga_check_first_use(void)
static struct vga_device *__vga_tryget(struct vga_device *vgadev,
unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int wants, legacy_wants, match;
struct vga_device *conflict;
unsigned int pci_bits;
@@ -203,8 +208,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
- pr_debug("%s: %d\n", __func__, rsrc);
- pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
+ vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc);
+ vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@@ -336,9 +341,10 @@ lock_them:
static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
+ struct device *dev = &vgadev->pdev->dev;
unsigned int old_locks = vgadev->locks;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@@ -611,7 +617,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Allocate structure */
vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
- pr_err("failed to allocate pci device\n");
+ vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n");
/*
* What to do on allocation failure? For now, let's just do
* nothing, I'm not sure there is anything saner to be done.
@@ -663,7 +669,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
*/
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
+ vgaarb_info(&pdev->dev, "setting as boot VGA device\n");
vga_set_default_device(pdev);
}
@@ -672,8 +678,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
- pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
- pci_name(pdev),
+ vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
vga_iostate_to_str(vgadev->locks));
@@ -725,6 +730,7 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
+ struct device *dev = &vgadev->pdev->dev;
int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
@@ -732,8 +738,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
- pci_name(vgadev->pdev),
+ vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n",
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
@@ -754,7 +759,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
new_decodes & VGA_RSRC_LEGACY_MASK)
vga_decode_count++;
- pr_debug("decoding count now is: %d\n", vga_decode_count);
+ vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count);
}
static void __vga_set_legacy_decoding(struct pci_dev *pdev,
@@ -1022,21 +1027,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
unsigned int io_state;
- char *kbuf, *curr_pos;
+ char kbuf[64], *curr_pos;
size_t remaining = count;
int ret_val;
int i;
-
- kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buf, count)) {
- kfree(kbuf);
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+ if (copy_from_user(kbuf, buf, count))
return -EFAULT;
- }
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
@@ -1189,24 +1189,25 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
ret_val = -EPROTO;
goto done;
}
- pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
- domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- pr_debug("pdev %p\n", pdev);
if (!pdev) {
- pr_err("invalid PCI address %x:%x:%x\n",
- domain, bus, devfn);
+ pr_debug("invalid PCI address %04x:%02x:%02x.%x\n",
+ domain, bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
ret_val = -ENODEV;
goto done;
}
+
+ pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos,
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ pdev);
}
vgadev = vgadev_find(pdev);
pr_debug("vgadev %p\n", vgadev);
if (vgadev == NULL) {
if (pdev) {
- pr_err("this pci device is not a vga device\n");
+ vgaarb_dbg(&pdev->dev, "not a VGA device\n");
pci_dev_put(pdev);
}
@@ -1226,7 +1227,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
}
}
if (i == MAX_USER_CARDS) {
- pr_err("maximum user cards (%d) number reached!\n",
+ vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
@@ -1259,11 +1260,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
/* If we got here, the message written is not part of the protocol! */
- kfree(kbuf);
return -EPROTO;
done:
- kfree(kbuf);
return ret_val;
}
@@ -1317,8 +1316,8 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
- pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
- uc->io_cnt, uc->mem_cnt);
+ vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
while (uc->mem_cnt--)
@@ -1371,7 +1370,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
- pr_debug("%s\n", __func__);
+ vgaarb_dbg(dev, "%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the
@@ -1423,9 +1422,8 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- pr_info("loaded\n");
-
list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
* Override vga_arbiter_add_pci_device()'s I/O based detection
@@ -1458,21 +1456,19 @@ static int __init vga_arb_device_init(void)
continue;
if (!vga_default_device())
- pr_info("setting as boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "setting as boot device\n");
else if (vgadev->pdev != vga_default_device())
- pr_info("overriding boot device: PCI:%s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "overriding boot device\n");
vga_set_default_device(vgadev->pdev);
}
#endif
if (vgadev->bridge_has_one_vga)
- pr_info("bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "bridge control possible\n");
else
- pr_info("no bridge control possible %s\n",
- pci_name(vgadev->pdev));
+ vgaarb_info(dev, "no bridge control possible\n");
}
+
+ pr_info("loaded\n");
return rc;
}
subsys_initcall(vga_arb_device_init);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c0523b..4070b7386e9d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@ config HID_ASUS
tristate "Asus"
depends on I2C_HID
---help---
- Support for Asus notebook built-in keyboard via i2c.
+ Support for Asus notebook built-in keyboard and touchpad via i2c.
Supported devices:
- EeeBook X205TA
@@ -214,7 +214,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@ config HID_MAGICMOUSE
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+config HID_MAYFLASH
+ tristate "Mayflash game controller adapter force feedback"
+ depends on HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have HJZ Mayflash PS3 game controller adapters
+ and want to enable force feedback support.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -861,6 +869,13 @@ config THRUSTMASTER_FF
a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
Rumble Force or Force Feedback Wheel.
+config HID_UDRAW_PS3
+ tristate "THQ PS3 uDraw tablet"
+ depends on HID
+ ---help---
+ Say Y here if you want to use the THQ uDraw gaming tablet for
+ the PS3.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b5785fd2..4d111f23e801 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec4f2e1..d40ed9fdf68d 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
* This module based on hid-ortek by
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
+ *
+ * This module has been updated to add support for Asus i2c touchpad.
+ *
+ * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
*/
/*
@@ -20,16 +26,287 @@
* any later version.
*/
-#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
+#define QUIRK_FIX_NOTEBOOK_REPORT BIT(0)
+#define QUIRK_NO_INIT_REPORTS BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
+#define QUIRK_IS_MULTITOUCH BIT(3)
+
+#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
+ QUIRK_SKIP_INPUT_MAPPING | \
+ QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+ unsigned long quirks;
+ struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+ int toolType, u8 *data)
+{
+ int touch_major, pressure;
+ int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ if (toolType == MT_TOOL_PALM) {
+ touch_major = MAX_TOUCH_MAJOR;
+ pressure = MAX_PRESSURE;
+ } else {
+ touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+ pressure = data[4] & CONTACT_PRESSURE_MASK;
+ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
+static void asus_report_tool_width(struct input_dev *input)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *oldest;
+ int oldid, count, i;
+
+ oldest = NULL;
+ oldid = mt->trkid;
+ count = 0;
+
+ for (i = 0; i < mt->num_slots; ++i) {
+ struct input_mt_slot *ps = &mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (id < 0)
+ continue;
+ if ((id - oldid) & TRKID_SGN) {
+ oldest = ps;
+ oldid = id;
+ }
+ count++;
+ }
+
+ if (oldest) {
+ input_report_abs(input, ABS_TOOL_WIDTH,
+ input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+ }
+}
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+ int i;
+ u8 *contactData = data + 2;
+
+ for (i = 0; i < MAX_CONTACTS; i++) {
+ bool down = !!(data[1] & BIT(i+3));
+ int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+ MT_TOOL_PALM : MT_TOOL_FINGER;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, toolType, down);
+
+ if (down) {
+ asus_report_contact_down(input, toolType, contactData);
+ contactData += CONTACT_DATA_SIZE;
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(input);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+ data[0] == INPUT_REPORT_ID &&
+ size == INPUT_REPORT_SIZE) {
+ asus_report_input(drvdata->input, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ int ret;
+ struct input_dev *input = hi->input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+ if (ret) {
+ hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+ return ret;
+ }
+
+ drvdata->input = input;
+ }
+
+ return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+ /* Don't map anything from the HID report.
+ * We do it all manually in asus_input_configured
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+ int ret;
+ const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+ unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(dmabuf);
+
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ return asus_start_multitouch(hdev);
+
+ return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct asus_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "Can't alloc Asus descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drvdata);
+
+ drvdata->quirks = id->driver_data;
+
+ if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Asus hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!drvdata->input) {
+ hid_err(hdev, "Asus input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
+ drvdata->input->name = "Asus TouchPad";
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ ret = asus_start_multitouch(hdev);
+ if (ret)
+ goto err_stop_hw;
+ }
+
+ return 0;
+err_stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+ *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id asus_devices[] = {
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
static struct hid_driver asus_driver = {
- .name = "asus",
- .id_table = asus_devices,
- .report_fixup = asus_report_fixup
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup,
+ .probe = asus_probe,
+ .input_mapping = asus_input_mapping,
+ .input_configured = asus_input_configured,
+#ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
+#endif
+ .raw_event = asus_raw_event
};
module_hid_driver(asus_driver);
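
A recurring pattern in this series (asus_start_multitouch() above, and the hid-lg and hid-magicmouse hunks below): buffers passed to hid_hw_raw_request() are duplicated onto the heap first, because the transport driver may use them for DMA and stack buffers are not safe for that. A sketch of the idiom; the report ID and payload are placeholders, not a real device protocol.

/*
 * Send a feature report from a heap buffer.  The template bytes are
 * placeholders for illustration only.
 */
static int example_send_feature(struct hid_device *hdev)
{
	static const u8 tmpl[] = { 0x0d, 0x00, 0x03, 0x01, 0x00 };
	u8 *buf = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(tmpl),
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);

	return ret < 0 ? ret : 0;
}
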
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c701076f..cff060b56da9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 086d8a507157..f31a778b0851 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
* http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
*/
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/i2c.h>
@@ -32,6 +33,11 @@
#include <linux/usb/ch9.h>
#include "hid-ids.h"
+#define CP2112_REPORT_MAX_LENGTH 64
+#define CP2112_GPIO_CONFIG_LENGTH 5
+#define CP2112_GPIO_GET_LENGTH 2
+#define CP2112_GPIO_SET_LENGTH 3
+
enum {
CP2112_GPIO_CONFIG = 0x02,
CP2112_GPIO_GET = 0x03,
@@ -161,6 +167,14 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ u8 *in_out_buffer;
+ spinlock_t lock;
+
+ struct gpio_desc *desc[8];
+ bool gpio_poll;
+ struct delayed_work gpio_poll_worker;
+ unsigned long irq_mask;
+ u8 gpio_prev_state;
};
static int gpio_push_pull = 0xFF;
@@ -171,62 +185,97 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
buf[1] &= ~(1 << offset);
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
- return 0;
+ ret = 0;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret <= 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[3];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
+ CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
}
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[2];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- return ret;
+ ret = ret < 0 ? ret : -EIO;
+ goto exit;
}
- return (buf[1] >> offset) & 1;
+ ret = buf[1];
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
+}
+
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = cp2112_gpio_get_all(chip);
+ if (ret < 0)
+ return ret;
+
+ return (ret >> offset) & 1;
}
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +283,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +317,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
cp2112_gpio_set(chip, offset, value);
return 0;
+
+fail:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1000,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
}
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+ struct cp2112_device *dev = container_of(work, struct cp2112_device,
+ gpio_poll_worker.work);
+ struct irq_data *d;
+ u8 gpio_mask;
+ u8 virqs = (u8)dev->irq_mask;
+ u32 irq_type;
+ int irq, virq, ret;
+
+ ret = cp2112_gpio_get_all(&dev->gc);
+ if (ret == -ENODEV) /* the hardware has been disconnected */
+ return;
+ if (ret < 0)
+ goto exit;
+
+ gpio_mask = ret;
+
+ while (virqs) {
+ virq = ffs(virqs) - 1;
+ virqs &= ~BIT(virq);
+
+ if (!dev->gc.to_irq)
+ break;
+
+ irq = dev->gc.to_irq(&dev->gc, virq);
+
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ irq_type = irqd_get_trigger_type(d);
+
+ if (gpio_mask & BIT(virq)) {
+ /* Level High */
+
+ if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+ !(dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ } else {
+ /* Level Low */
+
+ if (irq_type & IRQ_TYPE_LEVEL_LOW)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+ (dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ }
+ }
+
+ dev->gpio_prev_state = gpio_mask;
+
+exit:
+ if (dev->gpio_poll)
+ schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+ cp2112_gpio_direction_input(gc, d->hwirq);
+
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+ }
+
+ cp2112_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+{
+ int ret;
+
+ if (dev->desc[pin])
+ return -EINVAL;
+
+ dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+ "HID/I2C:Event");
+ if (IS_ERR(dev->desc[pin])) {
+ dev_err(dev->gc.parent, "Failed to request GPIO\n");
+ return PTR_ERR(dev->desc[pin]);
+ }
+
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+ goto err_desc;
+ }
+
+ ret = gpiod_to_irq(dev->desc[pin]);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+ goto err_lock;
+ }
+
+ return ret;
+
+err_lock:
+ gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+ gpiochip_free_own_desc(dev->desc[pin]);
+ dev->desc[pin] = NULL;
+ return ret;
+}
+
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
@@ -1007,6 +1226,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct cp2112_smbus_config_report config;
int ret;
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
+ GFP_KERNEL);
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -1063,12 +1293,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_power_normal;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err_power_normal;
- }
-
hid_set_drvdata(hdev, (void *)dev);
dev->hdev = hdev;
dev->adap.owner = THIS_MODULE;
@@ -1087,7 +1311,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret) {
hid_err(hdev, "error registering i2c adapter\n");
- goto err_free_dev;
+ goto err_power_normal;
}
hid_dbg(hdev, "adapter registered\n");
@@ -1117,14 +1341,21 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
+ ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+ goto err_sysfs_remove;
+ }
+
return ret;
+err_sysfs_remove:
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
i2c_del_adapter(&dev->adap);
-err_free_dev:
- kfree(dev);
err_power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
err_hid_close:
@@ -1137,10 +1368,22 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
+ int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
- gpiochip_remove(&dev->gc);
i2c_del_adapter(&dev->adap);
+
+ if (dev->gpio_poll) {
+ dev->gpio_poll = false;
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+ gpiochip_unlock_as_irq(&dev->gc, i);
+ gpiochip_free_own_desc(dev->desc[i]);
+ }
+
+ gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
* and should have waited for any pending calls to finish. It has also
@@ -1149,7 +1392,6 @@ static void cp2112_remove(struct hid_device *hdev)
*/
hid_hw_close(hdev);
hid_hw_stop(hdev);
- kfree(dev);
}
static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
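
Since the polled CP2112 GPIO interrupts added above are nested (raised from a workqueue), a consumer has to request them with a threaded handler. A sketch under that assumption; how the gpio_desc is obtained is out of scope here.

/*
 * Request the polled CP2112 GPIO interrupt with a threaded handler;
 * the trigger type is evaluated by cp2112_gpio_poll_callback().
 */
static irqreturn_t example_evt_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_evt_irq(struct device *dev,
				   struct gpio_desc *desc)
{
	int irq = gpiod_to_irq(desc);

	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(dev, irq, NULL,
					 example_evt_handler,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "cp2112-event", NULL);
}
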
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6cfb5cacc253..ec277b96eaa1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -171,6 +171,7 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD 0x0101
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -179,6 +180,7 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
@@ -314,8 +316,10 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -717,8 +721,9 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -902,6 +907,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -958,6 +965,9 @@
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -1033,6 +1043,10 @@
#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+#define USB_VENDOR_ID_WEIDA 0x2575
+#define USB_DEVICE_ID_WEIDA_8752 0xC300
+#define USB_DEVICE_ID_WEIDA_8755 0xC301
+
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..d05f903c7614 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_RX:
case ABS_RY:
case ABS_RZ:
+ case ABS_WHEEL:
case ABS_TILT_X:
case ABS_TILT_Y:
if (field->unit == 0x14) { /* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
kfree(hidinput);
}
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->id == report->id)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+ struct hid_report *report)
+{
+ int i, j;
+
+ for (i = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->maxusage; j++)
+ hidinput_configure_usage(hidinput, report->field[i],
+ report->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
{
struct hid_driver *drv = hid->driver;
struct hid_report *report;
- struct hid_input *hidinput = NULL;
- int i, j, k;
+ struct hid_input *next, *hidinput = NULL;
+ int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ /*
+ * Find the previous hidinput report attached
+ * to this report id.
+ */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+ hidinput = hidinput_match(report);
+
if (!hidinput) {
hidinput = hidinput_allocate(hid);
if (!hidinput)
goto out_unwind;
}
- for (i = 0; i < report->maxfield; i++)
- for (j = 0; j < report->field[i]->maxusage; j++)
- hidinput_configure_usage(hidinput, report->field[i],
- report->field[i]->usage + j);
-
- if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput))
- continue;
+ hidinput_configure_usages(hidinput, report);
- if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
- /* This will leave hidinput NULL, so that it
- * allocates another one if we have more inputs on
- * the same interface. Some devices (e.g. Happ's
- * UGCI) cram a lot of unrelated inputs into the
- * same interface. */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- hidinput = NULL;
- }
}
}
- if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput)) {
- /* no need to register an input device not populated */
- hidinput_cleanup_hidinput(hid, hidinput);
- hidinput = NULL;
+ list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+ if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+ !hidinput_has_been_populated(hidinput)) {
+ /* no need to register an input device not populated */
+ hidinput_cleanup_hidinput(hid, hidinput);
+ continue;
+ }
+
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_unwind;
+ if (input_register_device(hidinput->input))
+ goto out_unwind;
+ hidinput->registered = true;
}
if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
- if (hidinput) {
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- }
-
return 0;
-out_cleanup:
- list_del(&hidinput->list);
- input_free_device(hidinput->input);
- kfree(hidinput);
out_unwind:
/* unwind the ones we already registered */
hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
- input_unregister_device(hidinput->input);
+ if (hidinput->registered)
+ input_unregister_device(hidinput->input);
+ else
+ input_free_device(hidinput->input);
kfree(hidinput);
}
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 76f644deb0a7..c5c5fbe9d605 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0) {
/* insert a little delay of 10 jiffies ~ 40ms */
wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
buf[1] = 0xB2;
get_random_bytes(&buf[2], 2);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
+ kfree(buf);
}
if (drv_data->quirks & LG_FF)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d6fa496d0ca2..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- __u8 feature[] = { 0xd7, 0x01 };
+ const u8 feature[] = { 0xd7, 0x01 };
+ u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
/*
	 * Some devices respond with 'invalid report id' when feature
* report switching it into multitouch mode is sent to it.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 000000000000..d9090765a6e5
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+ struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct mf_device *mf = data;
+ int strong, weak;
+
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
+ dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
+ strong = strong * 0xff / 0xffff;
+ weak = weak * 0xff / 0xffff;
+
+ dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+ mf->report->field[0]->value[0] = weak;
+ mf->report->field[0]->value[1] = strong;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
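As a worked example of the scaling in mf_play() above: the 16-bit ff_effect rumble magnitudes are reduced to the single byte the adapter expects by multiplying by 0xff and dividing by 0xffff, so a strong magnitude of 0x8000 becomes 0x8000 * 0xff / 0xffff = 127 and the maximum 0xffff maps to 255.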
+static int mf_init(struct hid_device *hid)
+{
+ struct mf_device *mf;
+
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ struct list_head *report_ptr;
+ struct hid_report *report;
+
+ struct list_head *input_ptr = &hid->inputs;
+ struct hid_input *input;
+
+ struct input_dev *dev;
+
+ int error;
+
+ /* Setup each of the four inputs */
+ list_for_each(report_ptr, report_list) {
+ report = list_entry(report_ptr, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+ hid_err(hid, "Invalid report, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ if (list_is_last(input_ptr, &hid->inputs)) {
+ hid_err(hid, "Missing input, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ input_ptr = input_ptr->next;
+ input = list_entry(input_ptr, struct hid_input, list);
+
+ mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+ if (!mf)
+ return -ENOMEM;
+
+ dev = input->input;
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, mf, mf_play);
+ if (error) {
+ kfree(mf);
+ return error;
+ }
+
+ mf->report = report;
+ mf->report->field[0]->value[0] = 0x00;
+ mf->report->field[0]->value[1] = 0x00;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+ }
+
+ hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+ "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+ return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int error;
+
+ dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+ /* Split device into four inputs */
+ hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "HID parse failed.\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "HID hw start failed\n");
+ return error;
+ }
+
+ error = mf_init(hid);
+ if (error) {
+ hid_err(hid, "Force feedback init failed.\n");
+ hid_hw_stop(hid);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+ .name = "hid_mf",
+ .id_table = mf_devices,
+ .probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392e9f99..74b7b84a0420 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f447279..6dca66806844 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_BUTTON:
code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+ /*
+ * MS PTP spec says that external buttons left and right have
+ * usages 2 and 3.
+ */
+ if (cls->name == MT_CLS_WIN_8 &&
+ field->application == HID_DG_TOUCHPAD &&
+ (usage->hid & HID_USAGE) > 1)
+ code--;
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
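As a worked example of the button fix above: on a Win8-class touchpad, usage 2 (the external left button in the PTP spec) first maps to BTN_MOUSE + 1 = BTN_RIGHT and usage 3 to BTN_MOUSE + 2 = BTN_MIDDLE; the code-- then shifts them down to BTN_LEFT and BTN_RIGHT as the spec intends, while usage 1 (the integrated click) is excluded by the > 1 check and keeps BTN_LEFT.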
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+	 * that emit events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+	 * they will still be happy (this is only a problem if a touch
+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9cd2ca34a6be..be89bcbf6a71 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
static int rmi_set_mode(struct hid_device *hdev, u8 mode)
{
int ret;
- u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ u8 *buf;
- ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
+ buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret < 0) {
dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
ret);
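The hid-lg.c, hid-magicmouse.c and hid-rmi.c hunks above all apply the same pattern: the buffer handed to hid_hw_raw_request() should be a heap allocation (the transport layer may DMA from it), so a const or on-stack template is duplicated with kmemdup() and freed after the request. A minimal userspace sketch of that shape, with send_report() as a stand-in for the HID call and 0x0f as a placeholder report id:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for hid_hw_raw_request(): in the real driver the buffer may be
 * handed off for DMA, which is why it must not live on the stack. */
static int send_report(uint8_t *buf, size_t len)
{
	printf("sending %zu-byte report 0x%02x\n", len, buf[0]);
	return (int)len;
}

static int set_mode(uint8_t mode)
{
	const uint8_t txbuf[2] = { 0x0f, mode };	/* placeholder report id */
	uint8_t *buf;
	int ret;

	buf = malloc(sizeof(txbuf));			/* kmemdup() analogue */
	if (!buf)
		return -1;
	memcpy(buf, txbuf, sizeof(txbuf));

	ret = send_report(buf, sizeof(txbuf));
	free(buf);
	return ret < 0 ? ret : 0;
}

int main(void)
{
	return set_mode(1) ? 1 : 0;
}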
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 5614fee82347..3a84aaf1418b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
bool input = false;
int value = 0;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
feature = true;
field_index = index + sensor_inst->input_field_count;
- } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage,
+ } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage,
name) == 3) {
input = true;
field_index = index;
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
char name[HID_CUSTOM_NAME_LENGTH];
int value;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
field_index = index + sensor_inst->input_field_count;
} else
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 658a607dc6d9..5c925228847c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
__s32 value;
int ret = 0;
+ memset(buffer, 0, buffer_size);
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield)) {
@@ -251,6 +252,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int report_size;
int ret = 0;
+ u8 *val_ptr;
+ int buffer_index = 0;
+ int i;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
@@ -271,7 +275,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
goto done_proc;
}
ret = min(report_size, buffer_size);
- memcpy(buffer, report->field[field_index]->value, ret);
+
+ val_ptr = (u8 *)report->field[field_index]->value;
+ for (i = 0; i < report->field[field_index]->report_count; ++i) {
+ if (buffer_index >= ret)
+ break;
+
+ memcpy(&((u8 *)buffer)[buffer_index], val_ptr,
+ report->field[field_index]->report_size / 8);
+ val_ptr += sizeof(__s32);
+ buffer_index += (report->field[field_index]->report_size / 8);
+ }
done_proc:
mutex_unlock(&data->mutex);
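The loop added above fixes sensor_hub_get_feature() for fields whose report_size is smaller than 32 bits: HID core stores each usage in a 32-bit slot of field->value[], while callers expect the values packed back-to-back at report_size/8 bytes each. A small userspace sketch of that repacking (names are local to the sketch; a little-endian host is assumed, as with the in-kernel copy):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t pack_field_values(const int32_t *value, unsigned int report_count,
				unsigned int report_size_bits,
				uint8_t *buffer, size_t buffer_size)
{
	size_t step = report_size_bits / 8;
	size_t out = 0;
	unsigned int i;

	for (i = 0; i < report_count; i++) {
		if (out + step > buffer_size)
			break;
		/* take only the low report_size/8 bytes of each 32-bit slot */
		memcpy(buffer + out, &value[i], step);
		out += step;
	}
	return out;
}

int main(void)
{
	int32_t value[4] = { 0x11, 0x22, 0x33, 0x44 };	/* four 8-bit usages */
	uint8_t buf[4];
	size_t n = pack_field_values(value, 4, 8, buf, sizeof(buf));

	printf("%zu bytes: %02x %02x %02x %02x\n", n,
	       buf[0], buf[1], buf[2], buf[3]);	/* 4 bytes: 11 22 33 44 */
	return 0;
}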
@@ -781,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..7687c0875395 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x65, 0x00, /* Unit, */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
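For reference on the descriptor bytes changed above: 0x16 and 0x26 introduce the two-byte forms of the Logical Minimum and Logical Maximum items, with a little-endian payload. 0x26 0xFF 0x7F therefore encodes 0x7FFF = 32767, whereas the replaced 0x26 0x00 0x7F only encoded 0x7F00 = 32512 (despite its inline comment), and 0x16 0x00 0x80 is the signed value -32768.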
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x42, /* Input (Variable, Null State), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static const unsigned int ds4_absmap[] = {
+ [0x30] = ABS_X,
+ [0x31] = ABS_Y,
+ [0x32] = ABS_RX, /* right stick X */
+ [0x33] = ABS_Z, /* L2 */
+ [0x34] = ABS_RZ, /* R2 */
+ [0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+ [0x1] = BTN_WEST, /* Square */
+ [0x2] = BTN_SOUTH, /* Cross */
+ [0x3] = BTN_EAST, /* Circle */
+ [0x4] = BTN_NORTH, /* Triangle */
+ [0x5] = BTN_TL, /* L1 */
+ [0x6] = BTN_TR, /* R1 */
+ [0x7] = BTN_TL2, /* L2 */
+ [0x8] = BTN_TR2, /* R2 */
+ [0x9] = BTN_SELECT, /* Share */
+ [0xa] = BTN_START, /* Options */
+ [0xb] = BTN_THUMBL, /* L3 */
+ [0xc] = BTN_THUMBR, /* R3 */
+ [0xd] = BTN_MODE, /* PS */
+};
+
+
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
spinlock_t lock;
struct list_head list_node;
struct hid_device *hdev;
+ struct input_dev *touchpad;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ unsigned int key = usage->hid & HID_USAGE;
+
+ if (key >= ARRAY_SIZE(ds4_keymap))
+ return -1;
+
+ key = ds4_keymap[key];
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+ return 1;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ /* Let the HID parser deal with the HAT. */
+ if (usage->hid == HID_GD_HATSWITCH)
+ return 0;
+
+ if (abs >= ARRAY_SIZE(ds4_absmap))
+ return -1;
+
+ abs = ds4_absmap[abs];
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+ return 1;
+ }
+
+ return 0;
+}
+
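As a worked example of ds4_mapping(): a usage on the Button page with id 0x2 (Cross) is looked up in ds4_keymap[] and remapped to BTN_SOUTH, a Generic Desktop usage of 0x30 becomes ABS_X through ds4_absmap[], the hat switch is deliberately left to the generic HID parser (return 0), and anything beyond the table sizes is dropped by returning -1.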
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
unsigned long flags;
- int n, offset;
+ int n, m, offset, num_touch_data, max_touch_data;
u8 cable_state, battery_capacity, battery_charging;
- /*
- * Battery and touchpad data starts at byte 30 in the USB report and
- * 32 in Bluetooth report.
- */
- offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+ /* When using Bluetooth the header is 2 bytes longer, so skip these. */
+ int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+ /* Second bit of third button byte is for the touchpad button. */
+ offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+ input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
/*
- * The lower 4 bits of byte 30 contain the battery level
+ * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
* and the 5th bit contains the USB cable state.
*/
+ offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
battery_capacity = rd[offset] & 0x0F;
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
sc->battery_charging = battery_charging;
spin_unlock_irqrestore(&sc->lock, flags);
- offset += 5;
-
/*
- * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
- * and 37 on Bluetooth.
- * The first 7 bits of the first byte is a counter and bit 8 is a touch
- * indicator that is 0 when pressed and 1 when not pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and immediatly
- * follows the data for the first.
+ * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+ * and 35 on Bluetooth.
+	 * The first byte indicates the number of touch data frames in the report.
+ * Trackpad data starts 2 bytes later (e.g. 35 for USB).
*/
- for (n = 0; n < 2; n++) {
- u16 x, y;
+ offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+ max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+ if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+ num_touch_data = rd[offset];
+ else
+ num_touch_data = 1;
+ offset += 1;
+
+ for (m = 0; m < num_touch_data; m++) {
+ /* Skip past timestamp */
+ offset += 1;
+
+ /*
+		 * The first 7 bits of the first byte are a counter and bit 8 is
+ * a touch indicator that is 0 when pressed and 1 when not
+ * pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and
+ * immediately follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ u16 x, y;
+ bool active;
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
- input_mt_slot(input_dev, n);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- !(rd[offset] >> 7));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ active = !(rd[offset] >> 7);
+ input_mt_slot(sc->touchpad, n);
+ input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
- offset += 4;
+ if (active) {
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+ }
+
+ offset += 4;
+ }
+ input_mt_sync_frame(sc->touchpad);
+ input_sync(sc->touchpad);
}
}
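A standalone sketch of the per-touch decoding described in the comment above (4 bytes per point: a 7-bit counter with an active-low touch bit, then two 12-bit coordinates packed into 3 bytes); the struct and helper names are invented for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ds4_touch {
	bool active;
	uint16_t x, y;
};

static struct ds4_touch ds4_decode_touch(const uint8_t p[4])
{
	struct ds4_touch t;

	t.active = !(p[0] >> 7);		/* bit 7: 0 = finger down */
	t.x = p[1] | ((p[2] & 0x0F) << 8);	/* low 12 bits */
	t.y = (p[2] >> 4) | (p[3] << 4);	/* high 12 bits */
	return t;
}

int main(void)
{
	/* finger down at x = 0x500 (1280), y = 0x2D0 (720) */
	const uint8_t raw[4] = { 0x00, 0x00, 0x05, 0x2D };
	struct ds4_touch t = ds4_decode_touch(raw);

	printf("active=%d x=%d y=%d\n", t.active, t.x, t.y);
	return 0;
}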
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
&& rd[0] == 0x11 && size == 78)) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /* CRC check */
+ u8 bthdr = 0xA1;
+ u32 crc;
+ u32 report_crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+ report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+ if (crc != report_crc) {
+ hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%0x != 0x%0x\n",
+ report_crc, crc);
+ return -EILSEQ;
+ }
+ }
dualshock4_parse_report(sc, rd, size);
}
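The Bluetooth CRC check added above uses the standard reflected CRC-32 (polynomial 0xEDB88320, as computed by the kernel's crc32_le()) over the BT header byte 0xA1 followed by the report minus its last four bytes, and compares it with the little-endian CRC stored in those last four bytes; the same scheme is reused further down for the 0x11 output report with header byte 0xA2. A self-contained userspace sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* bitwise reflected CRC-32, equivalent to the kernel's crc32_le() */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

static int ds4_bt_crc_ok(const uint8_t *rd, size_t size)
{
	uint8_t bthdr = 0xA1;
	uint32_t crc, report_crc;

	crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
	crc = ~crc32_le(crc, rd, size - 4);
	report_crc = rd[size - 4] | rd[size - 3] << 8 |
		     rd[size - 2] << 16 | (uint32_t)rd[size - 1] << 24;
	return crc == report_crc;
}

int main(void)
{
	uint8_t rd[78] = { 0x11 };	/* dummy 0x11 input report */
	uint8_t bthdr = 0xA1;
	uint32_t crc = crc32_le(0xFFFFFFFF, &bthdr, 1);

	crc = ~crc32_le(crc, rd, sizeof(rd) - 4);
	rd[74] = crc;
	rd[75] = crc >> 8;
	rd[76] = crc >> 16;
	rd[77] = crc >> 24;
	printf("crc ok: %d\n", ds4_bt_crc_ok(rd, sizeof(rd)));
	return 0;
}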
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ return ds4_mapping(hdev, hi, field, usage, bit, max);
+
/* Let hid-core decide for the others */
return 0;
}
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
int w, int h)
{
- struct input_dev *input_dev = hi->input;
+ size_t name_sz;
+ char *name;
int ret;
- ret = input_mt_init_slots(input_dev, touch_count, 0);
+ sc->touchpad = input_allocate_device();
+ if (!sc->touchpad)
+ return -ENOMEM;
+
+ input_set_drvdata(sc->touchpad, sc);
+ sc->touchpad->dev.parent = &sc->hdev->dev;
+ sc->touchpad->phys = sc->hdev->phys;
+ sc->touchpad->uniq = sc->hdev->uniq;
+ sc->touchpad->id.bustype = sc->hdev->bus;
+ sc->touchpad->id.vendor = sc->hdev->vendor;
+ sc->touchpad->id.product = sc->hdev->product;
+ sc->touchpad->id.version = sc->hdev->version;
+
+ /* Append a suffix to the controller name as there are various
+ * DS4 compatible non-Sony devices with different names.
+ */
+ name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name = kzalloc(name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ sc->touchpad->name = name;
+
+ ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
if (ret < 0)
- return ret;
+ goto err;
+
+ /* We map the button underneath the touchpad to BTN_LEFT. */
+ __set_bit(EV_KEY, sc->touchpad->evbit);
+ __set_bit(BTN_LEFT, sc->touchpad->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ ret = input_register_device(sc->touchpad);
+ if (ret < 0)
+ goto err;
return 0;
+
+err:
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
+
+ input_free_device(sc->touchpad);
+ sc->touchpad = NULL;
+
+ return ret;
}
-static int sony_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
{
- struct sony_sc *sc = hid_get_drvdata(hdev);
- int ret;
+ if (!sc->touchpad)
+ return;
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = sony_register_touchpad(hidinput, 2, 1920, 942);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- return ret;
- }
- }
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
- return 0;
+ input_unregister_device(sc->touchpad);
+ sc->touchpad = NULL;
}
/*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
u8 *buf;
int ret;
- buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
* 0xD0 - 66hz
*/
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- memset(buf, 0, DS4_REPORT_0x05_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
- memset(buf, 0, DS4_REPORT_0x11_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0x80;
+ buf[1] = 0xC0; /* HID + CRC */
buf[3] = 0x0F;
offset = 6;
}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
- else
- hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
- HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+ else {
+ /* CRC generation */
+ u8 bthdr = 0xA2;
+ u32 crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+ put_unaligned_le32(crc, &buf[74]);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+ }
}
static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
kmalloc(sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
* offset 1.
*/
ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- if (ret != DS4_REPORT_0x81_SIZE) {
+ if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
ret = ret < 0 ? ret : -EINVAL;
goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
cancel_work_sync(&sc->state_worker);
}
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
{
- int ret;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
int append_dev_id;
- unsigned long quirks = id->driver_data;
- struct sony_sc *sc;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
- if (!strcmp(hdev->name, "FutureMax Dance Mat"))
- quirks |= FUTUREMAX_DANCE_MAT;
-
- sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
- if (sc == NULL) {
- hid_err(hdev, "can't alloc sony descriptor\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&sc->lock);
-
- sc->quirks = quirks;
- hid_set_drvdata(hdev, sc);
- sc->hdev = hdev;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- return ret;
- }
-
- if (sc->quirks & VAIO_RDESC_CONSTANT)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- return ret;
- }
+ int ret;
ret = sony_set_device_id(sc);
if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sony_init_output_report(sc, sixaxis_send_output_report);
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- /*
- * The DualShock 4 wants output reports sent on the ctrl
- * endpoint when connected via Bluetooth.
- */
- hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = dualshock4_set_operational_bt(hdev);
if (ret < 0) {
hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 942);
+ if (ret) {
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots: %d\n",
+ ret);
+ return ret;
+ }
+
sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ err_stop:
return ret;
}
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct sony_sc *sc;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (sc == NULL) {
+ hid_err(hdev, "can't alloc sony descriptor\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sc->lock);
+
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (sc->quirks & VAIO_RDESC_CONSTANT)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+ /* Patch the hw version on DS4 compatible devices, so applications can
+ * distinguish between the default HID mappings and the mappings defined
+ * by the Linux game controller spec. This is important for the SDL2
+	 * library, whose game controller database uses device ids
+	 * in combination with the version as a key.
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ hdev->version |= 0x8000;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ /* sony_input_configured can fail, but this doesn't result
+ * in hid_hw_start failures (intended). Check whether
+	 * the HID layer claimed the device; if not, fail.
+ * We don't know the actual reason for the failure, most
+ * likely it is due to EEXIST in case of double connection
+ * of USB and Bluetooth, but could have been due to ENOMEM
+ * or other reasons as well.
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ hid_hw_close(hdev);
+
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT) {
- hid_hw_close(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
- }
+
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 000000000000..88ea390c10ad
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area which works as a touchpad
+ * - the tablet area which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad, and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+ TOUCH_NONE,
+ TOUCH_PEN,
+ TOUCH_FINGER,
+ TOUCH_TWOFINGER
+};
+
+enum {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+ int min;
+ int max;
+} accel_limits[] = {
+ [AXIS_X] = { 490, 534 },
+ [AXIS_Y] = { 490, 534 },
+ [AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH 160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+ struct input_dev *joy_input_dev;
+ struct input_dev *touch_input_dev;
+ struct input_dev *pen_input_dev;
+ struct input_dev *accel_input_dev;
+ struct hid_device *hdev;
+
+ /*
+ * The device's two-finger support is pretty unreliable, as
+ * the device could report a single touch when the two fingers
+ * are too close together, and the distance between fingers, even
+	 * though reported, is not in the same unit as the touches.
+ *
+ * We'll make do without it, and try to report the first touch
+ * as reliably as possible.
+ */
+ int last_one_finger_x;
+ int last_one_finger_y;
+ int last_two_finger_x;
+ int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+ axis = clamp(axis,
+ accel_limits[offset].min,
+ accel_limits[offset].max);
+ axis = (axis - accel_limits[offset].min) /
+ ((accel_limits[offset].max -
+ accel_limits[offset].min) * 0xFF);
+ return axis;
+}
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct udraw *udraw = hid_get_drvdata(hdev);
+ int touch;
+ int x, y, z;
+
+ if (len != 27)
+ return 0;
+
+ if (data[11] == 0x00)
+ touch = TOUCH_NONE;
+ else if (data[11] == 0x40)
+ touch = TOUCH_PEN;
+ else if (data[11] == 0x80)
+ touch = TOUCH_FINGER;
+ else
+ touch = TOUCH_TWOFINGER;
+
+ /* joypad */
+ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+ input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+ input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+ input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+ input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+ x = y = 0;
+ switch (data[2]) {
+ case 0x0:
+ y = -127;
+ break;
+ case 0x1:
+ y = -127;
+ x = 127;
+ break;
+ case 0x2:
+ x = 127;
+ break;
+ case 0x3:
+ y = 127;
+ x = 127;
+ break;
+ case 0x4:
+ y = 127;
+ break;
+ case 0x5:
+ y = 127;
+ x = -127;
+ break;
+ case 0x6:
+ x = -127;
+ break;
+ case 0x7:
+ y = -127;
+ x = -127;
+ break;
+ default:
+ break;
+ }
+
+ input_report_abs(udraw->joy_input_dev, ABS_X, x);
+ input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+ input_sync(udraw->joy_input_dev);
+
+ /* For pen and touchpad */
+ x = y = 0;
+ if (touch != TOUCH_NONE) {
+ if (data[15] != 0x0F)
+ x = data[15] * 256 + data[17];
+ if (data[16] != 0x0F)
+ y = data[16] * 256 + data[18];
+ }
+
+ if (touch == TOUCH_FINGER) {
+ /* Save the last one-finger touch */
+ udraw->last_one_finger_x = x;
+ udraw->last_one_finger_y = y;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+ } else if (touch == TOUCH_TWOFINGER) {
+ /*
+		 * We have a problem because x/y are the coordinates of the
+		 * second finger, but we want the first finger reported
+		 * to user-space, otherwise it'll look as if it jumped.
+ *
+ * See the udraw struct definition for why this was
+ * implemented this way.
+ */
+ if (udraw->last_two_finger_x == -1) {
+ /* Save the position of the 2nd finger */
+ udraw->last_two_finger_x = x;
+ udraw->last_two_finger_y = y;
+
+ x = udraw->last_one_finger_x;
+ y = udraw->last_one_finger_y;
+ } else {
+ /*
+ * Offset the 2-finger coords using the
+ * saved data from the first finger
+ */
+ x = x - (udraw->last_two_finger_x
+ - udraw->last_one_finger_x);
+ y = y - (udraw->last_two_finger_y
+ - udraw->last_one_finger_y);
+ }
+ }
+
+ /* touchpad */
+ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+ touch == TOUCH_FINGER);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+ touch == TOUCH_TWOFINGER);
+
+ input_report_abs(udraw->touch_input_dev, ABS_X, x);
+ input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+ }
+ input_sync(udraw->touch_input_dev);
+
+ /* pen */
+ if (touch == TOUCH_PEN) {
+ int level;
+
+ level = clamp(data[13] - PRESSURE_OFFSET,
+ 0, MAX_PRESSURE);
+
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+ input_report_abs(udraw->pen_input_dev, ABS_X, x);
+ input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+ }
+ input_sync(udraw->pen_input_dev);
+
+ /* accel */
+ x = (data[19] + (data[20] << 8));
+ x = clamp_accel(x, AXIS_X);
+ y = (data[21] + (data[22] << 8));
+ y = clamp_accel(y, AXIS_Y);
+ z = (data[23] + (data[24] << 8));
+ z = clamp_accel(z, AXIS_Z);
+ input_report_abs(udraw->accel_input_dev, ABS_X, x);
+ input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+ input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+ input_sync(udraw->accel_input_dev);
+
+ /* let hidraw and hiddev handle the report */
+ return 0;
+}
+
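As a worked example of the two-finger handling above: if the first finger is reported at (800, 500) and a second finger then appears so that the report carries the second finger's coordinates (900, 520), the driver saves (900, 520) in last_two_finger_* and keeps forwarding (800, 500); a later two-finger report of (910, 530) is forwarded as (910 - (900 - 800), 530 - (520 - 500)) = (810, 510), so the position seen by user-space never jumps to the second finger.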
+static int udraw_open(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+ const char *name)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return NULL;
+
+ input_dev->name = name;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
+ input_dev->open = udraw_open;
+ input_dev->close = udraw_close;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+ return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->touch_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, MAX_PRESSURE, 0, 0);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->pen_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS);
+
+ /* 1G accel is reported as ~256, so clamp to 2G */
+ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+ set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+ udraw->accel_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+ set_bit(BTN_SOUTH, input_dev->keybit);
+ set_bit(BTN_NORTH, input_dev->keybit);
+ set_bit(BTN_EAST, input_dev->keybit);
+ set_bit(BTN_WEST, input_dev->keybit);
+ set_bit(BTN_SELECT, input_dev->keybit);
+ set_bit(BTN_START, input_dev->keybit);
+ set_bit(BTN_MODE, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+ udraw->joy_input_dev = input_dev;
+
+ return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct udraw *udraw;
+ int ret;
+
+ udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+ if (!udraw)
+ return -ENOMEM;
+
+ udraw->hdev = hdev;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+
+ hid_set_drvdata(hdev, udraw);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (!udraw_setup_joypad(udraw, hdev) ||
+ !udraw_setup_touch(udraw, hdev) ||
+ !udraw_setup_pen(udraw, hdev) ||
+ !udraw_setup_accel(udraw, hdev)) {
+ hid_err(hdev, "could not allocate interfaces\n");
+ return -ENOMEM;
+ }
+
+ ret = input_register_device(udraw->joy_input_dev) ||
+ input_register_device(udraw->touch_input_dev) ||
+ input_register_device(udraw->pen_input_dev) ||
+ input_register_device(udraw->accel_input_dev);
+ if (ret) {
+ hid_err(hdev, "failed to register interfaces\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+ .name = "hid-udraw",
+ .id_table = udraw_devices,
+ .raw_event = udraw_raw_event,
+ .probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2de875..78fb32a7b103 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -37,10 +38,15 @@
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <linux/i2c/i2c-hid.h>
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
@@ -143,10 +149,9 @@ struct i2c_hid {
char *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
+ unsigned long quirks; /* Various quirks */
wait_queue_head_t wait; /* For waiting the interrupt */
- struct gpio_desc *desc;
- int irq;
struct i2c_hid_platform_data pdata;
@@ -154,6 +159,39 @@ struct i2c_hid {
struct mutex reset_lock;
};
+static const struct i2c_hid_quirks {
+ __u16 idVendor;
+ __u16 idProduct;
+ __u32 quirks;
+} i2c_hid_quirks[] = {
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+ u32 quirks = 0;
+ int n;
+
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+ i2c_hid_quirks[n].idProduct == idProduct))
+ quirks = i2c_hid_quirks[n].quirks;
+
+ return quirks;
+}
+
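For example, i2c_hid_lookup_quirk(USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752) walks the table above and returns I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV; an entry whose idProduct is HID_ANY_ID would match every product of that vendor, and a device not listed simply gets a quirks value of 0.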
static int __i2c_hid_command(struct i2c_client *client,
const struct i2c_hid_cmd *command, u8 reportID,
u8 reportType, u8 *args, int args_len,
@@ -346,11 +384,27 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+	 * Some devices require a wake-up command to be sent before power on.
+	 * The call returns an error (EREMOTEIO), but the device will be
+	 * triggered and activated. After that, it behaves like a normal device.
+ */
+ if (power_state == I2C_HID_PWR_ON &&
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+ /* Device was already activated */
+ if (!ret)
+ goto set_pwr_exit;
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
+set_pwr_exit:
return ret;
}
@@ -716,9 +770,11 @@ static int i2c_hid_start(struct hid_device *hid)
i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
if (bufsize > ihid->bufsize) {
+ disable_irq(client->irq);
i2c_hid_free_buffers(ihid);
ret = i2c_hid_alloc_buffers(ihid, bufsize);
+ enable_irq(client->irq);
if (ret)
return ret;
@@ -806,18 +862,21 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
static int i2c_hid_init_irq(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned long irqflags = 0;
int ret;
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", ihid->irq);
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
- ret = request_threaded_irq(ihid->irq, NULL, i2c_hid_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, ihid);
+ if (!irq_get_trigger_type(client->irq))
+ irqflags = IRQF_TRIGGER_LOW;
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ irqflags | IRQF_ONESHOT, client->name, ihid);
if (ret < 0) {
dev_warn(&client->dev,
"Could not register for %s interrupt, irq = %d,"
" ret = %d\n",
- client->name, ihid->irq, ret);
+ client->name, client->irq, ret);
return ret;
}
@@ -864,14 +923,6 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
}
#ifdef CONFIG_ACPI
-
-/* Default GPIO mapping */
-static const struct acpi_gpio_params i2c_hid_irq_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping i2c_hid_acpi_gpios[] = {
- { "gpios", &i2c_hid_irq_gpio, 1 },
- { },
-};
-
static int i2c_hid_acpi_pdata(struct i2c_client *client,
struct i2c_hid_platform_data *pdata)
{
@@ -882,7 +933,6 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
- int ret;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
@@ -898,9 +948,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);
- /* GPIOs are optional */
- ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
- return ret < 0 && ret != -ENXIO ? ret : 0;
+ return 0;
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -964,6 +1012,19 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ if (client->irq < 0) {
+ if (client->irq != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "HID over i2c doesn't have a valid IRQ\n");
+ return client->irq;
+ }
+
ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
@@ -983,23 +1044,6 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
- if (client->irq > 0) {
- ihid->irq = client->irq;
- } else if (ACPI_COMPANION(&client->dev)) {
- ihid->desc = gpiod_get(&client->dev, NULL, GPIOD_IN);
- if (IS_ERR(ihid->desc)) {
- dev_err(&client->dev, "Failed to get GPIO interrupt\n");
- return PTR_ERR(ihid->desc);
- }
-
- ihid->irq = gpiod_to_irq(ihid->desc);
- if (ihid->irq < 0) {
- gpiod_put(ihid->desc);
- dev_err(&client->dev, "Failed to convert GPIO to IRQ\n");
- return ihid->irq;
- }
- }
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
@@ -1050,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
client->name, hid->vendor, hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1064,16 +1110,13 @@ err_mem_free:
hid_destroy_device(hid);
err_irq:
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
err_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
err:
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
i2c_hid_free_buffers(ihid);
kfree(ihid);
return ret;
@@ -1092,18 +1135,13 @@ static int i2c_hid_remove(struct i2c_client *client)
hid = ihid->hid;
hid_destroy_device(hid);
- free_irq(ihid->irq, ihid);
+ free_irq(client->irq, ihid);
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- if (ihid->desc)
- gpiod_put(ihid->desc);
-
kfree(ihid);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
-
return 0;
}
@@ -1142,11 +1180,11 @@ static int i2c_hid_suspend(struct device *dev)
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
}
if (device_may_wakeup(&client->dev)) {
- wake_status = enable_irq_wake(ihid->irq);
+ wake_status = enable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = true;
else
@@ -1166,7 +1204,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
- wake_status = disable_irq_wake(ihid->irq);
+ wake_status = disable_irq_wake(client->irq);
if (!wake_status)
ihid->irq_wake_enabled = false;
else
@@ -1179,7 +1217,7 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
@@ -1197,19 +1235,17 @@ static int i2c_hid_resume(struct device *dev)
static int i2c_hid_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(ihid->irq);
+ disable_irq(client->irq);
return 0;
}
static int i2c_hid_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- enable_irq(ihid->irq);
+ enable_irq(client->irq);
i2c_hid_set_power(client, I2C_HID_PWR_ON);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index e2517c11e0ee..842d8416a7a6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include "client.h"
#include "hw-ish.h"
-#include "utils.h"
#include "hbm.h"
/* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
((uint32_t)tv_utc.tv_usec);
ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
time_update.primary_host_time = usec_system;
time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
sizeof(uint32_t) + size);
}
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a specific event with timeout
+ * @dev: ISHTP device pointer
+ * @condition: indicate the condition for waiting
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls for the specified event to become ready in a loop
+ * whose period is given by timeinc; exceeding the timeout causes a failure.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
/**
* ish_fw_reset_handler() - FW reset handler
* @dev: ishtp device pointer
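The helper above swaps the old jiffies-based busy-wait macros for a simple millisecond poll. As a rough illustration of that pattern outside the kernel (plain C; fw_is_ready() and usleep() are stand-ins for the driver's readiness checks and msleep_interruptible(), and handling of interrupted sleeps is omitted):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the firmware-ready predicate the driver polls. */
static bool fw_is_ready(void)
{
	static int calls;
	return ++calls >= 5;	/* pretend readiness arrives on the 5th poll */
}

/* Poll a condition every 'timeinc' ms, giving up after 'timeout' ms. */
static int wait_for_condition(bool (*cond)(void), unsigned int timeinc,
			      unsigned int timeout)
{
	while (!cond()) {
		if (timeout <= timeinc)
			return -1;	/* timed out, mirrors -EBUSY */
		usleep(timeinc * 1000);
		timeout -= timeinc;
	}
	return 0;
}

int main(void)
{
	printf("wait result: %d\n", wait_for_condition(fw_is_ready, 100, 2000));
	return 0;
}

Run standalone this prints "wait result: 0" once the stubbed condition reports ready; with the time slices and timeouts defined above it mirrors the 100 ms / 2000 ms budget the driver now uses.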
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
ishtp_reset_handler(dev);
if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
- ish_is_input_ready(dev), (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW's ILUP and ISHTP_READY */
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
- (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
if (!ishtp_fw_is_ready(dev)) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
return 0;
}
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
/**
* ish_fw_reset_work_fn() - FW reset worker function
* @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
rv = ish_fw_reset_handler(ishtp_dev);
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
- schedule_timeout(HZ / 3);
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
ishtp_dev->recvd_hw_ready = 1;
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
@@ -638,6 +693,58 @@ eoi:
}
/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+static int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wakeup ishfw from waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to FW;
+ * this will wake the FW from its waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
* _ish_hw_reset() - HW reset
* @dev: ishtp device pointer
*
@@ -649,7 +756,6 @@ static int _ish_hw_reset(struct ishtp_device *dev)
{
struct pci_dev *pdev = dev->pdev;
int rv;
- unsigned int dma_delay;
uint16_t csr;
if (!pdev)
@@ -664,15 +770,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
return -EINVAL;
}
- /* Now trigger reset to FW */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
-
- for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
- _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
- dma_delay += 5)
- mdelay(5);
-
- if (dma_delay >= MAX_DMA_DELAY) {
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
dev_err(&pdev->dev,
"Can't reset - stuck with DMA in-progress\n");
return -EBUSY;
@@ -690,16 +789,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
csr |= PCI_D0;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
-
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
-
- /* Flush writes to doorbell and REMAP2 */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* Now we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
return 0;
}
@@ -758,16 +849,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);
- /* After that we can enable ISH DMA operation */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
- /* Flush write to doorbell */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* After that we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
set_host_ready(dev);
@@ -876,6 +960,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
*/
void ish_device_disable(struct ishtp_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
dev->dev_state = ISHTP_DEV_DISABLED;
ish_clr_host_rdy(dev);
}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 42f0beeb09fd..20d647d2dd2c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND,
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
KBUILD_MODNAME, dev);
if (ret) {
dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
+#ifdef CONFIG_PM
static struct device *ish_resume_device;
/**
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM
static const struct dev_pm_ops ish_pm_ops = {
.suspend = ish_suspend,
.resume = ish_resume,
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = {
#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
#else
#define ISHTP_ISH_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM */
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123dc7b4..000000000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define WAIT_FOR_SEND_SLICE (HZ / 10)
-#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define timed_wait_for(timeinc, condition) \
- do { \
- int completed = 0; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- completed = (condition); \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- } while (!(completed)); \
- } while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define timed_wait_for_timeout(timeinc, condition, timeout) \
- do { \
- int t = timeout; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- t -= timeinc; \
- if (t <= 0) \
- break; \
- } while (!(condition)); \
- } while (0)
-
-#endif /* UTILS__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..f4cbc744e657 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
*/
i = dev->fw_client_presentation_num - 1;
device_uuid = dev->fw_clients[i].props.protocol_name;
- dev_name = kasprintf(GFP_KERNEL,
- "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
- device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
- device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
- device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
- device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
- device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
- device_uuid.b[15]);
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
if (!dev_name)
return -ENOMEM;
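The %pUL specifier used above is printk's uppercase little-endian UUID format (see Documentation/printk-formats.txt), which matches the byte order the removed arguments spelled out by hand: bytes 3-2-1-0, 5-4, 7-6, then 8 through 15. A userspace approximation of that ordering, with a hypothetical uuid_bytes array:

#include <stdio.h>
#include <stdint.h>

/* Format a 16-byte buffer the way the removed code (and %pUL) orders it:
 * bytes 3-2-1-0, 5-4, 7-6, then 8..15 in natural order. */
static void format_le_uuid(const uint8_t b[16], char out[40])
{
	snprintf(out, 40,
		 "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
		 b[3], b[2], b[1], b[0], b[5], b[4], b[7], b[6],
		 b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
}

int main(void)
{
	const uint8_t uuid_bytes[16] = { 0, 1, 2, 3, 4, 5, 6, 7,
					 8, 9, 10, 11, 12, 13, 14, 15 };
	char buf[40];

	format_le_uuid(uuid_bytes, buf);
	printf("{%s}\n", buf);
	return 0;
}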
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee60774..59460b66e689 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
list_for_each_entry(cl, &dev->cl_list, link) {
if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
cl->state = ISHTP_CL_DISCONNECTED;
cl->status = -ENODEV;
}
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af649a60..333108ef18cf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
- return 1;
+ return -ENOMEM;
}
status = hid_get_class_descriptor(dev,
interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
if (status < 0) {
dbg_hid("reading report descriptor failed (post_reset)\n");
kfree(rdesc);
- return 1;
+ return status;
}
status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
kfree(rdesc);
if (status != 0) {
dbg_hid("report descriptor changed\n");
- return 1;
+ return -EPERM;
}
/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 354d49ea36dd..b3e01c82af05 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
@@ -81,6 +82,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -100,8 +103,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea891cb..d303e413306d 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a5648e708..b9779bcbd140 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
u8 *data;
int ret;
+ int n;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
case HID_UP_DIGITIZER:
if (field->report->id == 0x0B &&
- (field->application == WACOM_G9_DIGITIZER ||
- field->application == WACOM_G11_DIGITIZER)) {
+ (field->application == WACOM_HID_G9_PEN ||
+ field->application == WACOM_HID_G11_PEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
- case WACOM_G9_PAGE:
- case WACOM_G11_PAGE:
+ case WACOM_HID_WD_DATAMODE:
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ break;
+
+ case WACOM_HID_UP_G9:
+ case WACOM_HID_UP_G11:
if (field->report->id == 0x03 &&
- (field->application == WACOM_G9_TOUCHSCREEN ||
- field->application == WACOM_G11_TOUCHSCREEN)) {
+ (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+ field->application == WACOM_HID_G11_TOUCHSCREEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
+ case WACOM_HID_WD_OFFSETLEFT:
+ case WACOM_HID_WD_OFFSETTOP:
+ case WACOM_HID_WD_OFFSETRIGHT:
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ /* read manually */
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+ data, n, 0);
+ } else {
+ hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+ __func__);
+ }
+ kfree(data);
+ break;
}
}
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->touch_max = 1;
}
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * place instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
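A standalone sketch of the neighbour-usage window described in the comment above; the constants mirror the standard HID Digitizer usage page IDs, and the hid_field plumbing is replaced by a plain array:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define HID_DG_INRANGE        0x000d0032
#define HID_DG_INVERT         0x000d003c
#define HID_DG_TIPSWITCH      0x000d0042
#define HID_DG_BARRELSWITCH   0x000d0044
#define HID_DG_ERASER         0x000d0045
#define HID_DG_BARRELSWITCH2  0x000d005a

/* Should the bare 0x000D0000 usage at index i be treated as BARRELSWITCH2?
 * Mirrors the window test in wacom_usage_mapping(): the four preceding
 * usages and the following one must match the fixed ISDv4 layout. */
static bool looks_like_barrelswitch2(const uint32_t *usages, int n, int i)
{
	return i - 4 >= 0 && i + 1 < n &&
	       usages[i] == 0x000d0000 &&
	       usages[i - 4] == HID_DG_TIPSWITCH &&
	       usages[i - 3] == HID_DG_BARRELSWITCH &&
	       usages[i - 2] == HID_DG_ERASER &&
	       usages[i - 1] == HID_DG_INVERT &&
	       usages[i + 1] == HID_DG_INRANGE;
}

int main(void)
{
	uint32_t report[] = { HID_DG_TIPSWITCH, HID_DG_BARRELSWITCH,
			      HID_DG_ERASER, HID_DG_INVERT,
			      0x000d0000, HID_DG_INRANGE };

	if (looks_like_barrelswitch2(report, 6, 4))
		printf("remap 0x000d0000 -> 0x%08x\n", HID_DG_BARRELSWITCH2);
	return 0;
}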
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
-
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* shift everything including the terminator */
memmove(gap, gap+1, strlen(gap));
}
+
+ /* strip off excessive prefixing */
+ if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co.,Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+ if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co., Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+
/* get rid of trailing whitespace */
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
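The prefix stripping above is an in-place memmove that only fires when the remainder still begins with "Wacom ", so the brand survives in the shortened name. A runnable sketch of the same idea (strip_vendor_prefix() and the sample string are illustrative, not the driver's code):

#include <stdio.h>
#include <string.h>

/* Drop a "Wacom Co.,Ltd. " / "Wacom Co., Ltd. " prefix when the rest of
 * the name still starts with "Wacom ", shifting the remainder (terminator
 * included) to the front of the buffer. */
static void strip_vendor_prefix(char *name)
{
	static const char *const prefixes[] = {
		"Wacom Co.,Ltd. ", "Wacom Co., Ltd. "
	};

	for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
		size_t plen = strlen(prefixes[i]);

		if (strncmp(name, prefixes[i], plen) == 0 &&
		    strncmp(name + plen, "Wacom ", 6) == 0) {
			memmove(name, name + plen, strlen(name) - plen + 1);
			return;
		}
	}
}

int main(void)
{
	char name[64] = "Wacom Co.,Ltd. Wacom Cintiq Pro 16 Pen";

	strip_vendor_prefix(name);
	printf("%s\n", name);	/* "Wacom Cintiq Pro 16 Pen" */
	return 0;
}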
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
+
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb79925730d..b1a9a3ca6d56 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
+static int wacom_numbered_button_to_key(int n);
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_id_mangle(int tool_id)
+{
+ return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
+
static int wacom_intuos_get_tool_type(int tool_id)
{
int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
tool_type = BTN_TOOL_PENCIL;
break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
break;
case 0x82a: /* Eraser */
+ case 0x84a:
case 0x85a:
case 0x91a:
case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x1880a: /* DTH2242 Eraser */
+ case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
tool_type = BTN_TOOL_RUBBER;
break;
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x112:
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+ ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
* don't report events for invalid data
*/
/* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ if ((!((wacom->id[idx] >> 16) & 0x01) &&
(features->type == WACOM_21UX2)) ||
/* Only large Intuos support Lense Cursor */
(wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
break;
}
- input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_abs(input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_equivalent_usage(int usage)
+{
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == WACOM_HID_SP_PAD ||
+ subpage == WACOM_HID_SP_BUTTON ||
+ subpage == WACOM_HID_SP_DIGITIZER ||
+ subpage == WACOM_HID_SP_DIGITIZERINFO ||
+ usage == WACOM_HID_WD_SENSE ||
+ usage == WACOM_HID_WD_SERIALHI ||
+ usage == WACOM_HID_WD_TOOLTYPE ||
+ usage == WACOM_HID_WD_DISTANCE ||
+ usage == WACOM_HID_WD_TOUCHSTRIP ||
+ usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+ usage == WACOM_HID_WD_TOUCHRING ||
+ usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+ return usage;
+ }
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = HID_UP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
+ return usage;
+}
+
static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
+ struct wacom *wacom = input_get_drvdata(input);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+
+ if (equivalent_usage == HID_DG_TWIST) {
+ resolution_code = ABS_RZ;
+ }
+
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+ }
+ if (equivalent_usage == HID_GD_Y) {
+ fmin += features->offset_top;
+ fmax -= features->offset_bottom;
+ }
usage->type = type;
usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, code));
+ hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_MSC:
input_set_capability(input, EV_MSC, code);
break;
+ case EV_SW:
+ input_set_capability(input, EV_SW, code);
+ break;
+ }
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_X:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Y:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Z:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+ case WACOM_HID_WD_BUTTONLEFT:
+ case WACOM_HID_WD_BUTTONRIGHT:
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP2:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRING:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+
+ switch (equivalent_usage & 0xfffffff0) {
+ case WACOM_HID_WD_EXPRESSKEY00:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+}
+
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+ }
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ wacom_wac->hid_data.inrange_state |= value;
+ }
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ break;
+
+ default:
+ features->input_event_flag = true;
+ input_event(input, usage->type, usage->code, value);
+ break;
+ }
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->quirks & WACOM_QUIRK_BATTERY) {
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
+
+ wacom_notify_battery(wacom_wac, capacity, charging,
+ connected, powered);
+ }
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+ if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ features->input_event_flag = true;
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ }
+
+ if (features->input_event_flag) {
+ features->input_event_flag = false;
+ input_sync(input);
}
}
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
+ case WACOM_HID_WD_DISTANCE:
+ case HID_GD_Z:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+ break;
case HID_DG_TIPPRESSURE:
wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
+ case HID_DG_BATTERYSTRENGTH:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
case HID_DG_INVERT:
wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
+ case HID_DG_TILT_X:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+ break;
+ case HID_DG_TILT_Y:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+ break;
+ case HID_DG_TWIST:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
case HID_DG_TOOLSERIALNUMBER:
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
+ case WACOM_HID_WD_SENSE:
+ features->quirks |= WACOM_QUIRK_SENSE;
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ break;
+ case WACOM_HID_WD_FINGERWHEEL:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ break;
}
}
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- /* checking which Tool / tip switch to send */
- switch (usage->hid) {
+ switch (equivalent_usage) {
+ case HID_GD_Z:
+ /*
+ * HID_GD_Z "should increase as the control's position is
+ * moved from high to low", while ABS_DISTANCE instead
+ * increases in value as the tool moves from low to high.
+ */
+ value = field->logical_maximum - value;
+ break;
case HID_DG_INRANGE:
wacom_wac->hid_data.inrange_state = value;
- return 0;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case HID_DG_BATTERYSTRENGTH:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
case HID_DG_INVERT:
wacom_wac->hid_data.invert_state = value;
- return 0;
+ return;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
- return 0;
+ return;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= value;
+ return;
+ case WACOM_HID_WD_SENSE:
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
+ return;
+ case WACOM_HID_WD_TOOLTYPE:
+ /*
+ * Some devices (MobileStudio Pro, and possibly later
+ * devices as well) do not return the complete tool
+ * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+ * bitwise OR so the complete value can be built
+ * up over time :(
+ */
+ wacom_wac->id[0] |= value;
+ return;
+ case WACOM_HID_WD_OFFSETLEFT:
+ if (features->offset_left && value != features->offset_left)
+ hid_warn(hdev, "%s: overriding exising left offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_left);
+ features->offset_left = value;
+ return;
+ case WACOM_HID_WD_OFFSETRIGHT:
+ if (features->offset_right && value != features->offset_right)
+ hid_warn(hdev, "%s: overriding exising right offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_right);
+ features->offset_right = value;
+ return;
+ case WACOM_HID_WD_OFFSETTOP:
+ if (features->offset_top && value != features->offset_top)
+ hid_warn(hdev, "%s: overriding exising top offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_top);
+ features->offset_top = value;
+ return;
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ if (features->offset_bottom && value != features->offset_bottom)
+ hid_warn(hdev, "%s: overriding exising bottom offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_bottom);
+ features->offset_bottom = value;
+ return;
}
/* send pen events only when touch is up or forced out
* or touch arbitration is off
*/
if (!usage->type || delay_pen_events(wacom_wac))
- return 0;
+ return;
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in/entering/leaving proximity */
+ if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+ return;
- return 0;
+ input_event(input, usage->type, usage->code, value);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
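The serial handling above assembles a 64-bit value from two usages: HID_DG_TOOLSERIALNUMBER fills the low 32 bits and WACOM_HID_WD_SERIALHI the high 32, and for non-USI EMR pens (high word >> 20 == 1) the low 20 bits of the high word also carry tool-type bits. A standalone sketch of that assembly (struct and sample values are illustrative):

#include <stdio.h>
#include <stdint.h>

struct pen_state {
	uint64_t serial;
	uint32_t tool_id;
};

/* Low 32 bits arrive via HID_DG_TOOLSERIALNUMBER. */
static void on_serial_lo(struct pen_state *s, uint32_t value)
{
	s->serial = (s->serial & ~0xFFFFFFFFULL) | value;
}

/* High 32 bits arrive via WACOM_HID_WD_SERIALHI; for non-USI EMR pens
 * (value >> 20 == 1) the low 20 bits also carry tool-type information. */
static void on_serial_hi(struct pen_state *s, uint32_t value)
{
	s->serial = (s->serial & 0xFFFFFFFFULL) | ((uint64_t)value << 32);
	if (value >> 20 == 1)
		s->tool_id |= value & 0xFFFFF;
}

int main(void)
{
	struct pen_state s = { 0 };

	on_serial_lo(&s, 0xdeadbeef);
	on_serial_hi(&s, 0x00100802);	/* hypothetical EMR high word */
	printf("serial=0x%llx tool_id=0x%x\n",
	       (unsigned long long)s.serial, s.tool_id);
	return 0;
}

With this input, serial >> 52 == 1, which is exactly the test the report path below uses to decide whether the tool ID needs legacy mangling.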
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
+ bool range = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ if (!wacom_wac->tool[0] && prox) { /* first in prox */
/* Going into proximity select tool */
- wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
- BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom_wac->hid_data.invert_state)
+ wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->id[0])
+ wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+ else
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
+ }
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = prox;
+ wacom_wac->shared->stylus_in_proximity = range;
- if (!delay_pen_events(wacom_wac)) {
+ if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+ int id = wacom_wac->id[0];
+
+ /*
+ * Non-USI EMR tools should have their IDs mangled to
+ * match the legacy behavior of wacom_intuos_general
+ */
+ if (wacom_wac->serial[0] >> 52 == 1)
+ id = wacom_intuos_id_mangle(id);
+
+ /*
+ * To ensure compatibility with xf86-input-wacom, we should
+ * report the BTN_TOOL_* event prior to the ABS_MISC or
+ * MSC_SERIAL events.
+ */
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
+ if (wacom_wac->serial[0]) {
+ input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ input_report_abs(input, ABS_MISC, id);
+ }
wacom_wac->hid_data.tipswitch = false;
input_sync(input);
}
+
+ if (!prox) {
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ }
}
static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
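wacom_intuos_id_mangle() reinserts the 4-bit gap between the upper tool-type bits and the low 12 bits, so the HID path reports the same ABS_MISC values the legacy protocol did (compare the old and new case labels in wacom_intuos_get_tool_type()). A quick round-trip check:

#include <stdio.h>

/* Same expression as wacom_intuos_id_mangle(): reopen the 4-bit gap that
 * the new HID path drops, so 0x10802 reports as the legacy 0x100802. */
static int id_mangle(int tool_id)
{
	return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
}

int main(void)
{
	printf("0x%x -> 0x%x\n", 0x10802, id_mangle(0x10802));	/* 0x100802 */
	printf("0x%x -> 0x%x\n", 0x1080a, id_mangle(0x1080a));	/* 0x10080a */
	printf("0x%x -> 0x%x\n", 0x802, id_mangle(0x802));	/* unchanged */
	return 0;
}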
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
-
- return 0;
}
static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
/* currently, only direct devices have proper hid report descriptors */
features->device_type |= WACOM_DEVICETYPE_DIRECT;
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_usage_mapping(hdev, field, usage);
+ if (WACOM_PAD_FIELD(field))
+ wacom_wac_pad_usage_mapping(hdev, field, usage);
+ else if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_usage_mapping(hdev, field, usage);
+ else if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_usage_mapping(hdev, field, usage);
}
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
if (wacom->wacom_wac.features.type != HID_GENERIC)
- return 0;
-
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_event(hdev, field, usage, value);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_event(hdev, field, usage, value);
+ return;
- return 0;
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_event(hdev, field, usage, value);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_event(hdev, field, usage, value);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_event(hdev, field, usage, value);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_event(hdev, field, usage, value);
}
static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
if (wacom_wac->features.type != HID_GENERIC)
return;
- if (WACOM_PEN_FIELD(field))
+ if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_pre_report(hdev, report);
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_pre_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_pre_report(hdev, report);
wacom_report_events(hdev, report);
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_report(hdev, report);
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_report(hdev, report);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
+ if (features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
- * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
* so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+ features->x_max - features->offset_right,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+ features->y_max - features->offset_bottom,
+ features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static int wacom_numbered_button_to_key(int n)
+{
+ if (n < 10)
+ return BTN_0 + n;
+ else if (n < 16)
+ return BTN_A + (n-10);
+ else if (n < 18)
+ return BTN_BASE + (n-16);
+ else
+ return 0;
+}
+
static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
int button_count)
{
int i;
- for (i = 0; i < button_count && i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- for (i = 10; i < button_count && i < 16; i++)
- __set_bit(BTN_A + (i-10), input_dev->keybit);
- for (i = 16; i < button_count && i < 18; i++)
- __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ __set_bit(key, input_dev->keybit);
+ }
}
static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
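wacom_numbered_button_to_key() centralises the BTN_0/BTN_A/BTN_BASE split that used to be duplicated between the setup and reporting paths. A small sketch of the resulting table (userspace on Linux, event codes taken from the uapi header <linux/input-event-codes.h>):

#include <stdio.h>
#include <linux/input-event-codes.h>

/* Same mapping as the new helper: buttons 0-9 -> BTN_0.., 10-15 -> BTN_A..,
 * 16-17 -> BTN_BASE.., anything beyond that is unsupported (0). */
static int numbered_button_to_key(int n)
{
	if (n < 10)
		return BTN_0 + n;
	else if (n < 16)
		return BTN_A + (n - 10);
	else if (n < 18)
		return BTN_BASE + (n - 16);
	return 0;
}

int main(void)
{
	for (int n = 0; n < 19; n++)
		printf("button %2d -> key code 0x%x\n",
		       n, numbered_button_to_key(n));
	return 0;
}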
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, button_count, mask, i);
- for (i = 0; i < button_count && i < 10; i++)
- input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
- for (i = 10; i < button_count && i < 16; i++)
- input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
- for (i = 16; i < button_count && i < 18; i++)
- input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ input_report_key(input_dev, key, mask & (1 << i));
+ }
}
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(ABS_MISC, input_dev->absbit);
/* kept for making legacy xf86-input-wacom accepting the pad */
- input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+ input_dev->absinfo[ABS_X].maximum)))
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+ input_dev->absinfo[ABS_Y].maximum)))
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
/* kept for making udev and libwacom accepting the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+ case HID_GENERIC:
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
- { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
- { "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+ { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 21896, 13760, 511, 0,
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
- { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ { "Wacom DTU1031X", 22672, 12928, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
- { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ { "Wacom DTU1141", 23672, 13403, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ { "Wacom DTK2241", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ { "Wacom DTH2242", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 307", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
static const struct wacom_features wacom_features_0x326 = /* Touch */
{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ { "Wacom DTK1651", 34816, 19759, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b0c119..fb0e50acb10d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
+#define WACOM_QUIRK_SENSE 0x0002
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -84,23 +85,66 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_DEVICETYPE_DIRECT 0x0010
-#define WACOM_VENDORDEFINED_PEN 0xff0d0001
-#define WACOM_G9_PAGE 0xff090000
-#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE 0xff110000
-#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER 0xff0d0000
+#define WACOM_HID_SP_PAD 0x00040000
+#define WACOM_HID_SP_BUTTON 0x00090000
+#define WACOM_HID_SP_DIGITIZER 0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
+#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2 (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9 0xff090000
+#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11 0xff110000
+#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
- ((f)->application == WACOM_VENDORDEFINED_PEN))
+ ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ ((f)->application == WACOM_HID_G9_PEN) || \
+ ((f)->application == WACOM_HID_G11_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
+ ((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
enum {
PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
int x_resolution;
int y_resolution;
int numbered_buttons;
- int x_min;
- int y_min;
+ int offset_left;
+ int offset_right;
+ int offset_top;
+ int offset_bottom;
int device_type;
int x_phy;
int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ bool input_event_flag;
};
struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
struct hid_data {
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool sense_state;
bool inrange_state;
bool invert_state;
bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
int last_slot_field;
int num_expected;
int num_received;
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
};
struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u64 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
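Note: the WACOM_HID_* constants added above follow the usual HID convention of placing the 16-bit usage page in the upper half of a 32-bit usage value. The macro below is purely illustrative (the name EXAMPLE_HID_USAGE is invented) and only restates how those values are composed.
/* Illustrative only: a full 32-bit HID usage is the usage page in the upper
 * 16 bits OR'd with the usage ID in the lower 16 bits, which is why e.g.
 * WACOM_HID_WD_SENSE is (0xff0d0000 | 0x36) == 0xff0d0036. */
#define EXAMPLE_HID_USAGE(page, id)	(((page) & 0xffff0000) | ((id) & 0x0000ffff))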
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 6031cd146556..7ef819680acd 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -960,15 +960,6 @@ static int ssip_pn_stop(struct net_device *dev)
return 0;
}
-static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
- return -EINVAL;
- dev->mtu = new_mtu;
-
- return 0;
-}
-
static void ssip_xmit_work(struct work_struct *work)
{
struct ssi_protocol *ssi =
@@ -1060,7 +1051,6 @@ static const struct net_device_ops ssip_pn_ops = {
.ndo_open = ssip_pn_open,
.ndo_stop = ssip_pn_stop,
.ndo_start_xmit = ssip_pn_xmit,
- .ndo_change_mtu = ssip_pn_set_mtu,
};
static void ssip_pn_setup(struct net_device *dev)
@@ -1136,6 +1126,10 @@ static int ssi_protocol_probe(struct device *dev)
goto out1;
}
+ /* MTU range: 6 - 65535 */
+ ssi->netdev->min_mtu = PHONET_MIN_MTU;
+ ssi->netdev->max_mtu = SSIP_MAX_MTU;
+
SET_NETDEV_DEV(ssi->netdev, dev);
netif_carrier_off(ssi->netdev);
err = register_netdev(ssi->netdev);
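Note: this hunk is part of the tree-wide move away from per-driver ndo_change_mtu range checks to the net core's min_mtu/max_mtu fields. A minimal sketch of that pattern follows; it is not code from the patch, and the numeric limits merely stand in for PHONET_MIN_MTU and SSIP_MAX_MTU.
#include <linux/netdevice.h>

/* Minimal sketch: once min_mtu/max_mtu are set at probe time, dev_set_mtu()
 * in the core rejects out-of-range values, so a private validator like the
 * removed ssip_pn_set_mtu() is no longer needed. */
static void example_set_mtu_range(struct net_device *dev)
{
	dev->min_mtu = 6;	/* stands in for PHONET_MIN_MTU */
	dev->max_mtu = 65535;	/* stands in for SSIP_MAX_MTU */
}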
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8490fe..5fb4c6d9209b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
vmbus_set_event(channel);
}
}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
/*
* vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- int ret;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- * NOTE: in this case, the hvsock channel is an exception, because
- * it looks the host side's hvsock implementation has a throttling
- * mechanism which can hurt the performance otherwise.
- */
-
- if (((ret == 0) && kick_q && signal) ||
- (ret && !is_hvsock_channel(channel)))
- vmbus_setevent(channel);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+ lock, kick_q);
- return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 flags,
bool kick_q)
{
- int ret;
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- */
-
- if (((ret == 0) && kick_q && signal) || (ret))
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *multi_pagebuffer,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
struct vmbus_channel_packet_multipage_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
- int ret;
- bool signal = false;
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
- buffer_actual_len, requestid, &signal, raw);
-
- if (signal)
- vmbus_setevent(channel);
-
- return ret;
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 96a85cd39580..26b419203f16 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -134,7 +134,7 @@ static const struct vmbus_device vmbus_devs[] = {
},
/* Unknown GUID */
- { .dev_type = HV_UNKOWN,
+ { .dev_type = HV_UNKNOWN,
.perf_device = false,
},
};
@@ -163,9 +163,9 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
- return HV_UNKOWN;
+ return HV_UNKNOWN;
- for (i = HV_IDE; i < HV_UNKOWN; i++) {
+ for (i = HV_IDE; i < HV_UNKNOWN; i++) {
if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
return i;
}
@@ -389,6 +389,7 @@ void vmbus_free_channels(void)
{
struct vmbus_channel *channel, *tmp;
+ mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
/* hv_process_channel_removal() needs this */
@@ -396,6 +397,7 @@ void vmbus_free_channels(void)
vmbus_device_unregister(channel->device_obj);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
@@ -447,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
dev_type = hv_get_dev_type(newchannel);
- if (dev_type == HV_NIC)
- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 78e6368a4423..6ce8b874e833 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -39,6 +39,7 @@ struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
};
+EXPORT_SYMBOL_GPL(vmbus_connection);
/*
* Negotiated protocol version with the host.
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 60dbd6cb4640..446802ae8f1b 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -575,7 +575,7 @@ void hv_synic_clockevents_cleanup(void)
if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
return;
- for_each_online_cpu(cpu)
+ for_each_present_cpu(cpu)
clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}
@@ -594,8 +594,10 @@ void hv_synic_cleanup(void *arg)
return;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
hv_ce_shutdown(hv_context.clk_evt[cpu]);
+ }
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf8da929cbe..14c3dc4bd23c 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -564,6 +564,11 @@ struct hv_dynmem_device {
* next version to try.
*/
__u32 next_version;
+
+ /*
+ * The protocol version negotiated with the host.
+ */
+ __u32 version;
};
static struct hv_dynmem_device dm_device;
@@ -645,6 +650,7 @@ static void hv_bring_pgs_online(struct hv_hotadd_state *has,
{
int i;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
for (i = 0; i < size; i++)
hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
@@ -685,7 +691,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
(HA_CHUNK << PAGE_SHIFT));
if (ret) {
- pr_info("hot_add memory failed error is %d\n", ret);
+ pr_warn("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
@@ -814,6 +820,9 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long old_covered_state;
unsigned long res = 0, flags;
+ pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+ pg_start);
+
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
@@ -1025,8 +1034,13 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
- pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", info_hdr->data_size);
+ if (info_hdr->data_size == sizeof(__u64)) {
+ __u64 *max_page_count = (__u64 *)&info_hdr[1];
+
+ pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
+ *max_page_count);
+ }
+
break;
default:
pr_info("Received Unknown type: %d\n", info_hdr->type);
@@ -1196,8 +1210,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
return num_pages;
}
-
-
static void balloon_up(struct work_struct *dummy)
{
unsigned int num_pages = dm_device.balloon_wrk.num_pages;
@@ -1224,6 +1236,10 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor, keep the 2M granularity. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ pr_warn("Balloon request will be partially fulfilled. %s\n",
+ avail_pages < num_pages ? "Not enough memory." :
+ "Balloon floor reached.");
+
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
@@ -1245,6 +1261,9 @@ static void balloon_up(struct work_struct *dummy)
}
if (num_ballooned == 0 || num_ballooned == num_pages) {
+ pr_debug("Ballooned %u out of %u requested pages.\n",
+ num_pages, dm_device.balloon_wrk.num_pages);
+
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
@@ -1292,12 +1311,16 @@ static void balloon_down(struct hv_dynmem_device *dm,
int range_count = req->range_count;
struct dm_unballoon_response resp;
int i;
+ unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
complete(&dm_device.config_event);
}
+ pr_debug("Freed %u ballooned pages.\n",
+ prev_pages_ballooned - dm->num_pages_ballooned);
+
if (req->more_pages == 1)
return;
@@ -1365,6 +1388,7 @@ static void version_resp(struct hv_dynmem_device *dm,
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = dm->next_version;
+ dm->version = version_req.version.version;
/*
* Set the next version to try in case current version fails.
@@ -1501,7 +1525,11 @@ static int balloon_probe(struct hv_device *dev,
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
+#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
/*
* First allocate a send buffer.
@@ -1553,6 +1581,7 @@ static int balloon_probe(struct hv_device *dev,
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
+ dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
@@ -1575,6 +1604,11 @@ static int balloon_probe(struct hv_device *dev,
ret = -ETIMEDOUT;
goto probe_error2;
}
+
+ pr_info("Using Dynamic Memory protocol version %u.%u\n",
+ DYNMEM_MAJOR_VERSION(dm_device.version),
+ DYNMEM_MINOR_VERSION(dm_device.version));
+
/*
* Now submit our capabilities to the host.
*/
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a6707133c297..eee238cc60bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,7 +31,10 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
-#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
+/*
+ * Timeout values are based on expectations from the host
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
/*
* Global state maintained for transaction that is being processed. For a class
@@ -120,7 +123,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
default:
return -EINVAL;
}
- pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+ pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
@@ -128,8 +131,10 @@ static int vss_on_msg(void *msg, int len)
{
struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
- if (len != sizeof(*vss_msg))
+ if (len != sizeof(*vss_msg)) {
+ pr_debug("VSS: Message size does not match length\n");
return -EINVAL;
+ }
if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
@@ -137,8 +142,11 @@ static int vss_on_msg(void *msg, int len)
* Don't process registration messages if we're in the middle
* of a transaction processing.
*/
- if (vss_transaction.state > HVUTIL_READY)
+ if (vss_transaction.state > HVUTIL_READY) {
+ pr_debug("VSS: Got unexpected registration request\n");
return -EINVAL;
+ }
+
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
@@ -155,7 +163,7 @@ static int vss_on_msg(void *msg, int len)
}
} else {
/* This is a spurious call! */
- pr_warn("VSS: Transaction not active\n");
+ pr_debug("VSS: Transaction not active\n");
return -EINVAL;
}
return 0;
@@ -168,8 +176,10 @@ static void vss_send_op(void)
struct hv_vss_msg *vss_msg;
/* The transaction state is wrong. */
- if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+ pr_debug("VSS: Unexpected attempt to send to daemon\n");
return;
+ }
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
@@ -179,7 +189,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+ schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+ VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -210,9 +221,13 @@ static void vss_handle_request(struct work_struct *dummy)
case VSS_OP_HOT_BACKUP:
if (vss_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
+ pr_debug("VSS: Not ready for request.\n");
vss_respond_to_host(HV_E_FAIL);
return;
}
+
+ pr_debug("VSS: Received request for op code: %d\n",
+ vss_transaction.msg->vss_hdr.operation);
vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
vss_send_op();
return;
@@ -353,8 +368,10 @@ hv_vss_init(struct hv_util_service *srv)
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
- if (!hvt)
+ if (!hvt) {
+ pr_warn("VSS: Failed to initialize transport\n");
return -EFAULT;
+ }
return 0;
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index bcd06306f3e8..e7707747f56d 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -389,16 +389,19 @@ static int util_probe(struct hv_device *dev,
ts_srv_version = TS_VERSION_1;
hb_srv_version = HB_VERSION_1;
break;
- case(VERSION_WIN10):
+ case VERSION_WIN7:
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION;
+ ts_srv_version = TS_VERSION_3;
hb_srv_version = HB_VERSION;
break;
+ case VERSION_WIN10:
default:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION_3;
+ ts_srv_version = TS_VERSION;
hb_srv_version = HB_VERSION;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442433c8..0675b395ce5c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -38,7 +38,7 @@
/*
* Timeout for guest-host handshake for services.
*/
-#define HV_UTIL_NEGO_TIMEOUT 60
+#define HV_UTIL_NEGO_TIMEOUT 55
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy);
+ u32 kv_count, bool lock,
+ bool kick_q);
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw);
+ u64 *requestid, bool raw);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da1a61c..cd49cb17eb7f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if the host receives interrupts when it is not
+ * expecting them. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non-full on the
+ * guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
-
- /*
- * When the client wants to control signaling,
- * we only honour the host interrupt mask.
- */
- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
- return true;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw)
+ u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 offset;
u32 packetlen;
int ret = 0;
+ struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
@@ -416,7 +422,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(inring_info);
+ hv_signal_on_read(channel);
return ret;
}
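Note: the signalling rule described in the comment above (signal only when the ring transitions from empty to non-empty) reduces to a single comparison. The helper below merely restates that rule with invented names; it is not part of the patch.
#include <linux/types.h>

/* The writer records the write index it saw before copying data in; if the
 * reader's read index still equals that old write index, the ring was empty
 * at the start of the write and the host must be signalled. */
static bool example_ring_was_empty(u32 old_write, u32 read_index)
{
	return old_write == read_index;
}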
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a259e18d22d5..230c62e7f567 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,6 +45,11 @@
#include <linux/random.h>
#include "hyperv_vmbus.h"
+struct vmbus_dynid {
+ struct list_head node;
+ struct hv_vmbus_device_id id;
+};
+
static struct acpi_device *hv_acpi_dev;
static struct completion probe_event;
@@ -500,7 +505,7 @@ static ssize_t device_show(struct device *dev,
static DEVICE_ATTR_RO(device);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct attribute *vmbus_attrs[] = {
+static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_state.attr,
&dev_attr_monitor_id.attr,
@@ -528,7 +533,7 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_device.attr,
NULL,
};
-ATTRIBUTE_GROUPS(vmbus);
+ATTRIBUTE_GROUPS(vmbus_dev);
/*
* vmbus_uevent - add uevent for our device
@@ -565,10 +570,29 @@ static inline bool is_null_guid(const uuid_le *guid)
* Return a matching hv_vmbus_device_id pointer.
* If there is no match, return NULL.
*/
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(
- const struct hv_vmbus_device_id *id,
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
const uuid_le *guid)
{
+ const struct hv_vmbus_device_id *id = NULL;
+ struct vmbus_dynid *dynid;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+ id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ if (id)
+ return id;
+
+ id = drv->id_table;
+ if (id == NULL)
+ return NULL; /* empty device table */
+
for (; !is_null_guid(&id->guid); id++)
if (!uuid_le_cmp(id->guid, *guid))
return id;
@@ -576,6 +600,134 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(
return NULL;
}
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+{
+ struct vmbus_dynid *dynid;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.guid = *guid;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ return driver_attach(&drv->driver);
+}
+
+static void vmbus_free_dynids(struct hv_driver *drv)
+{
+ struct vmbus_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f */
+static int get_uuid_le(const char *str, uuid_le *uu)
+{
+ unsigned int b[16];
+ int i;
+
+ if (strlen(str) < 37)
+ return -1;
+
+ for (i = 0; i < 36; i++) {
+ switch (i) {
+ case 8: case 13: case 18: case 23:
+ if (str[i] != '-')
+ return -1;
+ break;
+ default:
+ if (!isxdigit(str[i]))
+ return -1;
+ }
+ }
+
+ /* scan into little-endian (uuid_le) byte order */
+ if (sscanf(str,
+ "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
+ &b[3], &b[2], &b[1], &b[0],
+ &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
+ &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
+ return -1;
+
+ for (i = 0; i < 16; i++)
+ uu->b[i] = b[i];
+ return 0;
+}
+
+/*
+ * new_id_store - sysfs frontend to vmbus_add_dynid()
+ *
+ * Allow GUIDs to be added to an existing driver via sysfs.
+ */
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ uuid_le guid = NULL_UUID_LE;
+ ssize_t retval;
+
+ if (get_uuid_le(buf, &guid) != 0)
+ return -EINVAL;
+
+ if (hv_vmbus_get_id(drv, &guid))
+ return -EEXIST;
+
+ retval = vmbus_add_dynid(drv, &guid);
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR_WO(new_id);
+
+/*
+ * remove_id_store - remove a device GUID from this driver
+ *
+ * Removes a dynamically added vmbus device GUID from this driver.
+ */
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct hv_driver *drv = drv_to_hv_drv(driver);
+ struct vmbus_dynid *dynid, *n;
+ uuid_le guid = NULL_UUID_LE;
+ ssize_t retval = -ENODEV;
+
+ if (get_uuid_le(buf, &guid))
+ return -EINVAL;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ struct hv_vmbus_device_id *id = &dynid->id;
+
+ if (!uuid_le_cmp(id->guid, guid)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = count;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return retval;
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *vmbus_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vmbus_drv);
/*
@@ -590,7 +742,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
- if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
+ if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
return 1;
return 0;
@@ -607,7 +759,7 @@ static int vmbus_probe(struct device *child_device)
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
- dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
+ dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
@@ -684,7 +836,8 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
- .dev_groups = vmbus_groups,
+ .dev_groups = vmbus_dev_groups,
+ .drv_groups = vmbus_drv_groups,
};
struct onmessage_work_context {
@@ -905,6 +1058,9 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
hv_driver->driver.mod_name = mod_name;
hv_driver->driver.bus = &hv_bus;
+ spin_lock_init(&hv_driver->dynids.lock);
+ INIT_LIST_HEAD(&hv_driver->dynids.list);
+
ret = driver_register(&hv_driver->driver);
return ret;
@@ -923,8 +1079,10 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
- if (!vmbus_exists())
+ if (!vmbus_exists()) {
driver_unregister(&hv_driver->driver);
+ vmbus_free_dynids(hv_driver);
+ }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
@@ -961,7 +1119,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- dev_set_name(&child_device_obj->device, "vmbus-%pUl",
+ dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
child_device_obj->device.bus = &hv_bus;
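Note: with the new_id/remove_id driver attributes added above, a GUID can be bound to an already loaded vmbus driver from userspace. The userspace sketch below is hypothetical; the driver name and GUID are placeholders, and only the attribute name comes from the patch.
#include <stdio.h>

int main(void)
{
	/* Placeholder path and GUID; the string is parsed by the
	 * get_uuid_le() helper added in the patch above. */
	FILE *f = fopen("/sys/bus/vmbus/drivers/example_drv/new_id", "w");

	if (!f)
		return 1;
	fputs("1b4e28ba-2fa1-11d2-883f-b9a761bde3fb\n", f);
	return fclose(f) ? 1 : 0;
}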
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 45cef3d2c75c..190d270b20a2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -907,6 +907,17 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_TC654
+ tristate "Microchip TC654/TC655 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for TC654 and TC655.
+ The TC654 and TC655 are PWM mode fan speed controllers with
+ FanSense technology for use with brushless DC fans.
+
+ This driver can also be built as a module. If so, the module
+ will be called tc654.
+
config SENSORS_MENF21BMC_HWMON
tristate "MEN 14F021P00 BMC Hardware Monitoring"
depends on MFD_MENF21BMC
@@ -1068,8 +1079,8 @@ config SENSORS_LM90
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, and GMT G781
- sensor chips.
+ Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
+ Texas Instruments TMP451 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -1591,6 +1602,17 @@ config SENSORS_TMP103
This driver can also be built as a module. If so, the module
will be called tmp103.
+config SENSORS_TMP108
+ tristate "Texas Instruments TMP108"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP108
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp108.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index aecf4ba17460..d2cb7e804a0f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
+obj-$(CONFIG_SENSORS_TC654) += tc654.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
@@ -152,6 +153,7 @@ obj-$(CONFIG_SENSORS_TC74) += tc74.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
+obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index d6c767ace916..1abb4609b412 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -93,7 +93,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
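Note: this adm1025 change and the adm1026, adm9240, adt7462 and adt7470 changes that follow all apply the same fix: clamp the user-supplied value to the representable range before scaling, so the intermediate multiplication cannot overflow. The function below is an illustrative restatement with invented names, not code from the patch.
/* With the old "(val) * 192 >= (scale) * 255" form, a very large val could
 * overflow before the comparison; clamping in the input domain first avoids
 * that. */
static unsigned int example_in_to_reg(long val, long scale)
{
	if (val <= 0)
		return 0;
	if (val >= scale * 255 / 192)
		return 255;
	return (val * 192 + scale / 2) / scale;
}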
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e67b9a50ac7c..b2a5d9e5c590 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,8 +197,9 @@ static int adm1026_scaling[] = { /* .001 Volts */
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
- 0, 255))
+#define INS_TO_REG(n, val) \
+ SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \
+ adm1026_scaling[n], 192)
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
/*
@@ -215,11 +216,11 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define TEMP_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
- / 1000, -127, 127))
+#define OFFSET_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+ 1000)
#define OFFSET_FROM_REG(val) ((val) * 1000)
#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
@@ -233,7 +234,8 @@ static int adm1026_scaling[] = { /* .001 Volts */
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
-#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, 0, 2500) * 255, \
+ 2500)
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
@@ -593,7 +595,10 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET);
+ data->in_min[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]);
mutex_unlock(&data->update_lock);
return count;
@@ -618,7 +623,10 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET);
+ data->in_max[16] = INS_TO_REG(16,
+ clamp_val(val, INT_MIN,
+ INT_MAX - NEG12_OFFSET) +
+ NEG12_OFFSET);
adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 2fe1828bd10b..72bf2489511e 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/* temperature range: -40..125, 127 disables temperature alarm */
static inline s8 TEMP_TO_REG(long val)
{
- return clamp_val(SCALE(val, 1, 1000), -40, 127);
+ val = clamp_val(val, -40000, 127000);
+ return SCALE(val, 1, 1000);
}
/* two fans, each with low fan speed limit */
@@ -122,7 +124,8 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
/* analog out 0..1250mV */
static inline u8 AOUT_TO_REG(unsigned long val)
{
- return clamp_val(SCALE(val, 255, 1250), 0, 255);
+ val = clamp_val(val, 0, 1250);
+ return SCALE(val, 255, 1250);
}
static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 812fbc00f693..bdeaece9641d 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -55,7 +55,7 @@ struct adt7411_data {
struct mutex device_lock; /* for "atomic" device accesses */
struct mutex update_lock;
unsigned long next_update;
- int vref_cached;
+ long vref_cached;
struct i2c_client *client;
bool use_ext_temp;
};
@@ -114,85 +114,6 @@ static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
return ret;
}
-static ssize_t adt7411_show_vdd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
-
- return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
-}
-
-static ssize_t adt7411_show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- struct {
- u8 low;
- u8 high;
- } reg[2] = {
- { ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB },
- { ADT7411_REG_EXT_TEMP_AIN14_LSB,
- ADT7411_REG_EXT_TEMP_AIN1_MSB },
- };
-
- val = adt7411_read_10_bit(client, reg[nr].low, reg[nr].high, 0);
- if (val < 0)
- return val;
-
- val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
-
- return sprintf(buf, "%d\n", val * 250);
-}
-
-static ssize_t adt7411_show_input(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int nr = to_sensor_dev_attr(attr)->index;
- struct adt7411_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int val;
- u8 lsb_reg, lsb_shift;
-
- mutex_lock(&data->update_lock);
- if (time_after_eq(jiffies, data->next_update)) {
- val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
- if (val < 0)
- goto exit_unlock;
-
- if (val & ADT7411_CFG3_REF_VDD) {
- val = adt7411_read_10_bit(client,
- ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_VDD_MSB, 2);
- if (val < 0)
- goto exit_unlock;
-
- data->vref_cached = val * 7000 / 1024;
- } else {
- data->vref_cached = 2250;
- }
-
- data->next_update = jiffies + HZ;
- }
-
- lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
- lsb_shift = 2 * (nr & 0x03);
- val = adt7411_read_10_bit(client, lsb_reg,
- ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
- if (val < 0)
- goto exit_unlock;
-
- val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
- exit_unlock:
- mutex_unlock(&data->update_lock);
- return val;
-}
-
static ssize_t adt7411_show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -228,65 +149,157 @@ static ssize_t adt7411_set_bit(struct device *dev,
return ret < 0 ? ret : count;
}
-
#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
adt7411_set_bit, __bit, __reg)
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, adt7411_show_temp, NULL, 1);
-static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
static struct attribute *adt7411_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &dev_attr_in0_input.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_no_average.dev_attr.attr,
&sensor_dev_attr_fast_sampling.dev_attr.attr,
&sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(adt7411);
-static umode_t adt7411_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
{
- struct device *dev = container_of(kobj, struct device, kobj);
struct adt7411_data *data = dev_get_drvdata(dev);
- bool visible = true;
+ struct i2c_client *client = data->client;
+ int ret;
- if (attr == &sensor_dev_attr_temp2_input.dev_attr.attr)
- visible = data->use_ext_temp;
- else if (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in2_input.dev_attr.attr)
- visible = !data->use_ext_temp;
+ switch (attr) {
+ case hwmon_in_input:
+ ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_VDD_MSB, 2);
+ if (ret < 0)
+ return ret;
+ *val = ret * 7000 / 1024;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
- return visible ? attr->mode : 0;
+ int ret;
+ int lsb_reg, lsb_shift;
+ int nr = channel - 1;
+
+ mutex_lock(&data->update_lock);
+ if (time_after_eq(jiffies, data->next_update)) {
+ ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (ret & ADT7411_CFG3_REF_VDD) {
+ ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+ &data->vref_cached);
+ if (ret < 0)
+ goto exit_unlock;
+ } else {
+ data->vref_cached = 2250;
+ }
+
+ data->next_update = jiffies + HZ;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
+ lsb_shift = 2 * (nr & 0x03);
+ ret = adt7411_read_10_bit(client, lsb_reg,
+ ADT7411_REG_EXT_TEMP_AIN1_MSB + nr,
+ lsb_shift);
+ if (ret < 0)
+ goto exit_unlock;
+ *val = ret * data->vref_cached / 1024;
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ exit_unlock:
+ mutex_unlock(&data->update_lock);
+ return ret;
}
-static const struct attribute_group adt7411_group = {
- .attrs = adt7411_attrs,
- .is_visible = adt7411_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(adt7411);
+static int adt7411_read_in(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ if (channel == 0)
+ return adt7411_read_in_vdd(dev, attr, val);
+ else
+ return adt7411_read_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret, regl, regh;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ regl = channel ? ADT7411_REG_EXT_TEMP_AIN14_LSB :
+ ADT7411_REG_INT_TEMP_VDD_LSB;
+ regh = channel ? ADT7411_REG_EXT_TEMP_AIN1_MSB :
+ ADT7411_REG_INT_TEMP_MSB;
+ ret = adt7411_read_10_bit(client, regl, regh, 0);
+ if (ret < 0)
+ return ret;
+ ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
+ *val = ret * 250;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return adt7411_read_in(dev, attr, channel, val);
+ case hwmon_temp:
+ return adt7411_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adt7411_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adt7411_data *data = _data;
+
+ switch (type) {
+ case hwmon_in:
+ if (channel > 0 && channel < 3)
+ return data->use_ext_temp ? 0 : S_IRUGO;
+ else
+ return S_IRUGO;
+ case hwmon_temp:
+ if (channel == 1)
+ return data->use_ext_temp ? S_IRUGO : 0;
+ else
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+}
static int adt7411_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -358,6 +371,51 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
+static const u32 adt7411_in_config[] = {
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_in = {
+ .type = hwmon_in,
+ .config = adt7411_in_config,
+};
+
+static const u32 adt7411_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info adt7411_temp = {
+ .type = hwmon_temp,
+ .config = adt7411_temp_config,
+};
+
+static const struct hwmon_channel_info *adt7411_info[] = {
+ &adt7411_in,
+ &adt7411_temp,
+ NULL
+};
+
+static const struct hwmon_ops adt7411_hwmon_ops = {
+ .is_visible = adt7411_is_visible,
+ .read = adt7411_read,
+};
+
+static const struct hwmon_chip_info adt7411_chip_info = {
+ .ops = &adt7411_hwmon_ops,
+ .info = adt7411_info,
+};
+
static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -382,9 +440,10 @@ static int adt7411_probe(struct i2c_client *client,
/* force update on first occasion */
data->next_update = jiffies;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- adt7411_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &adt7411_chip_info,
+ adt7411_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 5929e126da63..19f2a6d48bac 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -810,8 +810,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -848,8 +848,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -912,9 +912,9 @@ static ssize_t set_volt_max(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
@@ -954,9 +954,9 @@ static ssize_t set_volt_min(struct device *dev,
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
+ temp = clamp_val(temp, 0, 255 * x / 1000);
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
@@ -1220,8 +1220,8 @@ static ssize_t set_pwm_hyst(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, 0, 15000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
@@ -1306,8 +1306,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -64000, 191000);
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 6e60ca53406e..c9a1d9c25572 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -483,8 +483,8 @@ static ssize_t set_temp_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -517,8 +517,8 @@ static ssize_t set_temp_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -880,8 +880,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
+ temp = clamp_val(temp, -128000, 127000);
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = clamp_val(temp, -128, 127);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 12e851a5af48..46b4e35fd555 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
!data->valid) {
for (i = 0; i < TEMP_IDX_LEN; i++)
- data->temp[i] = i2c_smbus_read_byte_data(client,
- temp_reg[i]);
+ data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
+ client, temp_reg[i]);
data->stat1 = i2c_smbus_read_byte_data(client,
AMC6821_REG_STAT1);
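Note: the (int8_t) cast added above matters because i2c_smbus_read_byte_data() returns the register byte zero-extended in an s32, so negative temperatures would otherwise read back as large positive values. A two-line illustration (not from the patch) follows.
/* Illustrative only: 0xF6 is -10 degC in two's complement; without the cast
 * it would be stored as 246. */
static int example_temp_from_byte(int raw_byte)
{
	return (int8_t)raw_byte;
}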
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6a27eb2fed17..3ac4c03ba77b 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -51,6 +51,7 @@ static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
@@ -58,7 +59,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id)
#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
@@ -102,20 +102,17 @@ struct temp_data {
/* Platform Data per Physical CPU */
struct platform_data {
- struct device *hwmon_dev;
- u16 phys_proc_id;
- struct temp_data *core_data[MAX_CORE_DATA];
+ struct device *hwmon_dev;
+ u16 pkg_id;
+ struct cpumask cpumask;
+ struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
};
-struct pdev_entry {
- struct list_head list;
- struct platform_device *pdev;
- u16 phys_proc_id;
-};
-
-static LIST_HEAD(pdev_list);
-static DEFINE_MUTEX(pdev_list_mutex);
+/* Keep track of how many package pointers we allocated in init() */
+static int max_packages __read_mostly;
+/* Array of package pointers. Serialized by cpu hotplug lock */
+static struct platform_device **pkg_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -125,7 +122,7 @@ static ssize_t show_label(struct device *dev,
struct temp_data *tdata = pdata->core_data[attr->index];
if (tdata->is_pkg_data)
- return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);
+ return sprintf(buf, "Package id %u\n", pdata->pkg_id);
return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}
@@ -138,7 +135,9 @@ static ssize_t show_crit_alarm(struct device *dev,
struct platform_data *pdata = dev_get_drvdata(dev);
struct temp_data *tdata = pdata->core_data[attr->index];
+ mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+ mutex_unlock(&tdata->update_lock);
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
@@ -435,18 +434,10 @@ static int chk_ucode_version(unsigned int cpu)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- u16 phys_proc_id = TO_PHYS_ID(cpu);
- struct pdev_entry *p;
-
- mutex_lock(&pdev_list_mutex);
+ int pkgid = topology_logical_package_id(cpu);
- list_for_each_entry(p, &pdev_list, list)
- if (p->phys_proc_id == phys_proc_id) {
- mutex_unlock(&pdev_list_mutex);
- return p->pdev;
- }
-
- mutex_unlock(&pdev_list_mutex);
+ if (pkgid >= 0 && pkgid < max_packages)
+ return pkg_devices[pkgid];
return NULL;
}
@@ -483,21 +474,11 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
- attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);
+ attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
if (attr_no > MAX_CORE_DATA - 1)
return -ERANGE;
- /*
- * Provide a single set of attributes for all HT siblings of a core
- * to avoid duplicate sensors (the processor ID and core ID of all
- * HT siblings of a core are the same).
- * Skip if a HT sibling of this core is already registered.
- * This is not an error.
- */
- if (pdata->core_data[attr_no] != NULL)
- return 0;
-
tdata = init_temp_data(cpu, pkg_flag);
if (!tdata)
return -ENOMEM;
@@ -539,21 +520,14 @@ exit_free:
return err;
}
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void
+coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
- struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
-
- if (!pdev)
- return;
-
- err = create_core_data(pdev, cpu, pkg_flag);
- if (err)
+ if (create_core_data(pdev, cpu, pkg_flag))
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
-static void coretemp_remove_core(struct platform_data *pdata,
- int indx)
+static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
@@ -574,7 +548,7 @@ static int coretemp_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- pdata->phys_proc_id = pdev->id;
+ pdata->pkg_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -602,85 +576,33 @@ static struct platform_driver coretemp_driver = {
.remove = coretemp_remove,
};
-static int coretemp_device_add(unsigned int cpu)
+static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err;
+ int err, pkgid = topology_logical_package_id(cpu);
struct platform_device *pdev;
- struct pdev_entry *pdev_entry;
- mutex_lock(&pdev_list_mutex);
+ if (pkgid < 0)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit;
- }
-
- pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
- if (!pdev_entry) {
- err = -ENOMEM;
- goto exit_device_put;
- }
+ pdev = platform_device_alloc(DRVNAME, pkgid);
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
err = platform_device_add(pdev);
if (err) {
- pr_err("Device addition failed (%d)\n", err);
- goto exit_device_free;
- }
-
- pdev_entry->pdev = pdev;
- pdev_entry->phys_proc_id = pdev->id;
-
- list_add_tail(&pdev_entry->list, &pdev_list);
- mutex_unlock(&pdev_list_mutex);
-
- return 0;
-
-exit_device_free:
- kfree(pdev_entry);
-exit_device_put:
- platform_device_put(pdev);
-exit:
- mutex_unlock(&pdev_list_mutex);
- return err;
-}
-
-static void coretemp_device_remove(unsigned int cpu)
-{
- struct pdev_entry *p, *n;
- u16 phys_proc_id = TO_PHYS_ID(cpu);
-
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->phys_proc_id != phys_proc_id)
- continue;
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
+ platform_device_put(pdev);
+ return ERR_PTR(err);
}
- mutex_unlock(&pdev_list_mutex);
-}
-
-static bool is_any_core_online(struct platform_data *pdata)
-{
- int i;
- /* Find online cores, except pkgtemp data */
- for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
- if (pdata->core_data[i] &&
- !pdata->core_data[i]->is_pkg_data) {
- return true;
- }
- }
- return false;
+ pkg_devices[pkgid] = pdev;
+ return pdev;
}
-static void get_core_online(unsigned int cpu)
+static int coretemp_cpu_online(unsigned int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct platform_device *pdev = coretemp_get_pdev(cpu);
- int err;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct platform_data *pdata;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -688,12 +610,12 @@ static void get_core_online(unsigned int cpu)
* without thermal sensors will be filtered out.
*/
if (!cpu_has(c, X86_FEATURE_DTHERM))
- return;
+ return -ENODEV;
if (!pdev) {
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
- return;
+ return -EINVAL;
/*
* Alright, we have DTS support.
@@ -701,101 +623,100 @@ static void get_core_online(unsigned int cpu)
* online. So, initialize per-pkg data structures and
* then bring this core online.
*/
- err = coretemp_device_add(cpu);
- if (err)
- return;
+ pdev = coretemp_device_add(cpu);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
/*
* Check whether pkgtemp support is available.
* If so, add interfaces for pkgtemp.
*/
if (cpu_has(c, X86_FEATURE_PTS))
- coretemp_add_core(cpu, 1);
+ coretemp_add_core(pdev, cpu, 1);
}
+
+ pdata = platform_get_drvdata(pdev);
/*
- * Physical CPU device already exists.
- * So, just add interfaces for this core.
+ * Check whether a thread sibling is already online. If not, add the
+ * interface for this CPU core.
*/
- coretemp_add_core(cpu, 0);
+ if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
+ coretemp_add_core(pdev, cpu, 0);
+
+ cpumask_set_cpu(cpu, &pdata->cpumask);
+ return 0;
}
-static void put_core_offline(unsigned int cpu)
+static int coretemp_cpu_offline(unsigned int cpu)
{
- int i, indx;
- struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
+ struct platform_data *pd;
+ struct temp_data *tdata;
+ int indx, target;
/* If the physical CPU device does not exist, just return */
if (!pdev)
- return;
-
- pdata = platform_get_drvdata(pdev);
-
- indx = TO_ATTR_NO(cpu);
+ return 0;
/* The core id is too big, just return */
+ indx = TO_ATTR_NO(cpu);
if (indx > MAX_CORE_DATA - 1)
- return;
+ return 0;
- if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
- coretemp_remove_core(pdata, indx);
+ pd = platform_get_drvdata(pdev);
+ tdata = pd->core_data[indx];
+
+ cpumask_clear_cpu(cpu, &pd->cpumask);
/*
- * If a HT sibling of a core is taken offline, but another HT sibling
- * of the same core is still online, register the alternate sibling.
- * This ensures that exactly one set of attributes is provided as long
- * as at least one HT sibling of a core is online.
+ * If this is the last thread sibling, remove the CPU core
+ * interface. If there is still a sibling online, transfer the
+ * target cpu of that core interface to it.
*/
- for_each_sibling(i, cpu) {
- if (i != cpu) {
- get_core_online(i);
- /*
- * Display temperature sensor data for one HT sibling
- * per core only, so abort the loop after one such
- * sibling has been found.
- */
- break;
- }
+ target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
+ if (target >= nr_cpu_ids) {
+ coretemp_remove_core(pd, indx);
+ } else if (tdata && tdata->cpu == cpu) {
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
+
/*
- * If all cores in this pkg are offline, remove the device.
- * coretemp_device_remove calls unregister_platform_device,
- * which in turn calls coretemp_remove. This removes the
- * pkgtemp entry and does other clean ups.
+ * If all cores in this pkg are offline, remove the device. This
+ * will invoke the platform driver remove function, which cleans up
+ * the rest.
*/
- if (!is_any_core_online(pdata))
- coretemp_device_remove(cpu);
-}
+ if (cpumask_empty(&pd->cpumask)) {
+ pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ platform_device_unregister(pdev);
+ return 0;
+ }
-static int coretemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- get_core_online(cpu);
- break;
- case CPU_DOWN_PREPARE:
- put_core_offline(cpu);
- break;
+ /*
+ * Check whether this core is the target for the package
+ * interface. We need to assign it to some other cpu.
+ */
+ tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ if (tdata && tdata->cpu == cpu) {
+ target = cpumask_first(&pd->cpumask);
+ mutex_lock(&tdata->update_lock);
+ tdata->cpu = target;
+ mutex_unlock(&tdata->update_lock);
}
- return NOTIFY_OK;
+ return 0;
}
-
-static struct notifier_block coretemp_cpu_notifier __refdata = {
- .notifier_call = coretemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst coretemp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
+static enum cpuhp_state coretemp_hp_online;
+
static int __init coretemp_init(void)
{
- int i, err;
+ int err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -805,54 +726,38 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
+ max_packages = topology_max_packages();
+ pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
+ GFP_KERNEL);
+ if (!pkg_devices)
+ return -ENOMEM;
+
err = platform_driver_register(&coretemp_driver);
if (err)
- goto exit;
+ return err;
- cpu_notifier_register_begin();
- for_each_online_cpu(i)
- get_core_online(i);
-
-#ifndef CONFIG_HOTPLUG_CPU
- if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
- err = -ENODEV;
- goto exit_driver_unreg;
- }
-#endif
-
- __register_hotcpu_notifier(&coretemp_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+ coretemp_cpu_online, coretemp_cpu_offline);
+ if (err < 0)
+ goto outdrv;
+ coretemp_hp_online = err;
return 0;
-#ifndef CONFIG_HOTPLUG_CPU
-exit_driver_unreg:
+outdrv:
platform_driver_unregister(&coretemp_driver);
-#endif
-exit:
+ kfree(pkg_devices);
return err;
}
+module_init(coretemp_init)
static void __exit coretemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
+ kfree(pkg_devices);
}
+module_exit(coretemp_exit)
MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");
-
-module_init(coretemp_init)
-module_exit(coretemp_exit)
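A stripped-down sketch, not coretemp itself, of the dynamic CPU hotplug state pattern the conversion above adopts; the demo_* names are placeholders. cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN invokes the online callback for every CPU that is already up as well as for later hotplug events and returns the allocated state number, which cpuhp_remove_state() later uses to tear everything down:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_hp_online;

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu %u came online\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: cpu %u going offline\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;
	demo_hp_online = ret;	/* dynamic states return their state number */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	/* runs the offline callback for all online CPUs, then frees the state */
	cpuhp_remove_state(demo_hp_online);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");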
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index edf550fc4eef..0043a4c02b85 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
if (res)
return res;
- val = (val * 10 / 625) * 8;
+ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 24e395c5907d..4b870ee9b0d3 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -251,7 +251,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
@@ -273,7 +273,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
if (result < 0)
return result;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index f37fe2011640..4aee5adf9ef2 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -215,12 +215,13 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
- val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]);
+ val = clamp_val(val, 0, 255 * nominal_mv[nr] / 192);
+ val = DIV_ROUND_CLOSEST(val * 192, nominal_mv[nr]);
reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
- data->in[sf][nr] = clamp_val(val, 0, 255);
+ data->in[sf][nr] = val;
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -252,12 +253,13 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
+ val = clamp_val(val, -127000, 127000);
val = DIV_ROUND_CLOSEST(val, 1000);
reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
- data->temp[sf][nr] = clamp_val(val, -127, 127);
+ data->temp[sf][nr] = val;
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index b96a2a9e4df7..628be9c95ff9 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
* Convert fan RPM value from sysfs into count value for fan controller
* register (FAN_SET_CNT).
*/
-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
+static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
u8 clk_div, u8 gear_mult)
{
- if (!rpm) /* to stop the fan, set cnt to 255 */
+ unsigned long f1 = clk_freq * 30 * gear_mult;
+ unsigned long f2 = p * clk_div;
+
+ if (!rpm) /* to stop the fan, set cnt to 255 */
return 0xff;
- return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
- 0, 255);
+ rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
+ return DIV_ROUND_CLOSEST(f1, rpm * f2);
}
/* helper to grab and cache data, at most one time per second */
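A userspace sketch (kernel helpers re-implemented locally, fan parameters purely illustrative) of the arithmetic behind the g762 change above: rather than clamping the final count, the requested RPM itself is bounded so that rpm * f2 can neither overflow an unsigned long nor drive the count above what fits in the 8-bit register:

#include <limits.h>
#include <stdio.h>

/* local stand-ins for the kernel helpers used above */
#define clamp_val(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

/* mirrors the shape of the reworked cnt_from_rpm(); numbers below are made up */
static unsigned char cnt_from_rpm(unsigned long rpm, unsigned long clk_freq,
				  unsigned int p, unsigned int clk_div,
				  unsigned int gear_mult)
{
	unsigned long f1 = clk_freq * 30 * gear_mult;
	unsigned long f2 = (unsigned long)p * clk_div;

	if (!rpm)			/* writing 255 stops the fan */
		return 0xff;

	/* bound rpm so rpm * f2 cannot overflow and the count stays in a byte */
	rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
	return DIV_ROUND_CLOSEST(f1, rpm * f2);
}

int main(void)
{
	/* 32.768 kHz clock, 2 pulses per revolution, divider 1, gear mult 1 */
	printf("3000 rpm -> count %u\n",
	       (unsigned int)cnt_from_rpm(3000, 32768, 2, 1, 1));
	printf("huge rpm -> count %u\n",
	       (unsigned int)cnt_from_rpm(ULONG_MAX, 32768, 2, 1, 1));
	return 0;
}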
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index adae6848ffb2..3932f9276c07 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -38,12 +38,15 @@ struct hwmon_device {
#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+#define MAX_SYSFS_ATTR_NAME_LENGTH 32
+
struct hwmon_device_attribute {
struct device_attribute dev_attr;
const struct hwmon_ops *ops;
enum hwmon_sensor_types type;
u32 attr;
int index;
+ char name[MAX_SYSFS_ATTR_NAME_LENGTH];
};
#define to_hwmon_attr(d) \
@@ -178,6 +181,22 @@ static ssize_t hwmon_attr_show(struct device *dev,
return sprintf(buf, "%ld\n", val);
}
+static ssize_t hwmon_attr_show_string(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ char *s;
+ int ret;
+
+ ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
+ hattr->index, &s);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%s\n", s);
+}
+
static ssize_t hwmon_attr_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -205,6 +224,17 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
return 1;
}
+static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+{
+ return (type == hwmon_temp && attr == hwmon_temp_label) ||
+ (type == hwmon_in && attr == hwmon_in_label) ||
+ (type == hwmon_curr && attr == hwmon_curr_label) ||
+ (type == hwmon_power && attr == hwmon_power_label) ||
+ (type == hwmon_energy && attr == hwmon_energy_label) ||
+ (type == hwmon_humidity && attr == hwmon_humidity_label) ||
+ (type == hwmon_fan && attr == hwmon_fan_label);
+}
+
static struct attribute *hwmon_genattr(struct device *dev,
const void *drvdata,
enum hwmon_sensor_types type,
@@ -218,6 +248,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
struct attribute *a;
umode_t mode;
char *name;
+ bool is_string = is_string_attr(type, attr);
/* The attribute is invisible if there is no template string */
if (!template)
@@ -227,32 +258,31 @@ static struct attribute *hwmon_genattr(struct device *dev,
if (!mode)
return ERR_PTR(-ENOENT);
- if ((mode & S_IRUGO) && !ops->read)
+ if ((mode & S_IRUGO) && ((is_string && !ops->read_string) ||
+ (!is_string && !ops->read)))
return ERR_PTR(-EINVAL);
if ((mode & S_IWUGO) && !ops->write)
return ERR_PTR(-EINVAL);
+ hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
if (type == hwmon_chip) {
name = (char *)template;
} else {
- name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
- if (!name)
- return ERR_PTR(-ENOMEM);
- scnprintf(name, strlen(template) + 16, template,
+ scnprintf(hattr->name, sizeof(hattr->name), template,
index + hwmon_attr_base(type));
+ name = hattr->name;
}
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
- if (!hattr)
- return ERR_PTR(-ENOMEM);
-
hattr->type = type;
hattr->attr = attr;
hattr->index = index;
hattr->ops = ops;
dattr = &hattr->dev_attr;
- dattr->show = hwmon_attr_show;
+ dattr->show = is_string ? hwmon_attr_show_string : hwmon_attr_show;
dattr->store = hwmon_attr_store;
a = &dattr->attr;
@@ -263,7 +293,11 @@ static struct attribute *hwmon_genattr(struct device *dev,
return a;
}
-static const char * const hwmon_chip_attr_templates[] = {
+/*
+ * Chip attributes are not attribute templates but actual sysfs attributes.
+ * See hwmon_genattr() for special handling.
+ */
+static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_temp_reset_history] = "temp_reset_history",
[hwmon_chip_in_reset_history] = "in_reset_history",
[hwmon_chip_curr_reset_history] = "curr_reset_history",
@@ -400,7 +434,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
};
static const char * const *__templates[] = {
- [hwmon_chip] = hwmon_chip_attr_templates,
+ [hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
[hwmon_in] = hwmon_in_attr_templates,
[hwmon_curr] = hwmon_curr_attr_templates,
@@ -412,7 +446,7 @@ static const char * const *__templates[] = {
};
static const int __templates_size[] = {
- [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attr_templates),
+ [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attrs),
[hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates),
[hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates),
[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
@@ -526,9 +560,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hdev = &hwdev->dev;
- if (chip && chip->ops->is_visible) {
+ if (chip) {
struct attribute **attrs;
- int ngroups = 2;
+ int ngroups = 2; /* terminating NULL plus &hwdev->groups */
if (groups)
for (i = 0; groups[i]; i++)
@@ -536,8 +570,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
GFP_KERNEL);
- if (!hwdev->groups)
- return ERR_PTR(-ENOMEM);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
attrs = __hwmon_create_attrs(dev, drvdata, chip);
if (IS_ERR(attrs)) {
@@ -570,7 +606,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (chip && chip->ops->is_visible && chip->ops->read &&
+ if (chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
@@ -624,8 +660,8 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @info: pointer to hwmon chip information
+ * @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
* longer needed.
@@ -636,12 +672,12 @@ struct device *
hwmon_device_register_with_info(struct device *dev, const char *name,
void *drvdata,
const struct hwmon_chip_info *chip,
- const struct attribute_group **groups)
+ const struct attribute_group **extra_groups)
{
- if (chip && (!chip->ops || !chip->info))
+ if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
return ERR_PTR(-EINVAL);
- return __hwmon_device_register(dev, name, drvdata, chip, groups);
+ return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
}
EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
@@ -656,6 +692,9 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
*/
struct device *hwmon_device_register(struct device *dev)
{
+ dev_warn(dev,
+ "hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
+
return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(hwmon_device_register);
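A hypothetical fragment, not taken from any in-tree driver, showing the shape of the string callback the hunks above wire up for *_label attributes registered through hwmon_device_register_with_info(); the prototype mirrors the read_string call in hwmon_attr_show_string() above, and the channel labels are invented:

/* hypothetical callback; the demo_* names and labels are placeholders */
static int demo_read_string(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, char **str)
{
	static char * const demo_in_labels[] = { "vdd_core", "vdd_io" };

	if (type == hwmon_in && attr == hwmon_in_label &&
	    channel >= 0 && channel < ARRAY_SIZE(demo_in_labels)) {
		*str = demo_in_labels[channel];
		return 0;
	}
	return -EOPNOTSUPP;
}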
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 6ff773fcaefb..29c8136ce9c5 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -136,7 +136,8 @@ static const int lm85_scaling[] = { /* .001 Volts */
#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
#define INS_TO_REG(n, val) \
- clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
+ SCALE(clamp_val(val, 0, 255 * lm85_scaling[n] / 192), \
+ lm85_scaling[n], 192)
#define INSEXT_FROM_REG(n, val, ext) \
SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index a5e295826aea..13cca3606e06 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -121,7 +121,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
#define IN_FROM_REG(reg, scale) (((reg) * (scale) + 96) / 192)
#define IN_TO_REG(val, scale) ((val) <= 0 ? 0 : \
- (val) * 192 >= (scale) * 255 ? 255 : \
+ (val) >= (scale) * 255 / 192 ? 255 : \
((val) * 192 + (scale) / 2) / (scale))
#define TEMP_FROM_REG(reg) ((reg) * 1000)
@@ -154,7 +154,6 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
*/
struct lm87_data {
- struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -181,6 +180,8 @@ struct lm87_data {
u16 alarms; /* register values, combined */
u8 vid; /* register values, combined */
u8 vrm;
+
+ const struct attribute_group *attr_groups[6];
};
static inline int lm87_read_value(struct i2c_client *client, u8 reg)
@@ -195,7 +196,7 @@ static inline int lm87_write_value(struct i2c_client *client, u8 reg, u8 value)
static struct lm87_data *lm87_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
@@ -309,7 +310,7 @@ static ssize_t show_in_max(struct device *dev,
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -330,7 +331,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -396,7 +397,7 @@ static ssize_t show_temp_high(struct device *dev,
static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -416,7 +417,7 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -495,7 +496,7 @@ static ssize_t show_fan_div(struct device *dev,
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -522,7 +523,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
long val;
@@ -635,7 +636,7 @@ static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
long val;
int err;
@@ -841,23 +842,18 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static void lm87_remove_files(struct i2c_client *client)
+static void lm87_restore_config(void *arg)
{
- struct device *dev = &client->dev;
-
- sysfs_remove_group(&dev->kobj, &lm87_group);
- sysfs_remove_group(&dev->kobj, &lm87_group_in6);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan1);
- sysfs_remove_group(&dev->kobj, &lm87_group_in7);
- sysfs_remove_group(&dev->kobj, &lm87_group_fan2);
- sysfs_remove_group(&dev->kobj, &lm87_group_temp3);
- sysfs_remove_group(&dev->kobj, &lm87_group_in0_5);
- sysfs_remove_group(&dev->kobj, &lm87_group_vid);
+ struct i2c_client *client = arg;
+ struct lm87_data *data = i2c_get_clientdata(client);
+
+ lm87_write_value(client, LM87_REG_CONFIG, data->config);
}
-static void lm87_init_client(struct i2c_client *client)
+static int lm87_init_client(struct i2c_client *client)
{
struct lm87_data *data = i2c_get_clientdata(client);
+ int rc;
if (dev_get_platdata(&client->dev)) {
data->channel = *(u8 *)dev_get_platdata(&client->dev);
@@ -868,6 +864,10 @@ static void lm87_init_client(struct i2c_client *client)
}
data->config = lm87_read_value(client, LM87_REG_CONFIG) & 0x6F;
+ rc = devm_add_action(&client->dev, lm87_restore_config, client);
+ if (rc)
+ return rc;
+
if (!(data->config & 0x01)) {
int i;
@@ -895,12 +895,15 @@ static void lm87_init_client(struct i2c_client *client)
if ((data->config & 0x09) != 0x01)
lm87_write_value(client, LM87_REG_CONFIG,
(data->config & 0x77) | 0x01);
+ return 0;
}
static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct lm87_data *data;
+ struct device *hwmon_dev;
int err;
+ unsigned int group_tail = 0;
data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
if (!data)
@@ -910,7 +913,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
mutex_init(&data->update_lock);
/* Initialize the LM87 chip */
- lm87_init_client(client);
+ err = lm87_init_client(client);
+ if (err)
+ return err;
data->in_scale[0] = 2500;
data->in_scale[1] = 2700;
@@ -921,72 +926,34 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->in_scale[6] = 1875;
data->in_scale[7] = 1875;
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &lm87_group);
- if (err)
- goto exit_stop;
-
- if (data->channel & CHAN_NO_FAN(0)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan1);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_NO_FAN(1)) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in7);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan2);
- if (err)
- goto exit_remove;
- }
-
- if (data->channel & CHAN_TEMP3) {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_temp3);
- if (err)
- goto exit_remove;
- } else {
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_in0_5);
- if (err)
- goto exit_remove;
- }
+ /*
+ * Construct the list of attributes, the list depends on the
+ * configuration of the chip
+ */
+ data->attr_groups[group_tail++] = &lm87_group;
+ if (data->channel & CHAN_NO_FAN(0))
+ data->attr_groups[group_tail++] = &lm87_group_in6;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan1;
+
+ if (data->channel & CHAN_NO_FAN(1))
+ data->attr_groups[group_tail++] = &lm87_group_in7;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_fan2;
+
+ if (data->channel & CHAN_TEMP3)
+ data->attr_groups[group_tail++] = &lm87_group_temp3;
+ else
+ data->attr_groups[group_tail++] = &lm87_group_in0_5;
if (!(data->channel & CHAN_NO_VID)) {
data->vrm = vid_which_vrm();
- err = sysfs_create_group(&client->dev.kobj, &lm87_group_vid);
- if (err)
- goto exit_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
+ data->attr_groups[group_tail++] = &lm87_group_vid;
}
- return 0;
-
-exit_remove:
- lm87_remove_files(client);
-exit_stop:
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return err;
-}
-
-static int lm87_remove(struct i2c_client *client)
-{
- struct lm87_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- lm87_remove_files(client);
-
- lm87_write_value(client, LM87_REG_CONFIG, data->config);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(
+ &client->dev, client->name, client, data->attr_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
@@ -1006,7 +973,6 @@ static struct i2c_driver lm87_driver = {
.name = "lm87",
},
.probe = lm87_probe,
- .remove = lm87_remove,
.id_table = lm87_id,
.detect = lm87_detect,
.address_list = normal_i2c,
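A condensed, hypothetical probe excerpt (the demo_* groups, flags and data type are invented, not lm87's) illustrating the registration pattern the lm87 rework above moves to: build a NULL-terminated list of attribute groups that matches the detected chip configuration and hand it to devm_hwmon_device_register_with_groups(), so sysfs creation, error unwinding and removal are all handled by the devm core:

/* hypothetical excerpt; error handling and chip setup omitted */
static int demo_register(struct i2c_client *client, struct demo_data *data)
{
	struct device *hwmon_dev;
	unsigned int n = 0;

	data->groups[n++] = &demo_base_group;
	if (data->channel & DEMO_HAS_FAN1)
		data->groups[n++] = &demo_fan1_group;
	else
		data->groups[n++] = &demo_in6_group;
	data->groups[n] = NULL;		/* the list must stay NULL terminated */

	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   client->name,
							   client, data->groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}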
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 972444a14cca..1929734c3b1d 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008-2009, 2012 Freescale Semiconductor, Inc.
* Author: Mingkai Hu <Mingkai.hu@freescale.com>
* Reworked by Sven Schuchmann <schuchmann@schleissheimer.de>
+ * DT support added by Clemens Gruber <clemens.gruber@pqgruber.com>
*
* This driver exports the value of the analog input voltage to sysfs; the
* voltage unit is mV. Through the sysfs interface, lm-sensors tool
@@ -22,11 +23,13 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
-/* Vdd info */
-#define MCP3021_VDD_MAX 5500
-#define MCP3021_VDD_MIN 2700
-#define MCP3021_VDD_REF 3300
+/* Vdd / reference voltage in millivolt */
+#define MCP3021_VDD_REF_MAX 5500
+#define MCP3021_VDD_REF_MIN 2700
+#define MCP3021_VDD_REF_DEFAULT 3300
/* output format */
#define MCP3021_SAR_SHIFT 2
@@ -47,7 +50,7 @@ enum chips {
*/
struct mcp3021_data {
struct device *hwmon_dev;
- u32 vdd; /* device power supply */
+ u32 vdd; /* supply and reference voltage in millivolt */
u16 sar_shift;
u16 sar_mask;
u8 output_res;
@@ -99,13 +102,14 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", in_input);
}
-static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL);
+static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
static int mcp3021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err;
struct mcp3021_data *data = NULL;
+ struct device_node *np = client->dev.of_node;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -117,6 +121,21 @@ static int mcp3021_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
+ if (np) {
+ if (!of_property_read_u32(np, "reference-voltage-microvolt",
+ &data->vdd))
+ data->vdd /= 1000;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ } else {
+ u32 *pdata = dev_get_platdata(&client->dev);
+
+ if (pdata)
+ data->vdd = *pdata;
+ else
+ data->vdd = MCP3021_VDD_REF_DEFAULT;
+ }
+
switch (id->driver_data) {
case mcp3021:
data->sar_shift = MCP3021_SAR_SHIFT;
@@ -131,13 +150,8 @@ static int mcp3021_probe(struct i2c_client *client,
break;
}
- if (dev_get_platdata(&client->dev)) {
- data->vdd = *(u32 *)dev_get_platdata(&client->dev);
- if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
- return -EINVAL;
- } else {
- data->vdd = MCP3021_VDD_REF;
- }
+ if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN)
+ return -EINVAL;
err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
if (err)
@@ -173,9 +187,19 @@ static const struct i2c_device_id mcp3021_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp3021_id);
+#ifdef CONFIG_OF
+static const struct of_device_id of_mcp3021_match[] = {
+ { .compatible = "microchip,mcp3021", .data = (void *)mcp3021 },
+ { .compatible = "microchip,mcp3221", .data = (void *)mcp3221 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_mcp3021_match);
+#endif
+
static struct i2c_driver mcp3021_driver = {
.driver = {
.name = "mcp3021",
+ .of_match_table = of_match_ptr(of_mcp3021_match),
},
.probe = mcp3021_probe,
.remove = mcp3021_remove,
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 3ce33d244cc0..12b94b094c0d 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
ret = 0;
else if (ret)
ret = DIV_ROUND_CLOSEST(1350000U, ret);
+ else
+ ret = 1350000U;
abort:
mutex_unlock(&data->access_lock);
return ret;
}
static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
- u8 reg_fan_high, unsigned int limit)
+ u8 reg_fan_high, unsigned long limit)
{
int err;
@@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
int err;
+ voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]);
voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
- voltage = clamp_val(voltage, 0, 0x3ff);
mutex_lock(&data->access_lock);
err = regmap_write(data->regmap,
@@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
err = regmap_write(data->regmap, nr, val & 0xff);
return err ? : count;
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3baa4f4a8c5e..4ab5293c7bf0 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -499,15 +499,27 @@ static int adm1275_probe(struct i2c_client *client,
pindex = 2;
tindex = 3;
- info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+ info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+ /* Enable VOUT if not enabled (it is disabled by default) */
+ if (!(config & ADM1278_VOUT_EN)) {
+ config |= ADM1278_VOUT_EN;
+ ret = i2c_smbus_write_byte_data(client,
+ ADM1275_PMON_CONFIG,
+ config);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to enable VOUT monitoring\n");
+ return -ENODEV;
+ }
+ }
+
if (config & ADM1278_TEMP1_EN)
info->func[0] |=
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
if (config & ADM1278_VIN_EN)
info->func[0] |= PMBUS_HAVE_VIN;
- if (config & ADM1278_VOUT_EN)
- info->func[0] |=
- PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1293:
case adm1294:
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 559a3dcd64d8..094f948f99ff 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -251,6 +251,7 @@ static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi-sensors"},
{},
};
+MODULE_DEVICE_TABLE(of, scpi_of_match);
static struct platform_driver scpi_hwmon_platdrv = {
.driver = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6ac7cda72d4c..15650f247679 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,14 +77,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+ val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+ return SCALE(val, 192, nom_mv[n]);
}
/*
* TEMP: 0.001 degC units (-128C to +127C)
* REG: 1C/bit, two's complement
*/
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
{
return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
new file mode 100644
index 000000000000..18136e1f95fd
--- /dev/null
+++ b/drivers/hwmon/tc654.c
@@ -0,0 +1,514 @@
+/*
+ * tc654.c - Linux kernel modules for fan speed controller
+ *
+ * Copyright (C) 2016 Allied Telesis Labs NZ
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+enum tc654_regs {
+ TC654_REG_RPM1 = 0x00, /* RPM Output 1 */
+ TC654_REG_RPM2 = 0x01, /* RPM Output 2 */
+ TC654_REG_FAN_FAULT1 = 0x02, /* Fan Fault 1 Threshold */
+ TC654_REG_FAN_FAULT2 = 0x03, /* Fan Fault 2 Threshold */
+ TC654_REG_CONFIG = 0x04, /* Configuration */
+ TC654_REG_STATUS = 0x05, /* Status */
+ TC654_REG_DUTY_CYCLE = 0x06, /* Fan Speed Duty Cycle */
+ TC654_REG_MFR_ID = 0x07, /* Manufacturer Identification */
+ TC654_REG_VER_ID = 0x08, /* Version Identification */
+};
+
+/* Macros to easily index the registers */
+#define TC654_REG_RPM(idx) (TC654_REG_RPM1 + (idx))
+#define TC654_REG_FAN_FAULT(idx) (TC654_REG_FAN_FAULT1 + (idx))
+
+/* Config register bits */
+#define TC654_REG_CONFIG_RES BIT(6) /* Resolution Selection */
+#define TC654_REG_CONFIG_DUTYC BIT(5) /* Duty Cycle Control */
+#define TC654_REG_CONFIG_SDM BIT(0) /* Shutdown Mode */
+
+/* Status register bits */
+#define TC654_REG_STATUS_F2F BIT(1) /* Fan 2 Fault */
+#define TC654_REG_STATUS_F1F BIT(0) /* Fan 1 Fault */
+
+/* RPM resolution for RPM Output registers */
+#define TC654_HIGH_RPM_RESOLUTION 25 /* 25 RPM resolution */
+#define TC654_LOW_RPM_RESOLUTION 50 /* 50 RPM resolution */
+
+/* Convert to the fan fault RPM threshold from register value */
+#define TC654_FAN_FAULT_FROM_REG(val) ((val) * 50) /* 50 RPM resolution */
+
+/* Convert to register value from the fan fault RPM threshold */
+#define TC654_FAN_FAULT_TO_REG(val) (((val) / 50) & 0xff)
+
+/* Register data is read (and cached) at most once per second. */
+#define TC654_UPDATE_INTERVAL HZ
+
+struct tc654_data {
+ struct i2c_client *client;
+
+ /* update mutex */
+ struct mutex update_lock;
+
+ /* tc654 register cache */
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 rpm_output[2]; /* The fan RPM data for fans 1 and 2 is then
+ * written to registers RPM1 and RPM2
+ */
+ u8 fan_fault[2]; /* The Fan Fault Threshold Registers are used to
+ * set the fan fault threshold levels for fan 1
+ * and fan 2
+ */
+ u8 config; /* The Configuration Register is an 8-bit read/
+ * writable multi-function control register
+ * 7: Fan Fault Clear
+ * 1 = Clear Fan Fault
+ * 0 = Normal Operation (default)
+ * 6: Resolution Selection for RPM Output Registers
+ * RPM Output Registers (RPM1 and RPM2) will be
+ * set for
+ * 1 = 25 RPM (9-bit) resolution
+ * 0 = 50 RPM (8-bit) resolution (default)
+ * 5: Duty Cycle Control Method
+ * The V OUT duty cycle will be controlled via
+ * 1 = the SMBus interface.
+ * 0 = via the V IN analog input pin. (default)
+ * 4,3: Fan 2 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 2,1: Fan 1 Pulses Per Rotation
+ * 00 = 1
+ * 01 = 2 (default)
+ * 10 = 4
+ * 11 = 8
+ * 0: Shutdown Mode
+ * 1 = Shutdown mode.
+ * 0 = Normal operation. (default)
+ */
+ u8 status; /* The Status register provides all the information
+ * about what is going on within the TC654/TC655
+ * devices.
+ * 7,6: Unimplemented, Read as '0'
+ * 5: Over-Temperature Fault Condition
+ * 1 = Over-Temperature condition has occurred
+ * 0 = Normal operation. V IN is less than 2.6V
+ * 4: RPM2 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 3: RPM1 Counter Overflow
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 2: V IN Input Status
+ * 1 = V IN is open
+ * 0 = Normal operation. voltage present at V IN
+ * 1: Fan 2 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ * 0: Fan 1 Fault
+ * 1 = Fault condition
+ * 0 = Normal operation
+ */
+ u8 duty_cycle; /* The DUTY_CYCLE register is a 4-bit read/
+ * writable register used to control the duty
+ * cycle of the V OUT output.
+ */
+};
+
+/* helper to grab and cache data, at most one time per second */
+static struct tc654_data *tc654_update_client(struct device *dev)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+ if (time_before(jiffies, data->last_updated + TC654_UPDATE_INTERVAL) &&
+ likely(data->valid))
+ goto out;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(0));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(1));
+ if (ret < 0)
+ goto out;
+ data->rpm_output[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(0));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[0] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(1));
+ if (ret < 0)
+ goto out;
+ data->fan_fault[1] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ goto out;
+ data->config = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_STATUS);
+ if (ret < 0)
+ goto out;
+ data->status = ret;
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_DUTY_CYCLE);
+ if (ret < 0)
+ goto out;
+ data->duty_cycle = ret & 0x0f;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+out:
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0) /* upon error, encode it in return value */
+ data = ERR_PTR(ret);
+
+ return data;
+}
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_RES)
+ val = data->rpm_output[nr] * TC654_HIGH_RPM_RESOLUTION;
+ else
+ val = data->rpm_output[nr] * TC654_LOW_RPM_RESOLUTION;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n",
+ TC654_FAN_FAULT_FROM_REG(data->fan_fault[nr]));
+}
+
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ val = clamp_val(val, 0, 12750);
+
+ mutex_lock(&data->update_lock);
+
+ data->fan_fault[nr] = TC654_FAN_FAULT_TO_REG(val);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_FAN_FAULT(nr),
+ data->fan_fault[nr]);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (nr == 0)
+ val = !!(data->status & TC654_REG_STATUS_F1F);
+ else
+ val = !!(data->status & TC654_REG_STATUS_F2F);
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static const u8 TC654_FAN_PULSE_SHIFT[] = { 1, 3 };
+
+static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = tc654_update_client(dev);
+ u8 val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = BIT((data->config >> TC654_FAN_PULSE_SHIFT[nr]) & 0x03);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(da)->index;
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ u8 config;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ switch (val) {
+ case 1:
+ config = 0;
+ break;
+ case 2:
+ config = 1;
+ break;
+ case 4:
+ config = 2;
+ break;
+ case 8:
+ config = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->update_lock);
+
+ data->config &= ~(0x03 << TC654_FAN_PULSE_SHIFT[nr]);
+ data->config |= (config << TC654_FAN_PULSE_SHIFT[nr]);
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->config & TC654_REG_CONFIG_DUTYC));
+}
+
+static ssize_t set_pwm_mode(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val)
+ data->config |= TC654_REG_CONFIG_DUTYC;
+ else
+ data->config &= ~TC654_REG_CONFIG_DUTYC;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
+ 172, 184, 196, 207, 219, 231, 243, 255};
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct tc654_data *data = tc654_update_client(dev);
+ int pwm;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->config & TC654_REG_CONFIG_SDM)
+ pwm = 0;
+ else
+ pwm = tc654_pwm_map[data->duty_cycle];
+
+ return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val == 0)
+ data->config |= TC654_REG_CONFIG_SDM;
+ else
+ data->config &= ~TC654_REG_CONFIG_SDM;
+
+ data->duty_cycle = find_closest(val, tc654_pwm_map,
+ ARRAY_SIZE(tc654_pwm_map));
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_smbus_write_byte_data(client, TC654_REG_DUTY_CYCLE,
+ data->duty_cycle);
+
+out:
+ mutex_unlock(&data->update_lock);
+ return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
+ set_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 0);
+static SENSOR_DEVICE_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+ set_fan_pulses, 1);
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
+ show_pwm_mode, set_pwm_mode, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm,
+ set_pwm, 0);
+
+/* Driver data */
+static struct attribute *tc654_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan1_pulses.dev_attr.attr,
+ &sensor_dev_attr_fan2_pulses.dev_attr.attr,
+ &sensor_dev_attr_pwm1_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(tc654);
+
+/*
+ * device probe and removal
+ */
+
+static int tc654_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tc654_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct tc654_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+ ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ data->config = ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_groups(dev, client->name, data,
+ tc654_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id tc654_id[] = {
+ {"tc654", 0},
+ {"tc655", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc654_id);
+
+static struct i2c_driver tc654_driver = {
+ .driver = {
+ .name = "tc654",
+ },
+ .probe = tc654_probe,
+ .id_table = tc654_id,
+};
+
+module_i2c_driver(tc654_driver);
+
+MODULE_AUTHOR("Allied Telesis Labs");
+MODULE_DESCRIPTION("Microchip TC654/TC655 driver");
+MODULE_LICENSE("GPL");
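A runnable userspace sketch of the duty-cycle mapping used by the new driver's set_pwm() above, with a local stand-in for the kernel's find_closest() helper: a requested pwm value in 0..255 is snapped to the nearest entry of the 16-step tc654_pwm_map table and the entry's index is what ends up in the DUTY_CYCLE register (pwm 0 is additionally handled via the shutdown bit in the driver):

#include <stdio.h>
#include <stdlib.h>

static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
				       172, 184, 196, 207, 219, 231, 243, 255 };

/* simple stand-in for the kernel's find_closest(): index of the entry of an
 * ascending array that is nearest to val */
static int find_closest(int val, const int *arr, int len)
{
	int i, best = 0;

	for (i = 1; i < len; i++)
		if (abs(arr[i] - val) < abs(arr[best] - val))
			best = i;
	return best;
}

int main(void)
{
	int req[] = { 0, 100, 170, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		int idx = find_closest(req[i], tc654_pwm_map, 16);

		printf("pwm %3d -> DUTY_CYCLE %2d (actual %d)\n",
		       req[i], idx, tc654_pwm_map[idx]);
	}
	return 0;
}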
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
new file mode 100644
index 000000000000..91bb94639286
--- /dev/null
+++ b/drivers/hwmon/tmp108.c
@@ -0,0 +1,469 @@
+/* Texas Instruments TMP108 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2016 John Muir <john@jmuir.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "tmp108"
+
+#define TMP108_REG_TEMP 0x00
+#define TMP108_REG_CONF 0x01
+#define TMP108_REG_TLOW 0x02
+#define TMP108_REG_THIGH 0x03
+
+#define TMP108_TEMP_MIN_MC -50000 /* Minimum temperature in millicelsius. */
+#define TMP108_TEMP_MAX_MC 127937 /* Maximum temperature in millicelsius. */
+
+/* Configuration register bits.
+ * Note: these bit definitions are byte swapped.
+ */
+#define TMP108_CONF_M0 0x0100 /* Sensor mode. */
+#define TMP108_CONF_M1 0x0200
+#define TMP108_CONF_TM 0x0400 /* Thermostat mode. */
+#define TMP108_CONF_FL 0x0800 /* Watchdog flag - TLOW */
+#define TMP108_CONF_FH 0x1000 /* Watchdog flag - THIGH */
+#define TMP108_CONF_CR0 0x2000 /* Conversion rate. */
+#define TMP108_CONF_CR1 0x4000
+#define TMP108_CONF_ID 0x8000
+#define TMP108_CONF_HYS0 0x0010 /* Hysteresis. */
+#define TMP108_CONF_HYS1 0x0020
+#define TMP108_CONF_POL 0x0080 /* Polarity of alert. */
+
+/* Defaults set by the hardware upon reset. */
+#define TMP108_CONF_DEFAULTS (TMP108_CONF_CR0 | TMP108_CONF_TM |\
+ TMP108_CONF_HYS0 | TMP108_CONF_M1)
+/* These bits are read-only. */
+#define TMP108_CONF_READ_ONLY (TMP108_CONF_FL | TMP108_CONF_FH |\
+ TMP108_CONF_ID)
+
+#define TMP108_CONF_MODE_MASK (TMP108_CONF_M0|TMP108_CONF_M1)
+#define TMP108_MODE_SHUTDOWN 0x0000
+#define TMP108_MODE_ONE_SHOT TMP108_CONF_M0
+#define TMP108_MODE_CONTINUOUS TMP108_CONF_M1 /* Default */
+ /* When M1 is set, M0 is ignored. */
+
+#define TMP108_CONF_CONVRATE_MASK (TMP108_CONF_CR0|TMP108_CONF_CR1)
+#define TMP108_CONVRATE_0P25HZ 0x0000
+#define TMP108_CONVRATE_1HZ TMP108_CONF_CR0 /* Default */
+#define TMP108_CONVRATE_4HZ TMP108_CONF_CR1
+#define TMP108_CONVRATE_16HZ (TMP108_CONF_CR0|TMP108_CONF_CR1)
+
+#define TMP108_CONF_HYSTERESIS_MASK (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+#define TMP108_HYSTERESIS_0C 0x0000
+#define TMP108_HYSTERESIS_1C TMP108_CONF_HYS0 /* Default */
+#define TMP108_HYSTERESIS_2C TMP108_CONF_HYS1
+#define TMP108_HYSTERESIS_4C (TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+
+#define TMP108_CONVERSION_TIME_MS 30 /* in milliseconds */
+
+struct tmp108 {
+ struct regmap *regmap;
+ u16 orig_config;
+ unsigned long ready_time;
+};
+
+/* convert 12-bit TMP108 register value to milliCelsius */
+static inline int tmp108_temp_reg_to_mC(s16 val)
+{
+ return (val & ~0x0f) * 1000 / 256;
+}
+
+/* convert milliCelsius to left adjusted 12-bit TMP108 register value */
+static inline u16 tmp108_mC_to_temp_reg(int val)
+{
+ return (val * 256) / 1000;
+}
+
+static int tmp108_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err, hyst;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF,
+ &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_CONVRATE_MASK) {
+ case TMP108_CONVRATE_0P25HZ:
+ default:
+ *temp = 4000;
+ break;
+ case TMP108_CONVRATE_1HZ:
+ *temp = 1000;
+ break;
+ case TMP108_CONVRATE_4HZ:
+ *temp = 250;
+ break;
+ case TMP108_CONVRATE_16HZ:
+ *temp = 63;
+ break;
+ }
+ return 0;
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ /* Is it too early to return a conversion? */
+ if (time_before(jiffies, tmp108->ready_time)) {
+ dev_dbg(dev, "%s: Conversion not ready yet..\n",
+ __func__);
+ return -EAGAIN;
+ }
+ err = regmap_read(tmp108->regmap, TMP108_REG_TEMP, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ break;
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ *temp = !!(regval & (attr == hwmon_temp_min_alarm ?
+ TMP108_CONF_FL : TMP108_CONF_FH));
+ break;
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+ if (err < 0)
+ return err;
+ switch (regval & TMP108_CONF_HYSTERESIS_MASK) {
+ case TMP108_HYSTERESIS_0C:
+ default:
+ hyst = 0;
+ break;
+ case TMP108_HYSTERESIS_1C:
+ hyst = 1000;
+ break;
+ case TMP108_HYSTERESIS_2C:
+ hyst = 2000;
+ break;
+ case TMP108_HYSTERESIS_4C:
+ hyst = 4000;
+ break;
+ }
+ err = regmap_read(tmp108->regmap, attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp108_temp_reg_to_mC(regval);
+ if (attr == hwmon_temp_min_hyst)
+ *temp += hyst;
+ else
+ *temp -= hyst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp108_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ u32 regval, mask;
+ int err;
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval) {
+ if (temp < 156)
+ mask = TMP108_CONVRATE_16HZ;
+ else if (temp < 625)
+ mask = TMP108_CONVRATE_4HZ;
+ else if (temp < 2500)
+ mask = TMP108_CONVRATE_1HZ;
+ else
+ mask = TMP108_CONVRATE_0P25HZ;
+ return regmap_update_bits(tmp108->regmap,
+ TMP108_REG_CONF,
+ TMP108_CONF_CONVRATE_MASK,
+ mask);
+ }
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ return regmap_write(tmp108->regmap,
+ attr == hwmon_temp_min ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ tmp108_mC_to_temp_reg(temp));
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+ err = regmap_read(tmp108->regmap,
+ attr == hwmon_temp_min_hyst ?
+ TMP108_REG_TLOW : TMP108_REG_THIGH,
+ &regval);
+ if (err < 0)
+ return err;
+ if (attr == hwmon_temp_min_hyst)
+ temp -= tmp108_temp_reg_to_mC(regval);
+ else
+ temp = tmp108_temp_reg_to_mC(regval) - temp;
+ if (temp < 500)
+ mask = TMP108_HYSTERESIS_0C;
+ else if (temp < 1500)
+ mask = TMP108_HYSTERESIS_1C;
+ else if (temp < 3000)
+ mask = TMP108_HYSTERESIS_2C;
+ else
+ mask = TMP108_HYSTERESIS_4C;
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_HYSTERESIS_MASK, mask);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return 0644;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_min_hyst:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static u32 tmp108_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_chip = {
+ .type = hwmon_chip,
+ .config = tmp108_chip_config,
+};
+
+static u32 tmp108_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
+ | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info tmp108_temp = {
+ .type = hwmon_temp,
+ .config = tmp108_temp_config,
+};
+
+static const struct hwmon_channel_info *tmp108_info[] = {
+ &tmp108_chip,
+ &tmp108_temp,
+ NULL
+};
+
+static const struct hwmon_ops tmp108_hwmon_ops = {
+ .is_visible = tmp108_is_visible,
+ .read = tmp108_read,
+ .write = tmp108_write,
+};
+
+static const struct hwmon_chip_info tmp108_chip_info = {
+ .ops = &tmp108_hwmon_ops,
+ .info = tmp108_info,
+};
+
+static void tmp108_restore_config(void *data)
+{
+ struct tmp108 *tmp108 = data;
+
+ regmap_write(tmp108->regmap, TMP108_REG_CONF, tmp108->orig_config);
+}
+
+static bool tmp108_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != TMP108_REG_TEMP;
+}
+
+static bool tmp108_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* Configuration register must be volatile to enable FL and FH. */
+ return reg == TMP108_REG_TEMP || reg == TMP108_REG_CONF;
+}
+
+static const struct regmap_config tmp108_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP108_REG_THIGH,
+ .writeable_reg = tmp108_is_writeable_reg,
+ .volatile_reg = tmp108_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static int tmp108_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct tmp108 *tmp108;
+ int err;
+ u32 config;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(dev,
+ "adapter doesn't support SMBus word transactions\n");
+ return -ENODEV;
+ }
+
+ tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
+ if (!tmp108)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, tmp108);
+
+ tmp108->regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
+ if (IS_ERR(tmp108->regmap)) {
+ err = PTR_ERR(tmp108->regmap);
+ dev_err(dev, "regmap init failed: %d", err);
+ return err;
+ }
+
+ err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &config);
+ if (err < 0) {
+ dev_err(dev, "error reading config register: %d", err);
+ return err;
+ }
+ tmp108->orig_config = config;
+
+ /* Only continuous mode is supported. */
+ config &= ~TMP108_CONF_MODE_MASK;
+ config |= TMP108_MODE_CONTINUOUS;
+
+ /* Only comparator mode is supported. */
+ config &= ~TMP108_CONF_TM;
+
+ err = regmap_write(tmp108->regmap, TMP108_REG_CONF, config);
+ if (err < 0) {
+ dev_err(dev, "error writing config register: %d", err);
+ return err;
+ }
+
+ tmp108->ready_time = jiffies;
+ if ((tmp108->orig_config & TMP108_CONF_MODE_MASK) ==
+ TMP108_MODE_SHUTDOWN)
+ tmp108->ready_time +=
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+
+ err = devm_add_action_or_reset(dev, tmp108_restore_config, tmp108);
+ if (err) {
+ dev_err(dev, "add action or reset failed: %d", err);
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ tmp108,
+ &tmp108_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static int __maybe_unused tmp108_suspend(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
+}
+
+static int __maybe_unused tmp108_resume(struct device *dev)
+{
+ struct tmp108 *tmp108 = dev_get_drvdata(dev);
+ int err;
+
+ err = regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+ TMP108_CONF_MODE_MASK, TMP108_MODE_CONTINUOUS);
+ tmp108->ready_time = jiffies +
+ msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+
+static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "tmp108", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tmp108_of_ids[] = {
+ { .compatible = "ti,tmp108", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tmp108_of_ids);
+#endif
+
+static struct i2c_driver tmp108_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &tmp108_dev_pm_ops,
+ .of_match_table = of_match_ptr(tmp108_of_ids),
+ },
+ .probe = tmp108_probe,
+ .id_table = tmp108_i2c_ids,
+};
+
+module_i2c_driver(tmp108_driver);
+
+MODULE_AUTHOR("John Muir <john@jmuir.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP108 temperature sensor driver");
+MODULE_LICENSE("GPL");
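For context on the conversion helpers in the new driver above: the TMP108 reports temperature as a 12-bit two's-complement value left-justified in a 16-bit register, so one unit of the 16-bit word corresponds to 1000/256 millidegrees (one LSB of the 12-bit value is 62.5 m degC). A small stand-alone sketch of the same arithmetic, using hypothetical example values rather than anything from the patch:

#include <stdio.h>

/* same math as tmp108_temp_reg_to_mC()/tmp108_mC_to_temp_reg() above */
static int reg_to_mC(short reg)
{
	return (reg & ~0x0f) * 1000 / 256;	/* mask the 4 unused low bits */
}

static short mC_to_reg(int mc)
{
	return (short)(mc * 256 / 1000);
}

int main(void)
{
	printf("%d\n", reg_to_mC(0x7ff0));		/* 127937 -> TMP108_TEMP_MAX_MC */
	printf("%d\n", reg_to_mC((short)0xce00));	/* -50000 -> TMP108_TEMP_MIN_MC */
	printf("%#x\n", mC_to_reg(25000) & 0xffff);	/* 0x1900 encodes 25 degC */
	return 0;
}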
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ac91c07e3f90..d1f209a5feac 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -220,7 +220,7 @@ struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
-static int via_cputemp_device_add(unsigned int cpu)
+static int via_cputemp_online(unsigned int cpu)
{
int err;
struct platform_device *pdev;
@@ -261,7 +261,7 @@ exit:
return err;
}
-static void via_cputemp_device_remove(unsigned int cpu)
+static int via_cputemp_down_prep(unsigned int cpu)
{
struct pdev_entry *p;
@@ -272,33 +272,13 @@ static void via_cputemp_device_remove(unsigned int cpu)
list_del(&p->list);
mutex_unlock(&pdev_list_mutex);
kfree(p);
- return;
+ return 0;
}
}
mutex_unlock(&pdev_list_mutex);
+ return 0;
}
-static int via_cputemp_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- via_cputemp_device_add(cpu);
- break;
- case CPU_DOWN_PREPARE:
- via_cputemp_device_remove(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
- .notifier_call = via_cputemp_cpu_callback,
-};
-
static const struct x86_cpu_id __initconst cputemp_ids[] = {
{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
@@ -307,9 +287,11 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
+static enum cpuhp_state via_temp_online;
+
static int __init via_cputemp_init(void)
{
- int i, err;
+ int err;
if (!x86_match_cpu(cputemp_ids))
return -ENODEV;
@@ -318,58 +300,33 @@ static int __init via_cputemp_init(void)
if (err)
goto exit;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
-
- if (c->x86 != 6)
- continue;
-
- if (c->x86_model < 0x0a)
- continue;
-
- if (c->x86_model > 0x0f) {
- pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
- continue;
- }
-
- via_cputemp_device_add(i);
- }
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/via:online",
+ via_cputemp_online, via_cputemp_down_prep);
+ if (err < 0)
+ goto exit_driver_unreg;
+ via_temp_online = err;
#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
- cpu_notifier_register_done();
err = -ENODEV;
- goto exit_driver_unreg;
+ goto exit_hp_unreg;
}
#endif
-
- __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
- cpu_notifier_register_done();
return 0;
#ifndef CONFIG_HOTPLUG_CPU
+exit_hp_unreg:
+ cpuhp_remove_state_nocalls(via_temp_online);
+#endif
exit_driver_unreg:
platform_driver_unregister(&via_cputemp_driver);
-#endif
exit:
return err;
}
static void __exit via_cputemp_exit(void)
{
- struct pdev_entry *p, *n;
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
- cpu_notifier_register_done();
+ cpuhp_remove_state(via_temp_online);
platform_driver_unregister(&via_cputemp_driver);
}
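The via-cputemp conversion above is a straightforward instance of moving a driver from the old CPU notifier API to the hotplug state machine. A minimal sketch of that pattern, with placeholder names (the my_* identifiers and the "hwmon/example:online" string are illustrative, not from the patch):

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state my_hp_state;

static int my_online(unsigned int cpu)
{
	/* bring up the per-CPU device; called for each CPU coming online */
	return 0;
}

static int my_down_prep(unsigned int cpu)
{
	/* tear down the per-CPU device before the CPU goes offline */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state; the online callback
	 * is also invoked immediately for every CPU that is already online.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/example:online",
				my_online, my_down_prep);
	if (ret < 0)
		return ret;
	my_hp_state = ret;	/* remember the allocated state number */
	return 0;
}

static void __exit my_exit(void)
{
	/* runs my_down_prep() on every online CPU, then frees the state */
	cpuhp_remove_state(my_hp_state);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");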
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 2cd7c718198a..17741969026e 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -202,6 +202,21 @@ static void *etm_setup_aux(int event_cpu, void **pages,
if (!event_data)
return NULL;
+ /*
+ * In theory nothing prevents tracers in a trace session from being
+ * associated with different sinks, nor having a sink per tracer. But
+ * until we have HW with this kind of topology we need to assume tracers
+ * in a trace session are using the same sink. Therefore go through
+ * the coresight bus and pick the first enabled sink.
+ *
+ * When operated from sysFS, users are responsible for enabling the
+ * sink, while from perf the perf tools do it based on the choice made
+ * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
+ */
+ sink = coresight_get_enabled_sink(true);
+ if (!sink)
+ goto err;
+
INIT_WORK(&event_data->work, free_event_data);
mask = &event_data->mask;
@@ -219,25 +234,11 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
- event_data->path[cpu] = coresight_build_path(csdev);
+ event_data->path[cpu] = coresight_build_path(csdev, sink);
if (IS_ERR(event_data->path[cpu]))
goto err;
}
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology and a way to convey
- * sink assignement from the perf cmd line we need to assume tracers
- * in a trace session are using the same sink. Therefore pick the sink
- * found at the end of the first available path.
- */
- cpu = cpumask_first(mask);
- /* Grab the sink at the end of the path */
- sink = coresight_get_sink(event_data->path[cpu]);
- if (!sink)
- goto err;
-
if (!sink_ops(sink)->alloc_buffer)
goto err;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 4a18ee499965..ad063d7444e1 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -89,11 +89,13 @@
/* ETMCR - 0x00 */
#define ETMCR_PWD_DWN BIT(0)
#define ETMCR_STALL_MODE BIT(7)
+#define ETMCR_BRANCH_BROADCAST BIT(8)
#define ETMCR_ETM_PRG BIT(10)
#define ETMCR_ETM_EN BIT(11)
#define ETMCR_CYC_ACC BIT(12)
#define ETMCR_CTXID_SIZE (BIT(14)|BIT(15))
#define ETMCR_TIMESTAMP_EN BIT(28)
+#define ETMCR_RETURN_STACK BIT(29)
/* ETMCCR - 0x04 */
#define ETMCCR_FIFOFULL BIT(23)
/* ETMPDCR - 0x310 */
@@ -110,8 +112,11 @@
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_BBROAD BIT(5)
+#define ETM_MODE_RET_STACK BIT(6)
#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+ ETM_MODE_BBROAD | ETM_MODE_RET_STACK | \
ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e9b071953f80..ca98ad13bb8c 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -146,7 +146,7 @@ static ssize_t mode_store(struct device *dev,
goto err_unlock;
}
config->ctrl |= ETMCR_STALL_MODE;
- } else
+ } else
config->ctrl &= ~ETMCR_STALL_MODE;
if (config->mode & ETM_MODE_TIMESTAMP) {
@@ -164,6 +164,16 @@ static ssize_t mode_store(struct device *dev,
else
config->ctrl &= ~ETMCR_CTXID_SIZE;
+ if (config->mode & ETM_MODE_BBROAD)
+ config->ctrl |= ETMCR_BRANCH_BROADCAST;
+ else
+ config->ctrl &= ~ETMCR_BRANCH_BROADCAST;
+
+ if (config->mode & ETM_MODE_RET_STACK)
+ config->ctrl |= ETMCR_RETURN_STACK;
+ else
+ config->ctrl &= ~ETMCR_RETURN_STACK;
+
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm_config_trace_mode(config);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 196a14be4b3d..ef9d8e93e3b2 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -111,7 +111,9 @@ static inline void CS_UNLOCK(void __iomem *addr)
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct list_head *coresight_build_path(struct coresight_device *csdev);
+struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct list_head *coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 49e0f1b925a5..944c17b48d23 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -419,10 +419,10 @@ static ssize_t stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
if (!(drvdata && local_read(&drvdata->mode)))
- return 0;
+ return -EACCES;
if (channel >= drvdata->numsp)
- return 0;
+ return -EINVAL;
ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
@@ -920,6 +920,11 @@ static struct amba_id stm_ids[] = {
.mask = 0x0003ffff,
.data = "STM32",
},
+ {
+ .id = 0x0003b963,
+ .mask = 0x0003ffff,
+ .data = "STM500",
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea24d8d..1549436e2492 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -70,7 +70,7 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
@@ -103,19 +103,14 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
char *buf = NULL;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
-
/*
* If we don't have a buffer release the lock and allocate memory.
* Otherwise keep the lock and move along.
@@ -138,13 +133,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -163,6 +157,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = buf;
}
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -177,34 +172,29 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETB/ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etb_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -216,9 +206,9 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etf_sink_sysfs(csdev, mode);
+ return tmc_enable_etf_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etf_sink_perf(csdev, mode);
+ return tmc_enable_etf_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -227,7 +217,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -237,10 +226,11 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etb_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -260,7 +250,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
tmc_etf_enable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_SYSFS);
+ drvdata->mode = CS_MODE_SYSFS;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC-ETF enabled\n");
@@ -280,7 +270,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
}
tmc_etf_disable_hw(drvdata);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_info(drvdata->dev, "TMC disabled\n");
@@ -383,7 +373,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
return;
/* This shouldn't happen */
- if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+ if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return;
CS_UNLOCK(drvdata->base);
@@ -504,7 +494,6 @@ const struct coresight_ops tmc_etf_cs_ops = {
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
- long val;
enum tmc_mode mode;
int ret = 0;
unsigned long flags;
@@ -528,9 +517,8 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -542,7 +530,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_disable_hw(drvdata);
drvdata->reading = true;
@@ -573,7 +561,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
}
/* Re-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 886ea83c68e0..5d312699b3b9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -86,26 +86,22 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_dump_hw(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
}
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
- long val;
unsigned long flags;
void __iomem *vaddr = NULL;
dma_addr_t paddr;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_SYSFS))
- return -EINVAL;
/*
* If we don't have a buffer release the lock and allocate memory.
@@ -134,13 +130,12 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
@@ -155,8 +150,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
drvdata->buf = drvdata->vaddr;
}
- memset(drvdata->vaddr, 0, drvdata->size);
-
+ drvdata->mode = CS_MODE_SYSFS;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -171,34 +165,29 @@ out:
return ret;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
int ret = 0;
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- /* This shouldn't be happening */
- if (WARN_ON(mode != CS_MODE_PERF))
- return -EINVAL;
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EINVAL;
goto out;
}
- val = local_xchg(&drvdata->mode, mode);
/*
* In Perf mode there can be only one writer per sink. There
* is also no need to continue if the ETR is already operated
* from sysFS.
*/
- if (val != CS_MODE_DISABLED) {
+ if (drvdata->mode != CS_MODE_DISABLED) {
ret = -EINVAL;
goto out;
}
+ drvdata->mode = CS_MODE_PERF;
tmc_etr_enable_hw(drvdata);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -210,9 +199,9 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etr_sink_sysfs(csdev, mode);
+ return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev, mode);
+ return tmc_enable_etr_sink_perf(csdev);
}
/* We shouldn't be here */
@@ -221,7 +210,6 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
- long val;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -231,10 +219,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
return;
}
- val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
+ if (drvdata->mode != CS_MODE_DISABLED) {
tmc_etr_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -253,7 +242,6 @@ const struct coresight_ops tmc_etr_cs_ops = {
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
int ret = 0;
- long val;
unsigned long flags;
/* config types are set a boot time and never change */
@@ -266,9 +254,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
- if (val == CS_MODE_PERF) {
+ if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -280,7 +267,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
@@ -303,7 +290,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 44b3ae346118..51c01851533e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -117,7 +117,7 @@ struct tmc_drvdata {
void __iomem *vaddr;
u32 size;
u32 len;
- local_t mode;
+ u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
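The local_t to u32 change above is safe because, as the TMC hunks show, drvdata->mode is only ever updated with drvdata->spinlock held, so the lock already serializes those accesses and the atomic local_* accessors add nothing. The resulting access pattern, roughly:

	/* sketch: mode transitions happen under the drvdata spinlock */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_DISABLED) {
		drvdata->mode = CS_MODE_SYSFS;	/* plain store, lock held */
		tmc_etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);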
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0beb6f..0c37356e417c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -368,6 +368,52 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
+static int coresight_enabled_sink(struct device *dev, void *data)
+{
+ bool *reset = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->activated) {
+ /*
+ * Now that we have a handle on the sink for this session,
+ * disable the sysFS "enable_sink" flag so that a possible concurrent
+ * perf session that wishes to use another sink doesn't trip on it.
+ * Doing so has no ramification for the current
+ * session.
+ */
+ if (*reset)
+ csdev->activated = false;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+ * @deactivate: Whether the 'enable_sink' flag should be reset
+ *
+ * When operated from perf the deactivate parameter should be set to 'true'.
+ * That way the "enable_sink" flag of the sink that was selected can be reset,
+ * allowing for other concurrent perf sessions to choose a different sink.
+ *
+ * When operated from sysFS users have full control and as such the deactivate
+ * parameter should be set to 'false', hence mandating users to explicitly
+ * clear the flag.
+ */
+struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+ coresight_enabled_sink);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -380,6 +426,7 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
* last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink,
struct list_head *path)
{
int i;
@@ -387,15 +434,15 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_node *node;
/* An activated sink has been found. Enqueue the element */
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+ if (csdev == sink)
goto out;
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
struct coresight_device *child_dev = csdev->conns[i].child_dev;
- if (child_dev && _coresight_build_path(child_dev, path) == 0) {
+ if (child_dev &&
+ _coresight_build_path(child_dev, sink, path) == 0) {
found = true;
break;
}
@@ -422,18 +469,22 @@ out:
return 0;
}
-struct list_head *coresight_build_path(struct coresight_device *csdev)
+struct list_head *coresight_build_path(struct coresight_device *source,
+ struct coresight_device *sink)
{
struct list_head *path;
int rc;
+ if (!sink)
+ return ERR_PTR(-EINVAL);
+
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(csdev, path);
+ rc = _coresight_build_path(source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -497,6 +548,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
int coresight_enable(struct coresight_device *csdev)
{
int cpu, ret = 0;
+ struct coresight_device *sink;
struct list_head *path;
mutex_lock(&coresight_mutex);
@@ -508,7 +560,17 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
- path = coresight_build_path(csdev);
+ /*
+ * Search for a valid sink for this session but don't reset the
+ * "enable_sink" flag in sysFS. Users get to do that explicitly.
+ */
+ sink = coresight_get_enabled_sink(false);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
ret = PTR_ERR(path);
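With the changes above the sink is chosen up front and handed to coresight_build_path() rather than discovered at the end of the first path. A condensed sketch of the new calling convention (mirroring coresight_enable() and etm_setup_aux(); example_enable() and 'source' are placeholder names for the tracer being enabled):

static int example_enable(struct coresight_device *source)
{
	struct coresight_device *sink;
	struct list_head *path;

	/* perf passes true so the claimed sink's sysFS "enable_sink" flag is
	 * cleared; sysFS users pass false and clear the flag themselves.
	 */
	sink = coresight_get_enabled_sink(false);
	if (!sink)
		return -EINVAL;

	path = coresight_build_path(source, sink);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* ... enable the path, release it later with coresight_release_path() ... */
	return 0;
}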
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a2c6ec..cdd9b3b26195 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -29,6 +29,9 @@
#include "intel_th.h"
#include "debug.h"
+static bool host_mode __read_mostly;
+module_param(host_mode, bool, 0444);
+
static DEFINE_IDA(intel_th_ida);
static int intel_th_match(struct device *dev, struct device_driver *driver)
@@ -380,7 +383,7 @@ static void intel_th_device_free(struct intel_th_device *thdev)
/*
* Intel(R) Trace Hub subdevices
*/
-static struct intel_th_subdevice {
+static const struct intel_th_subdevice {
const char *name;
struct resource res[3];
unsigned nres;
@@ -527,14 +530,19 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
{
struct resource res[3];
unsigned int req = 0;
- int i, err;
+ int src, dst, err;
/* create devices for each intel_th_subdevice */
- for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) {
- struct intel_th_subdevice *subdev = &intel_th_subdevices[i];
+ for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ const struct intel_th_subdevice *subdev =
+ &intel_th_subdevices[src];
struct intel_th_device *thdev;
int r;
+ /* only allow SOURCE and SWITCH devices in host mode */
+ if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+ continue;
+
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
subdev->id);
if (!thdev) {
@@ -577,10 +585,12 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, i);
+ thdev->dev.devt = MKDEV(th->major, dst);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
+ } else if (subdev->type == INTEL_TH_SWITCH) {
+ thdev->host_mode = host_mode;
}
err = device_add(&thdev->dev);
@@ -597,14 +607,14 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
req++;
}
- th->thdev[i] = thdev;
+ th->thdev[dst++] = thdev;
}
return 0;
kill_subdevs:
- for (i-- ; i >= 0; i--)
- intel_th_device_remove(th->thdev[i]);
+ for (; dst >= 0; dst--)
+ intel_th_device_remove(th->thdev[dst]);
return err;
}
@@ -717,7 +727,7 @@ void intel_th_free(struct intel_th *th)
intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
- if (th->thdev[i] != th->hub)
+ if (th->thdev[i] && th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 33e09369a491..dd32d0bad687 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -564,6 +564,9 @@ static int intel_th_gth_assign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int i, id;
+ if (thdev->host_mode)
+ return -EBUSY;
+
if (othdev->type != INTEL_TH_OUTPUT)
return -EINVAL;
@@ -600,6 +603,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ if (thdev->host_mode)
+ return;
+
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
@@ -654,9 +660,24 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->base = base;
spin_lock_init(&gth->gth_lock);
+ /*
+ * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
+ * bit. Either way, don't reset HW in this case, and don't export any
+ * capture configuration attributes. Also, refuse to assign output
+ * drivers to ports, see intel_th_gth_assign().
+ */
+ if (thdev->host_mode)
+ goto done;
+
ret = intel_th_gth_reset(gth);
- if (ret)
- return ret;
+ if (ret) {
+ if (ret != -EBUSY)
+ return ret;
+
+ thdev->host_mode = true;
+
+ goto done;
+ }
for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
gth->master[i] = -1;
@@ -677,6 +698,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
return -ENOMEM;
}
+done:
dev_set_drvdata(dev, gth);
return 0;
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 4c195786bf1f..3096e7054f6d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -54,6 +54,7 @@ struct intel_th_output {
* @num_resources: number of resources in @resource array
* @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
* @id: device instance or -1
+ * @host_mode: Intel TH is controlled by an external debug host
* @output: output descriptor for INTEL_TH_OUTPUT devices
* @name: device name to match the driver
*/
@@ -64,6 +65,9 @@ struct intel_th_device {
unsigned int type;
int id;
+ /* INTEL_TH_SWITCH specific */
+ bool host_mode;
+
/* INTEL_TH_OUTPUT specific */
struct intel_th_output output;
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 51f81d64ca37..a6ea387b5b00 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -361,7 +361,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
struct stm_file *stmf;
struct device *dev;
unsigned int major = imajor(inode);
- int err = -ENODEV;
+ int err = -ENOMEM;
dev = class_find_device(&stm_class, NULL, &major, major_match);
if (!dev)
@@ -369,8 +369,9 @@ static int stm_char_open(struct inode *inode, struct file *file)
stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
if (!stmf)
- return -ENOMEM;
+ goto err_put_device;
+ err = -ENODEV;
stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
@@ -382,9 +383,10 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ kfree(stmf);
+err_put_device:
/* matches class_find_device() above */
put_device(dev);
- kfree(stmf);
return err;
}
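The stm_char_open() change above fixes the error unwinding: class_find_device() takes a reference on the device, and before this change a failed kzalloc() returned -ENOMEM without dropping that reference. The corrected goto ladder releases resources in reverse order of acquisition, condensed:

	dev = class_find_device(&stm_class, NULL, &major, major_match);
	if (!dev)
		return -ENODEV;

	stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
	if (!stmf)
		goto err_put_device;		/* err is -ENOMEM here */

	err = -ENODEV;
	/* ... initialize the output and link it to the device ... */
	return nonseekable_open(inode, file);

err_free:
	kfree(stmf);
err_put_device:
	put_device(dev);			/* matches class_find_device() above */
	return err;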
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d223650a97e4..11edabf425ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on HAS_IOMEM
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 11e866d05368..b403fa5ecf49 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -91,9 +91,7 @@
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
-#define DW_IC_STATUS_ACTIVITY 0x1
-#define DW_IC_STATUS_TFE BIT(2)
-#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_STATUS_ACTIVITY 0x1
#define DW_IC_SDA_HOLD_RX_SHIFT 16
#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
@@ -478,25 +476,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 ic_tar = 0;
- bool enabled;
- enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1;
-
- if (enabled) {
- u32 ic_status;
-
- /*
- * Only disable adapter if ic_tar and ic_con can't be
- * dynamically updated
- */
- ic_status = dw_readl(dev, DW_IC_STATUS);
- if (!dev->dynamic_tar_update_enabled ||
- (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
- !(ic_status & DW_IC_STATUS_TFE)) {
- __i2c_dw_enable_and_wait(dev, false);
- enabled = false;
- }
- }
+ /* Disable the adapter */
+ __i2c_dw_enable_and_wait(dev, false);
/* if the slave address is ten bit address, enable 10BITADDR */
if (dev->dynamic_tar_update_enabled) {
@@ -526,8 +508,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
- if (!enabled)
- __i2c_dw_enable(dev, true);
+ /* Enable the adapter */
+ __i2c_dw_enable(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -611,7 +593,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
/* avoid rx buffer overrun */
- if (rx_limit - dev->rx_outstanding <= 0)
+ if (dev->rx_outstanding >= dev->rx_fifo_depth)
break;
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
@@ -708,8 +690,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
}
/*
- * Prepare controller for a transaction and start transfer by calling
- * i2c_dw_xfer_init()
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -752,13 +733,23 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
goto done;
}
+ /*
+ * We must disable the adapter before returning and signaling the end
+ * of the current transfer. Otherwise the hardware might continue
+ * generating interrupts which in turn causes a race condition with
+ * the following transfer. It needs more investigation whether the
+ * additional interrupts are a hardware bug or this driver doesn't
+ * handle them correctly yet.
+ */
+ __i2c_dw_enable(dev, false);
+
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
}
/* no error */
- if (likely(!dev->cmd_err)) {
+ if (likely(!dev->cmd_err && !dev->status)) {
ret = num;
goto done;
}
@@ -768,6 +759,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = i2c_dw_handle_tx_abort(dev);
goto done;
}
+
+ if (dev->status)
+ dev_err(dev->dev,
+ "transfer terminated early - interrupt latency too high?\n");
+
ret = -EIO;
done:
@@ -888,19 +884,9 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
- || dev->msg_err) {
- /*
- * We must disable interruts before returning and signaling
- * the end of the current transfer. Otherwise the hardware
- * might continue generating interrupts for non-existent
- * transfers.
- */
- i2c_dw_disable_int(dev);
- dw_readl(dev, DW_IC_CLR_INTR);
-
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
- } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 49f2084f7bb5..50813a24c541 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
- clk_unprepare(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 419b54bfc7c7..5e63b17f935d 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -381,9 +381,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- data[i] = octeon_i2c_data_read(i2c, &result);
- if (result)
- return result;
+ data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 1db7c835a454..87151ea74acd 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -5,7 +5,6 @@
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -145,9 +144,9 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
-
- readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V,
- I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout);
+ do {
+ tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ } while ((tmp & SW_TWSI_V) != 0);
}
#define octeon_i2c_ctl_write(i2c, val) \
@@ -164,28 +163,24 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
- int *error)
+static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
{
u64 tmp;
- int ret;
__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+ do {
+ tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ } while ((tmp & SW_TWSI_V) != 0);
- ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
- tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT,
- i2c->adap.timeout);
- if (error)
- *error = ret;
return tmp & 0xFF;
}
#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
-#define octeon_i2c_data_read(i2c, error) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
+#define octeon_i2c_data_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
/**
* octeon_i2c_read_int - read the TWSI_INT register
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 1704fc84d647..b432b64e307a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2179,6 +2179,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2189,7 +2190,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index e280c8ecc0b5..96de9ce5669b 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for a
register based I2C multiplexer. This driver provides access to
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index b3893f6282ba..3e6fe1760d82 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
goto err_with_revert;
}
- p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+ /*
+ * Check if there are pinctrl states at all. Note: we can't use
+ * devm_pinctrl_get_select() because we need to distinguish between
+ * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
+ */
+ p = devm_pinctrl_get(adap->dev.parent);
if (IS_ERR(p)) {
ret = PTR_ERR(p);
- goto err_with_put;
+ /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */
+ if (ret != -ENODEV)
+ goto err_with_put;
+ } else {
+ /* there are states. check and use them */
+ struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
+
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto err_with_put;
+ }
+ ret = pinctrl_select_state(p, s);
+ if (ret < 0)
+ goto err_with_put;
}
priv->chan[new_chan].parent_adap = adap;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 1091346f2480..8bc3d36d2837 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client,
/* discard unconfigured channels */
break;
idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
- data->deselect |= (idle_disconnect_pd
- || idle_disconnect_dt) << num;
}
+ data->deselect |= (idle_disconnect_pd ||
+ idle_disconnect_dt) << num;
ret = i2c_mux_add_adapter(muxc, force, num, class);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 05352f490d60..f90ea221f7f2 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -211,7 +211,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
- sense_rq->cmd_flags |= REQ_PREEMPT;
+ sense_rq->rq_flags |= RQF_PREEMPT;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
@@ -295,7 +295,7 @@ int ide_cd_expiry(ide_drive_t *drive)
wait = ATAPI_WAIT_PC;
break;
default:
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
wait = 0;
@@ -375,7 +375,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
}
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 1;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad296ed..9cbd217bc0c9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -98,7 +98,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
struct request_sense *sense = &drive->sense_data;
int log = 0;
- if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
+ if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
return 0;
ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
@@ -291,7 +291,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* (probably while trying to recover from a former error).
* Just give up.
*/
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
return 2;
}
@@ -311,7 +311,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
cdrom_saw_media_change(drive);
if (rq->cmd_type == REQ_TYPE_FS &&
- !(rq->cmd_flags & REQ_QUIET))
+ !(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
}
@@ -346,7 +346,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in retrying after an illegal request or data
* protect error.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "command error", stat);
do_end_request = 1;
break;
@@ -355,14 +355,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
* No point in re-trying a zillion times on a bad sector.
* If we got here the error is not correctable.
*/
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error "
"(bad sector)", stat);
do_end_request = 1;
break;
case BLANK_CHECK:
/* disk appears blank? */
- if (!(rq->cmd_flags & REQ_QUIET))
+ if (!(rq->rq_flags & RQF_QUIET))
ide_dump_status(drive, "media error (blank)",
stat);
do_end_request = 1;
@@ -380,7 +380,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}
if (rq->cmd_type != REQ_TYPE_FS) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
@@ -422,19 +422,19 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
struct request_sense *sense, int timeout,
- unsigned int cmd_flags)
+ req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
struct request_sense local_sense;
int retries = 10;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
if (!sense)
sense = &local_sense;
ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
- "cmd_flags: 0x%x",
- cmd[0], write, timeout, cmd_flags);
+ "rq_flags: 0x%x",
+ cmd[0], write, timeout, rq_flags);
/* start of retry loop */
do {
@@ -446,7 +446,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
memcpy(rq->cmd, cmd, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_ATA_PC;
rq->sense = sense;
- rq->cmd_flags |= cmd_flags;
+ rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {
error = blk_rq_map_kern(drive->queue, rq, buffer,
@@ -462,14 +462,14 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = rq->resid_len;
- flags = rq->cmd_flags;
+ flags = rq->rq_flags;
blk_put_request(rq);
/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
- if (flags & REQ_FAILED) {
+ if (flags & RQF_FAILED) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
@@ -494,10 +494,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
/* end of retry loop */
- } while ((flags & REQ_FAILED) && retries >= 0);
+ } while ((flags & RQF_FAILED) && retries >= 0);
/* return an error if the command failed */
- return (flags & REQ_FAILED) ? -EIO : 0;
+ return (flags & RQF_FAILED) ? -EIO : 0;
}
/*
@@ -589,7 +589,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
"(%u bytes)\n", drive->name, __func__,
cmd->nleft);
if (!write)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
@@ -607,7 +607,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
if (!uptodate)
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
}
goto out_end;
}
@@ -745,9 +745,9 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
rq->cmd[0], rq->cmd_type);
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
else
- rq->cmd_flags &= ~REQ_FAILED;
+ rq->rq_flags &= ~RQF_FAILED;
drive->dma = 0;
@@ -867,7 +867,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -890,7 +890,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
- REQ_QUIET);
+ RQF_QUIET);
if (stat)
return stat;
@@ -943,7 +943,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1efc936f5b66..eea60c986c4f 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -101,7 +101,7 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, unsigned int);
+ unsigned *, struct request_sense *, int, req_flags_t);
int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 5887a7a09e37..f085e3a2e1d6 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -305,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV;
- rq->cmd_flags = REQ_QUIET;
+ rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
/*
@@ -449,7 +449,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
ide_drive_t *drive = cdi->handle;
- unsigned int flags = 0;
+ req_flags_t flags = 0;
unsigned len = cgc->buflen;
if (cgc->timeout <= 0)
@@ -463,7 +463,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
memset(cgc->sense, 0, sizeof(struct request_sense));
if (cgc->quiet)
- flags |= REQ_QUIET;
+ flags |= RQF_QUIET;
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 669ea1e45795..6360bbd37efe 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
- BUG_ON(!(rq->cmd_flags & REQ_STARTED));
+ BUG_ON(!(rq->rq_flags & RQF_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
@@ -316,7 +316,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
/* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) {
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
goto kill_rq;
}
@@ -539,7 +539,7 @@ repeat:
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PREEMPT) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index e34af488693a..a015acdffb39 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -53,7 +53,7 @@ static int ide_pm_execute_rq(struct request *rq)
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
- rq->cmd_flags |= REQ_QUIET;
+ rq->rq_flags |= RQF_QUIET;
rq->errors = -ENXIO;
__blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
@@ -90,7 +90,7 @@ int generic_ide_resume(struct device *dev)
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
- rq->cmd_flags |= REQ_PREEMPT;
+ rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 4732dfc15447..55bcf803841e 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -8,20 +8,3 @@ config INTEL_IDLE
native Intel hardware idle features. The acpi_idle driver
can be configured at the same time, in order to handle
processors intel_idle does not support.
-
-menu "Memory power savings"
-depends on X86_64
-
-config I7300_IDLE_IOAT_CHANNEL
- bool
-
-config I7300_IDLE
- tristate "Intel chipset idle memory power saving driver"
- select I7300_IDLE_IOAT_CHANNEL
- help
- Enable memory power savings when idle with certain Intel server
- chipsets. The chipset must have I/O AT support, such as the
- Intel 7300. The power savings depends on the type and quantity of
- DRAM devices.
-
-endmenu
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 23d295cf10f2..0007111d73e9 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,2 @@
-obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
deleted file mode 100644
index ffeebc7e9f1c..000000000000
--- a/drivers/idle/i7300_idle.c
+++ /dev/null
@@ -1,612 +0,0 @@
-/*
- * (C) Copyright 2008 Intel Corporation
- * Authors:
- * Andy Henroid <andrew.d.henroid@intel.com>
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- */
-
-/*
- * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
- * are idle, using the DIMM thermal throttling capability.
- *
- * This driver depends on the Intel integrated DMA controller (I/O AT).
- * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
- * this driver should work cooperatively.
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/notifier.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/stop_machine.h>
-#include <linux/i7300_idle.h>
-
-#include <asm/idle.h>
-
-#include "../dma/ioat/hw.h"
-#include "../dma/ioat/registers.h"
-
-#define I7300_IDLE_DRIVER_VERSION "1.55"
-#define I7300_PRINT "i7300_idle:"
-
-#define MAX_STOP_RETRIES 10
-
-static int debug;
-module_param_named(debug, debug, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
-
-static int forceload;
-module_param_named(forceload, forceload, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable driver testing on unvalidated i5000");
-
-#define dprintk(fmt, arg...) \
- do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
-
-/*
- * Value to set THRTLOW to when initiating throttling
- * 0 = No throttling
- * 1 = Throttle when > 4 activations per eval window (Maximum throttling)
- * 2 = Throttle when > 8 activations
- * 168 = Throttle when > 672 activations (Minimum throttling)
- */
-#define MAX_THROTTLE_LOW_LIMIT 168
-static uint throttle_low_limit = 1;
-module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
-MODULE_PARM_DESC(throttle_low_limit,
- "Value for THRTLOWLM activation field "
- "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
-
-/*
- * simple invocation and duration statistics
- */
-static unsigned long total_starts;
-static unsigned long total_us;
-
-#ifdef DEBUG
-static unsigned long past_skip;
-#endif
-
-static struct pci_dev *fbd_dev;
-
-static raw_spinlock_t i7300_idle_lock;
-static int i7300_idle_active;
-
-static u8 i7300_idle_thrtctl_saved;
-static u8 i7300_idle_thrtlow_saved;
-static u32 i7300_idle_mc_saved;
-
-static cpumask_var_t idle_cpumask;
-static ktime_t start_ktime;
-static unsigned long avg_idle_us;
-
-static struct dentry *debugfs_dir;
-
-/* Begin: I/O AT Helper routines */
-
-#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
-/* Snoop control (disable snoops when coherency is not important) */
-#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
-#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)
-
-static struct pci_dev *ioat_dev;
-static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
-static unsigned long ioat_desc_phys;
-static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
-static u8 *ioat_chanbase;
-
-/* Start I/O AT memory copy */
-static int i7300_idle_ioat_start(void)
-{
- u32 err;
- /* Clear error (due to circular descriptor pointer) */
- err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
- if (err)
- writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);
-
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return 0;
-}
-
-/* Stop I/O AT memory copy */
-static void i7300_idle_ioat_stop(void)
-{
- int i;
- u64 sts;
-
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(10);
-
- sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (sts != IOAT_CHANSTS_ACTIVE)
- break;
-
- }
-
- if (i == MAX_STOP_RETRIES) {
- dprintk("failed to stop I/O AT after %d retries\n",
- MAX_STOP_RETRIES);
- }
-}
-
-/* Test I/O AT by copying 1024 byte from 2k to 1k */
-static int __init i7300_idle_ioat_selftest(u8 *ctl,
- struct ioat_dma_descriptor *desc, unsigned long desc_phys)
-{
- u64 chan_sts;
-
- memset(desc, 0, 2048);
- memset((u8 *) desc + 2048, 0xab, 1024);
-
- desc[0].size = 1024;
- desc[0].ctl = 0;
- desc[0].src_addr = desc_phys + 2048;
- desc[0].dst_addr = desc_phys + 1024;
- desc[0].next = 0;
-
- writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(1000);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_DONE) {
- /* Not complete, reset the channel */
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return -1;
- }
-
- if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
- *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
- dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
- *(u32 *) ((u8 *) desc + 2048),
- *(u32 *) ((u8 *) desc + 1024),
- *(u32 *) ((u8 *) desc + 3072));
- return -1;
- }
- return 0;
-}
-
-static struct device dummy_dma_dev = {
- .init_name = "fallback device",
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .dma_mask = &dummy_dma_dev.coherent_dma_mask,
-};
-
-/* Setup and initialize I/O AT */
-/* This driver needs I/O AT as the throttling takes effect only when there is
- * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
- * go idle and memory is throttled.
- */
-static int __init i7300_idle_ioat_init(void)
-{
- u8 ver, chan_count, ioat_chan;
- u16 chan_ctl;
-
- ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
- pci_resource_len(ioat_dev, 0));
-
- if (!ioat_iomap) {
- printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
- goto err_ret;
- }
-
- ver = readb(ioat_iomap + IOAT_VER_OFFSET);
- if (ver != IOAT_VER_1_2) {
- printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
- ver >> 4, ver & 0xf);
- goto err_unmap;
- }
-
- chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
- if (!chan_count) {
- printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
- "(%u)\n",
- chan_count);
- goto err_unmap;
- }
-
- ioat_chan = chan_count - 1;
- ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);
-
- chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
- printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
- goto err_unmap;
- }
-
- writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
- ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-
- ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
- &dummy_dma_dev, 4096,
- (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
- if (!ioat_desc) {
- printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
- goto err_mark_unused;
- }
-
- writel(ioat_desc_phys & 0xffffffffUL,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(ioat_desc_phys >> 32,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);
-
- if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
- printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
- goto err_free;
- }
-
- /* Setup circular I/O AT descriptor chain */
- ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
- ioat_desc[0].src_addr = ioat_desc_phys + 2048;
- ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
- ioat_desc[0].size = 128;
- ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);
-
- ioat_desc[1].ctl = ioat_desc[0].ctl;
- ioat_desc[1].src_addr = ioat_desc[0].src_addr;
- ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
- ioat_desc[1].size = ioat_desc[0].size;
- ioat_desc[1].next = ioat_desc_phys;
-
- return 0;
-
-err_free:
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
-err_mark_unused:
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-err_unmap:
- iounmap(ioat_iomap);
-err_ret:
- return -ENODEV;
-}
-
-/* Cleanup I/O AT */
-static void __exit i7300_idle_ioat_exit(void)
-{
- int i;
- u64 chan_sts;
-
- i7300_idle_ioat_stop();
-
- /* Wait for a while for the channel to halt before releasing */
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_ACTIVE) {
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- break;
- }
- udelay(1000);
- }
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- /*
- * We tried to reset multiple times. If IO A/T channel is still active
- * flag an error and return without cleanup. Memory leak is better
- * than random corruption in that extreme error situation.
- */
- if (chan_sts == IOAT_CHANSTS_ACTIVE) {
- printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
- " Not freeing resources\n");
- return;
- }
-
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
- iounmap(ioat_iomap);
-}
-
-/* End: I/O AT Helper routines */
-
-#define DIMM_THRTLOW 0x64
-#define DIMM_THRTCTL 0x67
-#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
-#define DIMM_MC 0x40
-#define DIMM_GTW_MODE (1UL << 17)
-#define DIMM_GBLACT 0x60
-
-/*
- * Keep track of an exponential-decaying average of recent idle durations.
- * The latest duration gets DURATION_WEIGHT_PCT percentage weight
- * in this average, with the old average getting the remaining weight.
- *
- * High weights emphasize recent history, low weights include long history.
- */
-#define DURATION_WEIGHT_PCT 55
-
-/*
- * When the decaying average of recent durations or the predicted duration
- * of the next timer interrupt is shorter than duration_threshold, the
- * driver will decline to throttle.
- */
-#define DURATION_THRESHOLD_US 100
-
-
-/* Store DIMM thermal throttle configuration */
-static int i7300_idle_thrt_save(void)
-{
- u32 new_mc_val;
- u8 gblactlm;
-
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
- pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
- /*
- * Make sure we have Global Throttling Window Mode set to have a
- * "short" window. This (mostly) works around an issue where
- * throttling persists until the end of the global throttling window
- * size. On the tested system, this was resulting in a maximum of
- * 64 ms to exit throttling (average 32 ms). The actual numbers
- * depends on system frequencies. Setting the short window reduces
- * this by a factor of 4096.
- *
- * We will only do this only if the system is set for
- * unlimited-activations while in open-loop throttling (i.e., when
- * Global Activation Throttle Limit is zero).
- */
- pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
- dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
- i7300_idle_thrtctl_saved,
- i7300_idle_thrtlow_saved);
- dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
- i7300_idle_mc_saved,
- gblactlm);
- if (gblactlm == 0) {
- new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
- pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
- return 0;
- } else {
- dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
- return -ENODEV;
- }
-}
-
-/* Restore DIMM thermal throttle configuration */
-static void i7300_idle_thrt_restore(void)
-{
- pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
-}
-
-/* Enable DIMM thermal throttling */
-static void i7300_idle_start(void)
-{
- u8 new_ctl;
- u8 limit;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- limit = throttle_low_limit;
- if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
- limit = MAX_THROTTLE_LOW_LIMIT;
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);
-
- new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-}
-
-/* Disable DIMM thermal throttling */
-static void i7300_idle_stop(void)
-{
- u8 new_ctl;
- u8 got_ctl;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
- WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
-}
-
-
-/*
- * i7300_avg_duration_check()
- * return 0 if the decaying average of recent idle durations is
- * more than DURATION_THRESHOLD_US
- */
-static int i7300_avg_duration_check(void)
-{
- if (avg_idle_us >= DURATION_THRESHOLD_US)
- return 0;
-
-#ifdef DEBUG
- past_skip++;
-#endif
- return 1;
-}
-
-/* Idle notifier to look at idle CPUs */
-static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- unsigned long flags;
- ktime_t now_ktime;
- static ktime_t idle_begin_time;
- static int time_init = 1;
-
- if (!throttle_low_limit)
- return 0;
-
- if (unlikely(time_init)) {
- time_init = 0;
- idle_begin_time = ktime_get();
- }
-
- raw_spin_lock_irqsave(&i7300_idle_lock, flags);
- if (val == IDLE_START) {
-
- cpumask_set_cpu(smp_processor_id(), idle_cpumask);
-
- if (cpumask_weight(idle_cpumask) != num_online_cpus())
- goto end;
-
- now_ktime = ktime_get();
- idle_begin_time = now_ktime;
-
- if (i7300_avg_duration_check())
- goto end;
-
- i7300_idle_active = 1;
- total_starts++;
- start_ktime = now_ktime;
-
- i7300_idle_start();
- i7300_idle_ioat_start();
-
- } else if (val == IDLE_END) {
- cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
- if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
- /* First CPU coming out of idle */
- u64 idle_duration_us;
-
- now_ktime = ktime_get();
-
- idle_duration_us = ktime_to_us(ktime_sub
- (now_ktime, idle_begin_time));
-
- avg_idle_us =
- ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
- DURATION_WEIGHT_PCT * idle_duration_us) / 100;
-
- if (i7300_idle_active) {
- ktime_t idle_ktime;
-
- idle_ktime = ktime_sub(now_ktime, start_ktime);
- total_us += ktime_to_us(idle_ktime);
-
- i7300_idle_ioat_stop();
- i7300_idle_stop();
- i7300_idle_active = 0;
- }
- }
- }
-end:
- raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
- return 0;
-}
-
-static struct notifier_block i7300_idle_nb = {
- .notifier_call = i7300_idle_notifier,
-};
-
-MODULE_DEVICE_TABLE(pci, pci_tbl);
-
-static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
- loff_t *off)
-{
- unsigned long *p = fp->private_data;
- char buf[32];
- int len;
-
- len = snprintf(buf, 32, "%lu\n", *p);
- return simple_read_from_buffer(ubuf, count, off, buf, len);
-}
-
-static const struct file_operations idle_fops = {
- .open = simple_open,
- .read = stats_read_ul,
- .llseek = default_llseek,
-};
-
-struct debugfs_file_info {
- void *ptr;
- char name[32];
- struct dentry *file;
-} debugfs_file_list[] = {
- {&total_starts, "total_starts", NULL},
- {&total_us, "total_us", NULL},
-#ifdef DEBUG
- {&past_skip, "past_skip", NULL},
-#endif
- {NULL, "", NULL}
- };
-
-static int __init i7300_idle_init(void)
-{
- raw_spin_lock_init(&i7300_idle_lock);
- total_us = 0;
-
- if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
- return -ENODEV;
-
- if (i7300_idle_thrt_save())
- return -ENODEV;
-
- if (i7300_idle_ioat_init())
- return -ENODEV;
-
- if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].ptr != NULL) {
- debugfs_file_list[i].file = debugfs_create_file(
- debugfs_file_list[i].name,
- S_IRUSR,
- debugfs_dir,
- debugfs_file_list[i].ptr,
- &idle_fops);
- i++;
- }
- }
-
- idle_notifier_register(&i7300_idle_nb);
-
- printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
- return 0;
-}
-
-static void __exit i7300_idle_exit(void)
-{
- idle_notifier_unregister(&i7300_idle_nb);
- free_cpumask_var(idle_cpumask);
-
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].file != NULL) {
- debugfs_remove(debugfs_file_list[i].file);
- i++;
- }
-
- debugfs_remove(debugfs_dir);
- }
- i7300_idle_thrt_restore();
- i7300_idle_ioat_exit();
-}
-
-module_init(i7300_idle_init);
-module_exit(i7300_idle_exit);
-
-MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
-MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
- I7300_IDLE_DRIVER_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4466a2f969d7..7d8ea3d5fda6 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -98,8 +98,6 @@ static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static void intel_idle_freeze(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
-static int intel_idle_cpu_init(int cpu);
-
static struct cpuidle_state *cpuidle_state_table;
/*
@@ -724,6 +722,50 @@ static struct cpuidle_state atom_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state tangier_cstates[] = {
+ {
+ .name = "C1-TNG",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C4-TNG",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-TNG",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7-TNG",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C9-TNG",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
@@ -907,51 +949,15 @@ static void intel_idle_freeze(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(void *arg)
+static void __setup_broadcast_timer(bool on)
{
- unsigned long on = (unsigned long)arg;
-
if (on)
tick_broadcast_enable();
else
tick_broadcast_disable();
}
-static int cpu_hotplug_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
-
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- smp_call_function_single(hotcpu, __setup_broadcast_timer,
- (void *)true, 1);
-
- /*
- * Some systems can hotplug a cpu at runtime after
- * the kernel has booted, we have to initialize the
- * driver in this case
- */
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
- if (dev->registered)
- break;
-
- if (intel_idle_cpu_init(hotcpu))
- return NOTIFY_BAD;
-
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
- .notifier_call = cpu_hotplug_notify,
-};
-
-static void auto_demotion_disable(void *dummy)
+static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
@@ -959,7 +965,7 @@ static void auto_demotion_disable(void *dummy)
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
-static void c1e_promotion_disable(void *dummy)
+static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
@@ -978,6 +984,10 @@ static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
+static const struct idle_cpu idle_cpu_tangier = {
+ .state_table = tangier_cstates,
+};
+
static const struct idle_cpu idle_cpu_lincroft = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
@@ -1066,6 +1076,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
@@ -1084,6 +1095,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
@@ -1373,12 +1385,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
-static int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(unsigned int cpu)
{
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
-
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
@@ -1387,17 +1398,36 @@ static int intel_idle_cpu_init(int cpu)
}
if (icpu->auto_demotion_disable_flags)
- smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+ auto_demotion_disable();
if (icpu->disable_promotion_to_c1e)
- smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
+ c1e_promotion_disable();
+
+ return 0;
+}
+
+static int intel_idle_cpu_online(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ __setup_broadcast_timer(true);
+
+ /*
+ * Some systems can hotplug a cpu at runtime after
+ * the kernel has booted, we have to initialize the
+ * driver in this case
+ */
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
+ if (!dev->registered)
+ return intel_idle_cpu_init(cpu);
return 0;
}
static int __init intel_idle_init(void)
{
- int retval, i;
+ int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -1417,35 +1447,29 @@ static int __init intel_idle_init(void)
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
- free_percpu(intel_idle_cpuidle_devices);
- return retval;
+ goto init_driver_fail;
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- retval = intel_idle_cpu_init(i);
- if (retval) {
- intel_idle_cpuidle_devices_uninit();
- cpu_notifier_register_done();
- cpuidle_unregister_driver(&intel_idle_driver);
- free_percpu(intel_idle_cpuidle_devices);
- return retval;
- }
- }
- __register_cpu_notifier(&cpu_hotplug_notifier);
-
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- cpu_notifier_register_done();
+ retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
+ intel_idle_cpu_online, NULL);
+ if (retval < 0)
+ goto hp_setup_fail;
pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
return 0;
+
+hp_setup_fail:
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+init_driver_fail:
+ free_percpu(intel_idle_cpuidle_devices);
+ return retval;
+
}
device_initcall(intel_idle_init);
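intel_idle drops its open-coded CPU notifier and per-CPU init loop in favour of a dynamic cpuhp state: cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) runs the online callback once for every CPU already up and again for each CPU brought online later, and returns the allocated state so it can be torn down. A hedged sketch of that pattern in a hypothetical, unrelated module (builds only in a kernel tree; the callback body is illustrative only):

#include <linux/module.h>
#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static enum cpuhp_state example_hp_state;

/* Called for each CPU online at setup time and on every later hotplug. */
static int example_cpu_online(unsigned int cpu)
{
	pr_info("example: cpu %u online\n", cpu);
	return 0;	/* a non-zero return would abort bringing the CPU up */
}

static int __init example_init(void)
{
	int ret;

	/* Dynamic state: a free slot in the AP-online range is allocated. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, NULL);
	if (ret < 0)
		return ret;
	example_hp_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");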
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6743b18194fb..a918270d6f54 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -73,6 +73,7 @@ source "drivers/iio/adc/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
+source "drivers/iio/counter/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
source "drivers/iio/frequency/Kconfig"
@@ -87,6 +88,7 @@ if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
source "drivers/iio/potentiometer/Kconfig"
+source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
source "drivers/iio/proximity/Kconfig"
source "drivers/iio/temperature/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 87e4c4369e2f..33fa4026f92c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,6 +18,7 @@ obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
obj-y += common/
+obj-y += counter/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
@@ -29,6 +30,7 @@ obj-y += light/
obj-y += magnetometer/
obj-y += orientation/
obj-y += potentiometer/
+obj-y += potentiostat/
obj-y += pressure/
obj-y += proximity/
obj-y += temperature/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 2b791fe1e2bc..c68bdb649005 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,26 @@ config BMC150_ACCEL_SPI
tristate
select REGMAP_SPI
+config DA280
+ tristate "MiraMEMS DA280 3-axis 14-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA280 3-axis 14-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da280.
+
+config DA311
+ tristate "MiraMEMS DA311 3-axis 12-bit digital accelerometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the MiraMEMS DA311 3-axis 12-bit
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da311.
+
config DMARD06
tristate "Domintech DMARD06 Digital Accelerometer Driver"
depends on OF || COMPILE_TEST
@@ -73,6 +93,16 @@ config DMARD09
Choosing M will build the driver as a module. If so, the module
will be called dmard09.
+config DMARD10
+ tristate "Domintech DMARD10 3-axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Domintech DMARD10 3-axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called dmard10.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -97,7 +127,8 @@ config IIO_ST_ACCEL_3AXIS
help
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
- LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
+ LNG2DM
This driver can also be built as a module. If so, these modules
will be created:
@@ -273,6 +304,18 @@ config MXC6255
To compile this driver as a module, choose M here: the module will be
called mxc6255.
+config SCA3000
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ depends on SPI
+ tristate "VTI SCA3000 series accelerometers"
+ help
+ Say Y here to build support for the VTI SCA3000 series of SPI
+ accelerometers. These devices use a hardware ring buffer.
+
+ To compile this driver as a module, say M here: the module will be
+ called sca3000.
+
config STK8312
tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index f5d3ddee619e..69fe8edc57a2 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
+obj-$(CONFIG_DA280) += da280.o
+obj-$(CONFIG_DA311) += da311.o
obj-$(CONFIG_DMARD06) += dmard06.o
obj-$(CONFIG_DMARD09) += dmard09.o
+obj-$(CONFIG_DMARD10) += dmard10.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
@@ -32,6 +35,8 @@ obj-$(CONFIG_MMA9553) += mma9553.o
obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
+obj-$(CONFIG_SCA3000) += sca3000.o
+
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
new file mode 100644
index 000000000000..ed8343aeac9c
--- /dev/null
+++ b/drivers/iio/accel/da280.c
@@ -0,0 +1,183 @@
+/**
+ * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
+ * IIO driver for the MiraMEMS DA226 2-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA280_REG_CHIP_ID 0x01
+#define DA280_REG_ACC_X_LSB 0x02
+#define DA280_REG_ACC_Y_LSB 0x04
+#define DA280_REG_ACC_Z_LSB 0x06
+#define DA280_REG_MODE_BW 0x11
+
+#define DA280_CHIP_ID 0x13
+#define DA280_MODE_ENABLE 0x1e
+#define DA280_MODE_DISABLE 0x9e
+
+enum { da226, da280 };
+
+/*
+ * a value of + or -4096 corresponds to + or - 1G
+ * scale = 9.81 / 4096 = 0.002395019
+ */
+
+static const int da280_nscale = 2395019;
+
+#define DA280_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da280_channels[] = {
+ DA280_CHANNEL(DA280_REG_ACC_X_LSB, X),
+ DA280_CHANNEL(DA280_REG_ACC_Y_LSB, Y),
+ DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
+};
+
+struct da280_data {
+ struct i2c_client *client;
+};
+
+static int da280_enable(struct i2c_client *client, bool enable)
+{
+ u8 data = enable ? DA280_MODE_ENABLE : DA280_MODE_DISABLE;
+
+ return i2c_smbus_write_byte_data(client, DA280_REG_MODE_BW, data);
+}
+
+static int da280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 14 bits, stored as 16 bits with the 2
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da280_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da280_read_raw,
+};
+
+static int da280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da280_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
+ if (ret != DA280_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da280_channels;
+ if (id->driver_data == da226) {
+ indio_dev->name = "da226";
+ indio_dev->num_channels = 2;
+ } else {
+ indio_dev->name = "da280";
+ indio_dev->num_channels = 3;
+ }
+
+ ret = da280_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da280_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da280_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da280_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da280_suspend(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), false);
+}
+
+static int da280_resume(struct device *dev)
+{
+ return da280_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+
+static const struct i2c_device_id da280_i2c_id[] = {
+ { "da226", da226 },
+ { "da280", da280 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
+
+static struct i2c_driver da280_driver = {
+ .driver = {
+ .name = "da280",
+ .pm = &da280_pm_ops,
+ },
+ .probe = da280_probe,
+ .remove = da280_remove,
+ .id_table = da280_i2c_id,
+};
+
+module_i2c_driver(da280_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA280 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
new file mode 100644
index 000000000000..537cfa8b6edf
--- /dev/null
+++ b/drivers/iio/accel/da311.c
@@ -0,0 +1,305 @@
+/**
+ * IIO driver for the MiraMEMS DA311 3-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2011-2013 MiraMEMS Sensing Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA311_CHIP_ID 0x13
+
+/*
+ * Note register addresses go from 0 - 0x3f and then wrap.
+ * For some reason there are 2 banks with 0 - 0x3f addresses,
+ * rather than a single 0 - 0x7f bank.

+ */
+
+/* Bank 0 regs */
+#define DA311_REG_BANK 0x0000
+#define DA311_REG_LDO_REG 0x0006
+#define DA311_REG_CHIP_ID 0x000f
+#define DA311_REG_TEMP_CFG_REG 0x001f
+#define DA311_REG_CTRL_REG1 0x0020
+#define DA311_REG_CTRL_REG3 0x0022
+#define DA311_REG_CTRL_REG4 0x0023
+#define DA311_REG_CTRL_REG5 0x0024
+#define DA311_REG_CTRL_REG6 0x0025
+#define DA311_REG_STATUS_REG 0x0027
+#define DA311_REG_OUT_X_L 0x0028
+#define DA311_REG_OUT_X_H 0x0029
+#define DA311_REG_OUT_Y_L 0x002a
+#define DA311_REG_OUT_Y_H 0x002b
+#define DA311_REG_OUT_Z_L 0x002c
+#define DA311_REG_OUT_Z_H 0x002d
+#define DA311_REG_INT1_CFG 0x0030
+#define DA311_REG_INT1_SRC 0x0031
+#define DA311_REG_INT1_THS 0x0032
+#define DA311_REG_INT1_DURATION 0x0033
+#define DA311_REG_INT2_CFG 0x0034
+#define DA311_REG_INT2_SRC 0x0035
+#define DA311_REG_INT2_THS 0x0036
+#define DA311_REG_INT2_DURATION 0x0037
+#define DA311_REG_CLICK_CFG 0x0038
+#define DA311_REG_CLICK_SRC 0x0039
+#define DA311_REG_CLICK_THS 0x003a
+#define DA311_REG_TIME_LIMIT 0x003b
+#define DA311_REG_TIME_LATENCY 0x003c
+#define DA311_REG_TIME_WINDOW 0x003d
+
+/* Bank 1 regs */
+#define DA311_REG_SOFT_RESET 0x0105
+#define DA311_REG_OTP_XOFF_L 0x0110
+#define DA311_REG_OTP_XOFF_H 0x0111
+#define DA311_REG_OTP_YOFF_L 0x0112
+#define DA311_REG_OTP_YOFF_H 0x0113
+#define DA311_REG_OTP_ZOFF_L 0x0114
+#define DA311_REG_OTP_ZOFF_H 0x0115
+#define DA311_REG_OTP_XSO 0x0116
+#define DA311_REG_OTP_YSO 0x0117
+#define DA311_REG_OTP_ZSO 0x0118
+#define DA311_REG_OTP_TRIM_OSC 0x011b
+#define DA311_REG_LPF_ABSOLUTE 0x011c
+#define DA311_REG_TEMP_OFF1 0x0127
+#define DA311_REG_TEMP_OFF2 0x0128
+#define DA311_REG_TEMP_OFF3 0x0129
+#define DA311_REG_OTP_TRIM_THERM_H 0x011a
+
+/*
+ * a value of + or -1024 corresponds to + or - 1G
+ * scale = 9.81 / 1024 = 0.009580078
+ */
+
+static const int da311_nscale = 9580078;
+
+#define DA311_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec da311_channels[] = {
+ /* | 0x80 comes from the android driver */
+ DA311_CHANNEL(DA311_REG_OUT_X_L | 0x80, X),
+ DA311_CHANNEL(DA311_REG_OUT_Y_L | 0x80, Y),
+ DA311_CHANNEL(DA311_REG_OUT_Z_L | 0x80, Z),
+};
+
+struct da311_data {
+ struct i2c_client *client;
+};
+
+static int da311_register_mask_write(struct i2c_client *client, u16 addr,
+ u8 mask, u8 data)
+{
+ int ret;
+ u8 tmp_data = 0;
+
+ if (addr & 0xff00) {
+ /* Select bank 1 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (mask != 0xff) {
+ ret = i2c_smbus_read_byte_data(client, addr);
+ if (ret < 0)
+ return ret;
+ tmp_data = ret;
+ }
+
+ tmp_data &= ~mask;
+ tmp_data |= data & mask;
+ ret = i2c_smbus_write_byte_data(client, addr & 0xff, tmp_data);
+ if (ret < 0)
+ return ret;
+
+ if (addr & 0xff00) {
+ /* Back to bank 0 */
+ ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x00);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Init sequence taken from the android driver */
+static int da311_reset(struct i2c_client *client)
+{
+ const struct {
+ u16 addr;
+ u8 mask;
+ u8 data;
+ } init_data[] = {
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x08 },
+ { DA311_REG_CTRL_REG5, 0xff, 0x80 },
+ { DA311_REG_CTRL_REG4, 0x30, 0x00 },
+ { DA311_REG_CTRL_REG1, 0xff, 0x6f },
+ { DA311_REG_TEMP_CFG_REG, 0xff, 0x88 },
+ { DA311_REG_LDO_REG, 0xff, 0x02 },
+ { DA311_REG_OTP_TRIM_OSC, 0xff, 0x27 },
+ { DA311_REG_LPF_ABSOLUTE, 0xff, 0x30 },
+ { DA311_REG_TEMP_OFF1, 0xff, 0x3f },
+ { DA311_REG_TEMP_OFF2, 0xff, 0xff },
+ { DA311_REG_TEMP_OFF3, 0xff, 0x0f },
+ };
+ int i, ret;
+
+ /* Reset */
+ ret = da311_register_mask_write(client, DA311_REG_SOFT_RESET,
+ 0xff, 0xaa);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(init_data); i++) {
+ ret = da311_register_mask_write(client,
+ init_data[i].addr,
+ init_data[i].mask,
+ init_data[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int da311_enable(struct i2c_client *client, bool enable)
+{
+ u8 data = enable ? 0x00 : 0x20;
+
+ return da311_register_mask_write(client, DA311_REG_TEMP_CFG_REG,
+ 0x20, data);
+}
+
+static int da311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct da311_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ /*
+ * Values are 12 bits, stored as 16 bits with the 4
+ * least significant bits always 0.
+ */
+ *val = (short)ret >> 4;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = da311_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info da311_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = da311_read_raw,
+};
+
+static int da311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct da311_data *data;
+
+ ret = i2c_smbus_read_byte_data(client, DA311_REG_CHIP_ID);
+ if (ret != DA311_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &da311_info;
+ indio_dev->name = "da311";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = da311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(da311_channels);
+
+ ret = da311_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = da311_enable(client, true);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ da311_enable(client, false);
+ }
+
+ return ret;
+}
+
+static int da311_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return da311_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da311_suspend(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), false);
+}
+
+static int da311_resume(struct device *dev)
+{
+ return da311_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
+
+static const struct i2c_device_id da311_i2c_id[] = {
+ {"da311", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
+
+static struct i2c_driver da311_driver = {
+ .driver = {
+ .name = "da311",
+ .pm = &da311_pm_ops,
+ },
+ .probe = da311_probe,
+ .remove = da311_remove,
+ .id_table = da311_i2c_id,
+};
+
+module_i2c_driver(da311_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA311 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
new file mode 100644
index 000000000000..b8736cc75656
--- /dev/null
+++ b/drivers/iio/accel/dmard10.c
@@ -0,0 +1,266 @@
+/**
+ * IIO driver for the 3-axis accelerometer Domintech ARD10.
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2012 Domintech Technology Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DMARD10_REG_ACTR 0x00
+#define DMARD10_REG_AFEM 0x0c
+#define DMARD10_REG_STADR 0x12
+#define DMARD10_REG_STAINT 0x1c
+#define DMARD10_REG_MISC2 0x1f
+#define DMARD10_REG_PD 0x21
+
+#define DMARD10_MODE_OFF 0x00
+#define DMARD10_MODE_STANDBY 0x02
+#define DMARD10_MODE_ACTIVE 0x06
+#define DMARD10_MODE_READ_OTP 0x12
+#define DMARD10_MODE_RESET_DATA_PATH 0x82
+
+/* AFEN set 1, ATM[2:0]=b'000 (normal), EN_Z/Y/X/T=1 */
+#define DMARD10_VALUE_AFEM_AFEN_NORMAL 0x8f
+/* ODR[3:0]=b'0111 (100Hz), CCK[3:0]=b'0100 (204.8kHZ) */
+#define DMARD10_VALUE_CKSEL_ODR_100_204 0x74
+/* INTC[6:5]=b'00 */
+#define DMARD10_VALUE_INTC 0x00
+/* TAP1/TAP2 Average 2 */
+#define DMARD10_VALUE_TAPNS_AVE_2 0x11
+
+#define DMARD10_VALUE_STADR 0x55
+#define DMARD10_VALUE_STAINT 0xaa
+#define DMARD10_VALUE_MISC2_OSCA_EN 0x08
+#define DMARD10_VALUE_PD_RST 0x52
+
+/* Offsets into the buffer read in dmard10_read_raw() */
+#define DMARD10_X_OFFSET 1
+#define DMARD10_Y_OFFSET 2
+#define DMARD10_Z_OFFSET 3
+
+/*
+ * a value of + or -128 corresponds to + or - 1G
+ * scale = 9.81 / 128 = 0.076640625
+ */
+
+static const int dmard10_nscale = 76640625;
+
+#define DMARD10_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec dmard10_channels[] = {
+ DMARD10_CHANNEL(DMARD10_X_OFFSET, X),
+ DMARD10_CHANNEL(DMARD10_Y_OFFSET, Y),
+ DMARD10_CHANNEL(DMARD10_Z_OFFSET, Z),
+};
+
+struct dmard10_data {
+ struct i2c_client *client;
+};
+
+/* Init sequence taken from the android driver */
+static int dmard10_reset(struct i2c_client *client)
+{
+ unsigned char buffer[7];
+ int ret;
+
+ /* 1. Powerdown reset */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_PD,
+ DMARD10_VALUE_PD_RST);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 2. ACTR => Standby mode => Download OTP to parameter reg =>
+ * Standby mode => Reset data path => Standby mode
+ */
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_READ_OTP;
+ buffer[3] = DMARD10_MODE_STANDBY;
+ buffer[4] = DMARD10_MODE_RESET_DATA_PATH;
+ buffer[5] = DMARD10_MODE_STANDBY;
+ ret = i2c_master_send(client, buffer, 6);
+ if (ret < 0)
+ return ret;
+
+ /* 3. OSCA_EN = 1, TSTO = b'000 (INT1 = normal, TEST0 = normal) */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_MISC2,
+ DMARD10_VALUE_MISC2_OSCA_EN);
+ if (ret < 0)
+ return ret;
+
+ /* 4. AFEN = 1 (AFE will powerdown after ADC) */
+ buffer[0] = DMARD10_REG_AFEM;
+ buffer[1] = DMARD10_VALUE_AFEM_AFEN_NORMAL;
+ buffer[2] = DMARD10_VALUE_CKSEL_ODR_100_204;
+ buffer[3] = DMARD10_VALUE_INTC;
+ buffer[4] = DMARD10_VALUE_TAPNS_AVE_2;
+ buffer[5] = 0x00; /* DLYC, no delay timing */
+ buffer[6] = 0x07; /* INTD=1 push-pull, INTA=1 active high, AUTOT=1 */
+ ret = i2c_master_send(client, buffer, 7);
+ if (ret < 0)
+ return ret;
+
+ /* 5. Activation mode */
+ ret = i2c_smbus_write_byte_data(client, DMARD10_REG_ACTR,
+ DMARD10_MODE_ACTIVE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Shutdown sequence taken from the android driver */
+static int dmard10_shutdown(struct i2c_client *client)
+{
+ unsigned char buffer[3];
+
+ buffer[0] = DMARD10_REG_ACTR;
+ buffer[1] = DMARD10_MODE_STANDBY;
+ buffer[2] = DMARD10_MODE_OFF;
+
+ return i2c_master_send(client, buffer, 3);
+}
+
+static int dmard10_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dmard10_data *data = iio_priv(indio_dev);
+ __le16 buf[4];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Read 8 bytes starting at the REG_STADR register; trying to
+ * read the individual X, Y, Z registers will always return 0.
+ */
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ DMARD10_REG_STADR,
+ sizeof(buf), (u8 *)buf);
+ if (ret < 0)
+ return ret;
+ ret = le16_to_cpu(buf[chan->address]);
+ *val = sign_extend32(ret, 12);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = dmard10_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info dmard10_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dmard10_read_raw,
+};
+
+static int dmard10_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct dmard10_data *data;
+
+ /* These 2 registers have special POR reset values used for id */
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STADR);
+ if (ret != DMARD10_VALUE_STADR)
+ return (ret < 0) ? ret : -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STAINT);
+ if (ret != DMARD10_VALUE_STAINT)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &dmard10_info;
+ indio_dev->name = "dmard10";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dmard10_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dmard10_channels);
+
+ ret = dmard10_reset(client);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ dmard10_shutdown(client);
+ }
+
+ return ret;
+}
+
+static int dmard10_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return dmard10_shutdown(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dmard10_suspend(struct device *dev)
+{
+ return dmard10_shutdown(to_i2c_client(dev));
+}
+
+static int dmard10_resume(struct device *dev)
+{
+ return dmard10_reset(to_i2c_client(dev));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend, dmard10_resume);
+
+static const struct i2c_device_id dmard10_i2c_id[] = {
+ {"dmard10", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
+
+static struct i2c_driver dmard10_driver = {
+ .driver = {
+ .name = "dmard10",
+ .pm = &dmard10_pm_ops,
+ },
+ .probe = dmard10_probe,
+ .remove = dmard10_remove,
+ .id_table = dmard10_i2c_id,
+};
+
+module_i2c_driver(dmard10_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Domintech ARD10 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 03beadf14ad3..3a40774cca74 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -39,7 +39,7 @@
#define MMA7660_SCALE_AVAIL "0.467142857"
-const int mma7660_nscale = 467142857;
+static const int mma7660_nscale = 467142857;
#define MMA7660_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index d41e1b588e68..f418c588af6a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -459,12 +459,14 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&data->lock);
ret = mma8452_read(data, buffer);
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -664,37 +666,46 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
struct mma8452_data *data = iio_priv(indio_dev);
int i, ret;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
- if (i < 0)
- return i;
-
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
- return mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
+ ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
+ data->ctrl_reg1);
+ break;
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
- if (i < 0)
- return i;
+ if (i < 0) {
+ ret = i;
+ break;
+ }
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
- return mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+ data->data_cfg);
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -128 || val > 127)
- return -EINVAL;
+ if (val < -128 || val > 127) {
+ ret = -EINVAL;
+ break;
+ }
- return mma8452_change_config(data,
- MMA8452_OFF_X + chan->scan_index,
- val);
+ ret = mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
+ break;
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -703,23 +714,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
ret = mma8452_set_hp_filter_frequency(data, val, val2);
if (ret < 0)
- return ret;
+ break;
}
- return mma8452_change_config(data, MMA8452_DATA_CFG,
+ ret = mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
+ break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
- if (mma8452_os_ratio[i][ret] == val)
- return mma8452_set_power_mode(data, i);
+ if (mma8452_os_ratio[i][ret] == val) {
+ ret = mma8452_set_power_mode(data, i);
+ break;
+ }
}
-
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int mma8452_read_thresh(struct iio_dev *indio_dev,
@@ -1347,20 +1365,9 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
return mma8452_change_config(data, MMA8452_CTRL_REG4, reg);
}
-static int mma8452_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static const struct iio_trigger_ops mma8452_trigger_ops = {
.set_trigger_state = mma8452_data_rdy_trigger_set_state,
- .validate_device = mma8452_validate_device,
+ .validate_device = iio_trigger_validate_own_device,
.owner = THIS_MODULE,
};
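mma8452_read_raw()/write_raw() replace open-coded iio_buffer_enabled() checks with iio_device_claim_direct_mode(), which fails with -EBUSY if buffered capture is running and otherwise holds the device in direct mode until released. A hedged sketch of the claim/release structure for a hypothetical driver's read_raw() (the 'demo' names and the register read are made up; only the pattern is the point):

#include <linux/iio/iio.h>
#include <linux/errno.h>

static int demo_read_sample(struct iio_dev *indio_dev, int *val)
{
	*val = 42;		/* stand-in for a real bus transfer */
	return 0;
}

static int demo_read_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* Refuse (-EBUSY) if the buffer is in use, else lock it out. */
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;
		ret = demo_read_sample(indio_dev, val);
		iio_device_release_direct_mode(indio_dev);
		return ret ? ret : IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}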
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
new file mode 100644
index 000000000000..cb1d83fa19a0
--- /dev/null
+++ b/drivers/iio/accel/sca3000.c
@@ -0,0 +1,1576 @@
+/*
+ * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
+ *
+ * See industrialio/accels/sca3000.h for comments.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
+#define SCA3000_READ_REG(a) ((a) << 2)
+
+#define SCA3000_REG_REVID_ADDR 0x00
+#define SCA3000_REG_REVID_MAJOR_MASK GENMASK(8, 4)
+#define SCA3000_REG_REVID_MINOR_MASK GENMASK(3, 0)
+
+#define SCA3000_REG_STATUS_ADDR 0x02
+#define SCA3000_LOCKED BIT(5)
+#define SCA3000_EEPROM_CS_ERROR BIT(1)
+#define SCA3000_SPI_FRAME_ERROR BIT(0)
+
+/* All reads done using register decrement so no need to directly access LSBs */
+#define SCA3000_REG_X_MSB_ADDR 0x05
+#define SCA3000_REG_Y_MSB_ADDR 0x07
+#define SCA3000_REG_Z_MSB_ADDR 0x09
+
+#define SCA3000_REG_RING_OUT_ADDR 0x0f
+
+/* Temp read untested - the e05 doesn't have the sensor */
+#define SCA3000_REG_TEMP_MSB_ADDR 0x13
+
+#define SCA3000_REG_MODE_ADDR 0x14
+#define SCA3000_MODE_PROT_MASK 0x28
+#define SCA3000_REG_MODE_RING_BUF_ENABLE BIT(7)
+#define SCA3000_REG_MODE_RING_BUF_8BIT BIT(6)
+
+/*
+ * Free fall detection triggers an interrupt if the acceleration
+ * is below a threshold for equivalent of 25cm drop
+ */
+#define SCA3000_REG_MODE_FREE_FALL_DETECT BIT(4)
+#define SCA3000_REG_MODE_MEAS_MODE_NORMAL 0x00
+#define SCA3000_REG_MODE_MEAS_MODE_OP_1 0x01
+#define SCA3000_REG_MODE_MEAS_MODE_OP_2 0x02
+
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and then a programmable threshold is used to trigger
+ * an interrupt.
+ */
+#define SCA3000_REG_MODE_MEAS_MODE_MOT_DET 0x03
+#define SCA3000_REG_MODE_MODE_MASK 0x03
+
+#define SCA3000_REG_BUF_COUNT_ADDR 0x15
+
+#define SCA3000_REG_INT_STATUS_ADDR 0x16
+#define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7)
+#define SCA3000_REG_INT_STATUS_HALF BIT(6)
+
+#define SCA3000_INT_STATUS_FREE_FALL BIT(3)
+#define SCA3000_INT_STATUS_Y_TRIGGER BIT(2)
+#define SCA3000_INT_STATUS_X_TRIGGER BIT(1)
+#define SCA3000_INT_STATUS_Z_TRIGGER BIT(0)
+
+/* Used to allow access to multiplexed registers */
+#define SCA3000_REG_CTRL_SEL_ADDR 0x18
+/* Only available for SCA3000-D03 and SCA3000-D01 */
+#define SCA3000_REG_CTRL_SEL_I2C_DISABLE 0x01
+#define SCA3000_REG_CTRL_SEL_MD_CTRL 0x02
+#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
+#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
+#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
+#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
+
+#define SCA3000_REG_OUT_CTRL_PROT_MASK 0xE0
+#define SCA3000_REG_OUT_CTRL_BUF_X_EN 0x10
+#define SCA3000_REG_OUT_CTRL_BUF_Y_EN 0x08
+#define SCA3000_REG_OUT_CTRL_BUF_Z_EN 0x04
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_MASK 0x03
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_4 0x02
+#define SCA3000_REG_OUT_CTRL_BUF_DIV_2 0x01
+
+
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
+ */
+#define SCA3000_MD_CTRL_PROT_MASK 0xC0
+#define SCA3000_MD_CTRL_OR_Y BIT(0)
+#define SCA3000_MD_CTRL_OR_X BIT(1)
+#define SCA3000_MD_CTRL_OR_Z BIT(2)
+/* Currently unsupported */
+#define SCA3000_MD_CTRL_AND_Y BIT(3)
+#define SCA3000_MD_CTRL_AND_X BIT(4)
+#define SAC3000_MD_CTRL_AND_Z BIT(5)
+
+/*
+ * Some control registers have complex access methods that require this
+ * register to be used to remove a lock.
+ */
+#define SCA3000_REG_UNLOCK_ADDR 0x1e
+
+#define SCA3000_REG_INT_MASK_ADDR 0x21
+#define SCA3000_REG_INT_MASK_PROT_MASK 0x1C
+
+#define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7)
+#define SCA3000_REG_INT_MASK_RING_HALF BIT(6)
+
+#define SCA3000_REG_INT_MASK_ALL_INTS 0x02
+#define SCA3000_REG_INT_MASK_ACTIVE_HIGH 0x01
+#define SCA3000_REG_INT_MASK_ACTIVE_LOW 0x00
+/* Values of multiplexed registers (write to ctrl_data after select) */
+#define SCA3000_REG_CTRL_DATA_ADDR 0x22
+
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
+ * may become available in the future.
+ *
+ * Bypass - Bypass the low-pass filter in the signal channel so as to increase
+ * signal bandwidth.
+ *
+ * Narrow - Narrow low-pass filtering of the signal channel and half output
+ * data rate by decimation.
+ *
+ * Wide - Widen low-pass filtering of signal channel to increase bandwidth
+ */
+#define SCA3000_OP_MODE_BYPASS 0x01
+#define SCA3000_OP_MODE_NARROW 0x02
+#define SCA3000_OP_MODE_WIDE 0x04
+#define SCA3000_MAX_TX 6
+#define SCA3000_MAX_RX 2
+
+/**
+ * struct sca3000_state - device instance state information
+ * @us: the associated spi device
+ * @info: chip variant information
+ * @last_timestamp: the timestamp of the last event
+ * @mo_det_use_count: reference counter for the motion detection unit
+ * @lock: lock used to protect elements of sca3000_state
+ * and the underlying device state.
+ * @tx: dma-able transmit buffer
+ * @rx: dma-able receive buffer
+ **/
+struct sca3000_state {
+ struct spi_device *us;
+ const struct sca3000_chip_info *info;
+ s64 last_timestamp;
+ int mo_det_use_count;
+ struct mutex lock;
+ /* Can these share a cacheline ? */
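+ /*
+ * Both buffers are handed to the SPI core for transfers, hence the
+ * ____cacheline_aligned markings to keep them DMA safe.
+ */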
+ u8 rx[384] ____cacheline_aligned;
+ u8 tx[6] ____cacheline_aligned;
+};
+
+/**
+ * struct sca3000_chip_info - model dependent parameters
+ * @scale: scale * 10^-6
+ * @temp_output: some devices have temperature sensors.
+ * @measurement_mode_freq: normal mode sampling frequency
+ * @measurement_mode_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the normal measurement mode.
+ * @option_mode_1: first optional mode. Not all models have one
+ * @option_mode_1_freq: option mode 1 sampling frequency
+ * @option_mode_1_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the first option mode.
+ * @option_mode_2: second optional mode. Not all chips have one
+ * @option_mode_2_freq: option mode 2 sampling frequency
+ * @option_mode_2_3db_freq: 3db cutoff frequency of the low pass filter for
+ * the second option mode.
+ * @mod_det_mult_xz: Bit wise multipliers to calculate the threshold
+ * for motion detection in the x and z axis.
+ * @mod_det_mult_y: Bit wise multipliers to calculate the threshold
+ * for motion detection in the y axis.
+ *
+ * This structure is used to hold information about the functionality of a given
+ * sca3000 variant.
+ **/
+struct sca3000_chip_info {
+ unsigned int scale;
+ bool temp_output;
+ int measurement_mode_freq;
+ int measurement_mode_3db_freq;
+ int option_mode_1;
+ int option_mode_1_freq;
+ int option_mode_1_3db_freq;
+ int option_mode_2;
+ int option_mode_2_freq;
+ int option_mode_2_3db_freq;
+ int mot_det_mult_xz[6];
+ int mot_det_mult_y[7];
+};
+
+enum sca3000_variant {
+ d01,
+ e02,
+ e04,
+ e05,
+};
+
+/*
+ * Note where option modes are not defined, the chip simply does not
+ * support any.
+ * Other chips in the sca3000 series use i2c and are not included here.
+ *
+ * Some of these devices are only listed in the family data sheet and
+ * do not actually appear to be available.
+ */
+static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
+ [d01] = {
+ .scale = 7357,
+ .temp_output = true,
+ .measurement_mode_freq = 250,
+ .measurement_mode_3db_freq = 45,
+ .option_mode_1 = SCA3000_OP_MODE_BYPASS,
+ .option_mode_1_freq = 250,
+ .option_mode_1_3db_freq = 70,
+ .mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
+ .mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
+ },
+ [e02] = {
+ .scale = 9810,
+ .measurement_mode_freq = 125,
+ .measurement_mode_3db_freq = 40,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 63,
+ .option_mode_1_3db_freq = 11,
+ .mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
+ .mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
+ },
+ [e04] = {
+ .scale = 19620,
+ .measurement_mode_freq = 100,
+ .measurement_mode_3db_freq = 38,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 70,
+ .mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
+ .mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
+ },
+ [e05] = {
+ .scale = 61313,
+ .measurement_mode_freq = 200,
+ .measurement_mode_3db_freq = 60,
+ .option_mode_1 = SCA3000_OP_MODE_NARROW,
+ .option_mode_1_freq = 50,
+ .option_mode_1_3db_freq = 9,
+ .option_mode_2 = SCA3000_OP_MODE_WIDE,
+ .option_mode_2_freq = 400,
+ .option_mode_2_3db_freq = 75,
+ .mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
+ .mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
+ },
+};
+
+static int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
+{
+ st->tx[0] = SCA3000_WRITE_REG(address);
+ st->tx[1] = val;
+ return spi_write(st->us, st->tx, 2);
+}
+
+static int sca3000_read_data_short(struct sca3000_state *st,
+ u8 reg_address_high,
+ int len)
+{
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = st->rx,
+ }
+ };
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_reg_lock_on() - test if the ctrl register lock is on
+ * @st: Driver specific device instance data.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_reg_lock_on(struct sca3000_state *st)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_STATUS_ADDR, 1);
+ if (ret < 0)
+ return ret;
+
+ return !(st->rx[0] & SCA3000_LOCKED);
+}
+
+/**
+ * __sca3000_unlock_reg_lock() - unlock the control registers
+ * @st: Driver specific device instance data.
+ *
+ * Note the device does not appear to support doing this in a single transfer.
+ * This should only ever be used as part of ctrl reg read.
+ * Lock must be held before calling this
+ */
+static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
+{
+ struct spi_transfer xfer[3] = {
+ {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = 2,
+ .cs_change = 1,
+ .tx_buf = st->tx + 2,
+ }, {
+ .len = 2,
+ .tx_buf = st->tx + 4,
+ },
+ };
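+ /*
+ * Unlock key: 0x00, 0x50 and 0xA0 written to the unlock register in
+ * three separate transfers (cs_change between each), presumably the
+ * sequence required by the datasheet.
+ */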
+ st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[1] = 0x00;
+ st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[3] = 0x50;
+ st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+ st->tx[5] = 0xA0;
+
+ return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_write_ctrl_reg() - write to a lock protected ctrl register
+ * @st: Driver specific device instance data.
+ * @sel: selects which registers we wish to write to
+ * @val: the value to be written
+ *
+ * Certain control registers are protected against overwriting by the lock
+ * register and use a shared write address. This function allows writing of
+ * these registers.
+ * Lock must be held.
+ */
+static int sca3000_write_ctrl_reg(struct sca3000_state *st,
+ u8 sel,
+ uint8_t val)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, sel);
+ if (ret)
+ goto error_ret;
+
+ /* Write the actual value into the register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_DATA_ADDR, val);
+
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_ctrl_reg() - read from lock protected control register.
+ * @st: Driver specific device instance data.
+ * @ctrl_reg: Which ctrl register do we want to read.
+ *
+ * Lock must be held.
+ */
+static int sca3000_read_ctrl_reg(struct sca3000_state *st,
+ u8 ctrl_reg)
+{
+ int ret;
+
+ ret = sca3000_reg_lock_on(st);
+ if (ret < 0)
+ goto error_ret;
+ if (ret) {
+ ret = __sca3000_unlock_reg_lock(st);
+ if (ret)
+ goto error_ret;
+ }
+ /* Set the control select register */
+ ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, ctrl_reg);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_read_data_short(st, SCA3000_REG_CTRL_DATA_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ return st->rx[0];
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_print_rev() - read and log the chip revision number
+ * @indio_dev: Device instance specific generic IIO data.
+ * Driver specific device instance data can be obtained
+ * via iio_priv(indio_dev)
+ */
+static int sca3000_print_rev(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_REVID_ADDR, 1);
+ if (ret < 0)
+ goto error_ret;
+ dev_info(&indio_dev->dev,
+ "sca3000 revision major=%lu, minor=%lu\n",
+ st->rx[0] & SCA3000_REG_REVID_MAJOR_MASK,
+ st->rx[0] & SCA3000_REG_REVID_MINOR_MASK);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static ssize_t
+sca3000_show_available_3db_freqs(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len;
+
+ len = sprintf(buf, "%d", st->info->measurement_mode_3db_freq);
+ if (st->info->option_mode_1)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_1_3db_freq);
+ if (st->info->option_mode_2)
+ len += sprintf(buf + len, " %d",
+ st->info->option_mode_2_3db_freq);
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+ S_IRUGO, sca3000_show_available_3db_freqs,
+ NULL, 0);
+
+static const struct iio_event_spec sca3000_event = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * Note the hack in the number of bits to pretend we have 2 more than
+ * we do in the fifo.
+ */
+#define SCA3000_CHAN(index, mod) \
+ { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |\
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .shift = 3, \
+ .endianness = IIO_BE, \
+ }, \
+ .event_spec = &sca3000_event, \
+ .num_event_specs = 1, \
+ }
+
+static const struct iio_event_spec sca3000_freefall_event_spec = {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+};
+
+static const struct iio_chan_spec sca3000_channels[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ /* No buffer support */
+ .scan_index = -1,
+ },
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &sca3000_freefall_event_spec,
+ .num_event_specs = 1,
+ },
+};
+
+static u8 sca3000_addresses[3][3] = {
+ [0] = {SCA3000_REG_X_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_X_TH,
+ SCA3000_MD_CTRL_OR_X},
+ [1] = {SCA3000_REG_Y_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Y_TH,
+ SCA3000_MD_CTRL_OR_Y},
+ [2] = {SCA3000_REG_Z_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Z_TH,
+ SCA3000_MD_CTRL_OR_Z},
+};
+
+/**
+ * __sca3000_get_base_freq() - obtain mode specific base frequency
+ * @st: Private driver specific device instance specific state.
+ * @info: chip type specific information.
+ * @base_freq: Base frequency for the current measurement mode.
+ *
+ * lock must be held
+ */
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+ const struct sca3000_chip_info *info,
+ int *base_freq)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ switch (SCA3000_REG_MODE_MODE_MASK & st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *base_freq = info->measurement_mode_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *base_freq = info->option_mode_1_freq;
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *base_freq = info->option_mode_2_freq;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+error_ret:
+ return ret;
+}
+
+/**
+ * sca3000_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency read back.
+ *
+ * lock must be held
+ **/
+static int sca3000_read_raw_samp_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = __sca3000_get_base_freq(st, st->info, val);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ if (*val > 0) {
+ ret &= SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+ switch (ret) {
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_2:
+ *val /= 2;
+ break;
+ case SCA3000_REG_OUT_CTRL_BUF_DIV_4:
+ *val /= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency desired.
+ *
+ * lock must be held
+ */
+static int sca3000_write_raw_samp_freq(struct sca3000_state *st, int val)
+{
+ int ret, base_freq, ctrlval;
+
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ctrlval = ret & ~SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+
+ if (val == base_freq / 2)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_2;
+ else if (val == base_freq / 4)
+ ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_4;
+ else if (val != base_freq)
+ return -EINVAL;
+
+ return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ ctrlval);
+}
+
+static int sca3000_read_3db_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* mask bottom 2 bits - only ones that are relevant */
+ st->rx[0] &= SCA3000_REG_MODE_MODE_MASK;
+ switch (st->rx[0]) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ *val = st->info->measurement_mode_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_MOT_DET:
+ return -EBUSY;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ *val = st->info->option_mode_1_3db_freq;
+ return IIO_VAL_INT;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ *val = st->info->option_mode_2_3db_freq;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_3db_freq(struct sca3000_state *st, int val)
+{
+ int ret;
+ int mode;
+
+ if (val == st->info->measurement_mode_3db_freq)
+ mode = SCA3000_REG_MODE_MEAS_MODE_NORMAL;
+ else if (st->info->option_mode_1 &&
+ (val == st->info->option_mode_1_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_1;
+ else if (st->info->option_mode_2 &&
+ (val == st->info->option_mode_2_3db_freq))
+ mode = SCA3000_REG_MODE_MEAS_MODE_OP_2;
+ else
+ return -EINVAL;
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ st->rx[0] &= ~SCA3000_REG_MODE_MODE_MASK;
+ st->rx[0] |= (mode & SCA3000_REG_MODE_MODE_MASK);
+
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR, st->rx[0]);
+}
+
+static int sca3000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ u8 address;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ if (chan->type == IIO_ACCEL) {
+ if (st->mo_det_use_count) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+ address = sca3000_addresses[chan->address][0];
+ ret = sca3000_read_data_short(st, address, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
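+ /*
+ * Sample is big endian with the 13 valid bits in positions
+ * 15:3; shift down, mask and then sign extend to a full int.
+ */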
+ *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
+ *val = ((*val) << (sizeof(*val) * 8 - 13)) >>
+ (sizeof(*val) * 8 - 13);
+ } else {
+ /* get the temperature when available */
+ ret = sca3000_read_data_short(st,
+ SCA3000_REG_TEMP_MSB_ADDR,
+ 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
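+ /*
+ * 9-bit temperature reading: low 6 bits of the MSB byte
+ * followed by the top 3 bits of the LSB byte.
+ */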
+ *val = ((st->rx[0] & 0x3F) << 3) |
+ ((st->rx[1] & 0xE0) >> 5);
+ }
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (chan->type == IIO_ACCEL)
+ *val2 = st->info->scale;
+ else /* temperature */
+ *val2 = 555556;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -214;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = sca3000_write_3db_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * sca3000_read_av_freq() - sysfs function to get available frequencies
+ * @dev: Device structure for this device.
+ * @attr: Description of the attribute.
+ * @buf: Output buffer to hold the available frequencies.
+ *
+ * The divided-down rates are only relevant to the ring buffer and depend on
+ * the current mode. Note that the data sheet gives rather wide tolerances for
+ * these, so integer division gives a good enough answer; also, not all chips
+ * have them specified at all.
+ **/
+static ssize_t sca3000_read_av_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int len = 0, ret, val;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto error_ret;
+
+ switch (val & SCA3000_REG_MODE_MODE_MASK) {
+ case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->measurement_mode_freq,
+ st->info->measurement_mode_freq / 2,
+ st->info->measurement_mode_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_1_freq,
+ st->info->option_mode_1_freq / 2,
+ st->info->option_mode_1_freq / 4);
+ break;
+ case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+ len += sprintf(buf + len, "%d %d %d\n",
+ st->info->option_mode_2_freq,
+ st->info->option_mode_2_freq / 2,
+ st->info->option_mode_2_freq / 4);
+ break;
+ }
+ return len;
+error_ret:
+ return ret;
+}
+
+/*
+ * Should only really be registered if ring buffer support is compiled in.
+ * Does no harm however and doing it right would add a fair bit of complexity
+ */
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
+
+/**
+ * sca3000_read_event_value() - query of a threshold or period
+ **/
+static int sca3000_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret, i;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ mutex_lock(&st->lock);
+ ret = sca3000_read_ctrl_reg(st,
+ sca3000_addresses[chan->address][1]);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ return ret;
+ *val = 0;
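+ /*
+ * The register holds one bit per multiplier; the threshold is
+ * the sum of the multipliers for every bit that is set.
+ */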
+ if (chan->channel2 == IIO_MOD_Y)
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_y))
+ *val += st->info->mot_det_mult_y[i];
+ else
+ for_each_set_bit(i, (unsigned long *)&ret,
+ ARRAY_SIZE(st->info->mot_det_mult_xz))
+ *val += st->info->mot_det_mult_xz[i];
+
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ *val = 0;
+ *val2 = 226000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * sca3000_write_event_value() - control of threshold and period
+ * @indio_dev: Device instance specific IIO information.
+ * @chan: Description of the channel for which the event is being
+ * configured.
+ * @type: The type of event being configured, here magnitude rising
+ * as everything else is read only.
+ * @dir: Direction of the event (here rising)
+ * @info: What information about the event are we configuring.
+ * Here the threshold only.
+ * @val: Integer part of the value being written.
+ * @val2: Non integer part of the value being written. Here always 0.
+ */
+static int sca3000_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ int i;
+ u8 nonlinear = 0;
+
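+ /*
+ * Decompose the requested threshold into the non linear per bit
+ * multipliers, working from the largest multiplier down.
+ */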
+ if (chan->channel2 == IIO_MOD_Y) {
+ i = ARRAY_SIZE(st->info->mot_det_mult_y);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_y[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_y[i];
+ }
+ } else {
+ i = ARRAY_SIZE(st->info->mot_det_mult_xz);
+ while (i > 0)
+ if (val >= st->info->mot_det_mult_xz[--i]) {
+ nonlinear |= (1 << i);
+ val -= st->info->mot_det_mult_xz[i];
+ }
+ }
+
+ mutex_lock(&st->lock);
+ ret = sca3000_write_ctrl_reg(st,
+ sca3000_addresses[chan->address][1],
+ nonlinear);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static struct attribute *sca3000_attributes[] = {
+ &iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sca3000_attribute_group = {
+ .attrs = sca3000_attributes,
+};
+
+static int sca3000_read_data(struct sca3000_state *st,
+ u8 reg_address_high,
+ u8 *rx,
+ int len)
+{
+ int ret;
+ struct spi_transfer xfer[2] = {
+ {
+ .len = 1,
+ .tx_buf = st->tx,
+ }, {
+ .len = len,
+ .rx_buf = rx,
+ }
+ };
+
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+ ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+ if (ret) {
+ dev_err(&st->us->dev, "problem reading register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * sca3000_ring_int_process() - ring specific interrupt handling.
+ * @val: Value of the interrupt status register.
+ * @indio_dev: Device instance specific IIO device structure.
+ */
+static void sca3000_ring_int_process(u8 val, struct iio_dev *indio_dev)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, i, num_available;
+
+ mutex_lock(&st->lock);
+
+ if (val & SCA3000_REG_INT_STATUS_HALF) {
+ ret = sca3000_read_data_short(st, SCA3000_REG_BUF_COUNT_ADDR,
+ 1);
+ if (ret)
+ goto error_ret;
+ num_available = st->rx[0];
+ /*
+ * num_available is the total number of samples available
+ * i.e. number of time points * number of channels.
+ */
+ ret = sca3000_read_data(st, SCA3000_REG_RING_OUT_ADDR, st->rx,
+ num_available * 2);
+ if (ret)
+ goto error_ret;
+ for (i = 0; i < num_available / 3; i++) {
+ /*
+ * Dirty hack to cover for 11 bit in fifo, 13 bit
+ * direct reading.
+ *
+ * In theory the bottom two bits are undefined.
+ * In reality they appear to always be 0.
+ */
+ iio_push_to_buffers(indio_dev, st->rx + i * 3 * 2);
+ }
+ }
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+/**
+ * sca3000_event_handler() - handling ring and non ring events
+ * @irq: The irq being handled.
+ * @private: struct iio_device pointer for the device.
+ *
+ * Interrupt handler for both ring and non ring events. Depending on the
+ * event, data is pushed to the ring buffer or an event is pushed to the
+ * event chrdev.
+ *
+ * This function is complicated by the fact that the devices can signify ring
+ * and non ring events via the same interrupt line and they can only
+ * be distinguished via a read of the relevant status register.
+ */
+static irqreturn_t sca3000_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, val;
+ s64 last_timestamp = iio_get_time_ns(indio_dev);
+
+ /*
+ * If badly timed, this could lead to an extra read of the status reg,
+ * but it ensures no interrupt is missed.
+ */
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ val = st->rx[0];
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto done;
+
+ sca3000_ring_int_process(val, indio_dev);
+
+ if (val & SCA3000_INT_STATUS_FREE_FALL)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Y_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_X_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+ if (val & SCA3000_INT_STATUS_Z_TRIGGER)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ last_timestamp);
+
+done:
+ return IRQ_HANDLED;
+}
+
+/**
+ * sca3000_read_event_config() - query which events are enabled
+ **/
+static int sca3000_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+ /* read current value of mode register */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = !!(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT);
+ break;
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ /*
+ * Motion detection mode cannot run at the same time as
+ * acceleration data being read.
+ */
+ if ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET) {
+ ret = 0;
+ } else {
+ ret = sca3000_read_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ /* only supporting logical or's for now */
+ ret = !!(ret & sca3000_addresses[chan->address][2]);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_freefall_set_state(struct iio_dev *indio_dev, int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+
+ /* if off and should be on */
+ if (state && !(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] | SCA3000_REG_MODE_FREE_FALL_DETECT);
+ /* if on and should be off */
+ else if (!state && (st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & ~SCA3000_REG_MODE_FREE_FALL_DETECT);
+ else
+ return 0;
+}
+
+static int sca3000_motion_detect_set_state(struct iio_dev *indio_dev, int axis,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret, ctrlval;
+
+ /*
+ * First read the motion detector config to find out if
+ * this axis is on
+ */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ return ret;
+ ctrlval = ret;
+ /* if off and should be on */
+ if (state && !(ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval |
+ sca3000_addresses[axis][2]);
+ if (ret)
+ return ret;
+ st->mo_det_use_count++;
+ } else if (!state && (ctrlval & sca3000_addresses[axis][2])) {
+ ret = sca3000_write_ctrl_reg(st,
+ SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ctrlval &
+ ~(sca3000_addresses[axis][2]));
+ if (ret)
+ return ret;
+ st->mo_det_use_count--;
+ }
+
+ /* read current value of mode register */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ return ret;
+ /* if off and should be on */
+ if ((st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ != SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK)
+ | SCA3000_REG_MODE_MEAS_MODE_MOT_DET);
+ /* if on and should be off */
+ else if (!(st->mo_det_use_count) &&
+ ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+ == SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+ return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ st->rx[0] & SCA3000_REG_MODE_MODE_MASK);
+ else
+ return 0;
+}
+
+/**
+ * sca3000_write_event_config() - simple on off control for motion detector
+ * @indio_dev: IIO device instance specific structure. Data specific to this
+ * particular driver may be accessed via iio_priv(indio_dev).
+ * @chan: Description of the channel whose event we are configuring.
+ * @type: The type of event.
+ * @dir: The direction of the event.
+ * @state: Desired state of event being configured.
+ *
+ * This is a per axis control, but enabling any will result in the
+ * motion detector unit being enabled.
+ * N.B. enabling motion detector stops normal data acquisition.
+ * There is a complexity in knowing which mode to return to when
+ * this mode is disabled. Currently normal mode is assumed.
+ **/
+static int sca3000_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ switch (chan->channel2) {
+ case IIO_MOD_X_AND_Y_AND_Z:
+ ret = sca3000_freefall_set_state(indio_dev, state);
+ break;
+
+ case IIO_MOD_X:
+ case IIO_MOD_Y:
+ case IIO_MOD_Z:
+ ret = sca3000_motion_detect_set_state(indio_dev,
+ chan->address,
+ state);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_configure_ring(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+
+ buffer = iio_kfifo_allocate();
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+
+ return 0;
+}
+
+static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ iio_kfifo_free(indio_dev->buffer);
+}
+
+static inline
+int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ if (state) {
+ dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] | SCA3000_REG_MODE_RING_BUF_ENABLE));
+ } else
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & ~SCA3000_REG_MODE_RING_BUF_ENABLE));
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+/**
+ * sca3000_hw_ring_preenable() - hw ring buffer preenable function
+ * @indio_dev: structure representing the IIO device. Device instance
+ * specific state can be accessed via iio_priv(indio_dev).
+ *
+ * Very simple enable function as the chip allows normal reads
+ * during ring buffer operation, so as long as it is indeed running
+ * before we notify the core, the precise ordering does not matter.
+ */
+static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+
+ /* Enable the 50% full interrupt */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] | SCA3000_REG_INT_MASK_RING_HALF);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&st->lock);
+
+ return __sca3000_hw_ring_state_set(indio_dev, 1);
+
+error_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ ret = __sca3000_hw_ring_state_set(indio_dev, 0);
+ if (ret)
+ return ret;
+
+ /* Disable the 50% full interrupt */
+ mutex_lock(&st->lock);
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto unlock;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ st->rx[0] & ~SCA3000_REG_INT_MASK_RING_HALF);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
+ .preenable = &sca3000_hw_ring_preenable,
+ .postdisable = &sca3000_hw_ring_postdisable,
+};
+
+/**
+ * sca3000_clean_setup() - get the device into a predictable state
+ * @st: Device instance specific private data structure
+ *
+ * Devices use flash memory to store many of the register values
+ * and hence can come up in somewhat unpredictable states.
+ * Hence reset everything on driver load.
+ */
+static int sca3000_clean_setup(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* Ensure all interrupts have been acknowledged */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+ if (ret)
+ goto error_ret;
+
+ /* Turn off all motion detection channels */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
+ ret & SCA3000_MD_CTRL_PROT_MASK);
+ if (ret)
+ goto error_ret;
+
+ /* Disable ring buffer */
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ goto error_ret;
+ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ (ret & SCA3000_REG_OUT_CTRL_PROT_MASK)
+ | SCA3000_REG_OUT_CTRL_BUF_X_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Y_EN
+ | SCA3000_REG_OUT_CTRL_BUF_Z_EN
+ | SCA3000_REG_OUT_CTRL_BUF_DIV_4);
+ if (ret)
+ goto error_ret;
+ /* Enable interrupts, relevant to mode and set up as active low */
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st,
+ SCA3000_REG_INT_MASK_ADDR,
+ (st->rx[0] & SCA3000_REG_INT_MASK_PROT_MASK)
+ | SCA3000_REG_INT_MASK_ACTIVE_LOW);
+ if (ret)
+ goto error_ret;
+ /*
+ * Select normal measurement mode, free fall off, ring off
+ * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+ * as that occurs in one of the examples in the datasheet
+ */
+ ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+ (st->rx[0] & SCA3000_MODE_PROT_MASK));
+
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_info sca3000_info = {
+ .attrs = &sca3000_attribute_group,
+ .read_raw = &sca3000_read_raw,
+ .write_raw = &sca3000_write_raw,
+ .read_event_value = &sca3000_read_event_value,
+ .write_event_value = &sca3000_write_event_value,
+ .read_event_config = &sca3000_read_event_config,
+ .write_event_config = &sca3000_write_event_config,
+ .driver_module = THIS_MODULE,
+};
+
+static int sca3000_probe(struct spi_device *spi)
+{
+ int ret;
+ struct sca3000_state *st;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->us = spi;
+ mutex_init(&st->lock);
+ st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
+ ->driver_data];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &sca3000_info;
+ if (st->info->temp_output) {
+ indio_dev->channels = sca3000_channels_with_temp;
+ indio_dev->num_channels =
+ ARRAY_SIZE(sca3000_channels_with_temp);
+ } else {
+ indio_dev->channels = sca3000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = sca3000_configure_ring(indio_dev);
+ if (ret)
+ return ret;
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ &sca3000_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "sca3000",
+ indio_dev);
+ if (ret)
+ return ret;
+ }
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
+ ret = sca3000_clean_setup(st);
+ if (ret)
+ goto error_free_irq;
+
+ ret = sca3000_print_rev(indio_dev);
+ if (ret)
+ goto error_free_irq;
+
+ return iio_device_register(indio_dev);
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ return ret;
+}
+
+static int sca3000_stop_all_interrupts(struct sca3000_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+ if (ret)
+ goto error_ret;
+ ret = sca3000_write_reg(st, SCA3000_REG_INT_MASK_ADDR,
+ (st->rx[0] &
+ ~(SCA3000_REG_INT_MASK_RING_THREE_QUARTER |
+ SCA3000_REG_INT_MASK_RING_HALF |
+ SCA3000_REG_INT_MASK_ALL_INTS)));
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int sca3000_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct sca3000_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Must ensure no interrupts can be generated after this! */
+ sca3000_stop_all_interrupts(st);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+ sca3000_unconfigure_ring(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id sca3000_id[] = {
+ {"sca3000_d01", d01},
+ {"sca3000_e02", e02},
+ {"sca3000_e04", e04},
+ {"sca3000_e05", e05},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
+
+static struct spi_driver sca3000_driver = {
+ .driver = {
+ .name = "sca3000",
+ },
+ .probe = sca3000_probe,
+ .remove = sca3000_remove,
+ .id_table = sca3000_id,
+};
+module_spi_driver(sca3000_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index f8dfdb690563..7c231687109a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -30,6 +30,7 @@
#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
+#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index da3fb069ec5c..f6b6d42385e1 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -43,194 +43,6 @@
#define ST_ACCEL_FS_AVL_200G 200
#define ST_ACCEL_FS_AVL_400G 400
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_ACCEL_1_WAI_EXP 0x33
-#define ST_ACCEL_1_ODR_ADDR 0x20
-#define ST_ACCEL_1_ODR_MASK 0xf0
-#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
-#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
-#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
-#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
-#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
-#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
-#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
-#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
-#define ST_ACCEL_1_FS_ADDR 0x23
-#define ST_ACCEL_1_FS_MASK 0x30
-#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
-#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
-#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
-#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
-#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
-#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
-#define ST_ACCEL_1_BDU_ADDR 0x23
-#define ST_ACCEL_1_BDU_MASK 0x80
-#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
-#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
-#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
-#define ST_ACCEL_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_ACCEL_2_WAI_EXP 0x32
-#define ST_ACCEL_2_ODR_ADDR 0x20
-#define ST_ACCEL_2_ODR_MASK 0x18
-#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_2_PW_ADDR 0x20
-#define ST_ACCEL_2_PW_MASK 0xe0
-#define ST_ACCEL_2_FS_ADDR 0x23
-#define ST_ACCEL_2_FS_MASK 0x30
-#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
-#define ST_ACCEL_2_BDU_ADDR 0x23
-#define ST_ACCEL_2_BDU_MASK 0x80
-#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_2_OD_IRQ_MASK 0x40
-#define ST_ACCEL_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_ACCEL_3_WAI_EXP 0x40
-#define ST_ACCEL_3_ODR_ADDR 0x20
-#define ST_ACCEL_3_ODR_MASK 0xf0
-#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
-#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
-#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
-#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
-#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
-#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
-#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
-#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
-#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
-#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
-#define ST_ACCEL_3_FS_ADDR 0x24
-#define ST_ACCEL_3_FS_MASK 0x38
-#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
-#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
-#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
-#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
-#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
-#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
-#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
-#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
-#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
-#define ST_ACCEL_3_BDU_ADDR 0x20
-#define ST_ACCEL_3_BDU_MASK 0x08
-#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
-#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
-#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
-#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
-#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
-#define ST_ACCEL_3_IG1_EN_ADDR 0x23
-#define ST_ACCEL_3_IG1_EN_MASK 0x08
-#define ST_ACCEL_3_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 4 */
-#define ST_ACCEL_4_WAI_EXP 0x3a
-#define ST_ACCEL_4_ODR_ADDR 0x20
-#define ST_ACCEL_4_ODR_MASK 0x30 /* DF1 and DF0 */
-#define ST_ACCEL_4_ODR_AVL_40HZ_VAL 0x00
-#define ST_ACCEL_4_ODR_AVL_160HZ_VAL 0x01
-#define ST_ACCEL_4_ODR_AVL_640HZ_VAL 0x02
-#define ST_ACCEL_4_ODR_AVL_2560HZ_VAL 0x03
-#define ST_ACCEL_4_PW_ADDR 0x20
-#define ST_ACCEL_4_PW_MASK 0xc0
-#define ST_ACCEL_4_FS_ADDR 0x21
-#define ST_ACCEL_4_FS_MASK 0x80
-#define ST_ACCEL_4_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_4_FS_AVL_6_VAL 0X01
-#define ST_ACCEL_4_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1024)
-#define ST_ACCEL_4_FS_AVL_6_GAIN IIO_G_TO_M_S_2(340)
-#define ST_ACCEL_4_BDU_ADDR 0x21
-#define ST_ACCEL_4_BDU_MASK 0x40
-#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_4_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 5 */
-#define ST_ACCEL_5_WAI_EXP 0x3b
-#define ST_ACCEL_5_ODR_ADDR 0x20
-#define ST_ACCEL_5_ODR_MASK 0x80
-#define ST_ACCEL_5_ODR_AVL_100HZ_VAL 0x00
-#define ST_ACCEL_5_ODR_AVL_400HZ_VAL 0x01
-#define ST_ACCEL_5_PW_ADDR 0x20
-#define ST_ACCEL_5_PW_MASK 0x40
-#define ST_ACCEL_5_FS_ADDR 0x20
-#define ST_ACCEL_5_FS_MASK 0x20
-#define ST_ACCEL_5_FS_AVL_2_VAL 0X00
-#define ST_ACCEL_5_FS_AVL_8_VAL 0X01
-/* TODO: check these resulting gain settings, these are not in the datsheet */
-#define ST_ACCEL_5_FS_AVL_2_GAIN IIO_G_TO_M_S_2(18000)
-#define ST_ACCEL_5_FS_AVL_8_GAIN IIO_G_TO_M_S_2(72000)
-#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
-#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
-#define ST_ACCEL_5_OD_IRQ_MASK 0x40
-#define ST_ACCEL_5_IG1_EN_ADDR 0x21
-#define ST_ACCEL_5_IG1_EN_MASK 0x08
-#define ST_ACCEL_5_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 6 */
-#define ST_ACCEL_6_WAI_EXP 0x32
-#define ST_ACCEL_6_ODR_ADDR 0x20
-#define ST_ACCEL_6_ODR_MASK 0x18
-#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
-#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
-#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
-#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
-#define ST_ACCEL_6_PW_ADDR 0x20
-#define ST_ACCEL_6_PW_MASK 0x20
-#define ST_ACCEL_6_FS_ADDR 0x23
-#define ST_ACCEL_6_FS_MASK 0x30
-#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
-#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
-#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
-#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
-#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
-#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
-#define ST_ACCEL_6_BDU_ADDR 0x23
-#define ST_ACCEL_6_BDU_MASK 0x80
-#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
-#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
-#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
-#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
-#define ST_ACCEL_6_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 7 */
-#define ST_ACCEL_7_ODR_ADDR 0x20
-#define ST_ACCEL_7_ODR_MASK 0x30
-#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
-#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
-#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
-#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
-#define ST_ACCEL_7_PW_ADDR 0x20
-#define ST_ACCEL_7_PW_MASK 0xc0
-#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
-#define ST_ACCEL_7_BDU_ADDR 0x21
-#define ST_ACCEL_7_BDU_MASK 0x40
-#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
-#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
-#define ST_ACCEL_7_MULTIREAD_BIT false
-
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -281,7 +93,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
- .wai = ST_ACCEL_1_WAI_EXP,
+ .wai = 0x33,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3DH_ACCEL_DEV_NAME,
@@ -294,22 +106,22 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
- { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
- { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
- { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
},
},
.pw = {
- .addr = ST_ACCEL_1_ODR_ADDR,
- .mask = ST_ACCEL_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -317,48 +129,48 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_1_FS_ADDR,
- .mask = ST_ACCEL_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_1_FS_AVL_2_VAL,
- .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_1_FS_AVL_4_VAL,
- .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_1_FS_AVL_8_VAL,
- .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(4000),
},
[3] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_1_FS_AVL_16_VAL,
- .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(12000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_1_BDU_ADDR,
- .mask = ST_ACCEL_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_2_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DLH_ACCEL_DEV_NAME,
@@ -368,18 +180,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_2_ODR_ADDR,
- .mask = ST_ACCEL_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_2_PW_ADDR,
- .mask = ST_ACCEL_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xe0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -388,69 +200,69 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_2_FS_ADDR,
- .mask = ST_ACCEL_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_2_FS_AVL_2_VAL,
- .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_2_FS_AVL_4_VAL,
- .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(2000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_2_FS_AVL_8_VAL,
- .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(3900),
},
},
},
.bdu = {
- .addr = ST_ACCEL_2_BDU_ADDR,
- .mask = ST_ACCEL_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_ACCEL_3_WAI_EXP,
+ .wai = 0x40,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM330_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
.odr = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.odr_avl = {
- { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
- { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
- { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
- { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
- { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
- { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
- { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
- { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
- { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ { .hz = 3, .value = 0x01, },
+ { .hz = 6, .value = 0x02, },
+ { .hz = 12, .value = 0x03, },
+ { .hz = 25, .value = 0x04, },
+ { .hz = 50, .value = 0x05, },
+ { .hz = 100, .value = 0x06, },
+ { .hz = 200, .value = 0x07, },
+ { .hz = 400, .value = 0x08, },
+ { .hz = 800, .value = 0x09, },
+ { .hz = 1600, .value = 0x0a, },
},
},
.pw = {
- .addr = ST_ACCEL_3_ODR_ADDR,
- .mask = ST_ACCEL_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xf0,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.enable_axis = {
@@ -458,75 +270,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_3_FS_ADDR,
- .mask = ST_ACCEL_3_FS_MASK,
+ .addr = 0x24,
+ .mask = 0x38,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_3_FS_AVL_2_VAL,
- .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
},
[1] = {
.num = ST_ACCEL_FS_AVL_4G,
- .value = ST_ACCEL_3_FS_AVL_4_VAL,
- .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(122),
},
[2] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_3_FS_AVL_6_VAL,
- .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(183),
},
[3] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_3_FS_AVL_8_VAL,
- .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
},
[4] = {
.num = ST_ACCEL_FS_AVL_16G,
- .value = ST_ACCEL_3_FS_AVL_16_VAL,
- .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ .value = 0x04,
+ .gain = IIO_G_TO_M_S_2(732),
},
},
},
.bdu = {
- .addr = ST_ACCEL_3_BDU_ADDR,
- .mask = ST_ACCEL_3_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
},
.drdy_irq = {
- .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x80,
+ .mask_int2 = 0x00,
+ .addr_ihl = 0x23,
+ .mask_ihl = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
.ig1 = {
- .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
- .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ .en_addr = 0x23,
+ .en_mask = 0x08,
},
},
- .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_ACCEL_4_WAI_EXP,
+ .wai = 0x3a,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3LV02DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_4_ODR_ADDR,
- .mask = ST_ACCEL_4_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30, /* DF1 and DF0 */
.odr_avl = {
- { 40, ST_ACCEL_4_ODR_AVL_40HZ_VAL },
- { 160, ST_ACCEL_4_ODR_AVL_160HZ_VAL, },
- { 640, ST_ACCEL_4_ODR_AVL_640HZ_VAL, },
- { 2560, ST_ACCEL_4_ODR_AVL_2560HZ_VAL, },
+ { .hz = 40, .value = 0x00, },
+ { .hz = 160, .value = 0x01, },
+ { .hz = 640, .value = 0x02, },
+ { .hz = 2560, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_4_PW_ADDR,
- .mask = ST_ACCEL_4_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -535,51 +347,51 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_4_FS_ADDR,
- .mask = ST_ACCEL_4_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x80,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_4_FS_AVL_2_VAL,
- .gain = ST_ACCEL_4_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(1024),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
- .value = ST_ACCEL_4_FS_AVL_6_VAL,
- .gain = ST_ACCEL_4_FS_AVL_6_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(340),
},
},
},
.bdu = {
- .addr = ST_ACCEL_4_BDU_ADDR,
- .mask = ST_ACCEL_4_BDU_MASK,
+ .addr = 0x21,
+ .mask = 0x40,
},
.drdy_irq = {
- .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_5_WAI_EXP,
+ .wai = 0x3b,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
.odr = {
- .addr = ST_ACCEL_5_ODR_ADDR,
- .mask = ST_ACCEL_5_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.odr_avl = {
- { 100, ST_ACCEL_5_ODR_AVL_100HZ_VAL },
- { 400, ST_ACCEL_5_ODR_AVL_400HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 400, .value = 0x01, },
},
},
.pw = {
- .addr = ST_ACCEL_5_PW_ADDR,
- .mask = ST_ACCEL_5_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -588,54 +400,58 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_5_FS_ADDR,
- .mask = ST_ACCEL_5_FS_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
+ /*
+ * TODO: check these resulting gain settings; they are
+ * not in the datasheet
+ */
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .value = ST_ACCEL_5_FS_AVL_2_VAL,
- .gain = ST_ACCEL_5_FS_AVL_2_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(18000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_8G,
- .value = ST_ACCEL_5_FS_AVL_8_VAL,
- .gain = ST_ACCEL_5_FS_AVL_8_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(72000),
},
},
},
.drdy_irq = {
- .addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
- .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
- .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2, /* guess */
},
{
- .wai = ST_ACCEL_6_WAI_EXP,
+ .wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = H3LIS331DL_DRIVER_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_6_ODR_ADDR,
- .mask = ST_ACCEL_6_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x18,
.odr_avl = {
- { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
- { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, },
- { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, },
- { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, },
+ { .hz = 50, .value = 0x00, },
+ { .hz = 100, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 1000, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_6_PW_ADDR,
- .mask = ST_ACCEL_6_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x20,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -644,38 +460,38 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_ACCEL_6_FS_ADDR,
- .mask = ST_ACCEL_6_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_100G,
- .value = ST_ACCEL_6_FS_AVL_100_VAL,
- .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(49000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_200G,
- .value = ST_ACCEL_6_FS_AVL_200_VAL,
- .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(98000),
},
[2] = {
.num = ST_ACCEL_FS_AVL_400G,
- .value = ST_ACCEL_6_FS_AVL_400_VAL,
- .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(195000),
},
},
},
.bdu = {
- .addr = ST_ACCEL_6_BDU_ADDR,
- .mask = ST_ACCEL_6_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
- .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x02,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
},
- .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
@@ -685,18 +501,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
- .addr = ST_ACCEL_7_ODR_ADDR,
- .mask = ST_ACCEL_7_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
- { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
- { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
- { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+ { .hz = 280, .value = 0x00, },
+ { .hz = 560, .value = 0x01, },
+ { .hz = 1120, .value = 0x02, },
+ { .hz = 4480, .value = 0x03, },
},
},
.pw = {
- .addr = ST_ACCEL_7_PW_ADDR,
- .mask = ST_ACCEL_7_PW_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -708,7 +524,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.fs_avl = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
- .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+ .gain = IIO_G_TO_M_S_2(488),
},
},
},
@@ -719,11 +535,78 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bdu = {
},
.drdy_irq = {
- .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
- .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+ .addr = 0x21,
+ .mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ {
+ .wai = 0x33,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LNG2DM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1600, .value = 0x08, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(15600),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(31200),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(62500),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(187500),
+ },
+ },
+ },
+ .drdy_irq = {
+ .addr = 0x22,
+ .mask_int1 = 0x10,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = true,
.bootime = 2,
},
};
@@ -743,8 +626,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = adata->current_fullscale->gain;
+ *val = adata->current_fullscale->gain / 1000000;
+ *val2 = adata->current_fullscale->gain % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = adata->odr;
@@ -763,9 +646,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
int err;
switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ case IIO_CHAN_INFO_SCALE: {
+ int gain;
+
+ gain = val * 1000000 + val2;
+ err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
break;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
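As a side note on the representation change above: a minimal standalone sketch, assuming IIO_G_TO_M_S_2() turns a sensitivity in micro-g per LSB into micro-(m/s^2) per LSB, of how a table entry such as IIO_G_TO_M_S_2(61) ends up split into the integer/micro pair that st_accel_read_raw() now reports. The macro body and the values below are illustrative assumptions, not taken from this patch.

/* standalone sketch; the conversion factor here is an assumption */
#include <stdio.h>

#define G_TO_M_S_2(ug)	((ug) * 980665ULL / 100000ULL)	/* ug/LSB -> (um/s^2)/LSB */

int main(void)
{
	unsigned long long gain = G_TO_M_S_2(61);	/* e.g. the 2G entry above */

	/* same split st_accel_read_raw() does for IIO_VAL_INT_PLUS_MICRO */
	printf("scale = %llu.%06llu (m/s^2)/LSB\n",
	       gain / 1000000, gain % 1000000);
	return 0;
}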
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index e9d427a5df7c..c0f8867aa1ea 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,6 +84,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3l02dq",
.data = LIS3L02DQ_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lng2dm-accel",
+ .data = LNG2DM_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -135,6 +139,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index efd43941d45d..c25ac50d4600 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -60,6 +60,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 99c051490eff..38bc319904c4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -58,6 +58,18 @@ config AD7476
To compile this driver as a module, choose M here: the
module will be called ad7476.
+config AD7766
+ tristate "Analog Devices AD7766/AD7767 ADC driver"
+ depends on SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7766, AD7766-1,
+ AD7766-2, AD7767, AD7767-1, AD7767-2 SPI analog to digital converters.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7766.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -195,6 +207,16 @@ config DA9150_GPADC
To compile this driver as a module, choose M here: the module will be
called berlin2-adc.
+config ENVELOPE_DETECTOR
+ tristate "Envelope detector using a DAC and a comparator"
+ depends on OF
+ help
+ Say yes here to build support for an envelope detector using a DAC
+ and a comparator.
+
+ To compile this driver as a module, choose M here: the module will be
+ called envelope-detector.
+
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -419,6 +441,28 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config STM32_ADC_CORE
+ tristate "STMicroelectronics STM32 adc core"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on REGULATOR
+ help
+ Select this option to enable the core driver for STMicroelectronics
+ STM32 analog-to-digital converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc-core.
+
+config STM32_ADC
+ tristate "STMicroelectronics STM32 adc"
+ depends on STM32_ADC_CORE
+ help
+ Say yes here to build support for the STMicroelectronics STM32
+ analog-to-digital converter (ADC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on X86 && ISA_BUS_API
@@ -449,6 +493,8 @@ config TI_ADC081C
config TI_ADC0832
tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC0831,
ADC0832, ADC0834, ADC0838 ADC chips.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 7a40c04c311f..d36c4be8d1fc 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7766) += ad7766.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
+obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
@@ -41,6 +43,8 @@ obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
+obj-$(CONFIG_STM32_ADC) += stm32-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
new file mode 100644
index 000000000000..75cca42b6e70
--- /dev/null
+++ b/drivers/iio/adc/ad7766.c
@@ -0,0 +1,330 @@
+/*
+ * AD7766/AD7767 SPI ADC driver
+ *
+ * Copyright 2016 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+struct ad7766_chip_info {
+ unsigned int decimation_factor;
+};
+
+enum {
+ AD7766_SUPPLY_AVDD = 0,
+ AD7766_SUPPLY_DVDD = 1,
+ AD7766_SUPPLY_VREF = 2,
+ AD7766_NUM_SUPPLIES = 3
+};
+
+struct ad7766 {
+ const struct ad7766_chip_info *chip_info;
+ struct spi_device *spi;
+ struct clk *mclk;
+ struct gpio_desc *pd_gpio;
+ struct regulator_bulk_data reg[AD7766_NUM_SUPPLIES];
+
+ struct iio_trigger *trig;
+
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * Make the buffer large enough for one 24 bit sample and one 64 bit
+ * aligned 64 bit timestamp.
+ */
+ unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
+};
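A small userspace sketch (not part of the driver) of the arithmetic behind that buffer size: ALIGN(3, sizeof(s64)) rounds the 3-byte sample slot up to 8 bytes, so the 8-byte timestamp lands 64-bit aligned and the whole scan buffer is 16 bytes.

#include <stdint.h>
#include <stdio.h>

/* same rounding as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t sample = 3;			/* one 24-bit sample */
	size_t ts = sizeof(int64_t);		/* one 64-bit timestamp */
	size_t len = ALIGN_UP(sample, ts) + ts;	/* 8 + 8 = 16 bytes */

	printf("scan buffer: %zu bytes, timestamp at offset %zu\n",
	       len, ALIGN_UP(sample, ts));
	return 0;
}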
+
+/*
+ * The AD7766 and AD7767 variants are interface compatible; the main difference
+ * is analog performance. Both parts therefore use the same IDs.
+ */
+enum ad7766_device_ids {
+ ID_AD7766,
+ ID_AD7766_1,
+ ID_AD7766_2,
+};
+
+static irqreturn_t ad7766_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(ad7766->spi, &ad7766->msg);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, ad7766->data,
+ pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ad7766_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ad7766->mclk);
+ if (ret < 0) {
+ dev_err(&ad7766->spi->dev, "Failed to enable MCLK: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+ return ret;
+ }
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 0);
+
+ return 0;
+}
+
+static int ad7766_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+
+ if (ad7766->pd_gpio)
+ gpiod_set_value(ad7766->pd_gpio, 1);
+
+ /*
+ * The PD pin is synchronous to the clock, so give it some time to
+ * notice the change before we disable the clock.
+ */
+ msleep(20);
+
+ clk_disable_unprepare(ad7766->mclk);
+ regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+
+ return 0;
+}
+
+static int ad7766_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct ad7766 *ad7766 = iio_priv(indio_dev);
+ struct regulator *vref = ad7766->reg[AD7766_SUPPLY_VREF].consumer;
+ int scale_uv;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = regulator_get_voltage(vref);
+ if (scale_uv < 0)
+ return scale_uv;
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(ad7766->mclk) /
+ ad7766->chip_info->decimation_factor;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
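A worked sketch of what those two return paths mean: IIO_VAL_FRACTIONAL_LOG2 reports val / 2^val2, i.e. the reference voltage over the 24-bit full scale, and the sample rate is MCLK divided by the chip's decimation factor. The 2.5 V reference and 1.024 MHz MCLK below are illustrative assumptions, not values from the driver.

#include <stdio.h>

int main(void)
{
	int vref_mv = 2500;		/* assumed reference voltage, mV */
	int realbits = 24;
	long mclk_hz = 1024000;		/* assumed MCLK */
	int decimation = 8;		/* plain AD7766, per the table below */

	printf("scale: %.9f mV/LSB\n", (double)vref_mv / (1L << realbits));
	printf("sample rate: %ld Hz\n", mclk_hz / decimation);
	return 0;
}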
+
+static const struct iio_chan_spec ad7766_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7766_chip_info ad7766_chip_info[] = {
+ [ID_AD7766] = {
+ .decimation_factor = 8,
+ },
+ [ID_AD7766_1] = {
+ .decimation_factor = 16,
+ },
+ [ID_AD7766_2] = {
+ .decimation_factor = 32,
+ },
+};
+
+static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
+ .preenable = &ad7766_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7766_postdisable,
+};
+
+static const struct iio_info ad7766_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7766_read_raw,
+};
+
+static irqreturn_t ad7766_irq(int irq, void *private)
+{
+ iio_trigger_poll(private);
+ return IRQ_HANDLED;
+}
+
+static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct ad7766 *ad7766 = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ enable_irq(ad7766->spi->irq);
+ else
+ disable_irq(ad7766->spi->irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops ad7766_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ad7766_set_trigger_state,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad7766_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad7766 *ad7766;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ad7766));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ad7766 = iio_priv(indio_dev);
+ ad7766->chip_info = &ad7766_chip_info[id->driver_data];
+
+ ad7766->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(ad7766->mclk))
+ return PTR_ERR(ad7766->mclk);
+
+ ad7766->reg[AD7766_SUPPLY_AVDD].supply = "avdd";
+ ad7766->reg[AD7766_SUPPLY_DVDD].supply = "dvdd";
+ ad7766->reg[AD7766_SUPPLY_VREF].supply = "vref";
+
+ ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(ad7766->reg),
+ ad7766->reg);
+ if (ret)
+ return ret;
+
+ ad7766->pd_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ad7766->pd_gpio))
+ return PTR_ERR(ad7766->pd_gpio);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7766_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7766_channels);
+ indio_dev->info = &ad7766_info;
+
+ if (spi->irq > 0) {
+ ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!ad7766->trig)
+ return -ENOMEM;
+
+ ad7766->trig->ops = &ad7766_trigger_ops;
+ ad7766->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(ad7766->trig, ad7766);
+
+ ret = devm_request_irq(&spi->dev, spi->irq, ad7766_irq,
+ IRQF_TRIGGER_FALLING, dev_name(&spi->dev),
+ ad7766->trig);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The device generates interrupts as long as it is powered up.
+ * Some platforms might not allow powering it down, so disable the
+ * interrupt here to avoid putting extra load on the system.
+ */
+ disable_irq(spi->irq);
+
+ ret = devm_iio_trigger_register(&spi->dev, ad7766->trig);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ad7766->spi = spi;
+
+ /* First byte always 0 */
+ ad7766->xfer.rx_buf = &ad7766->data[1];
+ ad7766->xfer.len = 3;
+
+ spi_message_init(&ad7766->msg);
+ spi_message_add_tail(&ad7766->xfer, &ad7766->msg);
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time, &ad7766_trigger_handler,
+ &ad7766_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static const struct spi_device_id ad7766_id[] = {
+ {"ad7766", ID_AD7766},
+ {"ad7766-1", ID_AD7766_1},
+ {"ad7766-2", ID_AD7766_2},
+ {"ad7767", ID_AD7766},
+ {"ad7767-1", ID_AD7766_1},
+ {"ad7767-2", ID_AD7766_2},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7766_id);
+
+static struct spi_driver ad7766_driver = {
+ .driver = {
+ .name = "ad7766",
+ },
+ .probe = ad7766_probe,
+ .id_table = ad7766_id,
+};
+module_spi_driver(ad7766_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7766 and AD7767 ADCs driver support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index bbdac07f4aaa..34b928cefeed 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -30,6 +30,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/pinctrl/consumer.h>
/* Registers */
#define AT91_ADC_CR 0x00 /* Control Register */
@@ -1347,6 +1348,32 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int at91_adc_suspend(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ pinctrl_pm_select_sleep_state(dev);
+ clk_disable_unprepare(st->clk);
+
+ return 0;
+}
+
+static int at91_adc_resume(struct device *dev)
+{
+ struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(idev);
+
+ clk_prepare_enable(st->clk);
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+
static struct at91_adc_caps at91sam9260_caps = {
.calc_startup_ticks = calc_startup_ticks_9260,
.num_channels = 4,
@@ -1441,6 +1468,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(at91_adc_dt_ids),
+ .pm = &at91_adc_pm_ops,
},
};
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
new file mode 100644
index 000000000000..fef15c0d7c9c
--- /dev/null
+++ b/drivers/iio/adc/envelope-detector.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for an envelope detector using a DAC and a comparator
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DAC is used to find the peak level of an alternating voltage input
+ * signal by a binary search using the output of a comparator wired to
+ * an interrupt pin. Like so:
+ * _
+ * | \
+ * input +------>-------|+ \
+ * | \
+ * .-------. | }---.
+ * | | | / |
+ * | dac|-->--|- / |
+ * | | |_/ |
+ * | | |
+ * | | |
+ * | irq|------<-------'
+ * | |
+ * '-------'
+ */
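To make the search concrete, a small standalone sketch of the same idea (not driver code): comparator_fired() stands in for the DAC write plus the interrupt latch, and the loop narrows [low, high] until the peak is trapped between two adjacent DAC codes. The invert handling of the real driver is left out.

#include <stdbool.h>
#include <stdio.h>

static const int dac_max = 1023;
static const int peak = 700;	/* pretend input peak, just for the demo */

/* stand-in for "set DAC to level, wait, check the comparator latch" */
static bool comparator_fired(int level)
{
	return peak > level;
}

static int find_peak(void)
{
	int low = -1, high = dac_max;

	while (high != low + 1) {
		int level = (high + low + 1) / 2;	/* midpoint, rounded up */

		if (comparator_fired(level))
			low = level;	/* input still above the DAC level */
		else
			high = level;	/* input stayed below the DAC level */
	}
	return high;	/* lowest DAC code the input never exceeded */
}

int main(void)
{
	printf("detected peak: %d of %d\n", find_peak(), dac_max);
	return 0;
}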
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct envelope {
+ spinlock_t comp_lock; /* protects comp */
+ int comp;
+
+ struct mutex read_lock; /* protects everything else */
+
+ int comp_irq;
+ u32 comp_irq_trigger;
+ u32 comp_irq_trigger_inv;
+
+ struct iio_channel *dac;
+ struct delayed_work comp_timeout;
+
+ unsigned int comp_interval;
+ bool invert;
+ u32 dac_max;
+
+ int high;
+ int level;
+ int low;
+
+ struct completion done;
+};
+
+/*
+ * The envelope_detector_comp_latch function works together with the compare
+ * interrupt service routine below (envelope_detector_comp_isr) as a latch
+ * (one-bit memory) recording whether the interrupt has triggered since the
+ * last call to this function.
+ * The ..._comp_isr function disables the interrupt so that the CPU does not
+ * need to service a possible interrupt flood from the comparator when no one
+ * cares anyway, and this ..._comp_latch function re-enables it if needed.
+ */
+static int envelope_detector_comp_latch(struct envelope *env)
+{
+ int comp;
+
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (!comp)
+ return 0;
+
+ /*
+ * The irq was disabled, and is reenabled just now.
+ * But there might have been a pending irq that
+ * happened while the irq was disabled that fires
+ * just as the irq is reenabled. That is not what
+ * is desired.
+ */
+ enable_irq(env->comp_irq);
+
+ /* So, synchronize this possibly pending irq... */
+ synchronize_irq(env->comp_irq);
+
+ /* ...and redo the whole dance. */
+ spin_lock_irq(&env->comp_lock);
+ comp = env->comp;
+ env->comp = 0;
+ spin_unlock_irq(&env->comp_lock);
+
+ if (comp)
+ enable_irq(env->comp_irq);
+
+ return 1;
+}
+
+static irqreturn_t envelope_detector_comp_isr(int irq, void *ctx)
+{
+ struct envelope *env = ctx;
+
+ spin_lock(&env->comp_lock);
+ env->comp = 1;
+ disable_irq_nosync(env->comp_irq);
+ spin_unlock(&env->comp_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void envelope_detector_setup_compare(struct envelope *env)
+{
+ int ret;
+
+ /*
+ * Do a binary search for the peak input level, and stop
+ * when that level is "trapped" between two adjacent DAC
+ * values.
+ * When invert is active, use the midpoint floor so that
+ * env->level ends up as env->low when the termination
+ * criterion below is fulfilled, and use the midpoint
+ * ceiling when invert is not active so that env->level
+ * ends up as env->high in that case.
+ */
+ env->level = (env->high + env->low + !env->invert) / 2;
+
+ if (env->high == env->low + 1) {
+ complete(&env->done);
+ return;
+ }
+
+ /* Set a "safe" DAC level (if there is such a thing)... */
+ ret = iio_write_channel_raw(env->dac, env->invert ? 0 : env->dac_max);
+ if (ret < 0)
+ goto err;
+
+ /* ...clear the comparison result... */
+ envelope_detector_comp_latch(env);
+
+ /* ...set the real DAC level... */
+ ret = iio_write_channel_raw(env->dac, env->level);
+ if (ret < 0)
+ goto err;
+
+ /* ...and wait for a bit to see if the latch catches anything. */
+ schedule_delayed_work(&env->comp_timeout,
+ msecs_to_jiffies(env->comp_interval));
+ return;
+
+err:
+ env->level = ret;
+ complete(&env->done);
+}
+
+static void envelope_detector_timeout(struct work_struct *work)
+{
+ struct envelope *env = container_of(work, struct envelope,
+ comp_timeout.work);
+
+ /* Adjust low/high depending on the latch content... */
+ if (!envelope_detector_comp_latch(env) ^ !env->invert)
+ env->low = env->level;
+ else
+ env->high = env->level;
+
+ /* ...and continue the search. */
+ envelope_detector_setup_compare(env);
+}
+
+static int envelope_detector_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * When invert is active, start with high=max+1 and low=0
+ * since we will end up with the low value when the
+ * termination criterion is fulfilled (rounding down), and
+ * start with high=max and low=-1 when invert is not active
+ * since we will end up with the high value in that case.
+ * This ensures that the returned value is, in both cases, in
+ * the same range as the DAC and is a value that has not
+ * triggered the comparator.
+ */
+ mutex_lock(&env->read_lock);
+ env->high = env->dac_max + env->invert;
+ env->low = -1 + env->invert;
+ envelope_detector_setup_compare(env);
+ wait_for_completion(&env->done);
+ if (env->level < 0) {
+ ret = env->level;
+ goto err_unlock;
+ }
+ *val = env->invert ? env->dac_max - env->level : env->level;
+ mutex_unlock(&env->read_lock);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(env->dac, val, val2);
+ }
+
+ return -EINVAL;
+
+err_unlock:
+ mutex_unlock(&env->read_lock);
+ return ret;
+}
+
+static ssize_t envelope_show_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch, char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->invert);
+}
+
+static ssize_t envelope_store_invert(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long invert;
+ int ret;
+ u32 trigger;
+
+ ret = kstrtoul(buf, 0, &invert);
+ if (ret < 0)
+ return ret;
+ if (invert > 1)
+ return -EINVAL;
+
+ trigger = invert ? env->comp_irq_trigger_inv : env->comp_irq_trigger;
+
+ mutex_lock(&env->read_lock);
+ if (invert != env->invert)
+ ret = irq_set_irq_type(env->comp_irq, trigger);
+ if (!ret) {
+ env->invert = invert;
+ ret = len;
+ }
+ mutex_unlock(&env->read_lock);
+
+ return ret;
+}
+
+static ssize_t envelope_show_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ char *buf)
+{
+ struct envelope *env = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", env->comp_interval);
+}
+
+static ssize_t envelope_store_comp_interval(struct iio_dev *indio_dev,
+ uintptr_t private,
+ struct iio_chan_spec const *ch,
+ const char *buf, size_t len)
+{
+ struct envelope *env = iio_priv(indio_dev);
+ unsigned long interval;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &interval);
+ if (ret < 0)
+ return ret;
+ if (interval > 1000)
+ return -EINVAL;
+
+ mutex_lock(&env->read_lock);
+ env->comp_interval = interval;
+ mutex_unlock(&env->read_lock);
+
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
+ { .name = "invert",
+ .read = envelope_show_invert,
+ .write = envelope_store_invert, },
+ { .name = "compare_interval",
+ .read = envelope_show_comp_interval,
+ .write = envelope_store_comp_interval, },
+ { /* sentinel */ }
+};
+
+static const struct iio_chan_spec envelope_detector_iio_channel = {
+ .type = IIO_ALTVOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = envelope_detector_ext_info,
+ .indexed = 1,
+};
+
+static const struct iio_info envelope_detector_info = {
+ .read_raw = &envelope_detector_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int envelope_detector_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct envelope *env;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*env));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ env = iio_priv(indio_dev);
+ env->comp_interval = 50; /* some sensible default? */
+
+ spin_lock_init(&env->comp_lock);
+ mutex_init(&env->read_lock);
+ init_completion(&env->done);
+ INIT_DELAYED_WORK(&env->comp_timeout, envelope_detector_timeout);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = dev->of_node;
+ indio_dev->info = &envelope_detector_info;
+ indio_dev->channels = &envelope_detector_iio_channel;
+ indio_dev->num_channels = 1;
+
+ env->dac = devm_iio_channel_get(dev, "dac");
+ if (IS_ERR(env->dac)) {
+ if (PTR_ERR(env->dac) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dac input channel\n");
+ return PTR_ERR(env->dac);
+ }
+
+ env->comp_irq = platform_get_irq_byname(pdev, "comp");
+ if (env->comp_irq < 0) {
+ if (env->comp_irq != -EPROBE_DEFER)
+ dev_err(dev, "failed to get compare interrupt\n");
+ return env->comp_irq;
+ }
+
+ ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
+ 0, "envelope-detector", env);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request interrupt\n");
+ return ret;
+ }
+ env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
+ if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_FALLING)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_RISING;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_HIGH)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_LOW;
+ if (env->comp_irq_trigger & IRQF_TRIGGER_LOW)
+ env->comp_irq_trigger_inv |= IRQF_TRIGGER_HIGH;
+
+ ret = iio_get_channel_type(env->dac, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_VOLTAGE) {
+ dev_err(dev, "dac is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = iio_read_max_channel_raw(env->dac, &env->dac_max);
+ if (ret < 0) {
+ dev_err(dev, "dac does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id envelope_detector_match[] = {
+ { .compatible = "axentia,tse850-envelope-detector", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, envelope_detector_match);
+
+static struct platform_driver envelope_detector_driver = {
+ .probe = envelope_detector_probe,
+ .driver = {
+ .name = "iio-envelope-detector",
+ .of_match_table = envelope_detector_match,
+ },
+};
+module_platform_driver(envelope_detector_driver);
+
+MODULE_DESCRIPTION("Envelope detector using a DAC and a comparator");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2b1f16..3b7c4f78f37a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -238,7 +238,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
- MAX1027_NOSCAN | !!(chan->type == IIO_TEMP);
+ MAX1027_NOSCAN;
+ if (chan->type == IIO_TEMP)
+ st->reg |= MAX1027_TEMP;
ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
dev_err(&indio_dev->dev,
@@ -360,17 +362,6 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
return 0;
}
-static int max1027_validate_device(struct iio_trigger *trig,
- struct iio_dev *indio_dev)
-{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
- if (indio != indio_dev)
- return -EINVAL;
-
- return 0;
-}
-
static irqreturn_t max1027_trigger_handler(int irq, void *private)
{
struct iio_poll_func *pf = (struct iio_poll_func *)private;
@@ -391,7 +382,7 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
static const struct iio_trigger_ops max1027_trigger_ops = {
.owner = THIS_MODULE,
- .validate_device = &max1027_validate_device,
+ .validate_device = &iio_trigger_validate_own_device,
.set_trigger_state = &max1027_set_trigger_state,
};
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
new file mode 100644
index 000000000000..4214b0cd6b1b
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -0,0 +1,303 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * Inspired from: fsl-imx25-tsadc
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3 BIT(17)
+#define STM32F4_EOC2 BIT(9)
+#define STM32F4_EOC1 BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT 16
+#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
+
+/* STM32 F4 maximum analog clock rate (from datasheet) */
+#define STM32F4_ADC_MAX_CLK_RATE 36000000
+
+/**
+ * struct stm32_adc_priv - stm32 ADC core private data
+ * @irq: irq for ADC block
+ * @domain: irq domain reference
+ * @aclk: clock reference for the analog circuitry
+ * @vref: regulator reference
+ * @common: common data for all ADC instances
+ */
+struct stm32_adc_priv {
+ int irq;
+ struct irq_domain *domain;
+ struct clk *aclk;
+ struct regulator *vref;
+ struct stm32_adc_common common;
+};
+
+static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
+{
+ return container_of(com, struct stm32_adc_priv, common);
+}
+
+/* STM32F4 ADC internal common clock prescaler division ratios */
+static int stm32f4_pclk_div[] = {2, 4, 6, 8};
+
+/**
+ * stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @priv: stm32 ADC core private data
+ * Select clock prescaler used for analog conversions, before using ADC.
+ */
+static int stm32f4_adc_clk_sel(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ unsigned long rate;
+ u32 val;
+ int i;
+
+ rate = clk_get_rate(priv->aclk);
+ for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
+ if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
+ break;
+ }
+ if (i >= ARRAY_SIZE(stm32f4_pclk_div))
+ return -EINVAL;
+
+ val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
+ val &= ~STM32F4_ADC_ADCPRE_MASK;
+ val |= i << STM32F4_ADC_ADCPRE_SHIFT;
+ writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
+
+ dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
+ rate / (stm32f4_pclk_div[i] * 1000));
+
+ return 0;
+}
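The selection above simply walks the divider table until the analog clock drops to 36 MHz or below; a standalone sketch of the same loop (the 84 MHz input clock is an assumed example, not something this patch states):

#include <stdio.h>

int main(void)
{
	static const int div[] = { 2, 4, 6, 8 };	/* ADCPRE ratios */
	const unsigned long max_rate = 36000000;	/* datasheet limit */
	unsigned long pclk = 84000000;			/* assumed APB2 clock */
	unsigned int i;

	for (i = 0; i < sizeof(div) / sizeof(div[0]); i++) {
		if (pclk / div[i] <= max_rate) {
			printf("ADCPRE index %u -> %lu Hz\n", i, pclk / div[i]);
			return 0;
		}
	}
	printf("no suitable prescaler\n");
	return 1;
}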
+
+/* ADC common interrupt for all instances */
+static void stm32_adc_irq_handler(struct irq_desc *desc)
+{
+ struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
+
+ if (status & STM32F4_EOC1)
+ generic_handle_irq(irq_find_mapping(priv->domain, 0));
+
+ if (status & STM32F4_EOC2)
+ generic_handle_irq(irq_find_mapping(priv->domain, 1));
+
+ if (status & STM32F4_EOC3)
+ generic_handle_irq(irq_find_mapping(priv->domain, 2));
+
+ chained_irq_exit(chip, desc);
+};
+
+static int stm32_adc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static void stm32_adc_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops stm32_adc_domain_ops = {
+ .map = stm32_adc_domain_map,
+ .unmap = stm32_adc_domain_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int stm32_adc_irq_probe(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return priv->irq;
+ }
+
+ priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
+ if (!priv->domain) {
+ dev_err(&pdev->dev, "Failed to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
+ irq_set_handler_data(priv->irq, priv);
+
+ return 0;
+}
+
+static void stm32_adc_irq_remove(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ int hwirq;
+
+ for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
+ irq_domain_remove(priv->domain);
+ irq_set_chained_handler(priv->irq, NULL);
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct stm32_adc_priv *priv;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->common.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->common.base))
+ return PTR_ERR(priv->common.base);
+
+ priv->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(priv->vref)) {
+ ret = PTR_ERR(priv->vref);
+ dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref enable failed\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(priv->vref);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
+ goto err_regulator_disable;
+ }
+ priv->common.vref_mv = ret / 1000;
+ dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+
+ priv->aclk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(priv->aclk)) {
+ ret = PTR_ERR(priv->aclk);
+ dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+ goto err_regulator_disable;
+ }
+
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk enable failed\n");
+ goto err_regulator_disable;
+ }
+
+ ret = stm32f4_adc_clk_sel(pdev, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "adc clk selection failed\n");
+ goto err_clk_disable;
+ }
+
+ ret = stm32_adc_irq_probe(pdev, priv);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ platform_set_drvdata(pdev, &priv->common);
+
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to populate DT children\n");
+ goto err_irq_remove;
+ }
+
+ return 0;
+
+err_irq_remove:
+ stm32_adc_irq_remove(pdev, priv);
+
+err_clk_disable:
+ clk_disable_unprepare(priv->aclk);
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc_common *common = platform_get_drvdata(pdev);
+ struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
+
+ of_platform_depopulate(&pdev->dev);
+ stm32_adc_irq_remove(pdev, priv);
+ clk_disable_unprepare(priv->aclk);
+ regulator_disable(priv->vref);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc-core" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc-core",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC core driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc-core");
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
new file mode 100644
index 000000000000..081fa5f55015
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __STM32_ADC_H
+#define __STM32_ADC_H
+
+/*
+ * STM32 - ADC global register map
+ * ________________________________________________________
+ * | Offset | Register |
+ * --------------------------------------------------------
+ * | 0x000 | Master ADC1 |
+ * --------------------------------------------------------
+ * | 0x100 | Slave ADC2 |
+ * --------------------------------------------------------
+ * | 0x200 | Slave ADC3 |
+ * --------------------------------------------------------
+ * | 0x300 | Master & Slave common regs |
+ * --------------------------------------------------------
+ */
+#define STM32_ADC_MAX_ADCS 3
+#define STM32_ADCX_COMN_OFFSET 0x300
+
+/**
+ * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @vref_mv: vref voltage (mv)
+ */
+struct stm32_adc_common {
+ void __iomem *base;
+ int vref_mv;
+};
+
+#endif
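A quick sketch of how the per-instance accessors in stm32-adc.c use this map: an instance register address is simply base + instance offset + register offset. The 0x40012000 base below is an assumed example value, not something this header defines.

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x40012000UL;	/* assumed ADC block base */
	unsigned long adc2 = 0x100UL;		/* Slave ADC2 offset */
	unsigned long comn = 0x300UL;		/* Master & Slave common regs */

	printf("ADC2 SR at 0x%lx\n", base + adc2 + 0x00UL);	/* status reg */
	printf("ADC CSR at 0x%lx\n", base + comn + 0x00UL);	/* common status */
	return 0;
}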
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
new file mode 100644
index 000000000000..5715e79f4935
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc.c
@@ -0,0 +1,518 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR 0x00
+#define STM32F4_ADC_CR1 0x04
+#define STM32F4_ADC_CR2 0x08
+#define STM32F4_ADC_SMPR1 0x0C
+#define STM32F4_ADC_SMPR2 0x10
+#define STM32F4_ADC_HTR 0x24
+#define STM32F4_ADC_LTR 0x28
+#define STM32F4_ADC_SQR1 0x2C
+#define STM32F4_ADC_SQR2 0x30
+#define STM32F4_ADC_SQR3 0x34
+#define STM32F4_ADC_JSQR 0x38
+#define STM32F4_ADC_JDR1 0x3C
+#define STM32F4_ADC_JDR2 0x40
+#define STM32F4_ADC_JDR3 0x44
+#define STM32F4_ADC_JDR4 0x48
+#define STM32F4_ADC_DR 0x4C
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT BIT(4)
+#define STM32F4_EOC BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_SCAN BIT(8)
+#define STM32F4_EOCIE BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART BIT(30)
+#define STM32F4_EXTEN_MASK GENMASK(29, 28)
+#define STM32F4_EOCS BIT(10)
+#define STM32F4_ADON BIT(0)
+
+/* STM32F4_ADC_SQR1 - bit fields */
+#define STM32F4_L_SHIFT 20
+#define STM32F4_L_MASK GENMASK(23, 20)
+
+/* STM32F4_ADC_SQR3 - bit fields */
+#define STM32F4_SQ1_SHIFT 0
+#define STM32F4_SQ1_MASK GENMASK(4, 0)
+
+#define STM32_ADC_TIMEOUT_US 100000
+#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
+
+/**
+ * struct stm32_adc - private data of each ADC IIO instance
+ * @common: reference to ADC block common data
+ * @offset: ADC instance register offset in ADC block
+ * @completion: end of single conversion completion
+ * @buffer: data buffer
+ * @clk: clock for this adc instance
+ * @irq: interrupt for this adc instance
+ * @lock: spinlock
+ */
+struct stm32_adc {
+ struct stm32_adc_common *common;
+ u32 offset;
+ struct completion completion;
+ u16 *buffer;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock; /* interrupt lock */
+};
+
+/**
+ * struct stm32_adc_chan_spec - specification of stm32 adc channel
+ * @type: IIO channel type
+ * @channel: channel number (single ended)
+ * @name: channel name (single ended)
+ */
+struct stm32_adc_chan_spec {
+ enum iio_chan_type type;
+ int channel;
+ const char *name;
+};
+
+/* Input definitions common for all STM32F4 instances */
+static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
+ { IIO_VOLTAGE, 0, "in0" },
+ { IIO_VOLTAGE, 1, "in1" },
+ { IIO_VOLTAGE, 2, "in2" },
+ { IIO_VOLTAGE, 3, "in3" },
+ { IIO_VOLTAGE, 4, "in4" },
+ { IIO_VOLTAGE, 5, "in5" },
+ { IIO_VOLTAGE, 6, "in6" },
+ { IIO_VOLTAGE, 7, "in7" },
+ { IIO_VOLTAGE, 8, "in8" },
+ { IIO_VOLTAGE, 9, "in9" },
+ { IIO_VOLTAGE, 10, "in10" },
+ { IIO_VOLTAGE, 11, "in11" },
+ { IIO_VOLTAGE, 12, "in12" },
+ { IIO_VOLTAGE, 13, "in13" },
+ { IIO_VOLTAGE, 14, "in14" },
+ { IIO_VOLTAGE, 15, "in15" },
+};
+
+/**
+ * STM32 ADC register access routines
+ * @adc: stm32 adc instance
+ * @reg: register offset within the adc instance
+ *
+ * Note: all instances share the same base; adc1, adc2 and adc3 sit at
+ * offsets 0x0, 0x100 and 0x200 respectively.
+ */
+static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
+{
+ return readl_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
+{
+ return readw_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
+{
+ writel_relaxed(val, adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
+ stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
+ spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+/**
+ * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+};
+
+/**
+ * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+}
+
+/**
+ * stm32_adc_start_conv() - Start conversions for regular channels.
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_start_conv(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
+
+ /* Wait for Power-up time (tSTAB from datasheet) */
+ usleep_range(2, 3);
+
+ /* Software start? (i.e. trigger detection disabled) */
+ if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
+ stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
+}
+
+static void stm32_adc_stop_conv(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
+
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_ADON);
+}
+
+/**
+ * stm32_adc_single_conv() - Performs a single conversion
+ * @indio_dev: IIO device
+ * @chan: IIO channel
+ * @res: conversion result
+ *
+ * The function performs a single conversion on a given channel:
+ * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
+ * - Use SW trigger
+ * - Start conversion, then wait for interrupt completion.
+ */
+static int stm32_adc_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *res)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ u32 val;
+ u16 result;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = &result;
+
+ /* Program chan number in regular sequence */
+ val = stm32_adc_readl(adc, STM32F4_ADC_SQR3);
+ val &= ~STM32F4_SQ1_MASK;
+ val |= chan->channel << STM32F4_SQ1_SHIFT;
+ stm32_adc_writel(adc, STM32F4_ADC_SQR3, val);
+
+ /* Set regular sequence len (0 for 1 conversion) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_SQR1, STM32F4_L_MASK);
+
+ /* Trigger detection disabled (conversion can be launched in SW) */
+ stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+
+ stm32_adc_conv_irq_enable(adc);
+
+ stm32_adc_start_conv(adc);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &adc->completion, STM32_ADC_TIMEOUT);
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else if (timeout < 0) {
+ ret = timeout;
+ } else {
+ *res = result;
+ ret = IIO_VAL_INT;
+ }
+
+ stm32_adc_stop_conv(adc);
+
+ stm32_adc_conv_irq_disable(adc);
+
+ return ret;
+}
+
+static int stm32_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ if (chan->type == IIO_VOLTAGE)
+ ret = stm32_adc_single_conv(indio_dev, chan, val);
+ else
+ ret = -EINVAL;
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = adc->common->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t stm32_adc_isr(int irq, void *data)
+{
+ struct stm32_adc *adc = data;
+ u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
+
+ if (status & STM32F4_EOC) {
+ *adc->buffer = stm32_adc_readw(adc, STM32F4_ADC_DR);
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * stm32_adc_debugfs_reg_access - read or write register value
+ *
+ * To read a value from an ADC register:
+ * echo [ADC reg offset] > direct_reg_access
+ * cat direct_reg_access
+ *
+ * To write a value to an ADC register:
+ * echo [ADC_reg_offset] [value] > direct_reg_access
+ */
+static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ if (!readval)
+ stm32_adc_writel(adc, reg, writeval);
+ else
+ *readval = stm32_adc_readl(adc, reg);
+
+ return 0;
+}
+
+static const struct iio_info stm32_adc_iio_info = {
+ .read_raw = stm32_adc_read_raw,
+ .debugfs_reg_access = stm32_adc_debugfs_reg_access,
+ .of_xlate = stm32_adc_of_xlate,
+ .driver_module = THIS_MODULE,
+};
+
+static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *chan,
+ const struct stm32_adc_chan_spec *channel,
+ int scan_index)
+{
+ chan->type = channel->type;
+ chan->channel = channel->channel;
+ chan->datasheet_name = channel->name;
+ chan->scan_index = scan_index;
+ chan->indexed = 1;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+}
+
+static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ struct property *prop;
+ const __be32 *cur;
+ struct iio_chan_spec *channels;
+ int scan_index = 0, num_channels;
+ u32 val;
+
+ num_channels = of_property_count_u32_elems(node, "st,adc-channels");
+ if (num_channels < 0 ||
+ num_channels >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
+ return num_channels < 0 ? num_channels : -EINVAL;
+ }
+
+ channels = devm_kcalloc(&indio_dev->dev, num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
+ if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ return -EINVAL;
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ &stm32f4_adc123_channels[val],
+ scan_index);
+ scan_index++;
+ }
+
+ indio_dev->num_channels = scan_index;
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct stm32_adc *adc;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->common = dev_get_drvdata(pdev->dev.parent);
+ spin_lock_init(&adc->lock);
+ init_completion(&adc->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ adc->irq = platform_get_irq(pdev, 0);
+ if (adc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return adc->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+ 0, pdev->name, adc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ adc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(adc->clk)) {
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ ret = stm32_adc_chan_of_init(indio_dev);
+ if (ret < 0)
+ goto err_clk_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio dev register failed\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(adc->clk);
+
+ return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(adc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+ { .compatible = "st,stm32f4-adc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+ .probe = stm32_adc_probe,
+ .remove = stm32_adc_remove,
+ .driver = {
+ .name = "stm32-adc",
+ .of_match_table = stm32_adc_of_match,
+ },
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc");
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index f4ba23effe9a..e952e94a14af 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -14,6 +14,10 @@
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
enum {
adc0831,
@@ -38,10 +42,16 @@ struct adc0832 {
.indexed = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = chan, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
-#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -49,18 +59,26 @@ struct adc0832 {
.channel2 = (chan2), \
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
}
static const struct iio_chan_spec adc0831_channels[] = {
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
};
static const struct iio_chan_spec adc0832_channels[] = {
ADC0832_VOLTAGE_CHANNEL(0),
ADC0832_VOLTAGE_CHANNEL(1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct iio_chan_spec adc0834_channels[] = {
@@ -68,10 +86,11 @@ static const struct iio_chan_spec adc0834_channels[] = {
ADC0832_VOLTAGE_CHANNEL(1),
ADC0832_VOLTAGE_CHANNEL(2),
ADC0832_VOLTAGE_CHANNEL(3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 4),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 5),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
};
static const struct iio_chan_spec adc0838_channels[] = {
@@ -83,14 +102,15 @@ static const struct iio_chan_spec adc0838_channels[] = {
ADC0832_VOLTAGE_CHANNEL(5),
ADC0832_VOLTAGE_CHANNEL(6),
ADC0832_VOLTAGE_CHANNEL(7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
- ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
- ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
- ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
- ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
- ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
- ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
- ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
};
static int adc0831_adc_conversion(struct adc0832 *adc)
@@ -178,6 +198,42 @@ static const struct iio_info adc0832_info = {
.driver_module = THIS_MODULE,
};
+static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc0832 *adc = iio_priv(indio_dev);
+ u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+ int scan_index;
+ int i = 0;
+
+ mutex_lock(&adc->lock);
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ int ret = adc0832_adc_conversion(adc, scan_chan->channel,
+ scan_chan->differential);
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ data[i] = ret;
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int adc0832_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -233,9 +289,20 @@ static int adc0832_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ adc0832_trigger_handler, NULL);
+ if (ret)
+ goto err_reg_disable;
+
ret = iio_device_register(indio_dev);
if (ret)
- regulator_disable(adc->reg);
+ goto err_buffer_cleanup;
+
+ return 0;
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_reg_disable:
+ regulator_disable(adc->reg);
return ret;
}
@@ -246,6 +313,7 @@ static int adc0832_remove(struct spi_device *spi)
struct adc0832 *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(adc->reg);
return 0;
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f94b69f9c288..4836a0d7aef5 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -27,6 +27,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/regulator/consumer.h>
#define TI_ADC_DRV_NAME "ti-adc161s626"
@@ -39,7 +40,9 @@ static const struct iio_chan_spec ti_adc141s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -54,7 +57,9 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
{
.type = IIO_VOLTAGE,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -68,6 +73,8 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
struct ti_adc_data {
struct iio_dev *indio_dev;
struct spi_device *spi;
+ struct regulator *ref;
+
u8 read_size;
u8 shift;
@@ -135,18 +142,32 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
struct ti_adc_data *data = iio_priv(indio_dev);
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
- return -EINVAL;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
- ret = ti_adc_read_measurement(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (!ret)
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(data->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 1 << (chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ }
return 0;
}
@@ -191,10 +212,17 @@ static int ti_adc_probe(struct spi_device *spi)
break;
}
+ data->ref = devm_regulator_get(&spi->dev, "vdda");
+ if (!IS_ERR(data->ref)) {
+ ret = regulator_enable(data->ref);
+ if (ret < 0)
+ return ret;
+ }
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
ti_adc_trigger_handler, NULL);
if (ret)
- return ret;
+ goto error_regulator_disable;
ret = iio_device_register(indio_dev);
if (ret)
@@ -205,15 +233,20 @@ static int ti_adc_probe(struct spi_device *spi)
error_unreg_buffer:
iio_triggered_buffer_cleanup(indio_dev);
+error_regulator_disable:
+ regulator_disable(data->ref);
+
return ret;
}
static int ti_adc_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ti_adc_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(data->ref);
return 0;
}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacca2541..ad9dec30bb30 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -30,10 +30,28 @@
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define DMA_BUFFER_SIZE SZ_2K
+
+struct tiadc_dma {
+ struct dma_slave_config conf;
+ struct dma_chan *chan;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ u8 *buf;
+ int current_period;
+ int period_size;
+ u8 fifo_thresh;
+};
+
struct tiadc_device {
struct ti_tscadc_dev *mfd_tscadc;
+ struct tiadc_dma dma;
struct mutex fifo1_lock; /* to protect fifo access */
int channels;
+ int total_ch_enabled;
u8 channel_line[8];
u8 channel_step[8];
int buffer_en_ch_steps;
@@ -198,6 +216,67 @@ static irqreturn_t tiadc_worker_h(int irq, void *private)
return IRQ_HANDLED;
}
+static void tiadc_dma_rx_complete(void *param)
+{
+ struct iio_dev *indio_dev = param;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ u8 *data;
+ int i;
+
+ data = dma->buf + dma->current_period * dma->period_size;
+ dma->current_period = 1 - dma->current_period; /* swap the buffer ID */
+
+ for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
+ iio_push_to_buffers(indio_dev, data);
+ data += indio_dev->scan_bytes;
+ }
+}
+
+static int tiadc_start_dma(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ struct dma_async_tx_descriptor *desc;
+
+ dma->current_period = 0; /* We start to fill period 0 */
+ /*
+ * Make the FIFO threshold a multiple of the total number of
+ * channels enabled, so that the cyclic DMA period length is
+ * also a multiple of the number of channels enabled. This
+ * ensures that no invalid data is reported
+ * to the stack via iio_push_to_buffers().
+ */
+ dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
+ adc_dev->total_ch_enabled) - 1;
+ /* Make sure that the period length is a multiple of the FIFO threshold */
+ dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
+ (dma->fifo_thresh + 1) * sizeof(u16));
+
+ dma->conf.src_maxburst = dma->fifo_thresh + 1;
+ dmaengine_slave_config(dma->chan, &dma->conf);
+
+ desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
+ dma->period_size * 2,
+ dma->period_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = tiadc_dma_rx_complete;
+ desc->callback_param = indio_dev;
+
+ dma->cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(dma->chan);
+
+ tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
+ tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);
+
+ return 0;
+}
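To make the comment above concrete, here is a standalone sketch of the threshold and period arithmetic, assuming FIFO1_THRESHOLD is 19 and three channels are enabled (both values are assumptions for illustration, not taken from this patch):

#include <stdio.h>

#define DMA_BUFFER_SIZE	2048				/* SZ_2K */
#define rounddown(x, y)	(((x) / (y)) * (y))		/* simplified kernel helper */

int main(void)
{
	unsigned int fifo1_threshold = 19;	/* assumed platform threshold */
	unsigned int total_ch_enabled = 3;	/* assumed number of scanned channels */
	unsigned int fifo_thresh, period_size;

	fifo_thresh = rounddown(fifo1_threshold + 1, total_ch_enabled) - 1;
	period_size = rounddown(DMA_BUFFER_SIZE / 2,
				(fifo_thresh + 1) * sizeof(unsigned short));

	/* 17 and 1008 here: both line up with the 3-channel scan size */
	printf("fifo_thresh = %u, period_size = %u\n", fifo_thresh, period_size);
	return 0;
}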
+
static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -218,20 +297,30 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
+ unsigned int irq_enable;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
- for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
+ for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+ adc_dev->total_ch_enabled++;
+ }
adc_dev->buffer_en_ch_steps = enb;
+ if (dma->chan)
+ tiadc_start_dma(indio_dev);
+
am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
- tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES
- | IRQENB_FIFO1OVRRUN);
+
+ irq_enable = IRQENB_FIFO1OVRRUN;
+ if (!dma->chan)
+ irq_enable |= IRQENB_FIFO1THRES;
+ tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);
return 0;
}
@@ -239,12 +328,18 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
int fifo1count, i, read;
tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
adc_dev->buffer_en_ch_steps = 0;
+ adc_dev->total_ch_enabled = 0;
+ if (dma->chan) {
+ tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
+ dmaengine_terminate_async(dma->chan);
+ }
/* Flush FIFO of leftover data in the time it takes to disable adc */
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -430,6 +525,41 @@ static const struct iio_info tiadc_info = {
.driver_module = THIS_MODULE,
};
+static int tiadc_request_dma(struct platform_device *pdev,
+ struct tiadc_device *adc_dev)
+{
+ struct tiadc_dma *dma = &adc_dev->dma;
+ dma_cap_mask_t mask;
+
+ /* Default slave configuration parameters */
+ dma->conf.direction = DMA_DEV_TO_MEM;
+ dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ /* Get a channel for RX */
+ dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
+ if (IS_ERR(dma->chan)) {
+ int ret = PTR_ERR(dma->chan);
+
+ dma->chan = NULL;
+ return ret;
+ }
+
+ /* RX buffer */
+ dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ &dma->addr, GFP_KERNEL);
+ if (!dma->buf)
+ goto err;
+
+ return 0;
+err:
+ dma_release_channel(dma->chan);
+ return -ENOMEM;
+}
+
static int tiadc_parse_dt(struct platform_device *pdev,
struct tiadc_device *adc_dev)
{
@@ -512,8 +642,14 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
+ err = tiadc_request_dma(pdev, adc_dev);
+ if (err && err == -EPROBE_DEFER)
+ goto err_dma;
+
return 0;
+err_dma:
+ iio_device_unregister(indio_dev);
err_buffer_unregister:
tiadc_iio_buffered_hardware_remove(indio_dev);
err_free_channels:
@@ -525,8 +661,14 @@ static int tiadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct tiadc_dma *dma = &adc_dev->dma;
u32 step_en;
+ if (dma->chan) {
+ dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+ dma->buf, dma->addr);
+ dma_release_channel(dma->chan);
+ }
iio_device_unregister(indio_dev);
tiadc_iio_buffered_hardware_remove(indio_dev);
tiadc_channels_remove(indio_dev);
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 26a6026de614..e108996a9627 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -2,6 +2,7 @@
# IIO common modules
#
+source "drivers/iio/common/cros_ec_sensors/Kconfig"
source "drivers/iio/common/hid-sensors/Kconfig"
source "drivers/iio/common/ms_sensors/Kconfig"
source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 585da6a1b188..6fa760e1bdd5 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,6 +7,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-y += cros_ec_sensors/
obj-y += hid-sensors/
obj-y += ms_sensors/
obj-y += ssp_sensors/
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
new file mode 100644
index 000000000000..135f6825903f
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -0,0 +1,22 @@
+#
+# Chrome OS Embedded Controller managed sensors library
+#
+config IIO_CROS_EC_SENSORS_CORE
+ tristate "ChromeOS EC Sensors Core"
+ depends on SYSFS && MFD_CROS_EC
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Base module for the ChromeOS EC Sensors module.
+ Contains core functions used by other IIO CrosEC sensor
+ drivers.
+ Defines common attributes and a sysfs interrupt handler.
+
+config IIO_CROS_EC_SENSORS
+ tristate "ChromeOS EC Contiguous Sensors"
+ depends on IIO_CROS_EC_SENSORS_CORE
+ help
+ Module to handle 3d contiguous sensors like
+ accelerometers, gyroscopes and magnetometers that are
+ presented by the ChromeOS EC sensor hub.
+ Creates an IIO device for each function.
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
new file mode 100644
index 000000000000..ec716ff2a775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for sensors seen through the ChromeOS EC sensor hub.
+#
+
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
new file mode 100644
index 000000000000..d6c372bb433b
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -0,0 +1,322 @@
+/*
+ * cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the cros-ec interface to communicate with the Chrome OS
+ * EC about sensor data. Data access is presented through iio sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "cros_ec_sensors_core.h"
+
+#define CROS_EC_SENSORS_MAX_CHANNELS 4
+
+/* State data for ec_sensors iio driver. */
+struct cros_ec_sensors_state {
+ /* Shared by all sensors */
+ struct cros_ec_sensors_core_state core;
+
+ struct iio_chan_spec channels[CROS_EC_SENSORS_MAX_CHANNELS];
+};
+
+static int cros_ec_sensors_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ s16 data = 0;
+ s64 val64;
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
+ if (ret < 0)
+ break;
+
+ *val = data;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags = 0;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.calib[i] =
+ st->core.resp->sensor_offset.offset[i];
+
+ *val = st->core.calib[idx];
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret < 0)
+ break;
+
+ val64 = st->core.resp->sensor_range.ret;
+ switch (st->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ /*
+ * EC returns data in g, iio expects m/s^2.
+ * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
+ */
+ *val = div_s64(val64 * 980665, 10);
+ *val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ /*
+ * EC returns data in dps, iio expects rad/s.
+ * Do not use IIO_DEGREE_TO_RAD to avoid precision
+ * loss. Round to the nearest integer.
+ */
+ *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+ *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ /*
+ * EC returns data in 16LSB / uT,
+ * iio expects Gauss
+ */
+ *val = val64;
+ *val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
+ mask);
+ break;
+ }
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
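As a worked example of the accelerometer branch of the SCALE case, assuming the EC reports a range of 2 g (the real range is queried from the EC at run time), user space ends up with val/val2 of roughly 0.000599 m/s^2 per LSB:

#include <stdio.h>

int main(void)
{
	long long range_g = 2;				/* assumed EC-reported range */
	long long val = range_g * 980665 / 10;		/* 196133 */
	long long val2 = 10000LL << (16 - 1);		/* 327680000, 16 = CROS_EC_SENSOR_BITS */

	/* IIO_VAL_FRACTIONAL: reported scale is val / val2 */
	printf("scale = %.9f m/s^2 per LSB\n", (double)val / val2);
	return 0;
}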
+
+static int cros_ec_sensors_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+ int i;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.calib[idx] = val;
+
+ /* Send to EC for each axis, even if not complete */
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags =
+ MOTION_SENSE_SET_OFFSET;
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.param.sensor_offset.offset[i] =
+ st->core.calib[i];
+ st->core.param.sensor_offset.temp =
+ EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (st->core.type == MOTIONSENSE_TYPE_MAG) {
+ ret = -EINVAL;
+ break;
+ }
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = val;
+
+ /* Always roundup, so caller gets at least what it asks for. */
+ st->core.param.sensor_range.roundup = 1;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
+ default:
+ ret = cros_ec_sensors_core_write(
+ &st->core, chan, val, val2, mask);
+ break;
+ }
+
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static const struct iio_info ec_sensors_info = {
+ .read_raw = &cros_ec_sensors_read,
+ .write_raw = &cros_ec_sensors_write,
+ .driver_module = THIS_MODULE,
+};
+
+static int cros_ec_sensors_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *ec_device;
+ struct iio_dev *indio_dev;
+ struct cros_ec_sensors_state *state;
+ struct iio_chan_spec *channel;
+ int ret, i;
+
+ if (!ec_dev || !ec_dev->ec_dev) {
+ dev_warn(&pdev->dev, "No CROS EC device found.\n");
+ return -EINVAL;
+ }
+ ec_device = ec_dev->ec_dev;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &ec_sensors_info;
+ state = iio_priv(indio_dev);
+ for (channel = state->channels, i = CROS_EC_SENSOR_X;
+ i < CROS_EC_SENSOR_MAX_AXIS; i++, channel++) {
+ /* Common part */
+ channel->info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS);
+ channel->info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+ channel->scan_index = i;
+ channel->ext_info = cros_ec_sensors_ext_info;
+ channel->modified = 1;
+ channel->channel2 = IIO_MOD_X + i;
+ channel->scan_type.sign = 's';
+
+ /* Sensor specific */
+ switch (state->core.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ channel->type = IIO_ACCEL;
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ channel->type = IIO_ANGL_VEL;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ channel->type = IIO_MAGN;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown motion sensor\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Timestamp */
+ channel->type = IIO_TIMESTAMP;
+ channel->channel = -1;
+ channel->scan_index = CROS_EC_SENSOR_MAX_AXIS;
+ channel->scan_type.sign = 's';
+ channel->scan_type.realbits = 64;
+ channel->scan_type.storagebits = 64;
+
+ indio_dev->channels = state->channels;
+ indio_dev->num_channels = CROS_EC_SENSORS_MAX_CHANNELS;
+
+ /* There is only enough room for accel and gyro in the io space */
+ if ((state->core.ec->cmd_readmem != NULL) &&
+ (state->core.type != MOTIONSENSE_TYPE_MAG))
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_lpc;
+ else
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ cros_ec_sensors_capture, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_uninit_buffer;
+
+ return 0;
+
+error_uninit_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int cros_ec_sensors_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id cros_ec_sensors_ids[] = {
+ {
+ .name = "cros-ec-accel",
+ },
+ {
+ .name = "cros-ec-gyro",
+ },
+ {
+ .name = "cros-ec-mag",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
+
+static struct platform_driver cros_ec_sensors_platform_driver = {
+ .driver = {
+ .name = "cros-ec-sensors",
+ },
+ .probe = cros_ec_sensors_probe,
+ .remove = cros_ec_sensors_remove,
+ .id_table = cros_ec_sensors_ids,
+};
+module_platform_driver(cros_ec_sensors_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC 3-axis sensors driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
new file mode 100644
index 000000000000..416cae5ebbd0
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -0,0 +1,450 @@
+/*
+ * cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "cros_ec_sensors_core.h"
+
+static char *cros_ec_loc[] = {
+ [MOTIONSENSE_LOC_BASE] = "base",
+ [MOTIONSENSE_LOC_LID] = "lid",
+ [MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev,
+ bool physical_device)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
+ struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ state->ec = ec->ec_dev;
+ state->msg = devm_kzalloc(&pdev->dev,
+ max((u16)sizeof(struct ec_params_motion_sense),
+ state->ec->max_response), GFP_KERNEL);
+ if (!state->msg)
+ return -ENOMEM;
+
+ state->resp = (struct ec_response_motion_sense *)state->msg->data;
+
+ mutex_init(&state->cmd_lock);
+
+ /* Set up the host command structure. */
+ state->msg->version = 2;
+ state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ state->msg->outsize = sizeof(struct ec_params_motion_sense);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = pdev->name;
+
+ if (physical_device) {
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ state->param.cmd = MOTIONSENSE_CMD_INFO;
+ state->param.info.sensor_num = sensor_platform->sensor_num;
+ if (cros_ec_motion_send_host_cmd(state, 0)) {
+ dev_warn(dev, "Can not access sensor info\n");
+ return -EIO;
+ }
+ state->type = state->resp->info.type;
+ state->loc = state->resp->info.location;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
+ u16 opt_length)
+{
+ int ret;
+
+ if (opt_length)
+ state->msg->insize = min(opt_length, state->ec->max_response);
+ else
+ state->msg->insize = state->ec->max_response;
+
+ memcpy(state->msg->data, &state->param, sizeof(state->param));
+
+ ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+ if (ret < 0)
+ return -EIO;
+
+ if (ret &&
+ state->resp != (struct ec_response_motion_sense *)state->msg->data)
+ memcpy(state->resp, state->msg->data, ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
+
+static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret, i;
+ bool calibrate;
+
+ ret = strtobool(buf, &calibrate);
+ if (ret < 0)
+ return ret;
+ if (!calibrate)
+ return -EINVAL;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
+ } else {
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->calib[i] = st->resp->perform_calib.offset[i];
+ }
+ mutex_unlock(&st->cmd_lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
+}
+
+const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
+ {
+ .name = "calibrate",
+ .shared = IIO_SHARED_BY_ALL,
+ .write = cros_ec_sensors_calibrate
+ },
+ {
+ .name = "location",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_sensors_loc
+ },
+ { },
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
+
+/**
+ * cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
+ * @st: pointer to state information for device
+ * @idx: sensor index (should be element of enum sensor_index)
+ *
+ * Return: address to read at
+ */
+static unsigned int cros_ec_sensors_idx_to_reg(
+ struct cros_ec_sensors_core_state *st,
+ unsigned int idx)
+{
+ /*
+ * When using the LPC interface, there is only space for two accelerometers
+ * and one gyroscope. The first halfword of the MOTIONSENSE_TYPE_ACCEL area
+ * is used by the angle value.
+ */
+ if (st->type == MOTIONSENSE_TYPE_ACCEL)
+ return EC_MEMMAP_ACC_DATA + sizeof(u16) *
+ (1 + idx + st->param.info.sensor_num *
+ CROS_EC_SENSOR_MAX_AXIS);
+
+ return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
+}
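A sketch of the offsets this computes, assuming the usual EC memory-map values EC_MEMMAP_ACC_DATA = 0x92 and EC_MEMMAP_GYRO_DATA = 0xa0 (constants taken from cros_ec_commands.h and assumed here for illustration):

#include <stdio.h>

#define EC_MEMMAP_ACC_DATA	0x92	/* assumed memory-map offsets */
#define EC_MEMMAP_GYRO_DATA	0xa0

int main(void)
{
	unsigned int sensor_num = 0;	/* hypothetical first accelerometer */
	unsigned int idx;

	for (idx = 0; idx < 3; idx++)	/* X, Y, Z */
		printf("accel%u axis %u at 0x%02x, gyro axis %u at 0x%02x\n",
		       sensor_num, idx,
		       EC_MEMMAP_ACC_DATA + 2 * (1 + idx + sensor_num * 3),
		       idx, EC_MEMMAP_GYRO_DATA + 2 * idx);
	return 0;
}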
+
+static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
+ unsigned int offset, u8 *dest)
+{
+ return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
+ unsigned int offset, u16 *dest)
+{
+ __le16 tmp;
+ int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+ if (ret >= 0)
+ *dest = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+/**
+ * cros_ec_sensors_read_until_not_busy() - read until is not busy
+ *
+ * @st: pointer to state information for device
+ *
+ * Read from EC status byte until it reads not busy.
+ * Return: 8-bit status if ok, -errno on failure.
+ */
+static int cros_ec_sensors_read_until_not_busy(
+ struct cros_ec_sensors_core_state *st)
+{
+ struct cros_ec_device *ec = st->ec;
+ u8 status;
+ int ret, attempts = 0;
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+ /* Give up after enough attempts, return error. */
+ if (attempts++ >= 50)
+ return -EIO;
+
+ /* Small delay every so often. */
+ if (attempts % 5 == 0)
+ msleep(25);
+
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return status;
+}
+
+/**
+ * read_ec_sensors_data_unsafe() - read acceleration data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ unsigned int i;
+ int ret;
+
+ /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ ret = cros_ec_sensors_cmd_read_u16(ec,
+ cros_ec_sensors_idx_to_reg(st, i),
+ data);
+ if (ret < 0)
+ return ret;
+
+ data++;
+ }
+
+ return 0;
+}
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ struct cros_ec_device *ec = st->ec;
+ u8 samp_id = 0xff, status = 0;
+ int ret, attempts = 0;
+
+ /*
+ * Continually read all data from EC until the status byte after
+ * all reads reflects that the EC is not busy and the sample id
+ * matches the sample id from before all reads. This guarantees
+ * that data read in was not modified by the EC while reading.
+ */
+ while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+ EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+ /* If we have tried to read too many times, return error. */
+ if (attempts++ >= 5)
+ return -EIO;
+
+ /* Read status byte until EC is not busy. */
+ ret = cros_ec_sensors_read_until_not_busy(st);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Store the current sample id so that we can compare to the
+ * sample id after reading the data.
+ */
+ samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+ /* Read all EC data, format it, and store it into data. */
+ ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
+ data);
+ if (ret < 0)
+ return ret;
+
+ /* Read status byte. */
+ ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int i;
+
+ /* Read all sensor data through a command. */
+ st->param.cmd = MOTIONSENSE_CMD_DATA;
+ ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
+ return ret;
+ }
+
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ *data = st->resp->data.data[i];
+ data++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->cmd_lock);
+
+ /* Clear capture data. */
+ memset(st->samples, 0, indio_dev->scan_bytes);
+
+ /* Read data based on which channels are enabled in scan mask. */
+ ret = st->read_ec_sensors_data(indio_dev,
+ *(indio_dev->active_scan_mask),
+ (s16 *)st->samples);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ iio_get_time_ns(indio_dev));
+
+done:
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ mutex_unlock(&st->cmd_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = IIO_VAL_INT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->ec_rate.ret;
+ break;
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data =
+ EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ *val = st->resp->sensor_odr.ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+ st->param.sensor_odr.data = val;
+
+ /* Always roundup, so caller gets at least what it asks for. */
+ st->param.sensor_odr.roundup = 1;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = val;
+
+ if (cros_ec_motion_send_host_cmd(st, 0))
+ ret = -EIO;
+ else
+ st->curr_sampl_freq = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
+
+MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..8bc2ca3c2e2e
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
@@ -0,0 +1,175 @@
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/irqreturn.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * Four 16-bit channels are allowed.
+ * Good enough for current sensors, which use at most three 16-bit vectors.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
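A quick sanity check of that sizing: three 16-bit axes take 6 bytes, which pad to 8 so the trailing 8-byte timestamp stays aligned, giving the 16 bytes the macro expands to. A small sketch of the arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	size_t axes = 3 * sizeof(int16_t);		/* 6 bytes of sample data */
	size_t padded = (axes + 7) & ~(size_t)7;	/* padded to 8 for alignment */

	assert(padded + sizeof(int64_t) == 16);		/* matches sizeof(s64) * 2 */
	return 0;
}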
+
+/* Minimum sampling period to use when device is suspending */
+#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @loc: location where the motion sensor is placed
+ * @calib: calibration parameters. Note that trigger
+ * captured data will always provide the calibrated
+ * data
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @curr_sampl_freq: current sampling period
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+ enum motionsensor_location loc;
+
+ s16 calib[CROS_EC_SENSOR_MAX_AXIS];
+
+ u8 samples[CROS_EC_SAMPLE_SIZE];
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ int curr_sampl_freq;
+};
+
+/**
+ * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * This is the safe function for reading the EC data. It guarantees that the
+ * data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device);
+
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @st: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which values to be requested
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+/* List of extended channel specification for all sensors */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index dc33c1dd5191..7ef94a90ecf7 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -30,26 +30,26 @@ static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
int scale_val0; /* scale, whole number */
- int scale_val1; /* scale, fraction in micros */
+ int scale_val1; /* scale, fraction in nanos */
} unit_conversion[] = {
- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
{HID_USAGE_SENSOR_ACCEL_3D,
HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
{HID_USAGE_SENSOR_ACCEL_3D,
- HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_GYRO_3D,
HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
{HID_USAGE_SENSOR_GYRO_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
{HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
@@ -57,7 +57,7 @@ static struct {
{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
{HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
};
static int pow_10(unsigned power)
@@ -201,7 +201,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int ret;
if (val1 < 0 || val2 < 0)
- ret = -EINVAL;
+ return -EINVAL;
value = val1 * pow_10(6) + val2;
if (value) {
@@ -250,6 +250,9 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
s32 value;
int ret;
+ if (val1 < 0 || val2 < 0)
+ return -EINVAL;
+
value = convert_to_vtf_format(st->sensitivity.size,
st->sensitivity.unit_expo,
val1, val2);
@@ -266,15 +269,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
/*
* This function applies the unit exponent to the scale.
* For example:
- * 9.806650 ->exp:2-> val0[980]val1[665000]
- * 9.000806 ->exp:2-> val0[900]val1[80600]
- * 0.174535 ->exp:2-> val0[17]val1[453500]
- * 1.001745 ->exp:0-> val0[1]val1[1745]
- * 1.001745 ->exp:2-> val0[100]val1[174500]
- * 1.001745 ->exp:4-> val0[10017]val1[450000]
- * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+ * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+ * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+ * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+ * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+ * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+ * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
*/
-static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+static void adjust_exponent_nano(int *val0, int *val1, int scale0,
int scale1, int exp)
{
int i;
@@ -285,32 +288,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0,
if (exp > 0) {
*val0 = scale0 * pow_10(exp);
res = 0;
- if (exp > 6) {
+ if (exp > 9) {
*val1 = 0;
return;
}
for (i = 0; i < exp; ++i) {
- x = scale1 / pow_10(5 - i);
+ x = scale1 / pow_10(8 - i);
res += (pow_10(exp - 1 - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ scale1 = scale1 % pow_10(8 - i);
}
*val0 += res;
*val1 = scale1 * pow_10(exp);
} else if (exp < 0) {
exp = abs(exp);
- if (exp > 6) {
+ if (exp > 9) {
*val0 = *val1 = 0;
return;
}
*val0 = scale0 / pow_10(exp);
rem = scale0 % pow_10(exp);
res = 0;
- for (i = 0; i < (6 - exp); ++i) {
- x = scale1 / pow_10(5 - i);
- res += (pow_10(5 - exp - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ for (i = 0; i < (9 - exp); ++i) {
+ x = scale1 / pow_10(8 - i);
+ res += (pow_10(8 - exp - i) * x);
+ scale1 = scale1 % pow_10(8 - i);
}
- *val1 = rem * pow_10(6 - exp) + res;
+ *val1 = rem * pow_10(9 - exp) + res;
} else {
*val0 = scale0;
*val1 = scale1;
@@ -332,14 +335,14 @@ int hid_sensor_format_scale(u32 usage_id,
unit_conversion[i].unit == attr_info->units) {
exp = hid_sensor_convert_exponent(
attr_info->unit_expo);
- adjust_exponent_micro(val0, val1,
+ adjust_exponent_nano(val0, val1,
unit_conversion[i].scale_val0,
unit_conversion[i].scale_val1, exp);
break;
}
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
EXPORT_SYMBOL(hid_sensor_format_scale);
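With the switch from micro to nano fractions above, scale values carry nine fractional digits when rendered. A small sketch of how the default ACCEL_3D pair (9, 806650000) would be formatted, assuming the usual integer-plus-nano layout:

#include <stdio.h>

int main(void)
{
	int val0 = 9;			/* integer part from the conversion table */
	int val1 = 806650000;		/* fractional part, now in nanos */

	/* IIO_VAL_INT_PLUS_NANO renders as <int>.<9-digit fraction> */
	printf("%d.%09u\n", val0, (unsigned int)val1);	/* 9.806650000 */
	return 0;
}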
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 285a64a589d7..975a1f19f747 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i, len = 0;
+ int i, len = 0, q, r;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor_settings->fs.fs_avl[i].gain);
+ q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+ r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
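The quotient/remainder split above matters for gains of one or more; the old format string could only render values below 1 correctly. A sketch with a hypothetical gain of 1500000 (1.5 units per digit):

#include <stdio.h>

int main(void)
{
	unsigned int gain = 1500000;	/* hypothetical full-scale gain */
	unsigned int q = gain / 1000000;
	unsigned int r = gain % 1000000;

	printf("old: 0.%06u\n", gain);	/* "0.1500000" -- wrong for gains >= 1 */
	printf("new: %u.%06u\n", q, r);	/* "1.500000" */
	return 0;
}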
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
new file mode 100644
index 000000000000..2d2ee353dde7
--- /dev/null
+++ b/drivers/iio/counter/104-quad-8.c
@@ -0,0 +1,593 @@
+/*
+ * IIO driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @preset: array of preset values
+ * @count_mode: array of count mode configurations
+ * @quadrature_mode: array of quadrature mode configurations
+ * @quadrature_scale: array of quadrature mode scale configurations
+ * @ab_enable: array of A and B inputs enable configurations
+ * @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode: array of index function synchronous mode configurations
+ * @index_polarity: array of index function polarity configurations
+ * @base: base port address of the IIO device
+ */
+struct quad8_iio {
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+ unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+ unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+ unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int base;
+};
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX) {
+ *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+ return IIO_VAL_INT;
+ }
+
+ flags = inb(base_offset);
+ borrow = flags & BIT(0);
+ carry = !!(flags & BIT(1));
+
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(0x11, base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = priv->quadrature_scale[chan->channel];
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
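A short userspace sketch (flag and byte values made up) of how the 25-bit count read above is assembled from the flag register and the three latched data bytes:

#include <stdio.h>

int main(void)
{
	unsigned int flags = 0x02;			/* carry set, borrow clear */
	unsigned int latch[3] = { 0x34, 0x12, 0x00 };	/* low, mid, high byte */
	unsigned int borrow = flags & 0x01;
	unsigned int carry = !!(flags & 0x02);
	unsigned int val = (borrow ^ carry) << 24;	/* bit 24 doubles the range */
	int i;

	for (i = 0; i < 3; i++)
		val |= latch[i] << (8 * i);

	printf("count = 0x%07X\n", val);	/* count = 0x1001234 */
	return 0;
}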
+
+static int quad8_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ int i;
+ unsigned int ior_cfg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX)
+ return -EINVAL;
+
+ /* Only 24-bit values are supported */
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(0x08, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ val = priv->preset[chan->channel];
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x02, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+ /* Load I/O control configuration */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ /* Quadrature scaling only available in quadrature mode */
+ if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ return -EINVAL;
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+ priv->quadrature_scale[chan->channel] = 0;
+ else if (!val)
+ switch (val2) {
+ case 500000:
+ priv->quadrature_scale[chan->channel] = 1;
+ break;
+ case 250000:
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info quad8_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = quad8_read_raw,
+ .write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+ (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(0x40 | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(4));
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+ "down",
+ "up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & BIT(5));
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+ .items = quad8_count_direction_states,
+ .num_items = ARRAY_SIZE(quad8_count_direction_states),
+ .get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+ "normal",
+ "range limit",
+ "non-recycle",
+ "modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int count_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = count_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->count_mode[chan->channel] = count_mode;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[chan->channel])
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+ .items = quad8_count_modes,
+ .num_items = ARRAY_SIZE(quad8_count_modes),
+ .set = quad8_set_count_mode,
+ .get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ return -EINVAL;
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .set = quad8_set_synchronous_mode,
+ .get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+ else {
+ /* Quadrature scaling only available in quadrature mode */
+ priv->quadrature_scale[chan->channel] = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (priv->synchronous_mode[chan->channel])
+ quad8_set_synchronous_mode(indio_dev, chan, 0);
+ }
+
+ priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(0x20 | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+ .items = quad8_quadrature_modes,
+ .num_items = ARRAY_SIZE(quad8_quadrature_modes),
+ .set = quad8_set_quadrature_mode,
+ .get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(0x40 | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .set = quad8_set_index_polarity,
+ .get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_preset,
+ .write = quad8_write_preset
+ },
+ {
+ .name = "set_to_preset_on_index",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_set_to_preset_on_index,
+ .write = quad8_write_set_to_preset_on_index
+ },
+ IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+ IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+ IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+ IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+ IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+ IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+ &quad8_synchronous_mode_enum),
+ IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+ IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+ IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+ {}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) { \
+ .type = IIO_COUNT, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = quad8_count_ext_info, \
+ .indexed = 1 \
+}
+
+#define QUAD8_INDEX_CHAN(_chan) { \
+ .type = IIO_INDEX, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = quad8_index_ext_info, \
+ .indexed = 1 \
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+ QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+ QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+ QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+ QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+ QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+ QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+ QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+ QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct quad8_iio *priv;
+ int i, j;
+ unsigned int base_offset;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + QUAD8_EXTENT);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &quad8_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+ indio_dev->channels = quad8_channels;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* Reset all counters and disable interrupt function */
+ outb(0x01, base[id] + 0x11);
+ /* Set initial configuration for all counters */
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ base_offset = base[id] + 2 * i;
+ /* Reset Byte Pointer */
+ outb(0x01, base_offset + 1);
+ /* Reset Preset Register */
+ for (j = 0; j < 3; j++)
+ outb(0x00, base_offset);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(0x04, base_offset + 1);
+ /* Reset Error flag */
+ outb(0x06, base_offset + 1);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ outb(0x20, base_offset + 1);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ outb(0x40, base_offset + 1);
+ /* Disable index function; negative index polarity */
+ outb(0x60, base_offset + 1);
+ }
+ /* Enable all counters */
+ outb(0x00, base[id] + 0x11);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver quad8_driver = {
+ .probe = quad8_probe,
+ .driver = {
+ .name = "104-quad-8"
+ }
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
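The driver reports the quadrature scale as IIO_VAL_FRACTIONAL_LOG2 with val = 1 and val2 equal to the stored quadrature_scale index, so the supported gains come out as 1 / 2^val2. A tiny sketch (not part of the patch) of that mapping:

#include <stdio.h>

int main(void)
{
	int val = 1;
	int val2;	/* quadrature_scale index stored by the driver: 0, 1 or 2 */

	for (val2 = 0; val2 <= 2; val2++)
		printf("quadrature_scale %d -> scale %g\n",
		       val2, (double)val / (1 << val2));
	/* 0 -> 1, 1 -> 0.5, 2 -> 0.25, matching the "three gain states" comment */
	return 0;
}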
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
new file mode 100644
index 000000000000..44627f6e4861
--- /dev/null
+++ b/drivers/iio/counter/Kconfig
@@ -0,0 +1,24 @@
+#
+# Counter devices
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Counters"
+
+config 104_QUAD_8
+ tristate "ACCES 104-QUAD-8 driver"
+ depends on X86 && ISA_BUS_API
+ help
+ Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+ encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+ Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
+ also clears the counter's respective error flag. Although the counters
+ have a 25-bit range, only the lower 24 bits may be set, either directly
+ or via a counter's preset attribute. Interrupts are not supported by
+ this driver.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
+endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
new file mode 100644
index 000000000000..007e88411648
--- /dev/null
+++ b/drivers/iio/counter/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO counter devices
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 120b24478469..d3084028905b 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -200,6 +200,16 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config DPOT_DAC
+ tristate "DAC emulation using a DPOT"
+ depends on OF
+ help
+ Say yes here to build support for DAC emulation using a digital
+ potentiometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dpot-dac.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 27642bbf75f2..f01bf4a99867 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
+obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 0b235a2c7359..6eed5b7729be 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -17,7 +17,7 @@
#define AD5592R_GPIO_READBACK_EN BIT(10)
#define AD5592R_LDAC_READBACK_EN BIT(6)
-static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, __be16 *buf)
{
struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
struct spi_transfer t = {
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
new file mode 100644
index 000000000000..960a2b430480
--- /dev/null
+++ b/drivers/iio/dac/dpot-dac.c
@@ -0,0 +1,266 @@
+/*
+ * IIO DAC emulation driver using a digital potentiometer
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * It is assumed that the dpot is used as a voltage divider between the
+ * current dpot wiper setting and the maximum resistance of the dpot. The
+ * divided voltage is provided by a vref regulator.
+ *
+ * .------.
+ * .-----------. | |
+ * | vref |--' .---.
+ * | regulator |--. | |
+ * '-----------' | | d |
+ * | | p |
+ * | | o | wiper
+ * | | t |<---------+
+ * | | |
+ * | '---' dac output voltage
+ * | |
+ * '------+------------+
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+struct dpot_dac {
+ struct regulator *vref;
+ struct iio_channel *dpot;
+ u32 max_ohms;
+};
+
+static const struct iio_chan_spec dpot_dac_iio_channel = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .output = 1,
+ .indexed = 1,
+};
+
+static int dpot_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ int ret;
+ unsigned long long tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_read_channel_raw(dac->dpot, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_read_channel_scale(dac->dpot, val, val2);
+ switch (ret) {
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = *val * 1000000000LL;
+ do_div(tmp, dac->max_ohms);
+ tmp *= regulator_get_voltage(dac->vref) / 1000;
+ do_div(tmp, 1000000000LL);
+ *val = tmp;
+ return ret;
+ case IIO_VAL_INT:
+ /*
+ * Convert integer scale to fractional scale by
+ * setting the denominator (val2) to one...
+ */
+ *val2 = 1;
+ ret = IIO_VAL_FRACTIONAL;
+ /* ...and fall through. */
+ case IIO_VAL_FRACTIONAL:
+ *val *= regulator_get_voltage(dac->vref) / 1000;
+ *val2 *= dac->max_ohms;
+ break;
+ }
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ return iio_read_avail_channel_raw(dac->dpot, vals, length);
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return iio_write_channel_raw(dac->dpot, val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dpot_dac_info = {
+ .read_raw = dpot_dac_read_raw,
+ .read_avail = dpot_dac_read_avail,
+ .write_raw = dpot_dac_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct dpot_dac *dac = iio_priv(indio_dev);
+ unsigned long long tmp;
+ int ret;
+ int val;
+ int val2;
+ int max;
+
+ ret = iio_read_max_channel_raw(dac->dpot, &max);
+ if (ret < 0) {
+ dev_err(dev, "dpot does not indicate its raw maximum value\n");
+ return ret;
+ }
+
+ switch (iio_read_channel_scale(dac->dpot, &val, &val2)) {
+ case IIO_VAL_INT:
+ return max * val;
+ case IIO_VAL_FRACTIONAL:
+ tmp = (unsigned long long)max * val;
+ do_div(tmp, val2);
+ return tmp;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = val * 1000000000LL * max >> val2;
+ do_div(tmp, 1000000000LL);
+ return tmp;
+ default:
+ dev_err(dev, "dpot has a scale that is too weird\n");
+ }
+
+ return -EINVAL;
+}
+
+static int dpot_dac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct dpot_dac *dac;
+ enum iio_chan_type type;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ dac = iio_priv(indio_dev);
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &dpot_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &dpot_dac_iio_channel;
+ indio_dev->num_channels = 1;
+
+ dac->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(dac->vref)) {
+ if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get vref regulator\n");
+ return PTR_ERR(dac->vref);
+ }
+
+ dac->dpot = devm_iio_channel_get(dev, "dpot");
+ if (IS_ERR(dac->dpot)) {
+ if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dpot input channel\n");
+ return PTR_ERR(dac->dpot);
+ }
+
+ ret = iio_get_channel_type(dac->dpot, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type != IIO_RESISTANCE) {
+ dev_err(dev, "dpot is of the wrong type\n");
+ return -EINVAL;
+ }
+
+ ret = dpot_dac_channel_max_ohms(indio_dev);
+ if (ret < 0)
+ return ret;
+ dac->max_ohms = ret;
+
+ ret = regulator_enable(dac->vref);
+ if (ret) {
+ dev_err(dev, "failed to enable the vref regulator\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "failed to register iio device\n");
+ goto disable_reg;
+ }
+
+ return 0;
+
+disable_reg:
+ regulator_disable(dac->vref);
+ return ret;
+}
+
+static int dpot_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct dpot_dac *dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(dac->vref);
+
+ return 0;
+}
+
+static const struct of_device_id dpot_dac_match[] = {
+ { .compatible = "dpot-dac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dpot_dac_match);
+
+static struct platform_driver dpot_dac_driver = {
+ .probe = dpot_dac_probe,
+ .remove = dpot_dac_remove,
+ .driver = {
+ .name = "iio-dpot-dac",
+ .of_match_table = dpot_dac_match,
+ },
+};
+module_platform_driver(dpot_dac_driver);
+
+MODULE_DESCRIPTION("DAC emulation driver using a digital potentiometer");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
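For the IIO_VAL_FRACTIONAL case in dpot_dac_read_raw() above, the reported output scale works out to dpot_scale * vref / max_ohms in millivolts per step. A worked userspace example with made-up numbers (1 ohm per step, 10 kohm dpot, 3.3 V vref):

#include <stdio.h>

int main(void)
{
	int val = 1, val2 = 1;		/* dpot scale: 1 ohm per raw step */
	int vref_uv = 3300000;		/* pretend regulator_get_voltage(): 3.3 V */
	int max_ohms = 10000;		/* full-scale dpot resistance */

	val *= vref_uv / 1000;		/* millivolt numerator, as in the driver */
	val2 *= max_ohms;

	printf("scale = %d/%d = %g mV per step\n",
	       val, val2, (double)val / val2);	/* 3300/10000 = 0.33 mV */
	return 0;
}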
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index cca935c06f2b..db109f0cdd8c 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -18,6 +18,8 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -26,12 +28,20 @@
#define MCP4725_DRV_NAME "mcp4725"
+#define MCP472X_REF_VDD 0x00
+#define MCP472X_REF_VREF_UNBUFFERED 0x02
+#define MCP472X_REF_VREF_BUFFERED 0x03
+
struct mcp4725_data {
struct i2c_client *client;
- u16 vref_mv;
+ int id;
+ unsigned ref_mode;
+ bool vref_buffered;
u16 dac_value;
bool powerdown;
unsigned powerdown_mode;
+ struct regulator *vdd_reg;
+ struct regulator *vref_reg;
};
static int mcp4725_suspend(struct device *dev)
@@ -86,6 +96,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
return 0;
inoutbuf[0] = 0x60; /* write EEPROM */
+ inoutbuf[0] |= data->ref_mode << 3;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;
@@ -278,18 +289,49 @@ static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
return 0;
}
+static int mcp4726_set_cfg(struct iio_dev *indio_dev)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+ u8 outbuf[3];
+ int ret;
+
+ outbuf[0] = 0x40;
+ outbuf[0] |= data->ref_mode << 3;
+ if (data->powerdown)
+ outbuf[0] |= data->powerdown << 1;
+ outbuf[1] = data->dac_value >> 4;
+ outbuf[2] = (data->dac_value & 0xf) << 4;
+
+ ret = i2c_master_send(data->client, outbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+ else
+ return 0;
+}
+
static int mcp4725_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4725_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*val = data->dac_value;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = data->vref_mv;
+ if (data->ref_mode == MCP472X_REF_VDD)
+ ret = regulator_get_voltage(data->vdd_reg);
+ else
+ ret = regulator_get_voltage(data->vref_reg);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -323,27 +365,98 @@ static const struct iio_info mcp4725_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ /* check if the vref-supply is defined */
+ pdata->use_vref = of_property_read_bool(np, "vref-supply");
+ pdata->vref_buffered =
+ of_property_read_bool(np, "microchip,vref-buffered");
+
+ return 0;
+}
+#else
+static int mcp4725_probe_dt(struct device *dev,
+ struct mcp4725_platform_data *platform_data)
+{
+ return -ENODEV;
+}
+#endif
+
static int mcp4725_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcp4725_data *data;
struct iio_dev *indio_dev;
- struct mcp4725_platform_data *platform_data = client->dev.platform_data;
- u8 inbuf[3];
+ struct mcp4725_platform_data *pdata, pdata_dt;
+ u8 inbuf[4];
u8 pd;
+ u8 ref;
int err;
- if (!platform_data || !platform_data->vref_mv) {
- dev_err(&client->dev, "invalid platform data");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (indio_dev == NULL)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->id = id->driver_data;
+ pdata = dev_get_platdata(&client->dev);
+
+ if (!pdata) {
+ err = mcp4725_probe_dt(&client->dev, &pdata_dt);
+ if (err) {
+ dev_err(&client->dev,
+ "invalid platform or devicetree data");
+ return err;
+ }
+ pdata = &pdata_dt;
+ }
+
+ if (data->id == MCP4725 && pdata->use_vref) {
+ dev_err(&client->dev,
+ "external reference is unavailable on MCP4725");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref && pdata->vref_buffered) {
+ dev_err(&client->dev,
+ "buffering is unavailable on the internal reference");
+ return -EINVAL;
+ }
+
+ if (!pdata->use_vref)
+ data->ref_mode = MCP472X_REF_VDD;
+ else
+ data->ref_mode = pdata->vref_buffered ?
+ MCP472X_REF_VREF_BUFFERED :
+ MCP472X_REF_VREF_UNBUFFERED;
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg))
+ return PTR_ERR(data->vdd_reg);
+
+ err = regulator_enable(data->vdd_reg);
+ if (err)
+ return err;
+
+ if (pdata->use_vref) {
+ data->vref_reg = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(data->vref_reg)) {
+ err = PTR_ERR(data->vref_reg);
+ goto err_disable_vdd_reg;
+ }
+
+ err = regulator_enable(data->vref_reg);
+ if (err)
+ goto err_disable_vdd_reg;
+ }
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
@@ -352,25 +465,56 @@ static int mcp4725_probe(struct i2c_client *client,
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->vref_mv = platform_data->vref_mv;
+ /* read current DAC value and settings */
+ err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
- /* read current DAC value */
- err = i2c_master_recv(client, inbuf, 3);
if (err < 0) {
dev_err(&client->dev, "failed to read DAC value");
- return err;
+ goto err_disable_vref_reg;
}
pd = (inbuf[0] >> 1) & 0x3;
data->powerdown = pd > 0 ? true : false;
- data->powerdown_mode = pd ? pd - 1 : 2; /* largest register to gnd */
+ data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
+ if (data->id == MCP4726)
+ ref = (inbuf[3] >> 3) & 0x3;
+
+ if (data->id == MCP4726 && ref != data->ref_mode) {
+ dev_info(&client->dev,
+ "voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
+ data->ref_mode, ref, data->ref_mode);
+ err = mcp4726_set_cfg(indio_dev);
+ if (err < 0)
+ goto err_disable_vref_reg;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_disable_vref_reg;
+
+ return 0;
+
+err_disable_vref_reg:
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
- return iio_device_register(indio_dev);
+err_disable_vdd_reg:
+ regulator_disable(data->vdd_reg);
+
+ return err;
}
static int mcp4725_remove(struct i2c_client *client)
{
- iio_device_unregister(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (data->vref_reg)
+ regulator_disable(data->vref_reg);
+ regulator_disable(data->vdd_reg);
+
return 0;
}
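With the regulator-based reference added above, the reported scale becomes the reference voltage divided by 2^12 (IIO_VAL_FRACTIONAL_LOG2). A worked sketch with a made-up 5 V supply:

#include <stdio.h>

int main(void)
{
	int vref_uv = 5000000;		/* pretend regulator_get_voltage(): 5 V */
	int val = vref_uv / 1000;	/* 5000 mV, as in mcp4725_read_raw() */
	int val2 = 12;			/* 12-bit DAC */

	printf("%g mV per LSB\n", (double)val / (1 << val2));	/* ~1.2207 */
	return 0;
}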
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 205a84420ae9..3126cf05e6b9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -84,6 +84,24 @@ config HID_SENSOR_GYRO_3D
Say yes here to build support for the HID SENSOR
Gyroscope 3D.
+config MPU3050
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP
+
+config MPU3050_I2C
+ tristate "Invensense MPU3050 devices on I2C"
+ depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
+ depends on I2C
+ select MPU3050
+ select REGMAP_I2C
+ select I2C_MUX
+ help
+ This driver supports the Invensense MPU3050 gyroscope over I2C.
+ This driver can be built as a module. The module will be called
+ mpu3050.
+
config IIO_ST_GYRO_3AXIS
tristate "STMicroelectronics gyroscopes 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f866a4be0667..f0e149a606b0 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -14,6 +14,11 @@ obj-$(CONFIG_BMG160_SPI) += bmg160_spi.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+# Currently this is rolled into one module, split it if
+# we ever create a separate SPI interface for MPU-3050
+obj-$(CONFIG_MPU3050) += mpu3050.o
+mpu3050-objs := mpu3050-core.o mpu3050-i2c.o
+
itg3200-y := itg3200_core.o
itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
obj-$(CONFIG_ITG3200) += itg3200.o
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
new file mode 100644
index 000000000000..2be2a5d287e6
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -0,0 +1,1306 @@
+/*
+ * MPU3050 gyroscope driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the input subsystem driver, Copyright (C) 2011 Wistron Co.Ltd
+ * Joseph Lai <joseph_lai@wistron.com> and trimmed down by
+ * Alan Cox <alan@linux.intel.com> in turn based on bma023.c.
+ * Device behaviour based on a misc driver posted by Nathan Royer in 2011.
+ *
+ * TODO: add support for setting up the low pass 3dB frequency.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "mpu3050.h"
+
+#define MPU3050_CHIP_ID 0x69
+
+/*
+ * Register map: anything suffixed *_H is a big-endian high byte and is always
+ * followed by the corresponding low byte (*_L) even though these are not
+ * explicitly included in the register definitions.
+ */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_PRODUCT_ID_REG 0x01
+#define MPU3050_XG_OFFS_TC 0x05
+#define MPU3050_YG_OFFS_TC 0x08
+#define MPU3050_ZG_OFFS_TC 0x0B
+#define MPU3050_X_OFFS_USR_H 0x0C
+#define MPU3050_Y_OFFS_USR_H 0x0E
+#define MPU3050_Z_OFFS_USR_H 0x10
+#define MPU3050_FIFO_EN 0x12
+#define MPU3050_AUX_VDDIO 0x13
+#define MPU3050_SLV_ADDR 0x14
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_AUX_ADDR 0x18
+#define MPU3050_INT_STATUS 0x1A
+#define MPU3050_TEMP_H 0x1B
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_YOUT_H 0x1F
+#define MPU3050_ZOUT_H 0x21
+#define MPU3050_DMP_CFG1 0x35
+#define MPU3050_DMP_CFG2 0x36
+#define MPU3050_BANK_SEL 0x37
+#define MPU3050_MEM_START_ADDR 0x38
+#define MPU3050_MEM_R_W 0x39
+#define MPU3050_FIFO_COUNT_H 0x3A
+#define MPU3050_FIFO_R 0x3C
+#define MPU3050_USR_CTRL 0x3D
+#define MPU3050_PWR_MGM 0x3E
+
+/* MPU memory bank read options */
+#define MPU3050_MEM_PRFTCH BIT(5)
+#define MPU3050_MEM_USER_BANK BIT(4)
+/* Bits 8-11 select memory bank */
+#define MPU3050_MEM_RAM_BANK_0 0
+#define MPU3050_MEM_RAM_BANK_1 1
+#define MPU3050_MEM_RAM_BANK_2 2
+#define MPU3050_MEM_RAM_BANK_3 3
+#define MPU3050_MEM_OTP_BANK_0 4
+
+#define MPU3050_AXIS_REGS(axis) (MPU3050_XOUT_H + (axis * 2))
+
+/* Register bits */
+
+/* FIFO Enable */
+#define MPU3050_FIFO_EN_FOOTER BIT(0)
+#define MPU3050_FIFO_EN_AUX_ZOUT BIT(1)
+#define MPU3050_FIFO_EN_AUX_YOUT BIT(2)
+#define MPU3050_FIFO_EN_AUX_XOUT BIT(3)
+#define MPU3050_FIFO_EN_GYRO_ZOUT BIT(4)
+#define MPU3050_FIFO_EN_GYRO_YOUT BIT(5)
+#define MPU3050_FIFO_EN_GYRO_XOUT BIT(6)
+#define MPU3050_FIFO_EN_TEMP_OUT BIT(7)
+
+/*
+ * Digital Low Pass filter (DLPF)
+ * Full Scale (FS)
+ * and Synchronization
+ */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_EXT_SYNC_SHIFT 5
+
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_FS_SHIFT 3
+
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+#define MPU3050_DLPF_CFG_SHIFT 0
+
+/* Interrupt config */
+#define MPU3050_INT_RAW_RDY_EN BIT(0)
+#define MPU3050_INT_DMP_DONE_EN BIT(1)
+#define MPU3050_INT_MPU_RDY_EN BIT(2)
+#define MPU3050_INT_ANYRD_2CLEAR BIT(4)
+#define MPU3050_INT_LATCH_EN BIT(5)
+#define MPU3050_INT_OPEN BIT(6)
+#define MPU3050_INT_ACTL BIT(7)
+/* Interrupt status */
+#define MPU3050_INT_STATUS_RAW_RDY BIT(0)
+#define MPU3050_INT_STATUS_DMP_DONE BIT(1)
+#define MPU3050_INT_STATUS_MPU_RDY BIT(2)
+#define MPU3050_INT_STATUS_FIFO_OVFLW BIT(7)
+/* USR_CTRL */
+#define MPU3050_USR_CTRL_FIFO_EN BIT(6)
+#define MPU3050_USR_CTRL_AUX_IF_EN BIT(5)
+#define MPU3050_USR_CTRL_AUX_IF_RST BIT(3)
+#define MPU3050_USR_CTRL_FIFO_RST BIT(1)
+#define MPU3050_USR_CTRL_GYRO_RST BIT(0)
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL_MASK 0x07
+#define MPU3050_PWR_MGM_STBY_ZG BIT(3)
+#define MPU3050_PWR_MGM_STBY_YG BIT(4)
+#define MPU3050_PWR_MGM_STBY_XG BIT(5)
+#define MPU3050_PWR_MGM_SLEEP BIT(6)
+#define MPU3050_PWR_MGM_RESET BIT(7)
+#define MPU3050_PWR_MGM_MASK 0xff
+
+/*
+ * The finest full-scale precision is +/- 250 deg/s, so the full span is
+ * actually 500 deg/s. All 16 bits are then used to cover this span, in
+ * two's complement.
+ */
+static unsigned int mpu3050_fs_precision[] = {
+ IIO_DEGREE_TO_RAD(250),
+ IIO_DEGREE_TO_RAD(500),
+ IIO_DEGREE_TO_RAD(1000),
+ IIO_DEGREE_TO_RAD(2000)
+};
+
+/*
+ * Regulator names
+ */
+static const char mpu3050_reg_vdd[] = "vdd";
+static const char mpu3050_reg_vlogic[] = "vlogic";
+
+static unsigned int mpu3050_get_freq(struct mpu3050 *mpu3050)
+{
+ unsigned int freq;
+
+ if (mpu3050->lpf == MPU3050_DLPF_CFG_256HZ_NOLPF2)
+ freq = 8000;
+ else
+ freq = 1000;
+ freq /= (mpu3050->divisor + 1);
+
+ return freq;
+}
+
+static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
+{
+ __be16 raw_val[3];
+ int ret;
+ int i;
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the Z-axis PLL */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Write calibration offset registers */
+ for (i = 0; i < 3; i++)
+ raw_val[i] = cpu_to_be16(mpu3050->calibration[i]);
+
+ ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
+ sizeof(raw_val));
+ if (ret)
+ return ret;
+
+ /* Set low pass filter (sample rate), sync and full scale */
+ ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
+ MPU3050_EXT_SYNC_NONE << MPU3050_EXT_SYNC_SHIFT |
+ mpu3050->fullscale << MPU3050_FS_SHIFT |
+ mpu3050->lpf << MPU3050_DLPF_CFG_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Set up sampling frequency */
+ ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
+ if (ret)
+ return ret;
+
+ /*
+ * Max 50 ms start-up time after setting DLPF_FS_SYNC
+ * according to the data sheet, then wait for the next sample
+ * at this frequency T = 1000/f ms.
+ */
+ msleep(50 + 1000 / mpu3050_get_freq(mpu3050));
+
+ return 0;
+}
+
+static int mpu3050_set_8khz_samplerate(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 divisor;
+ enum mpu3050_lpf lpf;
+
+ lpf = mpu3050->lpf;
+ divisor = mpu3050->divisor;
+
+ mpu3050->lpf = LPF_256_HZ_NOLPF; /* 8 kHz base frequency */
+ mpu3050->divisor = 0; /* Divide by 1 */
+ ret = mpu3050_start_sampling(mpu3050);
+
+ mpu3050->lpf = lpf;
+ mpu3050->divisor = divisor;
+
+ return ret;
+}
+
+static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ __be16 raw_val;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* The temperature scaling is (x+23000)/280 Celsius */
+ *val = 23000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = mpu3050->calibration[chan->scan_index-1];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = mpu3050_get_freq(mpu3050);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* Millidegrees, see the temperature scaling comment above */
+ *val = 1000;
+ *val2 = 280;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ANGL_VEL:
+ /*
+ * Convert to the corresponding full scale in
+ * radians. All 16 bits are used with sign to
+ * span the available scale: to account for the
+ * one value we would miss by multiplying with
+ * 1/S16_MAX, multiply with 2/U16_MAX instead.
+ */
+ *val = mpu3050_fs_precision[mpu3050->fullscale] * 2;
+ *val2 = U16_MAX;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ /* Resume device */
+ pm_runtime_get_sync(mpu3050->dev);
+ mutex_lock(&mpu3050->lock);
+
+ ret = mpu3050_set_8khz_samplerate(mpu3050);
+ if (ret)
+ goto out_read_raw_unlock;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
+ &raw_val, sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading temperature\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ case IIO_ANGL_VEL:
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_AXIS_REGS(chan->scan_index-1),
+ &raw_val,
+ sizeof(raw_val));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_read_raw_unlock;
+ }
+
+ *val = be16_to_cpu(raw_val);
+ ret = IIO_VAL_INT;
+
+ goto out_read_raw_unlock;
+ default:
+ ret = -EINVAL;
+ goto out_read_raw_unlock;
+ }
+ default:
+ break;
+ }
+
+ return -EINVAL;
+
+out_read_raw_unlock:
+ mutex_unlock(&mpu3050->lock);
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return ret;
+}
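Putting the offset (23000) and scale (1000/280) reported above together, userspace can compute the temperature as (raw + 23000) * 1000 / 280 milli-degrees Celsius. A worked example with a made-up raw sample:

#include <stdio.h>

int main(void)
{
	int raw = -16000;	/* made-up 16-bit two's complement temperature sample */
	long milli_celsius = (long)(raw + 23000) * 1000 / 280;

	printf("%ld m degC\n", milli_celsius);	/* 25000, i.e. 25 degrees C */
	return 0;
}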
+
+static int mpu3050_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ /*
+ * Couldn't figure out a way to precalculate these at compile time.
+ */
+ unsigned int fs250 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[0] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs500 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[1] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs1000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[2] * 1000000 * 2,
+ U16_MAX);
+ unsigned int fs2000 =
+ DIV_ROUND_CLOSEST(mpu3050_fs_precision[3] * 1000000 * 2,
+ U16_MAX);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ mpu3050->calibration[chan->scan_index-1] = val;
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The max samplerate is 8000 Hz, the minimum
+ * 1000 / 256 ~= 4 Hz
+ */
+ if (val < 4 || val > 8000)
+ return -EINVAL;
+
+ /*
+ * Above 1000 Hz we must turn off the digital low pass filter
+ * so we get a base frequency of 8kHz to the divider
+ */
+ if (val > 1000) {
+ mpu3050->lpf = LPF_256_HZ_NOLPF;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(8000, val) - 1;
+ return 0;
+ }
+
+ mpu3050->lpf = LPF_188_HZ;
+ mpu3050->divisor = DIV_ROUND_CLOSEST(1000, val) - 1;
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+ /*
+ * We support +/-250, +/-500, +/-1000 and +/-2000 deg/s,
+ * which means we need to round to the closest full scale
+ * in radians: roughly +/-4.3, +/-8.7, +/-17.5 and +/-35
+ * rad/s. With the 16 bits used to cover it, the scale is
+ * then 2/(2^16) of that.
+ */
+
+ /* Just too large, set the max range */
+ if (val != 0) {
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ }
+
+ /*
+ * Now we're dealing with fractional values below one,
+ * expressed here in microradians/s: do some integer
+ * interpolation and match with the closest fullscale in
+ * the table.
+ */
+ if (val2 <= fs250 ||
+ val2 < ((fs500 + fs250) / 2))
+ mpu3050->fullscale = FS_250_DPS;
+ else if (val2 <= fs500 ||
+ val2 < ((fs1000 + fs500) / 2))
+ mpu3050->fullscale = FS_500_DPS;
+ else if (val2 <= fs1000 ||
+ val2 < ((fs2000 + fs1000) / 2))
+ mpu3050->fullscale = FS_1000_DPS;
+ else
+ /* Catch-all */
+ mpu3050->fullscale = FS_2000_DPS;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
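The sampling-frequency handling above boils down to picking a divisor against an 8 kHz base (low-pass filter off) or a 1 kHz base (188 Hz filter), with the effective rate recomputed as base / (divisor + 1) by mpu3050_get_freq(). A short sketch with made-up requested rates:

#include <stdio.h>

static unsigned int effective_freq(unsigned int base, unsigned int divisor)
{
	return base / (divisor + 1);	/* same formula as mpu3050_get_freq() */
}

int main(void)
{
	/* 200 Hz request: <= 1000 Hz, so 1 kHz base with the 188 Hz filter */
	unsigned int div_200 = (1000 + 200 / 2) / 200 - 1;	/* DIV_ROUND_CLOSEST */
	/* 4 kHz request: > 1000 Hz, so the filter is off and the base is 8 kHz */
	unsigned int div_4k = (8000 + 4000 / 2) / 4000 - 1;

	printf("200 Hz -> divisor %u, effective %u Hz\n",
	       div_200, effective_freq(1000, div_200));		/* 4, 200 Hz */
	printf("4000 Hz -> divisor %u, effective %u Hz\n",
	       div_4k, effective_freq(8000, div_4k));		/* 1, 4000 Hz */
	return 0;
}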
+
+static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
+ /*
+ * Temperature 1*16 bits
+ * Three axes 3*16 bits
+ * Timestamp 64 bits (4*16 bits)
+ * Sum total 8*16 bits
+ */
+ __be16 hw_values[8];
+ s64 timestamp;
+ unsigned int datums_from_fifo = 0;
+
+ /*
+ * If we're using the hardware trigger, get the precise timestamp from
+ * the top half of the threaded IRQ handler. Otherwise get the
+ * timestamp here so it will be close in time to the actual values
+ * read from the registers.
+ */
+ if (iio_trigger_using_own(indio_dev))
+ timestamp = mpu3050->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns(indio_dev);
+
+ mutex_lock(&mpu3050->lock);
+
+ /* Using the hardware IRQ trigger? Check the buffer then. */
+ if (mpu3050->hw_irq_trigger) {
+ __be16 raw_fifocnt;
+ u16 fifocnt;
+ /* X, Y, Z + temperature */
+ unsigned int bytes_per_datum = 8;
+ bool fifo_overflow = false;
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+
+ if (fifocnt == 512) {
+ dev_info(mpu3050->dev,
+ "FIFO overflow! Emptying and resetting FIFO\n");
+ fifo_overflow = true;
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret) {
+ dev_info(mpu3050->dev, "error resetting FIFO\n");
+ goto out_trigger_unlock;
+ }
+ mpu3050->pending_fifo_footer = false;
+ }
+
+ if (fifocnt)
+ dev_dbg(mpu3050->dev,
+ "%d bytes in the FIFO\n",
+ fifocnt);
+
+ while (!fifo_overflow && fifocnt > bytes_per_datum) {
+ unsigned int toread;
+ unsigned int offset;
+ __be16 fifo_values[5];
+
+ /*
+ * If there is a FIFO footer in the pipe, first clear
+ * that out. This follows the complex algorithm in the
+ * datasheet that states that you may never leave the
+ * FIFO empty after the first reading: you have to
+ * always leave two footer bytes in it. The footer is
+ * in practice just two zero bytes.
+ */
+ if (mpu3050->pending_fifo_footer) {
+ toread = bytes_per_datum + 2;
+ offset = 0;
+ } else {
+ toread = bytes_per_datum;
+ offset = 1;
+ /* Put in some dummy value */
+ fifo_values[0] = 0xAAAA;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_R,
+ &fifo_values[offset],
+ toread);
+
+ dev_dbg(mpu3050->dev,
+ "%04x %04x %04x %04x %04x\n",
+ fifo_values[0],
+ fifo_values[1],
+ fifo_values[2],
+ fifo_values[3],
+ fifo_values[4]);
+
+ /* Index past the footer (fifo_values[0]) and push */
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &fifo_values[1],
+ timestamp);
+
+ fifocnt -= toread;
+ datums_from_fifo++;
+ mpu3050->pending_fifo_footer = true;
+
+ /*
+ * If we're emptying the FIFO, just make sure to
+ * check if something new appeared.
+ */
+ if (fifocnt < bytes_per_datum) {
+ ret = regmap_bulk_read(mpu3050->map,
+ MPU3050_FIFO_COUNT_H,
+ &raw_fifocnt,
+ sizeof(raw_fifocnt));
+ if (ret)
+ goto out_trigger_unlock;
+ fifocnt = be16_to_cpu(raw_fifocnt);
+ }
+
+ if (fifocnt < bytes_per_datum)
+ dev_dbg(mpu3050->dev,
+ "%d bytes left in the FIFO\n",
+ fifocnt);
+
+ /*
+ * At this point, the timestamp that triggered the
+ * hardware interrupt is no longer valid for what
+ * we are reading (the interrupt likely fired for
+ * the value on the top of the FIFO), so set the
+ * timestamp to zero and let userspace deal with it.
+ */
+ timestamp = 0;
+ }
+ }
+
+ /*
+ * If we picked some datums from the FIFO that's enough, else
+ * fall through and just read from the current value registers.
+ * This happens in two cases:
+ *
+ * - We are using some other trigger (external, like an HRTimer)
+ * than the sensor's own sample generator. In this case the
+ * sensor is just set to the max sampling frequency and we give
+ * the trigger a copy of the latest value every time we get here.
+ *
+ * - The hardware trigger is active but unused and we actually use
+ * another trigger which calls here at a higher frequency than
+ * that at which the device provides new data. We will then just
+ * read duplicate values directly from the hardware registers.
+ */
+ if (datums_from_fifo) {
+ dev_dbg(mpu3050->dev,
+ "read %d datums from the FIFO\n",
+ datums_from_fifo);
+ goto out_trigger_unlock;
+ }
+
+ ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
+ sizeof(hw_values));
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "error reading axis data\n");
+ goto out_trigger_unlock;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
+
+out_trigger_unlock:
+ mutex_unlock(&mpu3050->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(mpu3050->dev);
+
+ /* Unless we have OUR trigger active, run at full speed */
+ if (!mpu3050->hw_irq_trigger)
+ return mpu3050_set_8khz_samplerate(mpu3050);
+
+ return 0;
+}
+
+static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = {
+ .preenable = mpu3050_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = mpu3050_buffer_postdisable,
+};
+
+static const struct iio_mount_matrix *
+mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ return &mpu3050->orientation;
+}
+
+static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
+ { },
+};
+
+#define MPU3050_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .ext_info = mpu3050_ext_info, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec mpu3050_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ MPU3050_AXIS_CHANNEL(X, 1),
+ MPU3050_AXIS_CHANNEL(Y, 2),
+ MPU3050_AXIS_CHANNEL(Z, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/* Four channels apart from timestamp, scan mask = 0x0f */
+static const unsigned long mpu3050_scan_masks[] = { 0xf, 0 };
+
+/*
+ * These are just the hardcoded factors resulting from the more elaborate
+ * calculations done with fractions in the scale raw get/set functions.
+ */
+static IIO_CONST_ATTR(anglevel_scale_available,
+ "0.000122070 "
+ "0.000274658 "
+ "0.000518798 "
+ "0.001068115");
+
+static struct attribute *mpu3050_attributes[] = {
+ &iio_const_attr_anglevel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mpu3050_attribute_group = {
+ .attrs = mpu3050_attributes,
+};
+
+static const struct iio_info mpu3050_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mpu3050_read_raw,
+ .write_raw = mpu3050_write_raw,
+ .attrs = &mpu3050_attribute_group,
+};
+
+/**
+ * mpu3050_read_mem() - read MPU-3050 internal memory
+ * @mpu3050: device to read from
+ * @bank: target bank
+ * @addr: target address
+ * @len: number of bytes
+ * @buf: the buffer to store the read bytes in
+ */
+static int mpu3050_read_mem(struct mpu3050 *mpu3050,
+ u8 bank,
+ u8 addr,
+ u8 len,
+ u8 *buf)
+{
+ int ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_BANK_SEL,
+ bank);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(mpu3050->map,
+ MPU3050_MEM_START_ADDR,
+ addr);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(mpu3050->map,
+ MPU3050_MEM_R_W,
+ buf,
+ len);
+}
+
+static int mpu3050_hw_init(struct mpu3050 *mpu3050)
+{
+ int ret;
+ u8 otp[8];
+
+ /* Reset */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET,
+ MPU3050_PWR_MGM_RESET);
+ if (ret)
+ return ret;
+
+ /* Turn on the PLL */
+ ret = regmap_update_bits(mpu3050->map,
+ MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_CLKSEL_MASK,
+ MPU3050_PWR_MGM_PLL_Z);
+ if (ret)
+ return ret;
+
+ /* Disable IRQs */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Read out the 8 bytes of OTP (one-time-programmable) memory */
+ ret = mpu3050_read_mem(mpu3050,
+ (MPU3050_MEM_PRFTCH |
+ MPU3050_MEM_USER_BANK |
+ MPU3050_MEM_OTP_BANK_0),
+ 0,
+ sizeof(otp),
+ otp);
+ if (ret)
+ return ret;
+
+ /* This is device-unique data so it goes into the entropy pool */
+ add_device_randomness(otp, sizeof(otp));
+
+ dev_info(mpu3050->dev,
+ "die ID: %04X, wafer ID: %02X, A lot ID: %04X, "
+ "W lot ID: %03X, WP ID: %01X, rev ID: %02X\n",
+ /* Die ID, bits 0-12 */
+ (otp[1] << 8 | otp[0]) & 0x1fff,
+ /* Wafer ID, bits 13-17 */
+ ((otp[2] << 8 | otp[1]) & 0x03e0) >> 5,
+ /* A lot ID, bits 18-33 */
+ ((otp[4] << 16 | otp[3] << 8 | otp[2]) & 0x3fffc) >> 2,
+ /* W lot ID, bits 34-45 */
+ ((otp[5] << 8 | otp[4]) & 0x3ffc) >> 2,
+ /* WP ID, bits 47-49 */
+ ((otp[6] << 8 | otp[5]) & 0x0380) >> 7,
+ /* rev ID, bits 50-55 */
+ otp[6] >> 2);
+
+ return 0;
+}
+
+static int mpu3050_power_up(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret) {
+ dev_err(mpu3050->dev, "cannot enable regulators\n");
+ return ret;
+ }
+ /*
+ * 20-100 ms start-up time for register read/write according to
+ * the datasheet, be on the safe side and wait 200 ms.
+ */
+ msleep(200);
+
+ /* Take device out of sleep mode */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, 0);
+ if (ret) {
+ dev_err(mpu3050->dev, "error setting power mode\n");
+ return ret;
+ }
+ msleep(10);
+
+ return 0;
+}
+
+static int mpu3050_power_down(struct mpu3050 *mpu3050)
+{
+ int ret;
+
+ /*
+ * Put MPU-3050 into sleep mode before cutting regulators.
+ * This is important, because we may not be the sole user
+ * of the regulator so the power may stay on after this, and
+ * then we would be wasting power unless we go to sleep mode
+ * first.
+ */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
+ if (ret)
+ dev_err(mpu3050->dev, "error putting to sleep\n");
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling regulators\n");
+
+ return 0;
+}
+
+static irqreturn_t mpu3050_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (!mpu3050->hw_irq_trigger)
+ return IRQ_NONE;
+
+ /* Get the time stamp as close in time as possible */
+ mpu3050->hw_timestamp = iio_get_time_ns(indio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mpu3050_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* ACK IRQ and check if it was from us */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret) {
+ dev_err(mpu3050->dev, "error reading IRQ status\n");
+ return IRQ_HANDLED;
+ }
+ if (!(val & MPU3050_INT_STATUS_RAW_RDY))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(p);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpu3050_drdy_trigger_set_state() - set data ready interrupt state
+ * @trig: trigger instance
+ * @enable: true if trigger should be enabled, false to disable
+ */
+static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
+ bool enable)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ /* Disabling trigger: disable interrupt and return */
+ if (!enable) {
+ /* Disable all interrupts */
+ ret = regmap_write(mpu3050->map,
+ MPU3050_INT_CFG,
+ 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling IRQ\n");
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Disable all things in the FIFO and reset it */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ dev_err(mpu3050->dev, "error disabling FIFO\n");
+
+ ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ dev_err(mpu3050->dev, "error resetting FIFO\n");
+
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ mpu3050->hw_irq_trigger = false;
+
+ return 0;
+ } else {
+ /* Else we're enabling the trigger from this point */
+ pm_runtime_get_sync(mpu3050->dev);
+ mpu3050->hw_irq_trigger = true;
+
+ /* Disable all things in the FIFO */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+ if (ret)
+ return ret;
+
+ /* Reset and enable the FIFO */
+ ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST,
+ MPU3050_USR_CTRL_FIFO_EN |
+ MPU3050_USR_CTRL_FIFO_RST);
+ if (ret)
+ return ret;
+
+ mpu3050->pending_fifo_footer = false;
+
+ /* Turn on the FIFO for temp+X+Y+Z */
+ ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
+ MPU3050_FIFO_EN_TEMP_OUT |
+ MPU3050_FIFO_EN_GYRO_XOUT |
+ MPU3050_FIFO_EN_GYRO_YOUT |
+ MPU3050_FIFO_EN_GYRO_ZOUT |
+ MPU3050_FIFO_EN_FOOTER);
+ if (ret)
+ return ret;
+
+ /* Configure the sample engine */
+ ret = mpu3050_start_sampling(mpu3050);
+ if (ret)
+ return ret;
+
+ /* Clear IRQ flag */
+ ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+ if (ret)
+ dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+ /* Give us interrupts whenever there is new data ready */
+ val = MPU3050_INT_RAW_RDY_EN;
+
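+ /* Mirror the line polarity, latch and open-drain settings detected at probe time */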
+ if (mpu3050->irq_actl)
+ val |= MPU3050_INT_ACTL;
+ if (mpu3050->irq_latch)
+ val |= MPU3050_INT_LATCH_EN;
+ if (mpu3050->irq_opendrain)
+ val |= MPU3050_INT_OPEN;
+
+ ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops mpu3050_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = mpu3050_drdy_trigger_set_state,
+};
+
+static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
+{
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ unsigned long irq_trig;
+ int ret;
+
+ mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!mpu3050->trig)
+ return -ENOMEM;
+
+ /* Check if IRQ is open drain */
+ if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
+ mpu3050->irq_opendrain = true;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ /*
+ * Configure the interrupt generator hardware to match whatever
+ * the interrupt line is configured for: rising/falling edge or
+ * high/low level; the chip can provide them all.
+ */
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the rising edge\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "rising edge incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ break;
+ case IRQF_TRIGGER_FALLING:
+ mpu3050->irq_actl = true;
+ dev_info(&indio_dev->dev,
+ "pulse interrupts on the falling edge\n");
+ break;
+ case IRQF_TRIGGER_HIGH:
+ mpu3050->irq_latch = true;
+ dev_info(&indio_dev->dev,
+ "interrupts active high level\n");
+ if (mpu3050->irq_opendrain) {
+ dev_info(&indio_dev->dev,
+ "active high incompatible with open drain\n");
+ mpu3050->irq_opendrain = false;
+ }
+ /*
+ * With level IRQs, we mask the IRQ until it is processed,
+ * but with edge IRQs (pulses) we can queue several interrupts
+ * in the top half.
+ */
+ irq_trig |= IRQF_ONESHOT;
+ break;
+ case IRQF_TRIGGER_LOW:
+ mpu3050->irq_latch = true;
+ mpu3050->irq_actl = true;
+ irq_trig |= IRQF_ONESHOT;
+ dev_info(&indio_dev->dev,
+ "interrupts active low level\n");
+ break;
+ default:
+ /* Fall back to rising edge, the preferred mode when nothing usable is specified */
+ dev_err(&indio_dev->dev,
+ "unsupported IRQ trigger specified (%lx), enforce "
+ "rising edge\n", irq_trig);
+ irq_trig = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ /* An open drain line can be shared with several devices */
+ if (mpu3050->irq_opendrain)
+ irq_trig |= IRQF_SHARED;
+
+ ret = request_threaded_irq(irq,
+ mpu3050_irq_handler,
+ mpu3050_irq_thread,
+ irq_trig,
+ mpu3050->trig->name,
+ mpu3050->trig);
+ if (ret) {
+ dev_err(mpu3050->dev,
+ "can't get IRQ %d, error %d\n", irq, ret);
+ return ret;
+ }
+
+ mpu3050->irq = irq;
+ mpu3050->trig->dev.parent = mpu3050->dev;
+ mpu3050->trig->ops = &mpu3050_trigger_ops;
+ iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
+
+ ret = iio_trigger_register(mpu3050->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(mpu3050->trig);
+
+ return 0;
+}
+
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct mpu3050 *mpu3050;
+ unsigned int val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
+ if (!indio_dev)
+ return -ENOMEM;
+ mpu3050 = iio_priv(indio_dev);
+
+ mpu3050->dev = dev;
+ mpu3050->map = map;
+ mutex_init(&mpu3050->lock);
+ /* Default fullscale: 2000 degrees per second */
+ mpu3050->fullscale = FS_2000_DPS;
+ /* 1 kHz base rate divided by (99 + 1) gives a 10 Hz default frequency */
+ mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
+ mpu3050->divisor = 99;
+
+ /* Read the mounting matrix, if present */
+ ret = of_iio_read_mount_matrix(dev, "mount-matrix",
+ &mpu3050->orientation);
+ if (ret)
+ return ret;
+
+ /* Fetch and turn on regulators */
+ mpu3050->regs[0].supply = mpu3050_reg_vdd;
+ mpu3050->regs[1].supply = mpu3050_reg_vlogic;
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
+ mpu3050->regs);
+ if (ret) {
+ dev_err(dev, "Cannot get regulators\n");
+ return ret;
+ }
+
+ ret = mpu3050_power_up(mpu3050);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+
+ if (val != MPU3050_CHIP_ID) {
+ dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+ ret = -ENODEV;
+ goto err_power_down;
+ }
+
+ ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
+ if (ret) {
+ dev_err(dev, "could not read device ID\n");
+ ret = -ENODEV;
+
+ goto err_power_down;
+ }
+ dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
+ ((val >> 4) & 0xf), (val & 0xf));
+
+ ret = mpu3050_hw_init(mpu3050);
+ if (ret)
+ goto err_power_down;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mpu3050_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
+ indio_dev->info = &mpu3050_info;
+ indio_dev->available_scan_masks = mpu3050_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = name;
+
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ mpu3050_trigger_handler,
+ &mpu3050_buffer_setup_ops);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto err_power_down;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "device register failed\n");
+ goto err_cleanup_buffer;
+ }
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /* Check if we have an assigned IRQ to use as trigger */
+ if (irq) {
+ ret = mpu3050_trigger_probe(indio_dev, irq);
+ if (ret)
+ dev_err(dev, "failed to register trigger\n");
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time. 100ms start-up time means 10000ms autosuspend,
+ * i.e. 10 seconds.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 10000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_power_down:
+ mpu3050_power_down(mpu3050);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpu3050_common_probe);
+
+int mpu3050_common_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (mpu3050->irq)
+ free_irq(mpu3050->irq, mpu3050);
+ iio_device_unregister(indio_dev);
+ mpu3050_power_down(mpu3050);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpu3050_common_remove);
+
+#ifdef CONFIG_PM
+static int mpu3050_runtime_suspend(struct device *dev)
+{
+ return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
+}
+
+static int mpu3050_runtime_resume(struct device *dev)
+{
+ return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops mpu3050_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
+ mpu3050_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(mpu3050_dev_pm_ops);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
new file mode 100644
index 000000000000..06007200bf49
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -0,0 +1,124 @@
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mpu3050.h"
+
+static const struct regmap_config mpu3050_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+ /* Just power up the device, that is all that is needed */
+ pm_runtime_get_sync(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
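+ /* Drop the reference taken in the select callback; autosuspend powers the gate down again */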
+ pm_runtime_mark_last_busy(mpu3050->dev);
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ return 0;
+}
+
+static int mpu3050_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name;
+ struct mpu3050 *mpu3050;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ if (id)
+ name = id->name;
+ else
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = mpu3050_common_probe(&client->dev, regmap, client->irq, name);
+ if (ret)
+ return ret;
+
+ /* The main driver is up, now register the I2C mux */
+ mpu3050 = iio_priv(dev_get_drvdata(&client->dev));
+ mpu3050->i2cmux = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
+ mpu3050_i2c_bypass_select,
+ mpu3050_i2c_bypass_deselect);
+ /* Just fail the mux, there is no point in killing the driver */
+ if (!mpu3050->i2cmux)
+ dev_err(&client->dev, "failed to allocate I2C mux\n");
+ else {
+ mpu3050->i2cmux->priv = mpu3050;
+ ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
+ if (ret)
+ dev_err(&client->dev, "failed to add I2C mux\n");
+ }
+
+ return 0;
+}
+
+static int mpu3050_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+ if (mpu3050->i2cmux)
+ i2c_mux_del_adapters(mpu3050->i2cmux);
+
+ return mpu3050_common_remove(&client->dev);
+}
+
+/*
+ * The device ID table identifies which devices are
+ * supported by this driver
+ */
+static const struct i2c_device_id mpu3050_i2c_id[] = {
+ { "mpu3050" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
+
+static const struct of_device_id mpu3050_i2c_of_match[] = {
+ { .compatible = "invensense,mpu3050", .data = "mpu3050" },
+ /* Deprecated vendor ID from the Input driver */
+ { .compatible = "invn,mpu3050", .data = "mpu3050" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
+
+static struct i2c_driver mpu3050_i2c_driver = {
+ .probe = mpu3050_i2c_probe,
+ .remove = mpu3050_i2c_remove,
+ .id_table = mpu3050_i2c_id,
+ .driver = {
+ .of_match_table = mpu3050_i2c_of_match,
+ .name = "mpu3050-i2c",
+ .pm = &mpu3050_dev_pm_ops,
+ },
+};
+module_i2c_driver(mpu3050_i2c_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("Invensense MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
new file mode 100644
index 000000000000..bef87a714dc5
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050.h
@@ -0,0 +1,96 @@
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+
+/**
+ * enum mpu3050_fullscale - indicates the full range of the sensor in deg/sec
+ */
+enum mpu3050_fullscale {
+ FS_250_DPS = 0,
+ FS_500_DPS,
+ FS_1000_DPS,
+ FS_2000_DPS,
+};
+
+/**
+ * enum mpu3050_lpf - indicates the low-pass filter bandwidth
+ */
+enum mpu3050_lpf {
+ /* This implicitly sets the sample frequency to 8 kHz */
+ LPF_256_HZ_NOLPF = 0,
+ /* All other settings set the sample frequency to 1 kHz */
+ LPF_188_HZ,
+ LPF_98_HZ,
+ LPF_42_HZ,
+ LPF_20_HZ,
+ LPF_10_HZ,
+ LPF_5_HZ,
+ LPF_2100_HZ_NOLPF,
+};
+
+enum mpu3050_axis {
+ AXIS_X = 0,
+ AXIS_Y,
+ AXIS_Z,
+ AXIS_MAX,
+};
+
+/**
+ * struct mpu3050 - instance state container for the device
+ * @dev: parent device for this instance
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to reach the registers
+ * @lock: serialization lock to marshal all requests
+ * @irq: the IRQ used for this device
+ * @regs: the regulators to power this device
+ * @fullscale: the current fullscale setting for the device
+ * @lpf: digital low pass filter setting for the device
+ * @divisor: base frequency divider: divides 8 or 1 kHz
+ * @calibration: the three signed 16-bit calibration settings that
+ * get written into the offset registers for each axis to compensate
+ * for DC offsets
+ * @trig: trigger for the MPU-3050 interrupt, if present
+ * @hw_irq_trigger: hardware interrupt trigger is in use
+ * @irq_actl: interrupt is active low
+ * @irq_latch: latched IRQ, which means that it is a level IRQ
+ * @irq_opendrain: the interrupt line shall be configured open drain
+ * @pending_fifo_footer: tells us if there is a pending footer in the FIFO
+ * that we have to read out first when handling the FIFO
+ * @hw_timestamp: latest hardware timestamp from the trigger IRQ, when in
+ * use
+ * @i2cmux: an I2C mux reflecting the fact that this sensor is a hub with
+ * a pass-through I2C interface coming out of it: this device needs to be
+ * powered up in order to reach devices on the other side of this mux
+ */
+struct mpu3050 {
+ struct device *dev;
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct mutex lock;
+ int irq;
+ struct regulator_bulk_data regs[2];
+ enum mpu3050_fullscale fullscale;
+ enum mpu3050_lpf lpf;
+ u8 divisor;
+ s16 calibration[3];
+ struct iio_trigger *trig;
+ bool hw_irq_trigger;
+ bool irq_actl;
+ bool irq_latch;
+ bool irq_opendrain;
+ bool pending_fifo_footer;
+ s64 hw_timestamp;
+ struct i2c_mux_core *i2cmux;
+};
+
+/* Probe called from different transports */
+int mpu3050_common_probe(struct device *dev,
+ struct regmap *map,
+ int irq,
+ const char *name);
+int mpu3050_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d8fe0f..2a42b3d583e8 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -39,79 +39,6 @@
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_GYRO_1_WAI_EXP 0xd3
-#define ST_GYRO_1_ODR_ADDR 0x20
-#define ST_GYRO_1_ODR_MASK 0xc0
-#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
-#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
-#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
-#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
-#define ST_GYRO_1_PW_ADDR 0x20
-#define ST_GYRO_1_PW_MASK 0x08
-#define ST_GYRO_1_FS_ADDR 0x23
-#define ST_GYRO_1_FS_MASK 0x30
-#define ST_GYRO_1_FS_AVL_250_VAL 0x00
-#define ST_GYRO_1_FS_AVL_500_VAL 0x01
-#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_1_BDU_ADDR 0x23
-#define ST_GYRO_1_BDU_MASK 0x80
-#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_GYRO_2_WAI_EXP 0xd4
-#define ST_GYRO_2_ODR_ADDR 0x20
-#define ST_GYRO_2_ODR_MASK 0xc0
-#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_2_PW_ADDR 0x20
-#define ST_GYRO_2_PW_MASK 0x08
-#define ST_GYRO_2_FS_ADDR 0x23
-#define ST_GYRO_2_FS_MASK 0x30
-#define ST_GYRO_2_FS_AVL_250_VAL 0x00
-#define ST_GYRO_2_FS_AVL_500_VAL 0x01
-#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_2_BDU_ADDR 0x23
-#define ST_GYRO_2_BDU_MASK 0x80
-#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_GYRO_3_WAI_EXP 0xd7
-#define ST_GYRO_3_ODR_ADDR 0x20
-#define ST_GYRO_3_ODR_MASK 0xc0
-#define ST_GYRO_3_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_3_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_3_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_3_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_3_PW_ADDR 0x20
-#define ST_GYRO_3_PW_MASK 0x08
-#define ST_GYRO_3_FS_ADDR 0x23
-#define ST_GYRO_3_FS_MASK 0x30
-#define ST_GYRO_3_FS_AVL_250_VAL 0x00
-#define ST_GYRO_3_FS_AVL_500_VAL 0x01
-#define ST_GYRO_3_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_3_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_3_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_3_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_3_BDU_ADDR 0x23
-#define ST_GYRO_3_BDU_MASK 0x80
-#define ST_GYRO_3_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_3_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_3_MULTIREAD_BIT true
-
-
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +57,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
- .wai = ST_GYRO_1_WAI_EXP,
+ .wai = 0xd3,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +65,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_1_ODR_ADDR,
- .mask = ST_GYRO_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
- { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_1_PW_ADDR,
- .mask = ST_GYRO_1_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -158,33 +85,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_1_FS_ADDR,
- .mask = ST_GYRO_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_1_FS_AVL_250_VAL,
- .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_1_FS_AVL_500_VAL,
- .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_1_FS_AVL_2000_VAL,
- .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_1_BDU_ADDR,
- .mask = ST_GYRO_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -192,11 +119,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_2_WAI_EXP,
+ .wai = 0xd4,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +135,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_2_ODR_ADDR,
- .mask = ST_GYRO_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_2_PW_ADDR,
- .mask = ST_GYRO_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -228,33 +155,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_2_FS_ADDR,
- .mask = ST_GYRO_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_2_FS_AVL_250_VAL,
- .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_2_FS_AVL_500_VAL,
- .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_2_FS_AVL_2000_VAL,
- .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_2_BDU_ADDR,
- .mask = ST_GYRO_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -262,29 +189,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_3_WAI_EXP,
+ .wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_3_ODR_ADDR,
- .mask = ST_GYRO_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_3_PW_ADDR,
- .mask = ST_GYRO_3_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -293,33 +220,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_3_FS_ADDR,
- .mask = ST_GYRO_3_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_3_FS_AVL_250_VAL,
- .gain = ST_GYRO_3_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_3_FS_AVL_500_VAL,
- .gain = ST_GYRO_3_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_3_FS_AVL_2000_VAL,
- .gain = ST_GYRO_3_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_3_BDU_ADDR,
- .mask = ST_GYRO_3_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_3_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -327,7 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b17e2e2bd4f5..912477d54be2 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -27,6 +27,8 @@ config DHT11
config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Texas Instruments
HDC1000 and HDC1008 relative humidity and temperature sensors.
@@ -34,6 +36,28 @@ config HDC100X
To compile this driver as a module, choose M here: the module
will be called hdc100x.
+config HTS221
+ tristate "STMicroelectronics HTS221 sensor Driver"
+ depends on (I2C || SPI)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HTS221_I2C if (I2C)
+ select HTS221_SPI if (SPI_MASTER)
+ help
+ Say yes here to build support for the STMicroelectronics HTS221
+ temperature and humidity sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hts221.
+
+config HTS221_I2C
+ tristate
+ depends on HTS221
+
+config HTS221_SPI
+ tristate
+ depends on HTS221
+
config HTU21
tristate "Measurement Specialties HTU21 humidity & temperature sensor"
depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 4a73442fcd9c..a6850e47c100 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,13 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
+
+hts221-y := hts221_core.o \
+ hts221_buffer.o
+obj-$(CONFIG_HTS221) += hts221.o
+obj-$(CONFIG_HTS221_I2C) += hts221_i2c.o
+obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
+
obj-$(CONFIG_HTU21) += htu21.o
obj-$(CONFIG_SI7005) += si7005.o
obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70c2a4a..265c34da52d1 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -22,11 +22,15 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01
#define HDC100X_REG_CONFIG 0x02
+#define HDC100X_REG_CONFIG_ACQ_MODE BIT(12)
#define HDC100X_REG_CONFIG_HEATER_EN BIT(13)
struct hdc100x_data {
@@ -87,22 +91,40 @@ static const struct iio_chan_spec hdc100x_channels[] = {
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_INT_TIME) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_HUMIDITYRELATIVE,
.address = HDC100X_REG_HUMIDITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_INT_TIME)
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_CURRENT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.extend_name = "heater",
.output = 1,
+ .scan_index = -1,
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
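+/* In buffered mode the temperature and humidity channels are always captured together */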
+static const unsigned long hdc100x_scan_masks[] = {0x3, 0};
+
static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val)
{
int tmp = (~mask & data->config) | val;
@@ -183,7 +205,14 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
*val = hdc100x_get_heater_status(data);
ret = IIO_VAL_INT;
} else {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
ret = hdc100x_get_measurement(data, chan);
+ iio_device_release_direct_mode(indio_dev);
if (ret >= 0) {
*val = ret;
ret = IIO_VAL_INT;
@@ -246,6 +275,78 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
}
}
+static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Buffer is enabled. First set ACQ Mode, then attach poll func */
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
+ HDC100X_REG_CONFIG_ACQ_MODE);
+ mutex_unlock(&data->lock);
+ if (ret)
+ return ret;
+
+ return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* First detach poll func, then reset ACQ mode. OK to disable buffer */
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
+ .postenable = hdc100x_buffer_postenable,
+ .predisable = hdc100x_buffer_predisable,
+};
+
+static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hdc100x_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int delay = data->adc_int_us[0] + data->adc_int_us[1];
+ int ret;
+ s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
+
+ /* dual read starts at temp register */
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte(client, HDC100X_REG_TEMP);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot start measurement\n");
+ goto err;
+ }
+ usleep_range(delay, delay + 1000);
+
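+ /* In acquisition mode a single 4-byte read returns the temperature sample followed by the humidity sample */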
+ ret = i2c_master_recv(client, (u8 *)buf, 4);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot read sensor data\n");
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info hdc100x_info = {
.read_raw = hdc100x_read_raw,
.write_raw = hdc100x_write_raw,
@@ -258,6 +359,7 @@ static int hdc100x_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct hdc100x_data *data;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
@@ -279,12 +381,35 @@ static int hdc100x_probe(struct i2c_client *client,
indio_dev->channels = hdc100x_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels);
+ indio_dev->available_scan_masks = hdc100x_scan_masks;
/* be sure we are in a known state */
hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]);
hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
+ hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ hdc100x_trigger_handler,
+ &hdc_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int hdc100x_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return 0;
}
static const struct i2c_device_id hdc100x_id[] = {
@@ -298,6 +423,7 @@ static struct i2c_driver hdc100x_driver = {
.name = "hdc100x",
},
.probe = hdc100x_probe,
+ .remove = hdc100x_remove,
.id_table = hdc100x_id,
};
module_i2c_driver(hdc100x_driver);
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
new file mode 100644
index 000000000000..c7154665512e
--- /dev/null
+++ b/drivers/iio/humidity/hts221.h
@@ -0,0 +1,73 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef HTS221_H
+#define HTS221_H
+
+#define HTS221_DEV_NAME "hts221"
+
+#include <linux/iio/iio.h>
+
+#define HTS221_RX_MAX_LENGTH 8
+#define HTS221_TX_MAX_LENGTH 8
+
+#define HTS221_DATA_SIZE 2
+
+struct hts221_transfer_buffer {
+ u8 rx_buf[HTS221_RX_MAX_LENGTH];
+ u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+struct hts221_transfer_function {
+ int (*read)(struct device *dev, u8 addr, int len, u8 *data);
+ int (*write)(struct device *dev, u8 addr, int len, u8 *data);
+};
+
+#define HTS221_AVG_DEPTH 8
+struct hts221_avg_avl {
+ u16 avg;
+ u8 val;
+};
+
+enum hts221_sensor_type {
+ HTS221_SENSOR_H,
+ HTS221_SENSOR_T,
+ HTS221_SENSOR_MAX,
+};
+
+struct hts221_sensor {
+ u8 cur_avg_idx;
+ int slope, b_gen;
+};
+
+struct hts221_hw {
+ const char *name;
+ struct device *dev;
+
+ struct mutex lock;
+ struct iio_trigger *trig;
+ int irq;
+
+ struct hts221_sensor sensors[HTS221_SENSOR_MAX];
+
+ u8 odr;
+
+ const struct hts221_transfer_function *tf;
+ struct hts221_transfer_buffer tb;
+};
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable);
+int hts221_probe(struct iio_dev *iio_dev);
+int hts221_power_on(struct hts221_hw *hw);
+int hts221_power_off(struct hts221_hw *hw);
+int hts221_allocate_buffers(struct hts221_hw *hw);
+int hts221_allocate_trigger(struct hts221_hw *hw);
+
+#endif /* HTS221_H */
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
new file mode 100644
index 000000000000..72ddcdac21a2
--- /dev/null
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -0,0 +1,168 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_STATUS_ADDR 0x27
+#define HTS221_RH_DRDY_MASK BIT(1)
+#define HTS221_TEMP_DRDY_MASK BIT(0)
+
+static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hts221_config_drdy(hw, state);
+}
+
+static const struct iio_trigger_ops hts221_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = hts221_trig_set_state,
+};
+
+static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
+{
+ struct hts221_hw *hw = (struct hts221_hw *)private;
+ u8 status;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
+ &status);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+ /*
+ * H_DA bit (humidity data available) is routed to DRDY line.
+ * The humidity sample is computed after the temperature one, so
+ * we can assume both data channels are available when the H_DA
+ * bit is set in the status register.
+ */
+ if (!(status & HTS221_RH_DRDY_MASK))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_trigger(struct hts221_hw *hw)
+{
+ struct iio_dev *iio_dev = iio_priv_to_dev(hw);
+ unsigned long irq_type;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ default:
+ dev_info(hw->dev,
+ "mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
+ irq_type);
+ irq_type = IRQF_TRIGGER_RISING;
+ break;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
+ hts221_trigger_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ hw->name, hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ hw->trig = devm_iio_trigger_alloc(hw->dev, "%s-trigger",
+ iio_dev->name);
+ if (!hw->trig)
+ return -ENOMEM;
+
+ iio_trigger_set_drvdata(hw->trig, iio_dev);
+ hw->trig->ops = &hts221_trigger_ops;
+ hw->trig->dev.parent = hw->dev;
+ iio_dev->trig = iio_trigger_get(hw->trig);
+
+ return devm_iio_trigger_register(hw->dev, hw->trig);
+}
+
+static int hts221_buffer_preenable(struct iio_dev *iio_dev)
+{
+ return hts221_power_on(iio_priv(iio_dev));
+}
+
+static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
+{
+ return hts221_power_off(iio_priv(iio_dev));
+}
+
+static const struct iio_buffer_setup_ops hts221_buffer_ops = {
+ .preenable = hts221_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = hts221_buffer_postdisable,
+};
+
+static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
+{
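+ /* Two 16-bit samples padded to an 8-byte boundary, plus the 64-bit timestamp */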
+ u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ struct iio_chan_spec const *ch;
+ int err;
+
+ /* humidity data */
+ ch = &iio_dev->channels[HTS221_SENSOR_H];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer);
+ if (err < 0)
+ goto out;
+
+ /* temperature data */
+ ch = &iio_dev->channels[HTS221_SENSOR_T];
+ err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+ buffer + HTS221_DATA_SIZE);
+ if (err < 0)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_get_time_ns(iio_dev));
+
+out:
+ iio_trigger_notify_done(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+int hts221_allocate_buffers(struct hts221_hw *hw)
+{
+ return devm_iio_triggered_buffer_setup(hw->dev, iio_priv_to_dev(hw),
+ NULL, hts221_buffer_handler_thread,
+ &hts221_buffer_ops);
+}
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 buffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
new file mode 100644
index 000000000000..3f3ef4a1a474
--- /dev/null
+++ b/drivers/iio/humidity/hts221_core.c
@@ -0,0 +1,687 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_WHOAMI_ADDR 0x0f
+#define HTS221_REG_WHOAMI_VAL 0xbc
+
+#define HTS221_REG_CNTRL1_ADDR 0x20
+#define HTS221_REG_CNTRL2_ADDR 0x21
+#define HTS221_REG_CNTRL3_ADDR 0x22
+
+#define HTS221_REG_AVG_ADDR 0x10
+#define HTS221_REG_H_OUT_L 0x28
+#define HTS221_REG_T_OUT_L 0x2a
+
+#define HTS221_HUMIDITY_AVG_MASK 0x07
+#define HTS221_TEMP_AVG_MASK 0x38
+
+#define HTS221_ODR_MASK 0x87
+#define HTS221_BDU_MASK BIT(2)
+
+#define HTS221_DRDY_MASK BIT(2)
+
+#define HTS221_ENABLE_SENSOR BIT(7)
+
+#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */
+#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */
+#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */
+#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */
+#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */
+#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */
+#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */
+#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */
+
+#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */
+#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */
+#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */
+#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */
+#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */
+#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */
+#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */
+#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */
+
+/* calibration registers */
+#define HTS221_REG_0RH_CAL_X_H 0x36
+#define HTS221_REG_1RH_CAL_X_H 0x3a
+#define HTS221_REG_0RH_CAL_Y_H 0x30
+#define HTS221_REG_1RH_CAL_Y_H 0x31
+#define HTS221_REG_0T_CAL_X_L 0x3c
+#define HTS221_REG_1T_CAL_X_L 0x3e
+#define HTS221_REG_0T_CAL_Y_H 0x32
+#define HTS221_REG_1T_CAL_Y_H 0x33
+#define HTS221_REG_T1_T0_CAL_Y_H 0x35
+
+struct hts221_odr {
+ u8 hz;
+ u8 val;
+};
+
+struct hts221_avg {
+ u8 addr;
+ u8 mask;
+ struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
+};
+
+static const struct hts221_odr hts221_odr_table[] = {
+ { 1, 0x01 }, /* 1Hz */
+ { 7, 0x02 }, /* 7Hz */
+ { 13, 0x03 }, /* 12.5Hz */
+};
+
+static const struct hts221_avg hts221_avg_list[] = {
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_HUMIDITY_AVG_MASK,
+ .avg_avl = {
+ { 4, HTS221_HUMIDITY_AVG_4 },
+ { 8, HTS221_HUMIDITY_AVG_8 },
+ { 16, HTS221_HUMIDITY_AVG_16 },
+ { 32, HTS221_HUMIDITY_AVG_32 },
+ { 64, HTS221_HUMIDITY_AVG_64 },
+ { 128, HTS221_HUMIDITY_AVG_128 },
+ { 256, HTS221_HUMIDITY_AVG_256 },
+ { 512, HTS221_HUMIDITY_AVG_512 },
+ },
+ },
+ {
+ .addr = HTS221_REG_AVG_ADDR,
+ .mask = HTS221_TEMP_AVG_MASK,
+ .avg_avl = {
+ { 2, HTS221_TEMP_AVG_2 },
+ { 4, HTS221_TEMP_AVG_4 },
+ { 8, HTS221_TEMP_AVG_8 },
+ { 16, HTS221_TEMP_AVG_16 },
+ { 32, HTS221_TEMP_AVG_32 },
+ { 64, HTS221_TEMP_AVG_64 },
+ { 128, HTS221_TEMP_AVG_128 },
+ { 256, HTS221_TEMP_AVG_256 },
+ },
+ },
+};
+
+static const struct iio_chan_spec hts221_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .address = HTS221_REG_H_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .address = HTS221_REG_T_OUT_L,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
+ u8 val)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&hw->lock);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read %02x register\n", addr);
+ goto unlock;
+ }
+
+ data = (data & ~mask) | (val & mask);
+
+ err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
+ if (err < 0)
+ dev_err(hw->dev, "failed to write %02x register\n", addr);
+
+unlock:
+ mutex_unlock(&hw->lock);
+
+ return err;
+}
+
+static int hts221_check_whoami(struct hts221_hw *hw)
+{
+ u8 data;
+ int err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
+ &data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read whoami register\n");
+ return err;
+ }
+
+ if (data != HTS221_REG_WHOAMI_VAL) {
+ dev_err(hw->dev, "wrong whoami {%02x vs %02x}\n",
+ data, HTS221_REG_WHOAMI_VAL);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable)
+{
+ u8 val = enable ? BIT(2) : 0;
+ int err;
+
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
+ HTS221_DRDY_MASK, val);
+
+ return err < 0 ? err : 0;
+}
+
+static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
+{
+ int i, err;
+ u8 val;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ if (hts221_odr_table[i].hz == odr)
+ break;
+
+ if (i == ARRAY_SIZE(hts221_odr_table))
+ return -EINVAL;
+
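+ /* HTS221_ODR_MASK also covers the enable and BDU bits, so all three are set in one masked write */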
+ val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ODR_MASK, val);
+ if (err < 0)
+ return err;
+
+ hw->odr = odr;
+
+ return 0;
+}
+
+static int hts221_update_avg(struct hts221_hw *hw,
+ enum hts221_sensor_type type,
+ u16 val)
+{
+ int i, err;
+ const struct hts221_avg *avg = &hts221_avg_list[type];
+
+ for (i = 0; i < HTS221_AVG_DEPTH; i++)
+ if (avg->avg_avl[i].avg == val)
+ break;
+
+ if (i == HTS221_AVG_DEPTH)
+ return -EINVAL;
+
+ err = hts221_write_with_mask(hw, avg->addr, avg->mask,
+ avg->avg_avl[i].val);
+ if (err < 0)
+ return err;
+
+ hw->sensors[type].cur_avg_idx = i;
+
+ return 0;
+}
+
+static ssize_t hts221_sysfs_sampling_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ hts221_odr_table[i].hz);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_rh_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+hts221_sysfs_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ avg->avg_avl[i].avg);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+int hts221_power_on(struct hts221_hw *hw)
+{
+ return hts221_update_odr(hw, hw->odr);
+}
+
+int hts221_power_off(struct hts221_hw *hw)
+{
+ u8 data[] = {0x00, 0x00};
+
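+ /* Clear CTRL_REG1 and CTRL_REG2 in a single write to power the sensor down */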
+ return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+ data);
+}
+
+static int hts221_parse_temp_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 cal0, cal1;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
+ sizeof(cal1), &cal1);
+ if (err < 0)
+ return err;
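+ /*
+ * T0 and T1 are 10-bit values: bits [1:0] and [3:2] of the
+ * T1_T0 msb register extend the 8-bit T0/T1 registers.
+ */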
+ cal_y0 = ((cal1 & 0x3) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
+ sizeof(cal0), &cal0);
+ if (err < 0)
+ return err;
+ cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_T].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
+
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_parse_rh_caldata(struct hts221_hw *hw)
+{
+ int err, *slope, *b_gen;
+ s16 cal_x0, cal_x1, cal_y0, cal_y1;
+ u8 data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y0 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
+ &data);
+ if (err < 0)
+ return err;
+ cal_y1 = data;
+
+ err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
+ (u8 *)&cal_x0);
+ if (err < 0)
+ return err;
+ cal_x0 = le16_to_cpu(cal_x0);
+
+ err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
+ (u8 *)&cal_x1);
+ if (err < 0)
+ return err;
+ cal_x1 = le16_to_cpu(cal_x1);
+
+ slope = &hw->sensors[HTS221_SENSOR_H].slope;
+ b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
+
+ *slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+ *b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+ (cal_x1 - cal_x0);
+ *b_gen *= 8;
+
+ return 0;
+}
+
+static int hts221_get_sensor_scale(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].slope;
+ div = (1 << 4) * 1000;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].slope;
+ div = (1 << 6) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_get_sensor_offset(struct hts221_hw *hw,
+ enum iio_chan_type ch_type,
+ int *val, int *val2)
+{
+ s64 tmp;
+ s32 rem, div, data;
+
+ switch (ch_type) {
+ case IIO_HUMIDITYRELATIVE:
+ data = hw->sensors[HTS221_SENSOR_H].b_gen;
+ div = hw->sensors[HTS221_SENSOR_H].slope;
+ break;
+ case IIO_TEMP:
+ data = hw->sensors[HTS221_SENSOR_T].b_gen;
+ div = hw->sensors[HTS221_SENSOR_T].slope;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tmp = div_s64(data * 1000000000LL, div);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+ *val = tmp;
+ *val2 = rem;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
+{
+ u8 data[HTS221_DATA_SIZE];
+ int err;
+
+ err = hts221_power_on(hw);
+ if (err < 0)
+ return err;
+
+ msleep(50);
+
+ err = hw->tf->read(hw->dev, addr, sizeof(data), data);
+ if (err < 0)
+ return err;
+
+ hts221_power_off(hw);
+
+ *val = (s16)get_unaligned_le16(data);
+
+ return IIO_VAL_INT;
+}
+
+static int hts221_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = hts221_read_oneshot(hw, ch->address, val);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = hw->odr;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
+ u8 idx;
+ const struct hts221_avg *avg;
+
+ switch (ch->type) {
+ case IIO_HUMIDITYRELATIVE:
+ avg = &hts221_avg_list[HTS221_SENSOR_H];
+ idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ avg = &hts221_avg_list[HTS221_SENSOR_T];
+ idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
+ *val = avg->avg_avl[idx].avg;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hts221_update_odr(hw, val);
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
+ break;
+ case IIO_TEMP:
+ ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static int hts221_validate_trigger(struct iio_dev *iio_dev,
+ struct iio_trigger *trig)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ return hw->trig == trig ? 0 : -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(in_humidity_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_rh_oversampling_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available, S_IRUGO,
+ hts221_sysfs_temp_oversampling_avail, NULL, 0);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hts221_sysfs_sampling_freq);
+
+static struct attribute *hts221_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_humidity_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hts221_attribute_group = {
+ .attrs = hts221_attributes,
+};
+
+static const struct iio_info hts221_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &hts221_attribute_group,
+ .read_raw = hts221_read_raw,
+ .write_raw = hts221_write_raw,
+ .validate_trigger = hts221_validate_trigger,
+};
+
+static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
+
+int hts221_probe(struct iio_dev *iio_dev)
+{
+ struct hts221_hw *hw = iio_priv(iio_dev);
+ int err;
+ u8 data;
+
+ mutex_init(&hw->lock);
+
+ err = hts221_check_whoami(hw);
+ if (err < 0)
+ return err;
+
+ hw->odr = hts221_odr_table[0].hz;
+
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->dev.parent = hw->dev;
+ iio_dev->available_scan_masks = hts221_scan_masks;
+ iio_dev->channels = hts221_channels;
+ iio_dev->num_channels = ARRAY_SIZE(hts221_channels);
+ iio_dev->name = HTS221_DEV_NAME;
+ iio_dev->info = &hts221_info;
+
+ /* configure humidity sensor */
+ err = hts221_parse_rh_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to get rh calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
+ if (err < 0) {
+ dev_err(hw->dev, "failed to set rh oversampling ratio\n");
+ return err;
+ }
+
+ /* configure temperature sensor */
+ err = hts221_parse_temp_caldata(hw);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to get temperature calibration data\n");
+ return err;
+ }
+
+ data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
+ err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to set temperature oversampling ratio\n");
+ return err;
+ }
+
+ if (hw->irq > 0) {
+ err = hts221_allocate_buffers(hw);
+ if (err < 0)
+ return err;
+
+ err = hts221_allocate_trigger(hw);
+ if (err)
+ return err;
+ }
+
+ return devm_iio_device_register(hw->dev, iio_dev);
+}
+EXPORT_SYMBOL(hts221_probe);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
new file mode 100644
index 000000000000..367ecd509f31
--- /dev/null
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -0,0 +1,110 @@
+/*
+ * STMicroelectronics hts221 i2c driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
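+/*
+ * Per the HTS221 register map convention, setting the MSB of the register
+ * address enables automatic address increment on multi-byte transfers,
+ * which is what the read/write helpers below rely on.
+ */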
+#define I2C_AUTO_INCREMENT 0x80
+
+static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct i2c_msg msg[2];
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].len = 1;
+ msg[0].buf = &addr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].len = len;
+ msg[1].buf = data;
+
+ return i2c_transfer(client->adapter, msg, 2);
+}
+
+static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ u8 send[len + 1];
+ struct i2c_msg msg;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (len > 1)
+ addr |= I2C_AUTO_INCREMENT;
+
+ send[0] = addr;
+ memcpy(&send[1], data, len * sizeof(u8));
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = len + 1;
+ msg.buf = send;
+
+ return i2c_transfer(client->adapter, &msg, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_i2c_read,
+ .write = hts221_i2c_write,
+};
+
+static int hts221_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = client->name;
+ hw->dev = &client->dev;
+ hw->irq = client->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_i2c_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
+
+static const struct i2c_device_id hts221_i2c_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
+
+static struct i2c_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_i2c",
+ .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ },
+ .probe = hts221_i2c_probe,
+ .id_table = hts221_i2c_id_table,
+};
+module_i2c_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
new file mode 100644
index 000000000000..70df5e7150c1
--- /dev/null
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -0,0 +1,125 @@
+/*
+ * STMicroelectronics hts221 spi driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define SENSORS_SPI_READ 0x80
+#define SPI_AUTO_INCREMENT 0x40
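+/*
+ * First byte on the wire: bit 7 selects a read transaction and bit 6
+ * requests automatic address increment, as used by the helpers below.
+ */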
+
+static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+ int err;
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = hw->tb.rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
+
+ err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+ if (err < 0)
+ return err;
+
+ memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
+
+ return len;
+}
+
+static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct hts221_hw *hw = iio_priv(iio_dev);
+
+ struct spi_transfer xfers = {
+ .tx_buf = hw->tb.tx_buf,
+ .bits_per_word = 8,
+ .len = len + 1,
+ };
+
+ if (len >= HTS221_TX_MAX_LENGTH)
+ return -ENOMEM;
+
+ if (len > 1)
+ addr |= SPI_AUTO_INCREMENT;
+ hw->tb.tx_buf[0] = addr;
+ memcpy(&hw->tb.tx_buf[1], data, len);
+
+ return spi_sync_transfer(spi, &xfers, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+ .read = hts221_spi_read,
+ .write = hts221_spi_write,
+};
+
+static int hts221_spi_probe(struct spi_device *spi)
+{
+ struct hts221_hw *hw;
+ struct iio_dev *iio_dev;
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = spi->modalias;
+ hw->dev = &spi->dev;
+ hw->irq = spi->irq;
+ hw->tf = &hts221_transfer_fn;
+
+ return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_spi_of_match[] = {
+ { .compatible = "st,hts221", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
+
+static const struct spi_device_id hts221_spi_id_table[] = {
+ { HTS221_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
+
+static struct spi_driver hts221_driver = {
+ .driver = {
+ .name = "hts221_spi",
+ .of_match_table = of_match_ptr(hts221_spi_of_match),
+ },
+ .probe = hts221_spi_probe,
+ .id_table = hts221_spi_id_table,
+};
+module_spi_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index ffc2ccf6374e..345a7656c5ef 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -154,8 +154,17 @@ static const struct i2c_device_id si7020_id[] = {
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
+static const struct of_device_id si7020_dt_ids[] = {
+ { .compatible = "silabs,si7020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7020_dt_ids);
+
static struct i2c_driver si7020_driver = {
- .driver.name = "si7020",
+ .driver = {
+ .name = "si7020",
+ .of_match_table = of_match_ptr(si7020_dt_ids),
+ },
.probe = si7020_probe,
.id_table = si7020_id,
};
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e0251b8c1a52..5355507f8fa1 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -398,7 +398,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
- s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+ __le16 buf[16];
+ /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 19580d1db597..2c3f8964a3ea 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -126,7 +126,7 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(dev_get_drvdata(&client->dev));
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
- 1, 0, I2C_MUX_LOCKED,
+ 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
inv_mpu6050_select_bypass,
inv_mpu6050_deselect_bypass);
if (!st->muxc) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf44dd95..b12830b09c7d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -307,10 +307,9 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kmalloc(sizeof(*trialmask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
-
+ trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*trialmask),
+ GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed3dca1..aaca42862389 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
+ [IIO_COUNT] = "count",
+ [IIO_INDEX] = "index",
};
static const char * const iio_modifier_names[] = {
@@ -575,66 +577,82 @@ int of_iio_read_mount_matrix(const struct device *dev,
#endif
EXPORT_SYMBOL(of_iio_read_mount_matrix);
-/**
- * iio_format_value() - Formats a IIO value into its string representation
- * @buf: The buffer to which the formatted value gets written
- * @type: One of the IIO_VAL_... constants. This decides how the val
- * and val2 parameters are formatted.
- * @size: Number of IIO value entries contained in vals
- * @vals: Pointer to the values, exact meaning depends on the
- * type parameter.
- *
- * Return: 0 by default, a negative number on failure or the
- * total number of characters written for a type that belongs
- * to the IIO_VAL_... constant.
- */
-ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
+ int size, const int *vals)
{
unsigned long long tmp;
+ int tmp0, tmp1;
bool scale_db = false;
switch (type) {
case IIO_VAL_INT:
- return sprintf(buf, "%d\n", vals[0]);
+ return snprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
- -vals[1], scale_db ? " dB" : "");
+ return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ -vals[1], scale_db ? " dB" : "");
else
- return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
- scale_db ? " dB" : "");
+ return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
- -vals[1]);
+ return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ -vals[1]);
else
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
- return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
+ tmp1 = vals[1];
+ tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
+ return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = (s64)vals[0] * 1000000000LL >> vals[1];
- vals[1] = do_div(tmp, 1000000000LL);
- vals[0] = tmp;
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ tmp1 = do_div(tmp, 1000000000LL);
+ tmp0 = tmp;
+ return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
case IIO_VAL_INT_MULTIPLE:
{
int i;
- int len = 0;
+ int l = 0;
- for (i = 0; i < size; ++i)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%d ",
- vals[i]);
- len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
- return len;
+ for (i = 0; i < size; ++i) {
+ l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ if (l >= len)
+ break;
+ }
+ return l;
}
default:
return 0;
}
}
+
+/**
+ * iio_format_value() - Formats a IIO value into its string representation
+ * @buf: The buffer to which the formatted value gets written
+ * which is assumed to be big enough (i.e. PAGE_SIZE).
+ * @type: One of the IIO_VAL_... constants. This decides how the val
+ * and val2 parameters are formatted.
+ * @size: Number of IIO value entries contained in vals
+ * @vals: Pointer to the values, exact meaning depends on the
+ * type parameter.
+ *
+ * Return: 0 by default, a negative number on failure or the
+ * total number of characters written for a type that belongs
+ * to the IIO_VAL_... constant.
+ */
+ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+{
+ ssize_t len;
+
+ len = __iio_format_value(buf, PAGE_SIZE, type, size, vals);
+ if (len >= PAGE_SIZE - 1)
+ return -EFBIG;
+
+ return len + sprintf(buf + len, "\n");
+}
EXPORT_SYMBOL_GPL(iio_format_value);
static ssize_t iio_read_channel_info(struct device *dev,
@@ -662,6 +680,119 @@ static ssize_t iio_read_channel_info(struct device *dev,
return iio_format_value(buf, ret, val_len, vals);
}
+static ssize_t iio_format_avail_list(char *buf, const int *vals,
+ int type, int length)
+{
+ int i;
+ ssize_t len = 0;
+
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < length; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
+ for (i = 0; i < length / 2; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < length / 2 - 1)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
+{
+ int i;
+ ssize_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "[");
+ switch (type) {
+ case IIO_VAL_INT:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 1, &vals[i]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ break;
+ default:
+ for (i = 0; i < 3; i++) {
+ len += __iio_format_value(buf + len, PAGE_SIZE - len,
+ type, 2, &vals[i * 2]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ if (i < 2)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " ");
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "]\n");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t iio_read_channel_info_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ const int *vals;
+ int ret;
+ int length;
+ int type;
+
+ ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ &vals, &type, &length,
+ this_attr->address);
+
+ if (ret < 0)
+ return ret;
+ switch (ret) {
+ case IIO_AVAIL_LIST:
+ return iio_format_avail_list(buf, vals, type, length);
+ case IIO_AVAIL_RANGE:
+ return iio_format_avail_range(buf, vals, type);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
@@ -978,6 +1109,40 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
return attrcount;
}
+static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_shared_by shared_by,
+ const long *infomask)
+{
+ int i, ret, attrcount = 0;
+ char *avail_postfix;
+
+ for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
+ avail_postfix = kasprintf(GFP_KERNEL,
+ "%s_available",
+ iio_chan_info_postfix[i]);
+ if (!avail_postfix)
+ return -ENOMEM;
+
+ ret = __iio_add_chan_devattr(avail_postfix,
+ chan,
+ &iio_read_channel_info_avail,
+ NULL,
+ i,
+ shared_by,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ kfree(avail_postfix);
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+ else if (ret < 0)
+ return ret;
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
@@ -993,6 +1158,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SEPARATE,
+ &chan->
+ info_mask_separate_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_TYPE,
&chan->info_mask_shared_by_type);
@@ -1000,6 +1173,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_TYPE,
+ &chan->
+ info_mask_shared_by_type_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_DIR,
&chan->info_mask_shared_by_dir);
@@ -1007,6 +1188,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_DIR,
+ &chan->info_mask_shared_by_dir_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_ALL,
&chan->info_mask_shared_by_all);
@@ -1014,6 +1202,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
attrcount += ret;
+ ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+ IIO_SHARED_BY_ALL,
+ &chan->info_mask_shared_by_all_available);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
if (chan->ext_info) {
unsigned int i = 0;
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
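The read_avail plumbing above routes the per-channel "_available" attributes to a new driver callback and prints the result with __iio_format_value() (IIO_AVAIL_LIST values come out space separated, IIO_AVAIL_RANGE as "[min step max]"). A minimal driver-side sketch, using hypothetical names and values rather than anything from this patch:

/* Hypothetical sketch only; names and values are illustrative. */
static const int foo_scale_avail[] = {
	0, 100000,	/* 0.100000 */
	0, 200000,	/* 0.200000 */
	0, 400000,	/* 0.400000 */
};

static int foo_read_avail(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals, int *type, int *length,
			  long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*vals = foo_scale_avail;	/* {integer, micro} pairs */
		*type = IIO_VAL_INT_PLUS_MICRO;
		*length = ARRAY_SIZE(foo_scale_avail);
		/* sysfs then shows "0.100000 0.200000 0.400000" */
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}

/*
 * Wired up with .read_avail = foo_read_avail in struct iio_info and
 * .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE) in the channel.
 */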
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index e1e104845e38..978729f6d7c4 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -717,6 +717,27 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_trigger_using_own);
+/**
+ * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
+ * the same device
+ * @trig: The IIO trigger to check
+ * @indio_dev: the IIO device to check
+ *
+ * This function can be used as the validate_device callback for triggers that
+ * can only be attached to their own device.
+ *
+ * Return: 0 if both the trigger and the IIO device belong to the same
+ * device, -EINVAL otherwise.
+ */
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ if (indio_dev->dev.parent != trig->dev.parent)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(iio_trigger_validate_own_device);
+
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
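As the kernel-doc above notes, iio_trigger_validate_own_device() is meant to be dropped straight into a trigger's ops; a minimal sketch with hypothetical driver names:

/* Hypothetical sketch only. */
static int foo_set_trigger_state(struct iio_trigger *trig, bool state)
{
	/* enable or disable the hardware interrupt here */
	return 0;
}

static const struct iio_trigger_ops foo_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = foo_set_trigger_state,
	.validate_device = iio_trigger_validate_own_device,
};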
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c4757e6367e7..b0f4630a163f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -658,6 +658,31 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
+static int iio_read_channel_attribute(struct iio_channel *chan,
+ int *val, int *val2,
+ enum iio_chan_info_enum attribute)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, val2, attribute);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+
+int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
+{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_offset);
+
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
int ret;
@@ -687,21 +712,113 @@ EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
+ return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+
+static int iio_channel_read_avail(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum info)
+{
+ if (!iio_channel_has_available(chan->channel, info))
+ return -EINVAL;
+
+ return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+ vals, type, length, info);
+}
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
int ret;
+ int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
- ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
+ ret = iio_channel_read_avail(chan,
+ vals, &type, length, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+ if (ret >= 0 && type != IIO_VAL_INT) {
+ /* raw values are assumed to be IIO_VAL_INT */
+ ret = -EINVAL;
+ }
+
return ret;
}
-EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
+
+static int iio_channel_read_max(struct iio_channel *chan,
+ int *val, int *val2, int *type,
+ enum iio_chan_info_enum info)
+{
+ int unused;
+ const int *vals;
+ int length;
+ int ret;
+
+ if (!val2)
+ val2 = &unused;
+
+ ret = iio_channel_read_avail(chan, &vals, type, &length, info);
+ switch (ret) {
+ case IIO_AVAIL_RANGE:
+ switch (*type) {
+ case IIO_VAL_INT:
+ *val = vals[2];
+ break;
+ default:
+ *val = vals[4];
+ *val2 = vals[5];
+ }
+ return 0;
+
+ case IIO_AVAIL_LIST:
+ if (length <= 0)
+ return -EINVAL;
+ switch (*type) {
+ case IIO_VAL_INT:
+ *val = vals[--length];
+ while (length) {
+ if (vals[--length] > *val)
+ *val = vals[length];
+ }
+ break;
+ default:
+ /* FIXME: learn about max for other iio values */
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return ret;
+ }
+}
+
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
+{
+ int ret;
+ int type;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (!chan->indio_dev->info) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
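On the consumer side, the helpers added above (iio_read_channel_offset(), iio_read_avail_channel_raw() and iio_read_max_channel_raw()) can be used to discover a channel's advertised raw range; a hedged sketch with a hypothetical caller:

/* Hypothetical sketch only. */
static int foo_report_raw_max(struct iio_channel *chan)
{
	int raw_max;
	int ret;

	ret = iio_read_max_channel_raw(chan, &raw_max);
	if (ret < 0)
		return ret;

	pr_info("largest advertised raw value: %d\n", raw_max);

	return 0;
}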
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ba2e64d7ee58..298ea5081a96 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -140,6 +140,18 @@ config GP2AP020A00F
To compile this driver as a module, choose M here: the
module will be called gp2ap020a00f.
+config SENSORS_ISL29018
+ tristate "Intersil 29018 light and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for ambient light sensing and
+ proximity infrared sensing from the Intersil ISL29018.
+ This driver provides measurements of ambient light intensity in
+ lux, proximity infrared sensing and normal infrared sensing.
+ Data from the sensor is accessible via sysfs.
+
config ISL29125
tristate "Intersil ISL29125 digital color light sensor"
depends on I2C
@@ -326,6 +338,13 @@ config SENSORS_TSL2563
This driver can also be built as a module. If so, the module
will be called tsl2563.
+config TSL2583
+ tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
+ depends on I2C
+ help
+ Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
+ ALS data is accessible via the iio sysfs interface.
+
config TSL4531
tristate "TAOS TSL4531 ambient light sensors"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c5768df87a17..4de520036e6e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index a767a43c995c..917dd8b43e72 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -62,16 +62,6 @@
#define ISL29035_BOUT_SHIFT 0x07
#define ISL29035_BOUT_MASK (0x01 << ISL29035_BOUT_SHIFT)
-#define ISL29018_INT_TIME_AVAIL "0.090000 0.005630 0.000351 0.000021"
-#define ISL29023_INT_TIME_AVAIL "0.090000 0.005600 0.000352 0.000022"
-#define ISL29035_INT_TIME_AVAIL "0.105000 0.006500 0.000410 0.000025"
-
-static const char * const int_time_avail[] = {
- ISL29018_INT_TIME_AVAIL,
- ISL29023_INT_TIME_AVAIL,
- ISL29035_INT_TIME_AVAIL,
-};
-
enum isl29018_int_time {
ISL29018_INT_TIME_16,
ISL29018_INT_TIME_12,
@@ -110,7 +100,8 @@ struct isl29018_chip {
static int isl29018_set_integration_time(struct isl29018_chip *chip,
unsigned int utime)
{
- int i, ret;
+ unsigned int i;
+ int ret;
unsigned int int_time, new_int_time;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
@@ -145,7 +136,8 @@ static int isl29018_set_integration_time(struct isl29018_chip *chip,
static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
{
- int i, ret;
+ unsigned int i;
+ int ret;
struct isl29018_scale new_scale;
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
@@ -276,29 +268,35 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
return 0;
}
-static ssize_t isl29018_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_scale_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
+ mutex_lock(&chip->lock);
for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
len += sprintf(buf + len, "%d.%06d ",
isl29018_scales[chip->int_time][i].scale,
isl29018_scales[chip->int_time][i].uscale);
+ mutex_unlock(&chip->lock);
buf[len - 1] = '\n';
return len;
}
-static ssize_t isl29018_show_int_time_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_illuminance_integration_time_available_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- int i, len = 0;
+ unsigned int i;
+ int len = 0;
for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
len += sprintf(buf + len, "0.%06d ",
@@ -309,9 +307,27 @@ static ssize_t isl29018_show_int_time_available(struct device *dev,
return len;
}
-static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ * infrared suppression:
+ *
+ * Proximity Sensing Scheme: Bit 7. This bit programs the function
+ * of the proximity detection. Logic 0 of this bit, Scheme 0, makes
+ * full n (4, 8, 12, 16) bits (unsigned) proximity detection. The range
+ * of Scheme 0 proximity count is from 0 to 2^n. Logic 1 of this bit,
+ * Scheme 1, makes n-1 (3, 7, 11, 15) bits (2's complement)
+ * proximity_less_ambient detection. The range of Scheme 1
+ * proximity count is from -2^(n-1) to 2^(n-1). The sign bit is extended
+ * for resolutions less than 16. While Scheme 0 has wider dynamic
+ * range, Scheme 1 proximity detection is less affected by the
+ * ambient IR noise variation.
+ *
+ * 0 Sensing IR from LED and ambient
+ * 1 Sensing IR from LED with ambient IR rejection
+ */
+static ssize_t proximity_on_chip_ambient_infrared_suppression_show
+ (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -323,9 +339,9 @@ static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
return sprintf(buf, "%d\n", chip->prox_scheme);
}
-static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t proximity_on_chip_ambient_infrared_suppression_store
+ (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -357,6 +373,10 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
@@ -366,13 +386,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_INT_TIME:
- if (chan->type == IIO_LIGHT) {
- if (val) {
- mutex_unlock(&chip->lock);
- return -EINVAL;
- }
+ if (chan->type == IIO_LIGHT && !val)
ret = isl29018_set_integration_time(chip, val2);
- }
break;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_LIGHT)
@@ -381,6 +396,8 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+write_done:
mutex_unlock(&chip->lock);
return ret;
@@ -397,8 +414,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
mutex_lock(&chip->lock);
if (chip->suspended) {
- mutex_unlock(&chip->lock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto read_done;
}
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -445,7 +462,10 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
default:
break;
}
+
+read_done:
mutex_unlock(&chip->lock);
+
return ret;
}
@@ -482,14 +502,9 @@ static const struct iio_chan_spec isl29023_channels[] = {
ISL29018_IR_CHANNEL,
};
-static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
- isl29018_show_int_time_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
- isl29018_show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
- S_IRUGO | S_IWUSR,
- isl29018_show_prox_infrared_suppression,
- isl29018_store_prox_infrared_suppression, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_integration_time_available, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_scale_available, 0);
+static IIO_DEVICE_ATTR_RW(proximity_on_chip_ambient_infrared_suppression, 0);
#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
@@ -514,30 +529,6 @@ static const struct attribute_group isl29023_group = {
.attrs = isl29023_attributes,
};
-static int isl29035_detect(struct isl29018_chip *chip)
-{
- int status;
- unsigned int id;
- struct device *dev = regmap_get_device(chip->regmap);
-
- status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
- if (status < 0) {
- dev_err(dev,
- "Error reading ID register with error %d\n",
- status);
- return status;
- }
-
- id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
-
- if (id != ISL29035_DEVICE_ID)
- return -ENODEV;
-
- /* Clear brownout bit */
- return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
- ISL29035_BOUT_MASK, 0);
-}
-
enum {
isl29018,
isl29023,
@@ -550,12 +541,31 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
struct device *dev = regmap_get_device(chip->regmap);
if (chip->type == isl29035) {
- status = isl29035_detect(chip);
+ unsigned int id;
+
+ status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
+ if (status < 0) {
+ dev_err(dev,
+ "Error reading ID register with error %d\n",
+ status);
+ return status;
+ }
+
+ id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
+
+ if (id != ISL29035_DEVICE_ID)
+ return -ENODEV;
+
+ /* Clear brownout bit */
+ status = regmap_update_bits(chip->regmap,
+ ISL29035_REG_DEVICE_ID,
+ ISL29035_BOUT_MASK, 0);
if (status < 0)
return status;
}
- /* Code added per Intersil Application Note 1534:
+ /*
+ * Code added per Intersil Application Note 1534:
* When VDD sinks to approximately 1.8V or below, some of
* the part's registers may change their state. When VDD
* recovers to 2.25V (or greater), the part may thus be in an
@@ -582,7 +592,8 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
return status;
}
- /* See Intersil AN1534 comments above.
+ /*
+ * See Intersil AN1534 comments above.
* "Operating Mode" (COMMAND1) register is reprogrammed when
* data is read from the device.
*/
@@ -605,12 +616,10 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
status = isl29018_set_integration_time(chip,
isl29018_int_utimes[chip->type][chip->int_time]);
- if (status < 0) {
+ if (status < 0)
dev_err(dev, "Init of isl29018 fails\n");
- return status;
- }
- return 0;
+ return status;
}
static const struct iio_info isl29018_info = {
@@ -713,6 +722,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
+
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
@@ -752,6 +762,7 @@ static int isl29018_probe(struct i2c_client *client,
indio_dev->name = name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
+
return devm_iio_device_register(&client->dev, indio_dev);
}
@@ -762,13 +773,15 @@ static int isl29018_suspend(struct device *dev)
mutex_lock(&chip->lock);
- /* Since this driver uses only polling commands, we are by default in
+ /*
+ * Since this driver uses only polling commands, we are by default in
* auto shutdown (ie, power-down) mode.
* So we do not have much to do here.
*/
chip->suspended = true;
mutex_unlock(&chip->lock);
+
return 0;
}
@@ -784,6 +797,7 @@ static int isl29018_resume(struct device *dev)
chip->suspended = false;
mutex_unlock(&chip->lock);
+
return err;
}
@@ -807,7 +821,6 @@ static const struct i2c_device_id isl29018_id[] = {
{"isl29035", isl29035},
{}
};
-
MODULE_DEVICE_TABLE(i2c, isl29018_id);
static const struct of_device_id isl29018_of_match[] = {
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a3d0b6..b30e0c1c6cc4 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -631,14 +631,16 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
switch (chan->type) {
case IIO_LIGHT:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->lock_als);
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -648,8 +650,9 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_INTENSITY:
@@ -657,21 +660,28 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
if (ret < 0)
- return ret;
+ break;
*val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
buf[0] : buf[1]);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_PROXIMITY:
mutex_lock(&data->lock_ps);
ret = ltr501_read_ps(data);
mutex_unlock(&data->lock_ps);
if (ret < 0)
- return ret;
+ break;
*val = ret & LTR501_PS_DATA_MASK;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_INTENSITY:
@@ -729,8 +739,9 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
int i, ret, freq_val, freq_val2;
struct ltr501_chip_info *info = data->chip_info;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -739,85 +750,105 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
i = ltr501_get_gain_index(info->als_gain,
info->als_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->als_contr &= ~info->als_gain_mask;
data->als_contr |= i << info->als_gain_shift;
- return regmap_write(data->regmap, LTR501_ALS_CONTR,
- data->als_contr);
+ ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
+ data->als_contr);
+ break;
case IIO_PROXIMITY:
i = ltr501_get_gain_index(info->ps_gain,
info->ps_gain_tbl_size,
val, val2);
- if (i < 0)
- return -EINVAL;
+ if (i < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
- return regmap_write(data->regmap, LTR501_PS_CONTR,
- data->ps_contr);
+ ret = regmap_write(data->regmap, LTR501_PS_CONTR,
+ data->ps_contr);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_INT_TIME:
switch (chan->type) {
case IIO_INTENSITY:
- if (val != 0)
- return -EINVAL;
+ if (val != 0) {
+ ret = -EINVAL;
+ break;
+ }
mutex_lock(&data->lock_als);
- i = ltr501_set_it_time(data, val2);
+ ret = ltr501_set_it_time(data, val2);
mutex_unlock(&data->lock_als);
- return i;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_INTENSITY:
ret = ltr501_als_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_als_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->als_period);
if (ret < 0)
- return ltr501_als_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_als_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
case IIO_PROXIMITY:
ret = ltr501_ps_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- return ret;
+ break;
ret = ltr501_ps_write_samp_freq(data, val, val2);
if (ret < 0)
- return ret;
+ break;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->ps_period);
if (ret < 0)
- return ltr501_ps_write_samp_freq(data,
- freq_val,
- freq_val2);
- return ret;
+ ret = ltr501_ps_write_samp_freq(data, freq_val,
+ freq_val2);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static int ltr501_read_thresh(struct iio_dev *indio_dev,
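The ltr501 conversion above follows the same claim/release pattern used elsewhere in this series so that one-shot reads cannot race with buffered capture; the shape of it, sketched with a hypothetical helper:

/* Hypothetical sketch only. */
static int foo_read_oneshot(struct iio_dev *indio_dev, int *val)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;	/* typically -EBUSY while the buffer is enabled */

	ret = foo_read_sensor(indio_dev, val);	/* hypothetical one-shot read */

	iio_device_release_direct_mode(indio_dev);

	return ret;
}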
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 6511b20a2a29..a144ca3461fc 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -204,17 +204,18 @@ static int max44000_write_alspga(struct max44000_data *data, int val)
static int max44000_read_alsval(struct max44000_data *data)
{
u16 regval;
+ __be16 val;
int alstim, ret;
ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
- &regval, sizeof(regval));
+ &val, sizeof(val));
if (ret < 0)
return ret;
alstim = ret = max44000_read_alstim(data);
if (ret < 0)
return ret;
- regval = be16_to_cpu(regval);
+ regval = be16_to_cpu(val);
/*
* Overflow is explained on datasheet page 17.
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
new file mode 100644
index 000000000000..a78b6025c465
--- /dev/null
+++ b/drivers/iio/light/tsl2583.c
@@ -0,0 +1,913 @@
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
+ *
+ * Copyright (c) 2011, TAOS Corporation.
+ * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Device Registers and Masks */
+#define TSL2583_CNTRL 0x00
+#define TSL2583_ALS_TIME 0x01
+#define TSL2583_INTERRUPT 0x02
+#define TSL2583_GAIN 0x07
+#define TSL2583_REVID 0x11
+#define TSL2583_CHIPID 0x12
+#define TSL2583_ALS_CHAN0LO 0x14
+#define TSL2583_ALS_CHAN0HI 0x15
+#define TSL2583_ALS_CHAN1LO 0x16
+#define TSL2583_ALS_CHAN1HI 0x17
+#define TSL2583_TMR_LO 0x18
+#define TSL2583_TMR_HI 0x19
+
+/* tsl2583 cmd reg masks */
+#define TSL2583_CMD_REG 0x80
+#define TSL2583_CMD_SPL_FN 0x60
+#define TSL2583_CMD_ALS_INT_CLR 0x01
+
+/* tsl2583 cntrl reg masks */
+#define TSL2583_CNTL_ADC_ENBL 0x02
+#define TSL2583_CNTL_PWR_OFF 0x00
+#define TSL2583_CNTL_PWR_ON 0x01
+
+/* tsl2583 status reg masks */
+#define TSL2583_STA_ADC_VALID 0x01
+#define TSL2583_STA_ADC_INTR 0x10
+
+/* Lux calculation constants */
+#define TSL2583_LUX_CALC_OVER_FLOW 65535
+
+#define TSL2583_INTERRUPT_DISABLED 0x00
+
+#define TSL2583_CHIP_ID 0x90
+#define TSL2583_CHIP_ID_MASK 0xf0
+
+/* Per-device data */
+struct tsl2583_als_info {
+ u16 als_ch0;
+ u16 als_ch1;
+ u16 lux;
+};
+
+struct tsl2583_lux {
+ unsigned int ratio;
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+static const struct tsl2583_lux tsl2583_default_lux[] = {
+ { 9830, 8520, 15729 },
+ { 12452, 10807, 23344 },
+ { 14746, 6383, 11705 },
+ { 17695, 4063, 6554 },
+ { 0, 0, 0 } /* Termination segment */
+};
+
+#define TSL2583_MAX_LUX_TABLE_ENTRIES 11
+
+struct tsl2583_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int als_cal_target;
+
+ /*
+ * This structure is intentionally large to accommodate updates via
+ * sysfs. Sized to 11 = max 10 segments + 1 termination segment.
+ * Assumption is that one and only one type of glass is used.
+ */
+ struct tsl2583_lux als_device_lux[TSL2583_MAX_LUX_TABLE_ENTRIES];
+};
+
+struct tsl2583_chip {
+ struct mutex als_mutex;
+ struct i2c_client *client;
+ struct tsl2583_als_info als_cur_info;
+ struct tsl2583_settings als_settings;
+ int als_time_scale;
+ int als_saturation;
+ bool suspended;
+};
+
+struct gainadj {
+ s16 ch0;
+ s16 ch1;
+ s16 mean;
+};
+
+/* Index = (0 - 3) Used to validate the gain selection index */
+static const struct gainadj gainadj[] = {
+ { 1, 1, 1 },
+ { 8, 8, 8 },
+ { 16, 16, 16 },
+ { 107, 115, 111 }
+};
+
+/*
+ * Provides initial operational parameter defaults.
+ * These defaults may be changed through the device's sysfs files.
+ */
+static void tsl2583_defaults(struct tsl2583_chip *chip)
+{
+ /*
+ * The integration time must be a multiple of 50ms and within the
+ * range [50, 600] ms.
+ */
+ chip->als_settings.als_time = 100;
+
+ /*
+ * This is an index into the gainadj table. Assume clear glass as the
+ * default.
+ */
+ chip->als_settings.als_gain = 0;
+
+ /* Default gain trim to account for aperture effects */
+ chip->als_settings.als_gain_trim = 1000;
+
+ /* Known external ALS reading used for calibration */
+ chip->als_settings.als_cal_target = 130;
+
+ /* Default lux table. */
+ memcpy(chip->als_settings.als_device_lux, tsl2583_default_lux,
+ sizeof(tsl2583_default_lux));
+}
+
+/*
+ * Reads and calculates current lux value.
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device.
+ * Time scale factor array values are adjusted based on the integration time.
+ * The raw values are multiplied by a scale factor, and device gain is obtained
+ * using gain index. Limit checks are done next, then the ratio of a multiple
+ * of ch1 value, to the ch0 value, is calculated. The array als_device_lux[]
+ * declared above is then scanned to find the first ratio value that is just
+ * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
+ * the array are then used along with the time scale factor array values, to
+ * calculate the lux.
+ */
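+/*
+ * Worked example (illustrative, not from the datasheet): with the defaults
+ * above (gain index 0, 100 ms integration time, gain trim 1000) and raw
+ * counts ch0 = 1000, ch1 = 300, the ratio is (300 << 15) / 1000 = 9830,
+ * which selects the first default table row and yields roughly 232 lux.
+ */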
+static int tsl2583_get_lux(struct iio_dev *indio_dev)
+{
+ u16 ch0, ch1; /* separated ch0/ch1 data from device */
+ u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
+ u32 ratio;
+ u8 buf[5];
+ struct tsl2583_lux *p;
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int i, ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, TSL2583_CMD_REG);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read CMD_REG register\n",
+ __func__);
+ goto done;
+ }
+
+ /* is data new & valid */
+ if (!(ret & TSL2583_STA_ADC_INTR)) {
+ dev_err(&chip->client->dev, "%s: data not valid; returning last value\n",
+ __func__);
+ ret = chip->als_cur_info.lux; /* return LAST VALUE */
+ goto done;
+ }
+
+ for (i = 0; i < 4; i++) {
+ int reg = TSL2583_CMD_REG | (TSL2583_ALS_CHAN0LO + i);
+
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to read register %x\n",
+ __func__, reg);
+ goto done;
+ }
+ buf[i] = ret;
+ }
+
+ /*
+ * Clear the pending interrupt status bit on the chip to allow the next
+ * integration cycle to start. This has to be done even though this
+ * driver currently does not support interrupts.
+ */
+ ret = i2c_smbus_write_byte(chip->client,
+ (TSL2583_CMD_REG | TSL2583_CMD_SPL_FN |
+ TSL2583_CMD_ALS_INT_CLR));
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to clear the interrupt bit\n",
+ __func__);
+ goto done; /* have no data, so return failure */
+ }
+
+ /* extract ALS/lux data */
+ ch0 = le16_to_cpup((const __le16 *)&buf[0]);
+ ch1 = le16_to_cpup((const __le16 *)&buf[2]);
+
+ chip->als_cur_info.als_ch0 = ch0;
+ chip->als_cur_info.als_ch1 = ch1;
+
+ if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
+ goto return_max;
+
+ if (!ch0) {
+ /*
+ * The sensor appears to be in total darkness so set the
+ * calculated lux to 0 and return early to avoid a division by
+ * zero below when calculating the ratio.
+ */
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ /* calculate ratio */
+ ratio = (ch1 << 15) / ch0;
+
+ /* convert to unscaled lux using the pointer to the table */
+ for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
+ p->ratio != 0 && p->ratio < ratio; p++)
+ ;
+
+ if (p->ratio == 0) {
+ lux = 0;
+ } else {
+ u32 ch0lux, ch1lux;
+
+ ch0lux = ((ch0 * p->ch0) +
+ (gainadj[chip->als_settings.als_gain].ch0 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch0;
+ ch1lux = ((ch1 * p->ch1) +
+ (gainadj[chip->als_settings.als_gain].ch1 >> 1))
+ / gainadj[chip->als_settings.als_gain].ch1;
+
+ /* note: lux is 31 bit max at this point */
+ if (ch1lux > ch0lux) {
+ dev_dbg(&chip->client->dev, "%s: No Data - Returning 0\n",
+ __func__);
+ ret = 0;
+ chip->als_cur_info.lux = 0;
+ goto done;
+ }
+
+ lux = ch0lux - ch1lux;
+ }
+
+ /* adjust for active time scale */
+ if (chip->als_time_scale == 0)
+ lux = 0;
+ else
+ lux = (lux + (chip->als_time_scale >> 1)) /
+ chip->als_time_scale;
+
+ /*
+ * Adjust for active gain scale.
+ * The tsl2583_default_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->als_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
+
+ if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
+return_max:
+ lux = TSL2583_LUX_CALC_OVER_FLOW;
+ }
+
+ /* Update the structure with the latest VALID lux. */
+ chip->als_cur_info.lux = lux;
+ ret = lux;
+
+done:
+ return ret;
+}
+
+/*
+ * Obtain single reading and calculate the als_gain_trim (later used
+ * to derive actual lux).
+ * Return updated gain_trim value.
+ */
+static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int gain_trim_val;
+ int ret;
+ int lux_val;
+
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to read from the CNTRL register\n",
+ __func__);
+ return ret;
+ }
+
+ if ((ret & (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON))
+ != (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON)) {
+ dev_err(&chip->client->dev,
+ "%s: Device is not powered on and/or ADC is not enabled\n",
+ __func__);
+ return -EINVAL;
+ } else if ((ret & TSL2583_STA_ADC_VALID) != TSL2583_STA_ADC_VALID) {
+ dev_err(&chip->client->dev,
+ "%s: The two ADC channels have not completed an integration cycle\n",
+ __func__);
+ return -ENODATA;
+ }
+
+ lux_val = tsl2583_get_lux(indio_dev);
+ if (lux_val < 0) {
+ dev_err(&chip->client->dev, "%s: failed to get lux\n",
+ __func__);
+ return lux_val;
+ }
+
+ gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+ * chip->als_settings.als_gain_trim) / lux_val);
+ if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+ dev_err(&chip->client->dev,
+ "%s: trim_val of %d is not within the range [250, 4000]\n",
+ __func__, gain_trim_val);
+ return -ENODATA;
+ }
+
+ chip->als_settings.als_gain_trim = (int)gain_trim_val;
+
+ return 0;
+}
+
+static int tsl2583_set_als_time(struct tsl2583_chip *chip)
+{
+ int als_count, als_time, ret;
+ u8 val;
+
+ /* determine als integration register */
+ als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+ if (!als_count)
+ als_count = 1; /* ensure at least one cycle */
+
+ /* convert back to time (encompasses overrides) */
+ als_time = (als_count * 27 + 5) / 10;
+
+ val = 256 - als_count;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_ALS_TIME,
+ val);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "%s: failed to set the als time to %d\n",
+ __func__, val);
+ return ret;
+ }
+
+ /* update the chip struct with the new scaling and saturation values */
+ chip->als_saturation = als_count * 922; /* 90% of full scale */
+ chip->als_time_scale = (als_time + 25) / 50;
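+ /*
+ * For example, the default 100 ms integration time gives als_count = 37,
+ * an ALS_TIME register value of 219, als_saturation = 34114 and
+ * als_time_scale = 2.
+ */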
+
+ return ret;
+}
+
+static int tsl2583_set_als_gain(struct tsl2583_chip *chip)
+{
+ int ret;
+
+ /* Set the gain based on als_settings struct */
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_GAIN,
+ chip->als_settings.als_gain);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the gain to %d\n", __func__,
+ chip->als_settings.als_gain);
+
+ return ret;
+}
+
+static int tsl2583_set_power_state(struct tsl2583_chip *chip, u8 state)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_CNTRL, state);
+ if (ret < 0)
+ dev_err(&chip->client->dev,
+ "%s: failed to set the power state to %d\n", __func__,
+ state);
+
+ return ret;
+}
+
+/*
+ * Turn the device on.
+ * Configuration must be set before calling this function.
+ */
+static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ /* Power on the device; ADC off. */
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2583_CMD_REG | TSL2583_INTERRUPT,
+ TSL2583_INTERRUPT_DISABLED);
+ if (ret < 0) {
+ dev_err(&chip->client->dev,
+ "%s: failed to disable interrupts\n", __func__);
+ return ret;
+ }
+
+ ret = tsl2583_set_als_time(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2583_set_als_gain(chip);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 3500);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON |
+ TSL2583_CNTL_ADC_ENBL);
+ if (ret < 0)
+ return ret;
+
+ chip->suspended = false;
+
+ return ret;
+}
+
+/* Sysfs Interface Functions */
+
+static ssize_t in_illuminance_input_target_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+ ret = sprintf(buf, "%d\n", chip->als_settings.als_cal_target);
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_input_target_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value;
+
+ if (kstrtoint(buf, 0, &value) || !value)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+ chip->als_settings.als_cal_target = value;
+ mutex_unlock(&chip->als_mutex);
+
+ return len;
+}
+
+static ssize_t in_illuminance_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int value, ret;
+
+ if (kstrtoint(buf, 0, &value) || value != 1)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = tsl2583_als_calibrate(indio_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = len;
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static ssize_t in_illuminance_lux_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ unsigned int i;
+ int offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(chip->als_settings.als_device_lux); i++) {
+ offset += sprintf(buf + offset, "%u,%u,%u,",
+ chip->als_settings.als_device_lux[i].ratio,
+ chip->als_settings.als_device_lux[i].ch0,
+ chip->als_settings.als_device_lux[i].ch1);
+ if (chip->als_settings.als_device_lux[i].ratio == 0) {
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
+ offset--;
+ break;
+ }
+ }
+
+ offset += sprintf(buf + offset, "\n");
+
+ return offset;
+}
+
+static ssize_t in_illuminance_lux_table_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ const unsigned int max_ints = TSL2583_MAX_LUX_TABLE_ENTRIES * 3;
+ int value[TSL2583_MAX_LUX_TABLE_ENTRIES * 3 + 1];
+ int ret = -EINVAL;
+ unsigned int n;
+
+ mutex_lock(&chip->als_mutex);
+
+ get_options(buf, ARRAY_SIZE(value), value);
+
+ /*
+ * We now have an array of ints starting at value[1], and
+ * enumerated by value[0].
+ * We expect each group of three ints is one table entry,
+ * and the last table entry is all 0.
+ */
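+ /*
+ * For example, writing "9830,8520,15729,0,0,0" installs a single segment
+ * followed by the mandatory all-zero terminator.
+ */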
+ n = value[0];
+ if ((n % 3) || n < 6 || n > max_ints) {
+ dev_err(dev,
+ "%s: The number of entries in the lux table must be a multiple of 3 and within the range [6, %d]\n",
+ __func__, max_ints);
+ goto done;
+ }
+ if ((value[n - 2] | value[n - 1] | value[n]) != 0) {
+ dev_err(dev, "%s: The last 3 entries in the lux table must be zeros.\n",
+ __func__);
+ goto done;
+ }
+
+ memcpy(chip->als_settings.als_device_lux, &value[1],
+ value[0] * sizeof(value[1]));
+
+ ret = len;
+
+done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
+static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
+static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
+static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &iio_const_attr_in_illuminance_calibscale_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_input_target.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_calibrate.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_lux_table.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2583_attribute_group = {
+ .attrs = sysfs_attrs_ctrl,
+};
+
+static const struct iio_chan_spec tsl2583_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static int tsl2583_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto read_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ /*
+ * From page 20 of the TSL2581, TSL2583 data
+ * sheet (TAOS134 − MARCH 2011):
+ *
+ * One of the photodiodes (channel 0) is
+ * sensitive to both visible and infrared light,
+ * while the second photodiode (channel 1) is
+ * sensitive primarily to infrared light.
+ */
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = chip->als_cur_info.als_ch0;
+ else
+ *val = chip->als_cur_info.als_ch1;
+
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_LIGHT) {
+ ret = tsl2583_get_lux(indio_dev);
+ if (ret < 0)
+ goto read_done;
+
+ *val = ret;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ *val = chip->als_settings.als_gain_trim;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ *val = gainadj[chip->als_settings.als_gain].mean;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT) {
+ *val = 0;
+ *val2 = chip->als_settings.als_time;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+read_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int tsl2583_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ if (chip->suspended) {
+ ret = -EBUSY;
+ goto write_done;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (chan->type == IIO_LIGHT) {
+ chip->als_settings.als_gain_trim = val;
+ ret = 0;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type == IIO_LIGHT) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gainadj); i++) {
+ if (gainadj[i].mean == val) {
+ chip->als_settings.als_gain = i;
+ ret = tsl2583_set_als_gain(chip);
+ break;
+ }
+ }
+ }
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT && !val && val2 >= 50 &&
+ val2 <= 650 && !(val2 % 50)) {
+ chip->als_settings.als_time = val2;
+ ret = tsl2583_set_als_time(chip);
+ }
+ break;
+ default:
+ break;
+ }
+
+write_done:
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static const struct iio_info tsl2583_info = {
+ .attrs = &tsl2583_attribute_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = tsl2583_read_raw,
+ .write_raw = tsl2583_write_raw,
+};
+
+static int tsl2583_probe(struct i2c_client *clientp,
+ const struct i2c_device_id *idp)
+{
+ int ret;
+ struct tsl2583_chip *chip;
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(clientp->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&clientp->dev, "%s: i2c smbus byte data functionality is unsupported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->client = clientp;
+ i2c_set_clientdata(clientp, indio_dev);
+
+ mutex_init(&chip->als_mutex);
+ chip->suspended = true;
+
+ ret = i2c_smbus_read_byte_data(clientp,
+ TSL2583_CMD_REG | TSL2583_CHIPID);
+ if (ret < 0) {
+ dev_err(&clientp->dev,
+ "%s: failed to read the chip ID register\n", __func__);
+ return ret;
+ }
+
+ if ((ret & TSL2583_CHIP_ID_MASK) != TSL2583_CHIP_ID) {
+ dev_err(&clientp->dev, "%s: received an unknown chip ID %x\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ indio_dev->info = &tsl2583_info;
+ indio_dev->channels = tsl2583_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl2583_channels);
+ indio_dev->dev.parent = &clientp->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+
+ ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ if (ret) {
+ dev_err(&clientp->dev, "%s: iio registration failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Load up the V2 defaults (these are hard coded defaults for now) */
+ tsl2583_defaults(chip);
+
+ /* Make sure the chip is on */
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&clientp->dev, "Light sensor found.\n");
+
+ return 0;
+}
+
+static int __maybe_unused tsl2583_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+ chip->suspended = true;
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int __maybe_unused tsl2583_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct tsl2583_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ ret = tsl2583_chip_init_and_power_on(indio_dev);
+
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
+
+static struct i2c_device_id tsl2583_idtable[] = {
+ { "tsl2580", 0 },
+ { "tsl2581", 1 },
+ { "tsl2583", 2 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
+
+static const struct of_device_id tsl2583_of_match[] = {
+ { .compatible = "amstaos,tsl2580", },
+ { .compatible = "amstaos,tsl2581", },
+ { .compatible = "amstaos,tsl2583", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsl2583_of_match);
+
+/* Driver definition */
+static struct i2c_driver tsl2583_driver = {
+ .driver = {
+ .name = "tsl2583",
+ .pm = &tsl2583_pm_ops,
+ .of_match_table = tsl2583_of_match,
+ },
+ .id_table = tsl2583_idtable,
+ .probe = tsl2583_probe,
+};
+module_i2c_driver(tsl2583_driver);
+
+MODULE_AUTHOR("J. August Brenner <jbrenner@taosinc.com>");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 217353145676..ce09d771c1fb 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -287,7 +287,7 @@ static int ak8974_await_drdy(struct ak8974 *ak8974)
return 0;
}
-static int ak8974_getresult(struct ak8974 *ak8974, s16 *result)
+static int ak8974_getresult(struct ak8974 *ak8974, __le16 *result)
{
unsigned int src;
int ret;
@@ -395,7 +395,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
{
int ret;
- u16 bulk;
+ __le16 bulk;
ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
if (ret)
@@ -453,7 +453,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- s16 hw_values[3];
+ __le16 hw_values[3];
int ret = -EINVAL;
pm_runtime_get_sync(&ak8974->i2c->dev);
@@ -494,7 +494,7 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
int ret;
- s16 hw_values[8]; /* Three axes + 64bit padding */
+ __le16 hw_values[8]; /* Three axes + 64bit padding */
pm_runtime_get_sync(&ak8974->i2c->dev);
mutex_lock(&ak8974->lock);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index af8606cc7812..825369fb1c57 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -690,6 +690,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
struct ak8975_data *data = iio_priv(indio_dev);
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
+ __le16 rval;
u16 buff;
int ret;
@@ -703,7 +704,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
client, def->data_regs[index],
- sizeof(buff), (u8*)&buff);
+ sizeof(rval), (u8*)&rval);
if (ret < 0)
goto exit;
@@ -713,7 +714,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
pm_runtime_put_autosuspend(&data->client->dev);
/* Swap bytes and convert to valid range. */
- buff = le16_to_cpu(buff);
+ buff = le16_to_cpu(rval);
*val = clamp_t(s16, buff, -def->range, def->range);
return IIO_VAL_INT;
@@ -813,6 +814,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct ak_def *def = data->def;
int ret;
s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
+ __le16 fval[3];
mutex_lock(&data->lock);
@@ -826,17 +828,17 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
*/
ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
def->data_regs[0],
- 3 * sizeof(buff[0]),
- (u8 *)buff);
+ 3 * sizeof(fval[0]),
+ (u8 *)fval);
if (ret < 0)
goto unlock;
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+ buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
iio_push_to_buffers_with_timestamp(indio_dev, buff,
iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d8a0c8da8db0..0e791b02ed4a 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -42,9 +42,17 @@ enum magn_3d_channel {
MAGN_3D_CHANNEL_MAX,
};
+struct common_attributes {
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
struct magn_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_common common_attributes;
+ struct hid_sensor_common magn_flux_attributes;
+ struct hid_sensor_common rot_attributes;
struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
/* dynamically sized array to hold sensor values */
@@ -52,10 +60,8 @@ struct magn_3d_state {
/* array of pointers to sensor value */
u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
- int value_offset;
+ struct common_attributes magn_flux_attr;
+ struct common_attributes rot_attr;
};
static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -162,41 +168,74 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
*val2 = 0;
switch (mask) {
case 0:
- hid_sensor_power_state(&magn_state->common_attributes, true);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
report_id =
magn_state->magn[chan->address].report_id;
address = magn_3d_addresses[chan->address];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
- magn_state->common_attributes.hsdev,
+ magn_state->magn_flux_attributes.hsdev,
HID_USAGE_SENSOR_COMPASS_3D, address,
report_id,
SENSOR_HUB_SYNC);
else {
*val = 0;
- hid_sensor_power_state(&magn_state->common_attributes,
- false);
+ hid_sensor_power_state(
+ &magn_state->magn_flux_attributes,
+ false);
return -EINVAL;
}
- hid_sensor_power_state(&magn_state->common_attributes, false);
+ hid_sensor_power_state(&magn_state->magn_flux_attributes,
+ false);
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = magn_state->scale_pre_decml;
- *val2 = magn_state->scale_post_decml;
- ret_type = magn_state->scale_precision;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.scale_pre_decml;
+ *val2 = magn_state->magn_flux_attr.scale_post_decml;
+ ret_type = magn_state->magn_flux_attr.scale_precision;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.scale_pre_decml;
+ *val2 = magn_state->rot_attr.scale_post_decml;
+ ret_type = magn_state->rot_attr.scale_precision;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_OFFSET:
- *val = magn_state->value_offset;
- ret_type = IIO_VAL_INT;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = magn_state->magn_flux_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_ROT:
+ *val = magn_state->rot_attr.value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret_type = hid_sensor_read_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret_type = hid_sensor_read_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ }
break;
default:
ret_type = -EINVAL;
@@ -219,11 +258,21 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
ret = hid_sensor_write_samp_freq_value(
- &magn_state->common_attributes, val, val2);
+ &magn_state->magn_flux_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
- ret = hid_sensor_write_raw_hyst_value(
- &magn_state->common_attributes, val, val2);
+ switch (chan->type) {
+ case IIO_MAGN:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->magn_flux_attributes, val, val2);
+ break;
+ case IIO_ROT:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->rot_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
break;
default:
ret = -EINVAL;
@@ -254,7 +303,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
- if (atomic_read(&magn_state->common_attributes.data_ready))
+ if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
hid_sensor_push_data(indio_dev, magn_state->iio_vals);
return 0;
@@ -389,21 +438,48 @@ static int magn_3d_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
*chan_count);
- st->scale_precision = hid_sensor_format_scale(
+ st->magn_flux_attr.scale_precision = hid_sensor_format_scale(
HID_USAGE_SENSOR_COMPASS_3D,
&st->magn[CHANNEL_SCAN_INDEX_X],
- &st->scale_pre_decml, &st->scale_post_decml);
+ &st->magn_flux_attr.scale_pre_decml,
+ &st->magn_flux_attr.scale_post_decml);
+ st->rot_attr.scale_precision
+ = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->magn[CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP],
+ &st->rot_attr.scale_pre_decml,
+ &st->rot_attr.scale_post_decml);
/* Set Sensitivity field ids, when there is no individual modifier */
- if (st->common_attributes.sensitivity.index < 0) {
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
sensor_hub_input_get_attribute_info(hsdev,
HID_FEATURE_REPORT, usage_id,
HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
HID_USAGE_SENSOR_DATA_ORIENTATION,
- &st->common_attributes.sensitivity);
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->magn_flux_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX,
+ &st->magn_flux_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->magn_flux_attributes.sensitivity.index,
+ st->magn_flux_attributes.sensitivity.report_id);
+ }
+ if (st->rot_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ &st->rot_attributes.sensitivity);
dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
- st->common_attributes.sensitivity.index,
- st->common_attributes.sensitivity.report_id);
+ st->rot_attributes.sensitivity.index,
+ st->rot_attributes.sensitivity.report_id);
}
return 0;
@@ -428,16 +504,17 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
magn_state = iio_priv(indio_dev);
- magn_state->common_attributes.hsdev = hsdev;
- magn_state->common_attributes.pdev = pdev;
+ magn_state->magn_flux_attributes.hsdev = hsdev;
+ magn_state->magn_flux_attributes.pdev = pdev;
ret = hid_sensor_parse_common_attributes(hsdev,
HID_USAGE_SENSOR_COMPASS_3D,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
return ret;
}
+ magn_state->rot_attributes = magn_state->magn_flux_attributes;
ret = magn_3d_parse_report(pdev, hsdev,
&channels, &chan_count,
@@ -460,9 +537,9 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
return ret;
}
- atomic_set(&magn_state->common_attributes.data_ready, 0);
+ atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
ret = hid_sensor_setup_trigger(indio_dev, name,
- &magn_state->common_attributes);
+ &magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
goto error_unreg_buffer_funcs;
@@ -489,7 +566,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -504,7 +581,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->common_attributes);
+ hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
iio_triggered_buffer_cleanup(indio_dev);
return 0;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 3e1f06b2224c..8e1b0861fbe4 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -46,139 +46,12 @@
#define ST_MAGN_FS_AVL_15000MG 15000
#define ST_MAGN_FS_AVL_16000MG 16000
-/* CUSTOM VALUES FOR SENSOR 0 */
-#define ST_MAGN_0_ODR_ADDR 0x00
-#define ST_MAGN_0_ODR_MASK 0x1c
-#define ST_MAGN_0_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_0_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_0_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_0_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_0_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_0_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_0_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_0_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_0_PW_ADDR 0x02
-#define ST_MAGN_0_PW_MASK 0x03
-#define ST_MAGN_0_PW_ON 0x00
-#define ST_MAGN_0_PW_OFF 0x03
-#define ST_MAGN_0_FS_ADDR 0x01
-#define ST_MAGN_0_FS_MASK 0xe0
-#define ST_MAGN_0_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_0_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_0_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_0_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_0_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_0_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_0_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_0_FS_AVL_1300_GAIN_XY 1100
-#define ST_MAGN_0_FS_AVL_1900_GAIN_XY 855
-#define ST_MAGN_0_FS_AVL_2500_GAIN_XY 670
-#define ST_MAGN_0_FS_AVL_4000_GAIN_XY 450
-#define ST_MAGN_0_FS_AVL_4700_GAIN_XY 400
-#define ST_MAGN_0_FS_AVL_5600_GAIN_XY 330
-#define ST_MAGN_0_FS_AVL_8100_GAIN_XY 230
-#define ST_MAGN_0_FS_AVL_1300_GAIN_Z 980
-#define ST_MAGN_0_FS_AVL_1900_GAIN_Z 760
-#define ST_MAGN_0_FS_AVL_2500_GAIN_Z 600
-#define ST_MAGN_0_FS_AVL_4000_GAIN_Z 400
-#define ST_MAGN_0_FS_AVL_4700_GAIN_Z 355
-#define ST_MAGN_0_FS_AVL_5600_GAIN_Z 295
-#define ST_MAGN_0_FS_AVL_8100_GAIN_Z 205
-#define ST_MAGN_0_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_MAGN_1_WAI_EXP 0x3c
-#define ST_MAGN_1_ODR_ADDR 0x00
-#define ST_MAGN_1_ODR_MASK 0x1c
-#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
-#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
-#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
-#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
-#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
-#define ST_MAGN_1_PW_ADDR 0x02
-#define ST_MAGN_1_PW_MASK 0x03
-#define ST_MAGN_1_PW_ON 0x00
-#define ST_MAGN_1_PW_OFF 0x03
-#define ST_MAGN_1_FS_ADDR 0x01
-#define ST_MAGN_1_FS_MASK 0xe0
-#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
-#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
-#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
-#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
-#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
-#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
-#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
-#define ST_MAGN_1_MULTIREAD_BIT false
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_MAGN_2_WAI_EXP 0x3d
-#define ST_MAGN_2_ODR_ADDR 0x20
-#define ST_MAGN_2_ODR_MASK 0x1c
-#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
-#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
-#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
-#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
-#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
-#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
-#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
-#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
-#define ST_MAGN_2_PW_ADDR 0x22
-#define ST_MAGN_2_PW_MASK 0x03
-#define ST_MAGN_2_PW_ON 0x00
-#define ST_MAGN_2_PW_OFF 0x03
-#define ST_MAGN_2_FS_ADDR 0x21
-#define ST_MAGN_2_FS_MASK 0x60
-#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
-#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
-#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
-#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
-#define ST_MAGN_2_FS_AVL_4000_GAIN 146
-#define ST_MAGN_2_FS_AVL_8000_GAIN 292
-#define ST_MAGN_2_FS_AVL_12000_GAIN 438
-#define ST_MAGN_2_FS_AVL_16000_GAIN 584
-#define ST_MAGN_2_MULTIREAD_BIT false
+/* Special L addresses for Sensor 2 */
#define ST_MAGN_2_OUT_X_L_ADDR 0x28
#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_MAGN_3_WAI_ADDR 0x4f
-#define ST_MAGN_3_WAI_EXP 0x40
-#define ST_MAGN_3_ODR_ADDR 0x60
-#define ST_MAGN_3_ODR_MASK 0x0c
-#define ST_MAGN_3_ODR_AVL_10HZ_VAL 0x00
-#define ST_MAGN_3_ODR_AVL_20HZ_VAL 0x01
-#define ST_MAGN_3_ODR_AVL_50HZ_VAL 0x02
-#define ST_MAGN_3_ODR_AVL_100HZ_VAL 0x03
-#define ST_MAGN_3_PW_ADDR 0x60
-#define ST_MAGN_3_PW_MASK 0x03
-#define ST_MAGN_3_PW_ON 0x00
-#define ST_MAGN_3_PW_OFF 0x03
-#define ST_MAGN_3_BDU_ADDR 0x62
-#define ST_MAGN_3_BDU_MASK 0x10
-#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
-#define ST_MAGN_3_DRDY_INT_MASK 0x01
-#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
-#define ST_MAGN_3_IHL_IRQ_MASK 0x04
-#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
-#define ST_MAGN_3_MULTIREAD_BIT false
+/* Special L addresses for Sensor 3 */
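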
#define ST_MAGN_3_OUT_X_L_ADDR 0x68
#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
@@ -240,77 +113,78 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_0_ODR_ADDR,
- .mask = ST_MAGN_0_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_0_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_0_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_0_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_0_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_0_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_0_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_0_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ /* a 220 Hz setting (0x07) reportedly exists */
},
},
.pw = {
- .addr = ST_MAGN_0_PW_ADDR,
- .mask = ST_MAGN_0_PW_MASK,
- .value_on = ST_MAGN_0_PW_ON,
- .value_off = ST_MAGN_0_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_0_FS_ADDR,
- .mask = ST_MAGN_0_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_0_FS_AVL_1300_VAL,
- .gain = ST_MAGN_0_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 1100,
+ .gain2 = 980,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_0_FS_AVL_1900_VAL,
- .gain = ST_MAGN_0_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 855,
+ .gain2 = 760,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_0_FS_AVL_2500_VAL,
- .gain = ST_MAGN_0_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 670,
+ .gain2 = 600,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_0_FS_AVL_4000_VAL,
- .gain = ST_MAGN_0_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 450,
+ .gain2 = 400,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_0_FS_AVL_4700_VAL,
- .gain = ST_MAGN_0_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 400,
+ .gain2 = 355,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_0_FS_AVL_5600_VAL,
- .gain = ST_MAGN_0_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 330,
+ .gain2 = 295,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_0_FS_AVL_8100_VAL,
- .gain = ST_MAGN_0_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_0_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 230,
+ .gain2 = 205,
},
},
},
- .multi_read_bit = ST_MAGN_0_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_1_WAI_EXP,
+ .wai = 0x3c,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM303DLHC_MAGN_DEV_NAME,
@@ -318,175 +192,175 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
.odr = {
- .addr = ST_MAGN_1_ODR_ADDR,
- .mask = ST_MAGN_1_ODR_MASK,
+ .addr = 0x00,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
- { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
- { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
- { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
- { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
- { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 8, .value = 0x03 },
+ { .hz = 15, .value = 0x04 },
+ { .hz = 30, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ { .hz = 220, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_1_PW_ADDR,
- .mask = ST_MAGN_1_PW_MASK,
- .value_on = ST_MAGN_1_PW_ON,
- .value_off = ST_MAGN_1_PW_OFF,
+ .addr = 0x02,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_1_FS_ADDR,
- .mask = ST_MAGN_1_FS_MASK,
+ .addr = 0x01,
+ .mask = 0xe0,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_1300MG,
- .value = ST_MAGN_1_FS_AVL_1300_VAL,
- .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ .value = 0x01,
+ .gain = 909,
+ .gain2 = 1020,
},
[1] = {
.num = ST_MAGN_FS_AVL_1900MG,
- .value = ST_MAGN_1_FS_AVL_1900_VAL,
- .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ .value = 0x02,
+ .gain = 1169,
+ .gain2 = 1315,
},
[2] = {
.num = ST_MAGN_FS_AVL_2500MG,
- .value = ST_MAGN_1_FS_AVL_2500_VAL,
- .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ .value = 0x03,
+ .gain = 1492,
+ .gain2 = 1666,
},
[3] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_1_FS_AVL_4000_VAL,
- .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ .value = 0x04,
+ .gain = 2222,
+ .gain2 = 2500,
},
[4] = {
.num = ST_MAGN_FS_AVL_4700MG,
- .value = ST_MAGN_1_FS_AVL_4700_VAL,
- .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ .value = 0x05,
+ .gain = 2500,
+ .gain2 = 2816,
},
[5] = {
.num = ST_MAGN_FS_AVL_5600MG,
- .value = ST_MAGN_1_FS_AVL_5600_VAL,
- .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ .value = 0x06,
+ .gain = 3030,
+ .gain2 = 3389,
},
[6] = {
.num = ST_MAGN_FS_AVL_8100MG,
- .value = ST_MAGN_1_FS_AVL_8100_VAL,
- .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
- .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ .value = 0x07,
+ .gain = 4347,
+ .gain2 = 4878,
},
},
},
- .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_2_WAI_EXP,
+ .wai = 0x3d,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3MDL_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
.odr = {
- .addr = ST_MAGN_2_ODR_ADDR,
- .mask = ST_MAGN_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x1c,
.odr_avl = {
- { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
- { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
- { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
- { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
- { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
- { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
- { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ { .hz = 1, .value = 0x00 },
+ { .hz = 2, .value = 0x01 },
+ { .hz = 3, .value = 0x02 },
+ { .hz = 5, .value = 0x03 },
+ { .hz = 10, .value = 0x04 },
+ { .hz = 20, .value = 0x05 },
+ { .hz = 40, .value = 0x06 },
+ { .hz = 80, .value = 0x07 },
},
},
.pw = {
- .addr = ST_MAGN_2_PW_ADDR,
- .mask = ST_MAGN_2_PW_MASK,
- .value_on = ST_MAGN_2_PW_ON,
- .value_off = ST_MAGN_2_PW_OFF,
+ .addr = 0x22,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
- .addr = ST_MAGN_2_FS_ADDR,
- .mask = ST_MAGN_2_FS_MASK,
+ .addr = 0x21,
+ .mask = 0x60,
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_4000MG,
- .value = ST_MAGN_2_FS_AVL_4000_VAL,
- .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ .value = 0x00,
+ .gain = 146,
},
[1] = {
.num = ST_MAGN_FS_AVL_8000MG,
- .value = ST_MAGN_2_FS_AVL_8000_VAL,
- .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ .value = 0x01,
+ .gain = 292,
},
[2] = {
.num = ST_MAGN_FS_AVL_12000MG,
- .value = ST_MAGN_2_FS_AVL_12000_VAL,
- .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+ .value = 0x02,
+ .gain = 438,
},
[3] = {
.num = ST_MAGN_FS_AVL_16000MG,
- .value = ST_MAGN_2_FS_AVL_16000_VAL,
- .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+ .value = 0x03,
+ .gain = 584,
},
},
},
- .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
{
- .wai = ST_MAGN_3_WAI_EXP,
- .wai_addr = ST_MAGN_3_WAI_ADDR,
+ .wai = 0x40,
+ .wai_addr = 0x4f,
.sensors_supported = {
[0] = LSM303AGR_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
.odr = {
- .addr = ST_MAGN_3_ODR_ADDR,
- .mask = ST_MAGN_3_ODR_MASK,
+ .addr = 0x60,
+ .mask = 0x0c,
.odr_avl = {
- { 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
- { 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
- { 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
- { 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+ { .hz = 10, .value = 0x00 },
+ { .hz = 20, .value = 0x01 },
+ { .hz = 50, .value = 0x02 },
+ { .hz = 100, .value = 0x03 },
},
},
.pw = {
- .addr = ST_MAGN_3_PW_ADDR,
- .mask = ST_MAGN_3_PW_MASK,
- .value_on = ST_MAGN_3_PW_ON,
- .value_off = ST_MAGN_3_PW_OFF,
+ .addr = 0x60,
+ .mask = 0x03,
+ .value_on = 0x00,
+ .value_off = 0x03,
},
.fs = {
.fs_avl = {
[0] = {
.num = ST_MAGN_FS_AVL_15000MG,
- .gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+ .gain = 1500,
},
},
},
.bdu = {
- .addr = ST_MAGN_3_BDU_ADDR,
- .mask = ST_MAGN_3_BDU_MASK,
+ .addr = 0x62,
+ .mask = 0x10,
},
.drdy_irq = {
- .addr = ST_MAGN_3_DRDY_IRQ_ADDR,
- .mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
- .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
- .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+ .addr = 0x62,
+ .mask_int1 = 0x01,
+ .addr_ihl = 0x63,
+ .mask_ihl = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index b98b9d94d184..a97e802ca523 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
.id_table = hid_dev_rot_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
.remove = hid_dev_rot_remove,
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 13b6ae2fcf7b..0d1bcf89ae17 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -38,7 +38,7 @@
struct mcp4531_cfg {
int wipers;
- int max_pos;
+ int avail[3];
int kohms;
};
@@ -78,38 +78,38 @@ enum mcp4531_type {
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
- [MCP453x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
- [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
- [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
- [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
- [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
- [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
- [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
- [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
- [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
- [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
- [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
- [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
- [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
- [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
- [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
- [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
- [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
+ [MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
+ [MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
+ [MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+ [MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
+ [MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
+ [MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
+ [MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
+ [MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
@@ -124,13 +124,14 @@ struct mcp4531_data {
const struct mcp4531_cfg *cfg;
};
-#define MCP4531_CHANNEL(ch) { \
- .type = IIO_RESISTANCE, \
- .indexed = 1, \
- .output = 1, \
- .channel = (ch), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+#define MCP4531_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec mcp4531_channels[] = {
@@ -156,13 +157,31 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
- *val2 = data->cfg->max_pos;
+ *val2 = data->cfg->avail[2];
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
+static int mcp4531_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct mcp4531_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
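+ /* avail holds {min, step, max} for IIO_AVAIL_RANGE */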
+ *length = ARRAY_SIZE(data->cfg->avail);
+ *vals = data->cfg->avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ }
+
+ return -EINVAL;
+}
+
static int mcp4531_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -172,7 +191,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > data->cfg->max_pos || val < 0)
+ if (val > data->cfg->avail[2] || val < 0)
return -EINVAL;
break;
default:
@@ -186,6 +205,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
+ .read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/potentiostat/Kconfig b/drivers/iio/potentiostat/Kconfig
new file mode 100644
index 000000000000..1e3baf2cc97d
--- /dev/null
+++ b/drivers/iio/potentiostat/Kconfig
@@ -0,0 +1,22 @@
+#
+# Potentiostat drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital potentiostats"
+
+config LMP91000
+ tristate "Texas Instruments LMP91000 potentiostat driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_BUFFER_CB
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Texas Instruments
+ LMP91000 digital potentiostat chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lmp91000
+
+endmenu
diff --git a/drivers/iio/potentiostat/Makefile b/drivers/iio/potentiostat/Makefile
new file mode 100644
index 000000000000..64d315ef4449
--- /dev/null
+++ b/drivers/iio/potentiostat/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O potentiostat drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_LMP91000) += lmp91000.o
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
new file mode 100644
index 000000000000..e22714365022
--- /dev/null
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -0,0 +1,446 @@
+/*
+ * lmp91000.c - Support for Texas Instruments digital potentiostats
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: bias voltage + polarity control, and multiple chip support
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LMP91000_REG_LOCK 0x01
+#define LMP91000_REG_TIACN 0x10
+#define LMP91000_REG_TIACN_GAIN_SHIFT 2
+
+#define LMP91000_REG_REFCN 0x11
+#define LMP91000_REG_REFCN_EXT_REF 0x20
+#define LMP91000_REG_REFCN_50_ZERO 0x80
+
+#define LMP91000_REG_MODECN 0x12
+#define LMP91000_REG_MODECN_3LEAD 0x03
+#define LMP91000_REG_MODECN_TEMP 0x07
+
+#define LMP91000_DRV_NAME "lmp91000"
+
+static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
+ 120000, 350000 };
+
+static const int lmp91000_rload[] = { 10, 33, 50, 100 };
+
+#define LMP91000_TEMP_BASE -40
+
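+/* temperature sensor output in mV, one entry per degree C starting at LMP91000_TEMP_BASE */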
+static const u16 lmp91000_temp_lut[] = {
+ 1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
+ 1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
+ 1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
+ 1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
+ 1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
+ 1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
+ 1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
+ 1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
+ 1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
+ 1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
+ 1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004, 996,
+ 987, 979, 971, 962, 954, 945, 937, 929, 920, 912,
+ 903, 895, 886, 878, 870, 861 };
+
+static const struct regmap_config lmp91000_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+struct lmp91000_data {
+ struct regmap *regmap;
+ struct device *dev;
+
+ struct iio_trigger *trig;
+ struct iio_cb_buffer *cb_buffer;
+ struct iio_channel *adc_chan;
+
+ struct completion completion;
+ u8 chan_select;
+
+ u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
+};
+
+static const struct iio_chan_spec lmp91000_channels[] = {
+ { /* chemical channel mV */
+ .type = IIO_VOLTAGE,
+ .channel = 0,
+ .address = LMP91000_REG_MODECN_3LEAD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ { /* temperature channel mV */
+ .type = IIO_TEMP,
+ .channel = 1,
+ .address = LMP91000_REG_MODECN_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1,
+ },
+};
+
+static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
+{
+ int state, ret;
+
+ ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
+ if (ret)
+ return -EINVAL;
+
+ ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
+ if (ret)
+ return -EINVAL;
+
+ /* delay till first temperature reading is complete */
+ if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
+ usleep_range(3000, 4000);
+
+ data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
+
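+ /* fire our trigger; the captured ADC sample lands in data->buffer via lmp91000_buffer_cb() */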
+ iio_trigger_poll_chained(data->trig);
+
+ ret = wait_for_completion_timeout(&data->completion, HZ);
+ reinit_completion(&data->completion);
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *val = data->buffer[data->chan_select];
+
+ return 0;
+}
+
+static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+ int ret, val;
+
+ memset(data->buffer, 0, sizeof(data->buffer));
+
+ ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
+ if (!ret) {
+ data->buffer[0] = val;
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns(indio_dev));
+ }
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int lmp91000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED: {
+ int ret = iio_channel_start_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ ret = lmp91000_read(data, chan->address, val);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ if (ret)
+ return ret;
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ int tmp, i;
+
+ ret = iio_convert_raw_to_processed(data->adc_chan,
+ *val, &tmp, 1);
+ if (ret)
+ return ret;
+
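+ /* walk the lookup table; the index is degrees above LMP91000_TEMP_BASE */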
+ for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
+ if (lmp91000_temp_lut[i] < tmp)
+ break;
+
+ *val = (LMP91000_TEMP_BASE + i) * 1000;
+ }
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ return iio_read_channel_offset(data->adc_chan, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return iio_read_channel_scale(data->adc_chan, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lmp91000_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = lmp91000_read_raw,
+};
+
+static int lmp91000_read_config(struct lmp91000_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int reg, val;
+ int i, ret;
+
+ ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
+ if (ret) {
+ if (of_property_read_bool(np, "ti,external-tia-resistor"))
+ val = 0;
+ else {
+ dev_err(dev, "no ti,tia-gain-ohm defined");
+ return ret;
+ }
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
+ if (lmp91000_tia_gain[i] == val) {
+ reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "ti,rload-ohm", &val);
+ if (ret) {
+ val = 100;
+ dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
+ }
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
+ if (lmp91000_rload[i] == val) {
+ reg |= i;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "invalid ti,rload-ohm %d\n", val);
+ return ret;
+ }
+
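+ /* unlock the configuration registers, program TIA gain/Rload and the reference, then re-lock */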
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
+ regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
+ regmap_write(data->regmap, LMP91000_REG_REFCN, LMP91000_REG_REFCN_EXT_REF
+ | LMP91000_REG_REFCN_50_ZERO);
+ regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
+
+ return 0;
+}
+
+static int lmp91000_buffer_cb(const void *val, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
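+ /* one callback per captured ADC sample: stash it and wake lmp91000_read() */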
+ data->buffer[data->chan_select] = *((int *)val);
+ complete_all(&data->completion);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops lmp91000_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ return iio_channel_start_all_cb(data->cb_buffer);
+}
+
+static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
+ .preenable = lmp91000_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = lmp91000_buffer_predisable,
+};
+
+static int lmp91000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lmp91000_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &lmp91000_info;
+ indio_dev->channels = lmp91000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
+ indio_dev->name = LMP91000_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-mux%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig) {
+ dev_err(dev, "cannot allocate iio trigger.\n");
+ return -ENOMEM;
+ }
+
+ data->trig->ops = &lmp91000_trigger_ops;
+ data->trig->dev.parent = dev;
+ init_completion(&data->completion);
+
+ ret = lmp91000_read_config(data);
+ if (ret)
+ return ret;
+
+ ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
+ data->trig);
+ if (ret) {
+ dev_err(dev, "cannot set immutable trigger.\n");
+ return ret;
+ }
+
+ ret = iio_trigger_register(data->trig);
+ if (ret) {
+ dev_err(dev, "cannot register iio trigger.\n");
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &lmp91000_buffer_handler,
+ &lmp91000_buffer_setup_ops);
+ if (ret)
+ goto error_unreg_trigger;
+
+ data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
+ indio_dev);
+
+ if (IS_ERR(data->cb_buffer)) {
+ if (PTR_ERR(data->cb_buffer) == -ENODEV)
+ ret = -EPROBE_DEFER;
+ else
+ ret = PTR_ERR(data->cb_buffer);
+
+ goto error_unreg_buffer;
+ }
+
+ data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_cb_buffer;
+
+ return 0;
+
+error_unreg_cb_buffer:
+ iio_channel_release_all_cb(data->cb_buffer);
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+error_unreg_trigger:
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int lmp91000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct lmp91000_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_channel_stop_all_cb(data->cb_buffer);
+ iio_channel_release_all_cb(data->cb_buffer);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+
+ return 0;
+}
+
+static const struct of_device_id lmp91000_of_match[] = {
+ { .compatible = "ti,lmp91000", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lmp91000_of_match);
+
+static const struct i2c_device_id lmp91000_id[] = {
+ { "lmp91000", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lmp91000_id);
+
+static struct i2c_driver lmp91000_driver = {
+ .driver = {
+ .name = LMP91000_DRV_NAME,
+ .of_match_table = of_match_ptr(lmp91000_of_match),
+ },
+ .probe = lmp91000_probe,
+ .remove = lmp91000_remove,
+ .id_table = lmp91000_id,
+};
+module_i2c_driver(lmp91000_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("LMP91000 digital potentiostat");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15cd416365c1..bd8d96b96771 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,16 @@
menu "Pressure sensors"
+config ABP060MG
+ tristate "Honeywell ABP pressure sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Honeywell ABP pressure
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called abp060mg.
+
config BMP280
tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index fff77185a5cc..de3dbc81dc5a 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ABP060MG) += abp060mg.o
obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
new file mode 100644
index 000000000000..43bdd0b9155f
--- /dev/null
+++ b/drivers/iio/pressure/abp060mg.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2016 - Marcin Malagowski <mrc@bourne.st>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+#define ABP060MG_ERROR_MASK 0xC000
+#define ABP060MG_RESP_TIME_MS 40
+#define ABP060MG_MIN_COUNTS 1638 /* = 0x0666 (10% of u14) */
+#define ABP060MG_MAX_COUNTS 14745 /* = 0x3999 (90% of u14) */
+#define ABP060MG_NUM_COUNTS (ABP060MG_MAX_COUNTS - ABP060MG_MIN_COUNTS)
+
+enum abp_variant {
+ /* gage [kPa] */
+ ABP006KG, ABP010KG, ABP016KG, ABP025KG, ABP040KG, ABP060KG, ABP100KG,
+ ABP160KG, ABP250KG, ABP400KG, ABP600KG, ABP001GG,
+ /* differential [kPa] */
+ ABP006KD, ABP010KD, ABP016KD, ABP025KD, ABP040KD, ABP060KD, ABP100KD,
+ ABP160KD, ABP250KD, ABP400KD,
+ /* gage [psi] */
+ ABP001PG, ABP005PG, ABP015PG, ABP030PG, ABP060PG, ABP100PG, ABP150PG,
+ /* differential [psi] */
+ ABP001PD, ABP005PD, ABP015PD, ABP030PD, ABP060PD,
+};
+
+struct abp_config {
+ int min;
+ int max;
+};
+
+static struct abp_config abp_config[] = {
+ /* mbar & kPa variants */
+ [ABP006KG] = { .min = 0, .max = 6000 },
+ [ABP010KG] = { .min = 0, .max = 10000 },
+ [ABP016KG] = { .min = 0, .max = 16000 },
+ [ABP025KG] = { .min = 0, .max = 25000 },
+ [ABP040KG] = { .min = 0, .max = 40000 },
+ [ABP060KG] = { .min = 0, .max = 60000 },
+ [ABP100KG] = { .min = 0, .max = 100000 },
+ [ABP160KG] = { .min = 0, .max = 160000 },
+ [ABP250KG] = { .min = 0, .max = 250000 },
+ [ABP400KG] = { .min = 0, .max = 400000 },
+ [ABP600KG] = { .min = 0, .max = 600000 },
+ [ABP001GG] = { .min = 0, .max = 1000000 },
+ [ABP006KD] = { .min = -6000, .max = 6000 },
+ [ABP010KD] = { .min = -10000, .max = 10000 },
+ [ABP016KD] = { .min = -16000, .max = 16000 },
+ [ABP025KD] = { .min = -25000, .max = 25000 },
+ [ABP040KD] = { .min = -40000, .max = 40000 },
+ [ABP060KD] = { .min = -60000, .max = 60000 },
+ [ABP100KD] = { .min = -100000, .max = 100000 },
+ [ABP160KD] = { .min = -160000, .max = 160000 },
+ [ABP250KD] = { .min = -250000, .max = 250000 },
+ [ABP400KD] = { .min = -400000, .max = 400000 },
+ /* psi variants (1 psi ~ 6895 Pa) */
+ [ABP001PD] = { .min = 0, .max = 6895 },
+ [ABP005PG] = { .min = 0, .max = 34474 },
+ [ABP015PG] = { .min = 0, .max = 103421 },
+ [ABP030PG] = { .min = 0, .max = 206843 },
+ [ABP060PG] = { .min = 0, .max = 413686 },
+ [ABP100PG] = { .min = 0, .max = 689476 },
+ [ABP150PG] = { .min = 0, .max = 1034214 },
+ [ABP001PD] = { .min = -6895, .max = 6895 },
+ [ABP005PD] = { .min = -34474, .max = 34474 },
+ [ABP015PD] = { .min = -103421, .max = 103421 },
+ [ABP030PD] = { .min = -206843, .max = 206843 },
+ [ABP060PD] = { .min = -413686, .max = 413686 },
+};
+
+struct abp_state {
+ struct i2c_client *client;
+ struct mutex lock;
+
+ /*
+ * bus-dependent MEASURE_REQUEST length.
+ * If the adapter lacks SMBUS_QUICK support, a dummy byte must be sent
+ */
+ int mreq_len;
+
+ /* model-dependent values (calculated on probe) */
+ int scale;
+ int offset;
+};
+
+static const struct iio_chan_spec abp060mg_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int abp060mg_get_measurement(struct abp_state *state, int *val)
+{
+ struct i2c_client *client = state->client;
+ __be16 buf[2];
+ u16 pressure;
+ int ret;
+
+ buf[0] = 0;
+ ret = i2c_master_send(client, (u8 *)&buf, state->mreq_len);
+ if (ret < 0)
+ return ret;
+
+ msleep_interruptible(ABP060MG_RESP_TIME_MS);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ pressure = be16_to_cpu(buf[0]);
+ if (pressure & ABP060MG_ERROR_MASK)
+ return -EIO;
+
+ if (pressure < ABP060MG_MIN_COUNTS || pressure > ABP060MG_MAX_COUNTS)
+ return -EIO;
+
+ *val = pressure;
+
+ return IIO_VAL_INT;
+}
+
+static int abp060mg_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&state->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = abp060mg_get_measurement(state, val);
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = state->offset;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = state->scale;
+ *val2 = ABP060MG_NUM_COUNTS * 1000; /* to kPa */
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct iio_info abp060mg_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = abp060mg_read_raw,
+};
+
+static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id)
+{
+ struct abp_state *state = iio_priv(indio_dev);
+ struct abp_config *cfg = &abp_config[id];
+
+ state->scale = cfg->max - cfg->min;
+ state->offset = -ABP060MG_MIN_COUNTS;
+
+ if (cfg->min < 0) /* differential */
+ state->offset -= ABP060MG_NUM_COUNTS >> 1;
+}
+
+static int abp060mg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct abp_state *state;
+ unsigned long cfg_id = id->driver_data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ i2c_set_clientdata(client, state);
+ state->client = client;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+ state->mreq_len = 1;
+
+ abp060mg_init_device(indio_dev, cfg_id);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &abp060mg_info;
+
+ indio_dev->channels = abp060mg_channels;
+ indio_dev->num_channels = ARRAY_SIZE(abp060mg_channels);
+
+ mutex_init(&state->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id abp060mg_id_table[] = {
+ /* mbar & kPa variants (abp060m [60 mbar] == abp006k [6 kPa]) */
+ /* gage: */
+ { "abp060mg", ABP006KG }, { "abp006kg", ABP006KG },
+ { "abp100mg", ABP010KG }, { "abp010kg", ABP010KG },
+ { "abp160mg", ABP016KG }, { "abp016kg", ABP016KG },
+ { "abp250mg", ABP025KG }, { "abp025kg", ABP025KG },
+ { "abp400mg", ABP040KG }, { "abp040kg", ABP040KG },
+ { "abp600mg", ABP060KG }, { "abp060kg", ABP060KG },
+ { "abp001bg", ABP100KG }, { "abp100kg", ABP100KG },
+ { "abp1_6bg", ABP160KG }, { "abp160kg", ABP160KG },
+ { "abp2_5bg", ABP250KG }, { "abp250kg", ABP250KG },
+ { "abp004bg", ABP400KG }, { "abp400kg", ABP400KG },
+ { "abp006bg", ABP600KG }, { "abp600kg", ABP600KG },
+ { "abp010bg", ABP001GG }, { "abp001gg", ABP001GG },
+ /* differential: */
+ { "abp060md", ABP006KD }, { "abp006kd", ABP006KD },
+ { "abp100md", ABP010KD }, { "abp010kd", ABP010KD },
+ { "abp160md", ABP016KD }, { "abp016kd", ABP016KD },
+ { "abp250md", ABP025KD }, { "abp025kd", ABP025KD },
+ { "abp400md", ABP040KD }, { "abp040kd", ABP040KD },
+ { "abp600md", ABP060KD }, { "abp060kd", ABP060KD },
+ { "abp001bd", ABP100KD }, { "abp100kd", ABP100KD },
+ { "abp1_6bd", ABP160KD }, { "abp160kd", ABP160KD },
+ { "abp2_5bd", ABP250KD }, { "abp250kd", ABP250KD },
+ { "abp004bd", ABP400KD }, { "abp400kd", ABP400KD },
+ /* psi variants */
+ /* gage: */
+ { "abp001pg", ABP001PG },
+ { "abp005pg", ABP005PG },
+ { "abp015pg", ABP015PG },
+ { "abp030pg", ABP030PG },
+ { "abp060pg", ABP060PG },
+ { "abp100pg", ABP100PG },
+ { "abp150pg", ABP150PG },
+ /* differential: */
+ { "abp001pd", ABP001PD },
+ { "abp005pd", ABP005PD },
+ { "abp015pd", ABP015PD },
+ { "abp030pd", ABP030PD },
+ { "abp060pd", ABP060PD },
+ { /* empty */ },
+};
+MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
+
+static struct i2c_driver abp060mg_driver = {
+ .driver = {
+ .name = "abp060mg",
+ },
+ .probe = abp060mg_probe,
+ .id_table = abp060mg_id_table,
+};
+module_i2c_driver(abp060mg_driver);
+
+MODULE_AUTHOR("Marcin Malagowski <mrc@bourne.st>");
+MODULE_DESCRIPTION("Honeywell ABP pressure sensor driver");
+MODULE_LICENSE("GPL");
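For reference, the raw/offset/scale triplet the new driver exposes maps back to a pressure in kPa as (raw + offset) * scale, with scale = (cfg->max - cfg->min) / (ABP060MG_NUM_COUNTS * 1000). Below is a minimal user-space sketch of that arithmetic using made-up sample values for an abp006kg part; none of it is part of the patch.

#include <stdio.h>

#define MIN_COUNTS	1638
#define MAX_COUNTS	14745
#define NUM_COUNTS	(MAX_COUNTS - MIN_COUNTS)

int main(void)
{
	int raw = 8192;			/* hypothetical in_pressure_raw reading */
	int offset = -MIN_COUNTS;	/* gage variant, as computed in probe */
	int span_pa = 6000;		/* cfg->max - cfg->min for abp006kg */

	/* IIO convention: processed = (raw + offset) * scale, in kPa here */
	double kpa = (double)(raw + offset) * span_pa / (NUM_COUNTS * 1000.0);

	printf("pressure = %.3f kPa\n", kpa);
	return 0;
}

With raw at roughly mid-span (8192) this prints about 3.0 kPa, half of the 6 kPa range, which is what the 10%-90% transfer function encoded by the MIN/MAX count limits predicts.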
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 6392d7b62841..cc3f84139157 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -82,8 +82,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_PRESSURE: /* in 0.25 pascal / LSB */
@@ -91,32 +92,39 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = be32_to_cpu(tmp) >> 12;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_TEMP: /* in 0.0625 celsius / LSB */
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ break;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ break;
*val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_PRESSURE:
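The mpl3115 change above converts early returns inside the RAW case into breaks so that every path, success or failure, reaches the single iio_device_release_direct_mode() call. The following is a small user-space sketch of that single-release-point shape, using a plain pthread mutex as a stand-in for the claim/release helpers; names and values are invented.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t direct_mode = PTHREAD_MUTEX_INITIALIZER;

static int read_channel(int chan, int *val)
{
	int ret;

	if (pthread_mutex_trylock(&direct_mode))	/* "claim" fails if busy */
		return -16;				/* like -EBUSY */

	switch (chan) {
	case 0:
		*val = 42;	/* pretend the block read succeeded */
		ret = 0;
		break;
	default:
		ret = -22;	/* error path still falls through to release */
		break;
	}

	pthread_mutex_unlock(&direct_mode);		/* single release point */
	return ret;
}

int main(void)
{
	int v = 0;

	printf("ret=%d val=%d\n", read_channel(0, &v), v);
	return 0;
}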
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f0c880..6bd53e702667 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -392,17 +392,14 @@ static int ms5611_init(struct iio_dev *indio_dev)
/* Enable attached regulator if any. */
st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
- if (!IS_ERR(st->vdd)) {
- ret = regulator_enable(st->vdd);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
- return ret;
- }
- } else {
- ret = PTR_ERR(st->vdd);
- if (ret != -ENODEV)
- return ret;
+ if (IS_ERR(st->vdd))
+ return PTR_ERR(st->vdd);
+
+ ret = regulator_enable(st->vdd);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "failed to enable Vdd supply: %d\n", ret);
+ return ret;
}
ret = ms5611_reset(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a75eb3a..e19e0787864c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -112,115 +112,24 @@
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_TEMP_1_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS331AP SENSOR
- * See LPS331AP datasheet:
- * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
- */
-#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
-#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
-#define ST_PRESS_LPS331AP_ODR_MASK 0x70
-#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
-#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
-#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
-#define ST_PRESS_LPS331AP_PW_ADDR 0x20
-#define ST_PRESS_LPS331AP_PW_MASK 0x80
-#define ST_PRESS_LPS331AP_FS_ADDR 0x23
-#define ST_PRESS_LPS331AP_FS_MASK 0x30
-#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
-#define ST_PRESS_LPS331AP_BDU_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
-#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-
-/*
- * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
- */
-
/* LPS001WP pressure resolution */
#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
/* LPS001WP temperature resolution */
#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
-
-#define ST_PRESS_LPS001WP_WAI_EXP 0xba
-#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
-#define ST_PRESS_LPS001WP_ODR_MASK 0x30
-#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS001WP_PW_ADDR 0x20
-#define ST_PRESS_LPS001WP_PW_MASK 0x40
+/* LPS001WP pressure gain */
#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
-#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
-#define ST_PRESS_LPS001WP_BDU_MASK 0x04
-#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
+/* LPS001WP pressure and temp L addresses */
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-/*
- * CUSTOM VALUES FOR LPS25H SENSOR
- * See LPS25H datasheet:
- * http://www2.st.com/resource/en/datasheet/lps25h.pdf
- */
-#define ST_PRESS_LPS25H_WAI_EXP 0xbd
-#define ST_PRESS_LPS25H_ODR_ADDR 0x20
-#define ST_PRESS_LPS25H_ODR_MASK 0x70
-#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
-#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
-#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
-#define ST_PRESS_LPS25H_PW_ADDR 0x20
-#define ST_PRESS_LPS25H_PW_MASK 0x80
-#define ST_PRESS_LPS25H_BDU_ADDR 0x20
-#define ST_PRESS_LPS25H_BDU_MASK 0x04
-#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
-#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
-#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+/* LPS25H pressure and temp L addresses */
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
-/*
- * CUSTOM VALUES FOR LPS22HB SENSOR
- * See LPS22HB datasheet:
- * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
- */
-
/* LPS22HB temperature sensitivity */
#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
-#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
-#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
-#define ST_PRESS_LPS22HB_ODR_MASK 0x70
-#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
-#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
-#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
-#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
-#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
-#define ST_PRESS_LPS22HB_PW_ADDR 0x10
-#define ST_PRESS_LPS22HB_PW_MASK 0x70
-#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
-#define ST_PRESS_LPS22HB_BDU_MASK 0x02
-#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
-#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
-#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
-#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
-#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
-
static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
@@ -321,7 +230,12 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
static const struct st_sensor_settings st_press_sensors_settings[] = {
{
- .wai = ST_PRESS_LPS331AP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
+ .wai = 0xbb,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
@@ -329,24 +243,24 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS331AP_ODR_ADDR,
- .mask = ST_PRESS_LPS331AP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x05 },
+ { .hz = 13, .value = 0x06 },
+ { .hz = 25, .value = 0x07 },
},
},
.pw = {
- .addr = ST_PRESS_LPS331AP_PW_ADDR,
- .mask = ST_PRESS_LPS331AP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_LPS331AP_FS_ADDR,
- .mask = ST_PRESS_LPS331AP_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
/*
* Pressure and temperature sensitivity values
@@ -360,24 +274,27 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS331AP_BDU_ADDR,
- .mask = ST_PRESS_LPS331AP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+ .addr = 0x22,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x20,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS001WP_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS001WP SENSOR
+ */
+ .wai = 0xba,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS001WP_PRESS_DEV_NAME,
@@ -385,17 +302,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
.num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
.odr = {
- .addr = ST_PRESS_LPS001WP_ODR_ADDR,
- .mask = ST_PRESS_LPS001WP_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x30,
.odr_avl = {
- { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
},
},
.pw = {
- .addr = ST_PRESS_LPS001WP_PW_ADDR,
- .mask = ST_PRESS_LPS001WP_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x40,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -413,17 +330,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS001WP_BDU_ADDR,
- .mask = ST_PRESS_LPS001WP_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
.addr = 0,
},
- .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS25H_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
+ .wai = 0xbd,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS25H_PRESS_DEV_NAME,
@@ -431,18 +353,18 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_1_channels,
.num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
- .addr = ST_PRESS_LPS25H_ODR_ADDR,
- .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
- { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
- { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
- { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 7, .value = 0x02 },
+ { .hz = 13, .value = 0x03 },
+ { .hz = 25, .value = 0x04 },
},
},
.pw = {
- .addr = ST_PRESS_LPS25H_PW_ADDR,
- .mask = ST_PRESS_LPS25H_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x80,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -460,24 +382,29 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS25H_BDU_ADDR,
- .mask = ST_PRESS_LPS25H_BDU_MASK,
+ .addr = 0x20,
+ .mask = 0x04,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+ .addr = 0x23,
+ .mask_int1 = 0x01,
+ .mask_int2 = 0x10,
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_PRESS_LPS22HB_WAI_EXP,
+ /*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+ .wai = 0xb1,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS22HB_PRESS_DEV_NAME,
@@ -485,19 +412,19 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
.odr = {
- .addr = ST_PRESS_LPS22HB_ODR_ADDR,
- .mask = ST_PRESS_LPS22HB_ODR_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.odr_avl = {
- { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
- { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
- { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
- { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
- { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+ { .hz = 1, .value = 0x01 },
+ { .hz = 10, .value = 0x02 },
+ { .hz = 25, .value = 0x03 },
+ { .hz = 50, .value = 0x04 },
+ { .hz = 75, .value = 0x05 },
},
},
.pw = {
- .addr = ST_PRESS_LPS22HB_PW_ADDR,
- .mask = ST_PRESS_LPS22HB_PW_MASK,
+ .addr = 0x10,
+ .mask = 0x70,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
@@ -514,20 +441,20 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
},
.bdu = {
- .addr = ST_PRESS_LPS22HB_BDU_ADDR,
- .mask = ST_PRESS_LPS22HB_BDU_MASK,
+ .addr = 0x10,
+ .mask = 0x02,
},
.drdy_irq = {
- .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
- .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
- .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
- .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
- .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
- .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
- .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+ .addr = 0x12,
+ .mask_int1 = 0x04,
+ .mask_int2 = 0x08,
+ .addr_ihl = 0x12,
+ .mask_ihl = 0x80,
+ .addr_od = 0x12,
+ .mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+ .multi_read_bit = true,
},
};
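The st_pressure rework above folds dozens of per-sensor macros into the settings table itself, with the datasheet values written inline next to a comment naming the part. A toy user-space table in the same designated-initializer style follows; all names and numbers are illustrative, not taken from any ST datasheet.

#include <stdio.h>

struct odr_avl {
	int hz;
	int value;
};

struct sensor_settings {
	int wai;				/* WHO_AM_I value */
	struct {
		int addr;
		int mask;
		struct odr_avl odr_avl[4];
	} odr;
	int multi_read_bit;
};

static const struct sensor_settings settings[] = {
	{
		/* register values as they would appear in the datasheet */
		.wai = 0xbb,
		.odr = {
			.addr = 0x20,
			.mask = 0x70,
			.odr_avl = {
				{ .hz = 1,  .value = 0x01 },
				{ .hz = 25, .value = 0x07 },
			},
		},
		.multi_read_bit = 1,
	},
};

int main(void)
{
	printf("wai=0x%02x, %d Hz -> 0x%02x\n", settings[0].wai,
	       settings[0].odr.odr_avl[1].hz, settings[0].odr.odr_avl[1].value);
	return 0;
}

The upside is locality: the value, its register, and the sensor it belongs to sit together, at the cost of losing a greppable symbolic name.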
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb46fda6..c720c3ac0b9b 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -147,12 +147,8 @@ struct zpa2326_private {
#define zpa2326_warn(_idev, _format, _arg...) \
dev_warn(_idev->dev.parent, _format, ##_arg)
-#ifdef DEBUG
#define zpa2326_dbg(_idev, _format, _arg...) \
dev_dbg(_idev->dev.parent, _format, ##_arg)
-#else
-#define zpa2326_dbg(_idev, _format, _arg...)
-#endif
bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
{
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 3141c3c161bb..1fa9eefa0982 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -301,8 +301,6 @@ static int lidar_probe(struct i2c_client *client,
if (ret)
goto error_unreg_buffer;
pm_runtime_enable(&client->dev);
-
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_idle(&client->dev);
return 0;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 066161a4bccd..f962f31a5eb2 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
*val = be32_to_cpu(buf32);
break;
+ default:
+ ret = -EINVAL;
}
if (ret)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index b136d3acc5bd..0f58f46dbad7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
struct rdma_dev_addr *addr;
struct completion comp;
+ int status;
};
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
- rdma_dev_addr));
+ if (!status)
+ memcpy(((struct resolve_cb_context *)context)->addr,
+ addr, sizeof(struct rdma_dev_addr));
+ ((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
wait_for_completion(&ctx.comp);
+ ret = ctx.status;
+ if (ret)
+ return ret;
+
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
if (!dev)
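The addr.c hunk makes the resolve callback record its status in the shared context before completing, so rdma_addr_find_l2_eth_by_grh() no longer copies a DMAC out of an address structure that was never filled in. A stripped-down user-space model of that idea, with invented types and no real completion:

#include <stdio.h>
#include <string.h>

struct resolve_ctx {
	char addr[32];
	int status;
	int done;		/* stands in for struct completion */
};

static void resolve_cb(int status, const char *addr, struct resolve_ctx *ctx)
{
	if (!status) {
		strncpy(ctx->addr, addr, sizeof(ctx->addr) - 1);
		ctx->addr[sizeof(ctx->addr) - 1] = '\0';
	}
	ctx->status = status;	/* propagate the error to the waiter */
	ctx->done = 1;		/* complete() analogue */
}

int main(void)
{
	struct resolve_ctx ctx = { .status = 0 };

	resolve_cb(-22, "ignored on error", &ctx);	/* simulate a failure */

	if (!ctx.done || ctx.status) {
		printf("resolution failed: %d\n", ctx.status);
		return 1;
	}
	printf("addr=%s\n", ctx.addr);
	return 0;
}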
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0d4b114b4011..cf1edfa1cbac 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -128,6 +128,8 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
+ /* Synchronize CM port state changes */
+ spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@@ -209,6 +211,8 @@ struct cm_port {
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
+ struct list_head cm_priv_prim_list;
+ struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
@@ -289,6 +293,12 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
+ struct list_head prim_list;
+ struct list_head altr_list;
+ /* Indicates that the send port mad is registered and av is set */
+ int prim_send_port_not_ready;
+ int altr_send_port_not_ready;
+
struct list_head work_list;
atomic_t work_count;
};
@@ -307,20 +317,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
+ struct cm_av *av;
+ unsigned long flags, flags2;
+ int ret = 0;
+ /* don't let the port be released until the agent is down */
+ spin_lock_irqsave(&cm.state_lock, flags2);
+ spin_lock_irqsave(&cm.lock, flags);
+ if (!cm_id_priv->prim_send_port_not_ready)
+ av = &cm_id_priv->av;
+ else if (!cm_id_priv->altr_send_port_not_ready &&
+ (cm_id_priv->alt_av.port))
+ av = &cm_id_priv->alt_av;
+ else {
+ pr_info("%s: not valid CM id\n", __func__);
+ ret = -ENODEV;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&cm.lock, flags);
+ /* Make sure the port hasn't released the mad agent yet */
mad_agent = cm_id_priv->av.port->mad_agent;
- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
- if (IS_ERR(ah))
- return PTR_ERR(ah);
+ if (!mad_agent) {
+ pr_info("%s: not a valid MAD agent\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+ if (IS_ERR(ah)) {
+ ret = PTR_ERR(ah);
+ goto out;
+ }
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
- cm_id_priv->av.pkey_index,
+ av->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
- return PTR_ERR(m);
+ ret = PTR_ERR(m);
+ goto out;
}
/* Timeout set by caller if response is expected. */
@@ -330,7 +367,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
- return 0;
+
+out:
+ spin_unlock_irqrestore(&cm.state_lock, flags2);
+ return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
@@ -400,7 +440,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+ struct cm_id_private *cm_id_priv)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
@@ -435,7 +476,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
- return 0;
+ spin_lock_irqsave(&cm.lock, flags);
+ if (&cm_id_priv->av == av)
+ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+ else if (&cm_id_priv->alt_av == av)
+ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+ else
+ ret = -EINVAL;
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -725,6 +776,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
+ INIT_LIST_HEAD(&cm_id_priv->prim_list);
+ INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@@ -940,6 +993,15 @@ retest:
break;
}
+ spin_lock_irq(&cm.lock);
+ if (!list_empty(&cm_id_priv->altr_list) &&
+ (!cm_id_priv->altr_send_port_not_ready))
+ list_del(&cm_id_priv->altr_list);
+ if (!list_empty(&cm_id_priv->prim_list) &&
+ (!cm_id_priv->prim_send_port_not_ready))
+ list_del(&cm_id_priv->prim_list);
+ spin_unlock_irq(&cm.lock);
+
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
@@ -1240,12 +1302,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
- &cm_id_priv->alt_av);
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
}
@@ -1710,7 +1773,8 @@ static int cm_req_handler(struct cm_work *work)
dev_put(gid_attr.ndev);
}
work->path[0].gid_type = gid_attr.gid_type;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
}
if (ret) {
int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1729,7 +1793,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (req_msg->alt_local_lid) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2797,7 +2862,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
@@ -2909,7 +2975,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3101,7 +3168,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
if (ret)
goto out;
@@ -3538,7 +3605,9 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
+ struct cm_av tmp_av;
unsigned long flags;
+ int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3547,7 +3616,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
+ /* Swap address vector */
+ tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
+ cm_id_priv->alt_av = tmp_av;
+ /* Swap port send ready state */
+ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3958,6 +4034,9 @@ static void cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
+ INIT_LIST_HEAD(&port->cm_priv_prim_list);
+ INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
ret = cm_create_port_fs(port);
if (ret)
goto error1;
@@ -4015,6 +4094,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
struct cm_device *cm_dev = client_data;
struct cm_port *port;
+ struct cm_id_private *cm_id_priv;
+ struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@@ -4038,15 +4119,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ /* Mark all the cm_ids as not valid */
+ spin_lock_irq(&cm.lock);
+ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+ cm_id_priv->altr_send_port_not_ready = 1;
+ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+ cm_id_priv->prim_send_port_not_ready = 1;
+ spin_unlock_irq(&cm.lock);
/*
* We flush the queue here after the going_down set, this
* verify that no new works will be queued in the recv handler,
* after that we can call the unregister_mad_agent
*/
flush_workqueue(cm.wq);
- ib_unregister_mad_agent(port->mad_agent);
+ spin_lock_irq(&cm.state_lock);
+ cur_mad_agent = port->mad_agent;
+ port->mad_agent = NULL;
+ spin_unlock_irq(&cm.state_lock);
+ ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
}
+
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -4059,6 +4152,7 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
+ spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
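The new cm.state_lock exists so that cm_alloc_msg() dereferences port->mad_agent only while removal cannot clear and unregister it. Here is a rough pthreads sketch of that publish-NULL-then-tear-down ordering; everything in it is a stand-in, not the real IB objects.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int *mad_agent;			/* stands in for the MAD agent */

static int send_msg(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);	/* agent cannot vanish under us */
	if (!mad_agent)
		ret = -19;			/* -ENODEV in the driver */
	else
		printf("sent via agent %d\n", *mad_agent);
	pthread_mutex_unlock(&state_lock);
	return ret;
}

static void remove_port(void)
{
	int *cur;

	pthread_mutex_lock(&state_lock);
	cur = mad_agent;
	mad_agent = NULL;		/* new senders now see "not ready" */
	pthread_mutex_unlock(&state_lock);
	free(cur);			/* ib_unregister_mad_agent() analogue */
}

int main(void)
{
	mad_agent = malloc(sizeof(*mad_agent));
	if (!mad_agent)
		return 1;
	*mad_agent = 7;
	send_msg();
	remove_port();
	printf("after removal: %d\n", send_msg());
	return 0;
}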
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8c30e3dedebe..e7dcfac877ca 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -159,7 +159,7 @@ static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
-static int cma_pernet_id;
+static unsigned int cma_pernet_id;
struct cma_pernet {
struct idr tcp_ps;
@@ -1137,47 +1137,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
}
}
-static void cma_save_ip4_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip4_info(struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in *ip4;
-
if (src_addr) {
- ip4 = (struct sockaddr_in *)src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = local_port;
+ *src_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
+ .sin_port = local_port,
+ };
}
if (dst_addr) {
- ip4 = (struct sockaddr_in *)dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ *dst_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->src_addr.ip4.addr,
+ .sin_port = hdr->port,
+ };
}
}
-static void cma_save_ip6_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in6 *ip6;
-
if (src_addr) {
- ip6 = (struct sockaddr_in6 *)src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = local_port;
+ *src_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->dst_addr.ip6,
+ .sin6_port = local_port,
+ };
}
if (dst_addr) {
- ip6 = (struct sockaddr_in6 *)dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ *dst_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->src_addr.ip6,
+ .sin6_port = hdr->port,
+ };
}
}
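The cma.c hunk above replaces field-by-field writes through a cast pointer with one compound-literal assignment per address, which also zeroes any fields not named (sin_zero, and for IPv6 the flowinfo/scope_id). The same idiom in ordinary user-space socket code:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in src;

	src = (struct sockaddr_in) {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = htons(4791),	/* illustrative port number */
	};

	printf("%s:%u\n", inet_ntoa(src.sin_addr), ntohs(src.sin_port));
	return 0;
}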
@@ -1202,10 +1202,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip4_info((struct sockaddr_in *)src_addr,
+ (struct sockaddr_in *)dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
+ (struct sockaddr_in6 *)dst_addr, hdr, port);
break;
default:
return -EAFNOSUPPORT;
@@ -2479,6 +2481,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2504,6 +2518,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
if (addr->dev_addr.bound_dev_if) {
+ unsigned long supported_gids;
+
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
if (!ndev) {
ret = -ENODEV;
@@ -2527,7 +2543,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->net = &init_net;
route->path_rec->ifindex = ndev->ifindex;
- route->path_rec->gid_type = id_priv->gid_type;
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ route->path_rec->gid_type =
+ cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
}
if (!ndev) {
ret = -ENODEV;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 1acc95b3aaa3..d29372624f3a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -124,14 +124,7 @@ void ib_cache_release_one(struct ib_device *device);
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
struct net_device *upper)
{
- struct net_device *_upper = NULL;
- struct list_head *iter;
-
- netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
- if (_upper == upper)
- break;
-
- return _upper == upper;
+ return netdev_has_upper_dev_all_rcu(dev, upper);
}
int addr_init(void);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index c86ddcea7675..0621f4455732 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -434,6 +434,26 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
&parsed->gid_attr);
}
+struct upper_list {
+ struct list_head list;
+ struct net_device *upper;
+};
+
+static int netdev_upper_walk(struct net_device *upper, void *data)
+{
+ struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ struct list_head *upper_list = data;
+
+ if (!entry)
+ return 0;
+
+ list_add_tail(&entry->list, upper_list);
+ dev_hold(upper);
+ entry->upper = upper;
+
+ return 0;
+}
+
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
void *cookie,
void (*handle_netdev)(struct ib_device *ib_dev,
@@ -441,28 +461,12 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
struct net_device *ndev))
{
struct net_device *ndev = (struct net_device *)cookie;
- struct upper_list {
- struct list_head list;
- struct net_device *upper;
- };
- struct net_device *upper;
- struct list_head *iter;
struct upper_list *upper_iter;
struct upper_list *upper_temp;
LIST_HEAD(upper_list);
rcu_read_lock();
- netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
- struct upper_list *entry = kmalloc(sizeof(*entry),
- GFP_ATOMIC);
-
- if (!entry)
- continue;
-
- list_add_tail(&entry->list, &upper_list);
- dev_hold(upper);
- entry->upper = upper;
- }
+ netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
rcu_read_unlock();
handle_netdev(ib_dev, port, ndev);
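roce_gid_mgmt.c now builds its snapshot of upper devices through the netdev_walk_all_upper_dev_rcu() callback instead of an open-coded iterator. The shape of that visitor pattern, reduced to plain C with invented types:

#include <stdio.h>

struct upper_list {
	const char *name;
	struct upper_list *next;
};

/* walker: hands every "upper device" to fn; a non-zero return stops the walk */
static int walk_uppers(const char *const *devs, int n,
		       int (*fn)(const char *dev, void *data), void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(devs[i], data))
			return 1;
	return 0;
}

static struct upper_list nodes[8];
static int used;

static int collect(const char *dev, void *data)
{
	struct upper_list **head = data;

	if (used == (int)(sizeof(nodes) / sizeof(nodes[0])))
		return 0;	/* skip and keep walking, like a failed kmalloc */
	nodes[used].name = dev;
	nodes[used].next = *head;
	*head = &nodes[used++];
	return 0;
}

int main(void)
{
	const char *uppers[] = { "bond0", "bond0.100" };
	struct upper_list *head = NULL;

	walk_uppers(uppers, 2, collect, &head);
	for (struct upper_list *p = head; p; p = p->next)
		printf("upper: %s\n", p->name);
	return 0;
}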
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 0120e7ff449f..1e62a5f0cb28 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
- if (npages == 0) {
+ if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 1f0fe3217f23..6b079a31dced 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- flags, local_page_list, NULL);
+ flags, local_page_list, NULL, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 257d0799b526..813593550c4b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -263,12 +263,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 867b8cf82be8..19c6477af19f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -666,18 +666,6 @@ skip_cqe:
return ret;
}
-static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
-{
- struct c4iw_mr *mhp;
- unsigned long flags;
-
- spin_lock_irqsave(&rhp->lock, flags);
- mhp = get_mhp(rhp, rkey >> 8);
- if (mhp)
- mhp->attr.state = 0;
- spin_unlock_irqrestore(&rhp->lock, flags);
-}
-
/*
* Get one cq entry from c4iw and map it to openib.
*
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+ c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
}
} else {
switch (CQE_OPCODE(&cqe)) {
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
/* Invalidate the MR if the fastreg failed */
if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
- invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
+ c4iw_invalidate_mr(qhp->rhp,
+ CQE_WRID_FR_STAG(&cqe));
break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cc7cf18411b8..516b0ae6dc3f 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1479,6 +1479,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 511,
.ciq = true,
.lro = false,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7e7f79e55006..4788e1a46fde 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_drain_rq(struct ib_qp *qp);
void c4iw_drain_sq(struct ib_qp *qp);
-
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 80e27749420a..410408f886c1 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree(mhp);
return 0;
}
+
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+ struct c4iw_mr *mhp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rhp->lock, flags);
+ mhp = get_mhp(rhp, rkey >> 8);
+ if (mhp)
+ mhp->attr.state = 0;
+ spin_unlock_irqrestore(&rhp->lock, flags);
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f57deba6717c..b7ac97b27c88 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
- struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
-
- mhp->attr.state = 0;
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ_WITH_INV:
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
+ c4iw_invalidate_mr(qhp->rhp,
+ wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
- else
+ } else {
fw_flags = 0;
+ }
err = build_rdma_read(wqe, wr, &len16);
if (err)
break;
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
- err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
+ err = build_inv_stag(wqe, wr, &len16);
+ c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
PDBG("%s post of type=%d TBD!\n", __func__,
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 4962b6ef1f34..7a3d906b3671 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -776,75 +776,3 @@ void hfi1_put_proc_affinity(int cpu)
}
mutex_unlock(&affinity->lock);
}
-
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count)
-{
- struct hfi1_affinity_node *entry;
- cpumask_var_t mask;
- int ret, i;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = cpulist_parse(buf, mask);
- if (ret)
- goto out;
-
- if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
- dd_dev_warn(dd, "Invalid CPU mask\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* reset the SDMA interrupt affinity details */
- init_cpu_mask_set(&entry->def_intr);
- cpumask_copy(&entry->def_intr.mask, mask);
-
- /* Reassign the affinity for each SDMA interrupt. */
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *msix;
-
- msix = &dd->msix_entries[i];
- if (msix->type != IRQ_SDMA)
- continue;
-
- ret = get_irq_affinity(dd, msix);
-
- if (ret)
- break;
- }
-out:
- free_cpumask_var(mask);
-unlock:
- mutex_unlock(&node_affinity.lock);
- return ret ? ret : strnlen(buf, PAGE_SIZE);
-}
-
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
-{
- struct hfi1_affinity_node *entry;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- mutex_unlock(&node_affinity.lock);
- return -EINVAL;
- }
-
- cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
- mutex_unlock(&node_affinity.lock);
- return strnlen(buf, PAGE_SIZE);
-}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index c9453b3d47b4..e78c7aa094e0 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -94,10 +94,6 @@ int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
void hfi1_put_proc_affinity(int);
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count);
-
struct hfi1_affinity_node {
int node;
struct cpu_mask_set def_intr;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 37d8af50cc13..ef72bc2a9e1d 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
/* leave shared count at zero for both global and VL15 */
write_global_credit(dd, vau, vl15buf, 0);
- /* We may need some credits for another VL when sending packets
- * with the snoop interface. Dividing it down the middle for VL15
- * and VL0 should suffice.
- */
- if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
- } else {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- }
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}
/*
@@ -9918,9 +9907,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
u32 mask = ~((1U << ppd->lmc) - 1);
u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
- if (dd->hfi1_snoop.mode_flag)
- dd_dev_info(dd, "Set lid/lmc while snooping");
-
c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
@@ -12115,7 +12101,7 @@ static void update_synth_timer(unsigned long opaque)
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
-#define C_MAX_NAME 13 /* 12 chars + one for /0 */
+#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
static int init_cntrs(struct hfi1_devdata *dd)
{
int i, rcv_ctxts, j;
@@ -14466,7 +14452,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
* Any error printing is already done by the init code.
* On return, we have the chip mapped.
*/
- ret = hfi1_pcie_ddinit(dd, pdev, ent);
+ ret = hfi1_pcie_ddinit(dd, pdev);
if (ret < 0)
goto bail_free;
@@ -14694,6 +14680,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_free_cntrs;
+ init_completion(&dd->user_comp);
+
+ /* The user refcount starts with one to indicate an active device */
+ atomic_set(&dd->user_refcount, 1);
+
goto bail;
bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 92345259a8f4..043fd21dc5f3 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -320,6 +320,9 @@
/* DC_DC8051_CFG_MODE.GENERAL bits */
#define DISABLE_SELF_GUID_CHECK 0x2
+/* Bad L2 frame error code */
+#define BAD_L2_ERR 0x6
+
/*
* Eager buffer minimum and maximum sizes supported by the hardware.
* All power-of-two sizes in between are supported as well.
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index d4261163bd25..4fbaee68012b 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
dd->rhf_offset;
struct rvt_qp *qp;
struct ib_header *hdr;
- struct ib_other_headers *ohdr;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+ hdr = packet->hdr;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &hdr->u.l.oth;
+ packet->rcv_flags |= HFI1_HAS_GRH;
+ } else {
goto next; /* just in case */
+ }
- bth1 = be32_to_cpu(ohdr->bth[1]);
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
if (!is_ecn)
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
/* turn off BECN, FECN */
bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
- ohdr->bth[1] = cpu_to_be32(bth1);
+ packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
update_ps_mdata(&mdata, rcd);
}
@@ -1359,12 +1361,25 @@ int process_receive_ib(struct hfi1_packet *packet)
int process_receive_bypass(struct hfi1_packet *packet)
{
+ struct hfi1_devdata *dd = packet->rcd->dd;
+
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
- dd_dev_err(packet->rcd->dd,
+ dd_dev_err(dd,
"Bypass packets are not supported in normal operation. Dropping\n");
- incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
+ incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+ if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
+ u64 *flits = packet->ebuf;
+
+ if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+ dd->err_info_rcvport.packet_flit1 = flits[0];
+ dd->err_info_rcvport.packet_flit2 =
+ packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
+ }
+ dd->err_info_rcvport.status_and_code |=
+ (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
+ }
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 677efa0e8cd6..bd786b7bd30b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
struct hfi1_devdata,
user_cdev);
+ if (!atomic_inc_not_zero(&dd->user_refcount))
+ return -ENXIO;
+
/* Just take a ref now. Not all opens result in a context assign */
kobject_get(&dd->kobj);
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
atomic_inc(&fd->mm->mm_count);
- }
+ fp->private_data = fd;
+ } else {
+ fp->private_data = NULL;
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
- fp->private_data = fd;
+ return -ENOMEM;
+ }
- return fd ? 0 : -ENOMEM;
+ return 0;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
done:
mmdrop(fdata->mm);
kobject_put(&dd->kobj);
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
kfree(fdata);
return 0;
}
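The file_ops/init changes give the device one long-lived reference (set in hfi1_init_dd) that removal drops and then waits on, while each open must successfully increment a non-zero count. A compact C11-atomics model of that scheme, with invented names and no real wait:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int user_refcount = 1;	/* device active, no users yet */
static int user_comp_done;		/* struct completion stand-in */

/* atomic_inc_not_zero(): refuse new users once teardown dropped the last ref */
static int dev_open(void)
{
	int old = atomic_load(&user_refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&user_refcount, &old, old + 1))
			return 0;
	return -6;				/* -ENXIO in the driver */
}

static void drop_ref(void)
{
	if (atomic_fetch_sub(&user_refcount, 1) == 1)
		user_comp_done = 1;		/* complete(&dd->user_comp) */
}

int main(void)
{
	printf("open: %d\n", dev_open());	/* count 1 -> 2 */
	drop_ref();				/* close: 2 -> 1 */
	drop_ref();				/* remove drops the initial ref */
	printf("last user gone: %d, open now: %d\n", user_comp_done, dev_open());
	return 0;
}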
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4163596ce4c9..751a0fb29fa5 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -367,26 +367,6 @@ struct hfi1_packet {
u8 etype;
};
-/*
- * Private data for snoop/capture support.
- */
-struct hfi1_snoop_data {
- int mode_flag;
- struct cdev cdev;
- struct device *class_dev;
- /* protect snoop data */
- spinlock_t snoop_lock;
- struct list_head queue;
- wait_queue_head_t waitq;
- void *filter_value;
- int (*filter_callback)(void *hdr, void *data, void *value);
- u64 dcc_cfg; /* saved value of DCC Cfg register */
-};
-
-/* snoop mode_flag values */
-#define HFI1_PORT_SNOOP_MODE 1U
-#define HFI1_PORT_CAPTURE_MODE 2U
-
struct rvt_sge_state;
/*
@@ -625,8 +605,6 @@ struct hfi1_pportdata {
struct mutex hls_lock;
u32 host_link_state;
- spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
-
u32 lstate; /* logical link state */
/* these are the "32 bit" regs */
@@ -1094,8 +1072,6 @@ struct hfi1_devdata {
char *portcntrnames;
size_t portcntrnameslen;
- struct hfi1_snoop_data hfi1_snoop;
-
struct err_info_rcvport err_info_rcvport;
struct err_info_constraint err_info_rcv_constraint;
struct err_info_constraint err_info_xmit_constraint;
@@ -1133,8 +1109,8 @@ struct hfi1_devdata {
u64 lcb_err_en;
/*
- * Handlers for outgoing data so that snoop/capture does not
- * have to have its hooks in the send path
+ * Capability to have different send engines simply by changing a
+ * pointer value.
*/
send_routine process_pio_send ____cacheline_aligned_in_smp;
send_routine process_dma_send;
@@ -1184,6 +1160,10 @@ struct hfi1_devdata {
spinlock_t aspm_lock;
/* Number of verbs contexts which have disabled ASPM */
atomic_t aspm_disabled_cnt;
+ /* Keeps track of user space clients */
+ atomic_t user_refcount;
+ /* Used to wait for outstanding user space clients before dev removal */
+ struct completion user_comp;
bool eprom_available; /* true if EPROM is available for this device */
bool aspm_supported; /* Does HW support ASPM */
@@ -1234,8 +1214,6 @@ struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;
-extern unsigned int snoop_drop_send;
-extern unsigned int snoop_force_capture;
int hfi1_init(struct hfi1_devdata *, int);
int hfi1_count_units(int *npresentp, int *nupp);
int hfi1_count_active_units(void);
@@ -1570,13 +1548,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
-int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count);
int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
@@ -1787,8 +1758,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
- const struct pci_device_id *);
+int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
void hfi1_pcie_flr(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *);
@@ -1823,8 +1793,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
-extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-
void update_sge(struct rvt_sge_state *ss, u32 length);
/* global module parameter variables */
@@ -1851,9 +1819,6 @@ extern struct mutex hfi1_mutex;
#define DRIVER_NAME "hfi1"
#define HFI1_USER_MINOR_BASE 0
#define HFI1_TRACE_MINOR 127
-#define HFI1_DIAGPKT_MINOR 128
-#define HFI1_DIAG_MINOR_BASE 129
-#define HFI1_SNOOP_CAPTURE_BASE 200
#define HFI1_NMINORS 255
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -1872,7 +1837,13 @@ extern struct mutex hfi1_mutex;
static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
u16 ctxt_type)
{
- u64 base_sc_integrity =
+ u64 base_sc_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sc_integrity =
SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
@@ -1887,7 +1858,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
@@ -1896,18 +1866,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sc_integrity &
- ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sc_integrity;
}
static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
- u64 base_sdma_integrity =
+ u64 base_sdma_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sdma_integrity =
SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
@@ -1919,14 +1894,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sdma_integrity &
- ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
+
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sdma_integrity;
}
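
The two inline helpers above now follow the same shape: bail out early when the NO_INTEGRITY capability is set, build the always-on check mask, and OR in the optional bits (static rate control, job-key checks on non-A0 silicon) rather than masking them back out afterwards. A minimal standalone sketch of that shape, using hypothetical flag names instead of the hfi1 SMASK macros:

#include <stdbool.h>
#include <stdint.h>

#define CHK_BASE        0x1u   /* checks that are always enabled */
#define CHK_STATIC_RATE 0x2u   /* only when static rate control is off */
#define CHK_JOB_KEY     0x4u   /* only on silicon newer than A0 */

static uint32_t build_check_mask(bool no_integrity, bool static_rate_ctrl,
				 bool is_a0)
{
	uint32_t mask;

	/* no integrity checks at all when the capability is set */
	if (no_integrity)
		return 0;

	mask = CHK_BASE;
	if (!static_rate_ctrl)
		mask |= CHK_STATIC_RATE;
	if (!is_a0)
		mask |= CHK_JOB_KEY;

	return mask;
}
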
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 60db61536fed..e3b5bc93bc70 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd;
ppd = dd->pport + (i % dd->num_pports);
+
+ /* dd->rcd[i] gets assigned inside the callee */
rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
if (!rcd) {
dd_dev_err(dd,
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (!rcd->sc) {
dd_dev_err(dd,
"Unable to allocate kernel send context, failing\n");
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
goto nomem;
}
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (ret < 0) {
dd_dev_err(dd,
"Failed to setup kernel receive context, failing\n");
- sc_free(rcd->sc);
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
ret = -EFAULT;
goto bail;
}
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
nomem:
ret = -ENOMEM;
bail:
+ if (dd->rcd) {
+ for (i = 0; i < dd->num_rcv_contexts; ++i)
+ hfi1_free_ctxtdata(dd, dd->rcd[i]);
+ }
kfree(dd->rcd);
dd->rcd = NULL;
return ret;
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
dd->num_rcv_contexts - dd->first_user_ctxt)
kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
(dd->num_rcv_contexts - dd->first_user_ctxt));
- rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
if (rcd) {
u32 rcvtids, max_entries;
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
}
rcd->eager_base = base * dd->rcv_entries.group_size;
- /* Validate and initialize Rcv Hdr Q variables */
- if (rcvhdrcnt % HDRQ_INCREMENT) {
- dd_dev_err(dd,
- "ctxt%u: header queue count %d must be divisible by %lu\n",
- rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
- goto bail;
- }
rcd->rcvhdrq_cnt = rcvhdrcnt;
rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
/*
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
mutex_init(&ppd->hls_lock);
- spin_lock_init(&ppd->sdma_alllock);
spin_lock_init(&ppd->qsfp_info.qsfp_lock);
ppd->qsfp_info.ppd = ppd;
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
+static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+{
+ if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev, "Receive header queue count too small\n");
+ return -EINVAL;
+ }
+
+ if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ return -EINVAL;
+ }
+
+ if (thecnt % HDRQ_INCREMENT) {
+ hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
+ struct hfi1_devdata *dd;
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
HFI1_CAP_LOCK();
/* Validate some global module parameters */
- if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev, "Header queue count too small\n");
- ret = -EINVAL;
- goto bail;
- }
- if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
- ret = -EINVAL;
+ ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ if (ret)
goto bail;
- }
+
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto bail;
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_INTEL0:
- case PCI_DEVICE_ID_INTEL1:
- dd = hfi1_init_dd(pdev, ent);
- break;
- default:
+ if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
+ ent->device == PCI_DEVICE_ID_INTEL1)) {
hfi1_early_err(&pdev->dev,
"Failing on unknown Intel deviceid 0x%x\n",
ent->device);
ret = -ENODEV;
+ goto clean_bail;
}
- if (IS_ERR(dd))
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ dd = hfi1_init_dd(pdev, ent);
+
+ if (IS_ERR(dd)) {
ret = PTR_ERR(dd);
- if (ret)
goto clean_bail; /* error already printed */
+ }
ret = create_workqueues(dd);
if (ret)
@@ -1538,12 +1545,31 @@ bail:
return ret;
}
+static void wait_for_clients(struct hfi1_devdata *dd)
+{
+ /*
+ * Remove the device init value and complete the device if there are
+ * no clients, or wait for active clients to finish.
+ */
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
+ wait_for_completion(&dd->user_comp);
+}
+
static void remove_one(struct pci_dev *pdev)
{
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
/* close debugfs files before ib unregister */
hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+
+ /* remove the /dev hfi1 interface */
+ hfi1_device_remove(dd);
+
+ /* wait for existing user space clients to finish */
+ wait_for_clients(dd);
+
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev)
/* wait until all of our (qsfp) queue_work() calls complete */
flush_workqueue(ib_wq);
- hfi1_device_remove(dd);
-
postinit_cleanup(dd);
}
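
The user_refcount/user_comp pair added in this file implements a small drain pattern: the count starts at 1 on behalf of the device itself, each open file takes a reference, and remove_one() drops the initial reference and then sleeps until the last client has dropped theirs. A compile-level sketch of the pattern with hypothetical names (not the hfi1 structures):

#include <linux/atomic.h>
#include <linux/completion.h>

struct clientdata {
	atomic_t user_refcount;		/* starts at 1 for the device itself */
	struct completion user_comp;
};

static void clients_init(struct clientdata *cd)
{
	atomic_set(&cd->user_refcount, 1);
	init_completion(&cd->user_comp);
}

/* called from each client's close path */
static void client_put(struct clientdata *cd)
{
	if (atomic_dec_and_test(&cd->user_refcount))
		complete(&cd->user_comp);
}

/* called from device removal, after new opens have been blocked */
static void wait_for_all_clients(struct clientdata *cd)
{
	client_put(cd);			/* drop the initial reference */
	wait_for_completion(&cd->user_comp);
}
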
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 89c68da1c273..4ac8f330c5cb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev)
* fields required to re-initialize after a chip reset, or for
* various other purposes
*/
-int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
unsigned long len;
resource_size_t addr;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 64c9eeb52d86..615be68e40b3 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
void set_pio_integrity(struct send_context *sc)
{
struct hfi1_devdata *dd = sc->dd;
- u64 reg = 0;
u32 hw_context = sc->hw_context;
int type = sc->type;
- /*
- * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
- * we're snooping.
- */
- if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
- reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
-
- write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
+ write_kctxt_csr(dd, hw_context,
+ SC(CHECK_ENABLE),
+ hfi1_pkt_default_send_ctxt_mask(dd, type));
}
static u32 get_buffers_allocated(struct send_context *sc)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9db260fe782a..809b26eb6d3c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
+ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
add_timer(&priv->s_rnr_timer);
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 7102a076146d..1d81cac1fa6c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
/*
* set_sdma_integrity
*
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
static void set_sdma_integrity(struct sdma_engine *sde)
{
struct hfi1_devdata *dd = sde->dd;
- u64 reg;
-
- if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
- return;
-
- reg = hfi1_pkt_base_sdma_integrity(dd);
-
- if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
- CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
- else
- SET_STATIC_RATE_CONTROL_SMASK(reg);
- write_sde_csr(sde, SD(CHECK_ENABLE), reg);
+ write_sde_csr(sde, SD(CHECK_ENABLE),
+ hfi1_pkt_base_sdma_integrity(dd));
}
static void init_sdma_regs(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index edba22461a9c..919a5474e651 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,7 +49,6 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
-#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
-static ssize_t show_sdma_affinity(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_get_sdma_affinity(dd, buf);
-}
-
-static ssize_t store_sdma_affinity(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_set_sdma_affinity(dd, buf, count);
-}
-
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
- store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
- &dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 11e02b228922..f77e59fb43fe 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate,
)
);
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct ib_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
#endif /* __HFI1_TRACE_RX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 663980ef01a8..7d22f8ee98ef 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1166,7 +1166,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_extract(pq->handler,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node && !IS_ERR(rb_node))
+ if (rb_node)
node = container_of(rb_node, struct sdma_mmu_node, rb);
else
rb_node = NULL;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 20c6d17ac8b8..077c33d2dc75 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1ea686b9e0f9..6a0fec357dae 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
return &cq->ibcq;
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index d72a4367c891..b3ef47c3ab73 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -960,8 +960,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_create;
} else {
- /* for now choose 64 bytes till we have a proper interface */
- cqe_size = 64;
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b81736d625fc..d566f6738833 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1056,7 +1056,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
@@ -1931,7 +1931,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
- 0);
+ 0, 0);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@@ -1951,10 +1951,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
- u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
@@ -1979,12 +1979,12 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
handler->rule = mlx5_add_flow_rules(ft, spec,
- action,
- MLX5_FS_DEFAULT_FLOW_TAG,
- dst, 1);
+ &flow_act,
+ dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -2385,14 +2385,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
{
struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
struct ib_event ibev;
-
+ bool fatal = false;
u8 port = 0;
switch (event) {
case MLX5_DEV_EVENT_SYS_ERROR:
- ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
+ fatal = true;
break;
case MLX5_DEV_EVENT_PORT_UP:
@@ -2446,6 +2446,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3209,7 +3212,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
err = init_node_data(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
@@ -3219,7 +3222,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_eth(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
}
err = create_dev_resources(&dev->devr);
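
The fatal flag introduced in mlx5_ib_event() only changes ordering: the IB_EVENT_DEVICE_FATAL event is dispatched while ib_active is still true (otherwise the dispatch would be skipped), and the device is marked inactive afterwards. A toy sketch of that ordering with a hypothetical handler:

#include <stdbool.h>
#include <stdio.h>

enum dev_event { DEV_EVENT_PORT_UP, DEV_EVENT_SYS_ERROR };

struct dev_ctx {
	bool active;
};

static void dispatch_event(struct dev_ctx *d, enum dev_event ev)
{
	printf("dispatching event %d\n", ev);
}

static void handle_event(struct dev_ctx *d, enum dev_event ev)
{
	bool fatal = false;

	if (ev == DEV_EVENT_SYS_ERROR)
		fatal = true;		/* remember, but keep d->active set for now */

	if (d->active)
		dispatch_event(d, ev);	/* listeners still see the fatal event */

	if (fatal)
		d->active = false;	/* deactivate only after dispatch */
}
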
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ab8961cc8bca..6c6057eb60ea 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -629,6 +629,8 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
+ /* Prevents soft lock on massive reg MRs */
+ struct mutex slow_path_mutex;
int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 67985c69f9b9..8f608debe141 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int err;
int i;
+ mutex_init(&dev->slow_path_mutex);
cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1216,9 +1217,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto error;
}
- if (!mr)
+ if (!mr) {
+ mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
page_shift, access_flags);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
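
The new slow_path_mutex serializes only the fallback: registrations that hit the MR cache stay lock-free, while the expensive reg_create() path is funneled through a single mutex so that many concurrent large registrations cannot monopolize a CPU. A minimal sketch of that split, with hypothetical helpers standing in for the cache lookup and the slow allocation:

#include <linux/mutex.h>
#include <linux/slab.h>

struct mr { int id; };

struct mr_ctx {
	struct mutex slow_path_mutex;
};

/* fast path: may fail and return NULL (stands in for the MR cache) */
static struct mr *get_cached_mr(struct mr_ctx *c)
{
	return NULL;
}

/* slow path: expensive allocation (stands in for reg_create()) */
static struct mr *create_mr_slow(struct mr_ctx *c)
{
	return kzalloc(sizeof(struct mr), GFP_KERNEL);
}

static struct mr *get_mr(struct mr_ctx *c)
{
	struct mr *mr = get_cached_mr(c);

	if (!mr) {
		/* only the slow path is serialized */
		mutex_lock(&c->slow_path_mutex);
		mr = create_mr_slow(c);
		mutex_unlock(&c->slow_path_mutex);
	}
	return mr;
}
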
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cc24f2d429b9..a1b3125f0a6e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
@@ -2082,8 +2081,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- to_mcq(init_attr->recv_cq)->mcq.cqn,
- to_mcq(init_attr->send_cq)->mcq.cqn);
+ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
qp->trans_qp.xrcdn = xrcdn;
@@ -4896,6 +4895,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
udata->inlen))
return ERR_PTR(-EOPNOTSUPP);
+ if (init_attr->log_ind_tbl_size >
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+ init_attr->log_ind_tbl_size,
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+ return ERR_PTR(-EINVAL);
+ }
+
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 9badd0224ec5..5b9601014f0c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -65,7 +65,6 @@ MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-int max_mtu = 9000;
int interrupt_mod_interval = 0;
/* Interoperability */
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index e7430c9254d3..85acd0843b50 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -83,6 +83,8 @@
#define NES_FIRST_QPN 64
#define NES_SW_CONTEXT_ALIGN 1024
+#define NES_MAX_MTU 9000
+
#define NES_NIC_MAX_NICS 16
#define NES_MAX_ARP_TABLE_SIZE 4096
@@ -169,8 +171,6 @@ do { \
#include "nes_cm.h"
#include "nes_mgt.h"
-extern int max_mtu;
-#define max_frame_len (max_mtu+ETH_HLEN)
extern int interrupt_mod_interval;
extern int nes_if_count;
extern int mpa_version;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 4dcfe669ebad..5921ea3d50ae 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -985,20 +985,16 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
- int ret = 0;
u8 jumbomode = 0;
u32 nic_active;
u32 nic_active_bit;
u32 uc_all_active;
u32 mc_all_active;
- if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
- return -EINVAL;
-
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
- if (netdev->mtu > 1500) {
+ if (netdev->mtu > ETH_DATA_LEN) {
jumbomode=1;
}
nes_nic_init_timer_defaults(nesdev, jumbomode);
@@ -1024,7 +1020,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
- return ret;
+ return 0;
}
@@ -1670,7 +1666,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->watchdog_timeo = NES_TX_TIMEOUT;
netdev->irq = nesdev->pcidev->irq;
- netdev->mtu = ETH_DATA_LEN;
+ netdev->max_mtu = NES_MAX_MTU;
netdev->hard_header_len = ETH_HLEN;
netdev->addr_len = ETH_ALEN;
netdev->type = ARPHRD_ETHER;
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 7c06d85568d4..6c9f3923e838 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_QEDR
tristate "QLogic RoCE driver"
depends on 64BIT && QEDE
select QED_LL2
+ select QED_RDMA
---help---
This driver provides low-level InfiniBand over Ethernet
support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 01f71caa3ac4..f2cefb0d9180 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
- if (offset + size > PAGE_SIZE)
- return BAD_DMA_ADDRESS;
-
addr = (u64)page_address(page);
if (addr)
addr += offset;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index a576603304f7..16967cdb45df 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
{
int err;
struct socket *sock;
- struct udp_port_cfg udp_cfg;
- struct udp_tunnel_sock_cfg tnl_cfg;
-
- memset(&udp_cfg, 0, sizeof(udp_cfg));
+ struct udp_port_cfg udp_cfg = {0};
+ struct udp_tunnel_sock_cfg tnl_cfg = {0};
if (ipv6) {
udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return ERR_PTR(err);
}
- tnl_cfg.sk_user_data = NULL;
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
- tnl_cfg.encap_destroy = NULL;
/* Setup UDP tunnel */
setup_udp_tunnel_sock(net, sock, &tnl_cfg);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b8036cfbce04..c3e60e4bde6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
+ rxe_queue_reset(qp->sq.queue);
}
/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
rxe_run_task(&qp->resp.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 08274254eb88..d14bf496d62d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ err1:
return -EINVAL;
}
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+ /* the queue consists of a management header followed by the memory
+ * of the actual queue; see "struct rxe_queue_buf" in rxe_queue.h.
+ * Reset only the queue itself and not the management header
+ */
+ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size)
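
rxe_queue_reset() above wipes only the element storage and leaves the management header in front of it untouched, which is why the memset starts at q->buf->data and subtracts the header size from the length. A standalone sketch of that layout with a hypothetical, much smaller header:

#include <stddef.h>
#include <string.h>

struct queue_buf {
	size_t producer_index;	/* management header: preserved across reset */
	size_t consumer_index;
	char data[];		/* element storage: cleared by reset */
};

static void queue_reset(struct queue_buf *q, size_t buf_size)
{
	/* buf_size covers the header plus the element storage */
	memset(q->data, 0, buf_size - sizeof(struct queue_buf));
}
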
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd609c31e..8c8641c87817 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
size_t buf_size,
struct rxe_mmap_info **ip_p);
+void rxe_queue_reset(struct rxe_queue *q);
+
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index b246653cf713..73d4a97603a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -698,7 +698,9 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- goto complete;
+ __rxe_do_task(&qp->comp.task);
+ rxe_drop_ref(qp);
+ return 0;
}
payload = mtu;
}
@@ -747,14 +749,16 @@ err:
wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
-complete:
- if (qp_type(qp) != IB_QPT_RC) {
- while (rxe_completer(qp) == 0)
- ;
- }
- rxe_drop_ref(qp);
- return 0;
-
+ /*
+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+ * ---------8<---------8<-------------
+ * ...Note that if a completion error occurs, a Work Completion
+ * will always be generated, even if the signaling
+ * indicator requests an Unsignaled Completion.
+ * ---------8<---------8<-------------
+ */
+ wqe->wr.send_flags |= IB_SEND_SIGNALED;
+ __rxe_do_task(&qp->comp.task);
exit:
rxe_drop_ref(qp);
return -EAGAIN;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
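
skb_add_pseudo_hdr() assumes skb->data currently points at the 4-byte IPoIB encapsulation header: it pushes 20 zeroed bytes in front of it, marks that spot as the MAC header, then pulls IPOIB_HARD_LEN so the data pointer lands on the network header. A simplified pointer-arithmetic sketch of the same sequence (plain buffers, not the skb API):

#include <stdint.h>
#include <string.h>

#define IPOIB_ENCAP_LEN  4
#define IPOIB_PSEUDO_LEN 20
#define IPOIB_HARD_LEN   (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

struct rx_buf {
	uint8_t *mac_header;	/* where the link-level header starts */
	uint8_t *data;		/* current packet data pointer */
};

/* assumes b->data points at the IPoIB encapsulation header on entry */
static void add_pseudo_hdr(struct rx_buf *b)
{
	b->data -= IPOIB_PSEUDO_LEN;		/* "push": reuse reserved headroom */
	memset(b->data, 0, IPOIB_PSEUDO_LEN);	/* dummy pseudo header */
	b->mac_header = b->data;		/* "reset_mac_header" */
	b->data += IPOIB_HARD_LEN;		/* "pull": now at the IP header */
}
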
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 46234f52ee29..096c4f6fbd65 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -621,9 +623,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -660,8 +662,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 43cf8b8a8d2e..5038f9d2d753 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+ * 64-byte aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 423b30dfe2d8..3ce0765a05ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -292,6 +292,25 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
return dev;
}
+struct ipoib_walk_data {
+ const struct sockaddr *addr;
+ struct net_device *result;
+};
+
+static int ipoib_upper_walk(struct net_device *upper, void *_data)
+{
+ struct ipoib_walk_data *data = _data;
+ int ret = 0;
+
+ if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
+ dev_hold(upper);
+ data->result = upper;
+ ret = 1;
+ }
+
+ return ret;
+}
+
/**
* Find a net_device matching the given address, which is an upper device of
* the given net_device.
@@ -304,27 +323,21 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
static struct net_device *ipoib_get_net_dev_match_addr(
const struct sockaddr *addr, struct net_device *dev)
{
- struct net_device *upper,
- *result = NULL;
- struct list_head *iter;
+ struct ipoib_walk_data data = {
+ .addr = addr,
+ };
rcu_read_lock();
if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
dev_hold(dev);
- result = dev;
+ data.result = dev;
goto out;
}
- netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
- if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
- dev_hold(upper);
- result = upper;
- break;
- }
- }
+ netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
rcu_read_unlock();
- return result;
+ return data.result;
}
/* returns the number of IPoIB netdevs on top a given ipoib device matching a
@@ -925,9 +938,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -964,7 +980,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -972,16 +988,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1009,10 +1027,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1026,13 +1046,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1067,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1060,16 +1082,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1086,11 +1108,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1146,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1132,12 +1156,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1756,7 +1781,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
@@ -2001,6 +2026,7 @@ static struct net_device *ipoib_add_port(const char *format,
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->max_mtu = IPOIB_CM_MTU;
priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
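
The open-coded netdev_for_each_all_upper_dev_rcu() loop becomes a callback plus a small context struct: the walker calls ipoib_upper_walk() for every upper device, the struct carries the address to match and the result, and a nonzero return value stops the walk. The same shape in a self-contained sketch over a hypothetical linked list:

#include <stddef.h>

struct node {
	int id;
	struct node *next;
};

struct walk_data {
	int key;		/* what the callback is looking for */
	struct node *result;	/* filled in when a match is found */
};

static int match_cb(struct node *n, void *_data)
{
	struct walk_data *data = _data;

	if (n->id == data->key) {
		data->result = n;
		return 1;	/* nonzero stops the walk */
	}
	return 0;
}

static void walk_all(struct node *head,
		     int (*cb)(struct node *, void *), void *arg)
{
	for (; head; head = head->next)
		if (cb(head, arg))
			break;
}
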
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 2a3980c2c670..fddff403d5d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -799,9 +799,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 982936334537..07ec465f1095 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_enable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_disable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct snd_soc_component *component;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm)
- snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+ if (haptics->arizona->dapm) {
+ component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+ snd_soc_component_disable_pin(component, "HAPTICS");
+ }
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2dbb71..3900875dec10 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret, i, abs;
+ int ret, i;
+ unsigned int abs;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr;
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!info->page)
goto error_nomem;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
- abs = 0;
+ abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
if (abs) {
ret = xenbus_write(XBT_NIL, dev->nodename,
"request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateInitWait:
InitWait:
- ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-abs-pointer", "%d", &val);
- if (ret < 0)
- val = 0;
- if (val) {
+ if (xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-abs-pointer", 0)) {
ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
"request-abs-pointer", "1");
if (ret)
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
if (set_properties) {
psmouse->vendor = "FocalTech";
- psmouse->name = "FocalTech Touchpad";
+ psmouse->name = "Touchpad";
}
return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
}
input_mt_report_pointer_emulation(dev, true);
- input_report_key(psmouse->dev, BTN_LEFT, state->pressed);
- input_sync(psmouse->dev);
+ input_report_key(dev, BTN_LEFT, state->pressed);
+ input_sync(dev);
}
static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fb4b185dea96..bee267424972 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
&max_proto, set_properties, true))
return PSMOUSE_TOUCHKIT_PS2;
-
- if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
- &max_proto, set_properties, true))
- return PSMOUSE_BYD;
}
/*
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d50a..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3c26cc..83cf11312fd9 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata;
+ wm->battery_dev->dev.platform_data = pdata->batt_pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 15c01c3cd540..e6f9b2d745ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
/* And we're up. Go go go! */
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
#ifdef CONFIG_PCI
- pci_request_acs();
- ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
+ pci_request_acs();
+ ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
#ifdef CONFIG_ARM_AMBA
- ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
- if (ret)
- return ret;
+ if (amba_bustype.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
#endif
- return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c841eb7a1a74..8f7281444551 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -324,8 +324,10 @@ struct arm_smmu_master_cfg {
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define fwspec_smendx(fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
+ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
struct arm_smmu_device {
struct device *dev;
@@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and add_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ if (!fwspec->iommu_priv)
+ return -ENODEV;
+
smmu = fwspec_smmu(fwspec);
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
@@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev)
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
- } else if (fwspec) {
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
} else {
return -ENODEV;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 58470f5ced04..8c53748a769d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
struct pci_dev *pdev = to_pci_dev(data);
struct dmar_pci_notify_info *info;
- /* Only care about add/remove events for physical functions */
+ /* Only care about add/remove events for physical functions.
+ * For VFs we actually do the lookup based on the corresponding
+ * PF in device_to_iommu() anyway. */
if (pdev->is_virtfn)
return NOTIFY_DONE;
if (action != BUS_NOTIFY_ADD_DEVICE &&
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a4407eabf0e6..c66c273dfd8a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
return NULL;
if (dev_is_pci(dev)) {
+ struct pci_dev *pf_pdev;
+
pdev = to_pci_dev(dev);
+ /* VFs aren't listed in scope tables; we need to look up
+ * the PF instead to find the IOMMU. */
+ pf_pdev = pci_physfn(pdev);
+ dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
+ /* For a VF use its original BDF# not that of the PF
+ * which we used for the IOMMU lookup. Strictly speaking
+ * we could do this for all PCI devices; we only need to
+ * get the BDF# from the scope table for ACPI matches. */
+ if (pdev->is_virtfn)
+ goto got_pdev;
+
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
goto out;
@@ -1711,6 +1724,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
+again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
@@ -1723,10 +1737,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
domain = info->domain;
- dmar_remove_one_dev_info(domain, info->dev);
+ __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain))
+ if (!domain_type_is_vm_or_si(domain)) {
+ /*
+ * The domain_exit() function can't be called under
+ * device_domain_lock, as it takes this lock itself.
+ * So release the lock here and re-run the loop
+ * afterwards.
+ */
+ spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
+ goto again;
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4665,25 +4688,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
}
}
-static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *v)
+static int intel_iommu_cpu_dead(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)v;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_all_cpu_cached_iovas(cpu);
- flush_unmaps_timeout(cpu);
- break;
- }
- return NOTIFY_OK;
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ return 0;
}
-static struct notifier_block intel_iommu_cpu_nb = {
- .notifier_call = intel_iommu_cpu_notifier,
-};
-
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -4832,8 +4843,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- register_hotcpu_notifier(&intel_iommu_cpu_nb);
-
+ cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
+ intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
return 0;
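
The new again: label in disable_dmar_iommu() is the usual way out of a lock-ordering problem: domain_exit() may take device_domain_lock itself, so the loop releases the lock, does the heavy cleanup, and restarts the scan because the list may have changed in the meantime. A compile-level sketch of the pattern with hypothetical list and lock names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head node;
};

static DEFINE_SPINLOCK(entry_lock);
static LIST_HEAD(entry_list);

/* stands in for domain_exit(): must not be called under entry_lock */
static void heavy_cleanup(struct entry *e)
{
	kfree(e);
}

static void teardown_all(void)
{
	struct entry *e;
	unsigned long flags;

again:
	spin_lock_irqsave(&entry_lock, flags);
	if (!list_empty(&entry_list)) {
		e = list_first_entry(&entry_list, struct entry, node);
		list_del(&e->node);
		/* drop the lock before the heavy work, then rescan */
		spin_unlock_irqrestore(&entry_lock, flags);
		heavy_cleanup(e);
		goto again;
	}
	spin_unlock_irqrestore(&entry_lock, flags);
}
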
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 8ebb3530afa7..cb72e0011310 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
struct page *pages;
int order;
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
-
+ /* Start at 2 because it's defined as 2^(1+PSS) */
+ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+ /* Eventually I'm promised we will get a multi-level PASID table
+ * and it won't have to be physically contiguous. Until then,
+ * limit the size because 8MiB contiguous allocations can be hard
+ * to come by. The limit of 0x20000, which is 1MiB for each of
+ * the PASID and PASID-state tables, is somewhat arbitrary. */
+ if (iommu->pasid_max > 0x20000)
+ iommu->pasid_max = 0x20000;
+
+ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
if (ecap_dis(iommu->ecap)) {
+ /* Just making it explicit... */
+ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (pages)
iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
- int order;
-
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
+ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
if (iommu->pasid_table) {
free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
svm->iommu = iommu;
- if (pasid_max > 2 << ecap_pss(iommu->ecap))
- pasid_max = 2 << ecap_pss(iommu->ecap);
+ if (pasid_max > iommu->pasid_max)
+ pasid_max = iommu->pasid_max;
/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
ret = idr_alloc(&iommu->pasid_idr, svm,
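To put numbers on the clamp above: pasid_max is 2^(PSS+1), and the hunk's own comment equates 0x20000 entries with 1 MiB per table, i.e. 8-byte PASID entries. A worked sketch (PSS = 19 is only an example value):

u32 pss = 19;			/* example ecap_pss() reading            */
u32 pasid_max = 2 << pss;	/* 2^(PSS+1) = 1048576 supported PASIDs  */
int order;

if (pasid_max > 0x20000)	/* would need an 8 MiB contiguous table, */
	pasid_max = 0x20000;	/* so clamp to 1 MiB worth of entries    */

order = get_order(pasid_max * 8);	/* 1 MiB -> order 8 with 4 KiB pages */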
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bc0af3307bbf..ae96731cd2fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -211,6 +211,10 @@ config XTENSA_MX
bool
select IRQ_DOMAIN
+config XILINX_INTC
+ bool
+ select IRQ_DOMAIN
+
config IRQ_CROSSBAR
bool
help
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e4dbfc85abdb..0e55d94065bf 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 353c54986211..c2662a1bfdd3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+ struct cpumask *mask = irq_data_get_affinity_mask(d);
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+
+ /* This CPU was not on the affinity mask */
+ if (!cpumask_test_cpu(cpu, mask))
+ return;
+
+ if (cpumask_weight(mask) > 1) {
+ /*
+ * Multiple CPU affinity, remove this CPU from the affinity
+ * mask
+ */
+ cpumask_copy(&new_affinity, mask);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* Only CPU, put on the lowest online CPU */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ irq_set_affinity_locked(d, &new_affinity, false);
+}
+
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
struct bcm7038_l1_chip *intc)
@@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
+ .irq_cpu_offline = bcm7038_l1_cpu_offline,
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee300e8a3..69b040f47d56 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -37,7 +37,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
-#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -196,7 +195,7 @@ typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
- cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] &= ~0xffULL;
cmd->raw_cmd[0] |= cmd_nr;
}
@@ -208,43 +207,43 @@ static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
- cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] &= ~0xffffffffULL;
cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
- cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] &= 0xffffffffULL;
cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
- cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] &= ~0x1fULL;
cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- cmd->raw_cmd[2] &= ~0xffffffffffffUL;
- cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+ cmd->raw_cmd[2] &= ~0xffffffffffffULL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
- cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] &= ~(1ULL << 63);
cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
- cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+ cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
- cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] &= ~0xffffULL;
cmd->raw_cmd[2] |= col;
}
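The UL -> ULL conversions above are not cosmetic: the surrounding changes prepare the ITS driver for 32-bit builds, where unsigned long is only 32 bits wide, so the old constants either collapse to zero or silently drop the upper word once applied to the 64-bit raw_cmd fields (and 1UL << 63 is undefined there altogether). A sketch of the failure mode on such a build:

u64 reg = 0x1122334455667788ULL;

u64 bad   = reg & ~0xffffffffUL;	/* ~mask evaluates to 0: bad == 0        */
u64 good  = reg & ~0xffffffffULL;	/* good == 0x1122334400000000            */
u64 bad2  = reg & ~0xffUL;		/* upper word lost: 0x0000000055667700   */
u64 good2 = reg & ~0xffULL;		/* only bits 7..0 cleared, as intended   */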
@@ -433,7 +432,7 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
* the ITS.
*/
if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
- __flush_dcache_area(cmd, sizeof(*cmd));
+ gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
else
dsb(ishst);
}
@@ -602,7 +601,7 @@ static void lpi_set_config(struct irq_data *d, bool enable)
* Humpf...
*/
if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
- __flush_dcache_area(cfg, sizeof(*cfg));
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
else
dsb(ishst);
its_send_inv(its_dev, id);
@@ -657,8 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
its = its_dev->its;
addr = its->phys_base + GITS_TRANSLATER;
- msg->address_lo = addr & ((1UL << 32) - 1);
- msg->address_hi = addr >> 32;
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
msg->data = its_get_event_id(d);
iommu_dma_map_msi_msg(d->irq, msg);
@@ -817,7 +816,7 @@ static int __init its_alloc_lpi_tables(void)
LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
- __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
return 0;
}
@@ -836,7 +835,7 @@ static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
u32 idx = baser - its->tables;
- return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+ return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}
static void its_write_baser(struct its_node *its, struct its_baser *baser,
@@ -844,7 +843,7 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
{
u32 idx = baser - its->tables;
- writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
baser->val = its_read_baser(its, baser);
}
@@ -910,7 +909,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -935,9 +934,9 @@ retry_baser:
}
if (val != tmp) {
- pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
- (unsigned long) val, (unsigned long) tmp);
+ val, tmp);
free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -948,7 +947,7 @@ retry_baser:
tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
- &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
indirect ? "indirect" : "flat", (int)esz,
@@ -983,7 +982,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
* which is reported by ITS hardware times lvl1 table
* entry size.
*/
- ids -= ilog2(psz / esz);
+ ids -= ilog2(psz / (int)esz);
esz = GITS_LVL1_ENTRY_SIZE;
}
}
@@ -998,7 +997,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
new_order = max_t(u32, get_order(esz << ids), new_order);
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
- ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
&its->phys_base, its->device_ids, ids);
}
@@ -1102,7 +1101,7 @@ static void its_cpu_init_lpis(void)
}
/* Make sure the GIC will observe the zero-ed page */
- __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
@@ -1126,8 +1125,8 @@ static void its_cpu_init_lpis(void)
GICR_PROPBASER_WaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
- writeq_relaxed(val, rbase + GICR_PROPBASER);
- tmp = readq_relaxed(rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
@@ -1139,7 +1138,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
- writeq_relaxed(val, rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
@@ -1150,8 +1149,8 @@ static void its_cpu_init_lpis(void)
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
- writeq_relaxed(val, rbase + GICR_PENDBASER);
- tmp = readq_relaxed(rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
@@ -1161,7 +1160,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
- writeq_relaxed(val, rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
@@ -1287,13 +1286,13 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(page_address(page), baser->psz);
+ gic_flush_dcache_to_poc(page_address(page), baser->psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to ITS hardware */
dsb(sy);
@@ -1340,7 +1339,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
return NULL;
}
- __flush_dcache_area(itt, sz);
+ gic_flush_dcache_to_poc(itt, sz);
dev->its = its;
dev->itt = itt;
@@ -1717,8 +1716,8 @@ static int __init its_probe_one(struct resource *res,
(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
- writeq_relaxed(baser, its->base + GITS_CBASER);
- tmp = readq_relaxed(its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ tmp = gits_read_cbaser(its->base + GITS_CBASER);
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
@@ -1730,13 +1729,13 @@ static int __init its_probe_one(struct resource *res,
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
- writeq_relaxed(baser, its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
- writeq_relaxed(0, its->base + GITS_CWRITER);
+ gits_write_cwriter(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642eae096..26e1d7fafb1e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
}
#ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
static u64 __maybe_unused gic_read_iar(void)
{
- if (static_branch_unlikely(&is_cavium_thunderx))
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
@@ -905,14 +904,6 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select,
};
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
- static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -935,8 +926,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
- gicv3_enable_quirks();
-
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 000000000000..3db7ab1c9741
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+ void __iomem *base;
+ struct irq_domain *root_domain;
+ u32 intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ iowrite32be(data, xintc_irqc->base + reg);
+ else
+ iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ return ioread32be(xintc_irqc->base + reg);
+ else
+ return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+	/* Ack level irqs here rather than in the ack callback:
+	 * handle_level_irq() acks the irq before calling the
+	 * interrupt handler.
+	 */
+ if (irqd_is_level_type(d))
+ xintc_write(IAR, mask);
+
+ xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+ xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+ xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+ xintc_write(CIE, mask);
+ xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "Xilinx INTC",
+ .irq_unmask = intc_enable_or_unmask,
+ .irq_mask = intc_disable_or_mask,
+ .irq_ack = intc_ack,
+ .irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+ unsigned int hwirq, irq = -1;
+
+ hwirq = xintc_read(IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ if (xintc_irqc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ do {
+ pending = xintc_get_irq();
+ if (pending == -1U)
+ break;
+ generic_handle_irq(pending);
+ } while (true);
+ chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ u32 nr_irq;
+ int ret, irq;
+ struct xintc_irq_chip *irqc;
+
+ if (xintc_irqc) {
+ pr_err("irq-xilinx: Multiple instances aren't supported\n");
+ return -EINVAL;
+ }
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ xintc_irqc = irqc;
+
+ irqc->base = of_iomap(intc, 0);
+ BUG_ON(!irqc->base);
+
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ if (ret < 0) {
+ pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+ goto err_alloc;
+ }
+
+ ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+ if (ret < 0) {
+ pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+ irqc->intr_mask = 0;
+ }
+
+ if (irqc->intr_mask >> nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
+ intc->full_name, nr_irq, irqc->intr_mask);
+
+ /*
+ * Disable all external interrupts until they are
+	 * explicitly requested.
+ */
+ xintc_write(IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(MER, MER_HIE | MER_ME);
+ if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(MER, MER_HIE | MER_ME);
+ }
+
+ irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ &xintc_irq_domain_ops, irqc);
+ if (!irqc->root_domain) {
+ pr_err("irq-xilinx: Unable to create IRQ domain\n");
+ goto err_alloc;
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(intc, 0);
+ if (irq) {
+ irq_set_chained_handler_and_data(irq,
+ xil_intc_irq_handler,
+ irqc);
+ } else {
+ pr_err("irq-xilinx: interrupts property not in DT\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+ } else {
+ irq_set_default_host(irqc->root_domain);
+ }
+
+ return 0;
+
+err_alloc:
+ xintc_irqc = NULL;
+ kfree(irqc);
+ return ret;
+
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
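One non-obvious detail in the new driver above: the controller has no register that reports its byte order, so xilinx_intc_of_init() probes it. MER is written through the little-endian accessor first; if the master-enable bits do not read back, the block must be wired big-endian, so the static key switches every subsequent xintc_read()/xintc_write() to the ioread32be()/iowrite32be() path and the write is retried. In isolation:

xintc_write(MER, MER_HIE | MER_ME);		/* little-endian attempt          */
if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
	static_branch_enable(&xintc_is_be);	/* all further MMIO is big-endian */
	xintc_write(MER, MER_HIE | MER_ME);	/* retry through the BE path      */
}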
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index d1f8ab915b15..b90776ef56ec 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -755,8 +755,10 @@ static int __init ser_gigaset_init(void)
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
- if (!driver)
+ if (!driver) {
+ rc = -ENOMEM;
goto error;
+ }
rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
if (rc != 0) {
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 9600cd771f1a..e034ed847ff3 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1499,6 +1499,7 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
+ err = -EBUSY;
goto out;
}
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb25d872..298c8dba0321 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
{ 0x8c, "Reason" },
{ 0x8d, "Calling party name" },
{ 0x8e, "Called party name" },
- { 0x8f, "Orignal called name" },
+ { 0x8f, "Original called name" },
{ 0x90, "Redirecting name" },
{ 0x91, "Connected name" },
{ 0x92, "Originating restrictions" },
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index 5609deee7cd3..b93a4e9a8d34 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -232,7 +232,6 @@ static const struct net_device_ops hysdn_netdev_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_start_xmit = net_send_packet,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7a628c6516f6..c621cbbb5768 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -645,7 +645,7 @@ config LEDS_VERSATILE
config LEDS_PM8058
tristate "LED Support for the Qualcomm PM8058 PMIC"
- depends on MFD_PM8921_CORE
+ depends on MFD_PM8XXX
depends on LEDS_CLASS
help
Choose this option if you want to use the LED drivers in
@@ -659,6 +659,25 @@ config LEDS_MLXCPLD
This option enables support for the LEDs on the Mellanox
boards. Say Y to enable these.
+config LEDS_USER
+ tristate "Userspace LED support"
+ depends on LEDS_CLASS
+ help
+ This option enables support for userspace LEDs. Say 'y' to enable this
+	  support in the kernel. To compile this driver as a module, choose 'm' here:
+ the module will be called uleds.
+
+config LEDS_NIC78BX
+ tristate "LED support for NI PXI NIC78bx devices"
+ depends on LEDS_CLASS
+ depends on X86 && ACPI
+ help
+ This option enables support for the User1 and User2 LEDs on NI
+ PXI NIC78bx devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-nic78bx.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 3965070190f5..6b8273736478 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -71,9 +71,13 @@ obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
+obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
+# LED Userspace Drivers
+obj-$(CONFIG_LEDS_USER) += uleds.o
+
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index aa84e5b37593..326ee6e925a2 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <uapi/linux/uleds.h>
#include "leds.h"
static struct class *leds_class;
@@ -187,7 +188,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
*/
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
- char name[64];
+ char name[LED_MAX_NAME_SIZE];
int ret;
ret = led_classdev_next_name(led_cdev->name, name, sizeof(name));
@@ -203,6 +204,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
dev_warn(parent, "Led %s renamed to %s due to name collision",
led_cdev->name, dev_name(led_cdev->dev));
+ led_cdev->work_flags = 0;
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3bce44893021..ef1360445413 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -53,30 +53,30 @@ static void led_timer_function(unsigned long data)
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
- led_cdev->flags &= ~LED_BLINK_SW;
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
- if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
- led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
+ if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags)) {
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
return;
}
brightness = led_get_brightness(led_cdev);
if (!brightness) {
/* Time to switch the LED on. */
- brightness = led_cdev->blink_brightness;
+ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE,
+ &led_cdev->work_flags))
+ brightness = led_cdev->new_blink_brightness;
+ else
+ brightness = led_cdev->blink_brightness;
delay = led_cdev->blink_delay_on;
} else {
/* Store the current brightness value to be able
* to restore it when the delay_off period is over.
- * Do it only if there is no pending blink brightness
- * change, to avoid overwriting the new value.
*/
- if (!(led_cdev->flags & LED_BLINK_BRIGHTNESS_CHANGE))
- led_cdev->blink_brightness = brightness;
- else
- led_cdev->flags &= ~LED_BLINK_BRIGHTNESS_CHANGE;
+ led_cdev->blink_brightness = brightness;
brightness = LED_OFF;
delay = led_cdev->blink_delay_off;
}
@@ -87,13 +87,15 @@ static void led_timer_function(unsigned long data)
* the final blink state so that the led is toggled each delay_on +
* delay_off milliseconds in worst case.
*/
- if (led_cdev->flags & LED_BLINK_ONESHOT) {
- if (led_cdev->flags & LED_BLINK_INVERT) {
+ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) {
+ if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) {
if (brightness)
- led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags);
} else {
if (!brightness)
- led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT_STOP,
+ &led_cdev->work_flags);
}
}
@@ -106,10 +108,9 @@ static void set_brightness_delayed(struct work_struct *ws)
container_of(ws, struct led_classdev, set_brightness_work);
int ret = 0;
- if (led_cdev->flags & LED_BLINK_DISABLE) {
+ if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) {
led_cdev->delayed_set_value = LED_OFF;
led_stop_software_blink(led_cdev);
- led_cdev->flags &= ~LED_BLINK_DISABLE;
}
ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
@@ -152,7 +153,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
return;
}
- led_cdev->flags |= LED_BLINK_SW;
+ set_bit(LED_BLINK_SW, &led_cdev->work_flags);
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
@@ -161,7 +162,7 @@ static void led_blink_setup(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- if (!(led_cdev->flags & LED_BLINK_ONESHOT) &&
+ if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
@@ -188,8 +189,8 @@ void led_blink_set(struct led_classdev *led_cdev,
{
del_timer_sync(&led_cdev->blink_timer);
- led_cdev->flags &= ~LED_BLINK_ONESHOT;
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
+ clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
@@ -200,17 +201,17 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_off,
int invert)
{
- if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
+ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
timer_pending(&led_cdev->blink_timer))
return;
- led_cdev->flags |= LED_BLINK_ONESHOT;
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
+ clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
if (invert)
- led_cdev->flags |= LED_BLINK_INVERT;
+ set_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
else
- led_cdev->flags &= ~LED_BLINK_INVERT;
+ clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags);
led_blink_setup(led_cdev, delay_on, delay_off);
}
@@ -221,7 +222,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
- led_cdev->flags &= ~LED_BLINK_SW;
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
@@ -232,18 +233,19 @@ void led_set_brightness(struct led_classdev *led_cdev,
* If software blink is active, delay brightness setting
* until the next timer tick.
*/
- if (led_cdev->flags & LED_BLINK_SW) {
+ if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
* from hard irq context.
*/
if (brightness == LED_OFF) {
- led_cdev->flags |= LED_BLINK_DISABLE;
+ set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags);
schedule_work(&led_cdev->set_brightness_work);
} else {
- led_cdev->flags |= LED_BLINK_BRIGHTNESS_CHANGE;
- led_cdev->blink_brightness = brightness;
+ set_bit(LED_BLINK_BRIGHTNESS_CHANGE,
+ &led_cdev->work_flags);
+ led_cdev->new_blink_brightness = brightness;
}
return;
}
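The conversion above moves the blink bookkeeping from plain read-modify-write updates of led_cdev->flags to atomic bitops on a dedicated work_flags word, so the blink timer, the set_brightness work item and callers in hard-irq context can no longer lose each other's updates; the new new_blink_brightness field likewise keeps a pending brightness request separate from blink_brightness until the next on-phase consumes it. The primitives involved, in miniature (after this change the LED_BLINK_* constants are bit numbers, not masks):

unsigned long work_flags = 0;

set_bit(LED_BLINK_SW, &work_flags);		/* atomic: software blink running     */
if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP, &work_flags))
	clear_bit(LED_BLINK_SW, &work_flags);	/* consume the stop request only once */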
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index b316df4a8c1e..8d066facdc73 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -115,8 +115,4 @@ static struct platform_driver cobalt_raq_led_driver = {
},
};
-static int __init cobalt_raq_led_init(void)
-{
- return platform_driver_register(&cobalt_raq_led_driver);
-}
-device_initcall(cobalt_raq_led_init);
+builtin_platform_driver(cobalt_raq_led_driver);
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index a73c8ff08530..4847e89883a7 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -274,6 +274,7 @@ static const struct i2c_device_id lp3952_id[] = {
{LP3952_NAME, 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, lp3952_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id lp3952_acpi_match[] = {
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index a2e4c1792e17..2421cf104991 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -84,8 +84,9 @@ static int mc13xxx_led_set(struct led_classdev *led_cdev,
case MC13892_LED_MD:
case MC13892_LED_AD:
case MC13892_LED_KP:
- reg = (led->id - MC13892_LED_MD) / 2;
- shift = 3 + (led->id - MC13892_LED_MD) * 12;
+ off = led->id - MC13892_LED_MD;
+ reg = off / 2;
+ shift = 3 + (off - reg * 2) * 12;
break;
case MC13892_LED_R:
case MC13892_LED_G:
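The arithmetic fix above matters for the third and fourth LED of the MD/AD/KP group: two LED control fields are packed into each register, so the shift has to wrap within the register instead of growing with the LED index. For example:

int off = 3;				/* fourth LED of the MD/AD/KP group      */
int reg = off / 2;			/* -> register 1 (two LEDs per register) */
int shift = 3 + (off - reg * 2) * 12;	/* -> 15; the old formula (3 + off * 12) */
					/* gave 39 here, pointing past the       */
					/* register's LED fields entirely        */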
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index 197ab9b29a9c..281482e1d50f 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -400,6 +400,9 @@ static int __init mlxcpld_led_init(void)
struct platform_device *pdev;
int err;
+ if (!dmi_match(DMI_CHASSIS_VENDOR, "Mellanox Technologies Ltd."))
+ return -ENODEV;
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("Device allocation failed\n");
@@ -426,5 +429,5 @@ module_exit(mlxcpld_led_exit);
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox board LED driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:leds_mlxcpld");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 4b88b93244be..f48b1aed9b4e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -534,6 +534,7 @@ static const struct of_device_id of_netxbig_leds_match[] = {
{ .compatible = "lacie,netxbig-leds", },
{},
};
+MODULE_DEVICE_TABLE(of, of_netxbig_leds_match);
#else
static inline int
netxbig_leds_get_of_pdata(struct device *dev,
diff --git a/drivers/leds/leds-nic78bx.c b/drivers/leds/leds-nic78bx.c
new file mode 100644
index 000000000000..8d69e2b74a27
--- /dev/null
+++ b/drivers/leds/leds-nic78bx.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define NIC78BX_USER1_LED_MASK 0x3
+#define NIC78BX_USER1_GREEN_LED BIT(0)
+#define NIC78BX_USER1_YELLOW_LED BIT(1)
+
+#define NIC78BX_USER2_LED_MASK 0xC
+#define NIC78BX_USER2_GREEN_LED BIT(2)
+#define NIC78BX_USER2_YELLOW_LED BIT(3)
+
+#define NIC78BX_LOCK_REG_OFFSET 1
+#define NIC78BX_LOCK_VALUE 0xA5
+#define NIC78BX_UNLOCK_VALUE 0x5A
+
+#define NIC78BX_USER_LED_IO_SIZE 2
+
+struct nic78bx_led_data {
+ u16 io_base;
+ spinlock_t lock;
+ struct platform_device *pdev;
+};
+
+struct nic78bx_led {
+ u8 bit;
+ u8 mask;
+ struct nic78bx_led_data *data;
+ struct led_classdev cdev;
+};
+
+static inline struct nic78bx_led *to_nic78bx_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct nic78bx_led, cdev);
+}
+
+static void nic78bx_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct nic78bx_led *nled = to_nic78bx_led(cdev);
+ unsigned long flags;
+ u8 value;
+
+ spin_lock_irqsave(&nled->data->lock, flags);
+ value = inb(nled->data->io_base);
+
+ if (brightness) {
+ value &= ~nled->mask;
+ value |= nled->bit;
+ } else {
+ value &= ~nled->bit;
+ }
+
+ outb(value, nled->data->io_base);
+ spin_unlock_irqrestore(&nled->data->lock, flags);
+}
+
+static enum led_brightness nic78bx_brightness_get(struct led_classdev *cdev)
+{
+ struct nic78bx_led *nled = to_nic78bx_led(cdev);
+ unsigned long flags;
+ u8 value;
+
+ spin_lock_irqsave(&nled->data->lock, flags);
+ value = inb(nled->data->io_base);
+ spin_unlock_irqrestore(&nled->data->lock, flags);
+
+ return (value & nled->bit) ? 1 : LED_OFF;
+}
+
+static struct nic78bx_led nic78bx_leds[] = {
+ {
+ .bit = NIC78BX_USER1_GREEN_LED,
+ .mask = NIC78BX_USER1_LED_MASK,
+ .cdev = {
+ .name = "nilrt:green:user1",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER1_YELLOW_LED,
+ .mask = NIC78BX_USER1_LED_MASK,
+ .cdev = {
+ .name = "nilrt:yellow:user1",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER2_GREEN_LED,
+ .mask = NIC78BX_USER2_LED_MASK,
+ .cdev = {
+ .name = "nilrt:green:user2",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ },
+ {
+ .bit = NIC78BX_USER2_YELLOW_LED,
+ .mask = NIC78BX_USER2_LED_MASK,
+ .cdev = {
+ .name = "nilrt:yellow:user2",
+ .max_brightness = 1,
+ .brightness_set = nic78bx_brightness_set,
+ .brightness_get = nic78bx_brightness_get,
+ }
+ }
+};
+
+static int nic78bx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nic78bx_led_data *led_data;
+ struct resource *io_rc;
+ int ret, i;
+
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
+ return -ENOMEM;
+
+ led_data->pdev = pdev;
+ platform_set_drvdata(pdev, led_data);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ if (resource_size(io_rc) < NIC78BX_USER_LED_IO_SIZE) {
+ dev_err(dev, "IO region too small\n");
+ return -EINVAL;
+ }
+
+ if (!devm_request_region(dev, io_rc->start, resource_size(io_rc),
+ KBUILD_MODNAME)) {
+ dev_err(dev, "failed to get IO region\n");
+ return -EBUSY;
+ }
+
+ led_data->io_base = io_rc->start;
+ spin_lock_init(&led_data->lock);
+
+ for (i = 0; i < ARRAY_SIZE(nic78bx_leds); i++) {
+ nic78bx_leds[i].data = led_data;
+
+ ret = devm_led_classdev_register(dev, &nic78bx_leds[i].cdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Unlock LED register */
+ outb(NIC78BX_UNLOCK_VALUE,
+ led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
+
+ return ret;
+}
+
+static int nic78bx_remove(struct platform_device *pdev)
+{
+ struct nic78bx_led_data *led_data = platform_get_drvdata(pdev);
+
+ /* Lock LED register */
+ outb(NIC78BX_LOCK_VALUE,
+ led_data->io_base + NIC78BX_LOCK_REG_OFFSET);
+
+ return 0;
+}
+
+static const struct acpi_device_id led_device_ids[] = {
+ {"NIC78B3", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, led_device_ids);
+
+static struct platform_driver led_driver = {
+ .probe = nic78bx_probe,
+ .remove = nic78bx_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .acpi_match_table = ACPI_PTR(led_device_ids),
+ },
+};
+
+module_platform_driver(led_driver);
+
+MODULE_DESCRIPTION("National Instruments PXI User LEDs driver");
+MODULE_AUTHOR("Hui Chun Ong <hui.chun.ong@ni.com>");
+MODULE_LICENSE("GPL");
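A small point worth noting in the driver above: each User LED is a green/yellow pair sharing a two-bit field, and the set path clears the whole field before setting the requested colour, so lighting one colour implicitly switches the other off. For instance, moving User1 from yellow to green (io_base as obtained in probe):

u8 value = inb(io_base);		/* e.g. 0x02: User1 yellow currently lit */

value &= ~NIC78BX_USER1_LED_MASK;	/* clear both User1 bits (mask 0x03)     */
value |= NIC78BX_USER1_GREEN_LED;	/* set bit 0 -> 0x01, User1 green        */
outb(value, io_base);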
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 09a7cffbc46f..06e63106ae1e 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -369,7 +369,7 @@ static int pca9532_configure(struct i2c_client *client,
led->state = pled->state;
led->name = pled->name;
led->ldev.name = led->name;
- led->ldev.default_trigger = led->default_trigger;
+ led->ldev.default_trigger = pled->default_trigger;
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set_blocking =
pca9532_set_brightness;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 840401ae9a4e..78a7ce816a47 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -40,6 +40,7 @@
* bits the chip supports.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -100,6 +101,15 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
+static const struct acpi_device_id pca955x_acpi_ids[] = {
+ { "PCA9550", pca9550 },
+ { "PCA9551", pca9551 },
+ { "PCA9552", pca9552 },
+ { "PCA9553", pca9553 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca955x_acpi_ids);
+
struct pca955x {
struct mutex lock;
struct pca955x_led *leds;
@@ -250,7 +260,16 @@ static int pca955x_probe(struct i2c_client *client,
struct led_platform_data *pdata;
int i, err;
- chip = &pca955x_chipdefs[id->driver_data];
+ if (id) {
+ chip = &pca955x_chipdefs[id->driver_data];
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(pca955x_acpi_ids, &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &pca955x_chipdefs[acpi_id->driver_data];
+ }
adapter = to_i2c_adapter(client->dev.parent);
pdata = dev_get_platdata(&client->dev);
@@ -264,7 +283,7 @@ static int pca955x_probe(struct i2c_client *client,
dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
- id->name, chip->bits, client->addr);
+ client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
@@ -358,6 +377,7 @@ static int pca955x_remove(struct i2c_client *client)
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
+ .acpi_match_table = ACPI_PTR(pca955x_acpi_ids),
},
.probe = pca955x_probe,
.remove = pca955x_remove,
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 407eba11e187..ded1e4dac36a 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -25,6 +25,7 @@
* or by adding the 'nxp,hw-blink' property to the DTS.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -59,6 +60,7 @@ struct pca963x_chipdef {
u8 grpfreq;
u8 ledout_base;
int n_leds;
+ unsigned int scaling;
};
static struct pca963x_chipdef pca963x_chipdefs[] = {
@@ -95,6 +97,15 @@ static const struct i2c_device_id pca963x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca963x_id);
+static const struct acpi_device_id pca963x_acpi_ids[] = {
+ { "PCA9632", pca9633 },
+ { "PCA9633", pca9633 },
+ { "PCA9634", pca9634 },
+ { "PCA9635", pca9635 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca963x_acpi_ids);
+
struct pca963x_led;
struct pca963x {
@@ -102,6 +113,7 @@ struct pca963x {
struct mutex mutex;
struct i2c_client *client;
struct pca963x_led *leds;
+ unsigned long leds_on;
};
struct pca963x_led {
@@ -123,7 +135,6 @@ static int pca963x_brightness(struct pca963x_led *pca963x,
u8 mask = 0x3 << shift;
int ret;
- mutex_lock(&pca963x->chip->mutex);
ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
switch (brightness) {
case LED_FULL:
@@ -140,14 +151,13 @@ static int pca963x_brightness(struct pca963x_led *pca963x,
PCA963X_PWM_BASE + pca963x->led_num,
brightness);
if (ret < 0)
- goto unlock;
+ return ret;
ret = i2c_smbus_write_byte_data(pca963x->chip->client,
ledout_addr,
(ledout & ~mask) | (PCA963X_LED_PWM << shift));
break;
}
-unlock:
- mutex_unlock(&pca963x->chip->mutex);
+
return ret;
}
@@ -179,14 +189,49 @@ static void pca963x_blink(struct pca963x_led *pca963x)
mutex_unlock(&pca963x->chip->mutex);
}
+static int pca963x_power_state(struct pca963x_led *pca963x)
+{
+ unsigned long *leds_on = &pca963x->chip->leds_on;
+ unsigned long cached_leds = pca963x->chip->leds_on;
+
+ if (pca963x->led_cdev.brightness)
+ set_bit(pca963x->led_num, leds_on);
+ else
+ clear_bit(pca963x->led_num, leds_on);
+
+ if (!(*leds_on) != !cached_leds)
+ return i2c_smbus_write_byte_data(pca963x->chip->client,
+ PCA963X_MODE1, *leds_on ? 0 : BIT(4));
+
+ return 0;
+}
+
static int pca963x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct pca963x_led *pca963x;
+ int ret;
pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
- return pca963x_brightness(pca963x, value);
+ mutex_lock(&pca963x->chip->mutex);
+
+ ret = pca963x_brightness(pca963x, value);
+ if (ret < 0)
+ goto unlock;
+ ret = pca963x_power_state(pca963x);
+
+unlock:
+ mutex_unlock(&pca963x->chip->mutex);
+ return ret;
+}
+
+static unsigned int pca963x_period_scale(struct pca963x_led *pca963x,
+ unsigned int val)
+{
+ unsigned int scaling = pca963x->chip->chipdef->scaling;
+
+ return scaling ? DIV_ROUND_CLOSEST(val * scaling, 1000) : val;
}
static int pca963x_blink_set(struct led_classdev *led_cdev,
@@ -207,14 +252,14 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
time_off = 500;
}
- period = time_on + time_off;
+ period = pca963x_period_scale(pca963x, time_on + time_off);
/* If the period is not supported by the hardware, default to something sane. */
if ((period < PCA963X_BLINK_PERIOD_MIN) ||
(period > PCA963X_BLINK_PERIOD_MAX)) {
time_on = 500;
time_off = 500;
- period = time_on + time_off;
+ period = pca963x_period_scale(pca963x, 1000);
}
/*
@@ -222,7 +267,7 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
* (time_on / period) = (GDC / 256) ->
* GDC = ((time_on * 256) / period)
*/
- gdc = (time_on * 256) / period;
+ gdc = (pca963x_period_scale(pca963x, time_on) * 256) / period;
/*
* From manual: period = ((GFRQ + 1) / 24) in seconds.
@@ -294,6 +339,9 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
else
pdata->blink_type = PCA963X_SW_BLINK;
+ if (of_property_read_u32(np, "nxp,period-scale", &chip->scaling))
+ chip->scaling = 1000;
+
return pdata;
}
@@ -322,7 +370,16 @@ static int pca963x_probe(struct i2c_client *client,
struct pca963x_chipdef *chip;
int i, err;
- chip = &pca963x_chipdefs[id->driver_data];
+ if (id) {
+ chip = &pca963x_chipdefs[id->driver_data];
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(pca963x_acpi_ids, &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &pca963x_chipdefs[acpi_id->driver_data];
+ }
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
@@ -391,8 +448,8 @@ static int pca963x_probe(struct i2c_client *client,
goto exit;
}
- /* Disable LED all-call address and set normal mode */
- i2c_smbus_write_byte_data(client, PCA963X_MODE1, 0x00);
+ /* Disable LED all-call address, and power down initially */
+ i2c_smbus_write_byte_data(client, PCA963X_MODE1, BIT(4));
if (pdata) {
/* Configure output: open-drain or totem pole (push-pull) */
@@ -426,6 +483,7 @@ static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
.of_match_table = of_match_ptr(of_pca963x_match),
+ .acpi_match_table = ACPI_PTR(pca963x_acpi_ids),
},
.probe = pca963x_probe,
.remove = pca963x_remove,
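Two behavioural changes above deserve a word. First, the driver now tracks which outputs are lit in the leds_on bitmap and toggles the chip's MODE1 low-power bit (BIT(4), also written at probe time to power the chip down initially) only on transitions between "all off" and "any on" - that is what the !(*leds_on) != !cached_leds test detects. Second, blink times pass through the optional nxp,period-scale factor (in parts per thousand) before the group duty cycle is derived from the relation quoted in the code, GDC = time_on * 256 / period. With the 500 ms defaults and, for illustration, a scale of 500:

unsigned int time_on = 500, time_off = 500;		/* ms                 */
unsigned int period = time_on + time_off;		/* 1000 ms            */
u8 gdc = (time_on * 256) / period;			/* 128 -> 50 % duty   */

/* with nxp,period-scale = 500 every programmed time is halved: */
unsigned int scaled_on = DIV_ROUND_CLOSEST(time_on * 500, 1000);	/* 250 ms */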
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 22f0634dd3fa..9719caf7437c 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
* @evt: CPU event to be emitted
*
* Emit a CPU event on a CPU core, which will trigger a
- * binded LED to turn on or turn off.
+ * bound LED to turn on or turn off.
*/
void ledtrig_cpu(enum cpu_led_event ledevt)
{
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
new file mode 100644
index 000000000000..5e9e8a1fdefb
--- /dev/null
+++ b/drivers/leds/uleds.c
@@ -0,0 +1,235 @@
+/*
+ * Userspace driver for the LED subsystem
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * Based on uinput.c: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uleds.h>
+
+#define ULEDS_NAME "uleds"
+
+enum uleds_state {
+ ULEDS_STATE_UNKNOWN,
+ ULEDS_STATE_REGISTERED,
+};
+
+struct uleds_device {
+ struct uleds_user_dev user_dev;
+ struct led_classdev led_cdev;
+ struct mutex mutex;
+ enum uleds_state state;
+ wait_queue_head_t waitq;
+ int brightness;
+ bool new_data;
+};
+
+static struct miscdevice uleds_misc;
+
+static void uleds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct uleds_device *udev = container_of(led_cdev, struct uleds_device,
+ led_cdev);
+
+ if (udev->brightness != brightness) {
+ udev->brightness = brightness;
+ udev->new_data = true;
+ wake_up_interruptible(&udev->waitq);
+ }
+}
+
+static int uleds_open(struct inode *inode, struct file *file)
+{
+ struct uleds_device *udev;
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ udev->led_cdev.name = udev->user_dev.name;
+ udev->led_cdev.brightness_set = uleds_brightness_set;
+
+ mutex_init(&udev->mutex);
+ init_waitqueue_head(&udev->waitq);
+ udev->state = ULEDS_STATE_UNKNOWN;
+
+ file->private_data = udev;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static ssize_t uleds_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uleds_device *udev = file->private_data;
+ const char *name;
+ int ret;
+
+ if (count == 0)
+ return 0;
+
+ ret = mutex_lock_interruptible(&udev->mutex);
+ if (ret)
+ return ret;
+
+ if (udev->state == ULEDS_STATE_REGISTERED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (count != sizeof(struct uleds_user_dev)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(&udev->user_dev, buffer,
+ sizeof(struct uleds_user_dev))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ name = udev->user_dev.name;
+ if (!name[0] || !strcmp(name, ".") || !strcmp(name, "..") ||
+ strchr(name, '/')) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (udev->user_dev.max_brightness <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ udev->led_cdev.max_brightness = udev->user_dev.max_brightness;
+
+ ret = devm_led_classdev_register(uleds_misc.this_device,
+ &udev->led_cdev);
+ if (ret < 0)
+ goto out;
+
+ udev->new_data = true;
+ udev->state = ULEDS_STATE_REGISTERED;
+ ret = count;
+
+out:
+ mutex_unlock(&udev->mutex);
+
+ return ret;
+}
+
+static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct uleds_device *udev = file->private_data;
+ ssize_t retval;
+
+ if (count < sizeof(udev->brightness))
+ return 0;
+
+ do {
+ retval = mutex_lock_interruptible(&udev->mutex);
+ if (retval)
+ return retval;
+
+ if (udev->state != ULEDS_STATE_REGISTERED) {
+ retval = -ENODEV;
+ } else if (!udev->new_data && (file->f_flags & O_NONBLOCK)) {
+ retval = -EAGAIN;
+ } else if (udev->new_data) {
+ retval = copy_to_user(buffer, &udev->brightness,
+ sizeof(udev->brightness));
+ udev->new_data = false;
+ retval = sizeof(udev->brightness);
+ }
+
+ mutex_unlock(&udev->mutex);
+
+ if (retval)
+ break;
+
+ if (!(file->f_flags & O_NONBLOCK))
+ retval = wait_event_interruptible(udev->waitq,
+ udev->new_data ||
+ udev->state != ULEDS_STATE_REGISTERED);
+ } while (retval == 0);
+
+ return retval;
+}
+
+static unsigned int uleds_poll(struct file *file, poll_table *wait)
+{
+ struct uleds_device *udev = file->private_data;
+
+ poll_wait(file, &udev->waitq, wait);
+
+ if (udev->new_data)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int uleds_release(struct inode *inode, struct file *file)
+{
+ struct uleds_device *udev = file->private_data;
+
+ if (udev->state == ULEDS_STATE_REGISTERED) {
+ udev->state = ULEDS_STATE_UNKNOWN;
+ devm_led_classdev_unregister(uleds_misc.this_device,
+ &udev->led_cdev);
+ }
+ kfree(udev);
+
+ return 0;
+}
+
+static const struct file_operations uleds_fops = {
+ .owner = THIS_MODULE,
+ .open = uleds_open,
+ .release = uleds_release,
+ .read = uleds_read,
+ .write = uleds_write,
+ .poll = uleds_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uleds_misc = {
+ .fops = &uleds_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = ULEDS_NAME,
+};
+
+static int __init uleds_init(void)
+{
+ return misc_register(&uleds_misc);
+}
+module_init(uleds_init);
+
+static void __exit uleds_exit(void)
+{
+ misc_deregister(&uleds_misc);
+}
+module_exit(uleds_exit);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("Userspace driver for the LED subsystem");
+MODULE_LICENSE("GPL");
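The intended userspace flow for the driver above: open /dev/uleds, write a single struct uleds_user_dev (a name plus max_brightness, both validated in uleds_write()) to register the class device, then read an int in a loop - each read blocks until the LED core requests a new brightness and returns the requested value. A minimal sketch with a made-up LED name and no error handling:

#include <fcntl.h>
#include <unistd.h>
#include <linux/uleds.h>

int main(void)
{
	struct uleds_user_dev dev = {
		.name = "example::status",	/* made-up name */
		.max_brightness = 255,
	};
	int fd = open("/dev/uleds", O_RDWR);
	int brightness;

	write(fd, &dev, sizeof(dev));	/* creates /sys/class/leds/example::status */
	for (;;) {
		read(fd, &brightness, sizeof(brightness));	/* waits for a change */
		/* drive the real LED hardware to 'brightness' here */
	}
}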
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 19a32280731d..601f81c04873 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -109,10 +109,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
- case LHCALL_TS:
- /* This sets the TS flag, as we saw used in run_guest(). */
- cpu->ts = args->arg1;
- break;
case LHCALL_HALT:
/* Similarly, this sets the halted flag for run_guest(). */
cpu->halted = 1;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 69b3814afd2f..2356a2318034 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -43,7 +43,6 @@ struct lg_cpu {
struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
u32 cr2;
- int ts;
u32 esp1;
u16 ss1;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6e9042e3d2a9..743253fc638f 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -247,14 +247,6 @@ unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
/*
- * Remember the awfully-named TS bit? If the Guest has asked to set it
- * we set it now, so we can trap and pass that trap to the Guest if it
- * uses the FPU.
- */
- if (cpu->ts && fpregs_active())
- stts();
-
- /*
* SYSENTER is an optimized way of doing system calls. We can't allow
* it because it always jumps to privilege level 0. A normal Guest
* won't try it because we don't advertise it in CPUID, but a malicious
@@ -282,10 +274,6 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- /* Clear the host TS bit if it was set above. */
- if (cpu->ts && fpregs_active())
- clts();
-
/*
* If the Guest page faulted, then the cr2 register will tell us the
* bad virtual address. We have to grab this now, because once we
@@ -421,12 +409,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
kill_guest(cpu, "Writing cr2");
break;
case 7: /* We've intercepted a Device Not Available fault. */
- /*
- * If the Guest doesn't want to know, we already restored the
- * Floating Point Unit, so we just continue without telling it.
- */
- if (!cpu->ts)
- return;
+ /* No special handling is needed here. */
break;
case 32 ... 255:
/* This might be a syscall. */
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 1f6b6521016a..a7a0a22cf1a5 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o
+obj-$(CONFIG_NVM) := core.o sysblk.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 1cac0f8bc0dc..02240a0b39c9 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,13 +22,11 @@
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include "lightnvm.h"
-
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
@@ -88,8 +86,7 @@ void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
- dma_addr_t dma_handler)
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
@@ -178,38 +175,133 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
+static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_rq *rqd)
{
- return dev->mt->get_blk(dev, lun, flags);
+ struct nvm_dev *dev = tgt_dev->parent;
+ int i;
+
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
+ rqd->ppa_list[i], TRANS_TGT_TO_DEV);
+ rqd->ppa_list[i] = generic_to_dev_addr(dev,
+ rqd->ppa_list[i]);
+ }
+ } else {
+ rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
+ TRANS_TGT_TO_DEV);
+ rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+ }
+}
+
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int type)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all sysblocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_generic_to_addr_mode(dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_get_blk);
+EXPORT_SYMBOL(nvm_set_bb_tbl);
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
{
- return dev->mt->put_blk(dev, blk);
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (nr_ppas > dev->ops->max_phys_sect) {
+ pr_err("nvm: unable to update all blocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+ nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(dev, &rqd);
+ if (ret) {
+ pr_err("nvm: sysblk failed bb mark\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_put_blk);
+EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
- return dev->mt->mark_blk(dev, ppa, type);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->ops->max_phys_sect;
}
-EXPORT_SYMBOL(nvm_mark_blk);
+EXPORT_SYMBOL(nvm_max_phys_sects);
-int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- return dev->mt->submit_io(dev, rqd);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
- return dev->mt->erase_blk(dev, blk, 0);
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);
+int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
+ nvm_l2p_update_fn *update_l2p, void *priv)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ if (!dev->ops->get_l2p_tbl)
+ return 0;
+
+ return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
+}
+EXPORT_SYMBOL(nvm_get_l2p_tbl);
+
+int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ return dev->mt->get_area(dev, lba, len);
+}
+EXPORT_SYMBOL(nvm_get_area);
+
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ dev->mt->put_area(dev, lba);
+}
+EXPORT_SYMBOL(nvm_put_area);
+
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
@@ -241,10 +333,11 @@ EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
+ struct nvm_geo *geo = &dev->geo;
int i, plane_cnt, pl_idx;
struct ppa_addr ppa;
- if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
@@ -262,7 +355,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++)
rqd->ppa_list[i] = ppas[i];
} else {
- plane_cnt = dev->plane_mode;
+ plane_cnt = geo->plane_mode;
rqd->nr_ppas *= plane_cnt;
for (i = 0; i < nr_ppas; i++) {
@@ -287,7 +380,8 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+ int flags)
{
struct nvm_rq rqd;
int ret;
@@ -303,6 +397,8 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
nvm_generic_to_addr_mode(dev, &rqd);
+ rqd.flags = flags;
+
ret = dev->ops->erase_block(dev, &rqd);
nvm_free_rqd_ppalist(dev, &rqd);
@@ -341,7 +437,7 @@ static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = NULL;
rqd->opcode = opcode;
rqd->flags = flags;
rqd->bio = bio;
@@ -437,17 +533,18 @@ EXPORT_SYMBOL(nvm_submit_ppa);
*/
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
+ struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ if (nr_blks != geo->blks_per_lun * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < dev->blks_per_lun; blk++) {
- offset = blk * dev->plane_mode;
+ for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ offset = blk * geo->plane_mode;
blktype = blks[offset];
/* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < dev->plane_mode; pl++) {
+ for (pl = 0; pl < geo->plane_mode; pl++) {
if (blks[offset + pl] &
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
blktype = blks[offset + pl];
@@ -458,7 +555,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return dev->blks_per_lun;
+ return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -470,11 +567,22 @@ int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
}
EXPORT_SYMBOL(nvm_get_bb_tbl);
+int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+ u8 *blks)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+
+ ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
+ return nvm_get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
+ struct nvm_geo *geo = &dev->geo;
int i;
- dev->lps_per_blk = dev->pgs_per_blk;
+ dev->lps_per_blk = geo->pgs_per_blk;
dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
if (!dev->lptbl)
return -ENOMEM;
@@ -520,29 +628,32 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ struct nvm_geo *geo = &dev->geo;
int ret;
- /* device values */
- dev->nr_chnls = grp->num_ch;
- dev->luns_per_chnl = grp->num_lun;
- dev->pgs_per_blk = grp->num_pg;
- dev->blks_per_lun = grp->num_blk;
- dev->nr_planes = grp->num_pln;
- dev->fpg_size = grp->fpg_sz;
- dev->pfpg_size = grp->fpg_sz * grp->num_pln;
- dev->sec_size = grp->csecs;
- dev->oob_size = grp->sos;
- dev->sec_per_pg = grp->fpg_sz / grp->csecs;
- dev->mccap = grp->mccap;
- memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- dev->plane_mode = NVM_PLANE_SINGLE;
- dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
+ /* Whole device values */
+ geo->nr_chnls = grp->num_ch;
+ geo->luns_per_chnl = grp->num_lun;
+
+ /* Generic device values */
+ geo->pgs_per_blk = grp->num_pg;
+ geo->blks_per_lun = grp->num_blk;
+ geo->nr_planes = grp->num_pln;
+ geo->fpg_size = grp->fpg_sz;
+ geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->sec_size = grp->csecs;
+ geo->oob_size = grp->sos;
+ geo->sec_per_pg = grp->fpg_sz / grp->csecs;
+ geo->mccap = grp->mccap;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ geo->plane_mode = NVM_PLANE_SINGLE;
+ geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
if (grp->mpos & 0x020202)
- dev->plane_mode = NVM_PLANE_DOUBLE;
+ geo->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
- dev->plane_mode = NVM_PLANE_QUAD;
+ geo->plane_mode = NVM_PLANE_QUAD;
if (grp->mtype != 0) {
pr_err("nvm: memory type not supported\n");
@@ -550,13 +661,13 @@ static int nvm_core_init(struct nvm_dev *dev)
}
/* calculated values */
- dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
- dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
- dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
- dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
+ geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
+ geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
+ geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
+ geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = dev->nr_luns * dev->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ dev->total_secs = geo->nr_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
@@ -583,7 +694,7 @@ static int nvm_core_init(struct nvm_dev *dev)
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
- blk_queue_logical_block_size(dev->q, dev->sec_size);
+ blk_queue_logical_block_size(dev->q, geo->sec_size);
return 0;
err_fmtype:
@@ -617,6 +728,7 @@ void nvm_free(struct nvm_dev *dev)
static int nvm_init(struct nvm_dev *dev)
{
+ struct nvm_geo *geo = &dev->geo;
int ret = -EINVAL;
if (!dev->q || !dev->ops)
@@ -648,20 +760,15 @@ static int nvm_init(struct nvm_dev *dev)
}
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
- dev->name, dev->sec_per_pg, dev->nr_planes,
- dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
- dev->nr_chnls);
+ dev->name, geo->sec_per_pg, geo->nr_planes,
+ geo->pgs_per_blk, geo->blks_per_lun,
+ geo->nr_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
return ret;
}
-static void nvm_exit(struct nvm_dev *dev)
-{
- nvm_sysfs_unregister_dev(dev);
-}
-
struct nvm_dev *nvm_alloc_dev(int node)
{
return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
@@ -691,10 +798,6 @@ int nvm_register(struct nvm_dev *dev)
}
}
- ret = nvm_sysfs_register_dev(dev);
- if (ret)
- goto err_ppalist;
-
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
ret = nvm_get_sysblock(dev, &dev->sb);
if (!ret)
@@ -711,8 +814,6 @@ int nvm_register(struct nvm_dev *dev)
up_write(&nvm_lock);
return 0;
-err_ppalist:
- dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
kfree(dev->lun_map);
return ret;
@@ -725,7 +826,7 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_exit(dev);
+ nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -754,149 +855,15 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
}
s = &create->conf.s;
- if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
+ if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->nr_luns);
+ s->lun_begin, s->lun_end, dev->geo.nr_luns);
return -EINVAL;
}
return dev->mt->create_tgt(dev, create);
}
-#ifdef CONFIG_NVM_DEBUG
-static int nvm_configure_show(const char *val)
-{
- struct nvm_dev *dev;
- char opcode, devname[DISK_NAME_LEN];
- int ret;
-
- ret = sscanf(val, "%c %32s", &opcode, devname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
- return -EINVAL;
- }
-
- down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(devname);
- up_write(&nvm_lock);
- if (!dev) {
- pr_err("nvm: device not found\n");
- return -EINVAL;
- }
-
- if (!dev->mt)
- return 0;
-
- dev->mt->lun_info_print(dev);
-
- return 0;
-}
-
-static int nvm_configure_remove(const char *val)
-{
- struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- char opcode;
- int ret = 0;
-
- ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
- if (ret != 2) {
- pr_err("nvm: invalid command. Use \"d targetname\".\n");
- return -EINVAL;
- }
-
- remove.flags = 0;
-
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = dev->mt->remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
-}
-
-static int nvm_configure_create(const char *val)
-{
- struct nvm_ioctl_create create;
- char opcode;
- int lun_begin, lun_end, ret;
-
- ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
- create.tgtname, create.tgttype,
- &lun_begin, &lun_end);
- if (ret != 6) {
- pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
- return -EINVAL;
- }
-
- create.flags = 0;
- create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
- create.conf.s.lun_begin = lun_begin;
- create.conf.s.lun_end = lun_end;
-
- return __nvm_configure_create(&create);
-}
-
-
-/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
-static int nvm_configure_by_str_event(const char *val,
- const struct kernel_param *kp)
-{
- char opcode;
- int ret;
-
- ret = sscanf(val, "%c", &opcode);
- if (ret != 1) {
- pr_err("nvm: string must have the format of \"cmd ...\"\n");
- return -EINVAL;
- }
-
- switch (opcode) {
- case 'a':
- return nvm_configure_create(val);
- case 'd':
- return nvm_configure_remove(val);
- case 's':
- return nvm_configure_show(val);
- default:
- pr_err("nvm: invalid command\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int nvm_configure_get(char *buf, const struct kernel_param *kp)
-{
- int sz;
- struct nvm_dev *dev;
-
- sz = sprintf(buf, "available devices:\n");
- down_write(&nvm_lock);
- list_for_each_entry(dev, &nvm_devices, devices) {
- if (sz > 4095 - DISK_NAME_LEN - 2)
- break;
- sz += sprintf(buf + sz, " %32s\n", dev->name);
- }
- up_write(&nvm_lock);
-
- return sz;
-}
-
-static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
- .set = nvm_configure_by_str_event,
- .get = nvm_configure_get,
-};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "lnvm."
-
-module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
- 0644);
-
-#endif /* CONFIG_NVM_DEBUG */
-
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
struct nvm_ioctl_info *info;
@@ -1162,10 +1129,4 @@ static struct miscdevice _nvm_misc = {
.nodename = "lightnvm/control",
.fops = &_ctl_fops,
};
-module_misc_device(_nvm_misc);
-
-MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
-
-MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.1");
+builtin_misc_device(_nvm_misc);
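The core.c changes above fold the per-device geometry fields into struct nvm_geo and derive the per-LUN and per-device totals from it. As a quick reference, that derivation can be exercised in isolation; the user-space sketch below borrows the field names from the patch, while the sample identify values and the program itself are illustrative only and not part of the kernel code.

/* Standalone sketch of the "calculated values" arithmetic in nvm_core_init().
 * Field names mirror struct nvm_geo from the patch; the numbers are made up. */
#include <stdio.h>

struct nvm_geo {
	int nr_chnls, luns_per_chnl;
	int pgs_per_blk, blks_per_lun, nr_planes, sec_per_pg;
	int sec_per_pl, sec_per_blk, sec_per_lun, nr_luns;
};

int main(void)
{
	struct nvm_geo geo = {
		.nr_chnls = 8, .luns_per_chnl = 4,
		.pgs_per_blk = 256, .blks_per_lun = 1024,
		.nr_planes = 2, .sec_per_pg = 4,
	};

	/* Same derivations as nvm_core_init() performs on dev->geo. */
	geo.sec_per_pl = geo.sec_per_pg * geo.nr_planes;
	geo.sec_per_blk = geo.sec_per_pl * geo.pgs_per_blk;
	geo.sec_per_lun = geo.sec_per_blk * geo.blks_per_lun;
	geo.nr_luns = geo.luns_per_chnl * geo.nr_chnls;

	printf("sec_per_lun=%d nr_luns=%d total_secs=%lld\n",
	       geo.sec_per_lun, geo.nr_luns,
	       (long long)geo.nr_luns * geo.sec_per_lun);
	return 0;
}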
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c6d021..ca7880082d80 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,165 @@ static const struct block_device_operations gen_fops = {
.owner = THIS_MODULE,
};
+static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
+ int lun_begin, int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++) {
+ if (test_and_set_bit(i, dev->lun_map)) {
+ pr_err("nvm: lun %d already allocated\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= lun_begin)
+ clear_bit(i, dev->lun_map);
+
+ return -EBUSY;
+}
+
+static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
+ int lun_end)
+{
+ int i;
+
+ for (i = lun_begin; i <= lun_end; i++)
+ WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ int i, j;
+
+ for (i = 0; i < dev_map->nr_chnls; i++) {
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs = ch_map->lun_offs;
+ int ch = i + ch_map->ch_off;
+
+ for (j = 0; j < ch_map->nr_luns; j++) {
+ int lun = j + lun_offs[j];
+ int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ }
+
+ kfree(ch_map->lun_offs);
+ }
+
+ kfree(dev_map->chnls);
+ kfree(dev_map);
+ kfree(tgt_dev->luns);
+ kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
+ int lun_begin, int lun_end)
+{
+ struct nvm_tgt_dev *tgt_dev = NULL;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_dev_map *dev_map;
+ struct ppa_addr *luns;
+ int nr_luns = lun_end - lun_begin + 1;
+ int luns_left = nr_luns;
+ int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+ int bch = lun_begin / dev->geo.luns_per_chnl;
+ int blun = lun_begin % dev->geo.luns_per_chnl;
+ int lunid = 0;
+ int lun_balanced = 1;
+ int prev_nr_luns;
+ int i, j;
+
+ nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+ nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+ dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_map)
+ goto err_dev;
+
+ dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_map->chnls)
+ goto err_chnls;
+
+ luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+ if (!luns)
+ goto err_luns;
+
+ prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+ for (i = 0; i < nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+ int *lun_roffs = ch_rmap->lun_offs;
+ struct gen_ch_map *ch_map = &dev_map->chnls[i];
+ int *lun_offs;
+ int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+ dev->geo.luns_per_chnl : luns_left;
+
+ if (lun_balanced && prev_nr_luns != luns_in_chnl)
+ lun_balanced = 0;
+
+ ch_map->ch_off = ch_rmap->ch_off = bch;
+ ch_map->nr_luns = luns_in_chnl;
+
+ lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_offs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++) {
+ luns[lunid].ppa = 0;
+ luns[lunid].g.ch = i;
+ luns[lunid++].g.lun = j;
+
+ lun_offs[j] = blun;
+ lun_roffs[j + blun] = blun;
+ }
+
+ ch_map->lun_offs = lun_offs;
+
+ /* when starting a new channel, lun offset is reset */
+ blun = 0;
+ luns_left -= luns_in_chnl;
+ }
+
+ dev_map->nr_chnls = nr_chnls;
+
+ tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+ if (!tgt_dev)
+ goto err_ch;
+
+ memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+ /* Target device only owns a portion of the physical device */
+ tgt_dev->geo.nr_chnls = nr_chnls;
+ tgt_dev->geo.nr_luns = nr_luns;
+ tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+ tgt_dev->q = dev->q;
+ tgt_dev->map = dev_map;
+ tgt_dev->luns = luns;
+ memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+ tgt_dev->parent = dev;
+
+ return tgt_dev;
+err_ch:
+ while (--i >= 0)
+ kfree(dev_map->chnls[i].lun_offs);
+ kfree(luns);
+err_luns:
+ kfree(dev_map->chnls);
+err_chnls:
+ kfree(dev_map);
+err_dev:
+ return tgt_dev;
+}
+
static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct gen_dev *gn = dev->mp;
@@ -43,6 +202,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
struct nvm_target *t;
+ struct nvm_tgt_dev *tgt_dev;
void *targetdata;
tt = nvm_find_target_type(create->tgttype, 1);
@@ -64,9 +224,18 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
if (!t)
return -ENOMEM;
+ if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
+ goto err_t;
+
+ tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ if (!tgt_dev) {
+ pr_err("nvm: could not create target device\n");
+ goto err_reserve;
+ }
+
tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
if (!tqueue)
- goto err_t;
+ goto err_dev;
blk_queue_make_request(tqueue, tt->make_rq);
tdisk = alloc_disk(0);
@@ -80,7 +249,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->fops = &gen_fops;
tdisk->queue = tqueue;
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ targetdata = tt->init(tgt_dev, tdisk);
if (IS_ERR(targetdata))
goto err_init;
@@ -94,7 +263,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
t->type = tt;
t->disk = tdisk;
- t->dev = dev;
+ t->dev = tgt_dev;
mutex_lock(&gn->lock);
list_add_tail(&t->list, &gn->targets);
@@ -105,6 +274,10 @@ err_init:
put_disk(tdisk);
err_queue:
blk_cleanup_queue(tqueue);
+err_dev:
+ kfree(tgt_dev);
+err_reserve:
+ gen_release_luns_err(dev, s->lun_begin, s->lun_end);
err_t:
kfree(t);
return -ENOMEM;
@@ -122,6 +295,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
+ gen_remove_tgt_dev(t->dev);
put_disk(tdisk);
list_del(&t->list);
@@ -160,10 +334,11 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
+ struct nvm_geo *geo = &dev->geo;
struct gen_dev *gn = dev->mp;
struct gen_area *area, *prev, *next;
sector_t begin = 0;
- sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+ sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
if (len > max_sectors)
return -EINVAL;
@@ -220,240 +395,74 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
spin_unlock(&dev->lock);
}
-static void gen_blocks_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- int i;
-
- gen_for_each_lun(gn, lun, i) {
- if (!lun->vlun.blocks)
- break;
- vfree(lun->vlun.blocks);
- }
-}
-
-static void gen_luns_free(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
-
- kfree(gn->luns);
-}
-
-static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- int i;
-
- gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
- if (!gn->luns)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, i) {
- spin_lock_init(&lun->vlun.lock);
- INIT_LIST_HEAD(&lun->free_list);
- INIT_LIST_HEAD(&lun->used_list);
- INIT_LIST_HEAD(&lun->bb_list);
-
- lun->reserved_blocks = 2; /* for GC only */
- lun->vlun.id = i;
- lun->vlun.lun_id = i % dev->luns_per_chnl;
- lun->vlun.chnl_id = i / dev->luns_per_chnl;
- lun->vlun.nr_free_blocks = dev->blks_per_lun;
- }
- return 0;
-}
-
-static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
- u8 *blks, int nr_blks)
-{
- struct nvm_dev *dev = gn->dev;
- struct gen_lun *lun;
- struct nvm_block *blk;
- int i;
-
- nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
- if (nr_blks < 0)
- return nr_blks;
-
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == 0)
- continue;
-
- blk = &lun->vlun.blocks[i];
- list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_free_blocks--;
- }
-
- return 0;
-}
-
-static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct nvm_dev *dev = private;
- struct gen_dev *gn = dev->mp;
- u64 elba = slba + nlb;
- struct gen_lun *lun;
- struct nvm_block *blk;
- u64 i;
- int lun_id;
-
- if (unlikely(elba > dev->total_secs)) {
- pr_err("gen: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
- for (i = 0; i < nlb; i++) {
- u64 pba = le64_to_cpu(entries[i]);
-
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("gen: L2P data entry is out of bounds!\n");
- return -EINVAL;
- }
-
- /* Address zero is a special one. The first page on a disk is
- * protected. It often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- /* resolve block from physical address */
- lun_id = div_u64(pba, dev->sec_per_lun);
- lun = &gn->luns[lun_id];
-
- /* Calculate block offset into lun */
- pba = pba - (dev->sec_per_lun * lun_id);
- blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
-
- if (!blk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-etablish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
- }
- }
-
- return 0;
-}
-
-static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
- struct gen_lun *lun;
- struct nvm_block *block;
- sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret, nr_blks;
- u8 *blks;
-
- nr_blks = dev->blks_per_lun * dev->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- gen_for_each_lun(gn, lun, lun_iter) {
- lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
- dev->blks_per_lun);
- if (!lun->vlun.blocks) {
- kfree(blks);
- return -ENOMEM;
- }
-
- for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
- block = &lun->vlun.blocks[blk_iter];
-
- INIT_LIST_HEAD(&block->list);
-
- block->lun = &lun->vlun;
- block->id = cur_block_id++;
-
- /* First block is reserved for device */
- if (unlikely(lun_iter == 0 && blk_iter == 0)) {
- lun->vlun.nr_free_blocks--;
- continue;
- }
-
- list_add_tail(&block->list, &lun->free_list);
- }
-
- if (dev->ops->get_bb_tbl) {
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.ch = lun->vlun.chnl_id;
- ppa.g.lun = lun->vlun.lun_id;
-
- ret = nvm_get_bb_tbl(dev, ppa, blks);
- if (ret)
- pr_err("gen: could not get BB table\n");
-
- ret = gen_block_bb(gn, ppa, blks, nr_blks);
- if (ret)
- pr_err("gen: BB table map failed\n");
- }
- }
-
- if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
- gen_block_map, dev);
- if (ret) {
- pr_err("gen: could not read L2P table.\n");
- pr_warn("gen: default block initialization");
- }
- }
-
- kfree(blks);
- return 0;
-}
-
static void gen_free(struct nvm_dev *dev)
{
- gen_blocks_free(dev);
- gen_luns_free(dev);
kfree(dev->mp);
+ kfree(dev->rmap);
dev->mp = NULL;
}
static int gen_register(struct nvm_dev *dev)
{
struct gen_dev *gn;
- int ret;
+ struct gen_dev_map *dev_rmap;
+ int i, j;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
if (!gn)
- return -ENOMEM;
+ goto err_gn;
+
+ dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+ if (!dev_rmap)
+ goto err_rmap;
+
+ dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
+ GFP_KERNEL);
+ if (!dev_rmap->chnls)
+ goto err_chnls;
+
+ for (i = 0; i < dev->geo.nr_chnls; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ int luns_in_chnl = dev->geo.luns_per_chnl;
+
+ ch_rmap = &dev_rmap->chnls[i];
+
+ ch_rmap->ch_off = -1;
+ ch_rmap->nr_luns = luns_in_chnl;
+
+ lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+ if (!lun_roffs)
+ goto err_ch;
+
+ for (j = 0; j < luns_in_chnl; j++)
+ lun_roffs[j] = -1;
+
+ ch_rmap->lun_offs = lun_roffs;
+ }
gn->dev = dev;
- gn->nr_luns = dev->nr_luns;
+ gn->nr_luns = dev->geo.nr_luns;
INIT_LIST_HEAD(&gn->area_list);
mutex_init(&gn->lock);
INIT_LIST_HEAD(&gn->targets);
dev->mp = gn;
-
- ret = gen_luns_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize luns\n");
- goto err;
- }
-
- ret = gen_blocks_init(dev, gn);
- if (ret) {
- pr_err("gen: could not initialize blocks\n");
- goto err;
- }
+ dev->rmap = dev_rmap;
return 1;
-err:
+err_ch:
+ while (--i >= 0)
+ kfree(dev_rmap->chnls[i].lun_offs);
+err_chnls:
+ kfree(dev_rmap);
+err_rmap:
gen_free(dev);
+err_gn:
module_put(THIS_MODULE);
- return ret;
+ return -ENOMEM;
}
static void gen_unregister(struct nvm_dev *dev)
@@ -463,7 +472,7 @@ static void gen_unregister(struct nvm_dev *dev)
mutex_lock(&gn->lock);
list_for_each_entry_safe(t, tmp, &gn->targets, list) {
- if (t->dev != dev)
+ if (t->dev->parent != dev)
continue;
__gen_remove_target(t);
}
@@ -473,168 +482,142 @@ static void gen_unregister(struct nvm_dev *dev)
module_put(THIS_MODULE);
}
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
+static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- struct nvm_block *blk = NULL;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&vlun->lock);
- if (list_empty(&lun->free_list)) {
- pr_err_ratelimited("gen: lun %u have no free pages available",
- lun->vlun.id);
- goto out;
+ struct gen_dev_map *dev_map = tgt_dev->map;
+ struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+ int lun_off = ch_map->lun_offs[p->g.lun];
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap;
+ int lun_roff;
+
+ p->g.ch += ch_map->ch_off;
+ p->g.lun += lun_off;
+
+ ch_rmap = &dev_rmap->chnls[p->g.ch];
+ lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+ if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
+ pr_err("nvm: corrupted device partition table\n");
+ return -EINVAL;
}
- if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
- goto out;
-
- blk = list_first_entry(&lun->free_list, struct nvm_block, list);
-
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_TGT;
- lun->vlun.nr_free_blocks--;
-out:
- spin_unlock(&vlun->lock);
- return blk;
-}
-
-static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
- struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
-
- spin_lock(&vlun->lock);
- if (blk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_free_blocks++;
- blk->state = NVM_BLK_ST_FREE;
- } else if (blk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&blk->list, &lun->bb_list);
- blk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("gen: erroneous block type (%lu -> %u)\n",
- blk->id, blk->state);
- list_move_tail(&blk->list, &lun->bb_list);
- }
- spin_unlock(&vlun->lock);
+ return 0;
}
-static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- struct nvm_block *blk;
-
- pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
- ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
-
- if (unlikely(ppa.g.ch > dev->nr_chnls ||
- ppa.g.lun > dev->luns_per_chnl ||
- ppa.g.blk > dev->blks_per_lun)) {
- WARN_ON_ONCE(1);
- pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
- ppa.g.ch, dev->nr_chnls,
- ppa.g.lun, dev->luns_per_chnl,
- ppa.g.blk, dev->blks_per_lun);
- return;
- }
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+ int lun_roff = ch_rmap->lun_offs[p->g.lun];
- lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- blk = &lun->vlun.blocks[ppa.g.blk];
+ p->g.ch -= ch_rmap->ch_off;
+ p->g.lun -= lun_roff;
- /* will be moved to bb list on put_blk from target */
- blk->state = type;
+ return 0;
}
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ int flag)
{
- int bit = -1;
- int max_secs = dev->ops->max_phys_sect;
- void *comp_bits = &rqd->ppa_status;
+ gen_trans_fn *f;
+ int i;
+ int ret = 0;
- nvm_addr_to_generic_mode(dev, rqd);
+ f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
- /* look up blocks and mark them as bad */
- if (rqd->nr_ppas == 1) {
- gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
- return;
+ if (rqd->nr_ppas == 1)
+ return f(tgt_dev, &rqd->ppa_addr);
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ ret = f(tgt_dev, &rqd->ppa_list[i]);
+ if (ret)
+ goto out;
}
- while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
- gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+out:
+ return ret;
}
static void gen_end_io(struct nvm_rq *rqd)
{
+ struct nvm_tgt_dev *tgt_dev = rqd->dev;
struct nvm_tgt_instance *ins = rqd->ins;
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- gen_mark_blk_bad(rqd->dev, rqd);
+ /* Convert address space */
+ if (tgt_dev)
+ gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
ins->tt->end_io(rqd);
}
-static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
+ struct nvm_dev *dev = tgt_dev->parent;
+
if (!dev->ops->submit_io)
return -ENODEV;
/* Convert address space */
+ gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
nvm_generic_to_addr_mode(dev, rqd);
- rqd->dev = dev;
+ rqd->dev = tgt_dev;
rqd->end_io = gen_end_io;
return dev->ops->submit_io(dev, rqd);
}
-static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
- unsigned long flags)
+static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
+ int flags)
{
- struct ppa_addr addr = block_to_ppa(dev, blk);
+ /* Convert address space */
+ gen_map_to_dev(tgt_dev, p);
- return nvm_erase_ppa(dev, &addr, 1);
+ return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
}
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
+static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
+ struct ppa_addr p, int direction)
{
- return test_and_set_bit(lunid, dev->lun_map);
-}
+ gen_trans_fn *f;
+ struct ppa_addr ppa = p;
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
- WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+ f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
+ f(tgt_dev, &ppa);
+
+ return ppa;
}
-static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
+static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+ int len)
{
- struct gen_dev *gn = dev->mp;
-
- if (unlikely(lunid >= dev->nr_luns))
- return NULL;
+ struct nvm_geo *geo = &dev->geo;
+ struct gen_dev_map *dev_rmap = dev->rmap;
+ u64 i;
- return &gn->luns[lunid].vlun;
-}
+ for (i = 0; i < len; i++) {
+ struct gen_ch_map *ch_rmap;
+ int *lun_roffs;
+ struct ppa_addr gaddr;
+ u64 pba = le64_to_cpu(entries[i]);
+ int off;
+ u64 diff;
-static void gen_lun_info_print(struct nvm_dev *dev)
-{
- struct gen_dev *gn = dev->mp;
- struct gen_lun *lun;
- unsigned int i;
+ if (!pba)
+ continue;
+ gaddr = linear_to_generic_addr(geo, pba);
+ ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+ lun_roffs = ch_rmap->lun_offs;
- gen_for_each_lun(gn, lun, i) {
- spin_lock(&lun->vlun.lock);
+ off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
- pr_info("%s: lun%8u\t%u\n", dev->name, i,
- lun->vlun.nr_free_blocks);
+ diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+ (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
- spin_unlock(&lun->vlun.lock);
+ entries[i] -= cpu_to_le64(diff);
}
}
@@ -648,22 +631,14 @@ static struct nvmm_type gen = {
.create_tgt = gen_create_tgt,
.remove_tgt = gen_remove_tgt,
- .get_blk = gen_get_blk,
- .put_blk = gen_put_blk,
-
.submit_io = gen_submit_io,
.erase_blk = gen_erase_blk,
- .mark_blk = gen_mark_blk,
-
- .get_lun = gen_get_lun,
- .reserve_lun = gen_reserve_lun,
- .release_lun = gen_release_lun,
- .lun_info_print = gen_lun_info_print,
-
.get_area = gen_get_area,
.put_area = gen_put_area,
+ .trans_ppa = gen_trans_ppa,
+ .part_to_tgt = gen_part_to_tgt,
};
static int __init gen_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa817d21d..6a4b3f368848 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -20,37 +20,41 @@
#include <linux/lightnvm.h>
-struct gen_lun {
- struct nvm_lun vlun;
-
- int reserved_blocks;
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Not used blocks i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
-};
-
struct gen_dev {
struct nvm_dev *dev;
int nr_luns;
- struct gen_lun *luns;
struct list_head area_list;
struct mutex lock;
struct list_head targets;
};
+/* Map between virtual and physical channel and lun */
+struct gen_ch_map {
+ int ch_off;
+ int nr_luns;
+ int *lun_offs;
+};
+
+struct gen_dev_map {
+ struct gen_ch_map *chnls;
+ int nr_chnls;
+};
+
struct gen_area {
struct list_head list;
sector_t begin;
sector_t end; /* end is excluded */
};
+static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
+{
+ return ch_map + 1;
+}
+
+typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
+
#define gen_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
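The gen_ch_map/gen_dev_map pair introduced here carries the per-target channel and LUN offsets that gen_map_to_dev() and gen_map_to_tgt() apply when translating addresses. Below is a minimal user-space sketch of the forward translation in the spirit of (not a copy of) the kernel helper; the structure layout follows the header above, while the sample offsets are hypothetical.

/* Illustrative target-to-device translation mirroring gen_map_to_dev().
 * The offsets below assume a target that starts at physical channel 1, LUN 2. */
#include <stdio.h>

struct gen_ch_map {
	int ch_off;    /* first physical channel owned by the target */
	int nr_luns;
	int *lun_offs; /* per-LUN offset within the physical channel */
};

static void map_to_dev(const struct gen_ch_map *ch_map, int *ch, int *lun)
{
	int lun_off = ch_map->lun_offs[*lun]; /* read at the target-relative index */

	*ch += ch_map->ch_off;
	*lun += lun_off;
}

int main(void)
{
	int lun_offs[4] = { 2, 2, 2, 2 };
	struct gen_ch_map ch_map = { .ch_off = 1, .nr_luns = 4, .lun_offs = lun_offs };
	int ch = 0, lun = 1; /* target-relative address */

	map_to_dev(&ch_map, &ch, &lun);
	printf("physical ch=%d lun=%d\n", ch, lun); /* expected: ch=1 lun=3 */
	return 0;
}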
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
deleted file mode 100644
index 305c181509a6..000000000000
--- a/drivers/lightnvm/lightnvm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2016 CNEX Labs. All rights reserved.
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- */
-
-#ifndef LIGHTNVM_H
-#define LIGHTNVM_H
-
-#include <linux/lightnvm.h>
-
-/* core -> sysfs.c */
-int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
-void nvm_sysfs_unregister_dev(struct nvm_dev *);
-int nvm_sysfs_register(void);
-void nvm_sysfs_unregister(void);
-
-/* sysfs > core */
-void nvm_free(struct nvm_dev *);
-
-#endif
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaadbf80c..9fb7de395915 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -28,6 +28,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk = a->rblk;
unsigned int pg_offset;
@@ -38,13 +39,13 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
+ div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -116,62 +117,35 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->sec_per_blk);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
+ return (rblk->next_page == dev->geo.sec_per_blk);
}
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_block *blk = rblk->parent;
- int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
-
- return lun_blk * rrpc->dev->sec_per_blk;
-}
-
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_block *blk = rblk->parent;
-
- return blk->id * rrpc->dev->sec_per_blk;
-}
-
-static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
- return l;
+ return rlun->id * dev->geo.sec_per_blk;
}
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+ struct rrpc_addr *gp)
{
+ struct rrpc_block *rblk = gp->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 addr = gp->addr;
struct ppa_addr paddr;
paddr.ppa = addr;
- return linear_to_generic_addr(dev, paddr);
+ paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+ paddr.g.ch = rlun->bppa.g.ch;
+ paddr.g.lun = rlun->bppa.g.lun;
+ paddr.g.blk = rblk->id;
+
+ return paddr;
}
/* requires lun->lock taken */
@@ -188,21 +162,47 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
*cur_rblk = new_rblk;
}
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
+ struct rrpc_lun *rlun)
+{
+ struct rrpc_block *rblk = NULL;
+
+ if (list_empty(&rlun->free_list))
+ goto out;
+
+ rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
+
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+
+out:
+ return rblk;
+}
+
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_block *blk;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk;
+ int is_gc = flags & NVM_IOTYPE_GC;
+
+ spin_lock(&rlun->lock);
+ if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
+ pr_err("nvm: rrpc: cannot give block to non GC request\n");
+ spin_unlock(&rlun->lock);
+ return NULL;
+ }
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk) {
- pr_err("nvm: rrpc: cannot get new block from media manager\n");
+ rblk = __rrpc_get_blk(rrpc, rlun);
+ if (!rblk) {
+ pr_err("nvm: rrpc: cannot get new block\n");
+ spin_unlock(&rlun->lock);
return NULL;
}
+ spin_unlock(&rlun->lock);
- rblk = rrpc_get_rblk(rlun, blk->id);
- blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
+ bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -212,7 +212,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- nvm_put_blk(rrpc->dev, rblk->parent);
+ struct rrpc_lun *rlun = rblk->rlun;
+
+ spin_lock(&rlun->lock);
+ if (rblk->state & NVM_BLK_ST_TGT) {
+ list_move_tail(&rblk->list, &rlun->free_list);
+ rlun->nr_free_blocks++;
+ rblk->state = NVM_BLK_ST_FREE;
+ } else if (rblk->state & NVM_BLK_ST_BAD) {
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ } else {
+ WARN_ON_ONCE(1);
+ pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id, rblk->state);
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ }
+ spin_unlock(&rlun->lock);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -280,13 +297,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
*/
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct request_queue *q = rrpc->dev->q;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct request_queue *q = dev->q;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
- int nr_sec_per_blk = rrpc->dev->sec_per_blk;
+ int nr_sec_per_blk = dev->geo.sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
@@ -309,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+ phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
try:
spin_lock(&rrpc->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[phys_addr];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
spin_unlock(&rrpc->rev_lock);
@@ -396,15 +414,23 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct ppa_addr ppa;
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
if (rrpc_move_valid_pages(rrpc, rblk))
goto put_back;
- if (nvm_erase_blk(dev, rblk->parent))
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+ ppa.g.blk = rblk->id;
+
+ if (nvm_erase_blk(dev, &ppa, 0))
goto put_back;
rrpc_put_blk(rrpc, rblk);
@@ -420,7 +446,7 @@ put_back:
/* the block with highest number of invalid pages, will be in the beginning
* of the list
*/
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
struct rrpc_block *rb)
{
if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -435,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblock, *max;
+ struct rrpc_block *rblk, *max;
BUG_ON(list_empty(prio_list));
max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblock, prio_list, prio)
- max = rblock_max_invalid(max, rblock);
+ list_for_each_entry(rblk, prio_list, prio)
+ max = rblk_max_invalid(max, rblk);
return max;
}
@@ -450,36 +476,37 @@ static void rrpc_lun_gc(struct work_struct *work)
{
struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
struct rrpc *rrpc = rlun->rrpc;
- struct nvm_lun *lun = rlun->parent;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block_gc *gcb;
unsigned int nr_blocks_need;
- nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+ nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
spin_lock(&rlun->lock);
- while (nr_blocks_need > lun->nr_free_blocks &&
+ while (nr_blocks_need > rlun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblock = block_prio_find_max(rlun);
- struct nvm_block *block = rblock->parent;
+ struct rrpc_block *rblk = block_prio_find_max(rlun);
- if (!rblock->nr_invalid_pages)
+ if (!rblk->nr_invalid_pages)
break;
gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
if (!gcb)
break;
- list_del_init(&rblock->prio);
+ list_del_init(&rblk->prio);
- BUG_ON(!block_is_full(rrpc, rblock));
+ WARN_ON(!block_is_full(rrpc, rblk));
- pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+ pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
gcb->rrpc = rrpc;
- gcb->rblk = rblock;
+ gcb->rblk = rblk;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -504,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
spin_unlock(&rlun->lock);
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
}
static const struct block_device_operations rrpc_fops = {
@@ -529,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
* estimate.
*/
rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->parent->nr_free_blocks >
- max_free->parent->nr_free_blocks)
+ if (rlun->nr_free_blocks > max_free->nr_free_blocks)
max_free = rlun;
}
@@ -553,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[gp->addr];
rev->addr = laddr;
spin_unlock(&rrpc->rev_lock);
@@ -568,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
if (block_is_full(rrpc, rblk))
goto out;
- addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+ addr = rblk->next_page;
rblk->next_page++;
out:
@@ -582,20 +609,22 @@ out:
* Returns rrpc_addr with the physical address and block. Returns NULL if no
* blocks in the next rlun are available.
*/
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
+ struct nvm_tgt_dev *tgt_dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk, **cur_rblk;
- struct nvm_lun *lun;
+ struct rrpc_addr *p;
+ struct ppa_addr ppa;
u64 paddr;
int gc_force = 0;
+ ppa.ppa = ADDR_EMPTY;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
- lun = rlun->parent;
- if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
- return NULL;
+ if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+ return ppa;
/*
* page allocation steps:
@@ -652,10 +681,15 @@ new_blk:
}
pr_err("rrpc: failed to allocate new block\n");
- return NULL;
+ return ppa;
done:
spin_unlock(&rlun->lock);
- return rrpc_update_map(rrpc, laddr, rblk, paddr);
+ p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+ if (!p)
+ return ppa;
+
+ /* return global address */
+ return rrpc_ppa_to_gaddr(tgt_dev, p);
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -675,21 +709,70 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
+{
+ struct rrpc_lun *rlun = NULL;
+ int i;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+ rrpc->luns[i].bppa.g.lun == p.g.lun) {
+ rlun = &rrpc->luns[i];
+ break;
+ }
+ }
+
+ return rlun;
+}
+
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
+
+ rlun = rrpc_ppa_to_lun(rrpc, ppa);
+ rblk = &rlun->blocks[ppa.g.blk];
+ rblk->state = NVM_BLK_ST_BAD;
+
+ nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+ void *comp_bits = &rqd->ppa_status;
+ struct ppa_addr ppa, prev_ppa;
+ int nr_ppas = rqd->nr_ppas;
+ int bit;
+
+ if (rqd->nr_ppas == 1)
+ __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
+
+ ppa_set_empty(&prev_ppa);
+ bit = -1;
+ while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+ ppa = rqd->ppa_list[bit];
+ if (ppa_cmp_blk(ppa, prev_ppa))
+ continue;
+
+ __rrpc_mark_bad_block(rrpc, ppa);
+ }
+}
+
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
sector_t laddr, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *p;
struct rrpc_block *rblk;
- struct nvm_lun *lun;
int cmnt_size, i;
for (i = 0; i < npages; i++) {
p = &rrpc->trans_map[laddr + i];
rblk = p->rblk;
- lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
+ if (unlikely(cmnt_size == dev->geo.sec_per_blk))
rrpc_run_gc(rrpc, rblk);
}
}
@@ -697,12 +780,17 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
- if (bio_data_dir(rqd->bio) == WRITE)
+ if (bio_data_dir(rqd->bio) == WRITE) {
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+ rrpc_mark_bad_block(rrpc, rqd);
+
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ }
bio_put(rqd->bio);
@@ -712,7 +800,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
rrpc_unlock_rq(rrpc, rqd);
if (npages > 1)
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -720,6 +808,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
struct rrpc_addr *gp;
sector_t laddr = rrpc_get_laddr(bio);
@@ -727,7 +816,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
@@ -737,12 +826,11 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- gp->addr);
+ rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
return NVM_IO_DONE;
}
@@ -756,7 +844,6 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
struct rrpc_addr *gp;
@@ -768,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+ rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
@@ -776,7 +863,6 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
}
rqd->opcode = NVM_OP_HBREAD;
- rrqd->addr = gp;
return NVM_IO_OK;
}
@@ -784,31 +870,31 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
sector_t laddr = rrpc_get_laddr(bio);
int is_gc = flags & NVM_IOTYPE_GC;
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- p->addr);
+ rqd->ppa_list[i] = p;
}
rqd->opcode = NVM_OP_HBWRITE;
@@ -819,8 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
@@ -828,16 +913,15 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_REQUEUE;
p = rrpc_map_page(rrpc, laddr, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+ rqd->ppa_addr = p;
rqd->opcode = NVM_OP_HBWRITE;
- rrqd->addr = p;
return NVM_IO_OK;
}
@@ -845,8 +929,10 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+ rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("rrpc: not able to allocate ppa list\n");
@@ -869,14 +955,15 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- int err;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
uint8_t nr_pages = rrpc_get_pages(bio);
int bio_size = bio_sectors(bio) << 9;
+ int err;
- if (bio_size < rrpc->dev->sec_size)
+ if (bio_size < dev->geo.sec_size)
return NVM_IO_ERR;
- else if (bio_size > rrpc->dev->max_rq_size)
+ else if (bio_size > dev->geo.max_rq_size)
return NVM_IO_ERR;
err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
@@ -889,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
rqd->nr_ppas = nr_pages;
rrq->flags = flags;
- err = nvm_submit_io(rrpc->dev, rqd);
+ err = nvm_submit_io(dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(rrpc->dev,
- rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+ rqd->dma_ppa_list);
}
return NVM_IO_ERR;
}
@@ -911,6 +998,8 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
@@ -997,25 +1086,24 @@ static void rrpc_map_free(struct rrpc *rrpc)
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- u64 elba = slba + nlb;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
u64 i;
- if (unlikely(elba > dev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
for (i = 0; i < nlb; i++) {
+ struct ppa_addr gaddr;
u64 pba = le64_to_cpu(entries[i]);
unsigned int mod;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
+ pr_err("nvm: Maybe loaded an old target L2P\n");
return -EINVAL;
}
@@ -1028,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
div_u64_rem(pba, rrpc->nr_sects, &mod);
+ gaddr = rrpc_recov_addr(dev, pba);
+ rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+ if (!rlun) {
+ pr_err("rrpc: l2p corruption on lba %llu\n",
+ slba + i);
+ return -EINVAL;
+ }
+
+ rblk = &rlun->blocks[gaddr.g.blk];
+ if (!rblk->state) {
+ /* at this point, we don't know anything about the
+ * block. It's up to the FTL on top to re-establish the
+ * block state. The block is assumed to be open.
+ */
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+ }
+
addr[i].addr = pba;
+ addr[i].rblk = rblk;
raddr[mod].addr = slba + i;
}
@@ -1037,7 +1145,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
static int rrpc_map_init(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t i;
int ret;
@@ -1058,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
r->addr = ADDR_EMPTY;
}
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
+ ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1102,7 +1207,7 @@ static int rrpc_core_init(struct rrpc *rrpc)
if (!rrpc->page_pool)
return -ENOMEM;
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+ rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
rrpc_gcb_cache);
if (!rrpc->gcb_pool)
return -ENOMEM;
@@ -1126,8 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun;
struct rrpc_lun *rlun;
int i;
@@ -1136,23 +1239,74 @@ static void rrpc_luns_free(struct rrpc *rrpc)
for (i = 0; i < rrpc->nr_luns; i++) {
rlun = &rrpc->luns[i];
- lun = rlun->parent;
- if (!lun)
- break;
- dev->mt->release_lun(dev, lun->id);
vfree(rlun->blocks);
}
kfree(rrpc->luns);
}
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_block *rblk;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int nr_blks;
+ int i;
+ int ret;
+
+ if (!dev->parent->ops->get_bb_tbl)
+ return 0;
+
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+
+ ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+ if (ret) {
+ pr_err("rrpc: could not get BB table\n");
+ goto out;
+ }
+
+ nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_FREE)
+ continue;
+
+ rblk = &rlun->blocks[i];
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ rlun->nr_free_blocks--;
+ }
+
+out:
+ kfree(blks);
+ return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+ rlun->bppa.ppa = 0;
+ rlun->bppa.g.ch = ppa.g.ch;
+ rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc_lun *rlun;
int i, j, ret = -EINVAL;
- if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1166,43 +1320,46 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- int lunid = lun_begin + i;
- struct nvm_lun *lun;
-
- if (dev->mt->reserve_lun(dev, lunid)) {
- pr_err("rrpc: lun %u is already allocated\n", lunid);
- goto err;
- }
-
- lun = dev->mt->get_lun(dev, lunid);
- if (!lun)
- goto err;
-
rlun = &rrpc->luns[i];
- rlun->parent = lun;
+ rlun->id = i;
+ rrpc_set_lun_ppa(rlun, luns[i]);
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
+ geo->blks_per_lun);
if (!rlun->blocks) {
ret = -ENOMEM;
goto err;
}
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
+ INIT_LIST_HEAD(&rlun->free_list);
+ INIT_LIST_HEAD(&rlun->used_list);
+ INIT_LIST_HEAD(&rlun->bb_list);
+
+ for (j = 0; j < geo->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
- rblk->parent = blk;
+ rblk->id = j;
rblk->rlun = rlun;
+ rblk->state = NVM_BLK_T_FREE;
INIT_LIST_HEAD(&rblk->prio);
+ INIT_LIST_HEAD(&rblk->list);
spin_lock_init(&rblk->lock);
+
+ list_add_tail(&rblk->list, &rlun->free_list);
}
rlun->rrpc = rrpc;
+ rlun->nr_free_blocks = geo->blks_per_lun;
+ rlun->reserved_blocks = 2; /* for GC only */
+
INIT_LIST_HEAD(&rlun->prio_list);
INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
+
+ if (rrpc_bb_discovery(dev, rlun))
+ goto err;
+
}
return 0;
@@ -1213,27 +1370,25 @@ err:
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t size = rrpc->nr_sects * dev->sec_size;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t size = rrpc->nr_sects * dev->geo.sec_size;
int ret;
size >>= 9;
- ret = mt->get_area(dev, begin, size);
+ ret = nvm_get_area(dev, begin, size);
if (!ret)
- *begin >>= (ilog2(dev->sec_size) - 9);
+ *begin >>= (ilog2(dev->geo.sec_size) - 9);
return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
- mt->put_area(dev, begin);
+ nvm_put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1262,11 +1417,11 @@ static void rrpc_exit(void *private)
static sector_t rrpc_capacity(void *private)
{
struct rrpc *rrpc = private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1285,13 +1440,13 @@ static sector_t rrpc_capacity(void *private)
*/
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
u64 bpaddr, paddr, pladdr;
bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
@@ -1311,6 +1466,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
static int rrpc_blocks_init(struct rrpc *rrpc)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk;
int lun_iter, blk_iter;
@@ -1318,7 +1474,7 @@ static int rrpc_blocks_init(struct rrpc *rrpc)
for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
rlun = &rrpc->luns[lun_iter];
- for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+ for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
blk_iter++) {
rblk = &rlun->blocks[blk_iter];
rrpc_block_map_update(rrpc, rblk);
@@ -1357,11 +1513,11 @@ err:
static struct nvm_tgt_type tt_rrpc;
-static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
{
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc *rrpc;
sector_t soffset;
int ret;
@@ -1384,9 +1540,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
spin_lock_init(&rrpc->bio_lock);
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
- rrpc->nr_luns = lun_end - lun_begin + 1;
- rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
- rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+ rrpc->nr_luns = geo->nr_luns;
+ rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1398,15 +1553,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
}
rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ ret = rrpc_luns_init(rrpc, dev->luns);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 5e87d52cb983..94e4d73116b2 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -48,14 +48,15 @@ struct rrpc_inflight_rq {
struct rrpc_rq {
struct rrpc_inflight_rq inflight_rq;
- struct rrpc_addr *addr;
unsigned long flags;
};
struct rrpc_block {
- struct nvm_block *parent;
+ int id; /* id inside of LUN */
struct rrpc_lun *rlun;
- struct list_head prio;
+
+ struct list_head prio; /* LUN CG list */
+ struct list_head list; /* LUN free, used, bb list */
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
@@ -65,21 +66,38 @@ struct rrpc_block {
/* number of pages that are invalid, wrt host page size */
unsigned int nr_invalid_pages;
+ int state;
+
spinlock_t lock;
atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
struct rrpc_lun {
struct rrpc *rrpc;
- struct nvm_lun *parent;
+
+ int id;
+ struct ppa_addr bppa;
+
struct rrpc_block *cur, *gc_cur;
struct rrpc_block *blocks; /* Reference to block allocation */
struct list_head prio_list; /* Blocks that may be GC'ed */
struct list_head wblk_list; /* Queued blocks to be written to */
+ /* lun block lists */
+ struct list_head used_list; /* In-use blocks */
+ struct list_head free_list; /* Unused blocks, i.e. released
+ * and ready for use
+ */
+ struct list_head bb_list; /* Bad blocks. Mutually exclusive with
+ * free_list and used_list
+ */
+ unsigned int nr_free_blocks; /* Number of unused blocks */
+
struct work_struct ws_gc;
+ int reserved_blocks;
+
spinlock_t lock;
};
@@ -87,19 +105,16 @@ struct rrpc {
/* instance must be kept at the top to resolve rrpc in unprep */
struct nvm_tgt_instance instance;
- struct nvm_dev *dev;
+ struct nvm_tgt_dev *dev;
struct gendisk *disk;
sector_t soffset; /* logical sector offset */
- u64 poffset; /* physical page offset */
- int lun_offset;
int nr_luns;
struct rrpc_lun *luns;
/* calculated values */
unsigned long long nr_sects;
- unsigned long total_blocks;
/* Write strategy variables. Move these into a per-strategy
* structure
@@ -150,13 +165,37 @@ struct rrpc_rev_addr {
u64 addr;
};
-static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
- int blk_id)
+static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
+ struct ppa_addr r)
+{
+ struct ppa_addr l;
+ int secs, pgs;
+ sector_t ppa = r.ppa;
+
+ l.ppa = 0;
+
+ div_u64_rem(ppa, geo->sec_per_pg, &secs);
+ l.g.sec = secs;
+
+ sector_div(ppa, geo->sec_per_pg);
+ div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+ l.g.pg = pgs;
+
+ return l;
+}
+
+static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
+{
+ return linear_to_generic_addr(&dev->geo, pba);
+}
+
+static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc *rrpc = rlun->rrpc;
- int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_lun *rlun = rblk->rlun;
- return &rlun->blocks[lun_blk];
+ return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
static inline sector_t rrpc_get_laddr(struct bio *bio)
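rrpc_linear_to_generic_addr() above peels fields off a linear sector number with successive remainder/divide steps: sector within the flash page first, then page within the block, with whatever remains indexing the higher levels. The userspace sketch below walks through the same arithmetic with plain / and % in place of div_u64_rem()/sector_div(); the geometry constants are assumptions picked for illustration.

#include <stdio.h>

/* Assumed geometry for illustration only. */
#define SEC_PER_PG	4ULL
#define PGS_PER_BLK	512ULL

int main(void)
{
	unsigned long long linear = 12345;	/* linear sector address */
	unsigned long long rem = linear;
	unsigned long long sec, pg;

	sec = rem % SEC_PER_PG;		/* sector within the flash page */
	rem /= SEC_PER_PG;
	pg = rem % PGS_PER_BLK;		/* page within the block */
	rem /= PGS_PER_BLK;		/* remainder indexes block/LUN/channel */

	printf("linear %llu -> sec %llu, pg %llu, remainder %llu\n",
	       linear, sec, pg, rem);
	return 0;
}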
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a75bd28aaca3..12002bf4efc2 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -62,7 +62,8 @@ static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
- int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+ struct nvm_geo *geo = &dev->geo;
+ int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
int i;
for (i = 0; i < nr_rows; i++)
@@ -71,7 +72,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
/* if possible, place sysblk at first channel, middle channel and last
* channel of the device. If not, create only one or two sys blocks
*/
- switch (dev->nr_chnls) {
+ switch (geo->nr_chnls) {
case 2:
sysblk_ppas[1].g.ch = 1;
/* fall-through */
@@ -80,8 +81,8 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
break;
default:
sysblk_ppas[0].g.ch = 0;
- sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
- sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+ sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
+ sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
break;
}
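The switch in nvm_setup_sysblks() above spreads up to MAX_SYSBLKS system-block rows across the channels: channel 0 always gets one, a two-channel device also uses channel 1, and larger devices place the remaining rows on the middle and last channels. A small standalone sketch (the channel counts passed in main() are arbitrary examples) reproduces that placement rule.

#include <stdio.h>

#define MAX_SYSBLKS	3

static void place_sysblks(int nr_chnls)
{
	int ch[MAX_SYSBLKS] = { 0, 0, 0 };
	int nr_rows = nr_chnls < MAX_SYSBLKS ? nr_chnls : MAX_SYSBLKS;
	int i;

	switch (nr_chnls) {
	case 2:
		ch[1] = 1;
		/* fall-through */
	case 1:
		ch[0] = 0;
		break;
	default:
		ch[0] = 0;
		ch[1] = nr_chnls / 2;
		ch[2] = nr_chnls - 1;
		break;
	}

	printf("%d channel(s):", nr_chnls);
	for (i = 0; i < nr_rows; i++)
		printf(" row%d->ch%d", i, ch[i]);
	printf("\n");
}

int main(void)
{
	place_sysblks(1);
	place_sysblks(2);
	place_sysblks(8);
	return 0;
}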
@@ -162,11 +163,12 @@ static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *ppas, int get_free)
{
+ struct nvm_geo *geo = &dev->geo;
int i, nr_blks, ret = 0;
u8 *blks;
s->nr_ppas = 0;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
@@ -210,13 +212,14 @@ err_get:
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block *cur;
int pg, ret, found = 0;
/* the full buffer for a flash page is allocated. Only the start of it
* contains the system block information
*/
- cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
+ cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
@@ -225,7 +228,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, dev->pfpg_size);
+ cur, geo->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -267,34 +270,16 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
return found;
}
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+ int type)
{
- struct nvm_rq rqd;
- int ret;
-
- if (s->nr_ppas > dev->ops->max_phys_sect) {
- pr_err("nvm: unable to update all sysblocks atomically\n");
- return -EINVAL;
- }
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(dev, &rqd);
- if (ret) {
- pr_err("nvm: sysblk failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
+ return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
}
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
+ struct nvm_geo *geo = &dev->geo;
struct nvm_system_block nvmsb;
void *buf;
int i, sect, ret = 0;
@@ -302,12 +287,12 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
nvm_cpu_to_sysblk(&nvmsb, info);
- buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
+ buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
- ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+ ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas) {
ret = -ENOMEM;
goto err;
@@ -324,15 +309,15 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
ppas[0].g.pg);
/* Expand to all sectors within a flash page */
- if (dev->sec_per_pg > 1) {
- for (sect = 1; sect < dev->sec_per_pg; sect++) {
+ if (geo->sec_per_pg > 1) {
+ for (sect = 1; sect < geo->sec_per_pg; sect++) {
ppas[sect].ppa = ppas[0].ppa;
ppas[sect].g.sec = sect;
}
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -341,8 +326,8 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
break;
}
- ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+ ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
+ NVM_IO_SLC_MODE, buf, geo->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
@@ -379,7 +364,7 @@ static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
ppa->g.pg = ppa_to_slc(dev, 0);
- ret = nvm_erase_ppa(dev, ppa, 1);
+ ret = nvm_erase_ppa(dev, ppa, 1, 0);
if (ret)
return ret;
@@ -546,6 +531,7 @@ err_sysblk:
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
int ret;
@@ -560,7 +546,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
return -EINVAL;
- if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+ if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
pr_err("nvm: memory does not support SLC access\n");
return -EINVAL;
}
@@ -573,7 +559,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
if (ret)
goto err_mark;
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
if (ret)
goto err_mark;
@@ -590,11 +576,11 @@ static int factory_nblks(int nblks)
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
-static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
+static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
{
- int nblks = factory_nblks(dev->blks_per_lun);
+ int nblks = factory_nblks(geo->blks_per_lun);
- return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
+ return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}
@@ -608,7 +594,7 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
if (nr_blks < 0)
return nr_blks;
- lunoff = factory_blk_offset(dev, ppa);
+ lunoff = factory_blk_offset(&dev->geo, ppa);
/* bits that are not set correspond to blocks that must be erased */
for (i = 0; i < nr_blks; i++) {
@@ -637,19 +623,19 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
int max_ppas, unsigned long *blk_bitmap)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
unsigned long *offset;
while (!done) {
done = 1;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
- idx = factory_blk_offset(dev, ppa);
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
+ idx = factory_blk_offset(geo, ppa);
offset = &blk_bitmap[idx];
- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
+ blkid = find_first_zero_bit(offset, geo->blks_per_lun);
+ if (blkid >= geo->blks_per_lun)
continue;
set_bit(blkid, offset);
@@ -674,16 +660,17 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr ppa;
int ch, lun, nr_blks, ret = 0;
u8 *blks;
- nr_blks = dev->blks_per_lun * dev->plane_mode;
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
- nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
@@ -701,14 +688,15 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
- int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+ int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
unsigned long *blk_bitmap;
- blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
GFP_KERNEL);
if (!blk_bitmap)
return ret;
@@ -725,7 +713,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
/* continue to erase blocks until the list is empty */
while ((ppa_cnt =
nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
- nvm_erase_ppa(dev, ppas, ppa_cnt);
+ nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
@@ -733,7 +721,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
- ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+ ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
}
err_ppas:
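factory_nblks() above rounds each LUN's block count up to a whole number of BITS_PER_LONG-sized words, and factory_blk_offset() then turns a (channel, LUN) pair into the word index of that LUN's slice of the shared bad-block bitmap. The sketch below repeats the arithmetic in userspace; BLKS_PER_LUN and LUNS_PER_CHNL are made-up values, not real device geometry.

#include <stdio.h>

#define BITS_PER_LONG	64
/* Assumed geometry for illustration only. */
#define BLKS_PER_LUN	1020
#define LUNS_PER_CHNL	4

/* Round the per-LUN block count up to a whole number of longs. */
static int factory_nblks(int nblks)
{
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

/* Word offset of a given (channel, lun) slice inside the shared bitmap. */
static unsigned int blk_offset(int ch, int lun)
{
	int nblks = factory_nblks(BLKS_PER_LUN);

	return ((ch * LUNS_PER_CHNL * nblks) + (lun * nblks)) / BITS_PER_LONG;
}

int main(void)
{
	printf("rounded blocks per lun: %d\n", factory_nblks(BLKS_PER_LUN));
	printf("bitmap word offset for ch1/lun2: %u\n", blk_offset(1, 2));
	return 0;
}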
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
deleted file mode 100644
index 0338c27ab95a..000000000000
--- a/drivers/lightnvm/sysfs.c
+++ /dev/null
@@ -1,198 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/lightnvm.h>
-#include <linux/miscdevice.h>
-#include <linux/kobject.h>
-#include <linux/blk-mq.h>
-
-#include "lightnvm.h"
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
- struct nvm_id *id = &ndev->identity;
- struct nvm_id_group *grp = &id->groups[0];
- struct attribute *attr = &dattr->attr;
-
- if (strcmp(attr->name, "version") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
- } else if (strcmp(attr->name, "vendor_opcode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
- } else if (strcmp(attr->name, "capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
- } else if (strcmp(attr->name, "device_mode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
- } else if (strcmp(attr->name, "media_manager") == 0) {
- if (!ndev->mt)
- return scnprintf(page, PAGE_SIZE, "%s\n", "none");
- return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
- } else if (strcmp(attr->name, "ppa_format") == 0) {
- return scnprintf(page, PAGE_SIZE,
- "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- id->ppaf.ch_offset, id->ppaf.ch_len,
- id->ppaf.lun_offset, id->ppaf.lun_len,
- id->ppaf.pln_offset, id->ppaf.pln_len,
- id->ppaf.blk_offset, id->ppaf.blk_len,
- id->ppaf.pg_offset, id->ppaf.pg_len,
- id->ppaf.sect_offset, id->ppaf.sect_len);
- } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
- } else if (strcmp(attr->name, "flash_media_type") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
- } else if (strcmp(attr->name, "num_channels") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
- } else if (strcmp(attr->name, "num_luns") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
- } else if (strcmp(attr->name, "num_planes") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
- } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
- } else if (strcmp(attr->name, "num_pages") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
- } else if (strcmp(attr->name, "page_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
- } else if (strcmp(attr->name, "hw_sector_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
- } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
- } else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
- } else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
- } else if (strcmp(attr->name, "prog_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
- } else if (strcmp(attr->name, "prog_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
- } else if (strcmp(attr->name, "erase_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
- } else if (strcmp(attr->name, "erase_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
- } else if (strcmp(attr->name, "multiplane_modes") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
- } else if (strcmp(attr->name, "media_capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
- } else if (strcmp(attr->name, "max_phys_secs") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n",
- ndev->ops->max_phys_sect);
- } else {
- return scnprintf(page,
- PAGE_SIZE,
- "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
- attr->name);
- }
-}
-
-#define NVM_DEV_ATTR_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(vendor_opcode);
-static NVM_DEV_ATTR_RO(capabilities);
-static NVM_DEV_ATTR_RO(device_mode);
-static NVM_DEV_ATTR_RO(ppa_format);
-static NVM_DEV_ATTR_RO(media_manager);
-
-static NVM_DEV_ATTR_RO(media_type);
-static NVM_DEV_ATTR_RO(flash_media_type);
-static NVM_DEV_ATTR_RO(num_channels);
-static NVM_DEV_ATTR_RO(num_luns);
-static NVM_DEV_ATTR_RO(num_planes);
-static NVM_DEV_ATTR_RO(num_blocks);
-static NVM_DEV_ATTR_RO(num_pages);
-static NVM_DEV_ATTR_RO(page_size);
-static NVM_DEV_ATTR_RO(hw_sector_size);
-static NVM_DEV_ATTR_RO(oob_sector_size);
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-static NVM_DEV_ATTR_RO(prog_typ);
-static NVM_DEV_ATTR_RO(prog_max);
-static NVM_DEV_ATTR_RO(erase_typ);
-static NVM_DEV_ATTR_RO(erase_max);
-static NVM_DEV_ATTR_RO(multiplane_modes);
-static NVM_DEV_ATTR_RO(media_capabilities);
-static NVM_DEV_ATTR_RO(max_phys_secs);
-
-#define NVM_DEV_ATTR(_name) (dev_attr_##_name##)
-
-static struct attribute *nvm_dev_attrs[] = {
- &dev_attr_version.attr,
- &dev_attr_vendor_opcode.attr,
- &dev_attr_capabilities.attr,
- &dev_attr_device_mode.attr,
- &dev_attr_media_manager.attr,
-
- &dev_attr_ppa_format.attr,
- &dev_attr_media_type.attr,
- &dev_attr_flash_media_type.attr,
- &dev_attr_num_channels.attr,
- &dev_attr_num_luns.attr,
- &dev_attr_num_planes.attr,
- &dev_attr_num_blocks.attr,
- &dev_attr_num_pages.attr,
- &dev_attr_page_size.attr,
- &dev_attr_hw_sector_size.attr,
- &dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
- &dev_attr_prog_typ.attr,
- &dev_attr_prog_max.attr,
- &dev_attr_erase_typ.attr,
- &dev_attr_erase_max.attr,
- &dev_attr_multiplane_modes.attr,
- &dev_attr_media_capabilities.attr,
- &dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static struct attribute_group nvm_dev_attr_group = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs,
-};
-
-static const struct attribute_group *nvm_dev_attr_groups[] = {
- &nvm_dev_attr_group,
- NULL,
-};
-
-static void nvm_dev_release(struct device *device)
-{
- struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
- struct request_queue *q = dev->q;
-
- pr_debug("nvm/sysfs: `nvm_dev_release`\n");
-
- blk_mq_unregister_dev(device, q);
-
- nvm_free(dev);
-}
-
-static struct device_type nvm_type = {
- .name = "lightnvm",
- .groups = nvm_dev_attr_groups,
- .release = nvm_dev_release,
-};
-
-int nvm_sysfs_register_dev(struct nvm_dev *dev)
-{
- int ret;
-
- if (!dev->parent_dev)
- return 0;
-
- dev->dev.parent = dev->parent_dev;
- dev_set_name(&dev->dev, "%s", dev->name);
- dev->dev.type = &nvm_type;
- device_initialize(&dev->dev);
- ret = device_add(&dev->dev);
-
- if (!ret)
- blk_mq_register_dev(&dev->dev, dev->q);
-
- return ret;
-}
-
-void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
-{
- if (dev && dev->parent_dev)
- kobject_put(&dev->dev.kobj);
-}
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 08c87fadca8c..1f32688c312d 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -65,6 +65,7 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
#include "mailbox.h"
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method |= TXDONE_BY_ACK;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
if (pcc_doorbell_irq[subspace_id] > 0) {
int rc;
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
pcc_doorbell_irq[subspace_id]);
+ pcc_mbox_free_channel(chan);
chan = ERR_PTR(rc);
}
}
- spin_unlock_irqrestore(&chan->lock, flags);
-
return chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
return;
}
+ if (pcc_doorbell_irq[id] > 0)
+ devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
chan->txdone_method = TXDONE_BY_POLL;
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
-
spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
-
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 4ca2739b4fad..ee7fb6ec96bd 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -149,7 +149,7 @@ static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
reg = readl(*base);
bar_count = BAR_CNT(reg);
- if (bar_count <= 0 && bar_count > CHAMELEON_BAR_MAX)
+ if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
return -ENODEV;
c = kcalloc(bar_count, sizeof(struct chameleon_bar),
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345a44a6..b7767da50c26 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,9 +240,17 @@ config DM_BUFIO
as a cache, holding recently-read blocks in memory and performing
delayed writes.
+config DM_DEBUG_BLOCK_MANAGER_LOCKING
+ bool "Block manager locking"
+ depends on DM_BUFIO
+ ---help---
+ Block manager locking can catch various metadata corruption issues.
+
+ If unsure, say N.
+
config DM_DEBUG_BLOCK_STACK_TRACING
bool "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_BUFIO
+ depends on STACKTRACE_SUPPORT && DM_DEBUG_BLOCK_MANAGER_LOCKING
select STACKTRACE
---help---
Enable this for messages that may help debug problems with the
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db40cd7b..6fdd8e252760 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, b->keys.set[0].data);
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
- bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+ b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
bch_bio_map(b->bio, i);
/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5f6ae6..06f55056aaae 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_opf = REQ_OP_READ | REQ_META;
bch_bio_map(bio, sorted);
submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec bv;
- struct bvec_iter iter;
+ struct bio_vec bv, cbv;
+ struct bvec_iter iter, citer = { 0 };
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
- bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+ check->bi_opf = REQ_OP_READ;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
+ citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
- void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+ void *p2;
+
+ cbv = bio_iter_iovec(check, citer);
+ p2 = page_address(cbv.bv_page);
cache_set_err_on(memcmp(p1 + bv.bv_offset,
p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
(uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
+ bio_advance_iter(check, &citer, bv.bv_len);
}
bio_free_pages(check);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e97b0acf7b8d..db45a88c0ce9 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio);
- bio->bi_max_vecs = bucket_pages(c);
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023e12d4..1198e53d5670 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs, 1);
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
- bio->bi_max_vecs = 1;
- bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5c4bddecfaf0..13b8a907006d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
- PAGE_SECTORS);
bio->bi_private = &io->cl;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e424b3..f49c5417527d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
- op_is_write(bio_op(bio)) &&
- (bio->bi_opf & REQ_SYNC))
+ op_is_write(bio->bi_opf) &&
+ op_is_sync(bio->bi_opf))
goto rescale;
spin_lock(&dc->io_lock);
@@ -623,7 +623,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
struct bio *bio = &s->bio.bio;
- bio_init(bio);
+ bio_init(bio, NULL, 0);
__bio_clone_fast(bio, orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
flush->bi_bdev = bio->bi_bdev;
flush->bi_end_io = request_endio;
flush->bi_private = cl;
- bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+ flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
closure_bio_submit(flush, cl);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad441cd76..2fb5bfeb43e2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+ uuid_io(c, REQ_OP_READ, 0, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+ prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
@@ -1152,9 +1152,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
- bio_init(&dc->sb_bio);
- dc->sb_bio.bi_max_vecs = 1;
- dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+ bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
@@ -1814,9 +1812,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio);
- ca->journal.bio.bi_max_vecs = 8;
- ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+ bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
@@ -1852,9 +1848,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
- bio_init(&ca->sb_bio);
- ca->sb_bio.bi_max_vecs = 1;
- ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+ bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e503a5..69e1ae59cab8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio);
+ bio_init(bio, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
- bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf565167..629bd1a502fd 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_opf & REQ_SYNC ||
- in_use <= CUTOFF_WRITEBACK;
+ return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}
static inline void bch_writeback_queue(struct cached_dev *dc)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d826927a3bf..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -27,6 +27,7 @@
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
@@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
- struct md_rdev *rdev = NULL;
+ struct md_rdev *rdev;
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+restart:
+ rdev = NULL;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset;
@@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page);
}
- if (wait)
- md_super_wait(mddev);
+ if (wait && md_super_wait(mddev) < 0)
+ goto restart;
return 0;
bad_alignment:
@@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
ret = -EIO;
out:
if (ret)
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
- (int)PAGE_SIZE,
- (unsigned long long)index << PAGE_SHIFT,
- ret);
+ pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
+ ret);
return ret;
}
@@ -416,6 +419,28 @@ out:
* bitmap file superblock operations
*/
+/*
+ * bitmap_wait_writes() should be called before writing any bitmap
+ * blocks, to ensure previous writes, particularly from
+ * bitmap_daemon_work(), have completed.
+ */
+static void bitmap_wait_writes(struct bitmap *bitmap)
+{
+ if (bitmap->storage.file)
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ else
+ /* Note that we ignore the return value. The writes
+ * might have failed, but that would just mean that
+ * some bits which should be cleared haven't been,
+ * which is safe. The relevant bitmap blocks will
+ * probably get written again, but there is no great
+ * loss if they aren't.
+ */
+ md_super_wait(bitmap->mddev);
+}
+
+
/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
@@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
sb = kmap_atomic(bitmap->storage.sb_page);
- printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
- printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
- printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
- printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n",
- *(__u32 *)(sb->uuid+0),
- *(__u32 *)(sb->uuid+4),
- *(__u32 *)(sb->uuid+8),
- *(__u32 *)(sb->uuid+12));
- printk(KERN_DEBUG " events: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events));
- printk(KERN_DEBUG "events cleared: %llu\n",
- (unsigned long long) le64_to_cpu(sb->events_cleared));
- printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state));
- printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize));
- printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
- printk(KERN_DEBUG " sync size: %llu KB\n",
- (unsigned long long)le64_to_cpu(sb->sync_size)/2);
- printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
+ pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
+ pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+ pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
+ *(__u32 *)(sb->uuid+0),
+ *(__u32 *)(sb->uuid+4),
+ *(__u32 *)(sb->uuid+8),
+ *(__u32 *)(sb->uuid+12));
+ pr_debug(" events: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events));
+ pr_debug("events cleared: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events_cleared));
+ pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
+ pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+ pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" sync size: %llu KB\n",
+ (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+ pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb);
- printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
- printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ pr_debug("Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@@ -584,7 +609,7 @@ re_read:
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
- pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
bitmap->cluster_slot, offset);
}
@@ -634,7 +659,7 @@ re_read:
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
if (reason) {
- printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
+ pr_warn("%s: invalid bitmap file superblock: %s\n",
bmname(bitmap), reason);
goto out;
}
@@ -648,18 +673,15 @@ re_read:
* bitmap's UUID and event counter to the mddev's
*/
if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
- printk(KERN_INFO
- "%s: bitmap superblock UUID mismatch\n",
- bmname(bitmap));
+ pr_warn("%s: bitmap superblock UUID mismatch\n",
+ bmname(bitmap));
goto out;
}
events = le64_to_cpu(sb->events);
if (!nodes && (events < bitmap->mddev->events)) {
- printk(KERN_INFO
- "%s: bitmap file is out of date (%llu < %llu) "
- "-- forcing full recovery\n",
- bmname(bitmap), events,
- (unsigned long long) bitmap->mddev->events);
+ pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
+ bmname(bitmap), events,
+ (unsigned long long) bitmap->mddev->events);
set_bit(BITMAP_STALE, &bitmap->flags);
}
}
@@ -679,8 +701,8 @@ out:
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
- pr_err("%s: Could not setup cluster service (%d)\n",
- bmname(bitmap), err);
+ pr_warn("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
goto out_no_sb;
}
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE);
- printk(KERN_ALERT
- "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
+ pr_warn("%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
kfree(path);
} else
- printk(KERN_ALERT
- "%s: disabling internal bitmap due to errors\n",
- bmname(bitmap));
+ pr_warn("%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
}
@@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
+ int writing = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) {
+ if (!writing) {
+ bitmap_wait_writes(bitmap);
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_unplug");
+ }
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
+ writing = 1;
}
}
- if (bitmap->storage.file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- else
- md_super_wait(bitmap->mddev);
+ if (writing)
+ bitmap_wait_writes(bitmap);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
@@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate)
- printk(KERN_INFO "%s: bitmap file is out of date, doing full "
- "recovery\n", bmname(bitmap));
+ pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
if (file && i_size_read(file->f_mapping->host) < store->bytes) {
- printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
- bmname(bitmap),
- (unsigned long) i_size_read(file->f_mapping->host),
- store->bytes);
+ pr_warn("%s: bitmap file too short %lu < %lu\n",
+ bmname(bitmap),
+ (unsigned long) i_size_read(file->f_mapping->host),
+ store->bytes);
goto err;
}
@@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = 0;
}
- printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu pages, set %lu of %lu bits\n",
- bmname(bitmap), store->file_pages,
- bit_cnt, chunks);
+ pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), store->file_pages,
+ bit_cnt, chunks);
return 0;
err:
- printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
- bmname(bitmap), ret);
+ pr_warn("%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
+ if (bitmap->mddev->queue)
+ blk_add_trace_msg(bitmap->mddev->queue,
+ "md bitmap_daemon_work");
+
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
* we will write it.
@@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
}
spin_unlock_irq(&counts->lock);
+ bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait
* for them.
@@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = sector;
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
if (err)
goto error;
- printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
- bitmap->counts.pages, bmname(bitmap));
+ pr_debug("created bitmap (%lu pages) for device %s\n",
+ bitmap->counts.pages, bmname(bitmap));
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
if (err)
@@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
!bitmap->mddev->bitmap_info.external,
mddev_is_clustered(bitmap->mddev)
? bitmap->cluster_slot : 0);
- if (ret)
+ if (ret) {
+ bitmap_file_unmap(&store);
goto err;
+ }
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
@@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift;
- pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
} else
bitmap->counts.bp[page].count += 1;
@@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* Ensure new bitmap info is stored in
* metadata promptly.
*/
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
rv = 0;
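bitmap_wait_writes(), added above, enforces an ordering rule: before a new round of bitmap block writes starts, every write issued by the previous round (for example from bitmap_daemon_work()) must have drained, which the file-backed case checks by waiting for a pending-write counter to reach zero. Below is a minimal userspace analogue of that pattern using POSIX threads and a counter guarded by a mutex and condition variable; the names and structure are mine, not the kernel's.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int pending_writes;

static void *writer(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend to do some I/O */
	pthread_mutex_lock(&lock);
	if (--pending_writes == 0)
		pthread_cond_broadcast(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of bitmap_wait_writes(): block until the previous batch drains. */
static void wait_writes(void)
{
	pthread_mutex_lock(&lock);
	while (pending_writes > 0)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t[4];
	int i;

	pending_writes = 4;
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, writer, NULL);

	wait_writes();			/* ordering point before the next batch */
	printf("previous batch complete; safe to issue new writes\n");

	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}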
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc3875f..84d2f0e4c754 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
char *ptr;
int len;
- bio_init(&b->bio);
- b->bio.bi_io_vec = b->bio_vec;
- b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+ bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = inline_endio;
@@ -822,12 +820,14 @@ enum new_flag {
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
+ bool tried_noio_alloc = false;
/*
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOIO: don't recurse into the I/O layer
+ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+ * mutex and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
* __GFP_NOWARN: don't print a warning in case of failure
@@ -837,7 +837,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
@@ -845,6 +845,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
if (nf == NF_PREFETCH)
return NULL;
+ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+ dm_bufio_unlock(c);
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ dm_bufio_lock(c);
+ if (b)
+ return b;
+ tried_noio_alloc = true;
+ }
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1316,7 +1325,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1587,18 +1596,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c;
- unsigned long count;
-
- c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_FS)
- dm_bufio_lock(c);
- else if (!dm_bufio_trylock(c))
- return 0;
+ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- dm_bufio_unlock(c);
- return count;
+ return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
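The reworked __alloc_buffer_wait_no_callback() above layers its attempts: first a non-blocking GFP_NOWAIT allocation while the client mutex is held, then, if that fails, the mutex is dropped for one blocking GFP_NOIO attempt before falling back to the reserved buffer list. The userspace sketch below shows the same drop-the-lock-for-the-slow-path shape; malloc() stands in for alloc_buffer(), a pthread mutex for dm_bufio_lock(), and the simulated failure of the fast path is artificial.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for alloc_buffer(); 'may_block' mimics GFP_NOIO vs GFP_NOWAIT. */
static void *alloc_buffer(bool may_block)
{
	if (!may_block)
		return NULL;	/* simulate the opportunistic attempt failing */
	return malloc(4096);
}

static void *get_buffer(void)
{
	void *b;
	bool tried_blocking = false;

	pthread_mutex_lock(&client_lock);
	for (;;) {
		/* Fast path: never sleep while holding the lock. */
		b = alloc_buffer(false);
		if (b)
			break;

		if (!tried_blocking) {
			/* Slow path: drop the lock so others can progress. */
			pthread_mutex_unlock(&client_lock);
			b = alloc_buffer(true);
			pthread_mutex_lock(&client_lock);
			tried_blocking = true;
			if (b)
				break;
		}
		/* The real code would now fall back to the reserved buffers
		 * or wait for one to be freed; this sketch just gives up. */
		break;
	}
	pthread_mutex_unlock(&client_lock);
	return b;
}

int main(void)
{
	void *b = get_buffer();

	printf("buffer at %p\n", b);
	free(b);
	return 0;
}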
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 695577812cf6..624fe4319b24 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -383,7 +383,6 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
goto bad;
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
-
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
goto bad;
@@ -789,7 +788,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
if (cmd->data_block_size != data_block_size) {
- DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)",
(unsigned long long) data_block_size,
(unsigned long long) cmd->data_block_size);
return false;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index c33f4a6e1d7d..f19c6930a67c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1361,7 +1361,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
static unsigned random_level(dm_cblock_t cblock)
{
- return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
static int smq_load_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 59b2c50562e4..e04c61e0839e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -989,7 +989,8 @@ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mod
enum cache_metadata_mode old_mode = get_cache_mode(cache);
if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
- DMERR("unable to read needs_check flag, setting failure mode");
+ DMERR("%s: unable to read needs_check flag, setting failure mode.",
+ cache_device_name(cache));
new_mode = CM_FAIL;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..7c6c57216bf2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
@@ -23,12 +24,14 @@
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
+#include <keys/user-type.h>
#include <linux/device-mapper.h>
@@ -140,8 +143,9 @@ struct crypt_config {
char *cipher;
char *cipher_string;
+ char *key_string;
- struct crypt_iv_operations *iv_gen_ops;
+ const struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
@@ -758,15 +762,15 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
return r;
}
-static struct crypt_iv_operations crypt_iv_plain_ops = {
+static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
+static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
+static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
@@ -774,17 +778,17 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
.generator = crypt_iv_essiv_gen
};
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
+static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
-static struct crypt_iv_operations crypt_iv_null_ops = {
+static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
+static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
@@ -793,7 +797,7 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
-static struct crypt_iv_operations crypt_iv_tcw_ops = {
+static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
@@ -994,7 +998,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned i, len, remaining_size;
struct page *page;
- struct bio_vec *bvec;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1019,12 +1022,7 @@ retry:
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- bvec = &clone->bi_io_vec[clone->bi_vcnt++];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = 0;
-
- clone->bi_iter.bi_size += len;
+ bio_add_page(clone, page, len, 0);
remaining_size -= len;
}
@@ -1135,7 +1133,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+ clone->bi_opf = io->base_bio->bi_opf;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1471,7 +1469,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return 0;
}
-static int crypt_setkey_allcpus(struct crypt_config *cc)
+static int crypt_setkey(struct crypt_config *cc)
{
unsigned subkey_size;
int err = 0, i, r;
@@ -1490,25 +1488,157 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
return err;
}
+#ifdef CONFIG_KEYS
+
+static bool contains_whitespace(const char *str)
+{
+ while (*str)
+ if (isspace(*str++))
+ return true;
+ return false;
+}
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ char *new_key_string, *key_desc;
+ int ret;
+ struct key *key;
+ const struct user_key_payload *ukp;
+
+ /*
+ * Reject key_string with whitespace. dm core currently lacks code for
+ * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
+ */
+ if (contains_whitespace(key_string)) {
+ DMERR("whitespace chars not allowed in key string");
+ return -EINVAL;
+ }
+
+ /* look for next ':' separating key_type from key_description */
+ key_desc = strpbrk(key_string, ":");
+ if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
+ return -EINVAL;
+
+ if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
+ strncmp(key_string, "user:", key_desc - key_string + 1))
+ return -EINVAL;
+
+ new_key_string = kstrdup(key_string, GFP_KERNEL);
+ if (!new_key_string)
+ return -ENOMEM;
+
+ key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
+ key_desc + 1, NULL);
+ if (IS_ERR(key)) {
+ kzfree(new_key_string);
+ return PTR_ERR(key);
+ }
+
+ rcu_read_lock();
+
+ ukp = user_key_payload(key);
+ if (!ukp) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EKEYREVOKED;
+ }
+
+ if (cc->key_size != ukp->datalen) {
+ rcu_read_unlock();
+ key_put(key);
+ kzfree(new_key_string);
+ return -EINVAL;
+ }
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ rcu_read_unlock();
+ key_put(key);
+
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+ ret = crypt_setkey(cc);
+
+ /* wipe the kernel key payload copy in each case */
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
+ if (!ret) {
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kzfree(cc->key_string);
+ cc->key_string = new_key_string;
+ } else
+ kzfree(new_key_string);
+
+ return ret;
+}
+
+static int get_key_size(char **key_string)
+{
+ char *colon, dummy;
+ int ret;
+
+ if (*key_string[0] != ':')
+ return strlen(*key_string) >> 1;
+
+ /* look for next ':' in key string */
+ colon = strpbrk(*key_string + 1, ":");
+ if (!colon)
+ return -EINVAL;
+
+ if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
+ return -EINVAL;
+
+ *key_string = colon;
+
+ /* remaining key string should be :<logon|user>:<key_desc> */
+
+ return ret;
+}
+
+#else
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+ return -EINVAL;
+}
+
+static int get_key_size(char **key_string)
+{
+ return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+}
+
+#endif
+
static int crypt_set_key(struct crypt_config *cc, char *key)
{
int r = -EINVAL;
int key_string_len = strlen(key);
- /* The key size may not be changed. */
- if (cc->key_size != (key_string_len >> 1))
- goto out;
-
/* Hyphen (which gives a key_size of zero) means there is no key. */
if (!cc->key_size && strcmp(key, "-"))
goto out;
- if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ /* ':' means the key is in kernel keyring, short-circuit normal key processing */
+ if (key[0] == ':') {
+ r = crypt_set_keyring_key(cc, key + 1);
goto out;
+ }
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ /* clear the flag since following operations may invalidate previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- r = crypt_setkey_allcpus(cc);
+ /* wipe references to any kernel keyring key */
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
+
+ if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ goto out;
+
+ r = crypt_setkey(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
@@ -1521,8 +1651,10 @@ static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
- return crypt_setkey_allcpus(cc);
+ return crypt_setkey(cc);
}
static void crypt_dtr(struct dm_target *ti)
@@ -1558,6 +1690,7 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher);
kzfree(cc->cipher_string);
+ kzfree(cc->key_string);
/* Must zero key material before freeing */
kzfree(cc);
@@ -1726,12 +1859,13 @@ bad_mem:
/*
* Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
+ * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
*/
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size, opt_params;
+ int key_size;
+ unsigned int opt_params;
unsigned long long tmpll;
int ret;
size_t iv_size_padding;
@@ -1748,7 +1882,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- key_size = strlen(argv[1]) >> 1;
+ key_size = get_key_size(&argv[1]);
+ if (key_size < 0) {
+ ti->error = "Cannot parse key size";
+ return -EINVAL;
+ }
cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
if (!cc) {
@@ -1955,10 +2093,13 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0)
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
- else
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+ else
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ } else
DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
@@ -2014,7 +2155,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
- int ret = -EINVAL;
+ int key_size, ret = -EINVAL;
if (argc < 2)
goto error;
@@ -2025,6 +2166,13 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return -EINVAL;
}
if (argc == 3 && !strcasecmp(argv[1], "set")) {
+ /* The key size may not be changed. */
+ key_size = get_key_size(&argv[2]);
+ if (key_size < 0 || cc->key_size != key_size) {
+ memset(argv[2], '0', strlen(argv[2]));
+ return -EINVAL;
+ }
+
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -2068,7 +2216,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 1},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
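[Editor's note] Taken together, the dm-crypt changes above add an alternative key format: instead of a hex-encoded key, the second table argument may reference a key already loaded into the kernel keyring, using the ":<key_size>:<user|logon>:<key_description>" syntax documented in the crypt_ctr() comment, and crypt_status() reports the keyring reference (":<key_size>:<type>:<description>") back instead of the raw key, so the key material never appears in DM_TABLE_STATUS output. As a usage illustration only (device path and key description are hypothetical), a table line for a 256-bit AES-CBC-ESSIV mapping could look like:

    0 2097152 crypt aes-cbc-essiv:sha256 :32:logon:cryptsetup:mykey 0 /dev/sdX 0

The "key set" message path now also re-checks the key size via get_key_size(), so a message cannot change the key size established at construction time.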
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 6a2e8dd44a1b..13305a182611 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -36,7 +36,8 @@ struct flakey_c {
};
enum feature_flag_bits {
- DROP_WRITES
+ DROP_WRITES,
+ ERROR_WRITES
};
struct per_bio_data {
@@ -76,6 +77,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
ti->error = "Feature drop_writes duplicated";
return -EINVAL;
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes conflicts with feature error_writes";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * error_writes
+ */
+ if (!strcasecmp(arg_name, "error_writes")) {
+ if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes duplicated";
+ return -EINVAL;
+
+ } else if (test_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature error_writes conflicts with feature drop_writes";
+ return -EINVAL;
}
continue;
@@ -135,6 +155,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
+
+ } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
}
return 0;
@@ -200,11 +224,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!(fc->up_interval + fc->down_interval)) {
ti->error = "Total (up + down) interval is zero";
+ r = -EINVAL;
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "Interval overflow";
+ r = -EINVAL;
goto bad;
}
@@ -289,22 +315,27 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Error reads if neither corrupt_bio_byte or drop_writes are set.
+ * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
* Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags))
return -EIO;
goto map_bio;
}
/*
- * Drop writes?
+ * Drop or error writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
+ else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
/*
* Corrupt matching writes.
@@ -340,10 +371,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
*/
corrupt_bio_data(bio, fc);
- } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ } else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ !test_bit(ERROR_WRITES, &fc->flags)) {
/*
* Error read during the down_interval if drop_writes
- * wasn't configured.
+ * and error_writes were not configured.
*/
return -EIO;
}
@@ -357,7 +389,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes;
+ unsigned drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -370,10 +402,13 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
fc->down_interval);
drop_writes = test_bit(DROP_WRITES, &fc->flags);
- DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+ error_writes = test_bit(ERROR_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
if (drop_writes)
DMEMIT("drop_writes ");
+ else if (error_writes)
+ DMEMIT("error_writes ");
if (fc->corrupt_bio_byte)
DMEMIT("corrupt_bio_byte %u %c %u %u ",
@@ -410,7 +445,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
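[Editor's note] The dm-flakey changes above add an error_writes feature alongside drop_writes: during the down interval, drop_writes silently completes writes without issuing them (bio_endio()), while error_writes fails them with -EIO (bio_io_error()); the two features are mutually exclusive, and both are now reflected in flakey_status(). As a usage illustration only (device path and intervals are hypothetical), a table line that errors writes for 10 seconds out of every 190 could be:

    0 409600 flakey /dev/sdX 0 180 10 1 error_writes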
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..03940bf36f6c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -162,7 +162,10 @@ struct dpages {
struct page **p, unsigned long *len, unsigned *offset);
void (*next_page)(struct dpages *dp);
- unsigned context_u;
+ union {
+ unsigned context_u;
+ struct bvec_iter context_bi;
+ };
void *context_ptr;
void *vma_invalidate_address;
@@ -204,25 +207,36 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len - dp->context_u;
- *offset = bvec->bv_offset + dp->context_u;
+ struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ dp->context_bi);
+
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
+
+ /* avoid figuring it out again in bio_next_page() */
+ dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = dp->context_ptr;
- dp->context_ptr = bvec + 1;
- dp->context_u = 0;
+ unsigned int len = (unsigned int)dp->context_bi.bi_sector;
+
+ bvec_iter_advance((struct bio_vec *)dp->context_ptr,
+ &dp->context_bi, len);
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- dp->context_u = bio->bi_iter.bi_bvec_done;
+
+ /*
+ * We just use bvec iterator to retrieve pages, so it is ok to
+ * access the bvec table directly here
+ */
+ dp->context_ptr = bio->bi_io_vec;
+ dp->context_bi = bio->bi_iter;
}
/*
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b61aed..c72a77048b73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1697,7 +1697,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
{
struct dm_ioctl *dmi;
int secure_data;
- const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
+ const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
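[Editor's note] The dm-ioctl hunk above replaces the open-coded "sizeof(*param_kernel) - sizeof(param_kernel->data)" with offsetof(struct dm_ioctl, data). The two expressions agree only when no padding follows the final data[] member; offsetof() names the start of the trailing payload directly and is the robust form. A standalone illustration (hypothetical struct layout, not the real struct dm_ioctl):

    #include <stddef.h>
    #include <stdio.h>

    struct example {                 /* hypothetical layout for demonstration only */
    	unsigned int version[3];     /* 12 bytes */
    	char data[7];                /* trailing payload */
    };

    int main(void)
    {
    	printf("offsetof(data)                = %zu\n",
    	       offsetof(struct example, data));
    	printf("sizeof(struct) - sizeof(data) = %zu\n",
    	       sizeof(struct example) - sizeof(((struct example *)0)->data));
    	/* On a typical 32/64-bit ABI this prints 12 and 13: the subtraction
    	 * counts the tail padding that rounds the struct up to a multiple of
    	 * 4, while offsetof() does not. */
    	return 0;
    }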
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad42ec5..33e71ea6cc14 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
};
lc->io_req.bi_op = REQ_OP_WRITE;
- lc->io_req.bi_op_flags = WRITE_FLUSH;
+ lc->io_req.bi_op_flags = REQ_PREFLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8596e2..6400cffb986d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -372,16 +372,13 @@ static int __pg_init_all_paths(struct multipath *m)
return atomic_read(&m->pg_init_in_progress);
}
-static int pg_init_all_paths(struct multipath *m)
+static void pg_init_all_paths(struct multipath *m)
{
- int r;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- r = __pg_init_all_paths(m);
+ __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
-
- return r;
}
static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -583,16 +580,17 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
- *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
- rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(*__clone)) {
- /* ENOMEM, requeue */
+ clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
- (*__clone)->bio = (*__clone)->biotail = NULL;
- (*__clone)->rq_disk = bdev->bd_disk;
- (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ *__clone = clone;
}
if (pgpath->pg->ps.type->start_io)
@@ -852,18 +850,22 @@ retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
+ * Clear any hw_handler_params associated with a
+ * handler that isn't already attached.
+ */
+ if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
+ }
+
+ /*
* Reset hw_handler_name to match the attached handler
- * and clear any hw_handler_params associated with the
- * ignored handler.
*
* NB. This modifies the table line to show the actual
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
m->hw_handler_name = attached_handler_name;
-
- kfree(m->hw_handler_params);
- m->hw_handler_params = NULL;
}
}
@@ -1002,6 +1004,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
}
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+ if (!m->hw_handler_name)
+ return -EINVAL;
if (hw_argc > 1) {
char *p;
@@ -1362,7 +1366,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
}
@@ -1394,7 +1398,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
- (pgnum > m->nr_priority_groups)) {
+ !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..b8f978e551d7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -160,7 +160,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -171,7 +170,6 @@ struct raid_dev {
CTR_FLAG_DAEMON_SLEEP | \
CTR_FLAG_MIN_RECOVERY_RATE | \
CTR_FLAG_MAX_RECOVERY_RATE | \
- CTR_FLAG_MAX_WRITE_BEHIND | \
CTR_FLAG_STRIPE_CACHE | \
CTR_FLAG_REGION_SIZE | \
CTR_FLAG_DELTA_DISKS | \
@@ -2011,7 +2009,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
- set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
/* Any superblock is better than none, choose that if given */
return refdev ? 0 : 1;
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
mddev->reshape_position = MaxSector;
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+
/*
* Reshaping is supported, e.g. reshape_position is valid
* in superblock and superblock content is authoritative.
*/
if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
/* Superblock is authoritative wrt given raid set layout! */
- mddev->raid_disks = le32_to_cpu(sb->num_devices);
- mddev->level = le32_to_cpu(sb->level);
- mddev->layout = le32_to_cpu(sb->layout);
- mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->new_level) {
- DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->new_layout) {
- DMERR("Reshaping raid sets not yet supported. (raid layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
- DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
+ struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
- /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
- if (!rt_is_raid1(rs->raid_type) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
- sb->num_devices, mddev->raid_disks);
+ if (rs_takeover_requested(rs)) {
+ if (rt_cur && rt_new)
+ DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
+ return -EINVAL;
+ } else if (rs_reshape_requested(rs)) {
+ DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
+ if (mddev->layout != mddev->new_layout) {
+ if (rt_cur && rt_new)
+ DMERR(" current layout %s vs new layout %s",
+ rt_cur->name, rt_new->name);
+ else
+ DMERR(" current layout 0x%X vs new layout 0x%X",
+ le32_to_cpu(sb->layout), mddev->new_layout);
+ }
+ if (mddev->chunk_sectors != mddev->new_chunk_sectors)
+ DMERR(" current stripe sectors %u vs new stripe sectors %u",
+ mddev->chunk_sectors, mddev->new_chunk_sectors);
+ if (rs->delta_disks)
+ DMERR(" current %u disks vs new %u disks",
+ mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
+ if (rs_is_raid10(rs)) {
+ DMERR(" Old layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ DMERR(" New layout: %s w/ %u copies",
+ raid10_md_layout_to_format(mddev->new_layout),
+ raid10_md_layout_to_copies(mddev->new_layout));
+ }
return -EINVAL;
}
DMINFO("Discovered old metadata format; upgrading to extended metadata format");
-
- /* Table line is checked vs. authoritative superblock */
- rs_set_new(rs);
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
continue;
if (role != r->raid_disk) {
- if (__is_raid10_near(mddev->layout)) {
+ if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
rs->raid_disks % rs->raid10_copies) {
rs->ti->error =
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
}
+ /* Disable/enable discard support on raid set. */
+ configure_discard_support(rs);
+
mddev_unlock(&rs->md);
return 0;
@@ -3497,7 +3505,7 @@ static void rs_update_sbs(struct raid_set *rs)
struct mddev *mddev = &rs->md;
int ro = mddev->ro;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->ro = 0;
md_update_sb(mddev, 1);
mddev->ro = ro;
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
rs_update_sbs(rs);
- /*
- * Disable/enable discard support on raid set after any
- * conversion, because devices can have been added
- */
- configure_discard_support(rs);
-
/* Load the bitmap from disk unless raid0 */
r = __load_dirty_region_bitmap(rs);
if (r)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9a8b71067c6e..2ddc2d20e62d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = WRITE_FLUSH,
+ .bi_op_flags = REQ_PREFLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+ .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2adc050a..9d7275fb541a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,11 +23,7 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
+static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
bool dm_use_blk_mq_default(void)
{
@@ -75,12 +71,6 @@ static void dm_old_start_queue(struct request_queue *q)
static void dm_mq_start_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
blk_mq_start_stopped_hw_queues(q, true);
blk_mq_kick_requeue_list(q);
}
@@ -105,20 +95,10 @@ static void dm_old_stop_queue(struct request_queue *q)
static void dm_mq_stop_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (blk_mq_queue_stopped(q))
return;
- }
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- /* Avoid that requeuing could restart the queue. */
- blk_mq_cancel_requeue_work(q);
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
}
void dm_stop_queue(struct request_queue *q)
@@ -226,6 +206,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -238,8 +221,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
+ if (!q->mq_ops && run_queue) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -313,7 +299,7 @@ static void dm_unprep_request(struct request *rq)
if (!rq->q->mq_ops) {
rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ rq->rq_flags &= ~RQF_DONTPREP;
}
if (clone)
@@ -338,12 +324,7 @@ static void dm_old_requeue_request(struct request *rq)
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_mq_delay_kick_requeue_list(q, msecs);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_delay_kick_requeue_list(q, msecs);
}
void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +335,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
- blk_mq_requeue_request(rq);
+ blk_mq_requeue_request(rq, false);
__dm_mq_kick_requeue_list(rq->q, msecs);
}
@@ -431,7 +412,7 @@ static void dm_softirq_done(struct request *rq)
return;
}
- if (rq->cmd_flags & REQ_FAILED)
+ if (rq->rq_flags & RQF_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
@@ -460,7 +441,7 @@ static void dm_complete_request(struct request *rq, int error)
*/
static void dm_kill_unmapped_request(struct request *rq, int error)
{
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
@@ -476,7 +457,7 @@ static void end_clone_request(struct request *clone, int error)
* For just cleaning up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (REQ_ALLOCED isn't set).
+ * from dm own mempool (RQF_ALLOCED isn't set).
*/
__blk_put_request(clone->q, clone);
}
@@ -497,7 +478,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
int r;
if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
+ clone->rq_flags |= RQF_IO_STAT;
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +614,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_DEFER;
rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -819,7 +800,7 @@ static void dm_old_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
(ti->type->busy && ti->type->busy(ti))) {
blk_delay_queue(q, 10);
@@ -904,17 +885,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
dm_put_live_table(md, srcu_idx);
}
- /*
- * On suspend dm_stop_queue() handles stopping the blk-mq
- * request_queue BUT: even though the hw_queues are marked
- * BLK_MQ_S_STOPPED at that point there is still a race that
- * is allowing block/blk-mq.c to call ->queue_rq against a
- * hctx that it really shouldn't. The following check guards
- * against this rarity (albeit _not_ race-free).
- */
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
- return BLK_MQ_RQ_QUEUE_BUSY;
-
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..b93476c3ba3f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
ps->valid = 0;
/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+ r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b332607..0a427de23ed2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -871,7 +871,7 @@ static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool verify_blk_mq = false;
+ unsigned sq_count = 0, mq_count = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
@@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
BUG_ON(!request_based); /* No targets in this table */
- if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
- return 0;
- }
-
/*
* The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
* having a compatible target use dm_table_set_type.
@@ -948,6 +942,19 @@ verify_rq_based:
return -EINVAL;
}
+ if (list_empty(devices)) {
+ int srcu_idx;
+ struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
+
+ /* inherit live table's type and all_blk_mq */
+ if (live_table) {
+ t->type = live_table->type;
+ t->all_blk_mq = live_table->all_blk_mq;
+ }
+ dm_put_live_table(t->md, srcu_idx);
+ return 0;
+ }
+
/* Non-request-stackable devices can't be used for request-based dm */
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -959,19 +966,19 @@ verify_rq_based:
}
if (q->mq_ops)
- verify_blk_mq = true;
+ mq_count++;
+ else
+ sq_count++;
}
+ if (sq_count && mq_count) {
+ DMERR("table load rejected: not all devices are blk-mq request-stackable");
+ return -EINVAL;
+ }
+ t->all_blk_mq = mq_count > 0;
- if (verify_blk_mq) {
- /* verify _all_ devices in the table are blk-mq devices */
- list_for_each_entry(dd, devices, list)
- if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
- DMERR("table load rejected: not all devices"
- " are blk-mq request-stackable");
- return -EINVAL;
- }
-
- t->all_blk_mq = true;
+ if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ DMERR("table load rejected: all devices are not blk-mq request-stackable");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a7b3b3..7335d8a3fc47 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -868,7 +868,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
if (r) {
- ti->error = "Data device lookup failed";
+ ti->error = "Hash device lookup failed";
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1dd6900..3086da5664f3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad;
- bio_init(&md->flush_bio);
+ bio_init(&md->flush_bio, NULL, 0);
md->flush_bio.bi_bdev = md->bdev;
- bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
dm_stats_init(&md->stats);
@@ -1886,9 +1886,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_set_queue_dying(q);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 86f5d435901d..5975c9915684 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "linear.h"
@@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
discard_supported = true;
}
if (cnt != raid_disks) {
- printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
- mdname(mddev));
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev;
- if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
- bio->bi_iter.bi_sector < start_sector))
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
goto out_of_bounds;
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
- split = bio_split(bio, end_sector -
- bio->bi_iter.bi_sector,
+ split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, fs_bio_set);
bio_chain(split, bio);
} else {
@@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
return;
out_of_bounds:
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
@@ -275,7 +279,6 @@ out_of_bounds:
static void linear_status (struct seq_file *seq, struct mddev *mddev)
{
-
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..82821ee0d57f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -30,6 +30,18 @@
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Errors, Warnings, etc.
+ Please use:
+ pr_crit() for error conditions that risk data loss
+ pr_err() for error conditions that are unexpected, like an IO error
+ or internal inconsistency
+ pr_warn() for error conditions that could have been predicted, like
+ adding a device to an array when it has incompatible metadata
+ pr_info() for interesting but very rare events, like an array starting
+ or stopping, or resync starting or stopping
+ pr_debug() for everything else.
+
*/
#include <linux/kthread.h>
@@ -52,6 +64,7 @@
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
@@ -394,7 +407,7 @@ static void submit_flushes(struct work_struct *ws)
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
- bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+ bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -684,11 +697,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
static int alloc_disk_sb(struct md_rdev *rdev)
{
rdev->sb_page = alloc_page(GFP_KERNEL);
- if (!rdev->sb_page) {
- printk(KERN_ALERT "md: out of memory.\n");
+ if (!rdev->sb_page)
return -ENOMEM;
- }
-
return 0;
}
@@ -715,9 +725,15 @@ static void super_written(struct bio *bio)
struct mddev *mddev = rdev->mddev;
if (bio->bi_error) {
- printk("md: super_written gets error=%d\n", bio->bi_error);
+ pr_err("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
- }
+ if (!test_bit(Faulty, &rdev->flags)
+ && (bio->bi_opf & MD_FAILFAST)) {
+ set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
+ set_bit(LastDev, &rdev->flags);
+ }
+ } else
+ clear_bit(LastDev, &rdev->flags);
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
@@ -734,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ struct bio *bio;
+ int ff = 0;
+
+ if (test_bit(Faulty, &rdev->flags))
+ return;
+
+ bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
atomic_inc(&rdev->nr_pending);
@@ -743,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
- bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+
+ if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+ test_bit(FailFast, &rdev->flags) &&
+ !test_bit(LastDev, &rdev->flags))
+ ff = MD_FAILFAST;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
}
-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
+ return -EAGAIN;
+ return 0;
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
@@ -795,8 +825,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
fail:
- printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
- bdevname(rdev->bdev,b));
+ pr_err("md: disabled device %s, could not read superblock.\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
@@ -818,7 +848,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
if (!tmp1 || !tmp2) {
ret = 0;
- printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
@@ -932,7 +961,7 @@ int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
- printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+ pr_warn("%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
@@ -956,7 +985,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
- if (ret) return ret;
+ if (ret)
+ return ret;
ret = -EINVAL;
@@ -964,17 +994,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
- printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
- b);
+ pr_warn("md: invalid raid superblock magic on %s\n", b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
- printk(KERN_WARNING "Bad version number %d.%d on %s\n",
- sb->major_version, sb->minor_version,
- b);
+ pr_warn("Bad version number %d.%d on %s\n",
+ sb->major_version, sb->minor_version, b);
goto abort;
}
@@ -982,8 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
- printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
- b);
+ pr_warn("md: invalid superblock checksum on %s\n", b);
goto abort;
}
@@ -1004,14 +1031,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
- printk(KERN_WARNING "md: %s has same UUID"
- " but different superblock to %s\n",
- b, bdevname(refdev->bdev, b2));
+ pr_warn("md: %s has same UUID but different superblock to %s\n",
+ b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
@@ -1158,6 +1184,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
@@ -1283,6 +1311,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev2->flags))
+ d->state |= (1<<MD_DISK_FAILFAST);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1324,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
- md_super_wait(rdev->mddev);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -1413,13 +1444,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
- printk("md: invalid superblock checksum on %s\n",
+ pr_warn("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
- printk("md: data_size too small on %s\n",
- bdevname(rdev->bdev,b));
+ pr_warn("md: data_size too small on %s\n",
+ bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
@@ -1503,8 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
- printk(KERN_WARNING "md: %s has strangely different"
- " superblock to %s\n",
+ pr_warn("md: %s has strangely different superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
@@ -1646,8 +1676,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
case MD_DISK_ROLE_JOURNAL: /* journal device */
if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
/* journal device without journal feature */
- printk(KERN_WARNING
- "md: journal device provided without journal feature, ignoring the device\n");
+ pr_warn("md: journal device provided without journal feature, ignoring the device\n");
return -EINVAL;
}
set_bit(Journal, &rdev->flags);
@@ -1669,6 +1698,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (sb->devflags & FailFast1)
+ set_bit(FailFast, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
@@ -1707,6 +1738,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ if (test_bit(FailFast, &rdev->flags))
+ sb->devflags |= FailFast1;
+ else
+ sb->devflags &= ~FailFast1;
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
@@ -1863,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
- md_super_wait(rdev->mddev);
+ do {
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+ } while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
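[Editor's note] The md.c hunks above tie the new FailFast/LastDev handling into superblock updates: md_super_write() may mark the bio MD_FAILFAST (unless the device is the last working one), super_written() records a failed failfast write by setting MD_SB_NEED_REWRITE rather than failing the array, and md_super_wait() now returns -EAGAIN in that case so callers such as the two rdev_size_change() functions above and md_update_sb() reissue the write until it completes. A distilled sketch of that retry contract (hypothetical helpers, not the md code itself):

    /* Sketch: keep rewriting the superblock while a failfast write was failed. */
    do {
    	write_superblock(rdev);                      /* hypothetical, ~md_super_write() */
    } while (wait_for_superblocks(mddev) == -EAGAIN);    /* ~md_super_wait() */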
@@ -2004,9 +2040,9 @@ int md_integrity_register(struct mddev *mddev)
blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev));
- printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+ pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
- printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+ pr_err("md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
@@ -2034,8 +2070,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
- printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
- mdname(mddev), bdevname(rdev->bdev, name));
+ pr_err("%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
return -ENXIO;
}
@@ -2089,15 +2125,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rcu_read_unlock();
if (!test_bit(Journal, &rdev->flags) &&
mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
- printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
- mdname(mddev), mddev->max_disks);
+ pr_warn("md: %s: array is limited to %d devices\n",
+ mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
strreplace(b, '/', '!');
rdev->mddev = mddev;
- printk(KERN_INFO "md: bind<%s>\n", b);
+ pr_debug("md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2116,8 +2152,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return 0;
fail:
- printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
- b, mdname(mddev));
+ pr_warn("md: failed to register dev-%s for %s\n",
+ b, mdname(mddev));
return err;
}
@@ -2134,7 +2170,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
- printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2164,8 +2200,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- printk(KERN_ERR "md: could not open %s.\n",
- __bdevname(dev, b));
+ pr_warn("md: could not open %s.\n", __bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -2185,8 +2220,7 @@ static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: export_rdev(%s)\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2288,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (mddev->ro) {
if (force_change)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return;
}
repeat:
if (mddev_is_clustered(mddev)) {
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb? */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
- bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) |
- BIT(MD_CHANGE_CLEAN));
+ bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) |
+ BIT(MD_SB_CHANGE_CLEAN));
return;
}
}
@@ -2321,10 +2355,10 @@ repeat:
}
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->external) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
@@ -2344,9 +2378,9 @@ repeat:
mddev->utime = ktime_get_real_seconds();
- if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
force_change = 1;
- if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
/* just a clean<-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
@@ -2402,6 +2436,9 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
+ if (mddev->queue)
+ blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
@@ -2433,15 +2470,16 @@ repeat:
/* only need to write one superblock... */
break;
}
- md_super_wait(mddev);
- /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+ if (md_super_wait(mddev) < 0)
+ goto rewrite;
+ /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
md_cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
- !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+ !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
/* have to write it out again */
goto repeat;
wake_up(&mddev->sb_wait);
@@ -2485,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2523,51 +2561,41 @@ struct rdev_sysfs_entry {
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
- char *sep = "";
+ char *sep = ",";
size_t len = 0;
unsigned long flags = ACCESS_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
- rdev->badblocks.unacked_exist) {
- len+= sprintf(page+len, "%sfaulty",sep);
- sep = ",";
- }
- if (test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sin_sync",sep);
- sep = ",";
- }
- if (test_bit(Journal, &flags)) {
- len += sprintf(page+len, "%sjournal",sep);
- sep = ",";
- }
- if (test_bit(WriteMostly, &flags)) {
- len += sprintf(page+len, "%swrite_mostly",sep);
- sep = ",";
- }
+ (!test_bit(ExternalBbl, &flags) &&
+ rdev->badblocks.unacked_exist))
+ len += sprintf(page+len, "faulty%s", sep);
+ if (test_bit(In_sync, &flags))
+ len += sprintf(page+len, "in_sync%s", sep);
+ if (test_bit(Journal, &flags))
+ len += sprintf(page+len, "journal%s", sep);
+ if (test_bit(WriteMostly, &flags))
+ len += sprintf(page+len, "write_mostly%s", sep);
if (test_bit(Blocked, &flags) ||
(rdev->badblocks.unacked_exist
- && !test_bit(Faulty, &flags))) {
- len += sprintf(page+len, "%sblocked", sep);
- sep = ",";
- }
+ && !test_bit(Faulty, &flags)))
+ len += sprintf(page+len, "blocked%s", sep);
if (!test_bit(Faulty, &flags) &&
!test_bit(Journal, &flags) &&
- !test_bit(In_sync, &flags)) {
- len += sprintf(page+len, "%sspare", sep);
- sep = ",";
- }
- if (test_bit(WriteErrorSeen, &flags)) {
- len += sprintf(page+len, "%swrite_error", sep);
- sep = ",";
- }
- if (test_bit(WantReplacement, &flags)) {
- len += sprintf(page+len, "%swant_replacement", sep);
- sep = ",";
- }
- if (test_bit(Replacement, &flags)) {
- len += sprintf(page+len, "%sreplacement", sep);
- sep = ",";
- }
+ !test_bit(In_sync, &flags))
+ len += sprintf(page+len, "spare%s", sep);
+ if (test_bit(WriteErrorSeen, &flags))
+ len += sprintf(page+len, "write_error%s", sep);
+ if (test_bit(WantReplacement, &flags))
+ len += sprintf(page+len, "want_replacement%s", sep);
+ if (test_bit(Replacement, &flags))
+ len += sprintf(page+len, "replacement%s", sep);
+ if (test_bit(ExternalBbl, &flags))
+ len += sprintf(page+len, "external_bbl%s", sep);
+ if (test_bit(FailFast, &flags))
+ len += sprintf(page+len, "failfast%s", sep);
+
+ if (len)
+ len -= strlen(sep);
return len+sprintf(page+len, "\n");
}
@@ -2587,6 +2615,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
+ * {,-}failfast - set/clear FailFast
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2610,8 +2639,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers)
- md_update_sb(mddev, 1);
+ if (mddev->pers) {
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
+ }
md_new_event(mddev);
}
}
@@ -2626,6 +2657,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
+ !test_bit(ExternalBbl, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
@@ -2642,6 +2674,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "failfast")) {
+ set_bit(FailFast, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-failfast")) {
+ clear_bit(FailFast, &rdev->flags);
+ err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
if (rdev->mddev->pers == NULL) {
@@ -2708,6 +2746,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else
err = -EBUSY;
+ } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
+ set_bit(ExternalBbl, &rdev->flags);
+ rdev->badblocks.shift = 0;
+ err = 0;
+ } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
+ clear_bit(ExternalBbl, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3211,10 +3256,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- printk(KERN_ERR "md: could not alloc mem for new device!\n");
+ if (!rdev)
return ERR_PTR(-ENOMEM);
- }
err = md_rdev_init(rdev);
if (err)
@@ -3231,8 +3274,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
- printk(KERN_WARNING
- "md: %s has zero or unknown size, marking faulty!\n",
+ pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
@@ -3242,16 +3284,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s does not have a valid v%d.%d "
- "superblock, not importing!\n",
+ pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
bdevname(rdev->bdev,b),
- super_format, super_minor);
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
- printk(KERN_WARNING
- "md: could not read %s's sb, not importing!\n",
+ pr_warn("md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
@@ -3287,9 +3326,7 @@ static void analyze_sbs(struct mddev *mddev)
case 0:
break;
default:
- printk( KERN_ERR \
- "md: fatal superblock inconsistency in %s"
- " -- removing from array\n",
+ pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
}
@@ -3302,18 +3339,16 @@ static void analyze_sbs(struct mddev *mddev)
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
- printk(KERN_WARNING
- "md: %s: %s: only %d devices permitted\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mddev->max_disks);
+ pr_warn("md: %s: %s: only %d devices permitted\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mddev->max_disks);
md_kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
- printk(KERN_WARNING "md: kicking non-fresh %s"
- " from array!\n",
+ pr_warn("md: kicking non-fresh %s from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
continue;
@@ -3384,7 +3419,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
unsigned long msec;
if (mddev_is_clustered(mddev)) {
- pr_info("md: Safemode is disabled for clustered mode\n");
+ pr_warn("md: Safemode is disabled for clustered mode\n");
return -EINVAL;
}
@@ -3472,8 +3507,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
- printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ pr_warn("md: %s: %s does not support online personality change\n",
+ mdname(mddev), mddev->pers->name);
goto out_unlock;
}
@@ -3491,7 +3526,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
+ pr_warn("md: personality %s not loaded\n", clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3505,8 +3540,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
if (!pers->takeover) {
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s does not support personality takeover\n",
+ mdname(mddev), clevel);
rv = -EINVAL;
goto out_unlock;
}
@@ -3526,8 +3561,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
- printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), clevel);
+ pr_warn("md: %s: %s would not accept array\n",
+ mdname(mddev), clevel);
rv = PTR_ERR(priv);
goto out_unlock;
}
@@ -3570,9 +3605,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (oldpers->sync_request != NULL &&
@@ -3603,9 +3637,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING "md: cannot register rd%d"
- " for %s after level change\n",
- rdev->raid_disk, mdname(mddev));
+ pr_warn("md: cannot register rd%d for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -3618,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
@@ -3813,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
if (!err) {
mddev->recovery_cp = n;
if (mddev->pers)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
mddev_unlock(mddev);
return err ?: len;
@@ -3887,10 +3920,10 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (mddev->in_sync)
- st = clean;
- else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
st = write_pending;
+ else if (mddev->in_sync)
+ st = clean;
else if (mddev->safemode)
st = active_idle;
else
@@ -3925,7 +3958,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
spin_lock(&mddev->lock);
if (st == active) {
restart_array(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+ md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
err = 0;
} else /* st == clean */ {
@@ -3935,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4001,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
err = 0;
} else
@@ -4015,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = restart_array(mddev);
if (err)
break;
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -5071,13 +5105,13 @@ static int md_alloc(dev_t dev, char *name)
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
- printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
- disk->disk_name);
+ pr_debug("md: cannot register %s/md - name in use\n",
+ disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- printk(KERN_DEBUG "pointless warning\n");
+ pr_debug("pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
@@ -5179,15 +5213,15 @@ int md_run(struct mddev *mddev)
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
- printk("md: %s: data overlaps metadata\n",
- mdname(mddev));
+ pr_warn("md: %s: data overlaps metadata\n",
+ mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
- printk("md: %s: metadata overlaps data\n",
- mdname(mddev));
+ pr_warn("md: %s: metadata overlaps data\n",
+ mdname(mddev));
return -EINVAL;
}
}
@@ -5202,11 +5236,11 @@ int md_run(struct mddev *mddev)
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
- printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
- mddev->level);
+ pr_warn("md: personality for level %d is not loaded!\n",
+ mddev->level);
else
- printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pr_warn("md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -5236,21 +5270,16 @@ int md_run(struct mddev *mddev)
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
- printk(KERN_WARNING
- "%s: WARNING: %s appears to be"
- " on the same physical disk as"
- " %s.\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- bdevname(rdev2->bdev,b2));
+ pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev,b),
+ bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
- printk(KERN_WARNING
- "True protection against single-disk"
- " failure might be compromised.\n");
+ pr_warn("True protection against single-disk failure might be compromised.\n");
}
mddev->recovery = 0;
@@ -5264,14 +5293,14 @@ int md_run(struct mddev *mddev)
err = pers->run(mddev);
if (err)
- printk(KERN_ERR "md: pers->run() failed ...\n");
+ pr_warn("md: pers->run() failed ...\n");
else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
- WARN_ONCE(!mddev->external_size, "%s: default size too small,"
- " but 'external_size' not in effect?\n", __func__);
- printk(KERN_ERR
- "md: invalid array_size %llu > default size %llu\n",
- (unsigned long long)mddev->array_sectors / 2,
- (unsigned long long)pers->size(mddev, 0, 0) / 2);
+ WARN_ONCE(!mddev->external_size,
+ "%s: default size too small, but 'external_size' not in effect?\n",
+ __func__);
+ pr_warn("md: invalid array_size %llu > default size %llu\n",
+ (unsigned long long)mddev->array_sectors / 2,
+ (unsigned long long)pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
}
if (err == 0 && pers->sync_request &&
@@ -5281,8 +5310,8 @@ int md_run(struct mddev *mddev)
bitmap = bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap);
- printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
- mdname(mddev), err);
+ pr_warn("%s: failed to create bitmap (%d)\n",
+ mdname(mddev), err);
} else
mddev->bitmap = bitmap;
@@ -5318,9 +5347,8 @@ int md_run(struct mddev *mddev)
if (pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
- printk(KERN_WARNING
- "md: cannot register extra attributes for %s\n",
- mdname(mddev));
+ pr_warn("md: cannot register extra attributes for %s\n",
+ mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -5350,7 +5378,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5421,8 +5449,7 @@ static int restart_array(struct mddev *mddev)
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
- printk(KERN_INFO "md: %s switched to read-write mode.\n",
- mdname(mddev));
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -5446,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
+ mddev->sb_flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
@@ -5490,12 +5518,15 @@ static void __md_stop_writes(struct mddev *mddev)
del_timer_sync(&mddev->safemode_timer);
+ if (mddev->pers && mddev->pers->quiesce) {
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
bitmap_flush(mddev);
- md_super_wait(mddev);
if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
- (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+ mddev->sb_flags)) {
/* mark array as shutdown cleanly */
if (!mddev_is_clustered(mddev))
mddev->in_sync = 1;
@@ -5516,8 +5547,8 @@ static void mddev_detach(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
/* wait for behind writes to complete */
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
+ pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
@@ -5578,20 +5609,20 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5653,7 +5684,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- printk("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5690,7 +5721,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
* Free resources if final stop
*/
if (mode == 0) {
- printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ pr_info("md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
@@ -5722,17 +5753,17 @@ static void autorun_array(struct mddev *mddev)
if (list_empty(&mddev->disks))
return;
- printk(KERN_INFO "md: running: ");
+ pr_info("md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
- printk("<%s>", bdevname(rdev->bdev,b));
+ pr_cont("<%s>", bdevname(rdev->bdev,b));
}
- printk("\n");
+ pr_cont("\n");
err = do_md_run(mddev);
if (err) {
- printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+ pr_warn("md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
@@ -5755,7 +5786,7 @@ static void autorun_devices(int part)
struct mddev *mddev;
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md: autorun ...\n");
+ pr_info("md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
@@ -5763,13 +5794,12 @@ static void autorun_devices(int part)
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
- printk(KERN_INFO "md: considering %s ...\n",
- bdevname(rdev0->bdev,b));
+ pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
- printk(KERN_INFO "md: adding %s ...\n",
- bdevname(rdev->bdev,b));
+ pr_debug("md: adding %s ...\n",
+ bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
@@ -5786,8 +5816,8 @@ static void autorun_devices(int part)
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
- printk(KERN_INFO "md: unit number in %s is bad: %d\n",
- bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+ pr_warn("md: unit number in %s is bad: %d\n",
+ bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
@@ -5796,21 +5826,17 @@ static void autorun_devices(int part)
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
- printk(KERN_ERR
- "md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
- printk(KERN_WARNING "md: %s locked, cannot run\n",
- mdname(mddev));
+ pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: %s already running, cannot run %s\n",
+ pr_warn("md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
- printk(KERN_INFO "md: created %s\n", mdname(mddev));
+ pr_debug("md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
@@ -5829,7 +5855,7 @@ static void autorun_devices(int part)
}
mddev_put(mddev);
}
- printk(KERN_INFO "md: ... autorun DONE.\n");
+ pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */
@@ -5964,6 +5990,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
info.state |= (1<<MD_DISK_JOURNAL);
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
+ if (test_bit(FailFast, &rdev->flags))
+ info.state |= (1<<MD_DISK_FAILFAST);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
@@ -5985,8 +6013,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
- pr_err("%s: Cannot add to clustered mddev.\n",
- mdname(mddev));
+ pr_warn("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -5998,8 +6026,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6010,8 +6037,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
- printk(KERN_WARNING
- "md: %s has different UUID to %s\n",
+ pr_warn("md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
@@ -6032,9 +6058,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
- mdname(mddev));
+ pr_warn("%s: personality does not support diskops!\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
@@ -6043,8 +6068,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: md_import_device returned %ld\n",
+ pr_warn("md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6075,6 +6099,10 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
+ else
+ clear_bit(FailFast, &rdev->flags);
if (info->state & (1<<MD_DISK_JOURNAL)) {
struct md_rdev *rdev2;
@@ -6140,8 +6168,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
- mdname(mddev));
+ pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
return -EINVAL;
}
@@ -6149,8 +6176,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
@@ -6166,9 +6192,11 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
+ if (info->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
if (!mddev->persistent) {
- printk(KERN_INFO "md: nonpersistent superblock ...\n");
+ pr_debug("md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
@@ -6207,13 +6235,17 @@ kick_rdev:
md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev);
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (mddev->thread)
+ md_wakeup_thread(mddev->thread);
+ else
+ md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
- printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
- bdevname(rdev->bdev,b), mdname(mddev));
+ pr_debug("md: cannot remove active disk %s from %s ...\n",
+ bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
@@ -6227,22 +6259,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
return -ENODEV;
if (mddev->major_version != 0) {
- printk(KERN_WARNING "%s: HOT_ADD may only be used with"
- " version-0 superblocks.\n",
+ pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
- printk(KERN_WARNING
- "%s: personality does not support diskops!\n",
+ pr_warn("%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
- printk(KERN_WARNING
- "md: error, md_import_device() returned %ld\n",
+ pr_warn("md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
@@ -6255,8 +6284,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
- printk(KERN_WARNING
- "md: can not hot-add faulty %s disk to %s!\n",
+ pr_warn("md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
@@ -6276,7 +6304,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1;
- md_update_sb(mddev, 1);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -6312,23 +6342,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
f = fget(fd);
if (f == NULL) {
- printk(KERN_ERR "%s: error: failed to get bitmap file\n",
- mdname(mddev));
+ pr_warn("%s: error: failed to get bitmap file\n",
+ mdname(mddev));
return -EBADF;
}
inode = f->f_mapping->host;
if (!S_ISREG(inode->i_mode)) {
- printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must be a regular file\n",
+ mdname(mddev));
err = -EBADF;
} else if (!(f->f_mode & FMODE_WRITE)) {
- printk(KERN_ERR "%s: error: bitmap file must open for write\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file must open for write\n",
+ mdname(mddev));
err = -EBADF;
} else if (atomic_read(&inode->i_writecount) != 1) {
- printk(KERN_ERR "%s: error: bitmap file is already in use\n",
- mdname(mddev));
+ pr_warn("%s: error: bitmap file is already in use\n",
+ mdname(mddev));
err = -EBUSY;
}
if (err) {
@@ -6393,8 +6423,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
- printk(KERN_INFO
- "md: superblock version %d not known\n",
+ pr_warn("md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
@@ -6432,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->max_disks = MD_SB_DISKS;
- if (mddev->persistent)
+ if (mddev->persistent) {
mddev->flags = 0;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->sb_flags = 0;
+ }
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -6660,8 +6691,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
- printk("md: can't change bitmap to none since the"
- " array is in use by more than one node\n");
+ pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
md_cluster_ops->unlock_all_bitmaps(mddev);
goto err;
@@ -6829,7 +6859,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
- &mddev->flags),
+ &mddev->recovery),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
@@ -6847,9 +6877,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
- printk(KERN_INFO
- "md: ioctl lock interrupted, reason %d, cmd %d\n",
- err, cmd);
+ pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
+ err, cmd);
goto out;
}
@@ -6864,30 +6893,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
+ pr_warn("md: couldn't update array info. %d\n", err);
goto unlock;
}
goto unlock;
}
if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
+ pr_warn("md: array %s already has disks!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
+ pr_warn("md: array %s already initialised!\n", mdname(mddev));
err = -EBUSY;
goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
+ pr_warn("md: couldn't set array info. %d\n", err);
goto unlock;
}
goto unlock;
@@ -6987,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
- if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
}
} else {
@@ -7092,7 +7115,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
if (test_bit(MD_CLOSING, &mddev->flags)) {
mutex_unlock(&mddev->open_mutex);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
err = 0;
@@ -7101,6 +7125,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
check_disk_change(bdev);
out:
+ if (err)
+ mddev_put(mddev);
return err;
}
@@ -7171,10 +7197,12 @@ static int md_thread(void *arg)
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
+ || kthread_should_stop() || kthread_should_park(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
+ if (kthread_should_park())
+ kthread_parkme();
if (!kthread_should_stop())
thread->run(thread);
}
@@ -7588,8 +7616,8 @@ static const struct file_operations md_seq_fops = {
int register_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality registered for level %d\n",
- p->name, p->level);
+ pr_debug("md: %s personality registered for level %d\n",
+ p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
spin_unlock(&pers_lock);
@@ -7599,7 +7627,7 @@ EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
- printk(KERN_INFO "md: %s personality unregistered\n", p->name);
+ pr_debug("md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
@@ -7639,7 +7667,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_err("can't find md-cluster module or get it's reference.\n");
+ pr_warn("can't find md-cluster module or get it's reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -7741,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -7751,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);
@@ -7772,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
@@ -7787,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -7798,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
} else
spin_unlock(&mddev->lock);
- if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EAGAIN;
else
return 0;
@@ -7914,11 +7942,9 @@ void md_do_sync(struct md_thread *thread)
mddev2->curr_resync >= mddev->curr_resync) {
if (mddev2_minor != mddev2->md_minor) {
mddev2_minor = mddev2->md_minor;
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev),
- mdname(mddev2));
+ pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
}
mddev_put(mddev2);
if (signal_pending(current))
@@ -7975,12 +8001,10 @@ void md_do_sync(struct md_thread *thread)
}
}
- printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
- printk(KERN_INFO "md: minimum _guaranteed_ speed:"
- " %d KB/sec/disk.\n", speed_min(mddev));
- printk(KERN_INFO "md: using maximum available idle IO bandwidth "
- "(but not more than %d KB/sec) for %s.\n",
- speed_max(mddev), desc);
+ pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
+ pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
+ pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
+ speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
@@ -7997,16 +8021,15 @@ void md_do_sync(struct md_thread *thread)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
- window/2, (unsigned long long)max_sectors/2);
+ pr_debug("md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
- printk(KERN_INFO
- "md: resuming %s of %s from checkpoint.\n",
- desc, mdname(mddev));
+ pr_debug("md: resuming %s of %s from checkpoint.\n",
+ desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
@@ -8038,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -8133,9 +8156,9 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
- test_bit(MD_RECOVERY_INTR, &mddev->recovery)
- ? "interrupted" : "done");
+ pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
@@ -8144,20 +8167,19 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
- printk(KERN_INFO
- "md: checkpointing %s of %s.\n",
- desc, mdname(mddev));
+ pr_debug("md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
@@ -8187,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
/* set CHANGE_PENDING here since maybe another update is needed,
* so other nodes are informed. It should be harmless for normal
* raid */
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8288,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
no_add:
if (removed)
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
return spares;
}
@@ -8305,8 +8327,8 @@ static void md_start_sync(struct work_struct *ws)
mddev,
"resync");
if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync thread...\n",
- mdname(mddev));
+ pr_warn("%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8356,8 +8378,8 @@ void md_check_recovery(struct mddev *mddev)
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
- printk(KERN_INFO "md: %s in immediate safe mode\n",
- mdname(mddev));
+ pr_debug("md: %s in immediate safe mode\n",
+ mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
@@ -8366,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8404,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
goto unlock;
}
@@ -8432,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -8441,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->sb_flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8537,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8552,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
- /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+ /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8614,9 +8636,12 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
if (rv == 0) {
/* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL,
+ "unacknowledged_bad_blocks");
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(rdev->mddev->thread);
return 1;
} else
@@ -8627,12 +8652,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks);
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
+ int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- return badblocks_clear(&rdev->badblocks,
- s, sectors);
+ rv = badblocks_clear(&rdev->badblocks, s, sectors);
+ if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -8749,7 +8777,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
- bdevname(rdev2->bdev,b));
+ bdevname(rdev2->bdev,b));
/* wakeup mddev->thread here, so array could
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -8785,15 +8813,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
* variable in case we err in the future
*/
rdev->sb_page = NULL;
- alloc_disk_sb(rdev);
- ClearPageUptodate(rdev->sb_page);
- rdev->sb_loaded = 0;
- err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
-
+ err = alloc_disk_sb(rdev);
+ if (err == 0) {
+ ClearPageUptodate(rdev->sb_page);
+ rdev->sb_loaded = 0;
+ err = super_types[mddev->major_version].
+ load_super(rdev, NULL, mddev->minor_version);
+ }
if (err < 0) {
pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
__func__, __LINE__, rdev->desc_nr, err);
- put_page(rdev->sb_page);
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
rdev->sb_page = swapout;
rdev->sb_loaded = 1;
return err;
@@ -8871,9 +8902,6 @@ void md_autodetect_dev(dev_t dev)
mutex_lock(&detected_devices_mutex);
list_add_tail(&node_detected_dev->list, &all_detected_devices);
mutex_unlock(&detected_devices_mutex);
- } else {
- printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
- ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
@@ -8887,7 +8915,7 @@ static void autostart_arrays(int part)
i_scanned = 0;
i_passed = 0;
- printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ pr_info("md: Autodetecting RAID arrays.\n");
mutex_lock(&detected_devices_mutex);
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
@@ -8912,8 +8940,7 @@ static void autostart_arrays(int part)
}
mutex_unlock(&detected_devices_mutex);
- printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
- i_scanned, i_passed);
+ pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
autorun_devices(part);
}
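The md.c hunks above repeatedly replace direct md_update_sb() calls with a deferred pattern: mark the superblock dirty in the new mddev->sb_flags word and let the md thread, when one exists, perform the actual write. A minimal sketch of that pattern follows; it is illustrative only (not an additional hunk of this patch) and uses only names that already appear above:

	/* request a superblock update without writing it synchronously */
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (mddev->thread)
		md_wakeup_thread(mddev->thread);	/* md thread will call md_update_sb() */
	else
		md_update_sb(mddev, 1);			/* no thread yet: write it here */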
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b2041773e79..e38936d05df1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,6 +30,16 @@
#define MaxSector (~(sector_t)0)
/*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/*
* MD's 'extended' device
*/
struct md_rdev {
@@ -168,6 +178,19 @@ enum flag_bits {
* so it is safe to remove without
* another synchronize_rcu() call.
*/
+ ExternalBbl, /* External metadata provides bad
+ * block management for a disk
+ */
+ FailFast, /* Minimal retries should be attempted on
+ * this device, so use REQ_FAILFAST_DEV.
+ * Also don't try to repair failed reads.
+ * It is expected that no bad block log
+ * is present.
+ */
+ LastDev, /* Seems to be the last working dev as
+ * it didn't fail, so don't use FailFast
+ * any more for metadata
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+enum mddev_flags {
+ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
+ MD_CLOSING, /* If set, the array is being closed, so do not
+ * open it */
+ MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
+ MD_HAS_JOURNAL, /* The raid array has journal feature set */
+ MD_RELOAD_SB, /* Reload the superblock because another node
+ * updated it.
+ */
+ MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+ * already took resync lock, need to
+ * release the lock */
+ MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
+ * supported as calls to md_error() will
+ * never cause the array to become failed.
+ */
+};
+
+enum mddev_sb_flags {
+ MD_SB_CHANGE_DEVS, /* Some device status has changed */
+ MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
+ MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
+ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -196,21 +244,7 @@ struct mddev {
int md_minor;
struct list_head disks;
unsigned long flags;
-#define MD_CHANGE_DEVS 0 /* Some device status has changed */
-#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
-#define MD_CLOSING 4 /* If set, we are closing the array, do not open
- * it then */
-#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
-#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
-#define MD_RELOAD_SB 7 /* Reload the superblock because another node
- * updated it.
- */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
- * already took resync lock, need to
- * release the lock */
+ unsigned long sb_flags;
int suspended;
atomic_t active_io;
@@ -304,31 +338,6 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
- /* recovery/resync flags
- * NEEDED: we might need to start a resync/recover
- * RUNNING: a thread is running, or about to be started
- * SYNC: actually doing a resync, not a recovery
- * RECOVER: doing recovery, or need to try it.
- * INTR: resync needs to be aborted for some reason
- * DONE: thread is done and is waiting to be reaped
- * REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for check-only, no repair
- * RESHAPE: A reshape is happening
- * ERROR: sync-action interrupted because io-error
- *
- * If neither SYNC or RESHAPE are set, then it is a recovery.
- */
-#define MD_RECOVERY_RUNNING 0
-#define MD_RECOVERY_SYNC 1
-#define MD_RECOVERY_RECOVER 2
-#define MD_RECOVERY_INTR 3
-#define MD_RECOVERY_DONE 4
-#define MD_RECOVERY_NEEDED 5
-#define MD_RECOVERY_REQUESTED 6
-#define MD_RECOVERY_CHECK 7
-#define MD_RECOVERY_RESHAPE 8
-#define MD_RECOVERY_FROZEN 9
-#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -442,6 +451,23 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
};
+enum recovery_flags {
+ /*
+ * If neither SYNC or RESHAPE are set, then it is a recovery.
+ */
+ MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
+ MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
+ MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
+ MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
+ MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
+ MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
+ MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
+ MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
+ MD_RECOVERY_RESHAPE, /* A reshape is happening */
+ MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
+ MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+};
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
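The md.h changes above split the old single mddev->flags bit space: array-state bits stay in mddev->flags (enum mddev_flags) while "superblock needs writing" bits move to the new mddev->sb_flags (enum mddev_sb_flags), which also removes the need for the old MD_UPDATE_SB_FLAGS mask. A short sketch of the intended split, again illustrative only and built from identifiers in this diff:

	if (test_bit(MD_CLOSING, &mddev->flags))	/* array state: enum mddev_flags */
		return -ENODEV;
	set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);	/* sb dirtiness: enum mddev_sb_flags */
	if (mddev->sb_flags)				/* replaces (flags & MD_UPDATE_SB_FLAGS) */
		md_update_sb(mddev, 0);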
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 673efbd6fc47..aa8c4e5c1ee2 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
}
rcu_read_unlock();
- printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
+ pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
return (-1);
}
@@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
- printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_info("multipath: %s: rescheduling sector %llu\n",
+ bdevname(rdev->bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, bio->bi_error);
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio);
+ bio_init(&mp_bh->bio, NULL, 0);
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
@@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
- printk(KERN_ALERT
- "multipath: only one IO path left and IO error.\n");
+ pr_warn("multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
}
@@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "multipath: IO failure on %s,"
- " disabling IO path.\n"
- "multipath: Operation continuing"
- " on %d IO paths.\n",
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_err("multipath: IO failure on %s, disabling IO path.\n"
+ "multipath: Operation continuing on %d IO paths.\n",
bdevname(rdev->bdev, b),
conf->raid_disks - mddev->degraded);
}
@@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
int i;
struct multipath_info *tmp;
- printk("MULTIPATH conf printout:\n");
+ pr_debug("MULTIPATH conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i;
if (tmp->rdev)
- printk(" disk%d, o:%d, dev:%s\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ pr_debug(" disk%d, o:%d, dev:%s\n",
+ i,!test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev,b));
}
}
@@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
- printk(KERN_ERR "hot-remove-disk, slot %d is identified"
- " but is still operational!\n", number);
+ pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
err = -EBUSY;
goto abort;
}
@@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
- printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
- " error for block %llu\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
- printk(KERN_ERR "multipath: %s: redirecting sector %llu"
- " to another IO path\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
@@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) {
- printk("multipath: %s: raid level not set to multipath IO (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
+ mdname(mddev), mddev->level);
goto out;
}
/*
@@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
- if (!conf->multipaths) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf->multipaths)
goto out_free_conf;
- }
working_disks = 0;
rdev_for_each(rdev, mddev) {
@@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
if (!working_disks) {
- printk(KERN_ERR "multipath: no operational IO paths for %s\n",
+ pr_warn("multipath: no operational IO paths for %s\n",
mdname(mddev));
goto out_free_conf;
}
@@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
- if (conf->pool == NULL) {
- printk(KERN_ERR
- "multipath: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (conf->pool == NULL)
goto out_free_conf;
- }
- {
- mddev->thread = md_register_thread(multipathd, mddev,
- "multipath");
- if (!mddev->thread) {
- printk(KERN_ERR "multipath: couldn't allocate thread"
- " for %s\n", mdname(mddev));
- goto out_free_conf;
- }
- }
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
+ if (!mddev->thread)
+ goto out_free_conf;
- printk(KERN_INFO
- "multipath: array %s active with %d out of %d IO paths\n",
+ pr_info("multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ mddev->raid_disks);
/*
* Ok, everything is just fine now
*/
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e83047cbb2da..7938cd21fa4c 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -700,13 +700,11 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
{
int r;
unsigned i;
- uint32_t nr_entries;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
- nr_entries = le32_to_cpu(ab->nr_entries);
for (i = 0; i < new_nr; i++) {
r = fn(base + i, element_at(info, ab, i), context);
if (r)
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 1e33dd51c21f..a6dde7cab458 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -18,6 +18,8 @@
/*----------------------------------------------------------------*/
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+
/*
* This is a read/write semaphore with a couple of differences.
*
@@ -302,6 +304,18 @@ static void report_recursive_bug(dm_block_t b, int r)
(unsigned long long) b);
}
+#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
+#define bl_init(x) do { } while (0)
+#define bl_down_read(x) 0
+#define bl_down_read_nonblock(x) 0
+#define bl_up_read(x) do { } while (0)
+#define bl_down_write(x) 0
+#define bl_up_write(x) do { } while (0)
+#define report_recursive_bug(x, y) do { } while (0)
+
+#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
/*----------------------------------------------------------------*/
/*
@@ -330,8 +344,11 @@ EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
struct dm_block_validator *validator;
- struct block_lock lock;
int write_locked;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+ struct block_lock lock;
+#endif
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
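When CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is disabled, the lock-tracking helpers above collapse into no-op macros so the block manager pays nothing for them. A minimal user-space sketch of the same compile-time stubbing pattern (DEBUG_LOCK_TRACKING and the dbg_* names are invented, not the kernel's):

#include <stdio.h>

struct dbg_lock { int holders; };

#ifdef DEBUG_LOCK_TRACKING
static void dbg_lock_init(struct dbg_lock *l)    { l->holders = 0; }
static int  dbg_lock_acquire(struct dbg_lock *l) { return l->holders++ ? -1 : 0; }
static void dbg_lock_release(struct dbg_lock *l) { l->holders--; }
#else
/* Tracking disabled: the helpers compile down to nothing, as in the patch. */
#define dbg_lock_init(l)    do { } while (0)
#define dbg_lock_acquire(l) 0
#define dbg_lock_release(l) do { } while (0)
#endif

int main(void)
{
	struct dbg_lock lock;

	dbg_lock_init(&lock);
	if (dbg_lock_acquire(&lock) == 0)
		dbg_lock_release(&lock);
	printf("lock tracking demo done\n");
	return 0;
}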
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4502c4..4c28608a0c94 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -464,7 +464,8 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
ll->nr_allocated--;
le32_add_cpu(&ie_disk.nr_free, 1);
ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
- }
+ } else
+ *ev = SM_NONE;
return ll->save_ie(ll, index, &ie_disk);
}
@@ -547,7 +548,6 @@ static int metadata_ll_init_index(struct ll_disk *ll)
if (r < 0)
return r;
- memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
ll->bitmap_root = dm_block_location(b);
dm_tm_unlock(ll->tm, b);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 7e44005595c1..20557e2c60c6 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
r = sm_ll_new_metadata(&smm->ll, tm);
+ if (!r) {
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ }
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
if (r)
return r;
- if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
- nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
- r = sm_ll_extend(&smm->ll, nr_blocks);
- if (r)
- return r;
-
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
-
/*
* Now we need to update the newly created data structures with the
* allocated blocks that they were built from.
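The reordered error handling above guarantees that the bootstrap ops are swapped back to the real ops on every exit path before the return code is checked. A compact user-space sketch of that pattern (the struct and helper names here are illustrative, not the dm-space-map API):

#include <stdio.h>
#include <string.h>

struct ops { const char *name; };

static const struct ops bootstrap_ops = { "bootstrap" };
static const struct ops real_ops      = { "real" };

struct space_map { struct ops sm; };

static int new_metadata(struct space_map *smm)   { (void)smm; return 0; }
static int extend(struct space_map *smm, int nr) { (void)smm; return nr > 0 ? 0 : -1; }

/* Install temporary ops for the setup phase, then restore the real ops
 * on every path -- success or failure -- before returning the error. */
static int metadata_create(struct space_map *smm, int nr_blocks)
{
	int r;

	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
	r = new_metadata(smm);
	if (!r)
		r = extend(smm, nr_blocks);
	memcpy(&smm->sm, &real_ops, sizeof(smm->sm));
	return r;
}

int main(void)
{
	struct space_map smm;

	printf("create: %d, ops now: %s\n", metadata_create(&smm, 8), smm.sm.name);
	return 0;
}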
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a2699d..a162fedeb51a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
@@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
- printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
- mdname(mddev),
- conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
+ mdname(mddev),
+ conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
for (j = 0; j < conf->nr_strip_zones; j++) {
- printk(KERN_INFO "md: zone%d=[", j);
+ char line[200];
+ int len = 0;
+
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk(KERN_CONT "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
- + k]->bdev, b));
- printk(KERN_CONT "]\n");
+ len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ + k]->bdev, b));
+ pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start;
- printk(KERN_INFO " zone-offset=%10lluKB, "
- "device-offset=%10lluKB, size=%10lluKB\n",
+ pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
(unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
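Because pr_debug() lines cannot be glued together with KERN_CONT the way printk() pieces were, dump_zones() now assembles each zone's device list into a stack buffer before printing it. A user-space sketch of the same technique (the device names are made up):

#include <stdio.h>

int main(void)
{
	const char *dev_name[] = { "sda1", "sdb1", "sdc1" };
	char line[200];
	int len = 0;

	for (int k = 0; k < 3; k++)
		len += snprintf(line + len, sizeof(line) - len, "%s%s",
				k ? "/" : "", dev_name[k]);
	printf("md: zone0=[%s]\n", line);	/* md: zone0=[sda1/sdb1/sdc1] */
	return 0;
}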
@@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % blksize) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
- mdname(mddev),
- mddev->chunk_sectors << 9, blksize);
+ pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
err = -EINVAL;
goto abort;
}
@@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
if (j < 0) {
- printk(KERN_ERR
- "md/raid0:%s: remove inactive devices before converting to RAID0\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
goto abort;
}
if (j >= mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
- "aborting!\n", mdname(mddev), j);
+ pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
+ mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
- "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
+ pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
+ mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
- mdname(mddev),
- (unsigned long long)mddev->array_sectors);
+ pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
if (mddev->queue) {
/* calculate the max read-ahead size.
@@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
}
do {
- sector_t sector = bio->bi_iter.bi_sector;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+ sector_t sector = bio_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects -
@@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
/* Restore due to sector_div */
- sector = bio->bi_iter.bi_sector;
+ sector = bio_sector;
if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
- } else
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+ split, disk_devt(mddev->gendisk),
+ bio_sector);
generic_make_request(split);
+ }
} while (split != bio);
}
@@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
struct r0conf *priv_conf;
if (mddev->degraded != 1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
- mdname(mddev),
- mddev->degraded);
+ pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
return ERR_PTR(-EINVAL);
}
rdev_for_each(rdev, mddev) {
/* check slot number for a disk */
if (rdev->raid_disk == mddev->raid_disks-1) {
- printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
rdev->sectors = mddev->dev_sectors;
@@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
+
return priv_conf;
}
@@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
* - all mirrors must be already degraded
*/
if (mddev->layout != ((1 << 8) + 2)) {
- printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
- mdname(mddev),
- mddev->layout);
+ pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
return ERR_PTR(-EINVAL);
}
if (mddev->raid_disks & 1) {
- printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
if (mddev->degraded != (mddev->raid_disks>>1)) {
- printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
- mdname(mddev));
+ pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
* - (N - 1) mirror drives must be already faulty
*/
if ((mddev->raid_disks - 1) != mddev->degraded) {
- printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+ pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
*/
if (mddev->bitmap) {
- printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
- mdname(mddev));
+ pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
+ mdname(mddev));
return ERR_PTR(-EBUSY);
}
if (mddev->level == 4)
@@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->layout == ALGORITHM_PARITY_N)
return raid0_takeover_raid45(mddev);
- printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
- mdname(mddev), ALGORITHM_PARITY_N);
+ pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
}
if (mddev->level == 10)
@@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->level == 1)
return raid0_takeover_raid1(mddev);
- printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ pr_warn("Takeover from raid%i to raid0 not supported\n",
mddev->level);
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1961d827dbd1..a1f3fbed9100 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
+#define raid1_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
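The raid1_log() helper added above only emits a trace message when the mddev actually has a request queue. A hedged user-space analogue of that conditional-logging macro, with printf() standing in for blk_add_trace_msg() and the demo_* names invented:

#include <stdio.h>

struct md { void *queue; };

/* Same shape as raid1_log()/raid10_log(): log only when the device has a
 * request queue to trace into. */
#define demo_log(md, fmt, args...) \
	do { if ((md)->queue) printf("raid1 " fmt "\n", ##args); } while (0)

int main(void)
{
	int q = 1;
	struct md with_queue = { .queue = &q };
	struct md no_queue   = { .queue = NULL };

	demo_log(&with_queue, "wait rdev %d blocked", 3);	/* printed */
	demo_log(&no_queue, "wait barrier");			/* silently skipped */
	return 0;
}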
@@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ /* This was a fail-fast read so we definitely
+ * want to retry */
+ ;
else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(
- KERN_ERR "md/raid1:%s: %s: "
- "rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(rdev->bdev,
- b),
- (unsigned long long)r1_bio->sector);
+ pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */
@@ -403,17 +409,37 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST) &&
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
+ } else
+ set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -444,7 +470,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -531,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = 0;
has_nonrot_disk = 0;
choose_next_idle = 0;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
@@ -604,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else
best_good_sectors = sectors;
+ if (best_disk >= 0)
+ /* At least two disks to choose from so failfast is OK */
+ set_bit(R1BIO_FailFast, &r1_bio->state);
+
nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
@@ -642,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
}
break;
}
- /* If device is idle, use it */
- if (pending == 0) {
- best_disk = disk;
- break;
- }
if (choose_next_idle)
continue;
@@ -669,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
* mixed rotational/non-rotational disks depending on workload.
*/
if (best_disk == -1) {
- if (has_nonrot_disk)
+ if (has_nonrot_disk || min_pending == 0)
best_disk = best_pending_disk;
else
best_disk = best_dist_disk;
@@ -742,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
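The write path now smuggles the md_rdev pointer through bio->bi_bdev so that flush_pending_writes() and raid1_unplug() can check Faulty and restore the real block device just before submission. A simplified user-space sketch of that stash-and-restore pattern (all types and names here are stand-ins, not the kernel's):

#include <stdio.h>

struct bdev { const char *name; };
struct rdev { struct bdev *bdev; int faulty; };
struct bio  { void *bi_bdev; int error; };

/* Writer side: stash the rdev in the field that normally holds the bdev. */
static void queue_write(struct bio *bio, struct rdev *rdev)
{
	bio->bi_bdev = rdev;
}

/* Flush side: recover the rdev, restore the real device, and fail the I/O
 * immediately if the device has meanwhile been marked faulty. */
static void flush_write(struct bio *bio)
{
	struct rdev *rdev = bio->bi_bdev;

	bio->bi_bdev = rdev->bdev;
	if (rdev->faulty) {
		bio->error = -5;	/* -EIO */
		printf("failed write on %s\n", rdev->bdev->name);
	} else {
		printf("submitted write to %s\n", rdev->bdev->name);
	}
}

int main(void)
{
	struct bdev b = { "sda" };
	struct rdev r = { &b, 1 };
	struct bio bio = { 0 };

	queue_write(&bio, &r);
	flush_write(&bio);
	return 0;
}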
@@ -829,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if (conf->barrier && bio_data_dir(bio) == WRITE) {
if ((conf->mddev->curr_resync_completed
>= bio_end_sector(bio)) ||
- (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ (conf->start_next_window + NEXT_NORMALIO_DISTANCE
<= bio->bi_iter.bi_sector))
wait = false;
else
@@ -855,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* that queue to allow conf->start_next_window
* to increase.
*/
+ raid1_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
@@ -934,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
+ raid1_log(conf->mddev, "wait freeze");
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -1016,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1133,6 +1171,7 @@ read_again:
* take care not to over-take any writes
* that are 'behind'
*/
+ raid1_log(mddev, "wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1150,8 +1189,16 @@ read_again:
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
if (max_sectors < r1_bio->sectors) {
/* could not read all from this device, so we will
* need another r1_bio.
@@ -1192,6 +1239,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid1_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1283,6 +1331,7 @@ read_again:
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+ raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1360,10 +1409,21 @@ read_again:
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+ !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ conf->raid_disks - mddev->degraded > 1)
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
plug = container_of(cb, struct raid1_plug_cb, cb);
@@ -1433,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* next level up know.
* else mark the drive as failed
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_bit(In_sync, &rdev->flags)
&& (conf->raid_disks - mddev->degraded) == 1) {
/*
@@ -1442,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* it is very likely to fail.
*/
conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
set_bit(Blocked, &rdev->flags);
- spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
@@ -1456,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid1:%s: Disk failure on %s, disabling device.\n"
- "md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
int i;
- printk(KERN_DEBUG "RAID1 conf printout:\n");
+ pr_debug("RAID1 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
rcu_read_unlock();
}
@@ -1785,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors;
int idx = 0;
+ struct md_rdev *rdev;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (test_bit(FailFast, &rdev->flags)) {
+ /* Don't try recovering from here - just fail it
+ * ... unless it is the last working device of course */
+ md_error(mddev, rdev);
+ if (test_bit(Faulty, &rdev->flags))
+ /* Don't try to read from here, but make sure
+ * put_buf does its thing
+ */
+ bio->bi_end_io = end_sync_write;
+ }
while(sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
- struct md_rdev *rdev;
int start;
if (s > (PAGE_SIZE>>9))
@@ -1822,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
- " for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
- (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
+ (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2010,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
continue;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+ wbio->bi_opf |= MD_FAILFAST;
+
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2119,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(sect +
- rdev->data_offset),
- bdevname(rdev->bdev, b));
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect +
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
} else
@@ -2284,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct bio *bio;
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ dev_t bio_dev;
+ sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2294,48 +2368,53 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* This is all done synchronously while the array is
* frozen
*/
- if (mddev->ro == 0) {
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
+ bio_put(bio);
+ r1_bio->bios[r1_bio->read_disk] = NULL;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (mddev->ro == 0
+ && !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
- } else
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+ } else {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ }
+
+ rdev_dec_pending(rdev, conf->mddev);
- bio = r1_bio->bios[r1_bio->read_disk];
- bdevname(bio->bi_bdev, b);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
= r1_bio->master_bio->bi_opf & REQ_SYNC;
- if (bio) {
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
- }
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
- printk_ratelimited(KERN_ERR
- "md/raid1:%s: redirecting sector %llu"
- " to other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev, b));
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2349,6 +2428,8 @@ read_more:
else
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
bio = NULL;
@@ -2363,8 +2444,11 @@ read_more:
sectors_handled;
goto read_more;
- } else
+ } else {
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev, bio_sector);
generic_make_request(bio);
+ }
}
}
@@ -2380,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2437,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
generic_make_request(r1_bio->bios[r1_bio->read_disk]);
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -2619,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
}
}
rcu_read_unlock();
@@ -2638,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
min_bad, 0
) && ok;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1;
put_buf(r1_bio);
@@ -2749,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
}
@@ -2756,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
}
@@ -2871,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1");
- if (!conf->thread) {
- printk(KERN_ERR
- "md/raid1:%s: couldn't allocate thread\n",
- mdname(mddev));
+ if (!conf->thread)
goto abort;
- }
return conf;
@@ -2901,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
bool discard_supported = false;
if (mddev->level != 1) {
- printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
- mdname(mddev));
+ pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+ mdname(mddev));
return -EIO;
}
/*
@@ -2946,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid1:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid1:%s: active with %d out of %d mirrors\n",
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2960,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
mddev->private = conf;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
@@ -3103,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING
- "md/raid1:%s: cannot register rd%d\n",
- mdname(mddev), rdev->raid_disk);
+ pr_warn("md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
@@ -3159,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
+ if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
+ clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+ clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ }
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b390cd8..c52ef424a24b 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
};
/* bits for r1bio.state */
-#define R1BIO_Uptodate 0
-#define R1BIO_IsSync 1
-#define R1BIO_Degraded 2
-#define R1BIO_BehindIO 3
+enum r1bio_state {
+ R1BIO_Uptodate,
+ R1BIO_IsSync,
+ R1BIO_Degraded,
+ R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
*/
-#define R1BIO_ReadError 4
+ R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -176,10 +177,12 @@ struct r1bio {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
-#define R1BIO_Returned 6
+ R1BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag
*/
-#define R1BIO_MadeGood 7
-#define R1BIO_WriteError 8
+ R1BIO_MadeGood,
+ R1BIO_WriteError,
+ R1BIO_FailFast,
+};
#endif
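Switching the R1BIO_* flags from #defines to an enum changes nothing at run time; the values are still small bit indices used with set_bit()/test_bit(). A tiny user-space illustration (the DEMO_* names are invented):

#include <stdio.h>

enum demo_state { DEMO_Uptodate, DEMO_ReadError, DEMO_FailFast };

int main(void)
{
	unsigned long state = 0;

	state |= 1UL << DEMO_FailFast;			/* set_bit() analogue */
	printf("FailFast=%d ReadError=%d\n",
	       !!(state & (1UL << DEMO_FailFast)),	/* test_bit() analogue */
	       !!(state & (1UL << DEMO_ReadError)));
	return 0;
}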
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..ab5e86209322 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
+static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
+#define raid10_log(md, fmt, args...) \
+ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
+
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
@@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- printk_ratelimited(KERN_ERR
- "md/raid10:%s: %s: rescheduling sector %llu\n",
+ pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
@@ -447,6 +451,10 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ struct bio *to_put = NULL;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -460,7 +468,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -471,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- set_bit(R10BIO_WriteError, &r10_bio->state);
+
dec_rdev = 0;
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST)) {
+ md_error(rdev->mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ /* This is the only remaining device,
+ * We need to retry the write without
+ * FailFast
+ */
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+ }
+ } else
+ set_bit(R10BIO_WriteError, &r10_bio->state);
}
} else {
/*
@@ -503,7 +527,7 @@ static void raid10_end_write_request(struct bio *bio)
if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
@@ -522,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
one_write_done(r10_bio);
if (dec_rdev)
rdev_dec_pending(rdev, conf->mddev);
+ if (to_put)
+ bio_put(to_put);
}
/*
@@ -713,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
+ clear_bit(R10BIO_FailFast, &r10_bio->state);
/*
* Check if we can balance. We can balance on the whole
* device if no resync is going on (recovery is ok), or below
@@ -777,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
+ if (best_slot >= 0)
+ /* At least 2 disks to choose from so failfast is OK */
+ set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
- break;
+ new_distance = 0;
/* for far > 1 always use the lowest address */
- if (geo->far_copies > 1)
+ else if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -856,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -934,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
+ raid10_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
@@ -1034,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ bio->bi_bdev = rdev->bdev;
+ if (test_bit(Faulty, &rdev->flags)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
else
@@ -1080,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
+ raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
@@ -1096,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
conf->reshape_safe = mddev->reshape_position;
}
@@ -1151,8 +1194,15 @@ read_again:
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
if (max_sectors < r10_bio->sectors) {
/* Could not read all from this device, so we will
* need another r10_bio.
@@ -1192,6 +1242,7 @@ read_again:
*/
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
+ raid10_log(mddev, "wait queued");
wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests);
}
@@ -1319,6 +1370,7 @@ retry_write:
}
}
allow_barrier(conf);
+ raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
@@ -1358,8 +1410,18 @@ retry_write:
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
+ enough(conf, d))
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev,
@@ -1402,6 +1464,13 @@ retry_write:
bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ mbio, disk_devt(conf->mddev->gendisk),
+ r10_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void*)rdev;
+
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
@@ -1583,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
- printk(KERN_ALERT
- "md/raid10:%s: Disk failure on %s, disabling device.\n"
- "md/raid10:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+ pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+ "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1598,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
int i;
struct md_rdev *rdev;
- printk(KERN_DEBUG "RAID10 conf printout:\n");
+ pr_debug("RAID10 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
- conf->geo.raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
/* This is only called with ->reconfig_mutex held, so
* rcu protection of rdev is not needed */
@@ -1612,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev;
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
}
@@ -1950,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) {
int j, d;
+ struct md_rdev *rdev;
tbio = r10_bio->devs[i].bio;
@@ -1957,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
+ d = r10_bio->devs[i].devnum;
+ rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
@@ -1979,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
+ } else if (test_bit(FailFast, &rdev->flags)) {
+ /* Just give up on this device */
+ md_error(rdev->mddev, rdev);
+ continue;
}
/* Ok, we need to write this bio, either to correct an
* inconsistency or to correct an unreadable block.
@@ -1996,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
@@ -2106,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) {
/* just abort the recovery */
- printk(KERN_NOTICE
- "md/raid10:%s: recovery aborted"
- " due to read error\n",
- mdname(mddev));
+ pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+ mdname(mddev));
conf->mirrors[dw].recovery_disabled
= mddev->recovery_disabled;
@@ -2256,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Raid device exceeded "
- "read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
- atomic_read(&rdev->read_errors), max_read_errors);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
+ pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+ atomic_read(&rdev->read_errors), max_read_errors);
+ pr_notice("md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
@@ -2353,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect +
- choose_data_offset(r10_bio,
- rdev)),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
+ bdevname(rdev->bdev, b));
+ pr_notice("md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2394,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) {
case 0:
/* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
+ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
+ pr_notice("md/raid10:%s: %s: failing drive\n",
mdname(mddev),
bdevname(rdev->bdev, b));
break;
case 1:
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
+ pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
sect +
@@ -2500,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
+ dev_t bio_dev;
+ sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2511,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
*/
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
+ bio_dev = bio->bi_bdev->bd_dev;
+ bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
- if (mddev->ro == 0) {
+ if (mddev->ro)
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+ else if (!test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
- r10_bio->devs[slot].bio = IO_BLOCKED;
+ md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
read_more:
rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b,
- (unsigned long long)r10_bio->sector);
+ pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
return;
}
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
- printk_ratelimited(
- KERN_ERR
- "md/raid10:%s: %s: redirecting "
- "sector %llu to another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- (unsigned long long)r10_bio->sector);
+ pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -2552,8 +2615,15 @@ read_more:
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
+ trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ bio, bio_dev,
+ bio_last_sector - r10_bio->sectors);
+
if (max_sectors < r10_bio->sectors) {
/* Drat - have to split this up more */
struct bio *mbio = r10_bio->master_bio;
@@ -2691,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--;
@@ -2752,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
}
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
@@ -3069,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3157,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!any_working) {
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
+ pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
mdname(mddev));
mirror->recovery_disabled
= mddev->recovery_disabled;
@@ -3175,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
+ if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+ /* Only want this if there is elsewhere to
+ * read from. 'j' is currently the first
+ * readable copy.
+ */
+ int targets = 1;
+ for (; j < conf->copies; j++) {
+ int d = r10_bio->devs[j].devnum;
+ if (conf->mirrors[d].rdev &&
+ test_bit(In_sync,
+ &conf->mirrors[d].rdev->flags))
+ targets++;
+ }
+ if (targets == 1)
+ r10_bio->devs[0].bio->bi_opf
+ &= ~MD_FAILFAST;
+ }
}
if (biolist == NULL) {
while (r10_bio) {
@@ -3253,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3276,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
@@ -3486,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
copies = setup_geo(&geo, mddev, geo_new);
if (copies == -2) {
- printk(KERN_ERR "md/raid10:%s: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n",
- mdname(mddev), PAGE_SIZE);
+ pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
goto out;
}
if (copies < 2 || copies > mddev->raid_disks) {
- printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+ mdname(mddev), mddev->new_layout);
goto out;
}
@@ -3554,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (err == -ENOMEM)
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
if (conf) {
mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
@@ -3653,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
}
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
- printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+ pr_err("md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -3695,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
}
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid10:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid10:%s: active with %d out of %d devices\n",
+ pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid10:%s: active with %d out of %d devices\n",
mdname(mddev), conf->geo.raid_disks - mddev->degraded,
conf->geo.raid_disks);
/*
@@ -3709,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
size = raid10_size(mddev, 0, 0);
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
@@ -3736,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
if (max(before_length, after_length) > min_offset_diff) {
/* This cannot work */
- printk("md/raid10: offset difference not enough to continue reshape\n");
+ pr_warn("md/raid10: offset difference not enough to continue reshape\n");
goto out_free_conf;
}
conf->offset_diff = min_offset_diff;
@@ -3843,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
struct r10conf *conf;
if (mddev->degraded > 0) {
- printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
sector_div(size, devs);
@@ -3884,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
/* for raid0 takeover only one zone is supported */
raid0_conf = mddev->private;
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
- " with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
return raid10_takeover_raid0(mddev,
@@ -4075,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
sector_t size = raid10_size(mddev, 0, 0);
if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock);
- printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
- mdname(mddev));
+ pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
mddev->resync_max_sectors = size;
@@ -4123,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
mddev->raid_disks = conf->geo.raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4318,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
else
mddev->curr_resync_completed = conf->reshape_progress;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
allow_barrier(conf);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 18ec1f7a98bf..3162615e57bd 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -156,5 +156,7 @@ enum r10bio_state {
* flag is set
*/
R10BIO_Previous,
+/* failfast devices did receive failfast requests. */
+ R10BIO_FailFast,
};
#endif
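As an aside, the effect of the MD_FAILFAST handling added in the raid10 hunks above can be summarised by a small stand-alone model (the struct and function names below are invented for illustration, not the kernel API): a recovery read keeps the failfast hint only while more than one in-sync copy remains to read from.

#include <stdbool.h>
#include <stdio.h>

#define MD_FAILFAST 0x1		/* stand-in for the real bio flag */

struct copy {
	bool present;		/* mirror device exists */
	bool in_sync;		/* device is In_sync */
};

/* Drop the failfast hint when only one in-sync copy could serve the read. */
static unsigned int keep_failfast(unsigned int opf,
				  const struct copy *copies, int ncopies)
{
	int targets = 0;
	int j;

	for (j = 0; j < ncopies; j++)
		if (copies[j].present && copies[j].in_sync)
			targets++;
	if (targets <= 1)
		opf &= ~MD_FAILFAST;
	return opf;
}

int main(void)
{
	struct copy two[] = { { true, true }, { true, true } };
	struct copy one[] = { { true, true }, { true, false } };

	printf("two in-sync copies, failfast kept: %u\n",
	       keep_failfast(MD_FAILFAST, two, 2) & MD_FAILFAST);
	printf("one in-sync copy,  failfast kept: %u\n",
	       keep_failfast(MD_FAILFAST, one, 2) & MD_FAILFAST);
	return 0;
}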
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..d7bfb6fc8aef 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,10 @@
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid5.h"
+#include "bitmap.h"
/*
* metadata/data stored in disk with 4k size unit (a block) regardless
@@ -28,18 +31,70 @@
#define BLOCK_SECTORS (8)
/*
- * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
- * recovery scans a very long log
+ * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
+ *
+ * In write through mode, the reclaim runs every log->max_free_space.
+ * This prevents the recovery from having to scan a very long log.
*/
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
+/* wake up reclaim thread periodically */
+#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
+/* start flush with these full stripes */
+#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+/* reclaim stripes in groups */
+#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
+
/*
* We only need 2 bios per I/O unit to make progress, but ensure we
* have a few more available to not get too tight.
*/
#define R5L_POOL_SIZE 4
+/*
+ * r5c journal modes of the array: write-back or write-through.
+ * write-through mode has identical behavior as existing log only
+ * implementation.
+ */
+enum r5c_journal_mode {
+ R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
+ R5C_JOURNAL_MODE_WRITE_BACK = 1,
+};
+
+static char *r5c_journal_mode_str[] = {"write-through",
+ "write-back"};
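A minimal user-space sketch, assuming the same two mode strings, of how a mode name could be mapped back onto the enum above (this parsing helper is hypothetical, not the sysfs code in this patch):

#include <stdio.h>
#include <string.h>

enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

static const char *r5c_journal_mode_str[] = { "write-through", "write-back" };

/* Return the enum value for a mode name, or -1 if it is unknown. */
static int parse_journal_mode(const char *buf)
{
	int i;

	for (i = 0; i < 2; i++)
		if (strcmp(buf, r5c_journal_mode_str[i]) == 0)
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", parse_journal_mode("write-back"));    /* 1 */
	printf("%d\n", parse_journal_mode("write-through")); /* 0 */
	printf("%d\n", parse_journal_mode("bogus"));         /* -1 */
	return 0;
}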
+/*
+ * raid5 cache state machine
+ *
+ * With the RAID cache, each stripe works in two phases:
+ * - caching phase
+ * - writing-out phase
+ *
+ * These two phases are controlled by bit STRIPE_R5C_CACHING:
+ * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
+ * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
+ *
+ * When there is no journal, or the journal is in write-through mode,
+ * the stripe is always in writing-out phase.
+ *
+ * For write-back journal, the stripe is sent to caching phase on write
+ * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
+ * the write-out phase by clearing STRIPE_R5C_CACHING.
+ *
+ * Stripes in caching phase do not write the raid disks. Instead, all
+ * writes are committed from the log device. Therefore, a stripe in
+ * caching phase handles writes as:
+ * - write to log device
+ * - return IO
+ *
+ * Stripes in writing-out phase handle writes as:
+ * - calculate parity
+ * - write pending data and parity to journal
+ * - write data and parity to raid disks
+ * - return IO for pending writes
+ */
+
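To make the comment above easier to follow, here is a tiny stand-alone model of the phase rule (flags reduced to booleans, names invented): only a write-back journal with STRIPE_R5C_CACHING set keeps a stripe in the caching phase; everything else is writing-out.

#include <stdbool.h>
#include <stdio.h>

enum phase { WRITING_OUT, CACHING };

/*
 * Model of the state-machine rule: a missing journal or write-through
 * mode forces writing-out; otherwise the caching bit decides.
 */
static enum phase stripe_phase(bool has_journal, bool write_back, bool caching_bit)
{
	if (!has_journal || !write_back)
		return WRITING_OUT;
	return caching_bit ? CACHING : WRITING_OUT;
}

int main(void)
{
	printf("%d\n", stripe_phase(true, true, true));    /* 1: caching */
	printf("%d\n", stripe_phase(true, true, false));   /* 0: writing out */
	printf("%d\n", stripe_phase(true, false, true));   /* 0: write-through */
	printf("%d\n", stripe_phase(false, false, false)); /* 0: no journal */
	return 0;
}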
struct r5l_log {
struct md_rdev *rdev;
@@ -58,7 +113,6 @@ struct r5l_log {
u64 seq; /* log head sequence */
sector_t next_checkpoint;
- u64 next_cp_seq;
struct mutex io_mutex;
struct r5l_io_unit *current_io; /* current io_unit accepting new data */
@@ -96,6 +150,18 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
+
+ /* for r5c_cache */
+ enum r5c_journal_mode r5c_journal_mode;
+
+ /* all stripes in r5cache, in the order of seq at sh->log_start */
+ struct list_head stripe_in_journal_list;
+
+ spinlock_t stripe_in_journal_lock;
+ atomic_t stripe_in_journal_count;
+
+ /* to submit async io_units, to fulfill ordering of flush */
+ struct work_struct deferred_io_work;
};
/*
@@ -122,6 +188,18 @@ struct r5l_io_unit {
int state;
bool need_split_bio;
+ struct bio *split_bio;
+
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include empty flush request */
+ /*
+ * io hasn't been sent yet; a flush/fua request can only be submitted once
+ * it is the first IO in the running_ios list
+ */
+ unsigned int io_deferred:1;
+
+ struct bio_list flush_barriers; /* size == 0 flush bios */
};
/* r5l_io_unit state */
@@ -133,6 +211,12 @@ enum r5l_io_unit_state {
IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};
+bool r5c_is_writeback(struct r5l_log *log)
+{
+ return (log != NULL &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
+}
+
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
start += inc;
@@ -168,12 +252,235 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
io->state = state;
}
+static void
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
+ struct bio_list *return_bi)
+{
+ struct bio *wbi, *wbi2;
+
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_iter.bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (!raid5_dec_bi_active_stripes(wbi)) {
+ md_write_end(conf->mddev);
+ bio_list_add(return_bi, wbi);
+ }
+ wbi = wbi2;
+ }
+}
+
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
+{
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ if (sh->dev[i].written) {
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i],
+ return_bi);
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
+ }
+ }
+}
+
+/* Check whether we should flush some stripes to free up stripe cache */
+void r5c_check_stripe_cache_usage(struct r5conf *conf)
+{
+ int total_cached;
+
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ /*
+ * The following condition is true for either of the following:
+ * - stripe cache pressure high:
+ * total_cached > 3/4 min_nr_stripes ||
+ * empty_inactive_list_nr > 0
+ * - stripe cache pressure moderate:
+ * total_cached > 1/2 min_nr_stripes
+ */
+ if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
+ * stripes in the cache
+ */
+void r5c_check_cached_full_stripe(struct r5conf *conf)
+{
+ if (!r5c_is_writeback(conf->log))
+ return;
+
+ /*
+ * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
+ * or a full stripe (chunk size / 4k stripes).
+ */
+ if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ min(R5C_FULL_STRIPE_FLUSH_BATCH,
+ conf->chunk_sectors >> STRIPE_SHIFT))
+ r5l_wake_reclaim(conf->log, 0);
+}
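For a concrete feel of this trigger, a small stand-alone calculation (chunk sizes invented, STRIPE_SHIFT assumed to be 3 for 4 KiB pages): reclaim wakes once the smaller of the 256-stripe batch and one chunk's worth of full stripes is cached.

#include <stdio.h>

#define R5C_FULL_STRIPE_FLUSH_BATCH 256
#define STRIPE_SHIFT 3	/* 4 KiB stripe units, assuming 4 KiB pages */

static int min_int(int a, int b) { return a < b ? a : b; }

/* threshold of cached full stripes that wakes the reclaim thread */
static int full_stripe_flush_threshold(int chunk_sectors)
{
	return min_int(R5C_FULL_STRIPE_FLUSH_BATCH,
		       chunk_sectors >> STRIPE_SHIFT);
}

int main(void)
{
	/* 512 KiB chunks: 1024 sectors -> 128 pages per chunk */
	printf("%d\n", full_stripe_flush_threshold(1024)); /* 128 */
	/* 4 MiB chunks: 8192 sectors -> capped at the 256-stripe batch */
	printf("%d\n", full_stripe_flush_threshold(8192)); /* 256 */
	return 0;
}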
+
+/*
+ * Total log space (in sectors) needed to flush all data in cache
+ *
+ * Currently, writing-out phase automatically includes all pending writes
+ * to the same sector. So the reclaim of each stripe takes up to
+ * (conf->raid_disks + 1) pages of log space.
+ *
+ * To totally avoid deadlock due to log space, the code reserves
+ * (conf->raid_disks + 1) pages for each stripe in cache, which is not
+ * necessary in most cases.
+ *
+ * To improve this, we will need writing-out phase to be able to NOT include
+ * pending writes, which will reduce the requirement to
+ * (conf->max_degraded + 1) pages per stripe in cache.
+ */
+static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+
+ if (!r5c_is_writeback(log))
+ return 0;
+
+ return BLOCK_SECTORS * (conf->raid_disks + 1) *
+ atomic_read(&log->stripe_in_journal_count);
+}
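A worked example of the formula above as a stand-alone calculation (the array geometry is invented): with 4 KiB blocks, an 8-disk array with 100 stripes in the journal must keep 8 * 9 * 100 = 7200 sectors (about 3.5 MiB) reservable.

#include <stdio.h>

#define BLOCK_SECTORS 8	/* one 4 KiB block in 512-byte sectors */

/* sectors that must stay reservable to flush every cached stripe */
static unsigned long required_to_flush(int raid_disks, int stripes_in_journal)
{
	return (unsigned long)BLOCK_SECTORS * (raid_disks + 1) *
	       stripes_in_journal;
}

int main(void)
{
	unsigned long sectors = required_to_flush(8, 100);

	printf("%lu sectors (%lu KiB)\n", sectors, sectors / 2);
	return 0;
}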
+
+/*
+ * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
+ *
+ * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
+ * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
+ * device is less than 2x of reclaim_required_space.
+ */
+static inline void r5c_update_log_state(struct r5l_log *log)
+{
+ struct r5conf *conf = log->rdev->mddev->private;
+ sector_t free_space;
+ sector_t reclaim_space;
+ bool wake_reclaim = false;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ free_space = r5l_ring_distance(log, log->log_start,
+ log->last_checkpoint);
+ reclaim_space = r5c_log_required_to_flush_cache(conf);
+ if (free_space < 2 * reclaim_space)
+ set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ else {
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ wake_reclaim = true;
+ clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+ }
+ if (free_space < 3 * reclaim_space)
+ set_bit(R5C_LOG_TIGHT, &conf->cache_state);
+ else
+ clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
+
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, 0);
+}
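The same thresholds as a tiny stand-alone model (numbers invented, and simplified to a single label where the kernel sets two independent bits): the log is critical below 2x the space needed to flush the cache and tight below 3x.

#include <stdio.h>

/* Classify log pressure the way r5c_update_log_state() does. */
static const char *log_state(unsigned long free_space, unsigned long reclaim_space)
{
	if (free_space < 2 * reclaim_space)
		return "critical";
	if (free_space < 3 * reclaim_space)
		return "tight";
	return "ok";
}

int main(void)
{
	printf("%s\n", log_state(10000, 7200)); /* critical: below 2x */
	printf("%s\n", log_state(20000, 7200)); /* tight:    below 3x */
	printf("%s\n", log_state(30000, 7200)); /* ok */
	return 0;
}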
+
+/*
+ * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
+ * This function should only be called in write-back mode.
+ */
+void r5c_make_stripe_write_out(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5l_log *log = conf->log;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ }
+
+ if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
+ BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
+ atomic_dec(&conf->r5c_cached_full_stripes);
+ }
+}
+
+static void r5c_handle_data_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ set_bit(R5_InJournal, &sh->dev[i].flags);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ }
+ clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+}
+
+/*
+ * this journal write must contain full parity,
+ * it may also contain some data pages
+ */
+static void r5c_handle_parity_cached(struct stripe_head *sh)
+{
+ int i;
+
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+}
+
+/*
+ * Setting proper flags after writing (or flushing) data and/or parity to the
+ * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
+ */
+static void r5c_finish_cache_stripe(struct stripe_head *sh)
+{
+ struct r5l_log *log = sh->raid_conf->log;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ /*
+ * Set R5_InJournal for parity dev[pd_idx]. This means
+ * all data AND parity in the journal. For RAID 6, it is
+ * NOT necessary to set the flag for dev[qd_idx], as the
+ * two parities are written out together.
+ */
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5c_handle_data_cached(sh);
+ } else {
+ r5c_handle_parity_cached(sh);
+ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+ }
+}
+
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
struct stripe_head *sh, *next;
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
list_del_init(&sh->log_list);
+
+ r5c_finish_cache_stripe(sh);
+
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
}
@@ -209,9 +516,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
}
}
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
struct r5l_io_unit *io = bio->bi_private;
+ struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
@@ -227,18 +536,89 @@ static void r5l_log_endio(struct bio *bio)
r5l_move_to_end_ios(log);
else
r5l_log_run_stripes(log);
+ if (!list_empty(&log->running_ios)) {
+ /*
+ * FLUSH/FUA io_unit is deferred because of ordering, now we
+ * can dispatch it
+ */
+ io_deferred = list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling);
+ if (io_deferred->io_deferred)
+ schedule_work(&log->deferred_io_work);
+ }
+
spin_unlock_irqrestore(&log->io_list_lock, flags);
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
+
+ if (io->has_null_flush) {
+ struct bio *bi;
+
+ WARN_ON(bio_list_empty(&io->flush_barriers));
+ while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
+ bio_endio(bi);
+ atomic_dec(&io->pending_stripe);
+ }
+ if (atomic_read(&io->pending_stripe) == 0)
+ __r5l_stripe_write_finished(io);
+ }
+}
+
+static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+ if (io->has_flush)
+ io->current_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->current_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->current_bio);
+
+ if (!io->split_bio)
+ return;
+
+ if (io->has_flush)
+ io->split_bio->bi_opf |= REQ_PREFLUSH;
+ if (io->has_fua)
+ io->split_bio->bi_opf |= REQ_FUA;
+ submit_bio(io->split_bio);
+}
+
+/* deferred io_unit will be dispatched here */
+static void r5l_submit_io_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ deferred_io_work);
+ struct r5l_io_unit *io = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&log->io_list_lock, flags);
+ if (!list_empty(&log->running_ios)) {
+ io = list_first_entry(&log->running_ios, struct r5l_io_unit,
+ log_sibling);
+ if (!io->io_deferred)
+ io = NULL;
+ else
+ io->io_deferred = 0;
+ }
+ spin_unlock_irqrestore(&log->io_list_lock, flags);
+ if (io)
+ r5l_do_submit_io(log, io);
}
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
+ struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
+ bool do_submit = true;
if (!io)
return;
@@ -247,13 +627,20 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
+ bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
- __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+ if (io->has_flush || io->has_fua) {
+ if (io != list_first_entry(&log->running_ios,
+ struct r5l_io_unit, log_sibling)) {
+ io->io_deferred = 1;
+ do_submit = false;
+ }
+ }
spin_unlock_irqrestore(&log->io_list_lock, flags);
-
- submit_bio(io->current_bio);
+ if (do_submit)
+ r5l_do_submit_io(log, io);
}
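A compact model of the ordering rule used here (booleans instead of the real io_unit fields): an io_unit carrying PREFLUSH/FUA is submitted immediately only if it is the first entry on running_ios; otherwise it is marked deferred and dispatched later from the endio path.

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a just-built io_unit may be submitted right away. */
static bool submit_now(bool has_flush, bool has_fua, bool is_first_running)
{
	if ((has_flush || has_fua) && !is_first_running)
		return false;	/* defer; dispatched later from r5l_log_endio() */
	return true;
}

int main(void)
{
	printf("%d\n", submit_now(true, false, true));   /* 1: flush, but first */
	printf("%d\n", submit_now(true, false, false));  /* 0: flush, not first */
	printf("%d\n", submit_now(false, false, false)); /* 1: plain write */
	return 0;
}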
static struct bio *r5l_bio_alloc(struct r5l_log *log)
@@ -271,6 +658,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+ r5c_update_log_state(log);
/*
* If we filled up the log device start from the beginning again,
* which will require a new bio.
@@ -297,6 +685,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
io->log = log;
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
+ bio_list_init(&io->flush_barriers);
io->state = IO_UNIT_RUNNING;
io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
@@ -367,12 +756,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
struct r5l_io_unit *io = log->current_io;
if (io->need_split_bio) {
- struct bio *prev = io->current_bio;
-
+ BUG_ON(io->split_bio);
+ io->split_bio = io->current_bio;
io->current_bio = r5l_bio_alloc(log);
- bio_chain(io->current_bio, prev);
-
- submit_bio(prev);
+ bio_chain(io->current_bio, io->split_bio);
+ io->need_split_bio = false;
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -401,50 +789,85 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
io = log->current_io;
+ if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
+ io->has_flush = 1;
+
for (i = 0; i < sh->disks; i++) {
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
if (i == sh->pd_idx || i == sh->qd_idx)
continue;
+ if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
+ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ io->has_fua = 1;
+ /*
+ * we need to flush journal to make sure recovery can
+ * reach the data with fua flag
+ */
+ io->has_flush = 1;
+ }
r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
raid5_compute_blocknr(sh, i, 0),
sh->dev[i].log_checksum, 0, false);
r5l_append_payload_page(log, sh->dev[i].page);
}
- if (sh->qd_idx >= 0) {
+ if (parity_pages == 2) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
sh->dev[sh->qd_idx].log_checksum, true);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
- } else {
+ } else if (parity_pages == 1) {
r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
sh->sector, sh->dev[sh->pd_idx].log_checksum,
0, false);
r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- }
+ } else /* Just writing data, not parity, in caching phase */
+ BUG_ON(parity_pages != 0);
list_add_tail(&sh->log_list, &io->stripe_list);
atomic_inc(&io->pending_stripe);
sh->log_io = io;
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return 0;
+
+ if (sh->log_start == MaxSector) {
+ BUG_ON(!list_empty(&sh->r5c));
+ sh->log_start = io->log_start;
+ spin_lock_irq(&log->stripe_in_journal_lock);
+ list_add_tail(&sh->r5c,
+ &log->stripe_in_journal_list);
+ spin_unlock_irq(&log->stripe_in_journal_lock);
+ atomic_inc(&log->stripe_in_journal_count);
+ }
return 0;
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/* add stripe to no_space_stripes, and then wake up reclaim */
+static inline void r5l_add_no_space_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
+{
+ spin_lock(&log->no_space_stripes_lock);
+ list_add_tail(&sh->log_list, &log->no_space_stripes);
+ spin_unlock(&log->no_space_stripes_lock);
+}
+
/*
* running in raid5d, where reclaim could wait for raid5d too (when it flushes
* data from log to raid disks), so we shouldn't wait for reclaim here
*/
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
+ struct r5conf *conf = sh->raid_conf;
int write_disks = 0;
int data_pages, parity_pages;
- int meta_size;
int reserve;
int i;
int ret = 0;
+ bool wake_reclaim = false;
if (!log)
return -EAGAIN;
@@ -456,11 +879,15 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
return -EAGAIN;
}
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+
for (i = 0; i < sh->disks; i++) {
void *addr;
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+ test_bit(R5_InJournal, &sh->dev[i].flags))
continue;
+
write_disks++;
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
@@ -473,15 +900,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- /* Doesn't work with very big raid array */
- if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
- return -EINVAL;
-
set_bit(STRIPE_LOG_TRAPPED, &sh->state);
/*
* The stripe must enter state machine again to finish the write, so
@@ -493,22 +911,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
mutex_lock(&log->io_mutex);
/* meta + data */
reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
- if (!r5l_has_free_space(log, reserve)) {
- spin_lock(&log->no_space_stripes_lock);
- list_add_tail(&sh->log_list, &log->no_space_stripes);
- spin_unlock(&log->no_space_stripes_lock);
- r5l_wake_reclaim(log, reserve);
- } else {
- ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
- if (ret) {
- spin_lock_irq(&log->io_list_lock);
- list_add_tail(&sh->log_list, &log->no_mem_stripes);
- spin_unlock_irq(&log->io_list_lock);
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ if (!r5l_has_free_space(log, reserve)) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+ } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
+ /*
+ * log space critical, do not process stripes that are
+ * not in cache yet (sh->log_start == MaxSector).
+ */
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector) {
+ r5l_add_no_space_stripe(log, sh);
+ wake_reclaim = true;
+ reserve = 0;
+ } else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list,
+ &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
}
}
mutex_unlock(&log->io_mutex);
+ if (wake_reclaim)
+ r5l_wake_reclaim(log, reserve);
return 0;
}
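The reservation taken per stripe above is (1 + write_disks) << (PAGE_SHIFT - 9) sectors, i.e. one meta page plus one page per written disk. A quick stand-alone check, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for this example */

static unsigned int reserve_sectors(int write_disks)
{
	/* one meta page plus one data/parity page per written disk */
	return (1 + write_disks) << (PAGE_SHIFT - 9);
}

int main(void)
{
	printf("%u\n", reserve_sectors(3)); /* 32 sectors = 16 KiB */
	printf("%u\n", reserve_sectors(6)); /* 56 sectors = 28 KiB */
	return 0;
}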
@@ -525,17 +970,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
if (!log)
return -ENODEV;
- /*
- * we flush log disk cache first, then write stripe data to raid disks.
- * So if bio is finished, the log disk cache is flushed already. The
- * recovery guarantees we can recovery the bio from log disk, so we
- * don't need to flush again
- */
- if (bio->bi_iter.bi_size == 0) {
- bio_endio(bio);
- return 0;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ /*
+ * in write through (journal only)
+ * we flush log disk cache first, then write stripe data to
+ * raid disks. So if bio is finished, the log disk cache is
+ * flushed already. The recovery guarantees we can recovery
+ * the bio from log disk, so we don't need to flush again
+ */
+ if (bio->bi_iter.bi_size == 0) {
+ bio_endio(bio);
+ return 0;
+ }
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ } else {
+ /* write back (with cache) */
+ if (bio->bi_iter.bi_size == 0) {
+ mutex_lock(&log->io_mutex);
+ r5l_get_meta(log, 0);
+ bio_list_add(&log->current_io->flush_barriers, bio);
+ log->current_io->has_flush = 1;
+ log->current_io->has_null_flush = 1;
+ atomic_inc(&log->current_io->pending_stripe);
+ r5l_submit_current_io(log);
+ mutex_unlock(&log->io_mutex);
+ return 0;
+ }
}
- bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
@@ -555,10 +1017,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
spin_unlock(&log->no_space_stripes_lock);
}
+/*
+ * calculate new last_checkpoint
+ * for write through mode, returns log->next_checkpoint
+ * for write back, returns log_start of first sh in stripe_in_journal_list
+ */
+static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ struct r5l_log *log = conf->log;
+ sector_t new_cp;
+ unsigned long flags;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return log->next_checkpoint;
+
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ if (list_empty(&conf->log->stripe_in_journal_list)) {
+ /* all stripes flushed */
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return log->next_checkpoint;
+ }
+ sh = list_first_entry(&conf->log->stripe_in_journal_list,
+ struct stripe_head, r5c);
+ new_cp = sh->log_start;
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return new_cp;
+}
+
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
+
return r5l_ring_distance(log, log->last_checkpoint,
- log->next_checkpoint);
+ r5c_calculate_new_cp(conf));
}
static void r5l_run_no_mem_stripe(struct r5l_log *log)
@@ -589,7 +1081,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
break;
log->next_checkpoint = io->log_start;
- log->next_cp_seq = io->seq;
list_del(&io->log_sibling);
mempool_free(io, log->io_pool);
@@ -604,6 +1095,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
struct r5l_log *log = io->log;
+ struct r5conf *conf = log->rdev->mddev->private;
unsigned long flags;
spin_lock_irqsave(&log->io_list_lock, flags);
@@ -614,7 +1106,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
return;
}
- if (r5l_reclaimable_space(log) > log->max_free_space)
+ if (r5l_reclaimable_space(log) > log->max_free_space ||
+ test_bit(R5C_LOG_TIGHT, &conf->cache_state))
r5l_wake_reclaim(log, 0);
spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -685,7 +1178,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -713,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
if (!mddev_trylock(mddev))
return;
md_update_sb(mddev, 1);
@@ -735,15 +1228,148 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
}
}
+/*
+ * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
+ * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
+ *
+ * must hold conf->device_lock
+ */
+static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+ BUG_ON(list_empty(&sh->lru));
+ BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+
+ /*
+ * The stripe is not ON_RELEASE_LIST, so it is safe to call
+ * raid5_release_stripe() while holding conf->device_lock
+ */
+ BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ assert_spin_locked(&conf->device_lock);
+
+ list_del_init(&sh->lru);
+ atomic_inc(&sh->count);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ atomic_inc(&conf->active_stripes);
+ r5c_make_stripe_write_out(sh);
+
+ raid5_release_stripe(sh);
+}
+
+/*
+ * if num == 0, flush all full stripes
+ * if num > 0, flush all full stripes. If fewer than num full stripes are
+ *     flushed, flush some partial stripes until a total of num stripes are
+ *     flushed or there are no more cached stripes.
+ */
+void r5c_flush_cache(struct r5conf *conf, int num)
+{
+ int count;
+ struct stripe_head *sh, *next;
+
+ assert_spin_locked(&conf->device_lock);
+ if (!conf->log)
+ return;
+
+ count = 0;
+ list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ count++;
+ }
+
+ if (count >= num)
+ return;
+ list_for_each_entry_safe(sh, next,
+ &conf->r5c_partial_stripe_list, lru) {
+ r5c_flush_stripe(conf, sh);
+ if (++count >= num)
+ break;
+ }
+}
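A small stand-alone model of this policy (counters only, no stripe lists): full stripes are always flushed, and partial stripes are only used to top the count up to num.

#include <stdio.h>

/*
 * Model of the r5c_flush_cache() policy: every full stripe is flushed;
 * partial stripes only make up the difference towards 'num'.
 */
static void flush_counts(int num, int full, int partial, int *f, int *p)
{
	*f = full;			/* all full stripes are flushed */
	*p = 0;
	if (*f < num) {			/* top up with partial stripes */
		*p = num - *f;
		if (*p > partial)
			*p = partial;
	}
}

int main(void)
{
	int f, p;

	flush_counts(0, 5, 10, &f, &p);	/* num == 0: full stripes only */
	printf("full=%d partial=%d\n", f, p);
	flush_counts(8, 5, 10, &f, &p);	/* num > 0: add partials up to 8 */
	printf("full=%d partial=%d\n", f, p);
	return 0;
}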
+
+static void r5c_do_reclaim(struct r5conf *conf)
+{
+ struct r5l_log *log = conf->log;
+ struct stripe_head *sh;
+ int count = 0;
+ unsigned long flags;
+ int total_cached;
+ int stripes_to_flush;
+
+ if (!r5c_is_writeback(log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+ atomic_read(&conf->r5c_cached_full_stripes);
+
+ if (total_cached > conf->min_nr_stripes * 3 / 4 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+ /*
+ * if stripe cache pressure high, flush all full stripes and
+ * some partial stripes
+ */
+ stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
+ else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->r5c_cached_full_stripes) >
+ R5C_FULL_STRIPE_FLUSH_BATCH)
+ /*
+ * if stripe cache pressure is moderate, or if there are many full
+ * stripes, flush all full stripes
+ */
+ stripes_to_flush = 0;
+ else
+ /* no need to flush */
+ stripes_to_flush = -1;
+
+ if (stripes_to_flush >= 0) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ r5c_flush_cache(conf, stripes_to_flush);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+
+ /* if log space is tight, flush stripes on stripe_in_journal_list */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+ spin_lock(&conf->device_lock);
+ list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
+ /*
+ * stripes on stripe_in_journal_list could be in any
+ * state of the stripe_cache state machine. In this
+ * case, we only want to flush stripes on
+ * r5c_cached_full/partial_stripes. The following
+ * condition makes sure the stripe is on one of the
+ * two lists.
+ */
+ if (!list_empty(&sh->lru) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) &&
+ atomic_read(&sh->count) == 0) {
+ r5c_flush_stripe(conf, sh);
+ }
+ if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
+ break;
+ }
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ }
+
+ if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+ r5l_run_no_space_stripes(log);
+
+ md_wakeup_thread(conf->mddev->thread);
+}
static void r5l_do_reclaim(struct r5l_log *log)
{
+ struct r5conf *conf = log->rdev->mddev->private;
sector_t reclaim_target = xchg(&log->reclaim_target, 0);
sector_t reclaimable;
sector_t next_checkpoint;
- u64 next_cp_seq;
+ bool write_super;
spin_lock_irq(&log->io_list_lock);
+ write_super = r5l_reclaimable_space(log) > log->max_free_space ||
+ reclaim_target != 0 || !list_empty(&log->no_space_stripes);
/*
* move proper io_unit to reclaim list. We should not change the order.
* reclaimable/unreclaimable io_unit can be mixed in the list, we
@@ -764,12 +1390,12 @@ static void r5l_do_reclaim(struct r5l_log *log)
log->io_list_lock);
}
- next_checkpoint = log->next_checkpoint;
- next_cp_seq = log->next_cp_seq;
+ next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
BUG_ON(reclaimable < 0);
- if (reclaimable == 0)
+
+ if (reclaimable == 0 || !write_super)
return;
/*
@@ -781,7 +1407,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
mutex_lock(&log->io_mutex);
log->last_checkpoint = next_checkpoint;
- log->last_cp_seq = next_cp_seq;
+ r5c_update_log_state(log);
mutex_unlock(&log->io_mutex);
r5l_run_no_space_stripes(log);
@@ -795,14 +1421,17 @@ static void r5l_reclaim_thread(struct md_thread *thread)
if (!log)
return;
+ r5c_do_reclaim(conf);
r5l_do_reclaim(log);
}
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
unsigned long target;
unsigned long new = (unsigned long)space; /* overflow in theory */
+ if (!log)
+ return;
do {
target = log->reclaim_target;
if (new < target)
@@ -816,22 +1445,14 @@ void r5l_quiesce(struct r5l_log *log, int state)
struct mddev *mddev;
if (!log || state == 2)
return;
- if (state == 0) {
- /*
- * This is a special case for hotadd. In suspend, the array has
- * no journal. In resume, journal is initialized as well as the
- * reclaim thread.
- */
- if (log->reclaim_thread)
- return;
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- } else if (state == 1) {
+ if (state == 0)
+ kthread_unpark(log->reclaim_thread->tsk);
+ else if (state == 1) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
- r5l_wake_reclaim(log, -1L);
- md_unregister_thread(&log->reclaim_thread);
+ kthread_park(log->reclaim_thread->tsk);
+ r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
}
}
@@ -857,10 +1478,13 @@ struct r5l_recovery_ctx {
sector_t meta_total_blocks; /* total size of current meta and data */
sector_t pos; /* recovery position */
u64 seq; /* recovery position seq */
+ int data_parity_stripes; /* number of data_parity stripes */
+ int data_only_stripes; /* number of data_only stripes */
+ struct list_head cached_list;
};
-static int r5l_read_meta_block(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+static int r5l_recovery_read_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
struct page *page = ctx->meta_page;
struct r5l_meta_block *mb;
@@ -892,170 +1516,618 @@ static int r5l_read_meta_block(struct r5l_log *log,
return 0;
}
-static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx,
- sector_t stripe_sect,
- int *offset, sector_t *log_offset)
+static void
+r5l_recovery_create_empty_meta_block(struct r5l_log *log,
+ struct page *page,
+ sector_t pos, u64 seq)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct stripe_head *sh;
- struct r5l_payload_data_parity *payload;
- int disk_index;
+ struct r5l_meta_block *mb;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
- while (1) {
- payload = page_address(ctx->meta_page) + *offset;
+ mb = page_address(page);
+ clear_page(mb);
+ mb->magic = cpu_to_le32(R5LOG_MAGIC);
+ mb->version = R5LOG_VERSION;
+ mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+ mb->seq = cpu_to_le64(seq);
+ mb->position = cpu_to_le64(pos);
+}
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
- raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0,
- &disk_index, sh);
+static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
+ u64 seq)
+{
+ struct page *page;
+ struct r5l_meta_block *mb;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- ctx->meta_total_blocks += BLOCK_SECTORS;
- } else {
- disk_index = sh->pd_idx;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, REQ_OP_READ, 0,
- false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-
- if (sh->qd_idx >= 0) {
- disk_index = sh->qd_idx;
- sync_page_io(log->rdev,
- r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[disk_index].page,
- REQ_OP_READ, 0, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[1]);
- set_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags);
- }
- ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- }
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ r5l_recovery_create_empty_meta_block(log, page, pos, seq);
+ mb = page_address(page);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ REQ_FUA, false)) {
+ __free_page(page);
+ return -EIO;
+ }
+ __free_page(page);
+ return 0;
+}
- *log_offset = r5l_ring_add(log, *log_offset,
- le32_to_cpu(payload->size));
- *offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
- break;
+/*
+ * r5l_recovery_load_data and r5l_recovery_load_parity uses flag R5_Wantwrite
+ * to mark valid (potentially not flushed) data in the journal.
+ *
+ * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
+ * so there should not be any mismatch here.
+ */
+static void r5l_recovery_load_data(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int dd_idx;
+
+ raid5_compute_sector(conf,
+ le64_to_cpu(payload->location), 0,
+ &dd_idx, sh);
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[dd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ ctx->meta_total_blocks += BLOCK_SECTORS;
+
+ set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_load_parity(struct r5l_log *log,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx,
+ struct r5l_payload_data_parity *payload,
+ sector_t log_offset)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+ sh->dev[sh->pd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[0]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
+
+ if (sh->qd_idx >= 0) {
+ sync_page_io(log->rdev,
+ r5l_ring_add(log, log_offset, BLOCK_SECTORS),
+ PAGE_SIZE, sh->dev[sh->qd_idx].page,
+ REQ_OP_READ, 0, false);
+ sh->dev[sh->qd_idx].log_checksum =
+ le32_to_cpu(payload->checksum[1]);
+ set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
}
+ clear_bit(STRIPE_R5C_CACHING, &sh->state);
+}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- void *addr;
- u32 checksum;
+static void r5l_recovery_reset_stripe(struct stripe_head *sh)
+{
+ int i;
+ sh->state = 0;
+ sh->log_start = MaxSector;
+ for (i = sh->disks; i--; )
+ sh->dev[i].flags = 0;
+}
+
+static void
+r5l_recovery_replay_one_stripe(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct md_rdev *rdev, *rrdev;
+ int disk_index;
+ int data_count = 0;
+
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
- addr = kmap_atomic(sh->dev[disk_index].page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
- if (checksum != sh->dev[disk_index].log_checksum)
- goto error;
+ if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
+ continue;
+ data_count++;
}
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- struct md_rdev *rdev, *rrdev;
+ /*
+ * stripes that only have parity must have been flushed
+ * before the crash that we are now recovering from, so
+ * there is nothing more to recover.
+ */
+ if (data_count == 0)
+ goto out;
- if (!test_and_clear_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags))
+ for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+ if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
continue;
/* in case device is broken */
+ rcu_read_lock();
rdev = rcu_dereference(conf->disks[disk_index].rdev);
- if (rdev)
- sync_page_io(rdev, stripe_sect, PAGE_SIZE,
+ if (rdev) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rdev, rdev->mddev);
+ rcu_read_lock();
+ }
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
- if (rrdev)
- sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
+ if (rrdev) {
+ atomic_inc(&rrdev->nr_pending);
+ rcu_read_unlock();
+ sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE, 0,
false);
+ rdev_dec_pending(rrdev, rrdev->mddev);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
- raid5_release_stripe(sh);
+ ctx->data_parity_stripes++;
+out:
+ r5l_recovery_reset_stripe(sh);
+}
+
+static struct stripe_head *
+r5c_recovery_alloc_stripe(struct r5conf *conf,
+ sector_t stripe_sect,
+ sector_t log_start)
+{
+ struct stripe_head *sh;
+
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ if (!sh)
+ return NULL; /* no more stripe available */
+
+ r5l_recovery_reset_stripe(sh);
+ sh->log_start = log_start;
+
+ return sh;
+}
+
+static struct stripe_head *
+r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
+{
+ struct stripe_head *sh;
+
+ list_for_each_entry(sh, list, lru)
+ if (sh->sector == sect)
+ return sh;
+ return NULL;
+}
+
+static void
+r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
+ r5l_recovery_reset_stripe(sh);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+static void
+r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh, *next;
+
+ list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
+ }
+}
+
+/* if matches return 0; otherwise return -EINVAL */
+static int
+r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+ sector_t log_offset, __le32 log_checksum)
+{
+ void *addr;
+ u32 checksum;
+
+ sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+ page, REQ_OP_READ, 0, false);
+ addr = kmap_atomic(page);
+ checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
+}
+
+/*
+ * before loading data to the stripe cache, we need to verify the checksum for
+ * all data; if there is a mismatch for any data page, we drop all data in the
+ * meta block
+ */
+static int
+r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct r5l_meta_block *mb = page_address(ctx->meta_page);
+ sector_t mb_offset = sizeof(struct r5l_meta_block);
+ sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+ struct page *page;
+ struct r5l_payload_data_parity *payload;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ payload = (void *)mb + mb_offset;
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+ if (r5l_recovery_verify_data_checksum(
+ log, page, log_offset,
+ payload->checksum[0]) < 0)
+ goto mismatch;
+ if (conf->max_degraded == 2 && /* q for RAID 6 */
+ r5l_recovery_verify_data_checksum(
+ log, page,
+ r5l_ring_add(log, log_offset,
+ BLOCK_SECTORS),
+ payload->checksum[1]) < 0)
+ goto mismatch;
+ } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+ goto mismatch;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ }
+
+ put_page(page);
return 0;
-error:
- for (disk_index = 0; disk_index < sh->disks; disk_index++)
- sh->dev[disk_index].flags = 0;
- raid5_release_stripe(sh);
+mismatch:
+ put_page(page);
return -EINVAL;
}
-static int r5l_recovery_flush_one_meta(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Analyze all data/parity pages in one meta block
+ * Returns:
+ * 0 for success
+ * -EINVAL for unknown payload type
+ * -EAGAIN for checksum mismatch of data page
+ * -ENOMEM when out of memory (alloc_page failed or no more stripes available)
+ */
+static int
+r5c_recovery_analyze_meta_block(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx,
+ struct list_head *cached_stripe_list)
{
- struct r5conf *conf = log->rdev->mddev->private;
- struct r5l_payload_data_parity *payload;
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
struct r5l_meta_block *mb;
- int offset;
+ struct r5l_payload_data_parity *payload;
+ int mb_offset;
sector_t log_offset;
- sector_t stripe_sector;
+ sector_t stripe_sect;
+ struct stripe_head *sh;
+ int ret;
+
+ /*
+ * for mismatch in data blocks, we will drop all data in this mb, but
+ * we will still read next mb for other data with FLUSH flag, as
+ * io_unit could finish out of order.
+ */
+ ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
+ if (ret == -EINVAL)
+ return -EAGAIN;
+ else if (ret)
+ return ret; /* -ENOMEM due to alloc_page() failure */
mb = page_address(ctx->meta_page);
- offset = sizeof(struct r5l_meta_block);
+ mb_offset = sizeof(struct r5l_meta_block);
log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
- while (offset < le32_to_cpu(mb->meta_size)) {
+ while (mb_offset < le32_to_cpu(mb->meta_size)) {
int dd;
- payload = (void *)mb + offset;
- stripe_sector = raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0, &dd, NULL);
- if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
- &offset, &log_offset))
+ payload = (void *)mb + mb_offset;
+ stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+ raid5_compute_sector(
+ conf, le64_to_cpu(payload->location), 0, &dd,
+ NULL)
+ : le64_to_cpu(payload->location);
+
+ sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+ stripe_sect);
+
+ if (!sh) {
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ /*
+ * cannot get stripe from raid5_get_active_stripe
+ * try replay some stripes
+ */
+ if (!sh) {
+ r5c_recovery_replay_stripes(
+ cached_stripe_list, ctx);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data from the journal.\n",
+ mdname(mddev),
+ conf->min_nr_stripes * 2);
+ raid5_set_cache_size(mddev,
+ conf->min_nr_stripes * 2);
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, ctx->pos);
+ }
+ if (!sh) {
+ pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+ mdname(mddev));
+ return -ENOMEM;
+ }
+ list_add_tail(&sh->lru, cached_stripe_list);
+ }
+
+ if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
+ r5l_recovery_replay_one_stripe(conf, sh, ctx);
+ sh->log_start = ctx->pos;
+ list_move_tail(&sh->lru, cached_stripe_list);
+ }
+ r5l_recovery_load_data(log, sh, ctx, payload,
+ log_offset);
+ } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+ r5l_recovery_load_parity(log, sh, ctx, payload,
+ log_offset);
+ else
return -EINVAL;
+
+ log_offset = r5l_ring_add(log, log_offset,
+ le32_to_cpu(payload->size));
+
+ mb_offset += sizeof(struct r5l_payload_data_parity) +
+ sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
+
return 0;
}
-/* copy data/parity from log to raid disks */
-static void r5l_recovery_flush_log(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
+/*
+ * Load the stripe into cache. The stripe will be written out later by
+ * the stripe cache state machine.
+ */
+static void r5c_recovery_load_one_stripe(struct r5l_log *log,
+ struct stripe_head *sh)
{
+ struct r5dev *dev;
+ int i;
+
+ for (i = sh->disks; i--; ) {
+ dev = sh->dev + i;
+ if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
+ set_bit(R5_InJournal, &dev->flags);
+ set_bit(R5_UPTODATE, &dev->flags);
+ }
+ }
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
+}
+
+/*
+ * Scan through the log for all to-be-flushed data
+ *
+ * For stripes with data and parity, namely Data-Parity stripe
+ * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
+ *
+ * For stripes with only data, namely Data-Only stripe
+ * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
+ *
+ * For a stripe, if we see data after parity, we should discard all previous
+ * data and parity for this stripe, as these data are already flushed to
+ * the array.
+ *
+ * At the end of the scan, we return the new journal_tail, which points to
+ * first data-only stripe on the journal device, or next invalid meta block.
+ */
+static int r5c_recovery_flush_log(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct stripe_head *sh;
+ int ret = 0;
+
+ /* scan through the log */
while (1) {
- if (r5l_read_meta_block(log, ctx))
- return;
- if (r5l_recovery_flush_one_meta(log, ctx))
- return;
+ if (r5l_recovery_read_meta_block(log, ctx))
+ break;
+
+ ret = r5c_recovery_analyze_meta_block(log, ctx,
+ &ctx->cached_list);
+ /*
+ * -EAGAIN means mismatch in data block, in this case, we still
+ * try scan the next metablock
+ */
+ if (ret && ret != -EAGAIN)
+ break; /* ret == -EINVAL or -ENOMEM */
ctx->seq++;
ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
}
+
+ if (ret == -ENOMEM) {
+ r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
+ return ret;
+ }
+
+ /* replay data-parity stripes */
+ r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
+
+ /* load data-only stripes to stripe cache */
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5c_recovery_load_one_stripe(log, sh);
+ ctx->data_only_stripes++;
+ }
+
+ return 0;
}
-static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
- u64 seq)
+/*
+ * We did a recovery. Now ctx.pos points to an invalid meta block. The new
+ * log will start there, but we can't let the superblock point to the last
+ * valid meta block. The log might look like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If the
+ * superblock points to meta 1 and we write a new valid meta 2n, then if a
+ * crash happens again, the new recovery will start from meta 1. Since meta
+ * 2n is valid now, recovery will think meta 3 is valid, which is wrong.
+ * The solution is to create a new meta at meta 2 with its seq == meta
+ * 1's seq + 10000 and let the superblock point to meta 2. That recovery
+ * will not treat meta 3 as a valid meta, because its seq doesn't match.
+ */
+
+/*
+ * Before recovery, the log looks like the following
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^
+ * |- log->last_checkpoint
+ * |- log->last_cp_seq
+ *
+ * Now we scan through the log until we see invalid entry
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos
+ * |- log->last_cp_seq |- ctx->seq
+ *
+ * From this point, we need to increase the seq number by 10000 to avoid
+ * confusing the next recovery.
+ *
+ * ---------------------------------------------
+ * | valid log | invalid log |
+ * ---------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+1
+ * |- log->last_cp_seq |- ctx->seq+10001
+ *
+ * However, it is not safe to start the state machine yet, because data-only
+ * stripes are not yet secured in the RAID. To save these data-only stripes,
+ * we rewrite them after seq+10000.
+ *
+ * -----------------------------------------------------------------
+ * | valid log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * If failure happens again during this process, the recovery can safely start
+ * again from log->last_checkpoint.
+ *
+ * Once data only stripes are rewritten to journal, we move log_tail
+ *
+ * -----------------------------------------------------------------
+ * | old log | data only stripes | invalid log |
+ * -----------------------------------------------------------------
+ * ^ ^
+ * |- log->last_checkpoint |- ctx->pos+n
+ * |- log->last_cp_seq |- ctx->seq+10000+n
+ *
+ * Then we can safely start the state machine. If failure happens from this
+ * point on, the recovery will start from new log->last_checkpoint.
+ */
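To make the sequence-number jump concrete, a tiny stand-alone example (the numbers are invented): a stale meta block left just past the scanned region can never match the sequence the next recovery expects, because new log entries continue from seq + 10000.

#include <stdio.h>

int main(void)
{
	unsigned long long scanned_to = 507;		  /* ctx->seq after the scan */
	unsigned long long stale_seq = scanned_to + 1;	  /* old "meta 3" left on disk */
	unsigned long long expected = scanned_to + 10000; /* seq the next recovery expects */

	/* recovery only accepts a meta block whose seq matches what it expects */
	printf("stale seq=%llu expected=%llu -> %s\n",
	       stale_seq, expected,
	       stale_seq == expected ? "accepted" : "rejected");
	return 0;
}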
+static int
+r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
{
+ struct stripe_head *sh, *next;
+ struct mddev *mddev = log->rdev->mddev;
struct page *page;
- struct r5l_meta_block *mb;
- u32 crc;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
+ mdname(mddev));
return -ENOMEM;
- mb = page_address(page);
- mb->magic = cpu_to_le32(R5LOG_MAGIC);
- mb->version = R5LOG_VERSION;
- mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
- mb->seq = cpu_to_le64(seq);
- mb->position = cpu_to_le64(pos);
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- mb->checksum = cpu_to_le32(crc);
+ }
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
- WRITE_FUA, false)) {
- __free_page(page);
- return -EIO;
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ struct r5l_meta_block *mb;
+ int i;
+ int offset;
+ sector_t write_pos;
+
+ WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+ r5l_recovery_create_empty_meta_block(log, page,
+ ctx->pos, ctx->seq);
+ mb = page_address(page);
+ offset = le32_to_cpu(mb->meta_size);
+ write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ struct r5l_payload_data_parity *payload;
+ void *addr;
+
+ if (test_bit(R5_InJournal, &dev->flags)) {
+ payload = (void *)mb + offset;
+ payload->header.type = cpu_to_le16(
+ R5LOG_PAYLOAD_DATA);
+ payload->size = BLOCK_SECTORS;
+ payload->location = cpu_to_le64(
+ raid5_compute_blocknr(sh, i, 0));
+ addr = kmap_atomic(dev->page);
+ payload->checksum[0] = cpu_to_le32(
+ crc32c_le(log->uuid_checksum, addr,
+ PAGE_SIZE));
+ kunmap_atomic(addr);
+ sync_page_io(log->rdev, write_pos, PAGE_SIZE,
+ dev->page, REQ_OP_WRITE, 0, false);
+ write_pos = r5l_ring_add(log, write_pos,
+ BLOCK_SECTORS);
+ offset += sizeof(__le32) +
+ sizeof(struct r5l_payload_data_parity);
+
+ }
+ }
+ mb->meta_size = cpu_to_le32(offset);
+ mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+ mb, PAGE_SIZE));
+ sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
+ REQ_OP_WRITE, REQ_FUA, false);
+ sh->log_start = ctx->pos;
+ ctx->pos = write_pos;
+ ctx->seq += 1;
+
+ list_del_init(&sh->lru);
+ raid5_release_stripe(sh);
}
__free_page(page);
return 0;
@@ -1063,43 +2135,60 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
static int r5l_recovery_log(struct r5l_log *log)
{
+ struct mddev *mddev = log->rdev->mddev;
struct r5l_recovery_ctx ctx;
+ int ret;
+ sector_t pos;
+ struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
ctx.meta_page = alloc_page(GFP_KERNEL);
+ ctx.data_only_stripes = 0;
+ ctx.data_parity_stripes = 0;
+ INIT_LIST_HEAD(&ctx.cached_list);
+
if (!ctx.meta_page)
return -ENOMEM;
- r5l_recovery_flush_log(log, &ctx);
+ ret = r5c_recovery_flush_log(log, &ctx);
__free_page(ctx.meta_page);
- /*
- * we did a recovery. Now ctx.pos points to an invalid meta block. New
- * log will start here. but we can't let superblock point to last valid
- * meta block. The log might looks like:
- * | meta 1| meta 2| meta 3|
- * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
- * superblock points to meta 1, we write a new valid meta 2n. if crash
- * happens again, new recovery will start from meta 1. Since meta 2n is
- * valid now, recovery will think meta 3 is valid, which is wrong.
- * The solution is we create a new meta in meta2 with its seq == meta
- * 1's seq + 10 and let superblock points to meta2. The same recovery will
- * not think meta 3 is a valid meta, because its seq doesn't match
- */
- if (ctx.seq > log->last_cp_seq + 1) {
- int ret;
-
- ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
- if (ret)
- return ret;
- log->seq = ctx.seq + 11;
- log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- r5l_write_super(log, ctx.pos);
+ if (ret)
+ return ret;
+
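+	/*
+	 * Bump seq well past anything left in the now-invalid log tail so
+	 * stale meta blocks there can never look valid to a later recovery.
+	 */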
+ pos = ctx.pos;
+ ctx.seq += 10000;
+
+ if (ctx.data_only_stripes == 0) {
+ log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
} else {
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
+ sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
+ log->next_checkpoint = sh->log_start;
+ }
+
+ if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+ pr_debug("md/raid:%s: starting from clean shutdown\n",
+ mdname(mddev));
+ else {
+ pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ mdname(mddev), ctx.data_only_stripes,
+ ctx.data_parity_stripes);
+
+ if (ctx.data_only_stripes > 0)
+ if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
+ }
}
+
+ log->log_start = ctx.pos;
+ log->seq = ctx.seq;
+ log->last_checkpoint = pos;
+ r5l_write_super(log, pos);
return 0;
}
@@ -1108,7 +2197,293 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
struct mddev *mddev = log->rdev->mddev;
log->rdev->journal_tail = cp;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+}
+
+static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ int ret;
+
+ if (!conf->log)
+ return 0;
+
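+	/* print both journal modes; the active one is shown in brackets */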
+ switch (conf->log->r5c_journal_mode) {
+ case R5C_JOURNAL_MODE_WRITE_THROUGH:
+ ret = snprintf(
+ page, PAGE_SIZE, "[%s] %s\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ case R5C_JOURNAL_MODE_WRITE_BACK:
+ ret = snprintf(
+ page, PAGE_SIZE, "%s [%s]\n",
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+ r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+static ssize_t r5c_journal_mode_store(struct mddev *mddev,
+ const char *page, size_t length)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+ int val = -1, i;
+ int len = length;
+
+ if (!log)
+ return -ENODEV;
+
+ if (len && page[len - 1] == '\n')
+ len -= 1;
+ for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
+ if (strlen(r5c_journal_mode_str[i]) == len &&
+ strncmp(page, r5c_journal_mode_str[i], len) == 0) {
+ val = i;
+ break;
+ }
+ if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
+ val > R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
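+	/* quiesce the array while the journal mode is switched */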
+ mddev_suspend(mddev);
+ conf->log->r5c_journal_mode = val;
+ mddev_resume(mddev);
+
+ pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
+ mdname(mddev), val, r5c_journal_mode_str[val]);
+ return length;
+}
+
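+/*
+ * Exposed through md sysfs as "journal_mode" (see __ATTR below); a mode is
+ * selected by writing its name, e.g. (path and mode string shown for
+ * illustration only):
+ *   echo write-back > /sys/block/md0/md/journal_mode
+ */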
+struct md_sysfs_entry
+r5c_journal_mode = __ATTR(journal_mode, 0644,
+ r5c_journal_mode_show, r5c_journal_mode_store);
+
+/*
+ * Try to handle the write operation in the caching phase. This function
+ * should only be called in write-back mode.
+ *
+ * If all outstanding writes can be handled in the caching phase, returns 0.
+ * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
+ * and returns -EAGAIN.
+ */
+int r5c_try_caching_write(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
+{
+ struct r5l_log *log = conf->log;
+ int i;
+ struct r5dev *dev;
+ int to_cache = 0;
+
+ BUG_ON(!r5c_is_writeback(log));
+
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /*
+ * There are two different scenarios here:
+ * 1. The stripe has some data cached, and it is sent to
+ * write-out phase for reclaim
+ * 2. The stripe is clean, and this is the first write
+ *
+ * For 1, return -EAGAIN, so we continue with
+ * handle_stripe_dirtying().
+ *
+ * For 2, set STRIPE_R5C_CACHING and continue with caching
+ * write.
+ */
+
+		/* case 1: anything in the journal or anything already written */
+ if (s->injournal > 0 || s->written > 0)
+ return -EAGAIN;
+ /* case 2 */
+ set_bit(STRIPE_R5C_CACHING, &sh->state);
+ }
+
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ /* if non-overwrite, use writing-out phase */
+ if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_InJournal, &dev->flags)) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+ }
+
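+	/* stage the remaining writes for a journal-only (caching) write */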
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->towrite) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_Wantdrain, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ to_cache++;
+ }
+ }
+
+ if (to_cache) {
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ /*
+ * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
+ * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
+ * r5c_handle_data_cached()
+ */
+ set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+ }
+
+ return 0;
+}
+
+/*
+ * free extra pages (orig_page) we allocated for prexor
+ */
+void r5c_release_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ bool using_disk_info_extra_page;
+
+ using_disk_info_extra_page =
+ sh->dev[0].orig_page == conf->disks[0].extra_page;
+
+ for (i = sh->disks; i--; )
+ if (sh->dev[i].page != sh->dev[i].orig_page) {
+ struct page *p = sh->dev[i].orig_page;
+
+ sh->dev[i].orig_page = sh->dev[i].page;
+ if (!using_disk_info_extra_page)
+ put_page(p);
+ }
+
+ if (using_disk_info_extra_page) {
+ clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+void r5c_use_extra_page(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int i;
+ struct r5dev *dev;
+
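+	/*
+	 * Point each orig_page at the preallocated per-disk extra_page; this
+	 * is the fallback when alloc_page() fails in handle_stripe_dirtying().
+	 */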
+ for (i = sh->disks; i--; ) {
+ dev = &sh->dev[i];
+ if (dev->orig_page != dev->page)
+ put_page(dev->orig_page);
+ dev->orig_page = conf->disks[i].extra_page;
+ }
+}
+
+/*
+ * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
+ * stripe is committed to RAID disks.
+ */
+void r5c_finish_stripe_write_out(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ int i;
+ int do_wakeup = 0;
+
+ if (!conf->log ||
+ !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
+ return;
+
+ WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+ clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
+ for (i = sh->disks; i--; ) {
+ clear_bit(R5_InJournal, &sh->dev[i].flags);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ do_wakeup = 1;
+ }
+
+ /*
+	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
+	 * We updated R5_InJournal, so we also update s->injournal.
+ */
+ s->injournal = 0;
+
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
+
+ if (do_wakeup)
+ wake_up(&conf->wait_for_overlap);
+
+ if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+
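+	/* write-back only: drop the stripe from the log's in-journal list */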
+ spin_lock_irq(&conf->log->stripe_in_journal_lock);
+ list_del_init(&sh->r5c);
+ spin_unlock_irq(&conf->log->stripe_in_journal_lock);
+ sh->log_start = MaxSector;
+ atomic_dec(&conf->log->stripe_in_journal_count);
+ r5c_update_log_state(conf->log);
+}
+
+int
+r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s)
+{
+ struct r5conf *conf = sh->raid_conf;
+ int pages = 0;
+ int reserve;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!log);
+
+ for (i = 0; i < sh->disks; i++) {
+ void *addr;
+
+ if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+ continue;
+ addr = kmap_atomic(sh->dev[i].page);
+ sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+ addr, PAGE_SIZE);
+ kunmap_atomic(addr);
+ pages++;
+ }
+ WARN_ON(pages == 0);
+
+ /*
+	 * The stripe must enter the state machine again to call endio, so
+	 * don't delay.
+ */
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ atomic_inc(&sh->count);
+
+ mutex_lock(&log->io_mutex);
+ /* meta + data */
+ reserve = (1 + pages) << (PAGE_SHIFT - 9);
+
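+	/*
+	 * When log space is critical, only stripes already in the journal
+	 * (log_start != MaxSector) may append more data; others wait on the
+	 * no-space list.
+	 */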
+ if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+ sh->log_start == MaxSector)
+ r5l_add_no_space_stripe(log, sh);
+ else if (!r5l_has_free_space(log, reserve)) {
+ if (sh->log_start == log->last_checkpoint)
+ BUG();
+ else
+ r5l_add_no_space_stripe(log, sh);
+ } else {
+ ret = r5l_log_stripe(log, sh, pages, 0);
+ if (ret) {
+ spin_lock_irq(&log->io_list_lock);
+ list_add_tail(&sh->log_list, &log->no_mem_stripes);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
+
+ mutex_unlock(&log->io_mutex);
+ return 0;
}
static int r5l_load_log(struct r5l_log *log)
@@ -1119,7 +2494,7 @@ static int r5l_load_log(struct r5l_log *log)
sector_t cp = log->rdev->journal_tail;
u32 stored_crc, expected_crc;
bool create_super = false;
- int ret;
+ int ret = 0;
/* Make sure it's valid */
if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
@@ -1154,6 +2529,7 @@ create:
if (create_super) {
log->last_cp_seq = prandom_u32();
cp = 0;
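+		/* seed the fresh log with a single empty meta block at cp */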
+ r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
* Make sure super points to correct address. Log might have
* data very soon. If super hasn't correct log tail address,
@@ -1171,7 +2547,15 @@ create:
__free_page(page);
- return r5l_recovery_log(log);
+ if (create_super) {
+ log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
+ log->seq = log->last_cp_seq + 1;
+ log->next_checkpoint = cp;
+ } else
+ ret = r5l_recovery_log(log);
+
+ r5c_update_log_state(log);
+ return ret;
ioerr:
__free_page(page);
return ret;
@@ -1184,6 +2568,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
if (PAGE_SIZE != 4096)
return -EINVAL;
+
+ /*
+	 * PAGE_SIZE must be big enough to hold one r5l_meta_block plus
+	 * raid_disks r5l_payload_data_parity entries.
+	 *
+	 * The write journal and cache do not work for very big arrays
+	 * (raid_disks > 203).
+ */
+ if (sizeof(struct r5l_meta_block) +
+ ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
+ conf->raid_disks) > PAGE_SIZE) {
+ pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
+ mdname(conf->mddev), conf->raid_disks);
+ return -EINVAL;
+ }
+
log = kzalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return -ENOMEM;
@@ -1201,7 +2601,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio);
+ bio_init(&log->flush_bio, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
@@ -1223,6 +2623,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
log->rdev->mddev, "reclaim");
if (!log->reclaim_thread)
goto reclaim_thread;
+ log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
init_waitqueue_head(&log->iounit_wait);
INIT_LIST_HEAD(&log->no_mem_stripes);
@@ -1230,6 +2632,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->no_space_stripes);
spin_lock_init(&log->no_space_stripes_lock);
+ INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ INIT_LIST_HEAD(&log->stripe_in_journal_list);
+ spin_lock_init(&log->stripe_in_journal_lock);
+ atomic_set(&log->stripe_in_journal_count, 0);
+
if (r5l_load_log(log))
goto error;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251e91e6..06d7279bdd04 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES 256
-#define STRIPE_SIZE PAGE_SIZE
-#define STRIPE_SHIFT (PAGE_SHIFT - 9)
-#define STRIPE_SECTORS (STRIPE_SIZE>>9)
-#define IO_THRESHOLD 1
-#define BYPASS_THRESHOLD 1
-#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK (NR_HASH - 1)
-#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
local_irq_enable();
}
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
- int sectors = bio_sectors(bio);
- if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
- return bio->bi_next;
- else
- return NULL;
-}
-
-/*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_inc(segments);
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
- unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- int old, new;
-
- do {
- old = atomic_read(segments);
- new = (old & 0xffff) | (cnt << 16);
- } while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
- atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
- atomic_set(segments, cnt);
-}
-
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
@@ -289,8 +218,27 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
struct list_head *temp_inactive_list)
{
+ int i;
+	int injournal = 0;	/* number of data pages with R5_InJournal */
+
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
+
+ if (r5c_is_writeback(conf->log))
+ for (i = sh->disks; i--; )
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ injournal++;
+ /*
+	 * When quiescing in r5c write-back mode, set STRIPE_HANDLE for stripes
+	 * with data in the journal, so they are not released to the cached lists
+ */
+ if (conf->quiesce && r5c_is_writeback(conf->log) &&
+ !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state) &&
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -316,8 +264,30 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state))
- list_add_tail(&sh->lru, temp_inactive_list);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ if (!r5c_is_writeback(conf->log))
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else {
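+			/*
+			 * Write-back mode: keep stripes with cached data on the
+			 * r5c partial/full lists instead of the inactive list.
+			 */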
+ WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
+ if (injournal == 0)
+ list_add_tail(&sh->lru, temp_inactive_list);
+ else if (injournal == conf->raid_disks - conf->max_degraded) {
+ /* full stripe */
+ if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
+ atomic_inc(&conf->r5c_cached_full_stripes);
+ if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
+ atomic_dec(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
+ r5c_check_cached_full_stripe(conf);
+ } else {
+ /* partial stripe */
+ if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE,
+ &sh->state))
+ atomic_inc(&conf->r5c_cached_partial_stripes);
+ list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
+ }
+ }
+ }
}
}
@@ -541,7 +511,7 @@ retry:
if (dev->toread || dev->read || dev->towrite || dev->written ||
test_bit(R5_LOCKED, &dev->flags)) {
- printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
+ pr_err("sector=%llx i=%d %p %p %p %p %d\n",
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
@@ -680,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
if (noblock && sh == NULL)
break;
+
+ r5c_check_stripe_cache_usage(conf);
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
@@ -901,8 +874,19 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
might_sleep();
- if (r5l_write_stripe(conf->log, sh) == 0)
- return;
+ if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+ /* writing out phase */
+ if (s->waiting_extra_page)
+ return;
+ if (r5l_write_stripe(conf->log, sh) == 0)
+ return;
+ } else { /* caching phase */
+ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
+ r5c_cache_data(conf->log, sh, s);
+ return;
+ }
+ }
+
for (i = disks; i--; ) {
int op, op_flags = 0;
int replace_only = 0;
@@ -913,7 +897,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- op_flags = WRITE_FUA;
+ op_flags = REQ_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -977,7 +961,7 @@ again:
if (bad < 0) {
set_bit(BlockedBadBlocks, &rdev->flags);
if (!conf->mddev->external &&
- conf->mddev->flags) {
+ conf->mddev->sb_flags) {
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
@@ -1115,7 +1099,7 @@ again:
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
sector_t sector, struct dma_async_tx_descriptor *tx,
- struct stripe_head *sh)
+ struct stripe_head *sh, int no_skipcopy)
{
struct bio_vec bvl;
struct bvec_iter iter;
@@ -1155,7 +1139,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
if (frombio) {
if (sh->raid_conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
- clen == STRIPE_SIZE)
+ clen == STRIPE_SIZE &&
+ !no_skipcopy)
*page = bio_page;
else
tx = async_memcpy(*page, bio_page, page_offset,
@@ -1237,7 +1222,7 @@ static void ops_run_biofill(struct stripe_head *sh)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, &dev->page,
- dev->sector, tx, sh);
+ dev->sector, tx, sh, 0);
rbi = r5_next_bio(rbi, dev->sector);
}
}
@@ -1364,10 +1349,15 @@ static int set_syndrome_sources(struct page **srcs,
if (i == sh->qd_idx || i == sh->pd_idx ||
(srctype == SYNDROME_SRC_ALL) ||
(srctype == SYNDROME_SRC_WANT_DRAIN &&
- test_bit(R5_Wantdrain, &dev->flags)) ||
+ (test_bit(R5_Wantdrain, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written))
- srcs[slot] = sh->dev[i].page;
+ dev->written)) {
+ if (test_bit(R5_InJournal, &dev->flags))
+ srcs[slot] = sh->dev[i].orig_page;
+ else
+ srcs[slot] = sh->dev[i].page;
+ }
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1546,6 +1536,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+
+ if (r5c_is_writeback(sh->raid_conf->log))
+ /*
+ * raid5-cache write back uses orig_page during prexor.
+ * After prexor, it is time to free orig_page
+ */
+ r5c_release_extra_page(sh);
}
static struct dma_async_tx_descriptor *
@@ -1567,7 +1564,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (test_bit(R5_Wantdrain, &dev->flags))
+ if (test_bit(R5_InJournal, &dev->flags))
+ xor_srcs[count++] = dev->orig_page;
+ else if (test_bit(R5_Wantdrain, &dev->flags))
xor_srcs[count++] = dev->page;
}
@@ -1601,6 +1600,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
+ struct r5conf *conf = sh->raid_conf;
int disks = sh->disks;
int i;
struct stripe_head *head_sh = sh;
@@ -1618,6 +1618,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
again:
dev = &sh->dev[i];
+ /*
+		 * clear R5_InJournal so that, when the page is rewritten to the
+		 * journal, it is not skipped by r5l_log_stripe()
+ */
+ clear_bit(R5_InJournal, &dev->flags);
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
@@ -1637,8 +1642,10 @@ again:
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
- dev->sector, tx, sh);
- if (dev->page != dev->orig_page) {
+ dev->sector, tx, sh,
+ r5c_is_writeback(conf->log));
+ if (dev->page != dev->orig_page &&
+ !r5c_is_writeback(conf->log)) {
set_bit(R5_SkipCopy, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
clear_bit(R5_OVERWRITE, &dev->flags);
@@ -1746,7 +1753,8 @@ again:
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (head_sh->dev[i].written)
+ if (head_sh->dev[i].written ||
+ test_bit(R5_InJournal, &head_sh->dev[i].flags))
xor_srcs[count++] = dev->page;
}
} else {
@@ -2000,17 +2008,15 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
spin_lock_init(&sh->batch_lock);
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
+ INIT_LIST_HEAD(&sh->r5c);
+ INIT_LIST_HEAD(&sh->log_list);
atomic_set(&sh->count, 1);
+ sh->log_start = MaxSector;
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_max_vecs = 1;
-
- bio_init(&dev->rreq);
- dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_max_vecs = 1;
+ bio_init(&dev->req, &dev->vec, 1);
+ bio_init(&dev->rreq, &dev->rvec, 1);
}
}
return sh;
@@ -2245,10 +2251,24 @@ static int resize_stripes(struct r5conf *conf, int newsize)
*/
ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
if (ndisks) {
- for (i=0; i<conf->raid_disks; i++)
+ for (i = 0; i < conf->pool_size; i++)
ndisks[i] = conf->disks[i];
- kfree(conf->disks);
- conf->disks = ndisks;
+
+ for (i = conf->pool_size; i < newsize; i++) {
+ ndisks[i].extra_page = alloc_page(GFP_NOIO);
+ if (!ndisks[i].extra_page)
+ err = -ENOMEM;
+ }
+
+ if (err) {
+ for (i = conf->pool_size; i < newsize; i++)
+ if (ndisks[i].extra_page)
+ put_page(ndisks[i].extra_page);
+ kfree(ndisks);
+ } else {
+ kfree(conf->disks);
+ conf->disks = ndisks;
+ }
} else
err = -ENOMEM;
@@ -2347,10 +2367,8 @@ static void raid5_end_read_request(struct bio * bi)
* replacement device. We just fail those on
* any error
*/
- printk_ratelimited(
- KERN_INFO
- "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
+ pr_info_ratelimited(
+ "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)s,
bdevname(rdev->bdev, b));
@@ -2370,36 +2388,29 @@ static void raid5_end_read_request(struct bio * bi)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error on replacement device "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
else if (conf->mddev->degraded >= conf->max_degraded) {
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error not correctable (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
set_bad = 1;
- printk_ratelimited(
- KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
+ pr_warn_ratelimited(
+ "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)s,
bdn);
} else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
- printk(KERN_WARNING
- "md/raid:%s: Too many read errors, failing device %s.\n",
+ pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -2531,15 +2542,14 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- printk(KERN_ALERT
- "md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
/*
@@ -2861,8 +2871,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
- mdname(conf->mddev));
+ pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -2877,6 +2887,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;
if (rcw) {
+ /*
+		 * In some cases, handle_stripe_dirtying() initially decides to
+		 * run rmw and allocates an extra page for prexor, but rcw turns
+		 * out to be cheaper later on. We need to free the extra page
+		 * now, because we won't be able to do that in
+		 * ops_complete_prexor().
+ */
+ r5c_release_extra_page(sh);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2887,6 +2904,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
@@ -2926,6 +2946,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
+ } else if (test_bit(R5_InJournal, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ s->locked++;
}
}
if (!s->locked)
@@ -3569,10 +3592,10 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
-static void handle_stripe_dirtying(struct r5conf *conf,
- struct stripe_head *sh,
- struct stripe_head_state *s,
- int disks)
+static int handle_stripe_dirtying(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
@@ -3597,9 +3620,12 @@ static void handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3611,13 +3637,15 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
+ test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
else
rcw += 2*disks;
}
}
+
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3629,10 +3657,44 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+ if (test_bit(R5_InJournal, &dev->flags) &&
+ dev->page == dev->orig_page &&
+ !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
+ /* alloc page for prexor */
+ struct page *p = alloc_page(GFP_NOIO);
+
+ if (p) {
+ dev->orig_page = p;
+ continue;
+ }
+
+ /*
+ * alloc_page() failed, try use
+ * disk_info->extra_page
+ */
+ if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
+ &conf->cache_state)) {
+ r5c_use_extra_page(sh);
+ break;
+ }
+
+ /* extra_page in use, add to delayed_list */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ s->waiting_extra_page = 1;
+ return -EAGAIN;
+ }
+ }
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite ||
+ i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
+ !((test_bit(R5_UPTODATE, &dev->flags) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ dev->page != dev->orig_page)) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {
@@ -3658,6 +3720,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -3697,8 +3760,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
schedule_reconstruction(sh, s, rcw == 0, 0);
+ return 0;
}
static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
@@ -3782,7 +3846,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ pr_err("%s: unknown check_state: %d sector: %llu\n",
__func__, sh->check_state,
(unsigned long long) sh->sector);
BUG();
@@ -3946,9 +4010,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_compute_run:
break;
default:
- printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
- __func__, sh->check_state,
- (unsigned long long) sh->sector);
+ pr_warn("%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
BUG();
}
}
@@ -4188,6 +4252,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
+
+ if (test_bit(R5_InJournal, &dev->flags))
+ s->injournal++;
+ if (test_bit(R5_InJournal, &dev->flags) && dev->written)
+ s->just_cached++;
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
@@ -4416,7 +4485,8 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
- dev->written)) {
+ dev->written || test_bit(R5_InJournal,
+ &dev->flags))) {
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);
if (prexor)
@@ -4456,6 +4526,10 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(R5_Discard, &qdev->flags))))))
handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+ if (s.just_cached)
+ r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+ r5l_stripe_write_finished(sh);
+
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
@@ -4467,14 +4541,51 @@ static void handle_stripe(struct stripe_head *sh)
|| s.expanding)
handle_stripe_fill(sh, &s, disks);
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
+ /*
+	 * When the stripe finishes a full journal write cycle (write to the
+	 * journal and then to the RAID disks), this is the clean-up procedure
+	 * so it is ready for the next operation.
+ */
+ r5c_finish_stripe_write_out(conf, sh, &s);
+
+ /*
+ * Now to consider new write requests, cache write back and what else,
+ * if anything should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
+	 * 3/ An r5c cache log write is in flight.
*/
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying(conf, sh, &s, disks);
+
+ if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
+ if (!r5c_is_writeback(conf->log)) {
+ if (s.to_write)
+ handle_stripe_dirtying(conf, sh, &s, disks);
+ } else { /* write back cache */
+ int ret = 0;
+
+ /* First, try handle writes in caching phase */
+ if (s.to_write)
+ ret = r5c_try_caching_write(conf, sh, &s,
+ disks);
+ /*
+ * If caching phase failed: ret == -EAGAIN
+ * OR
+ * stripe under reclaim: !caching && injournal
+ *
+ * fall back to handle_stripe_dirtying()
+ */
+ if (ret == -EAGAIN ||
+ /* stripe under reclaim: !caching && injournal */
+ (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+ s.injournal > 0)) {
+ ret = handle_stripe_dirtying(conf, sh, &s,
+ disks);
+ if (ret == -EAGAIN)
+ goto finish;
+ }
+ }
+ }
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -4645,9 +4756,7 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
- (s.failed <= conf->max_degraded ||
- conf->mddev->external == 0)) {
+ if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -4703,6 +4812,10 @@ static int raid5_congested(struct mddev *mddev, int bits)
if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
+
+ /* Also checks whether there is pressure on r5cache log space */
+ if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
+ return 1;
if (conf->quiesce)
return 1;
if (atomic_read(&conf->empty_inactive_list_nr))
@@ -5172,6 +5285,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
int remaining;
DEFINE_WAIT(w);
bool do_prepare;
+ bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
@@ -5183,6 +5297,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
/* ret == -EAGAIN, fallback */
+ /*
+ * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
+		 * we need to flush the journal device
+ */
+ do_flush = bi->bi_opf & REQ_PREFLUSH;
}
md_write_start(mddev, bi);
@@ -5193,6 +5312,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
+ !r5c_is_writeback(conf->log) &&
mddev->reshape_position == MaxSector) {
bi = chunk_aligned_read(mddev, bi);
if (!bi)
@@ -5321,6 +5441,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
do_prepare = true;
goto retry;
}
+ if (do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ do_flush = false;
+ }
+
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -5486,9 +5612,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
return 0;
@@ -5584,10 +5710,10 @@ finish:
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
|| test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto ret;
@@ -5862,10 +5988,10 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
if (!bio_list_empty(&conf->return_bi) &&
- !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
struct bio_list tmp = BIO_EMPTY_LIST;
spin_lock_irq(&conf->device_lock);
- if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
bio_list_merge(&tmp, &conf->return_bi);
bio_list_init(&conf->return_bi);
}
@@ -5912,7 +6038,7 @@ static void raid5d(struct md_thread *thread)
break;
handled += batch_size;
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+ if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
@@ -6242,6 +6368,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
+ &r5c_journal_mode.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -6368,6 +6495,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ int i;
+
if (conf->log)
r5l_exit_log(conf->log);
if (conf->shrinker.nr_deferred)
@@ -6376,6 +6505,9 @@ static void free_conf(struct r5conf *conf)
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
+ for (i = 0; i < conf->pool_size; i++)
+ if (conf->disks[i].extra_page)
+ put_page(conf->disks[i].extra_page);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
@@ -6387,8 +6519,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
if (alloc_scratch_buffer(conf, percpu)) {
- pr_err("%s: failed memory allocation for cpu%u\n",
- __func__, cpu);
+ pr_warn("%s: failed memory allocation for cpu%u\n",
+ __func__, cpu);
return -ENOMEM;
}
return 0;
@@ -6458,29 +6590,29 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
- mdname(mddev), mddev->new_level);
+ pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
+ mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
if ((mddev->new_level == 5
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
- mdname(mddev), mddev->new_layout);
+ pr_warn("md/raid:%s: layout %d not supported\n",
+ mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
- mdname(mddev), mddev->raid_disks);
+ pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
+ mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
- mdname(mddev), mddev->new_chunk_sectors << 9);
+ pr_warn("md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -6522,9 +6654,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
+
if (!conf->disks)
goto abort;
+ for (i = 0; i < max_disks; i++) {
+ conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
+ if (!conf->disks[i].extra_page)
+ goto abort;
+ }
+
conf->mddev = mddev;
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -6545,6 +6684,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
+ atomic_set(&conf->r5c_cached_full_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
+ atomic_set(&conf->r5c_cached_partial_stripes, 0);
+ INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
+
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
@@ -6571,9 +6715,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "md/raid:%s: device %s operational as raid"
- " disk %d\n",
- mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+ pr_info("md/raid:%s: device %s operational as raid disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -6607,21 +6750,18 @@ static struct r5conf *setup_conf(struct mddev *mddev)
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
conf->min_nr_stripes = max(NR_STRIPES, stripes);
if (conf->min_nr_stripes != NR_STRIPES)
- printk(KERN_INFO
- "md/raid:%s: force stripe size %d for reshape\n",
+ pr_info("md/raid:%s: force stripe size %d for reshape\n",
mdname(mddev), conf->min_nr_stripes);
}
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
if (grow_stripes(conf, conf->min_nr_stripes)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate %dkB for buffers\n",
- mdname(mddev), memory);
+ pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
- mdname(mddev), memory);
+ pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
/*
* Losing a stripe head costs more than the time to refill it,
* it reduces the queue depth and so can hurt throughput.
@@ -6633,18 +6773,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
if (register_shrinker(&conf->shrinker)) {
- printk(KERN_ERR
- "md/raid:%s: couldn't register shrinker.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't register shrinker.\n",
+ mdname(mddev));
goto abort;
}
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
- printk(KERN_ERR
- "md/raid:%s: couldn't allocate thread.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: couldn't allocate thread.\n",
+ mdname(mddev));
goto abort;
}
@@ -6697,9 +6835,8 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
+ pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
rdev_for_each(rdev, mddev) {
long long diff;
@@ -6742,15 +6879,14 @@ static int raid5_run(struct mddev *mddev)
int new_data_disks;
if (journal_dev) {
- printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "md/raid:%s: unsupported reshape "
- "required - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
@@ -6765,8 +6901,8 @@ static int raid5_run(struct mddev *mddev)
chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
new_data_disks = mddev->raid_disks - max_degraded;
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
- printk(KERN_ERR "md/raid:%s: reshape_position not "
- "on a stripe boundary\n", mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
+ mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * chunk_sectors;
@@ -6787,10 +6923,8 @@ static int raid5_run(struct mddev *mddev)
abs(min_offset_diff) >= mddev->new_chunk_sectors)
/* not really in-place - so OK */;
else if (mddev->ro == 0) {
- printk(KERN_ERR "md/raid:%s: in-place reshape "
- "must be started in read-only mode "
- "- aborting\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->reshape_backwards
@@ -6799,13 +6933,11 @@ static int raid5_run(struct mddev *mddev)
: (here_new * chunk_sectors >=
here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "md/raid:%s: reshape_position too early for "
- "auto-recovery - aborting.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "md/raid:%s: reshape will continue\n",
- mdname(mddev));
+ pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -6824,8 +6956,8 @@ static int raid5_run(struct mddev *mddev)
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
- pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
} else if (mddev->recovery_cp == MaxSector)
@@ -6852,8 +6984,7 @@ static int raid5_run(struct mddev *mddev)
if (conf->disks[i].replacement &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
- printk(KERN_ERR "md: cannot handle concurrent "
- "replacement and reshape.\n");
+ pr_warn("md: cannot handle concurrent replacement and reshape.\n");
goto abort;
}
if (test_bit(In_sync, &rdev->flags)) {
@@ -6895,8 +7026,7 @@ static int raid5_run(struct mddev *mddev)
mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
- printk(KERN_ERR "md/raid:%s: not enough operational devices"
- " (%d/%d failed)\n",
+ pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
}
@@ -6908,29 +7038,19 @@ static int raid5_run(struct mddev *mddev)
if (mddev->degraded > dirty_parity_disks &&
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
- printk(KERN_WARNING
- "md/raid:%s: starting dirty degraded array"
- " - data corruption possible.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
+ mdname(mddev));
else {
- printk(KERN_ERR
- "md/raid:%s: cannot start dirty degraded array.\n",
- mdname(mddev));
+ pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
+ mdname(mddev));
goto abort;
}
}
- if (mddev->degraded == 0)
- printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
- " devices, algorithm %d\n", mdname(mddev), conf->level,
- mddev->raid_disks-mddev->degraded, mddev->raid_disks,
- mddev->new_layout);
- else
- printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
- " out of %d devices, algorithm %d\n",
- mdname(mddev), conf->level,
- mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+ mddev->new_layout);
print_raid5_conf(conf);
@@ -6950,9 +7070,8 @@ static int raid5_run(struct mddev *mddev)
mddev->to_remove = NULL;
else if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
- printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
- mdname(mddev));
+ pr_warn("raid5: failed to create sysfs attributes for %s\n",
+ mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
if (mddev->queue) {
@@ -6984,6 +7103,15 @@ static int raid5_run(struct mddev *mddev)
stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
+
+ /*
+			 * We use a 16-bit counter of active stripes in
+			 * bi_phys_segments (minus one for the overloaded
+			 * initialization)
+ */
+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+ blk_queue_max_discard_sectors(mddev->queue,
+ 0xfffe * STRIPE_SECTORS);
+
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
@@ -7040,9 +7168,10 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(journal_dev->bdev, b));
- r5l_init_log(conf, journal_dev);
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(journal_dev->bdev, b));
+ if (r5l_init_log(conf, journal_dev))
+ goto abort;
}
return 0;
@@ -7051,7 +7180,7 @@ abort:
print_raid5_conf(conf);
free_conf(conf);
mddev->private = NULL;
- printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
+ pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
@@ -7085,12 +7214,12 @@ static void print_raid5_conf (struct r5conf *conf)
int i;
struct disk_info *tmp;
- printk(KERN_DEBUG "RAID conf printout:\n");
+ pr_debug("RAID conf printout:\n");
if (!conf) {
- printk("(conf==NULL)\n");
+ pr_debug("(conf==NULL)\n");
return;
}
- printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
conf->raid_disks,
conf->raid_disks - conf->mddev->degraded);
@@ -7098,7 +7227,7 @@ static void print_raid5_conf (struct r5conf *conf)
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ pr_debug(" disk %d, o:%d, dev:%s\n",
i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev, b));
}
@@ -7246,8 +7375,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* write requests running. We should be safe
*/
r5l_init_log(conf, rdev);
- printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
- mdname(mddev), bdevname(rdev->bdev, b));
+ pr_debug("md/raid:%s: using device %s as journal\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
@@ -7351,10 +7480,10 @@ static int check_stripe_cache(struct mddev *mddev)
> conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->min_nr_stripes) {
- printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
- mdname(mddev),
- ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
- / STRIPE_SIZE)*4);
+ pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
+ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+ / STRIPE_SIZE)*4);
return 0;
}
return 1;
@@ -7435,8 +7564,8 @@ static int raid5_start_reshape(struct mddev *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md/raid:%s: array size must be reduced "
- "before number of disks\n", mdname(mddev));
+ pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
+ mdname(mddev));
return -EINVAL;
}
@@ -7506,7 +7635,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
mddev->raid_disks = conf->raid_disks;
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7624,6 +7753,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
+ r5c_flush_cache(conf, INT_MAX);
conf->quiesce = 2;
wait_event_cmd(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0 &&
@@ -7654,8 +7784,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
/* for raid0 takeover only one zone is supported */
if (raid0_conf->nr_strip_zones > 1) {
- printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
- mdname(mddev));
+ pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -7676,6 +7806,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
+ void *ret;
if (mddev->raid_disks != 2 ||
mddev->degraded > 1)
@@ -7697,7 +7828,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev->new_chunk_sectors = chunksect;
- return setup_conf(mddev);
+ ret = setup_conf(mddev);
+ if (!IS_ERR_VALUE(ret))
+ clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ return ret;
}
static void *raid5_takeover_raid6(struct mddev *mddev)
@@ -7767,7 +7901,7 @@ static int raid5_check_reshape(struct mddev *mddev)
conf->chunk_sectors = new_chunk ;
mddev->chunk_sectors = new_chunk;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
return check_reshape(mddev);
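
The raid5.c hunks above are mostly a mechanical conversion from printk(KERN_DEBUG/KERN_WARNING/KERN_ERR ...) to the pr_debug()/pr_warn()/pr_err() helpers, which route through pr_fmt() and, in the pr_debug() case, hook into dynamic debug. A minimal sketch of that pattern with a hypothetical function, not the raid5 code itself:

/* pr_fmt() must be defined before the first include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int example_check_stripes(int have, int need)
{
	if (have < need) {
		/* expands to printk(KERN_WARNING pr_fmt("...")) */
		pr_warn("not enough stripes: have %d, need %d\n", have, need);
		return 0;
	}
	/* compiled out unless DEBUG or dynamic debug enables it */
	pr_debug("stripe check passed (%d/%d)\n", have, need);
	return 1;
}
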
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..ed8e1362ab36 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -226,6 +226,8 @@ struct stripe_head {
struct r5l_io_unit *log_io;
struct list_head log_list;
+ sector_t log_start; /* first meta block on the journal */
+ struct list_head r5c; /* for r5c_cache->stripe_in_journal */
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -264,6 +266,7 @@ struct stripe_head_state {
int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
+ int injournal, just_cached;
int failed_num[2];
int p_failed, q_failed;
int dec_preread_active;
@@ -273,6 +276,7 @@ struct stripe_head_state {
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
int log_failed;
+ int waiting_extra_page;
};
/* Flags for struct r5dev.flags */
@@ -313,6 +317,11 @@ enum r5dev_flags {
*/
R5_Discard, /* Discard the stripe */
R5_SkipCopy, /* Don't copy data from bio to stripe cache */
+ R5_InJournal, /* data being written is in the journal device.
+ * if R5_InJournal is set for parity pd_idx, all the
+ * data and parity being written are in the journal
+ * device
+ */
};
/*
@@ -345,7 +354,30 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
- STRIPE_LOG_TRAPPED, /* trapped into log */
+ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
+ * this bit is used in two scenarios:
+ *
+ * 1. write-out phase
+ * set in first entry of r5l_write_stripe
+ * clear in second entry of r5l_write_stripe
+ * used to bypass logic in handle_stripe
+ *
+ * 2. caching phase
+ * set in r5c_try_caching_write()
+ * clear when journal write is done
+ * used to initiate r5c_cache_data()
+ * also used to bypass logic in handle_stripe
+ */
+ STRIPE_R5C_CACHING, /* the stripe is in caching phase
+ * see more detail in the raid5-cache.c
+ */
+ STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_partial_stripe_list)
+ */
+ STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
+ * in conf->r5c_full_stripe_list)
+ */
+ STRIPE_R5C_PREFLUSH, /* need to flush journal device */
};
#define STRIPE_EXPAND_SYNC_FLAGS \
@@ -408,8 +440,86 @@ enum {
struct disk_info {
struct md_rdev *rdev, *replacement;
+ struct page *extra_page; /* extra page to use in prexor */
};
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES 256
+#define STRIPE_SIZE PAGE_SIZE
+#define STRIPE_SHIFT (PAGE_SHIFT - 9)
+#define STRIPE_SECTORS (STRIPE_SIZE>>9)
+#define IO_THRESHOLD 1
+#define BYPASS_THRESHOLD 1
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+ int sectors = bio_sectors(bio);
+
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+ return bio->bi_next;
+ else
+ return NULL;
+}
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+ unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ int old, new;
+
+ do {
+ old = atomic_read(segments);
+ new = (old & 0xffff) | (cnt << 16);
+ } while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+ atomic_set(segments, cnt);
+}
+
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause
@@ -432,6 +542,30 @@ struct r5worker_group {
int stripes_cnt;
};
+enum r5_cache_state {
+ R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+ R5_ALLOC_MORE, /* It might help to allocate another
+ * stripe.
+ */
+ R5_DID_ALLOC, /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ R5C_LOG_TIGHT, /* log device space tight, need to
+ * prioritize stripes at last_checkpoint
+ */
+ R5C_LOG_CRITICAL, /* log device is running out of space,
+ * only process stripes that are already
+ * occupying the log
+ */
+ R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page
+ * for prexor
+ */
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -519,23 +653,18 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+
+ atomic_t r5c_cached_full_stripes;
+ struct list_head r5c_full_stripe_list;
+ atomic_t r5c_cached_partial_stripes;
+ struct list_head r5c_partial_stripe_list;
+
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
-#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
-#define R5_ALLOC_MORE 2 /* It might help to allocate another
- * stripe.
- */
-#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
- * more until at least one has been
- * released. This avoids flooding
- * the cache.
- */
struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
@@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
+extern bool r5c_is_writeback(struct r5l_log *log);
+extern int
+r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+extern void
+r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_release_extra_page(struct stripe_head *sh);
+extern void r5c_use_extra_page(struct stripe_head *sh);
+extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+extern void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks, struct bio_list *return_bi);
+extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+ struct stripe_head_state *s);
+extern void r5c_make_stripe_write_out(struct stripe_head *sh);
+extern void r5c_flush_cache(struct r5conf *conf, int num);
+extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
+extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+extern struct md_sysfs_entry r5c_journal_mode;
#endif
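
The helpers moved into raid5.h above pack two 16-bit counters into bio->bi_phys_segments: the low half counts stripes that still reference the bio, the high half counts stripes already processed. A rough usage sketch with hypothetical call sites (the real callers are in raid5.c and raid5-cache.c):

#include <linux/bio.h>
#include "raid5.h"

/* illustrative only: take a reference when a bio is attached to one more stripe */
static void example_attach_bio(struct bio *bio)
{
	raid5_inc_bi_active_stripes(bio);
}

/* ...and hand the bio back once the last referencing stripe has finished */
static void example_stripe_done(struct bio *bio, struct bio_list *return_bi)
{
	if (!raid5_dec_bi_active_stripes(bio))
		bio_list_add(return_bi, bio);
}
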
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9914f69a4a02..0da622f5fe69 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1198,7 +1198,6 @@ static const struct net_device_ops dvb_netdev_ops = {
.ndo_start_xmit = dvb_net_tx,
.ndo_set_rx_mode = dvb_net_set_multicast_list,
.ndo_set_mac_address = dvb_net_set_mac,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1209,6 +1208,7 @@ static void dvb_net_setup(struct net_device *dev)
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
+ dev->max_mtu = 4096;
dev->flags |= IFF_NOARP;
}
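
Dropping .ndo_change_mtu = eth_change_mtu and setting dev->max_mtu follows the networking core taking over MTU range checking: drivers now declare their limits at setup time instead of validating changes in a callback. A sketch of the general pattern with illustrative values, not dvb_net's exact setup:

#include <linux/netdevice.h>

static void example_netdev_setup(struct net_device *dev)
{
	dev->mtu     = 1500;	/* default */
	dev->min_mtu = 68;	/* illustrative lower bound */
	dev->max_mtu = 4096;	/* the core rejects requests outside [min_mtu, max_mtu] */
}
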
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 012225587c25..b71b747ee0ba 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -513,6 +513,11 @@ config DVB_AS102_FE
depends on DVB_CORE
default DVB_AS102
+config DVB_GP8PSK_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_USB_GP8PSK
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e90165ad361b..93921a4eaa27 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index db6eb79cde07..93f59bfea092 100644
--- a/drivers/media/usb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
-/* DVB USB compliant Linux driver for the
- * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
+/*
+ * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
*
* Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
* Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,17 +8,31 @@
*
* This module is based off the vp7045 and vp702x modules
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/dvb/README.dvb-usb for more information
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
*/
-#include "gp8psk.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gp8psk-fe.h"
+#include "dvb_frontend.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct gp8psk_fe_state {
struct dvb_frontend fe;
- struct dvb_usb_device *d;
+ void *priv;
+ const struct gp8psk_fe_ops *ops;
+ bool is_rev1;
u8 lock;
u16 snr;
unsigned long next_status_check;
@@ -29,22 +43,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 status;
- gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1);
+
+ st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
return status & bmDCtuned;
}
static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
}
static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
{
u8 buf[6];
if (time_after(jiffies,st->next_status_check)) {
- gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1);
- gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6);
+ st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
+ st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
st->snr = (buf[1]) << 8 | buf[0];
st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000;
}
@@ -116,13 +132,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 cmd[10];
u32 freq = c->frequency * 1000;
- int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
- deb_fe("%s()\n", __func__);
+ dprintk("%s()\n", __func__);
cmd[4] = freq & 0xff;
cmd[5] = (freq >> 8) & 0xff;
@@ -136,21 +151,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
if (c->modulation != QPSK) {
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
c->fec_inner = FEC_AUTO;
break;
case SYS_DVBS2: /* kept for backwards compatibility */
- deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
+ dprintk("%s: DVB-S2 delivery system selected\n", __func__);
break;
case SYS_TURBO:
- deb_fe("%s: Turbo-FEC delivery system selected\n", __func__);
+ dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
break;
default:
- deb_fe("%s: unsupported delivery system selected (%d)\n",
+ dprintk("%s: unsupported delivery system selected (%d)\n",
__func__, c->delivery_system);
return -EOPNOTSUPP;
}
@@ -161,9 +176,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[3] = (c->symbol_rate >> 24) & 0xff;
switch (c->modulation) {
case QPSK:
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
if (gp8psk_tuned_to_DCII(fe))
- gp8psk_bcm4500_reload(state->d);
+ st->ops->reload(st->priv);
switch (c->fec_inner) {
case FEC_1_2:
cmd[9] = 0; break;
@@ -207,18 +222,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[9] = 0;
break;
default: /* Unknown modulation */
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
gp8psk_set_tuner_mode(fe, 0);
- gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10);
+ st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);
- state->lock = 0;
- state->next_status_check = jiffies;
- state->status_check_interval = 200;
+ st->lock = 0;
+ st->next_status_check = jiffies;
+ st->status_check_interval = 200;
return 0;
}
@@ -228,9 +243,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
m->msg, m->msg_len)) {
return -EINVAL;
}
@@ -243,12 +258,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
/* These commands are certainly wrong */
cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0,
&cmd, 0)) {
return -EINVAL;
}
@@ -258,10 +273,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE,
- (tone == SEC_TONE_ON), 0, NULL, 0)) {
+ if (st->ops->out(st->priv, SET_22KHZ_TONE,
+ (tone == SEC_TONE_ON), 0, NULL, 0)) {
return -EINVAL;
}
return 0;
@@ -270,9 +285,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE,
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
voltage == SEC_VOLTAGE_18, 0, NULL, 0)) {
return -EINVAL;
}
@@ -281,52 +296,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
}
static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd = sw_cmd & 0x7f;
- if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0,
- NULL, 0)) {
+ if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
return -EINVAL;
- }
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
- 0, NULL, 0)) {
+
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
+ 0, NULL, 0))
return -EINVAL;
- }
return 0;
}
static void gp8psk_fe_release(struct dvb_frontend* fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- kfree(state);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ kfree(st);
}
static struct dvb_frontend_ops gp8psk_fe_ops;
-struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d)
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1)
{
- struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
- if (s == NULL)
- goto error;
-
- s->d = d;
- memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
- s->fe.demodulator_priv = s;
-
- goto success;
-error:
- return NULL;
-success:
- return &s->fe;
-}
+ struct gp8psk_fe_state *st;
+ if (!ops || !ops->in || !ops->out || !ops->reload) {
+ pr_err("Error! gp8psk-fe ops not defined.\n");
+ return NULL;
+ }
+
+ st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
+ st->fe.demodulator_priv = st;
+ st->ops = ops;
+ st->priv = priv;
+ st->is_rev1 = is_rev1;
+
+ pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");
+
+ return &st->fe;
+}
+EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
static struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
@@ -370,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
.dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
.enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
};
+
+MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
+MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
+MODULE_VERSION("1.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h
new file mode 100644
index 000000000000..6c7944b1ecd6
--- /dev/null
+++ b/drivers/media/dvb-frontends/gp8psk-fe.h
@@ -0,0 +1,82 @@
+/*
+ * gp8psk_fe driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef GP8PSK_FE_H
+#define GP8PSK_FE_H
+
+#include <linux/types.h>
+
+/* gp8psk commands */
+
+#define GET_8PSK_CONFIG 0x80 /* in */
+#define SET_8PSK_CONFIG 0x81
+#define I2C_WRITE 0x83
+#define I2C_READ 0x84
+#define ARM_TRANSFER 0x85
+#define TUNE_8PSK 0x86
+#define GET_SIGNAL_STRENGTH 0x87 /* in */
+#define LOAD_BCM4500 0x88
+#define BOOT_8PSK 0x89 /* in */
+#define START_INTERSIL 0x8A /* in */
+#define SET_LNB_VOLTAGE 0x8B
+#define SET_22KHZ_TONE 0x8C
+#define SEND_DISEQC_COMMAND 0x8D
+#define SET_DVB_MODE 0x8E
+#define SET_DN_SWITCH 0x8F
+#define GET_SIGNAL_LOCK 0x90 /* in */
+#define GET_FW_VERS 0x92
+#define GET_SERIAL_NUMBER 0x93 /* in */
+#define USE_EXTRA_VOLT 0x94
+#define GET_FPGA_VERS 0x95
+#define CW3K_INIT 0x9d
+
+/* PSK_configuration bits */
+#define bm8pskStarted 0x01
+#define bm8pskFW_Loaded 0x02
+#define bmIntersilOn 0x04
+#define bmDVBmode 0x08
+#define bm22kHz 0x10
+#define bmSEL18V 0x20
+#define bmDCtuned 0x40
+#define bmArmed 0x80
+
+/* Satellite modulation modes */
+#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
+#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
+#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
+#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
+
+#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
+#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
+#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
+#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
+#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
+#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
+
+/* firmware revision id's */
+#define GP8PSK_FW_REV1 0x020604
+#define GP8PSK_FW_REV2 0x020704
+#define GP8PSK_FW_VERS(_fw_vers) \
+ ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
+
+struct gp8psk_fe_ops {
+ int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*reload)(void *priv);
+};
+
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1);
+
+#endif
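
With the frontend split out of dvb-usb, the transport driver supplies its control-transfer primitives through struct gp8psk_fe_ops and passes them to gp8psk_fe_attach(). A sketch of what a caller could look like; the stub bodies and names are hypothetical, and the real hookup lives in drivers/media/usb/dvb-usb/gp8psk.c:

#include "gp8psk-fe.h"

static int example_in(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen)
{
	return 0;	/* would issue a bus-specific control read (e.g. a USB vendor request) */
}

static int example_out(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen)
{
	return 0;	/* bus-specific control write */
}

static int example_reload(void *priv)
{
	return 0;	/* re-upload the BCM4500 firmware on rev1 hardware */
}

static const struct gp8psk_fe_ops example_ops = {
	.in	= example_in,
	.out	= example_out,
	.reload	= example_reload,
};

/* at attach time, e.g.:
 *	fe = gp8psk_fe_attach(&example_ops, priv, is_rev1);
 */
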
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index f95a6bc839d5..cede3975d04b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
*protocol = RC_TYPE_RC6_MCE;
dev &= 0x7f;
dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
- toggle, vendor, dev, code);
+ *ptoggle, vendor, dev, code);
} else {
*ptoggle = 0;
*protocol = RC_TYPE_RC6_6A_32;
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63ee789..8d96a22647b3 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
int i;
tuner_dbg("%s called\n", __func__);
+ /* free allocated f/w string */
+ if (priv->fname != firmware_name)
+ kfree(priv->fname);
+ priv->fname = NULL;
+
+ priv->state = XC2028_NO_FIRMWARE;
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
if (!priv->firm)
return;
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
priv->firm = NULL;
priv->firm_size = 0;
- priv->state = XC2028_NO_FIRMWARE;
-
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
return 0;
fail:
- priv->state = XC2028_NO_FIRMWARE;
+ free_firmware(priv);
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
- if (hybrid_tuner_report_instance_count(priv) == 1) {
+ if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
- }
if (priv)
hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
/*
* Copy the config data.
- * For the firmware name, keep a local copy of the string,
- * in order to avoid troubles during device release.
*/
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (p->fname) {
- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
- }
/*
* If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
- priv->fname = priv->ctrl.fname;
+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;
+ if (!priv->fname) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
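
The tuner-xc2028 hunks above consolidate ownership of the firmware-name string: free_firmware() now resets the state and releases priv->fname unless it points at the static firmware_name module parameter, and the name is kstrdup()'ed from the config only when no module parameter overrides it. A sketch of that ownership rule with illustrative names, not the driver's actual fields:

#include <linux/slab.h>
#include <linux/string.h>

/* stands in for a firmware-name module parameter (static storage, may be empty) */
static char example_default_fw[30];

static const char *example_pick_fw_name(const char *cfg_name)
{
	if (example_default_fw[0])
		return example_default_fw;	/* module parameter wins */
	return kstrdup(cfg_name, GFP_KERNEL);	/* may return NULL */
}

static void example_drop_fw_name(const char *name)
{
	/* free only what was duplicated, never the static parameter */
	if (name && name != example_default_fw)
		kfree(name);
}
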
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
(read ? 0x80 : 0);
+ int ret;
+
+ mutex_lock(&fc_usb->data_mutex);
+ if (!read)
+ memcpy(fc_usb->data, val, sizeof(*val));
- int len = usb_control_msg(fc_usb->udev,
+ ret = usb_control_msg(fc_usb->udev,
read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
request,
request_type, /* 0xc0 read or 0x40 write */
wAddress,
0,
- val,
+ fc_usb->data,
sizeof(u32),
B2C2_WAIT_FOR_OPERATION_RDW * HZ);
- if (len != sizeof(u32)) {
+ if (ret != sizeof(u32)) {
err("error while %s dword from %d (%d).", read ? "reading" :
"writing", wAddress, wRegOffsPCI);
- return -EIO;
+ if (ret >= 0)
+ ret = -EIO;
}
- return 0;
+
+ if (read && ret >= 0)
+ memcpy(val, fc_usb->data, sizeof(*val));
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return ret;
}
/*
* DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
{
u8 request_type = USB_TYPE_VENDOR;
u16 wIndex;
- int nWaitTime, pipe, len;
+ int nWaitTime, pipe, ret;
wIndex = page << 8;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (req) {
case B2C2_USB_READ_V8_MEM:
nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
wAddress, wIndex, buflen);
- len = usb_control_msg(fc_usb->udev, pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, pbBuffer, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wAddress,
wIndex,
- pbBuffer,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(pbBuffer, fc_usb->data, buflen);
+ }
- debug_dump(pbBuffer, len, deb_v8);
- return len == buflen ? 0 : -EIO;
+ mutex_unlock(&fc_usb->data_mutex);
+
+ debug_dump(pbBuffer, ret, deb_v8);
+ return ret;
}
#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
fc->dvb_adapter.proposed_mac, 6);
}
-#if 0
-static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
- flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
- u16 buflen, u8 *pvBuffer)
-{
- u16 wValue;
- u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
- int nWaitTime = 2,
- pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
- wValue = (func << 8) | extra;
-
- len = usb_control_msg(fc_usb->udev,pipe,
- B2C2_USB_UTILITY,
- request_type,
- wValue,
- wIndex,
- pvBuffer,
- buflen,
- nWaitTime * HZ);
- return len == buflen ? 0 : -EIO;
-}
-#endif
-
/* usb i2c stuff */
static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
{
struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
u16 wValue, wIndex;
- int nWaitTime,pipe,len;
+ int nWaitTime, pipe, ret;
u8 request_type = USB_TYPE_VENDOR;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (func) {
case USB_FUNC_I2C_WRITE:
case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
wValue & 0xff, wValue >> 8,
wIndex & 0xff, wIndex >> 8);
- len = usb_control_msg(fc_usb->udev,pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, buf, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wValue,
wIndex,
- buf,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
- return len == buflen ? 0 : -EREMOTEIO;
+
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(buf, fc_usb->data, buflen);
+ }
+
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return 0;
}
/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
/* general flexcop init */
fc_usb = fc->bus_specific;
fc_usb->fc_dev = fc;
+ mutex_init(&fc_usb->data_mutex);
fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
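
The flexcop-usb helpers above are all converted to the same shape: stage the payload in a driver-owned, mutex-protected buffer instead of handing caller (possibly stack) memory to usb_control_msg(), and bound-check the length against that buffer first. A condensed sketch of the read direction with hypothetical names; the struct must itself be heap-allocated so data[] is DMA-able:

#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct example_usb_dev {
	struct usb_device *udev;
	struct mutex data_mutex;
	u8 data[64];		/* staging buffer for control transfers */
};

static int example_ctrl_read(struct example_usb_dev *dev, u8 req, u16 value,
			     u16 index, void *buf, u16 len)
{
	int ret;

	if (len > sizeof(dev->data))
		return -EINVAL;

	mutex_lock(&dev->data_mutex);
	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req,
			      USB_DIR_IN | USB_TYPE_VENDOR, value, index,
			      dev->data, len, 1000 /* ms */);
	if (ret == len)
		memcpy(buf, dev->data, len);
	else if (ret >= 0)
		ret = -EIO;
	mutex_unlock(&dev->data_mutex);

	return ret < 0 ? ret : 0;
}
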
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
u8 tmp_buffer[1023+190];
int tmp_buffer_length;
+
+ /* for URB control messages */
+ u8 data[80];
+ struct mutex data_mutex;
};
#if 0
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
static int write_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, registers, size);
+
+ ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ kfree(buf);
+ return ret;
}
/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
static int read_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
request,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ if (ret >= 0)
+ memcpy(registers, buf, size);
+
+ kfree(buf);
+
+ return ret;
}
/******************************************************************************
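
write_packet() and read_packet() above follow the same idea as the flexcop change: copy the registers into a kmalloc'ed buffer so usb_control_msg() never touches the caller's memory directly. For the write path this can also be expressed with kmemdup(); a sketch under that assumption, not the driver's actual code:

#include <linux/usb.h>
#include <linux/slab.h>

static int example_write_packet(struct usb_device *udev, u8 request,
				u8 *registers, u16 start, size_t size)
{
	unsigned char *buf;
	int ret;

	buf = kmemdup(registers, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      start, 0, buf, size, 1000 /* ms */);

	kfree(buf);
	return ret;
}
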
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff9b268..29011dfabb11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
- &af9015_props, "KWorld Digial MC-810", NULL) },
+ &af9015_props, "KWorld Digital MC-810", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
&af9015_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 2a7b5a963acf..3b3f32b426d1 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o
dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o
obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o
-dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o
+dvb-usb-gp8psk-objs := gp8psk.o
obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o
dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..7853261906b1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,15 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
struct af9005_device_state {
u8 sequence;
int led_state;
+ unsigned char data[256];
};
static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
int readwrite, int type, u8 * values, int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16] = { 0 };
- u8 ibuf[17] = { 0 };
- u8 command;
- int i;
- int ret;
+ u8 command, seq;
+ int i, ret;
if (len < 1) {
err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +71,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
return -EINVAL;
}
- obuf[0] = 14; /* rest of buffer length low */
- obuf[1] = 0; /* rest of buffer length high */
+ mutex_lock(&d->data_mutex);
+ st->data[0] = 14; /* rest of buffer length low */
+ st->data[1] = 0; /* rest of buffer length high */
- obuf[2] = AF9005_REGISTER_RW; /* register operation */
- obuf[3] = 12; /* rest of buffer length */
+ st->data[2] = AF9005_REGISTER_RW; /* register operation */
+ st->data[3] = 12; /* rest of buffer length */
- obuf[4] = st->sequence++; /* sequence number */
+ st->data[4] = seq = st->sequence++; /* sequence number */
- obuf[5] = (u8) (reg >> 8); /* register address */
- obuf[6] = (u8) (reg & 0xff);
+ st->data[5] = (u8) (reg >> 8); /* register address */
+ st->data[6] = (u8) (reg & 0xff);
if (type == AF9005_OFDM_REG) {
command = AF9005_CMD_OFDM_REG;
@@ -96,51 +95,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
command |= readwrite;
if (readwrite == AF9005_CMD_WRITE)
for (i = 0; i < len; i++)
- obuf[8 + i] = values[i];
+ st->data[8 + i] = values[i];
else if (type == AF9005_TUNER_REG)
/* read command for tuner, the first byte contains the i2c address */
- obuf[8] = values[0];
- obuf[7] = command;
+ st->data[8] = values[0];
+ st->data[7] = command;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
if (ret)
- return ret;
+ goto ret;
/* sanity check */
- if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+ if (st->data[2] != AF9005_REGISTER_RW_ACK) {
err("generic read/write, wrong reply code.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[3] != 0x0d) {
+ if (st->data[3] != 0x0d) {
err("generic read/write, wrong length in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[4] != obuf[4]) {
+ if (st->data[4] != seq) {
err("generic read/write, wrong sequence in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
/*
- Windows driver doesn't check these fields, in fact sometimes
- the register in the reply is different that what has been sent
-
- if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
- err("generic read/write, wrong register in reply.");
- return -EIO;
- }
- if (ibuf[7] != command) {
- err("generic read/write wrong command in reply.");
- return -EIO;
- }
+ * In theory, both input and output buffers should have
+ * identical values for st->data[5] to st->data[8].
+ * However, the Windows driver doesn't check these fields; in fact,
+ * sometimes the register in the reply is different from what
+ * has been sent.
*/
- if (ibuf[16] != 0x01) {
+ if (st->data[16] != 0x01) {
err("generic read/write wrong status code in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
+
if (readwrite == AF9005_CMD_READ)
for (i = 0; i < len; i++)
- values[i] = ibuf[8 + i];
+ values[i] = st->data[8 + i];
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
@@ -464,8 +464,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
struct af9005_device_state *st = d->priv;
int ret, i, packet_len;
- u8 buf[64];
- u8 ibuf[64];
+ u8 seq;
if (wlen < 0) {
err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +479,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
return -EINVAL;
}
packet_len = wlen + 5;
- buf[0] = (u8) (packet_len & 0xff);
- buf[1] = (u8) ((packet_len & 0xff00) >> 8);
-
- buf[2] = 0x26; /* packet type */
- buf[3] = wlen + 3;
- buf[4] = st->sequence++;
- buf[5] = command;
- buf[6] = wlen;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = (u8) (packet_len & 0xff);
+ st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+ st->data[2] = 0x26; /* packet type */
+ st->data[3] = wlen + 3;
+ st->data[4] = seq = st->sequence++;
+ st->data[5] = command;
+ st->data[6] = wlen;
for (i = 0; i < wlen; i++)
- buf[7 + i] = wbuf[i];
- ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x27) {
+ st->data[7 + i] = wbuf[i];
+ ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
+ if (st->data[2] != 0x27) {
err("send command, wrong reply code.");
- return -EIO;
- }
- if (ibuf[4] != buf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("send command, wrong sequence in reply.");
- return -EIO;
- }
- if (ibuf[5] != 0x01) {
+ ret = -EIO;
+ } else if (st->data[5] != 0x01) {
err("send command, wrong status code in reply.");
- return -EIO;
- }
- if (ibuf[6] != rlen) {
+ ret = -EIO;
+ } else if (st->data[6] != rlen) {
err("send command, invalid data length in reply.");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < rlen; i++)
- rbuf[i] = ibuf[i + 7];
- return 0;
+ if (!ret) {
+ for (i = 0; i < rlen; i++)
+ rbuf[i] = st->data[i + 7];
+ }
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16], ibuf[14];
+ u8 seq;
int ret, i;
- memset(obuf, 0, sizeof(obuf));
- memset(ibuf, 0, sizeof(ibuf));
+ mutex_lock(&d->data_mutex);
- obuf[0] = 14; /* length of rest of packet low */
- obuf[1] = 0; /* length of rest of packer high */
+ memset(st->data, 0, sizeof(st->data));
- obuf[2] = 0x2a; /* read/write eeprom */
+ st->data[0] = 14; /* length of rest of packet low */
+ st->data[1] = 0; /* length of rest of packer high */
- obuf[3] = 12; /* size */
+ st->data[2] = 0x2a; /* read/write eeprom */
- obuf[4] = st->sequence++;
+ st->data[3] = 12; /* size */
- obuf[5] = 0; /* read */
+ st->data[4] = seq = st->sequence++;
- obuf[6] = len;
- obuf[7] = address;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x2b) {
+ st->data[5] = 0; /* read */
+
+ st->data[6] = len;
+ st->data[7] = address;
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
+ if (st->data[2] != 0x2b) {
err("Read eeprom, invalid reply code");
- return -EIO;
- }
- if (ibuf[3] != 10) {
+ ret = -EIO;
+ } else if (st->data[3] != 10) {
err("Read eeprom, invalid reply length");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("Read eeprom, wrong sequence in reply ");
- return -EIO;
- }
- if (ibuf[5] != 1) {
+ ret = -EIO;
+ } else if (st->data[5] != 1) {
err("Read eeprom, wrong status in reply ");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < len; i++) {
- values[i] = ibuf[6 + i];
+
+ if (!ret) {
+ for (i = 0; i < len; i++)
+ values[i] = st->data[6 + i];
}
- return 0;
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
-static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
+ u8 *buf, int size)
{
- u8 buf[FW_BULKOUT_SIZE + 2];
u16 checksum;
int act_len, i, ret;
- memset(buf, 0, sizeof(buf));
+
+ memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
switch (type) {
@@ -720,15 +722,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
{
int i, packets, ret, act_len;
- u8 buf[FW_BULKOUT_SIZE + 2];
+ u8 *buf;
u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x01) {
err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
packets = fw->size / FW_BULKOUT_SIZE;
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +751,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
if (ret) {
err("firmware download failed at packet %d with code %d", i, ret);
- return ret;
+ goto err;
}
}
- ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+ ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != (u8) (packets & 0xff)) {
err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+ ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ goto err;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x02) {
err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- return 0;
+err:
+ kfree(buf);
+ return ret;
}
@@ -823,53 +838,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
{
struct af9005_device_state *st = d->priv;
int ret, len;
-
- u8 obuf[5];
- u8 ibuf[256];
+ u8 seq;
*state = REMOTE_NO_KEY_PRESSED;
if (rc_decode == NULL) {
/* it shouldn't never come here */
return 0;
}
+
+ mutex_lock(&d->data_mutex);
+
/* deb_info("rc_query\n"); */
- obuf[0] = 3; /* rest of packet length low */
- obuf[1] = 0; /* rest of packet lentgh high */
- obuf[2] = 0x40; /* read remote */
- obuf[3] = 1; /* rest of packet length */
- obuf[4] = st->sequence++; /* sequence number */
- ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+ st->data[0] = 3; /* rest of packet length low */
+ st->data[1] = 0; /* rest of packet lentgh high */
+ st->data[2] = 0x40; /* read remote */
+ st->data[3] = 1; /* rest of packet length */
+ st->data[4] = seq = st->sequence++; /* sequence number */
+ ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
if (ret) {
err("rc query failed");
- return ret;
+ goto ret;
}
- if (ibuf[2] != 0x41) {
+ if (st->data[2] != 0x41) {
err("rc query bad header.");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ goto ret;
+ } else if (st->data[4] != seq) {
err("rc query bad sequence.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- len = ibuf[5];
+ len = st->data[5];
if (len > 246) {
err("rc query invalid length");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
if (len > 0) {
deb_rc("rc data (%d) ", len);
- debug_dump((ibuf + 6), len, deb_rc);
- ret = rc_decode(d, &ibuf[6], len, event, state);
+ debug_dump((st->data + 6), len, deb_rc);
+ ret = rc_decode(d, &st->data[6], len, event, state);
if (ret) {
err("rc_decode failed");
- return ret;
+ goto ret;
} else {
deb_rc("rc_decode state %x event %x\n", *state, *event);
if (*state == REMOTE_KEY_REPEAT)
*event = d->last_event;
}
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +974,16 @@ static int af9005_identify_state(struct usb_device *udev,
int *cold)
{
int ret;
- u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ u8 reply, *buf;
+
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
deb_info("result of FW_CONFIG in identify state %d\n", reply);
if (reply == 0x01)
*cold = 1;
@@ -965,7 +992,10 @@ static int af9005_identify_state(struct usb_device *udev,
else
return -EIO;
deb_info("Identify state cold = %d\n", *cold);
- return 0;
+
+err:
+ kfree(buf);
+ return ret;
}
static struct dvb_usb_device_properties af9005_properties;
@@ -974,7 +1004,7 @@ static int af9005_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &af9005_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
enum af9005_usb_table_entry {
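
The af9005 conversion above replaces the per-call stack buffers with the mutex-protected scratch buffer in struct af9005_device_state, reuses that buffer for both the request and the reply, checks the echoed sequence number, and funnels every exit through a single unlock. Reduced to a sketch with a hypothetical command byte (not one of the real af9005 opcodes), written as if it sat in af9005.c next to the state definition above:

static int example_af9005_cmd(struct dvb_usb_device *d, u8 cmd, u8 *reply)
{
	struct af9005_device_state *st = d->priv;
	u8 seq;
	int ret;

	mutex_lock(&d->data_mutex);

	st->data[0] = cmd;
	st->data[1] = seq = st->sequence++;

	ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 2, 0);
	if (!ret && st->data[1] != seq)
		ret = -EIO;	/* the reply must echo our sequence number */
	if (!ret)
		*reply = st->data[0];

	mutex_unlock(&d->data_mutex);
	return ret;
}
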
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..290275bc7fde 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct cinergyt2_state {
u8 rc_counter;
+ unsigned char data[64];
};
/* We are missing a release hook with usb_device data */
@@ -50,38 +51,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
{
- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
- char result[64];
- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
- sizeof(result), 0);
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
+ st->data[1] = enable ? 1 : 0;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
{
- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
- char state[3];
- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
+ st->data[1] = enable ? 0 : 1;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
{
- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
- char state[3];
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
int ret;
adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
- sizeof(state), 0);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
"state info\n");
}
+ mutex_unlock(&d->data_mutex);
/* Copy this pointer as we are gonna need it in the release phase */
cinergyt2_usb_device = adap->dev;
- return 0;
+ return ret;
}
static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +161,18 @@ static int repeatable_keys[] = {
static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
struct cinergyt2_state *st = d->priv;
- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
- int i;
+ int i, ret;
*state = REMOTE_NO_KEY_PRESSED;
- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
- if (key[4] == 0xff) {
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[4] == 0xff) {
/* key repeat */
st->rc_counter++;
if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,34 +182,36 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
*event = d->last_event;
deb_rc("repeat key, event %x\n",
*event);
- return 0;
+ goto ret;
}
}
deb_rc("repeated key (non repeatable)\n");
}
- return 0;
+ goto ret;
}
/* hack to pass checksum on the custom field */
- key[2] = ~key[1];
- dvb_usb_nec_rc_key_to_event(d, key, event, state);
- if (key[0] != 0) {
+ st->data[2] = ~st->data[1];
+ dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
+ if (st->data[0] != 0) {
if (*event != d->last_event)
st->rc_counter = 0;
- deb_rc("key: %*ph\n", 5, key);
+ deb_rc("key: %*ph\n", 5, st->data);
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int cinergyt2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &cinergyt2_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
-
static struct usb_device_id cinergyt2_usb_table[] = {
{ USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
{ 0 }
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
struct cinergyt2_fe_state {
struct dvb_frontend fe;
struct dvb_usb_device *d;
+
+ unsigned char data[64];
+ struct mutex data_mutex;
+
+ struct dvbt_get_status_msg status;
};
static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg result;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
- sizeof(result), 0);
+ mutex_lock(&state->data_mutex);
+ state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1,
+ state->data, sizeof(state->status), 0);
+ if (!ret)
+ memcpy(&state->status, state->data, sizeof(state->status));
+ mutex_unlock(&state->data_mutex);
+
if (ret < 0)
return ret;
*status = 0;
- if (0xffff - le16_to_cpu(result.gain) > 30)
+ if (0xffff - le16_to_cpu(state->status.gain) > 30)
*status |= FE_HAS_SIGNAL;
- if (result.lock_bits & (1 << 6))
+ if (state->status.lock_bits & (1 << 6))
*status |= FE_HAS_LOCK;
- if (result.lock_bits & (1 << 5))
+ if (state->status.lock_bits & (1 << 5))
*status |= FE_HAS_SYNC;
- if (result.lock_bits & (1 << 4))
+ if (state->status.lock_bits & (1 << 4))
*status |= FE_HAS_CARRIER;
- if (result.lock_bits & (1 << 1))
+ if (state->status.lock_bits & (1 << 1))
*status |= FE_HAS_VITERBI;
if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
-
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0)
- return ret;
- *ber = le32_to_cpu(status.viterbi_error_rate);
+ *ber = le32_to_cpu(state->status.viterbi_error_rate);
return 0;
}
static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
- ret);
- return ret;
- }
- *unc = le32_to_cpu(status.uncorrected_block_count);
+ *unc = le32_to_cpu(state->status.uncorrected_block_count);
return 0;
}
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_signal_strength() Failed!"
- " (Error=%d)\n", ret);
- return ret;
- }
- *strength = (0xffff - le16_to_cpu(status.gain));
+ *strength = (0xffff - le16_to_cpu(state->status.gain));
return 0;
}
static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
- return ret;
- }
- *snr = (status.snr << 8) | status.snr;
+ *snr = (state->status.snr << 8) | state->status.snr;
return 0;
}
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_set_parameters_msg param;
- char result[2];
+ struct dvbt_set_parameters_msg *param;
int err;
- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
- param.tps = cpu_to_le16(compute_tps(fep));
- param.freq = cpu_to_le32(fep->frequency / 1000);
- param.flags = 0;
+ mutex_lock(&state->data_mutex);
+
+ param = (void *)state->data;
+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+ param->tps = cpu_to_le16(compute_tps(fep));
+ param->freq = cpu_to_le32(fep->frequency / 1000);
+ param->flags = 0;
switch (fep->bandwidth_hz) {
default:
case 8000000:
- param.bandwidth = 8;
+ param->bandwidth = 8;
break;
case 7000000:
- param.bandwidth = 7;
+ param->bandwidth = 7;
break;
case 6000000:
- param.bandwidth = 6;
+ param->bandwidth = 6;
break;
}
- err = dvb_usb_generic_rw(state->d,
- (char *)&param, sizeof(param),
- result, sizeof(result), 0);
+ err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
+ state->data, 2, 0);
if (err < 0)
err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
+ mutex_unlock(&state->data_mutex);
return (err < 0) ? err : 0;
}
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
s->d = d;
memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
s->fe.demodulator_priv = s;
+ mutex_init(&s->data_mutex);
return &s->fe;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..243403081fa5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
#include "si2168.h"
#include "si2157.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 80
-
/* debug */
static int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[MAX_XFER_SIZE];
+ struct cxusb_state *st = d->priv;
+ int ret, wo;
- if (1 + wlen > sizeof(sndbuf)) {
- warn("i2c wr: len=%d is too big!\n",
- wlen);
+ if (1 + wlen > MAX_XFER_SIZE) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- memset(sndbuf, 0, 1+wlen);
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = cmd;
+ memcpy(&st->data[1], wbuf, wlen);
if (wo)
- return dvb_usb_generic_write(d, sndbuf, 1+wlen);
+ ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
else
- return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
+ rbuf, rlen, 0);
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
/* GPIO */
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..18acda19527a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,15 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 80
+
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+
+ unsigned char data[MAX_XFER_SIZE];
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index f3196658fb70..47ce9d5de4c6 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
usb_rcvctrlpipe(d->udev, 0),
REQUEST_NEW_I2C_READ,
USB_TYPE_VENDOR | USB_DIR_IN,
- value, index, msg[i].buf,
+ value, index, st->buf,
msg[i].len,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
break;
}
+ if (msg[i].len > sizeof(st->buf)) {
+ deb_info("buffer too small to fit %d bytes\n",
+ msg[i].len);
+ return -EIO;
+ }
+
+ memcpy(msg[i].buf, st->buf, msg[i].len);
+
deb_data("<<< ");
debug_dump(msg[i].buf, msg[i].len, deb_data);
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* I2C ctrl + FE bus; */
st->buf[3] = ((gen_mode << 6) & 0xC0) |
((bus_mode << 4) & 0x30);
+
+ if (msg[i].len > sizeof(st->buf) - 4) {
+ deb_info("i2c message to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+
/* The Actual i2c payload */
memcpy(&st->buf[4], msg[i].buf, msg[i].len);
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* fill in the address */
st->buf[1] = msg[i].addr << 1;
/* fill the buffer */
+ if (msg[i].len > sizeof(st->buf) - 2) {
+ deb_info("i2c xfer to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
memcpy(&st->buf[2], msg[i].buf, msg[i].len);
/* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* special thing in the current firmware: when length is zero the read-failed */
len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
- msg[i+1].buf, msg[i+1].len);
+ st->buf, msg[i + 1].len);
if (len <= 0) {
deb_info("I2C read failed on address 0x%02x\n",
msg[i].addr);
break;
}
+ if (msg[i + 1].len > sizeof(st->buf)) {
+ deb_info("i2c xfer buffer to small for %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+ memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
+
msg[i+1].len = len;
i++;
@@ -677,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
enum rc_type protocol;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;
deb_info("%s()\n", __func__);
@@ -718,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
poll_reply->nec.data == 0x00 &&
poll_reply->nec.not_data == 0xff) {
poll_reply->data_state = 2;
- break;
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}
if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
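The dib0700_core hunks add explicit length checks before any copy into or out of the fixed-size st->buf, failing with -EIO rather than overflowing. A generic sketch of that guard, with a hypothetical my_i2c_state and request byte; only the shape of the check is taken from the patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_i2c_state {
	u8 buf[255];	/* fixed transfer buffer: 2 header bytes + payload */
};

/* Place one I2C write payload behind a 2-byte header, bounds-checked. */
static int my_fill_i2c_buf(struct my_i2c_state *st, u8 addr,
			   const u8 *payload, u16 len)
{
	if (len > sizeof(st->buf) - 2)	/* would overflow header + payload */
		return -EIO;

	st->buf[0] = 0x02;		/* hypothetical "I2C write" request */
	st->buf[1] = addr << 1;		/* 8-bit I2C address */
	memcpy(&st->buf[2], payload, len);

	return len + 2;			/* total bytes queued for sending */
}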
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
#define DEFAULT_RC_INTERVAL 50
-static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
-
/*
* This function is used only when firmware is < 1.20 version. Newer
* firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
*/
static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
{
- u8 key[4];
enum rc_type protocol;
u32 scancode;
u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
return 0;
}
- i = dib0700_ctrl_rd(d, rc_request, 2, key, 4);
+ st->buf[0] = REQUEST_POLL_RC;
+ st->buf[1] = 0;
+
+ i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
if (i <= 0) {
err("RC Query Failed");
- return -1;
+ return -EIO;
}
/* losing half of KEY_0 events from Philips rc5 remotes.. */
- if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0)
+ if (st->buf[0] == 0 && st->buf[1] == 0
+ && st->buf[2] == 0 && st->buf[3] == 0)
return 0;
- /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */
+ /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
switch (d->props.rc.core.protocol) {
case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
- if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
- (key[3] == 0xff)) {
+ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
+ (st->buf[3] == 0xff)) {
rc_repeat(d->rc_dev);
return 0;
}
protocol = RC_TYPE_NEC;
- scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]);
+ scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
toggle = 0;
break;
default:
/* RC-5 protocol changes toggle bit on new keypress */
protocol = RC_TYPE_RC5;
- scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]);
- toggle = key[3-1];
+ scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
+ toggle = st->buf[3 - 1];
break;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 18ed3bfbb5e2..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b[3];
+ u8 *b;
int ret;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
- ret = dvb_usb_generic_write(d,b,3);
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
msleep(10);
+
return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b[3] = { 0 };
int ret;
+ u8 *b;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
- return ret;
+ goto ret;
if (onoff) {
b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
b[1] = 0x00;
- if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0)
- return ret;
+ ret = dvb_usb_generic_write(adap->dev, b, 2);
+ if (ret < 0)
+ goto ret;
}
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
- return dvb_usb_generic_write(adap->dev,b,3);
+ ret = dvb_usb_generic_write(adap->dev, b, 3);
+
+ret:
+ kfree(b);
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- if (onoff) {
- u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP };
- return dvb_usb_generic_write(d,b,3);
- } else
+ u8 *b;
+ int ret;
+
+ if (!onoff)
return 0;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = DIBUSB_REQ_SET_IOCTL;
+ b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
+ b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
- u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ u8 *sndbuf;
+ int ret, wo, len;
+
/* write only ? */
- int wo = (rbuf == NULL || rlen == 0),
- len = 2 + wlen + (wo ? 0 : 2);
+ wo = (rbuf == NULL || rlen == 0);
+
+ len = 2 + wlen + (wo ? 0 : 2);
+
+ sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
+ if (!sndbuf)
+ return -ENOMEM;
- if (4 + wlen > sizeof(sndbuf)) {
+ if (4 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto ret;
}
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
- memcpy(&sndbuf[2],wbuf,wlen);
+ memcpy(&sndbuf[2], wbuf, wlen);
if (!wo) {
- sndbuf[wlen+2] = (rlen >> 8) & 0xff;
- sndbuf[wlen+3] = rlen & 0xff;
+ sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
+ sndbuf[wlen + 3] = rlen & 0xff;
}
- return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0);
+ ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
+
+ret:
+ kfree(sndbuf);
+ return ret;
}
/*
@@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
- return 0;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+
+ ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ dvb_usb_nec_rc_key_to_event(d, buf, event, state);
+
+ if (buf[0] != 0)
+ deb_info("key: %*ph\n", 5, buf);
+
+ret:
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
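Where no long-lived state buffer is available, the dibusb helpers above fall back to a short-lived kmalloc() bounce buffer around dvb_usb_generic_write()/_rw(), released on every exit path. A sketch of that shape; my_send_ioctl() and my_usb_write() are hypothetical:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

int my_usb_write(void *dev, u8 *buf, int len);	/* placeholder transport */

static int my_send_ioctl(void *dev, u8 cmd, u8 arg)
{
	u8 *b;
	int ret;

	b = kmalloc(3, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!b)
		return -ENOMEM;

	b[0] = 0x00;	/* hypothetical "set ioctl" request byte */
	b[1] = cmd;
	b[2] = arg;

	ret = my_usb_write(dev, b, 3);

	kfree(b);	/* freed on success and error alike */
	return ret;
}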
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct dibusb_state {
struct dib_fe_xfer_ops ops;
int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int digitv_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[7],rcvbuf[7];
- memset(sndbuf,0,7); memset(rcvbuf,0,7);
+ struct digitv_state *st = d->priv;
+ int ret, wo;
- sndbuf[0] = cmd;
- sndbuf[1] = vv;
- sndbuf[2] = wo ? wlen : rlen;
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
+
+ memset(st->sndbuf, 0, 7);
+ memset(st->rcvbuf, 0, 7);
+
+ st->sndbuf[0] = cmd;
+ st->sndbuf[1] = vv;
+ st->sndbuf[2] = wo ? wlen : rlen;
if (wo) {
- memcpy(&sndbuf[3],wbuf,wlen);
- dvb_usb_generic_write(d,sndbuf,7);
+ memcpy(&st->sndbuf[3], wbuf, wlen);
+ ret = dvb_usb_generic_write(d, st->sndbuf, 7);
} else {
- dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10);
- memcpy(rbuf,&rcvbuf[3],rlen);
+ ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
+ memcpy(rbuf, &st->rcvbuf[3], rlen);
}
- return 0;
+ return ret;
}
/* I2C */
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
#include "dvb-usb.h"
struct digitv_state {
- int is_nxt6000;
+ int is_nxt6000;
+
+ unsigned char sndbuf[7];
+ unsigned char rcvbuf[7];
};
/* protocol (from usblogging and the SDK:
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
struct dtv_frontend_properties fep;
struct dvb_frontend frontend;
+
+ unsigned char data[80];
+ struct mutex data_mutex;
};
static int dtt200u_fe_read_status(struct dvb_frontend *fe,
enum fe_status *stat)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 st = GET_TUNE_STATUS, b[3];
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_TUNE_STATUS;
- dvb_usb_generic_rw(state->d,&st,1,b,3,0);
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret < 0) {
+ *stat = 0;
+ mutex_unlock(&state->data_mutex);
+ return ret;
+ }
- switch (b[0]) {
+ switch (state->data[0]) {
case 0x01:
*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
*stat = 0;
break;
}
+ mutex_unlock(&state->data_mutex);
return 0;
}
static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_VIT_ERR_CNT,b[3];
- dvb_usb_generic_rw(state->d,&bw,1,b,3,0);
- *ber = (b[0] << 16) | (b[1] << 8) | b[2];
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_VIT_ERR_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret >= 0)
+ *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_RS_UNCOR_BLK_CNT,b[2];
+ int ret;
- dvb_usb_generic_rw(state->d,&bw,1,b,2,0);
- *unc = (b[0] << 8) | b[1];
- return 0;
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_RS_UNCOR_BLK_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
+ if (ret >= 0)
+ *unc = (state->data[0] << 8) | state->data[1];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_AGC, b;
- dvb_usb_generic_rw(state->d,&bw,1,&b,1,0);
- *strength = (b << 8) | b;
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_AGC;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *strength = (state->data[0] << 8) | state->data[0];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_SNR,br;
- dvb_usb_generic_rw(state->d,&bw,1,&br,1,0);
- *snr = ~((br << 8) | br);
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_SNR;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *snr = ~((state->data[0] << 8) | state->data[0]);
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_init(struct dvb_frontend* fe)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 b = SET_INIT;
- return dvb_usb_generic_write(state->d,&b,1);
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_INIT;
+
+ ret = dvb_usb_generic_write(state->d, state->data, 1);
+ mutex_unlock(&state->data_mutex);
+
+ return ret;
}
static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
- int i;
- enum fe_status st;
+ int ret;
u16 freq = fep->frequency / 250000;
- u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_BANDWIDTH;
switch (fep->bandwidth_hz) {
case 8000000:
- bwbuf[1] = 8;
+ state->data[1] = 8;
break;
case 7000000:
- bwbuf[1] = 7;
+ state->data[1] = 7;
break;
case 6000000:
- bwbuf[1] = 6;
+ state->data[1] = 6;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto ret;
}
- dvb_usb_generic_write(state->d,bwbuf,2);
+ ret = dvb_usb_generic_write(state->d, state->data, 2);
+ if (ret < 0)
+ goto ret;
- freqbuf[1] = freq & 0xff;
- freqbuf[2] = (freq >> 8) & 0xff;
- dvb_usb_generic_write(state->d,freqbuf,3);
+ state->data[0] = SET_RF_FREQ;
+ state->data[1] = freq & 0xff;
+ state->data[2] = (freq >> 8) & 0xff;
+ ret = dvb_usb_generic_write(state->d, state->data, 3);
+ if (ret < 0)
+ goto ret;
- for (i = 0; i < 30; i++) {
- msleep(20);
- dtt200u_fe_read_status(fe, &st);
- if (st & FE_TIMEDOUT)
- continue;
- }
-
- return 0;
+ret:
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
deb_info("attaching frontend dtt200u\n");
state->d = d;
+ mutex_init(&state->data_mutex);
memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index d2a01b50af0d..fcbff7fb0c4e 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,75 +20,115 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtt200u_state {
+ unsigned char data[80];
+};
+
static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b = SET_INIT;
+ struct dtt200u_state *st = d->priv;
+ int ret = 0;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = SET_INIT;
if (onoff)
- dvb_usb_generic_write(d,&b,2);
+ ret = dvb_usb_generic_write(d, st->data, 2);
- return 0;
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b_streaming[2] = { SET_STREAMING, onoff };
- u8 b_rst_pid = RESET_PID_FILTER;
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
- dvb_usb_generic_write(adap->dev, b_streaming, 2);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_STREAMING;
+ st->data[1] = onoff;
- if (onoff == 0)
- dvb_usb_generic_write(adap->dev, &b_rst_pid, 1);
- return 0;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 2);
+ if (ret < 0)
+ goto ret;
+
+ if (onoff)
+ goto ret;
+
+ st->data[0] = RESET_PID_FILTER;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 1);
+
+ret:
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
- u8 b_pid[4];
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
+
pid = onoff ? pid : 0;
- b_pid[0] = SET_PID_FILTER;
- b_pid[1] = index;
- b_pid[2] = pid & 0xff;
- b_pid[3] = (pid >> 8) & 0x1f;
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_PID_FILTER;
+ st->data[1] = index;
+ st->data[2] = pid & 0xff;
+ st->data[3] = (pid >> 8) & 0x1f;
+
+ ret = dvb_usb_generic_write(adap->dev, st->data, 4);
+ mutex_unlock(&d->data_mutex);
- return dvb_usb_generic_write(adap->dev, b_pid, 4);
+ return ret;
}
static int dtt200u_rc_query(struct dvb_usb_device *d)
{
- u8 key[5],cmd = GET_RC_CODE;
+ struct dtt200u_state *st = d->priv;
u32 scancode;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = GET_RC_CODE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- if (key[0] == 1) {
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[0] == 1) {
enum rc_type proto = RC_TYPE_NEC;
- scancode = key[1];
- if ((u8) ~key[1] != key[2]) {
+ scancode = st->data[1];
+ if ((u8) ~st->data[1] != st->data[2]) {
/* Extended NEC */
scancode = scancode << 8;
- scancode |= key[2];
+ scancode |= st->data[2];
proto = RC_TYPE_NECX;
}
scancode = scancode << 8;
- scancode |= key[3];
+ scancode |= st->data[3];
/* Check command checksum is ok */
- if ((u8) ~key[3] == key[4])
+ if ((u8) ~st->data[3] == st->data[4])
rc_keydown(d->rc_dev, proto, scancode, 0);
else
rc_keyup(d->rc_dev);
- } else if (key[0] == 2) {
+ } else if (st->data[0] == 2) {
rc_repeat(d->rc_dev);
} else {
rc_keyup(d->rc_dev);
}
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
+ if (st->data[0] != 0)
+ deb_info("st->data: %*ph\n", 5, st->data);
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -140,6 +180,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-dtt200u-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -190,6 +232,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-02.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -240,6 +284,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-fc03.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -290,6 +336,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-zl0353-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -340,6 +388,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-miglia-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtv5100_state {
+ unsigned char data[80];
+};
+
static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
+ struct dtv5100_state *st = d->priv;
u8 request;
u8 type;
u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
}
index = (addr << 8) + wbuf[0];
+ memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
- type, value, index, rbuf, rlen,
+ type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = 0,
+ .size_of_priv = sizeof(struct dtv5100_state),
.num_adapters = 1,
.adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 3896ba9a4179..84308569e7dc 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
{
int ret = 0;
+ mutex_init(&d->data_mutex);
mutex_init(&d->usb_mutex);
mutex_init(&d->i2c_mutex);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 639c4678c65b..107255b08b2b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -404,8 +404,12 @@ struct dvb_usb_adapter {
* Powered is in/decremented for each call to modify the state.
* @udev: pointer to the device's struct usb_device.
*
- * @usb_mutex: semaphore of USB control messages (reading needs two messages)
- * @i2c_mutex: semaphore for i2c-transfers
+ * @data_mutex: mutex to protect the data structure used to store URB data
+ * @usb_mutex: mutex of USB control messages (reading needs two messages).
+ * Please note that this mutex is taken internally by the generic
+ * URB control functions, so drivers using dvb_usb_generic_rw() and
+ * derived functions should not take it themselves.
+ * @i2c_mutex: mutex for i2c-transfers
*
* @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
*
@@ -433,6 +437,7 @@ struct dvb_usb_device {
int powered;
/* locking */
+ struct mutex data_mutex;
struct mutex usb_mutex;
/* i2c */
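The kernel-doc added above states the locking rule this series relies on: data_mutex protects the driver's own URB data buffer, while usb_mutex is taken inside dvb_usb_generic_rw() and its derivatives, so driver callbacks must not take it themselves. In sketch form, a callback obeying that rule (struct my_state and the 0x10 request byte are hypothetical):

#include "dvb-usb.h"

struct my_state {
	unsigned char data[80];		/* buffer covered by d->data_mutex */
};

static int my_power_ctrl(struct dvb_usb_device *d, int onoff)
{
	struct my_state *st = d->priv;	/* hypothetical priv layout */
	int ret;

	mutex_lock(&d->data_mutex);	/* our buffer, our lock */
	st->data[0] = 0x10;		/* hypothetical "power" request */
	st->data[1] = onoff ? 1 : 0;
	/* dvb_usb_generic_write() serializes on d->usb_mutex internally,
	 * so it must not be held here. */
	ret = dvb_usb_generic_write(d, st->data, 2);
	mutex_unlock(&d->data_mutex);

	return ret;
}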
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
if (i && !state->initialized) {
state->initialized = 1;
/* reset board */
- dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
}
return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..993bb7a72985 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -15,6 +15,7 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "gp8psk.h"
+#include "gp8psk-fe.h"
/* debug */
static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw";
@@ -24,37 +25,19 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
-}
-
-static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1));
-}
-
-static void gp8psk_info(struct dvb_usb_device *d)
-{
- u8 fpga_vers, fw_vers[6];
-
- if (!gp8psk_get_fw_version(d, fw_vers))
- info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
- fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
- 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
- else
- info("failed to get FW version");
-
- if (!gp8psk_get_fpga_version(d, &fpga_vers))
- info("FPGA Version = %i", fpga_vers);
- else
- info("failed to get FPGA version");
-}
+struct gp8psk_state {
+ unsigned char data[80];
+};
-int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret = 0,try = 0;
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
@@ -63,7 +46,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
usb_rcvctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_IN,
- value,index,b,blen,
+ value, index, st->data, blen,
2000);
deb_info("reading number %d (ret: %d)\n",try,ret);
try++;
@@ -72,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
if (ret < 0 || ret != blen) {
warn("usb in %d operation failed.", req);
ret = -EIO;
- } else
+ } else {
ret = 0;
+ memcpy(b, st->data, blen);
+ }
deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
@@ -83,22 +68,27 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
return ret;
}
-int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret;
deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
+ memcpy(st->data, b, blen);
if (usb_control_msg(d->udev,
usb_sndctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_OUT,
- value,index,b,blen,
+ value, index, st->data, blen,
2000) != blen) {
warn("usb out operation failed.");
ret = -EIO;
@@ -109,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
+
+static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6);
+}
+
+static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1);
+}
+
+static void gp8psk_info(struct dvb_usb_device *d)
+{
+ u8 fpga_vers, fw_vers[6];
+
+ if (!gp8psk_get_fw_version(d, fw_vers))
+ info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
+ fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
+ 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
+ else
+ info("failed to get FW version");
+
+ if (!gp8psk_get_fpga_version(d, &fpga_vers))
+ info("FPGA Version = %i", fpga_vers);
+ else
+ info("failed to get FPGA version");
+}
+
static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
{
int ret;
@@ -143,6 +161,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
err("failed to load bcm4500 firmware.");
goto out_free;
}
+ if (buflen > 64) {
+ err("firmare chunk size bigger than 64 bytes.");
+ goto out_free;
+ }
+
memcpy(buf, ptr, buflen);
if (dvb_usb_generic_write(d, buf, buflen)) {
err("failed to load bcm4500 firmware.");
@@ -206,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
return 0;
}
-int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
+static int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
{
u8 buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
+
+ deb_xfer("reloading firmware\n");
+
/* Turn off 8psk power */
if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
return -EINVAL;
@@ -228,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0);
}
+/* Callbacks for gp8psk-fe.c */
+
+static int gp8psk_fe_in(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_in_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_out(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_out_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_reload(void *priv)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_bcm4500_reload(d);
+}
+
+const struct gp8psk_fe_ops gp8psk_fe_ops = {
+ .in = gp8psk_fe_in,
+ .out = gp8psk_fe_out,
+ .reload = gp8psk_fe_reload,
+};
+
static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev);
+ struct dvb_usb_device *d = adap->dev;
+ int id = le16_to_cpu(d->udev->descriptor.idProduct);
+ int is_rev1;
+
+ is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM);
+
+ adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
+ &gp8psk_fe_ops, d, is_rev1);
return 0;
}
@@ -265,6 +329,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-gp8psk-01.fw",
+ .size_of_priv = sizeof(struct gp8psk_state),
+
.num_adapters = 1,
.adapter = {
{
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index ed32b9da4843..d8975b866dee 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug;
#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args)
#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args)
#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args)
-#define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args)
-
-/* Twinhan Vendor requests */
-#define TH_COMMAND_IN 0xC0
-#define TH_COMMAND_OUT 0xC1
-
-/* gp8psk commands */
-
-#define GET_8PSK_CONFIG 0x80 /* in */
-#define SET_8PSK_CONFIG 0x81
-#define I2C_WRITE 0x83
-#define I2C_READ 0x84
-#define ARM_TRANSFER 0x85
-#define TUNE_8PSK 0x86
-#define GET_SIGNAL_STRENGTH 0x87 /* in */
-#define LOAD_BCM4500 0x88
-#define BOOT_8PSK 0x89 /* in */
-#define START_INTERSIL 0x8A /* in */
-#define SET_LNB_VOLTAGE 0x8B
-#define SET_22KHZ_TONE 0x8C
-#define SEND_DISEQC_COMMAND 0x8D
-#define SET_DVB_MODE 0x8E
-#define SET_DN_SWITCH 0x8F
-#define GET_SIGNAL_LOCK 0x90 /* in */
-#define GET_FW_VERS 0x92
-#define GET_SERIAL_NUMBER 0x93 /* in */
-#define USE_EXTRA_VOLT 0x94
-#define GET_FPGA_VERS 0x95
-#define CW3K_INIT 0x9d
-
-/* PSK_configuration bits */
-#define bm8pskStarted 0x01
-#define bm8pskFW_Loaded 0x02
-#define bmIntersilOn 0x04
-#define bmDVBmode 0x08
-#define bm22kHz 0x10
-#define bmSEL18V 0x20
-#define bmDCtuned 0x40
-#define bmArmed 0x80
-
-/* Satellite modulation modes */
-#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
-#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
-#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
-#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
-
-#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
-#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
-#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
-#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
-#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
-#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug;
#define PRODUCT_STRING_READ 0x0D
#define FW_BCD_VERSION_READ 0x14
-/* firmware revision id's */
-#define GP8PSK_FW_REV1 0x020604
-#define GP8PSK_FW_REV2 0x020704
-#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
-
-extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
-extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
-extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen);
-extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
-
#endif
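The gp8psk changes also decouple the frontend from the USB driver: instead of exporting gp8psk_usb_in_op()/_out_op() directly, the USB side hands the frontend a small table of callbacks (in, out, reload) plus an opaque priv pointer at attach time. A minimal sketch of that shape; the names my_fe_ops, my_fe_priv and the 0x90 request are hypothetical:

#include <linux/types.h>

/* Hypothetical callback table mirroring the gp8psk_fe_ops idea. */
struct my_fe_ops {
	int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
	int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
	int (*reload)(void *priv);
};

/* Frontend-side state: only the ops table and the opaque handle. */
struct my_fe_priv {
	const struct my_fe_ops *ops;
	void *priv;
};

/* The demodulator code no longer touches USB directly. */
static int my_fe_read_lock(struct my_fe_priv *fe, u8 *lock)
{
	return fe->ops->in(fe->priv, 0x90 /* hypothetical "get lock" */,
			   0, 0, lock, 1);
}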
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
*/
static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom;
+ u8 *buf, data, toggle, custom;
u16 raw;
- int i;
+ int i, ret;
struct dibusb_device_state *st = d->priv;
- dvb_usb_generic_rw(d,cmd,2,key,5,0);
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+ buf[1] = 0x35;
+ ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
*state = REMOTE_NO_KEY_PRESSED;
- switch (key[0]) {
+ switch (buf[0]) {
case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
- raw = ((key[1] << 8) | key[2]) >> 3;
+ raw = ((buf[1] << 8) | buf[2]) >> 3;
toggle = !!(raw & 0x800);
data = raw & 0x3f;
custom = (raw >> 6) & 0x1f;
- deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
+ deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
+ buf[1], buf[2], buf[3], custom, data, toggle);
for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
break;
}
- return 0;
+ret:
+ kfree(buf);
+ return ret;
}
static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
+
+ unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
unsigned int rlen;
int ret;
- BUG_ON(NULL == data && 0 != (write_len | read_len));
- BUG_ON(write_len > 64 - 4);
- BUG_ON(read_len > 64 - 4);
+ if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
+ err("%s: transfer data invalid", __func__);
+ return -EIO;
+ }
+ mutex_lock(&state->ca_mutex);
id = state->c++;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = cmd;
- buf[3] = write_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = cmd;
+ state->data[3] = write_len;
- memcpy(buf + 4, data, write_len);
+ memcpy(state->data + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
- buf, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
+ state->data, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
- memcpy(data, buf + 4, read_len);
+ memcpy(data, state->data + 4, read_len);
+ mutex_unlock(&state->ca_mutex);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
int ret;
+ mutex_lock(&state->ca_mutex);
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = PCTV_CMD_I2C;
- buf[3] = snd_len + 3;
- buf[4] = addr << 1;
- buf[5] = snd_len;
- buf[6] = rcv_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_I2C;
+ state->data[3] = snd_len + 3;
+ state->data[4] = addr << 1;
+ state->data[5] = snd_len;
+ state->data[6] = rcv_len;
- memcpy(buf + 7, snd_buf, snd_len);
+ memcpy(state->data + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
- buf, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
+ state->data, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (buf[5] < snd_len || buf[6] < rcv_len)
+ if (state->data[5] < snd_len || state->data[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, buf + 7, rcv_len);
+ memcpy(rcv_buf, state->data + 7, rcv_len);
+ mutex_unlock(&state->ca_mutex);
return rcv_len;
failed:
- err("I2C error %d; %02X %02X %02X %02X %02X -> "
- "%02X %02X %02X %02X %02X.",
+ err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- buf[0], buf[1], buf[4], buf[5], buf[6]);
+ 7, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 };
- u8 rx[PCTV_ANSWER_LEN];
+ u8 *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
+ rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ mutex_lock(&state->ca_mutex);
/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, don't know where to put it */
- b0[1] = state->c++;
+ state->data[0] = 0xaa;
+ state->data[1] = state->c++;
+ state->data[2] = PCTV_CMD_RESET;
+ state->data[3] = 1;
+ state->data[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
- b0[1] = state->c++;
- b0[4] = 1;
+ state->data[1] = state->c++;
+ state->data[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
state->initialized = 1;
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ kfree(rx);
+ return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b[CMD_BUFFER_SIZE];
- u8 rx[PCTV_ANSWER_LEN];
int ret, i;
- u8 id = state->c++;
+ u8 id;
+
+ mutex_lock(&state->ca_mutex);
+ id = state->c++;
/* prepare command header */
- b[0] = SYNC_BYTE_OUT;
- b[1] = id;
- b[2] = PCTV_CMD_IR;
- b[3] = 0;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_IR;
+ state->data[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4,
+ state->data, PCTV_ANSWER_LEN, 0);
if (ret != 0)
- return ret;
+ goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
- for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", rx[i+3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
+ for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", state->data[i + 3]);
info("\n");
}
- if ((rx[3] == 9) && (rx[12] & 0x01)) {
+ if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, rx[6], rx[7]);
+ __func__, state->data[6], state->data[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
rc_keyup(d->rc_dev);
state->last_rc_key = 0;
}
-
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ return ret;
}
static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
static int technisat_usb2_i2c_access(struct usb_device *udev,
u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
{
- u8 b[64];
+ u8 *b;
int ret, actual_length;
+ b = kmalloc(64, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
deb_i2c("i2c-access: %02x, tx: ", device_addr);
debug_dump(tx, txlen, deb_i2c);
deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (ret < 0) {
err("i2c-error: out failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
b, 64, &actual_length, 1000);
if (ret < 0) {
err("i2c-error: in failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (!(b[0] == I2C_STATUS_NAK &&
device_addr == 0x60
/* && device_is_technisat_usb2 */))
- return -ENODEV;
+ goto err;
}
deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
deb_i2c("\n");
- return 0;
+err:
+ kfree(b);
+ return ret;
}
static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index c3a0e87066eb..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
s32 TransferBufferLength, int bOut)
{
int r;
+ unsigned char *buf;
+
+ buf = kmalloc(TransferBufferLength, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
+
+ if (r >= 0)
+ memcpy(TransferBuffer, buf, TransferBufferLength);
} else {
+ memcpy(buf, TransferBuffer, TransferBufferLength);
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
}
+ kfree(buf);
return r;
}
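The s2255 change is the direction-aware form of the bounce buffer: OUT data is staged into the heap buffer before the control message, IN data is copied back to the caller only on success. A sketch under the same assumptions; my_vendor_req() is hypothetical, while the usb_control_msg() call itself uses the real API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int my_vendor_req(struct usb_device *udev, u8 req, u16 value,
			 u16 index, void *data, u16 len, bool out)
{
	u8 *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);	/* DMA-safe bounce buffer */
	if (!buf)
		return -ENOMEM;

	if (out) {
		memcpy(buf, data, len);	/* stage caller data before sending */
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req,
				      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      value, index, buf, len, 5000);
	} else {
		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
				      USB_DIR_IN,
				      value, index, buf, len, 5000);
		if (ret >= 0)
			memcpy(data, buf, len);	/* copy back only on success */
	}

	kfree(buf);
	return ret;
}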
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
{
struct usb_device *udev = dev->udev;
+ unsigned char *buf;
int ret;
+ buf = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00,
index,
- (u8 *) value,
+ buf,
sizeof(u8),
500);
- if (ret < 0)
- return ret;
- else
- return 0;
+ if (ret >= 0)
+ memcpy(value, buf, sizeof(u8));
+
+ kfree(buf);
+ return ret;
}
static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index dc76fd41e00f..ceb953be0770 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -71,6 +71,7 @@ static int usbtv_probe(struct usb_interface *intf,
int size;
struct device *dev = &intf->dev;
struct usbtv *usbtv;
+ struct usb_host_endpoint *ep;
/* Checks that the device is what we think it is. */
if (intf->num_altsetting != 2)
@@ -78,10 +79,12 @@ static int usbtv_probe(struct usb_interface *intf,
if (intf->altsetting[1].desc.bNumEndpoints != 4)
return -ENODEV;
+ ep = &intf->altsetting[1].endpoint[0];
+
/* Packet size is split into 11 bits of base size and count of
* extra multiplies of it.*/
- size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
- size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
+ size = usb_endpoint_maxp(&ep->desc);
+ size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5f5da4..f3c1c852e401 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1467,6 +1467,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
struct usb_host_endpoint *ep)
{
u16 psize;
+ u16 mult;
switch (dev->speed) {
case USB_SPEED_SUPER:
@@ -1474,7 +1475,8 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
- return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+ mult = usb_endpoint_maxp_mult(&ep->desc);
+ return (psize & 0x07ff) * mult;
case USB_SPEED_WIRELESS:
psize = usb_endpoint_maxp(&ep->desc);
return psize;
@@ -1551,7 +1553,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
u16 psize;
u32 size;
- psize = usb_endpoint_maxp(&ep->desc) & 0x7ff;
+ psize = usb_endpoint_maxp(&ep->desc);
size = stream->ctrl.dwMaxPayloadTransferSize;
stream->bulk.max_payload_size = size;
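The usbtv and uvcvideo hunks replace the open-coded '(size >> 11) & 3' arithmetic with usb_endpoint_maxp_mult(), which returns the 1..3 transactions-per-microframe multiplier encoded in bits 12:11 of wMaxPacketSize for high-bandwidth endpoints. A small sketch of the resulting per-microframe byte count; my_endpoint_bytes_per_uframe() is a hypothetical wrapper:

#include <linux/usb.h>

/* Bytes a high-speed, high-bandwidth isochronous endpoint can move per
 * microframe: base packet size times the transaction multiplier. */
static unsigned int my_endpoint_bytes_per_uframe(struct usb_host_endpoint *ep)
{
	unsigned int psize = usb_endpoint_maxp(&ep->desc);
	unsigned int mult = usb_endpoint_maxp_mult(&ep->desc);

	return (psize & 0x07ff) * mult;
}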
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 1db0af6c7f94..ba63ca57ed7e 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,
- vma->vm_start, vma->vm_end);
+ vmf->address, vma->vm_start, vma->vm_end);
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
- clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+ clear_user_highpage(page, vmf->address);
vmf->page = page;
return 0;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index aacf584f2a42..f3512404bc52 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2006,7 +2006,7 @@ static int msb_prepare_req(struct request_queue *q, struct request *req)
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c1472275fe57..fa0746d182ff 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -834,7 +834,7 @@ static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
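The memstick hunks follow the block-layer change that split request-only flags out of req->cmd_flags into the separate req->rq_flags namespace, so DONTPREP is now spelled RQF_DONTPREP. A sketch of a prep_rq_fn under that API, assuming the 4.10-era block layer where REQ_TYPE_FS and the BLKPREP_* return codes still exist:

#include <linux/blkdev.h>

/* Minimal prep_rq_fn sketch: mark a filesystem request as prepared once. */
static int my_prepare_req(struct request_queue *q, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)	/* only plain fs requests */
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;		/* request-only flag namespace */
	return BLKPREP_OK;
}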
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed16b4df..1e73064b0fb2 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c